author     alk3pInjection <webmaster@raspii.tech>  2024-02-04 16:16:35 +0800
committer  alk3pInjection <webmaster@raspii.tech>  2024-02-04 16:16:35 +0800
commit     abdaadbcae30fe0c9a66c7516798279fdfd97750 (patch)
tree       00a54a6e25601e43876d03c1a4a12a749d4a914c /lib/gcc

Import stripped Arm GNU Toolchain 13.2.Rel1 (HEAD, umineko)
https://developer.arm.com/downloads/-/arm-gnu-toolchain-downloads

Change-Id: I7303388733328cd98ab9aa3c30236db67f2e9e9c
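The per-architecture subdirectories in the diffstat below (arm/v5te, thumb/v6-m, thumb/v7-m, thumb/v7e-m+fp, ...) are GCC multilib variants: one copy of the startup objects (crt*.o), libgcc.a, libgcov.a, and the Fortran finclude/ modules per instruction set and float ABI (hard, softfp, nofp). As a minimal sketch, assuming the imported toolchain's driver is on PATH (the driver itself lives outside this lib/gcc subtree), the flags passed to arm-none-eabi-gcc select the matching variant; the MMIO address in the example is hypothetical:

    /* Minimal bare-metal translation unit, sketching how the multilibs
     * listed below get used. A build such as
     *
     *     arm-none-eabi-gcc -mthumb -march=armv7-m -mfloat-abi=soft -c blink.c
     *
     * is resolved against one multilib directory (here thumb/v7-m/nofp);
     * `arm-none-eabi-gcc -print-multi-directory <same flags>` reports which
     * variant a given flag set selects. */
    #include <stdint.h>

    /* Hypothetical memory-mapped output register, not part of this import. */
    #define GPIO_OUT (*(volatile uint32_t *)0x40000000u)

    void toggle_led(void)
    {
        /* Flip bit 0; at link time the crt*.o and libgcc.a from the matching
         * multilib directory are what gets pulled in. */
        GPIO_OUT ^= 1u;
    }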
Diffstat (limited to 'lib/gcc')
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtbegin.o  bin  0 -> 2992 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtend.o  bin  0 -> 976 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtfastmath.o  bin  0 -> 1060 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crti.o  bin  0 -> 772 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtn.o  bin  0 -> 728 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/libcaf_single.a  bin  0 -> 44178 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/libgcc.a  bin  0 -> 1633232 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/libgcov.a  bin  0 -> 60782 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtbegin.o  bin  0 -> 2992 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtend.o  bin  0 -> 976 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtfastmath.o  bin  0 -> 1060 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crti.o  bin  0 -> 772 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtn.o  bin  0 -> 728 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/libcaf_single.a  bin  0 -> 44178 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/libgcc.a  bin  0 -> 1633796 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/libgcov.a  bin  0 -> 60782 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/crtbegin.o  bin  0 -> 3044 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/crtend.o  bin  0 -> 972 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/crtfastmath.o  bin  0 -> 588 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/crti.o  bin  0 -> 768 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/crtn.o  bin  0 -> 828 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include-fixed/README  14
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/ISO_Fortran_binding.h  321
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/arm_acle.h  772
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/arm_bf16.h  55
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/arm_cde.h  176
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/arm_cmse.h  200
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/arm_fp16.h  255
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/arm_mve.h  42004
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/arm_mve_types.h  1462
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/arm_neon.h  20040
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/float.h  631
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/gcov.h  70
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/iso646.h  45
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/limits.h  208
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/mmintrin.h  1836
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/stdalign.h  40
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/stdarg.h  135
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/stdatomic.h  255
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/stdbool.h  51
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/stddef.h  463
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/stdfix.h  204
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/stdint-gcc.h  369
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/stdint.h  14
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/stdnoreturn.h  35
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/syslimits.h  8
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/tgmath.h  127
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/unwind-arm-common.h  251
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/unwind.h  118
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/include/varargs.h  7
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/install-tools/fixinc_list  1
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/install-tools/gsyslimits.h  8
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/install-tools/include/README  14
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/install-tools/include/limits.h  208
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/install-tools/macro_list  0
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/install-tools/mkheaders.conf  3
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/libcaf_single.a  bin  0 -> 46074 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/libgcc.a  bin  0 -> 1703264 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/libgcov.a  bin  0 -> 62954 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/gtype.state  38925
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ada/gcc-interface/ada-tree.def  83
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/addresses.h  90
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/alias.h  51
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/align.h  83
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/all-tree.def  8
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/alloc-pool.h  576
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ansidecl.h  354
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/arm-cpu.h  182
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/arm-isa.h  682
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/array-traits.h  48
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/asan.h  264
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/attr-fnspec.h  304
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/attribs.h  401
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/auto-host.h  2693
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/auto-profile.h  31
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/b-header-vars  95
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/backend.h  35
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/basic-block.h  642
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/bb-reorder.h  40
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/bitmap.h  1089
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtin-attrs.def  427
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtin-types.def  1062
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtins.def  1190
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtins.h  160
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/bversion.h  4
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-common.def  96
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-common.h  1587
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-objc.h  183
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-pragma.h  286
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-pretty-print.h  142
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-tree.h  911
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/calls.h  138
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ccmp.h  25
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfg-flags.def  191
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfg.h  186
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfganal.h  90
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgbuild.h  28
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgcleanup.h  35
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgexpand.h  28
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfghooks.h  289
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgloop.h  936
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgloopmanip.h  63
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgrtl.h  61
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cgraph.h  3576
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cif-code.def  144
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/collect-utils.h  51
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/collect2-aix.h  306
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/collect2.h  39
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/color-macros.h  108
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/conditions.h  69
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config.h  10
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/aarch-common-protos.h  168
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/aarch-common.h  73
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/aout.h  302
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-flags.h  35
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-mlib.h  22
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-opts.h  78
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-protos.h  601
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm.h  2544
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/bpabi.h  135
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/elf.h  152
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/unknown-elf.h  96
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/elfos.h  484
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/initfini-array.h  45
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/newlib-stdint.h  69
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/vxworks-dummy.h  48
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/configargs.h  7
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/context.h  69
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/convert.h  45
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/coretypes.h  495
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/coroutine-builtins.def  53
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/coverage.h  61
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/contracts.h  305
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cp-trait.def  108
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cp-tree.def  600
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cp-tree.h  8844
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cxx-pretty-print.h  117
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/name-lookup.h  502
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/operators.def  163
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/type-utils.h  54
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cppbuiltin.h  33
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cppdefault.h  76
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cpplib.h  1605
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/cselib.h  143
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ctfc.h  450
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/d/d-tree.def  29
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/data-streamer.h  349
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/dbgcnt.def  217
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/dbgcnt.h  40
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/dce.h  27
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ddg.h  182
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/debug.h  281
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/defaults.h  1464
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/df.h  1253
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/dfp.h  50
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-client-data-hooks.h  105
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-color.h  65
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-core.h  128
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-event-id.h  61
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-metadata.h  85
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-path.h  234
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-spec.h  142
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-url.h  52
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic.def  55
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic.h  622
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/digraph.h  246
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/dojump.h  82
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/dominance.h  94
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/domwalk.h  115
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/double-int.h  470
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/dump-context.h  305
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/dumpfile.h  774
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/dwarf2asm.h  100
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/dwarf2ctf.h  55
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/dwarf2out.h  470
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/edit-context.h  67
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/emit-rtl.h  548
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/errors.h  40
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/escaped_string.h  43
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/et-forest.h  85
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/except.h  334
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/explow.h  143
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/expmed.h  728
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/expr.h  364
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/fibonacci_heap.h  684
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/file-find.h  47
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/file-prefix-map.h  31
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/filenames.h  100
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/fixed-value.h  111
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/flag-types.h  508
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/flags.h  117
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/fold-const-call.h  28
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/fold-const.h  276
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/function-abi.h  320
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/function.h  728
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc-plugin.h  47
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc-rich-location.h  226
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc-symtab.h  28
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc.h  100
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcov-counter.def  51
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcov-io.h  394
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcse-common.h  47
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcse.h  45
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/generic-match.h  33
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gengtype.h  521
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/genrtl.h  1678
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gensupport.h  228
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ggc-internal.h  115
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ggc.h  371
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-array-bounds.h  49
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-builder.h  36
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-expr.h  179
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-fold.h  281
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-iterator.h  415
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-low.h  28
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-match.h  338
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-predicate-analysis.h  175
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-predict.h  91
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-pretty-print.h  41
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-cache.h  121
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-edge.h  58
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-fold.h  173
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-gori.h  229
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-infer.h  86
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-op.h  55
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-path.h  115
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-trace.h  78
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range.h  103
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-ssa-warn-access.h  48
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-ssa-warn-restrict.h  29
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-ssa.h  201
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-streamer.h  34
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-walk.h  101
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple.def  413
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple.h  6911
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimplify-me.h  37
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimplify.h  92
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/glimits.h  163
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gomp-constants.h  354
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/graph.h  27
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/graphds.h  69
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/graphite.h  452
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/graphviz.h  59
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gsstruct.def  54
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gsyms.h  97
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gsyslimits.h  8
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gtm-builtins.def  212
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/gtype-desc.h  3853
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/hard-reg-set.h  527
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-map-traits.h  194
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-map.h  388
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-set.h  217
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-table.h  1321
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-traits.h  471
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/hashtab.h  207
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/highlev-plugin-common.h  33
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/hooks.h  137
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/hosthooks-def.h  51
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/hosthooks.h  50
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/hw-doloop.h  160
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/hwint.h  378
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ifcvt.h  121
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/inchash.h  211
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/incpath.h  46
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/input.h  292
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-addr.h  63
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-attr-common.h  66
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-attr.h  370
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-codes.h  5598
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-config.h  20
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-constants.h  1214
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-flags.h  11510
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-modes-inline.h  605
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-modes.h  834
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-notes.def  98
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/int-vector-builder.h  93
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/internal-fn.def  472
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/internal-fn.h  260
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/intl.h  73
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-fnsummary.h  453
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-icf-gimple.h  296
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-icf.h  680
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-inline.h  134
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-modref-tree.h  766
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-modref.h  131
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-param-manipulation.h  445
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-predicate.h  273
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-prop.h  1204
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-ref.h  139
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-reference.h  31
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-utils.h  286
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ira-int.h  1711
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ira.h  245
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/is-a.h  284
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/iterator-utils.h  203
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/json.h  200
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/langhooks-def.h  400
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/langhooks.h  674
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/lcm.h  34
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/libfuncs.h  84
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/libiberty.h  761
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/limitx.h  35
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/limity.h  10
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/line-map.h  2152
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/logical-location.h  72
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/loop-unroll.h  27
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/lower-subreg.h  60
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/lra-int.h  528
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/lra.h  42
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/lto-compress.h  43
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/lto-section-names.h  41
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/lto-streamer.h  1248
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/m2/m2-tree.def  24
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/machmode.def  284
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/machmode.h  1264
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/make-unique.h  44
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/md5.h  160
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/mem-stats-traits.h  41
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/mem-stats.h  658
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/memmodel.h  116
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/memory-block.h  84
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/mode-classes.def  40
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/mux-utils.h  251
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/objc/objc-tree.def  76
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/obstack-utils.h  86
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/obstack.h  535
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-builtins.def  472
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-expand.h  32
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-general.h  155
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-low.h  31
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-offload.h  35
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-simd-clone.h  26
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/opt-problem.h  289
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/opt-suggestions.h  71
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs-libfuncs.h  79
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs-query.h  216
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs-tree.h  51
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs.def  478
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs.h  387
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/optinfo-emit-json.h  60
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/optinfo.h  170
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/options.h  11279
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/opts-diagnostic.h  28
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/opts-jobserver.h  62
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/opts.h  566
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ordered-hash-map.h  188
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/output.h  631
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/pass-instances.def  563
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/pass_manager.h  148
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/passes.def  540
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin-api.h  605
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin-version.h  18
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin.def  112
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin.h  208
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/pointer-query.h  297
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/poly-int-types.h  103
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/poly-int.h  2748
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/predict.def  238
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/predict.h  111
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/prefix.h  40
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/pretty-print.h  443
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/print-rtl.h  165
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/print-tree.h  50
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/profile-count.h  1294
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/profile.h  80
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/range-op.h  318
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/range.h  58
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/read-md.h  408
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/read-rtl-function.h  28
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/real.h  559
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/realmpfr.h  35
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/recog.h  565
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/reg-notes.def  254
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/regcprop.h  25
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/regrename.h  111
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/regs.h  392
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/regset.h  123
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/reload.h  466
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/resource.h  55
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl-error.h  31
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl-iter.h  292
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl-ssa.h  71
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl.def  1368
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl.h  4623
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtlanal.h  343
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtlhash.h  31
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtlhooks-def.h  48
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtx-vector-builder.h  125
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/run-rtl-passes.h  25
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/safe-ctype.h  150
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/sanitizer.def  669
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/sbitmap.h  321
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/sched-int.h  1687
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/sel-sched-dump.h  233
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/sel-sched-ir.h  1674
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/sel-sched.h  27
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/selftest-diagnostic.h  49
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/selftest-rtl.h  100
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/selftest.h  492
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/sese.h  310
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/shortest-paths.h  215
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/shrink-wrap.h  34
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/signop.h  33
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/sparseset.h  218
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/spellcheck-tree.h  51
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/spellcheck.h  229
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/splay-tree-utils.h  491
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/splay-tree.h  165
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/sreal.h  285
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ssa-iterators.h  1013
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ssa.h  34
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/statistics.h  71
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/stmt.h  53
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/stor-layout.h  117
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/streamer-hooks.h  92
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/stringpool.h  43
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/substring-locations.h  126
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/symbol-summary.h  1013
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/symtab-clones.h  77
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/symtab-thunks.h  173
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/symtab.h  106
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/sync-builtins.def  614
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/system.h  1334
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-def.h  125
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-globals.h  95
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-hooks-macros.h  80
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-insns.def  108
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/target.def  7143
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/target.h  321
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/targhooks.h  303
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/timevar.def  346
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/timevar.h  301
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tm-preds.h  414
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tm.h  37
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tm_p.h  9
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/toplev.h  102
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tracer.h  26
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/trans-mem.h  52
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-affine.h  129
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-cfg.h  134
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-cfgcleanup.h  32
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-check.h  380
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-chrec.h  253
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-core.h  2389
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-data-ref.h  792
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-dfa.h  45
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-diagnostic.h  68
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-dump.h  92
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-eh.h  58
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-hash-traits.h  44
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-hasher.h  66
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-if-conv.h  24
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-inline.h  256
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-into-ssa.h  53
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-iterator.h  150
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-logical-location.h  67
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-nested.h  89
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-object-size.h  38
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-outof-ssa.h  82
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-parloops.h  25
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-pass.h  685
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-phinodes.h  68
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-pretty-print.h  60
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-scalar-evolution.h  74
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-sra.h  31
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-address.h  45
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-alias-compare.h  43
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-alias.h  211
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-ccp.h  29
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-coalesce.h  26
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-dce.h  22
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-dom.h  25
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-dse.h  37
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-live.h  331
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop-ivopts.h  37
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop-manip.h  56
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop-niter.h  64
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop.h  84
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-math-opts.h  26
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-operands.h  122
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-propagate.h  123
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-reassoc.h  48
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-sccvn.h  316
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-scopedtables.h  212
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-strlen.h  43
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-ter.h  26
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-threadedge.h  134
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-threadupdate.h  150
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa.h  118
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssanames.h  140
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-stdarg.h  36
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-streamer.h  122
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-switch-conversion.h  927
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-vector-builder.h  145
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-vectorizer.h  2586
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-vrp.h  45
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree.def  1518
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree.h  6717
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/treestruct.def  71
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tristate.h  85
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tsan.h  26
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/tsystem.h  137
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/typeclass.h  43
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/typed-splay-tree.h  652
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/ubsan.h  70
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/valtrack.h  139
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-pointer-equiv.h  62
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-prof.h  120
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-query.h  150
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-range-pretty-print.h  40
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-range-storage.h  233
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-range.h  1487
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-relation.h  523
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/varasm.h  84
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/vec-perm-indices.h  153
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/vec.h  2386
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/vector-builder.h  612
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/version.h  26
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/vmsdbg.h  249
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/vr-values.h  85
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/vtable-verify.h  143
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/wide-int-bitmask.h  143
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/wide-int-print.h  38
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/wide-int.h  3513
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/plugin/include/xcoff.h  40
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtbegin.o  bin  0 -> 2976 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtend.o  bin  0 -> 972 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtfastmath.o  bin  0 -> 588 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crti.o  bin  0 -> 752 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtn.o  bin  0 -> 716 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/libcaf_single.a  bin  0 -> 35994 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/libgcc.a  bin  0 -> 1640944 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/libgcov.a  bin  0 -> 58186 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtbegin.o  bin  0 -> 2912 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtend.o  bin  0 -> 972 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtfastmath.o  bin  0 -> 588 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crti.o  bin  0 -> 756 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtn.o  bin  0 -> 720 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/libcaf_single.a  bin  0 -> 35882 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/libgcc.a  bin  0 -> 1638760 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/libgcov.a  bin  0 -> 57806 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtbegin.o  bin  0 -> 2964 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtend.o  bin  0 -> 976 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtfastmath.o  bin  0 -> 1060 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crti.o  bin  0 -> 752 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtn.o  bin  0 -> 716 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/libcaf_single.a  bin  0 -> 36002 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/libgcc.a  bin  0 -> 1611082 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/libgcov.a  bin  0 -> 58622 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtbegin.o  bin  0 -> 2960 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtend.o  bin  0 -> 972 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtfastmath.o  bin  0 -> 1056 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crti.o  bin  0 -> 752 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtn.o  bin  0 -> 716 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/libcaf_single.a  bin  0 -> 35998 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/libgcc.a  bin  0 -> 1604630 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/libgcov.a  bin  0 -> 58510 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtbegin.o  bin  0 -> 2968 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtend.o  bin  0 -> 980 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtfastmath.o  bin  0 -> 1064 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crti.o  bin  0 -> 760 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtn.o  bin  0 -> 724 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/libcaf_single.a  bin  0 -> 36006 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/libgcc.a  bin  0 -> 1618354 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/libgcov.a  bin  0 -> 62682 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtbegin.o  bin  0 -> 2968 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtend.o  bin  0 -> 980 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtfastmath.o  bin  0 -> 1064 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crti.o  bin  0 -> 760 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtn.o  bin  0 -> 724 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/libcaf_single.a  bin  0 -> 36006 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/libgcc.a  bin  0 -> 1618662 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/libgcov.a  bin  0 -> 62682 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtbegin.o  bin  0 -> 2972 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtend.o  bin  0 -> 984 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtfastmath.o  bin  0 -> 1068 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crti.o  bin  0 -> 760 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtn.o  bin  0 -> 724 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/libcaf_single.a  bin  0 -> 36170 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/libgcc.a  bin  0 -> 1624962 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/libgcov.a  bin  0 -> 62902 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtbegin.o  bin  0 -> 2968 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtend.o  bin  0 -> 980 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtfastmath.o  bin  0 -> 1064 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crti.o  bin  0 -> 760 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtn.o  bin  0 -> 724 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/libcaf_single.a  bin  0 -> 36166 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/libgcc.a  bin  0 -> 1618510 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/libgcov.a  bin  0 -> 62790 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtbegin.o  bin  0 -> 2964 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtend.o  bin  0 -> 976 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtfastmath.o  bin  0 -> 592 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crti.o  bin  0 -> 756 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtn.o  bin  0 -> 720 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/libcaf_single.a  bin  0 -> 36214 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/libgcc.a  bin  0 -> 1627714 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/libgcov.a  bin  0 -> 62494 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtbegin.o  bin  0 -> 2904 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtend.o  bin  0 -> 976 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtfastmath.o  bin  0 -> 592 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crti.o  bin  0 -> 756 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtn.o  bin  0 -> 720 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/libcaf_single.a  bin  0 -> 34902 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/libgcc.a  bin  0 -> 1628694 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/libgcov.a  bin  0 -> 57538 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtbegin.o  bin  0 -> 2972 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtend.o  bin  0 -> 984 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtfastmath.o  bin  0 -> 1068 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crti.o  bin  0 -> 760 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtn.o  bin  0 -> 724 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/libcaf_single.a  bin  0 -> 35822 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/libgcc.a  bin  0 -> 1637098 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/libgcov.a  bin  0 -> 62778 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtbegin.o  bin  0 -> 2968 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtend.o  bin  0 -> 980 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtfastmath.o  bin  0 -> 1064 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crti.o  bin  0 -> 760 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtn.o  bin  0 -> 724 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/libcaf_single.a  bin  0 -> 35818 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/libgcc.a  bin  0 -> 1630422 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/libgcov.a  bin  0 -> 62666 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtbegin.o  bin  0 -> 2960 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtend.o  bin  0 -> 972 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtfastmath.o  bin  0 -> 588 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crti.o  bin  0 -> 752 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtn.o  bin  0 -> 716 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/libcaf_single.a  bin  0 -> 36214 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/libgcc.a  bin  0 -> 1620610 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/libgcov.a  bin  0 -> 58446 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtbegin.o  bin  0 -> 2908 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtend.o  bin  0 -> 980 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtfastmath.o  bin  0 -> 1064 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crti.o  bin  0 -> 756 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtn.o  bin  0 -> 720 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/libcaf_single.a  bin  0 -> 34474 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/libgcc.a  bin  0 -> 1618382 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/libgcov.a  bin  0 -> 57730 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtbegin.o  bin  0 -> 2904 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtend.o  bin  0 -> 976 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtfastmath.o  bin  0 -> 1060 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crti.o  bin  0 -> 756 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtn.o  bin  0 -> 720 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/libcaf_single.a  bin  0 -> 34470 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/libgcc.a  bin  0 -> 1611922 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/libgcov.a  bin  0 -> 57618 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtbegin.o  bin  0 -> 2908 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtend.o  bin  0 -> 980 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtfastmath.o  bin  0 -> 1064 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crti.o  bin  0 -> 760 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtn.o  bin  0 -> 724 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/libcaf_single.a  bin  0 -> 34782 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/libgcc.a  bin  0 -> 1629454 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/libgcov.a  bin  0 -> 57730 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtbegin.o  bin  0 -> 2908 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtend.o  bin  0 -> 980 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtfastmath.o  bin  0 -> 1064 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crti.o  bin  0 -> 760 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtn.o  bin  0 -> 724 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/libcaf_single.a  bin  0 -> 34782 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/libgcc.a  bin  0 -> 1629722 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/libgcov.a  bin  0 -> 57730 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtbegin.o  bin  0 -> 2904 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtend.o  bin  0 -> 976 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtfastmath.o  bin  0 -> 592 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crti.o  bin  0 -> 756 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtn.o  bin  0 -> 720 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/libcaf_single.a  bin  0 -> 34794 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/libgcc.a  bin  0 -> 1628134 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/libgcov.a  bin  0 -> 57558 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtbegin.o  bin  0 -> 2976 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtend.o  bin  0 -> 988 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtfastmath.o  bin  0 -> 1072 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crti.o  bin  0 -> 768 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtn.o  bin  0 -> 732 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/libcaf_single.a  bin  0 -> 35438 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/libgcc.a  bin  0 -> 1633410 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/libgcov.a  bin  0 -> 63014 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtbegin.o  bin  0 -> 2976 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtend.o  bin  0 -> 988 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtfastmath.o  bin  0 -> 1072 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crti.o  bin  0 -> 768 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtn.o  bin  0 -> 732 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/libcaf_single.a  bin  0 -> 35438 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/libgcc.a  bin  0 -> 1633878 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/libgcov.a  bin  0 -> 63014 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtbegin.o  bin  0 -> 2976 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtend.o  bin  0 -> 988 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtfastmath.o  bin  0 -> 1072 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crti.o  bin  0 -> 764 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtn.o  bin  0 -> 728 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/libcaf_single.a  bin  0 -> 35426 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/libgcc.a  bin  0 -> 1633886 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/libgcov.a  bin  0 -> 63030 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtbegin.o  bin  0 -> 2972 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtend.o  bin  0 -> 984 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtfastmath.o  bin  0 -> 1068 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crti.o  bin  0 -> 764 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtn.o  bin  0 -> 728 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/finclude/ieee_arithmetic.mod  bin  0 -> 6783 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/finclude/ieee_exceptions.mod  bin  0 -> 2213 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/finclude/ieee_features.mod  bin  0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/libcaf_single.a  bin  0 -> 35422 bytes
-rw-r--r--  lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/libgcc.a  bin  0 -> 1627434 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/libgcov.abin0 -> 62918 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtbegin.obin0 -> 2968 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtend.obin0 -> 980 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtfastmath.obin0 -> 596 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crti.obin0 -> 760 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtn.obin0 -> 724 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/libcaf_single.abin0 -> 35562 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/libgcc.abin0 -> 1636674 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/libgcov.abin0 -> 62622 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtbegin.obin0 -> 2964 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtend.obin0 -> 976 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtfastmath.obin0 -> 592 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crti.obin0 -> 760 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtn.obin0 -> 724 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/libcaf_single.abin0 -> 36330 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/libgcc.abin0 -> 1649402 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/libgcov.abin0 -> 58598 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtbegin.obin0 -> 2916 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtend.obin0 -> 984 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtfastmath.obin0 -> 1068 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crti.obin0 -> 760 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtn.obin0 -> 724 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/libcaf_single.abin0 -> 34526 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/libgcc.abin0 -> 1629410 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/libgcov.abin0 -> 57810 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtbegin.obin0 -> 2912 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtend.obin0 -> 980 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtfastmath.obin0 -> 1064 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crti.obin0 -> 760 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtn.obin0 -> 724 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/libcaf_single.abin0 -> 34522 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/libgcc.abin0 -> 1622910 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/libgcov.abin0 -> 57698 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtbegin.obin0 -> 2916 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtend.obin0 -> 984 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtfastmath.obin0 -> 1068 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crti.obin0 -> 764 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtn.obin0 -> 728 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/libcaf_single.abin0 -> 34834 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/libgcc.abin0 -> 1640482 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/libgcov.abin0 -> 57810 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtbegin.obin0 -> 2916 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtend.obin0 -> 984 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtfastmath.obin0 -> 1068 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crti.obin0 -> 764 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtn.obin0 -> 728 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/libcaf_single.abin0 -> 34834 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/libgcc.abin0 -> 1640622 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/libgcov.abin0 -> 57810 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtbegin.obin0 -> 2912 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtend.obin0 -> 980 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtfastmath.obin0 -> 596 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crti.obin0 -> 760 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtn.obin0 -> 724 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/libcaf_single.abin0 -> 34814 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/libgcc.abin0 -> 1639262 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/libgcov.abin0 -> 57622 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtbegin.obin0 -> 2916 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtend.obin0 -> 988 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtfastmath.obin0 -> 604 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crti.obin0 -> 764 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtn.obin0 -> 728 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/libcaf_single.abin0 -> 35538 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/libgcc.abin0 -> 1653202 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/libgcov.abin0 -> 57938 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtbegin.obin0 -> 2964 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtend.obin0 -> 996 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtfastmath.obin0 -> 1084 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crti.obin0 -> 764 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtn.obin0 -> 728 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/libcaf_single.abin0 -> 36814 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/libgcc.abin0 -> 1658702 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/libgcov.abin0 -> 60106 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtbegin.obin0 -> 2960 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtend.obin0 -> 992 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtfastmath.obin0 -> 1080 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crti.obin0 -> 764 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtn.obin0 -> 728 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/libcaf_single.abin0 -> 36810 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/libgcc.abin0 -> 1652438 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/libgcov.abin0 -> 59994 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtbegin.obin0 -> 2964 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtend.obin0 -> 996 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtfastmath.obin0 -> 1084 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crti.obin0 -> 764 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtn.obin0 -> 728 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/libcaf_single.abin0 -> 37118 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/libgcc.abin0 -> 1671618 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/libgcov.abin0 -> 60106 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtbegin.obin0 -> 2964 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtend.obin0 -> 996 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtfastmath.obin0 -> 1084 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crti.obin0 -> 764 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtn.obin0 -> 728 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/libcaf_single.abin0 -> 37118 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/libgcc.abin0 -> 1671774 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/libgcov.abin0 -> 60106 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtbegin.obin0 -> 2964 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtend.obin0 -> 996 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtfastmath.obin0 -> 612 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crti.obin0 -> 764 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtn.obin0 -> 728 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/libcaf_single.abin0 -> 37022 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/libgcc.abin0 -> 1679042 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/libgcov.abin0 -> 60122 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtbegin.obin0 -> 2956 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtend.obin0 -> 988 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtfastmath.obin0 -> 604 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crti.obin0 -> 760 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtn.obin0 -> 724 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/finclude/ieee_arithmetic.modbin0 -> 6783 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/finclude/ieee_exceptions.modbin0 -> 2213 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/finclude/ieee_features.modbin0 -> 1069 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/libcaf_single.abin0 -> 37146 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/libgcc.abin0 -> 1664662 bytes
-rw-r--r--lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/libgcov.abin0 -> 59822 bytes
930 files changed, 311959 insertions, 0 deletions
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtbegin.o
new file mode 100644
index 0000000..4ce18de
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtend.o
new file mode 100644
index 0000000..98d3a27
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtfastmath.o
new file mode 100644
index 0000000..0353e15
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crti.o
new file mode 100644
index 0000000..80efb52
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtn.o
new file mode 100644
index 0000000..08ea7d9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/libcaf_single.a
new file mode 100644
index 0000000..f733ea0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/libgcc.a
new file mode 100644
index 0000000..0dc3ca1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/libgcov.a
new file mode 100644
index 0000000..c5d63e0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtbegin.o
new file mode 100644
index 0000000..198a88d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtend.o
new file mode 100644
index 0000000..a1c1c2d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtfastmath.o
new file mode 100644
index 0000000..68b41ed
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crti.o
new file mode 100644
index 0000000..80efb52
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtn.o
new file mode 100644
index 0000000..08ea7d9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/libcaf_single.a
new file mode 100644
index 0000000..85223a6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/libgcc.a
new file mode 100644
index 0000000..87e7176
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/libgcov.a
new file mode 100644
index 0000000..ac26770
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/arm/v5te/softfp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/crtbegin.o
new file mode 100644
index 0000000..089d69e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/crtend.o
new file mode 100644
index 0000000..5a37ac8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/crtfastmath.o
new file mode 100644
index 0000000..3297223
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/crti.o b/lib/gcc/arm-none-eabi/13.2.1/crti.o
new file mode 100644
index 0000000..40d6dd7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/crtn.o
new file mode 100644
index 0000000..eadbf7a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include-fixed/README b/lib/gcc/arm-none-eabi/13.2.1/include-fixed/README
new file mode 100644
index 0000000..7086a77
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include-fixed/README
@@ -0,0 +1,14 @@
+This README file is copied into the directory for GCC-only header files
+when fixincludes is run by the makefile for GCC.
+
+Many of the files in this directory were automatically edited from the
+standard system header files by the fixincludes process. They are
+system-specific, and will not work on any other kind of system. They
+are also not part of GCC.  We have to do this because GCC
+requires ANSI C headers and many vendors supply ANSI-incompatible
+headers.
+
+Because this is an automated process, sometimes headers get "fixed"
+that do not, strictly speaking, need a fix. As long as nothing is broken
+by the process, it is just an unfortunate collateral inconvenience.
+We would like to rectify it, if it is not "too inconvenient".
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/ISO_Fortran_binding.h b/lib/gcc/arm-none-eabi/13.2.1/include/ISO_Fortran_binding.h
new file mode 100644
index 0000000..ca63f77
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/ISO_Fortran_binding.h
@@ -0,0 +1,321 @@
+/* Declarations for ISO Fortran binding.
+ Copyright (C) 2018-2023 Free Software Foundation, Inc.
+ Contributed by Daniel Celis Garza <celisdanieljr@gmail.com>
+
+This file is part of the GNU Fortran runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+Libgfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef ISO_FORTRAN_BINDING_H
+#define ISO_FORTRAN_BINDING_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>  /* Standard ptrdiff_t and size_t. */
+#include <stdint.h> /* Integer types. */
+
+/* Constants, defined as macros. */
+#define CFI_VERSION 1
+#define CFI_MAX_RANK 15
+
+/* Attributes. */
+#define CFI_attribute_pointer 0
+#define CFI_attribute_allocatable 1
+#define CFI_attribute_other 2
+
+/* Error codes.
+ Note that CFI_FAILURE and CFI_INVALID_STRIDE are specific to GCC
+   and not part of the Fortran standard.  */
+#define CFI_SUCCESS 0
+#define CFI_FAILURE 1
+#define CFI_ERROR_BASE_ADDR_NULL 2
+#define CFI_ERROR_BASE_ADDR_NOT_NULL 3
+#define CFI_INVALID_ELEM_LEN 4
+#define CFI_INVALID_RANK 5
+#define CFI_INVALID_TYPE 6
+#define CFI_INVALID_ATTRIBUTE 7
+#define CFI_INVALID_EXTENT 8
+#define CFI_INVALID_STRIDE 9
+#define CFI_INVALID_DESCRIPTOR 10
+#define CFI_ERROR_MEM_ALLOCATION 11
+#define CFI_ERROR_OUT_OF_BOUNDS 12
+
+/* CFI type definitions. */
+typedef ptrdiff_t CFI_index_t;
+typedef int8_t CFI_rank_t;
+typedef int8_t CFI_attribute_t;
+typedef int16_t CFI_type_t;
+
+/* CFI_dim_t. */
+typedef struct CFI_dim_t
+ {
+ CFI_index_t lower_bound;
+ CFI_index_t extent;
+ CFI_index_t sm;
+ }
+CFI_dim_t;
+
+/* CFI_cdesc_t.  C descriptors are cast to this structure as follows:
+ CFI_CDESC_T(CFI_MAX_RANK) foo;
+ CFI_cdesc_t * bar = (CFI_cdesc_t *) &foo;
+ */
+typedef struct CFI_cdesc_t
+ {
+ void *base_addr;
+ size_t elem_len;
+ int version;
+ CFI_rank_t rank;
+ CFI_attribute_t attribute;
+ CFI_type_t type;
+ CFI_dim_t dim[];
+ }
+CFI_cdesc_t;
+
+/* CFI_CDESC_T with an explicit type. */
+#define CFI_CDESC_TYPE_T(r, base_type) \
+ struct { \
+ base_type *base_addr; \
+ size_t elem_len; \
+ int version; \
+ CFI_rank_t rank; \
+ CFI_attribute_t attribute; \
+ CFI_type_t type; \
+ CFI_dim_t dim[r]; \
+ }
+#define CFI_CDESC_T(r) CFI_CDESC_TYPE_T (r, void)
+
+/* CFI function declarations. */
+extern void *CFI_address (const CFI_cdesc_t *, const CFI_index_t []);
+extern int CFI_allocate (CFI_cdesc_t *, const CFI_index_t [], const CFI_index_t [],
+ size_t);
+extern int CFI_deallocate (CFI_cdesc_t *);
+extern int CFI_establish (CFI_cdesc_t *, void *, CFI_attribute_t, CFI_type_t, size_t,
+ CFI_rank_t, const CFI_index_t []);
+extern int CFI_is_contiguous (const CFI_cdesc_t *);
+extern int CFI_section (CFI_cdesc_t *, const CFI_cdesc_t *, const CFI_index_t [],
+ const CFI_index_t [], const CFI_index_t []);
+extern int CFI_select_part (CFI_cdesc_t *, const CFI_cdesc_t *, size_t, size_t);
+extern int CFI_setpointer (CFI_cdesc_t *, CFI_cdesc_t *, const CFI_index_t []);
+
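A minimal usage sketch of these entry points (the variable names are
illustrative, and linking against libgfortran's implementations is
assumed): wrap a C array in a descriptor with CFI_establish, then
translate subscripts into addresses with CFI_address.

    #include "ISO_Fortran_binding.h"
    #include <stdio.h>

    int main (void)
    {
      double data[4] = { 1.0, 2.0, 3.0, 4.0 };
      CFI_CDESC_T (1) desc;                     /* storage for a rank-1 descriptor */
      CFI_cdesc_t *dv = (CFI_cdesc_t *) &desc;  /* cast as shown above */
      CFI_index_t extents[1] = { 4 };

      /* Describe the C array; CFI_establish gives it zero lower bounds.  */
      if (CFI_establish (dv, data, CFI_attribute_other, CFI_type_double,
                         sizeof (double), 1, extents) != CFI_SUCCESS)
        return 1;

      /* Translate subscript 2 into a C address and dereference it.  */
      CFI_index_t sub[1] = { 2 };
      printf ("%f\n", *(double *) CFI_address (dv, sub));  /* prints 3.000000 */
      return 0;
    }
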
+/* Types and kind numbers.  A bitwise AND with CFI_type_mask reveals the
+   intrinsic type of a kind type, and inverting the bit-shift equation
+   recovers the kind parameter.
+ CFI_type_kind_shift = 8
+ CFI_intrinsic_type = 0 0 0 0 0 0 0 0 0 0 1 0
+ CFI_type_kind = 0 0 0 0 0 0 0 0 1 0 0 0
+ CFI_type_example = CFI_intrinsic_type + (CFI_type_kind << CFI_type_kind_shift)
+ Defining the CFI_type_example.
+ CFI_type_kind = 0 0 0 0 0 0 0 0 1 0 0 0 << CFI_type_kind_shift
+ -------------------------
+ 1 0 0 0 0 0 0 0 0 0 0 0 +
+ CFI_intrinsic_type = 0 0 0 0 0 0 0 0 0 0 1 0
+ -------------------------
+ CFI_type_example = 1 0 0 0 0 0 0 0 0 0 1 0
+ Finding the intrinsic type with the logical mask.
+ CFI_type_example = 1 0 0 0 0 0 0 0 0 0 1 0 &
+ CFI_type_mask = 0 0 0 0 1 1 1 1 1 1 1 1
+ -------------------------
+ CFI_intrinsic_type = 0 0 0 0 0 0 0 0 0 0 1 0
+ Using the intrinsic type and kind shift to find the kind value of the type.
+ CFI_type_kind = (CFI_type_example - CFI_intrinsic_type) >> CFI_type_kind_shift
+ CFI_type_example = 1 0 0 0 0 0 0 0 0 0 1 0 -
+ CFI_intrinsic_type = 0 0 0 0 0 0 0 0 0 0 1 0
+ -------------------------
+ 1 0 0 0 0 0 0 0 0 0 0 0 >> CFI_type_kind_shift
+ -------------------------
+ CFI_type_kind = 0 0 0 0 0 0 0 0 1 0 0 0
+ */
+#define CFI_type_mask 0xFF
+#define CFI_type_kind_shift 8
+
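Concretely, the mask and shift invert as in this small sketch (the helper
names are made up for illustration and are not part of the header):

    /* Recover the intrinsic type and the kind parameter from a CFI_type_t,
       following the scheme worked through above.  */
    static inline int cfi_intrinsic_type (CFI_type_t t)
    {
      return t & CFI_type_mask;
    }

    static inline int cfi_kind (CFI_type_t t)
    {
      return (t - (t & CFI_type_mask)) >> CFI_type_kind_shift;
    }
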
+/* Intrinsic types. Their kind number defines their storage size. */
+#define CFI_type_Integer 1
+#define CFI_type_Logical 2
+#define CFI_type_Real 3
+#define CFI_type_Complex 4
+#define CFI_type_Character 5
+
+/* Types with no kind. */
+#define CFI_type_struct 6
+#define CFI_type_cptr 7
+#define CFI_type_cfunptr 8
+#define CFI_type_other -1
+
+/* Types with kind parameter.
+ The kind parameter represents the type's byte size. The exception is
+   real kind = 10, which is stored in 128 bits but has 80-bit precision.
+ Complex variables are double the byte size of their real counterparts.
+ The ucs4_char matches wchar_t if sizeof (wchar_t) == 4.
+ */
+#define CFI_type_char (CFI_type_Character + (1 << CFI_type_kind_shift))
+#define CFI_type_ucs4_char (CFI_type_Character + (4 << CFI_type_kind_shift))
+
+/* C-Fortran Interoperability types. */
+#define CFI_type_signed_char (CFI_type_Integer + (sizeof (char) << CFI_type_kind_shift))
+#define CFI_type_short (CFI_type_Integer + (sizeof (short) << CFI_type_kind_shift))
+#define CFI_type_int (CFI_type_Integer + (sizeof (int) << CFI_type_kind_shift))
+#define CFI_type_long (CFI_type_Integer + (sizeof (long) << CFI_type_kind_shift))
+#define CFI_type_long_long (CFI_type_Integer + (sizeof (long long) << CFI_type_kind_shift))
+#define CFI_type_size_t (CFI_type_Integer + (sizeof (size_t) << CFI_type_kind_shift))
+#define CFI_type_int8_t (CFI_type_Integer + (sizeof (int8_t) << CFI_type_kind_shift))
+#define CFI_type_int16_t (CFI_type_Integer + (sizeof (int16_t) << CFI_type_kind_shift))
+#define CFI_type_int32_t (CFI_type_Integer + (sizeof (int32_t) << CFI_type_kind_shift))
+#define CFI_type_int64_t (CFI_type_Integer + (sizeof (int64_t) << CFI_type_kind_shift))
+#define CFI_type_int_least8_t (CFI_type_Integer + (sizeof (int_least8_t) << CFI_type_kind_shift))
+#define CFI_type_int_least16_t (CFI_type_Integer + (sizeof (int_least16_t) << CFI_type_kind_shift))
+#define CFI_type_int_least32_t (CFI_type_Integer + (sizeof (int_least32_t) << CFI_type_kind_shift))
+#define CFI_type_int_least64_t (CFI_type_Integer + (sizeof (int_least64_t) << CFI_type_kind_shift))
+#define CFI_type_int_fast8_t (CFI_type_Integer + (sizeof (int_fast8_t) << CFI_type_kind_shift))
+#define CFI_type_int_fast16_t (CFI_type_Integer + (sizeof (int_fast16_t) << CFI_type_kind_shift))
+#define CFI_type_int_fast32_t (CFI_type_Integer + (sizeof (int_fast32_t) << CFI_type_kind_shift))
+#define CFI_type_int_fast64_t (CFI_type_Integer + (sizeof (int_fast64_t) << CFI_type_kind_shift))
+#define CFI_type_intmax_t (CFI_type_Integer + (sizeof (intmax_t) << CFI_type_kind_shift))
+#define CFI_type_intptr_t (CFI_type_Integer + (sizeof (intptr_t) << CFI_type_kind_shift))
+#define CFI_type_ptrdiff_t (CFI_type_Integer + (sizeof (ptrdiff_t) << CFI_type_kind_shift))
+#define CFI_type_Bool (CFI_type_Logical + (sizeof (_Bool) << CFI_type_kind_shift))
+#define CFI_type_float (CFI_type_Real + (sizeof (float) << CFI_type_kind_shift))
+#define CFI_type_double (CFI_type_Real + (sizeof (double) << CFI_type_kind_shift))
+#define CFI_type_float_Complex (CFI_type_Complex + (sizeof (float) << CFI_type_kind_shift))
+#define CFI_type_double_Complex (CFI_type_Complex + (sizeof (double) << CFI_type_kind_shift))
+
+/* If GCC supports int128_t on this target, it predefines
+ __SIZEOF_INT128__ to 16. */
+#if defined(__SIZEOF_INT128__)
+#if (__SIZEOF_INT128__ == 16)
+#define CFI_type_int128_t (CFI_type_Integer + (16 << CFI_type_kind_shift))
+#define CFI_type_int_least128_t (CFI_type_Integer + (16 << CFI_type_kind_shift))
+#define CFI_type_int_fast128_t (CFI_type_Integer + (16 << CFI_type_kind_shift))
+#else
+#error "Can't determine kind of int128_t"
+#endif
+#else
+#define CFI_type_int128_t -2
+#define CFI_type_int_least128_t -2
+#define CFI_type_int_fast128_t -2
+#endif
+
+/* The situation with long double support is more complicated; we need to
+ examine the type in more detail to figure out its kind.
+ GCC and some other compilers predefine the __LDBL* macros; otherwise
+ get the parameters we need from float.h. */
+
+#if (defined (__LDBL_MANT_DIG__) \
+ && defined (__LDBL_MIN_EXP__) \
+ && defined (__LDBL_MAX_EXP__) \
+ && defined (__DBL_MANT_DIG__) \
+ && defined (__DBL_MIN_EXP__) \
+ && defined (__DBL_MAX_EXP__))
+#define __CFI_LDBL_MANT_DIG__ __LDBL_MANT_DIG__
+#define __CFI_LDBL_MIN_EXP__ __LDBL_MIN_EXP__
+#define __CFI_LDBL_MAX_EXP__ __LDBL_MAX_EXP__
+#define __CFI_DBL_MANT_DIG__ __DBL_MANT_DIG__
+#define __CFI_DBL_MIN_EXP__ __DBL_MIN_EXP__
+#define __CFI_DBL_MAX_EXP__ __DBL_MAX_EXP__
+
+#else
+#include <float.h>
+
+#if (defined (LDBL_MANT_DIG) \
+ && defined (LDBL_MIN_EXP) \
+ && defined (LDBL_MAX_EXP) \
+ && defined (DBL_MANT_DIG) \
+ && defined (DBL_MIN_EXP) \
+ && defined (DBL_MAX_EXP))
+#define __CFI_LDBL_MANT_DIG__ LDBL_MANT_DIG
+#define __CFI_LDBL_MIN_EXP__ LDBL_MIN_EXP
+#define __CFI_LDBL_MAX_EXP__ LDBL_MAX_EXP
+#define __CFI_DBL_MANT_DIG__ DBL_MANT_DIG
+#define __CFI_DBL_MIN_EXP__ DBL_MIN_EXP
+#define __CFI_DBL_MAX_EXP__ DBL_MAX_EXP
+
+#else
+#define CFI_no_long_double 1
+
+#endif /* Definitions from float.h. */
+#endif /* Definitions from compiler builtins. */
+
+/* Can't determine anything about long double support? */
+#if (defined (CFI_no_long_double))
+#define CFI_type_long_double -2
+#define CFI_type_long_double_Complex -2
+
+/* Long double is the same kind as double. */
+#elif (__CFI_LDBL_MANT_DIG__ == __CFI_DBL_MANT_DIG__ \
+ && __CFI_LDBL_MIN_EXP__ == __CFI_DBL_MIN_EXP__ \
+ && __CFI_LDBL_MAX_EXP__ == __CFI_DBL_MAX_EXP__)
+#define CFI_type_long_double CFI_type_double
+#define CFI_type_long_double_Complex CFI_type_double_Complex
+
+/* This is the 80-bit encoding on x86; Fortran assigns it kind 10. */
+#elif ((__CFI_LDBL_MANT_DIG__ == 64 || __CFI_LDBL_MANT_DIG__ == 53) \
+ && __CFI_LDBL_MIN_EXP__ == -16381 \
+ && __CFI_LDBL_MAX_EXP__ == 16384)
+#define CFI_type_long_double (CFI_type_Real + (10 << CFI_type_kind_shift))
+#define CFI_type_long_double_Complex (CFI_type_Complex + (10 << CFI_type_kind_shift))
+
+/* This is the 96-bit encoding on m68k; Fortran assigns it kind 10. */
+#elif (__CFI_LDBL_MANT_DIG__ == 64 \
+ && __CFI_LDBL_MIN_EXP__ == -16382 \
+ && __CFI_LDBL_MAX_EXP__ == 16384)
+#define CFI_type_long_double (CFI_type_Real + (10 << CFI_type_kind_shift))
+#define CFI_type_long_double_Complex (CFI_type_Complex + (10 << CFI_type_kind_shift))
+
+/* This is the IEEE 128-bit encoding, same as _Float128. */
+#elif (__CFI_LDBL_MANT_DIG__ == 113 \
+ && __CFI_LDBL_MIN_EXP__ == -16381 \
+ && __CFI_LDBL_MAX_EXP__ == 16384)
+#define CFI_type_long_double (CFI_type_Real + (16 << CFI_type_kind_shift))
+#define CFI_type_long_double_Complex (CFI_type_Complex + (16 << CFI_type_kind_shift))
+
+/* This is the IBM128 encoding used on PowerPC; also assigned kind 16. */
+#elif (__CFI_LDBL_MANT_DIG__ == 106 \
+ && __CFI_LDBL_MIN_EXP__ == -968 \
+ && __CFI_LDBL_MAX_EXP__ == 1024)
+#define CFI_type_long_double (CFI_type_Real + (16 << CFI_type_kind_shift))
+#define CFI_type_long_double_Complex (CFI_type_Complex + (16 << CFI_type_kind_shift))
+#define CFI_no_float128 1
+
+/* It's a bug if we get here. If you've got a target that has some other
+   long double encoding, you need to add something here for Fortran to
+ recognize it. */
+#else
+#error "Can't determine kind of long double"
+#endif
+
+/* Similarly for _Float128. This always refers to the IEEE encoding
+ and not some other 128-bit representation, so if we already used
+ kind 16 for a non-IEEE representation, this one must be unsupported
+ in Fortran even if it's available in C. */
+#if (!defined (CFI_no_float128) \
+ && defined(__FLT128_MANT_DIG__) && __FLT128_MANT_DIG__ == 113 \
+ && defined(__FLT128_MIN_EXP__) && __FLT128_MIN_EXP__ == -16381 \
+ && defined(__FLT128_MAX_EXP__) && __FLT128_MAX_EXP__ == 16384)
+#define CFI_type_float128 (CFI_type_Real + (16 << CFI_type_kind_shift))
+#define CFI_type_float128_Complex (CFI_type_Complex + (16 << CFI_type_kind_shift))
+#else
+#define CFI_type_float128 -2
+#define CFI_type_float128_Complex -2
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ISO_FORTRAN_BINDING_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/arm_acle.h b/lib/gcc/arm-none-eabi/13.2.1/include/arm_acle.h
new file mode 100644
index 0000000..4a5a6a8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/arm_acle.h
@@ -0,0 +1,772 @@
+/* ARM Non-NEON ACLE intrinsics include file.
+
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GCC_ARM_ACLE_H
+#define _GCC_ARM_ACLE_H
+
+#include <stdint.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (!__thumb__ || __thumb2__) && __ARM_ARCH >= 4
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_cdp (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRd, const unsigned int __CRn,
+ const unsigned int __CRm, const unsigned int __opc2)
+{
+ __builtin_arm_cdp (__coproc, __opc1, __CRd, __CRn, __CRm, __opc2);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_ldc (const unsigned int __coproc, const unsigned int __CRd,
+ const void * __p)
+{
+ __builtin_arm_ldc (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_ldcl (const unsigned int __coproc, const unsigned int __CRd,
+ const void * __p)
+{
+ __builtin_arm_ldcl (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_stc (const unsigned int __coproc, const unsigned int __CRd,
+ void * __p)
+{
+ __builtin_arm_stc (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_stcl (const unsigned int __coproc, const unsigned int __CRd,
+ void * __p)
+{
+ __builtin_arm_stcl (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_mcr (const unsigned int __coproc, const unsigned int __opc1,
+ uint32_t __value, const unsigned int __CRn, const unsigned int __CRm,
+ const unsigned int __opc2)
+{
+ __builtin_arm_mcr (__coproc, __opc1, __value, __CRn, __CRm, __opc2);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__arm_mrc (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRn, const unsigned int __CRm,
+ const unsigned int __opc2)
+{
+ return __builtin_arm_mrc (__coproc, __opc1, __CRn, __CRm, __opc2);
+}
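
A hedged usage sketch (the function name is illustrative; it assumes a
privileged bare-metal context on a core with CP15, and uses the classic
MIDR encoding MRC p15, 0, Rt, c0, c0, 0):

    #include <arm_acle.h>

    /* Read the Main ID register via coprocessor 15.  Every operand must
       be a compile-time constant, matching the const parameters above.  */
    static uint32_t read_midr (void)
    {
      return __arm_mrc (15, 0, 0, 0, 0);
    }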
+#if __ARM_ARCH >= 5
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_cdp2 (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRd, const unsigned int __CRn,
+ const unsigned int __CRm, const unsigned int __opc2)
+{
+ __builtin_arm_cdp2 (__coproc, __opc1, __CRd, __CRn, __CRm, __opc2);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_ldc2 (const unsigned int __coproc, const unsigned int __CRd,
+ const void * __p)
+{
+ __builtin_arm_ldc2 (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_ldc2l (const unsigned int __coproc, const unsigned int __CRd,
+ const void * __p)
+{
+ __builtin_arm_ldc2l (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_stc2 (const unsigned int __coproc, const unsigned int __CRd,
+ void * __p)
+{
+ __builtin_arm_stc2 (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_stc2l (const unsigned int __coproc, const unsigned int __CRd,
+ void * __p)
+{
+ __builtin_arm_stc2l (__coproc, __CRd, __p);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_mcr2 (const unsigned int __coproc, const unsigned int __opc1,
+ uint32_t __value, const unsigned int __CRn,
+ const unsigned int __CRm, const unsigned int __opc2)
+{
+ __builtin_arm_mcr2 (__coproc, __opc1, __value, __CRn, __CRm, __opc2);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__arm_mrc2 (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRn, const unsigned int __CRm,
+ const unsigned int __opc2)
+{
+ return __builtin_arm_mrc2 (__coproc, __opc1, __CRn, __CRm, __opc2);
+}
+
+#if __ARM_ARCH >= 6 || defined (__ARM_ARCH_5TE__)
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_mcrr (const unsigned int __coproc, const unsigned int __opc1,
+ uint64_t __value, const unsigned int __CRm)
+{
+ __builtin_arm_mcrr (__coproc, __opc1, __value, __CRm);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__arm_mrrc (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRm)
+{
+ return __builtin_arm_mrrc (__coproc, __opc1, __CRm);
+}
+
+#if __ARM_ARCH >= 6
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_mcrr2 (const unsigned int __coproc, const unsigned int __opc1,
+ uint64_t __value, const unsigned int __CRm)
+{
+ __builtin_arm_mcrr2 (__coproc, __opc1, __value, __CRm);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__arm_mrrc2 (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRm)
+{
+ return __builtin_arm_mrrc2 (__coproc, __opc1, __CRm);
+}
+#endif /* __ARM_ARCH >= 6. */
+#endif /* __ARM_ARCH >= 6 || defined (__ARM_ARCH_5TE__). */
+#endif /* __ARM_ARCH >= 5. */
+#endif /* (!__thumb__ || __thumb2__) && __ARM_ARCH >= 4. */
+
+#ifdef __ARM_FEATURE_SIMD32
+typedef int32_t int16x2_t;
+typedef uint32_t uint16x2_t;
+typedef int32_t int8x4_t;
+typedef uint32_t uint8x4_t;
+
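Because these SIMD types are ordinary 32-bit integers carrying packed
lanes, callers build them with shifts and masks; a small sketch (the
helper name is illustrative):

    /* Pack two signed halfwords into the low and high lanes of an
       int16x2_t.  */
    static inline int16x2_t pack_int16x2 (int16_t lo, int16_t hi)
    {
      return (int16x2_t) (((uint32_t) (uint16_t) lo)
                          | ((uint32_t) (uint16_t) hi << 16));
    }

With that, __sadd16 (pack_int16x2 (1, 2), pack_int16x2 (3, 4)) performs
two independent 16-bit additions in a single register.
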
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__sxtab16 (int16x2_t __a, int8x4_t __b)
+{
+ return __builtin_arm_sxtab16 (__a, __b);
+}
+
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__sxtb16 (int8x4_t __a)
+{
+ return __builtin_arm_sxtb16 (__a);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uxtab16 (uint16x2_t __a, uint8x4_t __b)
+{
+ return __builtin_arm_uxtab16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uxtb16 (uint8x4_t __a)
+{
+ return __builtin_arm_uxtb16 (__a);
+}
+
+__extension__ extern __inline int8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__qadd8 (int8x4_t __a, int8x4_t __b)
+{
+ return __builtin_arm_qadd8 (__a, __b);
+}
+
+__extension__ extern __inline int8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__qsub8 (int8x4_t __a, int8x4_t __b)
+{
+ return __builtin_arm_qsub8 (__a, __b);
+}
+
+__extension__ extern __inline int8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__shadd8 (int8x4_t __a, int8x4_t __b)
+{
+ return __builtin_arm_shadd8 (__a, __b);
+}
+
+__extension__ extern __inline int8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__shsub8 (int8x4_t __a, int8x4_t __b)
+{
+ return __builtin_arm_shsub8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uhadd8 (uint8x4_t __a, uint8x4_t __b)
+{
+ return __builtin_arm_uhadd8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uhsub8 (uint8x4_t __a, uint8x4_t __b)
+{
+ return __builtin_arm_uhsub8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uqadd8 (uint8x4_t __a, uint8x4_t __b)
+{
+ return __builtin_arm_uqadd8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uqsub8 (uint8x4_t __a, uint8x4_t __b)
+{
+ return __builtin_arm_uqsub8 (__a, __b);
+}
+
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__qadd16 (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_qadd16 (__a, __b);
+}
+
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__qasx (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_qasx (__a, __b);
+}
+
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__qsax (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_qsax (__a, __b);
+}
+
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__qsub16 (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_qsub16 (__a, __b);
+}
+
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__shadd16 (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_shadd16 (__a, __b);
+}
+
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__shasx (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_shasx (__a, __b);
+}
+
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__shsax (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_shsax (__a, __b);
+}
+
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__shsub16 (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_shsub16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uhadd16 (uint16x2_t __a, uint16x2_t __b)
+{
+ return __builtin_arm_uhadd16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uhasx (uint16x2_t __a, uint16x2_t __b)
+{
+ return __builtin_arm_uhasx (__a, __b);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uhsax (uint16x2_t __a, uint16x2_t __b)
+{
+ return __builtin_arm_uhsax (__a, __b);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uhsub16 (uint16x2_t __a, uint16x2_t __b)
+{
+ return __builtin_arm_uhsub16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uqadd16 (uint16x2_t __a, uint16x2_t __b)
+{
+ return __builtin_arm_uqadd16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uqasx (uint16x2_t __a, uint16x2_t __b)
+{
+ return __builtin_arm_uqasx (__a, __b);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uqsax (uint16x2_t __a, uint16x2_t __b)
+{
+ return __builtin_arm_uqsax (__a, __b);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uqsub16 (uint16x2_t __a, uint16x2_t __b)
+{
+ return __builtin_arm_uqsub16 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smusd (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_smusd (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smusdx (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_smusdx (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__usad8 (uint8x4_t __a, uint8x4_t __b)
+{
+ return __builtin_arm_usad8 (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__usada8 (uint8x4_t __a, uint8x4_t __b, uint32_t __c)
+{
+ return __builtin_arm_usada8 (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smlald (int16x2_t __a, int16x2_t __b, int64_t __c)
+{
+ return __builtin_arm_smlald (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smlaldx (int16x2_t __a, int16x2_t __b, int64_t __c)
+{
+ return __builtin_arm_smlaldx (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smlsld (int16x2_t __a, int16x2_t __b, int64_t __c)
+{
+ return __builtin_arm_smlsld (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smlsldx (int16x2_t __a, int16x2_t __b, int64_t __c)
+{
+ return __builtin_arm_smlsldx (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__sel (uint8x4_t __a, uint8x4_t __b)
+{
+ return __builtin_arm_sel (__a, __b);
+}
+
+__extension__ extern __inline int8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__sadd8 (int8x4_t __a, int8x4_t __b)
+{
+ return __builtin_arm_sadd8 (__a, __b);
+}
+
+__extension__ extern __inline int8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__ssub8 (int8x4_t __a, int8x4_t __b)
+{
+ return __builtin_arm_ssub8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uadd8 (uint8x4_t __a, uint8x4_t __b)
+{
+ return __builtin_arm_uadd8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__usub8 (uint8x4_t __a, uint8x4_t __b)
+{
+ return __builtin_arm_usub8 (__a, __b);
+}
+
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__sadd16 (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_sadd16 (__a, __b);
+}
+
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__sasx (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_sasx (__a, __b);
+}
+
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__ssax (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_ssax (__a, __b);
+}
+
+__extension__ extern __inline int16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__ssub16 (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_ssub16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uadd16 (uint16x2_t __a, uint16x2_t __b)
+{
+ return __builtin_arm_uadd16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__uasx (uint16x2_t __a, uint16x2_t __b)
+{
+ return __builtin_arm_uasx (__a, __b);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__usax (uint16x2_t __a, uint16x2_t __b)
+{
+ return __builtin_arm_usax (__a, __b);
+}
+
+__extension__ extern __inline uint16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__usub16 (uint16x2_t __a, uint16x2_t __b)
+{
+ return __builtin_arm_usub16 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smlad (int16x2_t __a, int16x2_t __b, int32_t __c)
+{
+ return __builtin_arm_smlad (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smladx (int16x2_t __a, int16x2_t __b, int32_t __c)
+{
+ return __builtin_arm_smladx (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smlsd (int16x2_t __a, int16x2_t __b, int32_t __c)
+{
+ return __builtin_arm_smlsd (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smlsdx (int16x2_t __a, int16x2_t __b, int32_t __c)
+{
+ return __builtin_arm_smlsdx (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smuad (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_smuad (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smuadx (int16x2_t __a, int16x2_t __b)
+{
+ return __builtin_arm_smuadx (__a, __b);
+}
+
+#define __ssat16(__a, __sat) \
+ __extension__ \
+ ({ \
+ int16x2_t __arg = (__a); \
+ __builtin_sat_imm_check (__sat, 1, 16); \
+ int16x2_t __res = __builtin_arm_ssat16 (__arg, __sat); \
+ __res; \
+ })
+
+#define __usat16(__a, __sat) \
+ __extension__ \
+ ({ \
+ int16x2_t __arg = (__a); \
+ __builtin_sat_imm_check (__sat, 0, 15); \
+ int16x2_t __res = __builtin_arm_usat16 (__arg, __sat); \
+ __res; \
+ })
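+
+/* The bound arguments to __builtin_sat_imm_check above make the
+   saturation amount a compile-time-checked immediate: __ssat16 accepts
+   1..16 and __usat16 accepts 0..15; anything else is rejected when the
+   macro is expanded.  */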
+
+#endif
+
+#ifdef __ARM_FEATURE_SAT
+
+#define __ssat(__a, __sat) \
+ __extension__ \
+ ({ \
+ int32_t __arg = (__a); \
+ __builtin_sat_imm_check (__sat, 1, 32); \
+ int32_t __res = __builtin_arm_ssat (__arg, __sat); \
+ __res; \
+ })
+
+#define __usat(__a, __sat) \
+ __extension__ \
+ ({ \
+ int32_t __arg = (__a); \
+ __builtin_sat_imm_check (__sat, 0, 31); \
+ uint32_t __res = __builtin_arm_usat (__arg, __sat); \
+ __res; \
+ })
+
+#endif
+
+#ifdef __ARM_FEATURE_QBIT
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__ignore_saturation (void)
+{
+ /* ACLE designates this intrinsic as a hint.
+ Implement as a nop for now. */
+}
+
+/* These are defined as macros because the implementation of the builtins
+   requires easy access to the current function, so wrapping them in an
+   always_inline function would complicate things.  */
+
+#define __saturation_occurred __builtin_arm_saturation_occurred
+
+#define __set_saturation_occurred(__a) \
+ __extension__ \
+ ({ \
+ int __arg = (__a); \
+ __builtin_arm_set_saturation (__arg); \
+ })
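+
+/* A minimal usage sketch (illustrative only, assuming the DSP
+   intrinsics defined below are also available): clear the Q flag,
+   perform a saturating add, then test whether saturation occurred.
+
+     __set_saturation_occurred (0);
+     int32_t __r = __qadd (__x, __y);
+     if (__saturation_occurred ())
+       handle_overflow ();
+
+   handle_overflow, __x and __y are hypothetical names.  Because the
+   macros read and write the current function's Q flag, they are meant
+   to be used in the same function as the operation they monitor.  */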
+#endif
+
+#ifdef __ARM_FEATURE_DSP
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__qadd (int32_t __a, int32_t __b)
+{
+ return __builtin_arm_qadd (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__qsub (int32_t __a, int32_t __b)
+{
+ return __builtin_arm_qsub (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__qdbl (int32_t __x)
+{
+ return __qadd (__x, __x);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smlabb (int32_t __a, int32_t __b, int32_t __c)
+{
+ return __builtin_arm_smlabb (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smlatb (int32_t __a, int32_t __b, int32_t __c)
+{
+ return __builtin_arm_smlatb (__a, __b, __c);
+}
+
+/* __smlabt is equivalent to __smlatb with the two multiplication
+   operands swapped around, so it is implemented in terms of __smlatb.  */
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smlabt (int32_t __a, int32_t __b, int32_t __c)
+{
+ return __smlatb (__b, __a, __c);
+}
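+
+/* Worked example of the identity above:
+   __smlabt (__a, __b, __c) computes
+     (int16_t) __a * (int16_t) (__b >> 16) + __c,
+   which is exactly what __smlatb (__b, __a, __c) computes.  */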
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smlatt (int32_t __a, int32_t __b, int32_t __c)
+{
+ return __builtin_arm_smlatt (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smlawb (int32_t __a, int32_t __b, int32_t __c)
+{
+ return __builtin_arm_smlawb (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__smlawt (int32_t __a, int32_t __b, int32_t __c)
+{
+ return __builtin_arm_smlawt (__a, __b, __c);
+}
+#endif
+
+#pragma GCC push_options
+#ifdef __ARM_FEATURE_CRC32
+#ifdef __ARM_FP
+#pragma GCC target ("arch=armv8-a+crc+simd")
+#else
+#pragma GCC target ("arch=armv8-a+crc")
+#endif
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32b (uint32_t __a, uint8_t __b)
+{
+ return __builtin_arm_crc32b (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32h (uint32_t __a, uint16_t __b)
+{
+ return __builtin_arm_crc32h (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32w (uint32_t __a, uint32_t __b)
+{
+ return __builtin_arm_crc32w (__a, __b);
+}
+
+#ifdef __ARM_32BIT_STATE
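+/* AArch32 has no doubleword CRC32 instruction, so the CRC of a 64-bit
+   value is accumulated word by word: low word first, then high word.  */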
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32d (uint32_t __a, uint64_t __b)
+{
+ uint32_t __d;
+
+ __d = __crc32w (__crc32w (__a, __b & 0xffffffffULL), __b >> 32);
+ return __d;
+}
+#endif
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32cb (uint32_t __a, uint8_t __b)
+{
+ return __builtin_arm_crc32cb (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32ch (uint32_t __a, uint16_t __b)
+{
+ return __builtin_arm_crc32ch (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32cw (uint32_t __a, uint32_t __b)
+{
+ return __builtin_arm_crc32cw (__a, __b);
+}
+
+#ifdef __ARM_32BIT_STATE
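+/* Same word-at-a-time decomposition as __crc32d, using the CRC32C
+   (Castagnoli polynomial) instructions.  */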
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32cd (uint32_t __a, uint64_t __b)
+{
+ uint32_t __d;
+
+ __d = __crc32cw (__crc32cw (__a, __b & 0xffffffffULL), __b >> 32);
+ return __d;
+}
+#endif
+
+#endif /* __ARM_FEATURE_CRC32 */
+#pragma GCC pop_options
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/arm_bf16.h b/lib/gcc/arm-none-eabi/13.2.1/include/arm_bf16.h
new file mode 100644
index 0000000..15e45bc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/arm_bf16.h
@@ -0,0 +1,55 @@
+/* Arm BF16 intrinsics include file.
+
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+
+#ifndef _GCC_ARM_BF16_H
+#define _GCC_ARM_BF16_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef __bf16 bfloat16_t;
+typedef float float32_t;
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtah_f32_bf16 (bfloat16_t __a)
+{
+ return __builtin_neon_vbfcvtbf (__a);
+}
+
+__extension__ extern __inline bfloat16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvth_bf16_f32 (float32_t __a)
+{
+ return __builtin_neon_vbfcvtsf (__a);
+}
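+
+/* Usage sketch (illustrative only): round-trip a value through
+   bfloat16_t.  The bfloat16 format keeps float32's sign and 8-bit
+   exponent but stores only 7 significand bits, so 1.5f survives the
+   round trip exactly while most values are rounded.
+
+     bfloat16_t __h = vcvth_bf16_f32 (1.5f);
+     float32_t __f = vcvtah_f32_bf16 (__h);
+*/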
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/arm_cde.h b/lib/gcc/arm-none-eabi/13.2.1/include/arm_cde.h
new file mode 100644
index 0000000..34a149a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/arm_cde.h
@@ -0,0 +1,176 @@
+/* Arm Custom Datapath Extension (CDE) intrinsics include file.
+
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+ Contributed by Arm Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GCC_ARM_CDE_H
+#define _GCC_ARM_CDE_H 1
+
+#include <stdint.h>
+
+#if defined (__ARM_FEATURE_CDE)
+
+#define __arm_cx1(coproc, imm) \
+ __builtin_arm_cx1si(coproc, imm)
+
+#define __arm_cx1a(coproc, acc, imm) \
+ __builtin_arm_cx1asi(coproc, acc, imm)
+
+#define __arm_cx2(coproc, n, imm) \
+ __builtin_arm_cx2si(coproc, n, imm)
+
+#define __arm_cx2a(coproc, acc, n, imm) \
+ __builtin_arm_cx2asi(coproc, acc, n, imm)
+
+#define __arm_cx3(coproc, n, m, imm) \
+ __builtin_arm_cx3si(coproc, n, m, imm)
+
+#define __arm_cx3a(coproc, acc, n, m, imm) \
+ __builtin_arm_cx3asi(coproc, acc, n, m, imm)
+
+#define __arm_cx1d(coproc, imm) \
+ __builtin_arm_cx1di(coproc, imm)
+
+#define __arm_cx1da(coproc, acc, imm) \
+ __builtin_arm_cx1adi(coproc, acc, imm)
+
+#define __arm_cx2d(coproc, n, imm) \
+ __builtin_arm_cx2di(coproc, n, imm)
+
+#define __arm_cx2da(coproc, acc, n, imm) \
+ __builtin_arm_cx2adi(coproc, acc, n, imm)
+
+#define __arm_cx3d(coproc, n, m, imm) \
+ __builtin_arm_cx3di(coproc, n, m, imm)
+
+#define __arm_cx3da(coproc, acc, n, m, imm) \
+ __builtin_arm_cx3adi(coproc, acc, n, m, imm)
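+
+/* Usage sketch (illustrative only): issue CX1A on coprocessor 0,
+   accumulating into __acc.  Which coprocessor numbers are usable and
+   what each immediate means are properties of the implementation and
+   build options, not of this header; __acc is a hypothetical name.
+
+     uint32_t __r = __arm_cx1a (0, __acc, 123);
+*/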
+
+#if defined (__ARM_FP) || defined (__ARM_FEATURE_MVE)
+
+/* CDE builtins using FPU/MVE registers. */
+
+/* uint32_t
+ __arm_vcx1_u32(int coproc, uint32_t imm); */
+#define __arm_vcx1_u32(coproc, imm) \
+ __builtin_arm_vcx1si(coproc, imm)
+
+/* uint32_t
+ __arm_vcx1a_u32(int coproc, uint32_t acc, uint32_t imm); */
+#define __arm_vcx1a_u32(coproc, acc, imm) \
+ __builtin_arm_vcx1asi(coproc, acc, imm)
+
+/* uint32_t
+ __arm_vcx2_u32(int coproc, uint32_t n, uint32_t imm); */
+#define __arm_vcx2_u32(coproc, n, imm) \
+ __builtin_arm_vcx2si(coproc, n, imm)
+
+/* uint32_t
+ __arm_vcx2a_u32(int coproc, uint32_t acc, uint32_t n, uint32_t imm); */
+#define __arm_vcx2a_u32(coproc, acc, n, imm) \
+ __builtin_arm_vcx2asi(coproc, acc, n, imm)
+
+/* uint32_t
+ __arm_vcx3_u32(int coproc, uint32_t n, uint32_t m, uint32_t imm); */
+#define __arm_vcx3_u32(coproc, n, m, imm) \
+ __builtin_arm_vcx3si(coproc, n, m, imm)
+
+/* uint32_t
+ __arm_vcx3a_u32(int coproc, uint32_t acc, uint32_t n, uint32_t m,
+ uint32_t imm); */
+#define __arm_vcx3a_u32(coproc, acc, n, m, imm) \
+ __builtin_arm_vcx3asi(coproc, acc, n, m, imm)
+
+/* uint64_t
+ __arm_vcx1d_u64(int coproc, uint32_t imm); */
+#define __arm_vcx1d_u64(coproc, imm) \
+ __builtin_arm_vcx1di(coproc, imm)
+
+/* uint64_t
+ __arm_vcx1da_u64(int coproc, uint64_t acc, uint32_t imm); */
+#define __arm_vcx1da_u64(coproc, acc, imm) \
+ __builtin_arm_vcx1adi(coproc, acc, imm)
+
+/* uint64_t
+ __arm_vcx2d_u64(int coproc, uint64_t m, uint32_t imm); */
+#define __arm_vcx2d_u64(coproc, m, imm) \
+ __builtin_arm_vcx2di(coproc, m, imm)
+
+/* uint64_t
+ __arm_vcx2da_u64(int coproc, uint64_t acc, uint64_t m, uint32_t imm); */
+#define __arm_vcx2da_u64(coproc, acc, m, imm) \
+ __builtin_arm_vcx2adi(coproc, acc, m, imm)
+
+/* uint64_t
+ __arm_vcx3d_u64(int coproc, uint64_t n, uint64_t m, uint32_t imm); */
+#define __arm_vcx3d_u64(coproc, n, m, imm) \
+ __builtin_arm_vcx3di(coproc, n, m, imm)
+
+/* uint64_t
+ __arm_vcx3da_u64(int coproc, uint64_t acc, uint64_t n, uint64_t m,
+ uint32_t imm); */
+#define __arm_vcx3da_u64(coproc, acc, n, m, imm) \
+ __builtin_arm_vcx3adi(coproc, acc, n, m, imm)
+
+#endif /* __ARM_FP || __ARM_FEATURE_MVE. */
+#endif /* __ARM_FEATURE_CDE. */
+
+#if __ARM_FEATURE_MVE
+#include "arm_mve_types.h"
+
+#define __arm_vcx1q_u8(coproc, imm) \
+ (uint8x16_t)__builtin_arm_vcx1qv16qi(coproc, imm)
+#define __arm_vcx1qa(coproc, acc, imm) \
+ __builtin_arm_vcx1qav16qi(coproc, acc, imm)
+#define __arm_vcx2q(coproc, n, imm) \
+ __builtin_arm_vcx2qv16qi(coproc, n, imm)
+#define __arm_vcx2q_u8(coproc, n, imm) \
+ (uint8x16_t)__builtin_arm_vcx2qv16qi(coproc, n, imm)
+#define __arm_vcx2qa(coproc, acc, n, imm) \
+ __builtin_arm_vcx2qav16qi(coproc, acc, n, imm)
+#define __arm_vcx3q(coproc, n, m, imm) \
+ __builtin_arm_vcx3qv16qi(coproc, n, m, imm)
+#define __arm_vcx3q_u8(coproc, n, m, imm) \
+ (uint8x16_t)__builtin_arm_vcx3qv16qi(coproc, n, m, imm)
+#define __arm_vcx3qa(coproc, acc, n, m, imm) \
+ __builtin_arm_vcx3qav16qi(coproc, acc, n, m, imm)
+
+#define __arm_vcx1q_m(coproc, inactive, imm, pred) \
+ __builtin_arm_vcx1q_p_v16qi(coproc, inactive, imm, pred)
+#define __arm_vcx1qa_m(coproc, acc, imm, pred) \
+ __builtin_arm_vcx1qa_p_v16qi(coproc, acc, imm, pred)
+
+#define __arm_vcx2q_m(coproc, inactive, n, imm, pred) \
+ __builtin_arm_vcx2q_p_v16qi(coproc, inactive, n, imm, pred)
+#define __arm_vcx2qa_m(coproc, acc, n, imm, pred) \
+ __builtin_arm_vcx2qa_p_v16qi(coproc, acc, n, imm, pred)
+
+#define __arm_vcx3q_m(coproc, inactive, n, m, imm, pred) \
+ __builtin_arm_vcx3q_p_v16qi(coproc, inactive, n, m, imm, pred)
+#define __arm_vcx3qa_m(coproc, acc, n, m, imm, pred) \
+ __builtin_arm_vcx3qa_p_v16qi(coproc, acc, n, m, imm, pred)
+
+#endif
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/arm_cmse.h b/lib/gcc/arm-none-eabi/13.2.1/include/arm_cmse.h
new file mode 100644
index 0000000..3534dcb
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/arm_cmse.h
@@ -0,0 +1,200 @@
+/* ARMv8-M Secure Extensions intrinsics include file.
+
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+
+#ifndef _GCC_ARM_CMSE_H
+#define _GCC_ARM_CMSE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if __ARM_FEATURE_CMSE & 1
+
+#include <stddef.h>
+
+#ifdef __ARM_BIG_ENDIAN
+
+typedef union {
+ struct cmse_address_info {
+#if __ARM_FEATURE_CMSE & 2
+ unsigned idau_region:8;
+ unsigned idau_region_valid:1;
+ unsigned secure:1;
+ unsigned nonsecure_readwrite_ok:1;
+ unsigned nonsecure_read_ok:1;
+#else
+ unsigned :12;
+#endif
+ unsigned readwrite_ok:1;
+ unsigned read_ok:1;
+#if __ARM_FEATURE_CMSE & 2
+ unsigned sau_region_valid:1;
+#else
+ unsigned :1;
+#endif
+ unsigned mpu_region_valid:1;
+#if __ARM_FEATURE_CMSE & 2
+ unsigned sau_region:8;
+#else
+ unsigned :8;
+#endif
+ unsigned mpu_region:8;
+ } flags;
+ unsigned value;
+} cmse_address_info_t;
+
+#else
+
+typedef union {
+ struct cmse_address_info {
+ unsigned mpu_region:8;
+#if __ARM_FEATURE_CMSE & 2
+ unsigned sau_region:8;
+#else
+ unsigned :8;
+#endif
+ unsigned mpu_region_valid:1;
+#if __ARM_FEATURE_CMSE & 2
+ unsigned sau_region_valid:1;
+#else
+ unsigned :1;
+#endif
+ unsigned read_ok:1;
+ unsigned readwrite_ok:1;
+#if __ARM_FEATURE_CMSE & 2
+ unsigned nonsecure_read_ok:1;
+ unsigned nonsecure_readwrite_ok:1;
+ unsigned secure:1;
+ unsigned idau_region_valid:1;
+ unsigned idau_region:8;
+#else
+ unsigned :12;
+#endif
+ } flags;
+ unsigned value;
+} cmse_address_info_t;
+
+#endif /* __ARM_BIG_ENDIAN */
+
+#define cmse_TT_fptr(p) (__cmse_TT_fptr ((__cmse_fptr)(p)))
+
+typedef void (*__cmse_fptr)(void);
+
+#define __CMSE_TT_ASM(flags) \
+{ \
+ cmse_address_info_t __result; \
+ __asm__ ("tt" # flags " %0,%1" \
+ : "=r"(__result) \
+ : "r"(__p) \
+ : "memory"); \
+ return __result; \
+}
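+
+/* __CMSE_TT_ASM supplies the body of each TT* function below: it
+   expands to a block that runs the "tt" instruction variant selected
+   by its argument ("", "t", "a" or "at") on the pointer __p and
+   returns the resulting cmse_address_info_t.  */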
+
+__extension__ static __inline __attribute__ ((__always_inline__))
+cmse_address_info_t
+__cmse_TT_fptr (__cmse_fptr __p)
+__CMSE_TT_ASM ()
+
+__extension__ static __inline __attribute__ ((__always_inline__))
+cmse_address_info_t
+cmse_TT (void *__p)
+__CMSE_TT_ASM ()
+
+#define cmse_TTT_fptr(p) (__cmse_TTT_fptr ((__cmse_fptr)(p)))
+
+__extension__ static __inline __attribute__ ((__always_inline__))
+cmse_address_info_t
+__cmse_TTT_fptr (__cmse_fptr __p)
+__CMSE_TT_ASM (t)
+
+__extension__ static __inline __attribute__ ((__always_inline__))
+cmse_address_info_t
+cmse_TTT (void *__p)
+__CMSE_TT_ASM (t)
+
+#if __ARM_FEATURE_CMSE & 2
+
+#define cmse_TTA_fptr(p) (__cmse_TTA_fptr ((__cmse_fptr)(p)))
+
+__extension__ static __inline __attribute__ ((__always_inline__))
+cmse_address_info_t
+__cmse_TTA_fptr (__cmse_fptr __p)
+__CMSE_TT_ASM (a)
+
+__extension__ static __inline __attribute__ ((__always_inline__))
+cmse_address_info_t
+cmse_TTA (void *__p)
+__CMSE_TT_ASM (a)
+
+#define cmse_TTAT_fptr(p) (__cmse_TTAT_fptr ((__cmse_fptr)(p)))
+
+__extension__ static __inline cmse_address_info_t
+__attribute__ ((__always_inline__))
+__cmse_TTAT_fptr (__cmse_fptr __p)
+__CMSE_TT_ASM (at)
+
+__extension__ static __inline cmse_address_info_t
+__attribute__ ((__always_inline__))
+cmse_TTAT (void *__p)
+__CMSE_TT_ASM (at)
+
+/* FIXME: diagnose use outside cmse_nonsecure_entry functions. */
+__extension__ static __inline int __attribute__ ((__always_inline__))
+__attribute__ ((warn_unused_result))
+cmse_nonsecure_caller (void)
+{
+ return __builtin_arm_cmse_nonsecure_caller ();
+}
+
+#define CMSE_AU_NONSECURE 2
+#define CMSE_MPU_NONSECURE 16
+#define CMSE_NONSECURE 18
+
+#define cmse_nsfptr_create(p) ((__typeof__ ((p))) ((__INTPTR_TYPE__) (p) & ~1))
+
+#define cmse_is_nsfptr(p) (!((__INTPTR_TYPE__) (p) & 1))
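+
+/* Usage sketch (illustrative only): clear the LSB of a function
+   pointer before handing it to non-secure state, then confirm the
+   result really is a non-secure function pointer.  __fp and __ns_fp
+   are hypothetical names.
+
+     __ns_fp = cmse_nsfptr_create (__fp);
+     if (cmse_is_nsfptr (__ns_fp))
+       ... call through __ns_fp ...
+*/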
+
+#endif /* __ARM_FEATURE_CMSE & 2 */
+
+#define CMSE_MPU_UNPRIV 4
+#define CMSE_MPU_READWRITE 1
+#define CMSE_MPU_READ 8
+
+__extension__ void *
+__attribute__ ((warn_unused_result))
+cmse_check_address_range (void *, size_t, int);
+
+#define cmse_check_pointed_object(p, f) \
+ ((__typeof__ ((p))) cmse_check_address_range ((p), sizeof (*(p)), (f)))
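+
+/* Usage sketch (illustrative only): validate an object received from
+   non-secure code before dereferencing it.  cmse_check_address_range
+   returns its pointer argument when every byte of the range passes the
+   requested checks and NULL otherwise; __p and __q are hypothetical
+   names.
+
+     int *__q = cmse_check_pointed_object (__p, CMSE_MPU_READWRITE);
+     if (__q != NULL)
+       *__q = 0;
+*/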
+
+#endif /* __ARM_FEATURE_CMSE & 1 */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _GCC_ARM_CMSE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/arm_fp16.h b/lib/gcc/arm-none-eabi/13.2.1/include/arm_fp16.h
new file mode 100644
index 0000000..1c7c16f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/arm_fp16.h
@@ -0,0 +1,255 @@
+/* ARM FP16 intrinsics include file.
+
+ Copyright (C) 2016-2023 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GCC_ARM_FP16_H
+#define _GCC_ARM_FP16_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/* Intrinsics for FP16 instructions. */
+#pragma GCC push_options
+#pragma GCC target ("fpu=fp-armv8")
+
+#if defined (__ARM_FEATURE_FP16_SCALAR_ARITHMETIC)
+
+typedef __fp16 float16_t;
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vabsh_f16 (float16_t __a)
+{
+ return __builtin_neon_vabshf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vaddh_f16 (float16_t __a, float16_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtah_s32_f16 (float16_t __a)
+{
+ return __builtin_neon_vcvtahssi (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtah_u32_f16 (float16_t __a)
+{
+ return __builtin_neon_vcvtahusi (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_f16_s32 (int32_t __a)
+{
+ return __builtin_neon_vcvthshf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_f16_u32 (uint32_t __a)
+{
+ return __builtin_neon_vcvthuhf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_n_f16_s32 (int32_t __a, const int __b)
+{
+ return __builtin_neon_vcvths_nhf (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_n_f16_u32 (uint32_t __a, const int __b)
+{
+ return __builtin_neon_vcvthu_nhf ((int32_t)__a, __b);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvth_n_s32_f16 (float16_t __a, const int __b)
+{
+ return __builtin_neon_vcvths_nsi (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvth_n_u32_f16 (float16_t __a, const int __b)
+{
+ return (uint32_t)__builtin_neon_vcvthu_nsi (__a, __b);
+}
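+
+/* In the vcvth_n_* conversions above, the second argument is a
+   compile-time constant giving the number of fractional bits of the
+   fixed-point operand or result.  A sketch, where __h is a
+   hypothetical float16_t:
+
+     int32_t __q = vcvth_n_s32_f16 (__h, 16);
+
+   converts __h to a 32-bit value with 16 fractional bits.  */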
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvth_s32_f16 (float16_t __a)
+{
+ return __builtin_neon_vcvthssi (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvth_u32_f16 (float16_t __a)
+{
+ return __builtin_neon_vcvthusi (__a);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtmh_s32_f16 (float16_t __a)
+{
+ return __builtin_neon_vcvtmhssi (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtmh_u32_f16 (float16_t __a)
+{
+ return __builtin_neon_vcvtmhusi (__a);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtnh_s32_f16 (float16_t __a)
+{
+ return __builtin_neon_vcvtnhssi (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtnh_u32_f16 (float16_t __a)
+{
+ return __builtin_neon_vcvtnhusi (__a);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtph_s32_f16 (float16_t __a)
+{
+ return __builtin_neon_vcvtphssi (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtph_u32_f16 (float16_t __a)
+{
+ return __builtin_neon_vcvtphusi (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vdivh_f16 (float16_t __a, float16_t __b)
+{
+ return __a / __b;
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vfmah_f16 (float16_t __a, float16_t __b, float16_t __c)
+{
+ return __builtin_neon_vfmahf (__a, __b, __c);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vfmsh_f16 (float16_t __a, float16_t __b, float16_t __c)
+{
+ return __builtin_neon_vfmshf (__a, __b, __c);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vmaxnmh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_neon_vmaxnmhf (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vminnmh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_neon_vminnmhf (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vmulh_f16 (float16_t __a, float16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vnegh_f16 (float16_t __a)
+{
+ return - __a;
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndah_f16 (float16_t __a)
+{
+ return __builtin_neon_vrndahf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndh_f16 (float16_t __a)
+{
+ return __builtin_neon_vrndhf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndih_f16 (float16_t __a)
+{
+ return __builtin_neon_vrndihf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndmh_f16 (float16_t __a)
+{
+ return __builtin_neon_vrndmhf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndnh_f16 (float16_t __a)
+{
+ return __builtin_neon_vrndnhf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndph_f16 (float16_t __a)
+{
+ return __builtin_neon_vrndphf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndxh_f16 (float16_t __a)
+{
+ return __builtin_neon_vrndxhf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vsqrth_f16 (float16_t __a)
+{
+ return __builtin_neon_vsqrthf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vsubh_f16 (float16_t __a, float16_t __b)
+{
+ return __a - __b;
+}
+
+#endif /* __ARM_FEATURE_FP16_SCALAR_ARITHMETIC */
+#pragma GCC pop_options
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/arm_mve.h b/lib/gcc/arm-none-eabi/13.2.1/include/arm_mve.h
new file mode 100644
index 0000000..71ea3ee
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/arm_mve.h
@@ -0,0 +1,42004 @@
+/* Arm MVE intrinsics include file.
+
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+ Contributed by Arm.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GCC_ARM_MVE_H
+#define _GCC_ARM_MVE_H
+
+#if __ARM_BIG_ENDIAN
+#error "MVE intrinsics are not supported in Big-Endian mode."
+#elif !__ARM_FEATURE_MVE
+#error "MVE feature not supported"
+#else
+
+#include <stdint.h>
+#ifndef __cplusplus
+#include <stdbool.h>
+#endif
+#include "arm_mve_types.h"
+
+#ifndef __ARM_MVE_PRESERVE_USER_NAMESPACE
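+/* Unless the user defines __ARM_MVE_PRESERVE_USER_NAMESPACE, map each
+   user-namespace polymorphic intrinsic name onto its reserved
+   __arm_-prefixed implementation.  */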
+#define vst4q(__addr, __value) __arm_vst4q(__addr, __value)
+#define vdupq_n(__a) __arm_vdupq_n(__a)
+#define vabsq(__a) __arm_vabsq(__a)
+#define vclsq(__a) __arm_vclsq(__a)
+#define vclzq(__a) __arm_vclzq(__a)
+#define vnegq(__a) __arm_vnegq(__a)
+#define vaddlvq(__a) __arm_vaddlvq(__a)
+#define vaddvq(__a) __arm_vaddvq(__a)
+#define vmovlbq(__a) __arm_vmovlbq(__a)
+#define vmovltq(__a) __arm_vmovltq(__a)
+#define vmvnq(__a) __arm_vmvnq(__a)
+#define vrev16q(__a) __arm_vrev16q(__a)
+#define vrev32q(__a) __arm_vrev32q(__a)
+#define vrev64q(__a) __arm_vrev64q(__a)
+#define vqabsq(__a) __arm_vqabsq(__a)
+#define vqnegq(__a) __arm_vqnegq(__a)
+#define vshrq(__a, __imm) __arm_vshrq(__a, __imm)
+#define vaddlvq_p(__a, __p) __arm_vaddlvq_p(__a, __p)
+#define vcmpneq(__a, __b) __arm_vcmpneq(__a, __b)
+#define vshlq(__a, __b) __arm_vshlq(__a, __b)
+#define vsubq(__a, __b) __arm_vsubq(__a, __b)
+#define vrmulhq(__a, __b) __arm_vrmulhq(__a, __b)
+#define vrhaddq(__a, __b) __arm_vrhaddq(__a, __b)
+#define vqsubq(__a, __b) __arm_vqsubq(__a, __b)
+#define vqaddq(__a, __b) __arm_vqaddq(__a, __b)
+#define vorrq(__a, __b) __arm_vorrq(__a, __b)
+#define vornq(__a, __b) __arm_vornq(__a, __b)
+#define vmulq(__a, __b) __arm_vmulq(__a, __b)
+#define vmulltq_int(__a, __b) __arm_vmulltq_int(__a, __b)
+#define vmullbq_int(__a, __b) __arm_vmullbq_int(__a, __b)
+#define vmulhq(__a, __b) __arm_vmulhq(__a, __b)
+#define vmladavq(__a, __b) __arm_vmladavq(__a, __b)
+#define vminvq(__a, __b) __arm_vminvq(__a, __b)
+#define vminq(__a, __b) __arm_vminq(__a, __b)
+#define vmaxvq(__a, __b) __arm_vmaxvq(__a, __b)
+#define vmaxq(__a, __b) __arm_vmaxq(__a, __b)
+#define vhsubq(__a, __b) __arm_vhsubq(__a, __b)
+#define vhaddq(__a, __b) __arm_vhaddq(__a, __b)
+#define veorq(__a, __b) __arm_veorq(__a, __b)
+#define vcmphiq(__a, __b) __arm_vcmphiq(__a, __b)
+#define vcmpeqq(__a, __b) __arm_vcmpeqq(__a, __b)
+#define vcmpcsq(__a, __b) __arm_vcmpcsq(__a, __b)
+#define vcaddq_rot90(__a, __b) __arm_vcaddq_rot90(__a, __b)
+#define vcaddq_rot270(__a, __b) __arm_vcaddq_rot270(__a, __b)
+#define vbicq(__a, __b) __arm_vbicq(__a, __b)
+#define vandq(__a, __b) __arm_vandq(__a, __b)
+#define vaddvq_p(__a, __p) __arm_vaddvq_p(__a, __p)
+#define vaddvaq(__a, __b) __arm_vaddvaq(__a, __b)
+#define vaddq(__a, __b) __arm_vaddq(__a, __b)
+#define vabdq(__a, __b) __arm_vabdq(__a, __b)
+#define vshlq_r(__a, __b) __arm_vshlq_r(__a, __b)
+#define vrshlq(__a, __b) __arm_vrshlq(__a, __b)
+#define vqshlq(__a, __b) __arm_vqshlq(__a, __b)
+#define vqshlq_r(__a, __b) __arm_vqshlq_r(__a, __b)
+#define vqrshlq(__a, __b) __arm_vqrshlq(__a, __b)
+#define vminavq(__a, __b) __arm_vminavq(__a, __b)
+#define vminaq(__a, __b) __arm_vminaq(__a, __b)
+#define vmaxavq(__a, __b) __arm_vmaxavq(__a, __b)
+#define vmaxaq(__a, __b) __arm_vmaxaq(__a, __b)
+#define vbrsrq(__a, __b) __arm_vbrsrq(__a, __b)
+#define vshlq_n(__a, __imm) __arm_vshlq_n(__a, __imm)
+#define vrshrq(__a, __imm) __arm_vrshrq(__a, __imm)
+#define vqshlq_n(__a, __imm) __arm_vqshlq_n(__a, __imm)
+#define vcmpltq(__a, __b) __arm_vcmpltq(__a, __b)
+#define vcmpleq(__a, __b) __arm_vcmpleq(__a, __b)
+#define vcmpgtq(__a, __b) __arm_vcmpgtq(__a, __b)
+#define vcmpgeq(__a, __b) __arm_vcmpgeq(__a, __b)
+#define vqshluq(__a, __imm) __arm_vqshluq(__a, __imm)
+#define vqrdmulhq(__a, __b) __arm_vqrdmulhq(__a, __b)
+#define vqdmulhq(__a, __b) __arm_vqdmulhq(__a, __b)
+#define vmlsdavxq(__a, __b) __arm_vmlsdavxq(__a, __b)
+#define vmlsdavq(__a, __b) __arm_vmlsdavq(__a, __b)
+#define vmladavxq(__a, __b) __arm_vmladavxq(__a, __b)
+#define vhcaddq_rot90(__a, __b) __arm_vhcaddq_rot90(__a, __b)
+#define vhcaddq_rot270(__a, __b) __arm_vhcaddq_rot270(__a, __b)
+#define vqmovntq(__a, __b) __arm_vqmovntq(__a, __b)
+#define vqmovnbq(__a, __b) __arm_vqmovnbq(__a, __b)
+#define vmulltq_poly(__a, __b) __arm_vmulltq_poly(__a, __b)
+#define vmullbq_poly(__a, __b) __arm_vmullbq_poly(__a, __b)
+#define vmovntq(__a, __b) __arm_vmovntq(__a, __b)
+#define vmovnbq(__a, __b) __arm_vmovnbq(__a, __b)
+#define vmlaldavq(__a, __b) __arm_vmlaldavq(__a, __b)
+#define vqmovuntq(__a, __b) __arm_vqmovuntq(__a, __b)
+#define vqmovunbq(__a, __b) __arm_vqmovunbq(__a, __b)
+#define vshlltq(__a, __imm) __arm_vshlltq(__a, __imm)
+#define vshllbq(__a, __imm) __arm_vshllbq(__a, __imm)
+#define vqdmulltq(__a, __b) __arm_vqdmulltq(__a, __b)
+#define vqdmullbq(__a, __b) __arm_vqdmullbq(__a, __b)
+#define vmlsldavxq(__a, __b) __arm_vmlsldavxq(__a, __b)
+#define vmlsldavq(__a, __b) __arm_vmlsldavq(__a, __b)
+#define vmlaldavxq(__a, __b) __arm_vmlaldavxq(__a, __b)
+#define vrmlaldavhq(__a, __b) __arm_vrmlaldavhq(__a, __b)
+#define vaddlvaq(__a, __b) __arm_vaddlvaq(__a, __b)
+#define vrmlsldavhxq(__a, __b) __arm_vrmlsldavhxq(__a, __b)
+#define vrmlsldavhq(__a, __b) __arm_vrmlsldavhq(__a, __b)
+#define vrmlaldavhxq(__a, __b) __arm_vrmlaldavhxq(__a, __b)
+#define vabavq(__a, __b, __c) __arm_vabavq(__a, __b, __c)
+#define vbicq_m_n(__a, __imm, __p) __arm_vbicq_m_n(__a, __imm, __p)
+#define vqrshrnbq(__a, __b, __imm) __arm_vqrshrnbq(__a, __b, __imm)
+#define vqrshrunbq(__a, __b, __imm) __arm_vqrshrunbq(__a, __b, __imm)
+#define vrmlaldavhaq(__a, __b, __c) __arm_vrmlaldavhaq(__a, __b, __c)
+#define vshlcq(__a, __b, __imm) __arm_vshlcq(__a, __b, __imm)
+#define vpselq(__a, __b, __p) __arm_vpselq(__a, __b, __p)
+#define vrev64q_m(__inactive, __a, __p) __arm_vrev64q_m(__inactive, __a, __p)
+#define vqrdmlashq(__a, __b, __c) __arm_vqrdmlashq(__a, __b, __c)
+#define vqrdmlahq(__a, __b, __c) __arm_vqrdmlahq(__a, __b, __c)
+#define vqdmlashq(__a, __b, __c) __arm_vqdmlashq(__a, __b, __c)
+#define vqdmlahq(__a, __b, __c) __arm_vqdmlahq(__a, __b, __c)
+#define vmvnq_m(__inactive, __a, __p) __arm_vmvnq_m(__inactive, __a, __p)
+#define vmlasq(__a, __b, __c) __arm_vmlasq(__a, __b, __c)
+#define vmlaq(__a, __b, __c) __arm_vmlaq(__a, __b, __c)
+#define vmladavq_p(__a, __b, __p) __arm_vmladavq_p(__a, __b, __p)
+#define vmladavaq(__a, __b, __c) __arm_vmladavaq(__a, __b, __c)
+#define vminvq_p(__a, __b, __p) __arm_vminvq_p(__a, __b, __p)
+#define vmaxvq_p(__a, __b, __p) __arm_vmaxvq_p(__a, __b, __p)
+#define vdupq_m(__inactive, __a, __p) __arm_vdupq_m(__inactive, __a, __p)
+#define vcmpneq_m(__a, __b, __p) __arm_vcmpneq_m(__a, __b, __p)
+#define vcmphiq_m(__a, __b, __p) __arm_vcmphiq_m(__a, __b, __p)
+#define vcmpeqq_m(__a, __b, __p) __arm_vcmpeqq_m(__a, __b, __p)
+#define vcmpcsq_m(__a, __b, __p) __arm_vcmpcsq_m(__a, __b, __p)
+#define vcmpcsq_m_n(__a, __b, __p) __arm_vcmpcsq_m_n(__a, __b, __p)
+#define vclzq_m(__inactive, __a, __p) __arm_vclzq_m(__inactive, __a, __p)
+#define vaddvaq_p(__a, __b, __p) __arm_vaddvaq_p(__a, __b, __p)
+#define vsriq(__a, __b, __imm) __arm_vsriq(__a, __b, __imm)
+#define vsliq(__a, __b, __imm) __arm_vsliq(__a, __b, __imm)
+#define vshlq_m_r(__a, __b, __p) __arm_vshlq_m_r(__a, __b, __p)
+#define vrshlq_m_n(__a, __b, __p) __arm_vrshlq_m_n(__a, __b, __p)
+#define vqshlq_m_r(__a, __b, __p) __arm_vqshlq_m_r(__a, __b, __p)
+#define vqrshlq_m_n(__a, __b, __p) __arm_vqrshlq_m_n(__a, __b, __p)
+#define vminavq_p(__a, __b, __p) __arm_vminavq_p(__a, __b, __p)
+#define vminaq_m(__a, __b, __p) __arm_vminaq_m(__a, __b, __p)
+#define vmaxavq_p(__a, __b, __p) __arm_vmaxavq_p(__a, __b, __p)
+#define vmaxaq_m(__a, __b, __p) __arm_vmaxaq_m(__a, __b, __p)
+#define vcmpltq_m(__a, __b, __p) __arm_vcmpltq_m(__a, __b, __p)
+#define vcmpleq_m(__a, __b, __p) __arm_vcmpleq_m(__a, __b, __p)
+#define vcmpgtq_m(__a, __b, __p) __arm_vcmpgtq_m(__a, __b, __p)
+#define vcmpgeq_m(__a, __b, __p) __arm_vcmpgeq_m(__a, __b, __p)
+#define vqnegq_m(__inactive, __a, __p) __arm_vqnegq_m(__inactive, __a, __p)
+#define vqabsq_m(__inactive, __a, __p) __arm_vqabsq_m(__inactive, __a, __p)
+#define vnegq_m(__inactive, __a, __p) __arm_vnegq_m(__inactive, __a, __p)
+#define vmlsdavxq_p(__a, __b, __p) __arm_vmlsdavxq_p(__a, __b, __p)
+#define vmlsdavq_p(__a, __b, __p) __arm_vmlsdavq_p(__a, __b, __p)
+#define vmladavxq_p(__a, __b, __p) __arm_vmladavxq_p(__a, __b, __p)
+#define vclsq_m(__inactive, __a, __p) __arm_vclsq_m(__inactive, __a, __p)
+#define vabsq_m(__inactive, __a, __p) __arm_vabsq_m(__inactive, __a, __p)
+#define vqrdmlsdhxq(__inactive, __a, __b) __arm_vqrdmlsdhxq(__inactive, __a, __b)
+#define vqrdmlsdhq(__inactive, __a, __b) __arm_vqrdmlsdhq(__inactive, __a, __b)
+#define vqrdmladhxq(__inactive, __a, __b) __arm_vqrdmladhxq(__inactive, __a, __b)
+#define vqrdmladhq(__inactive, __a, __b) __arm_vqrdmladhq(__inactive, __a, __b)
+#define vqdmlsdhxq(__inactive, __a, __b) __arm_vqdmlsdhxq(__inactive, __a, __b)
+#define vqdmlsdhq(__inactive, __a, __b) __arm_vqdmlsdhq(__inactive, __a, __b)
+#define vqdmladhxq(__inactive, __a, __b) __arm_vqdmladhxq(__inactive, __a, __b)
+#define vqdmladhq(__inactive, __a, __b) __arm_vqdmladhq(__inactive, __a, __b)
+#define vmlsdavaxq(__a, __b, __c) __arm_vmlsdavaxq(__a, __b, __c)
+#define vmlsdavaq(__a, __b, __c) __arm_vmlsdavaq(__a, __b, __c)
+#define vmladavaxq(__a, __b, __c) __arm_vmladavaxq(__a, __b, __c)
+#define vrmlaldavhaxq(__a, __b, __c) __arm_vrmlaldavhaxq(__a, __b, __c)
+#define vrmlsldavhaq(__a, __b, __c) __arm_vrmlsldavhaq(__a, __b, __c)
+#define vrmlsldavhaxq(__a, __b, __c) __arm_vrmlsldavhaxq(__a, __b, __c)
+#define vaddlvaq_p(__a, __b, __p) __arm_vaddlvaq_p(__a, __b, __p)
+#define vrev16q_m(__inactive, __a, __p) __arm_vrev16q_m(__inactive, __a, __p)
+#define vrmlaldavhq_p(__a, __b, __p) __arm_vrmlaldavhq_p(__a, __b, __p)
+#define vrmlaldavhxq_p(__a, __b, __p) __arm_vrmlaldavhxq_p(__a, __b, __p)
+#define vrmlsldavhq_p(__a, __b, __p) __arm_vrmlsldavhq_p(__a, __b, __p)
+#define vrmlsldavhxq_p(__a, __b, __p) __arm_vrmlsldavhxq_p(__a, __b, __p)
+#define vorrq_m_n(__a, __imm, __p) __arm_vorrq_m_n(__a, __imm, __p)
+#define vqrshrntq(__a, __b, __imm) __arm_vqrshrntq(__a, __b, __imm)
+#define vqshrnbq(__a, __b, __imm) __arm_vqshrnbq(__a, __b, __imm)
+#define vqshrntq(__a, __b, __imm) __arm_vqshrntq(__a, __b, __imm)
+#define vrshrnbq(__a, __b, __imm) __arm_vrshrnbq(__a, __b, __imm)
+#define vrshrntq(__a, __b, __imm) __arm_vrshrntq(__a, __b, __imm)
+#define vshrnbq(__a, __b, __imm) __arm_vshrnbq(__a, __b, __imm)
+#define vshrntq(__a, __b, __imm) __arm_vshrntq(__a, __b, __imm)
+#define vmlaldavaq(__a, __b, __c) __arm_vmlaldavaq(__a, __b, __c)
+#define vmlaldavaxq(__a, __b, __c) __arm_vmlaldavaxq(__a, __b, __c)
+#define vmlsldavaq(__a, __b, __c) __arm_vmlsldavaq(__a, __b, __c)
+#define vmlsldavaxq(__a, __b, __c) __arm_vmlsldavaxq(__a, __b, __c)
+#define vmlaldavq_p(__a, __b, __p) __arm_vmlaldavq_p(__a, __b, __p)
+#define vmlaldavxq_p(__a, __b, __p) __arm_vmlaldavxq_p(__a, __b, __p)
+#define vmlsldavq_p(__a, __b, __p) __arm_vmlsldavq_p(__a, __b, __p)
+#define vmlsldavxq_p(__a, __b, __p) __arm_vmlsldavxq_p(__a, __b, __p)
+#define vmovlbq_m(__inactive, __a, __p) __arm_vmovlbq_m(__inactive, __a, __p)
+#define vmovltq_m(__inactive, __a, __p) __arm_vmovltq_m(__inactive, __a, __p)
+#define vmovnbq_m(__a, __b, __p) __arm_vmovnbq_m(__a, __b, __p)
+#define vmovntq_m(__a, __b, __p) __arm_vmovntq_m(__a, __b, __p)
+#define vqmovnbq_m(__a, __b, __p) __arm_vqmovnbq_m(__a, __b, __p)
+#define vqmovntq_m(__a, __b, __p) __arm_vqmovntq_m(__a, __b, __p)
+#define vrev32q_m(__inactive, __a, __p) __arm_vrev32q_m(__inactive, __a, __p)
+#define vqrshruntq(__a, __b, __imm) __arm_vqrshruntq(__a, __b, __imm)
+#define vqshrunbq(__a, __b, __imm) __arm_vqshrunbq(__a, __b, __imm)
+#define vqshruntq(__a, __b, __imm) __arm_vqshruntq(__a, __b, __imm)
+#define vqmovunbq_m(__a, __b, __p) __arm_vqmovunbq_m(__a, __b, __p)
+#define vqmovuntq_m(__a, __b, __p) __arm_vqmovuntq_m(__a, __b, __p)
+#define vsriq_m(__a, __b, __imm, __p) __arm_vsriq_m(__a, __b, __imm, __p)
+#define vsubq_m(__inactive, __a, __b, __p) __arm_vsubq_m(__inactive, __a, __b, __p)
+#define vqshluq_m(__inactive, __a, __imm, __p) __arm_vqshluq_m(__inactive, __a, __imm, __p)
+#define vabavq_p(__a, __b, __c, __p) __arm_vabavq_p(__a, __b, __c, __p)
+#define vshlq_m(__inactive, __a, __b, __p) __arm_vshlq_m(__inactive, __a, __b, __p)
+#define vabdq_m(__inactive, __a, __b, __p) __arm_vabdq_m(__inactive, __a, __b, __p)
+#define vaddq_m(__inactive, __a, __b, __p) __arm_vaddq_m(__inactive, __a, __b, __p)
+#define vandq_m(__inactive, __a, __b, __p) __arm_vandq_m(__inactive, __a, __b, __p)
+#define vbicq_m(__inactive, __a, __b, __p) __arm_vbicq_m(__inactive, __a, __b, __p)
+#define vbrsrq_m(__inactive, __a, __b, __p) __arm_vbrsrq_m(__inactive, __a, __b, __p)
+#define vcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m(__inactive, __a, __b, __p)
+#define vcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m(__inactive, __a, __b, __p)
+#define veorq_m(__inactive, __a, __b, __p) __arm_veorq_m(__inactive, __a, __b, __p)
+#define vhaddq_m(__inactive, __a, __b, __p) __arm_vhaddq_m(__inactive, __a, __b, __p)
+#define vhcaddq_rot270_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m(__inactive, __a, __b, __p)
+#define vhcaddq_rot90_m(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m(__inactive, __a, __b, __p)
+#define vhsubq_m(__inactive, __a, __b, __p) __arm_vhsubq_m(__inactive, __a, __b, __p)
+#define vmaxq_m(__inactive, __a, __b, __p) __arm_vmaxq_m(__inactive, __a, __b, __p)
+#define vminq_m(__inactive, __a, __b, __p) __arm_vminq_m(__inactive, __a, __b, __p)
+#define vmladavaq_p(__a, __b, __c, __p) __arm_vmladavaq_p(__a, __b, __c, __p)
+#define vmladavaxq_p(__a, __b, __c, __p) __arm_vmladavaxq_p(__a, __b, __c, __p)
+#define vmlaq_m(__a, __b, __c, __p) __arm_vmlaq_m(__a, __b, __c, __p)
+#define vmlasq_m(__a, __b, __c, __p) __arm_vmlasq_m(__a, __b, __c, __p)
+#define vmlsdavaq_p(__a, __b, __c, __p) __arm_vmlsdavaq_p(__a, __b, __c, __p)
+#define vmlsdavaxq_p(__a, __b, __c, __p) __arm_vmlsdavaxq_p(__a, __b, __c, __p)
+#define vmulhq_m(__inactive, __a, __b, __p) __arm_vmulhq_m(__inactive, __a, __b, __p)
+#define vmullbq_int_m(__inactive, __a, __b, __p) __arm_vmullbq_int_m(__inactive, __a, __b, __p)
+#define vmulltq_int_m(__inactive, __a, __b, __p) __arm_vmulltq_int_m(__inactive, __a, __b, __p)
+#define vmulq_m(__inactive, __a, __b, __p) __arm_vmulq_m(__inactive, __a, __b, __p)
+#define vornq_m(__inactive, __a, __b, __p) __arm_vornq_m(__inactive, __a, __b, __p)
+#define vorrq_m(__inactive, __a, __b, __p) __arm_vorrq_m(__inactive, __a, __b, __p)
+#define vqaddq_m(__inactive, __a, __b, __p) __arm_vqaddq_m(__inactive, __a, __b, __p)
+#define vqdmladhq_m(__inactive, __a, __b, __p) __arm_vqdmladhq_m(__inactive, __a, __b, __p)
+#define vqdmlashq_m(__a, __b, __c, __p) __arm_vqdmlashq_m(__a, __b, __c, __p)
+#define vqdmladhxq_m(__inactive, __a, __b, __p) __arm_vqdmladhxq_m(__inactive, __a, __b, __p)
+#define vqdmlahq_m(__a, __b, __c, __p) __arm_vqdmlahq_m(__a, __b, __c, __p)
+#define vqdmlsdhq_m(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m(__inactive, __a, __b, __p)
+#define vqdmlsdhxq_m(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m(__inactive, __a, __b, __p)
+#define vqdmulhq_m(__inactive, __a, __b, __p) __arm_vqdmulhq_m(__inactive, __a, __b, __p)
+#define vqrdmladhq_m(__inactive, __a, __b, __p) __arm_vqrdmladhq_m(__inactive, __a, __b, __p)
+#define vqrdmladhxq_m(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m(__inactive, __a, __b, __p)
+#define vqrdmlahq_m(__a, __b, __c, __p) __arm_vqrdmlahq_m(__a, __b, __c, __p)
+#define vqrdmlashq_m(__a, __b, __c, __p) __arm_vqrdmlashq_m(__a, __b, __c, __p)
+#define vqrdmlsdhq_m(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m(__inactive, __a, __b, __p)
+#define vqrdmlsdhxq_m(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m(__inactive, __a, __b, __p)
+#define vqrdmulhq_m(__inactive, __a, __b, __p) __arm_vqrdmulhq_m(__inactive, __a, __b, __p)
+#define vqrshlq_m(__inactive, __a, __b, __p) __arm_vqrshlq_m(__inactive, __a, __b, __p)
+#define vqshlq_m_n(__inactive, __a, __imm, __p) __arm_vqshlq_m_n(__inactive, __a, __imm, __p)
+#define vqshlq_m(__inactive, __a, __b, __p) __arm_vqshlq_m(__inactive, __a, __b, __p)
+#define vqsubq_m(__inactive, __a, __b, __p) __arm_vqsubq_m(__inactive, __a, __b, __p)
+#define vrhaddq_m(__inactive, __a, __b, __p) __arm_vrhaddq_m(__inactive, __a, __b, __p)
+#define vrmulhq_m(__inactive, __a, __b, __p) __arm_vrmulhq_m(__inactive, __a, __b, __p)
+#define vrshlq_m(__inactive, __a, __b, __p) __arm_vrshlq_m(__inactive, __a, __b, __p)
+#define vrshrq_m(__inactive, __a, __imm, __p) __arm_vrshrq_m(__inactive, __a, __imm, __p)
+#define vshlq_m_n(__inactive, __a, __imm, __p) __arm_vshlq_m_n(__inactive, __a, __imm, __p)
+#define vshrq_m(__inactive, __a, __imm, __p) __arm_vshrq_m(__inactive, __a, __imm, __p)
+#define vsliq_m(__a, __b, __imm, __p) __arm_vsliq_m(__a, __b, __imm, __p)
+#define vmlaldavaq_p(__a, __b, __c, __p) __arm_vmlaldavaq_p(__a, __b, __c, __p)
+#define vmlaldavaxq_p(__a, __b, __c, __p) __arm_vmlaldavaxq_p(__a, __b, __c, __p)
+#define vmlsldavaq_p(__a, __b, __c, __p) __arm_vmlsldavaq_p(__a, __b, __c, __p)
+#define vmlsldavaxq_p(__a, __b, __c, __p) __arm_vmlsldavaxq_p(__a, __b, __c, __p)
+#define vmullbq_poly_m(__inactive, __a, __b, __p) __arm_vmullbq_poly_m(__inactive, __a, __b, __p)
+#define vmulltq_poly_m(__inactive, __a, __b, __p) __arm_vmulltq_poly_m(__inactive, __a, __b, __p)
+#define vqdmullbq_m(__inactive, __a, __b, __p) __arm_vqdmullbq_m(__inactive, __a, __b, __p)
+#define vqdmulltq_m(__inactive, __a, __b, __p) __arm_vqdmulltq_m(__inactive, __a, __b, __p)
+#define vqrshrnbq_m(__a, __b, __imm, __p) __arm_vqrshrnbq_m(__a, __b, __imm, __p)
+#define vqrshrntq_m(__a, __b, __imm, __p) __arm_vqrshrntq_m(__a, __b, __imm, __p)
+#define vqrshrunbq_m(__a, __b, __imm, __p) __arm_vqrshrunbq_m(__a, __b, __imm, __p)
+#define vqrshruntq_m(__a, __b, __imm, __p) __arm_vqrshruntq_m(__a, __b, __imm, __p)
+#define vqshrnbq_m(__a, __b, __imm, __p) __arm_vqshrnbq_m(__a, __b, __imm, __p)
+#define vqshrntq_m(__a, __b, __imm, __p) __arm_vqshrntq_m(__a, __b, __imm, __p)
+#define vqshrunbq_m(__a, __b, __imm, __p) __arm_vqshrunbq_m(__a, __b, __imm, __p)
+#define vqshruntq_m(__a, __b, __imm, __p) __arm_vqshruntq_m(__a, __b, __imm, __p)
+#define vrmlaldavhaq_p(__a, __b, __c, __p) __arm_vrmlaldavhaq_p(__a, __b, __c, __p)
+#define vrmlaldavhaxq_p(__a, __b, __c, __p) __arm_vrmlaldavhaxq_p(__a, __b, __c, __p)
+#define vrmlsldavhaq_p(__a, __b, __c, __p) __arm_vrmlsldavhaq_p(__a, __b, __c, __p)
+#define vrmlsldavhaxq_p(__a, __b, __c, __p) __arm_vrmlsldavhaxq_p(__a, __b, __c, __p)
+#define vrshrnbq_m(__a, __b, __imm, __p) __arm_vrshrnbq_m(__a, __b, __imm, __p)
+#define vrshrntq_m(__a, __b, __imm, __p) __arm_vrshrntq_m(__a, __b, __imm, __p)
+#define vshllbq_m(__inactive, __a, __imm, __p) __arm_vshllbq_m(__inactive, __a, __imm, __p)
+#define vshlltq_m(__inactive, __a, __imm, __p) __arm_vshlltq_m(__inactive, __a, __imm, __p)
+#define vshrnbq_m(__a, __b, __imm, __p) __arm_vshrnbq_m(__a, __b, __imm, __p)
+#define vshrntq_m(__a, __b, __imm, __p) __arm_vshrntq_m(__a, __b, __imm, __p)
+#define vstrbq_scatter_offset(__base, __offset, __value) __arm_vstrbq_scatter_offset(__base, __offset, __value)
+#define vstrbq(__addr, __value) __arm_vstrbq(__addr, __value)
+#define vstrwq_scatter_base(__addr, __offset, __value) __arm_vstrwq_scatter_base(__addr, __offset, __value)
+#define vldrbq_gather_offset(__base, __offset) __arm_vldrbq_gather_offset(__base, __offset)
+#define vstrbq_p(__addr, __value, __p) __arm_vstrbq_p(__addr, __value, __p)
+#define vstrbq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p(__base, __offset, __value, __p)
+#define vstrwq_scatter_base_p(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p(__addr, __offset, __value, __p)
+#define vldrbq_gather_offset_z(__base, __offset, __p) __arm_vldrbq_gather_offset_z(__base, __offset, __p)
+#define vld1q(__base) __arm_vld1q(__base)
+#define vldrhq_gather_offset(__base, __offset) __arm_vldrhq_gather_offset(__base, __offset)
+#define vldrhq_gather_offset_z(__base, __offset, __p) __arm_vldrhq_gather_offset_z(__base, __offset, __p)
+#define vldrhq_gather_shifted_offset(__base, __offset) __arm_vldrhq_gather_shifted_offset(__base, __offset)
+#define vldrhq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z(__base, __offset, __p)
+#define vldrdq_gather_offset(__base, __offset) __arm_vldrdq_gather_offset(__base, __offset)
+#define vldrdq_gather_offset_z(__base, __offset, __p) __arm_vldrdq_gather_offset_z(__base, __offset, __p)
+#define vldrdq_gather_shifted_offset(__base, __offset) __arm_vldrdq_gather_shifted_offset(__base, __offset)
+#define vldrdq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z(__base, __offset, __p)
+#define vldrwq_gather_offset(__base, __offset) __arm_vldrwq_gather_offset(__base, __offset)
+#define vldrwq_gather_offset_z(__base, __offset, __p) __arm_vldrwq_gather_offset_z(__base, __offset, __p)
+#define vldrwq_gather_shifted_offset(__base, __offset) __arm_vldrwq_gather_shifted_offset(__base, __offset)
+#define vldrwq_gather_shifted_offset_z(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z(__base, __offset, __p)
+#define vst1q(__addr, __value) __arm_vst1q(__addr, __value)
+#define vstrhq_scatter_offset(__base, __offset, __value) __arm_vstrhq_scatter_offset(__base, __offset, __value)
+#define vstrhq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p(__base, __offset, __value, __p)
+#define vstrhq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrhq_scatter_shifted_offset(__base, __offset, __value)
+#define vstrhq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p(__base, __offset, __value, __p)
+#define vstrhq(__addr, __value) __arm_vstrhq(__addr, __value)
+#define vstrhq_p(__addr, __value, __p) __arm_vstrhq_p(__addr, __value, __p)
+#define vstrwq(__addr, __value) __arm_vstrwq(__addr, __value)
+#define vstrwq_p(__addr, __value, __p) __arm_vstrwq_p(__addr, __value, __p)
+#define vstrdq_scatter_base_p(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p(__addr, __offset, __value, __p)
+#define vstrdq_scatter_base(__addr, __offset, __value) __arm_vstrdq_scatter_base(__addr, __offset, __value)
+#define vstrdq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p(__base, __offset, __value, __p)
+#define vstrdq_scatter_offset(__base, __offset, __value) __arm_vstrdq_scatter_offset(__base, __offset, __value)
+#define vstrdq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p(__base, __offset, __value, __p)
+#define vstrdq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset(__base, __offset, __value)
+#define vstrwq_scatter_offset_p(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p(__base, __offset, __value, __p)
+#define vstrwq_scatter_offset(__base, __offset, __value) __arm_vstrwq_scatter_offset(__base, __offset, __value)
+#define vstrwq_scatter_shifted_offset_p(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p(__base, __offset, __value, __p)
+#define vstrwq_scatter_shifted_offset(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset(__base, __offset, __value)
+#define vuninitializedq(__v) __arm_vuninitializedq(__v)
+#define vreinterpretq_s16(__a) __arm_vreinterpretq_s16(__a)
+#define vreinterpretq_s32(__a) __arm_vreinterpretq_s32(__a)
+#define vreinterpretq_s64(__a) __arm_vreinterpretq_s64(__a)
+#define vreinterpretq_s8(__a) __arm_vreinterpretq_s8(__a)
+#define vreinterpretq_u16(__a) __arm_vreinterpretq_u16(__a)
+#define vreinterpretq_u32(__a) __arm_vreinterpretq_u32(__a)
+#define vreinterpretq_u64(__a) __arm_vreinterpretq_u64(__a)
+#define vreinterpretq_u8(__a) __arm_vreinterpretq_u8(__a)
+#define vddupq_m(__inactive, __a, __imm, __p) __arm_vddupq_m(__inactive, __a, __imm, __p)
+#define vddupq_u8(__a, __imm) __arm_vddupq_u8(__a, __imm)
+#define vddupq_u32(__a, __imm) __arm_vddupq_u32(__a, __imm)
+#define vddupq_u16(__a, __imm) __arm_vddupq_u16(__a, __imm)
+#define vdwdupq_m(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m(__inactive, __a, __b, __imm, __p)
+#define vdwdupq_u8(__a, __b, __imm) __arm_vdwdupq_u8(__a, __b, __imm)
+#define vdwdupq_u32(__a, __b, __imm) __arm_vdwdupq_u32(__a, __b, __imm)
+#define vdwdupq_u16(__a, __b, __imm) __arm_vdwdupq_u16(__a, __b, __imm)
+#define vidupq_m(__inactive, __a, __imm, __p) __arm_vidupq_m(__inactive, __a, __imm, __p)
+#define vidupq_u8(__a, __imm) __arm_vidupq_u8(__a, __imm)
+#define vidupq_u32(__a, __imm) __arm_vidupq_u32(__a, __imm)
+#define vidupq_u16(__a, __imm) __arm_vidupq_u16(__a, __imm)
+#define viwdupq_m(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m(__inactive, __a, __b, __imm, __p)
+#define viwdupq_u8(__a, __b, __imm) __arm_viwdupq_u8(__a, __b, __imm)
+#define viwdupq_u32(__a, __b, __imm) __arm_viwdupq_u32(__a, __b, __imm)
+#define viwdupq_u16(__a, __b, __imm) __arm_viwdupq_u16(__a, __b, __imm)
+#define vstrdq_scatter_base_wb(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb(__addr, __offset, __value)
+#define vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p(__addr, __offset, __value, __p)
+#define vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p(__addr, __offset, __value, __p)
+#define vstrwq_scatter_base_wb(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb(__addr, __offset, __value)
+#define vddupq_x_u8(__a, __imm, __p) __arm_vddupq_x_u8(__a, __imm, __p)
+#define vddupq_x_u16(__a, __imm, __p) __arm_vddupq_x_u16(__a, __imm, __p)
+#define vddupq_x_u32(__a, __imm, __p) __arm_vddupq_x_u32(__a, __imm, __p)
+#define vdwdupq_x_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_u8(__a, __b, __imm, __p)
+#define vdwdupq_x_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_u16(__a, __b, __imm, __p)
+#define vdwdupq_x_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_u32(__a, __b, __imm, __p)
+#define vidupq_x_u8(__a, __imm, __p) __arm_vidupq_x_u8(__a, __imm, __p)
+#define vidupq_x_u16(__a, __imm, __p) __arm_vidupq_x_u16(__a, __imm, __p)
+#define vidupq_x_u32(__a, __imm, __p) __arm_vidupq_x_u32(__a, __imm, __p)
+#define viwdupq_x_u8(__a, __b, __imm, __p) __arm_viwdupq_x_u8(__a, __b, __imm, __p)
+#define viwdupq_x_u16(__a, __b, __imm, __p) __arm_viwdupq_x_u16(__a, __b, __imm, __p)
+#define viwdupq_x_u32(__a, __b, __imm, __p) __arm_viwdupq_x_u32(__a, __b, __imm, __p)
+#define vminq_x(__a, __b, __p) __arm_vminq_x(__a, __b, __p)
+#define vmaxq_x(__a, __b, __p) __arm_vmaxq_x(__a, __b, __p)
+#define vabdq_x(__a, __b, __p) __arm_vabdq_x(__a, __b, __p)
+#define vabsq_x(__a, __p) __arm_vabsq_x(__a, __p)
+#define vaddq_x(__a, __b, __p) __arm_vaddq_x(__a, __b, __p)
+#define vclsq_x(__a, __p) __arm_vclsq_x(__a, __p)
+#define vclzq_x(__a, __p) __arm_vclzq_x(__a, __p)
+#define vnegq_x(__a, __p) __arm_vnegq_x(__a, __p)
+#define vmulhq_x(__a, __b, __p) __arm_vmulhq_x(__a, __b, __p)
+#define vmullbq_poly_x(__a, __b, __p) __arm_vmullbq_poly_x(__a, __b, __p)
+#define vmullbq_int_x(__a, __b, __p) __arm_vmullbq_int_x(__a, __b, __p)
+#define vmulltq_poly_x(__a, __b, __p) __arm_vmulltq_poly_x(__a, __b, __p)
+#define vmulltq_int_x(__a, __b, __p) __arm_vmulltq_int_x(__a, __b, __p)
+#define vmulq_x(__a, __b, __p) __arm_vmulq_x(__a, __b, __p)
+#define vsubq_x(__a, __b, __p) __arm_vsubq_x(__a, __b, __p)
+#define vcaddq_rot90_x(__a, __b, __p) __arm_vcaddq_rot90_x(__a, __b, __p)
+#define vcaddq_rot270_x(__a, __b, __p) __arm_vcaddq_rot270_x(__a, __b, __p)
+#define vhaddq_x(__a, __b, __p) __arm_vhaddq_x(__a, __b, __p)
+#define vhcaddq_rot90_x(__a, __b, __p) __arm_vhcaddq_rot90_x(__a, __b, __p)
+#define vhcaddq_rot270_x(__a, __b, __p) __arm_vhcaddq_rot270_x(__a, __b, __p)
+#define vhsubq_x(__a, __b, __p) __arm_vhsubq_x(__a, __b, __p)
+#define vrhaddq_x(__a, __b, __p) __arm_vrhaddq_x(__a, __b, __p)
+#define vrmulhq_x(__a, __b, __p) __arm_vrmulhq_x(__a, __b, __p)
+#define vandq_x(__a, __b, __p) __arm_vandq_x(__a, __b, __p)
+#define vbicq_x(__a, __b, __p) __arm_vbicq_x(__a, __b, __p)
+#define vbrsrq_x(__a, __b, __p) __arm_vbrsrq_x(__a, __b, __p)
+#define veorq_x(__a, __b, __p) __arm_veorq_x(__a, __b, __p)
+#define vmovlbq_x(__a, __p) __arm_vmovlbq_x(__a, __p)
+#define vmovltq_x(__a, __p) __arm_vmovltq_x(__a, __p)
+#define vmvnq_x(__a, __p) __arm_vmvnq_x(__a, __p)
+#define vornq_x(__a, __b, __p) __arm_vornq_x(__a, __b, __p)
+#define vorrq_x(__a, __b, __p) __arm_vorrq_x(__a, __b, __p)
+#define vrev16q_x(__a, __p) __arm_vrev16q_x(__a, __p)
+#define vrev32q_x(__a, __p) __arm_vrev32q_x(__a, __p)
+#define vrev64q_x(__a, __p) __arm_vrev64q_x(__a, __p)
+#define vrshlq_x(__a, __b, __p) __arm_vrshlq_x(__a, __b, __p)
+#define vshllbq_x(__a, __imm, __p) __arm_vshllbq_x(__a, __imm, __p)
+#define vshlltq_x(__a, __imm, __p) __arm_vshlltq_x(__a, __imm, __p)
+#define vshlq_x(__a, __b, __p) __arm_vshlq_x(__a, __b, __p)
+#define vshlq_x_n(__a, __imm, __p) __arm_vshlq_x_n(__a, __imm, __p)
+#define vrshrq_x(__a, __imm, __p) __arm_vrshrq_x(__a, __imm, __p)
+#define vshrq_x(__a, __imm, __p) __arm_vshrq_x(__a, __imm, __p)
+#define vadciq(__a, __b, __carry_out) __arm_vadciq(__a, __b, __carry_out)
+#define vadciq_m(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m(__inactive, __a, __b, __carry_out, __p)
+#define vadcq(__a, __b, __carry) __arm_vadcq(__a, __b, __carry)
+#define vadcq_m(__inactive, __a, __b, __carry, __p) __arm_vadcq_m(__inactive, __a, __b, __carry, __p)
+#define vsbciq(__a, __b, __carry_out) __arm_vsbciq(__a, __b, __carry_out)
+#define vsbciq_m(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m(__inactive, __a, __b, __carry_out, __p)
+#define vsbcq(__a, __b, __carry) __arm_vsbcq(__a, __b, __carry)
+#define vsbcq_m(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m(__inactive, __a, __b, __carry, __p)
+#define vst1q_p(__addr, __value, __p) __arm_vst1q_p(__addr, __value, __p)
+#define vst2q(__addr, __value) __arm_vst2q(__addr, __value)
+#define vld1q_z(__base, __p) __arm_vld1q_z(__base, __p)
+#define vld2q(__addr) __arm_vld2q(__addr)
+#define vld4q(__addr) __arm_vld4q(__addr)
+#define vsetq_lane(__a, __b, __idx) __arm_vsetq_lane(__a, __b, __idx)
+#define vgetq_lane(__a, __idx) __arm_vgetq_lane(__a, __idx)
+#define vshlcq_m(__a, __b, __imm, __p) __arm_vshlcq_m(__a, __b, __imm, __p)
+#define vrndxq(__a) __arm_vrndxq(__a)
+#define vrndq(__a) __arm_vrndq(__a)
+#define vrndpq(__a) __arm_vrndpq(__a)
+#define vrndnq(__a) __arm_vrndnq(__a)
+#define vrndmq(__a) __arm_vrndmq(__a)
+#define vrndaq(__a) __arm_vrndaq(__a)
+#define vcvttq_f32(__a) __arm_vcvttq_f32(__a)
+#define vcvtbq_f32(__a) __arm_vcvtbq_f32(__a)
+#define vcvtq(__a) __arm_vcvtq(__a)
+#define vcvtq_n(__a, __imm6) __arm_vcvtq_n(__a, __imm6)
+#define vminnmvq(__a, __b) __arm_vminnmvq(__a, __b)
+#define vminnmq(__a, __b) __arm_vminnmq(__a, __b)
+#define vminnmavq(__a, __b) __arm_vminnmavq(__a, __b)
+#define vminnmaq(__a, __b) __arm_vminnmaq(__a, __b)
+#define vmaxnmvq(__a, __b) __arm_vmaxnmvq(__a, __b)
+#define vmaxnmq(__a, __b) __arm_vmaxnmq(__a, __b)
+#define vmaxnmavq(__a, __b) __arm_vmaxnmavq(__a, __b)
+#define vmaxnmaq(__a, __b) __arm_vmaxnmaq(__a, __b)
+#define vcmulq_rot90(__a, __b) __arm_vcmulq_rot90(__a, __b)
+#define vcmulq_rot270(__a, __b) __arm_vcmulq_rot270(__a, __b)
+#define vcmulq_rot180(__a, __b) __arm_vcmulq_rot180(__a, __b)
+#define vcmulq(__a, __b) __arm_vcmulq(__a, __b)
+#define vcvtaq_m(__inactive, __a, __p) __arm_vcvtaq_m(__inactive, __a, __p)
+#define vcvtq_m(__inactive, __a, __p) __arm_vcvtq_m(__inactive, __a, __p)
+#define vcvtbq_m(__a, __b, __p) __arm_vcvtbq_m(__a, __b, __p)
+#define vcvttq_m(__a, __b, __p) __arm_vcvttq_m(__a, __b, __p)
+#define vcmlaq(__a, __b, __c) __arm_vcmlaq(__a, __b, __c)
+#define vcmlaq_rot180(__a, __b, __c) __arm_vcmlaq_rot180(__a, __b, __c)
+#define vcmlaq_rot270(__a, __b, __c) __arm_vcmlaq_rot270(__a, __b, __c)
+#define vcmlaq_rot90(__a, __b, __c) __arm_vcmlaq_rot90(__a, __b, __c)
+#define vfmaq(__a, __b, __c) __arm_vfmaq(__a, __b, __c)
+#define vfmasq(__a, __b, __c) __arm_vfmasq(__a, __b, __c)
+#define vfmsq(__a, __b, __c) __arm_vfmsq(__a, __b, __c)
+#define vcvtmq_m(__inactive, __a, __p) __arm_vcvtmq_m(__inactive, __a, __p)
+#define vcvtnq_m(__inactive, __a, __p) __arm_vcvtnq_m(__inactive, __a, __p)
+#define vcvtpq_m(__inactive, __a, __p) __arm_vcvtpq_m(__inactive, __a, __p)
+#define vmaxnmaq_m(__a, __b, __p) __arm_vmaxnmaq_m(__a, __b, __p)
+#define vmaxnmavq_p(__a, __b, __p) __arm_vmaxnmavq_p(__a, __b, __p)
+#define vmaxnmvq_p(__a, __b, __p) __arm_vmaxnmvq_p(__a, __b, __p)
+#define vminnmaq_m(__a, __b, __p) __arm_vminnmaq_m(__a, __b, __p)
+#define vminnmavq_p(__a, __b, __p) __arm_vminnmavq_p(__a, __b, __p)
+#define vminnmvq_p(__a, __b, __p) __arm_vminnmvq_p(__a, __b, __p)
+#define vrndaq_m(__inactive, __a, __p) __arm_vrndaq_m(__inactive, __a, __p)
+#define vrndmq_m(__inactive, __a, __p) __arm_vrndmq_m(__inactive, __a, __p)
+#define vrndnq_m(__inactive, __a, __p) __arm_vrndnq_m(__inactive, __a, __p)
+#define vrndpq_m(__inactive, __a, __p) __arm_vrndpq_m(__inactive, __a, __p)
+#define vrndq_m(__inactive, __a, __p) __arm_vrndq_m(__inactive, __a, __p)
+#define vrndxq_m(__inactive, __a, __p) __arm_vrndxq_m(__inactive, __a, __p)
+#define vcvtq_m_n(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n(__inactive, __a, __imm6, __p)
+#define vcmlaq_m(__a, __b, __c, __p) __arm_vcmlaq_m(__a, __b, __c, __p)
+#define vcmlaq_rot180_m(__a, __b, __c, __p) __arm_vcmlaq_rot180_m(__a, __b, __c, __p)
+#define vcmlaq_rot270_m(__a, __b, __c, __p) __arm_vcmlaq_rot270_m(__a, __b, __c, __p)
+#define vcmlaq_rot90_m(__a, __b, __c, __p) __arm_vcmlaq_rot90_m(__a, __b, __c, __p)
+#define vcmulq_m(__inactive, __a, __b, __p) __arm_vcmulq_m(__inactive, __a, __b, __p)
+#define vcmulq_rot180_m(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m(__inactive, __a, __b, __p)
+#define vcmulq_rot270_m(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m(__inactive, __a, __b, __p)
+#define vcmulq_rot90_m(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m(__inactive, __a, __b, __p)
+#define vfmaq_m(__a, __b, __c, __p) __arm_vfmaq_m(__a, __b, __c, __p)
+#define vfmasq_m(__a, __b, __c, __p) __arm_vfmasq_m(__a, __b, __c, __p)
+#define vfmsq_m(__a, __b, __c, __p) __arm_vfmsq_m(__a, __b, __c, __p)
+#define vmaxnmq_m(__inactive, __a, __b, __p) __arm_vmaxnmq_m(__inactive, __a, __b, __p)
+#define vminnmq_m(__inactive, __a, __b, __p) __arm_vminnmq_m(__inactive, __a, __b, __p)
+#define vreinterpretq_f16(__a) __arm_vreinterpretq_f16(__a)
+#define vreinterpretq_f32(__a) __arm_vreinterpretq_f32(__a)
+#define vminnmq_x(__a, __b, __p) __arm_vminnmq_x(__a, __b, __p)
+#define vmaxnmq_x(__a, __b, __p) __arm_vmaxnmq_x(__a, __b, __p)
+#define vcmulq_x(__a, __b, __p) __arm_vcmulq_x(__a, __b, __p)
+#define vcmulq_rot90_x(__a, __b, __p) __arm_vcmulq_rot90_x(__a, __b, __p)
+#define vcmulq_rot180_x(__a, __b, __p) __arm_vcmulq_rot180_x(__a, __b, __p)
+#define vcmulq_rot270_x(__a, __b, __p) __arm_vcmulq_rot270_x(__a, __b, __p)
+#define vcvtq_x(__a, __p) __arm_vcvtq_x(__a, __p)
+#define vcvtq_x_n(__a, __imm6, __p) __arm_vcvtq_x_n(__a, __imm6, __p)
+#define vrndq_x(__a, __p) __arm_vrndq_x(__a, __p)
+#define vrndnq_x(__a, __p) __arm_vrndnq_x(__a, __p)
+#define vrndmq_x(__a, __p) __arm_vrndmq_x(__a, __p)
+#define vrndpq_x(__a, __p) __arm_vrndpq_x(__a, __p)
+#define vrndaq_x(__a, __p) __arm_vrndaq_x(__a, __p)
+#define vrndxq_x(__a, __p) __arm_vrndxq_x(__a, __p)
+
+
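+/* The unsuffixed macros above map the polymorphic user-facing names onto
+   their __arm_-prefixed implementations, which dispatch on argument type
+   further down in this header (via _Generic selection in C, function
+   overloads in C++).  The macros below map the explicit type-suffixed
+   variants one-to-one.  A minimal usage sketch -- assuming an MVE-enabled
+   target such as -mcpu=cortex-m55; the function name add_masked is
+   illustrative only:
+
+     #include <arm_mve.h>
+
+     int32x4_t add_masked (int32x4_t a, int32x4_t b, mve_pred16_t p)
+     {
+       return vaddq_x (a, b, p);  // expands to __arm_vaddq_x (a, b, p)
+     }
+*/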
+#define vst4q_s8(__addr, __value) __arm_vst4q_s8(__addr, __value)
+#define vst4q_s16(__addr, __value) __arm_vst4q_s16(__addr, __value)
+#define vst4q_s32(__addr, __value) __arm_vst4q_s32(__addr, __value)
+#define vst4q_u8(__addr, __value) __arm_vst4q_u8(__addr, __value)
+#define vst4q_u16(__addr, __value) __arm_vst4q_u16(__addr, __value)
+#define vst4q_u32(__addr, __value) __arm_vst4q_u32(__addr, __value)
+#define vst4q_f16(__addr, __value) __arm_vst4q_f16(__addr, __value)
+#define vst4q_f32(__addr, __value) __arm_vst4q_f32(__addr, __value)
+#define vrndxq_f16(__a) __arm_vrndxq_f16(__a)
+#define vrndxq_f32(__a) __arm_vrndxq_f32(__a)
+#define vrndq_f16(__a) __arm_vrndq_f16(__a)
+#define vrndq_f32(__a) __arm_vrndq_f32(__a)
+#define vrndpq_f16(__a) __arm_vrndpq_f16(__a)
+#define vrndpq_f32(__a) __arm_vrndpq_f32(__a)
+#define vrndnq_f16(__a) __arm_vrndnq_f16(__a)
+#define vrndnq_f32(__a) __arm_vrndnq_f32(__a)
+#define vrndmq_f16(__a) __arm_vrndmq_f16(__a)
+#define vrndmq_f32(__a) __arm_vrndmq_f32(__a)
+#define vrndaq_f16(__a) __arm_vrndaq_f16(__a)
+#define vrndaq_f32(__a) __arm_vrndaq_f32(__a)
+#define vrev64q_f16(__a) __arm_vrev64q_f16(__a)
+#define vrev64q_f32(__a) __arm_vrev64q_f32(__a)
+#define vnegq_f16(__a) __arm_vnegq_f16(__a)
+#define vnegq_f32(__a) __arm_vnegq_f32(__a)
+#define vdupq_n_f16(__a) __arm_vdupq_n_f16(__a)
+#define vdupq_n_f32(__a) __arm_vdupq_n_f32(__a)
+#define vabsq_f16(__a) __arm_vabsq_f16(__a)
+#define vabsq_f32(__a) __arm_vabsq_f32(__a)
+#define vrev32q_f16(__a) __arm_vrev32q_f16(__a)
+#define vcvttq_f32_f16(__a) __arm_vcvttq_f32_f16(__a)
+#define vcvtbq_f32_f16(__a) __arm_vcvtbq_f32_f16(__a)
+#define vcvtq_f16_s16(__a) __arm_vcvtq_f16_s16(__a)
+#define vcvtq_f32_s32(__a) __arm_vcvtq_f32_s32(__a)
+#define vcvtq_f16_u16(__a) __arm_vcvtq_f16_u16(__a)
+#define vcvtq_f32_u32(__a) __arm_vcvtq_f32_u32(__a)
+#define vdupq_n_s8(__a) __arm_vdupq_n_s8(__a)
+#define vdupq_n_s16(__a) __arm_vdupq_n_s16(__a)
+#define vdupq_n_s32(__a) __arm_vdupq_n_s32(__a)
+#define vabsq_s8(__a) __arm_vabsq_s8(__a)
+#define vabsq_s16(__a) __arm_vabsq_s16(__a)
+#define vabsq_s32(__a) __arm_vabsq_s32(__a)
+#define vclsq_s8(__a) __arm_vclsq_s8(__a)
+#define vclsq_s16(__a) __arm_vclsq_s16(__a)
+#define vclsq_s32(__a) __arm_vclsq_s32(__a)
+#define vclzq_s8(__a) __arm_vclzq_s8(__a)
+#define vclzq_s16(__a) __arm_vclzq_s16(__a)
+#define vclzq_s32(__a) __arm_vclzq_s32(__a)
+#define vnegq_s8(__a) __arm_vnegq_s8(__a)
+#define vnegq_s16(__a) __arm_vnegq_s16(__a)
+#define vnegq_s32(__a) __arm_vnegq_s32(__a)
+#define vaddlvq_s32(__a) __arm_vaddlvq_s32(__a)
+#define vaddvq_s8(__a) __arm_vaddvq_s8(__a)
+#define vaddvq_s16(__a) __arm_vaddvq_s16(__a)
+#define vaddvq_s32(__a) __arm_vaddvq_s32(__a)
+#define vmovlbq_s8(__a) __arm_vmovlbq_s8(__a)
+#define vmovlbq_s16(__a) __arm_vmovlbq_s16(__a)
+#define vmovltq_s8(__a) __arm_vmovltq_s8(__a)
+#define vmovltq_s16(__a) __arm_vmovltq_s16(__a)
+#define vmvnq_s8(__a) __arm_vmvnq_s8(__a)
+#define vmvnq_s16(__a) __arm_vmvnq_s16(__a)
+#define vmvnq_s32(__a) __arm_vmvnq_s32(__a)
+#define vmvnq_n_s16(__imm) __arm_vmvnq_n_s16(__imm)
+#define vmvnq_n_s32(__imm) __arm_vmvnq_n_s32(__imm)
+#define vrev16q_s8(__a) __arm_vrev16q_s8(__a)
+#define vrev32q_s8(__a) __arm_vrev32q_s8(__a)
+#define vrev32q_s16(__a) __arm_vrev32q_s16(__a)
+#define vrev64q_s8(__a) __arm_vrev64q_s8(__a)
+#define vrev64q_s16(__a) __arm_vrev64q_s16(__a)
+#define vrev64q_s32(__a) __arm_vrev64q_s32(__a)
+#define vqabsq_s8(__a) __arm_vqabsq_s8(__a)
+#define vqabsq_s16(__a) __arm_vqabsq_s16(__a)
+#define vqabsq_s32(__a) __arm_vqabsq_s32(__a)
+#define vqnegq_s8(__a) __arm_vqnegq_s8(__a)
+#define vqnegq_s16(__a) __arm_vqnegq_s16(__a)
+#define vqnegq_s32(__a) __arm_vqnegq_s32(__a)
+#define vcvtaq_s16_f16(__a) __arm_vcvtaq_s16_f16(__a)
+#define vcvtaq_s32_f32(__a) __arm_vcvtaq_s32_f32(__a)
+#define vcvtnq_s16_f16(__a) __arm_vcvtnq_s16_f16(__a)
+#define vcvtnq_s32_f32(__a) __arm_vcvtnq_s32_f32(__a)
+#define vcvtpq_s16_f16(__a) __arm_vcvtpq_s16_f16(__a)
+#define vcvtpq_s32_f32(__a) __arm_vcvtpq_s32_f32(__a)
+#define vcvtmq_s16_f16(__a) __arm_vcvtmq_s16_f16(__a)
+#define vcvtmq_s32_f32(__a) __arm_vcvtmq_s32_f32(__a)
+#define vcvtq_s16_f16(__a) __arm_vcvtq_s16_f16(__a)
+#define vcvtq_s32_f32(__a) __arm_vcvtq_s32_f32(__a)
+#define vrev64q_u8(__a) __arm_vrev64q_u8(__a)
+#define vrev64q_u16(__a) __arm_vrev64q_u16(__a)
+#define vrev64q_u32(__a) __arm_vrev64q_u32(__a)
+#define vmvnq_u8(__a) __arm_vmvnq_u8(__a)
+#define vmvnq_u16(__a) __arm_vmvnq_u16(__a)
+#define vmvnq_u32(__a) __arm_vmvnq_u32(__a)
+#define vdupq_n_u8(__a) __arm_vdupq_n_u8(__a)
+#define vdupq_n_u16(__a) __arm_vdupq_n_u16(__a)
+#define vdupq_n_u32(__a) __arm_vdupq_n_u32(__a)
+#define vclzq_u8(__a) __arm_vclzq_u8(__a)
+#define vclzq_u16(__a) __arm_vclzq_u16(__a)
+#define vclzq_u32(__a) __arm_vclzq_u32(__a)
+#define vaddvq_u8(__a) __arm_vaddvq_u8(__a)
+#define vaddvq_u16(__a) __arm_vaddvq_u16(__a)
+#define vaddvq_u32(__a) __arm_vaddvq_u32(__a)
+#define vrev32q_u8(__a) __arm_vrev32q_u8(__a)
+#define vrev32q_u16(__a) __arm_vrev32q_u16(__a)
+#define vmovltq_u8(__a) __arm_vmovltq_u8(__a)
+#define vmovltq_u16(__a) __arm_vmovltq_u16(__a)
+#define vmovlbq_u8(__a) __arm_vmovlbq_u8(__a)
+#define vmovlbq_u16(__a) __arm_vmovlbq_u16(__a)
+#define vmvnq_n_u16(__imm) __arm_vmvnq_n_u16(__imm)
+#define vmvnq_n_u32(__imm) __arm_vmvnq_n_u32(__imm)
+#define vrev16q_u8(__a) __arm_vrev16q_u8(__a)
+#define vaddlvq_u32(__a) __arm_vaddlvq_u32(__a)
+#define vcvtq_u16_f16(__a) __arm_vcvtq_u16_f16(__a)
+#define vcvtq_u32_f32(__a) __arm_vcvtq_u32_f32(__a)
+#define vcvtpq_u16_f16(__a) __arm_vcvtpq_u16_f16(__a)
+#define vcvtpq_u32_f32(__a) __arm_vcvtpq_u32_f32(__a)
+#define vcvtnq_u16_f16(__a) __arm_vcvtnq_u16_f16(__a)
+#define vcvtnq_u32_f32(__a) __arm_vcvtnq_u32_f32(__a)
+#define vcvtmq_u16_f16(__a) __arm_vcvtmq_u16_f16(__a)
+#define vcvtmq_u32_f32(__a) __arm_vcvtmq_u32_f32(__a)
+#define vcvtaq_u16_f16(__a) __arm_vcvtaq_u16_f16(__a)
+#define vcvtaq_u32_f32(__a) __arm_vcvtaq_u32_f32(__a)
+#define vctp16q(__a) __arm_vctp16q(__a)
+#define vctp32q(__a) __arm_vctp32q(__a)
+#define vctp64q(__a) __arm_vctp64q(__a)
+#define vctp8q(__a) __arm_vctp8q(__a)
+#define vpnot(__a) __arm_vpnot(__a)
+#define vsubq_n_f16(__a, __b) __arm_vsubq_n_f16(__a, __b)
+#define vsubq_n_f32(__a, __b) __arm_vsubq_n_f32(__a, __b)
+#define vbrsrq_n_f16(__a, __b) __arm_vbrsrq_n_f16(__a, __b)
+#define vbrsrq_n_f32(__a, __b) __arm_vbrsrq_n_f32(__a, __b)
+#define vcvtq_n_f16_s16(__a, __imm6) __arm_vcvtq_n_f16_s16(__a, __imm6)
+#define vcvtq_n_f32_s32(__a, __imm6) __arm_vcvtq_n_f32_s32(__a, __imm6)
+#define vcvtq_n_f16_u16(__a, __imm6) __arm_vcvtq_n_f16_u16(__a, __imm6)
+#define vcvtq_n_f32_u32(__a, __imm6) __arm_vcvtq_n_f32_u32(__a, __imm6)
+#define vcreateq_f16(__a, __b) __arm_vcreateq_f16(__a, __b)
+#define vcreateq_f32(__a, __b) __arm_vcreateq_f32(__a, __b)
+#define vcvtq_n_s16_f16(__a, __imm6) __arm_vcvtq_n_s16_f16(__a, __imm6)
+#define vcvtq_n_s32_f32(__a, __imm6) __arm_vcvtq_n_s32_f32(__a, __imm6)
+#define vcvtq_n_u16_f16(__a, __imm6) __arm_vcvtq_n_u16_f16(__a, __imm6)
+#define vcvtq_n_u32_f32(__a, __imm6) __arm_vcvtq_n_u32_f32(__a, __imm6)
+#define vcreateq_u8(__a, __b) __arm_vcreateq_u8(__a, __b)
+#define vcreateq_u16(__a, __b) __arm_vcreateq_u16(__a, __b)
+#define vcreateq_u32(__a, __b) __arm_vcreateq_u32(__a, __b)
+#define vcreateq_u64(__a, __b) __arm_vcreateq_u64(__a, __b)
+#define vcreateq_s8(__a, __b) __arm_vcreateq_s8(__a, __b)
+#define vcreateq_s16(__a, __b) __arm_vcreateq_s16(__a, __b)
+#define vcreateq_s32(__a, __b) __arm_vcreateq_s32(__a, __b)
+#define vcreateq_s64(__a, __b) __arm_vcreateq_s64(__a, __b)
+#define vshrq_n_s8(__a, __imm) __arm_vshrq_n_s8(__a, __imm)
+#define vshrq_n_s16(__a, __imm) __arm_vshrq_n_s16(__a, __imm)
+#define vshrq_n_s32(__a, __imm) __arm_vshrq_n_s32(__a, __imm)
+#define vshrq_n_u8(__a, __imm) __arm_vshrq_n_u8(__a, __imm)
+#define vshrq_n_u16(__a, __imm) __arm_vshrq_n_u16(__a, __imm)
+#define vshrq_n_u32(__a, __imm) __arm_vshrq_n_u32(__a, __imm)
+#define vaddlvq_p_s32(__a, __p) __arm_vaddlvq_p_s32(__a, __p)
+#define vaddlvq_p_u32(__a, __p) __arm_vaddlvq_p_u32(__a, __p)
+#define vcmpneq_s8(__a, __b) __arm_vcmpneq_s8(__a, __b)
+#define vcmpneq_s16(__a, __b) __arm_vcmpneq_s16(__a, __b)
+#define vcmpneq_s32(__a, __b) __arm_vcmpneq_s32(__a, __b)
+#define vcmpneq_u8(__a, __b) __arm_vcmpneq_u8(__a, __b)
+#define vcmpneq_u16(__a, __b) __arm_vcmpneq_u16(__a, __b)
+#define vcmpneq_u32(__a, __b) __arm_vcmpneq_u32(__a, __b)
+#define vshlq_s8(__a, __b) __arm_vshlq_s8(__a, __b)
+#define vshlq_s16(__a, __b) __arm_vshlq_s16(__a, __b)
+#define vshlq_s32(__a, __b) __arm_vshlq_s32(__a, __b)
+#define vshlq_u8(__a, __b) __arm_vshlq_u8(__a, __b)
+#define vshlq_u16(__a, __b) __arm_vshlq_u16(__a, __b)
+#define vshlq_u32(__a, __b) __arm_vshlq_u32(__a, __b)
+#define vsubq_u8(__a, __b) __arm_vsubq_u8(__a, __b)
+#define vsubq_n_u8(__a, __b) __arm_vsubq_n_u8(__a, __b)
+#define vrmulhq_u8(__a, __b) __arm_vrmulhq_u8(__a, __b)
+#define vrhaddq_u8(__a, __b) __arm_vrhaddq_u8(__a, __b)
+#define vqsubq_u8(__a, __b) __arm_vqsubq_u8(__a, __b)
+#define vqsubq_n_u8(__a, __b) __arm_vqsubq_n_u8(__a, __b)
+#define vqaddq_u8(__a, __b) __arm_vqaddq_u8(__a, __b)
+#define vqaddq_n_u8(__a, __b) __arm_vqaddq_n_u8(__a, __b)
+#define vorrq_u8(__a, __b) __arm_vorrq_u8(__a, __b)
+#define vornq_u8(__a, __b) __arm_vornq_u8(__a, __b)
+#define vmulq_u8(__a, __b) __arm_vmulq_u8(__a, __b)
+#define vmulq_n_u8(__a, __b) __arm_vmulq_n_u8(__a, __b)
+#define vmulltq_int_u8(__a, __b) __arm_vmulltq_int_u8(__a, __b)
+#define vmullbq_int_u8(__a, __b) __arm_vmullbq_int_u8(__a, __b)
+#define vmulhq_u8(__a, __b) __arm_vmulhq_u8(__a, __b)
+#define vmladavq_u8(__a, __b) __arm_vmladavq_u8(__a, __b)
+#define vminvq_u8(__a, __b) __arm_vminvq_u8(__a, __b)
+#define vminq_u8(__a, __b) __arm_vminq_u8(__a, __b)
+#define vmaxvq_u8(__a, __b) __arm_vmaxvq_u8(__a, __b)
+#define vmaxq_u8(__a, __b) __arm_vmaxq_u8(__a, __b)
+#define vhsubq_u8(__a, __b) __arm_vhsubq_u8(__a, __b)
+#define vhsubq_n_u8(__a, __b) __arm_vhsubq_n_u8(__a, __b)
+#define vhaddq_u8(__a, __b) __arm_vhaddq_u8(__a, __b)
+#define vhaddq_n_u8(__a, __b) __arm_vhaddq_n_u8(__a, __b)
+#define veorq_u8(__a, __b) __arm_veorq_u8(__a, __b)
+#define vcmpneq_n_u8(__a, __b) __arm_vcmpneq_n_u8(__a, __b)
+#define vcmphiq_u8(__a, __b) __arm_vcmphiq_u8(__a, __b)
+#define vcmphiq_n_u8(__a, __b) __arm_vcmphiq_n_u8(__a, __b)
+#define vcmpeqq_u8(__a, __b) __arm_vcmpeqq_u8(__a, __b)
+#define vcmpeqq_n_u8(__a, __b) __arm_vcmpeqq_n_u8(__a, __b)
+#define vcmpcsq_u8(__a, __b) __arm_vcmpcsq_u8(__a, __b)
+#define vcmpcsq_n_u8(__a, __b) __arm_vcmpcsq_n_u8(__a, __b)
+#define vcaddq_rot90_u8(__a, __b) __arm_vcaddq_rot90_u8(__a, __b)
+#define vcaddq_rot270_u8(__a, __b) __arm_vcaddq_rot270_u8(__a, __b)
+#define vbicq_u8(__a, __b) __arm_vbicq_u8(__a, __b)
+#define vandq_u8(__a, __b) __arm_vandq_u8(__a, __b)
+#define vaddvq_p_u8(__a, __p) __arm_vaddvq_p_u8(__a, __p)
+#define vaddvaq_u8(__a, __b) __arm_vaddvaq_u8(__a, __b)
+#define vaddq_n_u8(__a, __b) __arm_vaddq_n_u8(__a, __b)
+#define vabdq_u8(__a, __b) __arm_vabdq_u8(__a, __b)
+#define vshlq_r_u8(__a, __b) __arm_vshlq_r_u8(__a, __b)
+#define vrshlq_u8(__a, __b) __arm_vrshlq_u8(__a, __b)
+#define vrshlq_n_u8(__a, __b) __arm_vrshlq_n_u8(__a, __b)
+#define vqshlq_u8(__a, __b) __arm_vqshlq_u8(__a, __b)
+#define vqshlq_r_u8(__a, __b) __arm_vqshlq_r_u8(__a, __b)
+#define vqrshlq_u8(__a, __b) __arm_vqrshlq_u8(__a, __b)
+#define vqrshlq_n_u8(__a, __b) __arm_vqrshlq_n_u8(__a, __b)
+#define vminavq_s8(__a, __b) __arm_vminavq_s8(__a, __b)
+#define vminaq_s8(__a, __b) __arm_vminaq_s8(__a, __b)
+#define vmaxavq_s8(__a, __b) __arm_vmaxavq_s8(__a, __b)
+#define vmaxaq_s8(__a, __b) __arm_vmaxaq_s8(__a, __b)
+#define vbrsrq_n_u8(__a, __b) __arm_vbrsrq_n_u8(__a, __b)
+#define vshlq_n_u8(__a, __imm) __arm_vshlq_n_u8(__a, __imm)
+#define vrshrq_n_u8(__a, __imm) __arm_vrshrq_n_u8(__a, __imm)
+#define vqshlq_n_u8(__a, __imm) __arm_vqshlq_n_u8(__a, __imm)
+#define vcmpneq_n_s8(__a, __b) __arm_vcmpneq_n_s8(__a, __b)
+#define vcmpltq_s8(__a, __b) __arm_vcmpltq_s8(__a, __b)
+#define vcmpltq_n_s8(__a, __b) __arm_vcmpltq_n_s8(__a, __b)
+#define vcmpleq_s8(__a, __b) __arm_vcmpleq_s8(__a, __b)
+#define vcmpleq_n_s8(__a, __b) __arm_vcmpleq_n_s8(__a, __b)
+#define vcmpgtq_s8(__a, __b) __arm_vcmpgtq_s8(__a, __b)
+#define vcmpgtq_n_s8(__a, __b) __arm_vcmpgtq_n_s8(__a, __b)
+#define vcmpgeq_s8(__a, __b) __arm_vcmpgeq_s8(__a, __b)
+#define vcmpgeq_n_s8(__a, __b) __arm_vcmpgeq_n_s8(__a, __b)
+#define vcmpeqq_s8(__a, __b) __arm_vcmpeqq_s8(__a, __b)
+#define vcmpeqq_n_s8(__a, __b) __arm_vcmpeqq_n_s8(__a, __b)
+#define vqshluq_n_s8(__a, __imm) __arm_vqshluq_n_s8(__a, __imm)
+#define vaddvq_p_s8(__a, __p) __arm_vaddvq_p_s8(__a, __p)
+#define vsubq_s8(__a, __b) __arm_vsubq_s8(__a, __b)
+#define vsubq_n_s8(__a, __b) __arm_vsubq_n_s8(__a, __b)
+#define vshlq_r_s8(__a, __b) __arm_vshlq_r_s8(__a, __b)
+#define vrshlq_s8(__a, __b) __arm_vrshlq_s8(__a, __b)
+#define vrshlq_n_s8(__a, __b) __arm_vrshlq_n_s8(__a, __b)
+#define vrmulhq_s8(__a, __b) __arm_vrmulhq_s8(__a, __b)
+#define vrhaddq_s8(__a, __b) __arm_vrhaddq_s8(__a, __b)
+#define vqsubq_s8(__a, __b) __arm_vqsubq_s8(__a, __b)
+#define vqsubq_n_s8(__a, __b) __arm_vqsubq_n_s8(__a, __b)
+#define vqshlq_s8(__a, __b) __arm_vqshlq_s8(__a, __b)
+#define vqshlq_r_s8(__a, __b) __arm_vqshlq_r_s8(__a, __b)
+#define vqrshlq_s8(__a, __b) __arm_vqrshlq_s8(__a, __b)
+#define vqrshlq_n_s8(__a, __b) __arm_vqrshlq_n_s8(__a, __b)
+#define vqrdmulhq_s8(__a, __b) __arm_vqrdmulhq_s8(__a, __b)
+#define vqrdmulhq_n_s8(__a, __b) __arm_vqrdmulhq_n_s8(__a, __b)
+#define vqdmulhq_s8(__a, __b) __arm_vqdmulhq_s8(__a, __b)
+#define vqdmulhq_n_s8(__a, __b) __arm_vqdmulhq_n_s8(__a, __b)
+#define vqaddq_s8(__a, __b) __arm_vqaddq_s8(__a, __b)
+#define vqaddq_n_s8(__a, __b) __arm_vqaddq_n_s8(__a, __b)
+#define vorrq_s8(__a, __b) __arm_vorrq_s8(__a, __b)
+#define vornq_s8(__a, __b) __arm_vornq_s8(__a, __b)
+#define vmulq_s8(__a, __b) __arm_vmulq_s8(__a, __b)
+#define vmulq_n_s8(__a, __b) __arm_vmulq_n_s8(__a, __b)
+#define vmulltq_int_s8(__a, __b) __arm_vmulltq_int_s8(__a, __b)
+#define vmullbq_int_s8(__a, __b) __arm_vmullbq_int_s8(__a, __b)
+#define vmulhq_s8(__a, __b) __arm_vmulhq_s8(__a, __b)
+#define vmlsdavxq_s8(__a, __b) __arm_vmlsdavxq_s8(__a, __b)
+#define vmlsdavq_s8(__a, __b) __arm_vmlsdavq_s8(__a, __b)
+#define vmladavxq_s8(__a, __b) __arm_vmladavxq_s8(__a, __b)
+#define vmladavq_s8(__a, __b) __arm_vmladavq_s8(__a, __b)
+#define vminvq_s8(__a, __b) __arm_vminvq_s8(__a, __b)
+#define vminq_s8(__a, __b) __arm_vminq_s8(__a, __b)
+#define vmaxvq_s8(__a, __b) __arm_vmaxvq_s8(__a, __b)
+#define vmaxq_s8(__a, __b) __arm_vmaxq_s8(__a, __b)
+#define vhsubq_s8(__a, __b) __arm_vhsubq_s8(__a, __b)
+#define vhsubq_n_s8(__a, __b) __arm_vhsubq_n_s8(__a, __b)
+#define vhcaddq_rot90_s8(__a, __b) __arm_vhcaddq_rot90_s8(__a, __b)
+#define vhcaddq_rot270_s8(__a, __b) __arm_vhcaddq_rot270_s8(__a, __b)
+#define vhaddq_s8(__a, __b) __arm_vhaddq_s8(__a, __b)
+#define vhaddq_n_s8(__a, __b) __arm_vhaddq_n_s8(__a, __b)
+#define veorq_s8(__a, __b) __arm_veorq_s8(__a, __b)
+#define vcaddq_rot90_s8(__a, __b) __arm_vcaddq_rot90_s8(__a, __b)
+#define vcaddq_rot270_s8(__a, __b) __arm_vcaddq_rot270_s8(__a, __b)
+#define vbrsrq_n_s8(__a, __b) __arm_vbrsrq_n_s8(__a, __b)
+#define vbicq_s8(__a, __b) __arm_vbicq_s8(__a, __b)
+#define vandq_s8(__a, __b) __arm_vandq_s8(__a, __b)
+#define vaddvaq_s8(__a, __b) __arm_vaddvaq_s8(__a, __b)
+#define vaddq_n_s8(__a, __b) __arm_vaddq_n_s8(__a, __b)
+#define vabdq_s8(__a, __b) __arm_vabdq_s8(__a, __b)
+#define vshlq_n_s8(__a, __imm) __arm_vshlq_n_s8(__a, __imm)
+#define vrshrq_n_s8(__a, __imm) __arm_vrshrq_n_s8(__a, __imm)
+#define vqshlq_n_s8(__a, __imm) __arm_vqshlq_n_s8(__a, __imm)
+#define vsubq_u16(__a, __b) __arm_vsubq_u16(__a, __b)
+#define vsubq_n_u16(__a, __b) __arm_vsubq_n_u16(__a, __b)
+#define vrmulhq_u16(__a, __b) __arm_vrmulhq_u16(__a, __b)
+#define vrhaddq_u16(__a, __b) __arm_vrhaddq_u16(__a, __b)
+#define vqsubq_u16(__a, __b) __arm_vqsubq_u16(__a, __b)
+#define vqsubq_n_u16(__a, __b) __arm_vqsubq_n_u16(__a, __b)
+#define vqaddq_u16(__a, __b) __arm_vqaddq_u16(__a, __b)
+#define vqaddq_n_u16(__a, __b) __arm_vqaddq_n_u16(__a, __b)
+#define vorrq_u16(__a, __b) __arm_vorrq_u16(__a, __b)
+#define vornq_u16(__a, __b) __arm_vornq_u16(__a, __b)
+#define vmulq_u16(__a, __b) __arm_vmulq_u16(__a, __b)
+#define vmulq_n_u16(__a, __b) __arm_vmulq_n_u16(__a, __b)
+#define vmulltq_int_u16(__a, __b) __arm_vmulltq_int_u16(__a, __b)
+#define vmullbq_int_u16(__a, __b) __arm_vmullbq_int_u16(__a, __b)
+#define vmulhq_u16(__a, __b) __arm_vmulhq_u16(__a, __b)
+#define vmladavq_u16(__a, __b) __arm_vmladavq_u16(__a, __b)
+#define vminvq_u16(__a, __b) __arm_vminvq_u16(__a, __b)
+#define vminq_u16(__a, __b) __arm_vminq_u16(__a, __b)
+#define vmaxvq_u16(__a, __b) __arm_vmaxvq_u16(__a, __b)
+#define vmaxq_u16(__a, __b) __arm_vmaxq_u16(__a, __b)
+#define vhsubq_u16(__a, __b) __arm_vhsubq_u16(__a, __b)
+#define vhsubq_n_u16(__a, __b) __arm_vhsubq_n_u16(__a, __b)
+#define vhaddq_u16(__a, __b) __arm_vhaddq_u16(__a, __b)
+#define vhaddq_n_u16(__a, __b) __arm_vhaddq_n_u16(__a, __b)
+#define veorq_u16(__a, __b) __arm_veorq_u16(__a, __b)
+#define vcmpneq_n_u16(__a, __b) __arm_vcmpneq_n_u16(__a, __b)
+#define vcmphiq_u16(__a, __b) __arm_vcmphiq_u16(__a, __b)
+#define vcmphiq_n_u16(__a, __b) __arm_vcmphiq_n_u16(__a, __b)
+#define vcmpeqq_u16(__a, __b) __arm_vcmpeqq_u16(__a, __b)
+#define vcmpeqq_n_u16(__a, __b) __arm_vcmpeqq_n_u16(__a, __b)
+#define vcmpcsq_u16(__a, __b) __arm_vcmpcsq_u16(__a, __b)
+#define vcmpcsq_n_u16(__a, __b) __arm_vcmpcsq_n_u16(__a, __b)
+#define vcaddq_rot90_u16(__a, __b) __arm_vcaddq_rot90_u16(__a, __b)
+#define vcaddq_rot270_u16(__a, __b) __arm_vcaddq_rot270_u16(__a, __b)
+#define vbicq_u16(__a, __b) __arm_vbicq_u16(__a, __b)
+#define vandq_u16(__a, __b) __arm_vandq_u16(__a, __b)
+#define vaddvq_p_u16(__a, __p) __arm_vaddvq_p_u16(__a, __p)
+#define vaddvaq_u16(__a, __b) __arm_vaddvaq_u16(__a, __b)
+#define vaddq_n_u16(__a, __b) __arm_vaddq_n_u16(__a, __b)
+#define vabdq_u16(__a, __b) __arm_vabdq_u16(__a, __b)
+#define vshlq_r_u16(__a, __b) __arm_vshlq_r_u16(__a, __b)
+#define vrshlq_u16(__a, __b) __arm_vrshlq_u16(__a, __b)
+#define vrshlq_n_u16(__a, __b) __arm_vrshlq_n_u16(__a, __b)
+#define vqshlq_u16(__a, __b) __arm_vqshlq_u16(__a, __b)
+#define vqshlq_r_u16(__a, __b) __arm_vqshlq_r_u16(__a, __b)
+#define vqrshlq_u16(__a, __b) __arm_vqrshlq_u16(__a, __b)
+#define vqrshlq_n_u16(__a, __b) __arm_vqrshlq_n_u16(__a, __b)
+#define vminavq_s16(__a, __b) __arm_vminavq_s16(__a, __b)
+#define vminaq_s16(__a, __b) __arm_vminaq_s16(__a, __b)
+#define vmaxavq_s16(__a, __b) __arm_vmaxavq_s16(__a, __b)
+#define vmaxaq_s16(__a, __b) __arm_vmaxaq_s16(__a, __b)
+#define vbrsrq_n_u16(__a, __b) __arm_vbrsrq_n_u16(__a, __b)
+#define vshlq_n_u16(__a, __imm) __arm_vshlq_n_u16(__a, __imm)
+#define vrshrq_n_u16(__a, __imm) __arm_vrshrq_n_u16(__a, __imm)
+#define vqshlq_n_u16(__a, __imm) __arm_vqshlq_n_u16(__a, __imm)
+#define vcmpneq_n_s16(__a, __b) __arm_vcmpneq_n_s16(__a, __b)
+#define vcmpltq_s16(__a, __b) __arm_vcmpltq_s16(__a, __b)
+#define vcmpltq_n_s16(__a, __b) __arm_vcmpltq_n_s16(__a, __b)
+#define vcmpleq_s16(__a, __b) __arm_vcmpleq_s16(__a, __b)
+#define vcmpleq_n_s16(__a, __b) __arm_vcmpleq_n_s16(__a, __b)
+#define vcmpgtq_s16(__a, __b) __arm_vcmpgtq_s16(__a, __b)
+#define vcmpgtq_n_s16(__a, __b) __arm_vcmpgtq_n_s16(__a, __b)
+#define vcmpgeq_s16(__a, __b) __arm_vcmpgeq_s16(__a, __b)
+#define vcmpgeq_n_s16(__a, __b) __arm_vcmpgeq_n_s16(__a, __b)
+#define vcmpeqq_s16(__a, __b) __arm_vcmpeqq_s16(__a, __b)
+#define vcmpeqq_n_s16(__a, __b) __arm_vcmpeqq_n_s16(__a, __b)
+#define vqshluq_n_s16(__a, __imm) __arm_vqshluq_n_s16(__a, __imm)
+#define vaddvq_p_s16(__a, __p) __arm_vaddvq_p_s16(__a, __p)
+#define vsubq_s16(__a, __b) __arm_vsubq_s16(__a, __b)
+#define vsubq_n_s16(__a, __b) __arm_vsubq_n_s16(__a, __b)
+#define vshlq_r_s16(__a, __b) __arm_vshlq_r_s16(__a, __b)
+#define vrshlq_s16(__a, __b) __arm_vrshlq_s16(__a, __b)
+#define vrshlq_n_s16(__a, __b) __arm_vrshlq_n_s16(__a, __b)
+#define vrmulhq_s16(__a, __b) __arm_vrmulhq_s16(__a, __b)
+#define vrhaddq_s16(__a, __b) __arm_vrhaddq_s16(__a, __b)
+#define vqsubq_s16(__a, __b) __arm_vqsubq_s16(__a, __b)
+#define vqsubq_n_s16(__a, __b) __arm_vqsubq_n_s16(__a, __b)
+#define vqshlq_s16(__a, __b) __arm_vqshlq_s16(__a, __b)
+#define vqshlq_r_s16(__a, __b) __arm_vqshlq_r_s16(__a, __b)
+#define vqrshlq_s16(__a, __b) __arm_vqrshlq_s16(__a, __b)
+#define vqrshlq_n_s16(__a, __b) __arm_vqrshlq_n_s16(__a, __b)
+#define vqrdmulhq_s16(__a, __b) __arm_vqrdmulhq_s16(__a, __b)
+#define vqrdmulhq_n_s16(__a, __b) __arm_vqrdmulhq_n_s16(__a, __b)
+#define vqdmulhq_s16(__a, __b) __arm_vqdmulhq_s16(__a, __b)
+#define vqdmulhq_n_s16(__a, __b) __arm_vqdmulhq_n_s16(__a, __b)
+#define vqaddq_s16(__a, __b) __arm_vqaddq_s16(__a, __b)
+#define vqaddq_n_s16(__a, __b) __arm_vqaddq_n_s16(__a, __b)
+#define vorrq_s16(__a, __b) __arm_vorrq_s16(__a, __b)
+#define vornq_s16(__a, __b) __arm_vornq_s16(__a, __b)
+#define vmulq_s16(__a, __b) __arm_vmulq_s16(__a, __b)
+#define vmulq_n_s16(__a, __b) __arm_vmulq_n_s16(__a, __b)
+#define vmulltq_int_s16(__a, __b) __arm_vmulltq_int_s16(__a, __b)
+#define vmullbq_int_s16(__a, __b) __arm_vmullbq_int_s16(__a, __b)
+#define vmulhq_s16(__a, __b) __arm_vmulhq_s16(__a, __b)
+#define vmlsdavxq_s16(__a, __b) __arm_vmlsdavxq_s16(__a, __b)
+#define vmlsdavq_s16(__a, __b) __arm_vmlsdavq_s16(__a, __b)
+#define vmladavxq_s16(__a, __b) __arm_vmladavxq_s16(__a, __b)
+#define vmladavq_s16(__a, __b) __arm_vmladavq_s16(__a, __b)
+#define vminvq_s16(__a, __b) __arm_vminvq_s16(__a, __b)
+#define vminq_s16(__a, __b) __arm_vminq_s16(__a, __b)
+#define vmaxvq_s16(__a, __b) __arm_vmaxvq_s16(__a, __b)
+#define vmaxq_s16(__a, __b) __arm_vmaxq_s16(__a, __b)
+#define vhsubq_s16(__a, __b) __arm_vhsubq_s16(__a, __b)
+#define vhsubq_n_s16(__a, __b) __arm_vhsubq_n_s16(__a, __b)
+#define vhcaddq_rot90_s16(__a, __b) __arm_vhcaddq_rot90_s16(__a, __b)
+#define vhcaddq_rot270_s16(__a, __b) __arm_vhcaddq_rot270_s16(__a, __b)
+#define vhaddq_s16(__a, __b) __arm_vhaddq_s16(__a, __b)
+#define vhaddq_n_s16(__a, __b) __arm_vhaddq_n_s16(__a, __b)
+#define veorq_s16(__a, __b) __arm_veorq_s16(__a, __b)
+#define vcaddq_rot90_s16(__a, __b) __arm_vcaddq_rot90_s16(__a, __b)
+#define vcaddq_rot270_s16(__a, __b) __arm_vcaddq_rot270_s16(__a, __b)
+#define vbrsrq_n_s16(__a, __b) __arm_vbrsrq_n_s16(__a, __b)
+#define vbicq_s16(__a, __b) __arm_vbicq_s16(__a, __b)
+#define vandq_s16(__a, __b) __arm_vandq_s16(__a, __b)
+#define vaddvaq_s16(__a, __b) __arm_vaddvaq_s16(__a, __b)
+#define vaddq_n_s16(__a, __b) __arm_vaddq_n_s16(__a, __b)
+#define vabdq_s16(__a, __b) __arm_vabdq_s16(__a, __b)
+#define vshlq_n_s16(__a, __imm) __arm_vshlq_n_s16(__a, __imm)
+#define vrshrq_n_s16(__a, __imm) __arm_vrshrq_n_s16(__a, __imm)
+#define vqshlq_n_s16(__a, __imm) __arm_vqshlq_n_s16(__a, __imm)
+#define vsubq_u32(__a, __b) __arm_vsubq_u32(__a, __b)
+#define vsubq_n_u32(__a, __b) __arm_vsubq_n_u32(__a, __b)
+#define vrmulhq_u32(__a, __b) __arm_vrmulhq_u32(__a, __b)
+#define vrhaddq_u32(__a, __b) __arm_vrhaddq_u32(__a, __b)
+#define vqsubq_u32(__a, __b) __arm_vqsubq_u32(__a, __b)
+#define vqsubq_n_u32(__a, __b) __arm_vqsubq_n_u32(__a, __b)
+#define vqaddq_u32(__a, __b) __arm_vqaddq_u32(__a, __b)
+#define vqaddq_n_u32(__a, __b) __arm_vqaddq_n_u32(__a, __b)
+#define vorrq_u32(__a, __b) __arm_vorrq_u32(__a, __b)
+#define vornq_u32(__a, __b) __arm_vornq_u32(__a, __b)
+#define vmulq_u32(__a, __b) __arm_vmulq_u32(__a, __b)
+#define vmulq_n_u32(__a, __b) __arm_vmulq_n_u32(__a, __b)
+#define vmulltq_int_u32(__a, __b) __arm_vmulltq_int_u32(__a, __b)
+#define vmullbq_int_u32(__a, __b) __arm_vmullbq_int_u32(__a, __b)
+#define vmulhq_u32(__a, __b) __arm_vmulhq_u32(__a, __b)
+#define vmladavq_u32(__a, __b) __arm_vmladavq_u32(__a, __b)
+#define vminvq_u32(__a, __b) __arm_vminvq_u32(__a, __b)
+#define vminq_u32(__a, __b) __arm_vminq_u32(__a, __b)
+#define vmaxvq_u32(__a, __b) __arm_vmaxvq_u32(__a, __b)
+#define vmaxq_u32(__a, __b) __arm_vmaxq_u32(__a, __b)
+#define vhsubq_u32(__a, __b) __arm_vhsubq_u32(__a, __b)
+#define vhsubq_n_u32(__a, __b) __arm_vhsubq_n_u32(__a, __b)
+#define vhaddq_u32(__a, __b) __arm_vhaddq_u32(__a, __b)
+#define vhaddq_n_u32(__a, __b) __arm_vhaddq_n_u32(__a, __b)
+#define veorq_u32(__a, __b) __arm_veorq_u32(__a, __b)
+#define vcmpneq_n_u32(__a, __b) __arm_vcmpneq_n_u32(__a, __b)
+#define vcmphiq_u32(__a, __b) __arm_vcmphiq_u32(__a, __b)
+#define vcmphiq_n_u32(__a, __b) __arm_vcmphiq_n_u32(__a, __b)
+#define vcmpeqq_u32(__a, __b) __arm_vcmpeqq_u32(__a, __b)
+#define vcmpeqq_n_u32(__a, __b) __arm_vcmpeqq_n_u32(__a, __b)
+#define vcmpcsq_u32(__a, __b) __arm_vcmpcsq_u32(__a, __b)
+#define vcmpcsq_n_u32(__a, __b) __arm_vcmpcsq_n_u32(__a, __b)
+#define vcaddq_rot90_u32(__a, __b) __arm_vcaddq_rot90_u32(__a, __b)
+#define vcaddq_rot270_u32(__a, __b) __arm_vcaddq_rot270_u32(__a, __b)
+#define vbicq_u32(__a, __b) __arm_vbicq_u32(__a, __b)
+#define vandq_u32(__a, __b) __arm_vandq_u32(__a, __b)
+#define vaddvq_p_u32(__a, __p) __arm_vaddvq_p_u32(__a, __p)
+#define vaddvaq_u32(__a, __b) __arm_vaddvaq_u32(__a, __b)
+#define vaddq_n_u32(__a, __b) __arm_vaddq_n_u32(__a, __b)
+#define vabdq_u32(__a, __b) __arm_vabdq_u32(__a, __b)
+#define vshlq_r_u32(__a, __b) __arm_vshlq_r_u32(__a, __b)
+#define vrshlq_u32(__a, __b) __arm_vrshlq_u32(__a, __b)
+#define vrshlq_n_u32(__a, __b) __arm_vrshlq_n_u32(__a, __b)
+#define vqshlq_u32(__a, __b) __arm_vqshlq_u32(__a, __b)
+#define vqshlq_r_u32(__a, __b) __arm_vqshlq_r_u32(__a, __b)
+#define vqrshlq_u32(__a, __b) __arm_vqrshlq_u32(__a, __b)
+#define vqrshlq_n_u32(__a, __b) __arm_vqrshlq_n_u32(__a, __b)
+#define vminavq_s32(__a, __b) __arm_vminavq_s32(__a, __b)
+#define vminaq_s32(__a, __b) __arm_vminaq_s32(__a, __b)
+#define vmaxavq_s32(__a, __b) __arm_vmaxavq_s32(__a, __b)
+#define vmaxaq_s32(__a, __b) __arm_vmaxaq_s32(__a, __b)
+#define vbrsrq_n_u32(__a, __b) __arm_vbrsrq_n_u32(__a, __b)
+#define vshlq_n_u32(__a, __imm) __arm_vshlq_n_u32(__a, __imm)
+#define vrshrq_n_u32(__a, __imm) __arm_vrshrq_n_u32(__a, __imm)
+#define vqshlq_n_u32(__a, __imm) __arm_vqshlq_n_u32(__a, __imm)
+#define vcmpneq_n_s32(__a, __b) __arm_vcmpneq_n_s32(__a, __b)
+#define vcmpltq_s32(__a, __b) __arm_vcmpltq_s32(__a, __b)
+#define vcmpltq_n_s32(__a, __b) __arm_vcmpltq_n_s32(__a, __b)
+#define vcmpleq_s32(__a, __b) __arm_vcmpleq_s32(__a, __b)
+#define vcmpleq_n_s32(__a, __b) __arm_vcmpleq_n_s32(__a, __b)
+#define vcmpgtq_s32(__a, __b) __arm_vcmpgtq_s32(__a, __b)
+#define vcmpgtq_n_s32(__a, __b) __arm_vcmpgtq_n_s32(__a, __b)
+#define vcmpgeq_s32(__a, __b) __arm_vcmpgeq_s32(__a, __b)
+#define vcmpgeq_n_s32(__a, __b) __arm_vcmpgeq_n_s32(__a, __b)
+#define vcmpeqq_s32(__a, __b) __arm_vcmpeqq_s32(__a, __b)
+#define vcmpeqq_n_s32(__a, __b) __arm_vcmpeqq_n_s32(__a, __b)
+#define vqshluq_n_s32(__a, __imm) __arm_vqshluq_n_s32(__a, __imm)
+#define vaddvq_p_s32(__a, __p) __arm_vaddvq_p_s32(__a, __p)
+#define vsubq_s32(__a, __b) __arm_vsubq_s32(__a, __b)
+#define vsubq_n_s32(__a, __b) __arm_vsubq_n_s32(__a, __b)
+#define vshlq_r_s32(__a, __b) __arm_vshlq_r_s32(__a, __b)
+#define vrshlq_s32(__a, __b) __arm_vrshlq_s32(__a, __b)
+#define vrshlq_n_s32(__a, __b) __arm_vrshlq_n_s32(__a, __b)
+#define vrmulhq_s32(__a, __b) __arm_vrmulhq_s32(__a, __b)
+#define vrhaddq_s32(__a, __b) __arm_vrhaddq_s32(__a, __b)
+#define vqsubq_s32(__a, __b) __arm_vqsubq_s32(__a, __b)
+#define vqsubq_n_s32(__a, __b) __arm_vqsubq_n_s32(__a, __b)
+#define vqshlq_s32(__a, __b) __arm_vqshlq_s32(__a, __b)
+#define vqshlq_r_s32(__a, __b) __arm_vqshlq_r_s32(__a, __b)
+#define vqrshlq_s32(__a, __b) __arm_vqrshlq_s32(__a, __b)
+#define vqrshlq_n_s32(__a, __b) __arm_vqrshlq_n_s32(__a, __b)
+#define vqrdmulhq_s32(__a, __b) __arm_vqrdmulhq_s32(__a, __b)
+#define vqrdmulhq_n_s32(__a, __b) __arm_vqrdmulhq_n_s32(__a, __b)
+#define vqdmulhq_s32(__a, __b) __arm_vqdmulhq_s32(__a, __b)
+#define vqdmulhq_n_s32(__a, __b) __arm_vqdmulhq_n_s32(__a, __b)
+#define vqaddq_s32(__a, __b) __arm_vqaddq_s32(__a, __b)
+#define vqaddq_n_s32(__a, __b) __arm_vqaddq_n_s32(__a, __b)
+#define vorrq_s32(__a, __b) __arm_vorrq_s32(__a, __b)
+#define vornq_s32(__a, __b) __arm_vornq_s32(__a, __b)
+#define vmulq_s32(__a, __b) __arm_vmulq_s32(__a, __b)
+#define vmulq_n_s32(__a, __b) __arm_vmulq_n_s32(__a, __b)
+#define vmulltq_int_s32(__a, __b) __arm_vmulltq_int_s32(__a, __b)
+#define vmullbq_int_s32(__a, __b) __arm_vmullbq_int_s32(__a, __b)
+#define vmulhq_s32(__a, __b) __arm_vmulhq_s32(__a, __b)
+#define vmlsdavxq_s32(__a, __b) __arm_vmlsdavxq_s32(__a, __b)
+#define vmlsdavq_s32(__a, __b) __arm_vmlsdavq_s32(__a, __b)
+#define vmladavxq_s32(__a, __b) __arm_vmladavxq_s32(__a, __b)
+#define vmladavq_s32(__a, __b) __arm_vmladavq_s32(__a, __b)
+#define vminvq_s32(__a, __b) __arm_vminvq_s32(__a, __b)
+#define vminq_s32(__a, __b) __arm_vminq_s32(__a, __b)
+#define vmaxvq_s32(__a, __b) __arm_vmaxvq_s32(__a, __b)
+#define vmaxq_s32(__a, __b) __arm_vmaxq_s32(__a, __b)
+#define vhsubq_s32(__a, __b) __arm_vhsubq_s32(__a, __b)
+#define vhsubq_n_s32(__a, __b) __arm_vhsubq_n_s32(__a, __b)
+#define vhcaddq_rot90_s32(__a, __b) __arm_vhcaddq_rot90_s32(__a, __b)
+#define vhcaddq_rot270_s32(__a, __b) __arm_vhcaddq_rot270_s32(__a, __b)
+#define vhaddq_s32(__a, __b) __arm_vhaddq_s32(__a, __b)
+#define vhaddq_n_s32(__a, __b) __arm_vhaddq_n_s32(__a, __b)
+#define veorq_s32(__a, __b) __arm_veorq_s32(__a, __b)
+#define vcaddq_rot90_s32(__a, __b) __arm_vcaddq_rot90_s32(__a, __b)
+#define vcaddq_rot270_s32(__a, __b) __arm_vcaddq_rot270_s32(__a, __b)
+#define vbrsrq_n_s32(__a, __b) __arm_vbrsrq_n_s32(__a, __b)
+#define vbicq_s32(__a, __b) __arm_vbicq_s32(__a, __b)
+#define vandq_s32(__a, __b) __arm_vandq_s32(__a, __b)
+#define vaddvaq_s32(__a, __b) __arm_vaddvaq_s32(__a, __b)
+#define vaddq_n_s32(__a, __b) __arm_vaddq_n_s32(__a, __b)
+#define vabdq_s32(__a, __b) __arm_vabdq_s32(__a, __b)
+#define vshlq_n_s32(__a, __imm) __arm_vshlq_n_s32(__a, __imm)
+#define vrshrq_n_s32(__a, __imm) __arm_vrshrq_n_s32(__a, __imm)
+#define vqshlq_n_s32(__a, __imm) __arm_vqshlq_n_s32(__a, __imm)
+#define vqmovntq_u16(__a, __b) __arm_vqmovntq_u16(__a, __b)
+#define vqmovnbq_u16(__a, __b) __arm_vqmovnbq_u16(__a, __b)
+#define vmulltq_poly_p8(__a, __b) __arm_vmulltq_poly_p8(__a, __b)
+#define vmullbq_poly_p8(__a, __b) __arm_vmullbq_poly_p8(__a, __b)
+#define vmovntq_u16(__a, __b) __arm_vmovntq_u16(__a, __b)
+#define vmovnbq_u16(__a, __b) __arm_vmovnbq_u16(__a, __b)
+#define vmlaldavq_u16(__a, __b) __arm_vmlaldavq_u16(__a, __b)
+#define vqmovuntq_s16(__a, __b) __arm_vqmovuntq_s16(__a, __b)
+#define vqmovunbq_s16(__a, __b) __arm_vqmovunbq_s16(__a, __b)
+#define vshlltq_n_u8(__a, __imm) __arm_vshlltq_n_u8(__a, __imm)
+#define vshllbq_n_u8(__a, __imm) __arm_vshllbq_n_u8(__a, __imm)
+#define vorrq_n_u16(__a, __imm) __arm_vorrq_n_u16(__a, __imm)
+#define vbicq_n_u16(__a, __imm) __arm_vbicq_n_u16(__a, __imm)
+#define vcmpneq_n_f16(__a, __b) __arm_vcmpneq_n_f16(__a, __b)
+#define vcmpneq_f16(__a, __b) __arm_vcmpneq_f16(__a, __b)
+#define vcmpltq_n_f16(__a, __b) __arm_vcmpltq_n_f16(__a, __b)
+#define vcmpltq_f16(__a, __b) __arm_vcmpltq_f16(__a, __b)
+#define vcmpleq_n_f16(__a, __b) __arm_vcmpleq_n_f16(__a, __b)
+#define vcmpleq_f16(__a, __b) __arm_vcmpleq_f16(__a, __b)
+#define vcmpgtq_n_f16(__a, __b) __arm_vcmpgtq_n_f16(__a, __b)
+#define vcmpgtq_f16(__a, __b) __arm_vcmpgtq_f16(__a, __b)
+#define vcmpgeq_n_f16(__a, __b) __arm_vcmpgeq_n_f16(__a, __b)
+#define vcmpgeq_f16(__a, __b) __arm_vcmpgeq_f16(__a, __b)
+#define vcmpeqq_n_f16(__a, __b) __arm_vcmpeqq_n_f16(__a, __b)
+#define vcmpeqq_f16(__a, __b) __arm_vcmpeqq_f16(__a, __b)
+#define vsubq_f16(__a, __b) __arm_vsubq_f16(__a, __b)
+#define vqmovntq_s16(__a, __b) __arm_vqmovntq_s16(__a, __b)
+#define vqmovnbq_s16(__a, __b) __arm_vqmovnbq_s16(__a, __b)
+#define vqdmulltq_s16(__a, __b) __arm_vqdmulltq_s16(__a, __b)
+#define vqdmulltq_n_s16(__a, __b) __arm_vqdmulltq_n_s16(__a, __b)
+#define vqdmullbq_s16(__a, __b) __arm_vqdmullbq_s16(__a, __b)
+#define vqdmullbq_n_s16(__a, __b) __arm_vqdmullbq_n_s16(__a, __b)
+#define vorrq_f16(__a, __b) __arm_vorrq_f16(__a, __b)
+#define vornq_f16(__a, __b) __arm_vornq_f16(__a, __b)
+#define vmulq_n_f16(__a, __b) __arm_vmulq_n_f16(__a, __b)
+#define vmulq_f16(__a, __b) __arm_vmulq_f16(__a, __b)
+#define vmovntq_s16(__a, __b) __arm_vmovntq_s16(__a, __b)
+#define vmovnbq_s16(__a, __b) __arm_vmovnbq_s16(__a, __b)
+#define vmlsldavxq_s16(__a, __b) __arm_vmlsldavxq_s16(__a, __b)
+#define vmlsldavq_s16(__a, __b) __arm_vmlsldavq_s16(__a, __b)
+#define vmlaldavxq_s16(__a, __b) __arm_vmlaldavxq_s16(__a, __b)
+#define vmlaldavq_s16(__a, __b) __arm_vmlaldavq_s16(__a, __b)
+#define vminnmvq_f16(__a, __b) __arm_vminnmvq_f16(__a, __b)
+#define vminnmq_f16(__a, __b) __arm_vminnmq_f16(__a, __b)
+#define vminnmavq_f16(__a, __b) __arm_vminnmavq_f16(__a, __b)
+#define vminnmaq_f16(__a, __b) __arm_vminnmaq_f16(__a, __b)
+#define vmaxnmvq_f16(__a, __b) __arm_vmaxnmvq_f16(__a, __b)
+#define vmaxnmq_f16(__a, __b) __arm_vmaxnmq_f16(__a, __b)
+#define vmaxnmavq_f16(__a, __b) __arm_vmaxnmavq_f16(__a, __b)
+#define vmaxnmaq_f16(__a, __b) __arm_vmaxnmaq_f16(__a, __b)
+#define veorq_f16(__a, __b) __arm_veorq_f16(__a, __b)
+#define vcmulq_rot90_f16(__a, __b) __arm_vcmulq_rot90_f16(__a, __b)
+#define vcmulq_rot270_f16(__a, __b) __arm_vcmulq_rot270_f16(__a, __b)
+#define vcmulq_rot180_f16(__a, __b) __arm_vcmulq_rot180_f16(__a, __b)
+#define vcmulq_f16(__a, __b) __arm_vcmulq_f16(__a, __b)
+#define vcaddq_rot90_f16(__a, __b) __arm_vcaddq_rot90_f16(__a, __b)
+#define vcaddq_rot270_f16(__a, __b) __arm_vcaddq_rot270_f16(__a, __b)
+#define vbicq_f16(__a, __b) __arm_vbicq_f16(__a, __b)
+#define vandq_f16(__a, __b) __arm_vandq_f16(__a, __b)
+#define vaddq_n_f16(__a, __b) __arm_vaddq_n_f16(__a, __b)
+#define vabdq_f16(__a, __b) __arm_vabdq_f16(__a, __b)
+#define vshlltq_n_s8(__a, __imm) __arm_vshlltq_n_s8(__a, __imm)
+#define vshllbq_n_s8(__a, __imm) __arm_vshllbq_n_s8(__a, __imm)
+#define vorrq_n_s16(__a, __imm) __arm_vorrq_n_s16(__a, __imm)
+#define vbicq_n_s16(__a, __imm) __arm_vbicq_n_s16(__a, __imm)
+#define vqmovntq_u32(__a, __b) __arm_vqmovntq_u32(__a, __b)
+#define vqmovnbq_u32(__a, __b) __arm_vqmovnbq_u32(__a, __b)
+#define vmulltq_poly_p16(__a, __b) __arm_vmulltq_poly_p16(__a, __b)
+#define vmullbq_poly_p16(__a, __b) __arm_vmullbq_poly_p16(__a, __b)
+#define vmovntq_u32(__a, __b) __arm_vmovntq_u32(__a, __b)
+#define vmovnbq_u32(__a, __b) __arm_vmovnbq_u32(__a, __b)
+#define vmlaldavq_u32(__a, __b) __arm_vmlaldavq_u32(__a, __b)
+#define vqmovuntq_s32(__a, __b) __arm_vqmovuntq_s32(__a, __b)
+#define vqmovunbq_s32(__a, __b) __arm_vqmovunbq_s32(__a, __b)
+#define vshlltq_n_u16(__a, __imm) __arm_vshlltq_n_u16(__a, __imm)
+#define vshllbq_n_u16(__a, __imm) __arm_vshllbq_n_u16(__a, __imm)
+#define vorrq_n_u32(__a, __imm) __arm_vorrq_n_u32(__a, __imm)
+#define vbicq_n_u32(__a, __imm) __arm_vbicq_n_u32(__a, __imm)
+#define vcmpneq_n_f32(__a, __b) __arm_vcmpneq_n_f32(__a, __b)
+#define vcmpneq_f32(__a, __b) __arm_vcmpneq_f32(__a, __b)
+#define vcmpltq_n_f32(__a, __b) __arm_vcmpltq_n_f32(__a, __b)
+#define vcmpltq_f32(__a, __b) __arm_vcmpltq_f32(__a, __b)
+#define vcmpleq_n_f32(__a, __b) __arm_vcmpleq_n_f32(__a, __b)
+#define vcmpleq_f32(__a, __b) __arm_vcmpleq_f32(__a, __b)
+#define vcmpgtq_n_f32(__a, __b) __arm_vcmpgtq_n_f32(__a, __b)
+#define vcmpgtq_f32(__a, __b) __arm_vcmpgtq_f32(__a, __b)
+#define vcmpgeq_n_f32(__a, __b) __arm_vcmpgeq_n_f32(__a, __b)
+#define vcmpgeq_f32(__a, __b) __arm_vcmpgeq_f32(__a, __b)
+#define vcmpeqq_n_f32(__a, __b) __arm_vcmpeqq_n_f32(__a, __b)
+#define vcmpeqq_f32(__a, __b) __arm_vcmpeqq_f32(__a, __b)
+#define vsubq_f32(__a, __b) __arm_vsubq_f32(__a, __b)
+#define vqmovntq_s32(__a, __b) __arm_vqmovntq_s32(__a, __b)
+#define vqmovnbq_s32(__a, __b) __arm_vqmovnbq_s32(__a, __b)
+#define vqdmulltq_s32(__a, __b) __arm_vqdmulltq_s32(__a, __b)
+#define vqdmulltq_n_s32(__a, __b) __arm_vqdmulltq_n_s32(__a, __b)
+#define vqdmullbq_s32(__a, __b) __arm_vqdmullbq_s32(__a, __b)
+#define vqdmullbq_n_s32(__a, __b) __arm_vqdmullbq_n_s32(__a, __b)
+#define vorrq_f32(__a, __b) __arm_vorrq_f32(__a, __b)
+#define vornq_f32(__a, __b) __arm_vornq_f32(__a, __b)
+#define vmulq_n_f32(__a, __b) __arm_vmulq_n_f32(__a, __b)
+#define vmulq_f32(__a, __b) __arm_vmulq_f32(__a, __b)
+#define vmovntq_s32(__a, __b) __arm_vmovntq_s32(__a, __b)
+#define vmovnbq_s32(__a, __b) __arm_vmovnbq_s32(__a, __b)
+#define vmlsldavxq_s32(__a, __b) __arm_vmlsldavxq_s32(__a, __b)
+#define vmlsldavq_s32(__a, __b) __arm_vmlsldavq_s32(__a, __b)
+#define vmlaldavxq_s32(__a, __b) __arm_vmlaldavxq_s32(__a, __b)
+#define vmlaldavq_s32(__a, __b) __arm_vmlaldavq_s32(__a, __b)
+#define vminnmvq_f32(__a, __b) __arm_vminnmvq_f32(__a, __b)
+#define vminnmq_f32(__a, __b) __arm_vminnmq_f32(__a, __b)
+#define vminnmavq_f32(__a, __b) __arm_vminnmavq_f32(__a, __b)
+#define vminnmaq_f32(__a, __b) __arm_vminnmaq_f32(__a, __b)
+#define vmaxnmvq_f32(__a, __b) __arm_vmaxnmvq_f32(__a, __b)
+#define vmaxnmq_f32(__a, __b) __arm_vmaxnmq_f32(__a, __b)
+#define vmaxnmavq_f32(__a, __b) __arm_vmaxnmavq_f32(__a, __b)
+#define vmaxnmaq_f32(__a, __b) __arm_vmaxnmaq_f32(__a, __b)
+#define veorq_f32(__a, __b) __arm_veorq_f32(__a, __b)
+#define vcmulq_rot90_f32(__a, __b) __arm_vcmulq_rot90_f32(__a, __b)
+#define vcmulq_rot270_f32(__a, __b) __arm_vcmulq_rot270_f32(__a, __b)
+#define vcmulq_rot180_f32(__a, __b) __arm_vcmulq_rot180_f32(__a, __b)
+#define vcmulq_f32(__a, __b) __arm_vcmulq_f32(__a, __b)
+#define vcaddq_rot90_f32(__a, __b) __arm_vcaddq_rot90_f32(__a, __b)
+#define vcaddq_rot270_f32(__a, __b) __arm_vcaddq_rot270_f32(__a, __b)
+#define vbicq_f32(__a, __b) __arm_vbicq_f32(__a, __b)
+#define vandq_f32(__a, __b) __arm_vandq_f32(__a, __b)
+#define vaddq_n_f32(__a, __b) __arm_vaddq_n_f32(__a, __b)
+#define vabdq_f32(__a, __b) __arm_vabdq_f32(__a, __b)
+#define vshlltq_n_s16(__a, __imm) __arm_vshlltq_n_s16(__a, __imm)
+#define vshllbq_n_s16(__a, __imm) __arm_vshllbq_n_s16(__a, __imm)
+#define vorrq_n_s32(__a, __imm) __arm_vorrq_n_s32(__a, __imm)
+#define vbicq_n_s32(__a, __imm) __arm_vbicq_n_s32(__a, __imm)
+#define vrmlaldavhq_u32(__a, __b) __arm_vrmlaldavhq_u32(__a, __b)
+#define vctp8q_m(__a, __p) __arm_vctp8q_m(__a, __p)
+#define vctp64q_m(__a, __p) __arm_vctp64q_m(__a, __p)
+#define vctp32q_m(__a, __p) __arm_vctp32q_m(__a, __p)
+#define vctp16q_m(__a, __p) __arm_vctp16q_m(__a, __p)
+#define vaddlvaq_u32(__a, __b) __arm_vaddlvaq_u32(__a, __b)
+#define vrmlsldavhxq_s32(__a, __b) __arm_vrmlsldavhxq_s32(__a, __b)
+#define vrmlsldavhq_s32(__a, __b) __arm_vrmlsldavhq_s32(__a, __b)
+#define vrmlaldavhxq_s32(__a, __b) __arm_vrmlaldavhxq_s32(__a, __b)
+#define vrmlaldavhq_s32(__a, __b) __arm_vrmlaldavhq_s32(__a, __b)
+#define vcvttq_f16_f32(__a, __b) __arm_vcvttq_f16_f32(__a, __b)
+#define vcvtbq_f16_f32(__a, __b) __arm_vcvtbq_f16_f32(__a, __b)
+#define vaddlvaq_s32(__a, __b) __arm_vaddlvaq_s32(__a, __b)
+#define vabavq_s8(__a, __b, __c) __arm_vabavq_s8(__a, __b, __c)
+#define vabavq_s16(__a, __b, __c) __arm_vabavq_s16(__a, __b, __c)
+#define vabavq_s32(__a, __b, __c) __arm_vabavq_s32(__a, __b, __c)
+#define vbicq_m_n_s16(__a, __imm, __p) __arm_vbicq_m_n_s16(__a, __imm, __p)
+#define vbicq_m_n_s32(__a, __imm, __p) __arm_vbicq_m_n_s32(__a, __imm, __p)
+#define vbicq_m_n_u16(__a, __imm, __p) __arm_vbicq_m_n_u16(__a, __imm, __p)
+#define vbicq_m_n_u32(__a, __imm, __p) __arm_vbicq_m_n_u32(__a, __imm, __p)
+#define vcmpeqq_m_f16(__a, __b, __p) __arm_vcmpeqq_m_f16(__a, __b, __p)
+#define vcmpeqq_m_f32(__a, __b, __p) __arm_vcmpeqq_m_f32(__a, __b, __p)
+#define vcvtaq_m_s16_f16(__inactive, __a, __p) __arm_vcvtaq_m_s16_f16(__inactive, __a, __p)
+#define vcvtaq_m_u16_f16(__inactive, __a, __p) __arm_vcvtaq_m_u16_f16(__inactive, __a, __p)
+#define vcvtaq_m_s32_f32(__inactive, __a, __p) __arm_vcvtaq_m_s32_f32(__inactive, __a, __p)
+#define vcvtaq_m_u32_f32(__inactive, __a, __p) __arm_vcvtaq_m_u32_f32(__inactive, __a, __p)
+#define vcvtq_m_f16_s16(__inactive, __a, __p) __arm_vcvtq_m_f16_s16(__inactive, __a, __p)
+#define vcvtq_m_f16_u16(__inactive, __a, __p) __arm_vcvtq_m_f16_u16(__inactive, __a, __p)
+#define vcvtq_m_f32_s32(__inactive, __a, __p) __arm_vcvtq_m_f32_s32(__inactive, __a, __p)
+#define vcvtq_m_f32_u32(__inactive, __a, __p) __arm_vcvtq_m_f32_u32(__inactive, __a, __p)
+#define vqrshrnbq_n_s16(__a, __b, __imm) __arm_vqrshrnbq_n_s16(__a, __b, __imm)
+#define vqrshrnbq_n_u16(__a, __b, __imm) __arm_vqrshrnbq_n_u16(__a, __b, __imm)
+#define vqrshrnbq_n_s32(__a, __b, __imm) __arm_vqrshrnbq_n_s32(__a, __b, __imm)
+#define vqrshrnbq_n_u32(__a, __b, __imm) __arm_vqrshrnbq_n_u32(__a, __b, __imm)
+#define vqrshrunbq_n_s16(__a, __b, __imm) __arm_vqrshrunbq_n_s16(__a, __b, __imm)
+#define vqrshrunbq_n_s32(__a, __b, __imm) __arm_vqrshrunbq_n_s32(__a, __b, __imm)
+#define vrmlaldavhaq_s32(__a, __b, __c) __arm_vrmlaldavhaq_s32(__a, __b, __c)
+#define vrmlaldavhaq_u32(__a, __b, __c) __arm_vrmlaldavhaq_u32(__a, __b, __c)
+#define vshlcq_s8(__a, __b, __imm) __arm_vshlcq_s8(__a, __b, __imm)
+#define vshlcq_u8(__a, __b, __imm) __arm_vshlcq_u8(__a, __b, __imm)
+#define vshlcq_s16(__a, __b, __imm) __arm_vshlcq_s16(__a, __b, __imm)
+#define vshlcq_u16(__a, __b, __imm) __arm_vshlcq_u16(__a, __b, __imm)
+#define vshlcq_s32(__a, __b, __imm) __arm_vshlcq_s32(__a, __b, __imm)
+#define vshlcq_u32(__a, __b, __imm) __arm_vshlcq_u32(__a, __b, __imm)
+#define vabavq_u8(__a, __b, __c) __arm_vabavq_u8(__a, __b, __c)
+#define vabavq_u16(__a, __b, __c) __arm_vabavq_u16(__a, __b, __c)
+#define vabavq_u32(__a, __b, __c) __arm_vabavq_u32(__a, __b, __c)
+#define vpselq_u8(__a, __b, __p) __arm_vpselq_u8(__a, __b, __p)
+#define vpselq_s8(__a, __b, __p) __arm_vpselq_s8(__a, __b, __p)
+#define vrev64q_m_u8(__inactive, __a, __p) __arm_vrev64q_m_u8(__inactive, __a, __p)
+#define vmvnq_m_u8(__inactive, __a, __p) __arm_vmvnq_m_u8(__inactive, __a, __p)
+#define vmlasq_n_u8(__a, __b, __c) __arm_vmlasq_n_u8(__a, __b, __c)
+#define vmlaq_n_u8(__a, __b, __c) __arm_vmlaq_n_u8(__a, __b, __c)
+#define vmladavq_p_u8(__a, __b, __p) __arm_vmladavq_p_u8(__a, __b, __p)
+#define vmladavaq_u8(__a, __b, __c) __arm_vmladavaq_u8(__a, __b, __c)
+#define vminvq_p_u8(__a, __b, __p) __arm_vminvq_p_u8(__a, __b, __p)
+#define vmaxvq_p_u8(__a, __b, __p) __arm_vmaxvq_p_u8(__a, __b, __p)
+#define vdupq_m_n_u8(__inactive, __a, __p) __arm_vdupq_m_n_u8(__inactive, __a, __p)
+#define vcmpneq_m_u8(__a, __b, __p) __arm_vcmpneq_m_u8(__a, __b, __p)
+#define vcmpneq_m_n_u8(__a, __b, __p) __arm_vcmpneq_m_n_u8(__a, __b, __p)
+#define vcmphiq_m_u8(__a, __b, __p) __arm_vcmphiq_m_u8(__a, __b, __p)
+#define vcmphiq_m_n_u8(__a, __b, __p) __arm_vcmphiq_m_n_u8(__a, __b, __p)
+#define vcmpeqq_m_u8(__a, __b, __p) __arm_vcmpeqq_m_u8(__a, __b, __p)
+#define vcmpeqq_m_n_u8(__a, __b, __p) __arm_vcmpeqq_m_n_u8(__a, __b, __p)
+#define vcmpcsq_m_u8(__a, __b, __p) __arm_vcmpcsq_m_u8(__a, __b, __p)
+#define vcmpcsq_m_n_u8(__a, __b, __p) __arm_vcmpcsq_m_n_u8(__a, __b, __p)
+#define vclzq_m_u8(__inactive, __a, __p) __arm_vclzq_m_u8(__inactive, __a, __p)
+#define vaddvaq_p_u8(__a, __b, __p) __arm_vaddvaq_p_u8(__a, __b, __p)
+#define vsriq_n_u8(__a, __b, __imm) __arm_vsriq_n_u8(__a, __b, __imm)
+#define vsliq_n_u8(__a, __b, __imm) __arm_vsliq_n_u8(__a, __b, __imm)
+#define vshlq_m_r_u8(__a, __b, __p) __arm_vshlq_m_r_u8(__a, __b, __p)
+#define vrshlq_m_n_u8(__a, __b, __p) __arm_vrshlq_m_n_u8(__a, __b, __p)
+#define vqshlq_m_r_u8(__a, __b, __p) __arm_vqshlq_m_r_u8(__a, __b, __p)
+#define vqrshlq_m_n_u8(__a, __b, __p) __arm_vqrshlq_m_n_u8(__a, __b, __p)
+#define vminavq_p_s8(__a, __b, __p) __arm_vminavq_p_s8(__a, __b, __p)
+#define vminaq_m_s8(__a, __b, __p) __arm_vminaq_m_s8(__a, __b, __p)
+#define vmaxavq_p_s8(__a, __b, __p) __arm_vmaxavq_p_s8(__a, __b, __p)
+#define vmaxaq_m_s8(__a, __b, __p) __arm_vmaxaq_m_s8(__a, __b, __p)
+#define vcmpneq_m_s8(__a, __b, __p) __arm_vcmpneq_m_s8(__a, __b, __p)
+#define vcmpneq_m_n_s8(__a, __b, __p) __arm_vcmpneq_m_n_s8(__a, __b, __p)
+#define vcmpltq_m_s8(__a, __b, __p) __arm_vcmpltq_m_s8(__a, __b, __p)
+#define vcmpltq_m_n_s8(__a, __b, __p) __arm_vcmpltq_m_n_s8(__a, __b, __p)
+#define vcmpleq_m_s8(__a, __b, __p) __arm_vcmpleq_m_s8(__a, __b, __p)
+#define vcmpleq_m_n_s8(__a, __b, __p) __arm_vcmpleq_m_n_s8(__a, __b, __p)
+#define vcmpgtq_m_s8(__a, __b, __p) __arm_vcmpgtq_m_s8(__a, __b, __p)
+#define vcmpgtq_m_n_s8(__a, __b, __p) __arm_vcmpgtq_m_n_s8(__a, __b, __p)
+#define vcmpgeq_m_s8(__a, __b, __p) __arm_vcmpgeq_m_s8(__a, __b, __p)
+#define vcmpgeq_m_n_s8(__a, __b, __p) __arm_vcmpgeq_m_n_s8(__a, __b, __p)
+#define vcmpeqq_m_s8(__a, __b, __p) __arm_vcmpeqq_m_s8(__a, __b, __p)
+#define vcmpeqq_m_n_s8(__a, __b, __p) __arm_vcmpeqq_m_n_s8(__a, __b, __p)
+#define vshlq_m_r_s8(__a, __b, __p) __arm_vshlq_m_r_s8(__a, __b, __p)
+#define vrshlq_m_n_s8(__a, __b, __p) __arm_vrshlq_m_n_s8(__a, __b, __p)
+#define vrev64q_m_s8(__inactive, __a, __p) __arm_vrev64q_m_s8(__inactive, __a, __p)
+#define vqshlq_m_r_s8(__a, __b, __p) __arm_vqshlq_m_r_s8(__a, __b, __p)
+#define vqrshlq_m_n_s8(__a, __b, __p) __arm_vqrshlq_m_n_s8(__a, __b, __p)
+#define vqnegq_m_s8(__inactive, __a, __p) __arm_vqnegq_m_s8(__inactive, __a, __p)
+#define vqabsq_m_s8(__inactive, __a, __p) __arm_vqabsq_m_s8(__inactive, __a, __p)
+#define vnegq_m_s8(__inactive, __a, __p) __arm_vnegq_m_s8(__inactive, __a, __p)
+#define vmvnq_m_s8(__inactive, __a, __p) __arm_vmvnq_m_s8(__inactive, __a, __p)
+#define vmlsdavxq_p_s8(__a, __b, __p) __arm_vmlsdavxq_p_s8(__a, __b, __p)
+#define vmlsdavq_p_s8(__a, __b, __p) __arm_vmlsdavq_p_s8(__a, __b, __p)
+#define vmladavxq_p_s8(__a, __b, __p) __arm_vmladavxq_p_s8(__a, __b, __p)
+#define vmladavq_p_s8(__a, __b, __p) __arm_vmladavq_p_s8(__a, __b, __p)
+#define vminvq_p_s8(__a, __b, __p) __arm_vminvq_p_s8(__a, __b, __p)
+#define vmaxvq_p_s8(__a, __b, __p) __arm_vmaxvq_p_s8(__a, __b, __p)
+#define vdupq_m_n_s8(__inactive, __a, __p) __arm_vdupq_m_n_s8(__inactive, __a, __p)
+#define vclzq_m_s8(__inactive, __a, __p) __arm_vclzq_m_s8(__inactive, __a, __p)
+#define vclsq_m_s8(__inactive, __a, __p) __arm_vclsq_m_s8(__inactive, __a, __p)
+#define vaddvaq_p_s8(__a, __b, __p) __arm_vaddvaq_p_s8(__a, __b, __p)
+#define vabsq_m_s8(__inactive, __a, __p) __arm_vabsq_m_s8(__inactive, __a, __p)
+#define vqrdmlsdhxq_s8(__inactive, __a, __b) __arm_vqrdmlsdhxq_s8(__inactive, __a, __b)
+#define vqrdmlsdhq_s8(__inactive, __a, __b) __arm_vqrdmlsdhq_s8(__inactive, __a, __b)
+#define vqrdmlashq_n_s8(__a, __b, __c) __arm_vqrdmlashq_n_s8(__a, __b, __c)
+#define vqrdmlahq_n_s8(__a, __b, __c) __arm_vqrdmlahq_n_s8(__a, __b, __c)
+#define vqrdmladhxq_s8(__inactive, __a, __b) __arm_vqrdmladhxq_s8(__inactive, __a, __b)
+#define vqrdmladhq_s8(__inactive, __a, __b) __arm_vqrdmladhq_s8(__inactive, __a, __b)
+#define vqdmlsdhxq_s8(__inactive, __a, __b) __arm_vqdmlsdhxq_s8(__inactive, __a, __b)
+#define vqdmlsdhq_s8(__inactive, __a, __b) __arm_vqdmlsdhq_s8(__inactive, __a, __b)
+#define vqdmlahq_n_s8(__a, __b, __c) __arm_vqdmlahq_n_s8(__a, __b, __c)
+#define vqdmlashq_n_s8(__a, __b, __c) __arm_vqdmlashq_n_s8(__a, __b, __c)
+#define vqdmladhxq_s8(__inactive, __a, __b) __arm_vqdmladhxq_s8(__inactive, __a, __b)
+#define vqdmladhq_s8(__inactive, __a, __b) __arm_vqdmladhq_s8(__inactive, __a, __b)
+#define vmlsdavaxq_s8(__a, __b, __c) __arm_vmlsdavaxq_s8(__a, __b, __c)
+#define vmlsdavaq_s8(__a, __b, __c) __arm_vmlsdavaq_s8(__a, __b, __c)
+#define vmlasq_n_s8(__a, __b, __c) __arm_vmlasq_n_s8(__a, __b, __c)
+#define vmlaq_n_s8(__a, __b, __c) __arm_vmlaq_n_s8(__a, __b, __c)
+#define vmladavaxq_s8(__a, __b, __c) __arm_vmladavaxq_s8(__a, __b, __c)
+#define vmladavaq_s8(__a, __b, __c) __arm_vmladavaq_s8(__a, __b, __c)
+#define vsriq_n_s8(__a, __b, __imm) __arm_vsriq_n_s8(__a, __b, __imm)
+#define vsliq_n_s8(__a, __b, __imm) __arm_vsliq_n_s8(__a, __b, __imm)
+#define vpselq_u16(__a, __b, __p) __arm_vpselq_u16(__a, __b, __p)
+#define vpselq_s16(__a, __b, __p) __arm_vpselq_s16(__a, __b, __p)
+#define vrev64q_m_u16(__inactive, __a, __p) __arm_vrev64q_m_u16(__inactive, __a, __p)
+#define vmvnq_m_u16(__inactive, __a, __p) __arm_vmvnq_m_u16(__inactive, __a, __p)
+#define vmlasq_n_u16(__a, __b, __c) __arm_vmlasq_n_u16(__a, __b, __c)
+#define vmlaq_n_u16(__a, __b, __c) __arm_vmlaq_n_u16(__a, __b, __c)
+#define vmladavq_p_u16(__a, __b, __p) __arm_vmladavq_p_u16(__a, __b, __p)
+#define vmladavaq_u16(__a, __b, __c) __arm_vmladavaq_u16(__a, __b, __c)
+#define vminvq_p_u16(__a, __b, __p) __arm_vminvq_p_u16(__a, __b, __p)
+#define vmaxvq_p_u16(__a, __b, __p) __arm_vmaxvq_p_u16(__a, __b, __p)
+#define vdupq_m_n_u16(__inactive, __a, __p) __arm_vdupq_m_n_u16(__inactive, __a, __p)
+#define vcmpneq_m_u16(__a, __b, __p) __arm_vcmpneq_m_u16(__a, __b, __p)
+#define vcmpneq_m_n_u16(__a, __b, __p) __arm_vcmpneq_m_n_u16(__a, __b, __p)
+#define vcmphiq_m_u16(__a, __b, __p) __arm_vcmphiq_m_u16(__a, __b, __p)
+#define vcmphiq_m_n_u16(__a, __b, __p) __arm_vcmphiq_m_n_u16(__a, __b, __p)
+#define vcmpeqq_m_u16(__a, __b, __p) __arm_vcmpeqq_m_u16(__a, __b, __p)
+#define vcmpeqq_m_n_u16(__a, __b, __p) __arm_vcmpeqq_m_n_u16(__a, __b, __p)
+#define vcmpcsq_m_u16(__a, __b, __p) __arm_vcmpcsq_m_u16(__a, __b, __p)
+#define vcmpcsq_m_n_u16(__a, __b, __p) __arm_vcmpcsq_m_n_u16(__a, __b, __p)
+#define vclzq_m_u16(__inactive, __a, __p) __arm_vclzq_m_u16(__inactive, __a, __p)
+#define vaddvaq_p_u16(__a, __b, __p) __arm_vaddvaq_p_u16(__a, __b, __p)
+#define vsriq_n_u16(__a, __b, __imm) __arm_vsriq_n_u16(__a, __b, __imm)
+#define vsliq_n_u16(__a, __b, __imm) __arm_vsliq_n_u16(__a, __b, __imm)
+#define vshlq_m_r_u16(__a, __b, __p) __arm_vshlq_m_r_u16(__a, __b, __p)
+#define vrshlq_m_n_u16(__a, __b, __p) __arm_vrshlq_m_n_u16(__a, __b, __p)
+#define vqshlq_m_r_u16(__a, __b, __p) __arm_vqshlq_m_r_u16(__a, __b, __p)
+#define vqrshlq_m_n_u16(__a, __b, __p) __arm_vqrshlq_m_n_u16(__a, __b, __p)
+#define vminavq_p_s16(__a, __b, __p) __arm_vminavq_p_s16(__a, __b, __p)
+#define vminaq_m_s16(__a, __b, __p) __arm_vminaq_m_s16(__a, __b, __p)
+#define vmaxavq_p_s16(__a, __b, __p) __arm_vmaxavq_p_s16(__a, __b, __p)
+#define vmaxaq_m_s16(__a, __b, __p) __arm_vmaxaq_m_s16(__a, __b, __p)
+#define vcmpneq_m_s16(__a, __b, __p) __arm_vcmpneq_m_s16(__a, __b, __p)
+#define vcmpneq_m_n_s16(__a, __b, __p) __arm_vcmpneq_m_n_s16(__a, __b, __p)
+#define vcmpltq_m_s16(__a, __b, __p) __arm_vcmpltq_m_s16(__a, __b, __p)
+#define vcmpltq_m_n_s16(__a, __b, __p) __arm_vcmpltq_m_n_s16(__a, __b, __p)
+#define vcmpleq_m_s16(__a, __b, __p) __arm_vcmpleq_m_s16(__a, __b, __p)
+#define vcmpleq_m_n_s16(__a, __b, __p) __arm_vcmpleq_m_n_s16(__a, __b, __p)
+#define vcmpgtq_m_s16(__a, __b, __p) __arm_vcmpgtq_m_s16(__a, __b, __p)
+#define vcmpgtq_m_n_s16(__a, __b, __p) __arm_vcmpgtq_m_n_s16(__a, __b, __p)
+#define vcmpgeq_m_s16(__a, __b, __p) __arm_vcmpgeq_m_s16(__a, __b, __p)
+#define vcmpgeq_m_n_s16(__a, __b, __p) __arm_vcmpgeq_m_n_s16(__a, __b, __p)
+#define vcmpeqq_m_s16(__a, __b, __p) __arm_vcmpeqq_m_s16(__a, __b, __p)
+#define vcmpeqq_m_n_s16(__a, __b, __p) __arm_vcmpeqq_m_n_s16(__a, __b, __p)
+#define vshlq_m_r_s16(__a, __b, __p) __arm_vshlq_m_r_s16(__a, __b, __p)
+#define vrshlq_m_n_s16(__a, __b, __p) __arm_vrshlq_m_n_s16(__a, __b, __p)
+#define vrev64q_m_s16(__inactive, __a, __p) __arm_vrev64q_m_s16(__inactive, __a, __p)
+#define vqshlq_m_r_s16(__a, __b, __p) __arm_vqshlq_m_r_s16(__a, __b, __p)
+#define vqrshlq_m_n_s16(__a, __b, __p) __arm_vqrshlq_m_n_s16(__a, __b, __p)
+#define vqnegq_m_s16(__inactive, __a, __p) __arm_vqnegq_m_s16(__inactive, __a, __p)
+#define vqabsq_m_s16(__inactive, __a, __p) __arm_vqabsq_m_s16(__inactive, __a, __p)
+#define vnegq_m_s16(__inactive, __a, __p) __arm_vnegq_m_s16(__inactive, __a, __p)
+#define vmvnq_m_s16(__inactive, __a, __p) __arm_vmvnq_m_s16(__inactive, __a, __p)
+#define vmlsdavxq_p_s16(__a, __b, __p) __arm_vmlsdavxq_p_s16(__a, __b, __p)
+#define vmlsdavq_p_s16(__a, __b, __p) __arm_vmlsdavq_p_s16(__a, __b, __p)
+#define vmladavxq_p_s16(__a, __b, __p) __arm_vmladavxq_p_s16(__a, __b, __p)
+#define vmladavq_p_s16(__a, __b, __p) __arm_vmladavq_p_s16(__a, __b, __p)
+#define vminvq_p_s16(__a, __b, __p) __arm_vminvq_p_s16(__a, __b, __p)
+#define vmaxvq_p_s16(__a, __b, __p) __arm_vmaxvq_p_s16(__a, __b, __p)
+#define vdupq_m_n_s16(__inactive, __a, __p) __arm_vdupq_m_n_s16(__inactive, __a, __p)
+#define vclzq_m_s16(__inactive, __a, __p) __arm_vclzq_m_s16(__inactive, __a, __p)
+#define vclsq_m_s16(__inactive, __a, __p) __arm_vclsq_m_s16(__inactive, __a, __p)
+#define vaddvaq_p_s16(__a, __b, __p) __arm_vaddvaq_p_s16(__a, __b, __p)
+#define vabsq_m_s16(__inactive, __a, __p) __arm_vabsq_m_s16(__inactive, __a, __p)
+#define vqrdmlsdhxq_s16(__inactive, __a, __b) __arm_vqrdmlsdhxq_s16(__inactive, __a, __b)
+#define vqrdmlsdhq_s16(__inactive, __a, __b) __arm_vqrdmlsdhq_s16(__inactive, __a, __b)
+#define vqrdmlashq_n_s16(__a, __b, __c) __arm_vqrdmlashq_n_s16(__a, __b, __c)
+#define vqrdmlahq_n_s16(__a, __b, __c) __arm_vqrdmlahq_n_s16(__a, __b, __c)
+#define vqrdmladhxq_s16(__inactive, __a, __b) __arm_vqrdmladhxq_s16(__inactive, __a, __b)
+#define vqrdmladhq_s16(__inactive, __a, __b) __arm_vqrdmladhq_s16(__inactive, __a, __b)
+#define vqdmlsdhxq_s16(__inactive, __a, __b) __arm_vqdmlsdhxq_s16(__inactive, __a, __b)
+#define vqdmlsdhq_s16(__inactive, __a, __b) __arm_vqdmlsdhq_s16(__inactive, __a, __b)
+#define vqdmlashq_n_s16(__a, __b, __c) __arm_vqdmlashq_n_s16(__a, __b, __c)
+#define vqdmlahq_n_s16(__a, __b, __c) __arm_vqdmlahq_n_s16(__a, __b, __c)
+#define vqdmladhxq_s16(__inactive, __a, __b) __arm_vqdmladhxq_s16(__inactive, __a, __b)
+#define vqdmladhq_s16(__inactive, __a, __b) __arm_vqdmladhq_s16(__inactive, __a, __b)
+#define vmlsdavaxq_s16(__a, __b, __c) __arm_vmlsdavaxq_s16(__a, __b, __c)
+#define vmlsdavaq_s16(__a, __b, __c) __arm_vmlsdavaq_s16(__a, __b, __c)
+#define vmlasq_n_s16(__a, __b, __c) __arm_vmlasq_n_s16(__a, __b, __c)
+#define vmlaq_n_s16(__a, __b, __c) __arm_vmlaq_n_s16(__a, __b, __c)
+#define vmladavaxq_s16(__a, __b, __c) __arm_vmladavaxq_s16(__a, __b, __c)
+#define vmladavaq_s16(__a, __b, __c) __arm_vmladavaq_s16(__a, __b, __c)
+#define vsriq_n_s16(__a, __b, __imm) __arm_vsriq_n_s16(__a, __b, __imm)
+#define vsliq_n_s16(__a, __b, __imm) __arm_vsliq_n_s16(__a, __b, __imm)
+#define vpselq_u32(__a, __b, __p) __arm_vpselq_u32(__a, __b, __p)
+#define vpselq_s32(__a, __b, __p) __arm_vpselq_s32(__a, __b, __p)
+#define vrev64q_m_u32(__inactive, __a, __p) __arm_vrev64q_m_u32(__inactive, __a, __p)
+#define vmvnq_m_u32(__inactive, __a, __p) __arm_vmvnq_m_u32(__inactive, __a, __p)
+#define vmlasq_n_u32(__a, __b, __c) __arm_vmlasq_n_u32(__a, __b, __c)
+#define vmlaq_n_u32(__a, __b, __c) __arm_vmlaq_n_u32(__a, __b, __c)
+#define vmladavq_p_u32(__a, __b, __p) __arm_vmladavq_p_u32(__a, __b, __p)
+#define vmladavaq_u32(__a, __b, __c) __arm_vmladavaq_u32(__a, __b, __c)
+#define vminvq_p_u32(__a, __b, __p) __arm_vminvq_p_u32(__a, __b, __p)
+#define vmaxvq_p_u32(__a, __b, __p) __arm_vmaxvq_p_u32(__a, __b, __p)
+#define vdupq_m_n_u32(__inactive, __a, __p) __arm_vdupq_m_n_u32(__inactive, __a, __p)
+#define vcmpneq_m_u32(__a, __b, __p) __arm_vcmpneq_m_u32(__a, __b, __p)
+#define vcmpneq_m_n_u32(__a, __b, __p) __arm_vcmpneq_m_n_u32(__a, __b, __p)
+#define vcmphiq_m_u32(__a, __b, __p) __arm_vcmphiq_m_u32(__a, __b, __p)
+#define vcmphiq_m_n_u32(__a, __b, __p) __arm_vcmphiq_m_n_u32(__a, __b, __p)
+#define vcmpeqq_m_u32(__a, __b, __p) __arm_vcmpeqq_m_u32(__a, __b, __p)
+#define vcmpeqq_m_n_u32(__a, __b, __p) __arm_vcmpeqq_m_n_u32(__a, __b, __p)
+#define vcmpcsq_m_u32(__a, __b, __p) __arm_vcmpcsq_m_u32(__a, __b, __p)
+#define vcmpcsq_m_n_u32(__a, __b, __p) __arm_vcmpcsq_m_n_u32(__a, __b, __p)
+#define vclzq_m_u32(__inactive, __a, __p) __arm_vclzq_m_u32(__inactive, __a, __p)
+#define vaddvaq_p_u32(__a, __b, __p) __arm_vaddvaq_p_u32(__a, __b, __p)
+#define vsriq_n_u32(__a, __b, __imm) __arm_vsriq_n_u32(__a, __b, __imm)
+#define vsliq_n_u32(__a, __b, __imm) __arm_vsliq_n_u32(__a, __b, __imm)
+#define vshlq_m_r_u32(__a, __b, __p) __arm_vshlq_m_r_u32(__a, __b, __p)
+#define vrshlq_m_n_u32(__a, __b, __p) __arm_vrshlq_m_n_u32(__a, __b, __p)
+#define vqshlq_m_r_u32(__a, __b, __p) __arm_vqshlq_m_r_u32(__a, __b, __p)
+#define vqrshlq_m_n_u32(__a, __b, __p) __arm_vqrshlq_m_n_u32(__a, __b, __p)
+#define vminavq_p_s32(__a, __b, __p) __arm_vminavq_p_s32(__a, __b, __p)
+#define vminaq_m_s32(__a, __b, __p) __arm_vminaq_m_s32(__a, __b, __p)
+#define vmaxavq_p_s32(__a, __b, __p) __arm_vmaxavq_p_s32(__a, __b, __p)
+#define vmaxaq_m_s32(__a, __b, __p) __arm_vmaxaq_m_s32(__a, __b, __p)
+#define vcmpneq_m_s32(__a, __b, __p) __arm_vcmpneq_m_s32(__a, __b, __p)
+#define vcmpneq_m_n_s32(__a, __b, __p) __arm_vcmpneq_m_n_s32(__a, __b, __p)
+#define vcmpltq_m_s32(__a, __b, __p) __arm_vcmpltq_m_s32(__a, __b, __p)
+#define vcmpltq_m_n_s32(__a, __b, __p) __arm_vcmpltq_m_n_s32(__a, __b, __p)
+#define vcmpleq_m_s32(__a, __b, __p) __arm_vcmpleq_m_s32(__a, __b, __p)
+#define vcmpleq_m_n_s32(__a, __b, __p) __arm_vcmpleq_m_n_s32(__a, __b, __p)
+#define vcmpgtq_m_s32(__a, __b, __p) __arm_vcmpgtq_m_s32(__a, __b, __p)
+#define vcmpgtq_m_n_s32(__a, __b, __p) __arm_vcmpgtq_m_n_s32(__a, __b, __p)
+#define vcmpgeq_m_s32(__a, __b, __p) __arm_vcmpgeq_m_s32(__a, __b, __p)
+#define vcmpgeq_m_n_s32(__a, __b, __p) __arm_vcmpgeq_m_n_s32(__a, __b, __p)
+#define vcmpeqq_m_s32(__a, __b, __p) __arm_vcmpeqq_m_s32(__a, __b, __p)
+#define vcmpeqq_m_n_s32(__a, __b, __p) __arm_vcmpeqq_m_n_s32(__a, __b, __p)
+#define vshlq_m_r_s32(__a, __b, __p) __arm_vshlq_m_r_s32(__a, __b, __p)
+#define vrshlq_m_n_s32(__a, __b, __p) __arm_vrshlq_m_n_s32(__a, __b, __p)
+#define vrev64q_m_s32(__inactive, __a, __p) __arm_vrev64q_m_s32(__inactive, __a, __p)
+#define vqshlq_m_r_s32(__a, __b, __p) __arm_vqshlq_m_r_s32(__a, __b, __p)
+#define vqrshlq_m_n_s32(__a, __b, __p) __arm_vqrshlq_m_n_s32(__a, __b, __p)
+#define vqnegq_m_s32(__inactive, __a, __p) __arm_vqnegq_m_s32(__inactive, __a, __p)
+#define vqabsq_m_s32(__inactive, __a, __p) __arm_vqabsq_m_s32(__inactive, __a, __p)
+#define vnegq_m_s32(__inactive, __a, __p) __arm_vnegq_m_s32(__inactive, __a, __p)
+#define vmvnq_m_s32(__inactive, __a, __p) __arm_vmvnq_m_s32(__inactive, __a, __p)
+#define vmlsdavxq_p_s32(__a, __b, __p) __arm_vmlsdavxq_p_s32(__a, __b, __p)
+#define vmlsdavq_p_s32(__a, __b, __p) __arm_vmlsdavq_p_s32(__a, __b, __p)
+#define vmladavxq_p_s32(__a, __b, __p) __arm_vmladavxq_p_s32(__a, __b, __p)
+#define vmladavq_p_s32(__a, __b, __p) __arm_vmladavq_p_s32(__a, __b, __p)
+#define vminvq_p_s32(__a, __b, __p) __arm_vminvq_p_s32(__a, __b, __p)
+#define vmaxvq_p_s32(__a, __b, __p) __arm_vmaxvq_p_s32(__a, __b, __p)
+#define vdupq_m_n_s32(__inactive, __a, __p) __arm_vdupq_m_n_s32(__inactive, __a, __p)
+#define vclzq_m_s32(__inactive, __a, __p) __arm_vclzq_m_s32(__inactive, __a, __p)
+#define vclsq_m_s32(__inactive, __a, __p) __arm_vclsq_m_s32(__inactive, __a, __p)
+#define vaddvaq_p_s32(__a, __b, __p) __arm_vaddvaq_p_s32(__a, __b, __p)
+#define vabsq_m_s32(__inactive, __a, __p) __arm_vabsq_m_s32(__inactive, __a, __p)
+#define vqrdmlsdhxq_s32(__inactive, __a, __b) __arm_vqrdmlsdhxq_s32(__inactive, __a, __b)
+#define vqrdmlsdhq_s32(__inactive, __a, __b) __arm_vqrdmlsdhq_s32(__inactive, __a, __b)
+#define vqrdmlashq_n_s32(__a, __b, __c) __arm_vqrdmlashq_n_s32(__a, __b, __c)
+#define vqrdmlahq_n_s32(__a, __b, __c) __arm_vqrdmlahq_n_s32(__a, __b, __c)
+#define vqrdmladhxq_s32(__inactive, __a, __b) __arm_vqrdmladhxq_s32(__inactive, __a, __b)
+#define vqrdmladhq_s32(__inactive, __a, __b) __arm_vqrdmladhq_s32(__inactive, __a, __b)
+#define vqdmlsdhxq_s32(__inactive, __a, __b) __arm_vqdmlsdhxq_s32(__inactive, __a, __b)
+#define vqdmlsdhq_s32(__inactive, __a, __b) __arm_vqdmlsdhq_s32(__inactive, __a, __b)
+#define vqdmlashq_n_s32(__a, __b, __c) __arm_vqdmlashq_n_s32(__a, __b, __c)
+#define vqdmlahq_n_s32(__a, __b, __c) __arm_vqdmlahq_n_s32(__a, __b, __c)
+#define vqdmladhxq_s32(__inactive, __a, __b) __arm_vqdmladhxq_s32(__inactive, __a, __b)
+#define vqdmladhq_s32(__inactive, __a, __b) __arm_vqdmladhq_s32(__inactive, __a, __b)
+#define vmlsdavaxq_s32(__a, __b, __c) __arm_vmlsdavaxq_s32(__a, __b, __c)
+#define vmlsdavaq_s32(__a, __b, __c) __arm_vmlsdavaq_s32(__a, __b, __c)
+#define vmlasq_n_s32(__a, __b, __c) __arm_vmlasq_n_s32(__a, __b, __c)
+#define vmlaq_n_s32(__a, __b, __c) __arm_vmlaq_n_s32(__a, __b, __c)
+#define vmladavaxq_s32(__a, __b, __c) __arm_vmladavaxq_s32(__a, __b, __c)
+#define vmladavaq_s32(__a, __b, __c) __arm_vmladavaq_s32(__a, __b, __c)
+#define vsriq_n_s32(__a, __b, __imm) __arm_vsriq_n_s32(__a, __b, __imm)
+#define vsliq_n_s32(__a, __b, __imm) __arm_vsliq_n_s32(__a, __b, __imm)
+#define vpselq_u64(__a, __b, __p) __arm_vpselq_u64(__a, __b, __p)
+#define vpselq_s64(__a, __b, __p) __arm_vpselq_s64(__a, __b, __p)
+#define vrmlaldavhaxq_s32(__a, __b, __c) __arm_vrmlaldavhaxq_s32(__a, __b, __c)
+#define vrmlsldavhaq_s32(__a, __b, __c) __arm_vrmlsldavhaq_s32(__a, __b, __c)
+#define vrmlsldavhaxq_s32(__a, __b, __c) __arm_vrmlsldavhaxq_s32(__a, __b, __c)
+#define vaddlvaq_p_s32(__a, __b, __p) __arm_vaddlvaq_p_s32(__a, __b, __p)
+#define vcvtbq_m_f16_f32(__a, __b, __p) __arm_vcvtbq_m_f16_f32(__a, __b, __p)
+#define vcvtbq_m_f32_f16(__inactive, __a, __p) __arm_vcvtbq_m_f32_f16(__inactive, __a, __p)
+#define vcvttq_m_f16_f32(__a, __b, __p) __arm_vcvttq_m_f16_f32(__a, __b, __p)
+#define vcvttq_m_f32_f16(__inactive, __a, __p) __arm_vcvttq_m_f32_f16(__inactive, __a, __p)
+#define vrev16q_m_s8(__inactive, __a, __p) __arm_vrev16q_m_s8(__inactive, __a, __p)
+#define vrev32q_m_f16(__inactive, __a, __p) __arm_vrev32q_m_f16(__inactive, __a, __p)
+#define vrmlaldavhq_p_s32(__a, __b, __p) __arm_vrmlaldavhq_p_s32(__a, __b, __p)
+#define vrmlaldavhxq_p_s32(__a, __b, __p) __arm_vrmlaldavhxq_p_s32(__a, __b, __p)
+#define vrmlsldavhq_p_s32(__a, __b, __p) __arm_vrmlsldavhq_p_s32(__a, __b, __p)
+#define vrmlsldavhxq_p_s32(__a, __b, __p) __arm_vrmlsldavhxq_p_s32(__a, __b, __p)
+#define vaddlvaq_p_u32(__a, __b, __p) __arm_vaddlvaq_p_u32(__a, __b, __p)
+#define vrev16q_m_u8(__inactive, __a, __p) __arm_vrev16q_m_u8(__inactive, __a, __p)
+#define vrmlaldavhq_p_u32(__a, __b, __p) __arm_vrmlaldavhq_p_u32(__a, __b, __p)
+#define vmvnq_m_n_s16(__inactive, __imm, __p) __arm_vmvnq_m_n_s16(__inactive, __imm, __p)
+#define vorrq_m_n_s16(__a, __imm, __p) __arm_vorrq_m_n_s16(__a, __imm, __p)
+#define vqrshrntq_n_s16(__a, __b, __imm) __arm_vqrshrntq_n_s16(__a, __b, __imm)
+#define vqshrnbq_n_s16(__a, __b, __imm) __arm_vqshrnbq_n_s16(__a, __b, __imm)
+#define vqshrntq_n_s16(__a, __b, __imm) __arm_vqshrntq_n_s16(__a, __b, __imm)
+#define vrshrnbq_n_s16(__a, __b, __imm) __arm_vrshrnbq_n_s16(__a, __b, __imm)
+#define vrshrntq_n_s16(__a, __b, __imm) __arm_vrshrntq_n_s16(__a, __b, __imm)
+#define vshrnbq_n_s16(__a, __b, __imm) __arm_vshrnbq_n_s16(__a, __b, __imm)
+#define vshrntq_n_s16(__a, __b, __imm) __arm_vshrntq_n_s16(__a, __b, __imm)
+#define vcmlaq_f16(__a, __b, __c) __arm_vcmlaq_f16(__a, __b, __c)
+#define vcmlaq_rot180_f16(__a, __b, __c) __arm_vcmlaq_rot180_f16(__a, __b, __c)
+#define vcmlaq_rot270_f16(__a, __b, __c) __arm_vcmlaq_rot270_f16(__a, __b, __c)
+#define vcmlaq_rot90_f16(__a, __b, __c) __arm_vcmlaq_rot90_f16(__a, __b, __c)
+#define vfmaq_f16(__a, __b, __c) __arm_vfmaq_f16(__a, __b, __c)
+#define vfmaq_n_f16(__a, __b, __c) __arm_vfmaq_n_f16(__a, __b, __c)
+#define vfmasq_n_f16(__a, __b, __c) __arm_vfmasq_n_f16(__a, __b, __c)
+#define vfmsq_f16(__a, __b, __c) __arm_vfmsq_f16(__a, __b, __c)
+#define vmlaldavaq_s16(__a, __b, __c) __arm_vmlaldavaq_s16(__a, __b, __c)
+#define vmlaldavaxq_s16(__a, __b, __c) __arm_vmlaldavaxq_s16(__a, __b, __c)
+#define vmlsldavaq_s16(__a, __b, __c) __arm_vmlsldavaq_s16(__a, __b, __c)
+#define vmlsldavaxq_s16(__a, __b, __c) __arm_vmlsldavaxq_s16(__a, __b, __c)
+#define vabsq_m_f16(__inactive, __a, __p) __arm_vabsq_m_f16(__inactive, __a, __p)
+#define vcvtmq_m_s16_f16(__inactive, __a, __p) __arm_vcvtmq_m_s16_f16(__inactive, __a, __p)
+#define vcvtnq_m_s16_f16(__inactive, __a, __p) __arm_vcvtnq_m_s16_f16(__inactive, __a, __p)
+#define vcvtpq_m_s16_f16(__inactive, __a, __p) __arm_vcvtpq_m_s16_f16(__inactive, __a, __p)
+#define vcvtq_m_s16_f16(__inactive, __a, __p) __arm_vcvtq_m_s16_f16(__inactive, __a, __p)
+#define vdupq_m_n_f16(__inactive, __a, __p) __arm_vdupq_m_n_f16(__inactive, __a, __p)
+#define vmaxnmaq_m_f16(__a, __b, __p) __arm_vmaxnmaq_m_f16(__a, __b, __p)
+#define vmaxnmavq_p_f16(__a, __b, __p) __arm_vmaxnmavq_p_f16(__a, __b, __p)
+#define vmaxnmvq_p_f16(__a, __b, __p) __arm_vmaxnmvq_p_f16(__a, __b, __p)
+#define vminnmaq_m_f16(__a, __b, __p) __arm_vminnmaq_m_f16(__a, __b, __p)
+#define vminnmavq_p_f16(__a, __b, __p) __arm_vminnmavq_p_f16(__a, __b, __p)
+#define vminnmvq_p_f16(__a, __b, __p) __arm_vminnmvq_p_f16(__a, __b, __p)
+#define vmlaldavq_p_s16(__a, __b, __p) __arm_vmlaldavq_p_s16(__a, __b, __p)
+#define vmlaldavxq_p_s16(__a, __b, __p) __arm_vmlaldavxq_p_s16(__a, __b, __p)
+#define vmlsldavq_p_s16(__a, __b, __p) __arm_vmlsldavq_p_s16(__a, __b, __p)
+#define vmlsldavxq_p_s16(__a, __b, __p) __arm_vmlsldavxq_p_s16(__a, __b, __p)
+#define vmovlbq_m_s8(__inactive, __a, __p) __arm_vmovlbq_m_s8(__inactive, __a, __p)
+#define vmovltq_m_s8(__inactive, __a, __p) __arm_vmovltq_m_s8(__inactive, __a, __p)
+#define vmovnbq_m_s16(__a, __b, __p) __arm_vmovnbq_m_s16(__a, __b, __p)
+#define vmovntq_m_s16(__a, __b, __p) __arm_vmovntq_m_s16(__a, __b, __p)
+#define vnegq_m_f16(__inactive, __a, __p) __arm_vnegq_m_f16(__inactive, __a, __p)
+#define vpselq_f16(__a, __b, __p) __arm_vpselq_f16(__a, __b, __p)
+#define vqmovnbq_m_s16(__a, __b, __p) __arm_vqmovnbq_m_s16(__a, __b, __p)
+#define vqmovntq_m_s16(__a, __b, __p) __arm_vqmovntq_m_s16(__a, __b, __p)
+#define vrev32q_m_s8(__inactive, __a, __p) __arm_vrev32q_m_s8(__inactive, __a, __p)
+#define vrev64q_m_f16(__inactive, __a, __p) __arm_vrev64q_m_f16(__inactive, __a, __p)
+#define vrndaq_m_f16(__inactive, __a, __p) __arm_vrndaq_m_f16(__inactive, __a, __p)
+#define vrndmq_m_f16(__inactive, __a, __p) __arm_vrndmq_m_f16(__inactive, __a, __p)
+#define vrndnq_m_f16(__inactive, __a, __p) __arm_vrndnq_m_f16(__inactive, __a, __p)
+#define vrndpq_m_f16(__inactive, __a, __p) __arm_vrndpq_m_f16(__inactive, __a, __p)
+#define vrndq_m_f16(__inactive, __a, __p) __arm_vrndq_m_f16(__inactive, __a, __p)
+#define vrndxq_m_f16(__inactive, __a, __p) __arm_vrndxq_m_f16(__inactive, __a, __p)
+#define vcmpeqq_m_n_f16(__a, __b, __p) __arm_vcmpeqq_m_n_f16(__a, __b, __p)
+#define vcmpgeq_m_f16(__a, __b, __p) __arm_vcmpgeq_m_f16(__a, __b, __p)
+#define vcmpgeq_m_n_f16(__a, __b, __p) __arm_vcmpgeq_m_n_f16(__a, __b, __p)
+#define vcmpgtq_m_f16(__a, __b, __p) __arm_vcmpgtq_m_f16(__a, __b, __p)
+#define vcmpgtq_m_n_f16(__a, __b, __p) __arm_vcmpgtq_m_n_f16(__a, __b, __p)
+#define vcmpleq_m_f16(__a, __b, __p) __arm_vcmpleq_m_f16(__a, __b, __p)
+#define vcmpleq_m_n_f16(__a, __b, __p) __arm_vcmpleq_m_n_f16(__a, __b, __p)
+#define vcmpltq_m_f16(__a, __b, __p) __arm_vcmpltq_m_f16(__a, __b, __p)
+#define vcmpltq_m_n_f16(__a, __b, __p) __arm_vcmpltq_m_n_f16(__a, __b, __p)
+#define vcmpneq_m_f16(__a, __b, __p) __arm_vcmpneq_m_f16(__a, __b, __p)
+#define vcmpneq_m_n_f16(__a, __b, __p) __arm_vcmpneq_m_n_f16(__a, __b, __p)
+#define vmvnq_m_n_u16(__inactive, __imm, __p) __arm_vmvnq_m_n_u16(__inactive, __imm, __p)
+#define vorrq_m_n_u16(__a, __imm, __p) __arm_vorrq_m_n_u16(__a, __imm, __p)
+#define vqrshruntq_n_s16(__a, __b, __imm) __arm_vqrshruntq_n_s16(__a, __b, __imm)
+#define vqshrunbq_n_s16(__a, __b, __imm) __arm_vqshrunbq_n_s16(__a, __b, __imm)
+#define vqshruntq_n_s16(__a, __b, __imm) __arm_vqshruntq_n_s16(__a, __b, __imm)
+#define vcvtmq_m_u16_f16(__inactive, __a, __p) __arm_vcvtmq_m_u16_f16(__inactive, __a, __p)
+#define vcvtnq_m_u16_f16(__inactive, __a, __p) __arm_vcvtnq_m_u16_f16(__inactive, __a, __p)
+#define vcvtpq_m_u16_f16(__inactive, __a, __p) __arm_vcvtpq_m_u16_f16(__inactive, __a, __p)
+#define vcvtq_m_u16_f16(__inactive, __a, __p) __arm_vcvtq_m_u16_f16(__inactive, __a, __p)
+#define vqmovunbq_m_s16(__a, __b, __p) __arm_vqmovunbq_m_s16(__a, __b, __p)
+#define vqmovuntq_m_s16(__a, __b, __p) __arm_vqmovuntq_m_s16(__a, __b, __p)
+#define vqrshrntq_n_u16(__a, __b, __imm) __arm_vqrshrntq_n_u16(__a, __b, __imm)
+#define vqshrnbq_n_u16(__a, __b, __imm) __arm_vqshrnbq_n_u16(__a, __b, __imm)
+#define vqshrntq_n_u16(__a, __b, __imm) __arm_vqshrntq_n_u16(__a, __b, __imm)
+#define vrshrnbq_n_u16(__a, __b, __imm) __arm_vrshrnbq_n_u16(__a, __b, __imm)
+#define vrshrntq_n_u16(__a, __b, __imm) __arm_vrshrntq_n_u16(__a, __b, __imm)
+#define vshrnbq_n_u16(__a, __b, __imm) __arm_vshrnbq_n_u16(__a, __b, __imm)
+#define vshrntq_n_u16(__a, __b, __imm) __arm_vshrntq_n_u16(__a, __b, __imm)
+#define vmlaldavaq_u16(__a, __b, __c) __arm_vmlaldavaq_u16(__a, __b, __c)
+#define vmlaldavq_p_u16(__a, __b, __p) __arm_vmlaldavq_p_u16(__a, __b, __p)
+#define vmovlbq_m_u8(__inactive, __a, __p) __arm_vmovlbq_m_u8(__inactive, __a, __p)
+#define vmovltq_m_u8(__inactive, __a, __p) __arm_vmovltq_m_u8(__inactive, __a, __p)
+#define vmovnbq_m_u16(__a, __b, __p) __arm_vmovnbq_m_u16(__a, __b, __p)
+#define vmovntq_m_u16(__a, __b, __p) __arm_vmovntq_m_u16(__a, __b, __p)
+#define vqmovnbq_m_u16(__a, __b, __p) __arm_vqmovnbq_m_u16(__a, __b, __p)
+#define vqmovntq_m_u16(__a, __b, __p) __arm_vqmovntq_m_u16(__a, __b, __p)
+#define vrev32q_m_u8(__inactive, __a, __p) __arm_vrev32q_m_u8(__inactive, __a, __p)
+#define vmvnq_m_n_s32(__inactive, __imm, __p) __arm_vmvnq_m_n_s32(__inactive, __imm, __p)
+#define vorrq_m_n_s32(__a, __imm, __p) __arm_vorrq_m_n_s32(__a, __imm, __p)
+#define vqrshrntq_n_s32(__a, __b, __imm) __arm_vqrshrntq_n_s32(__a, __b, __imm)
+#define vqshrnbq_n_s32(__a, __b, __imm) __arm_vqshrnbq_n_s32(__a, __b, __imm)
+#define vqshrntq_n_s32(__a, __b, __imm) __arm_vqshrntq_n_s32(__a, __b, __imm)
+#define vrshrnbq_n_s32(__a, __b, __imm) __arm_vrshrnbq_n_s32(__a, __b, __imm)
+#define vrshrntq_n_s32(__a, __b, __imm) __arm_vrshrntq_n_s32(__a, __b, __imm)
+#define vshrnbq_n_s32(__a, __b, __imm) __arm_vshrnbq_n_s32(__a, __b, __imm)
+#define vshrntq_n_s32(__a, __b, __imm) __arm_vshrntq_n_s32(__a, __b, __imm)
+#define vcmlaq_f32(__a, __b, __c) __arm_vcmlaq_f32(__a, __b, __c)
+#define vcmlaq_rot180_f32(__a, __b, __c) __arm_vcmlaq_rot180_f32(__a, __b, __c)
+#define vcmlaq_rot270_f32(__a, __b, __c) __arm_vcmlaq_rot270_f32(__a, __b, __c)
+#define vcmlaq_rot90_f32(__a, __b, __c) __arm_vcmlaq_rot90_f32(__a, __b, __c)
+#define vfmaq_f32(__a, __b, __c) __arm_vfmaq_f32(__a, __b, __c)
+#define vfmaq_n_f32(__a, __b, __c) __arm_vfmaq_n_f32(__a, __b, __c)
+#define vfmasq_n_f32(__a, __b, __c) __arm_vfmasq_n_f32(__a, __b, __c)
+#define vfmsq_f32(__a, __b, __c) __arm_vfmsq_f32(__a, __b, __c)
+#define vmlaldavaq_s32(__a, __b, __c) __arm_vmlaldavaq_s32(__a, __b, __c)
+#define vmlaldavaxq_s32(__a, __b, __c) __arm_vmlaldavaxq_s32(__a, __b, __c)
+#define vmlsldavaq_s32(__a, __b, __c) __arm_vmlsldavaq_s32(__a, __b, __c)
+#define vmlsldavaxq_s32(__a, __b, __c) __arm_vmlsldavaxq_s32(__a, __b, __c)
+#define vabsq_m_f32(__inactive, __a, __p) __arm_vabsq_m_f32(__inactive, __a, __p)
+#define vcvtmq_m_s32_f32(__inactive, __a, __p) __arm_vcvtmq_m_s32_f32(__inactive, __a, __p)
+#define vcvtnq_m_s32_f32(__inactive, __a, __p) __arm_vcvtnq_m_s32_f32(__inactive, __a, __p)
+#define vcvtpq_m_s32_f32(__inactive, __a, __p) __arm_vcvtpq_m_s32_f32(__inactive, __a, __p)
+#define vcvtq_m_s32_f32(__inactive, __a, __p) __arm_vcvtq_m_s32_f32(__inactive, __a, __p)
+#define vdupq_m_n_f32(__inactive, __a, __p) __arm_vdupq_m_n_f32(__inactive, __a, __p)
+#define vmaxnmaq_m_f32(__a, __b, __p) __arm_vmaxnmaq_m_f32(__a, __b, __p)
+#define vmaxnmavq_p_f32(__a, __b, __p) __arm_vmaxnmavq_p_f32(__a, __b, __p)
+#define vmaxnmvq_p_f32(__a, __b, __p) __arm_vmaxnmvq_p_f32(__a, __b, __p)
+#define vminnmaq_m_f32(__a, __b, __p) __arm_vminnmaq_m_f32(__a, __b, __p)
+#define vminnmavq_p_f32(__a, __b, __p) __arm_vminnmavq_p_f32(__a, __b, __p)
+#define vminnmvq_p_f32(__a, __b, __p) __arm_vminnmvq_p_f32(__a, __b, __p)
+#define vmlaldavq_p_s32(__a, __b, __p) __arm_vmlaldavq_p_s32(__a, __b, __p)
+#define vmlaldavxq_p_s32(__a, __b, __p) __arm_vmlaldavxq_p_s32(__a, __b, __p)
+#define vmlsldavq_p_s32(__a, __b, __p) __arm_vmlsldavq_p_s32(__a, __b, __p)
+#define vmlsldavxq_p_s32(__a, __b, __p) __arm_vmlsldavxq_p_s32(__a, __b, __p)
+#define vmovlbq_m_s16(__inactive, __a, __p) __arm_vmovlbq_m_s16(__inactive, __a, __p)
+#define vmovltq_m_s16(__inactive, __a, __p) __arm_vmovltq_m_s16(__inactive, __a, __p)
+#define vmovnbq_m_s32(__a, __b, __p) __arm_vmovnbq_m_s32(__a, __b, __p)
+#define vmovntq_m_s32(__a, __b, __p) __arm_vmovntq_m_s32(__a, __b, __p)
+#define vnegq_m_f32(__inactive, __a, __p) __arm_vnegq_m_f32(__inactive, __a, __p)
+#define vpselq_f32(__a, __b, __p) __arm_vpselq_f32(__a, __b, __p)
+#define vqmovnbq_m_s32(__a, __b, __p) __arm_vqmovnbq_m_s32(__a, __b, __p)
+#define vqmovntq_m_s32(__a, __b, __p) __arm_vqmovntq_m_s32(__a, __b, __p)
+#define vrev32q_m_s16(__inactive, __a, __p) __arm_vrev32q_m_s16(__inactive, __a, __p)
+#define vrev64q_m_f32(__inactive, __a, __p) __arm_vrev64q_m_f32(__inactive, __a, __p)
+#define vrndaq_m_f32(__inactive, __a, __p) __arm_vrndaq_m_f32(__inactive, __a, __p)
+#define vrndmq_m_f32(__inactive, __a, __p) __arm_vrndmq_m_f32(__inactive, __a, __p)
+#define vrndnq_m_f32(__inactive, __a, __p) __arm_vrndnq_m_f32(__inactive, __a, __p)
+#define vrndpq_m_f32(__inactive, __a, __p) __arm_vrndpq_m_f32(__inactive, __a, __p)
+#define vrndq_m_f32(__inactive, __a, __p) __arm_vrndq_m_f32(__inactive, __a, __p)
+#define vrndxq_m_f32(__inactive, __a, __p) __arm_vrndxq_m_f32(__inactive, __a, __p)
+#define vcmpeqq_m_n_f32(__a, __b, __p) __arm_vcmpeqq_m_n_f32(__a, __b, __p)
+#define vcmpgeq_m_f32(__a, __b, __p) __arm_vcmpgeq_m_f32(__a, __b, __p)
+#define vcmpgeq_m_n_f32(__a, __b, __p) __arm_vcmpgeq_m_n_f32(__a, __b, __p)
+#define vcmpgtq_m_f32(__a, __b, __p) __arm_vcmpgtq_m_f32(__a, __b, __p)
+#define vcmpgtq_m_n_f32(__a, __b, __p) __arm_vcmpgtq_m_n_f32(__a, __b, __p)
+#define vcmpleq_m_f32(__a, __b, __p) __arm_vcmpleq_m_f32(__a, __b, __p)
+#define vcmpleq_m_n_f32(__a, __b, __p) __arm_vcmpleq_m_n_f32(__a, __b, __p)
+#define vcmpltq_m_f32(__a, __b, __p) __arm_vcmpltq_m_f32(__a, __b, __p)
+#define vcmpltq_m_n_f32(__a, __b, __p) __arm_vcmpltq_m_n_f32(__a, __b, __p)
+#define vcmpneq_m_f32(__a, __b, __p) __arm_vcmpneq_m_f32(__a, __b, __p)
+#define vcmpneq_m_n_f32(__a, __b, __p) __arm_vcmpneq_m_n_f32(__a, __b, __p)
+#define vmvnq_m_n_u32(__inactive, __imm, __p) __arm_vmvnq_m_n_u32(__inactive, __imm, __p)
+#define vorrq_m_n_u32(__a, __imm, __p) __arm_vorrq_m_n_u32(__a, __imm, __p)
+#define vqrshruntq_n_s32(__a, __b, __imm) __arm_vqrshruntq_n_s32(__a, __b, __imm)
+#define vqshrunbq_n_s32(__a, __b, __imm) __arm_vqshrunbq_n_s32(__a, __b, __imm)
+#define vqshruntq_n_s32(__a, __b, __imm) __arm_vqshruntq_n_s32(__a, __b, __imm)
+#define vcvtmq_m_u32_f32(__inactive, __a, __p) __arm_vcvtmq_m_u32_f32(__inactive, __a, __p)
+#define vcvtnq_m_u32_f32(__inactive, __a, __p) __arm_vcvtnq_m_u32_f32(__inactive, __a, __p)
+#define vcvtpq_m_u32_f32(__inactive, __a, __p) __arm_vcvtpq_m_u32_f32(__inactive, __a, __p)
+#define vcvtq_m_u32_f32(__inactive, __a, __p) __arm_vcvtq_m_u32_f32(__inactive, __a, __p)
+#define vqmovunbq_m_s32(__a, __b, __p) __arm_vqmovunbq_m_s32(__a, __b, __p)
+#define vqmovuntq_m_s32(__a, __b, __p) __arm_vqmovuntq_m_s32(__a, __b, __p)
+#define vqrshrntq_n_u32(__a, __b, __imm) __arm_vqrshrntq_n_u32(__a, __b, __imm)
+#define vqshrnbq_n_u32(__a, __b, __imm) __arm_vqshrnbq_n_u32(__a, __b, __imm)
+#define vqshrntq_n_u32(__a, __b, __imm) __arm_vqshrntq_n_u32(__a, __b, __imm)
+#define vrshrnbq_n_u32(__a, __b, __imm) __arm_vrshrnbq_n_u32(__a, __b, __imm)
+#define vrshrntq_n_u32(__a, __b, __imm) __arm_vrshrntq_n_u32(__a, __b, __imm)
+#define vshrnbq_n_u32(__a, __b, __imm) __arm_vshrnbq_n_u32(__a, __b, __imm)
+#define vshrntq_n_u32(__a, __b, __imm) __arm_vshrntq_n_u32(__a, __b, __imm)
+#define vmlaldavaq_u32(__a, __b, __c) __arm_vmlaldavaq_u32(__a, __b, __c)
+#define vmlaldavq_p_u32(__a, __b, __p) __arm_vmlaldavq_p_u32(__a, __b, __p)
+#define vmovlbq_m_u16(__inactive, __a, __p) __arm_vmovlbq_m_u16(__inactive, __a, __p)
+#define vmovltq_m_u16(__inactive, __a, __p) __arm_vmovltq_m_u16(__inactive, __a, __p)
+#define vmovnbq_m_u32(__a, __b, __p) __arm_vmovnbq_m_u32(__a, __b, __p)
+#define vmovntq_m_u32(__a, __b, __p) __arm_vmovntq_m_u32(__a, __b, __p)
+#define vqmovnbq_m_u32(__a, __b, __p) __arm_vqmovnbq_m_u32(__a, __b, __p)
+#define vqmovntq_m_u32(__a, __b, __p) __arm_vqmovntq_m_u32(__a, __b, __p)
+#define vrev32q_m_u16(__inactive, __a, __p) __arm_vrev32q_m_u16(__inactive, __a, __p)
+#define vsriq_m_n_s8(__a, __b, __imm, __p) __arm_vsriq_m_n_s8(__a, __b, __imm, __p)
+#define vsubq_m_s8(__inactive, __a, __b, __p) __arm_vsubq_m_s8(__inactive, __a, __b, __p)
+#define vcvtq_m_n_f16_u16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f16_u16(__inactive, __a, __imm6, __p)
+#define vqshluq_m_n_s8(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s8(__inactive, __a, __imm, __p)
+#define vabavq_p_s8(__a, __b, __c, __p) __arm_vabavq_p_s8(__a, __b, __c, __p)
+#define vsriq_m_n_u8(__a, __b, __imm, __p) __arm_vsriq_m_n_u8(__a, __b, __imm, __p)
+#define vshlq_m_u8(__inactive, __a, __b, __p) __arm_vshlq_m_u8(__inactive, __a, __b, __p)
+#define vsubq_m_u8(__inactive, __a, __b, __p) __arm_vsubq_m_u8(__inactive, __a, __b, __p)
+#define vabavq_p_u8(__a, __b, __c, __p) __arm_vabavq_p_u8(__a, __b, __c, __p)
+#define vshlq_m_s8(__inactive, __a, __b, __p) __arm_vshlq_m_s8(__inactive, __a, __b, __p)
+#define vcvtq_m_n_f16_s16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f16_s16(__inactive, __a, __imm6, __p)
+#define vsriq_m_n_s16(__a, __b, __imm, __p) __arm_vsriq_m_n_s16(__a, __b, __imm, __p)
+#define vsubq_m_s16(__inactive, __a, __b, __p) __arm_vsubq_m_s16(__inactive, __a, __b, __p)
+#define vcvtq_m_n_f32_u32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f32_u32(__inactive, __a, __imm6, __p)
+#define vqshluq_m_n_s16(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s16(__inactive, __a, __imm, __p)
+#define vabavq_p_s16(__a, __b, __c, __p) __arm_vabavq_p_s16(__a, __b, __c, __p)
+#define vsriq_m_n_u16(__a, __b, __imm, __p) __arm_vsriq_m_n_u16(__a, __b, __imm, __p)
+#define vshlq_m_u16(__inactive, __a, __b, __p) __arm_vshlq_m_u16(__inactive, __a, __b, __p)
+#define vsubq_m_u16(__inactive, __a, __b, __p) __arm_vsubq_m_u16(__inactive, __a, __b, __p)
+#define vabavq_p_u16(__a, __b, __c, __p) __arm_vabavq_p_u16(__a, __b, __c, __p)
+#define vshlq_m_s16(__inactive, __a, __b, __p) __arm_vshlq_m_s16(__inactive, __a, __b, __p)
+#define vcvtq_m_n_f32_s32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_f32_s32(__inactive, __a, __imm6, __p)
+#define vsriq_m_n_s32(__a, __b, __imm, __p) __arm_vsriq_m_n_s32(__a, __b, __imm, __p)
+#define vsubq_m_s32(__inactive, __a, __b, __p) __arm_vsubq_m_s32(__inactive, __a, __b, __p)
+#define vqshluq_m_n_s32(__inactive, __a, __imm, __p) __arm_vqshluq_m_n_s32(__inactive, __a, __imm, __p)
+#define vabavq_p_s32(__a, __b, __c, __p) __arm_vabavq_p_s32(__a, __b, __c, __p)
+#define vsriq_m_n_u32(__a, __b, __imm, __p) __arm_vsriq_m_n_u32(__a, __b, __imm, __p)
+#define vshlq_m_u32(__inactive, __a, __b, __p) __arm_vshlq_m_u32(__inactive, __a, __b, __p)
+#define vsubq_m_u32(__inactive, __a, __b, __p) __arm_vsubq_m_u32(__inactive, __a, __b, __p)
+#define vabavq_p_u32(__a, __b, __c, __p) __arm_vabavq_p_u32(__a, __b, __c, __p)
+#define vshlq_m_s32(__inactive, __a, __b, __p) __arm_vshlq_m_s32(__inactive, __a, __b, __p)
+#define vabdq_m_s8(__inactive, __a, __b, __p) __arm_vabdq_m_s8(__inactive, __a, __b, __p)
+#define vabdq_m_s32(__inactive, __a, __b, __p) __arm_vabdq_m_s32(__inactive, __a, __b, __p)
+#define vabdq_m_s16(__inactive, __a, __b, __p) __arm_vabdq_m_s16(__inactive, __a, __b, __p)
+#define vabdq_m_u8(__inactive, __a, __b, __p) __arm_vabdq_m_u8(__inactive, __a, __b, __p)
+#define vabdq_m_u32(__inactive, __a, __b, __p) __arm_vabdq_m_u32(__inactive, __a, __b, __p)
+#define vabdq_m_u16(__inactive, __a, __b, __p) __arm_vabdq_m_u16(__inactive, __a, __b, __p)
+#define vaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vaddq_m_n_s8(__inactive, __a, __b, __p)
+#define vaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vaddq_m_n_s32(__inactive, __a, __b, __p)
+#define vaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vaddq_m_n_s16(__inactive, __a, __b, __p)
+#define vaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vaddq_m_n_u8(__inactive, __a, __b, __p)
+#define vaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vaddq_m_n_u32(__inactive, __a, __b, __p)
+#define vaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vaddq_m_n_u16(__inactive, __a, __b, __p)
+#define vaddq_m_s8(__inactive, __a, __b, __p) __arm_vaddq_m_s8(__inactive, __a, __b, __p)
+#define vaddq_m_s32(__inactive, __a, __b, __p) __arm_vaddq_m_s32(__inactive, __a, __b, __p)
+#define vaddq_m_s16(__inactive, __a, __b, __p) __arm_vaddq_m_s16(__inactive, __a, __b, __p)
+#define vaddq_m_u8(__inactive, __a, __b, __p) __arm_vaddq_m_u8(__inactive, __a, __b, __p)
+#define vaddq_m_u32(__inactive, __a, __b, __p) __arm_vaddq_m_u32(__inactive, __a, __b, __p)
+#define vaddq_m_u16(__inactive, __a, __b, __p) __arm_vaddq_m_u16(__inactive, __a, __b, __p)
+#define vandq_m_s8(__inactive, __a, __b, __p) __arm_vandq_m_s8(__inactive, __a, __b, __p)
+#define vandq_m_s32(__inactive, __a, __b, __p) __arm_vandq_m_s32(__inactive, __a, __b, __p)
+#define vandq_m_s16(__inactive, __a, __b, __p) __arm_vandq_m_s16(__inactive, __a, __b, __p)
+#define vandq_m_u8(__inactive, __a, __b, __p) __arm_vandq_m_u8(__inactive, __a, __b, __p)
+#define vandq_m_u32(__inactive, __a, __b, __p) __arm_vandq_m_u32(__inactive, __a, __b, __p)
+#define vandq_m_u16(__inactive, __a, __b, __p) __arm_vandq_m_u16(__inactive, __a, __b, __p)
+#define vbicq_m_s8(__inactive, __a, __b, __p) __arm_vbicq_m_s8(__inactive, __a, __b, __p)
+#define vbicq_m_s32(__inactive, __a, __b, __p) __arm_vbicq_m_s32(__inactive, __a, __b, __p)
+#define vbicq_m_s16(__inactive, __a, __b, __p) __arm_vbicq_m_s16(__inactive, __a, __b, __p)
+#define vbicq_m_u8(__inactive, __a, __b, __p) __arm_vbicq_m_u8(__inactive, __a, __b, __p)
+#define vbicq_m_u32(__inactive, __a, __b, __p) __arm_vbicq_m_u32(__inactive, __a, __b, __p)
+#define vbicq_m_u16(__inactive, __a, __b, __p) __arm_vbicq_m_u16(__inactive, __a, __b, __p)
+#define vbrsrq_m_n_s8(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s8(__inactive, __a, __b, __p)
+#define vbrsrq_m_n_s32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s32(__inactive, __a, __b, __p)
+#define vbrsrq_m_n_s16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_s16(__inactive, __a, __b, __p)
+#define vbrsrq_m_n_u8(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u8(__inactive, __a, __b, __p)
+#define vbrsrq_m_n_u32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u32(__inactive, __a, __b, __p)
+#define vbrsrq_m_n_u16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_u16(__inactive, __a, __b, __p)
+#define vcaddq_rot270_m_s8(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s8(__inactive, __a, __b, __p)
+#define vcaddq_rot270_m_s32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s32(__inactive, __a, __b, __p)
+#define vcaddq_rot270_m_s16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_s16(__inactive, __a, __b, __p)
+#define vcaddq_rot270_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u8(__inactive, __a, __b, __p)
+#define vcaddq_rot270_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u32(__inactive, __a, __b, __p)
+#define vcaddq_rot270_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_u16(__inactive, __a, __b, __p)
+#define vcaddq_rot90_m_s8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s8(__inactive, __a, __b, __p)
+#define vcaddq_rot90_m_s32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s32(__inactive, __a, __b, __p)
+#define vcaddq_rot90_m_s16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_s16(__inactive, __a, __b, __p)
+#define vcaddq_rot90_m_u8(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u8(__inactive, __a, __b, __p)
+#define vcaddq_rot90_m_u32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u32(__inactive, __a, __b, __p)
+#define vcaddq_rot90_m_u16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_u16(__inactive, __a, __b, __p)
+#define veorq_m_s8(__inactive, __a, __b, __p) __arm_veorq_m_s8(__inactive, __a, __b, __p)
+#define veorq_m_s32(__inactive, __a, __b, __p) __arm_veorq_m_s32(__inactive, __a, __b, __p)
+#define veorq_m_s16(__inactive, __a, __b, __p) __arm_veorq_m_s16(__inactive, __a, __b, __p)
+#define veorq_m_u8(__inactive, __a, __b, __p) __arm_veorq_m_u8(__inactive, __a, __b, __p)
+#define veorq_m_u32(__inactive, __a, __b, __p) __arm_veorq_m_u32(__inactive, __a, __b, __p)
+#define veorq_m_u16(__inactive, __a, __b, __p) __arm_veorq_m_u16(__inactive, __a, __b, __p)
+#define vhaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s8(__inactive, __a, __b, __p)
+#define vhaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s32(__inactive, __a, __b, __p)
+#define vhaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_s16(__inactive, __a, __b, __p)
+#define vhaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u8(__inactive, __a, __b, __p)
+#define vhaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u32(__inactive, __a, __b, __p)
+#define vhaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vhaddq_m_n_u16(__inactive, __a, __b, __p)
+#define vhaddq_m_s8(__inactive, __a, __b, __p) __arm_vhaddq_m_s8(__inactive, __a, __b, __p)
+#define vhaddq_m_s32(__inactive, __a, __b, __p) __arm_vhaddq_m_s32(__inactive, __a, __b, __p)
+#define vhaddq_m_s16(__inactive, __a, __b, __p) __arm_vhaddq_m_s16(__inactive, __a, __b, __p)
+#define vhaddq_m_u8(__inactive, __a, __b, __p) __arm_vhaddq_m_u8(__inactive, __a, __b, __p)
+#define vhaddq_m_u32(__inactive, __a, __b, __p) __arm_vhaddq_m_u32(__inactive, __a, __b, __p)
+#define vhaddq_m_u16(__inactive, __a, __b, __p) __arm_vhaddq_m_u16(__inactive, __a, __b, __p)
+#define vhcaddq_rot270_m_s8(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s8(__inactive, __a, __b, __p)
+#define vhcaddq_rot270_m_s32(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s32(__inactive, __a, __b, __p)
+#define vhcaddq_rot270_m_s16(__inactive, __a, __b, __p) __arm_vhcaddq_rot270_m_s16(__inactive, __a, __b, __p)
+#define vhcaddq_rot90_m_s8(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s8(__inactive, __a, __b, __p)
+#define vhcaddq_rot90_m_s32(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s32(__inactive, __a, __b, __p)
+#define vhcaddq_rot90_m_s16(__inactive, __a, __b, __p) __arm_vhcaddq_rot90_m_s16(__inactive, __a, __b, __p)
+#define vhsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s8(__inactive, __a, __b, __p)
+#define vhsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s32(__inactive, __a, __b, __p)
+#define vhsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vhsubq_m_n_s16(__inactive, __a, __b, __p)
+#define vhsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u8(__inactive, __a, __b, __p)
+#define vhsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u32(__inactive, __a, __b, __p)
+#define vhsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vhsubq_m_n_u16(__inactive, __a, __b, __p)
+#define vhsubq_m_s8(__inactive, __a, __b, __p) __arm_vhsubq_m_s8(__inactive, __a, __b, __p)
+#define vhsubq_m_s32(__inactive, __a, __b, __p) __arm_vhsubq_m_s32(__inactive, __a, __b, __p)
+#define vhsubq_m_s16(__inactive, __a, __b, __p) __arm_vhsubq_m_s16(__inactive, __a, __b, __p)
+#define vhsubq_m_u8(__inactive, __a, __b, __p) __arm_vhsubq_m_u8(__inactive, __a, __b, __p)
+#define vhsubq_m_u32(__inactive, __a, __b, __p) __arm_vhsubq_m_u32(__inactive, __a, __b, __p)
+#define vhsubq_m_u16(__inactive, __a, __b, __p) __arm_vhsubq_m_u16(__inactive, __a, __b, __p)
+#define vmaxq_m_s8(__inactive, __a, __b, __p) __arm_vmaxq_m_s8(__inactive, __a, __b, __p)
+#define vmaxq_m_s32(__inactive, __a, __b, __p) __arm_vmaxq_m_s32(__inactive, __a, __b, __p)
+#define vmaxq_m_s16(__inactive, __a, __b, __p) __arm_vmaxq_m_s16(__inactive, __a, __b, __p)
+#define vmaxq_m_u8(__inactive, __a, __b, __p) __arm_vmaxq_m_u8(__inactive, __a, __b, __p)
+#define vmaxq_m_u32(__inactive, __a, __b, __p) __arm_vmaxq_m_u32(__inactive, __a, __b, __p)
+#define vmaxq_m_u16(__inactive, __a, __b, __p) __arm_vmaxq_m_u16(__inactive, __a, __b, __p)
+#define vminq_m_s8(__inactive, __a, __b, __p) __arm_vminq_m_s8(__inactive, __a, __b, __p)
+#define vminq_m_s32(__inactive, __a, __b, __p) __arm_vminq_m_s32(__inactive, __a, __b, __p)
+#define vminq_m_s16(__inactive, __a, __b, __p) __arm_vminq_m_s16(__inactive, __a, __b, __p)
+#define vminq_m_u8(__inactive, __a, __b, __p) __arm_vminq_m_u8(__inactive, __a, __b, __p)
+#define vminq_m_u32(__inactive, __a, __b, __p) __arm_vminq_m_u32(__inactive, __a, __b, __p)
+#define vminq_m_u16(__inactive, __a, __b, __p) __arm_vminq_m_u16(__inactive, __a, __b, __p)
+#define vmladavaq_p_s8(__a, __b, __c, __p) __arm_vmladavaq_p_s8(__a, __b, __c, __p)
+#define vmladavaq_p_s32(__a, __b, __c, __p) __arm_vmladavaq_p_s32(__a, __b, __c, __p)
+#define vmladavaq_p_s16(__a, __b, __c, __p) __arm_vmladavaq_p_s16(__a, __b, __c, __p)
+#define vmladavaq_p_u8(__a, __b, __c, __p) __arm_vmladavaq_p_u8(__a, __b, __c, __p)
+#define vmladavaq_p_u32(__a, __b, __c, __p) __arm_vmladavaq_p_u32(__a, __b, __c, __p)
+#define vmladavaq_p_u16(__a, __b, __c, __p) __arm_vmladavaq_p_u16(__a, __b, __c, __p)
+#define vmladavaxq_p_s8(__a, __b, __c, __p) __arm_vmladavaxq_p_s8(__a, __b, __c, __p)
+#define vmladavaxq_p_s32(__a, __b, __c, __p) __arm_vmladavaxq_p_s32(__a, __b, __c, __p)
+#define vmladavaxq_p_s16(__a, __b, __c, __p) __arm_vmladavaxq_p_s16(__a, __b, __c, __p)
+#define vmlaq_m_n_s8(__a, __b, __c, __p) __arm_vmlaq_m_n_s8(__a, __b, __c, __p)
+#define vmlaq_m_n_s32(__a, __b, __c, __p) __arm_vmlaq_m_n_s32(__a, __b, __c, __p)
+#define vmlaq_m_n_s16(__a, __b, __c, __p) __arm_vmlaq_m_n_s16(__a, __b, __c, __p)
+#define vmlaq_m_n_u8(__a, __b, __c, __p) __arm_vmlaq_m_n_u8(__a, __b, __c, __p)
+#define vmlaq_m_n_u32(__a, __b, __c, __p) __arm_vmlaq_m_n_u32(__a, __b, __c, __p)
+#define vmlaq_m_n_u16(__a, __b, __c, __p) __arm_vmlaq_m_n_u16(__a, __b, __c, __p)
+#define vmlasq_m_n_s8(__a, __b, __c, __p) __arm_vmlasq_m_n_s8(__a, __b, __c, __p)
+#define vmlasq_m_n_s32(__a, __b, __c, __p) __arm_vmlasq_m_n_s32(__a, __b, __c, __p)
+#define vmlasq_m_n_s16(__a, __b, __c, __p) __arm_vmlasq_m_n_s16(__a, __b, __c, __p)
+#define vmlasq_m_n_u8(__a, __b, __c, __p) __arm_vmlasq_m_n_u8(__a, __b, __c, __p)
+#define vmlasq_m_n_u32(__a, __b, __c, __p) __arm_vmlasq_m_n_u32(__a, __b, __c, __p)
+#define vmlasq_m_n_u16(__a, __b, __c, __p) __arm_vmlasq_m_n_u16(__a, __b, __c, __p)
+#define vmlsdavaq_p_s8(__a, __b, __c, __p) __arm_vmlsdavaq_p_s8(__a, __b, __c, __p)
+#define vmlsdavaq_p_s32(__a, __b, __c, __p) __arm_vmlsdavaq_p_s32(__a, __b, __c, __p)
+#define vmlsdavaq_p_s16(__a, __b, __c, __p) __arm_vmlsdavaq_p_s16(__a, __b, __c, __p)
+#define vmlsdavaxq_p_s8(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s8(__a, __b, __c, __p)
+#define vmlsdavaxq_p_s32(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s32(__a, __b, __c, __p)
+#define vmlsdavaxq_p_s16(__a, __b, __c, __p) __arm_vmlsdavaxq_p_s16(__a, __b, __c, __p)
+#define vmulhq_m_s8(__inactive, __a, __b, __p) __arm_vmulhq_m_s8(__inactive, __a, __b, __p)
+#define vmulhq_m_s32(__inactive, __a, __b, __p) __arm_vmulhq_m_s32(__inactive, __a, __b, __p)
+#define vmulhq_m_s16(__inactive, __a, __b, __p) __arm_vmulhq_m_s16(__inactive, __a, __b, __p)
+#define vmulhq_m_u8(__inactive, __a, __b, __p) __arm_vmulhq_m_u8(__inactive, __a, __b, __p)
+#define vmulhq_m_u32(__inactive, __a, __b, __p) __arm_vmulhq_m_u32(__inactive, __a, __b, __p)
+#define vmulhq_m_u16(__inactive, __a, __b, __p) __arm_vmulhq_m_u16(__inactive, __a, __b, __p)
+#define vmullbq_int_m_s8(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s8(__inactive, __a, __b, __p)
+#define vmullbq_int_m_s32(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s32(__inactive, __a, __b, __p)
+#define vmullbq_int_m_s16(__inactive, __a, __b, __p) __arm_vmullbq_int_m_s16(__inactive, __a, __b, __p)
+#define vmullbq_int_m_u8(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u8(__inactive, __a, __b, __p)
+#define vmullbq_int_m_u32(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u32(__inactive, __a, __b, __p)
+#define vmullbq_int_m_u16(__inactive, __a, __b, __p) __arm_vmullbq_int_m_u16(__inactive, __a, __b, __p)
+#define vmulltq_int_m_s8(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s8(__inactive, __a, __b, __p)
+#define vmulltq_int_m_s32(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s32(__inactive, __a, __b, __p)
+#define vmulltq_int_m_s16(__inactive, __a, __b, __p) __arm_vmulltq_int_m_s16(__inactive, __a, __b, __p)
+#define vmulltq_int_m_u8(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u8(__inactive, __a, __b, __p)
+#define vmulltq_int_m_u32(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u32(__inactive, __a, __b, __p)
+#define vmulltq_int_m_u16(__inactive, __a, __b, __p) __arm_vmulltq_int_m_u16(__inactive, __a, __b, __p)
+#define vmulq_m_n_s8(__inactive, __a, __b, __p) __arm_vmulq_m_n_s8(__inactive, __a, __b, __p)
+#define vmulq_m_n_s32(__inactive, __a, __b, __p) __arm_vmulq_m_n_s32(__inactive, __a, __b, __p)
+#define vmulq_m_n_s16(__inactive, __a, __b, __p) __arm_vmulq_m_n_s16(__inactive, __a, __b, __p)
+#define vmulq_m_n_u8(__inactive, __a, __b, __p) __arm_vmulq_m_n_u8(__inactive, __a, __b, __p)
+#define vmulq_m_n_u32(__inactive, __a, __b, __p) __arm_vmulq_m_n_u32(__inactive, __a, __b, __p)
+#define vmulq_m_n_u16(__inactive, __a, __b, __p) __arm_vmulq_m_n_u16(__inactive, __a, __b, __p)
+#define vmulq_m_s8(__inactive, __a, __b, __p) __arm_vmulq_m_s8(__inactive, __a, __b, __p)
+#define vmulq_m_s32(__inactive, __a, __b, __p) __arm_vmulq_m_s32(__inactive, __a, __b, __p)
+#define vmulq_m_s16(__inactive, __a, __b, __p) __arm_vmulq_m_s16(__inactive, __a, __b, __p)
+#define vmulq_m_u8(__inactive, __a, __b, __p) __arm_vmulq_m_u8(__inactive, __a, __b, __p)
+#define vmulq_m_u32(__inactive, __a, __b, __p) __arm_vmulq_m_u32(__inactive, __a, __b, __p)
+#define vmulq_m_u16(__inactive, __a, __b, __p) __arm_vmulq_m_u16(__inactive, __a, __b, __p)
+#define vornq_m_s8(__inactive, __a, __b, __p) __arm_vornq_m_s8(__inactive, __a, __b, __p)
+#define vornq_m_s32(__inactive, __a, __b, __p) __arm_vornq_m_s32(__inactive, __a, __b, __p)
+#define vornq_m_s16(__inactive, __a, __b, __p) __arm_vornq_m_s16(__inactive, __a, __b, __p)
+#define vornq_m_u8(__inactive, __a, __b, __p) __arm_vornq_m_u8(__inactive, __a, __b, __p)
+#define vornq_m_u32(__inactive, __a, __b, __p) __arm_vornq_m_u32(__inactive, __a, __b, __p)
+#define vornq_m_u16(__inactive, __a, __b, __p) __arm_vornq_m_u16(__inactive, __a, __b, __p)
+#define vorrq_m_s8(__inactive, __a, __b, __p) __arm_vorrq_m_s8(__inactive, __a, __b, __p)
+#define vorrq_m_s32(__inactive, __a, __b, __p) __arm_vorrq_m_s32(__inactive, __a, __b, __p)
+#define vorrq_m_s16(__inactive, __a, __b, __p) __arm_vorrq_m_s16(__inactive, __a, __b, __p)
+#define vorrq_m_u8(__inactive, __a, __b, __p) __arm_vorrq_m_u8(__inactive, __a, __b, __p)
+#define vorrq_m_u32(__inactive, __a, __b, __p) __arm_vorrq_m_u32(__inactive, __a, __b, __p)
+#define vorrq_m_u16(__inactive, __a, __b, __p) __arm_vorrq_m_u16(__inactive, __a, __b, __p)
+#define vqaddq_m_n_s8(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s8(__inactive, __a, __b, __p)
+#define vqaddq_m_n_s32(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s32(__inactive, __a, __b, __p)
+#define vqaddq_m_n_s16(__inactive, __a, __b, __p) __arm_vqaddq_m_n_s16(__inactive, __a, __b, __p)
+#define vqaddq_m_n_u8(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u8(__inactive, __a, __b, __p)
+#define vqaddq_m_n_u32(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u32(__inactive, __a, __b, __p)
+#define vqaddq_m_n_u16(__inactive, __a, __b, __p) __arm_vqaddq_m_n_u16(__inactive, __a, __b, __p)
+#define vqaddq_m_s8(__inactive, __a, __b, __p) __arm_vqaddq_m_s8(__inactive, __a, __b, __p)
+#define vqaddq_m_s32(__inactive, __a, __b, __p) __arm_vqaddq_m_s32(__inactive, __a, __b, __p)
+#define vqaddq_m_s16(__inactive, __a, __b, __p) __arm_vqaddq_m_s16(__inactive, __a, __b, __p)
+#define vqaddq_m_u8(__inactive, __a, __b, __p) __arm_vqaddq_m_u8(__inactive, __a, __b, __p)
+#define vqaddq_m_u32(__inactive, __a, __b, __p) __arm_vqaddq_m_u32(__inactive, __a, __b, __p)
+#define vqaddq_m_u16(__inactive, __a, __b, __p) __arm_vqaddq_m_u16(__inactive, __a, __b, __p)
+#define vqdmladhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s8(__inactive, __a, __b, __p)
+#define vqdmladhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s32(__inactive, __a, __b, __p)
+#define vqdmladhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmladhq_m_s16(__inactive, __a, __b, __p)
+#define vqdmladhxq_m_s8(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s8(__inactive, __a, __b, __p)
+#define vqdmladhxq_m_s32(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s32(__inactive, __a, __b, __p)
+#define vqdmladhxq_m_s16(__inactive, __a, __b, __p) __arm_vqdmladhxq_m_s16(__inactive, __a, __b, __p)
+#define vqdmlashq_m_n_s8(__a, __b, __c, __p) __arm_vqdmlashq_m_n_s8(__a, __b, __c, __p)
+#define vqdmlashq_m_n_s32(__a, __b, __c, __p) __arm_vqdmlashq_m_n_s32(__a, __b, __c, __p)
+#define vqdmlashq_m_n_s16(__a, __b, __c, __p) __arm_vqdmlashq_m_n_s16(__a, __b, __c, __p)
+#define vqdmlahq_m_n_s8(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s8(__a, __b, __c, __p)
+#define vqdmlahq_m_n_s32(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s32(__a, __b, __c, __p)
+#define vqdmlahq_m_n_s16(__a, __b, __c, __p) __arm_vqdmlahq_m_n_s16(__a, __b, __c, __p)
+#define vqdmlsdhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s8(__inactive, __a, __b, __p)
+#define vqdmlsdhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s32(__inactive, __a, __b, __p)
+#define vqdmlsdhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmlsdhq_m_s16(__inactive, __a, __b, __p)
+#define vqdmlsdhxq_m_s8(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s8(__inactive, __a, __b, __p)
+#define vqdmlsdhxq_m_s32(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s32(__inactive, __a, __b, __p)
+#define vqdmlsdhxq_m_s16(__inactive, __a, __b, __p) __arm_vqdmlsdhxq_m_s16(__inactive, __a, __b, __p)
+#define vqdmulhq_m_n_s8(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s8(__inactive, __a, __b, __p)
+#define vqdmulhq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s32(__inactive, __a, __b, __p)
+#define vqdmulhq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmulhq_m_n_s16(__inactive, __a, __b, __p)
+#define vqdmulhq_m_s8(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s8(__inactive, __a, __b, __p)
+#define vqdmulhq_m_s32(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s32(__inactive, __a, __b, __p)
+#define vqdmulhq_m_s16(__inactive, __a, __b, __p) __arm_vqdmulhq_m_s16(__inactive, __a, __b, __p)
+#define vqrdmladhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s8(__inactive, __a, __b, __p)
+#define vqrdmladhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s32(__inactive, __a, __b, __p)
+#define vqrdmladhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmladhq_m_s16(__inactive, __a, __b, __p)
+#define vqrdmladhxq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s8(__inactive, __a, __b, __p)
+#define vqrdmladhxq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s32(__inactive, __a, __b, __p)
+#define vqrdmladhxq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmladhxq_m_s16(__inactive, __a, __b, __p)
+#define vqrdmlahq_m_n_s8(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s8(__a, __b, __c, __p)
+#define vqrdmlahq_m_n_s32(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s32(__a, __b, __c, __p)
+#define vqrdmlahq_m_n_s16(__a, __b, __c, __p) __arm_vqrdmlahq_m_n_s16(__a, __b, __c, __p)
+#define vqrdmlashq_m_n_s8(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s8(__a, __b, __c, __p)
+#define vqrdmlashq_m_n_s32(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s32(__a, __b, __c, __p)
+#define vqrdmlashq_m_n_s16(__a, __b, __c, __p) __arm_vqrdmlashq_m_n_s16(__a, __b, __c, __p)
+#define vqrdmlsdhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s8(__inactive, __a, __b, __p)
+#define vqrdmlsdhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s32(__inactive, __a, __b, __p)
+#define vqrdmlsdhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmlsdhq_m_s16(__inactive, __a, __b, __p)
+#define vqrdmlsdhxq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s8(__inactive, __a, __b, __p)
+#define vqrdmlsdhxq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s32(__inactive, __a, __b, __p)
+#define vqrdmlsdhxq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmlsdhxq_m_s16(__inactive, __a, __b, __p)
+#define vqrdmulhq_m_n_s8(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s8(__inactive, __a, __b, __p)
+#define vqrdmulhq_m_n_s32(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s32(__inactive, __a, __b, __p)
+#define vqrdmulhq_m_n_s16(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_n_s16(__inactive, __a, __b, __p)
+#define vqrdmulhq_m_s8(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s8(__inactive, __a, __b, __p)
+#define vqrdmulhq_m_s32(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s32(__inactive, __a, __b, __p)
+#define vqrdmulhq_m_s16(__inactive, __a, __b, __p) __arm_vqrdmulhq_m_s16(__inactive, __a, __b, __p)
+#define vqrshlq_m_s8(__inactive, __a, __b, __p) __arm_vqrshlq_m_s8(__inactive, __a, __b, __p)
+#define vqrshlq_m_s32(__inactive, __a, __b, __p) __arm_vqrshlq_m_s32(__inactive, __a, __b, __p)
+#define vqrshlq_m_s16(__inactive, __a, __b, __p) __arm_vqrshlq_m_s16(__inactive, __a, __b, __p)
+#define vqrshlq_m_u8(__inactive, __a, __b, __p) __arm_vqrshlq_m_u8(__inactive, __a, __b, __p)
+#define vqrshlq_m_u32(__inactive, __a, __b, __p) __arm_vqrshlq_m_u32(__inactive, __a, __b, __p)
+#define vqrshlq_m_u16(__inactive, __a, __b, __p) __arm_vqrshlq_m_u16(__inactive, __a, __b, __p)
+#define vqshlq_m_n_s8(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s8(__inactive, __a, __imm, __p)
+#define vqshlq_m_n_s32(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s32(__inactive, __a, __imm, __p)
+#define vqshlq_m_n_s16(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_s16(__inactive, __a, __imm, __p)
+#define vqshlq_m_n_u8(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u8(__inactive, __a, __imm, __p)
+#define vqshlq_m_n_u32(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u32(__inactive, __a, __imm, __p)
+#define vqshlq_m_n_u16(__inactive, __a, __imm, __p) __arm_vqshlq_m_n_u16(__inactive, __a, __imm, __p)
+#define vqshlq_m_s8(__inactive, __a, __b, __p) __arm_vqshlq_m_s8(__inactive, __a, __b, __p)
+#define vqshlq_m_s32(__inactive, __a, __b, __p) __arm_vqshlq_m_s32(__inactive, __a, __b, __p)
+#define vqshlq_m_s16(__inactive, __a, __b, __p) __arm_vqshlq_m_s16(__inactive, __a, __b, __p)
+#define vqshlq_m_u8(__inactive, __a, __b, __p) __arm_vqshlq_m_u8(__inactive, __a, __b, __p)
+#define vqshlq_m_u32(__inactive, __a, __b, __p) __arm_vqshlq_m_u32(__inactive, __a, __b, __p)
+#define vqshlq_m_u16(__inactive, __a, __b, __p) __arm_vqshlq_m_u16(__inactive, __a, __b, __p)
+#define vqsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s8(__inactive, __a, __b, __p)
+#define vqsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s32(__inactive, __a, __b, __p)
+#define vqsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vqsubq_m_n_s16(__inactive, __a, __b, __p)
+#define vqsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u8(__inactive, __a, __b, __p)
+#define vqsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u32(__inactive, __a, __b, __p)
+#define vqsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vqsubq_m_n_u16(__inactive, __a, __b, __p)
+#define vqsubq_m_s8(__inactive, __a, __b, __p) __arm_vqsubq_m_s8(__inactive, __a, __b, __p)
+#define vqsubq_m_s32(__inactive, __a, __b, __p) __arm_vqsubq_m_s32(__inactive, __a, __b, __p)
+#define vqsubq_m_s16(__inactive, __a, __b, __p) __arm_vqsubq_m_s16(__inactive, __a, __b, __p)
+#define vqsubq_m_u8(__inactive, __a, __b, __p) __arm_vqsubq_m_u8(__inactive, __a, __b, __p)
+#define vqsubq_m_u32(__inactive, __a, __b, __p) __arm_vqsubq_m_u32(__inactive, __a, __b, __p)
+#define vqsubq_m_u16(__inactive, __a, __b, __p) __arm_vqsubq_m_u16(__inactive, __a, __b, __p)
+#define vrhaddq_m_s8(__inactive, __a, __b, __p) __arm_vrhaddq_m_s8(__inactive, __a, __b, __p)
+#define vrhaddq_m_s32(__inactive, __a, __b, __p) __arm_vrhaddq_m_s32(__inactive, __a, __b, __p)
+#define vrhaddq_m_s16(__inactive, __a, __b, __p) __arm_vrhaddq_m_s16(__inactive, __a, __b, __p)
+#define vrhaddq_m_u8(__inactive, __a, __b, __p) __arm_vrhaddq_m_u8(__inactive, __a, __b, __p)
+#define vrhaddq_m_u32(__inactive, __a, __b, __p) __arm_vrhaddq_m_u32(__inactive, __a, __b, __p)
+#define vrhaddq_m_u16(__inactive, __a, __b, __p) __arm_vrhaddq_m_u16(__inactive, __a, __b, __p)
+#define vrmulhq_m_s8(__inactive, __a, __b, __p) __arm_vrmulhq_m_s8(__inactive, __a, __b, __p)
+#define vrmulhq_m_s32(__inactive, __a, __b, __p) __arm_vrmulhq_m_s32(__inactive, __a, __b, __p)
+#define vrmulhq_m_s16(__inactive, __a, __b, __p) __arm_vrmulhq_m_s16(__inactive, __a, __b, __p)
+#define vrmulhq_m_u8(__inactive, __a, __b, __p) __arm_vrmulhq_m_u8(__inactive, __a, __b, __p)
+#define vrmulhq_m_u32(__inactive, __a, __b, __p) __arm_vrmulhq_m_u32(__inactive, __a, __b, __p)
+#define vrmulhq_m_u16(__inactive, __a, __b, __p) __arm_vrmulhq_m_u16(__inactive, __a, __b, __p)
+#define vrshlq_m_s8(__inactive, __a, __b, __p) __arm_vrshlq_m_s8(__inactive, __a, __b, __p)
+#define vrshlq_m_s32(__inactive, __a, __b, __p) __arm_vrshlq_m_s32(__inactive, __a, __b, __p)
+#define vrshlq_m_s16(__inactive, __a, __b, __p) __arm_vrshlq_m_s16(__inactive, __a, __b, __p)
+#define vrshlq_m_u8(__inactive, __a, __b, __p) __arm_vrshlq_m_u8(__inactive, __a, __b, __p)
+#define vrshlq_m_u32(__inactive, __a, __b, __p) __arm_vrshlq_m_u32(__inactive, __a, __b, __p)
+#define vrshlq_m_u16(__inactive, __a, __b, __p) __arm_vrshlq_m_u16(__inactive, __a, __b, __p)
+#define vrshrq_m_n_s8(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s8(__inactive, __a, __imm, __p)
+#define vrshrq_m_n_s32(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s32(__inactive, __a, __imm, __p)
+#define vrshrq_m_n_s16(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_s16(__inactive, __a, __imm, __p)
+#define vrshrq_m_n_u8(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u8(__inactive, __a, __imm, __p)
+#define vrshrq_m_n_u32(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u32(__inactive, __a, __imm, __p)
+#define vrshrq_m_n_u16(__inactive, __a, __imm, __p) __arm_vrshrq_m_n_u16(__inactive, __a, __imm, __p)
+#define vshlq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s8(__inactive, __a, __imm, __p)
+#define vshlq_m_n_s32(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s32(__inactive, __a, __imm, __p)
+#define vshlq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshlq_m_n_s16(__inactive, __a, __imm, __p)
+#define vshlq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u8(__inactive, __a, __imm, __p)
+#define vshlq_m_n_u32(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u32(__inactive, __a, __imm, __p)
+#define vshlq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshlq_m_n_u16(__inactive, __a, __imm, __p)
+#define vshrq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s8(__inactive, __a, __imm, __p)
+#define vshrq_m_n_s32(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s32(__inactive, __a, __imm, __p)
+#define vshrq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshrq_m_n_s16(__inactive, __a, __imm, __p)
+#define vshrq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u8(__inactive, __a, __imm, __p)
+#define vshrq_m_n_u32(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u32(__inactive, __a, __imm, __p)
+#define vshrq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshrq_m_n_u16(__inactive, __a, __imm, __p)
+#define vsliq_m_n_s8(__a, __b, __imm, __p) __arm_vsliq_m_n_s8(__a, __b, __imm, __p)
+#define vsliq_m_n_s32(__a, __b, __imm, __p) __arm_vsliq_m_n_s32(__a, __b, __imm, __p)
+#define vsliq_m_n_s16(__a, __b, __imm, __p) __arm_vsliq_m_n_s16(__a, __b, __imm, __p)
+#define vsliq_m_n_u8(__a, __b, __imm, __p) __arm_vsliq_m_n_u8(__a, __b, __imm, __p)
+#define vsliq_m_n_u32(__a, __b, __imm, __p) __arm_vsliq_m_n_u32(__a, __b, __imm, __p)
+#define vsliq_m_n_u16(__a, __b, __imm, __p) __arm_vsliq_m_n_u16(__a, __b, __imm, __p)
+#define vsubq_m_n_s8(__inactive, __a, __b, __p) __arm_vsubq_m_n_s8(__inactive, __a, __b, __p)
+#define vsubq_m_n_s32(__inactive, __a, __b, __p) __arm_vsubq_m_n_s32(__inactive, __a, __b, __p)
+#define vsubq_m_n_s16(__inactive, __a, __b, __p) __arm_vsubq_m_n_s16(__inactive, __a, __b, __p)
+#define vsubq_m_n_u8(__inactive, __a, __b, __p) __arm_vsubq_m_n_u8(__inactive, __a, __b, __p)
+#define vsubq_m_n_u32(__inactive, __a, __b, __p) __arm_vsubq_m_n_u32(__inactive, __a, __b, __p)
+#define vsubq_m_n_u16(__inactive, __a, __b, __p) __arm_vsubq_m_n_u16(__inactive, __a, __b, __p)
+#define vmlaldavaq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaq_p_s32(__a, __b, __c, __p)
+#define vmlaldavaq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaq_p_s16(__a, __b, __c, __p)
+#define vmlaldavaq_p_u32(__a, __b, __c, __p) __arm_vmlaldavaq_p_u32(__a, __b, __c, __p)
+#define vmlaldavaq_p_u16(__a, __b, __c, __p) __arm_vmlaldavaq_p_u16(__a, __b, __c, __p)
+#define vmlaldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s32(__a, __b, __c, __p)
+#define vmlaldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlaldavaxq_p_s16(__a, __b, __c, __p)
+#define vmlsldavaq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaq_p_s32(__a, __b, __c, __p)
+#define vmlsldavaq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaq_p_s16(__a, __b, __c, __p)
+#define vmlsldavaxq_p_s32(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s32(__a, __b, __c, __p)
+#define vmlsldavaxq_p_s16(__a, __b, __c, __p) __arm_vmlsldavaxq_p_s16(__a, __b, __c, __p)
+#define vmullbq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p8(__inactive, __a, __b, __p)
+#define vmullbq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmullbq_poly_m_p16(__inactive, __a, __b, __p)
+#define vmulltq_poly_m_p8(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p8(__inactive, __a, __b, __p)
+#define vmulltq_poly_m_p16(__inactive, __a, __b, __p) __arm_vmulltq_poly_m_p16(__inactive, __a, __b, __p)
+#define vqdmullbq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s32(__inactive, __a, __b, __p)
+#define vqdmullbq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_n_s16(__inactive, __a, __b, __p)
+#define vqdmullbq_m_s32(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s32(__inactive, __a, __b, __p)
+#define vqdmullbq_m_s16(__inactive, __a, __b, __p) __arm_vqdmullbq_m_s16(__inactive, __a, __b, __p)
+#define vqdmulltq_m_n_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s32(__inactive, __a, __b, __p)
+#define vqdmulltq_m_n_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_n_s16(__inactive, __a, __b, __p)
+#define vqdmulltq_m_s32(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s32(__inactive, __a, __b, __p)
+#define vqdmulltq_m_s16(__inactive, __a, __b, __p) __arm_vqdmulltq_m_s16(__inactive, __a, __b, __p)
+#define vqrshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_s32(__a, __b, __imm, __p)
+#define vqrshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_s16(__a, __b, __imm, __p)
+#define vqrshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_u32(__a, __b, __imm, __p)
+#define vqrshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vqrshrnbq_m_n_u16(__a, __b, __imm, __p)
+#define vqrshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_s32(__a, __b, __imm, __p)
+#define vqrshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_s16(__a, __b, __imm, __p)
+#define vqrshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_u32(__a, __b, __imm, __p)
+#define vqrshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vqrshrntq_m_n_u16(__a, __b, __imm, __p)
+#define vqrshrunbq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshrunbq_m_n_s32(__a, __b, __imm, __p)
+#define vqrshrunbq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshrunbq_m_n_s16(__a, __b, __imm, __p)
+#define vqrshruntq_m_n_s32(__a, __b, __imm, __p) __arm_vqrshruntq_m_n_s32(__a, __b, __imm, __p)
+#define vqrshruntq_m_n_s16(__a, __b, __imm, __p) __arm_vqrshruntq_m_n_s16(__a, __b, __imm, __p)
+#define vqshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_s32(__a, __b, __imm, __p)
+#define vqshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_s16(__a, __b, __imm, __p)
+#define vqshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_u32(__a, __b, __imm, __p)
+#define vqshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vqshrnbq_m_n_u16(__a, __b, __imm, __p)
+#define vqshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrntq_m_n_s32(__a, __b, __imm, __p)
+#define vqshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrntq_m_n_s16(__a, __b, __imm, __p)
+#define vqshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vqshrntq_m_n_u32(__a, __b, __imm, __p)
+#define vqshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vqshrntq_m_n_u16(__a, __b, __imm, __p)
+#define vqshrunbq_m_n_s32(__a, __b, __imm, __p) __arm_vqshrunbq_m_n_s32(__a, __b, __imm, __p)
+#define vqshrunbq_m_n_s16(__a, __b, __imm, __p) __arm_vqshrunbq_m_n_s16(__a, __b, __imm, __p)
+#define vqshruntq_m_n_s32(__a, __b, __imm, __p) __arm_vqshruntq_m_n_s32(__a, __b, __imm, __p)
+#define vqshruntq_m_n_s16(__a, __b, __imm, __p) __arm_vqshruntq_m_n_s16(__a, __b, __imm, __p)
+#define vrmlaldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_s32(__a, __b, __c, __p)
+#define vrmlaldavhaq_p_u32(__a, __b, __c, __p) __arm_vrmlaldavhaq_p_u32(__a, __b, __c, __p)
+#define vrmlaldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlaldavhaxq_p_s32(__a, __b, __c, __p)
+#define vrmlsldavhaq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaq_p_s32(__a, __b, __c, __p)
+#define vrmlsldavhaxq_p_s32(__a, __b, __c, __p) __arm_vrmlsldavhaxq_p_s32(__a, __b, __c, __p)
+#define vrshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_s32(__a, __b, __imm, __p)
+#define vrshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_s16(__a, __b, __imm, __p)
+#define vrshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_u32(__a, __b, __imm, __p)
+#define vrshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vrshrnbq_m_n_u16(__a, __b, __imm, __p)
+#define vrshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vrshrntq_m_n_s32(__a, __b, __imm, __p)
+#define vrshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vrshrntq_m_n_s16(__a, __b, __imm, __p)
+#define vrshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vrshrntq_m_n_u32(__a, __b, __imm, __p)
+#define vrshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vrshrntq_m_n_u16(__a, __b, __imm, __p)
+#define vshllbq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_s8(__inactive, __a, __imm, __p)
+#define vshllbq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_s16(__inactive, __a, __imm, __p)
+#define vshllbq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_u8(__inactive, __a, __imm, __p)
+#define vshllbq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshllbq_m_n_u16(__inactive, __a, __imm, __p)
+#define vshlltq_m_n_s8(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_s8(__inactive, __a, __imm, __p)
+#define vshlltq_m_n_s16(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_s16(__inactive, __a, __imm, __p)
+#define vshlltq_m_n_u8(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_u8(__inactive, __a, __imm, __p)
+#define vshlltq_m_n_u16(__inactive, __a, __imm, __p) __arm_vshlltq_m_n_u16(__inactive, __a, __imm, __p)
+#define vshrnbq_m_n_s32(__a, __b, __imm, __p) __arm_vshrnbq_m_n_s32(__a, __b, __imm, __p)
+#define vshrnbq_m_n_s16(__a, __b, __imm, __p) __arm_vshrnbq_m_n_s16(__a, __b, __imm, __p)
+#define vshrnbq_m_n_u32(__a, __b, __imm, __p) __arm_vshrnbq_m_n_u32(__a, __b, __imm, __p)
+#define vshrnbq_m_n_u16(__a, __b, __imm, __p) __arm_vshrnbq_m_n_u16(__a, __b, __imm, __p)
+#define vshrntq_m_n_s32(__a, __b, __imm, __p) __arm_vshrntq_m_n_s32(__a, __b, __imm, __p)
+#define vshrntq_m_n_s16(__a, __b, __imm, __p) __arm_vshrntq_m_n_s16(__a, __b, __imm, __p)
+#define vshrntq_m_n_u32(__a, __b, __imm, __p) __arm_vshrntq_m_n_u32(__a, __b, __imm, __p)
+#define vshrntq_m_n_u16(__a, __b, __imm, __p) __arm_vshrntq_m_n_u16(__a, __b, __imm, __p)
+#define vabdq_m_f32(__inactive, __a, __b, __p) __arm_vabdq_m_f32(__inactive, __a, __b, __p)
+#define vabdq_m_f16(__inactive, __a, __b, __p) __arm_vabdq_m_f16(__inactive, __a, __b, __p)
+#define vaddq_m_f32(__inactive, __a, __b, __p) __arm_vaddq_m_f32(__inactive, __a, __b, __p)
+#define vaddq_m_f16(__inactive, __a, __b, __p) __arm_vaddq_m_f16(__inactive, __a, __b, __p)
+#define vaddq_m_n_f32(__inactive, __a, __b, __p) __arm_vaddq_m_n_f32(__inactive, __a, __b, __p)
+#define vaddq_m_n_f16(__inactive, __a, __b, __p) __arm_vaddq_m_n_f16(__inactive, __a, __b, __p)
+#define vandq_m_f32(__inactive, __a, __b, __p) __arm_vandq_m_f32(__inactive, __a, __b, __p)
+#define vandq_m_f16(__inactive, __a, __b, __p) __arm_vandq_m_f16(__inactive, __a, __b, __p)
+#define vbicq_m_f32(__inactive, __a, __b, __p) __arm_vbicq_m_f32(__inactive, __a, __b, __p)
+#define vbicq_m_f16(__inactive, __a, __b, __p) __arm_vbicq_m_f16(__inactive, __a, __b, __p)
+#define vbrsrq_m_n_f32(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f32(__inactive, __a, __b, __p)
+#define vbrsrq_m_n_f16(__inactive, __a, __b, __p) __arm_vbrsrq_m_n_f16(__inactive, __a, __b, __p)
+#define vcaddq_rot270_m_f32(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_f32(__inactive, __a, __b, __p)
+#define vcaddq_rot270_m_f16(__inactive, __a, __b, __p) __arm_vcaddq_rot270_m_f16(__inactive, __a, __b, __p)
+#define vcaddq_rot90_m_f32(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_f32(__inactive, __a, __b, __p)
+#define vcaddq_rot90_m_f16(__inactive, __a, __b, __p) __arm_vcaddq_rot90_m_f16(__inactive, __a, __b, __p)
+#define vcmlaq_m_f32(__a, __b, __c, __p) __arm_vcmlaq_m_f32(__a, __b, __c, __p)
+#define vcmlaq_m_f16(__a, __b, __c, __p) __arm_vcmlaq_m_f16(__a, __b, __c, __p)
+#define vcmlaq_rot180_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot180_m_f32(__a, __b, __c, __p)
+#define vcmlaq_rot180_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot180_m_f16(__a, __b, __c, __p)
+#define vcmlaq_rot270_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot270_m_f32(__a, __b, __c, __p)
+#define vcmlaq_rot270_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot270_m_f16(__a, __b, __c, __p)
+#define vcmlaq_rot90_m_f32(__a, __b, __c, __p) __arm_vcmlaq_rot90_m_f32(__a, __b, __c, __p)
+#define vcmlaq_rot90_m_f16(__a, __b, __c, __p) __arm_vcmlaq_rot90_m_f16(__a, __b, __c, __p)
+#define vcmulq_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_m_f32(__inactive, __a, __b, __p)
+#define vcmulq_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_m_f16(__inactive, __a, __b, __p)
+#define vcmulq_rot180_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m_f32(__inactive, __a, __b, __p)
+#define vcmulq_rot180_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot180_m_f16(__inactive, __a, __b, __p)
+#define vcmulq_rot270_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m_f32(__inactive, __a, __b, __p)
+#define vcmulq_rot270_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot270_m_f16(__inactive, __a, __b, __p)
+#define vcmulq_rot90_m_f32(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m_f32(__inactive, __a, __b, __p)
+#define vcmulq_rot90_m_f16(__inactive, __a, __b, __p) __arm_vcmulq_rot90_m_f16(__inactive, __a, __b, __p)
+#define vcvtq_m_n_s32_f32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_s32_f32(__inactive, __a, __imm6, __p)
+#define vcvtq_m_n_s16_f16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_s16_f16(__inactive, __a, __imm6, __p)
+#define vcvtq_m_n_u32_f32(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_u32_f32(__inactive, __a, __imm6, __p)
+#define vcvtq_m_n_u16_f16(__inactive, __a, __imm6, __p) __arm_vcvtq_m_n_u16_f16(__inactive, __a, __imm6, __p)
+#define veorq_m_f32(__inactive, __a, __b, __p) __arm_veorq_m_f32(__inactive, __a, __b, __p)
+#define veorq_m_f16(__inactive, __a, __b, __p) __arm_veorq_m_f16(__inactive, __a, __b, __p)
+#define vfmaq_m_f32(__a, __b, __c, __p) __arm_vfmaq_m_f32(__a, __b, __c, __p)
+#define vfmaq_m_f16(__a, __b, __c, __p) __arm_vfmaq_m_f16(__a, __b, __c, __p)
+#define vfmaq_m_n_f32(__a, __b, __c, __p) __arm_vfmaq_m_n_f32(__a, __b, __c, __p)
+#define vfmaq_m_n_f16(__a, __b, __c, __p) __arm_vfmaq_m_n_f16(__a, __b, __c, __p)
+#define vfmasq_m_n_f32(__a, __b, __c, __p) __arm_vfmasq_m_n_f32(__a, __b, __c, __p)
+#define vfmasq_m_n_f16(__a, __b, __c, __p) __arm_vfmasq_m_n_f16(__a, __b, __c, __p)
+#define vfmsq_m_f32(__a, __b, __c, __p) __arm_vfmsq_m_f32(__a, __b, __c, __p)
+#define vfmsq_m_f16(__a, __b, __c, __p) __arm_vfmsq_m_f16(__a, __b, __c, __p)
+#define vmaxnmq_m_f32(__inactive, __a, __b, __p) __arm_vmaxnmq_m_f32(__inactive, __a, __b, __p)
+#define vmaxnmq_m_f16(__inactive, __a, __b, __p) __arm_vmaxnmq_m_f16(__inactive, __a, __b, __p)
+#define vminnmq_m_f32(__inactive, __a, __b, __p) __arm_vminnmq_m_f32(__inactive, __a, __b, __p)
+#define vminnmq_m_f16(__inactive, __a, __b, __p) __arm_vminnmq_m_f16(__inactive, __a, __b, __p)
+#define vmulq_m_f32(__inactive, __a, __b, __p) __arm_vmulq_m_f32(__inactive, __a, __b, __p)
+#define vmulq_m_f16(__inactive, __a, __b, __p) __arm_vmulq_m_f16(__inactive, __a, __b, __p)
+#define vmulq_m_n_f32(__inactive, __a, __b, __p) __arm_vmulq_m_n_f32(__inactive, __a, __b, __p)
+#define vmulq_m_n_f16(__inactive, __a, __b, __p) __arm_vmulq_m_n_f16(__inactive, __a, __b, __p)
+#define vornq_m_f32(__inactive, __a, __b, __p) __arm_vornq_m_f32(__inactive, __a, __b, __p)
+#define vornq_m_f16(__inactive, __a, __b, __p) __arm_vornq_m_f16(__inactive, __a, __b, __p)
+#define vorrq_m_f32(__inactive, __a, __b, __p) __arm_vorrq_m_f32(__inactive, __a, __b, __p)
+#define vorrq_m_f16(__inactive, __a, __b, __p) __arm_vorrq_m_f16(__inactive, __a, __b, __p)
+#define vsubq_m_f32(__inactive, __a, __b, __p) __arm_vsubq_m_f32(__inactive, __a, __b, __p)
+#define vsubq_m_f16(__inactive, __a, __b, __p) __arm_vsubq_m_f16(__inactive, __a, __b, __p)
+#define vsubq_m_n_f32(__inactive, __a, __b, __p) __arm_vsubq_m_n_f32(__inactive, __a, __b, __p)
+#define vsubq_m_n_f16(__inactive, __a, __b, __p) __arm_vsubq_m_n_f16(__inactive, __a, __b, __p)
+#define vstrbq_s8( __addr, __value) __arm_vstrbq_s8( __addr, __value)
+#define vstrbq_u8( __addr, __value) __arm_vstrbq_u8( __addr, __value)
+#define vstrbq_u16( __addr, __value) __arm_vstrbq_u16( __addr, __value)
+#define vstrbq_scatter_offset_s8( __base, __offset, __value) __arm_vstrbq_scatter_offset_s8( __base, __offset, __value)
+#define vstrbq_scatter_offset_u8( __base, __offset, __value) __arm_vstrbq_scatter_offset_u8( __base, __offset, __value)
+#define vstrbq_scatter_offset_u16( __base, __offset, __value) __arm_vstrbq_scatter_offset_u16( __base, __offset, __value)
+#define vstrbq_s16( __addr, __value) __arm_vstrbq_s16( __addr, __value)
+#define vstrbq_u32( __addr, __value) __arm_vstrbq_u32( __addr, __value)
+#define vstrbq_scatter_offset_s16( __base, __offset, __value) __arm_vstrbq_scatter_offset_s16( __base, __offset, __value)
+#define vstrbq_scatter_offset_u32( __base, __offset, __value) __arm_vstrbq_scatter_offset_u32( __base, __offset, __value)
+#define vstrbq_s32( __addr, __value) __arm_vstrbq_s32( __addr, __value)
+#define vstrbq_scatter_offset_s32( __base, __offset, __value) __arm_vstrbq_scatter_offset_s32( __base, __offset, __value)
+#define vstrwq_scatter_base_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_s32(__addr, __offset, __value)
+#define vstrwq_scatter_base_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_u32(__addr, __offset, __value)
+#define vldrbq_gather_offset_u8(__base, __offset) __arm_vldrbq_gather_offset_u8(__base, __offset)
+#define vldrbq_gather_offset_s8(__base, __offset) __arm_vldrbq_gather_offset_s8(__base, __offset)
+#define vldrbq_s8(__base) __arm_vldrbq_s8(__base)
+#define vldrbq_u8(__base) __arm_vldrbq_u8(__base)
+#define vldrbq_gather_offset_u16(__base, __offset) __arm_vldrbq_gather_offset_u16(__base, __offset)
+#define vldrbq_gather_offset_s16(__base, __offset) __arm_vldrbq_gather_offset_s16(__base, __offset)
+#define vldrbq_s16(__base) __arm_vldrbq_s16(__base)
+#define vldrbq_u16(__base) __arm_vldrbq_u16(__base)
+#define vldrbq_gather_offset_u32(__base, __offset) __arm_vldrbq_gather_offset_u32(__base, __offset)
+#define vldrbq_gather_offset_s32(__base, __offset) __arm_vldrbq_gather_offset_s32(__base, __offset)
+#define vldrbq_s32(__base) __arm_vldrbq_s32(__base)
+#define vldrbq_u32(__base) __arm_vldrbq_u32(__base)
+#define vldrwq_gather_base_s32(__addr, __offset) __arm_vldrwq_gather_base_s32(__addr, __offset)
+#define vldrwq_gather_base_u32(__addr, __offset) __arm_vldrwq_gather_base_u32(__addr, __offset)
+#define vstrbq_p_s8( __addr, __value, __p) __arm_vstrbq_p_s8( __addr, __value, __p)
+#define vstrbq_p_s32( __addr, __value, __p) __arm_vstrbq_p_s32( __addr, __value, __p)
+#define vstrbq_p_s16( __addr, __value, __p) __arm_vstrbq_p_s16( __addr, __value, __p)
+#define vstrbq_p_u8( __addr, __value, __p) __arm_vstrbq_p_u8( __addr, __value, __p)
+#define vstrbq_p_u32( __addr, __value, __p) __arm_vstrbq_p_u32( __addr, __value, __p)
+#define vstrbq_p_u16( __addr, __value, __p) __arm_vstrbq_p_u16( __addr, __value, __p)
+#define vstrbq_scatter_offset_p_s8( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s8( __base, __offset, __value, __p)
+#define vstrbq_scatter_offset_p_s32( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s32( __base, __offset, __value, __p)
+#define vstrbq_scatter_offset_p_s16( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_s16( __base, __offset, __value, __p)
+#define vstrbq_scatter_offset_p_u8( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u8( __base, __offset, __value, __p)
+#define vstrbq_scatter_offset_p_u32( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u32( __base, __offset, __value, __p)
+#define vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrbq_scatter_offset_p_u16( __base, __offset, __value, __p)
+#define vstrwq_scatter_base_p_s32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_s32(__addr, __offset, __value, __p)
+#define vstrwq_scatter_base_p_u32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_u32(__addr, __offset, __value, __p)
+#define vldrbq_gather_offset_z_s16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s16(__base, __offset, __p)
+#define vldrbq_gather_offset_z_u8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u8(__base, __offset, __p)
+#define vldrbq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s32(__base, __offset, __p)
+#define vldrbq_gather_offset_z_u16(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u16(__base, __offset, __p)
+#define vldrbq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrbq_gather_offset_z_u32(__base, __offset, __p)
+#define vldrbq_gather_offset_z_s8(__base, __offset, __p) __arm_vldrbq_gather_offset_z_s8(__base, __offset, __p)
+#define vldrbq_z_s16(__base, __p) __arm_vldrbq_z_s16(__base, __p)
+#define vldrbq_z_u8(__base, __p) __arm_vldrbq_z_u8(__base, __p)
+#define vldrbq_z_s8(__base, __p) __arm_vldrbq_z_s8(__base, __p)
+#define vldrbq_z_s32(__base, __p) __arm_vldrbq_z_s32(__base, __p)
+#define vldrbq_z_u16(__base, __p) __arm_vldrbq_z_u16(__base, __p)
+#define vldrbq_z_u32(__base, __p) __arm_vldrbq_z_u32(__base, __p)
+#define vldrwq_gather_base_z_u32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_u32(__addr, __offset, __p)
+#define vldrwq_gather_base_z_s32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_s32(__addr, __offset, __p)
+#define vld1q_s8(__base) __arm_vld1q_s8(__base)
+#define vld1q_s32(__base) __arm_vld1q_s32(__base)
+#define vld1q_s16(__base) __arm_vld1q_s16(__base)
+#define vld1q_u8(__base) __arm_vld1q_u8(__base)
+#define vld1q_u32(__base) __arm_vld1q_u32(__base)
+#define vld1q_u16(__base) __arm_vld1q_u16(__base)
+#define vldrhq_gather_offset_s32(__base, __offset) __arm_vldrhq_gather_offset_s32(__base, __offset)
+#define vldrhq_gather_offset_s16(__base, __offset) __arm_vldrhq_gather_offset_s16(__base, __offset)
+#define vldrhq_gather_offset_u32(__base, __offset) __arm_vldrhq_gather_offset_u32(__base, __offset)
+#define vldrhq_gather_offset_u16(__base, __offset) __arm_vldrhq_gather_offset_u16(__base, __offset)
+#define vldrhq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrhq_gather_offset_z_s32(__base, __offset, __p)
+#define vldrhq_gather_offset_z_s16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_s16(__base, __offset, __p)
+#define vldrhq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrhq_gather_offset_z_u32(__base, __offset, __p)
+#define vldrhq_gather_offset_z_u16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_u16(__base, __offset, __p)
+#define vldrhq_gather_shifted_offset_s32(__base, __offset) __arm_vldrhq_gather_shifted_offset_s32(__base, __offset)
+#define vldrhq_gather_shifted_offset_s16(__base, __offset) __arm_vldrhq_gather_shifted_offset_s16(__base, __offset)
+#define vldrhq_gather_shifted_offset_u32(__base, __offset) __arm_vldrhq_gather_shifted_offset_u32(__base, __offset)
+#define vldrhq_gather_shifted_offset_u16(__base, __offset) __arm_vldrhq_gather_shifted_offset_u16(__base, __offset)
+#define vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s32(__base, __offset, __p)
+#define vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_s16(__base, __offset, __p)
+#define vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u32(__base, __offset, __p)
+#define vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_u16(__base, __offset, __p)
+#define vldrhq_s32(__base) __arm_vldrhq_s32(__base)
+#define vldrhq_s16(__base) __arm_vldrhq_s16(__base)
+#define vldrhq_u32(__base) __arm_vldrhq_u32(__base)
+#define vldrhq_u16(__base) __arm_vldrhq_u16(__base)
+#define vldrhq_z_s32(__base, __p) __arm_vldrhq_z_s32(__base, __p)
+#define vldrhq_z_s16(__base, __p) __arm_vldrhq_z_s16(__base, __p)
+#define vldrhq_z_u32(__base, __p) __arm_vldrhq_z_u32(__base, __p)
+#define vldrhq_z_u16(__base, __p) __arm_vldrhq_z_u16(__base, __p)
+#define vldrwq_s32(__base) __arm_vldrwq_s32(__base)
+#define vldrwq_u32(__base) __arm_vldrwq_u32(__base)
+#define vldrwq_z_s32(__base, __p) __arm_vldrwq_z_s32(__base, __p)
+#define vldrwq_z_u32(__base, __p) __arm_vldrwq_z_u32(__base, __p)
+#define vld1q_f32(__base) __arm_vld1q_f32(__base)
+#define vld1q_f16(__base) __arm_vld1q_f16(__base)
+#define vldrhq_f16(__base) __arm_vldrhq_f16(__base)
+#define vldrhq_z_f16(__base, __p) __arm_vldrhq_z_f16(__base, __p)
+#define vldrwq_f32(__base) __arm_vldrwq_f32(__base)
+#define vldrwq_z_f32(__base, __p) __arm_vldrwq_z_f32(__base, __p)
+#define vldrdq_gather_base_s64(__addr, __offset) __arm_vldrdq_gather_base_s64(__addr, __offset)
+#define vldrdq_gather_base_u64(__addr, __offset) __arm_vldrdq_gather_base_u64(__addr, __offset)
+#define vldrdq_gather_base_z_s64(__addr, __offset, __p) __arm_vldrdq_gather_base_z_s64(__addr, __offset, __p)
+#define vldrdq_gather_base_z_u64(__addr, __offset, __p) __arm_vldrdq_gather_base_z_u64(__addr, __offset, __p)
+#define vldrdq_gather_offset_s64(__base, __offset) __arm_vldrdq_gather_offset_s64(__base, __offset)
+#define vldrdq_gather_offset_u64(__base, __offset) __arm_vldrdq_gather_offset_u64(__base, __offset)
+#define vldrdq_gather_offset_z_s64(__base, __offset, __p) __arm_vldrdq_gather_offset_z_s64(__base, __offset, __p)
+#define vldrdq_gather_offset_z_u64(__base, __offset, __p) __arm_vldrdq_gather_offset_z_u64(__base, __offset, __p)
+#define vldrdq_gather_shifted_offset_s64(__base, __offset) __arm_vldrdq_gather_shifted_offset_s64(__base, __offset)
+#define vldrdq_gather_shifted_offset_u64(__base, __offset) __arm_vldrdq_gather_shifted_offset_u64(__base, __offset)
+#define vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_s64(__base, __offset, __p)
+#define vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p) __arm_vldrdq_gather_shifted_offset_z_u64(__base, __offset, __p)
+#define vldrhq_gather_offset_f16(__base, __offset) __arm_vldrhq_gather_offset_f16(__base, __offset)
+#define vldrhq_gather_offset_z_f16(__base, __offset, __p) __arm_vldrhq_gather_offset_z_f16(__base, __offset, __p)
+#define vldrhq_gather_shifted_offset_f16(__base, __offset) __arm_vldrhq_gather_shifted_offset_f16(__base, __offset)
+#define vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p) __arm_vldrhq_gather_shifted_offset_z_f16(__base, __offset, __p)
+#define vldrwq_gather_base_f32(__addr, __offset) __arm_vldrwq_gather_base_f32(__addr, __offset)
+#define vldrwq_gather_base_z_f32(__addr, __offset, __p) __arm_vldrwq_gather_base_z_f32(__addr, __offset, __p)
+#define vldrwq_gather_offset_f32(__base, __offset) __arm_vldrwq_gather_offset_f32(__base, __offset)
+#define vldrwq_gather_offset_s32(__base, __offset) __arm_vldrwq_gather_offset_s32(__base, __offset)
+#define vldrwq_gather_offset_u32(__base, __offset) __arm_vldrwq_gather_offset_u32(__base, __offset)
+#define vldrwq_gather_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_f32(__base, __offset, __p)
+#define vldrwq_gather_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_s32(__base, __offset, __p)
+#define vldrwq_gather_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_offset_z_u32(__base, __offset, __p)
+#define vldrwq_gather_shifted_offset_f32(__base, __offset) __arm_vldrwq_gather_shifted_offset_f32(__base, __offset)
+#define vldrwq_gather_shifted_offset_s32(__base, __offset) __arm_vldrwq_gather_shifted_offset_s32(__base, __offset)
+#define vldrwq_gather_shifted_offset_u32(__base, __offset) __arm_vldrwq_gather_shifted_offset_u32(__base, __offset)
+#define vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_f32(__base, __offset, __p)
+#define vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_s32(__base, __offset, __p)
+#define vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p) __arm_vldrwq_gather_shifted_offset_z_u32(__base, __offset, __p)
+#define vst1q_f32(__addr, __value) __arm_vst1q_f32(__addr, __value)
+#define vst1q_f16(__addr, __value) __arm_vst1q_f16(__addr, __value)
+#define vst1q_s8(__addr, __value) __arm_vst1q_s8(__addr, __value)
+#define vst1q_s32(__addr, __value) __arm_vst1q_s32(__addr, __value)
+#define vst1q_s16(__addr, __value) __arm_vst1q_s16(__addr, __value)
+#define vst1q_u8(__addr, __value) __arm_vst1q_u8(__addr, __value)
+#define vst1q_u32(__addr, __value) __arm_vst1q_u32(__addr, __value)
+#define vst1q_u16(__addr, __value) __arm_vst1q_u16(__addr, __value)
+#define vstrhq_f16(__addr, __value) __arm_vstrhq_f16(__addr, __value)
+#define vstrhq_scatter_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_offset_s32( __base, __offset, __value)
+#define vstrhq_scatter_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_offset_s16( __base, __offset, __value)
+#define vstrhq_scatter_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_offset_u32( __base, __offset, __value)
+#define vstrhq_scatter_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_offset_u16( __base, __offset, __value)
+#define vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s32( __base, __offset, __value, __p)
+#define vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_s16( __base, __offset, __value, __p)
+#define vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u32( __base, __offset, __value, __p)
+#define vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_u16( __base, __offset, __value, __p)
+#define vstrhq_scatter_shifted_offset_s32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s32( __base, __offset, __value)
+#define vstrhq_scatter_shifted_offset_s16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_s16( __base, __offset, __value)
+#define vstrhq_scatter_shifted_offset_u32( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u32( __base, __offset, __value)
+#define vstrhq_scatter_shifted_offset_u16( __base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_u16( __base, __offset, __value)
+#define vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s32( __base, __offset, __value, __p)
+#define vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_s16( __base, __offset, __value, __p)
+#define vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u32( __base, __offset, __value, __p)
+#define vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_u16( __base, __offset, __value, __p)
+#define vstrhq_s32(__addr, __value) __arm_vstrhq_s32(__addr, __value)
+#define vstrhq_s16(__addr, __value) __arm_vstrhq_s16(__addr, __value)
+#define vstrhq_u32(__addr, __value) __arm_vstrhq_u32(__addr, __value)
+#define vstrhq_u16(__addr, __value) __arm_vstrhq_u16(__addr, __value)
+#define vstrhq_p_f16(__addr, __value, __p) __arm_vstrhq_p_f16(__addr, __value, __p)
+#define vstrhq_p_s32(__addr, __value, __p) __arm_vstrhq_p_s32(__addr, __value, __p)
+#define vstrhq_p_s16(__addr, __value, __p) __arm_vstrhq_p_s16(__addr, __value, __p)
+#define vstrhq_p_u32(__addr, __value, __p) __arm_vstrhq_p_u32(__addr, __value, __p)
+#define vstrhq_p_u16(__addr, __value, __p) __arm_vstrhq_p_u16(__addr, __value, __p)
+#define vstrwq_f32(__addr, __value) __arm_vstrwq_f32(__addr, __value)
+#define vstrwq_s32(__addr, __value) __arm_vstrwq_s32(__addr, __value)
+#define vstrwq_u32(__addr, __value) __arm_vstrwq_u32(__addr, __value)
+#define vstrwq_p_f32(__addr, __value, __p) __arm_vstrwq_p_f32(__addr, __value, __p)
+#define vstrwq_p_s32(__addr, __value, __p) __arm_vstrwq_p_s32(__addr, __value, __p)
+#define vstrwq_p_u32(__addr, __value, __p) __arm_vstrwq_p_u32(__addr, __value, __p)
+#define vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_s64(__addr, __offset, __value, __p)
+#define vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_p_u64(__addr, __offset, __value, __p)
+#define vstrdq_scatter_base_s64(__addr, __offset, __value) __arm_vstrdq_scatter_base_s64(__addr, __offset, __value)
+#define vstrdq_scatter_base_u64(__addr, __offset, __value) __arm_vstrdq_scatter_base_u64(__addr, __offset, __value)
+#define vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_s64(__base, __offset, __value, __p)
+#define vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_offset_p_u64(__base, __offset, __value, __p)
+#define vstrdq_scatter_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_offset_s64(__base, __offset, __value)
+#define vstrdq_scatter_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_offset_u64(__base, __offset, __value)
+#define vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_s64(__base, __offset, __value, __p)
+#define vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p) __arm_vstrdq_scatter_shifted_offset_p_u64(__base, __offset, __value, __p)
+#define vstrdq_scatter_shifted_offset_s64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_s64(__base, __offset, __value)
+#define vstrdq_scatter_shifted_offset_u64(__base, __offset, __value) __arm_vstrdq_scatter_shifted_offset_u64(__base, __offset, __value)
+#define vstrhq_scatter_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_offset_f16(__base, __offset, __value)
+#define vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_offset_p_f16(__base, __offset, __value, __p)
+#define vstrhq_scatter_shifted_offset_f16(__base, __offset, __value) __arm_vstrhq_scatter_shifted_offset_f16(__base, __offset, __value)
+#define vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p) __arm_vstrhq_scatter_shifted_offset_p_f16(__base, __offset, __value, __p)
+#define vstrwq_scatter_base_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_f32(__addr, __offset, __value)
+#define vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_p_f32(__addr, __offset, __value, __p)
+#define vstrwq_scatter_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_offset_f32(__base, __offset, __value)
+#define vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_f32(__base, __offset, __value, __p)
+#define vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_s32(__base, __offset, __value, __p)
+#define vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_offset_p_u32(__base, __offset, __value, __p)
+#define vstrwq_scatter_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_offset_s32(__base, __offset, __value)
+#define vstrwq_scatter_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_offset_u32(__base, __offset, __value)
+#define vstrwq_scatter_shifted_offset_f32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_f32(__base, __offset, __value)
+#define vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_f32(__base, __offset, __value, __p)
+#define vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_s32(__base, __offset, __value, __p)
+#define vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p) __arm_vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p)
+#define vstrwq_scatter_shifted_offset_s32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_s32(__base, __offset, __value)
+#define vstrwq_scatter_shifted_offset_u32(__base, __offset, __value) __arm_vstrwq_scatter_shifted_offset_u32(__base, __offset, __value)
+#define vaddq_s8(__a, __b) __arm_vaddq_s8(__a, __b)
+#define vaddq_s16(__a, __b) __arm_vaddq_s16(__a, __b)
+#define vaddq_s32(__a, __b) __arm_vaddq_s32(__a, __b)
+#define vaddq_u8(__a, __b) __arm_vaddq_u8(__a, __b)
+#define vaddq_u16(__a, __b) __arm_vaddq_u16(__a, __b)
+#define vaddq_u32(__a, __b) __arm_vaddq_u32(__a, __b)
+#define vaddq_f16(__a, __b) __arm_vaddq_f16(__a, __b)
+#define vaddq_f32(__a, __b) __arm_vaddq_f32(__a, __b)
+#define vreinterpretq_s16_s32(__a) __arm_vreinterpretq_s16_s32(__a)
+#define vreinterpretq_s16_s64(__a) __arm_vreinterpretq_s16_s64(__a)
+#define vreinterpretq_s16_s8(__a) __arm_vreinterpretq_s16_s8(__a)
+#define vreinterpretq_s16_u16(__a) __arm_vreinterpretq_s16_u16(__a)
+#define vreinterpretq_s16_u32(__a) __arm_vreinterpretq_s16_u32(__a)
+#define vreinterpretq_s16_u64(__a) __arm_vreinterpretq_s16_u64(__a)
+#define vreinterpretq_s16_u8(__a) __arm_vreinterpretq_s16_u8(__a)
+#define vreinterpretq_s32_s16(__a) __arm_vreinterpretq_s32_s16(__a)
+#define vreinterpretq_s32_s64(__a) __arm_vreinterpretq_s32_s64(__a)
+#define vreinterpretq_s32_s8(__a) __arm_vreinterpretq_s32_s8(__a)
+#define vreinterpretq_s32_u16(__a) __arm_vreinterpretq_s32_u16(__a)
+#define vreinterpretq_s32_u32(__a) __arm_vreinterpretq_s32_u32(__a)
+#define vreinterpretq_s32_u64(__a) __arm_vreinterpretq_s32_u64(__a)
+#define vreinterpretq_s32_u8(__a) __arm_vreinterpretq_s32_u8(__a)
+#define vreinterpretq_s64_s16(__a) __arm_vreinterpretq_s64_s16(__a)
+#define vreinterpretq_s64_s32(__a) __arm_vreinterpretq_s64_s32(__a)
+#define vreinterpretq_s64_s8(__a) __arm_vreinterpretq_s64_s8(__a)
+#define vreinterpretq_s64_u16(__a) __arm_vreinterpretq_s64_u16(__a)
+#define vreinterpretq_s64_u32(__a) __arm_vreinterpretq_s64_u32(__a)
+#define vreinterpretq_s64_u64(__a) __arm_vreinterpretq_s64_u64(__a)
+#define vreinterpretq_s64_u8(__a) __arm_vreinterpretq_s64_u8(__a)
+#define vreinterpretq_s8_s16(__a) __arm_vreinterpretq_s8_s16(__a)
+#define vreinterpretq_s8_s32(__a) __arm_vreinterpretq_s8_s32(__a)
+#define vreinterpretq_s8_s64(__a) __arm_vreinterpretq_s8_s64(__a)
+#define vreinterpretq_s8_u16(__a) __arm_vreinterpretq_s8_u16(__a)
+#define vreinterpretq_s8_u32(__a) __arm_vreinterpretq_s8_u32(__a)
+#define vreinterpretq_s8_u64(__a) __arm_vreinterpretq_s8_u64(__a)
+#define vreinterpretq_s8_u8(__a) __arm_vreinterpretq_s8_u8(__a)
+#define vreinterpretq_u16_s16(__a) __arm_vreinterpretq_u16_s16(__a)
+#define vreinterpretq_u16_s32(__a) __arm_vreinterpretq_u16_s32(__a)
+#define vreinterpretq_u16_s64(__a) __arm_vreinterpretq_u16_s64(__a)
+#define vreinterpretq_u16_s8(__a) __arm_vreinterpretq_u16_s8(__a)
+#define vreinterpretq_u16_u32(__a) __arm_vreinterpretq_u16_u32(__a)
+#define vreinterpretq_u16_u64(__a) __arm_vreinterpretq_u16_u64(__a)
+#define vreinterpretq_u16_u8(__a) __arm_vreinterpretq_u16_u8(__a)
+#define vreinterpretq_u32_s16(__a) __arm_vreinterpretq_u32_s16(__a)
+#define vreinterpretq_u32_s32(__a) __arm_vreinterpretq_u32_s32(__a)
+#define vreinterpretq_u32_s64(__a) __arm_vreinterpretq_u32_s64(__a)
+#define vreinterpretq_u32_s8(__a) __arm_vreinterpretq_u32_s8(__a)
+#define vreinterpretq_u32_u16(__a) __arm_vreinterpretq_u32_u16(__a)
+#define vreinterpretq_u32_u64(__a) __arm_vreinterpretq_u32_u64(__a)
+#define vreinterpretq_u32_u8(__a) __arm_vreinterpretq_u32_u8(__a)
+#define vreinterpretq_u64_s16(__a) __arm_vreinterpretq_u64_s16(__a)
+#define vreinterpretq_u64_s32(__a) __arm_vreinterpretq_u64_s32(__a)
+#define vreinterpretq_u64_s64(__a) __arm_vreinterpretq_u64_s64(__a)
+#define vreinterpretq_u64_s8(__a) __arm_vreinterpretq_u64_s8(__a)
+#define vreinterpretq_u64_u16(__a) __arm_vreinterpretq_u64_u16(__a)
+#define vreinterpretq_u64_u32(__a) __arm_vreinterpretq_u64_u32(__a)
+#define vreinterpretq_u64_u8(__a) __arm_vreinterpretq_u64_u8(__a)
+#define vreinterpretq_u8_s16(__a) __arm_vreinterpretq_u8_s16(__a)
+#define vreinterpretq_u8_s32(__a) __arm_vreinterpretq_u8_s32(__a)
+#define vreinterpretq_u8_s64(__a) __arm_vreinterpretq_u8_s64(__a)
+#define vreinterpretq_u8_s8(__a) __arm_vreinterpretq_u8_s8(__a)
+#define vreinterpretq_u8_u16(__a) __arm_vreinterpretq_u8_u16(__a)
+#define vreinterpretq_u8_u32(__a) __arm_vreinterpretq_u8_u32(__a)
+#define vreinterpretq_u8_u64(__a) __arm_vreinterpretq_u8_u64(__a)
+#define vreinterpretq_s32_f16(__a) __arm_vreinterpretq_s32_f16(__a)
+#define vreinterpretq_s32_f32(__a) __arm_vreinterpretq_s32_f32(__a)
+#define vreinterpretq_u16_f16(__a) __arm_vreinterpretq_u16_f16(__a)
+#define vreinterpretq_u16_f32(__a) __arm_vreinterpretq_u16_f32(__a)
+#define vreinterpretq_u32_f16(__a) __arm_vreinterpretq_u32_f16(__a)
+#define vreinterpretq_u32_f32(__a) __arm_vreinterpretq_u32_f32(__a)
+#define vreinterpretq_u64_f16(__a) __arm_vreinterpretq_u64_f16(__a)
+#define vreinterpretq_u64_f32(__a) __arm_vreinterpretq_u64_f32(__a)
+#define vreinterpretq_u8_f16(__a) __arm_vreinterpretq_u8_f16(__a)
+#define vreinterpretq_u8_f32(__a) __arm_vreinterpretq_u8_f32(__a)
+#define vreinterpretq_f16_f32(__a) __arm_vreinterpretq_f16_f32(__a)
+#define vreinterpretq_f16_s16(__a) __arm_vreinterpretq_f16_s16(__a)
+#define vreinterpretq_f16_s32(__a) __arm_vreinterpretq_f16_s32(__a)
+#define vreinterpretq_f16_s64(__a) __arm_vreinterpretq_f16_s64(__a)
+#define vreinterpretq_f16_s8(__a) __arm_vreinterpretq_f16_s8(__a)
+#define vreinterpretq_f16_u16(__a) __arm_vreinterpretq_f16_u16(__a)
+#define vreinterpretq_f16_u32(__a) __arm_vreinterpretq_f16_u32(__a)
+#define vreinterpretq_f16_u64(__a) __arm_vreinterpretq_f16_u64(__a)
+#define vreinterpretq_f16_u8(__a) __arm_vreinterpretq_f16_u8(__a)
+#define vreinterpretq_f32_f16(__a) __arm_vreinterpretq_f32_f16(__a)
+#define vreinterpretq_f32_s16(__a) __arm_vreinterpretq_f32_s16(__a)
+#define vreinterpretq_f32_s32(__a) __arm_vreinterpretq_f32_s32(__a)
+#define vreinterpretq_f32_s64(__a) __arm_vreinterpretq_f32_s64(__a)
+#define vreinterpretq_f32_s8(__a) __arm_vreinterpretq_f32_s8(__a)
+#define vreinterpretq_f32_u16(__a) __arm_vreinterpretq_f32_u16(__a)
+#define vreinterpretq_f32_u32(__a) __arm_vreinterpretq_f32_u32(__a)
+#define vreinterpretq_f32_u64(__a) __arm_vreinterpretq_f32_u64(__a)
+#define vreinterpretq_f32_u8(__a) __arm_vreinterpretq_f32_u8(__a)
+#define vreinterpretq_s16_f16(__a) __arm_vreinterpretq_s16_f16(__a)
+#define vreinterpretq_s16_f32(__a) __arm_vreinterpretq_s16_f32(__a)
+#define vreinterpretq_s64_f16(__a) __arm_vreinterpretq_s64_f16(__a)
+#define vreinterpretq_s64_f32(__a) __arm_vreinterpretq_s64_f32(__a)
+#define vreinterpretq_s8_f16(__a) __arm_vreinterpretq_s8_f16(__a)
+#define vreinterpretq_s8_f32(__a) __arm_vreinterpretq_s8_f32(__a)
+#define vuninitializedq_u8(void) __arm_vuninitializedq_u8(void)
+#define vuninitializedq_u16(void) __arm_vuninitializedq_u16(void)
+#define vuninitializedq_u32(void) __arm_vuninitializedq_u32(void)
+#define vuninitializedq_u64(void) __arm_vuninitializedq_u64(void)
+#define vuninitializedq_s8(void) __arm_vuninitializedq_s8(void)
+#define vuninitializedq_s16(void) __arm_vuninitializedq_s16(void)
+#define vuninitializedq_s32(void) __arm_vuninitializedq_s32(void)
+#define vuninitializedq_s64(void) __arm_vuninitializedq_s64(void)
+#define vuninitializedq_f16(void) __arm_vuninitializedq_f16(void)
+#define vuninitializedq_f32(void) __arm_vuninitializedq_f32(void)
+#define vddupq_m_n_u8(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u8(__inactive, __a, __imm, __p)
+#define vddupq_m_n_u32(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u32(__inactive, __a, __imm, __p)
+#define vddupq_m_n_u16(__inactive, __a, __imm, __p) __arm_vddupq_m_n_u16(__inactive, __a, __imm, __p)
+#define vddupq_m_wb_u8(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u8(__inactive, __a, __imm, __p)
+#define vddupq_m_wb_u16(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u16(__inactive, __a, __imm, __p)
+#define vddupq_m_wb_u32(__inactive, __a, __imm, __p) __arm_vddupq_m_wb_u32(__inactive, __a, __imm, __p)
+#define vddupq_n_u8(__a, __imm) __arm_vddupq_n_u8(__a, __imm)
+#define vddupq_n_u32(__a, __imm) __arm_vddupq_n_u32(__a, __imm)
+#define vddupq_n_u16(__a, __imm) __arm_vddupq_n_u16(__a, __imm)
+#define vddupq_wb_u8( __a, __imm) __arm_vddupq_wb_u8( __a, __imm)
+#define vddupq_wb_u16( __a, __imm) __arm_vddupq_wb_u16( __a, __imm)
+#define vddupq_wb_u32( __a, __imm) __arm_vddupq_wb_u32( __a, __imm)
+#define vdwdupq_m_n_u8(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u8(__inactive, __a, __b, __imm, __p)
+#define vdwdupq_m_n_u32(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u32(__inactive, __a, __b, __imm, __p)
+#define vdwdupq_m_n_u16(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_n_u16(__inactive, __a, __b, __imm, __p)
+#define vdwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p)
+#define vdwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p)
+#define vdwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p) __arm_vdwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p)
+#define vdwdupq_n_u8(__a, __b, __imm) __arm_vdwdupq_n_u8(__a, __b, __imm)
+#define vdwdupq_n_u32(__a, __b, __imm) __arm_vdwdupq_n_u32(__a, __b, __imm)
+#define vdwdupq_n_u16(__a, __b, __imm) __arm_vdwdupq_n_u16(__a, __b, __imm)
+#define vdwdupq_wb_u8( __a, __b, __imm) __arm_vdwdupq_wb_u8( __a, __b, __imm)
+#define vdwdupq_wb_u32( __a, __b, __imm) __arm_vdwdupq_wb_u32( __a, __b, __imm)
+#define vdwdupq_wb_u16( __a, __b, __imm) __arm_vdwdupq_wb_u16( __a, __b, __imm)
+#define vidupq_m_n_u8(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u8(__inactive, __a, __imm, __p)
+#define vidupq_m_n_u32(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u32(__inactive, __a, __imm, __p)
+#define vidupq_m_n_u16(__inactive, __a, __imm, __p) __arm_vidupq_m_n_u16(__inactive, __a, __imm, __p)
+#define vidupq_m_wb_u8(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u8(__inactive, __a, __imm, __p)
+#define vidupq_m_wb_u16(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u16(__inactive, __a, __imm, __p)
+#define vidupq_m_wb_u32(__inactive, __a, __imm, __p) __arm_vidupq_m_wb_u32(__inactive, __a, __imm, __p)
+#define vidupq_n_u8(__a, __imm) __arm_vidupq_n_u8(__a, __imm)
+#define vidupq_n_u32(__a, __imm) __arm_vidupq_n_u32(__a, __imm)
+#define vidupq_n_u16(__a, __imm) __arm_vidupq_n_u16(__a, __imm)
+#define vidupq_wb_u8( __a, __imm) __arm_vidupq_wb_u8( __a, __imm)
+#define vidupq_wb_u16( __a, __imm) __arm_vidupq_wb_u16( __a, __imm)
+#define vidupq_wb_u32( __a, __imm) __arm_vidupq_wb_u32( __a, __imm)
+#define viwdupq_m_n_u8(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u8(__inactive, __a, __b, __imm, __p)
+#define viwdupq_m_n_u32(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u32(__inactive, __a, __b, __imm, __p)
+#define viwdupq_m_n_u16(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_n_u16(__inactive, __a, __b, __imm, __p)
+#define viwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u8(__inactive, __a, __b, __imm, __p)
+#define viwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u32(__inactive, __a, __b, __imm, __p)
+#define viwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p) __arm_viwdupq_m_wb_u16(__inactive, __a, __b, __imm, __p)
+#define viwdupq_n_u8(__a, __b, __imm) __arm_viwdupq_n_u8(__a, __b, __imm)
+#define viwdupq_n_u32(__a, __b, __imm) __arm_viwdupq_n_u32(__a, __b, __imm)
+#define viwdupq_n_u16(__a, __b, __imm) __arm_viwdupq_n_u16(__a, __b, __imm)
+#define viwdupq_wb_u8( __a, __b, __imm) __arm_viwdupq_wb_u8( __a, __b, __imm)
+#define viwdupq_wb_u32( __a, __b, __imm) __arm_viwdupq_wb_u32( __a, __b, __imm)
+#define viwdupq_wb_u16( __a, __b, __imm) __arm_viwdupq_wb_u16( __a, __b, __imm)
+#define vldrdq_gather_base_wb_s64(__addr, __offset) __arm_vldrdq_gather_base_wb_s64(__addr, __offset)
+#define vldrdq_gather_base_wb_u64(__addr, __offset) __arm_vldrdq_gather_base_wb_u64(__addr, __offset)
+#define vldrdq_gather_base_wb_z_s64(__addr, __offset, __p) __arm_vldrdq_gather_base_wb_z_s64(__addr, __offset, __p)
+#define vldrdq_gather_base_wb_z_u64(__addr, __offset, __p) __arm_vldrdq_gather_base_wb_z_u64(__addr, __offset, __p)
+#define vldrwq_gather_base_wb_f32(__addr, __offset) __arm_vldrwq_gather_base_wb_f32(__addr, __offset)
+#define vldrwq_gather_base_wb_s32(__addr, __offset) __arm_vldrwq_gather_base_wb_s32(__addr, __offset)
+#define vldrwq_gather_base_wb_u32(__addr, __offset) __arm_vldrwq_gather_base_wb_u32(__addr, __offset)
+#define vldrwq_gather_base_wb_z_f32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_f32(__addr, __offset, __p)
+#define vldrwq_gather_base_wb_z_s32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_s32(__addr, __offset, __p)
+#define vldrwq_gather_base_wb_z_u32(__addr, __offset, __p) __arm_vldrwq_gather_base_wb_z_u32(__addr, __offset, __p)
+#define vstrdq_scatter_base_wb_p_s64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p_s64(__addr, __offset, __value, __p)
+#define vstrdq_scatter_base_wb_p_u64(__addr, __offset, __value, __p) __arm_vstrdq_scatter_base_wb_p_u64(__addr, __offset, __value, __p)
+#define vstrdq_scatter_base_wb_s64(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb_s64(__addr, __offset, __value)
+#define vstrdq_scatter_base_wb_u64(__addr, __offset, __value) __arm_vstrdq_scatter_base_wb_u64(__addr, __offset, __value)
+#define vstrwq_scatter_base_wb_p_s32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_s32(__addr, __offset, __value, __p)
+#define vstrwq_scatter_base_wb_p_f32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_f32(__addr, __offset, __value, __p)
+#define vstrwq_scatter_base_wb_p_u32(__addr, __offset, __value, __p) __arm_vstrwq_scatter_base_wb_p_u32(__addr, __offset, __value, __p)
+#define vstrwq_scatter_base_wb_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_s32(__addr, __offset, __value)
+#define vstrwq_scatter_base_wb_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_u32(__addr, __offset, __value)
+#define vstrwq_scatter_base_wb_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_f32(__addr, __offset, __value)
+#define vddupq_x_n_u8(__a, __imm, __p) __arm_vddupq_x_n_u8(__a, __imm, __p)
+#define vddupq_x_n_u16(__a, __imm, __p) __arm_vddupq_x_n_u16(__a, __imm, __p)
+#define vddupq_x_n_u32(__a, __imm, __p) __arm_vddupq_x_n_u32(__a, __imm, __p)
+#define vddupq_x_wb_u8(__a, __imm, __p) __arm_vddupq_x_wb_u8(__a, __imm, __p)
+#define vddupq_x_wb_u16(__a, __imm, __p) __arm_vddupq_x_wb_u16(__a, __imm, __p)
+#define vddupq_x_wb_u32(__a, __imm, __p) __arm_vddupq_x_wb_u32(__a, __imm, __p)
+#define vdwdupq_x_n_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u8(__a, __b, __imm, __p)
+#define vdwdupq_x_n_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u16(__a, __b, __imm, __p)
+#define vdwdupq_x_n_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u32(__a, __b, __imm, __p)
+#define vdwdupq_x_wb_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u8(__a, __b, __imm, __p)
+#define vdwdupq_x_wb_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u16(__a, __b, __imm, __p)
+#define vdwdupq_x_wb_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u32(__a, __b, __imm, __p)
+#define vidupq_x_n_u8(__a, __imm, __p) __arm_vidupq_x_n_u8(__a, __imm, __p)
+#define vidupq_x_n_u16(__a, __imm, __p) __arm_vidupq_x_n_u16(__a, __imm, __p)
+#define vidupq_x_n_u32(__a, __imm, __p) __arm_vidupq_x_n_u32(__a, __imm, __p)
+#define vidupq_x_wb_u8(__a, __imm, __p) __arm_vidupq_x_wb_u8(__a, __imm, __p)
+#define vidupq_x_wb_u16(__a, __imm, __p) __arm_vidupq_x_wb_u16(__a, __imm, __p)
+#define vidupq_x_wb_u32(__a, __imm, __p) __arm_vidupq_x_wb_u32(__a, __imm, __p)
+#define viwdupq_x_n_u8(__a, __b, __imm, __p) __arm_viwdupq_x_n_u8(__a, __b, __imm, __p)
+#define viwdupq_x_n_u16(__a, __b, __imm, __p) __arm_viwdupq_x_n_u16(__a, __b, __imm, __p)
+#define viwdupq_x_n_u32(__a, __b, __imm, __p) __arm_viwdupq_x_n_u32(__a, __b, __imm, __p)
+#define viwdupq_x_wb_u8(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u8(__a, __b, __imm, __p)
+#define viwdupq_x_wb_u16(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u16(__a, __b, __imm, __p)
+#define viwdupq_x_wb_u32(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u32(__a, __b, __imm, __p)
+#define vdupq_x_n_s8(__a, __p) __arm_vdupq_x_n_s8(__a, __p)
+#define vdupq_x_n_s16(__a, __p) __arm_vdupq_x_n_s16(__a, __p)
+#define vdupq_x_n_s32(__a, __p) __arm_vdupq_x_n_s32(__a, __p)
+#define vdupq_x_n_u8(__a, __p) __arm_vdupq_x_n_u8(__a, __p)
+#define vdupq_x_n_u16(__a, __p) __arm_vdupq_x_n_u16(__a, __p)
+#define vdupq_x_n_u32(__a, __p) __arm_vdupq_x_n_u32(__a, __p)
+#define vminq_x_s8(__a, __b, __p) __arm_vminq_x_s8(__a, __b, __p)
+#define vminq_x_s16(__a, __b, __p) __arm_vminq_x_s16(__a, __b, __p)
+#define vminq_x_s32(__a, __b, __p) __arm_vminq_x_s32(__a, __b, __p)
+#define vminq_x_u8(__a, __b, __p) __arm_vminq_x_u8(__a, __b, __p)
+#define vminq_x_u16(__a, __b, __p) __arm_vminq_x_u16(__a, __b, __p)
+#define vminq_x_u32(__a, __b, __p) __arm_vminq_x_u32(__a, __b, __p)
+#define vmaxq_x_s8(__a, __b, __p) __arm_vmaxq_x_s8(__a, __b, __p)
+#define vmaxq_x_s16(__a, __b, __p) __arm_vmaxq_x_s16(__a, __b, __p)
+#define vmaxq_x_s32(__a, __b, __p) __arm_vmaxq_x_s32(__a, __b, __p)
+#define vmaxq_x_u8(__a, __b, __p) __arm_vmaxq_x_u8(__a, __b, __p)
+#define vmaxq_x_u16(__a, __b, __p) __arm_vmaxq_x_u16(__a, __b, __p)
+#define vmaxq_x_u32(__a, __b, __p) __arm_vmaxq_x_u32(__a, __b, __p)
+#define vabdq_x_s8(__a, __b, __p) __arm_vabdq_x_s8(__a, __b, __p)
+#define vabdq_x_s16(__a, __b, __p) __arm_vabdq_x_s16(__a, __b, __p)
+#define vabdq_x_s32(__a, __b, __p) __arm_vabdq_x_s32(__a, __b, __p)
+#define vabdq_x_u8(__a, __b, __p) __arm_vabdq_x_u8(__a, __b, __p)
+#define vabdq_x_u16(__a, __b, __p) __arm_vabdq_x_u16(__a, __b, __p)
+#define vabdq_x_u32(__a, __b, __p) __arm_vabdq_x_u32(__a, __b, __p)
+#define vabsq_x_s8(__a, __p) __arm_vabsq_x_s8(__a, __p)
+#define vabsq_x_s16(__a, __p) __arm_vabsq_x_s16(__a, __p)
+#define vabsq_x_s32(__a, __p) __arm_vabsq_x_s32(__a, __p)
+#define vaddq_x_s8(__a, __b, __p) __arm_vaddq_x_s8(__a, __b, __p)
+#define vaddq_x_s16(__a, __b, __p) __arm_vaddq_x_s16(__a, __b, __p)
+#define vaddq_x_s32(__a, __b, __p) __arm_vaddq_x_s32(__a, __b, __p)
+#define vaddq_x_n_s8(__a, __b, __p) __arm_vaddq_x_n_s8(__a, __b, __p)
+#define vaddq_x_n_s16(__a, __b, __p) __arm_vaddq_x_n_s16(__a, __b, __p)
+#define vaddq_x_n_s32(__a, __b, __p) __arm_vaddq_x_n_s32(__a, __b, __p)
+#define vaddq_x_u8(__a, __b, __p) __arm_vaddq_x_u8(__a, __b, __p)
+#define vaddq_x_u16(__a, __b, __p) __arm_vaddq_x_u16(__a, __b, __p)
+#define vaddq_x_u32(__a, __b, __p) __arm_vaddq_x_u32(__a, __b, __p)
+#define vaddq_x_n_u8(__a, __b, __p) __arm_vaddq_x_n_u8(__a, __b, __p)
+#define vaddq_x_n_u16(__a, __b, __p) __arm_vaddq_x_n_u16(__a, __b, __p)
+#define vaddq_x_n_u32(__a, __b, __p) __arm_vaddq_x_n_u32(__a, __b, __p)
+#define vclsq_x_s8(__a, __p) __arm_vclsq_x_s8(__a, __p)
+#define vclsq_x_s16(__a, __p) __arm_vclsq_x_s16(__a, __p)
+#define vclsq_x_s32(__a, __p) __arm_vclsq_x_s32(__a, __p)
+#define vclzq_x_s8(__a, __p) __arm_vclzq_x_s8(__a, __p)
+#define vclzq_x_s16(__a, __p) __arm_vclzq_x_s16(__a, __p)
+#define vclzq_x_s32(__a, __p) __arm_vclzq_x_s32(__a, __p)
+#define vclzq_x_u8(__a, __p) __arm_vclzq_x_u8(__a, __p)
+#define vclzq_x_u16(__a, __p) __arm_vclzq_x_u16(__a, __p)
+#define vclzq_x_u32(__a, __p) __arm_vclzq_x_u32(__a, __p)
+#define vnegq_x_s8(__a, __p) __arm_vnegq_x_s8(__a, __p)
+#define vnegq_x_s16(__a, __p) __arm_vnegq_x_s16(__a, __p)
+#define vnegq_x_s32(__a, __p) __arm_vnegq_x_s32(__a, __p)
+#define vmulhq_x_s8(__a, __b, __p) __arm_vmulhq_x_s8(__a, __b, __p)
+#define vmulhq_x_s16(__a, __b, __p) __arm_vmulhq_x_s16(__a, __b, __p)
+#define vmulhq_x_s32(__a, __b, __p) __arm_vmulhq_x_s32(__a, __b, __p)
+#define vmulhq_x_u8(__a, __b, __p) __arm_vmulhq_x_u8(__a, __b, __p)
+#define vmulhq_x_u16(__a, __b, __p) __arm_vmulhq_x_u16(__a, __b, __p)
+#define vmulhq_x_u32(__a, __b, __p) __arm_vmulhq_x_u32(__a, __b, __p)
+#define vmullbq_poly_x_p8(__a, __b, __p) __arm_vmullbq_poly_x_p8(__a, __b, __p)
+#define vmullbq_poly_x_p16(__a, __b, __p) __arm_vmullbq_poly_x_p16(__a, __b, __p)
+#define vmullbq_int_x_s8(__a, __b, __p) __arm_vmullbq_int_x_s8(__a, __b, __p)
+#define vmullbq_int_x_s16(__a, __b, __p) __arm_vmullbq_int_x_s16(__a, __b, __p)
+#define vmullbq_int_x_s32(__a, __b, __p) __arm_vmullbq_int_x_s32(__a, __b, __p)
+#define vmullbq_int_x_u8(__a, __b, __p) __arm_vmullbq_int_x_u8(__a, __b, __p)
+#define vmullbq_int_x_u16(__a, __b, __p) __arm_vmullbq_int_x_u16(__a, __b, __p)
+#define vmullbq_int_x_u32(__a, __b, __p) __arm_vmullbq_int_x_u32(__a, __b, __p)
+#define vmulltq_poly_x_p8(__a, __b, __p) __arm_vmulltq_poly_x_p8(__a, __b, __p)
+#define vmulltq_poly_x_p16(__a, __b, __p) __arm_vmulltq_poly_x_p16(__a, __b, __p)
+#define vmulltq_int_x_s8(__a, __b, __p) __arm_vmulltq_int_x_s8(__a, __b, __p)
+#define vmulltq_int_x_s16(__a, __b, __p) __arm_vmulltq_int_x_s16(__a, __b, __p)
+#define vmulltq_int_x_s32(__a, __b, __p) __arm_vmulltq_int_x_s32(__a, __b, __p)
+#define vmulltq_int_x_u8(__a, __b, __p) __arm_vmulltq_int_x_u8(__a, __b, __p)
+#define vmulltq_int_x_u16(__a, __b, __p) __arm_vmulltq_int_x_u16(__a, __b, __p)
+#define vmulltq_int_x_u32(__a, __b, __p) __arm_vmulltq_int_x_u32(__a, __b, __p)
+#define vmulq_x_s8(__a, __b, __p) __arm_vmulq_x_s8(__a, __b, __p)
+#define vmulq_x_s16(__a, __b, __p) __arm_vmulq_x_s16(__a, __b, __p)
+#define vmulq_x_s32(__a, __b, __p) __arm_vmulq_x_s32(__a, __b, __p)
+#define vmulq_x_n_s8(__a, __b, __p) __arm_vmulq_x_n_s8(__a, __b, __p)
+#define vmulq_x_n_s16(__a, __b, __p) __arm_vmulq_x_n_s16(__a, __b, __p)
+#define vmulq_x_n_s32(__a, __b, __p) __arm_vmulq_x_n_s32(__a, __b, __p)
+#define vmulq_x_u8(__a, __b, __p) __arm_vmulq_x_u8(__a, __b, __p)
+#define vmulq_x_u16(__a, __b, __p) __arm_vmulq_x_u16(__a, __b, __p)
+#define vmulq_x_u32(__a, __b, __p) __arm_vmulq_x_u32(__a, __b, __p)
+#define vmulq_x_n_u8(__a, __b, __p) __arm_vmulq_x_n_u8(__a, __b, __p)
+#define vmulq_x_n_u16(__a, __b, __p) __arm_vmulq_x_n_u16(__a, __b, __p)
+#define vmulq_x_n_u32(__a, __b, __p) __arm_vmulq_x_n_u32(__a, __b, __p)
+#define vsubq_x_s8(__a, __b, __p) __arm_vsubq_x_s8(__a, __b, __p)
+#define vsubq_x_s16(__a, __b, __p) __arm_vsubq_x_s16(__a, __b, __p)
+#define vsubq_x_s32(__a, __b, __p) __arm_vsubq_x_s32(__a, __b, __p)
+#define vsubq_x_n_s8(__a, __b, __p) __arm_vsubq_x_n_s8(__a, __b, __p)
+#define vsubq_x_n_s16(__a, __b, __p) __arm_vsubq_x_n_s16(__a, __b, __p)
+#define vsubq_x_n_s32(__a, __b, __p) __arm_vsubq_x_n_s32(__a, __b, __p)
+#define vsubq_x_u8(__a, __b, __p) __arm_vsubq_x_u8(__a, __b, __p)
+#define vsubq_x_u16(__a, __b, __p) __arm_vsubq_x_u16(__a, __b, __p)
+#define vsubq_x_u32(__a, __b, __p) __arm_vsubq_x_u32(__a, __b, __p)
+#define vsubq_x_n_u8(__a, __b, __p) __arm_vsubq_x_n_u8(__a, __b, __p)
+#define vsubq_x_n_u16(__a, __b, __p) __arm_vsubq_x_n_u16(__a, __b, __p)
+#define vsubq_x_n_u32(__a, __b, __p) __arm_vsubq_x_n_u32(__a, __b, __p)
+#define vcaddq_rot90_x_s8(__a, __b, __p) __arm_vcaddq_rot90_x_s8(__a, __b, __p)
+#define vcaddq_rot90_x_s16(__a, __b, __p) __arm_vcaddq_rot90_x_s16(__a, __b, __p)
+#define vcaddq_rot90_x_s32(__a, __b, __p) __arm_vcaddq_rot90_x_s32(__a, __b, __p)
+#define vcaddq_rot90_x_u8(__a, __b, __p) __arm_vcaddq_rot90_x_u8(__a, __b, __p)
+#define vcaddq_rot90_x_u16(__a, __b, __p) __arm_vcaddq_rot90_x_u16(__a, __b, __p)
+#define vcaddq_rot90_x_u32(__a, __b, __p) __arm_vcaddq_rot90_x_u32(__a, __b, __p)
+#define vcaddq_rot270_x_s8(__a, __b, __p) __arm_vcaddq_rot270_x_s8(__a, __b, __p)
+#define vcaddq_rot270_x_s16(__a, __b, __p) __arm_vcaddq_rot270_x_s16(__a, __b, __p)
+#define vcaddq_rot270_x_s32(__a, __b, __p) __arm_vcaddq_rot270_x_s32(__a, __b, __p)
+#define vcaddq_rot270_x_u8(__a, __b, __p) __arm_vcaddq_rot270_x_u8(__a, __b, __p)
+#define vcaddq_rot270_x_u16(__a, __b, __p) __arm_vcaddq_rot270_x_u16(__a, __b, __p)
+#define vcaddq_rot270_x_u32(__a, __b, __p) __arm_vcaddq_rot270_x_u32(__a, __b, __p)
+#define vhaddq_x_n_s8(__a, __b, __p) __arm_vhaddq_x_n_s8(__a, __b, __p)
+#define vhaddq_x_n_s16(__a, __b, __p) __arm_vhaddq_x_n_s16(__a, __b, __p)
+#define vhaddq_x_n_s32(__a, __b, __p) __arm_vhaddq_x_n_s32(__a, __b, __p)
+#define vhaddq_x_n_u8(__a, __b, __p) __arm_vhaddq_x_n_u8(__a, __b, __p)
+#define vhaddq_x_n_u16(__a, __b, __p) __arm_vhaddq_x_n_u16(__a, __b, __p)
+#define vhaddq_x_n_u32(__a, __b, __p) __arm_vhaddq_x_n_u32(__a, __b, __p)
+#define vhaddq_x_s8(__a, __b, __p) __arm_vhaddq_x_s8(__a, __b, __p)
+#define vhaddq_x_s16(__a, __b, __p) __arm_vhaddq_x_s16(__a, __b, __p)
+#define vhaddq_x_s32(__a, __b, __p) __arm_vhaddq_x_s32(__a, __b, __p)
+#define vhaddq_x_u8(__a, __b, __p) __arm_vhaddq_x_u8(__a, __b, __p)
+#define vhaddq_x_u16(__a, __b, __p) __arm_vhaddq_x_u16(__a, __b, __p)
+#define vhaddq_x_u32(__a, __b, __p) __arm_vhaddq_x_u32(__a, __b, __p)
+#define vhcaddq_rot90_x_s8(__a, __b, __p) __arm_vhcaddq_rot90_x_s8(__a, __b, __p)
+#define vhcaddq_rot90_x_s16(__a, __b, __p) __arm_vhcaddq_rot90_x_s16(__a, __b, __p)
+#define vhcaddq_rot90_x_s32(__a, __b, __p) __arm_vhcaddq_rot90_x_s32(__a, __b, __p)
+#define vhcaddq_rot270_x_s8(__a, __b, __p) __arm_vhcaddq_rot270_x_s8(__a, __b, __p)
+#define vhcaddq_rot270_x_s16(__a, __b, __p) __arm_vhcaddq_rot270_x_s16(__a, __b, __p)
+#define vhcaddq_rot270_x_s32(__a, __b, __p) __arm_vhcaddq_rot270_x_s32(__a, __b, __p)
+#define vhsubq_x_n_s8(__a, __b, __p) __arm_vhsubq_x_n_s8(__a, __b, __p)
+#define vhsubq_x_n_s16(__a, __b, __p) __arm_vhsubq_x_n_s16(__a, __b, __p)
+#define vhsubq_x_n_s32(__a, __b, __p) __arm_vhsubq_x_n_s32(__a, __b, __p)
+#define vhsubq_x_n_u8(__a, __b, __p) __arm_vhsubq_x_n_u8(__a, __b, __p)
+#define vhsubq_x_n_u16(__a, __b, __p) __arm_vhsubq_x_n_u16(__a, __b, __p)
+#define vhsubq_x_n_u32(__a, __b, __p) __arm_vhsubq_x_n_u32(__a, __b, __p)
+#define vhsubq_x_s8(__a, __b, __p) __arm_vhsubq_x_s8(__a, __b, __p)
+#define vhsubq_x_s16(__a, __b, __p) __arm_vhsubq_x_s16(__a, __b, __p)
+#define vhsubq_x_s32(__a, __b, __p) __arm_vhsubq_x_s32(__a, __b, __p)
+#define vhsubq_x_u8(__a, __b, __p) __arm_vhsubq_x_u8(__a, __b, __p)
+#define vhsubq_x_u16(__a, __b, __p) __arm_vhsubq_x_u16(__a, __b, __p)
+#define vhsubq_x_u32(__a, __b, __p) __arm_vhsubq_x_u32(__a, __b, __p)
+#define vrhaddq_x_s8(__a, __b, __p) __arm_vrhaddq_x_s8(__a, __b, __p)
+#define vrhaddq_x_s16(__a, __b, __p) __arm_vrhaddq_x_s16(__a, __b, __p)
+#define vrhaddq_x_s32(__a, __b, __p) __arm_vrhaddq_x_s32(__a, __b, __p)
+#define vrhaddq_x_u8(__a, __b, __p) __arm_vrhaddq_x_u8(__a, __b, __p)
+#define vrhaddq_x_u16(__a, __b, __p) __arm_vrhaddq_x_u16(__a, __b, __p)
+#define vrhaddq_x_u32(__a, __b, __p) __arm_vrhaddq_x_u32(__a, __b, __p)
+#define vrmulhq_x_s8(__a, __b, __p) __arm_vrmulhq_x_s8(__a, __b, __p)
+#define vrmulhq_x_s16(__a, __b, __p) __arm_vrmulhq_x_s16(__a, __b, __p)
+#define vrmulhq_x_s32(__a, __b, __p) __arm_vrmulhq_x_s32(__a, __b, __p)
+#define vrmulhq_x_u8(__a, __b, __p) __arm_vrmulhq_x_u8(__a, __b, __p)
+#define vrmulhq_x_u16(__a, __b, __p) __arm_vrmulhq_x_u16(__a, __b, __p)
+#define vrmulhq_x_u32(__a, __b, __p) __arm_vrmulhq_x_u32(__a, __b, __p)
+#define vandq_x_s8(__a, __b, __p) __arm_vandq_x_s8(__a, __b, __p)
+#define vandq_x_s16(__a, __b, __p) __arm_vandq_x_s16(__a, __b, __p)
+#define vandq_x_s32(__a, __b, __p) __arm_vandq_x_s32(__a, __b, __p)
+#define vandq_x_u8(__a, __b, __p) __arm_vandq_x_u8(__a, __b, __p)
+#define vandq_x_u16(__a, __b, __p) __arm_vandq_x_u16(__a, __b, __p)
+#define vandq_x_u32(__a, __b, __p) __arm_vandq_x_u32(__a, __b, __p)
+#define vbicq_x_s8(__a, __b, __p) __arm_vbicq_x_s8(__a, __b, __p)
+#define vbicq_x_s16(__a, __b, __p) __arm_vbicq_x_s16(__a, __b, __p)
+#define vbicq_x_s32(__a, __b, __p) __arm_vbicq_x_s32(__a, __b, __p)
+#define vbicq_x_u8(__a, __b, __p) __arm_vbicq_x_u8(__a, __b, __p)
+#define vbicq_x_u16(__a, __b, __p) __arm_vbicq_x_u16(__a, __b, __p)
+#define vbicq_x_u32(__a, __b, __p) __arm_vbicq_x_u32(__a, __b, __p)
+#define vbrsrq_x_n_s8(__a, __b, __p) __arm_vbrsrq_x_n_s8(__a, __b, __p)
+#define vbrsrq_x_n_s16(__a, __b, __p) __arm_vbrsrq_x_n_s16(__a, __b, __p)
+#define vbrsrq_x_n_s32(__a, __b, __p) __arm_vbrsrq_x_n_s32(__a, __b, __p)
+#define vbrsrq_x_n_u8(__a, __b, __p) __arm_vbrsrq_x_n_u8(__a, __b, __p)
+#define vbrsrq_x_n_u16(__a, __b, __p) __arm_vbrsrq_x_n_u16(__a, __b, __p)
+#define vbrsrq_x_n_u32(__a, __b, __p) __arm_vbrsrq_x_n_u32(__a, __b, __p)
+#define veorq_x_s8(__a, __b, __p) __arm_veorq_x_s8(__a, __b, __p)
+#define veorq_x_s16(__a, __b, __p) __arm_veorq_x_s16(__a, __b, __p)
+#define veorq_x_s32(__a, __b, __p) __arm_veorq_x_s32(__a, __b, __p)
+#define veorq_x_u8(__a, __b, __p) __arm_veorq_x_u8(__a, __b, __p)
+#define veorq_x_u16(__a, __b, __p) __arm_veorq_x_u16(__a, __b, __p)
+#define veorq_x_u32(__a, __b, __p) __arm_veorq_x_u32(__a, __b, __p)
+#define vmovlbq_x_s8(__a, __p) __arm_vmovlbq_x_s8(__a, __p)
+#define vmovlbq_x_s16(__a, __p) __arm_vmovlbq_x_s16(__a, __p)
+#define vmovlbq_x_u8(__a, __p) __arm_vmovlbq_x_u8(__a, __p)
+#define vmovlbq_x_u16(__a, __p) __arm_vmovlbq_x_u16(__a, __p)
+#define vmovltq_x_s8(__a, __p) __arm_vmovltq_x_s8(__a, __p)
+#define vmovltq_x_s16(__a, __p) __arm_vmovltq_x_s16(__a, __p)
+#define vmovltq_x_u8(__a, __p) __arm_vmovltq_x_u8(__a, __p)
+#define vmovltq_x_u16(__a, __p) __arm_vmovltq_x_u16(__a, __p)
+#define vmvnq_x_s8(__a, __p) __arm_vmvnq_x_s8(__a, __p)
+#define vmvnq_x_s16(__a, __p) __arm_vmvnq_x_s16(__a, __p)
+#define vmvnq_x_s32(__a, __p) __arm_vmvnq_x_s32(__a, __p)
+#define vmvnq_x_u8(__a, __p) __arm_vmvnq_x_u8(__a, __p)
+#define vmvnq_x_u16(__a, __p) __arm_vmvnq_x_u16(__a, __p)
+#define vmvnq_x_u32(__a, __p) __arm_vmvnq_x_u32(__a, __p)
+#define vmvnq_x_n_s16(__imm, __p) __arm_vmvnq_x_n_s16(__imm, __p)
+#define vmvnq_x_n_s32(__imm, __p) __arm_vmvnq_x_n_s32(__imm, __p)
+#define vmvnq_x_n_u16(__imm, __p) __arm_vmvnq_x_n_u16(__imm, __p)
+#define vmvnq_x_n_u32(__imm, __p) __arm_vmvnq_x_n_u32(__imm, __p)
+#define vornq_x_s8(__a, __b, __p) __arm_vornq_x_s8(__a, __b, __p)
+#define vornq_x_s16(__a, __b, __p) __arm_vornq_x_s16(__a, __b, __p)
+#define vornq_x_s32(__a, __b, __p) __arm_vornq_x_s32(__a, __b, __p)
+#define vornq_x_u8(__a, __b, __p) __arm_vornq_x_u8(__a, __b, __p)
+#define vornq_x_u16(__a, __b, __p) __arm_vornq_x_u16(__a, __b, __p)
+#define vornq_x_u32(__a, __b, __p) __arm_vornq_x_u32(__a, __b, __p)
+#define vorrq_x_s8(__a, __b, __p) __arm_vorrq_x_s8(__a, __b, __p)
+#define vorrq_x_s16(__a, __b, __p) __arm_vorrq_x_s16(__a, __b, __p)
+#define vorrq_x_s32(__a, __b, __p) __arm_vorrq_x_s32(__a, __b, __p)
+#define vorrq_x_u8(__a, __b, __p) __arm_vorrq_x_u8(__a, __b, __p)
+#define vorrq_x_u16(__a, __b, __p) __arm_vorrq_x_u16(__a, __b, __p)
+#define vorrq_x_u32(__a, __b, __p) __arm_vorrq_x_u32(__a, __b, __p)
+#define vrev16q_x_s8(__a, __p) __arm_vrev16q_x_s8(__a, __p)
+#define vrev16q_x_u8(__a, __p) __arm_vrev16q_x_u8(__a, __p)
+#define vrev32q_x_s8(__a, __p) __arm_vrev32q_x_s8(__a, __p)
+#define vrev32q_x_s16(__a, __p) __arm_vrev32q_x_s16(__a, __p)
+#define vrev32q_x_u8(__a, __p) __arm_vrev32q_x_u8(__a, __p)
+#define vrev32q_x_u16(__a, __p) __arm_vrev32q_x_u16(__a, __p)
+#define vrev64q_x_s8(__a, __p) __arm_vrev64q_x_s8(__a, __p)
+#define vrev64q_x_s16(__a, __p) __arm_vrev64q_x_s16(__a, __p)
+#define vrev64q_x_s32(__a, __p) __arm_vrev64q_x_s32(__a, __p)
+#define vrev64q_x_u8(__a, __p) __arm_vrev64q_x_u8(__a, __p)
+#define vrev64q_x_u16(__a, __p) __arm_vrev64q_x_u16(__a, __p)
+#define vrev64q_x_u32(__a, __p) __arm_vrev64q_x_u32(__a, __p)
+#define vrshlq_x_s8(__a, __b, __p) __arm_vrshlq_x_s8(__a, __b, __p)
+#define vrshlq_x_s16(__a, __b, __p) __arm_vrshlq_x_s16(__a, __b, __p)
+#define vrshlq_x_s32(__a, __b, __p) __arm_vrshlq_x_s32(__a, __b, __p)
+#define vrshlq_x_u8(__a, __b, __p) __arm_vrshlq_x_u8(__a, __b, __p)
+#define vrshlq_x_u16(__a, __b, __p) __arm_vrshlq_x_u16(__a, __b, __p)
+#define vrshlq_x_u32(__a, __b, __p) __arm_vrshlq_x_u32(__a, __b, __p)
+#define vshllbq_x_n_s8(__a, __imm, __p) __arm_vshllbq_x_n_s8(__a, __imm, __p)
+#define vshllbq_x_n_s16(__a, __imm, __p) __arm_vshllbq_x_n_s16(__a, __imm, __p)
+#define vshllbq_x_n_u8(__a, __imm, __p) __arm_vshllbq_x_n_u8(__a, __imm, __p)
+#define vshllbq_x_n_u16(__a, __imm, __p) __arm_vshllbq_x_n_u16(__a, __imm, __p)
+#define vshlltq_x_n_s8(__a, __imm, __p) __arm_vshlltq_x_n_s8(__a, __imm, __p)
+#define vshlltq_x_n_s16(__a, __imm, __p) __arm_vshlltq_x_n_s16(__a, __imm, __p)
+#define vshlltq_x_n_u8(__a, __imm, __p) __arm_vshlltq_x_n_u8(__a, __imm, __p)
+#define vshlltq_x_n_u16(__a, __imm, __p) __arm_vshlltq_x_n_u16(__a, __imm, __p)
+#define vshlq_x_s8(__a, __b, __p) __arm_vshlq_x_s8(__a, __b, __p)
+#define vshlq_x_s16(__a, __b, __p) __arm_vshlq_x_s16(__a, __b, __p)
+#define vshlq_x_s32(__a, __b, __p) __arm_vshlq_x_s32(__a, __b, __p)
+#define vshlq_x_u8(__a, __b, __p) __arm_vshlq_x_u8(__a, __b, __p)
+#define vshlq_x_u16(__a, __b, __p) __arm_vshlq_x_u16(__a, __b, __p)
+#define vshlq_x_u32(__a, __b, __p) __arm_vshlq_x_u32(__a, __b, __p)
+#define vshlq_x_n_s8(__a, __imm, __p) __arm_vshlq_x_n_s8(__a, __imm, __p)
+#define vshlq_x_n_s16(__a, __imm, __p) __arm_vshlq_x_n_s16(__a, __imm, __p)
+#define vshlq_x_n_s32(__a, __imm, __p) __arm_vshlq_x_n_s32(__a, __imm, __p)
+#define vshlq_x_n_u8(__a, __imm, __p) __arm_vshlq_x_n_u8(__a, __imm, __p)
+#define vshlq_x_n_u16(__a, __imm, __p) __arm_vshlq_x_n_u16(__a, __imm, __p)
+#define vshlq_x_n_u32(__a, __imm, __p) __arm_vshlq_x_n_u32(__a, __imm, __p)
+#define vrshrq_x_n_s8(__a, __imm, __p) __arm_vrshrq_x_n_s8(__a, __imm, __p)
+#define vrshrq_x_n_s16(__a, __imm, __p) __arm_vrshrq_x_n_s16(__a, __imm, __p)
+#define vrshrq_x_n_s32(__a, __imm, __p) __arm_vrshrq_x_n_s32(__a, __imm, __p)
+#define vrshrq_x_n_u8(__a, __imm, __p) __arm_vrshrq_x_n_u8(__a, __imm, __p)
+#define vrshrq_x_n_u16(__a, __imm, __p) __arm_vrshrq_x_n_u16(__a, __imm, __p)
+#define vrshrq_x_n_u32(__a, __imm, __p) __arm_vrshrq_x_n_u32(__a, __imm, __p)
+#define vshrq_x_n_s8(__a, __imm, __p) __arm_vshrq_x_n_s8(__a, __imm, __p)
+#define vshrq_x_n_s16(__a, __imm, __p) __arm_vshrq_x_n_s16(__a, __imm, __p)
+#define vshrq_x_n_s32(__a, __imm, __p) __arm_vshrq_x_n_s32(__a, __imm, __p)
+#define vshrq_x_n_u8(__a, __imm, __p) __arm_vshrq_x_n_u8(__a, __imm, __p)
+#define vshrq_x_n_u16(__a, __imm, __p) __arm_vshrq_x_n_u16(__a, __imm, __p)
+#define vshrq_x_n_u32(__a, __imm, __p) __arm_vshrq_x_n_u32(__a, __imm, __p)
+#define vdupq_x_n_f16(__a, __p) __arm_vdupq_x_n_f16(__a, __p)
+#define vdupq_x_n_f32(__a, __p) __arm_vdupq_x_n_f32(__a, __p)
+#define vminnmq_x_f16(__a, __b, __p) __arm_vminnmq_x_f16(__a, __b, __p)
+#define vminnmq_x_f32(__a, __b, __p) __arm_vminnmq_x_f32(__a, __b, __p)
+#define vmaxnmq_x_f16(__a, __b, __p) __arm_vmaxnmq_x_f16(__a, __b, __p)
+#define vmaxnmq_x_f32(__a, __b, __p) __arm_vmaxnmq_x_f32(__a, __b, __p)
+#define vabdq_x_f16(__a, __b, __p) __arm_vabdq_x_f16(__a, __b, __p)
+#define vabdq_x_f32(__a, __b, __p) __arm_vabdq_x_f32(__a, __b, __p)
+#define vabsq_x_f16(__a, __p) __arm_vabsq_x_f16(__a, __p)
+#define vabsq_x_f32(__a, __p) __arm_vabsq_x_f32(__a, __p)
+#define vaddq_x_f16(__a, __b, __p) __arm_vaddq_x_f16(__a, __b, __p)
+#define vaddq_x_f32(__a, __b, __p) __arm_vaddq_x_f32(__a, __b, __p)
+#define vaddq_x_n_f16(__a, __b, __p) __arm_vaddq_x_n_f16(__a, __b, __p)
+#define vaddq_x_n_f32(__a, __b, __p) __arm_vaddq_x_n_f32(__a, __b, __p)
+#define vnegq_x_f16(__a, __p) __arm_vnegq_x_f16(__a, __p)
+#define vnegq_x_f32(__a, __p) __arm_vnegq_x_f32(__a, __p)
+#define vmulq_x_f16(__a, __b, __p) __arm_vmulq_x_f16(__a, __b, __p)
+#define vmulq_x_f32(__a, __b, __p) __arm_vmulq_x_f32(__a, __b, __p)
+#define vmulq_x_n_f16(__a, __b, __p) __arm_vmulq_x_n_f16(__a, __b, __p)
+#define vmulq_x_n_f32(__a, __b, __p) __arm_vmulq_x_n_f32(__a, __b, __p)
+#define vsubq_x_f16(__a, __b, __p) __arm_vsubq_x_f16(__a, __b, __p)
+#define vsubq_x_f32(__a, __b, __p) __arm_vsubq_x_f32(__a, __b, __p)
+#define vsubq_x_n_f16(__a, __b, __p) __arm_vsubq_x_n_f16(__a, __b, __p)
+#define vsubq_x_n_f32(__a, __b, __p) __arm_vsubq_x_n_f32(__a, __b, __p)
+#define vcaddq_rot90_x_f16(__a, __b, __p) __arm_vcaddq_rot90_x_f16(__a, __b, __p)
+#define vcaddq_rot90_x_f32(__a, __b, __p) __arm_vcaddq_rot90_x_f32(__a, __b, __p)
+#define vcaddq_rot270_x_f16(__a, __b, __p) __arm_vcaddq_rot270_x_f16(__a, __b, __p)
+#define vcaddq_rot270_x_f32(__a, __b, __p) __arm_vcaddq_rot270_x_f32(__a, __b, __p)
+#define vcmulq_x_f16(__a, __b, __p) __arm_vcmulq_x_f16(__a, __b, __p)
+#define vcmulq_x_f32(__a, __b, __p) __arm_vcmulq_x_f32(__a, __b, __p)
+#define vcmulq_rot90_x_f16(__a, __b, __p) __arm_vcmulq_rot90_x_f16(__a, __b, __p)
+#define vcmulq_rot90_x_f32(__a, __b, __p) __arm_vcmulq_rot90_x_f32(__a, __b, __p)
+#define vcmulq_rot180_x_f16(__a, __b, __p) __arm_vcmulq_rot180_x_f16(__a, __b, __p)
+#define vcmulq_rot180_x_f32(__a, __b, __p) __arm_vcmulq_rot180_x_f32(__a, __b, __p)
+#define vcmulq_rot270_x_f16(__a, __b, __p) __arm_vcmulq_rot270_x_f16(__a, __b, __p)
+#define vcmulq_rot270_x_f32(__a, __b, __p) __arm_vcmulq_rot270_x_f32(__a, __b, __p)
+#define vcvtaq_x_s16_f16(__a, __p) __arm_vcvtaq_x_s16_f16(__a, __p)
+#define vcvtaq_x_s32_f32(__a, __p) __arm_vcvtaq_x_s32_f32(__a, __p)
+#define vcvtaq_x_u16_f16(__a, __p) __arm_vcvtaq_x_u16_f16(__a, __p)
+#define vcvtaq_x_u32_f32(__a, __p) __arm_vcvtaq_x_u32_f32(__a, __p)
+#define vcvtnq_x_s16_f16(__a, __p) __arm_vcvtnq_x_s16_f16(__a, __p)
+#define vcvtnq_x_s32_f32(__a, __p) __arm_vcvtnq_x_s32_f32(__a, __p)
+#define vcvtnq_x_u16_f16(__a, __p) __arm_vcvtnq_x_u16_f16(__a, __p)
+#define vcvtnq_x_u32_f32(__a, __p) __arm_vcvtnq_x_u32_f32(__a, __p)
+#define vcvtpq_x_s16_f16(__a, __p) __arm_vcvtpq_x_s16_f16(__a, __p)
+#define vcvtpq_x_s32_f32(__a, __p) __arm_vcvtpq_x_s32_f32(__a, __p)
+#define vcvtpq_x_u16_f16(__a, __p) __arm_vcvtpq_x_u16_f16(__a, __p)
+#define vcvtpq_x_u32_f32(__a, __p) __arm_vcvtpq_x_u32_f32(__a, __p)
+#define vcvtmq_x_s16_f16(__a, __p) __arm_vcvtmq_x_s16_f16(__a, __p)
+#define vcvtmq_x_s32_f32(__a, __p) __arm_vcvtmq_x_s32_f32(__a, __p)
+#define vcvtmq_x_u16_f16(__a, __p) __arm_vcvtmq_x_u16_f16(__a, __p)
+#define vcvtmq_x_u32_f32(__a, __p) __arm_vcvtmq_x_u32_f32(__a, __p)
+#define vcvtbq_x_f32_f16(__a, __p) __arm_vcvtbq_x_f32_f16(__a, __p)
+#define vcvttq_x_f32_f16(__a, __p) __arm_vcvttq_x_f32_f16(__a, __p)
+#define vcvtq_x_f16_u16(__a, __p) __arm_vcvtq_x_f16_u16(__a, __p)
+#define vcvtq_x_f16_s16(__a, __p) __arm_vcvtq_x_f16_s16(__a, __p)
+#define vcvtq_x_f32_s32(__a, __p) __arm_vcvtq_x_f32_s32(__a, __p)
+#define vcvtq_x_f32_u32(__a, __p) __arm_vcvtq_x_f32_u32(__a, __p)
+#define vcvtq_x_n_f16_s16(__a, __imm6, __p) __arm_vcvtq_x_n_f16_s16(__a, __imm6, __p)
+#define vcvtq_x_n_f16_u16(__a, __imm6, __p) __arm_vcvtq_x_n_f16_u16(__a, __imm6, __p)
+#define vcvtq_x_n_f32_s32(__a, __imm6, __p) __arm_vcvtq_x_n_f32_s32(__a, __imm6, __p)
+#define vcvtq_x_n_f32_u32(__a, __imm6, __p) __arm_vcvtq_x_n_f32_u32(__a, __imm6, __p)
+#define vcvtq_x_s16_f16(__a, __p) __arm_vcvtq_x_s16_f16(__a, __p)
+#define vcvtq_x_s32_f32(__a, __p) __arm_vcvtq_x_s32_f32(__a, __p)
+#define vcvtq_x_u16_f16(__a, __p) __arm_vcvtq_x_u16_f16(__a, __p)
+#define vcvtq_x_u32_f32(__a, __p) __arm_vcvtq_x_u32_f32(__a, __p)
+#define vcvtq_x_n_s16_f16(__a, __imm6, __p) __arm_vcvtq_x_n_s16_f16(__a, __imm6, __p)
+#define vcvtq_x_n_s32_f32(__a, __imm6, __p) __arm_vcvtq_x_n_s32_f32(__a, __imm6, __p)
+#define vcvtq_x_n_u16_f16(__a, __imm6, __p) __arm_vcvtq_x_n_u16_f16(__a, __imm6, __p)
+#define vcvtq_x_n_u32_f32(__a, __imm6, __p) __arm_vcvtq_x_n_u32_f32(__a, __imm6, __p)
+#define vrndq_x_f16(__a, __p) __arm_vrndq_x_f16(__a, __p)
+#define vrndq_x_f32(__a, __p) __arm_vrndq_x_f32(__a, __p)
+#define vrndnq_x_f16(__a, __p) __arm_vrndnq_x_f16(__a, __p)
+#define vrndnq_x_f32(__a, __p) __arm_vrndnq_x_f32(__a, __p)
+#define vrndmq_x_f16(__a, __p) __arm_vrndmq_x_f16(__a, __p)
+#define vrndmq_x_f32(__a, __p) __arm_vrndmq_x_f32(__a, __p)
+#define vrndpq_x_f16(__a, __p) __arm_vrndpq_x_f16(__a, __p)
+#define vrndpq_x_f32(__a, __p) __arm_vrndpq_x_f32(__a, __p)
+#define vrndaq_x_f16(__a, __p) __arm_vrndaq_x_f16(__a, __p)
+#define vrndaq_x_f32(__a, __p) __arm_vrndaq_x_f32(__a, __p)
+#define vrndxq_x_f16(__a, __p) __arm_vrndxq_x_f16(__a, __p)
+#define vrndxq_x_f32(__a, __p) __arm_vrndxq_x_f32(__a, __p)
+#define vandq_x_f16(__a, __b, __p) __arm_vandq_x_f16(__a, __b, __p)
+#define vandq_x_f32(__a, __b, __p) __arm_vandq_x_f32(__a, __b, __p)
+#define vbicq_x_f16(__a, __b, __p) __arm_vbicq_x_f16(__a, __b, __p)
+#define vbicq_x_f32(__a, __b, __p) __arm_vbicq_x_f32(__a, __b, __p)
+#define vbrsrq_x_n_f16(__a, __b, __p) __arm_vbrsrq_x_n_f16(__a, __b, __p)
+#define vbrsrq_x_n_f32(__a, __b, __p) __arm_vbrsrq_x_n_f32(__a, __b, __p)
+#define veorq_x_f16(__a, __b, __p) __arm_veorq_x_f16(__a, __b, __p)
+#define veorq_x_f32(__a, __b, __p) __arm_veorq_x_f32(__a, __b, __p)
+#define vornq_x_f16(__a, __b, __p) __arm_vornq_x_f16(__a, __b, __p)
+#define vornq_x_f32(__a, __b, __p) __arm_vornq_x_f32(__a, __b, __p)
+#define vorrq_x_f16(__a, __b, __p) __arm_vorrq_x_f16(__a, __b, __p)
+#define vorrq_x_f32(__a, __b, __p) __arm_vorrq_x_f32(__a, __b, __p)
+#define vrev32q_x_f16(__a, __p) __arm_vrev32q_x_f16(__a, __p)
+#define vrev64q_x_f16(__a, __p) __arm_vrev64q_x_f16(__a, __p)
+#define vrev64q_x_f32(__a, __p) __arm_vrev64q_x_f32(__a, __p)
+#define vadciq_s32(__a, __b, __carry_out) __arm_vadciq_s32(__a, __b, __carry_out)
+#define vadciq_u32(__a, __b, __carry_out) __arm_vadciq_u32(__a, __b, __carry_out)
+#define vadciq_m_s32(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m_s32(__inactive, __a, __b, __carry_out, __p)
+#define vadciq_m_u32(__inactive, __a, __b, __carry_out, __p) __arm_vadciq_m_u32(__inactive, __a, __b, __carry_out, __p)
+#define vadcq_s32(__a, __b, __carry) __arm_vadcq_s32(__a, __b, __carry)
+#define vadcq_u32(__a, __b, __carry) __arm_vadcq_u32(__a, __b, __carry)
+#define vadcq_m_s32(__inactive, __a, __b, __carry, __p) __arm_vadcq_m_s32(__inactive, __a, __b, __carry, __p)
+#define vadcq_m_u32(__inactive, __a, __b, __carry, __p) __arm_vadcq_m_u32(__inactive, __a, __b, __carry, __p)
+#define vsbciq_s32(__a, __b, __carry_out) __arm_vsbciq_s32(__a, __b, __carry_out)
+#define vsbciq_u32(__a, __b, __carry_out) __arm_vsbciq_u32(__a, __b, __carry_out)
+#define vsbciq_m_s32(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m_s32(__inactive, __a, __b, __carry_out, __p)
+#define vsbciq_m_u32(__inactive, __a, __b, __carry_out, __p) __arm_vsbciq_m_u32(__inactive, __a, __b, __carry_out, __p)
+#define vsbcq_s32(__a, __b, __carry) __arm_vsbcq_s32(__a, __b, __carry)
+#define vsbcq_u32(__a, __b, __carry) __arm_vsbcq_u32(__a, __b, __carry)
+#define vsbcq_m_s32(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m_s32(__inactive, __a, __b, __carry, __p)
+#define vsbcq_m_u32(__inactive, __a, __b, __carry, __p) __arm_vsbcq_m_u32(__inactive, __a, __b, __carry, __p)
+#define vst1q_p_u8(__addr, __value, __p) __arm_vst1q_p_u8(__addr, __value, __p)
+#define vst1q_p_s8(__addr, __value, __p) __arm_vst1q_p_s8(__addr, __value, __p)
+#define vst2q_s8(__addr, __value) __arm_vst2q_s8(__addr, __value)
+#define vst2q_u8(__addr, __value) __arm_vst2q_u8(__addr, __value)
+#define vld1q_z_u8(__base, __p) __arm_vld1q_z_u8(__base, __p)
+#define vld1q_z_s8(__base, __p) __arm_vld1q_z_s8(__base, __p)
+#define vld2q_s8(__addr) __arm_vld2q_s8(__addr)
+#define vld2q_u8(__addr) __arm_vld2q_u8(__addr)
+#define vld4q_s8(__addr) __arm_vld4q_s8(__addr)
+#define vld4q_u8(__addr) __arm_vld4q_u8(__addr)
+#define vst1q_p_u16(__addr, __value, __p) __arm_vst1q_p_u16(__addr, __value, __p)
+#define vst1q_p_s16(__addr, __value, __p) __arm_vst1q_p_s16(__addr, __value, __p)
+#define vst2q_s16(__addr, __value) __arm_vst2q_s16(__addr, __value)
+#define vst2q_u16(__addr, __value) __arm_vst2q_u16(__addr, __value)
+#define vld1q_z_u16(__base, __p) __arm_vld1q_z_u16(__base, __p)
+#define vld1q_z_s16(__base, __p) __arm_vld1q_z_s16(__base, __p)
+#define vld2q_s16(__addr) __arm_vld2q_s16(__addr)
+#define vld2q_u16(__addr) __arm_vld2q_u16(__addr)
+#define vld4q_s16(__addr) __arm_vld4q_s16(__addr)
+#define vld4q_u16(__addr) __arm_vld4q_u16(__addr)
+#define vst1q_p_u32(__addr, __value, __p) __arm_vst1q_p_u32(__addr, __value, __p)
+#define vst1q_p_s32(__addr, __value, __p) __arm_vst1q_p_s32(__addr, __value, __p)
+#define vst2q_s32(__addr, __value) __arm_vst2q_s32(__addr, __value)
+#define vst2q_u32(__addr, __value) __arm_vst2q_u32(__addr, __value)
+#define vld1q_z_u32(__base, __p) __arm_vld1q_z_u32(__base, __p)
+#define vld1q_z_s32(__base, __p) __arm_vld1q_z_s32(__base, __p)
+#define vld2q_s32(__addr) __arm_vld2q_s32(__addr)
+#define vld2q_u32(__addr) __arm_vld2q_u32(__addr)
+#define vld4q_s32(__addr) __arm_vld4q_s32(__addr)
+#define vld4q_u32(__addr) __arm_vld4q_u32(__addr)
+#define vld4q_f16(__addr) __arm_vld4q_f16(__addr)
+#define vld2q_f16(__addr) __arm_vld2q_f16(__addr)
+#define vld1q_z_f16(__base, __p) __arm_vld1q_z_f16(__base, __p)
+#define vst2q_f16(__addr, __value) __arm_vst2q_f16(__addr, __value)
+#define vst1q_p_f16(__addr, __value, __p) __arm_vst1q_p_f16(__addr, __value, __p)
+#define vld4q_f32(__addr) __arm_vld4q_f32(__addr)
+#define vld2q_f32(__addr) __arm_vld2q_f32(__addr)
+#define vld1q_z_f32(__base, __p) __arm_vld1q_z_f32(__base, __p)
+#define vst2q_f32(__addr, __value) __arm_vst2q_f32(__addr, __value)
+#define vst1q_p_f32(__addr, __value, __p) __arm_vst1q_p_f32(__addr, __value, __p)
+#define vsetq_lane_f16(__a, __b, __idx) __arm_vsetq_lane_f16(__a, __b, __idx)
+#define vsetq_lane_f32(__a, __b, __idx) __arm_vsetq_lane_f32(__a, __b, __idx)
+#define vsetq_lane_s16(__a, __b, __idx) __arm_vsetq_lane_s16(__a, __b, __idx)
+#define vsetq_lane_s32(__a, __b, __idx) __arm_vsetq_lane_s32(__a, __b, __idx)
+#define vsetq_lane_s8(__a, __b, __idx) __arm_vsetq_lane_s8(__a, __b, __idx)
+#define vsetq_lane_s64(__a, __b, __idx) __arm_vsetq_lane_s64(__a, __b, __idx)
+#define vsetq_lane_u8(__a, __b, __idx) __arm_vsetq_lane_u8(__a, __b, __idx)
+#define vsetq_lane_u16(__a, __b, __idx) __arm_vsetq_lane_u16(__a, __b, __idx)
+#define vsetq_lane_u32(__a, __b, __idx) __arm_vsetq_lane_u32(__a, __b, __idx)
+#define vsetq_lane_u64(__a, __b, __idx) __arm_vsetq_lane_u64(__a, __b, __idx)
+#define vgetq_lane_f16(__a, __idx) __arm_vgetq_lane_f16(__a, __idx)
+#define vgetq_lane_f32(__a, __idx) __arm_vgetq_lane_f32(__a, __idx)
+#define vgetq_lane_s16(__a, __idx) __arm_vgetq_lane_s16(__a, __idx)
+#define vgetq_lane_s32(__a, __idx) __arm_vgetq_lane_s32(__a, __idx)
+#define vgetq_lane_s8(__a, __idx) __arm_vgetq_lane_s8(__a, __idx)
+#define vgetq_lane_s64(__a, __idx) __arm_vgetq_lane_s64(__a, __idx)
+#define vgetq_lane_u8(__a, __idx) __arm_vgetq_lane_u8(__a, __idx)
+#define vgetq_lane_u16(__a, __idx) __arm_vgetq_lane_u16(__a, __idx)
+#define vgetq_lane_u32(__a, __idx) __arm_vgetq_lane_u32(__a, __idx)
+#define vgetq_lane_u64(__a, __idx) __arm_vgetq_lane_u64(__a, __idx)
+#define sqrshr(__p0, __p1) __arm_sqrshr(__p0, __p1)
+#define sqrshrl(__p0, __p1) __arm_sqrshrl(__p0, __p1)
+#define sqrshrl_sat48(__p0, __p1) __arm_sqrshrl_sat48(__p0, __p1)
+#define sqshl(__p0, __p1) __arm_sqshl(__p0, __p1)
+#define sqshll(__p0, __p1) __arm_sqshll(__p0, __p1)
+#define srshr(__p0, __p1) __arm_srshr(__p0, __p1)
+#define srshrl(__p0, __p1) __arm_srshrl(__p0, __p1)
+#define uqrshl(__p0, __p1) __arm_uqrshl(__p0, __p1)
+#define uqrshll(__p0, __p1) __arm_uqrshll(__p0, __p1)
+#define uqrshll_sat48(__p0, __p1) __arm_uqrshll_sat48(__p0, __p1)
+#define uqshl(__p0, __p1) __arm_uqshl(__p0, __p1)
+#define uqshll(__p0, __p1) __arm_uqshll(__p0, __p1)
+#define urshr(__p0, __p1) __arm_urshr(__p0, __p1)
+#define urshrl(__p0, __p1) __arm_urshrl(__p0, __p1)
+#define lsll(__p0, __p1) __arm_lsll(__p0, __p1)
+#define asrl(__p0, __p1) __arm_asrl(__p0, __p1)
+#define vshlcq_m_s8(__a, __b, __imm, __p) __arm_vshlcq_m_s8(__a, __b, __imm, __p)
+#define vshlcq_m_u8(__a, __b, __imm, __p) __arm_vshlcq_m_u8(__a, __b, __imm, __p)
+#define vshlcq_m_s16(__a, __b, __imm, __p) __arm_vshlcq_m_s16(__a, __b, __imm, __p)
+#define vshlcq_m_u16(__a, __b, __imm, __p) __arm_vshlcq_m_u16(__a, __b, __imm, __p)
+#define vshlcq_m_s32(__a, __b, __imm, __p) __arm_vshlcq_m_s32(__a, __b, __imm, __p)
+#define vshlcq_m_u32(__a, __b, __imm, __p) __arm_vshlcq_m_u32(__a, __b, __imm, __p)
+#endif
+
+/* For big-endian, GCC's vector indices are reversed within each 64 bits
+ compared to the architectural lane indices used by MVE intrinsics. */
+#define __ARM_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))
+#ifdef __ARM_BIG_ENDIAN
+#define __ARM_LANEQ(__vec, __idx) (__idx ^ (__ARM_NUM_LANES(__vec)/2 - 1))
+#else
+#define __ARM_LANEQ(__vec, __idx) __idx
+#endif
+#define __ARM_CHECK_LANEQ(__vec, __idx) \
+ __builtin_arm_lane_check (__ARM_NUM_LANES(__vec), \
+ __ARM_LANEQ(__vec, __idx))
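+
+/* For example, with a 16-lane uint8x16_t on big-endian, __ARM_LANEQ
+   XORs the index with 16/2 - 1 == 7: lane 0 maps to lane 7 and lane 8
+   maps to lane 15, mirroring the lanes within each 64-bit half.  */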
+
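+/* The vst4q variants below marshal the four-vector tuple through a
+   union, reinterpreting it as the opaque __builtin_neon_xi mode that
+   the underlying builtin takes in place of a structure type.  */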
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q_s8 (int8_t * __addr, int8x16x4_t __value)
+{
+ union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst4qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q_s16 (int16_t * __addr, int16x8x4_t __value)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst4qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q_s32 (int32_t * __addr, int32x4x4_t __value)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst4qv4si ((__builtin_neon_si *) __addr, __rv.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q_u8 (uint8_t * __addr, uint8x16x4_t __value)
+{
+ union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst4qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q_u16 (uint16_t * __addr, uint16x8x4_t __value)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst4qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q_u32 (uint32_t * __addr, uint32x4x4_t __value)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst4qv4si ((__builtin_neon_si *) __addr, __rv.__o);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n_s8 (int8_t __a)
+{
+ return __builtin_mve_vdupq_n_sv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n_s16 (int16_t __a)
+{
+ return __builtin_mve_vdupq_n_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n_s32 (int32_t __a)
+{
+ return __builtin_mve_vdupq_n_sv4si (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_s8 (int8x16_t __a)
+{
+ return __builtin_mve_vabsq_sv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_s16 (int16x8_t __a)
+{
+ return __builtin_mve_vabsq_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_s32 (int32x4_t __a)
+{
+ return __builtin_mve_vabsq_sv4si (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_s8 (int8x16_t __a)
+{
+ return __builtin_mve_vclsq_sv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_s16 (int16x8_t __a)
+{
+ return __builtin_mve_vclsq_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_s32 (int32x4_t __a)
+{
+ return __builtin_mve_vclsq_sv4si (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_s8 (int8x16_t __a)
+{
+ return __builtin_mve_vclzq_sv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_s16 (int16x8_t __a)
+{
+ return __builtin_mve_vclzq_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_s32 (int32x4_t __a)
+{
+ return __builtin_mve_vclzq_sv4si (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_s8 (int8x16_t __a)
+{
+ return __builtin_mve_vnegq_sv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_s16 (int16x8_t __a)
+{
+ return __builtin_mve_vnegq_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_s32 (int32x4_t __a)
+{
+ return __builtin_mve_vnegq_sv4si (__a);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvq_s32 (int32x4_t __a)
+{
+ return __builtin_mve_vaddlvq_sv4si (__a);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_s8 (int8x16_t __a)
+{
+ return __builtin_mve_vaddvq_sv16qi (__a);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_s16 (int16x8_t __a)
+{
+ return __builtin_mve_vaddvq_sv8hi (__a);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_s32 (int32x4_t __a)
+{
+ return __builtin_mve_vaddvq_sv4si (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_s8 (int8x16_t __a)
+{
+ return __builtin_mve_vmovlbq_sv16qi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_s16 (int16x8_t __a)
+{
+ return __builtin_mve_vmovlbq_sv8hi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_s8 (int8x16_t __a)
+{
+ return __builtin_mve_vmovltq_sv16qi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_s16 (int16x8_t __a)
+{
+ return __builtin_mve_vmovltq_sv8hi (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_s8 (int8x16_t __a)
+{
+ return __builtin_mve_vmvnq_sv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_s16 (int16x8_t __a)
+{
+ return __builtin_mve_vmvnq_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_s32 (int32x4_t __a)
+{
+ return __builtin_mve_vmvnq_sv4si (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_n_s16 (const int16_t __imm)
+{
+ return __builtin_mve_vmvnq_n_sv8hi (__imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_n_s32 (const int32_t __imm)
+{
+ return __builtin_mve_vmvnq_n_sv4si (__imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q_s8 (int8x16_t __a)
+{
+ return __builtin_mve_vrev16q_sv16qi (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_s8 (int8x16_t __a)
+{
+ return __builtin_mve_vrev32q_sv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_s16 (int16x8_t __a)
+{
+ return __builtin_mve_vrev32q_sv8hi (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_s8 (int8x16_t __a)
+{
+ return __builtin_mve_vrev64q_sv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_s16 (int16x8_t __a)
+{
+ return __builtin_mve_vrev64q_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_s32 (int32x4_t __a)
+{
+ return __builtin_mve_vrev64q_sv4si (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqabsq_s8 (int8x16_t __a)
+{
+ return __builtin_mve_vqabsq_sv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqabsq_s16 (int16x8_t __a)
+{
+ return __builtin_mve_vqabsq_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqabsq_s32 (int32x4_t __a)
+{
+ return __builtin_mve_vqabsq_sv4si (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqnegq_s8 (int8x16_t __a)
+{
+ return __builtin_mve_vqnegq_sv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqnegq_s16 (int16x8_t __a)
+{
+ return __builtin_mve_vqnegq_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqnegq_s32 (int32x4_t __a)
+{
+ return __builtin_mve_vqnegq_sv4si (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_u8 (uint8x16_t __a)
+{
+ return __builtin_mve_vrev64q_uv16qi (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_u16 (uint16x8_t __a)
+{
+ return __builtin_mve_vrev64q_uv8hi (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_u32 (uint32x4_t __a)
+{
+ return __builtin_mve_vrev64q_uv4si (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_u8 (uint8x16_t __a)
+{
+ return __builtin_mve_vmvnq_uv16qi (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_u16 (uint16x8_t __a)
+{
+ return __builtin_mve_vmvnq_uv8hi (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_u32 (uint32x4_t __a)
+{
+ return __builtin_mve_vmvnq_uv4si (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n_u8 (uint8_t __a)
+{
+ return __builtin_mve_vdupq_n_uv16qi (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n_u16 (uint16_t __a)
+{
+ return __builtin_mve_vdupq_n_uv8hi (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n_u32 (uint32_t __a)
+{
+ return __builtin_mve_vdupq_n_uv4si (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_u8 (uint8x16_t __a)
+{
+ return __builtin_mve_vclzq_uv16qi (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_u16 (uint16x8_t __a)
+{
+ return __builtin_mve_vclzq_uv8hi (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_u32 (uint32x4_t __a)
+{
+ return __builtin_mve_vclzq_uv4si (__a);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_u8 (uint8x16_t __a)
+{
+ return __builtin_mve_vaddvq_uv16qi (__a);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_u16 (uint16x8_t __a)
+{
+ return __builtin_mve_vaddvq_uv8hi (__a);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_u32 (uint32x4_t __a)
+{
+ return __builtin_mve_vaddvq_uv4si (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_u8 (uint8x16_t __a)
+{
+ return __builtin_mve_vrev32q_uv16qi (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_u16 (uint16x8_t __a)
+{
+ return __builtin_mve_vrev32q_uv8hi (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_u8 (uint8x16_t __a)
+{
+ return __builtin_mve_vmovltq_uv16qi (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_u16 (uint16x8_t __a)
+{
+ return __builtin_mve_vmovltq_uv8hi (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_u8 (uint8x16_t __a)
+{
+ return __builtin_mve_vmovlbq_uv16qi (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_u16 (uint16x8_t __a)
+{
+ return __builtin_mve_vmovlbq_uv8hi (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_n_u16 (const int __imm)
+{
+ return __builtin_mve_vmvnq_n_uv8hi (__imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_n_u32 (const int __imm)
+{
+ return __builtin_mve_vmvnq_n_uv4si (__imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q_u8 (uint8x16_t __a)
+{
+ return __builtin_mve_vrev16q_uv16qi (__a);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvq_u32 (uint32x4_t __a)
+{
+ return __builtin_mve_vaddlvq_uv4si (__a);
+}
+
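+/* The vctpNq intrinsics build a tail predicate from a scalar count:
+   lanes whose index is below __a are enabled and the remaining lanes
+   are disabled, as for the MVE VCTP instruction.  */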
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vctp16q (uint32_t __a)
+{
+ return __builtin_mve_vctp16qv8bi (__a);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vctp32q (uint32_t __a)
+{
+ return __builtin_mve_vctp32qv4bi (__a);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vctp64q (uint32_t __a)
+{
+ return __builtin_mve_vctp64qv2qi (__a);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vctp8q (uint32_t __a)
+{
+ return __builtin_mve_vctp8qv16bi (__a);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpnot (mve_pred16_t __a)
+{
+ return __builtin_mve_vpnotv16bi (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcreateq_u8 (uint64_t __a, uint64_t __b)
+{
+ return __builtin_mve_vcreateq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcreateq_u16 (uint64_t __a, uint64_t __b)
+{
+ return __builtin_mve_vcreateq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcreateq_u32 (uint64_t __a, uint64_t __b)
+{
+ return __builtin_mve_vcreateq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcreateq_u64 (uint64_t __a, uint64_t __b)
+{
+ return __builtin_mve_vcreateq_uv2di (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcreateq_s8 (uint64_t __a, uint64_t __b)
+{
+ return __builtin_mve_vcreateq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcreateq_s16 (uint64_t __a, uint64_t __b)
+{
+ return __builtin_mve_vcreateq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcreateq_s32 (uint64_t __a, uint64_t __b)
+{
+ return __builtin_mve_vcreateq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcreateq_s64 (uint64_t __a, uint64_t __b)
+{
+ return __builtin_mve_vcreateq_sv2di (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_n_s8 (int8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vshrq_n_sv16qi (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_n_s16 (int16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vshrq_n_sv8hi (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_n_s32 (int32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vshrq_n_sv4si (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_n_u8 (uint8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vshrq_n_uv16qi (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_n_u16 (uint16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vshrq_n_uv8hi (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_n_u32 (uint32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vshrq_n_uv4si (__a, __imm);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvq_p_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddlvq_p_sv4si (__a, __p);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvq_p_u32 (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddlvq_p_uv4si (__a, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcmpneq_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcmpneq_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcmpneq_v4si (__a, __b);
+}
+
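+/* Inequality comparisons are sign-agnostic, so the unsigned variants
+   cast their operands and reuse the signed builtin.  */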
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vcmpneq_v16qi ((int8x16_t)__a, (int8x16_t)__b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vcmpneq_v8hi ((int16x8_t)__a, (int16x8_t)__b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vcmpneq_v4si ((int32x4_t)__a, (int32x4_t)__b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vshlq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vshlq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vshlq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vshlq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vshlq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vshlq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vsubq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vsubq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vrmulhq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vrhaddq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vqsubq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vqsubq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vqaddq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vqaddq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vorrq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vornq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmulq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vmulq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmulltq_int_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmullbq_int_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmulhq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmladavq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_u8 (uint8_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vminvq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vminq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_u8 (uint8_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmaxvq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmaxq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vhsubq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vhsubq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vhaddq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vhaddq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_veorq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vcmpneq_n_v16qi ((int8x16_t)__a, (int8_t)__b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vcmphiq_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vcmphiq_n_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vcmpeqq_v16qi ((int8x16_t)__a, (int8x16_t)__b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_v16qi ((int8x16_t)__a, (int8_t)__b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vcmpcsq_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vcmpcsq_n_v16qi (__a, __b);
+}
+
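+/* The complex-add rotations are likewise bit-identical for signed and
+   unsigned elements, so the unsigned forms cast through the signed
+   vector types around a shared builtin.  */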
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)
+ __builtin_mve_vcaddq_rot90v16qi ((int8x16_t)__a, (int8x16_t)__b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)
+ __builtin_mve_vcaddq_rot270v16qi ((int8x16_t)__a, (int8x16_t)__b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vbicq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vandq_uv16qi (__a, __b);
+}
+
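+/* Intrinsics with a _p suffix take an mve_pred16_t and operate only on
+   predicated-true lanes; for vaddvq_p the inactive lanes simply do not
+   contribute to the scalar sum.  */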
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvq_p_uv16qi (__a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_u8 (uint32_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vaddvaq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vaddq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vabdq_uv16qi (__a, __b);
+}
+
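+/* In the shift group below the shift count is signed and a negative
+   count shifts right.  The plain vector forms shift each lane by the
+   corresponding lane of the second operand; the _r forms, and the _n
+   forms here that take an int32_t, shift every lane by one scalar
+   value.  A leading r adds rounding; a leading q saturates to the
+   lane type.  */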
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r_u8 (uint8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vshlq_r_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vrshlq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_n_u8 (uint8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vrshlq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqshlq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r_u8 (uint8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vqshlq_r_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqrshlq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_n_u8 (uint8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrshlq_n_uv16qi (__a, __b);
+}
+
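+/* The a/av variants fold an absolute value into the min/max: vminaq
+   takes the lane-wise minimum of an unsigned vector and the absolute
+   values of a signed vector, and vminavq reduces that across the
+   vector against an unsigned scalar (vmaxaq and vmaxavq likewise for
+   the maximum).  */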
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq_s8 (uint8_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vminavq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq_s8 (uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vminaq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq_s8 (uint8_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmaxavq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq_s8 (uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmaxaq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_u8 (uint8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_uv16qi (__a, __b);
+}
+
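+/* The _n forms below require __imm to be an integer constant
+   expression; per ACLE the valid range is 0..(lane bits - 1) for the
+   left shifts and 1..lane bits for the right shifts.  vqshluq, further
+   down, is the signed-input, unsigned-output saturating left shift.  */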
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n_u8 (uint8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vshlq_n_uv16qi (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_n_u8 (uint8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vrshrq_n_uv16qi (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n_u8 (uint8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vqshlq_n_uv16qi (__a, __imm);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vcmpneq_n_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcmpltq_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vcmpltq_n_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcmpleq_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vcmpleq_n_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcmpgtq_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vcmpgtq_n_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcmpgeq_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vcmpgeq_n_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcmpeqq_v16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_v16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_n_s8 (int8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vqshluq_n_sv16qi (__a, __imm);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvq_p_sv16qi (__a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vsubq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vsubq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r_s8 (int8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vshlq_r_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vrshlq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_n_s8 (int8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vrshlq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vrmulhq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vrhaddq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqsubq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vqsubq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqshlq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r_s8 (int8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vqshlq_r_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqrshlq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_n_s8 (int8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrshlq_n_sv16qi (__a, __b);
+}
+
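+/* vqdmulhq returns the high half of the doubled product,
+   sat((2 * a * b) >> lane bits) -- the classic fixed-point multiply --
+   and vqrdmulhq adds a rounding constant before the shift.  */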
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqrdmulhq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vqrdmulhq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqdmulhq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vqdmulhq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqaddq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vqaddq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vorrq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vornq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmulq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vmulq_n_sv16qi (__a, __b);
+}
+
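+/* vmullbq_int and vmulltq_int are widening multiplies: the b form
+   multiplies the bottom (even-indexed) lanes and the t form the top
+   (odd-indexed) lanes, each producing a vector of double-width lanes.  */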
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmulltq_int_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmullbq_int_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmulhq_sv16qi (__a, __b);
+}
+
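+/* vmladavq multiplies corresponding lanes and accumulates every
+   product into one 32-bit scalar; vmlsdavq instead subtracts the
+   odd-lane products, and the x-suffixed forms first exchange adjacent
+   lanes of the second operand (together these support complex
+   multiply reductions).  Illustrative use (editorial sketch, not part
+   of the toolchain header), written with the public ACLE names that
+   map onto these __arm_ implementations:
+
+     #include <arm_mve.h>
+
+     int32_t dot16_s8 (const int8_t *x, const int8_t *y)
+     {
+       int8x16_t vx = vld1q_s8 (x);    // 16 lanes of each input
+       int8x16_t vy = vld1q_s8 (y);
+       return vmladavq_s8 (vx, vy);    // sum of all 16 lane products
+     }
+*/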
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmlsdavxq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmlsdavq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmladavxq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmladavq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_s8 (int8_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vminvq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vminq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_s8 (int8_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmaxvq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmaxq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vhsubq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vhsubq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vhcaddq_rot90_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vhcaddq_rot270_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vhaddq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vhaddq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_veorq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcaddq_rot90v16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcaddq_rot270v16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_s8 (int8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vbicq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vandq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_s8 (int32_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vaddvaq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vaddq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vabdq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n_s8 (int8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vshlq_n_sv16qi (__a, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_n_s8 (int8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vrshrq_n_sv16qi (__a, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n_s8 (int8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vqshlq_n_sv16qi (__a, __imm);
+}
+
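+/* The 16-bit element variants below repeat the same operation set for
+   uint16x8_t/int16x8_t (eight lanes), mapping onto the v8hi builtins.  */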
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vsubq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vsubq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vrmulhq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vrhaddq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vqsubq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vqsubq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vqaddq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vqaddq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vorrq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vornq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmulq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vmulq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmulltq_int_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmullbq_int_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmulhq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmladavq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_u16 (uint16_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vminvq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vminq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_u16 (uint16_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmaxvq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmaxq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vhsubq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vhsubq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vhaddq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vhaddq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_veorq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vcmpneq_n_v8hi ((int16x8_t)__a, (int16_t)__b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vcmphiq_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vcmphiq_n_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vcmpeqq_v8hi ((int16x8_t)__a, (int16x8_t)__b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_v8hi ((int16x8_t)__a, (int16_t)__b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vcmpcsq_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vcmpcsq_n_v8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)
+ __builtin_mve_vcaddq_rot90v8hi ((int16x8_t)__a, (int16x8_t)__b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)
+ __builtin_mve_vcaddq_rot270v8hi ((int16x8_t)__a, (int16x8_t)__b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vbicq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vandq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvq_p_uv8hi (__a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_u16 (uint32_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vaddvaq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vaddq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vabdq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r_u16 (uint16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vshlq_r_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vrshlq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_n_u16 (uint16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vrshlq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqshlq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r_u16 (uint16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vqshlq_r_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqrshlq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_n_u16 (uint16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrshlq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq_s16 (uint16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vminavq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq_s16 (uint16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vminaq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq_s16 (uint16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmaxavq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq_s16 (uint16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmaxaq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_u16 (uint16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n_u16 (uint16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vshlq_n_uv8hi (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_n_u16 (uint16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vrshrq_n_uv8hi (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n_u16 (uint16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vqshlq_n_uv8hi (__a, __imm);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vcmpneq_n_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcmpltq_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vcmpltq_n_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcmpleq_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vcmpleq_n_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcmpgtq_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vcmpgtq_n_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcmpgeq_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vcmpgeq_n_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcmpeqq_v8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_v8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_n_s16 (int16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vqshluq_n_sv8hi (__a, __imm);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvq_p_sv8hi (__a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vsubq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vsubq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r_s16 (int16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vshlq_r_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vrshlq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_n_s16 (int16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vrshlq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vrmulhq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vrhaddq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqsubq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vqsubq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqshlq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r_s16 (int16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vqshlq_r_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqrshlq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_n_s16 (int16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrshlq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqrdmulhq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vqrdmulhq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqdmulhq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vqdmulhq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqaddq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vqaddq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vorrq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vornq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmulq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vmulq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmulltq_int_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmullbq_int_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmulhq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmlsdavxq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmlsdavq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmladavxq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmladavq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_s16 (int16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vminvq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vminq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_s16 (int16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmaxvq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmaxq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vhsubq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vhsubq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vhcaddq_rot90_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vhcaddq_rot270_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vhaddq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vhaddq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_veorq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcaddq_rot90v8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcaddq_rot270v8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_s16 (int16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vbicq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vandq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_s16 (int32_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vaddvaq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vaddq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vabdq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n_s16 (int16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vshlq_n_sv8hi (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_n_s16 (int16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vrshrq_n_sv8hi (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n_s16 (int16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vqshlq_n_sv8hi (__a, __imm);
+}
+
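+/* The 32-bit element variants below repeat the pattern once more for
+   uint32x4_t/int32x4_t (four lanes, v4si builtins); note that the
+   widening multiplies now produce 64-bit-lane results.  */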
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vsubq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vsubq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vrmulhq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vrhaddq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vqsubq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vqsubq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vqaddq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vqaddq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vorrq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vornq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmulq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vmulq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmulltq_int_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmullbq_int_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmulhq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmladavq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_u32 (uint32_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vminvq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vminq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_u32 (uint32_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmaxvq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmaxq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vhsubq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vhsubq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vhaddq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vhaddq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_veorq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vcmpneq_n_v4si ((int32x4_t)__a, (int32_t)__b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vcmphiq_v4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vcmphiq_n_v4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vcmpeqq_v4si ((int32x4_t)__a, (int32x4_t)__b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_v4si ((int32x4_t)__a, (int32_t)__b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vcmpcsq_v4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vcmpcsq_n_v4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)
+ __builtin_mve_vcaddq_rot90v4si ((int32x4_t)__a, (int32x4_t)__b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)
+ __builtin_mve_vcaddq_rot270v4si ((int32x4_t)__a, (int32x4_t)__b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vbicq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vandq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p_u32 (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvq_p_uv4si (__a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_u32 (uint32_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vaddvaq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vaddq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vabdq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r_u32 (uint32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vshlq_r_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vrshlq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_n_u32 (uint32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vrshlq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqshlq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r_u32 (uint32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqshlq_r_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqrshlq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_n_u32 (uint32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrshlq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq_s32 (uint32_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vminavq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq_s32 (uint32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vminaq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq_s32 (uint32_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmaxavq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq_s32 (uint32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmaxaq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_u32 (uint32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n_u32 (uint32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vshlq_n_uv4si (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_n_u32 (uint32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vrshrq_n_uv4si (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n_u32 (uint32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vqshlq_n_uv4si (__a, __imm);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vcmpneq_n_v4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcmpltq_v4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vcmpltq_n_v4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcmpleq_v4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vcmpleq_n_v4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcmpgtq_v4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vcmpgtq_n_v4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcmpgeq_v4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vcmpgeq_n_v4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcmpeqq_v4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_v4si (__a, __b);
+}
+
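+/* [Editorial note; not part of the imported header.]  Each compare
+   above yields an mve_pred16_t: a 16-bit mask with one bit per byte of
+   the vector, so a matching 32-bit lane sets four consecutive bits, and
+   the _n forms test every lane against the scalar __b.  A hedged
+   sketch, assuming __arm_vpselq_s32 (declared elsewhere in this header)
+   is in scope:
+
+     mve_pred16_t p = __arm_vcmpgeq_n_s32 (v, 0);
+     int32x4_t r = __arm_vpselq_s32 (v, fallback, p);
+*/
+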
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_n_s32 (int32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vqshluq_n_sv4si (__a, __imm);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvq_p_sv4si (__a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vsubq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vsubq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vshlq_r_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vrshlq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vrshlq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vrmulhq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vrhaddq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqsubq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqsubq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqshlq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqshlq_r_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqrshlq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrshlq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqrdmulhq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrdmulhq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqdmulhq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqdmulhq_n_sv4si (__a, __b);
+}
+
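+/* [Editorial note; not part of the imported header.]  __arm_vqdmulhq_s32
+   is the classic Q31 fixed-point multiply, the saturated high half of
+   2 * __a[i] * __b[i]; the vqrdmulh forms also round before the high
+   half is taken.  */
+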
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqaddq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqaddq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vorrq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vornq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmulq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vmulq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmulltq_int_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmullbq_int_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmulhq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmlsdavxq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmlsdavq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmladavxq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmladavq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_s32 (int32_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vminvq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vminq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_s32 (int32_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmaxvq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmaxq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vhsubq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vhsubq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vhcaddq_rot90_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vhcaddq_rot270_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vhaddq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vhaddq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_veorq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcaddq_rot90v4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcaddq_rot270v4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vbicq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vandq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_s32 (int32_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vaddvaq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vaddq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vabdq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n_s32 (int32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vshlq_n_sv4si (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_n_s32 (int32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vrshrq_n_sv4si (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n_s32 (int32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vqshlq_n_sv4si (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq_u16 (uint8x16_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vqmovntq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq_u16 (uint8x16_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vqmovnbq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly_p8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmulltq_poly_pv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly_p8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmullbq_poly_pv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq_u16 (uint8x16_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmovntq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq_u16 (uint8x16_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmovnbq_uv8hi (__a, __b);
+}
+
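+/* [Editorial note; not part of the imported header.]  The vmovnb/vmovnt
+   pairs narrow each wide lane of __b and write the results into the
+   even (bottom) or odd (top) positions of __a, leaving the remaining
+   positions untouched, so a b/t pair interleaves two narrowed vectors
+   into one.  The vqmovn forms saturate instead of truncating, and
+   vqmovun narrows signed input to unsigned with saturation.  */
+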
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmlaldavq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovuntq_s16 (uint8x16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqmovuntq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovunbq_s16 (uint8x16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqmovunbq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_n_u8 (uint8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vshlltq_n_uv16qi (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_n_u8 (uint8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vshllbq_n_uv16qi (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_n_u16 (uint16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vorrq_n_uv8hi (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_n_u16 (uint16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vbicq_n_uv8hi (__a, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq_s16 (int8x16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqmovntq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq_s16 (int8x16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqmovnbq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqdmulltq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vqdmulltq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqdmullbq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vqdmullbq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq_s16 (int8x16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmovntq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq_s16 (int8x16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmovnbq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmlsldavxq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmlsldavq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmlaldavxq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmlaldavq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_n_s8 (int8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vshlltq_n_sv16qi (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_n_s8 (int8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vshllbq_n_sv16qi (__a, __imm);
+}
+
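+/* [Editorial note; not part of the imported header.]  vshllb/vshllt
+   widen the bottom or top halves of the lanes of __a to double width
+   and shift the widened values left by __imm, mirroring the
+   vmovnb/vmovnt narrowing pairs above.  */
+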
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_n_s16 (int16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vorrq_n_sv8hi (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_n_s16 (int16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vbicq_n_sv8hi (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq_u32 (uint16x8_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vqmovntq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq_u32 (uint16x8_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vqmovnbq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly_p16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmulltq_poly_pv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly_p16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmullbq_poly_pv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq_u32 (uint16x8_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmovntq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq_u32 (uint16x8_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmovnbq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmlaldavq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovuntq_s32 (uint16x8_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqmovuntq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovunbq_s32 (uint16x8_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqmovunbq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_n_u16 (uint16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vshlltq_n_uv8hi (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_n_u16 (uint16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vshllbq_n_uv8hi (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_n_u32 (uint32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vorrq_n_uv4si (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_n_u32 (uint32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vbicq_n_uv4si (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq_s32 (int16x8_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqmovntq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq_s32 (int16x8_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqmovnbq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqdmulltq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqdmulltq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqdmullbq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqdmullbq_n_sv4si (__a, __b);
+}
+
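+/* [Editorial note; not part of the imported header.]  vqdmullb/vqdmullt
+   multiply the bottom or top 32-bit lanes of the operands, double and
+   saturate, and return the full 64-bit products; the _n forms use the
+   scalar __b for every lane.  */
+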
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq_s32 (int16x8_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmovntq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq_s32 (int16x8_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmovnbq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmlsldavxq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmlsldavq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmlaldavxq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmlaldavq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_n_s16 (int16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vshlltq_n_sv8hi (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_n_s16 (int16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vshllbq_n_sv8hi (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_n_s32 (int32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vorrq_n_sv4si (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_n_s32 (int32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vbicq_n_sv4si (__a, __imm);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vrmlaldavhq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vctp8q_m (uint32_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vctp8q_mv16bi (__a, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vctp64q_m (uint32_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vctp64q_mv2qi (__a, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vctp32q_m (uint32_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vctp32q_mv4bi (__a, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vctp16q_m (uint32_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vctp16q_mv8bi (__a, __p);
+}
+
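+/* [Editorial note; not part of the imported header.]  The vctp*q_m
+   intrinsics build a tail predicate whose first __a lanes (at the given
+   element size) are active, ANDed with __p.  A hedged tail-loop sketch,
+   assuming the unpredicated __arm_vctp32q is also in scope:
+
+     for (int n = count; n > 0; n -= 4)
+       {
+         mve_pred16_t p = __arm_vctp32q (n);  // final iteration partial
+         ...                                  // predicated _m/_p ops
+       }
+*/
+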
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvaq_u32 (uint64_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vaddlvaq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vrmlsldavhxq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vrmlsldavhq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vrmlaldavhxq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vrmlaldavhq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvaq_s32 (int64_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vaddlvaq_sv4si (__a, __b);
+}
+
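+/* [Editorial note; not part of the imported header.]  These
+   across-vector forms accumulate into 64 bits: vaddlvaq adds the lane
+   sum of __b to __a, the vrmlaldavh/vrmlsldavh forms sum (or subtract)
+   32x32-bit products and return a rounded high part of the wide
+   accumulation, and the x variants exchange the lanes of one operand
+   pairwise first.  */
+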
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return __builtin_mve_vabavq_sv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_mve_vabavq_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_mve_vabavq_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return __builtin_mve_vabavq_uv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return __builtin_mve_vabavq_uv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return __builtin_mve_vabavq_uv4si (__a, __b, __c);
+}
+
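+/* [Editorial note; not part of the imported header.]  vabavq is
+   absolute-difference-and-accumulate across the vector, returning
+   __a + sum |__b[i] - __c[i]|, i.e. one step of a sum-of-absolute-
+   differences kernel:
+
+     uint32_t sad = 0;
+     sad = __arm_vabavq_u8 (sad, p, q);  // 16 bytes per call
+*/
+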
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_n_sv8hi (__a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_n_sv4si (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_n_uv8hi (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_n_uv4si (__a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vqrshrnbq_n_sv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vqrshrnbq_n_uv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vqrshrnbq_n_sv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vqrshrnbq_n_uv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrunbq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vqrshrunbq_n_sv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrunbq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vqrshrunbq_n_sv4si (__a, __b, __imm);
+}
+
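+/* [Editorial note; not part of the imported header.]  vqrshrnbq_n
+   right-shifts each lane of __b by __imm with rounding, saturates to
+   half width and writes the results into the bottom (even) halves of
+   __a, preserving the top halves; vqrshrunbq additionally narrows
+   signed input to unsigned.  */
+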
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_mve_vrmlaldavhaq_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhaq_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return __builtin_mve_vrmlaldavhaq_uv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_s8 (int8x16_t __a, uint32_t * __b, const int __imm)
+{
+ int8x16_t __res = __builtin_mve_vshlcq_vec_sv16qi (__a, *__b, __imm);
+ *__b = __builtin_mve_vshlcq_carry_sv16qi (__a, *__b, __imm);
+ return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_u8 (uint8x16_t __a, uint32_t * __b, const int __imm)
+{
+ uint8x16_t __res = __builtin_mve_vshlcq_vec_uv16qi (__a, *__b, __imm);
+ *__b = __builtin_mve_vshlcq_carry_uv16qi (__a, *__b, __imm);
+ return __res;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_s16 (int16x8_t __a, uint32_t * __b, const int __imm)
+{
+ int16x8_t __res = __builtin_mve_vshlcq_vec_sv8hi (__a, *__b, __imm);
+ *__b = __builtin_mve_vshlcq_carry_sv8hi (__a, *__b, __imm);
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_u16 (uint16x8_t __a, uint32_t * __b, const int __imm)
+{
+ uint16x8_t __res = __builtin_mve_vshlcq_vec_uv8hi (__a, *__b, __imm);
+ *__b = __builtin_mve_vshlcq_carry_uv8hi (__a, *__b, __imm);
+ return __res;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_s32 (int32x4_t __a, uint32_t * __b, const int __imm)
+{
+ int32x4_t __res = __builtin_mve_vshlcq_vec_sv4si (__a, *__b, __imm);
+ *__b = __builtin_mve_vshlcq_carry_sv4si (__a, *__b, __imm);
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_u32 (uint32x4_t __a, uint32_t * __b, const int __imm)
+{
+ uint32x4_t __res = __builtin_mve_vshlcq_vec_uv4si (__a, *__b, __imm);
+ *__b = __builtin_mve_vshlcq_carry_uv4si (__a, *__b, __imm);
+ return __res;
+}
+
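+/* [Editorial note; not part of the imported header.]  vshlcq is a
+   whole-vector left shift: bits shifted out of the top are stored back
+   through __b and bits from the previous carry word enter at the
+   bottom, so reusing one carry variable chains the shift across calls:
+
+     uint32_t carry = 0;
+     v0 = __arm_vshlcq_u32 (v0, &carry, 4);
+     v1 = __arm_vshlcq_u32 (v1, &carry, 4);  // continues the bit stream
+*/
+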
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vpselq_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vpselq_sv16qi (__a, __b, __p);
+}
+
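+/* [Editorial note; not part of the imported header.]  vpselq selects,
+   byte by byte, lanes of __a where the predicate bit is set and lanes
+   of __b where it is clear: the usual way to merge results computed
+   under a compare.  */
+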
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_uv16qi (__inactive, __a, __p);
+}
+
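+/* [Editorial note; not part of the imported header.]  In the merging
+   _m forms such as __arm_vrev64q_m_u8 above, lanes whose bits in __p
+   are clear are copied unchanged from __inactive instead of being
+   computed.  */
+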
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_uv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
+{
+ return __builtin_mve_vmlasq_n_uv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
+{
+ return __builtin_mve_vmlaq_n_uv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_p_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavq_p_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return __builtin_mve_vmladavaq_uv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_p_u8 (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminvq_p_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_p_u8 (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxvq_p_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m_n_u8 (uint8x16_t __inactive, uint8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_uv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_n_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmphiq_m_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmphiq_m_n_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_n_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_m_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpcsq_m_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_m_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpcsq_m_n_uv16qi (__a, __b, __p);
+}
+
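+/* [Editorial note; not part of the imported header.]  The _m compares
+   above are predicated: lanes where __p is clear produce clear bits in
+   the result regardless of the comparison, so successive _m compares
+   AND their conditions together.  */
+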
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_uv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_p_u8 (uint32_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvaq_p_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm)
+{
+ return __builtin_mve_vsriq_n_uv16qi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm)
+{
+ return __builtin_mve_vsliq_n_uv16qi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_r_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_n_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_r_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_r_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshlq_m_n_uv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminavq_p_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminaq_m_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq_p_s8 (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxavq_p_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq_m_s8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxaq_m_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_n_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpltq_m_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpltq_m_n_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpleq_m_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpleq_m_n_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgtq_m_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgtq_m_n_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgeq_m_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgeq_m_n_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_n_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_r_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_n_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_sv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_r_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_r_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshlq_m_n_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vqnegq_m_sv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vqabsq_m_sv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_sv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_sv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsdavxq_p_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsdavq_p_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavxq_p_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_p_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavq_p_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminvq_p_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_p_s8 (int8_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxvq_p_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m_n_s8 (int8x16_t __inactive, int8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_sv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_sv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclsq_m_sv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_p_s8 (int32_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvaq_p_sv16qi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_sv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqrdmlsdhxq_sv16qi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqrdmlsdhq_sv16qi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlashq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
+{
+ return __builtin_mve_vqrdmlashq_n_sv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlashq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
+{
+ return __builtin_mve_vqdmlashq_n_sv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
+{
+ return __builtin_mve_vqrdmlahq_n_sv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqrdmladhxq_sv16qi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqrdmladhq_sv16qi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqdmlsdhxq_sv16qi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqdmlsdhq_sv16qi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlahq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
+{
+ return __builtin_mve_vqdmlahq_n_sv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhxq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqdmladhxq_sv16qi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhq_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqdmladhq_sv16qi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return __builtin_mve_vmlsdavaxq_sv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return __builtin_mve_vmlsdavaq_sv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
+{
+ return __builtin_mve_vmlasq_n_sv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c)
+{
+ return __builtin_mve_vmlaq_n_sv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaxq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return __builtin_mve_vmladavaxq_sv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_s8 (int32_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return __builtin_mve_vmladavaq_sv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm)
+{
+ return __builtin_mve_vsriq_n_sv16qi (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm)
+{
+ return __builtin_mve_vsliq_n_sv16qi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vpselq_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vpselq_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_uv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_uv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
+{
+ return __builtin_mve_vmlasq_n_uv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
+{
+ return __builtin_mve_vmlaq_n_uv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_p_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavq_p_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return __builtin_mve_vmladavaq_uv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminvq_p_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_p_u16 (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxvq_p_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m_n_u16 (uint16x8_t __inactive, uint16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_uv8hi (__inactive, __a, __p);
+}
+
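+/* Predicated comparisons: the _m forms compute the lane-wise comparison
+   only for lanes active in __p (inactive lanes yield false bits), and
+   the _n forms compare every lane against the scalar __b.  For unsigned
+   lanes "cs" is carry-set (>=) and "hi" is higher (>).  */
+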
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_n_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmphiq_m_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmphiq_m_n_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_n_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_m_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpcsq_m_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_m_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpcsq_m_n_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_uv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_p_u16 (uint32_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvaq_p_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vsriq_n_uv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vsliq_n_uv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_r_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_n_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_r_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_r_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshlq_m_n_uv8hi (__a, __b, __p);
+}
+
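+/* Absolute-value reductions on signed input: vminavq_p/vmaxavq_p reduce
+   |lane| against the unsigned scalar __a, while vminaq_m/vmaxaq_m merge
+   the min/max of __a and |__b| back into the unsigned vector __a, lane
+   by lane, under the predicate.  */
+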
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminavq_p_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminaq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq_p_s16 (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxavq_p_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq_m_s16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxaq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_n_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpltq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpltq_m_n_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpleq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpleq_m_n_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgtq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgtq_m_n_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgeq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgeq_m_n_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_n_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_r_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_n_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_r_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_r_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshlq_m_n_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vqnegq_m_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vqabsq_m_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_sv8hi (__inactive, __a, __p);
+}
+
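+/* Dual multiply-accumulate reductions: vmladav sums the lane-wise
+   products of two vectors into a 32-bit scalar, and vmlsdav subtracts
+   the odd-lane products instead of adding them.  An "x" exchanges
+   adjacent lanes of the second operand first, an "a" folds in a running
+   accumulator, and _p skips lanes whose predicate bits are clear.  */
+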
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsdavxq_p_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsdavq_p_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavxq_p_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavq_p_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminvq_p_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_p_s16 (int16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxvq_p_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m_n_s16 (int16x8_t __inactive, int16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclsq_m_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_p_s16 (int32_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvaq_p_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_sv8hi (__inactive, __a, __p);
+}
+
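+/* Saturating doubling multiply families: in vqdmladh and vqdmlsdh the
+   paired lane products are doubled, added or subtracted, and the
+   saturated high halves written back; a leading "r" adds a rounding
+   constant.  The _n forms (vqdmlah, vqrdmlash, ...) multiply by a
+   replicated scalar and accumulate into the destination.  */
+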
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqrdmlsdhxq_sv8hi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqrdmlsdhq_sv8hi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlashq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return __builtin_mve_vqrdmlashq_n_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlashq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return __builtin_mve_vqdmlashq_n_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return __builtin_mve_vqrdmlahq_n_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqrdmladhxq_sv8hi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqrdmladhq_sv8hi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqdmlsdhxq_sv8hi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqdmlsdhq_sv8hi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlahq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return __builtin_mve_vqdmlahq_n_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhxq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqdmladhxq_sv8hi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhq_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqdmladhq_sv8hi (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_mve_vmlsdavaxq_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_mve_vmlsdavaq_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return __builtin_mve_vmlasq_n_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return __builtin_mve_vmlaq_n_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaxq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_mve_vmladavaxq_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_s16 (int32_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_mve_vmladavaq_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vsriq_n_sv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vsliq_n_sv8hi (__a, __b, __imm);
+}
+
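+/* The whole pattern now repeats for 32-bit lanes: identical wrappers
+   with the builtin suffix switching from v8hi to v4si.  */
+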
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vpselq_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vpselq_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_uv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_uv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
+{
+ return __builtin_mve_vmlasq_n_uv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
+{
+ return __builtin_mve_vmlaq_n_uv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavq_p_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return __builtin_mve_vmladavaq_uv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminvq_p_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxvq_p_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_uv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_n_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmphiq_m_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmphiq_m_n_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_n_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_m_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpcsq_m_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_m_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpcsq_m_n_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_uv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_p_u32 (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvaq_p_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vsriq_n_uv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vsliq_n_uv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_r_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_n_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_r_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_r_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshlq_m_n_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminavq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminaq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq_p_s32 (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxavq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq_m_s32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxaq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_n_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpltq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpltq_m_n_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpleq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpleq_m_n_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgtq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgtq_m_n_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgeq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgeq_m_n_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_n_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_r_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_n_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_r_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_r_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshlq_m_n_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vqnegq_m_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vqabsq_m_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsdavxq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsdavq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavxq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminvq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxvq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m_n_s32 (int32x4_t __inactive, int32_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclsq_m_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_p_s32 (int32_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvaq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_m_s32 (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqrdmlsdhxq_sv4si (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqrdmlsdhq_sv4si (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlashq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return __builtin_mve_vqrdmlashq_n_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlashq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return __builtin_mve_vqdmlashq_n_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return __builtin_mve_vqrdmlahq_n_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqrdmladhxq_sv4si (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqrdmladhq_sv4si (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqdmlsdhxq_sv4si (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqdmlsdhq_sv4si (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlahq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return __builtin_mve_vqdmlahq_n_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhxq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqdmladhxq_sv4si (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhq_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqdmladhq_sv4si (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_mve_vmlsdavaxq_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_mve_vmlsdavaq_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return __builtin_mve_vmlasq_n_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return __builtin_mve_vmlaq_n_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaxq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_mve_vmladavaxq_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_s32 (int32_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_mve_vmladavaq_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vsriq_n_sv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vsliq_n_sv4si (__a, __b, __imm);
+}
+
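+/* The 64-bit vector types appear here only in vpselq, which selects
+   bytewise and so works at any lane size.  64-bit totals instead come
+   from the long reductions below: vrmlaldavh and friends accumulate
+   32 x 32-bit lane products into a 64-bit scalar, keeping the rounded
+   high part of the sum.  */
+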
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq_u64 (uint64x2_t __a, uint64x2_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vpselq_uv2di (__a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq_s64 (int64x2_t __a, int64x2_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vpselq_sv2di (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_mve_vrmlaldavhaxq_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_mve_vrmlsldavhaq_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_mve_vrmlsldavhaxq_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvaq_p_s32 (int64_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddlvaq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev16q_m_sv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmlaldavhq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmlaldavhxq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmlsldavhq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmlsldavhxq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvaq_p_u32 (uint64_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddlvaq_p_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev16q_m_uv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmlaldavhq_p_uv4si (__a, __b, __p);
+}
+
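+/* Immediate and narrowing forms: vmvnq_m_n writes the bitwise NOT of an
+   immediate into the active lanes, and vorrq_m_n ORs an immediate into
+   them.  The shift-narrow intrinsics that follow shift each 16-bit lane
+   of __b right by __imm and narrow it to 8 bits, "t" filling the top
+   (odd) byte lanes of __a and "b" the bottom (even) ones, so a t/b pair
+   packs two source vectors into one result; "q" saturates, "r" rounds.  */
+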
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m_n_s16 (int16x8_t __inactive, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_n_sv8hi (__inactive, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_n_sv8hi (__a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vqrshrntq_n_sv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vqshrnbq_n_sv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vqshrntq_n_sv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vrshrnbq_n_sv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vrshrntq_n_sv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vshrnbq_n_sv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vshrntq_n_sv8hi (__a, __b, __imm);
+}
+
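+/* Long dual reductions: vmlaldav and vmlsldav behave like vmladav and
+   vmlsdav above but accumulate the 16 x 16-bit products into a 64-bit
+   scalar, with the same "a", "x" and _p suffixes.  */
+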
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_mve_vmlaldavaq_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaxq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_mve_vmlaldavaxq_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_mve_vmlsldavaq_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaxq_s16 (int64_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __builtin_mve_vmlsldavaxq_sv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaldavq_p_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaldavxq_p_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsldavq_p_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavxq_p_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsldavxq_p_sv8hi (__a, __b, __p);
+}
+
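+/* Predicated width conversions: vmovlb/vmovlt sign-extend (zero-extend
+   for unsigned types) the bottom or top half-lanes of __a into full
+   lanes, while vmovnb/vmovnt and the saturating vqmovnb/vqmovnt narrow
+   __b into the bottom or top half-lanes of __a.  vrev32q_m reverses the
+   element order within each 32-bit word of the active lanes.  */
+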
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_m_s8 (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovlbq_m_sv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_m_s8 (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovltq_m_sv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovnbq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovntq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqmovnbq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq_m_s16 (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqmovntq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_m_s8 (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_sv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m_n_u16 (uint16x8_t __inactive, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_n_uv8hi (__inactive, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_n_uv8hi (__a, __imm, __p);
+}
+
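+/* The "un" variants (vqrshrunt, vqshrunb, vqmovunb, ...) narrow signed
+   input to unsigned output, saturating negative values to zero.  */
+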
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshruntq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vqrshruntq_n_sv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrunbq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vqshrunbq_n_sv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshruntq_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vqshruntq_n_sv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovunbq_m_s16 (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqmovunbq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovuntq_m_s16 (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqmovuntq_m_sv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vqrshrntq_n_uv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vqshrnbq_n_uv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vqshrntq_n_uv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vrshrnbq_n_uv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vrshrntq_n_uv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vshrnbq_n_uv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __builtin_mve_vshrntq_n_uv8hi (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return __builtin_mve_vmlaldavaq_uv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq_p_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaldavq_p_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_m_u8 (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovlbq_m_uv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_m_u8 (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovltq_m_uv16qi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovnbq_m_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovntq_m_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqmovnbq_m_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq_m_u16 (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqmovntq_m_uv8hi (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_m_u8 (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_uv16qi (__inactive, __a, __p);
+}
+
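+/* From here the immediate and shift-narrow wrappers repeat for 32-bit
+   sources narrowing to 16-bit lanes (v4si -> v8hi), mirroring the
+   s16 -> s8 definitions above.  */
+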
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m_n_s32 (int32x4_t __inactive, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_n_sv4si (__inactive, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_n_sv4si (__a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vqrshrntq_n_sv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vqshrnbq_n_sv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vqshrntq_n_sv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vrshrnbq_n_sv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vrshrntq_n_sv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vshrnbq_n_sv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vshrntq_n_sv4si (__a, __b, __imm);
+}
+
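+/* Signed 32-bit long multiply-accumulate reductions: the 'x' (exchange)
+   forms cross-multiply adjacent lane pairs, and the VMLSLDAV forms
+   alternately add and subtract successive lane products instead of
+   summing them all.  */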
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_mve_vmlaldavaq_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_mve_vmlaldavaxq_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_mve_vmlsldavaq_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaxq_s32 (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __builtin_mve_vmlsldavaxq_sv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaldavq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaldavxq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsldavq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavxq_p_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsldavxq_p_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_m_s16 (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovlbq_m_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_m_s16 (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovltq_m_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovnbq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovntq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqmovnbq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq_m_s32 (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqmovntq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_m_s16 (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m_n_u32 (uint32x4_t __inactive, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_n_uv4si (__inactive, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_n_uv4si (__a, __imm, __p);
+}
+
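+/* Unsigned-saturating narrows of signed input: the VQSHRUN/VQRSHRUN
+   shifts and the VQMOVUN moves below take signed 32-bit lanes and
+   saturate the narrowed results to unsigned 16 bits.  */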
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshruntq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vqrshruntq_n_sv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrunbq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vqshrunbq_n_sv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshruntq_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vqshruntq_n_sv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovunbq_m_s32 (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqmovunbq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovuntq_m_s32 (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqmovuntq_m_sv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vqrshrntq_n_uv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vqshrnbq_n_uv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vqshrntq_n_uv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vrshrnbq_n_uv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vrshrntq_n_uv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vshrnbq_n_uv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __builtin_mve_vshrntq_n_uv4si (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return __builtin_mve_vmlaldavaq_uv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq_p_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaldavq_p_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_m_u16 (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovlbq_m_uv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_m_u16 (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovltq_m_uv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovnbq_m_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovntq_m_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqmovnbq_m_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq_m_u32 (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqmovntq_m_uv4si (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_m_u16 (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_uv8hi (__inactive, __a, __p);
+}
+
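+/* Predicated shift and arithmetic group: VSRI shifts each lane of __b
+   right by __imm and inserts it into __a, preserving the top __imm bits
+   of each __a lane; VSUB subtracts lane-wise; VQSHLU shifts signed lanes
+   left with saturation to an unsigned result; VABAV accumulates the
+   absolute differences of the active lanes into a 32-bit scalar; and
+   VSHL shifts each lane of __a by the signed per-lane count in __b.  */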
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsriq_m_n_sv16qi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_m_n_s8 (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshluq_m_n_sv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p_s8 (uint32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vabavq_p_sv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsriq_m_n_uv16qi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vabavq_p_uv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsriq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_m_n_s16 (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshluq_m_n_sv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p_s16 (uint32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vabavq_p_sv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsriq_m_n_uv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vabavq_p_uv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsriq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_m_n_s32 (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshluq_m_n_sv4si (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p_s32 (uint32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vabavq_p_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsriq_m_n_uv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vabavq_p_uv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_sv4si (__inactive, __a, __b, __p);
+}
+
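+/* Merging absolute difference: each active result lane is |__a - __b|.  */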
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
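+/* Merging adds: the '_n' forms add the scalar __b to every active lane of
+   __a; the vector forms that follow add lane-wise.  */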
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
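+/* Merging bitwise operations: VAND (__a & __b), VBIC (__a & ~__b) and
+   VEOR (__a ^ __b).  */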
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
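+/* Merging VBRSR: for each lane of __a, the bottom __b bits are reversed
+   and the remaining bits cleared, as used for bit-reversed (FFT)
+   addressing.  */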
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_uv8hi (__inactive, __a, __b, __p);
+}
+
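+/* Merging complex adds: adjacent lane pairs are treated as (real,
+   imaginary) complex values and __b is rotated by 270 or 90 degrees in
+   the complex plane before being added to __a.  */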
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
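+/* Merging halving adds: each active lane becomes (__a + __b) >> 1, in
+   scalar ('_n') and vector forms.  */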
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
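+/* Merging halving complex adds (signed only): the rotated complex sum,
+   as in VCADD above, with the result halved.  */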
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot270_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot270_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot270_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot90_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot90_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot90_m_sv8hi (__inactive, __a, __b, __p);
+}
+
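+/* Merging halving subtracts: each active lane becomes (__a - __b) >> 1.  */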
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
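+/* Merging lane-wise maximum and minimum.  */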
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
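+/* Predicated dot-product reductions (VMLADAV): the lane products of __b
+   and __c are summed into the 32-bit accumulator __a, skipping lanes
+   whose predicate bit is clear; the 'x' forms cross-multiply adjacent
+   lane pairs.  */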
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavaq_p_sv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavaq_p_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavaq_p_sv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_p_u8 (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavaq_p_uv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_p_u32 (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavaq_p_uv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_p_u16 (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavaq_p_uv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaxq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavaxq_p_sv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaxq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavaxq_p_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaxq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmladavaxq_p_sv8hi (__a, __b, __c, __p);
+}
+
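+/* Predicated multiply-accumulate with a scalar operand: VMLA computes
+   __a + __b * __c (scalar __c), while VMLAS below computes
+   __a * __b + __c.  */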
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaq_m_n_sv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaq_m_n_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaq_m_n_sv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaq_m_n_uv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaq_m_n_uv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaq_m_n_uv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlasq_m_n_sv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlasq_m_n_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlasq_m_n_sv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlasq_m_n_uv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlasq_m_n_uv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlasq_m_n_uv8hi (__a, __b, __c, __p);
+}
+
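+/* Predicated multiply-subtract reductions (VMLSDAV, signed only):
+   alternately adds and subtracts successive lane products of __b and __c
+   into the accumulator __a.  */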
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsdavaq_p_sv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsdavaq_p_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsdavaq_p_sv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaxq_p_s8 (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsdavaxq_p_sv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaxq_p_s32 (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsdavaxq_p_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaxq_p_s16 (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsdavaxq_p_sv8hi (__a, __b, __c, __p);
+}
+
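+/* Merging high-half multiply: each active lane receives the most
+   significant half of the double-width product __a * __b.  */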
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
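+/* Merging long multiplies: VMULLB multiplies the even-numbered (bottom)
+   lanes and VMULLT the odd-numbered (top) lanes of __a and __b, producing
+   double-width results.  */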
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_m_s8 (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_m_u8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_m_u32 (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_m_u16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_m_s8 (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_m_u8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_m_u32 (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_m_u16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_uv8hi (__inactive, __a, __b, __p);
+}
+
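+/* The _n forms take a scalar final operand that is broadcast to every
+   lane before the predicated multiply.  A one-line sketch under the
+   same assumptions as above:
+
+     uint16x8_t x = __arm_vdupq_n_u16 (7);
+     uint16x8_t r = __arm_vmulq_m_n_u16 (x, x, 3, 0xFFFF);
+     // all predicate bits set: every lane becomes 21
+*/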
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
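+/* vornq is OR with complement (each active lane yields __a | ~__b);
+   vorrq further below is the plain OR.  Sketch, same assumptions:
+
+     uint32x4_t v = __arm_vdupq_n_u32 (0x0000FFFF);
+     uint32x4_t m = __arm_vdupq_n_u32 (0x000000FF);
+     uint32x4_t r = __arm_vornq_m_u32 (v, v, m, 0xFFFF);
+     // each lane: 0x0000FFFF | ~0x000000FF = 0xFFFFFFFF
+*/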
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
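+/* The vqaddq/vqsubq families saturate instead of wrapping: a result
+   outside the element type's range clamps to its minimum or maximum.
+   Sketch, same assumptions:
+
+     int8x16_t big = __arm_vdupq_n_s8 (100);
+     int8x16_t r = __arm_vqaddq_m_s8 (big, big, big, 0xFFFF);
+     // every lane saturates to 127 instead of wrapping to -56
+*/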
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqaddq_m_n_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqaddq_m_n_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqaddq_m_n_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqaddq_m_n_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqaddq_m_n_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqaddq_m_n_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqaddq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqaddq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqaddq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqaddq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqaddq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqaddq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
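+/* High-level summary: the vqdmladh/vqdmlsdh group below performs
+   saturating doubling multiply add (or subtract) over adjacent lane
+   pairs, keeping the high half of each doubled product; the ...x
+   ("exchange") forms swap the lane pairing of the second operand.
+   They are the building blocks of complex multiply-accumulate
+   kernels; see the Armv8.1-M pseudocode for the exact lane pairing.  */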
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmladhq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmladhq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmladhq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmladhxq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmladhxq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmladhxq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlahq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmlahq_m_n_sv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlahq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmlahq_m_n_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlahq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmlahq_m_n_sv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmlsdhq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmlsdhq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmlsdhq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmlsdhxq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmlsdhxq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmlsdhxq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
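+/* vqdmulhq returns the high half of the doubled product: for element
+   size esize, each active lane computes sat ((2 * __a * __b) >> esize),
+   i.e. a Q-format fractional multiply.  Sketch, same assumptions:
+
+     int16x8_t half = __arm_vdupq_n_s16 (0x4000);   // 0.5 in Q15
+     int16x8_t r = __arm_vqdmulhq_m_s16 (half, half, half, 0xFFFF);
+     // each lane: (2 * 0x4000 * 0x4000) >> 16 = 0x2000, 0.25 in Q15
+*/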
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmulhq_m_n_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmulhq_m_n_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmulhq_m_n_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmulhq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmulhq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmulhq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmladhq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmladhq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmladhq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmladhxq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmladhxq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmladhxq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlahq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmlahq_m_n_sv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlahq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmlahq_m_n_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlahq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmlahq_m_n_sv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlashq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmlashq_m_n_sv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlashq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmlashq_m_n_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlashq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmlashq_m_n_sv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlashq_m_n_s8 (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmlashq_m_n_sv16qi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlashq_m_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmlashq_m_n_sv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlashq_m_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmlashq_m_n_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmlsdhq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmlsdhq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmlsdhq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhxq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmlsdhxq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhxq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmlsdhxq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhxq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmlsdhxq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
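+/* The vqrd... variants add a rounding constant before taking the high
+   half: sat ((2 * __a * __b + (1 << (esize - 1))) >> esize).  */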
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmulhq_m_n_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmulhq_m_n_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmulhq_m_n_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmulhq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmulhq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrdmulhq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
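+/* Shift-by-vector forms take a signed per-lane shift count, which is
+   why __b is a signed vector even in the unsigned variants: positive
+   counts shift left, negative counts shift right, with the leading
+   "q" (vqrshlq vs. vrshlq) denoting saturation and "r" rounding.  */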
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshlq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshlq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshlq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshlq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshlq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshlq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
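+/* The _n shift forms take a compile-time-constant immediate (roughly
+   [0, esize - 1] for left shifts, [1, esize] for right shifts).
+   Sketch, same assumptions:
+
+     uint16x8_t v = __arm_vdupq_n_u16 (9);
+     uint16x8_t r = __arm_vqshlq_m_n_u16 (v, v, 14, 0xFFFF);
+     // 9 << 14 overflows uint16_t, so active lanes saturate to 0xFFFF
+*/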
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_n_sv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_n_sv4si (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_n_sv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_n_uv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_n_uv4si (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_n_uv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshlq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqsubq_m_n_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqsubq_m_n_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqsubq_m_n_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqsubq_m_n_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqsubq_m_n_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqsubq_m_n_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqsubq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqsubq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqsubq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqsubq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqsubq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqsubq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_s8 (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_s16 (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_u8 (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_u16 (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_uv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_sv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_sv4si (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_sv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_uv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_uv4si (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_uv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_sv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_sv4si (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_sv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_uv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_uv4si (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_uv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_sv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_sv4si (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_sv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_uv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_uv4si (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_uv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_m_n_s8 (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsliq_m_n_sv16qi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_m_n_s32 (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsliq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_m_n_s16 (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsliq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_m_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsliq_m_n_uv16qi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_m_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsliq_m_n_uv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_m_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vsliq_m_n_uv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_n_s8 (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_sv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_n_s32 (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_n_s16 (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_n_u8 (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_uv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_n_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_uv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_n_u16 (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_uv8hi (__inactive, __a, __b, __p);
+}
+
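+/* The _p reductions below fold a whole vector into a 64-bit scalar:
+   vmlaldavaq returns __a plus the sum of the double-width lane
+   products __b[i] * __c[i]; the ...x forms pair exchanged lanes and
+   the vmlsldav forms subtract the odd-pair products.  Inactive lanes
+   simply drop out of the sum, which makes tail predication natural.
+   A dot-product sketch, same assumptions:
+
+     int32x4_t x = __arm_vdupq_n_s32 (3), y = __arm_vdupq_n_s32 (4);
+     int64_t acc = __arm_vmlaldavaq_p_s32 (5, x, y, __arm_vctp32q (2));
+     // only two lanes active: acc = 5 + 3*4 + 3*4 = 29
+*/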
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaldavaq_p_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaldavaq_p_sv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaldavaq_p_uv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq_p_u16 (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaldavaq_p_uv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaldavaxq_p_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlaldavaxq_p_sv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsldavaq_p_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsldavaq_p_sv8hi (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsldavaxq_p_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaxq_p_s16 (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vmlsldavaxq_p_sv8hi (__a, __b, __c, __p);
+}
+
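+/* vmullbq_poly/vmulltq_poly are the predicated carry-less (GF(2)
+   polynomial) widening multiplies of the bottom or top lanes, as used
+   in CRC-style bit manipulation.  */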
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_poly_m_pv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_poly_m_pv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly_m_p8 (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_poly_m_pv16qi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly_m_p16 (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_poly_m_pv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmullbq_m_n_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmullbq_m_n_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmullbq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmullbq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq_m_n_s32 (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmulltq_m_n_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmulltq_m_n_sv8hi (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq_m_s32 (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmulltq_m_sv4si (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq_m_s16 (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vqdmulltq_m_sv8hi (__inactive, __a, __b, __p);
+}
+
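+/* The shift-right-narrow family packs double-width lanes of __b,
+   shifted right by __imm (with "r" marking rounding, the leading "q"
+   saturation, and "un" a signed input narrowed to an unsigned
+   result), into the bottom ("b", even) or top ("t", odd) half-width
+   lanes of __a, leaving the other half of __a untouched.  Sketch,
+   same assumptions:
+
+     int32x4_t wide = __arm_vdupq_n_s32 (1000);
+     int16x8_t out = __arm_vdupq_n_s16 (0);
+     out = __arm_vqrshrnbq_m_n_s32 (out, wide, 4, 0xFFFF);
+     // even lanes: (1000 + 8) >> 4 = 63; odd lanes keep 0
+*/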
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshrnbq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshrnbq_m_n_uv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshrntq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshrntq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshrntq_m_n_uv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshrntq_m_n_uv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshrunbq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshrunbq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshruntq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqrshruntq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshrnbq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshrnbq_m_n_uv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshrntq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshrntq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshrntq_m_n_uv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshrntq_m_n_uv8hi (__a, __b, __imm, __p);
+}
+
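+/* VQSHRUNB/VQSHRUNT (predicated): as above, but narrowing a signed source
+   to a saturated unsigned result.  */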
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrunbq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshrunbq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrunbq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshrunbq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshruntq_m_n_s32 (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshruntq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshruntq_m_n_s16 (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vqshruntq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
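+/* VRMLALDAVHA (predicated): multiply corresponding lanes of __b and __c,
+   sum the products across the vector and accumulate the rounded high
+   64 bits into the scalar __a.  The X form exchanges adjacent lane pairs
+   of __b first; VRMLSLDAVHA subtracts alternate lane products.  */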
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmlaldavhaq_p_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhaq_p_u32 (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmlaldavhaq_p_uv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmlaldavhaxq_p_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhaq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmlsldavhaq_p_sv4si (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhaxq_p_s32 (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmlsldavhaxq_p_sv4si (__a, __b, __c, __p);
+}
+
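+/* VRSHRNB/VRSHRNT (predicated): rounding shift right by __imm and narrow
+   into the bottom or top half-width lanes of __a, without saturation.  */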
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrnbq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrnbq_m_n_uv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrntq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrntq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrntq_m_n_uv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrntq_m_n_uv8hi (__a, __b, __imm, __p);
+}
+
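+/* VSHLLB/VSHLLT (predicated): widen the bottom or top half-width lanes of
+   __a, shifting left by __imm; inactive lanes come from __inactive.  */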
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshllbq_m_n_sv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshllbq_m_n_sv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshllbq_m_n_uv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshllbq_m_n_uv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_m_n_s8 (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlltq_m_n_sv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_m_n_s16 (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlltq_m_n_sv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_m_n_u8 (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlltq_m_n_uv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_m_n_u16 (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlltq_m_n_uv8hi (__inactive, __a, __imm, __p);
+}
+
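+/* VSHRNB/VSHRNT (predicated): plain (truncating) shift right by __imm and
+   narrow into the bottom or top half-width lanes of __a.  */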
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrnbq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrnbq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrnbq_m_n_uv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrnbq_m_n_uv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq_m_n_s32 (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrntq_m_n_sv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq_m_n_s16 (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrntq_m_n_sv8hi (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq_m_n_u32 (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrntq_m_n_uv4si (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq_m_n_u16 (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrntq_m_n_uv8hi (__a, __b, __imm, __p);
+}
+
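+/* VSTRB scatter stores: each lane of __value (narrowed to a byte for the
+   wider element types) is written to __base[__offset[lane]].  */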
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_s8 (int8_t * __base, uint8x16_t __offset, int8x16_t __value)
+{
+ __builtin_mve_vstrbq_scatter_offset_sv16qi ((__builtin_neon_qi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_s32 (int8_t * __base, uint32x4_t __offset, int32x4_t __value)
+{
+ __builtin_mve_vstrbq_scatter_offset_sv4si ((__builtin_neon_qi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_s16 (int8_t * __base, uint16x8_t __offset, int16x8_t __value)
+{
+ __builtin_mve_vstrbq_scatter_offset_sv8hi ((__builtin_neon_qi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_u8 (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value)
+{
+ __builtin_mve_vstrbq_scatter_offset_uv16qi ((__builtin_neon_qi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_u32 (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value)
+{
+ __builtin_mve_vstrbq_scatter_offset_uv4si ((__builtin_neon_qi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_u16 (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value)
+{
+ __builtin_mve_vstrbq_scatter_offset_uv8hi ((__builtin_neon_qi *) __base, __offset, __value);
+}
+
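+/* VSTRB contiguous stores: write __value to consecutive bytes at __addr,
+   narrowing 16-bit and 32-bit lanes to bytes.  */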
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_s8 (int8_t * __addr, int8x16_t __value)
+{
+ __builtin_mve_vstrbq_sv16qi ((__builtin_neon_qi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_s32 (int8_t * __addr, int32x4_t __value)
+{
+ __builtin_mve_vstrbq_sv4si ((__builtin_neon_qi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_s16 (int8_t * __addr, int16x8_t __value)
+{
+ __builtin_mve_vstrbq_sv8hi ((__builtin_neon_qi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_u8 (uint8_t * __addr, uint8x16_t __value)
+{
+ __builtin_mve_vstrbq_uv16qi ((__builtin_neon_qi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_u32 (uint8_t * __addr, uint32x4_t __value)
+{
+ __builtin_mve_vstrbq_uv4si ((__builtin_neon_qi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_u16 (uint8_t * __addr, uint16x8_t __value)
+{
+ __builtin_mve_vstrbq_uv8hi ((__builtin_neon_qi *) __addr, __value);
+}
+
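+/* VSTRW scatter-base stores: each lane of __value is written to the
+   address in the matching lane of __addr plus the immediate byte offset
+   __offset.  */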
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_s32 (uint32x4_t __addr, const int __offset, int32x4_t __value)
+{
+ __builtin_mve_vstrwq_scatter_base_sv4si (__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_u32 (uint32x4_t __addr, const int __offset, uint32x4_t __value)
+{
+ __builtin_mve_vstrwq_scatter_base_uv4si (__addr, __offset, __value);
+}
+
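+/* VLDRB loads: the gather forms read a byte per lane from
+   __base[__offset[lane]], the contiguous forms read consecutive bytes
+   from __base; both sign- or zero-extend to the element size.  */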
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_u8 (uint8_t const * __base, uint8x16_t __offset)
+{
+ return __builtin_mve_vldrbq_gather_offset_uv16qi ((__builtin_neon_qi *) __base, __offset);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_s8 (int8_t const * __base, uint8x16_t __offset)
+{
+ return __builtin_mve_vldrbq_gather_offset_sv16qi ((__builtin_neon_qi *) __base, __offset);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_s8 (int8_t const * __base)
+{
+ return __builtin_mve_vldrbq_sv16qi ((__builtin_neon_qi *) __base);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_u8 (uint8_t const * __base)
+{
+ return __builtin_mve_vldrbq_uv16qi ((__builtin_neon_qi *) __base);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_u16 (uint8_t const * __base, uint16x8_t __offset)
+{
+ return __builtin_mve_vldrbq_gather_offset_uv8hi ((__builtin_neon_qi *) __base, __offset);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_s16 (int8_t const * __base, uint16x8_t __offset)
+{
+ return __builtin_mve_vldrbq_gather_offset_sv8hi ((__builtin_neon_qi *) __base, __offset);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_s16 (int8_t const * __base)
+{
+ return __builtin_mve_vldrbq_sv8hi ((__builtin_neon_qi *) __base);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_u16 (uint8_t const * __base)
+{
+ return __builtin_mve_vldrbq_uv8hi ((__builtin_neon_qi *) __base);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_u32 (uint8_t const * __base, uint32x4_t __offset)
+{
+ return __builtin_mve_vldrbq_gather_offset_uv4si ((__builtin_neon_qi *) __base, __offset);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_s32 (int8_t const * __base, uint32x4_t __offset)
+{
+ return __builtin_mve_vldrbq_gather_offset_sv4si ((__builtin_neon_qi *) __base, __offset);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_s32 (int8_t const * __base)
+{
+ return __builtin_mve_vldrbq_sv4si ((__builtin_neon_qi *) __base);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_u32 (uint8_t const * __base)
+{
+ return __builtin_mve_vldrbq_uv4si ((__builtin_neon_qi *) __base);
+}
+
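+/* VLDRW gather-base loads: each lane is read from the address in the
+   matching lane of __addr plus the immediate byte offset __offset.  */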
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_s32 (uint32x4_t __addr, const int __offset)
+{
+ return __builtin_mve_vldrwq_gather_base_sv4si (__addr, __offset);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_u32 (uint32x4_t __addr, const int __offset)
+{
+ return __builtin_mve_vldrwq_gather_base_uv4si (__addr, __offset);
+}
+
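+/* _p variants: predicated stores; lanes whose predicate bits in __p are
+   clear are not written to memory.  */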
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_p_s8 (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrbq_p_sv16qi ((__builtin_neon_qi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_p_s32 (int8_t * __addr, int32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrbq_p_sv4si ((__builtin_neon_qi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_p_s16 (int8_t * __addr, int16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrbq_p_sv8hi ((__builtin_neon_qi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_p_u8 (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrbq_p_uv16qi ((__builtin_neon_qi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_p_u32 (uint8_t * __addr, uint32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrbq_p_uv4si ((__builtin_neon_qi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_p_u16 (uint8_t * __addr, uint16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrbq_p_uv8hi ((__builtin_neon_qi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_p_s8 (int8_t * __base, uint8x16_t __offset, int8x16_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrbq_scatter_offset_p_sv16qi ((__builtin_neon_qi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_p_s32 (int8_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrbq_scatter_offset_p_sv4si ((__builtin_neon_qi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_p_s16 (int8_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrbq_scatter_offset_p_sv8hi ((__builtin_neon_qi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_p_u8 (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrbq_scatter_offset_p_uv16qi ((__builtin_neon_qi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_p_u32 (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrbq_scatter_offset_p_uv4si ((__builtin_neon_qi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_p_u16 (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrbq_scatter_offset_p_uv8hi ((__builtin_neon_qi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_p_s32 (uint32x4_t __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_scatter_base_p_sv4si (__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_p_u32 (uint32x4_t __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_scatter_base_p_uv4si (__addr, __offset, __value, __p);
+}
+
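+/* _z variants: predicated loads with zeroing; lanes whose predicate bits
+   in __p are clear are not loaded and are set to zero.  */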
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z_s8 (int8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_gather_offset_z_sv16qi ((__builtin_neon_qi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z_s32 (int8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_gather_offset_z_sv4si ((__builtin_neon_qi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z_s16 (int8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_gather_offset_z_sv8hi ((__builtin_neon_qi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z_u8 (uint8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_gather_offset_z_uv16qi ((__builtin_neon_qi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z_u32 (uint8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_gather_offset_z_uv4si ((__builtin_neon_qi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z_u16 (uint8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_gather_offset_z_uv8hi ((__builtin_neon_qi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_z_s8 (int8_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_z_sv16qi ((__builtin_neon_qi *) __base, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_z_s32 (int8_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_z_sv4si ((__builtin_neon_qi *) __base, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_z_s16 (int8_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_z_sv8hi ((__builtin_neon_qi *) __base, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_z_u8 (uint8_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_z_uv16qi ((__builtin_neon_qi *) __base, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_z_u32 (uint8_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_z_uv4si ((__builtin_neon_qi *) __base, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_z_u16 (uint8_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrbq_z_uv8hi ((__builtin_neon_qi *) __base, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_z_s32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_gather_base_z_sv4si (__addr, __offset, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_z_u32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_gather_base_z_uv4si (__addr, __offset, __p);
+}
+
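+/* VLD1: contiguous vector loads from __base, one element per lane, with
+   no widening; these are the unwidened counterparts of the VLDR loads
+   above.  */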
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_s8 (int8_t const * __base)
+{
+ return __builtin_mve_vld1q_sv16qi ((__builtin_neon_qi *) __base);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_s32 (int32_t const * __base)
+{
+ return __builtin_mve_vld1q_sv4si ((__builtin_neon_si *) __base);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_s16 (int16_t const * __base)
+{
+ return __builtin_mve_vld1q_sv8hi ((__builtin_neon_hi *) __base);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_u8 (uint8_t const * __base)
+{
+ return __builtin_mve_vld1q_uv16qi ((__builtin_neon_qi *) __base);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_u32 (uint32_t const * __base)
+{
+ return __builtin_mve_vld1q_uv4si ((__builtin_neon_si *) __base);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_u16 (uint16_t const * __base)
+{
+ return __builtin_mve_vld1q_uv8hi ((__builtin_neon_hi *) __base);
+}
+
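+/* VLDRH gathers: each lane loads a halfword from __base plus the matching
+   lane of __offset, taken as a byte offset; the shifted_offset forms
+   scale the offset by the element size (offset << 1) first.  */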
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_s32 (int16_t const * __base, uint32x4_t __offset)
+{
+ return __builtin_mve_vldrhq_gather_offset_sv4si ((__builtin_neon_hi *) __base, __offset);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_s16 (int16_t const * __base, uint16x8_t __offset)
+{
+ return __builtin_mve_vldrhq_gather_offset_sv8hi ((__builtin_neon_hi *) __base, __offset);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_u32 (uint16_t const * __base, uint32x4_t __offset)
+{
+ return __builtin_mve_vldrhq_gather_offset_uv4si ((__builtin_neon_hi *) __base, __offset);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_u16 (uint16_t const * __base, uint16x8_t __offset)
+{
+ return __builtin_mve_vldrhq_gather_offset_uv8hi ((__builtin_neon_hi *) __base, __offset);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_z_s32 (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_gather_offset_z_sv4si ((__builtin_neon_hi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_z_s16 (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_gather_offset_z_sv8hi ((__builtin_neon_hi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_z_u32 (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_gather_offset_z_uv4si ((__builtin_neon_hi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_z_u16 (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_gather_offset_z_uv8hi ((__builtin_neon_hi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_s32 (int16_t const * __base, uint32x4_t __offset)
+{
+ return __builtin_mve_vldrhq_gather_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_s16 (int16_t const * __base, uint16x8_t __offset)
+{
+ return __builtin_mve_vldrhq_gather_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_u32 (uint16_t const * __base, uint32x4_t __offset)
+{
+ return __builtin_mve_vldrhq_gather_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_u16 (uint16_t const * __base, uint16x8_t __offset)
+{
+ return __builtin_mve_vldrhq_gather_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_z_s32 (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_gather_shifted_offset_z_sv4si ((__builtin_neon_hi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_z_s16 (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_gather_shifted_offset_z_sv8hi ((__builtin_neon_hi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_z_u32 (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_gather_shifted_offset_z_uv4si ((__builtin_neon_hi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_z_u16 (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_gather_shifted_offset_z_uv8hi ((__builtin_neon_hi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_s32 (int16_t const * __base)
+{
+ return __builtin_mve_vldrhq_sv4si ((__builtin_neon_hi *) __base);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_s16 (int16_t const * __base)
+{
+ return __builtin_mve_vldrhq_sv8hi ((__builtin_neon_hi *) __base);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_u32 (uint16_t const * __base)
+{
+ return __builtin_mve_vldrhq_uv4si ((__builtin_neon_hi *) __base);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_u16 (uint16_t const * __base)
+{
+ return __builtin_mve_vldrhq_uv8hi ((__builtin_neon_hi *) __base);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_z_s32 (int16_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_z_sv4si ((__builtin_neon_hi *) __base, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_z_s16 (int16_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_z_sv8hi ((__builtin_neon_hi *) __base, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_z_u32 (uint16_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_z_uv4si ((__builtin_neon_hi *) __base, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_z_u16 (uint16_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_z_uv8hi ((__builtin_neon_hi *) __base, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_s32 (int32_t const * __base)
+{
+ return __builtin_mve_vldrwq_sv4si ((__builtin_neon_si *) __base);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_u32 (uint32_t const * __base)
+{
+ return __builtin_mve_vldrwq_uv4si ((__builtin_neon_si *) __base);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_z_s32 (int32_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_z_sv4si ((__builtin_neon_si *) __base, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_z_u32 (uint32_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_z_uv4si ((__builtin_neon_si *) __base, __p);
+}
+
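+/* VLDRD gathers: 64-bit loads; the base forms use a vector of addresses
+   plus an immediate byte offset, the offset forms add per-lane offsets
+   to the scalar __base.  */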
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_base_s64 (uint64x2_t __addr, const int __offset)
+{
+ return __builtin_mve_vldrdq_gather_base_sv2di (__addr, __offset);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_base_u64 (uint64x2_t __addr, const int __offset)
+{
+ return __builtin_mve_vldrdq_gather_base_uv2di (__addr, __offset);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_base_z_s64 (uint64x2_t __addr, const int __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrdq_gather_base_z_sv2di (__addr, __offset, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_base_z_u64 (uint64x2_t __addr, const int __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrdq_gather_base_z_uv2di (__addr, __offset, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_offset_s64 (int64_t const * __base, uint64x2_t __offset)
+{
+ return __builtin_mve_vldrdq_gather_offset_sv2di ((__builtin_neon_di *) __base, __offset);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_offset_u64 (uint64_t const * __base, uint64x2_t __offset)
+{
+ return __builtin_mve_vldrdq_gather_offset_uv2di ((__builtin_neon_di *) __base, __offset);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_offset_z_s64 (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrdq_gather_offset_z_sv2di ((__builtin_neon_di *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_offset_z_u64 (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrdq_gather_offset_z_uv2di ((__builtin_neon_di *) __base, __offset, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_shifted_offset_s64 (int64_t const * __base, uint64x2_t __offset)
+{
+ return __builtin_mve_vldrdq_gather_shifted_offset_sv2di ((__builtin_neon_di *) __base, __offset);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_shifted_offset_u64 (uint64_t const * __base, uint64x2_t __offset)
+{
+ return __builtin_mve_vldrdq_gather_shifted_offset_uv2di ((__builtin_neon_di *) __base, __offset);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_shifted_offset_z_s64 (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrdq_gather_shifted_offset_z_sv2di ((__builtin_neon_di *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_shifted_offset_z_u64 (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrdq_gather_shifted_offset_z_uv2di ((__builtin_neon_di *) __base, __offset, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_offset_s32 (int32_t const * __base, uint32x4_t __offset)
+{
+ return __builtin_mve_vldrwq_gather_offset_sv4si ((__builtin_neon_si *) __base, __offset);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_offset_u32 (uint32_t const * __base, uint32x4_t __offset)
+{
+ return __builtin_mve_vldrwq_gather_offset_uv4si ((__builtin_neon_si *) __base, __offset);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_offset_z_s32 (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_gather_offset_z_sv4si ((__builtin_neon_si *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_offset_z_u32 (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_gather_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_shifted_offset_s32 (int32_t const * __base, uint32x4_t __offset)
+{
+ return __builtin_mve_vldrwq_gather_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_shifted_offset_u32 (uint32_t const * __base, uint32x4_t __offset)
+{
+ return __builtin_mve_vldrwq_gather_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_shifted_offset_z_s32 (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_gather_shifted_offset_z_sv4si ((__builtin_neon_si *) __base, __offset, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_shifted_offset_z_u32 (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_gather_shifted_offset_z_uv4si ((__builtin_neon_si *) __base, __offset, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_s8 (int8_t * __addr, int8x16_t __value)
+{
+ __builtin_mve_vst1q_sv16qi ((__builtin_neon_qi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_s32 (int32_t * __addr, int32x4_t __value)
+{
+ __builtin_mve_vst1q_sv4si ((__builtin_neon_si *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_s16 (int16_t * __addr, int16x8_t __value)
+{
+ __builtin_mve_vst1q_sv8hi ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_u8 (uint8_t * __addr, uint8x16_t __value)
+{
+ __builtin_mve_vst1q_uv16qi ((__builtin_neon_qi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_u32 (uint32_t * __addr, uint32x4_t __value)
+{
+ __builtin_mve_vst1q_uv4si ((__builtin_neon_si *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_u16 (uint16_t * __addr, uint16x8_t __value)
+{
+ __builtin_mve_vst1q_uv8hi ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
+{
+ __builtin_mve_vstrhq_scatter_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
+{
+ __builtin_mve_vstrhq_scatter_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
+{
+ __builtin_mve_vstrhq_scatter_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
+{
+ __builtin_mve_vstrhq_scatter_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_sv4si ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_sv8hi ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_uv4si ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_uv8hi ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p_s32 (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_p_sv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p_s16 (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_p_sv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p_u32 (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_p_uv4si ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p_u16 (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_p_uv8hi ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_s32 (int16_t * __addr, int32x4_t __value)
+{
+ __builtin_mve_vstrhq_sv4si ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_s16 (int16_t * __addr, int16x8_t __value)
+{
+ __builtin_mve_vstrhq_sv8hi ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_u32 (uint16_t * __addr, uint32x4_t __value)
+{
+ __builtin_mve_vstrhq_uv4si ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_u16 (uint16_t * __addr, uint16x8_t __value)
+{
+ __builtin_mve_vstrhq_uv8hi ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p_s32 (int16_t * __addr, int32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_p_sv4si ((__builtin_neon_hi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p_s16 (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_p_sv8hi ((__builtin_neon_hi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p_u32 (uint16_t * __addr, uint32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_p_uv4si ((__builtin_neon_hi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p_u16 (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_p_uv8hi ((__builtin_neon_hi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_s32 (int32_t * __addr, int32x4_t __value)
+{
+ __builtin_mve_vstrwq_sv4si ((__builtin_neon_si *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_u32 (uint32_t * __addr, uint32x4_t __value)
+{
+ __builtin_mve_vstrwq_uv4si ((__builtin_neon_si *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_p_s32 (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_p_sv4si ((__builtin_neon_si *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_p_uv4si ((__builtin_neon_si *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_p_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrdq_scatter_base_p_sv2di (__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_p_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrdq_scatter_base_p_uv2di (__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_s64 (uint64x2_t __addr, const int __offset, int64x2_t __value)
+{
+ __builtin_mve_vstrdq_scatter_base_sv2di (__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_u64 (uint64x2_t __addr, const int __offset, uint64x2_t __value)
+{
+ __builtin_mve_vstrdq_scatter_base_uv2di (__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrdq_scatter_offset_p_sv2di ((__builtin_neon_di *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrdq_scatter_offset_p_uv2di ((__builtin_neon_di *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
+{
+ __builtin_mve_vstrdq_scatter_offset_sv2di ((__builtin_neon_di *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
+{
+ __builtin_mve_vstrdq_scatter_offset_uv2di ((__builtin_neon_di *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_shifted_offset_p_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrdq_scatter_shifted_offset_p_sv2di ((__builtin_neon_di *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_shifted_offset_p_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrdq_scatter_shifted_offset_p_uv2di ((__builtin_neon_di *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_shifted_offset_s64 (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
+{
+ __builtin_mve_vstrdq_scatter_shifted_offset_sv2di ((__builtin_neon_di *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_shifted_offset_u64 (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
+{
+ __builtin_mve_vstrdq_scatter_shifted_offset_uv2di ((__builtin_neon_di *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_scatter_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_scatter_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
+{
+ __builtin_mve_vstrwq_scatter_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
+{
+ __builtin_mve_vstrwq_scatter_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_shifted_offset_p_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_scatter_shifted_offset_p_sv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_shifted_offset_p_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_scatter_shifted_offset_p_uv4si ((__builtin_neon_si *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_shifted_offset_s32 (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
+{
+ __builtin_mve_vstrwq_scatter_shifted_offset_sv4si ((__builtin_neon_si *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_shifted_offset_u32 (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
+{
+ __builtin_mve_vstrwq_scatter_shifted_offset_uv4si ((__builtin_neon_si *) __base, __offset, __value);
+}
+
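+/* VADD: lane-wise addition written with the GNU vector extension so the
+   compiler can treat it as ordinary arithmetic.  */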
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a + __b;
+}
+
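+/* vddupq: decrementing-duplicate sequence generators.  Each wrapper maps
+   onto a VDDUP builtin; the '_m_' forms take an __inactive vector whose
+   lanes are kept where the predicate __p is false, and __imm is the
+   per-lane decrement (the instruction accepts 1, 2, 4 or 8).  */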
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vddupq_m_n_uv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vddupq_m_n_uv4si (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vddupq_m_n_uv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
+{
+ uint8x16_t __res = __builtin_mve_vddupq_m_n_uv16qi (__inactive, *__a, __imm, __p);
+ *__a -= __imm * 16u;
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
+{
+ uint16x8_t __res = __builtin_mve_vddupq_m_n_uv8hi (__inactive, *__a, __imm, __p);
+ *__a -= __imm * 8u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
+{
+ uint32x4_t __res = __builtin_mve_vddupq_m_n_uv4si (__inactive, *__a, __imm, __p);
+ *__a -= __imm * 4u;
+ return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_n_u8 (uint32_t __a, const int __imm)
+{
+ return __builtin_mve_vddupq_n_uv16qi (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_n_u32 (uint32_t __a, const int __imm)
+{
+ return __builtin_mve_vddupq_n_uv4si (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_n_u16 (uint32_t __a, const int __imm)
+{
+ return __builtin_mve_vddupq_n_uv8hi (__a, __imm);
+}
+
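+/* vdwdupq: decrement with wrap.  The wrap limit __b is a 32-bit scalar,
+   but the underlying builtin takes it packed into the top half of a
+   64-bit value, hence the '((uint64_t) __b) << 32' in every wrapper.
+   The '_wb_' forms also write the updated start value back to *__a via
+   the matching '_wb' builtin.  */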
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_vdwdupq_m_n_uv16qi (__inactive, __a, __c, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_vdwdupq_m_n_uv4si (__inactive, __a, __c, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_vdwdupq_m_n_uv8hi (__inactive, __a, __c, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint8x16_t __res = __builtin_mve_vdwdupq_m_n_uv16qi (__inactive, *__a, __c, __imm, __p);
+ *__a = __builtin_mve_vdwdupq_m_wb_uv16qi (__inactive, *__a, __c, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint32x4_t __res = __builtin_mve_vdwdupq_m_n_uv4si (__inactive, *__a, __c, __imm, __p);
+ *__a = __builtin_mve_vdwdupq_m_wb_uv4si (__inactive, *__a, __c, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint16x8_t __res = __builtin_mve_vdwdupq_m_n_uv8hi (__inactive, *__a, __c, __imm, __p);
+ *__a = __builtin_mve_vdwdupq_m_wb_uv8hi (__inactive, *__a, __c, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_n_u8 (uint32_t __a, uint32_t __b, const int __imm)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_vdwdupq_n_uv16qi (__a, __c, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_n_u32 (uint32_t __a, uint32_t __b, const int __imm)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_vdwdupq_n_uv4si (__a, __c, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_n_u16 (uint32_t __a, uint32_t __b, const int __imm)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_vdwdupq_n_uv8hi (__a, __c, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_wb_u8 (uint32_t * __a, uint32_t __b, const int __imm)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint8x16_t __res = __builtin_mve_vdwdupq_n_uv16qi (*__a, __c, __imm);
+ *__a = __builtin_mve_vdwdupq_wb_uv16qi (*__a, __c, __imm);
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_wb_u32 (uint32_t * __a, uint32_t __b, const int __imm)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint32x4_t __res = __builtin_mve_vdwdupq_n_uv4si (*__a, __c, __imm);
+ *__a = __builtin_mve_vdwdupq_wb_uv4si (*__a, __c, __imm);
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_wb_u16 (uint32_t * __a, uint32_t __b, const int __imm)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint16x8_t __res = __builtin_mve_vdwdupq_n_uv8hi (*__a, __c, __imm);
+ *__a = __builtin_mve_vdwdupq_wb_uv8hi (*__a, __c, __imm);
+ return __res;
+}
+
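+/* vidupq: the incrementing counterpart of vddupq.  The '_wb_' wrappers
+   advance *__a by __imm times the number of lanes (16, 8 or 4), which
+   is the next start value after one whole vector has been generated.  */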
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vidupq_m_n_uv16qi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vidupq_m_n_uv4si (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vidupq_m_n_uv8hi (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_n_u8 (uint32_t __a, const int __imm)
+{
+ return __builtin_mve_vidupq_n_uv16qi (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
+{
+ uint8x16_t __res = __builtin_mve_vidupq_m_n_uv16qi (__inactive, *__a, __imm, __p);
+ *__a += __imm * 16u;
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
+{
+ uint16x8_t __res = __builtin_mve_vidupq_m_n_uv8hi (__inactive, *__a, __imm, __p);
+ *__a += __imm * 8u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
+{
+ uint32x4_t __res = __builtin_mve_vidupq_m_n_uv4si (__inactive, *__a, __imm, __p);
+ *__a += __imm * 4u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_n_u32 (uint32_t __a, const int __imm)
+{
+ return __builtin_mve_vidupq_n_uv4si (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_n_u16 (uint32_t __a, const int __imm)
+{
+ return __builtin_mve_vidupq_n_uv8hi (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_wb_u8 (uint32_t * __a, const int __imm)
+{
+ uint8x16_t __res = __builtin_mve_vidupq_n_uv16qi (*__a, __imm);
+ *__a += __imm * 16u;
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_wb_u16 (uint32_t * __a, const int __imm)
+{
+ uint16x8_t __res = __builtin_mve_vidupq_n_uv8hi (*__a, __imm);
+ *__a += __imm * 8u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_wb_u32 (uint32_t * __a, const int __imm)
+{
+ uint32x4_t __res = __builtin_mve_vidupq_n_uv4si (*__a, __imm);
+ *__a += __imm * 4u;
+ return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_wb_u8 (uint32_t * __a, const int __imm)
+{
+ uint8x16_t __res = __builtin_mve_vddupq_n_uv16qi (*__a, __imm);
+ *__a -= __imm * 16u;
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_wb_u16 (uint32_t * __a, const int __imm)
+{
+ uint16x8_t __res = __builtin_mve_vddupq_n_uv8hi (*__a, __imm);
+ *__a -= __imm * 8u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_wb_u32 (uint32_t * __a, const int __imm)
+{
+ uint32x4_t __res = __builtin_mve_vddupq_n_uv4si (*__a, __imm);
+ *__a -= __imm * 4u;
+ return __res;
+}
+
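+/* viwdupq: increment with wrap.  As with vdwdupq, the wrap limit __b is
+   widened and shifted into the top 32 bits before reaching the builtin,
+   and the '_wb_' forms obtain the written-back start value from a
+   separate '_wb' builtin rather than computing it inline.  */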
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_m_n_u8 (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_viwdupq_m_n_uv16qi (__inactive, __a, __c, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_m_n_u32 (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_viwdupq_m_n_uv4si (__inactive, __a, __c, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_m_n_u16 (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_viwdupq_m_n_uv8hi (__inactive, __a, __c, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_m_wb_u8 (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint8x16_t __res = __builtin_mve_viwdupq_m_n_uv16qi (__inactive, *__a, __c, __imm, __p);
+ *__a = __builtin_mve_viwdupq_m_wb_uv16qi (__inactive, *__a, __c, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_m_wb_u32 (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint32x4_t __res = __builtin_mve_viwdupq_m_n_uv4si (__inactive, *__a, __c, __imm, __p);
+ *__a = __builtin_mve_viwdupq_m_wb_uv4si (__inactive, *__a, __c, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_m_wb_u16 (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint16x8_t __res = __builtin_mve_viwdupq_m_n_uv8hi (__inactive, *__a, __c, __imm, __p);
+ *__a = __builtin_mve_viwdupq_m_wb_uv8hi (__inactive, *__a, __c, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_n_u8 (uint32_t __a, uint32_t __b, const int __imm)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_viwdupq_n_uv16qi (__a, __c, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_n_u32 (uint32_t __a, uint32_t __b, const int __imm)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_viwdupq_n_uv4si (__a, __c, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_n_u16 (uint32_t __a, uint32_t __b, const int __imm)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_viwdupq_n_uv8hi (__a, __c, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_wb_u8 (uint32_t * __a, uint32_t __b, const int __imm)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint8x16_t __res = __builtin_mve_viwdupq_n_uv16qi (*__a, __c, __imm);
+ *__a = __builtin_mve_viwdupq_wb_uv16qi (*__a, __c, __imm);
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_wb_u32 (uint32_t * __a, uint32_t __b, const int __imm)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint32x4_t __res = __builtin_mve_viwdupq_n_uv4si (*__a, __c, __imm);
+ *__a = __builtin_mve_viwdupq_wb_uv4si (*__a, __c, __imm);
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_wb_u16 (uint32_t * __a, uint32_t __b, const int __imm)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint16x8_t __res = __builtin_mve_viwdupq_n_uv8hi (*__a, __c, __imm);
+ *__a = __builtin_mve_viwdupq_wb_uv8hi (*__a, __c, __imm);
+ return __res;
+}
+
+
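+/* Writeback gather loads.  Each intrinsic expands to a pair of builtins
+   over the same base-address vector: a '_nowb' builtin that produces the
+   loaded data, and a '_wb' builtin that produces the post-incremented
+   addresses stored back through *__addr.  */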
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_base_wb_s64 (uint64x2_t * __addr, const int __offset)
+{
+ int64x2_t __res = __builtin_mve_vldrdq_gather_base_nowb_sv2di (*__addr, __offset);
+ *__addr = __builtin_mve_vldrdq_gather_base_wb_sv2di (*__addr, __offset);
+ return __res;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_base_wb_u64 (uint64x2_t * __addr, const int __offset)
+{
+ uint64x2_t __res = __builtin_mve_vldrdq_gather_base_nowb_uv2di (*__addr, __offset);
+ *__addr = __builtin_mve_vldrdq_gather_base_wb_uv2di (*__addr, __offset);
+ return __res;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_base_wb_z_s64 (uint64x2_t * __addr, const int __offset, mve_pred16_t __p)
+{
+ int64x2_t __res = __builtin_mve_vldrdq_gather_base_nowb_z_sv2di (*__addr, __offset, __p);
+ *__addr = __builtin_mve_vldrdq_gather_base_wb_z_sv2di (*__addr, __offset, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_base_wb_z_u64 (uint64x2_t * __addr, const int __offset, mve_pred16_t __p)
+{
+ uint64x2_t __res = __builtin_mve_vldrdq_gather_base_nowb_z_uv2di (*__addr, __offset, __p);
+ *__addr = __builtin_mve_vldrdq_gather_base_wb_z_uv2di (*__addr, __offset, __p);
+ return __res;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_wb_s32 (uint32x4_t * __addr, const int __offset)
+{
+ int32x4_t __res = __builtin_mve_vldrwq_gather_base_nowb_sv4si (*__addr, __offset);
+ *__addr = __builtin_mve_vldrwq_gather_base_wb_sv4si (*__addr, __offset);
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_wb_u32 (uint32x4_t * __addr, const int __offset)
+{
+ uint32x4_t __res = __builtin_mve_vldrwq_gather_base_nowb_uv4si (*__addr, __offset);
+ *__addr = __builtin_mve_vldrwq_gather_base_wb_uv4si (*__addr, __offset);
+ return __res;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_wb_z_s32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p)
+{
+ int32x4_t __res = __builtin_mve_vldrwq_gather_base_nowb_z_sv4si (*__addr, __offset, __p);
+ *__addr = __builtin_mve_vldrwq_gather_base_wb_z_sv4si (*__addr, __offset, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_wb_z_u32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p)
+{
+ uint32x4_t __res = __builtin_mve_vldrwq_gather_base_nowb_z_uv4si (*__addr, __offset, __p);
+ *__addr = __builtin_mve_vldrwq_gather_base_wb_z_uv4si (*__addr, __offset, __p);
+ return __res;
+}
+
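+/* Writeback scatter stores.  Here a single builtin both performs the
+   store and returns the updated base-address vector, which the wrapper
+   assigns back through *__addr.  */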
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_wb_s64 (uint64x2_t * __addr, const int __offset, int64x2_t __value)
+{
+ *__addr = __builtin_mve_vstrdq_scatter_base_wb_sv2di (*__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_wb_u64 (uint64x2_t * __addr, const int __offset, uint64x2_t __value)
+{
+ *__addr = __builtin_mve_vstrdq_scatter_base_wb_uv2di (*__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_wb_p_s64 (uint64x2_t * __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
+{
+ *__addr = __builtin_mve_vstrdq_scatter_base_wb_p_sv2di (*__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_wb_p_u64 (uint64x2_t * __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
+{
+ *__addr = __builtin_mve_vstrdq_scatter_base_wb_p_uv2di (*__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_wb_p_s32 (uint32x4_t * __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ *__addr = __builtin_mve_vstrwq_scatter_base_wb_p_sv4si (*__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_wb_p_u32 (uint32x4_t * __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ *__addr = __builtin_mve_vstrwq_scatter_base_wb_p_uv4si (*__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_wb_s32 (uint32x4_t * __addr, const int __offset, int32x4_t __value)
+{
+ *__addr = __builtin_mve_vstrwq_scatter_base_wb_sv4si (*__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_wb_u32 (uint32x4_t * __addr, const int __offset, uint32x4_t __value)
+{
+ *__addr = __builtin_mve_vstrwq_scatter_base_wb_uv4si (*__addr, __offset, __value);
+}
+
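+/* '_x_' (don't-care) predication: false-predicated lanes may take any
+   value, so these wrappers reuse the '_m_' builtins with an
+   __arm_vuninitializedq_* vector as the inactive operand.  */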
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vddupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vddupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vddupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
+ uint8x16_t __res = __builtin_mve_vddupq_m_n_uv16qi (__arg1, *__a, __imm, __p);
+ *__a -= __imm * 16u;
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
+ uint16x8_t __res = __builtin_mve_vddupq_m_n_uv8hi (__arg1, *__a, __imm, __p);
+ *__a -= __imm * 8u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
+ uint32x4_t __res = __builtin_mve_vddupq_m_n_uv4si (__arg1, *__a, __imm, __p);
+ *__a -= __imm * 4u;
+ return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_vdwdupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __c, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_vdwdupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __c, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_vdwdupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __c, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
+ uint8x16_t __res = __builtin_mve_vdwdupq_m_n_uv16qi (__arg1, *__a, __c, __imm, __p);
+ *__a = __builtin_mve_vdwdupq_m_wb_uv16qi (__arg1, *__a, __c, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
+ uint16x8_t __res = __builtin_mve_vdwdupq_m_n_uv8hi (__arg1, *__a, __c, __imm, __p);
+ *__a = __builtin_mve_vdwdupq_m_wb_uv8hi (__arg1, *__a, __c, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
+ uint32x4_t __res = __builtin_mve_vdwdupq_m_n_uv4si (__arg1, *__a, __c, __imm, __p);
+ *__a = __builtin_mve_vdwdupq_m_wb_uv4si (__arg1, *__a, __c, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vidupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vidupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vidupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
+ uint8x16_t __res = __builtin_mve_vidupq_m_n_uv16qi (__arg1, *__a, __imm, __p);
+ *__a += __imm * 16u;
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
+ uint16x8_t __res = __builtin_mve_vidupq_m_n_uv8hi (__arg1, *__a, __imm, __p);
+ *__a += __imm * 8u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
+ uint32x4_t __res = __builtin_mve_vidupq_m_n_uv4si (__arg1, *__a, __imm, __p);
+ *__a += __imm * 4u;
+ return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_viwdupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __c, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_viwdupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __c, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ return __builtin_mve_viwdupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __c, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint8x16_t __arg1 = __arm_vuninitializedq_u8 ();
+ uint8x16_t __res = __builtin_mve_viwdupq_m_n_uv16qi (__arg1, *__a, __c, __imm, __p);
+ *__a = __builtin_mve_viwdupq_m_wb_uv16qi (__arg1, *__a, __c, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint16x8_t __arg1 = __arm_vuninitializedq_u16 ();
+ uint16x8_t __res = __builtin_mve_viwdupq_m_n_uv8hi (__arg1, *__a, __c, __imm, __p);
+ *__a = __builtin_mve_viwdupq_m_wb_uv8hi (__arg1, *__a, __c, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint64_t __c = ((uint64_t) __b) << 32;
+ uint32x4_t __arg1 = __arm_vuninitializedq_u32 ();
+ uint32x4_t __res = __builtin_mve_viwdupq_m_n_uv4si (__arg1, *__a, __c, __imm, __p);
+ *__a = __builtin_mve_viwdupq_m_wb_uv4si (__arg1, *__a, __c, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_s8 (int8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_s16 (int16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_s32 (int32_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_u8 (uint8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_u16 (uint16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_u32 (uint32_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclsq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclsq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_x_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclsq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x_u32 (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_poly_m_pv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_poly_m_pv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_sv4si (__arm_vuninitializedq_s64 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_uv4si (__arm_vuninitializedq_u64 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_poly_m_pv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_poly_m_pv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_sv4si (__arm_vuninitializedq_s64 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_uv4si (__arm_vuninitializedq_u64 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
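+/* As above, but with __b rotated by 270 degrees before the addition.  */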
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
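+/* Halving addition (VHADD): each result lane is (__a + __b) >> 1, computed
+   without intermediate overflow.  The _n forms duplicate the scalar __b
+   across all lanes; the vector-vector forms follow.  */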
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
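+/* Halving complex addition (VHCADD): the rotated complex addition of VCADD
+   combined with the halving of VHADD; defined for signed elements only.  */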
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot90_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot90_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot90_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot270_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot270_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot270_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
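+/* Halving subtraction (VHSUB): each result lane is (__a - __b) >> 1 without
+   intermediate overflow, in scalar (_n) and vector forms.  */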
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
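+/* Rounding halving addition (VRHADD): (__a + __b + 1) >> 1 per lane.  */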
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
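+/* Rounding multiply returning high half (VRMULH): the most significant half
+   of the rounded double-width product of each pair of lanes.  */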
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
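+/* Bitwise AND (VAND).  */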
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
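+/* Bit clear (VBIC): __a AND NOT __b.  */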
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
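+/* Bit reverse and shift right (VBRSR), with the shift count taken from the
+   scalar __b; typically used to generate bit-reversed FFT indices.  */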
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
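+/* Bitwise exclusive OR (VEOR).  */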
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
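+/* Widening moves: vmovlbq extends the bottom (even-numbered) lanes of __a
+   to twice their width, vmovltq the top (odd-numbered) lanes; signed inputs
+   are sign-extended and unsigned inputs zero-extended.  */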
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovlbq_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovlbq_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovlbq_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovlbq_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovltq_m_sv16qi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovltq_m_sv8hi (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovltq_m_uv16qi (__arm_vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovltq_m_uv8hi (__arm_vuninitializedq_u32 (), __a, __p);
+}
+
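+/* Bitwise complement (VMVN).  The _n forms further below complement an
+   immediate replicated across the lanes.  */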
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_u32 (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_n_s16 (const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_n_s32 (const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_n_sv4si (__arm_vuninitializedq_s32 (), __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_n_u16 (const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_n_u32 (const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_n_uv4si (__arm_vuninitializedq_u32 (), __imm, __p);
+}
+
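+/* Bitwise OR-NOT (VORN): __a OR NOT __b.  */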
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
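+/* Bitwise inclusive OR (VORR).  */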
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
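+/* Element reversal: vrev16q swaps the bytes within each 16-bit halfword,
+   vrev32q reverses the elements within each 32-bit word, and vrev64q within
+   each 64-bit doubleword.  */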
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev16q_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev16q_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_u32 (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
+}
+
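+/* Rounding shift left (VRSHL): each lane of __a is shifted by the
+   corresponding signed lane of __b; negative counts shift right, with the
+   result rounded rather than truncated.  */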
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
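+/* Widening shifts: vshllbq shifts the bottom lanes of __a left by __imm
+   into double-width results, vshlltq the top lanes.  */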
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshllbq_m_n_sv16qi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshllbq_m_n_sv8hi (__arm_vuninitializedq_s32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshllbq_m_n_uv16qi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshllbq_m_n_uv8hi (__arm_vuninitializedq_u32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlltq_m_n_sv16qi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlltq_m_n_sv8hi (__arm_vuninitializedq_s32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlltq_m_n_uv16qi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlltq_m_n_uv8hi (__arm_vuninitializedq_u32 (), __a, __imm, __p);
+}
+
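+/* Shift left (VSHL), either by per-lane signed counts in __b or by an
+   immediate in the _n forms.  */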
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_sv16qi (__arm_vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_uv16qi (__arm_vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
+}
+
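+/* Immediate right shifts: vrshrq rounds the shifted result, while the vshrq
+   forms further below truncate.  */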
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_sv16qi (__arm_vuninitializedq_s8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_sv4si (__arm_vuninitializedq_s32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_uv16qi (__arm_vuninitializedq_u8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_uv4si (__arm_vuninitializedq_u32 (), __a, __imm, __p);
+}
+
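+/* Whole-vector add/subtract with carry on 32-bit lanes.  The carry travels
+   through bit 29 of FPSCR (the C flag of the NZCVQC group): the vadciq and
+   vsbciq forms start from a fixed initial carry and only report the carry
+   out through *__carry_out, while vadcq and vsbcq also seed bit 29 from
+   *__carry before the operation.  Chaining the intrinsics through a single
+   carry variable yields multi-precision arithmetic; a hypothetical sketch:
+
+     uint32x4_t __r_lo, __r_hi;
+     unsigned __c;
+     __r_lo = __arm_vadciq_u32 (__a_lo, __b_lo, &__c);
+     __r_hi = __arm_vadcq_u32 (__a_hi, __b_hi, &__c);
+*/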
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
+{
+ int32x4_t __res = __builtin_mve_vadciq_sv4si (__a, __b);
+ *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
+{
+ uint32x4_t __res = __builtin_mve_vadciq_uv4si (__a, __b);
+ *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
+{
+ int32x4_t __res = __builtin_mve_vadciq_m_sv4si (__inactive, __a, __b, __p);
+ *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
+{
+ uint32x4_t __res = __builtin_mve_vadciq_m_uv4si (__inactive, __a, __b, __p);
+ *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
+{
+ __builtin_arm_set_fpscr_nzcvqc ((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ int32x4_t __res = __builtin_mve_vadcq_sv4si (__a, __b);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
+{
+ __builtin_arm_set_fpscr_nzcvqc ((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ uint32x4_t __res = __builtin_mve_vadcq_uv4si (__a, __b);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+{
+ __builtin_arm_set_fpscr_nzcvqc ((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ int32x4_t __res = __builtin_mve_vadcq_m_sv4si (__inactive, __a, __b, __p);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+{
+ __builtin_arm_set_fpscr_nzcvqc ((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ uint32x4_t __res = __builtin_mve_vadcq_m_uv4si (__inactive, __a, __b, __p);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbciq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
+{
+ int32x4_t __res = __builtin_mve_vsbciq_sv4si (__a, __b);
+ *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbciq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
+{
+ uint32x4_t __res = __builtin_mve_vsbciq_uv4si (__a, __b);
+ *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbciq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
+{
+ int32x4_t __res = __builtin_mve_vsbciq_m_sv4si (__inactive, __a, __b, __p);
+ *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbciq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
+{
+ uint32x4_t __res = __builtin_mve_vsbciq_m_uv4si (__inactive, __a, __b, __p);
+ *__carry_out = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbcq_s32 (int32x4_t __a, int32x4_t __b, unsigned * __carry)
+{
+ __builtin_arm_set_fpscr_nzcvqc ((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ int32x4_t __res = __builtin_mve_vsbcq_sv4si (__a, __b);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbcq_u32 (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
+{
+ __builtin_arm_set_fpscr_nzcvqc ((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ uint32x4_t __res = __builtin_mve_vsbcq_uv4si (__a, __b);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbcq_m_s32 (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+{
+ __builtin_arm_set_fpscr_nzcvqc ((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ int32x4_t __res = __builtin_mve_vsbcq_m_sv4si (__inactive, __a, __b, __p);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbcq_m_u32 (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+{
+ __builtin_arm_set_fpscr_nzcvqc ((__builtin_arm_get_fpscr_nzcvqc () & ~0x20000000u) | ((*__carry & 0x1u) << 29));
+ uint32x4_t __res = __builtin_mve_vsbcq_m_uv4si (__inactive, __a, __b, __p);
+ *__carry = (__builtin_arm_get_fpscr_nzcvqc () >> 29) & 0x1u;
+ return __res;
+}
+
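+/* Contiguous vector stores and loads.  The predicated vst1q_p/vld1q_z forms
+   lower directly onto the typed vstrbq/vldrbq (and wider) intrinsics, while
+   vst2q/vld2q and vld4q store or load 2 or 4 de/interleaved vectors,
+   marshalling the tuple types through a union with the opaque
+   __builtin_neon_oi/__builtin_neon_xi types expected by the builtins.  */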
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p_u8 (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
+{
+ return __arm_vstrbq_p_u8 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p_s8 (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
+{
+ return __arm_vstrbq_p_s8 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q_s8 (int8_t * __addr, int8x16x2_t __value)
+{
+ union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst2qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q_u8 (uint8_t * __addr, uint8x16x2_t __value)
+{
+ union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst2qv16qi ((__builtin_neon_qi *) __addr, __rv.__o);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z_u8 (uint8_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vldrbq_z_u8 (__base, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z_s8 (int8_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vldrbq_z_s8 (__base, __p);
+}
+
+__extension__ extern __inline int8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q_s8 (int8_t const * __addr)
+{
+ union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_mve_vld2qv16qi ((__builtin_neon_qi *) __addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q_u8 (uint8_t const * __addr)
+{
+ union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_mve_vld2qv16qi ((__builtin_neon_qi *) __addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q_s8 (int8_t const * __addr)
+{
+ union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_mve_vld4qv16qi ((__builtin_neon_qi *) __addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q_u8 (uint8_t const * __addr)
+{
+ union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_mve_vld4qv16qi ((__builtin_neon_qi *) __addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p_u16 (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
+{
+ return __arm_vstrhq_p_u16 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p_s16 (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
+{
+ return __arm_vstrhq_p_s16 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q_s16 (int16_t * __addr, int16x8x2_t __value)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst2qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q_u16 (uint16_t * __addr, uint16x8x2_t __value)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst2qv8hi ((__builtin_neon_hi *) __addr, __rv.__o);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z_u16 (uint16_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vldrhq_z_u16 (__base, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z_s16 (int16_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vldrhq_z_s16 (__base, __p);
+}
+
+__extension__ extern __inline int16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q_s16 (int16_t const * __addr)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_mve_vld2qv8hi ((__builtin_neon_hi *) __addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q_u16 (uint16_t const * __addr)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_mve_vld2qv8hi ((__builtin_neon_hi *) __addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q_s16 (int16_t const * __addr)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_mve_vld4qv8hi ((__builtin_neon_hi *) __addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q_u16 (uint16_t const * __addr)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_mve_vld4qv8hi ((__builtin_neon_hi *) __addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p_u32 (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
+{
+ return __arm_vstrwq_p_u32 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p_s32 (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
+{
+ return __arm_vstrwq_p_s32 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q_s32 (int32_t * __addr, int32x4x2_t __value)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst2qv4si ((__builtin_neon_si *) __addr, __rv.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q_u32 (uint32_t * __addr, uint32x4x2_t __value)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst2qv4si ((__builtin_neon_si *) __addr, __rv.__o);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z_u32 (uint32_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vldrwq_z_u32 (__base, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z_s32 (int32_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vldrwq_z_s32 (__base, __p);
+}
+
+__extension__ extern __inline int32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q_s32 (int32_t const * __addr)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_mve_vld2qv4si ((__builtin_neon_si *) __addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q_u32 (uint32_t const * __addr)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_mve_vld2qv4si ((__builtin_neon_si *) __addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q_s32 (int32_t const * __addr)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_mve_vld4qv4si ((__builtin_neon_si *) __addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q_u32 (uint32_t const * __addr)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_mve_vld4qv4si ((__builtin_neon_si *) __addr);
+ return __rv.__i;
+}
+
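+/* Lane access: vsetq_lane replaces a single element of a vector and
+   vgetq_lane reads one out.  __ARM_CHECK_LANEQ rejects out-of-range lane
+   indices at compile time, so __idx must be a constant expression.  For
+   example:
+
+     int32x4_t v = __arm_vdupq_n_s32 (0);
+     v = __arm_vsetq_lane_s32 (42, v, 1);      // lane 1 := 42
+     int32_t x = __arm_vgetq_lane_s32 (v, 1);  // x == 42
+*/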
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane_s16 (int16_t __a, int16x8_t __b, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__b, __idx);
+ __b[__ARM_LANEQ(__b,__idx)] = __a;
+ return __b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane_s32 (int32_t __a, int32x4_t __b, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__b, __idx);
+ __b[__ARM_LANEQ(__b,__idx)] = __a;
+ return __b;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane_s8 (int8_t __a, int8x16_t __b, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__b, __idx);
+ __b[__ARM_LANEQ(__b,__idx)] = __a;
+ return __b;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane_s64 (int64_t __a, int64x2_t __b, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__b, __idx);
+ __b[__ARM_LANEQ(__b,__idx)] = __a;
+ return __b;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane_u8 (uint8_t __a, uint8x16_t __b, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__b, __idx);
+ __b[__ARM_LANEQ(__b,__idx)] = __a;
+ return __b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane_u16 (uint16_t __a, uint16x8_t __b, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__b, __idx);
+ __b[__ARM_LANEQ(__b,__idx)] = __a;
+ return __b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane_u32 (uint32_t __a, uint32x4_t __b, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__b, __idx);
+ __b[__ARM_LANEQ(__b,__idx)] = __a;
+ return __b;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane_u64 (uint64_t __a, uint64x2_t __b, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__b, __idx);
+ __b[__ARM_LANEQ(__b,__idx)] = __a;
+ return __b;
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane_s16 (int16x8_t __a, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__a, __idx);
+ return __a[__ARM_LANEQ(__a,__idx)];
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane_s32 (int32x4_t __a, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__a, __idx);
+ return __a[__ARM_LANEQ(__a,__idx)];
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane_s8 (int8x16_t __a, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__a, __idx);
+ return __a[__ARM_LANEQ(__a,__idx)];
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane_s64 (int64x2_t __a, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__a, __idx);
+ return __a[__ARM_LANEQ(__a,__idx)];
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane_u8 (uint8x16_t __a, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__a, __idx);
+ return __a[__ARM_LANEQ(__a,__idx)];
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane_u16 (uint16x8_t __a, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__a, __idx);
+ return __a[__ARM_LANEQ(__a,__idx)];
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane_u32 (uint32x4_t __a, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__a, __idx);
+ return __a[__ARM_LANEQ(__a,__idx)];
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane_u64 (uint64x2_t __a, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__a, __idx);
+ return __a[__ARM_LANEQ(__a,__idx)];
+}
+
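+/* 64-bit scalar shifts: lsll (logical left) and asrl (arithmetic right)
+   are plain C shifts here, but are intended to map to the MVE scalar
+   shift instructions that operate on a 64-bit value held in a register
+   pair.  For example:
+
+     uint64_t t = __arm_lsll (1ULL, 20);   // t == 1 << 20
+*/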
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_lsll (uint64_t value, int32_t shift)
+{
+ return (value << shift);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_asrl (int64_t value, int32_t shift)
+{
+ return (value >> shift);
+}
+
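+/* Saturating and rounding scalar shifts.  The names decode as: u/s =
+   unsigned/signed, q = saturating, r = rounding, shl/shr = shift left or
+   right, trailing l = a 64-bit (long) value.  The _sat48 variants
+   saturate to 48 bits rather than the full 64.  Forms taking an int32_t
+   shift accept a runtime amount; those taking a const int require a
+   compile-time constant.  Sketch (x a uint64_t):
+
+     uint64_t s = __arm_uqshll (x, 4);   // x << 4, saturating on overflow
+*/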
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_uqrshll (uint64_t value, int32_t shift)
+{
+ return __builtin_mve_uqrshll_sat64_di (value, shift);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_uqrshll_sat48 (uint64_t value, int32_t shift)
+{
+ return __builtin_mve_uqrshll_sat48_di (value, shift);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_sqrshrl (int64_t value, int32_t shift)
+{
+ return __builtin_mve_sqrshrl_sat64_di (value, shift);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_sqrshrl_sat48 (int64_t value, int32_t shift)
+{
+ return __builtin_mve_sqrshrl_sat48_di (value, shift);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_uqshll (uint64_t value, const int shift)
+{
+ return __builtin_mve_uqshll_di (value, shift);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_urshrl (uint64_t value, const int shift)
+{
+ return __builtin_mve_urshrl_di (value, shift);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_srshrl (int64_t value, const int shift)
+{
+ return __builtin_mve_srshrl_di (value, shift);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_sqshll (int64_t value, const int shift)
+{
+ return __builtin_mve_sqshll_di (value, shift);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_uqrshl (uint32_t value, int32_t shift)
+{
+ return __builtin_mve_uqrshl_si (value, shift);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_sqrshr (int32_t value, int32_t shift)
+{
+ return __builtin_mve_sqrshr_si (value, shift);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_uqshl (uint32_t value, const int shift)
+{
+ return __builtin_mve_uqshl_si (value, shift);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_urshr (uint32_t value, const int shift)
+{
+ return __builtin_mve_urshr_si (value, shift);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_sqshl (int32_t value, const int shift)
+{
+ return __builtin_mve_sqshl_si (value, shift);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_srshr (int32_t value, const int shift)
+{
+ return __builtin_mve_srshr_si (value, shift);
+}
+
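+/* vshlcq_m: predicated whole-vector shift left with carry (VSHLC).  In
+   effect the 128-bit vector shifts left by __imm bits, with the incoming
+   low bits supplied by *__b and the bits shifted out of the top element
+   written back to *__b; the _m forms additionally take a predicate __p.
+   The instruction produces both a vector and a carry result, hence the
+   paired _vec/_carry builtin calls below.
+*/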
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_s8 (int8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ int8x16_t __res = __builtin_mve_vshlcq_m_vec_sv16qi (__a, *__b, __imm, __p);
+ *__b = __builtin_mve_vshlcq_m_carry_sv16qi (__a, *__b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_u8 (uint8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ uint8x16_t __res = __builtin_mve_vshlcq_m_vec_uv16qi (__a, *__b, __imm, __p);
+ *__b = __builtin_mve_vshlcq_m_carry_uv16qi (__a, *__b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_s16 (int16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ int16x8_t __res = __builtin_mve_vshlcq_m_vec_sv8hi (__a, *__b, __imm, __p);
+ *__b = __builtin_mve_vshlcq_m_carry_sv8hi (__a, *__b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_u16 (uint16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ uint16x8_t __res = __builtin_mve_vshlcq_m_vec_uv8hi (__a, *__b, __imm, __p);
+ *__b = __builtin_mve_vshlcq_m_carry_uv8hi (__a, *__b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_s32 (int32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ int32x4_t __res = __builtin_mve_vshlcq_m_vec_sv4si (__a, *__b, __imm, __p);
+ *__b = __builtin_mve_vshlcq_m_carry_sv4si (__a, *__b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_u32 (uint32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ uint32x4_t __res = __builtin_mve_vshlcq_m_vec_uv4si (__a, *__b, __imm, __p);
+ *__b = __builtin_mve_vshlcq_m_carry_uv4si (__a, *__b, __imm, __p);
+ return __res;
+}
+
+#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
+
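+/* The remainder of this header is compiled only when the target has the
+   MVE floating-point extension (bit 1 of __ARM_FEATURE_MVE).  vst4q
+   stores four vectors with a stride-four interleave, the counterpart of
+   vld4q above.
+*/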
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q_f16 (float16_t * __addr, float16x8x4_t __value)
+{
+ union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst4qv8hf (__addr, __rv.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q_f32 (float32_t * __addr, float32x4x4_t __value)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst4qv4sf (__addr, __rv.__o);
+}
+
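+/* Round-to-integral, per lane: the suffix selects the rounding mode of
+   the underlying VRINT instruction -- X = current mode, raising Inexact;
+   (none) = toward zero; P = toward +Inf; N = to nearest, ties to even;
+   M = toward -Inf; A = to nearest, ties away from zero.  For example,
+   __arm_vrndmq_f32 behaves like a per-lane floorf.
+*/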
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vrndxq_fv8hf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vrndxq_fv4sf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vrndq_fv8hf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vrndq_fv4sf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vrndpq_fv8hf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vrndpq_fv4sf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vrndnq_fv8hf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vrndnq_fv4sf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vrndmq_fv8hf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vrndmq_fv4sf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vrndaq_fv8hf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vrndaq_fv4sf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vrev64q_fv8hf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vrev64q_fv4sf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vnegq_fv8hf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vnegq_fv4sf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n_f16 (float16_t __a)
+{
+ return __builtin_mve_vdupq_n_fv8hf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n_f32 (float32_t __a)
+{
+ return __builtin_mve_vdupq_n_fv4sf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vabsq_fv8hf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vabsq_fv4sf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vrev32q_fv8hf (__a);
+}
+
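+/* Half/single conversions: vcvttq_f32_f16 and vcvtbq_f32_f16 widen the
+   top (odd-numbered) or bottom (even-numbered) f16 lanes of __a to f32.
+   The narrowing _f16_f32 forms further below write converted f32 lanes
+   into the top or bottom half-lanes of an existing f16 vector.
+*/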
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvttq_f32_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vcvttq_f32_f16v4sf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtbq_f32_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vcvtbq_f32_f16v4sf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_f16_s16 (int16x8_t __a)
+{
+ return __builtin_mve_vcvtq_to_f_sv8hf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_f32_s32 (int32x4_t __a)
+{
+ return __builtin_mve_vcvtq_to_f_sv4sf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_f16_u16 (uint16x8_t __a)
+{
+ return __builtin_mve_vcvtq_to_f_uv8hf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_f32_u32 (uint32x4_t __a)
+{
+ return __builtin_mve_vcvtq_to_f_uv4sf (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vcvtq_from_f_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_s32_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vcvtq_from_f_sv4si (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_u16_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vcvtq_from_f_uv8hi (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_u32_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vcvtq_from_f_uv4si (__a);
+}
+
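+/* Float-to-integer conversions with an explicit rounding mode: A = to
+   nearest, ties away from zero; N = to nearest, ties to even; P = toward
+   +Inf; M = toward -Inf.  The plain vcvtq forms above round toward zero,
+   matching a C cast.  Sketch (v a float32x4_t):
+
+     int32x4_t i = __arm_vcvtmq_s32_f32 (v);   // per-lane floor, as int
+*/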
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_u16_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vcvtpq_uv8hi (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_u32_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vcvtpq_uv4si (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_u16_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vcvtnq_uv8hi (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_u32_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vcvtnq_uv4si (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_u16_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vcvtmq_uv8hi (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_u32_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vcvtmq_uv4si (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_u16_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vcvtaq_uv8hi (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_u32_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vcvtaq_uv4si (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vcvtaq_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_s32_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vcvtaq_sv4si (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vcvtnq_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_s32_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vcvtnq_sv4si (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vcvtpq_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_s32_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vcvtpq_sv4si (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_mve_vcvtmq_sv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_s32_f32 (float32x4_t __a)
+{
+ return __builtin_mve_vcvtmq_sv4si (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_f16 (float16x8_t __a, float16_t __b)
+{
+ return __builtin_mve_vsubq_n_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return __builtin_mve_vsubq_n_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_f16 (float16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_f32 (float32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_fv4sf (__a, __b);
+}
+
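+/* Fixed-point conversions: vcvtq_n treats the integer side as having
+   __imm6 fraction bits, so int-to-float divides the value by 2 raised to
+   __imm6 and the float-to-int forms further below multiply by it.
+   __imm6 must be a constant in the range 1-16 for 16-bit lanes and 1-32
+   for 32-bit lanes.
+*/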
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_n_f16_s16 (int16x8_t __a, const int __imm6)
+{
+ return __builtin_mve_vcvtq_n_to_f_sv8hf (__a, __imm6);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_n_f32_s32 (int32x4_t __a, const int __imm6)
+{
+ return __builtin_mve_vcvtq_n_to_f_sv4sf (__a, __imm6);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_n_f16_u16 (uint16x8_t __a, const int __imm6)
+{
+ return __builtin_mve_vcvtq_n_to_f_uv8hf (__a, __imm6);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_n_f32_u32 (uint32x4_t __a, const int __imm6)
+{
+ return __builtin_mve_vcvtq_n_to_f_uv4sf (__a, __imm6);
+}
+
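+/* vcreateq builds a vector from two 64-bit halves: __a supplies bits
+   0-63 and __b bits 64-127, landing in the lanes as raw bit patterns.
+   Sketch (lo and hi assumed to be uint64_t):
+
+     float32x4_t v = __arm_vcreateq_f32 (lo, hi);
+*/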
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcreateq_f16 (uint64_t __a, uint64_t __b)
+{
+ return __builtin_mve_vcreateq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcreateq_f32 (uint64_t __a, uint64_t __b)
+{
+ return __builtin_mve_vcreateq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_n_s16_f16 (float16x8_t __a, const int __imm6)
+{
+ return __builtin_mve_vcvtq_n_from_f_sv8hi (__a, __imm6);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_n_s32_f32 (float32x4_t __a, const int __imm6)
+{
+ return __builtin_mve_vcvtq_n_from_f_sv4si (__a, __imm6);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_n_u16_f16 (float16x8_t __a, const int __imm6)
+{
+ return __builtin_mve_vcvtq_n_from_f_uv8hi (__a, __imm6);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_n_u32_f32 (float32x4_t __a, const int __imm6)
+{
+ return __builtin_mve_vcvtq_n_from_f_uv4si (__a, __imm6);
+}
+
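+/* Vector comparisons return an mve_pred16_t: 16 predicate bits, one per
+   byte of the vector, so each f16 lane yields two identical bits and
+   each f32 lane four.  The _n forms compare every lane against the
+   scalar __b.  The result is normally consumed by a predicated
+   (_m/_p/_z) intrinsic or by vpselq.
+*/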
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_f16 (float16x8_t __a, float16_t __b)
+{
+ return __builtin_mve_vcmpneq_n_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vcmpneq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_n_f16 (float16x8_t __a, float16_t __b)
+{
+ return __builtin_mve_vcmpltq_n_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vcmpltq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_n_f16 (float16x8_t __a, float16_t __b)
+{
+ return __builtin_mve_vcmpleq_n_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vcmpleq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_n_f16 (float16x8_t __a, float16_t __b)
+{
+ return __builtin_mve_vcmpgtq_n_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vcmpgtq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_n_f16 (float16x8_t __a, float16_t __b)
+{
+ return __builtin_mve_vcmpgeq_n_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vcmpgeq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_f16 (float16x8_t __a, float16_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vcmpeqq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vsubq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vorrq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vornq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_f16 (float16x8_t __a, float16_t __b)
+{
+ return __builtin_mve_vmulq_n_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vmulq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmvq_f16 (float16_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vminnmvq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vminnmq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmavq_f16 (float16_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vminnmavq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmaq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vminnmaq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmvq_f16 (float16_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vmaxnmvq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vmaxnmq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmavq_f16 (float16_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vmaxnmavq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmaq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vmaxnmaq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_veorq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vcmulq_rot90v8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vcmulq_rot270v8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vcmulq_rot180v8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vcmulqv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vcaddq_rot90v8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vcaddq_rot270v8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vbicq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vandq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_f16 (float16x8_t __a, float16_t __b)
+{
+ return __builtin_mve_vaddq_n_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_mve_vabdq_fv8hf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return __builtin_mve_vcmpneq_n_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcmpneq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return __builtin_mve_vcmpltq_n_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcmpltq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return __builtin_mve_vcmpleq_n_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcmpleq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return __builtin_mve_vcmpgtq_n_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcmpgtq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return __builtin_mve_vcmpgeq_n_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcmpgeq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcmpeqq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vsubq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vorrq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vornq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return __builtin_mve_vmulq_n_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vmulq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmvq_f32 (float32_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vminnmvq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vminnmq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmavq_f32 (float32_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vminnmavq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmaq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vminnmaq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmvq_f32 (float32_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vmaxnmvq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vmaxnmq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmavq_f32 (float32_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vmaxnmavq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmaq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vmaxnmaq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_veorq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcmulq_rot90v4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcmulq_rot270v4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcmulq_rot180v4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcmulqv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcaddq_rot90v4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcaddq_rot270v4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vbicq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vandq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return __builtin_mve_vaddq_n_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vabdq_fv4sf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvttq_f16_f32 (float16x8_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcvttq_f16_f32v8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtbq_f16_f32 (float16x8_t __a, float32x4_t __b)
+{
+ return __builtin_mve_vcvtbq_f16_f32v8hf (__a, __b);
+}
+
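+/* Predicated variants follow the ACLE naming scheme: _m (merging) forms
+   take an __inactive vector whose lanes are kept where __p is false, _p
+   forms predicate a reduction, and _z forms (like vld1q_z above) zero
+   the inactive lanes.  Sketch, assuming v and w are float32x4_t:
+
+     mve_pred16_t p = __arm_vcmpgtq_f32 (v, w);
+     float32x4_t m = __arm_vpselq_f32 (v, w, p);  // v where v > w, else w
+*/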
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtaq_m_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtaq_m_uv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtaq_m_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtaq_m_uv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_f16_s16 (float16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_to_f_sv8hf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_f16_u16 (float16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_to_f_uv8hf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_f32_s32 (float32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_to_f_sv4sf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_f32_u32 (float32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_to_f_uv4sf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtbq_m_f16_f32 (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtbq_m_f16_f32v8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtbq_m_f32_f16 (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtbq_m_f32_f16v4sf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvttq_m_f16_f32 (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvttq_m_f16_f32v8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvttq_m_f32_f16 (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvttq_m_f32_f16v4sf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_fv8hf (__inactive, __a, __p);
+}
+
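+/* Complex multiply-accumulate: lanes pair up as (real, imaginary)
+   complex numbers, and the _rot90/_rot180/_rot270 forms multiply __b by
+   __c rotated through the given angle before accumulating onto __a.
+   Together with vcmulq above, two instructions form a full complex
+   multiply: vcmulq followed by vcmlaq_rot90.
+*/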
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __builtin_mve_vcmlaqv8hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot180_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __builtin_mve_vcmlaq_rot180v8hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot270_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __builtin_mve_vcmlaq_rot270v8hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot90_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __builtin_mve_vcmlaq_rot90v8hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __builtin_mve_vfmaq_fv8hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
+{
+ return __builtin_mve_vfmaq_n_fv8hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmasq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
+{
+ return __builtin_mve_vfmasq_n_fv8hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmsq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __builtin_mve_vfmsq_fv8hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_fv8hf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtmq_m_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtnq_m_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtpq_m_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_s16_f16 (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_from_f_sv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m_n_f16 (float16x8_t __inactive, float16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_fv8hf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmaq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxnmaq_m_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmavq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxnmavq_p_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmvq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxnmvq_p_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmaq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminnmaq_m_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmavq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminnmavq_p_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmvq_p_f16 (float16_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminnmvq_p_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_fv8hf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vpselq_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_fv8hf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndaq_m_fv8hf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndmq_m_fv8hf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndnq_m_fv8hf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndpq_m_fv8hf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndq_m_fv8hf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq_m_f16 (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndxq_m_fv8hf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_n_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgeq_m_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgeq_m_n_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgtq_m_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgtq_m_n_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpleq_m_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpleq_m_n_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpltq_m_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpltq_m_n_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_n_fv8hf (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtmq_m_uv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtnq_m_uv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtpq_m_uv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_u16_f16 (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_from_f_uv8hi (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __builtin_mve_vcmlaqv4sf (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot180_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __builtin_mve_vcmlaq_rot180v4sf (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot270_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __builtin_mve_vcmlaq_rot270v4sf (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot90_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __builtin_mve_vcmlaq_rot90v4sf (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __builtin_mve_vfmaq_fv4sf (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+ return __builtin_mve_vfmaq_n_fv4sf (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmasq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+ return __builtin_mve_vfmasq_n_fv4sf (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __builtin_mve_vfmsq_fv4sf (__a, __b, __c);
+}
+
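+/* [Editor's note, not part of the upstream header.]  The fused forms
+   above, as this editor reads the ACLE: vfmaq returns a + b*c with a
+   single rounding; vfmaq_n broadcasts the scalar __c as the multiplier;
+   vfmasq_n computes a*b + c with a scalar addend; vfmsq returns
+   a - b*c.  A sketch of one step of a fused dot product:
+
+     acc = __arm_vfmaq_f32 (acc, x, y);  // acc += x*y, single rounding
+*/
+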
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_fv4sf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtmq_m_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtnq_m_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtpq_m_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_s32_f32 (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_from_f_sv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m_n_f32 (float32x4_t __inactive, float32_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_fv4sf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmaq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxnmaq_m_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmavq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxnmavq_p_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmvq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxnmvq_p_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmaq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminnmaq_m_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmavq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminnmavq_p_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmvq_p_f32 (float32_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminnmvq_p_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_fv4sf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vpselq_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_fv4sf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndaq_m_fv4sf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndmq_m_fv4sf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndnq_m_fv4sf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndpq_m_fv4sf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndq_m_fv4sf (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq_m_f32 (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndxq_m_fv4sf (__inactive, __a, __p);
+}
+
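+/* [Editor's note, not part of the upstream header.]  The `_m' (merging)
+   forms above take an explicit __inactive vector: lanes enabled by __p
+   receive the operation's result, and disabled lanes are copied
+   unchanged from __inactive.  A sketch that rounds only selected lanes:
+
+     float32x4_t r = __arm_vrndq_m_f32 (orig, a, p);  // p ? round(a[i]) : orig[i]
+*/
+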
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpeqq_m_n_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgeq_m_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgeq_m_n_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgtq_m_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpgtq_m_n_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpleq_m_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpleq_m_n_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpltq_m_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpltq_m_n_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmpneq_m_n_fv4sf (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtmq_m_uv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtnq_m_uv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtpq_m_uv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_u32_f32 (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_from_f_uv4si (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n_f16_u16 (float16x8_t __inactive, uint16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_uv8hf (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n_f16_s16 (float16x8_t __inactive, int16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_sv8hf (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n_f32_u32 (float32x4_t __inactive, uint32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_uv4sf (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n_f32_s32 (float32x4_t __inactive, int32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_sv4sf (__inactive, __a, __imm6, __p);
+}
+
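+/* [Editor's note, not part of the upstream header.]  The `_n'
+   conversions above treat the integer input as fixed-point with __imm6
+   fractional bits, so the result is approximately __a * 2**-__imm6.
+   __imm6 must be a compile-time constant, 1..16 for 16-bit lanes and
+   1..32 for 32-bit lanes as this editor reads the ACLE.  A sketch
+   converting Q16.16 data under a predicate (names illustrative):
+
+     float32x4_t r = __arm_vcvtq_m_n_f32_s32 (inactive, q16, 16, p);
+*/
+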
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmlaq_m_fv4sf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmlaq_m_fv8hf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot180_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmlaq_rot180_m_fv4sf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot180_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmlaq_rot180_m_fv8hf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot270_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmlaq_rot270_m_fv4sf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot270_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmlaq_rot270_m_fv8hf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot90_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmlaq_rot90_m_fv4sf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot90_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmlaq_rot90_m_fv8hf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot180_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot180_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot270_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot270_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot90_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot90_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n_s32_f32 (int32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_from_f_sv4si (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n_s16_f16 (int16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_from_f_sv8hi (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n_u32_f32 (uint32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_from_f_uv4si (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n_u16_f16 (uint16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_from_f_uv8hi (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vfmaq_m_fv4sf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vfmaq_m_fv8hf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq_m_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vfmaq_m_n_fv4sf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq_m_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vfmaq_m_n_fv8hf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmasq_m_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vfmasq_m_n_fv4sf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmasq_m_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vfmasq_m_n_fv8hf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmsq_m_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vfmsq_m_fv4sf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmsq_m_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
+{
+ return __builtin_mve_vfmsq_m_fv8hf (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxnmq_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxnmq_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminnmq_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminnmq_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_f32 (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_f16 (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_fv8hf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_n_f32 (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_fv4sf (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m_n_f16 (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_fv8hf (__inactive, __a, __b, __p);
+}
+
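+/* [Editor's note, not part of the upstream header.]  As with the unary
+   forms, each predicated binary `_m' intrinsic above merges against its
+   first argument: enabled lanes get op (__a, __b), disabled lanes are
+   copied from __inactive.  A sketch of a conditional accumulate:
+
+     sum = __arm_vaddq_m_f32 (sum, sum, x, p);  // sum += x only where p set
+*/
+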
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_f32 (float32_t const * __base)
+{
+ return __builtin_mve_vld1q_fv4sf((__builtin_neon_si *) __base);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_f16 (float16_t const * __base)
+{
+ return __builtin_mve_vld1q_fv8hf((__builtin_neon_hi *) __base);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_f32 (float32_t const * __base)
+{
+ return __builtin_mve_vldrwq_fv4sf((__builtin_neon_si *) __base);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_z_f32 (float32_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_z_fv4sf((__builtin_neon_si *) __base, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_z_f16 (float16_t const * __base, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_z_fv8hf((__builtin_neon_hi *) __base, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_f16 (float16_t const * __base)
+{
+ return __builtin_mve_vldrhq_fv8hf((__builtin_neon_hi *) __base);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_f16 (float16_t const * __base, uint16x8_t __offset)
+{
+ return __builtin_mve_vldrhq_gather_offset_fv8hf((__builtin_neon_hi *) __base, __offset);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_z_f16 (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_gather_offset_z_fv8hf((__builtin_neon_hi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_f16 (float16_t const * __base, uint16x8_t __offset)
+{
+ return __builtin_mve_vldrhq_gather_shifted_offset_fv8hf ((__builtin_neon_hi *) __base, __offset);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_z_f16 (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrhq_gather_shifted_offset_z_fv8hf ((__builtin_neon_hi *) __base, __offset, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_f32 (uint32x4_t __addr, const int __offset)
+{
+ return __builtin_mve_vldrwq_gather_base_fv4sf (__addr, __offset);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_z_f32 (uint32x4_t __addr, const int __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_gather_base_z_fv4sf (__addr, __offset, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_offset_f32 (float32_t const * __base, uint32x4_t __offset)
+{
+ return __builtin_mve_vldrwq_gather_offset_fv4sf((__builtin_neon_si *) __base, __offset);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_offset_z_f32 (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_gather_offset_z_fv4sf((__builtin_neon_si *) __base, __offset, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_shifted_offset_f32 (float32_t const * __base, uint32x4_t __offset)
+{
+ return __builtin_mve_vldrwq_gather_shifted_offset_fv4sf ((__builtin_neon_si *) __base, __offset);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_shifted_offset_z_f32 (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __builtin_mve_vldrwq_gather_shifted_offset_z_fv4sf ((__builtin_neon_si *) __base, __offset, __p);
+}
+
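+/* [Editor's note, not part of the upstream header.]  In the gather
+   loads above, `_offset' offsets are in bytes while `_shifted_offset'
+   offsets are scaled by the element size (<< 2 for 32-bit lanes, << 1
+   for 16-bit lanes); the `_z' forms zero the lanes whose predicate bit
+   is clear.  A sketch loading buf[idx[i]] (names illustrative):
+
+     float32x4_t v = __arm_vldrwq_gather_shifted_offset_f32 (buf, idx);
+*/
+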
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_p_f32 (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_p_fv4sf ((__builtin_neon_si *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_f32 (float32_t * __addr, float32x4_t __value)
+{
+ __builtin_mve_vstrwq_fv4sf ((__builtin_neon_si *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_f32 (float32_t * __addr, float32x4_t __value)
+{
+ __builtin_mve_vst1q_fv4sf ((__builtin_neon_si *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_f16 (float16_t * __addr, float16x8_t __value)
+{
+ __builtin_mve_vst1q_fv8hf ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_f16 (float16_t * __addr, float16x8_t __value)
+{
+ __builtin_mve_vstrhq_fv8hf ((__builtin_neon_hi *) __addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_p_fv8hf ((__builtin_neon_hi *) __addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
+{
+ __builtin_mve_vstrhq_scatter_offset_fv8hf ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_offset_p_fv8hf ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_fv8hf ((__builtin_neon_hi *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p_f16 (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrhq_scatter_shifted_offset_p_fv8hf ((__builtin_neon_hi *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value)
+{
+ __builtin_mve_vstrwq_scatter_base_fv4sf (__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_p_f32 (uint32x4_t __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_scatter_base_p_fv4sf (__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
+{
+ __builtin_mve_vstrwq_scatter_offset_fv4sf ((__builtin_neon_si *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_scatter_offset_p_fv4sf ((__builtin_neon_si *) __base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_shifted_offset_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
+{
+ __builtin_mve_vstrwq_scatter_shifted_offset_fv4sf ((__builtin_neon_si *) __base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_shifted_offset_p_f32 (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
+{
+ __builtin_mve_vstrwq_scatter_shifted_offset_p_fv4sf ((__builtin_neon_si *) __base, __offset, __value, __p);
+}
+
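+/* [Editor's note, not part of the upstream header.]  The scatter stores
+   mirror the gathers: `_offset' takes byte offsets, `_shifted_offset'
+   element-scaled offsets, `_base' a vector of addresses plus a constant
+   byte offset, and the `_p' forms store only predicate-enabled lanes.
+   A sketch writing v[i] to buf[idx[i]]:
+
+     __arm_vstrwq_scatter_shifted_offset_f32 (buf, idx, v);
+*/
+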
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __a + __b;
+}
+
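+/* [Editor's note, not part of the upstream header.]  Unpredicated vaddq
+   is implemented with GCC's generic vector extension rather than a
+   builtin: float16x8_t and float32x4_t are vector_size types, so
+   lane-wise `+' is plain C here and keeps the arithmetic visible to the
+   optimizers instead of hiding it behind an opaque builtin.  */
+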
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_wb_f32 (uint32x4_t * __addr, const int __offset)
+{
+ float32x4_t
+ result = __builtin_mve_vldrwq_gather_base_nowb_fv4sf (*__addr, __offset);
+ *__addr = __builtin_mve_vldrwq_gather_base_wb_fv4sf (*__addr, __offset);
+ return result;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_base_wb_z_f32 (uint32x4_t * __addr, const int __offset, mve_pred16_t __p)
+{
+ float32x4_t
+ result = __builtin_mve_vldrwq_gather_base_nowb_z_fv4sf (*__addr, __offset, __p);
+ *__addr = __builtin_mve_vldrwq_gather_base_wb_z_fv4sf (*__addr, __offset, __p);
+ return result;
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_wb_f32 (uint32x4_t * __addr, const int __offset, float32x4_t __value)
+{
+ *__addr = __builtin_mve_vstrwq_scatter_base_wb_fv4sf (*__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_wb_p_f32 (uint32x4_t * __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
+{
+ *__addr = __builtin_mve_vstrwq_scatter_base_wb_p_fv4sf (*__addr, __offset, __value, __p);
+}
+
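+/* [Editor's note, not part of the upstream header.]  The `_wb'
+   (write-back) wrappers above pair two builtins: one produces the
+   gathered data (or performs the store), the other returns the advanced
+   base-address vector, which is stored back through *__addr.  A sketch,
+   as this editor understands the semantics, gathering from four streams
+   and bumping each lane's pointer by 16 bytes (names illustrative):
+
+     float32x4_t v = __arm_vldrwq_gather_base_wb_f32 (&ptrs, 16);
+*/
+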
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_f16 (float16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_f32 (float32_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
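+/* [Editor's note, not part of the upstream header.]  The `_x'
+   (dont-care) forms beginning here leave predicate-disabled lanes
+   undefined instead of merging: they forward an
+   __arm_vuninitializedq_*() vector as the __inactive argument of the
+   corresponding `_m' builtin, giving the compiler more freedom than a
+   merging form.  A sketch:
+
+     float32x4_t r = __arm_vdupq_x_n_f32 (3.0f, p);  // disabled lanes undefined
+*/
+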
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminnmq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminnmq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxnmq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxnmq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot90_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot90_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot180_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot180_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot270_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot270_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtaq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtaq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtaq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtaq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtnq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtnq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtnq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtnq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtpq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtpq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtpq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtpq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtmq_m_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtmq_m_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtmq_m_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtmq_m_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtbq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtbq_m_f32_f16v4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvttq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvttq_m_f32_f16v4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_f16_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_to_f_uv8hf (__arm_vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_f16_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_to_f_sv8hf (__arm_vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_f32_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_to_f_sv4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_f32_u32 (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_to_f_uv4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_f16_s16 (int16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_sv8hf (__arm_vuninitializedq_f16 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_f16_u16 (uint16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_uv8hf (__arm_vuninitializedq_f16 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_f32_s32 (int32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_sv4sf (__arm_vuninitializedq_f32 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_f32_u32 (uint32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_uv4sf (__arm_vuninitializedq_f32 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_from_f_sv8hi (__arm_vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_from_f_sv4si (__arm_vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_from_f_uv8hi (__arm_vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_from_f_uv4si (__arm_vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_s16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_from_f_sv8hi (__arm_vuninitializedq_s16 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_s32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_from_f_sv4si (__arm_vuninitializedq_s32 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_u16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_from_f_uv8hi (__arm_vuninitializedq_u16 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_u32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_from_f_uv4si (__arm_vuninitializedq_u32 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndnq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndnq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndmq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndmq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndpq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndpq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndaq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndaq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndxq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndxq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_f16 (float16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_f32 (float32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_fv8hf (__arm_vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_fv4sf (__arm_vuninitializedq_f32 (), __a, __p);
+}
+
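+/* The structure load/store helpers below move data between the opaque
+   multi-register types the builtins use (__builtin_neon_oi,
+   __builtin_neon_xi) and the user-visible tuple structs
+   (float16x8x2_t, float16x8x4_t) through a union, reinterpreting the
+   bits without per-element copying.  */
+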
+__extension__ extern __inline float16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q_f16 (float16_t const * __addr)
+{
+ union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_mve_vld4qv8hf (__addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline float16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q_f16 (float16_t const * __addr)
+{
+ union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_mve_vld2qv8hf (__addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z_f16 (float16_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vldrhq_z_f16 (__base, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q_f16 (float16_t * __addr, float16x8x2_t __value)
+{
+ union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst2qv8hf (__addr, __rv.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p_f16 (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_p_f16 (__addr, __value, __p);
+}
+
+__extension__ extern __inline float32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q_f32 (float32_t const * __addr)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_mve_vld4qv4sf (__addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline float32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q_f32 (float32_t const * __addr)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_mve_vld2qv4sf (__addr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z_f32 (float32_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vldrwq_z_f32 (__base, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q_f32 (float32_t * __addr, float32x4x2_t __value)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__i = __value;
+ __builtin_mve_vst2qv4sf (__addr, __rv.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p_f32 (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_p_f32 (__addr, __value, __p);
+}
+
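+/* Lane accessors: __ARM_CHECK_LANEQ (defined earlier in this header)
+   requires the lane index to be a compile-time constant in range for
+   the vector type, and __ARM_LANEQ maps it to the corresponding GNU C
+   vector element, so the access itself is ordinary vector
+   subscripting.  */
+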
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane_f16 (float16_t __a, float16x8_t __b, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__b, __idx);
+ __b[__ARM_LANEQ (__b, __idx)] = __a;
+ return __b;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane_f32 (float32_t __a, float32x4_t __b, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__b, __idx);
+ __b[__ARM_LANEQ (__b, __idx)] = __a;
+ return __b;
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane_f16 (float16x8_t __a, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__a, __idx);
+ return __a[__ARM_LANEQ (__a, __idx)];
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane_f32 (float32x4_t __a, const int __idx)
+{
+ __ARM_CHECK_LANEQ (__a, __idx);
+ return __a[__ARM_LANEQ (__a, __idx)];
+}
+#endif
+
+#ifdef __cplusplus
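+
+/* For C++, the polymorphic intrinsics below are plain inline function
+   overloads of the type-suffixed variants, so the argument types pick
+   the implementation; the C path dispatches with _Generic-based macros
+   instead.  */
+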
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q (int8_t * __addr, int8x16x4_t __value)
+{
+ __arm_vst4q_s8 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q (int16_t * __addr, int16x8x4_t __value)
+{
+ __arm_vst4q_s16 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q (int32_t * __addr, int32x4x4_t __value)
+{
+ __arm_vst4q_s32 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q (uint8_t * __addr, uint8x16x4_t __value)
+{
+ __arm_vst4q_u8 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q (uint16_t * __addr, uint16x8x4_t __value)
+{
+ __arm_vst4q_u16 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q (uint32_t * __addr, uint32x4x4_t __value)
+{
+ __arm_vst4q_u32 (__addr, __value);
+}
+
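+/* Overload resolution selects the type-suffixed variant from the
+   arguments; an illustrative fragment (the names buf and v are
+   hypothetical, not part of this header):
+
+     int16_t buf[32];
+     int16x8x4_t v = __arm_vld4q_s16 (buf);  /* four de-interleaved vectors  */
+     __arm_vst4q (buf, v);                   /* dispatches to __arm_vst4q_s16  */
+
+   This mirrors the polymorphic dispatch the C macros perform.  */
+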
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n (int8_t __a)
+{
+ return __arm_vdupq_n_s8 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n (int16_t __a)
+{
+ return __arm_vdupq_n_s16 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n (int32_t __a)
+{
+ return __arm_vdupq_n_s32 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq (int8x16_t __a)
+{
+ return __arm_vabsq_s8 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq (int16x8_t __a)
+{
+ return __arm_vabsq_s16 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq (int32x4_t __a)
+{
+ return __arm_vabsq_s32 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq (int8x16_t __a)
+{
+ return __arm_vclsq_s8 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq (int16x8_t __a)
+{
+ return __arm_vclsq_s16 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq (int32x4_t __a)
+{
+ return __arm_vclsq_s32 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq (int8x16_t __a)
+{
+ return __arm_vclzq_s8 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq (int16x8_t __a)
+{
+ return __arm_vclzq_s16 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq (int32x4_t __a)
+{
+ return __arm_vclzq_s32 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq (int8x16_t __a)
+{
+ return __arm_vnegq_s8 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq (int16x8_t __a)
+{
+ return __arm_vnegq_s16 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq (int32x4_t __a)
+{
+ return __arm_vnegq_s32 (__a);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvq (int32x4_t __a)
+{
+ return __arm_vaddlvq_s32 (__a);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq (int8x16_t __a)
+{
+ return __arm_vaddvq_s8 (__a);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq (int16x8_t __a)
+{
+ return __arm_vaddvq_s16 (__a);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq (int32x4_t __a)
+{
+ return __arm_vaddvq_s32 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq (int8x16_t __a)
+{
+ return __arm_vmovlbq_s8 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq (int16x8_t __a)
+{
+ return __arm_vmovlbq_s16 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq (int8x16_t __a)
+{
+ return __arm_vmovltq_s8 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq (int16x8_t __a)
+{
+ return __arm_vmovltq_s16 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq (int8x16_t __a)
+{
+ return __arm_vmvnq_s8 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq (int16x8_t __a)
+{
+ return __arm_vmvnq_s16 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq (int32x4_t __a)
+{
+ return __arm_vmvnq_s32 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q (int8x16_t __a)
+{
+ return __arm_vrev16q_s8 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q (int8x16_t __a)
+{
+ return __arm_vrev32q_s8 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q (int16x8_t __a)
+{
+ return __arm_vrev32q_s16 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q (int8x16_t __a)
+{
+ return __arm_vrev64q_s8 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q (int16x8_t __a)
+{
+ return __arm_vrev64q_s16 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q (int32x4_t __a)
+{
+ return __arm_vrev64q_s32 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqabsq (int8x16_t __a)
+{
+ return __arm_vqabsq_s8 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqabsq (int16x8_t __a)
+{
+ return __arm_vqabsq_s16 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqabsq (int32x4_t __a)
+{
+ return __arm_vqabsq_s32 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqnegq (int8x16_t __a)
+{
+ return __arm_vqnegq_s8 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqnegq (int16x8_t __a)
+{
+ return __arm_vqnegq_s16 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqnegq (int32x4_t __a)
+{
+ return __arm_vqnegq_s32 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q (uint8x16_t __a)
+{
+ return __arm_vrev64q_u8 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q (uint16x8_t __a)
+{
+ return __arm_vrev64q_u16 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q (uint32x4_t __a)
+{
+ return __arm_vrev64q_u32 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq (uint8x16_t __a)
+{
+ return __arm_vmvnq_u8 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq (uint16x8_t __a)
+{
+ return __arm_vmvnq_u16 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq (uint32x4_t __a)
+{
+ return __arm_vmvnq_u32 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n (uint8_t __a)
+{
+ return __arm_vdupq_n_u8 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n (uint16_t __a)
+{
+ return __arm_vdupq_n_u16 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n (uint32_t __a)
+{
+ return __arm_vdupq_n_u32 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq (uint8x16_t __a)
+{
+ return __arm_vclzq_u8 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq (uint16x8_t __a)
+{
+ return __arm_vclzq_u16 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq (uint32x4_t __a)
+{
+ return __arm_vclzq_u32 (__a);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq (uint8x16_t __a)
+{
+ return __arm_vaddvq_u8 (__a);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq (uint16x8_t __a)
+{
+ return __arm_vaddvq_u16 (__a);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq (uint32x4_t __a)
+{
+ return __arm_vaddvq_u32 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q (uint8x16_t __a)
+{
+ return __arm_vrev32q_u8 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q (uint16x8_t __a)
+{
+ return __arm_vrev32q_u16 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq (uint8x16_t __a)
+{
+ return __arm_vmovltq_u8 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq (uint16x8_t __a)
+{
+ return __arm_vmovltq_u16 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq (uint8x16_t __a)
+{
+ return __arm_vmovlbq_u8 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq (uint16x8_t __a)
+{
+ return __arm_vmovlbq_u16 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q (uint8x16_t __a)
+{
+ return __arm_vrev16q_u8 (__a);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvq (uint32x4_t __a)
+{
+ return __arm_vaddlvq_u32 (__a);
+}
+
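+/* Shift-by-immediate overloads: the const int __imm argument must be an
+   integer constant expression within the range the underlying _n_
+   builtin accepts (e.g. 1..8 for vshrq on 8-bit lanes); this is
+   enforced when the builtin is expanded.  */
+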
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq (int8x16_t __a, const int __imm)
+{
+ return __arm_vshrq_n_s8 (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq (int16x8_t __a, const int __imm)
+{
+ return __arm_vshrq_n_s16 (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq (int32x4_t __a, const int __imm)
+{
+ return __arm_vshrq_n_s32 (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq (uint8x16_t __a, const int __imm)
+{
+ return __arm_vshrq_n_u8 (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq (uint16x8_t __a, const int __imm)
+{
+ return __arm_vshrq_n_u16 (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq (uint32x4_t __a, const int __imm)
+{
+ return __arm_vshrq_n_u32 (__a, __imm);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvq_p (int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vaddlvq_p_s32 (__a, __p);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvq_p (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vaddlvq_p_u32 (__a, __p);
+}
+
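+/* Vector comparisons return an mve_pred16_t mask holding one bit per
+   byte of the 128-bit vector, so a 16-bit lane occupies two mask bits
+   and a 32-bit lane four.  */
+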
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vcmpneq_s8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vcmpneq_s16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vcmpneq_s32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vcmpneq_u8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vcmpneq_u16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vcmpneq_u32 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vshlq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vshlq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vshlq_s32 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq (uint8x16_t __a, int8x16_t __b)
+{
+ return __arm_vshlq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq (uint16x8_t __a, int16x8_t __b)
+{
+ return __arm_vshlq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq (uint32x4_t __a, int32x4_t __b)
+{
+ return __arm_vshlq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vsubq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (uint8x16_t __a, uint8_t __b)
+{
+ return __arm_vsubq_n_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vrmulhq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vrhaddq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vqsubq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq (uint8x16_t __a, uint8_t __b)
+{
+ return __arm_vqsubq_n_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vqaddq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq (uint8x16_t __a, uint8_t __b)
+{
+ return __arm_vqaddq_n_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vorrq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vornq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vmulq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (uint8x16_t __a, uint8_t __b)
+{
+ return __arm_vmulq_n_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vmulltq_int_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vmullbq_int_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vmulhq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vmladavq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq (uint8_t __a, uint8x16_t __b)
+{
+ return __arm_vminvq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vminq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq (uint8_t __a, uint8x16_t __b)
+{
+ return __arm_vmaxvq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vmaxq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vhsubq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq (uint8x16_t __a, uint8_t __b)
+{
+ return __arm_vhsubq_n_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vhaddq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq (uint8x16_t __a, uint8_t __b)
+{
+ return __arm_vhaddq_n_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_veorq_u8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (uint8x16_t __a, uint8_t __b)
+{
+ return __arm_vcmpneq_n_u8 (__a, __b);
+}
+
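+/* For unsigned operands, vcmphiq is unsigned greater-than and vcmpcsq
+   is unsigned greater-than-or-equal, after the ARM HI and CS condition
+   codes.  */
+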
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vcmphiq_u8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq (uint8x16_t __a, uint8_t __b)
+{
+ return __arm_vcmphiq_n_u8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vcmpeqq_u8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (uint8x16_t __a, uint8_t __b)
+{
+ return __arm_vcmpeqq_n_u8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vcmpcsq_u8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq (uint8x16_t __a, uint8_t __b)
+{
+ return __arm_vcmpcsq_n_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vcaddq_rot90_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vcaddq_rot270_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vbicq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vandq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vaddvq_p_u8 (__a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq (uint32_t __a, uint8x16_t __b)
+{
+ return __arm_vaddvaq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (uint8x16_t __a, uint8_t __b)
+{
+ return __arm_vaddq_n_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vabdq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r (uint8x16_t __a, int32_t __b)
+{
+ return __arm_vshlq_r_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq (uint8x16_t __a, int8x16_t __b)
+{
+ return __arm_vrshlq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq (uint8x16_t __a, int32_t __b)
+{
+ return __arm_vrshlq_n_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq (uint8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqshlq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r (uint8x16_t __a, int32_t __b)
+{
+ return __arm_vqshlq_r_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq (uint8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqrshlq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq (uint8x16_t __a, int32_t __b)
+{
+ return __arm_vqrshlq_n_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq (uint8_t __a, int8x16_t __b)
+{
+ return __arm_vminavq_s8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq (uint8x16_t __a, int8x16_t __b)
+{
+ return __arm_vminaq_s8 (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq (uint8_t __a, int8x16_t __b)
+{
+ return __arm_vmaxavq_s8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq (uint8x16_t __a, int8x16_t __b)
+{
+ return __arm_vmaxaq_s8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq (uint8x16_t __a, int32_t __b)
+{
+ return __arm_vbrsrq_n_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n (uint8x16_t __a, const int __imm)
+{
+ return __arm_vshlq_n_u8 (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq (uint8x16_t __a, const int __imm)
+{
+ return __arm_vrshrq_n_u8 (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n (uint8x16_t __a, const int __imm)
+{
+ return __arm_vqshlq_n_u8 (__a, __imm);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vcmpneq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vcmpltq_s8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vcmpltq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vcmpleq_s8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vcmpleq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vcmpgtq_s8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vcmpgtq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vcmpgeq_s8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vcmpgeq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vcmpeqq_s8 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vcmpeqq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq (int8x16_t __a, const int __imm)
+{
+ return __arm_vqshluq_n_s8 (__a, __imm);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p (int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vaddvq_p_s8 (__a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vsubq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vsubq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r (int8x16_t __a, int32_t __b)
+{
+ return __arm_vshlq_r_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vrshlq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq (int8x16_t __a, int32_t __b)
+{
+ return __arm_vrshlq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vrmulhq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vrhaddq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqsubq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vqsubq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqshlq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r (int8x16_t __a, int32_t __b)
+{
+ return __arm_vqshlq_r_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqrshlq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq (int8x16_t __a, int32_t __b)
+{
+ return __arm_vqrshlq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqrdmulhq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vqrdmulhq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqdmulhq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vqdmulhq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqaddq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vqaddq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vorrq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vornq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vmulq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vmulq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vmulltq_int_s8 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vmullbq_int_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vmulhq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vmlsdavxq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vmlsdavq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vmladavxq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vmladavq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq (int8_t __a, int8x16_t __b)
+{
+ return __arm_vminvq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vminq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq (int8_t __a, int8x16_t __b)
+{
+ return __arm_vmaxvq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vmaxq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vhsubq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vhsubq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90 (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vhcaddq_rot90_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270 (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vhcaddq_rot270_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vhaddq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vhaddq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_veorq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90 (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vcaddq_rot90_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270 (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vcaddq_rot270_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq (int8x16_t __a, int32_t __b)
+{
+ return __arm_vbrsrq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vbicq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vandq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq (int32_t __a, int8x16_t __b)
+{
+ return __arm_vaddvaq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (int8x16_t __a, int8_t __b)
+{
+ return __arm_vaddq_n_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vabdq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n (int8x16_t __a, const int __imm)
+{
+ return __arm_vshlq_n_s8 (__a, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq (int8x16_t __a, const int __imm)
+{
+ return __arm_vrshrq_n_s8 (__a, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n (int8x16_t __a, const int __imm)
+{
+ return __arm_vqshlq_n_s8 (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vsubq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (uint16x8_t __a, uint16_t __b)
+{
+ return __arm_vsubq_n_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vrmulhq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vrhaddq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vqsubq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq (uint16x8_t __a, uint16_t __b)
+{
+ return __arm_vqsubq_n_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vqaddq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq (uint16x8_t __a, uint16_t __b)
+{
+ return __arm_vqaddq_n_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vorrq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vornq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vmulq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (uint16x8_t __a, uint16_t __b)
+{
+ return __arm_vmulq_n_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vmulltq_int_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vmullbq_int_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vmulhq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vmladavq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq (uint16_t __a, uint16x8_t __b)
+{
+ return __arm_vminvq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vminq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq (uint16_t __a, uint16x8_t __b)
+{
+ return __arm_vmaxvq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vmaxq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vhsubq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq (uint16x8_t __a, uint16_t __b)
+{
+ return __arm_vhsubq_n_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vhaddq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq (uint16x8_t __a, uint16_t __b)
+{
+ return __arm_vhaddq_n_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_veorq_u16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (uint16x8_t __a, uint16_t __b)
+{
+ return __arm_vcmpneq_n_u16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vcmphiq_u16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq (uint16x8_t __a, uint16_t __b)
+{
+ return __arm_vcmphiq_n_u16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vcmpeqq_u16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (uint16x8_t __a, uint16_t __b)
+{
+ return __arm_vcmpeqq_n_u16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vcmpcsq_u16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq (uint16x8_t __a, uint16_t __b)
+{
+ return __arm_vcmpcsq_n_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vcaddq_rot90_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vcaddq_rot270_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vbicq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vandq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vaddvq_p_u16 (__a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq (uint32_t __a, uint16x8_t __b)
+{
+ return __arm_vaddvaq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (uint16x8_t __a, uint16_t __b)
+{
+ return __arm_vaddq_n_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vabdq_u16 (__a, __b);
+}
+
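+/* Shifts with a run-time count: vshlq_r takes a scalar count, while
+   vrshlq, vqshlq and vqrshlq come in per-lane and scalar (rounding and
+   saturating) forms.  Counts are always signed; a negative count
+   shifts right.  */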
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r (uint16x8_t __a, int32_t __b)
+{
+ return __arm_vshlq_r_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq (uint16x8_t __a, int16x8_t __b)
+{
+ return __arm_vrshlq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq (uint16x8_t __a, int32_t __b)
+{
+ return __arm_vrshlq_n_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq (uint16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqshlq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r (uint16x8_t __a, int32_t __b)
+{
+ return __arm_vqshlq_r_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq (uint16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqrshlq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq (uint16x8_t __a, int32_t __b)
+{
+ return __arm_vqrshlq_n_u16 (__a, __b);
+}
+
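+/* Mixed-signedness operations: vminavq/vmaxavq reduce the absolute
+   values of a signed vector into an unsigned scalar, vminaq/vmaxaq are
+   their element-wise counterparts, and vbrsrq maps to the VBRSR
+   bit-reverse instruction.  */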
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq (uint16_t __a, int16x8_t __b)
+{
+ return __arm_vminavq_s16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq (uint16x8_t __a, int16x8_t __b)
+{
+ return __arm_vminaq_s16 (__a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq (uint16_t __a, int16x8_t __b)
+{
+ return __arm_vmaxavq_s16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq (uint16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmaxaq_s16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq (uint16x8_t __a, int32_t __b)
+{
+ return __arm_vbrsrq_n_u16 (__a, __b);
+}
+
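+/* Immediate-count shifts: __imm must be an integer constant expression
+   in the range allowed for the lane width.  */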
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n (uint16x8_t __a, const int __imm)
+{
+ return __arm_vshlq_n_u16 (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq (uint16x8_t __a, const int __imm)
+{
+ return __arm_vrshrq_n_u16 (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n (uint16x8_t __a, const int __imm)
+{
+ return __arm_vqshlq_n_u16 (__a, __imm);
+}
+
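+/* Signed comparisons.  All vcmp* overloads return an mve_pred16_t
+   predicate with one bit per byte of the 128-bit vector, so each
+   16-bit lane sets two predicate bits.  */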
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vcmpneq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vcmpltq_s16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vcmpltq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vcmpleq_s16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vcmpleq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vcmpgtq_s16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vcmpgtq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vcmpgeq_s16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vcmpgeq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vcmpeqq_s16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vcmpeqq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq (int16x8_t __a, const int __imm)
+{
+ return __arm_vqshluq_n_s16 (__a, __imm);
+}
+
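+/* The same arithmetic set for signed 16-bit lanes, plus the
+   signed-only operations: vqdmulhq/vqrdmulhq (saturating doubling
+   multiplies returning the high half, without and with rounding),
+   vhcaddq_rot90/_rot270 (halving complex add) and the
+   vmladav/vmlsdav dot-product style reductions.  */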
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p (int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vaddvq_p_s16 (__a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vsubq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vsubq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r (int16x8_t __a, int32_t __b)
+{
+ return __arm_vshlq_r_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vrshlq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq (int16x8_t __a, int32_t __b)
+{
+ return __arm_vrshlq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vrmulhq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vrhaddq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqsubq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vqsubq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqshlq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r (int16x8_t __a, int32_t __b)
+{
+ return __arm_vqshlq_r_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqrshlq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq (int16x8_t __a, int32_t __b)
+{
+ return __arm_vqrshlq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqrdmulhq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vqrdmulhq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqdmulhq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vqdmulhq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqaddq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vqaddq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vorrq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vornq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmulq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vmulq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmulltq_int_s16 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmullbq_int_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmulhq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmlsdavxq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmlsdavq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmladavxq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmladavq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq (int16_t __a, int16x8_t __b)
+{
+ return __arm_vminvq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vminq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq (int16_t __a, int16x8_t __b)
+{
+ return __arm_vmaxvq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmaxq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vhsubq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vhsubq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90 (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vhcaddq_rot90_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270 (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vhcaddq_rot270_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vhaddq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vhaddq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_veorq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90 (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vcaddq_rot90_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270 (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vcaddq_rot270_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq (int16x8_t __a, int32_t __b)
+{
+ return __arm_vbrsrq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vbicq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vandq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq (int32_t __a, int16x8_t __b)
+{
+ return __arm_vaddvaq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vaddq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vabdq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n (int16x8_t __a, const int __imm)
+{
+ return __arm_vshlq_n_s16 (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq (int16x8_t __a, const int __imm)
+{
+ return __arm_vrshrq_n_s16 (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n (int16x8_t __a, const int __imm)
+{
+ return __arm_vqshlq_n_s16 (__a, __imm);
+}
+
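+/* The polymorphic set repeated for 32-bit lanes (uint32x4_t first,
+   then int32x4_t); the long multiplies vmullbq_int/vmulltq_int now
+   widen to 64-bit lanes.  */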
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vsubq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (uint32x4_t __a, uint32_t __b)
+{
+ return __arm_vsubq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vrmulhq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vrhaddq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vqsubq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq (uint32x4_t __a, uint32_t __b)
+{
+ return __arm_vqsubq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vqaddq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq (uint32x4_t __a, uint32_t __b)
+{
+ return __arm_vqaddq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vorrq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vornq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vmulq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (uint32x4_t __a, uint32_t __b)
+{
+ return __arm_vmulq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vmulltq_int_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vmullbq_int_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vmulhq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vmladavq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq (uint32_t __a, uint32x4_t __b)
+{
+ return __arm_vminvq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vminq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq (uint32_t __a, uint32x4_t __b)
+{
+ return __arm_vmaxvq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vmaxq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vhsubq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq (uint32x4_t __a, uint32_t __b)
+{
+ return __arm_vhsubq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vhaddq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq (uint32x4_t __a, uint32_t __b)
+{
+ return __arm_vhaddq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_veorq_u32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (uint32x4_t __a, uint32_t __b)
+{
+ return __arm_vcmpneq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vcmphiq_u32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq (uint32x4_t __a, uint32_t __b)
+{
+ return __arm_vcmphiq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vcmpeqq_u32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (uint32x4_t __a, uint32_t __b)
+{
+ return __arm_vcmpeqq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vcmpcsq_u32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq (uint32x4_t __a, uint32_t __b)
+{
+ return __arm_vcmpcsq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vcaddq_rot90_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vcaddq_rot270_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vbicq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vandq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vaddvq_p_u32 (__a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq (uint32_t __a, uint32x4_t __b)
+{
+ return __arm_vaddvaq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (uint32x4_t __a, uint32_t __b)
+{
+ return __arm_vaddq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vabdq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r (uint32x4_t __a, int32_t __b)
+{
+ return __arm_vshlq_r_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq (uint32x4_t __a, int32x4_t __b)
+{
+ return __arm_vrshlq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq (uint32x4_t __a, int32_t __b)
+{
+ return __arm_vrshlq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq (uint32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqshlq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r (uint32x4_t __a, int32_t __b)
+{
+ return __arm_vqshlq_r_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq (uint32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqrshlq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq (uint32x4_t __a, int32_t __b)
+{
+ return __arm_vqrshlq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq (uint32_t __a, int32x4_t __b)
+{
+ return __arm_vminavq_s32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq (uint32x4_t __a, int32x4_t __b)
+{
+ return __arm_vminaq_s32 (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq (uint32_t __a, int32x4_t __b)
+{
+ return __arm_vmaxavq_s32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq (uint32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmaxaq_s32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq (uint32x4_t __a, int32_t __b)
+{
+ return __arm_vbrsrq_n_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n (uint32x4_t __a, const int __imm)
+{
+ return __arm_vshlq_n_u32 (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq (uint32x4_t __a, const int __imm)
+{
+ return __arm_vrshrq_n_u32 (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n (uint32x4_t __a, const int __imm)
+{
+ return __arm_vqshlq_n_u32 (__a, __imm);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vcmpneq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vcmpltq_s32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vcmpltq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vcmpleq_s32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vcmpleq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vcmpgtq_s32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vcmpgtq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vcmpgeq_s32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vcmpgeq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vcmpeqq_s32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vcmpeqq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq (int32x4_t __a, const int __imm)
+{
+ return __arm_vqshluq_n_s32 (__a, __imm);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p (int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vaddvq_p_s32 (__a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vsubq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vsubq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r (int32x4_t __a, int32_t __b)
+{
+ return __arm_vshlq_r_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vrshlq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vrshlq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vrmulhq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vrhaddq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqsubq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vqsubq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqshlq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r (int32x4_t __a, int32_t __b)
+{
+ return __arm_vqshlq_r_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqrshlq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vqrshlq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqrdmulhq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vqrdmulhq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqdmulhq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vqdmulhq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqaddq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vqaddq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vorrq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vornq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmulq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vmulq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmulltq_int_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmullbq_int_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmulhq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmlsdavxq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmlsdavq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmladavxq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmladavq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq (int32_t __a, int32x4_t __b)
+{
+ return __arm_vminvq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vminq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq (int32_t __a, int32x4_t __b)
+{
+ return __arm_vmaxvq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmaxq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vhsubq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vhsubq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90 (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vhcaddq_rot90_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270 (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vhcaddq_rot270_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vhaddq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vhaddq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_veorq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90 (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vcaddq_rot90_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270 (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vcaddq_rot270_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vbrsrq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vbicq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vandq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq (int32_t __a, int32x4_t __b)
+{
+ return __arm_vaddvaq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vaddq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vabdq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n (int32x4_t __a, const int __imm)
+{
+ return __arm_vshlq_n_s32 (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq (int32x4_t __a, const int __imm)
+{
+ return __arm_vrshrq_n_s32 (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n (int32x4_t __a, const int __imm)
+{
+ return __arm_vqshlq_n_s32 (__a, __imm);
+}
+
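+/* Lane-narrowing and lane-widening operations between 8-bit and 16-bit
+   element sizes: vqmovnbq/vqmovntq and vmovnbq/vmovntq write the even
+   ("bottom") or odd ("top") lanes of the narrower destination,
+   vshllbq/vshlltq widen the corresponding lanes, vmullbq_poly and
+   vmulltq_poly are polynomial multiplies, vqdmullbq/vqdmulltq are
+   saturating doubling long multiplies, the vmlaldav/vmlsldav
+   reductions accumulate into 64 bits, and the vorrq/vbicq overloads
+   taking a const int use an encoded immediate.  */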
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq (uint8x16_t __a, uint16x8_t __b)
+{
+ return __arm_vqmovntq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq (uint8x16_t __a, uint16x8_t __b)
+{
+ return __arm_vqmovnbq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vmulltq_poly_p8 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vmullbq_poly_p8 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq (uint8x16_t __a, uint16x8_t __b)
+{
+ return __arm_vmovntq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq (uint8x16_t __a, uint16x8_t __b)
+{
+ return __arm_vmovnbq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vmlaldavq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovuntq (uint8x16_t __a, int16x8_t __b)
+{
+ return __arm_vqmovuntq_s16 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovunbq (uint8x16_t __a, int16x8_t __b)
+{
+ return __arm_vqmovunbq_s16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq (uint8x16_t __a, const int __imm)
+{
+ return __arm_vshlltq_n_u8 (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq (uint8x16_t __a, const int __imm)
+{
+ return __arm_vshllbq_n_u8 (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq (uint16x8_t __a, const int __imm)
+{
+ return __arm_vorrq_n_u16 (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq (uint16x8_t __a, const int __imm)
+{
+ return __arm_vbicq_n_u16 (__a, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq (int8x16_t __a, int16x8_t __b)
+{
+ return __arm_vqmovntq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq (int8x16_t __a, int16x8_t __b)
+{
+ return __arm_vqmovnbq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqdmulltq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vqdmulltq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqdmullbq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq (int16x8_t __a, int16_t __b)
+{
+ return __arm_vqdmullbq_n_s16 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq (int8x16_t __a, int16x8_t __b)
+{
+ return __arm_vmovntq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq (int8x16_t __a, int16x8_t __b)
+{
+ return __arm_vmovnbq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavxq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmlsldavxq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmlsldavq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavxq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmlaldavxq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vmlaldavq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq (int8x16_t __a, const int __imm)
+{
+ return __arm_vshlltq_n_s8 (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq (int8x16_t __a, const int __imm)
+{
+ return __arm_vshllbq_n_s8 (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq (int16x8_t __a, const int __imm)
+{
+ return __arm_vorrq_n_s16 (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq (int16x8_t __a, const int __imm)
+{
+ return __arm_vbicq_n_s16 (__a, __imm);
+}
+
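+/* The same narrowing and widening set between 16-bit and 32-bit
+   element sizes.  */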
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq (uint16x8_t __a, uint32x4_t __b)
+{
+ return __arm_vqmovntq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq (uint16x8_t __a, uint32x4_t __b)
+{
+ return __arm_vqmovnbq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vmulltq_poly_p16 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vmullbq_poly_p16 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq (uint16x8_t __a, uint32x4_t __b)
+{
+ return __arm_vmovntq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq (uint16x8_t __a, uint32x4_t __b)
+{
+ return __arm_vmovnbq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vmlaldavq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovuntq (uint16x8_t __a, int32x4_t __b)
+{
+ return __arm_vqmovuntq_s32 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovunbq (uint16x8_t __a, int32x4_t __b)
+{
+ return __arm_vqmovunbq_s32 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq (uint16x8_t __a, const int __imm)
+{
+ return __arm_vshlltq_n_u16 (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq (uint16x8_t __a, const int __imm)
+{
+ return __arm_vshllbq_n_u16 (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq (uint32x4_t __a, const int __imm)
+{
+ return __arm_vorrq_n_u32 (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq (uint32x4_t __a, const int __imm)
+{
+ return __arm_vbicq_n_u32 (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq (int16x8_t __a, int32x4_t __b)
+{
+ return __arm_vqmovntq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq (int16x8_t __a, int32x4_t __b)
+{
+ return __arm_vqmovnbq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqdmulltq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vqdmulltq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqdmullbq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq (int32x4_t __a, int32_t __b)
+{
+ return __arm_vqdmullbq_n_s32 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq (int16x8_t __a, int32x4_t __b)
+{
+ return __arm_vmovntq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq (int16x8_t __a, int32x4_t __b)
+{
+ return __arm_vmovnbq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavxq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmlsldavxq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmlsldavq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavxq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmlaldavxq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vmlaldavq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq (int16x8_t __a, const int __imm)
+{
+ return __arm_vshlltq_n_s16 (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq (int16x8_t __a, const int __imm)
+{
+ return __arm_vshllbq_n_s16 (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq (int32x4_t __a, const int __imm)
+{
+ return __arm_vorrq_n_s32 (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq (int32x4_t __a, const int __imm)
+{
+ return __arm_vbicq_n_s32 (__a, __imm);
+}
+
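+/* vrmlaldavhq/vrmlsldavhq: rounding long dot products of 32-bit lanes that
+   keep the high 64 bits of the accumulation; vaddlvaq adds a widened
+   across-vector lane sum to a 64-bit accumulator.  */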
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vrmlaldavhq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvaq (uint64_t __a, uint32x4_t __b)
+{
+ return __arm_vaddlvaq_u32 (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhxq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vrmlsldavhxq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vrmlsldavhq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhxq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vrmlaldavhxq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vrmlaldavhq_s32 (__a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvaq (int64_t __a, int32x4_t __b)
+{
+ return __arm_vaddlvaq_s32 (__a, __b);
+}
+
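+/* vabavq: absolute-difference-and-accumulate; adds |__b[i] - __c[i]| for
+   every lane to the scalar accumulator __a.  */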
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq (uint32_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return __arm_vabavq_s8 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq (uint32_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __arm_vabavq_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq (uint32_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __arm_vabavq_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return __arm_vabavq_u8 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return __arm_vabavq_u16 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return __arm_vabavq_u32 (__a, __b, __c);
+}
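+
+/* Illustrative sketch (an assumption, not upstream code), using
+   __arm_vldrbq_u8 from elsewhere in this header: a sum-of-absolute-
+   differences accumulation over two byte buffers p and q:
+
+     uint32_t sad = 0;
+     sad = __arm_vabavq (sad, __arm_vldrbq_u8 (p), __arm_vldrbq_u8 (q));
+*/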
+
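+/* Predicated (merging) bit-clear with an immediate: lanes whose predicate
+   bits in __p are clear keep their previous value from __a.  */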
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_n (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vbicq_m_n_s16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_n (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vbicq_m_n_s32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_n (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vbicq_m_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m_n (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vbicq_m_n_u32 (__a, __imm, __p);
+}
+
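+/* vqrshrnbq/vqrshrunbq: saturating rounding shift right by __imm, narrowed
+   into the bottom (b) lanes of __a; the "un" forms narrow signed input to
+   unsigned output.  */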
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vqrshrnbq_n_s16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __arm_vqrshrnbq_n_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vqrshrnbq_n_s32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __arm_vqrshrnbq_n_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrunbq (uint8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vqrshrunbq_n_s16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrunbq (uint16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vqrshrunbq_n_s32 (__a, __b, __imm);
+}
+
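+/* vrmlaldavhaq: accumulating form of vrmlaldavhq above, adding the rounded
+   high dot product to the 64-bit scalar __a.  */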
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhaq (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __arm_vrmlaldavhaq_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhaq (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return __arm_vrmlaldavhaq_u32 (__a, __b, __c);
+}
+
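+/* vshlcq: whole-vector shift left with carry; the low __imm bits of *__b
+   are shifted in at the bottom and *__b receives the bits shifted out at
+   the top.  */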
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq (int8x16_t __a, uint32_t * __b, const int __imm)
+{
+ return __arm_vshlcq_s8 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq (uint8x16_t __a, uint32_t * __b, const int __imm)
+{
+ return __arm_vshlcq_u8 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq (int16x8_t __a, uint32_t * __b, const int __imm)
+{
+ return __arm_vshlcq_s16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq (uint16x8_t __a, uint32_t * __b, const int __imm)
+{
+ return __arm_vshlcq_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq (int32x4_t __a, uint32_t * __b, const int __imm)
+{
+ return __arm_vshlcq_s32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq (uint32x4_t __a, uint32_t * __b, const int __imm)
+{
+ return __arm_vshlcq_u32 (__a, __b, __imm);
+}
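+
+/* Illustrative sketch (an assumption, not upstream code): streaming bits
+   through a vector while threading the carry word between calls:
+
+     uint32_t carry = 0;
+     uint8x16_t v = __arm_vdupq_n_u8 (0);
+     v = __arm_vshlcq (v, &carry, 4);
+*/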
+
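+/* vpselq selects each lane from __a where the predicate bit is set and
+   from __b where it is clear.  The predicated _m (merging) and _p
+   (predicated-reduction) overloads for 8-bit lanes follow; the same set
+   repeats below for 16-bit and then 32-bit lanes.  */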
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vpselq_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vpselq_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_m_u8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vmvnq_m_u8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
+{
+ return __arm_vmlasq_n_u8 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq (uint8x16_t __a, uint8x16_t __b, uint8_t __c)
+{
+ return __arm_vmlaq_n_u8 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_p (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmladavq_p_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq (uint32_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return __arm_vmladavaq_u8 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_p (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vminvq_p_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_p (uint8_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxvq_p_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m (uint8x16_t __inactive, uint8_t __a, mve_pred16_t __p)
+{
+ return __arm_vdupq_m_n_u8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_n_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmphiq_m_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmphiq_m_n_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_n_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_m (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpcsq_m_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_m (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpcsq_m_n_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vclzq_m_u8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_p (uint32_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddvaq_p_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq (uint8x16_t __a, uint8x16_t __b, const int __imm)
+{
+ return __arm_vsriq_n_u8 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq (uint8x16_t __a, uint8x16_t __b, const int __imm)
+{
+ return __arm_vsliq_n_u8 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_r_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_m_n_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_r (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_r_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_n (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrshlq_m_n_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq_p (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vminavq_p_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq_m (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vminaq_m_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq_p (uint8_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxavq_p_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq_m (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxaq_m_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpltq_m_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpltq_m_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpleq_m_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpleq_m_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgtq_m_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgtq_m_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgeq_m_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgeq_m_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_r_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_m_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_m_s8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_r (int8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_r_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_n (int8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrshlq_m_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqnegq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vqnegq_m_s8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqabsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vqabsq_m_s8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vnegq_m_s8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vmvnq_m_s8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlsdavxq_p_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlsdavq_p_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmladavxq_p_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_p (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmladavq_p_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_p (int8_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vminvq_p_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_p (int8_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxvq_p_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m (int8x16_t __inactive, int8_t __a, mve_pred16_t __p)
+{
+ return __arm_vdupq_m_n_s8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vclzq_m_s8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vclsq_m_s8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_p (int32_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddvaq_p_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vabsq_m_s8 (__inactive, __a, __p);
+}
+
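+/* Saturating doubling multiply-accumulate families for 8-bit lanes: the
+   vqdm and vqrdm dual-lane (dh, sdh) and exchange (dhx, sdhx) forms, the
+   scalar mla/mlas/mlash variants, and the vmladava/vmlsdava dot-product
+   accumulators, ending with the vsriq/vsliq shift-insert pair.  */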
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqrdmlsdhxq_s8 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqrdmlsdhq_s8 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlashq (int8x16_t __a, int8x16_t __b, int8_t __c)
+{
+ return __arm_vqrdmlashq_n_s8 (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlashq (int8x16_t __a, int8x16_t __b, int8_t __c)
+{
+ return __arm_vqdmlashq_n_s8 (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlahq (int8x16_t __a, int8x16_t __b, int8_t __c)
+{
+ return __arm_vqrdmlahq_n_s8 (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqrdmladhxq_s8 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqrdmladhq_s8 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqdmlsdhxq_s8 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqdmlsdhq_s8 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlahq (int8x16_t __a, int8x16_t __b, int8_t __c)
+{
+ return __arm_vqdmlahq_n_s8 (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhxq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqdmladhxq_s8 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhq (int8x16_t __inactive, int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vqdmladhq_s8 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaxq (int32_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return __arm_vmlsdavaxq_s8 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaq (int32_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return __arm_vmlsdavaq_s8 (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq (int8x16_t __a, int8x16_t __b, int8_t __c)
+{
+ return __arm_vmlasq_n_s8 (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq (int8x16_t __a, int8x16_t __b, int8_t __c)
+{
+ return __arm_vmlaq_n_s8 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaxq (int32_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return __arm_vmladavaxq_s8 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq (int32_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return __arm_vmladavaq_s8 (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq (int8x16_t __a, int8x16_t __b, const int __imm)
+{
+ return __arm_vsriq_n_s8 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq (int8x16_t __a, int8x16_t __b, const int __imm)
+{
+ return __arm_vsliq_n_s8 (__a, __b, __imm);
+}
+
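+/* The same predicated overload set as above, repeated for 16-bit lanes.  */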
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vpselq_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vpselq_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_m_u16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vmvnq_m_u16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
+{
+ return __arm_vmlasq_n_u16 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
+{
+ return __arm_vmlaq_n_u16 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_p (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmladavq_p_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq (uint32_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return __arm_vmladavaq_u16 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_p (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vminvq_p_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_p (uint16_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxvq_p_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m (uint16x8_t __inactive, uint16_t __a, mve_pred16_t __p)
+{
+ return __arm_vdupq_m_n_u16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_n_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmphiq_m_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmphiq_m_n_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_n_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_m (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpcsq_m_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_m (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpcsq_m_n_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vclzq_m_u16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_p (uint32_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddvaq_p_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq (uint16x8_t __a, uint16x8_t __b, const int __imm)
+{
+ return __arm_vsriq_n_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq (uint16x8_t __a, uint16x8_t __b, const int __imm)
+{
+ return __arm_vsliq_n_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_r_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_m_n_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_r (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_r_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_n (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrshlq_m_n_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq_p (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vminavq_p_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq_m (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vminaq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq_p (uint16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxavq_p_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq_m (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxaq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpltq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpltq_m_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpleq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpleq_m_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgtq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgtq_m_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgeq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgeq_m_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_r_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_m_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_m_s16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_r (int16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_r_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_n (int16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrshlq_m_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqnegq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vqnegq_m_s16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqabsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vqabsq_m_s16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vnegq_m_s16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vmvnq_m_s16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlsdavxq_p_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlsdavq_p_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmladavxq_p_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmladavq_p_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_p (int16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vminvq_p_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_p (int16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxvq_p_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m (int16x8_t __inactive, int16_t __a, mve_pred16_t __p)
+{
+ return __arm_vdupq_m_n_s16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vclzq_m_s16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vclsq_m_s16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_p (int32_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddvaq_p_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vabsq_m_s16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqrdmlsdhxq_s16 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqrdmlsdhq_s16 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlashq (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return __arm_vqrdmlashq_n_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlashq (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return __arm_vqdmlashq_n_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlahq (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return __arm_vqrdmlahq_n_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqrdmladhxq_s16 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqrdmladhq_s16 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqdmlsdhxq_s16 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqdmlsdhq_s16 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlahq (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return __arm_vqdmlahq_n_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhxq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqdmladhxq_s16 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhq (int16x8_t __inactive, int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vqdmladhq_s16 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaxq (int32_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __arm_vmlsdavaxq_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaq (int32_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __arm_vmlsdavaq_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return __arm_vmlasq_n_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return __arm_vmlaq_n_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaxq (int32_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __arm_vmladavaxq_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq (int32_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __arm_vmladavaq_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq (int16x8_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vsriq_n_s16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq (int16x8_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vsliq_n_s16 (__a, __b, __imm);
+}
+
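+/* The same predicated overload set again, now for 32-bit lanes.  */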
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vpselq_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vpselq_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_m_u32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vmvnq_m_u32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
+{
+ return __arm_vmlasq_n_u32 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
+{
+ return __arm_vmlaq_n_u32 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmladavq_p_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq (uint32_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return __arm_vmladavaq_u32 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vminvq_p_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxvq_p_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m (uint32x4_t __inactive, uint32_t __a, mve_pred16_t __p)
+{
+ return __arm_vdupq_m_n_u32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_n_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmphiq_m_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmphiq_m_n_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_n_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_m (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpcsq_m_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_m (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpcsq_m_n_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_m (uint32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vclzq_m_u32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_p (uint32_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddvaq_p_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq (uint32x4_t __a, uint32x4_t __b, const int __imm)
+{
+ return __arm_vsriq_n_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq (uint32x4_t __a, uint32x4_t __b, const int __imm)
+{
+ return __arm_vsliq_n_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_r_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_m_n_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_r (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_r_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_n (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrshlq_m_n_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq_p (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vminavq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq_m (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vminaq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq_p (uint32_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxavq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq_m (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxaq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpltq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpltq_m_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpleq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpleq_m_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgtq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgtq_m_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgeq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgeq_m_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_r_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_m_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_m_s32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_r (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_r_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m_n (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrshlq_m_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqnegq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vqnegq_m_s32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqabsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vqabsq_m_s32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vnegq_m_s32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vmvnq_m_s32 (__inactive, __a, __p);
+}
+
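+/* Predicated reductions and merging lane operations for s32.  The _p
+   suffix marks reductions that accumulate only lanes whose predicate
+   bit is set; the _m forms take inactive lanes from __inactive.  */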
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlsdavxq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlsdavq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmladavxq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmladavq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vminvq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxvq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m (int32x4_t __inactive, int32_t __a, mve_pred16_t __p)
+{
+ return __arm_vdupq_m_n_s32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vclzq_m_s32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vclsq_m_s32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_p (int32_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddvaq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_m (int32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vabsq_m_s32 (__inactive, __a, __p);
+}
+
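+/* Saturating (rounding) doubling multiply-accumulate family for s32,
+   plus the vmla/vmlas and vmladava/vmlsdava accumulating reductions.
+   Each overload simply forwards to its type-suffixed _s32 intrinsic.  */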
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqrdmlsdhxq_s32 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqrdmlsdhq_s32 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlashq (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return __arm_vqrdmlashq_n_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlashq (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return __arm_vqdmlashq_n_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlahq (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return __arm_vqrdmlahq_n_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqrdmladhxq_s32 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqrdmladhq_s32 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqdmlsdhxq_s32 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqdmlsdhq_s32 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlahq (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return __arm_vqdmlahq_n_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhxq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqdmladhxq_s32 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhq (int32x4_t __inactive, int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vqdmladhq_s32 (__inactive, __a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaxq (int32_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __arm_vmlsdavaxq_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaq (int32_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __arm_vmlsdavaq_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return __arm_vmlasq_n_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return __arm_vmlaq_n_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaxq (int32_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __arm_vmladavaxq_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq (int32_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __arm_vmladavaq_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq (int32x4_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vsriq_n_s32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq (int32x4_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vsliq_n_s32 (__a, __b, __imm);
+}
+
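+/* 64-bit lane select (vpselq) and the long rounding multiply-accumulate
+   reductions that widen s32/u32 products into a 64-bit accumulator.  */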
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq (uint64x2_t __a, uint64x2_t __b, mve_pred16_t __p)
+{
+ return __arm_vpselq_u64 (__a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq (int64x2_t __a, int64x2_t __b, mve_pred16_t __p)
+{
+ return __arm_vpselq_s64 (__a, __b, __p);
+}
+
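+/* A minimal caller-side sketch (not part of this header) of feeding a
+   predicated compare into vpselq; it assumes the s32 vpselq overload
+   defined elsewhere in this file:
+
+     int32x4_t pick_ge (int32x4_t a, int32x4_t b)
+     {
+       mve_pred16_t p = __arm_vcmpgeq_m (a, b, 0xffff);
+       return __arm_vpselq (a, b, p);   // a where a >= b, else b
+     }
+*/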
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhaxq (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __arm_vrmlaldavhaxq_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhaq (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __arm_vrmlsldavhaq_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhaxq (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __arm_vrmlsldavhaxq_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvaq_p (int64_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddlvaq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev16q_m_s8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmlaldavhq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmlaldavhxq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmlsldavhq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmlsldavhxq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddlvaq_p (uint64_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddlvaq_p_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev16q_m_u8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmlaldavhq_p_u32 (__a, __b, __p);
+}
+
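+/* 16-bit element forms: vmvnq_m_n/vorrq_m_n with immediate operands,
+   followed by the saturating/rounding shifts that narrow s16 vectors
+   into the bottom (b) or top (t) byte lanes of an s8 result.  */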
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m (int16x8_t __inactive, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vmvnq_m_n_s16 (__inactive, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_n (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vorrq_m_n_s16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vqrshrntq_n_s16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vqshrnbq_n_s16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vqshrntq_n_s16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vrshrnbq_n_s16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vrshrntq_n_s16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vshrnbq_n_s16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq (int8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vshrntq_n_s16 (__a, __b, __imm);
+}
+
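+/* Hedged usage sketch (assumed caller-side code): interleave two s16
+   vectors into one s8 vector using the plain narrowing shifts above:
+
+     int8x16_t pack (int8x16_t dst, int16x8_t lo, int16x8_t hi)
+     {
+       dst = __arm_vshrnbq (dst, lo, 8);  // write even (bottom) lanes
+       dst = __arm_vshrntq (dst, hi, 8);  // write odd (top) lanes
+       return dst;
+     }
+*/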
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq (int64_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __arm_vmlaldavaq_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaxq (int64_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __arm_vmlaldavaxq_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaq (int64_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __arm_vmlsldavaq_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaxq (int64_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return __arm_vmlsldavaxq_s16 (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlaldavq_p_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlaldavxq_p_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlsldavq_p_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavxq_p (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlsldavxq_p_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_m (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovlbq_m_s8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_m (int16x8_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovltq_m_s8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmovnbq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmovntq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqmovnbq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq_m (int8x16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqmovntq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_m (int8x16_t __inactive, int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev32q_m_s8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m (uint16x8_t __inactive, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vmvnq_m_n_u16 (__inactive, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_n (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vorrq_m_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshruntq (uint8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vqrshruntq_n_s16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrunbq (uint8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vqshrunbq_n_s16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshruntq (uint8x16_t __a, int16x8_t __b, const int __imm)
+{
+ return __arm_vqshruntq_n_s16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovunbq_m (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqmovunbq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovuntq_m (uint8x16_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqmovuntq_m_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __arm_vqrshrntq_n_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __arm_vqshrnbq_n_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __arm_vqshrntq_n_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __arm_vrshrnbq_n_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __arm_vrshrntq_n_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __arm_vshrnbq_n_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq (uint8x16_t __a, uint16x8_t __b, const int __imm)
+{
+ return __arm_vshrntq_n_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq (uint64_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return __arm_vmlaldavaq_u16 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq_p (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlaldavq_p_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_m (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovlbq_m_u8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_m (uint16x8_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovltq_m_u8 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmovnbq_m_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmovntq_m_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqmovnbq_m_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq_m (uint8x16_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqmovntq_m_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_m (uint8x16_t __inactive, uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev32q_m_u8 (__inactive, __a, __p);
+}
+
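+/* The same pattern repeated for 32-bit elements: immediate bitwise
+   forms, then the shifts and moves that narrow s32/u32 vectors into
+   the bottom or top halfword lanes of a 16-bit result.  */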
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m (int32x4_t __inactive, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vmvnq_m_n_s32 (__inactive, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_n (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vorrq_m_n_s32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vqrshrntq_n_s32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vqshrnbq_n_s32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vqshrntq_n_s32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vrshrnbq_n_s32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vrshrntq_n_s32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vshrnbq_n_s32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq (int16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vshrntq_n_s32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __arm_vmlaldavaq_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaxq (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __arm_vmlaldavaxq_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaq (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __arm_vmlsldavaq_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaxq (int64_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return __arm_vmlsldavaxq_s32 (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlaldavq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlaldavxq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlsldavq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavxq_p (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlsldavxq_p_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_m (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovlbq_m_s16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_m (int32x4_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovltq_m_s16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmovnbq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmovntq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqmovnbq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq_m (int16x8_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqmovntq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_m (int16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev32q_m_s16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_m (uint32x4_t __inactive, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vmvnq_m_n_u32 (__inactive, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m_n (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vorrq_m_n_u32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshruntq (uint16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vqrshruntq_n_s32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrunbq (uint16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vqshrunbq_n_s32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshruntq (uint16x8_t __a, int32x4_t __b, const int __imm)
+{
+ return __arm_vqshruntq_n_s32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovunbq_m (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqmovunbq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovuntq_m (uint16x8_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqmovuntq_m_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __arm_vqrshrntq_n_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __arm_vqshrnbq_n_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __arm_vqshrntq_n_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __arm_vrshrnbq_n_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __arm_vrshrntq_n_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __arm_vshrnbq_n_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq (uint16x8_t __a, uint32x4_t __b, const int __imm)
+{
+ return __arm_vshrntq_n_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq (uint64_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return __arm_vmlaldavaq_u32 (__a, __b, __c);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavq_p (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmlaldavq_p_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_m (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovlbq_m_u16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_m (uint32x4_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovltq_m_u16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovnbq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmovnbq_m_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovntq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmovntq_m_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovnbq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqmovnbq_m_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqmovntq_m (uint16x8_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqmovntq_m_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_m (uint16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev32q_m_u16 (__inactive, __a, __p);
+}
+
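+/* Predicated forms taking three explicit operands: vsriq_m, vsubq_m
+   and vshlq_m merge under __p, vqshluq_m is the saturating unsigned
+   left shift of a signed input, and vabavq_p accumulates absolute
+   differences into a scalar.  */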
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vsriq_m_n_s8 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_m (uint8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshluq_m_n_s8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p (uint32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
+{
+ return __arm_vabavq_p_s8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vsriq_m_n_u8 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
+{
+ return __arm_vabavq_p_u8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vsriq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_m (uint16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshluq_m_n_s16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p (uint32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vabavq_p_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vsriq_m_n_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vabavq_p_u16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vsriq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_m (uint32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshluq_m_n_s32 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p (uint32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vabavq_p_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsriq_m (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vsriq_m_n_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabavq_p (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vabavq_p_u32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_s32 (__inactive, __a, __b, __p);
+}
+
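+/* Merging binary overloads instantiated across all six element types
+   (s8/s16/s32 and u8/u16/u32): vabdq_m, vaddq_m (vector and scalar),
+   vandq_m, vbicq_m, vbrsrq_m, the 90/270-degree complex adds, veorq_m,
+   and the halving add/sub family.  */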
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_n_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_n_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_n_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_n_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_n_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_n_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m (int8x16_t __inactive, int8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_m_n_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_m_n_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m (int16x8_t __inactive, int16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_m_n_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m (uint8x16_t __inactive, uint8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_m_n_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m (uint32x4_t __inactive, uint32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_m_n_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m (uint16x8_t __inactive, uint16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_m_n_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_m_n_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_m_n_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_m_n_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_m_n_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_m_n_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_m_n_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhcaddq_rot270_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vhcaddq_rot270_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhcaddq_rot270_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhcaddq_rot90_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vhcaddq_rot90_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhcaddq_rot90_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_m_n_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_m_n_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_m_n_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_m_n_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_m_n_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_m_n_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vminq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vminq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vminq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vminq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vminq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vminq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
+{
+ return __arm_vmladavaq_p_s8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vmladavaq_p_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmladavaq_p_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_p (uint32_t __a, uint8x16_t __b, uint8x16_t __c, mve_pred16_t __p)
+{
+ return __arm_vmladavaq_p_u8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_p (uint32_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vmladavaq_p_u32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaq_p (uint32_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmladavaq_p_u16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaxq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
+{
+ return __arm_vmladavaxq_p_s8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaxq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vmladavaxq_p_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavaxq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmladavaxq_p_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlaq_m_n_s8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlaq_m_n_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlaq_m_n_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_m (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlaq_m_n_u8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_m (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlaq_m_n_u32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaq_m (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlaq_m_n_u16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlasq_m_n_s8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlasq_m_n_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlasq_m_n_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_m (uint8x16_t __a, uint8x16_t __b, uint8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlasq_m_n_u8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_m (uint32x4_t __a, uint32x4_t __b, uint32_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlasq_m_n_u32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlasq_m (uint16x8_t __a, uint16x8_t __b, uint16_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlasq_m_n_u16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlsdavaq_p_s8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlsdavaq_p_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlsdavaq_p_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaxq_p (int32_t __a, int8x16_t __b, int8x16_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlsdavaxq_p_s8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaxq_p (int32_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlsdavaxq_p_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavaxq_p (int32_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlsdavaxq_p_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulhq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulhq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulhq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulhq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulhq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulhq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_m (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_int_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_int_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_int_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_int_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_m (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_int_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_int_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_m (int16x8_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_int_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_int_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_int_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_int_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_m (uint64x2_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_int_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_int_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_n_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_n_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_n_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_n_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_n_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_n_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqaddq_m_n_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqaddq_m_n_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqaddq_m_n_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqaddq_m_n_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqaddq_m_n_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqaddq_m_n_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqaddq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqaddq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqaddq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqaddq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqaddq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqaddq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmladhq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmladhq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmladhq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmladhxq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmladhxq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmladhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmladhxq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlahq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
+{
+ return __arm_vqdmlahq_m_n_s8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlahq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
+{
+ return __arm_vqdmlahq_m_n_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlahq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
+{
+ return __arm_vqdmlahq_m_n_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmlsdhq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmlsdhq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmlsdhq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmlsdhxq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmlsdhxq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlsdhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmlsdhxq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmulhq_m_n_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmulhq_m_n_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmulhq_m_n_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmulhq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmulhq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmulhq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmladhq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmladhq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmladhq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmladhxq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmladhxq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmladhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmladhxq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlahq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
+{
+ return __arm_vqrdmlahq_m_n_s8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlahq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
+{
+ return __arm_vqrdmlahq_m_n_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlahq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
+{
+ return __arm_vqrdmlahq_m_n_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlashq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
+{
+ return __arm_vqrdmlashq_m_n_s8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlashq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
+{
+ return __arm_vqrdmlashq_m_n_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlashq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
+{
+ return __arm_vqrdmlashq_m_n_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlashq_m (int8x16_t __a, int8x16_t __b, int8_t __c, mve_pred16_t __p)
+{
+ return __arm_vqdmlashq_m_n_s8 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlashq_m (int16x8_t __a, int16x8_t __b, int16_t __c, mve_pred16_t __p)
+{
+ return __arm_vqdmlashq_m_n_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmlashq_m (int32x4_t __a, int32x4_t __b, int32_t __c, mve_pred16_t __p)
+{
+ return __arm_vqdmlashq_m_n_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmlsdhq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmlsdhq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmlsdhq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhxq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmlsdhxq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhxq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmlsdhxq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmlsdhxq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmlsdhxq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmulhq_m_n_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmulhq_m_n_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmulhq_m_n_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmulhq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmulhq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrdmulhq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrshlq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrshlq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrshlq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrshlq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrshlq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqrshlq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_n (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_n_s8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_n (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_n_s32 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_n (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_n_s16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_n (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_n_u8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_n (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_n_u32 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m_n (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_n_u16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqshlq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqsubq_m_n_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqsubq_m_n_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqsubq_m_n_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqsubq_m_n_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqsubq_m_n_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqsubq_m_n_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqsubq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqsubq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqsubq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqsubq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqsubq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqsubq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vrhaddq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrhaddq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vrhaddq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vrhaddq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrhaddq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vrhaddq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmulhq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmulhq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmulhq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_m (uint8x16_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmulhq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmulhq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_m (uint16x8_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmulhq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m (int8x16_t __inactive, int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_m_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m (int16x8_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m (uint8x16_t __inactive, uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_m_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m (uint32x4_t __inactive, uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_m_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_m (uint16x8_t __inactive, uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_m_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_m (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrq_m_n_s8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_m (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrq_m_n_s32 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_m (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrq_m_n_s16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_m (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrq_m_n_u8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_m (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrq_m_n_u32 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_m (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrq_m_n_u16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_n (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_n_s8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_n (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_n_s32 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_n (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_n_s16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_n (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_n_u8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_n (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_n_u32 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_m_n (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlq_m_n_u16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_m (int8x16_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrq_m_n_s8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_m (int32x4_t __inactive, int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrq_m_n_s32 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_m (int16x8_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrq_m_n_s16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_m (uint8x16_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrq_m_n_u8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_m (uint32x4_t __inactive, uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrq_m_n_u32 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_m (uint16x8_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrq_m_n_u16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_m (int8x16_t __a, int8x16_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vsliq_m_n_s8 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_m (int32x4_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vsliq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_m (int16x8_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vsliq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_m (uint8x16_t __a, uint8x16_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vsliq_m_n_u8 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_m (uint32x4_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vsliq_m_n_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsliq_m (uint16x8_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vsliq_m_n_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (int8x16_t __inactive, int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_n_s8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (int32x4_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_n_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (int16x8_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_n_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (uint8x16_t __inactive, uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_n_u8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (uint32x4_t __inactive, uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_n_u32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (uint16x8_t __inactive, uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_n_u16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlaldavaq_p_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlaldavaq_p_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq_p (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlaldavaq_p_u32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaq_p (uint64_t __a, uint16x8_t __b, uint16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlaldavaq_p_u16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlaldavaxq_p_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlaldavaxq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlaldavaxq_p_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlsldavaq_p_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlsldavaq_p_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlsldavaxq_p_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsldavaxq_p (int64_t __a, int16x8_t __b, int16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vmlsldavaxq_p_s16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_poly_m_p8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_poly_m_p16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly_m (uint16x8_t __inactive, uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_poly_m_p8 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly_m (uint32x4_t __inactive, uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_poly_m_p16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq_m (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmullbq_m_n_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq_m (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmullbq_m_n_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmullbq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmullbq_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmullbq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq_m (int64x2_t __inactive, int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmulltq_m_n_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq_m (int32x4_t __inactive, int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmulltq_m_n_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq_m (int64x2_t __inactive, int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmulltq_m_s32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulltq_m (int32x4_t __inactive, int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vqdmulltq_m_s16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqrshrnbq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqrshrnbq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqrshrnbq_m_n_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqrshrnbq_m_n_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqrshrntq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqrshrntq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqrshrntq_m_n_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqrshrntq_m_n_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrunbq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqrshrunbq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshrunbq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqrshrunbq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshruntq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqrshruntq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshruntq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqrshruntq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshrnbq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshrnbq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshrnbq_m_n_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshrnbq_m_n_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshrntq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshrntq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshrntq_m_n_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshrntq_m_n_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrunbq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshrunbq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshrunbq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshrunbq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshruntq_m (uint16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshruntq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshruntq_m (uint8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vqshruntq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vrmlaldavhaq_p_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhaq_p (uint64_t __a, uint32x4_t __b, uint32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vrmlaldavhaq_p_u32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlaldavhaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vrmlaldavhaxq_p_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhaq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vrmlsldavhaq_p_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmlsldavhaxq_p (int64_t __a, int32x4_t __b, int32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vrmlsldavhaxq_p_s32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrnbq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrnbq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrnbq_m_n_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrnbq_m_n_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrntq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrntq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrntq_m_n_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrntq_m_n_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_m (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshllbq_m_n_s8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_m (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshllbq_m_n_s16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_m (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshllbq_m_n_u8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_m (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshllbq_m_n_u16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_m (int16x8_t __inactive, int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlltq_m_n_s8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_m (int32x4_t __inactive, int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlltq_m_n_s16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_m (uint16x8_t __inactive, uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlltq_m_n_u8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_m (uint32x4_t __inactive, uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlltq_m_n_u16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrnbq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrnbq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrnbq_m_n_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrnbq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrnbq_m_n_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq_m (int16x8_t __a, int32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrntq_m_n_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq_m (int8x16_t __a, int16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrntq_m_n_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq_m (uint16x8_t __a, uint32x4_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrntq_m_n_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrntq_m (uint8x16_t __a, uint16x8_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrntq_m_n_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset (int8_t * __base, uint8x16_t __offset, int8x16_t __value)
+{
+ __arm_vstrbq_scatter_offset_s8 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset (int8_t * __base, uint32x4_t __offset, int32x4_t __value)
+{
+ __arm_vstrbq_scatter_offset_s32 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset (int8_t * __base, uint16x8_t __offset, int16x8_t __value)
+{
+ __arm_vstrbq_scatter_offset_s16 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value)
+{
+ __arm_vstrbq_scatter_offset_u8 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value)
+{
+ __arm_vstrbq_scatter_offset_u32 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value)
+{
+ __arm_vstrbq_scatter_offset_u16 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq (int8_t * __addr, int8x16_t __value)
+{
+ __arm_vstrbq_s8 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq (int8_t * __addr, int32x4_t __value)
+{
+ __arm_vstrbq_s32 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq (int8_t * __addr, int16x8_t __value)
+{
+ __arm_vstrbq_s16 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq (uint8_t * __addr, uint8x16_t __value)
+{
+ __arm_vstrbq_u8 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq (uint8_t * __addr, uint32x4_t __value)
+{
+ __arm_vstrbq_u32 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq (uint8_t * __addr, uint16x8_t __value)
+{
+ __arm_vstrbq_u16 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, int32x4_t __value)
+{
+ __arm_vstrwq_scatter_base_s32 (__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, uint32x4_t __value)
+{
+ __arm_vstrwq_scatter_base_u32 (__addr, __offset, __value);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset (uint8_t const * __base, uint8x16_t __offset)
+{
+ return __arm_vldrbq_gather_offset_u8 (__base, __offset);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset (int8_t const * __base, uint8x16_t __offset)
+{
+ return __arm_vldrbq_gather_offset_s8 (__base, __offset);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset (uint8_t const * __base, uint16x8_t __offset)
+{
+ return __arm_vldrbq_gather_offset_u16 (__base, __offset);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset (int8_t const * __base, uint16x8_t __offset)
+{
+ return __arm_vldrbq_gather_offset_s16 (__base, __offset);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset (uint8_t const * __base, uint32x4_t __offset)
+{
+ return __arm_vldrbq_gather_offset_u32 (__base, __offset);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset (int8_t const * __base, uint32x4_t __offset)
+{
+ return __arm_vldrbq_gather_offset_s32 (__base, __offset);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_p (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
+{
+ __arm_vstrbq_p_s8 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_p (int8_t * __addr, int32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrbq_p_s32 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_p (int8_t * __addr, int16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrbq_p_s16 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_p (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
+{
+ __arm_vstrbq_p_u8 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_p (uint8_t * __addr, uint32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrbq_p_u32 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_p (uint8_t * __addr, uint16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrbq_p_u16 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_p (int8_t * __base, uint8x16_t __offset, int8x16_t __value, mve_pred16_t __p)
+{
+ __arm_vstrbq_scatter_offset_p_s8 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_p (int8_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrbq_scatter_offset_p_s32 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_p (int8_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrbq_scatter_offset_p_s16 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_p (uint8_t * __base, uint8x16_t __offset, uint8x16_t __value, mve_pred16_t __p)
+{
+ __arm_vstrbq_scatter_offset_p_u8 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_p (uint8_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrbq_scatter_offset_p_u32 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrbq_scatter_offset_p (uint8_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrbq_scatter_offset_p_u16 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_scatter_base_p_s32 (__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_scatter_base_p_u32 (__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z (int8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrbq_gather_offset_z_s8 (__base, __offset, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z (int8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrbq_gather_offset_z_s32 (__base, __offset, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z (int8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrbq_gather_offset_z_s16 (__base, __offset, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z (uint8_t const * __base, uint8x16_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrbq_gather_offset_z_u8 (__base, __offset, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z (uint8_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrbq_gather_offset_z_u32 (__base, __offset, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrbq_gather_offset_z (uint8_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrbq_gather_offset_z_u16 (__base, __offset, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q (int8_t const * __base)
+{
+ return __arm_vld1q_s8 (__base);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q (int32_t const * __base)
+{
+ return __arm_vld1q_s32 (__base);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q (int16_t const * __base)
+{
+ return __arm_vld1q_s16 (__base);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q (uint8_t const * __base)
+{
+ return __arm_vld1q_u8 (__base);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q (uint32_t const * __base)
+{
+ return __arm_vld1q_u32 (__base);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q (uint16_t const * __base)
+{
+ return __arm_vld1q_u16 (__base);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset (int16_t const * __base, uint32x4_t __offset)
+{
+ return __arm_vldrhq_gather_offset_s32 (__base, __offset);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset (int16_t const * __base, uint16x8_t __offset)
+{
+ return __arm_vldrhq_gather_offset_s16 (__base, __offset);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset (uint16_t const * __base, uint32x4_t __offset)
+{
+ return __arm_vldrhq_gather_offset_u32 (__base, __offset);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset (uint16_t const * __base, uint16x8_t __offset)
+{
+ return __arm_vldrhq_gather_offset_u16 (__base, __offset);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_z (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrhq_gather_offset_z_s32 (__base, __offset, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_z (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrhq_gather_offset_z_s16 (__base, __offset, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_z (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrhq_gather_offset_z_u32 (__base, __offset, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_z (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrhq_gather_offset_z_u16 (__base, __offset, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset (int16_t const * __base, uint32x4_t __offset)
+{
+ return __arm_vldrhq_gather_shifted_offset_s32 (__base, __offset);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset (int16_t const * __base, uint16x8_t __offset)
+{
+ return __arm_vldrhq_gather_shifted_offset_s16 (__base, __offset);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset (uint16_t const * __base, uint32x4_t __offset)
+{
+ return __arm_vldrhq_gather_shifted_offset_u32 (__base, __offset);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset (uint16_t const * __base, uint16x8_t __offset)
+{
+ return __arm_vldrhq_gather_shifted_offset_u16 (__base, __offset);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_z (int16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrhq_gather_shifted_offset_z_s32 (__base, __offset, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_z (int16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrhq_gather_shifted_offset_z_s16 (__base, __offset, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_z (uint16_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrhq_gather_shifted_offset_z_u32 (__base, __offset, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_z (uint16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrhq_gather_shifted_offset_z_u16 (__base, __offset, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_offset (int64_t const * __base, uint64x2_t __offset)
+{
+ return __arm_vldrdq_gather_offset_s64 (__base, __offset);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_offset (uint64_t const * __base, uint64x2_t __offset)
+{
+ return __arm_vldrdq_gather_offset_u64 (__base, __offset);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_offset_z (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrdq_gather_offset_z_s64 (__base, __offset, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_offset_z (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrdq_gather_offset_z_u64 (__base, __offset, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_shifted_offset (int64_t const * __base, uint64x2_t __offset)
+{
+ return __arm_vldrdq_gather_shifted_offset_s64 (__base, __offset);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_shifted_offset (uint64_t const * __base, uint64x2_t __offset)
+{
+ return __arm_vldrdq_gather_shifted_offset_u64 (__base, __offset);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_shifted_offset_z (int64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrdq_gather_shifted_offset_z_s64 (__base, __offset, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrdq_gather_shifted_offset_z (uint64_t const * __base, uint64x2_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrdq_gather_shifted_offset_z_u64 (__base, __offset, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_offset (int32_t const * __base, uint32x4_t __offset)
+{
+ return __arm_vldrwq_gather_offset_s32 (__base, __offset);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_offset (uint32_t const * __base, uint32x4_t __offset)
+{
+ return __arm_vldrwq_gather_offset_u32 (__base, __offset);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_offset_z (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrwq_gather_offset_z_s32 (__base, __offset, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_offset_z (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrwq_gather_offset_z_u32 (__base, __offset, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_shifted_offset (int32_t const * __base, uint32x4_t __offset)
+{
+ return __arm_vldrwq_gather_shifted_offset_s32 (__base, __offset);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_shifted_offset (uint32_t const * __base, uint32x4_t __offset)
+{
+ return __arm_vldrwq_gather_shifted_offset_u32 (__base, __offset);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_shifted_offset_z (int32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrwq_gather_shifted_offset_z_s32 (__base, __offset, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_shifted_offset_z (uint32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrwq_gather_shifted_offset_z_u32 (__base, __offset, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q (int8_t * __addr, int8x16_t __value)
+{
+ __arm_vst1q_s8 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q (int32_t * __addr, int32x4_t __value)
+{
+ __arm_vst1q_s32 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q (int16_t * __addr, int16x8_t __value)
+{
+ __arm_vst1q_s16 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q (uint8_t * __addr, uint8x16_t __value)
+{
+ __arm_vst1q_u8 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q (uint32_t * __addr, uint32x4_t __value)
+{
+ __arm_vst1q_u32 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q (uint16_t * __addr, uint16x8_t __value)
+{
+ __arm_vst1q_u16 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
+{
+ __arm_vstrhq_scatter_offset_s32 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
+{
+ __arm_vstrhq_scatter_offset_s16 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
+{
+ __arm_vstrhq_scatter_offset_u32 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
+{
+ __arm_vstrhq_scatter_offset_u16 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_scatter_offset_p_s32 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_scatter_offset_p_s16 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_scatter_offset_p_u32 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_scatter_offset_p_u16 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset (int16_t * __base, uint32x4_t __offset, int32x4_t __value)
+{
+ __arm_vstrhq_scatter_shifted_offset_s32 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset (int16_t * __base, uint16x8_t __offset, int16x8_t __value)
+{
+ __arm_vstrhq_scatter_shifted_offset_s16 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value)
+{
+ __arm_vstrhq_scatter_shifted_offset_u32 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value)
+{
+ __arm_vstrhq_scatter_shifted_offset_u16 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p (int16_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_scatter_shifted_offset_p_s32 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p (int16_t * __base, uint16x8_t __offset, int16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_scatter_shifted_offset_p_s16 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p (uint16_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_scatter_shifted_offset_p_u32 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p (uint16_t * __base, uint16x8_t __offset, uint16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_scatter_shifted_offset_p_u16 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq (int16_t * __addr, int32x4_t __value)
+{
+ __arm_vstrhq_s32 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq (int16_t * __addr, int16x8_t __value)
+{
+ __arm_vstrhq_s16 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq (uint16_t * __addr, uint32x4_t __value)
+{
+ __arm_vstrhq_u32 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq (uint16_t * __addr, uint16x8_t __value)
+{
+ __arm_vstrhq_u16 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p (int16_t * __addr, int32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_p_s32 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_p_s16 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p (uint16_t * __addr, uint32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_p_u32 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_p_u16 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq (int32_t * __addr, int32x4_t __value)
+{
+ __arm_vstrwq_s32 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq (uint32_t * __addr, uint32x4_t __value)
+{
+ __arm_vstrwq_u32 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_p (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_p_s32 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_p (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_p_u32 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_p (uint64x2_t __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
+{
+ __arm_vstrdq_scatter_base_p_s64 (__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_p (uint64x2_t __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
+{
+ __arm_vstrdq_scatter_base_p_u64 (__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base (uint64x2_t __addr, const int __offset, int64x2_t __value)
+{
+ __arm_vstrdq_scatter_base_s64 (__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base (uint64x2_t __addr, const int __offset, uint64x2_t __value)
+{
+ __arm_vstrdq_scatter_base_u64 (__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_offset_p (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
+{
+ __arm_vstrdq_scatter_offset_p_s64 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_offset_p (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
+{
+ __arm_vstrdq_scatter_offset_p_u64 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_offset (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
+{
+ __arm_vstrdq_scatter_offset_s64 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_offset (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
+{
+ __arm_vstrdq_scatter_offset_u64 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_shifted_offset_p (int64_t * __base, uint64x2_t __offset, int64x2_t __value, mve_pred16_t __p)
+{
+ __arm_vstrdq_scatter_shifted_offset_p_s64 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_shifted_offset_p (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value, mve_pred16_t __p)
+{
+ __arm_vstrdq_scatter_shifted_offset_p_u64 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_shifted_offset (int64_t * __base, uint64x2_t __offset, int64x2_t __value)
+{
+ __arm_vstrdq_scatter_shifted_offset_s64 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_shifted_offset (uint64_t * __base, uint64x2_t __offset, uint64x2_t __value)
+{
+ __arm_vstrdq_scatter_shifted_offset_u64 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_offset_p (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_scatter_offset_p_s32 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_offset_p (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_scatter_offset_p_u32 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_offset (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
+{
+ __arm_vstrwq_scatter_offset_s32 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_offset (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
+{
+ __arm_vstrwq_scatter_offset_u32 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_shifted_offset_p (int32_t * __base, uint32x4_t __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_scatter_shifted_offset_p_s32 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_shifted_offset_p (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_scatter_shifted_offset_p_u32 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_shifted_offset (int32_t * __base, uint32x4_t __offset, int32x4_t __value)
+{
+ __arm_vstrwq_scatter_shifted_offset_s32 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_shifted_offset (uint32_t * __base, uint32x4_t __offset, uint32x4_t __value)
+{
+ __arm_vstrwq_scatter_shifted_offset_u32 (__base, __offset, __value);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (int8x16_t __a, int8x16_t __b)
+{
+ return __arm_vaddq_s8 (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (int16x8_t __a, int16x8_t __b)
+{
+ return __arm_vaddq_s16 (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (int32x4_t __a, int32x4_t __b)
+{
+ return __arm_vaddq_s32 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (uint8x16_t __a, uint8x16_t __b)
+{
+ return __arm_vaddq_u8 (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (uint16x8_t __a, uint16x8_t __b)
+{
+ return __arm_vaddq_u16 (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (uint32x4_t __a, uint32x4_t __b)
+{
+ return __arm_vaddq_u32 (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_m (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vddupq_m_n_u8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_m (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vddupq_m_n_u32 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_m (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vddupq_m_n_u16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_m (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vddupq_m_wb_u8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_m (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vddupq_m_wb_u16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_m (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vddupq_m_wb_u32 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_u8 (uint32_t __a, const int __imm)
+{
+ return __arm_vddupq_n_u8 (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_u32 (uint32_t __a, const int __imm)
+{
+ return __arm_vddupq_n_u32 (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_u16 (uint32_t __a, const int __imm)
+{
+ return __arm_vddupq_n_u16 (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_m (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vdwdupq_m_n_u8 (__inactive, __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_m (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vdwdupq_m_n_u32 (__inactive, __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_m (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vdwdupq_m_n_u16 (__inactive, __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_m (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vdwdupq_m_wb_u8 (__inactive, __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_m (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vdwdupq_m_wb_u32 (__inactive, __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_m (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vdwdupq_m_wb_u16 (__inactive, __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_u8 (uint32_t __a, uint32_t __b, const int __imm)
+{
+ return __arm_vdwdupq_n_u8 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_u32 (uint32_t __a, uint32_t __b, const int __imm)
+{
+ return __arm_vdwdupq_n_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_u16 (uint32_t __a, uint32_t __b, const int __imm)
+{
+ return __arm_vdwdupq_n_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_u8 (uint32_t * __a, uint32_t __b, const int __imm)
+{
+ return __arm_vdwdupq_wb_u8 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_u32 (uint32_t * __a, uint32_t __b, const int __imm)
+{
+ return __arm_vdwdupq_wb_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_u16 (uint32_t * __a, uint32_t __b, const int __imm)
+{
+ return __arm_vdwdupq_wb_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_m (uint8x16_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vidupq_m_n_u8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_m (uint32x4_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vidupq_m_n_u32 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_m (uint16x8_t __inactive, uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vidupq_m_n_u16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_u8 (uint32_t __a, const int __imm)
+{
+ return __arm_vidupq_n_u8 (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_m (uint8x16_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vidupq_m_wb_u8 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_m (uint16x8_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vidupq_m_wb_u16 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_m (uint32x4_t __inactive, uint32_t * __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vidupq_m_wb_u32 (__inactive, __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_u32 (uint32_t __a, const int __imm)
+{
+ return __arm_vidupq_n_u32 (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_u16 (uint32_t __a, const int __imm)
+{
+ return __arm_vidupq_n_u16 (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_u8 (uint32_t * __a, const int __imm)
+{
+ return __arm_vidupq_wb_u8 (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_u16 (uint32_t * __a, const int __imm)
+{
+ return __arm_vidupq_wb_u16 (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_u32 (uint32_t * __a, const int __imm)
+{
+ return __arm_vidupq_wb_u32 (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_u8 (uint32_t * __a, const int __imm)
+{
+ return __arm_vddupq_wb_u8 (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_u16 (uint32_t * __a, const int __imm)
+{
+ return __arm_vddupq_wb_u16 (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_u32 (uint32_t * __a, const int __imm)
+{
+ return __arm_vddupq_wb_u32 (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_m (uint8x16_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_viwdupq_m_n_u8 (__inactive, __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_m (uint32x4_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_viwdupq_m_n_u32 (__inactive, __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_m (uint16x8_t __inactive, uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_viwdupq_m_n_u16 (__inactive, __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_m (uint8x16_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_viwdupq_m_wb_u8 (__inactive, __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_m (uint32x4_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_viwdupq_m_wb_u32 (__inactive, __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_m (uint16x8_t __inactive, uint32_t * __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_viwdupq_m_wb_u16 (__inactive, __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_u8 (uint32_t __a, uint32_t __b, const int __imm)
+{
+ return __arm_viwdupq_n_u8 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_u32 (uint32_t __a, uint32_t __b, const int __imm)
+{
+ return __arm_viwdupq_n_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_u16 (uint32_t __a, uint32_t __b, const int __imm)
+{
+ return __arm_viwdupq_n_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_u8 (uint32_t * __a, uint32_t __b, const int __imm)
+{
+ return __arm_viwdupq_wb_u8 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_u32 (uint32_t * __a, uint32_t __b, const int __imm)
+{
+ return __arm_viwdupq_wb_u32 (__a, __b, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_u16 (uint32_t * __a, uint32_t __b, const int __imm)
+{
+ return __arm_viwdupq_wb_u16 (__a, __b, __imm);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_wb (uint64x2_t * __addr, const int __offset, int64x2_t __value)
+{
+ __arm_vstrdq_scatter_base_wb_s64 (__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_wb (uint64x2_t * __addr, const int __offset, uint64x2_t __value)
+{
+ __arm_vstrdq_scatter_base_wb_u64 (__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_wb_p (uint64x2_t * __addr, const int __offset, int64x2_t __value, mve_pred16_t __p)
+{
+ __arm_vstrdq_scatter_base_wb_p_s64 (__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrdq_scatter_base_wb_p (uint64x2_t * __addr, const int __offset, uint64x2_t __value, mve_pred16_t __p)
+{
+ __arm_vstrdq_scatter_base_wb_p_u64 (__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, int32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_scatter_base_wb_p_s32 (__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, uint32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_scatter_base_wb_p_u32 (__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, int32x4_t __value)
+{
+ __arm_vstrwq_scatter_base_wb_s32 (__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, uint32x4_t __value)
+{
+ __arm_vstrwq_scatter_base_wb_u32 (__addr, __offset, __value);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vddupq_x_n_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vddupq_x_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vddupq_x_n_u32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vddupq_x_wb_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vddupq_x_wb_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vddupq_x_wb_u32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vdwdupq_x_n_u8 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vdwdupq_x_n_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vdwdupq_x_n_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vdwdupq_x_wb_u8 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vdwdupq_x_wb_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vdwdupq_x_wb_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_u8 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vidupq_x_n_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vidupq_x_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vidupq_x_n_u32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vidupq_x_wb_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vidupq_x_wb_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vidupq_x_wb_u32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_viwdupq_x_n_u8 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_viwdupq_x_n_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_viwdupq_x_n_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_viwdupq_x_wb_u8 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_viwdupq_x_wb_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_viwdupq_x_wb_u32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vminq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vminq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vminq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vminq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vminq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vminq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x (int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vabsq_x_s8 (__a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x (int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vabsq_x_s16 (__a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x (int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vabsq_x_s32 (__a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_n_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_n_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_n_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_x (int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vclsq_x_s8 (__a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_x (int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vclsq_x_s16 (__a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_x (int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vclsq_x_s32 (__a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x (int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vclzq_x_s8 (__a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x (int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vclzq_x_s16 (__a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x (int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vclzq_x_s32 (__a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vclzq_x_u8 (__a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vclzq_x_u16 (__a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vclzq_x_u32 (__a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x (int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vnegq_x_s8 (__a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x (int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vnegq_x_s16 (__a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x (int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vnegq_x_s32 (__a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulhq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulhq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulhq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulhq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulhq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulhq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_poly_x_p8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_poly_x_p16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_int_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_int_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_int_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_int_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_int_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmullbq_int_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_poly_x_p8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_poly_x_p16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_int_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_int_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_int_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_int_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_int_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulltq_int_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_n_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_n_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_n_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_n_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_n_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_n_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_x_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_x_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_x_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_x_n_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_x_n_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_x_n_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vhaddq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhcaddq_rot90_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhcaddq_rot90_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vhcaddq_rot90_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhcaddq_rot270_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhcaddq_rot270_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vhcaddq_rot270_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_x_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_x_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_x_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_x_n_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_x_n_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_x_n_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vhsubq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vrhaddq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vrhaddq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrhaddq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vrhaddq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vrhaddq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrhaddq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmulhq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmulhq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmulhq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmulhq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmulhq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrmulhq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x (int8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_x_n_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x (int16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_x_n_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_x_n_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_x_n_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_x_n_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_x_n_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_x (int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovlbq_x_s8 (__a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_x (int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovlbq_x_s16 (__a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_x (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovlbq_x_u8 (__a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_x (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovlbq_x_u16 (__a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_x (int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovltq_x_s8 (__a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_x (int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovltq_x_s16 (__a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_x (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovltq_x_u8 (__a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_x (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vmovltq_x_u16 (__a, __p);
+}
+
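+/* Illustrative sketch, not part of the imported header: vmovlbq_x and
+   vmovltq_x widen the even- and odd-numbered narrow lanes under a
+   predicate.  The helper below is hypothetical.
+
+     void widen_u8 (uint8x16_t v, uint32_t n, uint16x8_t *lo, uint16x8_t *hi)
+     {
+       mve_pred16_t p = vctp16q (n);  // n active 16-bit result lanes
+       *lo = vmovlbq_x (v, p);        // lanes 0,2,4,... zero-extended
+       *hi = vmovltq_x (v, p);        // lanes 1,3,5,... zero-extended
+     }
+*/
+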
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x (int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vmvnq_x_s8 (__a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x (int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vmvnq_x_s16 (__a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x (int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vmvnq_x_s32 (__a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vmvnq_x_u8 (__a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vmvnq_x_u16 (__a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vmvnq_x_u32 (__a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q_x (int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev16q_x_s8 (__a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q_x (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev16q_x_u8 (__a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x (int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev32q_x_s8 (__a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x (int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev32q_x_s16 (__a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev32q_x_u8 (__a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev32q_x_u16 (__a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x (int8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_x_s8 (__a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x (int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_x_s16 (__a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x (int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_x_s32 (__a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_x_u8 (__a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_x_u16 (__a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_x_u32 (__a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vrshlq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshllbq_x_n_s8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshllbq_x_n_s16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshllbq_x_n_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshllbq_x_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlltq_x_n_s8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlltq_x_n_s16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlltq_x_n_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlltq_x_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_x_s8 (__a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_x_s16 (__a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_x_s32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_x_u8 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_x_u16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vshlq_x_u32 (__a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlq_x_n_s8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlq_x_n_s16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlq_x_n_s32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlq_x_n_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlq_x_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlq_x_n_u32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrq_x_n_s8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrq_x_n_s16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrq_x_n_s32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrq_x_n_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrq_x_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vrshrq_x_n_u32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrq_x_n_s8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrq_x_n_s16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrq_x_n_s32 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrq_x_n_u8 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrq_x_n_u16 (__a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshrq_x_n_u32 (__a, __imm, __p);
+}
+
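+/* Illustrative sketch, not part of the imported header: the _n forms
+   take a compile-time immediate shift count, and vrshrq_x rounds the
+   shifted-out bits.  Hypothetical helper; v is assumed small enough
+   that v << 2 does not overflow int16_t.
+
+     int16x8_t scale_down (int16x8_t v, uint32_t n)
+     {
+       mve_pred16_t p = vctp16q (n);
+       int16x8_t up = vshlq_x_n (v, 2, p);  // v << 2 in active lanes
+       return vrshrq_x (up, 4, p);          // rounding >> 4: v/4, rounded
+     }
+*/
+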
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadciq (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
+{
+ return __arm_vadciq_s32 (__a, __b, __carry_out);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadciq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
+{
+ return __arm_vadciq_u32 (__a, __b, __carry_out);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadciq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
+{
+ return __arm_vadciq_m_s32 (__inactive, __a, __b, __carry_out, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadciq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
+{
+ return __arm_vadciq_m_u32 (__inactive, __a, __b, __carry_out, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadcq (int32x4_t __a, int32x4_t __b, unsigned * __carry)
+{
+ return __arm_vadcq_s32 (__a, __b, __carry);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadcq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
+{
+ return __arm_vadcq_u32 (__a, __b, __carry);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadcq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+{
+ return __arm_vadcq_m_s32 (__inactive, __a, __b, __carry, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vadcq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+{
+ return __arm_vadcq_m_u32 (__inactive, __a, __b, __carry, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbciq (int32x4_t __a, int32x4_t __b, unsigned * __carry_out)
+{
+ return __arm_vsbciq_s32 (__a, __b, __carry_out);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbciq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out)
+{
+ return __arm_vsbciq_u32 (__a, __b, __carry_out);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbciq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
+{
+ return __arm_vsbciq_m_s32 (__inactive, __a, __b, __carry_out, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbciq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry_out, mve_pred16_t __p)
+{
+ return __arm_vsbciq_m_u32 (__inactive, __a, __b, __carry_out, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbcq (int32x4_t __a, int32x4_t __b, unsigned * __carry)
+{
+ return __arm_vsbcq_s32 (__a, __b, __carry);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbcq (uint32x4_t __a, uint32x4_t __b, unsigned * __carry)
+{
+ return __arm_vsbcq_u32 (__a, __b, __carry);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbcq_m (int32x4_t __inactive, int32x4_t __a, int32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+{
+ return __arm_vsbcq_m_s32 (__inactive, __a, __b, __carry, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsbcq_m (uint32x4_t __inactive, uint32x4_t __a, uint32x4_t __b, unsigned * __carry, mve_pred16_t __p)
+{
+ return __arm_vsbcq_m_u32 (__inactive, __a, __b, __carry, __p);
+}
+
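+/* Illustrative sketch, not part of the imported header: vadciq starts
+   a carry chain (carry-in zero) and vadcq continues it, with the carry
+   propagating across the four 32-bit lanes of each vector, so two calls
+   add 256-bit numbers held as little-endian lane pairs.  add256 is a
+   hypothetical name.
+
+     void add256 (uint32x4_t a_lo, uint32x4_t a_hi,
+                  uint32x4_t b_lo, uint32x4_t b_hi,
+                  uint32x4_t *r_lo, uint32x4_t *r_hi)
+     {
+       unsigned carry;
+       *r_lo = vadciq (a_lo, b_lo, &carry);  // carry-in = 0, carry-out saved
+       *r_hi = vadcq (a_hi, b_hi, &carry);   // consumes and updates carry
+     }
+*/
+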
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p (uint8_t * __addr, uint8x16_t __value, mve_pred16_t __p)
+{
+ __arm_vst1q_p_u8 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p (int8_t * __addr, int8x16_t __value, mve_pred16_t __p)
+{
+ __arm_vst1q_p_s8 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q (int8_t * __addr, int8x16x2_t __value)
+{
+ __arm_vst2q_s8 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q (uint8_t * __addr, uint8x16x2_t __value)
+{
+ __arm_vst2q_u8 (__addr, __value);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z (uint8_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vld1q_z_u8 (__base, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z (int8_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vld1q_z_s8 (__base, __p);
+}
+
+__extension__ extern __inline int8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q (int8_t const * __addr)
+{
+ return __arm_vld2q_s8 (__addr);
+}
+
+__extension__ extern __inline uint8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q (uint8_t const * __addr)
+{
+ return __arm_vld2q_u8 (__addr);
+}
+
+__extension__ extern __inline int8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q (int8_t const * __addr)
+{
+ return __arm_vld4q_s8 (__addr);
+}
+
+__extension__ extern __inline uint8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q (uint8_t const * __addr)
+{
+ return __arm_vld4q_u8 (__addr);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p (uint16_t * __addr, uint16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vst1q_p_u16 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p (int16_t * __addr, int16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vst1q_p_s16 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q (int16_t * __addr, int16x8x2_t __value)
+{
+ __arm_vst2q_s16 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q (uint16_t * __addr, uint16x8x2_t __value)
+{
+ __arm_vst2q_u16 (__addr, __value);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z (uint16_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vld1q_z_u16 (__base, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z (int16_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vld1q_z_s16 (__base, __p);
+}
+
+__extension__ extern __inline int16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q (int16_t const * __addr)
+{
+ return __arm_vld2q_s16 (__addr);
+}
+
+__extension__ extern __inline uint16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q (uint16_t const * __addr)
+{
+ return __arm_vld2q_u16 (__addr);
+}
+
+__extension__ extern __inline int16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q (int16_t const * __addr)
+{
+ return __arm_vld4q_s16 (__addr);
+}
+
+__extension__ extern __inline uint16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q (uint16_t const * __addr)
+{
+ return __arm_vld4q_u16 (__addr);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p (uint32_t * __addr, uint32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vst1q_p_u32 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p (int32_t * __addr, int32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vst1q_p_s32 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q (int32_t * __addr, int32x4x2_t __value)
+{
+ __arm_vst2q_s32 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q (uint32_t * __addr, uint32x4x2_t __value)
+{
+ __arm_vst2q_u32 (__addr, __value);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z (uint32_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vld1q_z_u32 (__base, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z (int32_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vld1q_z_s32 (__base, __p);
+}
+
+__extension__ extern __inline int32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q (int32_t const * __addr)
+{
+ return __arm_vld2q_s32 (__addr);
+}
+
+__extension__ extern __inline uint32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q (uint32_t const * __addr)
+{
+ return __arm_vld2q_u32 (__addr);
+}
+
+__extension__ extern __inline int32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q (int32_t const * __addr)
+{
+ return __arm_vld4q_s32 (__addr);
+}
+
+__extension__ extern __inline uint32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q (uint32_t const * __addr)
+{
+ return __arm_vld4q_u32 (__addr);
+}
+
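+/* Illustrative sketch, not part of the imported header: vld1q_z zeroes
+   predicated-off lanes and vst1q_p suppresses their stores, which gives
+   a tail-predicated copy loop with no scalar epilogue.  copy_u8 is a
+   hypothetical name.
+
+     void copy_u8 (uint8_t const *src, uint8_t *dst, uint32_t n)
+     {
+       while (n > 0)
+         {
+           mve_pred16_t p = vctp8q (n);  // covers min(n,16) lanes
+           vst1q_p (dst, vld1q_z (src, p), p);
+           src += 16;
+           dst += 16;
+           n -= (n > 16 ? 16 : n);
+         }
+     }
+*/
+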
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane (int16_t __a, int16x8_t __b, const int __idx)
+{
+ return __arm_vsetq_lane_s16 (__a, __b, __idx);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane (int32_t __a, int32x4_t __b, const int __idx)
+{
+ return __arm_vsetq_lane_s32 (__a, __b, __idx);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane (int8_t __a, int8x16_t __b, const int __idx)
+{
+ return __arm_vsetq_lane_s8 (__a, __b, __idx);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane (int64_t __a, int64x2_t __b, const int __idx)
+{
+ return __arm_vsetq_lane_s64 (__a, __b, __idx);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane (uint8_t __a, uint8x16_t __b, const int __idx)
+{
+ return __arm_vsetq_lane_u8 (__a, __b, __idx);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane (uint16_t __a, uint16x8_t __b, const int __idx)
+{
+ return __arm_vsetq_lane_u16 (__a, __b, __idx);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane (uint32_t __a, uint32x4_t __b, const int __idx)
+{
+ return __arm_vsetq_lane_u32 (__a, __b, __idx);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane (uint64_t __a, uint64x2_t __b, const int __idx)
+{
+ return __arm_vsetq_lane_u64 (__a, __b, __idx);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane (int16x8_t __a, const int __idx)
+{
+ return __arm_vgetq_lane_s16 (__a, __idx);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane (int32x4_t __a, const int __idx)
+{
+ return __arm_vgetq_lane_s32 (__a, __idx);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane (int8x16_t __a, const int __idx)
+{
+ return __arm_vgetq_lane_s8 (__a, __idx);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane (int64x2_t __a, const int __idx)
+{
+ return __arm_vgetq_lane_s64 (__a, __idx);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane (uint8x16_t __a, const int __idx)
+{
+ return __arm_vgetq_lane_u8 (__a, __idx);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane (uint16x8_t __a, const int __idx)
+{
+ return __arm_vgetq_lane_u16 (__a, __idx);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane (uint32x4_t __a, const int __idx)
+{
+ return __arm_vgetq_lane_u32 (__a, __idx);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane (uint64x2_t __a, const int __idx)
+{
+ return __arm_vgetq_lane_u64 (__a, __idx);
+}
+
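+/* Illustrative sketch, not part of the imported header: lane accessors
+   require a compile-time lane index.
+
+     int32x4_t v = vdupq_n_s32 (0);
+     v = vsetq_lane (42, v, 3);      // insert scalar into lane 3
+     int32_t x = vgetq_lane (v, 3);  // x == 42
+*/
+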
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m (int8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlcq_m_s8 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m (uint8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlcq_m_u8 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m (int16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlcq_m_s16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m (uint16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlcq_m_u16 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m (int32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlcq_m_s32 (__a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m (uint32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+ return __arm_vshlcq_m_u32 (__a, __b, __imm, __p);
+}
+
+#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q (float16_t * __addr, float16x8x4_t __value)
+{
+ __arm_vst4q_f16 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst4q (float32_t * __addr, float32x4x4_t __value)
+{
+ __arm_vst4q_f32 (__addr, __value);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq (float16x8_t __a)
+{
+ return __arm_vrndxq_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq (float32x4_t __a)
+{
+ return __arm_vrndxq_f32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq (float16x8_t __a)
+{
+ return __arm_vrndq_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq (float32x4_t __a)
+{
+ return __arm_vrndq_f32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq (float16x8_t __a)
+{
+ return __arm_vrndpq_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq (float32x4_t __a)
+{
+ return __arm_vrndpq_f32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq (float16x8_t __a)
+{
+ return __arm_vrndnq_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq (float32x4_t __a)
+{
+ return __arm_vrndnq_f32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq (float16x8_t __a)
+{
+ return __arm_vrndmq_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq (float32x4_t __a)
+{
+ return __arm_vrndmq_f32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq (float16x8_t __a)
+{
+ return __arm_vrndaq_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq (float32x4_t __a)
+{
+ return __arm_vrndaq_f32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q (float16x8_t __a)
+{
+ return __arm_vrev64q_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q (float32x4_t __a)
+{
+ return __arm_vrev64q_f32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq (float16x8_t __a)
+{
+ return __arm_vnegq_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq (float32x4_t __a)
+{
+ return __arm_vnegq_f32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n (float16_t __a)
+{
+ return __arm_vdupq_n_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_n (float32_t __a)
+{
+ return __arm_vdupq_n_f32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq (float16x8_t __a)
+{
+ return __arm_vabsq_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq (float32x4_t __a)
+{
+ return __arm_vabsq_f32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q (float16x8_t __a)
+{
+ return __arm_vrev32q_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvttq_f32 (float16x8_t __a)
+{
+ return __arm_vcvttq_f32_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtbq_f32 (float16x8_t __a)
+{
+ return __arm_vcvtbq_f32_f16 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq (int16x8_t __a)
+{
+ return __arm_vcvtq_f16_s16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq (int32x4_t __a)
+{
+ return __arm_vcvtq_f32_s32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq (uint16x8_t __a)
+{
+ return __arm_vcvtq_f16_u16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq (uint32x4_t __a)
+{
+ return __arm_vcvtq_f32_u32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (float16x8_t __a, float16_t __b)
+{
+ return __arm_vsubq_n_f16 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (float32x4_t __a, float32_t __b)
+{
+ return __arm_vsubq_n_f32 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq (float16x8_t __a, int32_t __b)
+{
+ return __arm_vbrsrq_n_f16 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq (float32x4_t __a, int32_t __b)
+{
+ return __arm_vbrsrq_n_f32 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_n (int16x8_t __a, const int __imm6)
+{
+ return __arm_vcvtq_n_f16_s16 (__a, __imm6);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_n (int32x4_t __a, const int __imm6)
+{
+ return __arm_vcvtq_n_f32_s32 (__a, __imm6);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_n (uint16x8_t __a, const int __imm6)
+{
+ return __arm_vcvtq_n_f16_u16 (__a, __imm6);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_n (uint32x4_t __a, const int __imm6)
+{
+ return __arm_vcvtq_n_f32_u32 (__a, __imm6);
+}
+
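+/* Illustrative sketch, not part of the imported header: the _n
+   conversions treat the integer input as fixed point with __imm6
+   fractional bits, e.g. Q8.8 to float16.  from_q88 is a hypothetical
+   name.
+
+     float16x8_t from_q88 (int16x8_t q)
+     {
+       return vcvtq_n (q, 8);  // result lane = q lane / 256.0
+     }
+*/
+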
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (float16x8_t __a, float16_t __b)
+{
+ return __arm_vcmpneq_n_f16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vcmpneq_f16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq (float16x8_t __a, float16_t __b)
+{
+ return __arm_vcmpltq_n_f16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vcmpltq_f16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq (float16x8_t __a, float16_t __b)
+{
+ return __arm_vcmpleq_n_f16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vcmpleq_f16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq (float16x8_t __a, float16_t __b)
+{
+ return __arm_vcmpgtq_n_f16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vcmpgtq_f16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq (float16x8_t __a, float16_t __b)
+{
+ return __arm_vcmpgeq_n_f16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vcmpgeq_f16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (float16x8_t __a, float16_t __b)
+{
+ return __arm_vcmpeqq_n_f16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vcmpeqq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vsubq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vorrq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vornq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (float16x8_t __a, float16_t __b)
+{
+ return __arm_vmulq_n_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vmulq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmvq (float16_t __a, float16x8_t __b)
+{
+ return __arm_vminnmvq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vminnmq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmavq (float16_t __a, float16x8_t __b)
+{
+ return __arm_vminnmavq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmaq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vminnmaq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmvq (float16_t __a, float16x8_t __b)
+{
+ return __arm_vmaxnmvq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vmaxnmq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmavq (float16_t __a, float16x8_t __b)
+{
+ return __arm_vmaxnmavq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmaq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vmaxnmaq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_veorq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90 (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vcmulq_rot90_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270 (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vcmulq_rot270_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180 (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vcmulq_rot180_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vcmulq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90 (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vcaddq_rot90_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270 (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vcaddq_rot270_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vbicq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vandq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (float16x8_t __a, float16_t __b)
+{
+ return __arm_vaddq_n_f16 (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vabdq_f16 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (float32x4_t __a, float32_t __b)
+{
+ return __arm_vcmpneq_n_f32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vcmpneq_f32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq (float32x4_t __a, float32_t __b)
+{
+ return __arm_vcmpltq_n_f32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vcmpltq_f32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq (float32x4_t __a, float32_t __b)
+{
+ return __arm_vcmpleq_n_f32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vcmpleq_f32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq (float32x4_t __a, float32_t __b)
+{
+ return __arm_vcmpgtq_n_f32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vcmpgtq_f32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq (float32x4_t __a, float32_t __b)
+{
+ return __arm_vcmpgeq_n_f32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vcmpgeq_f32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (float32x4_t __a, float32_t __b)
+{
+ return __arm_vcmpeqq_n_f32 (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vcmpeqq_f32 (__a, __b);
+}
+
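+/* Illustrative sketch, not part of the imported header: the compare
+   overloads produce an mve_pred16_t that other predicated intrinsics
+   consume; vpselq (defined elsewhere in this header) selects per lane.
+   relu is a hypothetical name.
+
+     float32x4_t relu (float32x4_t v)
+     {
+       mve_pred16_t p = vcmpgtq (v, 0.0f);        // lanes where v > 0
+       return vpselq (v, vdupq_n_f32 (0.0f), p);  // keep v, else 0.0
+     }
+*/
+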
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vsubq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vorrq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vornq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (float32x4_t __a, float32_t __b)
+{
+ return __arm_vmulq_n_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vmulq_f32 (__a, __b);
+}
+
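+/* Cross-lane reductions: vminnmvq/vmaxnmvq fold a vector into the
+   scalar seed __a using IEEE 754 minNum/maxNum semantics (a quiet NaN
+   in one operand yields the other operand); the vminnmaq/vmaxnmaq and
+   vminnmavq/vmaxnmavq forms operate on absolute values.  */
+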
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmvq (float32_t __a, float32x4_t __b)
+{
+ return __arm_vminnmvq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vminnmq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmavq (float32_t __a, float32x4_t __b)
+{
+ return __arm_vminnmavq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmaq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vminnmaq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmvq (float32_t __a, float32x4_t __b)
+{
+ return __arm_vmaxnmvq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vmaxnmq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmavq (float32_t __a, float32x4_t __b)
+{
+ return __arm_vmaxnmavq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmaq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vmaxnmaq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_veorq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90 (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vcmulq_rot90_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270 (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vcmulq_rot270_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180 (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vcmulq_rot180_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vcmulq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90 (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vcaddq_rot90_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270 (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vcaddq_rot270_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vbicq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vandq_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (float32x4_t __a, float32_t __b)
+{
+ return __arm_vaddq_n_f32 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vabdq_f32 (__a, __b);
+}
+
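+/* Predicated "_m" (merging) overloads follow.  The trailing
+   mve_pred16_t argument selects active byte lanes: overloads that take
+   an __inactive vector copy unselected lanes from it, while the
+   vcmp*_m forms leave unselected bits of the result mask clear.  A
+   minimal usage sketch (hypothetical values; assumes the short
+   user-namespace aliases such as vcvtq_m are in effect):
+
+     float32x4_t inactive = vdupq_n_f32 (0.0f);
+     mve_pred16_t p = 0x00ff;   // bytes 0-7, i.e. f32 lanes 0 and 1
+     float32x4_t r = vcvtq_m (inactive, vdupq_n_s32 (3), p);
+     // r == { 3.0f, 3.0f, 0.0f, 0.0f }
+*/
+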
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_f32 (__a, __b, __p);
+}
+
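+/* Predicated float-to-integer conversions with an explicit rounding
+   mode: vcvtaq rounds to nearest, ties away from zero; vcvtnq to
+   nearest, ties to even; vcvtmq towards minus infinity; vcvtpq towards
+   plus infinity; plain vcvtq truncates towards zero.  */
+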
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtaq_m_s16_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtaq_m_u16_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtaq_m_s32_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtaq_m_u32_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m (float16x8_t __inactive, int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_f16_s16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m (float16x8_t __inactive, uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_f16_u16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m (float32x4_t __inactive, int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_f32_s32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m (float32x4_t __inactive, uint32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_f32_u32 (__inactive, __a, __p);
+}
+
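+/* vcvtbq/vcvttq convert between f16 and f32 through the bottom (even)
+   or top (odd) f16 lane positions: narrowing writes the converted
+   values into those positions of __a and preserves the rest, while
+   widening reads from them, taking unselected lanes from __inactive.  */
+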
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtbq_m (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcvtbq_m_f16_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtbq_m (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtbq_m_f32_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvttq_m (float16x8_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcvttq_m_f16_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvttq_m (float32x4_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvttq_m_f32_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev32q_m_f16 (__inactive, __a, __p);
+}
+
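+/* Complex arithmetic on interleaved (real, imaginary) lane pairs:
+   vcmlaq accumulates the product of __b and __c onto __a, and the
+   _rot90/_rot180/_rot270 forms rotate the second multiply operand by
+   the stated angle in the complex plane.  Note the scalar FMA
+   overloads differ in where the scalar enters: vfmaq (__a, __b, __c)
+   computes __a + __b * __c, whereas vfmasq computes __a * __b + __c.  */
+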
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __arm_vcmlaq_f16 (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot180 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __arm_vcmlaq_rot180_f16 (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot270 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __arm_vcmlaq_rot270_f16 (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot90 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __arm_vcmlaq_rot90_f16 (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __arm_vfmaq_f16 (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq (float16x8_t __a, float16x8_t __b, float16_t __c)
+{
+ return __arm_vfmaq_n_f16 (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmasq (float16x8_t __a, float16x8_t __b, float16_t __c)
+{
+ return __arm_vfmasq_n_f16 (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmsq (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __arm_vfmsq_f16 (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vabsq_m_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtmq_m_s16_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtnq_m_s16_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtpq_m_s16_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m (int16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_s16_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m (float16x8_t __inactive, float16_t __a, mve_pred16_t __p)
+{
+ return __arm_vdupq_m_n_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmaq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxnmaq_m_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmavq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxnmavq_p_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmvq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxnmvq_p_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmaq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vminnmaq_m_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmavq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vminnmavq_p_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmvq_p (float16_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vminnmvq_p_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vnegq_m_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vpselq_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_m_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndaq_m_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndmq_m_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndnq_m_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndpq_m_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndq_m_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq_m (float16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndxq_m_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_n_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgeq_m_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgeq_m_n_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgtq_m_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgtq_m_n_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpleq_m_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpleq_m_n_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpltq_m_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpltq_m_n_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_n_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtmq_m_u16_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtnq_m_u16_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtpq_m_u16_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m (uint16x8_t __inactive, float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_u16_f16 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __arm_vcmlaq_f32 (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot180 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __arm_vcmlaq_rot180_f32 (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot270 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __arm_vcmlaq_rot270_f32 (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot90 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __arm_vcmlaq_rot90_f32 (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __arm_vfmaq_f32 (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+ return __arm_vfmaq_n_f32 (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmasq (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+ return __arm_vfmasq_n_f32 (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmsq (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return __arm_vfmsq_f32 (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vabsq_m_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtmq_m_s32_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtnq_m_s32_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtpq_m_s32_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m (int32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_s32_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_m (float32x4_t __inactive, float32_t __a, mve_pred16_t __p)
+{
+ return __arm_vdupq_m_n_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmaq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxnmaq_m_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmavq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxnmavq_p_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmvq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxnmvq_p_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmaq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vminnmaq_m_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmavq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vminnmavq_p_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmvq_p (float32_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vminnmvq_p_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vnegq_m_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vpselq (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vpselq_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_m_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndaq_m_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndmq_m_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndnq_m_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndpq_m_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndq_m_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq_m (float32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndxq_m_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpeqq_m_n_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgeq_m_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgeq_m_n_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgtq_m_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpgtq_m_n_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpleq_m_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpleq_m_n_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpltq_m_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpltq_m_n_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_m (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmpneq_m_n_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtmq_m_u32_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtnq_m_u32_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtpq_m_u32_f32 (__inactive, __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m (uint32x4_t __inactive, float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_u32_f32 (__inactive, __a, __p);
+}
+
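+/* Fixed-point conversions: the _n overloads take a compile-time
+   constant __imm6 giving the number of fractional bits (1..16 for
+   16-bit elements, 1..32 for 32-bit elements), so the value is scaled
+   by 2^-__imm6 when converting to floating point and by 2^__imm6 when
+   converting back to integer.  */
+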
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n (float16x8_t __inactive, uint16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_n_f16_u16 (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n (float16x8_t __inactive, int16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_n_f16_s16 (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n (float32x4_t __inactive, uint32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_n_f32_u32 (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n (float32x4_t __inactive, int32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_n_f32_s32 (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_n_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_m_n_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m (float32x4_t __inactive, float32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_m_n_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_m (float16x8_t __inactive, float16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_m_n_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vcmlaq_m_f32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vcmlaq_m_f16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot180_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vcmlaq_rot180_m_f32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot180_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vcmlaq_rot180_m_f16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot270_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vcmlaq_rot270_m_f32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot270_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vcmlaq_rot270_m_f16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot90_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vcmlaq_rot90_m_f32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmlaq_rot90_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vcmlaq_rot90_m_f16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_rot180_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_rot180_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_rot270_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_rot270_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_rot90_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_rot90_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n (int32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_n_s32_f32 (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n (int16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_n_s16_f16 (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n (uint32x4_t __inactive, float32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_n_u32_f32 (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_m_n (uint16x8_t __inactive, float16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __arm_vcvtq_m_n_u16_f16 (__inactive, __a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vfmaq_m_f32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vfmaq_m_f16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq_m (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
+{
+ return __arm_vfmaq_m_n_f32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmaq_m (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
+{
+ return __arm_vfmaq_m_n_f16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmasq_m (float32x4_t __a, float32x4_t __b, float32_t __c, mve_pred16_t __p)
+{
+ return __arm_vfmasq_m_n_f32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmasq_m (float16x8_t __a, float16x8_t __b, float16_t __c, mve_pred16_t __p)
+{
+ return __arm_vfmasq_m_n_f16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmsq_m (float32x4_t __a, float32x4_t __b, float32x4_t __c, mve_pred16_t __p)
+{
+ return __arm_vfmsq_m_f32 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vfmsq_m (float16x8_t __a, float16x8_t __b, float16x8_t __c, mve_pred16_t __p)
+{
+ return __arm_vfmsq_m_f16 (__a, __b, __c, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxnmq_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxnmq_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vminnmq_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vminnmq_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_n_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_m_n_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (float32x4_t __inactive, float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (float16x8_t __inactive, float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_f16 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (float32x4_t __inactive, float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_n_f32 (__inactive, __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_m (float16x8_t __inactive, float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_m_n_f16 (__inactive, __a, __b, __p);
+}
+
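+/* Contiguous and gathering loads.  vld1q loads lanes from consecutive
+   memory; the gather forms load each lane from __base + __offset[i],
+   where plain _offset treats the offsets as byte counts and
+   _shifted_offset scales them by the element size first.  The _z
+   (zeroing) variants clear unselected lanes instead of loading them.  */
+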
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q (float32_t const * __base)
+{
+ return __arm_vld1q_f32 (__base);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q (float16_t const * __base)
+{
+ return __arm_vld1q_f16 (__base);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset (float16_t const * __base, uint16x8_t __offset)
+{
+ return __arm_vldrhq_gather_offset_f16 (__base, __offset);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_offset_z (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrhq_gather_offset_z_f16 (__base, __offset, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset (float16_t const * __base, uint16x8_t __offset)
+{
+ return __arm_vldrhq_gather_shifted_offset_f16 (__base, __offset);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrhq_gather_shifted_offset_z (float16_t const * __base, uint16x8_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrhq_gather_shifted_offset_z_f16 (__base, __offset, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_offset (float32_t const * __base, uint32x4_t __offset)
+{
+ return __arm_vldrwq_gather_offset_f32 (__base, __offset);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_offset_z (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrwq_gather_offset_z_f32 (__base, __offset, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_shifted_offset (float32_t const * __base, uint32x4_t __offset)
+{
+ return __arm_vldrwq_gather_shifted_offset_f32 (__base, __offset);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vldrwq_gather_shifted_offset_z (float32_t const * __base, uint32x4_t __offset, mve_pred16_t __p)
+{
+ return __arm_vldrwq_gather_shifted_offset_z_f32 (__base, __offset, __p);
+}
+
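+/* Contiguous and scattering stores; the _p forms write only the lanes
+   selected by the predicate.  _scatter_base takes a vector of
+   addresses plus a constant byte offset, whereas _scatter_offset takes
+   a scalar base pointer plus a vector of (optionally element-scaled)
+   offsets.  */
+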
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_p (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_p_f32 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq (float32_t * __addr, float32x4_t __value)
+{
+ __arm_vstrwq_f32 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q (float32_t * __addr, float32x4_t __value)
+{
+ __arm_vst1q_f32 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q (float16_t * __addr, float16x8_t __value)
+{
+ __arm_vst1q_f16 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq (float16_t * __addr, float16x8_t __value)
+{
+ __arm_vstrhq_f16 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_p (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_p_f16 (__addr, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
+{
+ __arm_vstrhq_scatter_offset_f16 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_offset_p (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_scatter_offset_p_f16 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset (float16_t * __base, uint16x8_t __offset, float16x8_t __value)
+{
+ __arm_vstrhq_scatter_shifted_offset_f16 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrhq_scatter_shifted_offset_p (float16_t * __base, uint16x8_t __offset, float16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vstrhq_scatter_shifted_offset_p_f16 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base (uint32x4_t __addr, const int __offset, float32x4_t __value)
+{
+ __arm_vstrwq_scatter_base_f32 (__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_p (uint32x4_t __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_scatter_base_p_f32 (__addr, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_offset (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
+{
+ __arm_vstrwq_scatter_offset_f32 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_offset_p (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_scatter_offset_p_f32 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_shifted_offset (float32_t * __base, uint32x4_t __offset, float32x4_t __value)
+{
+ __arm_vstrwq_scatter_shifted_offset_f32 (__base, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_shifted_offset_p (float32_t * __base, uint32x4_t __offset, float32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_scatter_shifted_offset_p_f32 (__base, __offset, __value, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (float16x8_t __a, float16x8_t __b)
+{
+ return __arm_vaddq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq (float32x4_t __a, float32x4_t __b)
+{
+ return __arm_vaddq_f32 (__a, __b);
+}
+
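+/* The _wb scatter forms update the address vector in place: each lane
+   is stored to (*__addr)[i] + __offset, and the incremented addresses
+   are written back to *__addr.  */
+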
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_wb (uint32x4_t * __addr, const int __offset, float32x4_t __value)
+{
+ __arm_vstrwq_scatter_base_wb_f32 (__addr, __offset, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vstrwq_scatter_base_wb_p (uint32x4_t * __addr, const int __offset, float32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vstrwq_scatter_base_wb_p_f32 (__addr, __offset, __value, __p);
+}
+
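+/* "_x" (don't-care) predicated overloads: unselected lanes of the
+   result are undefined, so no __inactive operand is required and the
+   compiler may reuse any destination register.  */
+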
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vminnmq_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vminnmq_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxnmq_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmaxnmq_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vabdq_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x (float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vabsq_x_f16 (__a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x (float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vabsq_x_f32 (__a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_n_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __arm_vaddq_x_n_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x (float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vnegq_x_f16 (__a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x (float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vnegq_x_f32 (__a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_n_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __arm_vmulq_x_n_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_n_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __arm_vsubq_x_n_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot90_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcaddq_rot270_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_rot90_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_rot90_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_rot180_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_rot180_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_rot270_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vcmulq_rot270_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtq_x_f16_u16 (__a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x (int16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtq_x_f16_s16 (__a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x (int32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtq_x_f32_s32 (__a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vcvtq_x_f32_u32 (__a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n (int16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __arm_vcvtq_x_n_f16_s16 (__a, __imm6, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n (uint16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __arm_vcvtq_x_n_f16_u16 (__a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n (int32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __arm_vcvtq_x_n_f32_s32 (__a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n (uint32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __arm_vcvtq_x_n_f32_u32 (__a, __imm6, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq_x (float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndq_x_f16 (__a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq_x (float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndq_x_f32 (__a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq_x (float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndnq_x_f16 (__a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq_x (float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndnq_x_f32 (__a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq_x (float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndmq_x_f16 (__a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq_x (float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndmq_x_f32 (__a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq_x (float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndpq_x_f16 (__a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq_x (float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndpq_x_f32 (__a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq_x (float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndaq_x_f16 (__a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq_x (float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndaq_x_f32 (__a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq_x (float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndxq_x_f16 (__a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq_x (float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrndxq_x_f32 (__a, __p);
+}
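+
+/* Editorial note: the _x suffix on the predicated variants above
+   (vrndq_x, vrndnq_x, vrndaq_x, ...) denotes "don't-care"
+   predication: lanes whose bits in the mve_pred16_t argument are
+   clear end up with an unspecified value, which leaves the compiler
+   more freedom than the merging (_m) or zeroing (_z) forms.  A
+   sketch, assuming MVE floating point; __arm_vctp32q is the
+   tail-predicate intrinsic from this header:
+
+     float32x4_t round_first_n (float32x4_t v, uint32_t n)
+     {
+       mve_pred16_t p = __arm_vctp32q (n); // predicate first n lanes
+       return __arm_vrndq_x (v, p);        // inactive lanes unspecified
+     }
+*/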
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vandq_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vbicq_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x (float16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_x_n_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x (float32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __arm_vbrsrq_x_n_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_veorq_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vornq_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_x_f16 (__a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __arm_vorrq_x_f32 (__a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x (float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev32q_x_f16 (__a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x (float16x8_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_x_f16 (__a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x (float32x4_t __a, mve_pred16_t __p)
+{
+ return __arm_vrev64q_x_f32 (__a, __p);
+}
+
+__extension__ extern __inline float16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q (float16_t const * __addr)
+{
+ return __arm_vld4q_f16 (__addr);
+}
+
+__extension__ extern __inline float16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q (float16_t const * __addr)
+{
+ return __arm_vld2q_f16 (__addr);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z (float16_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vld1q_z_f16 (__base, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q (float16_t * __addr, float16x8x2_t __value)
+{
+ __arm_vst2q_f16 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p (float16_t * __addr, float16x8_t __value, mve_pred16_t __p)
+{
+ __arm_vst1q_p_f16 (__addr, __value, __p);
+}
+
+__extension__ extern __inline float32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld4q (float32_t const * __addr)
+{
+ return __arm_vld4q_f32 (__addr);
+}
+
+__extension__ extern __inline float32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld2q (float32_t const * __addr)
+{
+ return __arm_vld2q_f32 (__addr);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vld1q_z (float32_t const *__base, mve_pred16_t __p)
+{
+ return __arm_vld1q_z_f32 (__base, __p);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst2q (float32_t * __addr, float32x4x2_t __value)
+{
+ __arm_vst2q_f32 (__addr, __value);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vst1q_p (float32_t * __addr, float32x4_t __value, mve_pred16_t __p)
+{
+ __arm_vst1q_p_f32 (__addr, __value, __p);
+}
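+
+/* Editorial note: vld1q_z and vst1q_p above are the predicated
+   contiguous load/store forms: vld1q_z zeroes lanes whose predicate
+   bits are clear, and vst1q_p leaves the corresponding memory
+   untouched.  Together they make loop tails straightforward; a
+   sketch, assuming MVE floating point:
+
+     void scale (float32_t *dst, const float32_t *src, float32_t s, int n)
+     {
+       for (int i = 0; i < n; i += 4)
+         {
+           mve_pred16_t p = __arm_vctp32q (n - i); // active lanes left
+           float32x4_t v = __arm_vld1q_z (&src[i], p);
+           __arm_vst1q_p (&dst[i], __arm_vmulq_x (v, s, p), p);
+         }
+     }
+*/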
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane (float16_t __a, float16x8_t __b, const int __idx)
+{
+ return __arm_vsetq_lane_f16 (__a, __b, __idx);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsetq_lane (float32_t __a, float32x4_t __b, const int __idx)
+{
+ return __arm_vsetq_lane_f32 (__a, __b, __idx);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane (float16x8_t __a, const int __idx)
+{
+ return __arm_vgetq_lane_f16 (__a, __idx);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vgetq_lane (float32x4_t __a, const int __idx)
+{
+ return __arm_vgetq_lane_f32 (__a, __idx);
+}
+#endif /* MVE Floating point. */
+
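+/* Editorial note: the inline overloads above depend on C++ overload
+   resolution; the #else branch below appears to pair with the
+   header's __cplusplus guard and rebuilds the same polymorphic entry
+   points for C, using _Generic selection keyed on the
+   __ARM_mve_type_* ids defined next in place of overloading. */
+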
+#else
+enum {
+ __ARM_mve_type_fp_n = 1,
+ __ARM_mve_type_int_n,
+ __ARM_mve_type_float16_t_ptr,
+ __ARM_mve_type_float16x8_t,
+ __ARM_mve_type_float16x8x2_t,
+ __ARM_mve_type_float16x8x4_t,
+ __ARM_mve_type_float32_t_ptr,
+ __ARM_mve_type_float32x4_t,
+ __ARM_mve_type_float32x4x2_t,
+ __ARM_mve_type_float32x4x4_t,
+ __ARM_mve_type_int16_t_ptr,
+ __ARM_mve_type_int16x8_t,
+ __ARM_mve_type_int16x8x2_t,
+ __ARM_mve_type_int16x8x4_t,
+ __ARM_mve_type_int32_t_ptr,
+ __ARM_mve_type_int32x4_t,
+ __ARM_mve_type_int32x4x2_t,
+ __ARM_mve_type_int32x4x4_t,
+ __ARM_mve_type_int64_t_ptr,
+ __ARM_mve_type_int64x2_t,
+ __ARM_mve_type_int8_t_ptr,
+ __ARM_mve_type_int8x16_t,
+ __ARM_mve_type_int8x16x2_t,
+ __ARM_mve_type_int8x16x4_t,
+ __ARM_mve_type_uint16_t_ptr,
+ __ARM_mve_type_uint16x8_t,
+ __ARM_mve_type_uint16x8x2_t,
+ __ARM_mve_type_uint16x8x4_t,
+ __ARM_mve_type_uint32_t_ptr,
+ __ARM_mve_type_uint32x4_t,
+ __ARM_mve_type_uint32x4x2_t,
+ __ARM_mve_type_uint32x4x4_t,
+ __ARM_mve_type_uint64_t_ptr,
+ __ARM_mve_type_uint64x2_t,
+ __ARM_mve_type_uint8_t_ptr,
+ __ARM_mve_type_uint8x16_t,
+ __ARM_mve_type_uint8x16x2_t,
+ __ARM_mve_type_uint8x16x4_t,
+ __ARM_mve_unsupported_type
+};
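+
+/* Editorial note: each type the polymorphic macros accept gets a
+   distinct small integer id.  The dispatchers below fold one or two
+   ids into an array type such as int (*)[id0][id1], so a single
+   _Generic expression can discriminate on both argument types at
+   once; a combination with no matching association is rejected at
+   compile time.  The two-argument pattern reduces to:
+
+     _Generic ((int (*)[__ARM_mve_typeid (a)][__ARM_mve_typeid (b)]) 0,
+               int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]:
+                 ... s8 variant ...,
+               ...)
+*/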
+
+#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
+#define __ARM_mve_typeid(x) _Generic(x, \
+ float16_t: __ARM_mve_type_fp_n, \
+ float16_t *: __ARM_mve_type_float16_t_ptr, \
+ float16_t const *: __ARM_mve_type_float16_t_ptr, \
+ float16x8_t: __ARM_mve_type_float16x8_t, \
+ float16x8x2_t: __ARM_mve_type_float16x8x2_t, \
+ float16x8x4_t: __ARM_mve_type_float16x8x4_t, \
+ float32_t: __ARM_mve_type_fp_n, \
+ float32_t *: __ARM_mve_type_float32_t_ptr, \
+ float32_t const *: __ARM_mve_type_float32_t_ptr, \
+ float32x4_t: __ARM_mve_type_float32x4_t, \
+ float32x4x2_t: __ARM_mve_type_float32x4x2_t, \
+ float32x4x4_t: __ARM_mve_type_float32x4x4_t, \
+ int16_t: __ARM_mve_type_int_n, \
+ int16_t *: __ARM_mve_type_int16_t_ptr, \
+ int16_t const *: __ARM_mve_type_int16_t_ptr, \
+ int16x8_t: __ARM_mve_type_int16x8_t, \
+ int16x8x2_t: __ARM_mve_type_int16x8x2_t, \
+ int16x8x4_t: __ARM_mve_type_int16x8x4_t, \
+ int32_t: __ARM_mve_type_int_n, \
+ int32_t *: __ARM_mve_type_int32_t_ptr, \
+ int32_t const *: __ARM_mve_type_int32_t_ptr, \
+ int32x4_t: __ARM_mve_type_int32x4_t, \
+ int32x4x2_t: __ARM_mve_type_int32x4x2_t, \
+ int32x4x4_t: __ARM_mve_type_int32x4x4_t, \
+ int64_t: __ARM_mve_type_int_n, \
+ int64_t *: __ARM_mve_type_int64_t_ptr, \
+ int64_t const *: __ARM_mve_type_int64_t_ptr, \
+ int64x2_t: __ARM_mve_type_int64x2_t, \
+ int8_t: __ARM_mve_type_int_n, \
+ int8_t *: __ARM_mve_type_int8_t_ptr, \
+ int8_t const *: __ARM_mve_type_int8_t_ptr, \
+ int8x16_t: __ARM_mve_type_int8x16_t, \
+ int8x16x2_t: __ARM_mve_type_int8x16x2_t, \
+ int8x16x4_t: __ARM_mve_type_int8x16x4_t, \
+ uint16_t: __ARM_mve_type_int_n, \
+ uint16_t *: __ARM_mve_type_uint16_t_ptr, \
+ uint16_t const *: __ARM_mve_type_uint16_t_ptr, \
+ uint16x8_t: __ARM_mve_type_uint16x8_t, \
+ uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \
+ uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \
+ uint32_t: __ARM_mve_type_int_n, \
+ uint32_t *: __ARM_mve_type_uint32_t_ptr, \
+ uint32_t const *: __ARM_mve_type_uint32_t_ptr, \
+ uint32x4_t: __ARM_mve_type_uint32x4_t, \
+ uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \
+ uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \
+ uint64_t: __ARM_mve_type_int_n, \
+ uint64_t *: __ARM_mve_type_uint64_t_ptr, \
+ uint64_t const *: __ARM_mve_type_uint64_t_ptr, \
+ uint64x2_t: __ARM_mve_type_uint64x2_t, \
+ uint8_t: __ARM_mve_type_int_n, \
+ uint8_t *: __ARM_mve_type_uint8_t_ptr, \
+ uint8_t const *: __ARM_mve_type_uint8_t_ptr, \
+ uint8x16_t: __ARM_mve_type_uint8x16_t, \
+ uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \
+ uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \
+ default: _Generic(x, \
+ signed char: __ARM_mve_type_int_n, \
+ short: __ARM_mve_type_int_n, \
+ int: __ARM_mve_type_int_n, \
+ long: __ARM_mve_type_int_n, \
+ long long: __ARM_mve_type_int_n, \
+ _Float16: __ARM_mve_type_fp_n, \
+ __fp16: __ARM_mve_type_fp_n, \
+ float: __ARM_mve_type_fp_n, \
+ double: __ARM_mve_type_fp_n, \
+ unsigned char: __ARM_mve_type_int_n, \
+ unsigned short: __ARM_mve_type_int_n, \
+ unsigned int: __ARM_mve_type_int_n, \
+ unsigned long: __ARM_mve_type_int_n, \
+ unsigned long long: __ARM_mve_type_int_n, \
+ signed char*: __ARM_mve_type_int8_t_ptr, \
+ short*: __ARM_mve_type_int16_t_ptr, \
+ int*: __ARM_mve_type_int32_t_ptr, \
+ long*: __ARM_mve_type_int32_t_ptr, \
+ long long*: __ARM_mve_type_int64_t_ptr, \
+ _Float16*: __ARM_mve_type_float16_t_ptr, \
+ __fp16*: __ARM_mve_type_float16_t_ptr, \
+ float*: __ARM_mve_type_float32_t_ptr, \
+ unsigned char*: __ARM_mve_type_uint8_t_ptr, \
+ unsigned short*: __ARM_mve_type_uint16_t_ptr, \
+ unsigned int*: __ARM_mve_type_uint32_t_ptr, \
+ unsigned long*: __ARM_mve_type_uint32_t_ptr, \
+ unsigned long long*: __ARM_mve_type_uint64_t_ptr, \
+ default: __ARM_mve_unsupported_type))
+#else
+#define __ARM_mve_typeid(x) _Generic(x, \
+ int16_t: __ARM_mve_type_int_n, \
+ int16_t *: __ARM_mve_type_int16_t_ptr, \
+ int16_t const *: __ARM_mve_type_int16_t_ptr, \
+ int16x8_t: __ARM_mve_type_int16x8_t, \
+ int16x8x2_t: __ARM_mve_type_int16x8x2_t, \
+ int16x8x4_t: __ARM_mve_type_int16x8x4_t, \
+ int32_t: __ARM_mve_type_int_n, \
+ int32_t *: __ARM_mve_type_int32_t_ptr, \
+ int32_t const *: __ARM_mve_type_int32_t_ptr, \
+ int32x4_t: __ARM_mve_type_int32x4_t, \
+ int32x4x2_t: __ARM_mve_type_int32x4x2_t, \
+ int32x4x4_t: __ARM_mve_type_int32x4x4_t, \
+ int64_t: __ARM_mve_type_int_n, \
+ int64_t *: __ARM_mve_type_int64_t_ptr, \
+ int64_t const *: __ARM_mve_type_int64_t_ptr, \
+ int64x2_t: __ARM_mve_type_int64x2_t, \
+ int8_t: __ARM_mve_type_int_n, \
+ int8_t *: __ARM_mve_type_int8_t_ptr, \
+ int8_t const *: __ARM_mve_type_int8_t_ptr, \
+ int8x16_t: __ARM_mve_type_int8x16_t, \
+ int8x16x2_t: __ARM_mve_type_int8x16x2_t, \
+ int8x16x4_t: __ARM_mve_type_int8x16x4_t, \
+ uint16_t: __ARM_mve_type_int_n, \
+ uint16_t *: __ARM_mve_type_uint16_t_ptr, \
+ uint16_t const *: __ARM_mve_type_uint16_t_ptr, \
+ uint16x8_t: __ARM_mve_type_uint16x8_t, \
+ uint16x8x2_t: __ARM_mve_type_uint16x8x2_t, \
+ uint16x8x4_t: __ARM_mve_type_uint16x8x4_t, \
+ uint32_t: __ARM_mve_type_int_n, \
+ uint32_t *: __ARM_mve_type_uint32_t_ptr, \
+ uint32_t const *: __ARM_mve_type_uint32_t_ptr, \
+ uint32x4_t: __ARM_mve_type_uint32x4_t, \
+ uint32x4x2_t: __ARM_mve_type_uint32x4x2_t, \
+ uint32x4x4_t: __ARM_mve_type_uint32x4x4_t, \
+ uint64_t: __ARM_mve_type_int_n, \
+ uint64_t *: __ARM_mve_type_uint64_t_ptr, \
+ uint64_t const *: __ARM_mve_type_uint64_t_ptr, \
+ uint64x2_t: __ARM_mve_type_uint64x2_t, \
+ uint8_t: __ARM_mve_type_int_n, \
+ uint8_t *: __ARM_mve_type_uint8_t_ptr, \
+ uint8_t const *: __ARM_mve_type_uint8_t_ptr, \
+ uint8x16_t: __ARM_mve_type_uint8x16_t, \
+ uint8x16x2_t: __ARM_mve_type_uint8x16x2_t, \
+ uint8x16x4_t: __ARM_mve_type_uint8x16x4_t, \
+ default: _Generic(x, \
+ signed char: __ARM_mve_type_int_n, \
+ short: __ARM_mve_type_int_n, \
+ int: __ARM_mve_type_int_n, \
+ long: __ARM_mve_type_int_n, \
+ long long: __ARM_mve_type_int_n, \
+ unsigned char: __ARM_mve_type_int_n, \
+ unsigned short: __ARM_mve_type_int_n, \
+ unsigned int: __ARM_mve_type_int_n, \
+ unsigned long: __ARM_mve_type_int_n, \
+ unsigned long long: __ARM_mve_type_int_n, \
+ signed char*: __ARM_mve_type_int8_t_ptr, \
+ short*: __ARM_mve_type_int16_t_ptr, \
+ int*: __ARM_mve_type_int32_t_ptr, \
+ long*: __ARM_mve_type_int32_t_ptr, \
+ long long*: __ARM_mve_type_int64_t_ptr, \
+ unsigned char*: __ARM_mve_type_uint8_t_ptr, \
+ unsigned short*: __ARM_mve_type_uint16_t_ptr, \
+ unsigned int*: __ARM_mve_type_uint32_t_ptr, \
+ unsigned long*: __ARM_mve_type_uint32_t_ptr, \
+ unsigned long long*: __ARM_mve_type_uint64_t_ptr, \
+ default: __ARM_mve_unsupported_type))
+#endif /* MVE Floating point. */
+
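+/* Editorial note: __ARM_mve_typeid classifies an expression into one
+   of the ids above.  The nested default: _Generic catches the plain
+   C types (int, short, float, ...), which may or may not be the same
+   types as the <stdint.h> typedefs on a given target, so scalar
+   literals classify correctly too: __ARM_mve_typeid (1) is
+   __ARM_mve_type_int_n and __ARM_mve_typeid (1.0f) is
+   __ARM_mve_type_fp_n. */
+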
+extern void *__ARM_undef;
+#define __ARM_mve_coerce(param, type) \
+ _Generic(param, type: param, default: *(type *)__ARM_undef)
+#define __ARM_mve_coerce_i_scalar(param, type) \
+ _Generic(param, type: param, const type: param, default: _Generic (param, int8_t: param, int16_t: param, int32_t: param, int64_t: param, uint8_t: param, uint16_t: param, uint32_t: param, uint64_t: param, default: *(type *)__ARM_undef))
+
+#define __ARM_mve_coerce_s8_ptr(param, type) \
+ _Generic(param, type: param, const type: param, default: _Generic (param, signed char*: param, default: *(type *)__ARM_undef))
+#define __ARM_mve_coerce_u8_ptr(param, type) \
+ _Generic(param, type: param, const type: param, default: _Generic (param, unsigned char*: param, default: *(type *)__ARM_undef))
+
+#define __ARM_mve_coerce_s16_ptr(param, type) \
+ _Generic(param, type: param, const type: param, default: _Generic (param, short*: param, default: *(type *)__ARM_undef))
+#define __ARM_mve_coerce_u16_ptr(param, type) \
+ _Generic(param, type: param, const type: param, default: _Generic (param, unsigned short*: param, default: *(type *)__ARM_undef))
+
+#define __ARM_mve_coerce_s32_ptr(param, type) \
+ _Generic(param, type: param, const type: param, default: _Generic (param, int*: param, long*: param, default: *(type *)__ARM_undef))
+#define __ARM_mve_coerce_u32_ptr(param, type) \
+ _Generic(param, type: param, const type: param, default: _Generic (param, unsigned int*: param, unsigned long*: param, default: *(type *)__ARM_undef))
+
+#define __ARM_mve_coerce_s64_ptr(param, type) \
+ _Generic(param, type: param, const type: param, default: _Generic (param, long long*: param, default: *(type *)__ARM_undef))
+#define __ARM_mve_coerce_u64_ptr(param, type) \
+ _Generic(param, type: param, const type: param, default: _Generic (param, unsigned long long*: param, default: *(type *)__ARM_undef))
+
+#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
+#define __ARM_mve_coerce_f_scalar(param, type) \
+ _Generic(param, type: param, const type: param, __fp16: param, default: _Generic (param, _Float16: param, float16_t: param, float32_t: param, default: *(type *)__ARM_undef))
+#define __ARM_mve_coerce_f16_ptr(param, type) \
+ _Generic(param, type: param, const type: param, default: _Generic (param, __fp16*: param, _Float16*: param, default: *(type *)__ARM_undef))
+#define __ARM_mve_coerce_f32_ptr(param, type) \
+ _Generic(param, type: param, const type: param, default: _Generic (param, float*: param, default: *(type *)__ARM_undef))
+#endif
+
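+/* Editorial note: __ARM_undef is declared but, presumably, never
+   defined anywhere.  In the coercion macros above, an argument that
+   matches no association falls through to *(type *) __ARM_undef, an
+   expression of the requested type, so every branch of the outer
+   dispatchers type-checks even though only the selected one is
+   evaluated.  Should an unsupported coercion actually be selected,
+   the undefined symbol turns it into a link-time error rather than
+   silent miscompilation. */
+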
+#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
+
+#define __arm_vst4q(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce_s16_ptr(__p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce_u16_ptr(__p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x4_t]: __arm_vst4q_f16 (__ARM_mve_coerce_f16_ptr(__p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x4_t)), \
+ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x4_t]: __arm_vst4q_f32 (__ARM_mve_coerce_f32_ptr(__p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x4_t)));})
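+
+/* Editorial note: this is the general shape of every C dispatcher in
+   the header: evaluate each operand once into __pN, encode the type
+   ids into an array type, and let _Generic pick the type-suffixed
+   intrinsic.  A sketch of a call it accepts, assuming MVE floating
+   point:
+
+     void store4 (float32_t *p, float32x4x4_t v)
+     {
+       __arm_vst4q (p, v);   // selects __arm_vst4q_f32
+     }
+*/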
+
+#define __arm_vrndxq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vrndq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vrndpq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vrndnq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vrndmq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vrndaq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vdupq_n(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vdupq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vdupq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
+
+#define __arm_vcvtbq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvtbq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
+
+#define __arm_vcvttq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vcvttq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
+
+#define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));})
+
+#define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
+
+#define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
+
+#define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
+
+#define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
+
+#define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
+
+#define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
+
+#define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
+
+#define __arm_vcvtq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
+
+#define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vcvtq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_n_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_n_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_n_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_n_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vorrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vorrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vorrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vorrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)));})
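+
+/* Editorial note: the __ARM_mve_type_int_n / __ARM_mve_type_fp_n rows
+   above give __arm_vaddq a vector-plus-scalar form: integer scalars
+   classify as int_n and floating scalars as fp_n (coerced through
+   double, the type of an unsuffixed floating constant), dispatching
+   to the _n_ broadcast variants.  Sketch:
+
+     float32x4_t bias (float32x4_t v)
+     {
+       return __arm_vaddq (v, 1.5f);   // selects __arm_vaddq_n_f32
+     }
+*/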
+
+#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpeqq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpeqq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpeqq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double), p2));})
+
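+/* The _m (merging/predicated) dispatcher above forwards its third
+   argument (an mve_pred16_t predicate) unchanged as p2; only the two
+   operand types drive the _Generic selection.  Sketch, with a and b as in
+   the earlier example:
+
+     mve_pred16_t p = __arm_vctp16q (5);           /* predicate first 5 lanes */
+     mve_pred16_t r = __arm_vcmpeqq_m (a, b, p);   /* selects __arm_vcmpeqq_m_s16 */
+*/
+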
+#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)));})
+
+#define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)));})
+
+#define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)));})
+
+#define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vcmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vcmulq_rot180(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vcmulq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vcmulq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
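+/* The complex multiplies (__arm_vcmulq and its _rot90/_rot180/_rot270
+   variants above) are float-only; the suffix names the rotation applied
+   to the second operand in the complex plane before the multiply,
+   mirroring the rotate field of the underlying VCMUL instruction.  */
+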
+#define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vmaxnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vmaxnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vmaxnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vmaxnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vminnmaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vminnmavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), p1), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), p1));})
+
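+/* Dispatchers with a single typed operand, such as __arm_vbrsrq above,
+   key the _Generic selection on p0 alone and forward p1 as-is.  The
+   shift-by-immediate macros below (__arm_vshlq_n, __arm_vrshrq, ...) use
+   the same shape, with the extra constraint that p1 must be a constant in
+   the immediate range for the element width.  */
+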
+#define __arm_vminnmq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vminnmvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
+
+#define __arm_vshllbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
+
+#define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
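+/* Note the signedness asymmetry in the vector-shift selections above:
+   even for unsigned data the per-lane shift vector is signed (int8x16_t
+   for the u8 form, and so on), since a negative element shifts that lane
+   right.  __arm_vqshlq and __arm_vqrshlq below follow the same rule.  */
+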
+#define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));})
+
+#define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+#define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+#define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
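+/* The vqmovn family narrows each double-width element with saturation
+   into the even-numbered (_b, bottom) or odd-numbered (_t, top) lanes of
+   p0, leaving the other lanes unchanged; the vqmovun forms
+   (__arm_vqmovuntq above, __arm_vqmovunbq below) saturate a signed source
+   into an unsigned destination.  */
+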
+#define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
+
+#define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
+
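+/* The _poly widening multiplies treat their equal-width unsigned operands
+   as polynomials over GF(2): vmullbq multiplies the even-numbered lanes
+   and vmulltq the odd-numbered ones, each producing a double-width
+   unsigned result (uint16x8_t from p8, uint32x4_t from p16).  */
+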
+#define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
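+
+/* Illustrative sketch, not part of the shipped header: the vmovntq/vmovnbq
+   pair narrows each element of the second operand into the top (odd) or
+   bottom (even) lanes of the first, leaving the other lanes untouched:
+
+     int8x16_t acc;                        // existing narrow vector
+     int16x8_t wide;
+     acc = __arm_vmovnbq (acc, wide);      // even lanes <- truncated wide
+     acc = __arm_vmovntq (acc, wide);      // odd  lanes <- truncated wide
+ */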
+
+#define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
+
+#define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
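+
+/* Illustrative sketch, not part of the shipped header: vshlcq shifts the
+   whole vector left by an immediate (1..32), with bits entering at the
+   bottom from, and leaving at the top into, a 32-bit carry variable that
+   is passed by address:
+
+     uint32x4_t v;
+     uint32_t carry = 0;
+     v = __arm_vshlcq (v, &carry, 4);      // picks __arm_vshlcq_u32
+ */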
+
+#define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
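+
+/* Illustrative sketch, not part of the shipped header: three-operand macros
+   dispatch on all three typeids; vmlaq multiplies a vector by a scalar and
+   accumulates into another vector:
+
+     int16x8_t add, mul;
+     int16x8_t r = __arm_vmlaq (add, mul, 3);   // r[i] = add[i] + mul[i] * 3,
+                                                // picks __arm_vmlaq_n_s16
+ */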
+
+#define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
+
+#define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __p1, p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __p1, p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
+
+#define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
+
+#define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
+
+#define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+#define __arm_vqdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+#define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+#define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+#define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+#define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovlbq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovlbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+#define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovltq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+#define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vcvtaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtaq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtaq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vcvtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vcvtq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_n_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_n_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtq_m_n_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtq_m_n_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcvtq_m_n_f16_s16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcvtq_m_n_f32_s32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcvtq_m_n_f16_u16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcvtq_m_n_f32_u32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
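+
+/* Illustrative sketch, not part of the shipped header: the _m (merging)
+   forms take an inactive vector and an mve_pred16_t; lanes whose predicate
+   bits are clear keep the inactive value.  For the _n conversions, p2 is
+   the number of fractional bits of the fixed-point format:
+
+     float32x4_t f;
+     int32x4_t inactive;
+     mve_pred16_t p = 0x00ff;              // enable the low two lanes
+     int32x4_t r = __arm_vcvtq_m_n (inactive, f, 8, p);
+     // picks __arm_vcvtq_m_n_s32_f32: roughly f * 2^8, converted lane-wise
+ */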
+
+#define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vcmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
+
+#define __arm_vcmlaq_rot180(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
+
+#define __arm_vcmlaq_rot270(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
+
+#define __arm_vcmlaq_rot90(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
+
+#define __arm_vrndxq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndxq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndxq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vrndq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vrndpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndpq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndpq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgtq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpleq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpleq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpleq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double), p2));})
+
+#define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpltq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpltq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpltq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double), p2));})
+
+#define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpneq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpneq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpneq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double), p2));})
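+
+/* Illustrative sketch, not part of the shipped header: the predicated
+   comparisons return an mve_pred16_t rather than a vector; only lanes
+   enabled by p2 are compared, the rest read as false.  Float scalars go
+   through the _fp_n row and are coerced via double, so 0.0 and 0.0f
+   dispatch identically:
+
+     float32x4_t v;
+     mve_pred16_t p = 0xffff;
+     mve_pred16_t ne = __arm_vcmpneq_m (v, 0.0f, p);   // __arm_vcmpneq_m_n_f32
+ */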
+
+#define __arm_vcvtbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvtbq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvtbq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vcvttq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float16x8_t]: __arm_vcvttq_m_f32_f16 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float32x4_t]: __arm_vcvttq_m_f16_f32 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vcvtmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtmq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtmq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtmq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtmq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vcvtnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtnq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtnq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtnq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtnq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vcvtpq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtpq_m_s16_f16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtpq_m_s32_f32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcvtpq_m_u16_f16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcvtpq_m_u32_f32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), (int8_t) __p1, p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), (int16_t) __p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), (int32_t) __p1, p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint8_t) __p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint16_t) __p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vdupq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), (float16_t) __p1, p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vdupq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), (float32_t) __p1, p2));})
+
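+/* A scalar argument is classified as __ARM_mve_type_int_n or
+   __ARM_mve_type_fp_n, steering dispatch to the _n_ variant that
+   duplicates the scalar across lanes.  Sketch (hypothetical names):
+
+     float32x4_t acc, v;
+     acc = __arm_vfmaq (acc, v, 2.0f);   resolves to __arm_vfmaq_n_f32
+     acc = __arm_vfmaq (acc, v, v);      resolves to __arm_vfmaq_f32
+*/
+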
+#define __arm_vfmaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmaq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmaq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
+
+#define __arm_vfmsq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
+
+#define __arm_vfmasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double)));})
+
+#define __arm_vmaxnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vmaxnmavq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vmaxnmvq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vmaxnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmavq_p_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmavq_p_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vmaxnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vmaxnmvq_p_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vmaxnmvq_p_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vminnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vminnmavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmavq_p_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmavq_p_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vminnmvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vminnmvq_p_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vminnmvq_p_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vrndnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndnq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndnq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vrndaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vrndmq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrndmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrndmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev64q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev64q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrev64q_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev64q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev64q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vrev64q_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev32q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev32q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev32q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vrev32q_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2));})
+
+#define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vpselq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vpselq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vpselq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int64x2_t]: __arm_vpselq_s64 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vpselq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vpselq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vpselq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vpselq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
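+/* The unpredicated comparisons below return an mve_pred16_t lane mask;
+   vector/vector and vector/scalar (_n_) forms share one macro.  */
+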
+#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double)));})
+
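+/* Narrowing shifts: p0 supplies the destination vector, whose elements
+   are half the width of those in p1, and p2 is the immediate shift
+   count.  The ..b ("bottom") forms write the even-numbered destination
+   lanes and the ..t ("top") forms the odd-numbered lanes.  */
+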
+#define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));})
+
+#define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vnegq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vnegq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vcmpgeq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce_f_scalar(__p1, double), p2), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
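+/* The four-argument _m macros are merging-predicated: lanes enabled in
+   the mve_pred16_t argument p3 take the result computed from p1 and p2,
+   disabled lanes keep the corresponding value of the inactive vector
+   p0.  Sketch (hypothetical names; one predicate bit per byte, so
+   0x00ff enables the two low lanes of an int32x4_t):
+
+     int32x4_t inactive, a, b;
+     a = __arm_vaddq_m (inactive, a, b, 0x00ff);
+*/
+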
+#define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+#define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbrsrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbrsrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbrsrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbrsrq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbrsrq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), p2, p3));})
+
+#define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
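+/* vcmlaq/vcmulq interpret adjacent lane pairs as complex values; the
+   _rot90/_rot180/_rot270 variants multiply with one operand rotated by
+   the given angle in the complex plane.  Floating-point types only.  */
+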
+#define __arm_vcmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vcmlaq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vcmlaq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vcmlaq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmlaq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmlaq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vcmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vcmulq_rot180_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vcmulq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vcmulq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vfmaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmaq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+#define __arm_vfmasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vfmasq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+#define __arm_vfmsq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vmaxnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vminnmq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
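+/* Editor's note, a hypothetical usage sketch rather than header content: the
+   _m macros dispatch on all three operand typeids, so __arm_vmulq_m routes a
+   scalar multiplicand to the _n variants.
+
+     int32x4_t a, b, inactive;   // assumed initialized elsewhere
+     mve_pred16_t p;             // assumed predicate
+     int32x4_t r0 = __arm_vmulq_m (inactive, a, b, p); // -> __arm_vmulq_m_s32
+     int32x4_t r1 = __arm_vmulq_m (inactive, a, 3, p); // -> __arm_vmulq_m_n_s32
+*/
+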
+#define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+#define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_m_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_m_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vld1q(p0) (\
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *)), \
+ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *)), \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *)), \
+ int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *)), \
+ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *))))
+
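+/* Editor's note, an illustrative sketch only: __arm_vld1q infers the vector
+   type from the pointer's element type, e.g. a hypothetical int16_t buffer
+   loads as int16x8_t.
+
+     int16_t buf[8];                   // assumed data
+     int16x8_t v = __arm_vld1q (buf);  // -> __arm_vld1q_s16
+*/
+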
+#define __arm_vld1q_z(p0,p1) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), p1), \
+ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), p1), \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1), \
+ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), p1), \
+ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), p1), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1), \
+ int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld1q_z_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), p1), \
+ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld1q_z_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), p1)))
+
+#define __arm_vld2q(p0) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *)), \
+ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *)), \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *)), \
+ int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld2q_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *)), \
+ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld2q_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *))))
+
+#define __arm_vld4q(p0) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *)), \
+ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *)), \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *)), \
+ int (*)[__ARM_mve_type_float16_t_ptr]: __arm_vld4q_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *)), \
+ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vld4q_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *))))
+
+#define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t)));})
+
+#define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+#define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t)));})
+
+#define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
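+/* Editor's note, sketch only: the vldrhq gather macros dispatch on both the
+   base pointer and the offset vector, so an int16_t base widens to int32x4_t
+   when paired with uint32x4_t offsets.  Hypothetical example:
+
+     int16_t *base;       // assumed valid
+     uint16x8_t offs;     // per-lane element offsets
+     int16x8_t g = __arm_vldrhq_gather_offset (base, offs);  // -> ..._s16
+*/
+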
+#define __arm_vldrwq_gather_offset(p0,p1) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1), \
+ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), p1)))
+
+#define __arm_vldrwq_gather_offset_z(p0,p1,p2) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1, p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1, p2), \
+ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_offset_z_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), p1, p2)))
+
+#define __arm_vldrwq_gather_shifted_offset(p0,p1) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1), \
+ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), p1)))
+
+#define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ( \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1, p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1, p2), \
+ int (*)[__ARM_mve_type_float32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), p1, p2)))
+
+#define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_p_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_p_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
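+/* Editor's note, assumed usage rather than header content: __arm_vst1q_p
+   writes only the lanes whose predicate bits are set; combined with
+   __arm_vctp32q this gives a tail-predicated partial store.
+
+     float32_t *dst;  float32x4_t v;      // assumed
+     mve_pred16_t p = __arm_vctp32q (3);  // first three lanes active
+     __arm_vst1q_p (dst, v, p);           // -> __arm_vst1q_p_f32
+*/
+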
+#define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8x2_t]: __arm_vst2q_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8x2_t)), \
+ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4x2_t]: __arm_vst2q_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4x2_t)));})
+
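+/* Editor's note, illustrative only: __arm_vst2q takes the x2 tuple types from
+   arm_mve_types.h and stores the two registers interleaved.
+
+     int8_t *dst;  int8x16x2_t pair;  // assumed
+     __arm_vst2q (dst, pair);         // -> __arm_vst2q_s8
+*/
+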
+#define __arm_vst1q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vst1q_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vst1q_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vstrhq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t)));})
+
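+/* Editor's note, sketch only: __arm_vstrhq also accepts vectors of 32-bit
+   elements, truncating each lane to 16 bits on store.
+
+     int16_t *dst;  int32x4_t w;  // assumed
+     __arm_vstrhq (dst, w);       // -> __arm_vstrhq_s32 (truncating)
+*/
+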
+#define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_float16x8_t]: __arm_vstrhq_p_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, float16x8_t), p2));})
+
+#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_p_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
+
+#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_offset_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
+
+#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3));})
+
+#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_float16x8_t]: __arm_vstrhq_scatter_shifted_offset_f16 (__ARM_mve_coerce_f16_ptr(p0, float16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, float16x8_t)));})
+
+#define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_p_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vstrwq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __ARM_mve_coerce(__p1, float32x4_t)));})
+
+#define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));})
+
+#define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32(p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32(p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_p_f32(p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_f32 (__ARM_mve_coerce_f32_ptr(__p0, float32_t *), p1, __ARM_mve_coerce(__p2, float32x4_t)));})
+
+#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_offset_p_f32 (__ARM_mve_coerce_f32_ptr(__p0, float32_t *), p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t)));})
+
+#define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_f32 (__ARM_mve_coerce_f32_ptr(p0, float32_t *), __p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 (), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vuninitializedq_f16 (), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vuninitializedq_f32 ());})
+
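+/* Editor's note, sketch only: __arm_vuninitializedq dispatches on the
+   declared type of its argument and ignores its value; it is typically
+   written as a self-reference to seed an "inactive" operand.
+
+     int8x16_t tmp = __arm_vuninitializedq (tmp);  // -> ..._s8
+*/
+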
+#define __arm_vreinterpretq_f16(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_f16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
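+/* Editor's note, sketch only: the vreinterpretq family is a bit-for-bit
+   reinterpretation between 128-bit vector types; no lane values are
+   converted.
+
+     uint32x4_t u;                                 // assumed initialized
+     float16x8_t h = __arm_vreinterpretq_f16 (u);  // -> ..._f16_u32
+*/
+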
+#define __arm_vreinterpretq_f32(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_f32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_f32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_f32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_f32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_f32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_f32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_f32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_f32_f16 (__ARM_mve_coerce(__p0, float16x8_t)));})
+
+#define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_s8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_s8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u16_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u16_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u32_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u32_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u64_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u64_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vreinterpretq_u8_f16 (__ARM_mve_coerce(__p0, float16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vreinterpretq_u8_f32 (__ARM_mve_coerce(__p0, float32x4_t)));})
+
+#define __arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t)));})
+
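+/* Editor's note, illustrative and with an assumed immediate offset: the _wb
+   scatter forms take the base-address vector by pointer and write the
+   offset-adjusted addresses back through it.
+
+     uint32x4_t addrs;  int32x4_t v;               // assumed initialized
+     __arm_vstrwq_scatter_base_wb (&addrs, 8, v);  // -> ..._wb_s32, addrs updated
+*/
+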
+#define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_p_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
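+/* Binary "_x" operations also accept a scalar second operand, selecting
+   the "_n" forms: integer scalars are coerced to int and floating-point
+   scalars to double, matching the default argument promotions.  */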
+#define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vaddq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2, p3));})
+
+#define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vcmulq_rot180_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vcmulq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vcmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vcvtq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vcvtq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_n_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_n_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_n_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_n_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+  int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+  int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+  int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vmaxnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vminnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vmulq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+#define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2));})
+
+#define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vrndaq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vrndmq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vrndnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vrndpq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vrndq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vrndxq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define __arm_vsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce_f_scalar(__p2, double), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_fp_n]: __arm_vsubq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce_f_scalar(__p2, double), p3));})
+
+#define __arm_vcmulq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
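+/* Lane access: vgetq_lane (vector, lane) extracts a scalar element and
+   vsetq_lane (scalar, vector, lane) returns the vector with that lane
+   replaced; the 64-bit element types are included here.  */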
+#define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vgetq_lane_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vgetq_lane_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vgetq_lane_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vgetq_lane_s64 (__ARM_mve_coerce(__p0, int64x2_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vgetq_lane_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vgetq_lane_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vgetq_lane_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vgetq_lane_u64 (__ARM_mve_coerce(__p0, uint64x2_t), p1), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vgetq_lane_f16 (__ARM_mve_coerce(__p0, float16x8_t), p1), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vgetq_lane_f32 (__ARM_mve_coerce(__p0, float32x4_t), p1));})
+
+#define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int64x2_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint64x2_t), p2), \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float16x8_t]: __arm_vsetq_lane_f16 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_fp_n][__ARM_mve_type_float32x4_t]: __arm_vsetq_lane_f32 (__ARM_mve_coerce_f_scalar(__p0, double), __ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#else /* MVE Integer. */
+
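+/* Integer-only MVE: the same polymorphic wrappers as in the branch
+   above, with the floating-point selections omitted.  */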
+#define __arm_vstrwq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+#define __arm_vstrwq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_wb_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
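+/* vst4q stores a structure of four vectors; its _Generic key is
+   two-dimensional, dispatching on both the pointer element type P0 and
+   the x4 structure type P1.  */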
+#define __arm_vst4q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x4_t]: __arm_vst4q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x4_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x4_t]: __arm_vst4q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x4_t)), \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x4_t]: __arm_vst4q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x4_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x4_t]: __arm_vst4q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x4_t]: __arm_vst4q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x4_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x4_t]: __arm_vst4q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x4_t)));})
+
+#define __arm_vabsq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
+
+#define __arm_vclsq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
+
+#define __arm_vclzq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
+
+#define __arm_vnegq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
+
+#define __arm_vmovlbq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
+
+#define __arm_vmovltq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
+
+#define __arm_vmvnq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
+
+#define __arm_vrev16q(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)));})
+
+#define __arm_vrev32q(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)));})
+
+#define __arm_vrev64q(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
+
+#define __arm_vqabsq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqabsq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqabsq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqabsq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
+
+#define __arm_vqnegq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqnegq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqnegq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqnegq_s32 (__ARM_mve_coerce(__p0, int32x4_t)));})
+
+#define __arm_vshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
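+/* Vector compares return an mve_pred16_t lane mask and dispatch both
+   vector/vector and vector/scalar ("_n") forms.  */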
+#define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
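+/* Shifts by a vector take a signed shift-count vector of matching
+   element width even for unsigned data; negative counts shift right.  */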
+#define __arm_vshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+#define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));})
+
+#define __arm_vrshrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
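+/* Scalar shift counts are recognised as __ARM_mve_type_int_n and routed to
+   the _n variants, with __ARM_mve_coerce_i_scalar narrowing the argument to
+   a plain int.  Illustrative use (arbitrary names):
+
+     uint16x8_t v;
+     int32_t sh;
+     uint16x8_t r = __arm_vqrshlq (v, sh);   selects __arm_vqrshlq_n_u16
+*/
+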
+#define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+#define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vorrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vorrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vorrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vorrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
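+/* Note that __arm_vorrq (and __arm_vbicq further down) only provide _n
+   forms for 16- and 32-bit element types, mirroring the immediate
+   encodings available to the underlying VORR/VBIC instructions; 8-bit
+   vectors must use the vector-by-vector form.  */
+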
+#define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
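+/* __arm_vmullbq_int and __arm_vmulltq_int multiply the bottom (even) and
+   top (odd) numbered elements respectively, producing a result vector of
+   twice the element width, e.g. (arbitrary names):
+
+     int16x8_t a, b;
+     int32x4_t lo = __arm_vmullbq_int (a, b);
+*/
+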
+#define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
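+/* __arm_vminaq (and __arm_vmaxaq below) explain the mixed signedness in
+   the dispatch table: they combine an unsigned accumulator with the
+   absolute value of a signed operand, so the first argument is a uint
+   vector while the second is the matching int vector.  */
+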
+#define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
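+/* The _rot90/_rot270 halving complex additions treat adjacent element
+   pairs as complex values, add the second operand rotated by 90 or 270
+   degrees, and halve the results; only signed element types are
+   provided.  */
+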
+#define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vbicq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar (__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
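+/* The vector comparisons return an mve_pred16_t lane-predicate mask rather
+   than a vector, suitable for the predicated _m operations later in this
+   header, e.g. (arbitrary names):
+
+     uint32x4_t v;
+     mve_pred16_t p = __arm_vcmpeqq (v, 0);   selects __arm_vcmpeqq_n_u32
+*/
+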
+#define __arm_vqmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vqmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
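+/* For the saturating narrowing moves the first argument doubles as the
+   destination: __arm_vqmovntq writes the narrowed lanes into the top
+   (odd) half-width elements of p0 and __arm_vqmovnbq into the bottom
+   (even) ones, leaving the other half untouched.  */
+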
+#define __arm_vmulltq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
+
+#define __arm_vmullbq_poly(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_p8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_p16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)));})
+
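+/* The polynomial multiplies carry p8/p16 data in uint8x16_t/uint16x8_t
+   vectors, which is why only unsigned mappings appear above; as with the
+   integer long multiplies, the result has twice the element width.  */
+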
+#define __arm_vmovntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmovnbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmlaldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vqmovuntq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vshlltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
+
+#define __arm_vshllbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1));})
+
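+/* In __arm_vshlltq/__arm_vshllbq the shift amount p1 is forwarded
+   untouched instead of being captured with __typeof, keeping it an
+   integer constant expression for intrinsics that require an immediate;
+   the top/bottom input lanes are widened before the left shift.  */
+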
+#define __arm_vqmovunbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vqdmulltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vqdmullbq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
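+/* __arm_vqdmulltq/__arm_vqdmullbq compute the saturating doubling long
+   product 2*p0*p1 on the top/bottom lanes, again widening the result,
+   e.g. int16x8_t operands yield int32x4_t (arbitrary names):
+
+     int16x8_t a, b;
+     int32x4_t r = __arm_vqdmullbq (a, b);
+*/
+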
+#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+#define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+#define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+#define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
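+/* The _m (merging/predicated) variants thread an extra mve_pred16_t
+   argument (p2 here) straight through to the intrinsic, so only the data
+   operands participate in _Generic dispatch; inactive lanes do not take
+   part in the comparison.  */
+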
+#define __arm_vshlcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
+
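+/* __arm_vshlcq performs a whole-vector shift left with carry: p1 is a
+   pointer to the 32-bit carry value updated across calls and p2 the
+   shift amount, so neither needs a typeid dimension of its own.  */
+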
+#define __arm_vcmpeqq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpeqq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
+
+#define __arm_vbicq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vbicq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vbicq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
+
+#define __arm_vqrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vqrshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
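+/* __arm_vqrshrnbq and __arm_vqrshrunbq right-shift the wide operand p1 by
+   the immediate p2 with rounding, saturate, and narrow into the bottom
+   lanes of p0; the unsigned-saturating forms produce unsigned results
+   from signed inputs, hence the uint/int pairings above.  */
+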
+#define __arm_vqrdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
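+/* Ternary intrinsics extend the same scheme with a third __typeof capture
+   and a third array dimension in the _Generic controlling expression, as
+   in __arm_vqrdmlsdhxq below.  */
+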
+#define __arm_vqrdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vqrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
+
+#define __arm_vqshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
+
+#define __arm_vrev64q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev64q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev64q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrev64q_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev64q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev64q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrev64q_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vrshlq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __p1, p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __p1, p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __p1, p2));})
+
+#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
+
+#define __arm_vsliq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
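+/* Illustrative usage sketch, not part of the upstream header:
+   shift-left-and-insert, e.g.
+     uint8x16_t r = __arm_vsliq (dst, src, 3);      // dst, src: uint8x16_t
+   resolves to __arm_vsliq_n_u8; the immediate shift count lies in [0,7]
+   for 8-bit lanes.  */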
+
+#define __arm_vsriq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vqrdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+#define __arm_vqdmlashq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+#define __arm_vqrdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
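+/* Illustrative usage sketch, not part of the upstream header: the third
+   operand is matched as __ARM_mve_type_int_n and coerced, so both of
+     int16x8_t r = __arm_vqrdmlahq (acc, v, 3);
+     int16x8_t s = __arm_vqrdmlahq (acc, v, scale); // scale: int16_t
+   resolve to __arm_vqrdmlahq_n_s16.  */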
+
+#define __arm_vqrdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vqrdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vqnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vqdmlsdhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vclsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vclzq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vclzq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vclzq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vclzq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vclzq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vclzq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vclzq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vcmpgeq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgeq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
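+/* Illustrative usage sketch, not part of the upstream header: a predicated
+   compare yielding a lane predicate, with a vector or scalar right-hand side:
+     mve_pred16_t ge  = __arm_vcmpgeq_m (a, b, p);  // a, b: int32x4_t
+     mve_pred16_t ge0 = __arm_vcmpgeq_m (a, 0, p);  // scalar form, _n_s32
+   Only signed element types are provided here; the unsigned >= compare is
+   the separate vcmpcsq family.  */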
+
+#define __arm_vcmpgtq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpgtq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
+
+#define __arm_vcmpleq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpleq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
+
+#define __arm_vcmpltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpltq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
+
+#define __arm_vcmpneq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpneq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
+
+#define __arm_vdupq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), (int8_t) __p1, p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), (int16_t) __p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), (int32_t) __p1, p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint8_t) __p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint16_t) __p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2));})
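+/* Illustrative usage sketch, not part of the upstream header: predicated
+   broadcast; lanes where the predicate is clear keep the first argument:
+     uint32x4_t r = __arm_vdupq_m (inactive, 42u, p);
+   resolves to __arm_vdupq_m_n_u32 with the scalar cast to uint32_t.  */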
+
+#define __arm_vmaxaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vmlaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+#define __arm_vmlasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+#define __arm_vnegq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vnegq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vnegq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vnegq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vpselq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vpselq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vpselq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vpselq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int64x2_t]: __arm_vpselq_s64 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int64x2_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vpselq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vpselq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vpselq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint64x2_t]: __arm_vpselq_u64 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint64x2_t), p2));})
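+/* Illustrative usage sketch, not part of the upstream header: lane-wise
+   select under a predicate, and the one entry here that also covers the
+   64-bit element types:
+     int64x2_t r = __arm_vpselq (a, b, p);          // a, b: int64x2_t
+   takes each lane from a where the predicate bit is set, else from b.  */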
+
+#define __arm_vqdmlahq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int)));})
+
+#define __arm_vqdmlsdhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vqdmladhxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vqdmladhq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vminaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_m_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_m_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_m_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vmovlbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovlbq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovlbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+#define __arm_vmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
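+/* Illustrative usage sketch, not part of the upstream header: shift right
+   and narrow into the bottom (even) lanes of the destination:
+     uint8x16_t r = __arm_vshrnbq (lo, wide, 2);    // wide: uint16x8_t
+   resolves to __arm_vshrnbq_n_u16; the vshrntq family writes the top
+   lanes instead.  */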
+
+#define __arm_vrshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vrev32q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev32q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrev32q_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev32q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrev32q_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+#define __arm_vqshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vrev16q_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrev16q_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrev16q_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2));})
+
+#define __arm_vqshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vqrshruntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vqrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vqshrnbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vqmovuntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovuntq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovuntq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vqmovntq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovntq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovntq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovntq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovntq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vqmovnbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovnbq_m_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovnbq_m_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqmovnbq_m_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqmovnbq_m_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vmovltq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vmovltq_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vmovltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vmovltq_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vmovltq_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+#define __arm_vqmovunbq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqmovunbq_m_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqmovunbq_m_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
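+/* Illustrative usage sketch, not part of the upstream header: the
+   four-argument predicated subtract dispatches on all three value
+   operands, so both
+     int32x4_t d = __arm_vsubq_m (inactive, a, b, p);  // vector - vector
+     int32x4_t e = __arm_vsubq_m (inactive, a, 1, p);  // vector - scalar, _n_s32
+   are accepted; inactive supplies the result for predicated-off lanes.  */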
+
+#define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_p_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_p_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_p_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
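+/* Illustrative usage sketch, not part of the upstream header: predicated
+   absolute-difference-and-accumulate into a scalar; the accumulator is
+   passed through the dispatch unchanged:
+     uint32_t acc2 = __arm_vabavq_p (acc, a, b, p); // a, b: uint8x16_t
+   resolves to __arm_vabavq_p_u8.  */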
+
+#define __arm_vabdq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vandq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vbicq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vbrsrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbrsrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbrsrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbrsrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __p2, p3));})
+
+#define __arm_vcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_veorq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vornq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vorrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vmulq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vstrwq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+#define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vstrwq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_base_p_s32 (p0, p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
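+
+/* In the two vstrwq_scatter_base macros above, the base vector p0 and the
+   immediate offset p1 take no part in the _Generic selection; only the
+   data vector __p2 is inspected and coerced.  */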
+
+#define __arm_vld1q(p0) (\
+ _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *)), \
+ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *)), \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *))))
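+
+/* A resolution sketch (assuming an MVE-enabled target): given
+     int16_t *ptr;
+   the expression __arm_vld1q (ptr) has typeid __ARM_mve_type_int16_t_ptr,
+   so the _Generic above selects __arm_vld1q_s16 and the result is an
+   int16x8_t.  */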
+
+#define __arm_vldrhq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vldrhq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_offset_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_offset_z_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vldrhq_gather_shifted_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vldrhq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrhq_gather_shifted_offset_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrhq_gather_shifted_offset_z_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
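+
+/* The _z gather variants above take a predicate as their final argument;
+   under MVE zeroing predication, result lanes whose predicate bits are
+   clear are set to zero rather than loaded.  */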
+
+#define __arm_vldrwq_gather_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+#define __arm_vldrwq_gather_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_offset_z_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1, p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_offset_z_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+#define __arm_vldrwq_gather_shifted_offset(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+#define __arm_vldrwq_gather_shifted_offset_z(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1, p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vldrwq_gather_shifted_offset_z_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+#define __arm_vst1q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vst1q_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vst1q_p_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vst1q_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vst1q_p_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vst1q_p_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vst1q_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vst1q_p_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vst2q(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16x2_t]: __arm_vst2q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16x2_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8x2_t]: __arm_vst2q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8x2_t)), \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4x2_t]: __arm_vst2q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4x2_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16x2_t]: __arm_vst2q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16x2_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8x2_t]: __arm_vst2q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8x2_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4x2_t]: __arm_vst2q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4x2_t)));})
+
+#define __arm_vstrhq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vstrhq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrhq_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrhq_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vstrhq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vstrhq_scatter_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+#define __arm_vstrhq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_p_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vstrhq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrhq_scatter_shifted_offset_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrhq_scatter_shifted_offset_s32 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrhq_scatter_shifted_offset_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrhq_scatter_shifted_offset_u32 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+#define __arm_vstrwq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vstrwq_p(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_p_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_p_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
+
+#define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
+
+#define __arm_vstrwq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+#define __arm_vstrwq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_offset_p_s32 (__ARM_mve_coerce_s32_ptr(__p0, int32_t *), p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_offset_p_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vstrwq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+#define __arm_vstrwq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), __p1, __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_shifted_offset_p_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), __p1, __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vuninitializedq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vuninitializedq_s8 (), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vuninitializedq_s16 (), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vuninitializedq_s32 (), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vuninitializedq_s64 (), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vuninitializedq_u8 (), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vuninitializedq_u16 (), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vuninitializedq_u32 (), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vuninitializedq_u64 ());})
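+
+/* __arm_vuninitializedq uses its argument only to choose a result type:
+   each selected __arm_vuninitializedq_* intrinsic is called with no
+   arguments, and the operand's value is never read.  */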
+
+#define __arm_vreinterpretq_s16(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s16_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+
+#define __arm_vreinterpretq_s32(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s32_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+
+#define __arm_vreinterpretq_s64(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_s64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s64_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+
+#define __arm_vreinterpretq_s8(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_s8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_s8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_s8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_s8_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_s8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_s8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_s8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+
+#define __arm_vreinterpretq_u16(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u16_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u16_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u16_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u16_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u16_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u16_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u16_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+
+#define __arm_vreinterpretq_u32(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u32_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u32_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u32_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u32_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u32_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u32_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u32_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+
+#define __arm_vreinterpretq_u64(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u64_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u64_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u64_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vreinterpretq_u64_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u64_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u64_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u64_s64 (__ARM_mve_coerce(__p0, int64x2_t)));})
+
+#define __arm_vreinterpretq_u8(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vreinterpretq_u8_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vreinterpretq_u8_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vreinterpretq_u8_s64 (__ARM_mve_coerce(__p0, int64x2_t)), \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vreinterpretq_u8_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vreinterpretq_u8_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
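+
+/* The __arm_vreinterpretq_* selections above retype the same 128-bit
+   register contents; no value conversion is performed, only a change of
+   element interpretation.  */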
+
+#define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
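+
+/* When the second operand of __arm_vaddq_x is a plain integer, its typeid
+   is __ARM_mve_type_int_n and the scalar-broadcast _n variant is chosen;
+   __arm_vmulq_x below follows the same pattern.  */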
+
+#define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+#define __arm_vmovltq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+#define __arm_vmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vmullbq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vmullbq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
+
+#define __arm_vmulltq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vmulltq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
+
+#define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
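+/* For illustration: a plain C integer maps to __ARM_mve_type_int_n, so
+   __arm_vmulq_x (v, 3, p) with a uint8x16_t v and an mve_pred16_t p
+   selects __arm_vmulq_x_n_u8, which broadcasts the scalar to every
+   lane.  */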
+
+#define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+#define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vld1q_z(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld1q_z_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), p1), \
+ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld1q_z_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *), p1), \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld1q_z_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *), p1), \
+ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld1q_z_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), p1), \
+ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld1q_z_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *), p1), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld1q_z_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *), p1)))
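+/* vld1q_z is a zeroing-predicated contiguous load: lanes whose predicate
+   bit is clear are set to zero rather than left undefined.  */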
+
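+/* vld2q and vld4q de-interleave as they load: two (or four) vectors'
+   worth of consecutive elements are read and returned as a two- (or
+   four-) vector structure, with every second (or fourth) element
+   gathered into each member vector.  */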
+#define __arm_vld2q(p0) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld2q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *)), \
+ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld2q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *)), \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld2q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld2q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld2q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld2q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *))))
+
+#define __arm_vld4q(p0) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr]: __arm_vld4q_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *)), \
+ int (*)[__ARM_mve_type_int16_t_ptr]: __arm_vld4q_s16 (__ARM_mve_coerce_s16_ptr(p0, int16_t *)), \
+ int (*)[__ARM_mve_type_int32_t_ptr]: __arm_vld4q_s32 (__ARM_mve_coerce_s32_ptr(p0, int32_t *)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr]: __arm_vld4q_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *)), \
+ int (*)[__ARM_mve_type_uint16_t_ptr]: __arm_vld4q_u16 (__ARM_mve_coerce_u16_ptr(p0, uint16_t *)), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vld4q_u32 (__ARM_mve_coerce_u32_ptr(p0, uint32_t *))))
+
+#define __arm_vsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+#define __arm_vgetq_lane(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vgetq_lane_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vgetq_lane_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vgetq_lane_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vgetq_lane_s64 (__ARM_mve_coerce(__p0, int64x2_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vgetq_lane_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vgetq_lane_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vgetq_lane_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vgetq_lane_u64 (__ARM_mve_coerce(__p0, uint64x2_t), p1));})
+
+#define __arm_vsetq_lane(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vsetq_lane_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vsetq_lane_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vsetq_lane_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int64x2_t]: __arm_vsetq_lane_s64 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int64x2_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vsetq_lane_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vsetq_lane_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vsetq_lane_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint64x2_t]: __arm_vsetq_lane_u64 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint64x2_t), p2));})
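+/* For the lane accessors above, the final argument is the lane index and
+   must be a compile-time constant in range for the vector type (0..7 for
+   int16x8_t, for example); __arm_vgetq_lane (v, 0) with int16x8_t v
+   selects __arm_vgetq_lane_s16.  */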
+
+#endif /* MVE Integer. */
+
+#define __arm_vshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
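+/* vshrntq (and the rounding vrshrntq below) shift each wide element of
+   the second operand right by an immediate, narrow the result, and write
+   it into the odd ('top') lanes of the first operand; the even lanes
+   pass through unchanged.  */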
+
+#define __arm_vrshrntq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vmvnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vrev16q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2));})
+
+#define __arm_vrhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vrmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vrshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vrshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vshllbq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
+
+#define __arm_vshlltq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
+
+#define __arm_vshlq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
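+/* The incrementing/decrementing dup macros below dispatch on their first
+   operand: an integer start value selects the _n form, while a uint32_t
+   pointer selects the _wb (write-back) form, which also stores the next
+   start value back through the pointer.  */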
+#define __arm_vdwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u8 ((uint32_t) __p1, p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u8 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+#define __arm_vdwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u16 ((uint32_t) __p1, p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u16 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+#define __arm_vdwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_x_n_u32 ((uint32_t) __p1, p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u32 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+#define __arm_viwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u8 ((uint32_t) __p1, p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u8 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+#define __arm_viwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u16 ((uint32_t) __p1, p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u16 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+#define __arm_viwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_x_n_u32 ((uint32_t) __p1, p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u32 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+#define __arm_vidupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u8 ((uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u8 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+#define __arm_vddupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u8 ((uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u8 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+#define __arm_vidupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u16 ((uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u16 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+#define __arm_vddupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u16 ((uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u16 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+#define __arm_vidupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_x_n_u32 ((uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u32 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+#define __arm_vddupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_x_n_u32 ((uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u32 (__ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+#define __arm_vshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
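+/* Halving adds compute (a + b) >> 1 per lane in a double-width
+   intermediate, so the sum cannot overflow; vhcaddq_rot90/rot270 below
+   apply the same halving to a complex add with the second operand
+   rotated by 90 or 270 degrees.  */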
+
+#define __arm_vhcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vhcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vhsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vclsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vclzq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
+
+#define __arm_vstrdq_scatter_base_wb(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_wb_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_wb_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
+
+#define __arm_vldrdq_gather_offset(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_s64 (__ARM_mve_coerce_s64_ptr(p0, int64_t *), p1), \
+ int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_u64 (__ARM_mve_coerce_u64_ptr(p0, uint64_t *), p1)))
+
+#define __arm_vldrdq_gather_offset_z(p0,p1,p2) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_offset_z_s64 (__ARM_mve_coerce_s64_ptr(p0, int64_t *), p1, p2), \
+ int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_offset_z_u64 (__ARM_mve_coerce_u64_ptr(p0, uint64_t *), p1, p2)))
+
+#define __arm_vldrdq_gather_shifted_offset(p0,p1) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_s64 (__ARM_mve_coerce_s64_ptr(p0, int64_t *), p1), \
+ int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_u64 (__ARM_mve_coerce_u64_ptr(p0, uint64_t *), p1)))
+
+#define __arm_vldrdq_gather_shifted_offset_z(p0,p1,p2) ( _Generic( (int (*)[__ARM_mve_typeid(p0)])0, \
+ int (*)[__ARM_mve_type_int64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_s64 (__ARM_mve_coerce_s64_ptr(p0, int64_t *), p1, p2), \
+ int (*)[__ARM_mve_type_uint64_t_ptr]: __arm_vldrdq_gather_shifted_offset_z_u64 (__ARM_mve_coerce_u64_ptr(p0, uint64_t *), p1, p2)))
+
+#define __arm_vadciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
+
+#define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vadcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
+
+#define __arm_vadcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vsbciq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
+
+#define __arm_vsbciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vsbcq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3, p4), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3, p4));})
+
+#define __arm_vsbcq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsbcq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsbcq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
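+/* vadciq/vsbciq force the carry (borrow) input to a known state and
+   write the carry out through the pointer argument, while vadcq/vsbcq
+   consume and update it, so multi-vector additions can be chained.  For
+   illustration, with hypothetical uint32x4_t halves a0/a1 and b0/b1:
+     unsigned carry;
+     uint32x4_t r0 = __arm_vadciq (a0, b0, &carry);
+     uint32x4_t r1 = __arm_vadcq (a1, b1, &carry);  */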
+
+#define __arm_vldrbq_gather_offset_z(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_s16 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_s32 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_z_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_z_u16 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_z_u32 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
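+/* The byte gathers above widen as they load: the offset vector fixes the
+   element count, so an int8_t base with uint16x8_t offsets selects the
+   sign-extending __arm_vldrbq_gather_offset_z_s16, with false-predicated
+   lanes zeroed.  */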
+
+#define __arm_vqrdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+#define __arm_vqrdmlashq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+#define __arm_vqdmlashq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlashq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
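+/* The multiply-accumulate forms above use merging (_m) predication:
+   lanes whose predicate bit is clear keep the value of the accumulator
+   operand p0 instead of being recomputed.  */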
+
+#define __arm_vqrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vqshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vqshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
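+/* Note that the unsigned vqrshlq_m/vqshlq_m overloads still take a
+   signed shift vector (coerced to the intNxM_t types above): negative
+   elements shift right, positive elements shift left, saturating on
+   overflow.  */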
+
+#define __arm_vrhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vrmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vrshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vrshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrshrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vshrq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshrq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vshrq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vsliq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsliq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsliq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsliq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsliq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsliq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsliq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vqsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
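+/* __arm_vqsubq_m above carries two overload sets in one _Generic: a scalar
+   third operand is matched by __ARM_mve_type_int_n and routed through
+   __ARM_mve_coerce_i_scalar to the _n_ (vector-by-scalar) intrinsic, while
+   a vector operand selects the plain vector form.  A sketch, assuming an
+   MVE-enabled target:
+
+     int8x16_t x = __arm_vdupq_n_s8 (100);
+     mve_pred16_t p = 0x00ff;
+     int8x16_t r1 = __arm_vqsubq_m (x, x, 30, p);   // _n_s8, scalar operand
+     int8x16_t r2 = __arm_vqsubq_m (x, x, x, p);    // _s8, vector operand
+*/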
+#define __arm_vqrdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqrdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+#define __arm_vqrdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vqrdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmlsdhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vshllbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshllbq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshllbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshllbq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshllbq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
+
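+/* __arm_vshllbq_m (and __arm_vshlltq_m further below) are widening shifts:
+   the inactive-value operand and the result use the double-width element
+   type, so each _Generic key pairs e.g. an int16x8_t destination with an
+   int8x16_t source; the 'b'/'t' suffix selects the bottom or top half of
+   the source vector.  */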
+#define __arm_vshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
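+/* __arm_vshrntq_m and __arm_vshrnbq_m are the narrowing counterparts: a
+   double-width source is shifted right and the narrowed result is written
+   to the top ('t') or bottom ('b') half of the destination, so here the
+   destination enumerator is the half-width type.  The vqshrn/vqrshrn and
+   vqshrun/vqrshrun wrappers that follow keep the same shape and add
+   saturating ('q'), rounding ('r') and signed-to-unsigned ('u') forms.  */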
+#define __arm_vshlltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t]: __arm_vshlltq_m_n_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t]: __arm_vshlltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t]: __arm_vshlltq_m_n_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t]: __arm_vshlltq_m_n_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
+
+#define __arm_vrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vqshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
+
+#define __arm_vqshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
+
+#define __arm_vqrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vqrshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqrshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqrshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vqrshrunbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshrunbq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshrunbq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
+
+#define __arm_vqrshruntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqrshruntq_m_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqrshruntq_m_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
+
+#define __arm_vqshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vqshrntq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrntq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrntq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vqshrntq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vqshrntq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vrshrnbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int16x8_t]: __arm_vrshrnbq_m_n_s16 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32x4_t]: __arm_vrshrnbq_m_n_s32 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint16x8_t]: __arm_vrshrnbq_m_n_u16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32x4_t]: __arm_vrshrnbq_m_n_u32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vmlaldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_p_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_p_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_p_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
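+/* In the predicated multiply-accumulate-long reductions above, the first
+   operand is the running 64-bit scalar accumulator, matched by
+   __ARM_mve_type_int_n; only the two vector operands determine the element
+   width.  A sketch, assuming an MVE-enabled target:
+
+     int16x8_t a = __arm_vdupq_n_s16 (3);
+     int16x8_t b = __arm_vdupq_n_s16 (4);
+     int64_t acc = 0;
+     acc = __arm_vmlaldavaq_p (acc, a, b, 0xffff);  // selects _p_s16
+*/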
+#define __arm_vmlaldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_p_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vmlsldavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vmlsldavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vrmlaldavhaxq_p(p0,p1,p2,p3) __arm_vrmlaldavhaxq_p_s32(p0,p1,p2,p3)
+
+#define __arm_vrmlsldavhaq_p(p0,p1,p2,p3) __arm_vrmlsldavhaq_p_s32(p0,p1,p2,p3)
+
+#define __arm_vrmlsldavhaxq_p(p0,p1,p2,p3) __arm_vrmlsldavhaxq_p_s32(p0,p1,p2,p3)
+
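+/* __arm_vrmlaldavhaxq_p, __arm_vrmlsldavhaq_p and __arm_vrmlsldavhaxq_p
+   exist for the s32 element type only, so no _Generic dispatch is needed:
+   each polymorphic name is a plain alias for its _s32 intrinsic.  */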
+#define __arm_vqdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vqdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmladhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vqdmlsdhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vqdmlsdhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmlsdhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmlsdhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmlsdhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vqabsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqabsq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqabsq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqabsq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vmvnq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmvnq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmvnq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmvnq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmvnq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmvnq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmvnq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmvnq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
+
+#define __arm_vorrq_m_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vorrq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vorrq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vorrq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vorrq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
+
+#define __arm_vqshrunbq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int16x8_t]: __arm_vqshrunbq_n_s16 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32x4_t]: __arm_vqshrunbq_n_s32 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vqshluq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshluq_m_n_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshluq_m_n_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshluq_m_n_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3));})
+
+#define __arm_vshlq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vshlq_m_n(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vshlq_m_r(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_m_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_m_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_m_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_m_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_m_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_m_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2));})
+
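+/* The _r ("register") form above shifts every active lane of p0 by one
+   scalar amount held in a general-purpose register, so only the vector
+   operand takes part in the dispatch; a negative count shifts right.  A
+   sketch, assuming an MVE-enabled target:
+
+     uint32x4_t v = __arm_vdupq_n_u32 (8);
+     v = __arm_vshlq_m_r (v, -2, 0x00ff);   // selects _r_u32
+*/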
+#define __arm_vsriq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsriq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsriq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsriq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsriq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsriq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsriq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define __arm_vhaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vhcaddq_rot270_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vhcaddq_rot90_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vhsubq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vhsubq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+#define __arm_vmaxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vminq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vmlaq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlaq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+#define __arm_vmlasq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vmlasq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+#define __arm_vmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vmullbq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
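+/* __arm_vmullbq_int_m multiplies the bottom ('b') pairs of elements into a
+   double-width result, so the inactive operand and return value are one
+   step wider than the sources (int64x2_t for int32x4_t inputs);
+   __arm_vmulltq_int_m below does the same for the top ('t') pairs.  */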
+#define __arm_vmulltq_int_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_m_s8 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_m_u8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_m_u16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint64x2_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_m_u32 (__ARM_mve_coerce(__p0, uint64x2_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vmulltq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
+
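+/* The widening multiplies above take an inactive/result vector whose
+   elements are twice the width of the inputs.  A sketch of how the
+   dispatch resolves (assuming int16x8_t inactive, int8x16_t a, b and
+   mve_pred16_t p are in scope):
+     int16x8_t r = __arm_vmullbq_int_m (inactive, a, b, p);
+   selects __arm_vmullbq_int_m_s8 via the
+   [int16x8_t][int8x16_t][int8x16_t] association.  */
+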
+#define __arm_vqaddq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vqaddq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
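+/* Where an operand may be a scalar, as in __arm_vqaddq_m above,
+   __ARM_mve_type_int_n matches any integer argument and
+   __ARM_mve_coerce_i_scalar passes it through as a plain int, so the
+   call dispatches to the _n_ (vector-by-scalar) form of the intrinsic
+   instead of the vector-vector form.  */
+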
+#define __arm_vqdmlahq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmlahq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+#define __arm_vqdmulhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulhq_m_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vqdmullbq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmullbq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmullbq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmullbq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3));})
+
+#define __arm_vqdmulltq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_m_n_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int_n]: __arm_vqdmulltq_m_n_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce_i_scalar(__p2, int), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulltq_m_s16 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int64x2_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulltq_m_s32 (__ARM_mve_coerce(__p0, int64x2_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vqrdmladhq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vqrdmladhxq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmladhxq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmladhxq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmladhxq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vmlsdavaxq_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_p_s8 (p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_p_s16 (p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_p_s32 (p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vmlsdavaq(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_s8(p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_s16(p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_s32(p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vmlsdavaxq(p0,p1,p2) ({ __typeof(p1) __p1 = (p1); \
+  __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaxq_s8(p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaxq_s16(p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaxq_s32(p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vmlsdavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vmlsdavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vmlsdavaq_p(p0,p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavaq_p_s8(p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavaq_p_s16(p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavaq_p_s32(p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vmladavaxq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_p_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_p_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+#define __arm_vmullbq_poly_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_m_p8 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_m_p16 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
+
+#define __arm_vldrbq_gather_offset(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_s8(__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_s16(__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_s32(__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vldrbq_gather_offset_u8(__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vldrbq_gather_offset_u16(__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vldrbq_gather_offset_u32(__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
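+/* The gather load above also dispatches on the pointee type: an
+   int8_t base pointer selects the signed loads and a uint8_t base the
+   unsigned ones, while the offset vector's element count picks the
+   result width (16 x 8-bit, or widening 8 x 16-bit and 4 x 32-bit
+   loads).  */
+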
+#define __arm_vidupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vidupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+#define __arm_vddupq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), (uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), (uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vddupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), (uint32_t) __p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3));})
+
+#define __arm_vidupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u16 ((uint32_t) __p0, p1), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u16 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+#define __arm_vidupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u32 ((uint32_t) __p0, p1), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+#define __arm_vidupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vidupq_n_u8 ((uint32_t) __p0, p1), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_wb_u8 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+#define __arm_vddupq_u16(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u16 ((uint32_t) __p0, p1), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u16 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+#define __arm_vddupq_u32(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u32 ((uint32_t) __p0, p1), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
+#define __arm_vddupq_u8(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vddupq_n_u8 ((uint32_t) __p0, p1), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_wb_u8 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1));})
+
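+/* vidupq/vddupq above come in two forms: an integer start value
+   selects the _n_ variant, and a uint32_t pointer selects the _wb_
+   (write-back) variant, which also stores the updated start value
+   back through the pointer.  */
+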
+#define __arm_viwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_viwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+#define __arm_viwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u16 (__ARM_mve_coerce_i_scalar(__p0, int), p1, (const int) p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u16 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, (const int) p2));})
+
+#define __arm_viwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u32 (__ARM_mve_coerce_i_scalar(__p0, int), p1, p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+#define __arm_viwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_viwdupq_n_u8 (__ARM_mve_coerce_i_scalar(__p0, int), p1, p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_wb_u8 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+#define __arm_vdwdupq_m(p0,p1,p2,p3,p4) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vdwdupq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_m_wb_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_u32_ptr(__p1, uint32_t *), p2, p3, p4));})
+
+#define __arm_vdwdupq_u16(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u16 (__ARM_mve_coerce_i_scalar(__p0, int), p1, p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u16 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+#define __arm_vdwdupq_u32(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u32 (__ARM_mve_coerce_i_scalar(__p0, int), p1, p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u32 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+#define __arm_vdwdupq_u8(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int_n]: __arm_vdwdupq_n_u8 (__ARM_mve_coerce_i_scalar(__p0, int), p1, p2), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u8 (__ARM_mve_coerce_u32_ptr(__p0, uint32_t *), p1, p2));})
+
+#define __arm_vshlcq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2, p3));})
+
+#define __arm_vabavq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+#define __arm_vabavq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabavq_p_s8(__p0, __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabavq_p_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabavq_p_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabavq_p_u8(__p0, __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabavq_p_u16(__p0, __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabavq_p_u32(__p0, __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vaddlvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vaddlvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddlvaq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddlvaq_p_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vaddlvq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
+
+#define __arm_vaddlvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddlvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddlvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vaddvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vaddvaq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_p_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_p_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_p_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_p_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_p_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vaddvq(p0) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_s8 (__ARM_mve_coerce(__p0, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vaddvq_s16 (__ARM_mve_coerce(__p0, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddvq_s32 (__ARM_mve_coerce(__p0, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vaddvq_u8 (__ARM_mve_coerce(__p0, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_u16 (__ARM_mve_coerce(__p0, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_u32 (__ARM_mve_coerce(__p0, uint32x4_t)));})
+
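+/* Across-vector adds reduce all lanes into a scalar accumulator.  A
+   minimal usage sketch (assuming the MVE types are in scope):
+     uint8x16_t v = vdupq_n_u8 (1);
+     uint32_t sum = __arm_vaddvq (v);
+   resolves to __arm_vaddvq_u8 and sums all 16 lanes.  */
+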
+#define __arm_vaddvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vaddvq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vaddvq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define __arm_vcmpcsq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+#define __arm_vcmpcsq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmpcsq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2));})
+
+#define __arm_vcmphiq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmphiq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int)));})
+
+#define __arm_vcmphiq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int_n]: __arm_vcmphiq_m_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce_i_scalar(__p1, int), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
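+/* The unsigned comparisons above (vcmpcsq: unsigned >=, vcmphiq:
+   unsigned >) return an mve_pred16_t lane predicate; the _m forms are
+   predicated, producing false in lanes where the input predicate p2
+   is false.  */
+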
+#define __arm_vmaxavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vmaxavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vmaxvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t)), \
+  int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmaxvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_p_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_p_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_p_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vminavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminavq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminavq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminavq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vminavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminavq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminavq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminavq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vmaxq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vminq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vminvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminvq_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminvq_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminvq_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vminvq_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vminvq_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vminvq_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vminvq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t]: __arm_vminvq_p_s8 (__p0, __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t]: __arm_vminvq_p_s16 (__p0, __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t]: __arm_vminvq_p_s32 (__p0, __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t]: __arm_vminvq_p_u8 (__p0, __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t]: __arm_vminvq_p_u16 (__p0, __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t]: __arm_vminvq_p_u32 (__p0, __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vmladavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+#define __arm_vmladavaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaq_p_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaq_p_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaq_p_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaq_p_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaq_p_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vmladavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavaxq_s8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavaxq_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavaxq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavaxq_u8 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavaxq_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavaxq_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+#define __arm_vmladavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmladavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vmladavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmladavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vmlaldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaq_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavaq_u16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavaq_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+#define __arm_vmlaldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavaxq_s16 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavaxq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vmlaldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vmlaldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmlaldavq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmlaldavq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vmlaldavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlaldavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlaldavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vmlsdavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vmlsdavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vmlsldavaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vmlsldavaxq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavaxq_s16(__p0, __ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavaxq_s32(__p0, __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)));})
+
+#define __arm_vmlsldavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vmlsldavq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vmlsldavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define __arm_vmlsldavxq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsldavxq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsldavxq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+#define __arm_vmovltq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+#define __arm_vmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vmullbq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vmullbq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
+
+#define __arm_vmulltq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vmulltq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
+
+#define __arm_vrmlaldavhaxq(p0,p1,p2) __arm_vrmlaldavhaxq_s32(p0,p1,p2)
+
+#define __arm_vrmlaldavhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vrmlaldavhq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vrmlaldavhxq(p0,p1) __arm_vrmlaldavhxq_s32(p0,p1)
+
+#define __arm_vrmlaldavhxq_p(p0,p1,p2) __arm_vrmlaldavhxq_p_s32(p0,p1,p2)
+
+#define __arm_vrmlsldavhaq(p0,p1,p2) __arm_vrmlsldavhaq_s32(p0,p1,p2)
+
+#define __arm_vrmlsldavhaxq(p0,p1,p2) __arm_vrmlsldavhaxq_s32(p0,p1,p2)
+
+#define __arm_vrmlsldavhq(p0,p1) __arm_vrmlsldavhq_s32(p0,p1)
+
+#define __arm_vrmlsldavhq_p(p0,p1,p2) __arm_vrmlsldavhq_p_s32(p0,p1,p2)
+
+#define __arm_vrmlsldavhxq(p0,p1) __arm_vrmlsldavhxq_s32(p0,p1)
+
+#define __arm_vrmlsldavhxq_p(p0,p1,p2) __arm_vrmlsldavhxq_p_s32(p0,p1,p2)
+
+#define __arm_vstrbq(p0,p1) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_s8 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_s16 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_s32 (__ARM_mve_coerce_s8_ptr(p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_u8 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_u16 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_u32 (__ARM_mve_coerce_u8_ptr(p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define __arm_vstrbq_p(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int8x16_t]: __arm_vstrbq_p_s8 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int16x8_t]: __arm_vstrbq_p_s16 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_int32x4_t]: __arm_vstrbq_p_s32 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_p_u8 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_p_u16 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_p_u32 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define __arm_vstrdq_scatter_base(p0,p1,p2) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
+
+#define __arm_vstrdq_scatter_base_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_base_p_s64 (p0, p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
+ int (*)[__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_base_p_u64 (p0, p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
+
+#define __arm_vrmlaldavhaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+#define __arm_vrmlaldavhaq_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmlaldavhaq_p_s32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int_n][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmlaldavhaq_p_u32 (__ARM_mve_coerce_i_scalar(__p0, int), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vstrbq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_s8 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t)), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_s16 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t)), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_s32 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_u8 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_u16 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_u32 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t)));})
+
+#define __arm_vstrbq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vstrbq_scatter_offset_p_s8 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vstrbq_scatter_offset_p_s16 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vstrbq_scatter_offset_p_s32 (__ARM_mve_coerce_s8_ptr(__p0, int8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vstrbq_scatter_offset_p_u8 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vstrbq_scatter_offset_p_u16 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint8_t_ptr][__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vstrbq_scatter_offset_p_u32 (__ARM_mve_coerce_u8_ptr(__p0, uint8_t *), __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define __arm_vstrdq_scatter_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_p_s64 (__ARM_mve_coerce_s64_ptr(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
+ int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_p_u64 (__ARM_mve_coerce_u64_ptr(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
+
+#define __arm_vstrdq_scatter_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_offset_s64 (__ARM_mve_coerce_s64_ptr(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_offset_u64 (__ARM_mve_coerce_u64_ptr(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
+
+#define __arm_vstrdq_scatter_shifted_offset_p(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_s64 (__ARM_mve_coerce_s64_ptr(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t), p3), \
+ int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_p_u64 (__ARM_mve_coerce_u64_ptr(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t), p3));})
+
+#define __arm_vstrdq_scatter_shifted_offset(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int64_t_ptr][__ARM_mve_type_int64x2_t]: __arm_vstrdq_scatter_shifted_offset_s64 (__ARM_mve_coerce_s64_ptr(__p0, int64_t *), p1, __ARM_mve_coerce(__p2, int64x2_t)), \
+ int (*)[__ARM_mve_type_uint64_t_ptr][__ARM_mve_type_uint64x2_t]: __arm_vstrdq_scatter_shifted_offset_u64 (__ARM_mve_coerce_u64_ptr(__p0, uint64_t *), p1, __ARM_mve_coerce(__p2, uint64x2_t)));})
+
+#endif /* __cplusplus */
+#endif /* __ARM_FEATURE_MVE */
+#endif /* _GCC_ARM_MVE_H. */
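
The type-generic macros above all follow the same pattern: a _Generic
selection over a synthetic int (*)[__ARM_mve_typeid(...)] pointer type picks
the type-suffixed intrinsic, and __ARM_mve_coerce casts each argument to the
exact vector type that intrinsic expects. A minimal sketch of what this looks
like from user code (editorial example, not part of the import; it assumes an
MVE-enabled target such as -mcpu=cortex-m55, and the function names here are
hypothetical):

    #include <arm_mve.h>

    /* The generic vmladavq spelling resolves per argument type:
       signed 16-bit inputs reach __arm_vmladavq_s16 ...  */
    int32_t dot_s16 (int16x8_t a, int16x8_t b)
    {
      return vmladavq (a, b);            /* -> __arm_vmladavq_s16 */
    }

    /* ... and the predicated unsigned 32-bit form reaches
       __arm_vmladavq_p_u32, with p selecting the active lanes.  */
    uint32_t dot_u32_p (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
    {
      return vmladavq_p (a, b, p);       /* -> __arm_vmladavq_p_u32 */
    }

Mixing element types (say an int16x8_t with a uint32x4_t) matches no
_Generic association, so the call fails to compile; that is how these macros
enforce matching operand types at build time.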
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/arm_mve_types.h b/lib/gcc/arm-none-eabi/13.2.1/include/arm_mve_types.h
new file mode 100644
index 0000000..12bb519
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/arm_mve_types.h
@@ -0,0 +1,1462 @@
+/* Arm MVE intrinsics include file.
+
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+ Contributed by Arm.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GCC_ARM_MVE_TYPES_H
+#define _GCC_ARM_MVE_TYPES_H
+
+#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
+typedef __fp16 float16_t;
+typedef float float32_t;
+#endif
+
+#pragma GCC arm "arm_mve_types.h"
+
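+/* The __arm_vreinterpretq_* helpers below reinterpret the 128-bit contents
+   of one MVE vector type as another; they are pure type punning and emit
+   no instructions.  */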
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_s32 (int32x4_t __a)
+{
+ return (int16x8_t) __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_s64 (int64x2_t __a)
+{
+ return (int16x8_t) __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_s8 (int8x16_t __a)
+{
+ return (int16x8_t) __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_u16 (uint16x8_t __a)
+{
+ return (int16x8_t) __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_u32 (uint32x4_t __a)
+{
+ return (int16x8_t) __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_u64 (uint64x2_t __a)
+{
+ return (int16x8_t) __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_u8 (uint8x16_t __a)
+{
+ return (int16x8_t) __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_s16 (int16x8_t __a)
+{
+ return (int32x4_t) __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_s64 (int64x2_t __a)
+{
+ return (int32x4_t) __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_s8 (int8x16_t __a)
+{
+ return (int32x4_t) __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_u16 (uint16x8_t __a)
+{
+ return (int32x4_t) __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_u32 (uint32x4_t __a)
+{
+ return (int32x4_t) __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_u64 (uint64x2_t __a)
+{
+ return (int32x4_t) __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_u8 (uint8x16_t __a)
+{
+ return (int32x4_t) __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_s16 (int16x8_t __a)
+{
+ return (int64x2_t) __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_s32 (int32x4_t __a)
+{
+ return (int64x2_t) __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_s8 (int8x16_t __a)
+{
+ return (int64x2_t) __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_u16 (uint16x8_t __a)
+{
+ return (int64x2_t) __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_u32 (uint32x4_t __a)
+{
+ return (int64x2_t) __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_u64 (uint64x2_t __a)
+{
+ return (int64x2_t) __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_u8 (uint8x16_t __a)
+{
+ return (int64x2_t) __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_s16 (int16x8_t __a)
+{
+ return (int8x16_t) __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_s32 (int32x4_t __a)
+{
+ return (int8x16_t) __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_s64 (int64x2_t __a)
+{
+ return (int8x16_t) __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_u16 (uint16x8_t __a)
+{
+ return (int8x16_t) __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_u32 (uint32x4_t __a)
+{
+ return (int8x16_t) __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_u64 (uint64x2_t __a)
+{
+ return (int8x16_t) __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_u8 (uint8x16_t __a)
+{
+ return (int8x16_t) __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_s16 (int16x8_t __a)
+{
+ return (uint16x8_t) __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_s32 (int32x4_t __a)
+{
+ return (uint16x8_t) __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_s64 (int64x2_t __a)
+{
+ return (uint16x8_t) __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_s8 (int8x16_t __a)
+{
+ return (uint16x8_t) __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_u32 (uint32x4_t __a)
+{
+ return (uint16x8_t) __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_u64 (uint64x2_t __a)
+{
+ return (uint16x8_t) __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_u8 (uint8x16_t __a)
+{
+ return (uint16x8_t) __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_s16 (int16x8_t __a)
+{
+ return (uint32x4_t) __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_s32 (int32x4_t __a)
+{
+ return (uint32x4_t) __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_s64 (int64x2_t __a)
+{
+ return (uint32x4_t) __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_s8 (int8x16_t __a)
+{
+ return (uint32x4_t) __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_u16 (uint16x8_t __a)
+{
+ return (uint32x4_t) __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_u64 (uint64x2_t __a)
+{
+ return (uint32x4_t) __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_u8 (uint8x16_t __a)
+{
+ return (uint32x4_t) __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_s16 (int16x8_t __a)
+{
+ return (uint64x2_t) __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_s32 (int32x4_t __a)
+{
+ return (uint64x2_t) __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_s64 (int64x2_t __a)
+{
+ return (uint64x2_t) __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_s8 (int8x16_t __a)
+{
+ return (uint64x2_t) __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_u16 (uint16x8_t __a)
+{
+ return (uint64x2_t) __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_u32 (uint32x4_t __a)
+{
+ return (uint64x2_t) __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_u8 (uint8x16_t __a)
+{
+ return (uint64x2_t) __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_s16 (int16x8_t __a)
+{
+ return (uint8x16_t) __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_s32 (int32x4_t __a)
+{
+ return (uint8x16_t) __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_s64 (int64x2_t __a)
+{
+ return (uint8x16_t) __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_s8 (int8x16_t __a)
+{
+ return (uint8x16_t) __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_u16 (uint16x8_t __a)
+{
+ return (uint8x16_t) __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_u32 (uint32x4_t __a)
+{
+ return (uint8x16_t) __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_u64 (uint64x2_t __a)
+{
+ return (uint8x16_t) __a;
+}
+
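+/* The __arm_vuninitializedq_* helpers return a deliberately uninitialized
+   vector: the empty asm with an output-only operand tells the compiler the
+   value has been produced, without emitting any initialization code.  */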
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_u8 (void)
+{
+ uint8x16_t __uninit;
+ __asm__ ("": "=w"(__uninit));
+ return __uninit;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_u16 (void)
+{
+ uint16x8_t __uninit;
+ __asm__ ("": "=w"(__uninit));
+ return __uninit;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_u32 (void)
+{
+ uint32x4_t __uninit;
+ __asm__ ("": "=w"(__uninit));
+ return __uninit;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_u64 (void)
+{
+ uint64x2_t __uninit;
+ __asm__ ("": "=w"(__uninit));
+ return __uninit;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_s8 (void)
+{
+ int8x16_t __uninit;
+ __asm__ ("": "=w"(__uninit));
+ return __uninit;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_s16 (void)
+{
+ int16x8_t __uninit;
+ __asm__ ("": "=w"(__uninit));
+ return __uninit;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_s32 (void)
+{
+ int32x4_t __uninit;
+ __asm__ ("": "=w"(__uninit));
+ return __uninit;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_s64 (void)
+{
+ int64x2_t __uninit;
+ __asm__ ("": "=w"(__uninit));
+ return __uninit;
+}
+
+#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_f16 (float16x8_t __a)
+{
+ return (int32x4_t) __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32_f32 (float32x4_t __a)
+{
+ return (int32x4_t) __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_f16 (float16x8_t __a)
+{
+ return (int16x8_t) __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16_f32 (float32x4_t __a)
+{
+ return (int16x8_t) __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_f16 (float16x8_t __a)
+{
+ return (int64x2_t) __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64_f32 (float32x4_t __a)
+{
+ return (int64x2_t) __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_f16 (float16x8_t __a)
+{
+ return (int8x16_t) __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8_f32 (float32x4_t __a)
+{
+ return (int8x16_t) __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_f16 (float16x8_t __a)
+{
+ return (uint16x8_t) __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16_f32 (float32x4_t __a)
+{
+ return (uint16x8_t) __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_f16 (float16x8_t __a)
+{
+ return (uint32x4_t) __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32_f32 (float32x4_t __a)
+{
+ return (uint32x4_t) __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_f16 (float16x8_t __a)
+{
+ return (uint64x2_t) __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64_f32 (float32x4_t __a)
+{
+ return (uint64x2_t) __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_f16 (float16x8_t __a)
+{
+ return (uint8x16_t) __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8_f32 (float32x4_t __a)
+{
+ return (uint8x16_t) __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_f32 (float32x4_t __a)
+{
+ return (float16x8_t) __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_s16 (int16x8_t __a)
+{
+ return (float16x8_t) __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_s32 (int32x4_t __a)
+{
+ return (float16x8_t) __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_s64 (int64x2_t __a)
+{
+ return (float16x8_t) __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_s8 (int8x16_t __a)
+{
+ return (float16x8_t) __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_u16 (uint16x8_t __a)
+{
+ return (float16x8_t) __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_u32 (uint32x4_t __a)
+{
+ return (float16x8_t) __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_u64 (uint64x2_t __a)
+{
+ return (float16x8_t) __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16_u8 (uint8x16_t __a)
+{
+ return (float16x8_t) __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_f16 (float16x8_t __a)
+{
+ return (float32x4_t) __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_s16 (int16x8_t __a)
+{
+ return (float32x4_t) __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_s32 (int32x4_t __a)
+{
+ return (float32x4_t) __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_s64 (int64x2_t __a)
+{
+ return (float32x4_t) __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_s8 (int8x16_t __a)
+{
+ return (float32x4_t) __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_u16 (uint16x8_t __a)
+{
+ return (float32x4_t) __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_u32 (uint32x4_t __a)
+{
+ return (float32x4_t) __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_u64 (uint64x2_t __a)
+{
+ return (float32x4_t) __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32_u8 (uint8x16_t __a)
+{
+ return (float32x4_t) __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_f16 (void)
+{
+ float16x8_t __uninit;
+ __asm__ ("": "=w" (__uninit));
+ return __uninit;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq_f32 (void)
+{
+ float32x4_t __uninit;
+ __asm__ ("": "=w" (__uninit));
+ return __uninit;
+}
+
+#endif
+
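+/* For C++, overloaded forms of the generic names dispatch on argument type
+   and forward to the type-suffixed functions above (C instead uses the
+   _Generic macros in arm_mve.h).  */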
+#ifdef __cplusplus
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16 (int32x4_t __a)
+{
+ return __arm_vreinterpretq_s16_s32 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16 (int64x2_t __a)
+{
+ return __arm_vreinterpretq_s16_s64 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16 (int8x16_t __a)
+{
+ return __arm_vreinterpretq_s16_s8 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16 (uint16x8_t __a)
+{
+ return __arm_vreinterpretq_s16_u16 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16 (uint32x4_t __a)
+{
+ return __arm_vreinterpretq_s16_u32 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16 (uint64x2_t __a)
+{
+ return __arm_vreinterpretq_s16_u64 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16 (uint8x16_t __a)
+{
+ return __arm_vreinterpretq_s16_u8 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32 (int16x8_t __a)
+{
+ return __arm_vreinterpretq_s32_s16 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32 (int64x2_t __a)
+{
+ return __arm_vreinterpretq_s32_s64 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32 (int8x16_t __a)
+{
+ return __arm_vreinterpretq_s32_s8 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32 (uint16x8_t __a)
+{
+ return __arm_vreinterpretq_s32_u16 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32 (uint32x4_t __a)
+{
+ return __arm_vreinterpretq_s32_u32 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32 (uint64x2_t __a)
+{
+ return __arm_vreinterpretq_s32_u64 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32 (uint8x16_t __a)
+{
+ return __arm_vreinterpretq_s32_u8 (__a);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64 (int16x8_t __a)
+{
+ return __arm_vreinterpretq_s64_s16 (__a);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64 (int32x4_t __a)
+{
+ return __arm_vreinterpretq_s64_s32 (__a);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64 (int8x16_t __a)
+{
+ return __arm_vreinterpretq_s64_s8 (__a);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64 (uint16x8_t __a)
+{
+ return __arm_vreinterpretq_s64_u16 (__a);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64 (uint32x4_t __a)
+{
+ return __arm_vreinterpretq_s64_u32 (__a);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64 (uint64x2_t __a)
+{
+ return __arm_vreinterpretq_s64_u64 (__a);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64 (uint8x16_t __a)
+{
+ return __arm_vreinterpretq_s64_u8 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8 (int16x8_t __a)
+{
+ return __arm_vreinterpretq_s8_s16 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8 (int32x4_t __a)
+{
+ return __arm_vreinterpretq_s8_s32 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8 (int64x2_t __a)
+{
+ return __arm_vreinterpretq_s8_s64 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8 (uint16x8_t __a)
+{
+ return __arm_vreinterpretq_s8_u16 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8 (uint32x4_t __a)
+{
+ return __arm_vreinterpretq_s8_u32 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8 (uint64x2_t __a)
+{
+ return __arm_vreinterpretq_s8_u64 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8 (uint8x16_t __a)
+{
+ return __arm_vreinterpretq_s8_u8 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16 (int16x8_t __a)
+{
+ return __arm_vreinterpretq_u16_s16 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16 (int32x4_t __a)
+{
+ return __arm_vreinterpretq_u16_s32 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16 (int64x2_t __a)
+{
+ return __arm_vreinterpretq_u16_s64 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16 (int8x16_t __a)
+{
+ return __arm_vreinterpretq_u16_s8 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16 (uint32x4_t __a)
+{
+ return __arm_vreinterpretq_u16_u32 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16 (uint64x2_t __a)
+{
+ return __arm_vreinterpretq_u16_u64 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16 (uint8x16_t __a)
+{
+ return __arm_vreinterpretq_u16_u8 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32 (int16x8_t __a)
+{
+ return __arm_vreinterpretq_u32_s16 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32 (int32x4_t __a)
+{
+ return __arm_vreinterpretq_u32_s32 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32 (int64x2_t __a)
+{
+ return __arm_vreinterpretq_u32_s64 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32 (int8x16_t __a)
+{
+ return __arm_vreinterpretq_u32_s8 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32 (uint16x8_t __a)
+{
+ return __arm_vreinterpretq_u32_u16 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32 (uint64x2_t __a)
+{
+ return __arm_vreinterpretq_u32_u64 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32 (uint8x16_t __a)
+{
+ return __arm_vreinterpretq_u32_u8 (__a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64 (int16x8_t __a)
+{
+ return __arm_vreinterpretq_u64_s16 (__a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64 (int32x4_t __a)
+{
+ return __arm_vreinterpretq_u64_s32 (__a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64 (int64x2_t __a)
+{
+ return __arm_vreinterpretq_u64_s64 (__a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64 (int8x16_t __a)
+{
+ return __arm_vreinterpretq_u64_s8 (__a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64 (uint16x8_t __a)
+{
+ return __arm_vreinterpretq_u64_u16 (__a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64 (uint32x4_t __a)
+{
+ return __arm_vreinterpretq_u64_u32 (__a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64 (uint8x16_t __a)
+{
+ return __arm_vreinterpretq_u64_u8 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8 (int16x8_t __a)
+{
+ return __arm_vreinterpretq_u8_s16 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8 (int32x4_t __a)
+{
+ return __arm_vreinterpretq_u8_s32 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8 (int64x2_t __a)
+{
+ return __arm_vreinterpretq_u8_s64 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8 (int8x16_t __a)
+{
+ return __arm_vreinterpretq_u8_s8 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8 (uint16x8_t __a)
+{
+ return __arm_vreinterpretq_u8_u16 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8 (uint32x4_t __a)
+{
+ return __arm_vreinterpretq_u8_u32 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8 (uint64x2_t __a)
+{
+ return __arm_vreinterpretq_u8_u64 (__a);
+}
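+
+/* Note: each __arm_vreinterpretq_* overload is a pure bit-cast: the 128-bit
+   register contents are relabelled with a new element type; no lanes are
+   converted, moved or widened.  Illustrative use:
+
+     int32x4_t  __s = __arm_vdupq_n_s32 (-1);
+     uint32x4_t __u = __arm_vreinterpretq_u32 (__s);  (every lane 0xFFFFFFFF)  */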
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq (uint8x16_t /* __v ATTRIBUTE UNUSED */)
+{
+ return __arm_vuninitializedq_u8 ();
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq (uint16x8_t /* __v ATTRIBUTE UNUSED */)
+{
+ return __arm_vuninitializedq_u16 ();
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq (uint32x4_t /* __v ATTRIBUTE UNUSED */)
+{
+ return __arm_vuninitializedq_u32 ();
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq (uint64x2_t /* __v ATTRIBUTE UNUSED */)
+{
+ return __arm_vuninitializedq_u64 ();
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq (int8x16_t /* __v ATTRIBUTE UNUSED */)
+{
+ return __arm_vuninitializedq_s8 ();
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq (int16x8_t /* __v ATTRIBUTE UNUSED */)
+{
+ return __arm_vuninitializedq_s16 ();
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq (int32x4_t /* __v ATTRIBUTE UNUSED */)
+{
+ return __arm_vuninitializedq_s32 ();
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq (int64x2_t /* __v ATTRIBUTE UNUSED */)
+{
+ return __arm_vuninitializedq_s64 ();
+}
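+
+/* Note: the __arm_vuninitializedq overloads never read their argument (hence
+   the unnamed, unused parameter); it exists only so C++ overload resolution
+   can select the element type.  The result is an indeterminate vector,
+   typically passed as the inactive-lane input of predicated (_m) intrinsics
+   when those lanes are don't-cares.  */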
+
+#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32 (float16x8_t __a)
+{
+ return __arm_vreinterpretq_s32_f16 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s32 (float32x4_t __a)
+{
+ return __arm_vreinterpretq_s32_f32 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16 (float16x8_t __a)
+{
+ return __arm_vreinterpretq_s16_f16 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s16 (float32x4_t __a)
+{
+ return __arm_vreinterpretq_s16_f32 (__a);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64 (float16x8_t __a)
+{
+ return __arm_vreinterpretq_s64_f16 (__a);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s64 (float32x4_t __a)
+{
+ return __arm_vreinterpretq_s64_f32 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8 (float16x8_t __a)
+{
+ return __arm_vreinterpretq_s8_f16 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_s8 (float32x4_t __a)
+{
+ return __arm_vreinterpretq_s8_f32 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16 (float16x8_t __a)
+{
+ return __arm_vreinterpretq_u16_f16 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u16 (float32x4_t __a)
+{
+ return __arm_vreinterpretq_u16_f32 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32 (float16x8_t __a)
+{
+ return __arm_vreinterpretq_u32_f16 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u32 (float32x4_t __a)
+{
+ return __arm_vreinterpretq_u32_f32 (__a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64 (float16x8_t __a)
+{
+ return __arm_vreinterpretq_u64_f16 (__a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u64 (float32x4_t __a)
+{
+ return __arm_vreinterpretq_u64_f32 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8 (float16x8_t __a)
+{
+ return __arm_vreinterpretq_u8_f16 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_u8 (float32x4_t __a)
+{
+ return __arm_vreinterpretq_u8_f32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16 (float32x4_t __a)
+{
+ return __arm_vreinterpretq_f16_f32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16 (int16x8_t __a)
+{
+ return __arm_vreinterpretq_f16_s16 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16 (int32x4_t __a)
+{
+ return __arm_vreinterpretq_f16_s32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16 (int64x2_t __a)
+{
+ return __arm_vreinterpretq_f16_s64 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16 (int8x16_t __a)
+{
+ return __arm_vreinterpretq_f16_s8 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16 (uint16x8_t __a)
+{
+ return __arm_vreinterpretq_f16_u16 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16 (uint32x4_t __a)
+{
+ return __arm_vreinterpretq_f16_u32 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16 (uint64x2_t __a)
+{
+ return __arm_vreinterpretq_f16_u64 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f16 (uint8x16_t __a)
+{
+ return __arm_vreinterpretq_f16_u8 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32 (float16x8_t __a)
+{
+ return __arm_vreinterpretq_f32_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32 (int16x8_t __a)
+{
+ return __arm_vreinterpretq_f32_s16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32 (int32x4_t __a)
+{
+ return __arm_vreinterpretq_f32_s32 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32 (int64x2_t __a)
+{
+ return __arm_vreinterpretq_f32_s64 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32 (int8x16_t __a)
+{
+ return __arm_vreinterpretq_f32_s8 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32 (uint16x8_t __a)
+{
+ return __arm_vreinterpretq_f32_u16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32 (uint32x4_t __a)
+{
+ return __arm_vreinterpretq_f32_u32 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32 (uint64x2_t __a)
+{
+ return __arm_vreinterpretq_f32_u64 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vreinterpretq_f32 (uint8x16_t __a)
+{
+ return __arm_vreinterpretq_f32_u8 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq (float16x8_t /* __v ATTRIBUTE UNUSED */)
+{
+ return __arm_vuninitializedq_f16 ();
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vuninitializedq (float32x4_t /* __v ATTRIBUTE UNUSED */)
+{
+ return __arm_vuninitializedq_f32 ();
+}
+#endif /* __ARM_FEATURE_MVE & 2 (MVE floating point) */
+#endif /* __cplusplus */
+
+#endif /* _GCC_ARM_MVE_H. */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/arm_neon.h b/lib/gcc/arm-none-eabi/13.2.1/include/arm_neon.h
new file mode 100644
index 0000000..cdfdb44
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/arm_neon.h
@@ -0,0 +1,20040 @@
+/* ARM NEON intrinsics include file.
+
+ Copyright (C) 2006-2023 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GCC_ARM_NEON_H
+#define _GCC_ARM_NEON_H 1
+
+#ifndef __ARM_FP
+#error "NEON intrinsics not available with the soft-float ABI. Please use -mfloat-abi=softfp or -mfloat-abi=hard"
+#else
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=neon")
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <arm_fp16.h>
+#include <arm_bf16.h>
+#include <stdint.h>
+
+/* For big-endian, GCC's vector indices are reversed within each 64
+ bits compared to the architectural lane indices used by Neon
+ intrinsics. */
+#ifdef __ARM_BIG_ENDIAN
+#define __ARM_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))
+#define __arm_lane(__vec, __idx) (__idx ^ (__ARM_NUM_LANES(__vec) - 1))
+#define __arm_laneq(__vec, __idx) (__idx ^ (__ARM_NUM_LANES(__vec)/2 - 1))
+#else
+#define __arm_lane(__vec, __idx) __idx
+#define __arm_laneq(__vec, __idx) __idx
+#endif
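+
+/* Example (illustrative): with __ARM_BIG_ENDIAN defined, a uint8x8_t has
+   __ARM_NUM_LANES == 8, so __arm_lane (__v, 0) == 7: GCC's element 0 sits in
+   architectural lane 7.  For a uint8x16_t, __arm_laneq XORs with 7 instead,
+   so indices reverse independently within each 64-bit half: 0 -> 7, 8 -> 15.  */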
+
+typedef __simd64_int8_t int8x8_t;
+typedef __simd64_int16_t int16x4_t;
+typedef __simd64_int32_t int32x2_t;
+typedef __builtin_neon_di int64x1_t;
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+typedef __fp16 float16_t;
+typedef __simd64_float16_t float16x4_t;
+#endif
+typedef __simd64_float32_t float32x2_t;
+typedef __simd64_poly8_t poly8x8_t;
+typedef __simd64_poly16_t poly16x4_t;
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+typedef __builtin_neon_poly64 poly64x1_t;
+#pragma GCC pop_options
+typedef __simd64_uint8_t uint8x8_t;
+typedef __simd64_uint16_t uint16x4_t;
+typedef __simd64_uint32_t uint32x2_t;
+typedef __builtin_neon_udi uint64x1_t;
+
+typedef __simd128_int8_t int8x16_t;
+typedef __simd128_int16_t int16x8_t;
+typedef __simd128_int32_t int32x4_t;
+typedef __simd128_int64_t int64x2_t;
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+typedef __simd128_float16_t float16x8_t;
+#endif
+typedef __simd128_float32_t float32x4_t;
+typedef __simd128_poly8_t poly8x16_t;
+typedef __simd128_poly16_t poly16x8_t;
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+typedef __builtin_neon_poly64 poly64x2_t __attribute__ ((__vector_size__ (16)));
+#pragma GCC pop_options
+
+typedef __simd128_uint8_t uint8x16_t;
+typedef __simd128_uint16_t uint16x8_t;
+typedef __simd128_uint32_t uint32x4_t;
+typedef __simd128_uint64_t uint64x2_t;
+
+typedef float float32_t;
+
+typedef __simd128_bfloat16_t bfloat16x8_t;
+typedef __simd64_bfloat16_t bfloat16x4_t;
+
+/* The Poly types are user visible and live in their own world,
+ keep them that way. */
+typedef __builtin_neon_poly8 poly8_t;
+typedef __builtin_neon_poly16 poly16_t;
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+typedef __builtin_neon_poly64 poly64_t;
+typedef __builtin_neon_poly128 poly128_t;
+#pragma GCC pop_options
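+
+/* Note: the poly*_t element types hold polynomials over GF(2); multiplication
+   on them (vmul_p8, vmull_p8, ...) is carry-less, combining partial products
+   with XOR rather than addition.  */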
+
+typedef struct int8x8x2_t
+{
+ int8x8_t val[2];
+} int8x8x2_t;
+
+typedef struct int8x16x2_t
+{
+ int8x16_t val[2];
+} int8x16x2_t;
+
+typedef struct int16x4x2_t
+{
+ int16x4_t val[2];
+} int16x4x2_t;
+
+typedef struct int16x8x2_t
+{
+ int16x8_t val[2];
+} int16x8x2_t;
+
+typedef struct int32x2x2_t
+{
+ int32x2_t val[2];
+} int32x2x2_t;
+
+typedef struct int32x4x2_t
+{
+ int32x4_t val[2];
+} int32x4x2_t;
+
+typedef struct int64x1x2_t
+{
+ int64x1_t val[2];
+} int64x1x2_t;
+
+typedef struct int64x2x2_t
+{
+ int64x2_t val[2];
+} int64x2x2_t;
+
+typedef struct uint8x8x2_t
+{
+ uint8x8_t val[2];
+} uint8x8x2_t;
+
+typedef struct uint8x16x2_t
+{
+ uint8x16_t val[2];
+} uint8x16x2_t;
+
+typedef struct uint16x4x2_t
+{
+ uint16x4_t val[2];
+} uint16x4x2_t;
+
+typedef struct uint16x8x2_t
+{
+ uint16x8_t val[2];
+} uint16x8x2_t;
+
+typedef struct uint32x2x2_t
+{
+ uint32x2_t val[2];
+} uint32x2x2_t;
+
+typedef struct uint32x4x2_t
+{
+ uint32x4_t val[2];
+} uint32x4x2_t;
+
+typedef struct uint64x1x2_t
+{
+ uint64x1_t val[2];
+} uint64x1x2_t;
+
+typedef struct uint64x2x2_t
+{
+ uint64x2_t val[2];
+} uint64x2x2_t;
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+typedef struct float16x4x2_t
+{
+ float16x4_t val[2];
+} float16x4x2_t;
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+typedef struct float16x8x2_t
+{
+ float16x8_t val[2];
+} float16x8x2_t;
+#endif
+
+typedef struct float32x2x2_t
+{
+ float32x2_t val[2];
+} float32x2x2_t;
+
+typedef struct float32x4x2_t
+{
+ float32x4_t val[2];
+} float32x4x2_t;
+
+typedef struct poly8x8x2_t
+{
+ poly8x8_t val[2];
+} poly8x8x2_t;
+
+typedef struct poly8x16x2_t
+{
+ poly8x16_t val[2];
+} poly8x16x2_t;
+
+typedef struct poly16x4x2_t
+{
+ poly16x4_t val[2];
+} poly16x4x2_t;
+
+typedef struct poly16x8x2_t
+{
+ poly16x8_t val[2];
+} poly16x8x2_t;
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+typedef struct poly64x1x2_t
+{
+ poly64x1_t val[2];
+} poly64x1x2_t;
+
+
+typedef struct poly64x2x2_t
+{
+ poly64x2_t val[2];
+} poly64x2x2_t;
+#pragma GCC pop_options
+
+
+typedef struct int8x8x3_t
+{
+ int8x8_t val[3];
+} int8x8x3_t;
+
+typedef struct int8x16x3_t
+{
+ int8x16_t val[3];
+} int8x16x3_t;
+
+typedef struct int16x4x3_t
+{
+ int16x4_t val[3];
+} int16x4x3_t;
+
+typedef struct int16x8x3_t
+{
+ int16x8_t val[3];
+} int16x8x3_t;
+
+typedef struct int32x2x3_t
+{
+ int32x2_t val[3];
+} int32x2x3_t;
+
+typedef struct int32x4x3_t
+{
+ int32x4_t val[3];
+} int32x4x3_t;
+
+typedef struct int64x1x3_t
+{
+ int64x1_t val[3];
+} int64x1x3_t;
+
+typedef struct int64x2x3_t
+{
+ int64x2_t val[3];
+} int64x2x3_t;
+
+typedef struct uint8x8x3_t
+{
+ uint8x8_t val[3];
+} uint8x8x3_t;
+
+typedef struct uint8x16x3_t
+{
+ uint8x16_t val[3];
+} uint8x16x3_t;
+
+typedef struct uint16x4x3_t
+{
+ uint16x4_t val[3];
+} uint16x4x3_t;
+
+typedef struct uint16x8x3_t
+{
+ uint16x8_t val[3];
+} uint16x8x3_t;
+
+typedef struct uint32x2x3_t
+{
+ uint32x2_t val[3];
+} uint32x2x3_t;
+
+typedef struct uint32x4x3_t
+{
+ uint32x4_t val[3];
+} uint32x4x3_t;
+
+typedef struct uint64x1x3_t
+{
+ uint64x1_t val[3];
+} uint64x1x3_t;
+
+typedef struct uint64x2x3_t
+{
+ uint64x2_t val[3];
+} uint64x2x3_t;
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+typedef struct float16x4x3_t
+{
+ float16x4_t val[3];
+} float16x4x3_t;
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+typedef struct float16x8x3_t
+{
+ float16x8_t val[3];
+} float16x8x3_t;
+#endif
+
+typedef struct float32x2x3_t
+{
+ float32x2_t val[3];
+} float32x2x3_t;
+
+typedef struct float32x4x3_t
+{
+ float32x4_t val[3];
+} float32x4x3_t;
+
+typedef struct poly8x8x3_t
+{
+ poly8x8_t val[3];
+} poly8x8x3_t;
+
+typedef struct poly8x16x3_t
+{
+ poly8x16_t val[3];
+} poly8x16x3_t;
+
+typedef struct poly16x4x3_t
+{
+ poly16x4_t val[3];
+} poly16x4x3_t;
+
+typedef struct poly16x8x3_t
+{
+ poly16x8_t val[3];
+} poly16x8x3_t;
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+typedef struct poly64x1x3_t
+{
+ poly64x1_t val[3];
+} poly64x1x3_t;
+
+
+typedef struct poly64x2x3_t
+{
+ poly64x2_t val[3];
+} poly64x2x3_t;
+#pragma GCC pop_options
+
+
+typedef struct int8x8x4_t
+{
+ int8x8_t val[4];
+} int8x8x4_t;
+
+typedef struct int8x16x4_t
+{
+ int8x16_t val[4];
+} int8x16x4_t;
+
+typedef struct int16x4x4_t
+{
+ int16x4_t val[4];
+} int16x4x4_t;
+
+typedef struct int16x8x4_t
+{
+ int16x8_t val[4];
+} int16x8x4_t;
+
+typedef struct int32x2x4_t
+{
+ int32x2_t val[4];
+} int32x2x4_t;
+
+typedef struct int32x4x4_t
+{
+ int32x4_t val[4];
+} int32x4x4_t;
+
+typedef struct int64x1x4_t
+{
+ int64x1_t val[4];
+} int64x1x4_t;
+
+typedef struct int64x2x4_t
+{
+ int64x2_t val[4];
+} int64x2x4_t;
+
+typedef struct uint8x8x4_t
+{
+ uint8x8_t val[4];
+} uint8x8x4_t;
+
+typedef struct uint8x16x4_t
+{
+ uint8x16_t val[4];
+} uint8x16x4_t;
+
+typedef struct uint16x4x4_t
+{
+ uint16x4_t val[4];
+} uint16x4x4_t;
+
+typedef struct uint16x8x4_t
+{
+ uint16x8_t val[4];
+} uint16x8x4_t;
+
+typedef struct uint32x2x4_t
+{
+ uint32x2_t val[4];
+} uint32x2x4_t;
+
+typedef struct uint32x4x4_t
+{
+ uint32x4_t val[4];
+} uint32x4x4_t;
+
+typedef struct uint64x1x4_t
+{
+ uint64x1_t val[4];
+} uint64x1x4_t;
+
+typedef struct uint64x2x4_t
+{
+ uint64x2_t val[4];
+} uint64x2x4_t;
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+typedef struct float16x4x4_t
+{
+ float16x4_t val[4];
+} float16x4x4_t;
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+typedef struct float16x8x4_t
+{
+ float16x8_t val[4];
+} float16x8x4_t;
+#endif
+
+typedef struct float32x2x4_t
+{
+ float32x2_t val[4];
+} float32x2x4_t;
+
+typedef struct float32x4x4_t
+{
+ float32x4_t val[4];
+} float32x4x4_t;
+
+typedef struct poly8x8x4_t
+{
+ poly8x8_t val[4];
+} poly8x8x4_t;
+
+typedef struct poly8x16x4_t
+{
+ poly8x16_t val[4];
+} poly8x16x4_t;
+
+typedef struct poly16x4x4_t
+{
+ poly16x4_t val[4];
+} poly16x4x4_t;
+
+typedef struct poly16x8x4_t
+{
+ poly16x8_t val[4];
+} poly16x8x4_t;
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+typedef struct poly64x1x4_t
+{
+ poly64x1_t val[4];
+} poly64x1x4_t;
+
+
+typedef struct poly64x2x4_t
+{
+ poly64x2_t val[4];
+} poly64x2x4_t;
+#pragma GCC pop_options
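+
+/* Note: the <type>x<lanes>x<N> structures above are the operand and result
+   types of the de-interleaving and re-interleaving memory intrinsics
+   (vld2/vst2, vld3/vst3, vld4/vst4), which split or merge N consecutive
+   structure elements across N registers.  */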
+
+/* vadd */
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a + __b;
+#else
+ return (float32x2_t) __builtin_neon_vaddv2sf (__a, __b);
+#endif
+}
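+
+/* Note on the __FAST_MATH__ split above (repeated in the other float
+   intrinsics): NEON single-precision arithmetic is not fully IEEE 754
+   compliant (denormals are flushed to zero), so outside -ffast-math GCC does
+   not lower a generic vector "+" to a NEON add; the builtin is used instead
+   to guarantee the VADD.F32 instruction.  Under fast math the plain operator
+   is permitted and optimizes more freely.  */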
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a + __b;
+#else
+ return (float32x4_t) __builtin_neon_vaddv4sf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vaddlsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vaddlsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vaddlsv2si (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vaddluv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vaddluv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vaddluv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
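+
+/* Note: vaddl_* widen before adding, so the full sum of two N-bit lanes is
+   produced in a 2N-bit lane and cannot wrap.  Illustrative use:
+
+     uint16x8_t __s = vaddl_u8 (vdup_n_u8 (200), vdup_n_u8 (100));  (each lane 300)  */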
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddw_s8 (int16x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vaddwsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddw_s16 (int32x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vaddwsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddw_s32 (int64x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vaddwsv2si (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vaddwuv8qi ((int16x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vaddwuv4hi ((int32x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vaddwuv2si ((int64x2_t) __a, (int32x2_t) __b);
+}
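+
+/* Note: vaddw_* are the accumulate form of vaddl_*: only the second operand
+   is widened, then added to the already-wide first operand.  */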
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vhaddsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vhaddsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vhaddsv2si (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vhadduv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vhadduv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vhadduv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vhaddsv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vhaddsv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vhaddsv4si (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vhadduv16qi ((int8x16_t) __a, (int8x16_t) __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vhadduv8hi ((int16x8_t) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vhadduv4si ((int32x4_t) __a, (int32x4_t) __b);
+}
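+
+/* Note: vhadd computes the truncating halving add (__a + __b) >> 1 in a wide
+   enough intermediate, so vhadd_u8 on lanes of 255 and 1 yields 128 rather
+   than overflowing.  The vrhadd variants below round instead:
+   (__a + __b + 1) >> 1.  */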
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vrhaddsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vrhaddsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vrhaddsv2si (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vrhadduv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vrhadduv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vrhadduv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vrhaddsv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vrhaddsv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vrhaddsv4si (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vrhadduv16qi ((int8x16_t) __a, (int8x16_t) __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vrhadduv8hi ((int16x8_t) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vrhadduv4si ((int32x4_t) __a, (int32x4_t) __b);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqaddsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqaddsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqaddsv2si (__a, __b);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqaddsdi (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqadduv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqadduv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqadduv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqaddudi ((int64x1_t) __a, (int64x1_t) __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqaddsv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqaddsv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqaddsv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqaddsv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqadduv16qi ((int8x16_t) __a, (int8x16_t) __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqadduv8hi ((int16x8_t) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqadduv4si ((int32x4_t) __a, (int32x4_t) __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqadduv2di ((int64x2_t) __a, (int64x2_t) __b);
+}
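+
+/* Note: vqadd saturates rather than wraps; lanes clamp to the type's minimum
+   or maximum.  E.g. vqadd_u8 on 250 and 10 gives 255, and vqadd_s8 on 120
+   and 10 gives 127.  */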
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vaddhnv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vaddhnv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vaddhnv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vaddhnv8hi ((int16x8_t) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vaddhnv4si ((int32x4_t) __a, (int32x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vaddhnv2di ((int64x2_t) __a, (int64x2_t) __b);
+}
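+
+/* Note: vaddhn returns the high half of each 2N-bit sum, i.e. (__a + __b) >> N
+   narrowed to N bits; the vraddhn variants below add the rounding constant
+   1 << (N - 1) before the shift.  */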
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vraddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vraddhnv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vraddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vraddhnv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vraddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vraddhnv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vraddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vraddhnv8hi ((int16x8_t) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vraddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vraddhnv4si ((int32x4_t) __a, (int32x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vraddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vraddhnv2di ((int64x2_t) __a, (int64x2_t) __b);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_f32 (float32x2_t __a, float32x2_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a * __b;
+#else
+ return (float32x2_t) __builtin_neon_vmulfv2sf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_f32 (float32x4_t __a, float32x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a * __b;
+#else
+ return (float32x4_t) __builtin_neon_vmulfv4sf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly8x8_t)__builtin_neon_vmulpv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (poly8x16_t)__builtin_neon_vmulpv16qi ((int8x16_t) __a, (int8x16_t) __b);
+}
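+
+/* Note: vmul_p8/vmulq_p8 are carry-less (GF(2) polynomial) multiplies; partial
+   products are combined with XOR and only the low 8 bits of each 16-bit
+   product are kept per lane.  vmull_p8 below returns the full 16-bit
+   products.  */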
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulhv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulhv2si (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulhv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulhv4si (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqrdmulhv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqrdmulhv2si (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqrdmulhv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqrdmulhv4si (__a, __b);
+}
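+
+/* Note: vqdmulh returns the saturated high half of the doubled product,
+   sat ((2 * __a * __b) >> N) for N-bit lanes, i.e. Q15/Q31 fixed-point
+   multiplication.  E.g. vqdmulh_s16 on lanes of 0x4000 (0.5 in Q15) yields
+   0x2000 (0.25).  The vqrdmulh variants round by adding 1 << (N - 1) to the
+   doubled product before the shift.  */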
+
+#ifdef __ARM_FEATURE_QRDMX
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlah_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vqrdmlahv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlah_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vqrdmlahv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlahq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vqrdmlahv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlahq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqrdmlahv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlsh_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vqrdmlshv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlsh_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vqrdmlshv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlshq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vqrdmlshv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlshq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqrdmlshv4si (__a, __b, __c);
+}
+#endif
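+
+/* Note: the vqrdmlah/vqrdmlsh block above requires the ARMv8.1-A rounding
+   doubling multiply-accumulate extension (__ARM_FEATURE_QRDMX); each fuses a
+   vqrdmulh of __b and __c with a saturating accumulate into __a.  */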
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmullsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmullsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vmullsv2si (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmulluv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmulluv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vmulluv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly16x8_t)__builtin_neon_vmullpv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
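+
+/* Note: vmull_* produce the full 2N-bit product of each pair of N-bit lanes
+   in a quad-word result, so the integer forms can never overflow.  */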
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmull_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmullv4hi (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmull_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqdmullv2si (__a, __b);
+}
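+
+/* Note: vqdmull doubles the widened product, sat (2 * __a * __b); saturation
+   can only trigger when both lanes hold the most negative value, since e.g.
+   2 * INT16_MIN * INT16_MIN is one past INT32_MAX.  */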
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vmlav8qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmlav4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmlav2si (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmlav2sf (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vmlav8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmlav4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmlav2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vmlav16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlav8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlav4si (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmlav4sf (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vmlav16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlav8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlav4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c);
+}
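+
+/* Note: vmla computes __a + __b * __c per lane, and vmls below computes
+   __a - __b * __c.  The float forms typically map to VMLA.F32/VMLS.F32,
+   which round the intermediate product; see vfma/vfms further down for the
+   fused, single-rounding forms.  */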
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlalsv8qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlalsv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlalsv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlaluv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlaluv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlaluv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlalv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlalv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vmlsv8qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmlsv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmlsv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmlsv2sf (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vmlsv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmlsv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmlsv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vmlsv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlsv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlsv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmlsv4sf (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vmlsv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlsv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlsv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlslsv8qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlslsv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlslsv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlsluv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlsluv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlsluv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
+}
+
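+/* vqdmlsl_<type> (VQDMLSL): signed saturating doubling multiply-subtract
+   long: __a - saturate (2 * __b * __c), with the product widened.  */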
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlslv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlslv2si (__a, __b, __c);
+}
+
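+/* vfma/vfms (VFMA/VFMS): fused multiply-add and multiply-subtract,
+   __a +/- __b * __c with a single rounding step.  These need the VFPv4
+   fused-MAC instructions, hence the fpu=neon-vfpv4 target below.  */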
+#pragma GCC push_options
+#pragma GCC target ("fpu=neon-vfpv4")
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfma_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vfmav2sf (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vfmav4sf (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfms_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vfmsv2sf (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vfmsv4sf (__a, __b, __c);
+}
+#pragma GCC pop_options
+
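+/* Armv8 round-to-integral family (VRINT): vrndn = to nearest, ties to
+   even; vrnda = to nearest, ties away from zero; vrndp = toward +Inf;
+   vrndm = toward -Inf; vrndx = current rounding mode, raising Inexact;
+   vrnd = toward zero (truncate).  */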
+#if __ARM_ARCH >= 8
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndn_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrintnv2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndnq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrintnv4sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrnda_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrintav2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndaq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrintav4sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndp_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrintpv2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndpq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrintpv4sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndm_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrintmv2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndmq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrintmv4sf (__a);
+}
+
+#endif
+
+#if __ARM_ARCH >= 8
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndx_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrintxv2sf (__a);
+}
+
+#endif
+
+#if __ARM_ARCH >= 8
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndxq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrintxv4sf (__a);
+}
+
+#endif
+
+#if __ARM_ARCH >= 8
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrnd_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrintzv2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrintzv4sf (__a);
+}
+
+#endif
+
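+/* vsub (VSUB): element-wise subtract; integer lanes wrap on overflow.
+   The float variants use the plain operator only under __FAST_MATH__,
+   where GCC may emit NEON code for generic vector float operations;
+   otherwise the builtin is called so the VSUB instruction is still
+   selected.  */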
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsub_f32 (float32x2_t __a, float32x2_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a - __b;
+#else
+ return (float32x2_t) __builtin_neon_vsubv2sf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubq_f32 (float32x4_t __a, float32x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a - __b;
+#else
+ return (float32x4_t) __builtin_neon_vsubv4sf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a - __b;
+}
+
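+/* vsubl (VSUBL): subtract long; both narrow operands are widened and
+   the difference is returned in the double-width vector.  */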
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vsublsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vsublsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vsublsv2si (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vsubluv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vsubluv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vsubluv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
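+/* vsubw (VSUBW): subtract wide; the narrow __b is widened and
+   subtracted from the already-wide __a.  */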
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubw_s8 (int16x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vsubwsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubw_s16 (int32x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vsubwsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubw_s32 (int64x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vsubwsv2si (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vsubwuv8qi ((int16x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vsubwuv4hi ((int32x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vsubwuv2si ((int64x2_t) __a, (int32x2_t) __b);
+}
+
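+/* vhsub (VHSUB): halving subtract, (__a - __b) >> 1 computed without
+   intermediate overflow.  */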
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vhsubsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vhsubsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vhsubsv2si (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vhsubuv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vhsubuv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vhsubuv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vhsubsv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vhsubsv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vhsubsv4si (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vhsubuv16qi ((int8x16_t) __a, (int8x16_t) __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vhsubuv8hi ((int16x8_t) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vhsubuv4si ((int32x4_t) __a, (int32x4_t) __b);
+}
+
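+/* vqsub (VQSUB): saturating subtract; results clamp at the type's
+   minimum and maximum instead of wrapping.  */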
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqsubsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqsubsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqsubsv2si (__a, __b);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqsubsdi (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqsubuv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqsubuv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqsubuv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqsubudi ((int64x1_t) __a, (int64x1_t) __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqsubsv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqsubsv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqsubsv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqsubsv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqsubuv16qi ((int8x16_t) __a, (int8x16_t) __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqsubuv8hi ((int16x8_t) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqsubuv4si ((int32x4_t) __a, (int32x4_t) __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqsubuv2di ((int64x2_t) __a, (int64x2_t) __b);
+}
+
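+/* vsubhn (VSUBHN): subtract and narrow, returning the most significant
+   half of each difference.  */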
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vsubhnv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vsubhnv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vsubhnv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vsubhnv4si ((int32x4_t) __a, (int32x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vsubhnv2di ((int64x2_t) __a, (int64x2_t) __b);
+}
+
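+/* vrsubhn (VRSUBHN): as vsubhn, but the high half is rounded rather
+   than truncated.  */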
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsubhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vrsubhnv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsubhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vrsubhnv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsubhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vrsubhnv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vrsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vrsubhnv4si ((int32x4_t) __a, (int32x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vrsubhnv2di ((int64x2_t) __a, (int64x2_t) __b);
+}
+
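+/* vceq (VCEQ): element-wise compare equal, returning an all-ones mask
+   in each matching lane and zero otherwise.  The float variants follow
+   the same __FAST_MATH__ pattern as vsub above.  */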
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceq_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) (__a == __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceq_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) (__a == __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceq_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) (__a == __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceq_f32 (float32x2_t __a, float32x2_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint32x2_t) (__a == __b);
+#else
+ return (uint32x2_t)__builtin_neon_vceqv2sf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceq_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) (__a == __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceq_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) (__a == __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceq_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) (__a == __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceq_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t) (__a == __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) (__a == __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) (__a == __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) (__a == __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqq_f32 (float32x4_t __a, float32x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint32x4_t) (__a == __b);
+#else
+ return (uint32x4_t)__builtin_neon_vceqv4sf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) (__a == __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) (__a == __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) (__a == __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t) (__a == __b);
+}
+
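+/* vcge/vcle/vcgt/vclt (VCGE/VCLE/VCGT/VCLT): ordered element-wise
+   comparisons (>=, <=, >, <), each returning a per-lane all-ones or
+   all-zeros mask.  */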
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcge_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcge_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcge_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcge_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcge_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcge_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcge_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgeq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgeq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgeq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgeq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgeq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgeq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgeq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) (__a >= __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcle_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcle_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcle_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcle_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcle_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcle_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcle_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcleq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcleq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcleq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcleq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcleq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcleq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcleq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) (__a <= __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgt_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) (__a > __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgt_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) (__a > __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgt_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) (__a > __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t) (__a > __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) (__a > __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) (__a > __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) (__a > __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgtq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) (__a > __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgtq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) (__a > __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgtq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) (__a > __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgtq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t) (__a > __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgtq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) (__a > __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgtq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) (__a > __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgtq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) (__a > __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclt_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) (__a < __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclt_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) (__a < __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclt_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) (__a < __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t) (__a < __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) (__a < __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) (__a < __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) (__a < __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcltq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) (__a < __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcltq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) (__a < __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcltq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) (__a < __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcltq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t) (__a < __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcltq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) (__a < __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcltq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) (__a < __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcltq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) (__a < __b);
+}
+
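+/* vabs (VABS) and vqabs (VQABS): element-wise absolute value; the
+   saturating form maps INT_MIN to INT_MAX instead of wrapping.  */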
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabs_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vabsv8qi (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabs_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vabsv4hi (__a);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabs_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vabsv2si (__a);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabs_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vabsv2sf (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vabsv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vabsv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vabsv4si (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabsq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vabsv4sf (__a);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqabs_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vqabsv8qi (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqabs_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vqabsv4hi (__a);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqabs_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vqabsv2si (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqabsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vqabsv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqabsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vqabsv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqabsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vqabsv4si (__a);
+}
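+
+/* vcage/vcale/vcagt/vcalt (VACGE/VACGT): compare the absolute values
+   of the operands; the "le"/"lt" forms simply swap the arguments of
+   the corresponding builtin.  */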
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcage_f32 (float32x2_t __a, float32x2_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint32x2_t) (vabs_f32 (__a) >= vabs_f32 (__b));
+#else
+ return (uint32x2_t)__builtin_neon_vcagev2sf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcageq_f32 (float32x4_t __a, float32x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint32x4_t) (vabsq_f32 (__a) >= vabsq_f32 (__b));
+#else
+ return (uint32x4_t)__builtin_neon_vcagev4sf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcale_f32 (float32x2_t __a, float32x2_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint32x2_t) (vabs_f32 (__a) <= vabs_f32 (__b));
+#else
+ return (uint32x2_t)__builtin_neon_vcagev2sf (__b, __a);
+#endif
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaleq_f32 (float32x4_t __a, float32x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint32x4_t) (vabsq_f32 (__a) <= vabsq_f32 (__b));
+#else
+ return (uint32x4_t)__builtin_neon_vcagev4sf (__b, __a);
+#endif
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcagt_f32 (float32x2_t __a, float32x2_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint32x2_t) (vabs_f32 (__a) > vabs_f32 (__b));
+#else
+ return (uint32x2_t)__builtin_neon_vcagtv2sf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcagtq_f32 (float32x4_t __a, float32x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint32x4_t) (vabsq_f32 (__a) > vabsq_f32 (__b));
+#else
+ return (uint32x4_t)__builtin_neon_vcagtv4sf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcalt_f32 (float32x2_t __a, float32x2_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint32x2_t) (vabs_f32 (__a) < vabs_f32 (__b));
+#else
+ return (uint32x2_t)__builtin_neon_vcagtv2sf (__b, __a);
+#endif
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaltq_f32 (float32x4_t __a, float32x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint32x4_t) (vabsq_f32 (__a) < vabsq_f32 (__b));
+#else
+ return (uint32x4_t)__builtin_neon_vcagtv4sf (__b, __a);
+#endif
+}
+
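+/* vtst (VTST): bitwise test; a lane is all-ones when (__a & __b) has
+   any bit set, else zero.  */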
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtst_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtst_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtst_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtst_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtst_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtst_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtst_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtst_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ return (uint16x4_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtstq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtstq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtstq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtstq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtstq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtstq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtstq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t) ((__a & __b) != 0);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtstq_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+ return (uint16x8_t) ((__a & __b) != 0);
+}
+
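+/* vabd (VABD): element-wise absolute difference, |__a - __b|.  */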
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vabdsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vabdsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vabdsv2si (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vabdfv2sf (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vabduv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vabduv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vabduv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vabdsv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vabdsv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vabdsv4si (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vabdfv4sf (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vabduv16qi ((int8x16_t) __a, (int8x16_t) __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vabduv8hi ((int16x8_t) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vabduv4si ((int32x4_t) __a, (int32x4_t) __b);
+}
+
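+/* vabdl (VABDL): absolute difference with the result widened.  */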
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vabdlsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vabdlsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vabdlsv2si (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vabdluv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vabdluv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vabdluv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
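+/* vaba (VABA): absolute difference and accumulate,
+   __a + |__b - __c|.  */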
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaba_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vabasv8qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaba_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vabasv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaba_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vabasv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaba_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vabauv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaba_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vabauv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaba_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vabauv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vabasv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vabasv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vabasv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vabauv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vabauv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vabauv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c);
+}
+
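+/* vabal (VABAL): widening absolute difference and accumulate.  */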
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vabalsv8qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vabalsv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vabalsv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vabaluv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vabaluv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vabaluv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
+}
+
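+/* vmax (VMAX): element-wise maximum.  */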
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmax_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vmaxsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmax_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vmaxsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmax_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vmaxsv2si (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmax_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vmaxfv2sf (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmax_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vmaxuv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmax_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vmaxuv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmax_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vmaxuv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vmaxsv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmaxsv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmaxsv4si (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vmaxfv4sf (__a, __b);
+}
+
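+/* vmaxnm/vminnm (VMAXNM/VMINNM): Armv8 maxNum/minNum with IEEE
+   754-2008 semantics: when exactly one operand is a quiet NaN, the
+   other operand is returned.  These need fpu=neon-fp-armv8.  */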
+#pragma GCC push_options
+#pragma GCC target ("fpu=neon-fp-armv8")
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxnm_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vmaxnmv2sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxnmq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vmaxnmv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminnm_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vminnmv2sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminnmq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vminnmv4sf (__a, __b);
+}
+#pragma GCC pop_options
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vmaxuv16qi ((int8x16_t) __a, (int8x16_t) __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmaxuv8hi ((int16x8_t) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmaxuv4si ((int32x4_t) __a, (int32x4_t) __b);
+}
+
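+/* vmin/vminq: element-wise minimum of __a and __b.  As with vmax, the
+   unsigned variants cast to the signed vector types because the
+   builtins are declared with signed prototypes only.  */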
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmin_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vminsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmin_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vminsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmin_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vminsv2si (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmin_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vminfv2sf (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmin_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vminuv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmin_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vminuv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmin_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vminuv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vminsv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vminsv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vminsv4si (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vminfv4sf (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vminuv16qi ((int8x16_t) __a, (int8x16_t) __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vminuv8hi ((int16x8_t) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vminuv4si ((int32x4_t) __a, (int32x4_t) __b);
+}
+
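+/* vpadd: pairwise addition.  Adjacent lane pairs of __a are summed into
+   the low half of the result and pairs of __b into the high half; only
+   the 64-bit (non-q) forms exist on AArch32 NEON.  */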
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vpaddv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpaddv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpaddv2si (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vpaddv2sf (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vpaddv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpaddv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpaddv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
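+/* vpaddl: pairwise add long.  Adjacent lane pairs within __a are summed
+   and each sum is widened to twice the element width, so the additions
+   cannot overflow.  */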
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddl_s8 (int8x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vpaddlsv8qi (__a);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddl_s16 (int16x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vpaddlsv4hi (__a);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddl_s32 (int32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vpaddlsv2si (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddl_u8 (uint8x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vpaddluv8qi ((int8x8_t) __a);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddl_u16 (uint16x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vpaddluv4hi ((int16x4_t) __a);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddl_u32 (uint32x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vpaddluv2si ((int32x2_t) __a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddlq_s8 (int8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vpaddlsv16qi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddlq_s16 (int16x8_t __a)
+{
+ return (int32x4_t)__builtin_neon_vpaddlsv8hi (__a);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddlq_s32 (int32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vpaddlsv4si (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddlq_u8 (uint8x16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vpaddluv16qi ((int8x16_t) __a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddlq_u16 (uint16x8_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vpaddluv8hi ((int16x8_t) __a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddlq_u32 (uint32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vpaddluv4si ((int32x4_t) __a);
+}
+
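+/* vpadal: pairwise add and accumulate long.  Pairs of __b are summed,
+   widened, and added onto the lanes of the wide accumulator __a.
+   Illustrative sketch (acc16 and bytes are hypothetical variables),
+   summing a stream of bytes without overflow:
+     acc16 = vpadal_u8 (acc16, bytes);  */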
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadal_s8 (int16x4_t __a, int8x8_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpadalsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadal_s16 (int32x2_t __a, int16x4_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpadalsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadal_s32 (int64x1_t __a, int32x2_t __b)
+{
+ return (int64x1_t)__builtin_neon_vpadalsv2si (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadal_u8 (uint16x4_t __a, uint8x8_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpadaluv8qi ((int16x4_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadal_u16 (uint32x2_t __a, uint16x4_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpadaluv4hi ((int32x2_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadal_u32 (uint64x1_t __a, uint32x2_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vpadaluv2si ((int64x1_t) __a, (int32x2_t) __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadalq_s8 (int16x8_t __a, int8x16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vpadalsv16qi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadalq_s16 (int32x4_t __a, int16x8_t __b)
+{
+ return (int32x4_t)__builtin_neon_vpadalsv8hi (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadalq_s32 (int64x2_t __a, int32x4_t __b)
+{
+ return (int64x2_t)__builtin_neon_vpadalsv4si (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadalq_u8 (uint16x8_t __a, uint8x16_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vpadaluv16qi ((int16x8_t) __a, (int8x16_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadalq_u16 (uint32x4_t __a, uint16x8_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vpadaluv8hi ((int32x4_t) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadalq_u32 (uint64x2_t __a, uint32x4_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vpadaluv4si ((int64x2_t) __a, (int32x4_t) __b);
+}
+
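+/* vpmax: pairwise maximum.  Each result lane is the larger element of
+   an adjacent pair, taken from __a for the low half of the result and
+   from __b for the high half.  */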
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmax_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vpmaxsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmax_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpmaxsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmax_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpmaxsv2si (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmax_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vpmaxfv2sf (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmax_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vpmaxuv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmax_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpmaxuv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmax_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpmaxuv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
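+/* vpmin: pairwise minimum, with the same lane layout as vpmax.  */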
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmin_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vpminsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmin_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpminsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmin_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpminsv2si (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmin_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vpminfv2sf (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmin_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vpminuv8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmin_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpminuv4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmin_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpminuv2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
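+/* vrecps: Newton-Raphson step for reciprocal refinement; computes
+   2.0f - __a * __b per lane.  Illustrative sketch of one refinement of
+   an estimate x0 = vrecpe_f32 (d):
+     x1 = vmul_f32 (x0, vrecps_f32 (d, x0));  */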
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecps_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vrecpsv2sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecpsq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vrecpsv4sf (__a, __b);
+}
+
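+/* vrsqrts: Newton-Raphson step for reciprocal square root; computes
+   (3.0f - __a * __b) / 2.0f per lane.  Sketch, with an initial
+   estimate x0 = vrsqrte_f32 (d):
+     x1 = vmul_f32 (x0, vrsqrts_f32 (vmul_f32 (d, x0), x0));  */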
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrts_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vrsqrtsv2sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrtsq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vrsqrtsv4sf (__a, __b);
+}
+
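+/* vshl: shift each lane of __a left by the signed shift count in the
+   corresponding lane of __b; negative counts shift right.  */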
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vshlsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vshlsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vshlsv2si (__a, __b);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vshlsdi (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vshluv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vshluv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vshluv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vshludi ((int64x1_t) __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vshlsv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vshlsv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vshlsv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vshlsv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vshluv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vshluv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vshluv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vshluv2di ((int64x2_t) __a, __b);
+}
+
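+/* vrshl: as vshl, but right shifts (negative counts) round to nearest
+   rather than rounding towards minus infinity.  */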
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vrshlsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vrshlsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vrshlsv2si (__a, __b);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vrshlsdi (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vrshluv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vrshluv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vrshluv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vrshludi ((int64x1_t) __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vrshlsv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vrshlsv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vrshlsv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vrshlsv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vrshluv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vrshluv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vrshluv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vrshluv2di ((int64x2_t) __a, __b);
+}
+
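+/* vqshl: as vshl, but left shifts saturate to the lane's range and set
+   the QC (cumulative saturation) flag on overflow.  */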
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqshlsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqshlsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqshlsv2si (__a, __b);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqshlsdi (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshluv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshluv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshluv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshludi ((int64x1_t) __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqshlsv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqshlsv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqshlsv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqshlsv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshluv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshluv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshluv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshluv2di ((int64x2_t) __a, __b);
+}
+
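+/* vqrshl: combines the saturating behaviour of vqshl with the rounding
+   right shifts of vrshl.  */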
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqrshlsv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqrshlsv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqrshlsv2si (__a, __b);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqrshlsdi (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqrshluv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqrshluv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqrshluv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqrshludi ((int64x1_t) __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqrshlsv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqrshlsv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqrshlsv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqrshlsv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqrshluv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqrshluv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqrshluv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqrshluv2di ((int64x2_t) __a, __b);
+}
+
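+/* vshr_n: shift each lane right by the immediate __b (1 up to the lane
+   width in bits); arithmetic shift for signed types, logical shift for
+   unsigned.  */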
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshr_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshrs_nv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshr_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshrs_nv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshr_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshrs_nv2si (__a, __b);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshr_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vshrs_ndi (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshr_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshru_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshr_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshru_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshr_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshru_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshr_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vshru_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vshrs_nv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshrs_nv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshrs_nv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshrs_nv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vshru_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshru_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshru_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshru_nv2di ((int64x2_t) __a, __b);
+}
+
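+/* vrshr_n: as vshr_n, with rounding to nearest.  */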
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshr_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vrshrs_nv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshr_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vrshrs_nv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshr_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vrshrs_nv2si (__a, __b);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshr_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vrshrs_ndi (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshr_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vrshru_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshr_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vrshru_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshr_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vrshru_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshr_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vrshru_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vrshrs_nv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vrshrs_nv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vrshrs_nv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vrshrs_nv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vrshru_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vrshru_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vrshru_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vrshru_nv2di ((int64x2_t) __a, __b);
+}
+
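+/* vshrn_n: shift right by the immediate and narrow, keeping the low
+   half of each shifted lane in a result of half the element width.  */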
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshrn_nv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshrn_nv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshrn_nv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshrn_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshrn_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshrn_nv2di ((int64x2_t) __a, __b);
+}
+
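+/* vrshrn_n: rounding variant of vshrn_n.  */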
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vrshrn_nv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vrshrn_nv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vrshrn_nv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vrshrn_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vrshrn_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vrshrn_nv2di ((int64x2_t) __a, __b);
+}
+
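+/* vqshrn_n: saturating variant of vshrn_n; values that do not fit the
+   narrow type saturate rather than being truncated.  */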
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vqshrns_nv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vqshrns_nv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vqshrns_nv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrnu_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrnu_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrnu_nv2di ((int64x2_t) __a, __b);
+}
+
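+/* vqrshrn_n: saturating and rounding narrowing shift right.  */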
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vqrshrns_nv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vqrshrns_nv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vqrshrns_nv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqrshrnu_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqrshrnu_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqrshrnu_nv2di ((int64x2_t) __a, __b);
+}
+
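+/* vqshrun_n: narrowing shift right of a signed input with unsigned
+   saturation; negative inputs clamp to zero.  */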
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshrun_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrun_nv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshrun_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrun_nv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshrun_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrun_nv2di (__a, __b);
+}
+
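+/* vqrshrun_n: rounding variant of vqshrun_n.  */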
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshrun_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqrshrun_nv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshrun_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqrshrun_nv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrshrun_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqrshrun_nv2di (__a, __b);
+}
+
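+/* vshl_n: shift each lane left by the immediate __b (0 up to one less
+   than the lane width in bits).  */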
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshl_nv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshl_nv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshl_nv2si (__a, __b);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vshl_ndi (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshl_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshl_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshl_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshl_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vshl_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vshl_nv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshl_nv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshl_nv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshl_nv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vshl_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshl_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshl_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshl_nv2di ((int64x2_t) __a, __b);
+}
+
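+/* vqshl_n: saturating shift left by immediate; results that overflow
+   the lane saturate and set the QC flag.  */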
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vqshl_s_nv8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vqshl_s_nv4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vqshl_s_nv2si (__a, __b);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vqshl_s_ndi (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshl_u_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshl_u_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshl_u_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshl_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshl_u_ndi ((int64x1_t) __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vqshl_s_nv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vqshl_s_nv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vqshl_s_nv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vqshl_s_nv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshl_u_nv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshl_u_nv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshl_u_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshl_u_nv2di ((int64x2_t) __a, __b);
+}
+
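+/* vqshlu_n: shift a signed input left by the immediate and saturate
+   the result to the corresponding unsigned range.  */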
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlu_n_s8 (int8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshlu_nv8qi (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlu_n_s16 (int16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshlu_nv4hi (__a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlu_n_s32 (int32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshlu_nv2si (__a, __b);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshlu_n_s64 (int64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshlu_ndi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshluq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshlu_nv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshluq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshlu_nv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshluq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshlu_nv4si (__a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqshluq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshlu_nv2di (__a, __b);
+}
+
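+/* vshll_n: shift left and widen; each lane is shifted by the immediate
+   and the result lanes have twice the element width.  */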
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshll_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshlls_nv8qi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshll_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshlls_nv4hi (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshll_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshlls_nv2si (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshll_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshllu_nv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshll_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshllu_nv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshll_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshllu_nv2si ((int32x2_t) __a, __b);
+}
+
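+/* vsra_n: shift each lane of __b right by the immediate __c and add
+   the result onto the corresponding lane of __a.  */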
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsras_nv8qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsras_nv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsras_nv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsras_ndi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsrau_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsrau_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsrau_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsrau_ndi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsras_nv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsras_nv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsras_nv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsras_nv2di (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsrau_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsrau_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsrau_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsrau_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
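+/* Usage sketch (illustrative): vsra_n_* computes, per lane,
+   __a + (__b >> __c), i.e. a constant right shift that accumulates into
+   the first operand:
+
+     uint32x2_t acc = vdup_n_u32 (0);
+     acc = vsra_n_u32 (acc, samples, 8);   // acc += samples >> 8, lane-wise
+
+   "samples" is a hypothetical uint32x2_t input.  The vrsra_n_* variants
+   below round the shifted value before accumulating.  */
+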
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vrsras_nv8qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vrsras_nv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vrsras_nv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vrsras_ndi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vrsrau_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vrsrau_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vrsrau_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vrsrau_ndi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vrsras_nv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vrsras_nv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vrsras_nv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vrsras_nv2di (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vrsrau_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vrsrau_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vrsrau_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vrsrau_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsri_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vsri_ndi (__a, __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsri_nv8qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsri_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsri_nv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsri_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsri_nv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsri_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsri_ndi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsri_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsri_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsri_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsri_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsri_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsri_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsri_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsri_ndi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsri_n_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vsri_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsri_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vsri_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsriq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vsri_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsri_nv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsri_nv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsri_nv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsriq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsri_nv2di (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsri_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsri_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsri_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsriq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsri_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsriq_n_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vsri_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsriq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vsri_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsli_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vsli_ndi (__a, __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsli_nv8qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsli_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsli_nv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsli_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsli_nv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsli_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsli_ndi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsli_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsli_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsli_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsli_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsli_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsli_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsli_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsli_ndi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsli_n_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vsli_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsli_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vsli_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsliq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vsli_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsli_nv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsli_nv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsli_nv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsliq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsli_nv2di (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsli_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsli_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsli_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsliq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsli_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsliq_n_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vsli_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsliq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vsli_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
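+/* Usage sketch (illustrative): vsri_n_* shifts __b right by a constant and
+   inserts it into __a, keeping __a's top __c bits; vsli_n_* is the
+   left-shift counterpart, keeping __a's low __c bits.  A common use is
+   packing bit fields:
+
+     uint16x4_t packed = vsli_n_u16 (r, g, 5);   // (g << 5) | (r & 0x1F), lane-wise
+
+   "r" and "g" are hypothetical uint16x4_t values holding 5-bit channels.  */
+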
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vneg_s8 (int8x8_t __a)
+{
+ return -__a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vneg_s16 (int16x4_t __a)
+{
+ return -__a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vneg_s32 (int32x2_t __a)
+{
+ return -__a;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vneg_f32 (float32x2_t __a)
+{
+ return -__a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vnegq_s8 (int8x16_t __a)
+{
+ return -__a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vnegq_s16 (int16x8_t __a)
+{
+ return -__a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vnegq_s32 (int32x4_t __a)
+{
+ return -__a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vnegq_f32 (float32x4_t __a)
+{
+ return -__a;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqneg_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vqnegv8qi (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqneg_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vqnegv4hi (__a);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqneg_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vqnegv2si (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqnegq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vqnegv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqnegq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vqnegv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqnegq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vqnegv4si (__a);
+}
+
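+/* Note (illustrative): vneg_* wraps on overflow, while vqneg_* saturates,
+   which differs only for the most negative value, e.g. for int8 lanes:
+
+     vneg_s8 (vdup_n_s8 (-128));    // lanes wrap back to -128
+     vqneg_s8 (vdup_n_s8 (-128));   // lanes saturate to +127
+*/
+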
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_s8 (int8x8_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_s16 (int16x4_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_s32 (int32x2_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_u8 (uint8x8_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_u16 (uint16x4_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_u32 (uint32x2_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t) ~((int8x8_t) __a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_s8 (int8x16_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_s16 (int16x8_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_s32 (int32x4_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_u8 (uint8x16_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_u16 (uint16x8_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_u32 (uint32x4_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t) ~((int8x16_t) __a);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcls_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vclsv8qi (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcls_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vclsv4hi (__a);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcls_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vclsv2si (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vclsv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vclsv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vclsv4si (__a);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclz_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vclzv8qi (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclz_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vclzv4hi (__a);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclz_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vclzv2si (__a);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclz_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vclzv8qi ((int8x8_t) __a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclz_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vclzv4hi ((int16x4_t) __a);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclz_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vclzv2si ((int32x2_t) __a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclzq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vclzv16qi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclzq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vclzv8hi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclzq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vclzv4si (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclzq_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vclzv16qi ((int8x16_t) __a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclzq_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vclzv8hi ((int16x8_t) __a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclzq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vclzv4si ((int32x4_t) __a);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcnt_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vcntv8qi (__a);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcnt_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcnt_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcntq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vcntv16qi (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcntq_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcntq_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a);
+}
+
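+/* Usage sketch (illustrative): vcls_* counts, per lane, the consecutive
+   bits below the sign bit that match it, vclz_* counts leading zero bits,
+   and vcnt_* counts set bits in each byte, e.g.
+
+     vclz_u8 (vdup_n_u8 (1));      // each lane: 7
+     vcnt_u8 (vdup_n_u8 (0xF0));   // each lane: 4
+*/
+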
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecpe_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrecpev2sf (__a);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecpe_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vrecpev2si ((int32x2_t) __a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecpeq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrecpev4sf (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecpeq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vrecpev4si ((int32x4_t) __a);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrte_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrsqrtev2sf (__a);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrte_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vrsqrtev2si ((int32x2_t) __a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrteq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrsqrtev4sf (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrteq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vrsqrtev4si ((int32x4_t) __a);
+}
+
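+/* Note (illustrative): vrecpe_* and vrsqrte_* return low-precision
+   estimates of 1/x and 1/sqrt(x) (about 8 bits).  For more precision they
+   are normally refined with Newton-Raphson steps, e.g. for 1/x:
+
+     float32x2_t est = vrecpe_f32 (x);
+     est = vmul_f32 (est, vrecps_f32 (x, est));   // one refinement step
+
+   "x" is a hypothetical float32x2_t; vrecps_f32 and vmul_f32 are defined
+   elsewhere in this header.  */
+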
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_s8 (int8x8_t __a, const int __b)
+{
+ return (int8_t)__builtin_neon_vget_lanev8qi (__a, __b);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_s16 (int16x4_t __a, const int __b)
+{
+ return (int16_t)__builtin_neon_vget_lanev4hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_s32 (int32x2_t __a, const int __b)
+{
+ return (int32_t)__builtin_neon_vget_lanev2si (__a, __b);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+/* Functions cannot accept or return __FP16 types. Even if the function
+ were marked always-inline so there were no call sites, the declaration
+ would nonetheless raise an error. Hence, we must use a macro instead. */
+
+#define vget_lane_f16(__v, __idx) \
+ __extension__ \
+ ({ \
+ float16x4_t __vec = (__v); \
+ __builtin_arm_lane_check (4, __idx); \
+ float16_t __res = __vec[__arm_lane(__vec, __idx)]; \
+ __res; \
+ })
+#endif
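+
+/* Usage sketch (illustrative): since vget_lane_f16 is a macro rather than
+   a function, it is still invoked like the lane intrinsics above, provided
+   __fp16 support is enabled:
+
+     float16x4_t v = vcreate_f16 (0);        // all-zero vector
+     float16_t lo = vget_lane_f16 (v, 0);    // 0.0
+*/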
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_f32 (float32x2_t __a, const int __b)
+{
+ return (float32_t)__builtin_neon_vget_lanev2sf (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8_t)__builtin_neon_vget_laneuv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16_t)__builtin_neon_vget_laneuv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32_t)__builtin_neon_vget_laneuv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ extern __inline poly8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return (poly8_t)__builtin_neon_vget_laneuv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline poly16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return (poly16_t)__builtin_neon_vget_laneuv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_s64 (int64x1_t __a, const int __b)
+{
+ return (int64_t)__builtin_neon_vget_lanedi (__a, __b);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_p64 (poly64x1_t __a, const int __b)
+{
+ return (poly64_t)__builtin_neon_vget_lanedi ((int64x1_t) __a, __b);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64_t)__builtin_neon_vget_lanedi ((int64x1_t) __a, __b);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_s8 (int8x16_t __a, const int __b)
+{
+ return (int8_t)__builtin_neon_vget_lanev16qi (__a, __b);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_s16 (int16x8_t __a, const int __b)
+{
+ return (int16_t)__builtin_neon_vget_lanev8hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_s32 (int32x4_t __a, const int __b)
+{
+ return (int32_t)__builtin_neon_vget_lanev4si (__a, __b);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#define vgetq_lane_f16(__v, __idx) \
+ __extension__ \
+ ({ \
+ float16x8_t __vec = (__v); \
+ __builtin_arm_lane_check (8, __idx); \
+ float16_t __res = __vec[__arm_laneq(__vec, __idx)]; \
+ __res; \
+ })
+#endif
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_f32 (float32x4_t __a, const int __b)
+{
+ return (float32_t)__builtin_neon_vget_lanev4sf (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8_t)__builtin_neon_vget_laneuv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16_t)__builtin_neon_vget_laneuv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32_t)__builtin_neon_vget_laneuv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline poly8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_p8 (poly8x16_t __a, const int __b)
+{
+ return (poly8_t)__builtin_neon_vget_laneuv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ extern __inline poly16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_p16 (poly16x8_t __a, const int __b)
+{
+ return (poly16_t)__builtin_neon_vget_laneuv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_s64 (int64x2_t __a, const int __b)
+{
+ return (int64_t)__builtin_neon_vget_lanev2di (__a, __b);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_p64 (poly64x2_t __a, const int __b)
+{
+ return (poly64_t)__builtin_neon_vget_lanev2di ((int64x2_t) __a, __b);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64_t)__builtin_neon_vget_lanev2di ((int64x2_t) __a, __b);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_s8 (int8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vset_lanev2si ((__builtin_neon_si) __a, __b, __c);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#define vset_lane_f16(__e, __v, __idx) \
+ __extension__ \
+ ({ \
+ float16_t __elem = (__e); \
+ float16x4_t __vec = (__v); \
+ __builtin_arm_lane_check (4, __idx); \
+ __vec[__arm_lane (__vec, __idx)] = __elem; \
+ __vec; \
+ })
+#endif
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_f32 (float32_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vset_lanev2sf ((__builtin_neon_sf) __a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_u8 (uint8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_u16 (uint16_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_u32 (uint32_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vset_lanev2si ((__builtin_neon_si) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_p8 (poly8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_p16 (poly16_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_s64 (int64_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vset_lanedi ((__builtin_neon_di) __a, __b, __c);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_u64 (uint64_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vset_lanedi ((__builtin_neon_di) __a, (int64x1_t) __b, __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_p64 (poly64_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vset_lanedi ((__builtin_neon_di) __a, (int64x1_t) __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_s8 (int8_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_s16 (int16_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_s32 (int32_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vset_lanev4si ((__builtin_neon_si) __a, __b, __c);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#define vsetq_lane_f16(__e, __v, __idx) \
+ __extension__ \
+ ({ \
+ float16_t __elem = (__e); \
+ float16x8_t __vec = (__v); \
+ __builtin_arm_lane_check (8, __idx); \
+ __vec[__arm_laneq (__vec, __idx)] = __elem; \
+ __vec; \
+ })
+#endif
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_f32 (float32_t __a, float32x4_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vset_lanev4sf ((__builtin_neon_sf) __a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_u8 (uint8_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_u16 (uint16_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_u32 (uint32_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vset_lanev4si ((__builtin_neon_si) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_p8 (poly8_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_p16 (poly16_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_s64 (int64_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, __b, __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_u64 (uint64_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, (int64x2_t) __b, __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_p64 (poly64_t __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_p64 (uint64_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_s8 (uint64_t __a)
+{
+ return (int8x8_t) __a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_s16 (uint64_t __a)
+{
+ return (int16x4_t) __a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_s32 (uint64_t __a)
+{
+ return (int32x2_t) __a;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_s64 (uint64_t __a)
+{
+ return (int64x1_t) {__a};
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_f16 (uint64_t __a)
+{
+ return (float16x4_t) __a;
+}
+#endif
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_f32 (uint64_t __a)
+{
+ return (float32x2_t) __a;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_u8 (uint64_t __a)
+{
+ return (uint8x8_t) __a;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_u16 (uint64_t __a)
+{
+ return (uint16x4_t) __a;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_u32 (uint64_t __a)
+{
+ return (uint32x2_t) __a;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_u64 (uint64_t __a)
+{
+ return (uint64x1_t) {__a};
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_p8 (uint64_t __a)
+{
+ return (poly8x8_t) __a;
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_p16 (uint64_t __a)
+{
+ return (poly16x4_t) __a;
+}
+
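+/* Usage sketch (illustrative): vcreate_* reinterprets the 64 bits of a
+   uint64_t as a 64-bit vector, with lane 0 in the least significant bits:
+
+     uint8x8_t bytes = vcreate_u8 (0x0706050403020100ULL);   // lane i holds i
+*/
+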
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_s8 (int8_t __a)
+{
+ return (int8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_s16 (int16_t __a)
+{
+ return (int16x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_s32 (int32_t __a)
+{
+ return (int32x2_t) {__a, __a};
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_f32 (float32_t __a)
+{
+ return (float32x2_t) {__a, __a};
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_u8 (uint8_t __a)
+{
+ return (uint8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_u16 (uint16_t __a)
+{
+ return (uint16x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_u32 (uint32_t __a)
+{
+ return (uint32x2_t) {__a, __a};
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_p8 (poly8_t __a)
+{
+ return (poly8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_p16 (poly16_t __a)
+{
+ return (poly16x4_t) {__a, __a, __a, __a};
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_p64 (poly64_t __a)
+{
+ return (poly64x1_t) {__a};
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_s64 (int64_t __a)
+{
+ return (int64x1_t) {__a};
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_u64 (uint64_t __a)
+{
+ return (uint64x1_t) {__a};
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_p64 (poly64_t __a)
+{
+ return (poly64x2_t) {__a, __a};
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_s8 (int8_t __a)
+{
+ return (int8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
+ __a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_s16 (int16_t __a)
+{
+ return (int16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_s32 (int32_t __a)
+{
+ return (int32x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_f32 (float32_t __a)
+{
+ return (float32x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_u8 (uint8_t __a)
+{
+ return (uint8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
+ __a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_u16 (uint16_t __a)
+{
+ return (uint16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_u32 (uint32_t __a)
+{
+ return (uint32x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_p8 (poly8_t __a)
+{
+ return (poly8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
+ __a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_p16 (poly16_t __a)
+{
+ return (poly16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_s64 (int64_t __a)
+{
+ return (int64x2_t) {__a, __a};
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_u64 (uint64_t __a)
+{
+ return (uint64x2_t) {__a, __a};
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_s8 (int8_t __a)
+{
+ return vdup_n_s8 (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_s16 (int16_t __a)
+{
+ return vdup_n_s16 (__a);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_s32 (int32_t __a)
+{
+ return vdup_n_s32 (__a);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_f32 (float32_t __a)
+{
+ return vdup_n_f32 (__a);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_u8 (uint8_t __a)
+{
+ return vdup_n_u8 (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_u16 (uint16_t __a)
+{
+ return vdup_n_u16 (__a);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_u32 (uint32_t __a)
+{
+ return vdup_n_u32 (__a);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_p8 (poly8_t __a)
+{
+ return vdup_n_p8 (__a);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_p16 (poly16_t __a)
+{
+ return vdup_n_p16 (__a);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_s64 (int64_t __a)
+{
+ return vdup_n_s64 (__a);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_u64 (uint64_t __a)
+{
+ return vdup_n_u64 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_s8 (int8_t __a)
+{
+ return vdupq_n_s8 (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_s16 (int16_t __a)
+{
+ return vdupq_n_s16 (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_s32 (int32_t __a)
+{
+ return vdupq_n_s32 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_f32 (float32_t __a)
+{
+ return vdupq_n_f32 (__a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_u8 (uint8_t __a)
+{
+ return vdupq_n_u8 (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_u16 (uint16_t __a)
+{
+ return vdupq_n_u16 (__a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_u32 (uint32_t __a)
+{
+ return vdupq_n_u32 (__a);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_p8 (poly8_t __a)
+{
+ return vdupq_n_p8 (__a);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_p16 (poly16_t __a)
+{
+ return vdupq_n_p16 (__a);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_s64 (int64_t __a)
+{
+ return vdupq_n_s64 (__a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_u64 (uint64_t __a)
+{
+ return vdupq_n_u64 (__a);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vdup_lanev8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vdup_lanev4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vdup_lanev2si (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_f32 (float32x2_t __a, const int __b)
+{
+ return (float32x2_t)__builtin_neon_vdup_lanev2sf (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vdup_lanev8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vdup_lanev4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vdup_lanev2si ((int32x2_t) __a, __b);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return (poly8x8_t)__builtin_neon_vdup_lanev8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return (poly16x4_t)__builtin_neon_vdup_lanev4hi ((int16x4_t) __a, __b);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_p64 (poly64x1_t __a, const int __b)
+{
+ return (poly64x1_t)__builtin_neon_vdup_lanedi (__a, __b);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vdup_lanedi (__a, __b);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vdup_lanedi ((int64x1_t) __a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vdup_lanev16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vdup_lanev8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vdup_lanev4si (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_f32 (float32x2_t __a, const int __b)
+{
+ return (float32x4_t)__builtin_neon_vdup_lanev4sf (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vdup_lanev16qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vdup_lanev8hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vdup_lanev4si ((int32x2_t) __a, __b);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return (poly8x16_t)__builtin_neon_vdup_lanev16qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return (poly16x8_t)__builtin_neon_vdup_lanev8hi ((int16x4_t) __a, __b);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_p64 (poly64x1_t __a, const int __b)
+{
+ return (poly64x2_t)__builtin_neon_vdup_lanev2di (__a, __b);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vdup_lanev2di (__a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vdup_lanev2di ((int64x1_t) __a, __b);
+}
+
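+/* vcombine_<t>: concatenate two 64-bit vectors into one 128-bit vector;
+   __a supplies the low half of the result and __b the high half.  */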
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ return (poly64x2_t)__builtin_neon_vcombinedi (__a, __b);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x16_t)__builtin_neon_vcombinev8qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x8_t)__builtin_neon_vcombinev4hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x4_t)__builtin_neon_vcombinev2si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x2_t)__builtin_neon_vcombinedi (__a, __b);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vcombinev4hf (__a, __b);
+}
+#endif
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x4_t)__builtin_neon_vcombinev2sf (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcombinev8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcombinev4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcombinev2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vcombinedi ((int64x1_t) __a, (int64x1_t) __b);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly8x16_t)__builtin_neon_vcombinev8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ return (poly16x8_t)__builtin_neon_vcombinev4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
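+/* vget_high_<t>: return the upper 64-bit half of a 128-bit vector.  */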
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_p64 (poly64x2_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vget_highv2di ((int64x2_t) __a);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_s8 (int8x16_t __a)
+{
+ return (int8x8_t)__builtin_neon_vget_highv16qi (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_s16 (int16x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vget_highv8hi (__a);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_s32 (int32x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vget_highv4si (__a);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_s64 (int64x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vget_highv2di (__a);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vget_highv8hf (__a);
+}
+#endif
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_f32 (float32x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vget_highv4sf (__a);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_u8 (uint8x16_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vget_highv16qi ((int8x16_t) __a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_u16 (uint16x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vget_highv8hi ((int16x8_t) __a);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_u32 (uint32x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vget_highv4si ((int32x4_t) __a);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_u64 (uint64x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vget_highv2di ((int64x2_t) __a);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_p8 (poly8x16_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vget_highv16qi ((int8x16_t) __a);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_p16 (poly16x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vget_highv8hi ((int16x8_t) __a);
+}
+
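+/* vget_low_<t>: return the lower 64-bit half of a 128-bit vector.  */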
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_s8 (int8x16_t __a)
+{
+ return (int8x8_t)__builtin_neon_vget_lowv16qi (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_s16 (int16x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vget_lowv8hi (__a);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_s32 (int32x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vget_lowv4si (__a);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vget_lowv8hf (__a);
+}
+#endif
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_f32 (float32x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vget_lowv4sf (__a);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_u8 (uint8x16_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vget_lowv16qi ((int8x16_t) __a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_u16 (uint16x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vget_lowv8hi ((int16x8_t) __a);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_u32 (uint32x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vget_lowv4si ((int32x4_t) __a);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_p8 (poly8x16_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vget_lowv16qi ((int8x16_t) __a);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_p16 (poly16x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vget_lowv8hi ((int16x8_t) __a);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_p64 (poly64x2_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vget_lowv2di ((int64x2_t) __a);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_s64 (int64x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vget_lowv2di (__a);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_u64 (uint64x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vget_lowv2di ((int64x2_t) __a);
+}
+
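+/* vcvt(q)_<t>: convert between 32-bit floating-point and integer lanes;
+   float-to-integer conversions round toward zero.  */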
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_s32_f32 (float32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vcvtsv2sf (__a);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_f32_s32 (int32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vcvtsv2si (__a);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_f32_u32 (uint32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vcvtuv2si ((int32x2_t) __a);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_u32_f32 (float32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vcvtuv2sf (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_s32_f32 (float32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vcvtsv4sf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_f32_s32 (int32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vcvtsv4si (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_f32_u32 (uint32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vcvtuv4si ((int32x4_t) __a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_u32_f32 (float32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vcvtuv4sf (__a);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=neon-fp16")
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_f16_f32 (float32x4_t __a)
+{
+ return (float16x4_t)__builtin_neon_vcvtv4hfv4sf (__a);
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_f32_f16 (float16x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vcvtv4sfv4hf (__a);
+}
+#endif
+#pragma GCC pop_options
+
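+/* vcvt(q)_n_<t>: fixed-point conversions; __b is the number of fraction
+   bits and must be a compile-time constant in the range 1..32.  */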
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_s32_f32 (float32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vcvts_nv2sf (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_f32_s32 (int32x2_t __a, const int __b)
+{
+ return (float32x2_t)__builtin_neon_vcvts_nv2si (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_f32_u32 (uint32x2_t __a, const int __b)
+{
+ return (float32x2_t)__builtin_neon_vcvtu_nv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_u32_f32 (float32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vcvtu_nv2sf (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_s32_f32 (float32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vcvts_nv4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_f32_s32 (int32x4_t __a, const int __b)
+{
+ return (float32x4_t)__builtin_neon_vcvts_nv4si (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_f32_u32 (uint32x4_t __a, const int __b)
+{
+ return (float32x4_t)__builtin_neon_vcvtu_nv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_u32_f32 (float32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vcvtu_nv4sf (__a, __b);
+}
+
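+/* vmovn_<t>: narrow each lane to half width, keeping the low bits.  */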
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovn_s16 (int16x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vmovnv8hi (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovn_s32 (int32x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vmovnv4si (__a);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovn_s64 (int64x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vmovnv2di (__a);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovn_u16 (uint16x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vmovnv8hi ((int16x8_t) __a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovn_u32 (uint32x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vmovnv4si ((int32x4_t) __a);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovn_u64 (uint64x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vmovnv2di ((int64x2_t) __a);
+}
+
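+/* vqmovn_<t>: narrow each lane to half width with saturation.  */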
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqmovn_s16 (int16x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vqmovnsv8hi (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqmovn_s32 (int32x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vqmovnsv4si (__a);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqmovn_s64 (int64x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vqmovnsv2di (__a);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqmovn_u16 (uint16x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vqmovnuv8hi ((int16x8_t) __a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqmovn_u32 (uint32x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vqmovnuv4si ((int32x4_t) __a);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqmovn_u64 (uint64x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vqmovnuv2di ((int64x2_t) __a);
+}
+
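+/* vqmovun_s<n>: narrow signed lanes to unsigned half width, saturating
+   negative values to zero.  */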
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqmovun_s16 (int16x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vqmovunv8hi (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqmovun_s32 (int32x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vqmovunv4si (__a);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqmovun_s64 (int64x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vqmovunv2di (__a);
+}
+
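+/* vmovl_<t>: widen each lane to double width (sign- or zero-extend).  */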
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovl_s8 (int8x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vmovlsv8qi (__a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovl_s16 (int16x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vmovlsv4hi (__a);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovl_s32 (int32x2_t __a)
+{
+ return (int64x2_t)__builtin_neon_vmovlsv2si (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovl_u8 (uint8x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vmovluv8qi ((int8x8_t) __a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovl_u16 (uint16x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vmovluv4hi ((int16x4_t) __a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovl_u32 (uint32x2_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vmovluv2si ((int32x2_t) __a);
+}
+
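+/* vtbl1-vtbl4_<t>: byte-wise table lookup; lanes of __b index into the
+   one- to four-vector table __a, and out-of-range indices yield zero.  */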
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbl1_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vtbl1v8qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbl1_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vtbl1v8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbl1_p8 (poly8x8_t __a, uint8x8_t __b)
+{
+ return (poly8x8_t)__builtin_neon_vtbl1v8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbl2_s8 (int8x8x2_t __a, int8x8_t __b)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a };
+ return (int8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbl2_u8 (uint8x8x2_t __a, uint8x8_t __b)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a };
+ return (uint8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbl2_p8 (poly8x8x2_t __a, uint8x8_t __b)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a };
+ return (poly8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbl3_s8 (int8x8x3_t __a, int8x8_t __b)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a };
+ return (int8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbl3_u8 (uint8x8x3_t __a, uint8x8_t __b)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a };
+ return (uint8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbl3_p8 (poly8x8x3_t __a, uint8x8_t __b)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a };
+ return (poly8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbl4_s8 (int8x8x4_t __a, int8x8_t __b)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a };
+ return (int8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbl4_u8 (uint8x8x4_t __a, uint8x8_t __b)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a };
+ return (uint8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbl4_p8 (poly8x8x4_t __a, uint8x8_t __b)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a };
+ return (poly8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, (int8x8_t) __b);
+}
+
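+/* vtbx1-vtbx4_<t>: table lookup with extension; like vtbl, but lanes
+   whose index in __c is out of range keep the value from __a.  */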
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbx1_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vtbx1v8qi (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbx1_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vtbx1v8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbx1_p8 (poly8x8_t __a, poly8x8_t __b, uint8x8_t __c)
+{
+ return (poly8x8_t)__builtin_neon_vtbx1v8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbx2_s8 (int8x8_t __a, int8x8x2_t __b, int8x8_t __c)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ return (int8x8_t)__builtin_neon_vtbx2v8qi (__a, __bu.__o, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbx2_u8 (uint8x8_t __a, uint8x8x2_t __b, uint8x8_t __c)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ return (uint8x8_t)__builtin_neon_vtbx2v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbx2_p8 (poly8x8_t __a, poly8x8x2_t __b, uint8x8_t __c)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ return (poly8x8_t)__builtin_neon_vtbx2v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbx3_s8 (int8x8_t __a, int8x8x3_t __b, int8x8_t __c)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ return (int8x8_t)__builtin_neon_vtbx3v8qi (__a, __bu.__o, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbx3_u8 (uint8x8_t __a, uint8x8x3_t __b, uint8x8_t __c)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ return (uint8x8_t)__builtin_neon_vtbx3v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbx3_p8 (poly8x8_t __a, poly8x8x3_t __b, uint8x8_t __c)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ return (poly8x8_t)__builtin_neon_vtbx3v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbx4_s8 (int8x8_t __a, int8x8x4_t __b, int8x8_t __c)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ return (int8x8_t)__builtin_neon_vtbx4v8qi (__a, __bu.__o, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbx4_u8 (uint8x8_t __a, uint8x8x4_t __b, uint8x8_t __c)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ return (uint8x8_t)__builtin_neon_vtbx4v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtbx4_p8 (poly8x8_t __a, poly8x8x4_t __b, uint8x8_t __c)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ return (poly8x8_t)__builtin_neon_vtbx4v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
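+/* vmul(q)_lane_<t>: multiply each lane of __a by lane __c of __b.  */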
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vmul_lanev4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vmul_lanev2si (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_lane_f32 (float32x2_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vmul_lanev2sf (__a, __b, __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vmul_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vmul_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vmul_lanev8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vmul_lanev4si (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vmul_lanev4sf (__a, __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_lane_u16 (uint16x8_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vmul_lanev8hi ((int16x8_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vmul_lanev4si ((int32x4_t) __a, (int32x2_t) __b, __c);
+}
+
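+/* vmla(q)_lane_<t>: multiply-accumulate, __a + __b * __c[__d].  */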
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x4_t)__builtin_neon_vmla_lanev4hi (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x2_t)__builtin_neon_vmla_lanev2si (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x2_t)__builtin_neon_vmla_lanev2sf (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x4_t)__builtin_neon_vmla_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x2_t)__builtin_neon_vmla_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x8_t)__builtin_neon_vmla_lanev8hi (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmla_lanev4si (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x4_t)__builtin_neon_vmla_lanev4sf (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x8_t)__builtin_neon_vmla_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmla_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d);
+}
+
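+/* vmlal_lane_<t>: widening multiply-accumulate by lane.  */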
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmlals_lanev4hi (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vmlals_lanev2si (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_lane_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmlalu_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_lane_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint64x2_t)__builtin_neon_vmlalu_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d);
+}
+
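+/* vqdmlal_lane_s<n>: saturating doubling widening multiply-accumulate
+   by lane.  */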
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vqdmlal_lanev4hi (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vqdmlal_lanev2si (__a, __b, __c, __d);
+}
+
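+/* vmls(q)_lane_<t>: multiply-subtract, __a - __b * __c[__d].  */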
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x4_t)__builtin_neon_vmls_lanev4hi (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x2_t)__builtin_neon_vmls_lanev2si (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x2_t)__builtin_neon_vmls_lanev2sf (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x4_t)__builtin_neon_vmls_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x2_t)__builtin_neon_vmls_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x8_t)__builtin_neon_vmls_lanev8hi (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmls_lanev4si (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x4_t)__builtin_neon_vmls_lanev4sf (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x8_t)__builtin_neon_vmls_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmls_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d);
+}
+
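+/* vmlsl_lane_<t>: widening multiply-subtract by lane.  */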
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmlsls_lanev4hi (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vmlsls_lanev2si (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_lane_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmlslu_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_lane_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint64x2_t)__builtin_neon_vmlslu_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d);
+}
+
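+/* vqdmlsl_lane_s<n>: saturating doubling widening multiply-subtract
+   by lane.  */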
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vqdmlsl_lanev4hi (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vqdmlsl_lanev2si (__a, __b, __c, __d);
+}
+
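+/* vmull_lane_<t>: widening multiply by lane.  */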
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vmulls_lanev4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vmulls_lanev2si (__a, __b, __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vmullu_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vmullu_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
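+/* vqdmull_lane_s<n>: saturating doubling widening multiply by lane.  */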
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmull_lanev4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmull_lanev2si (__a, __b, __c);
+}
+
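+/* vqdmulh(q)_lane_s<n>: saturating doubling multiply by lane, returning
+   the high half of each product.  */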
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_lanev8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_lanev4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_lanev4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_lanev2si (__a, __b, __c);
+}
+
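+/* vqrdmulh(q)_lane_s<n>: as vqdmulh_lane, but with rounding.  */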
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vqrdmulh_lanev8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vqrdmulh_lanev4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vqrdmulh_lanev4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vqrdmulh_lanev2si (__a, __b, __c);
+}
+
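+/* vqrdmlah/vqrdmlsh_lane_s<n>: saturating rounding doubling multiply
+   accumulate/subtract returning the high half; ARMv8.1-A only, hence
+   the __ARM_FEATURE_QRDMX guard.  */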
+#ifdef __ARM_FEATURE_QRDMX
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlahq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x8_t)__builtin_neon_vqrdmlah_lanev8hi (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlahq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vqrdmlah_lanev4si (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlah_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x4_t)__builtin_neon_vqrdmlah_lanev4hi (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlah_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x2_t)__builtin_neon_vqrdmlah_lanev2si (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlshq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x8_t)__builtin_neon_vqrdmlsh_lanev8hi (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlshq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vqrdmlsh_lanev4si (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlsh_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x4_t)__builtin_neon_vqrdmlsh_lanev4hi (__a, __b, __c, __d);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmlsh_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x2_t)__builtin_neon_vqrdmlsh_lanev2si (__a, __b, __c, __d);
+}
+#endif
+
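+/* vmul(q)_n_<t>: multiply each lane by the scalar __b.  */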
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int16x4_t)__builtin_neon_vmul_nv4hi (__a, (__builtin_neon_hi) __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int32x2_t)__builtin_neon_vmul_nv2si (__a, (__builtin_neon_si) __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_n_f32 (float32x2_t __a, float32_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a * __b;
+#else
+ return (float32x2_t)__builtin_neon_vmul_nv2sf (__a, (__builtin_neon_sf) __b);
+#endif
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_n_u16 (uint16x4_t __a, uint16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_n_u32 (uint32x2_t __a, uint32_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmul_nv8hi (__a, (__builtin_neon_hi) __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmul_nv4si (__a, (__builtin_neon_si) __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_n_f32 (float32x4_t __a, float32_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a * __b;
+#else
+ return (float32x4_t)__builtin_neon_vmul_nv4sf (__a, (__builtin_neon_sf) __b);
+#endif
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __a * __b;
+}
+
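+/* vmull_n_<t>: widening multiply by the scalar __b.  */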
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmulls_nv4hi (__a, (__builtin_neon_hi) __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int64x2_t)__builtin_neon_vmulls_nv2si (__a, (__builtin_neon_si) __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_n_u16 (uint16x4_t __a, uint16_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmullu_nv4hi ((int16x4_t) __a, (__builtin_neon_hi) __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_n_u32 (uint32x2_t __a, uint32_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vmullu_nv2si ((int32x2_t) __a, (__builtin_neon_si) __b);
+}
+
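+/* vqdmull_n_s<n>: saturating doubling widening multiply by scalar.  */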
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmull_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmull_nv4hi (__a, (__builtin_neon_hi) __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmull_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqdmull_nv2si (__a, (__builtin_neon_si) __b);
+}
+
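+/* vqdmulh(q)_n_s<n>: saturating doubling multiply by scalar, returning
+   the high half of each product.  */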
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_nv8hi (__a, (__builtin_neon_hi) __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_nv4si (__a, (__builtin_neon_si) __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmulh_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_nv4hi (__a, (__builtin_neon_hi) __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmulh_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_nv2si (__a, (__builtin_neon_si) __b);
+}
+
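+/* vqrdmulh(q)_n_s<n>: as vqdmulh_n, but with rounding.  */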
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqrdmulh_nv8hi (__a, (__builtin_neon_hi) __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqrdmulh_nv4si (__a, (__builtin_neon_si) __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmulh_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqrdmulh_nv4hi (__a, (__builtin_neon_hi) __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqrdmulh_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqrdmulh_nv2si (__a, (__builtin_neon_si) __b);
+}
+
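+/* vmla(q)_n_<t>: multiply-accumulate by scalar, __a + __b * __c.  */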
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmla_nv4hi (__a, __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmla_nv2si (__a, __b, (__builtin_neon_si) __c);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmla_nv2sf (__a, __b, (__builtin_neon_sf) __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmla_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmla_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmla_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmla_nv8hi (__a, __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmla_nv4si (__a, __b, (__builtin_neon_si) __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmla_nv4sf (__a, __b, (__builtin_neon_sf) __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmla_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmla_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c);
+}
+
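+/* vmlal_n_<t>: widening multiply-accumulate by scalar.  */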
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlals_nv4hi (__a, __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlals_nv2si (__a, __b, (__builtin_neon_si) __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlalu_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlalu_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c);
+}
+
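+/* vqdmlal_n_s<n>: saturating doubling widening multiply-accumulate by
+   scalar.  */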
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlal_nv4hi (__a, __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlal_nv2si (__a, __b, (__builtin_neon_si) __c);
+}
+
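+/* vmls(q)_n_<t>: multiply-subtract by scalar, __a - __b * __c.  */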
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmls_nv4hi (__a, __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmls_nv2si (__a, __b, (__builtin_neon_si) __c);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmls_nv2sf (__a, __b, (__builtin_neon_sf) __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmls_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmls_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmls_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmls_nv8hi (__a, __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmls_nv4si (__a, __b, (__builtin_neon_si) __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmls_nv4sf (__a, __b, (__builtin_neon_sf) __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmls_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmls_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c);
+}
+
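+/* vmlsl_n_<t>: widening multiply-subtract by scalar.  */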
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlsls_nv4hi (__a, __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlsls_nv2si (__a, __b, (__builtin_neon_si) __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlslu_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlslu_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c);
+}
+
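+/* vqdmlsl_n: saturating doubling multiply-subtract long by scalar:
+   each lane computes __a - sat (2 * __b * __c), widening the product.  */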
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlsl_nv4hi (__a, __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlsl_nv2si (__a, __b, (__builtin_neon_si) __c);
+}
+
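+/* vext: extract a vector of contiguous elements starting at index __c of the
+   concatenation of __a and __b (in the neon intrinsics element order).  */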
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vextdi (__a, __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vextv8qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vextv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vextv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vextdi (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_f32 (float32x2_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vextv2sf (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vextv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vextv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vextv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vextdi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vextv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vextv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vextv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vextv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vextv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vextv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vextv2di (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_f32 (float32x4_t __a, float32x4_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vextv4sf (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vextv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vextv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vextv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vextv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vextv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vextv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
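+/* vrev64: reverse the element order within each 64-bit doubleword.  */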
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_s8 (int8x8_t __a)
+{
+ return (int8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_s16 (int16x4_t __a)
+{
+ return (int16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_s32 (int32x2_t __a)
+{
+ return (int32x2_t) __builtin_shuffle (__a, (uint32x2_t) { 1, 0 });
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_f32 (float32x2_t __a)
+{
+ return (float32x2_t) __builtin_shuffle (__a, (uint32x2_t) { 1, 0 });
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t) __builtin_shuffle (__a, (uint32x2_t) { 1, 0 });
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_p16 (poly16x4_t __a)
+{
+ return (poly16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_s8 (int8x16_t __a)
+{
+ return (int8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_s16 (int16x8_t __a)
+{
+ return (int16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_s32 (int32x4_t __a)
+{
+ return (int32x4_t) __builtin_shuffle (__a, (uint32x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_f32 (float32x4_t __a)
+{
+ return (float32x4_t) __builtin_shuffle (__a, (uint32x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t) __builtin_shuffle (__a, (uint32x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_p16 (poly16x8_t __a)
+{
+ return (poly16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
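+/* vrev32: reverse the element order within each 32-bit word.  */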
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32_s8 (int8x8_t __a)
+{
+ return (int8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32_s16 (int16x4_t __a)
+{
+ return (int16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32_p16 (poly16x4_t __a)
+{
+ return (poly16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32q_s8 (int8x16_t __a)
+{
+ return (int8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32q_s16 (int16x8_t __a)
+{
+ return (int16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32q_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32q_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32q_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32q_p16 (poly16x8_t __a)
+{
+ return (poly16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
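+/* vrev16: reverse the byte order within each 16-bit halfword.  */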
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev16_s8 (int8x8_t __a)
+{
+ return (int8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev16_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev16_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev16q_s8 (int8x16_t __a)
+{
+ return (int8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev16q_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev16q_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
+}
+
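+/* vbsl: bitwise select: each result bit is taken from __b where the
+   corresponding mask bit in __a is set, and from __c otherwise.  */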
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_p64 (uint64x1_t __a, poly64x1_t __b, poly64x1_t __c)
+{
+ return (poly64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_s8 (uint8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_s16 (uint16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_s32 (uint32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vbslv2si ((int32x2_t) __a, __b, __c);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_s64 (uint64x1_t __a, int64x1_t __b, int64x1_t __c)
+{
+ return (int64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, __b, __c);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_f32 (uint32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vbslv2sf ((int32x2_t) __a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vbslv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_u64 (uint64x1_t __a, uint64x1_t __b, uint64x1_t __c)
+{
+ return (uint64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, (int64x1_t) __b, (int64x1_t) __c);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_p8 (uint8x8_t __a, poly8x8_t __b, poly8x8_t __c)
+{
+ return (poly8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
+{
+ return (poly16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_p64 (uint64x2_t __a, poly64x2_t __b, poly64x2_t __c)
+{
+ return (poly64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, (int64x2_t) __b, (int64x2_t) __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_s8 (uint8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_s16 (uint16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_s32 (uint32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vbslv4si ((int32x4_t) __a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_s64 (uint64x2_t __a, int64x2_t __b, int64x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_f32 (uint32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vbslv4sf ((int32x4_t) __a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vbslv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, (int64x2_t) __b, (int64x2_t) __c);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_p8 (uint8x16_t __a, poly8x16_t __b, poly8x16_t __c)
+{
+ return (poly8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_p16 (uint16x8_t __a, poly16x8_t __b, poly16x8_t __c)
+{
+ return (poly16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
+}
+
+/* For big-endian, the shuffle masks for ZIP, UZP and TRN must be changed as
+   follows.  (nelt = the number of elements within a vector.)
+
+   Firstly, a value of N within a mask becomes (N ^ (nelt - 1)), as the gcc
+   vector extension's indexing scheme is reversed *within each vector*
+   (relative to the neon intrinsics view), without changing which of the two
+   input vectors the element is taken from.
+
+   Secondly, the elements within each mask are reversed, as the mask is itself
+   a vector and will itself be loaded in reverse order (again, relative to the
+   neon intrinsics view, i.e. the order that would result from a "vld1"
+   instruction).  */
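+
+/* vtrn: transpose pairs of elements: val[0] is { __a[0], __b[0], __a[2],
+   __b[2], ... } and val[1] is { __a[1], __b[1], __a[3], __b[3], ... }.
+   Worked example of the big-endian rule above: for vtrn_s16, nelt is 4 and
+   the little-endian val[0] mask is { 0, 4, 2, 6 }.  XORing each index with
+   (nelt - 1) gives { 3, 7, 1, 5 }, and reversing the mask elements then
+   gives { 5, 1, 7, 3 }, the big-endian val[0] mask used below.  */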
+
+__extension__ extern __inline int8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_s8 (int8x8_t __a, int8x8_t __b)
+{
+ int8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 9, 1, 11, 3, 13, 5, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 8, 2, 10, 4, 12, 6, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_s16 (int16x4_t __a, int16x4_t __b)
+{
+ int16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 1, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 2, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 5, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 9, 1, 11, 3, 13, 5, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 8, 2, 10, 4, 12, 6, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 1, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 2, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 5, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline poly8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ poly8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 9, 1, 11, 3, 13, 5, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 8, 2, 10, 4, 12, 6, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline poly16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ poly16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 1, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 2, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 5, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_s32 (int32x2_t __a, int32x2_t __b)
+{
+ int32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline float32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_f32 (float32x2_t __a, float32x2_t __b)
+{
+ float32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ int8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 17, 1, 19, 3, 21, 5, 23, 7, 25, 9, 27, 11, 29, 13, 31, 15 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 16, 0, 18, 2, 20, 4, 22, 6, 24, 8, 26, 10, 28, 12, 30, 14 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ int16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 9, 1, 11, 3, 13, 5, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 8, 2, 10, 4, 12, 6, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ int32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 0, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 2, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 5, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline float32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ float32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 0, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 2, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 5, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 17, 1, 19, 3, 21, 5, 23, 7, 25, 9, 27, 11, 29, 13, 31, 15 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 16, 0, 18, 2, 20, 4, 22, 6, 24, 8, 26, 10, 28, 12, 30, 14 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 9, 1, 11, 3, 13, 5, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 8, 2, 10, 4, 12, 6, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 0, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 2, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 5, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline poly8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ poly8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 17, 1, 19, 3, 21, 5, 23, 7, 25, 9, 27, 11, 29, 13, 31, 15 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 16, 0, 18, 2, 20, 4, 22, 6, 24, 8, 26, 10, 28, 12, 30, 14 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline poly16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+ poly16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 9, 1, 11, 3, 13, 5, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 8, 2, 10, 4, 12, 6, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+ return __rv;
+}
+
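+/* vzip: interleave the elements of __a and __b; val[0] holds the interleaved
+   low halves and val[1] the interleaved high halves.  */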
+__extension__ extern __inline int8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip_s8 (int8x8_t __a, int8x8_t __b)
+{
+ int8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 12, 4, 13, 5, 14, 6, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 0, 9, 1, 10, 2, 11, 3 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 8, 1, 9, 2, 10, 3, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 4, 12, 5, 13, 6, 14, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip_s16 (int16x4_t __a, int16x4_t __b)
+{
+ int16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 6, 2, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 5, 1 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 1, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 2, 6, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 12, 4, 13, 5, 14, 6, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 0, 9, 1, 10, 2, 11, 3 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 8, 1, 9, 2, 10, 3, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 4, 12, 5, 13, 6, 14, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 6, 2, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 5, 1 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 1, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 2, 6, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline poly8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ poly8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 12, 4, 13, 5, 14, 6, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 0, 9, 1, 10, 2, 11, 3 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 8, 1, 9, 2, 10, 3, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 4, 12, 5, 13, 6, 14, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline poly16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ poly16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 6, 2, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 5, 1 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 1, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 2, 6, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip_s32 (int32x2_t __a, int32x2_t __b)
+{
+ int32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline float32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip_f32 (float32x2_t __a, float32x2_t __b)
+{
+ float32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzipq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ int8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 20, 4, 21, 5, 22, 6, 23, 7, 16, 0, 17, 1, 18, 2, 19, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 28, 12, 29, 13, 30, 14, 31, 15, 24, 8, 25, 9, 26, 10, 27, 11 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzipq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ int16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 10, 2, 11, 3, 8, 0, 9, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 14, 6, 15, 7, 12, 4, 13, 5 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 8, 1, 9, 2, 10, 3, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 4, 12, 5, 13, 6, 14, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzipq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ int32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 4, 0 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 7, 3, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 1, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 6, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline float32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzipq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ float32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 4, 0 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 7, 3, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 1, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 6, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzipq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 20, 4, 21, 5, 22, 6, 23, 7, 16, 0, 17, 1, 18, 2, 19, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 28, 12, 29, 13, 30, 14, 31, 15, 24, 8, 25, 9, 26, 10, 27, 11 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzipq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 10, 2, 11, 3, 8, 0, 9, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 14, 6, 15, 7, 12, 4, 13, 5 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 8, 1, 9, 2, 10, 3, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 4, 12, 5, 13, 6, 14, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzipq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 4, 0 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 7, 3, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 1, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 6, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline poly8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzipq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ poly8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 20, 4, 21, 5, 22, 6, 23, 7, 16, 0, 17, 1, 18, 2, 19, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 28, 12, 29, 13, 30, 14, 31, 15, 24, 8, 25, 9, 26, 10, 27, 11 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline poly16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzipq_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+ poly16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 10, 2, 11, 3, 8, 0, 9, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 14, 6, 15, 7, 12, 4, 13, 5 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 8, 1, 9, 2, 10, 3, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 4, 12, 5, 13, 6, 14, 7, 15 });
+#endif
+ return __rv;
+}
+
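+/* vuzp: de-interleave __a and __b; val[0] gathers the even-indexed elements
+   and val[1] the odd-indexed elements.  */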
+__extension__ extern __inline int8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp_s8 (int8x8_t __a, int8x8_t __b)
+{
+ int8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 9, 11, 13, 15, 1, 3, 5, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 10, 12, 14, 0, 2, 4, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp_s16 (int16x4_t __a, int16x4_t __b)
+{
+ int16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 7, 1, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 6, 0, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 2, 4, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 3, 5, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp_s32 (int32x2_t __a, int32x2_t __b)
+{
+ int32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline float32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp_f32 (float32x2_t __a, float32x2_t __b)
+{
+ float32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 9, 11, 13, 15, 1, 3, 5, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 10, 12, 14, 0, 2, 4, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 7, 1, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 6, 0, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 2, 4, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 3, 5, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline poly8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ poly8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 9, 11, 13, 15, 1, 3, 5, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 10, 12, 14, 0, 2, 4, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline poly16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ poly16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 7, 1, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 6, 0, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 2, 4, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 3, 5, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzpq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ int8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 9, 11, 13, 15, 1, 3, 5, 7, 25, 27, 29, 31, 17, 19, 21, 23 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 8, 10, 12, 14, 0, 2, 4, 6, 24, 26, 28, 30, 16, 18, 20, 22 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzpq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ int16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 5, 7, 1, 3, 13, 15, 9, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 4, 6, 0, 2, 12, 14, 8, 10 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline int32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzpq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ int32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 3, 1, 7, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 0, 6, 4 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 2, 4, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 3, 5, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline float32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzpq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ float32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 3, 1, 7, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 0, 6, 4 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 2, 4, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 3, 5, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzpq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 9, 11, 13, 15, 1, 3, 5, 7, 25, 27, 29, 31, 17, 19, 21, 23 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 8, 10, 12, 14, 0, 2, 4, 6, 24, 26, 28, 30, 16, 18, 20, 22 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzpq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 5, 7, 1, 3, 13, 15, 9, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 4, 6, 0, 2, 12, 14, 8, 10 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline uint32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzpq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 3, 1, 7, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 0, 6, 4 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 2, 4, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 3, 5, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline poly8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzpq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ poly8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 9, 11, 13, 15, 1, 3, 5, 7, 25, 27, 29, 31, 17, 19, 21, 23 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 8, 10, 12, 14, 0, 2, 4, 6, 24, 26, 28, 30, 16, 18, 20, 22 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline poly16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzpq_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+ poly16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 5, 7, 1, 3, 13, 15, 9, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 4, 6, 0, 2, 12, 14, 8, 10 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15 });
+#endif
+ return __rv;
+}
+
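+/* vld1: load one vector's worth of consecutive elements from __a.  */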
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_p64 (const poly64_t * __a)
+{
+ return (poly64x1_t) { *__a };
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_s8 (const int8_t * __a)
+{
+ return (int8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_s16 (const int16_t * __a)
+{
+ return (int16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_s32 (const int32_t * __a)
+{
+ return (int32x2_t)__builtin_neon_vld1v2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_s64 (const int64_t * __a)
+{
+ return (int64x1_t) { *__a };
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_f16 (const float16_t * __a)
+{
+ return __builtin_neon_vld1v4hf (__a);
+}
+#endif
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_f32 (const float32_t * __a)
+{
+ return (float32x2_t)__builtin_neon_vld1v2sf ((const __builtin_neon_sf *) __a);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_u8 (const uint8_t * __a)
+{
+ return (uint8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_u16 (const uint16_t * __a)
+{
+ return (uint16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_u32 (const uint32_t * __a)
+{
+ return (uint32x2_t)__builtin_neon_vld1v2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_u64 (const uint64_t * __a)
+{
+ return (uint64x1_t) { *__a };
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_p8 (const poly8_t * __a)
+{
+ return (poly8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_p16 (const poly16_t * __a)
+{
+ return (poly16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
+}
+
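+/* vld1q is the 128-bit (Q-register) counterpart of vld1: it loads
+   16, 8, 4 or 2 consecutive elements, depending on element size.  */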
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p64 (const poly64_t * __a)
+{
+ return (poly64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s8 (const int8_t * __a)
+{
+ return (int8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s16 (const int16_t * __a)
+{
+ return (int16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s32 (const int32_t * __a)
+{
+ return (int32x4_t)__builtin_neon_vld1v4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_s64 (const int64_t * __a)
+{
+ return (int64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_f16 (const float16_t * __a)
+{
+ return __builtin_neon_vld1v8hf (__a);
+}
+#endif
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_f32 (const float32_t * __a)
+{
+ return (float32x4_t)__builtin_neon_vld1v4sf ((const __builtin_neon_sf *) __a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u8 (const uint8_t * __a)
+{
+ return (uint8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u16 (const uint16_t * __a)
+{
+ return (uint16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u32 (const uint32_t * __a)
+{
+ return (uint32x4_t)__builtin_neon_vld1v4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_u64 (const uint64_t * __a)
+{
+ return (uint64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p8 (const poly8_t * __a)
+{
+ return (poly8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p16 (const poly16_t * __a)
+{
+ return (poly16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a);
+}
+
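+/* vld1_lane loads a single element into lane __c of __b and returns
+   the result; all other lanes are taken from __b unchanged.  __c must
+   be a constant expression in range for the vector type.  */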
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_s8 (const int8_t * __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_s16 (const int16_t * __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_s32 (const int32_t * __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vld1_lanev2si ((const __builtin_neon_si *) __a, __b, __c);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_f16 (const float16_t * __a, float16x4_t __b, const int __c)
+{
+ return vset_lane_f16 (*__a, __b, __c);
+}
+#endif
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_f32 (const float32_t * __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vld1_lanev2sf ((const __builtin_neon_sf *) __a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_u8 (const uint8_t * __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_u16 (const uint16_t * __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_u32 (const uint32_t * __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vld1_lanev2si ((const __builtin_neon_si *) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_p8 (const poly8_t * __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_p16 (const poly16_t * __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_p64 (const poly64_t * __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_s64 (const int64_t * __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_u64 (const uint64_t * __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, (int64x1_t) __b, __c);
+}
+
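+/* vld1q_lane: as vld1_lane, but on 128-bit vectors.  */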
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_s8 (const int8_t * __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_s16 (const int16_t * __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_s32 (const int32_t * __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vld1_lanev4si ((const __builtin_neon_si *) __a, __b, __c);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_f16 (const float16_t * __a, float16x8_t __b, const int __c)
+{
+ return vsetq_lane_f16 (*__a, __b, __c);
+}
+#endif
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_f32 (const float32_t * __a, float32x4_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vld1_lanev4sf ((const __builtin_neon_sf *) __a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_u8 (const uint8_t * __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_u16 (const uint16_t * __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_u32 (const uint32_t * __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vld1_lanev4si ((const __builtin_neon_si *) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_p8 (const poly8_t * __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_p16 (const poly16_t * __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_p64 (const poly64_t * __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_s64 (const int64_t * __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_u64 (const uint64_t * __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
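+/* vld1_dup loads one element and replicates it across every lane.
+   The float16 variants have no dedicated builtin here and instead
+   broadcast a scalar load through a vector constructor.  */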
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_s8 (const int8_t * __a)
+{
+ return (int8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_s16 (const int16_t * __a)
+{
+ return (int16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_s32 (const int32_t * __a)
+{
+ return (int32x2_t)__builtin_neon_vld1_dupv2si ((const __builtin_neon_si *) __a);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_f16 (const float16_t * __a)
+{
+ float16_t __f = *__a;
+ return (float16x4_t) { __f, __f, __f, __f };
+}
+#endif
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_f32 (const float32_t * __a)
+{
+ return (float32x2_t)__builtin_neon_vld1_dupv2sf ((const __builtin_neon_sf *) __a);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_u8 (const uint8_t * __a)
+{
+ return (uint8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_u16 (const uint16_t * __a)
+{
+ return (uint16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_u32 (const uint32_t * __a)
+{
+ return (uint32x2_t)__builtin_neon_vld1_dupv2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_p8 (const poly8_t * __a)
+{
+ return (poly8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_p16 (const poly16_t * __a)
+{
+ return (poly16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_p64 (const poly64_t * __a)
+{
+ return (poly64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_s64 (const int64_t * __a)
+{
+ return (int64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_u64 (const uint64_t * __a)
+{
+ return (uint64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a);
+}
+
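+/* vld1q_dup: as vld1_dup, but replicating across a 128-bit vector.  */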
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_s8 (const int8_t * __a)
+{
+ return (int8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_s16 (const int16_t * __a)
+{
+ return (int16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_s32 (const int32_t * __a)
+{
+ return (int32x4_t)__builtin_neon_vld1_dupv4si ((const __builtin_neon_si *) __a);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_f16 (const float16_t * __a)
+{
+ float16_t __f = *__a;
+ return (float16x8_t) { __f, __f, __f, __f, __f, __f, __f, __f };
+}
+#endif
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_f32 (const float32_t * __a)
+{
+ return (float32x4_t)__builtin_neon_vld1_dupv4sf ((const __builtin_neon_sf *) __a);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_u8 (const uint8_t * __a)
+{
+ return (uint8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_u16 (const uint16_t * __a)
+{
+ return (uint16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_u32 (const uint32_t * __a)
+{
+ return (uint32x4_t)__builtin_neon_vld1_dupv4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_p8 (const poly8_t * __a)
+{
+ return (poly8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_p16 (const poly16_t * __a)
+{
+ return (poly16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_p64 (const poly64_t * __a)
+{
+ return (poly64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_s64 (const int64_t * __a)
+{
+ return (int64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_u64 (const uint64_t * __a)
+{
+ return (uint64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
+}
+
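+/* vst1 stores a complete 64-bit vector to memory; unsigned and
+   polynomial vectors are cast to the corresponding signed types
+   expected by the builtins before the store.  */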
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_p64 (poly64_t * __a, poly64x1_t __b)
+{
+ __builtin_neon_vst1di ((__builtin_neon_di *) __a, __b);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_s8 (int8_t * __a, int8x8_t __b)
+{
+ __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_s16 (int16_t * __a, int16x4_t __b)
+{
+ __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_s32 (int32_t * __a, int32x2_t __b)
+{
+ __builtin_neon_vst1v2si ((__builtin_neon_si *) __a, __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_s64 (int64_t * __a, int64x1_t __b)
+{
+ __builtin_neon_vst1di ((__builtin_neon_di *) __a, __b);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_f16 (float16_t * __a, float16x4_t __b)
+{
+ __builtin_neon_vst1v4hf (__a, __b);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_f32 (float32_t * __a, float32x2_t __b)
+{
+ __builtin_neon_vst1v2sf ((__builtin_neon_sf *) __a, __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_u8 (uint8_t * __a, uint8x8_t __b)
+{
+ __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_u16 (uint16_t * __a, uint16x4_t __b)
+{
+ __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_u32 (uint32_t * __a, uint32x2_t __b)
+{
+ __builtin_neon_vst1v2si ((__builtin_neon_si *) __a, (int32x2_t) __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_u64 (uint64_t * __a, uint64x1_t __b)
+{
+ __builtin_neon_vst1di ((__builtin_neon_di *) __a, (int64x1_t) __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_p8 (poly8_t * __a, poly8x8_t __b)
+{
+ __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_p16 (poly16_t * __a, poly16x4_t __b)
+{
+ __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b);
+}
+
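+/* vst1q: as vst1, but storing a 128-bit vector.  */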
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_p64 (poly64_t * __a, poly64x2_t __b)
+{
+ __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, (int64x2_t) __b);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_s8 (int8_t * __a, int8x16_t __b)
+{
+ __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_s16 (int16_t * __a, int16x8_t __b)
+{
+ __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_s32 (int32_t * __a, int32x4_t __b)
+{
+ __builtin_neon_vst1v4si ((__builtin_neon_si *) __a, __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_s64 (int64_t * __a, int64x2_t __b)
+{
+ __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, __b);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_f16 (float16_t * __a, float16x8_t __b)
+{
+ __builtin_neon_vst1v8hf (__a, __b);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_f32 (float32_t * __a, float32x4_t __b)
+{
+ __builtin_neon_vst1v4sf ((__builtin_neon_sf *) __a, __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_u8 (uint8_t * __a, uint8x16_t __b)
+{
+ __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_u16 (uint16_t * __a, uint16x8_t __b)
+{
+ __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_u32 (uint32_t * __a, uint32x4_t __b)
+{
+ __builtin_neon_vst1v4si ((__builtin_neon_si *) __a, (int32x4_t) __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_u64 (uint64_t * __a, uint64x2_t __b)
+{
+ __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, (int64x2_t) __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_p8 (poly8_t * __a, poly8x16_t __b)
+{
+ __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_p16 (poly16_t * __a, poly16x8_t __b)
+{
+ __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b);
+}
+
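+/* vst1_lane stores only lane __c of __b; __c must be a constant
+   expression in range for the vector type.  */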
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_s8 (int8_t * __a, int8x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_s16 (int16_t * __a, int16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_s32 (int32_t * __a, int32x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2si ((__builtin_neon_si *) __a, __b, __c);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_f16 (float16_t * __a, float16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4hf (__a, __b, __c);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_f32 (float32_t * __a, float32x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2sf ((__builtin_neon_sf *) __a, __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_u8 (uint8_t * __a, uint8x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_u16 (uint16_t * __a, uint16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_u32 (uint32_t * __a, uint32x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2si ((__builtin_neon_si *) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_p8 (poly8_t * __a, poly8x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_p16 (poly16_t * __a, poly16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_p64 (poly64_t * __a, poly64x1_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_s64 (int64_t * __a, int64x1_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_u64 (uint64_t * __a, uint64x1_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, (int64x1_t) __b, __c);
+}
+
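+/* vst1q_lane: as vst1_lane, but on 128-bit vectors.  */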
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_s8 (int8_t * __a, int8x16_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_s16 (int16_t * __a, int16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_s32 (int32_t * __a, int32x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4si ((__builtin_neon_si *) __a, __b, __c);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_f16 (float16_t * __a, float16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8hf (__a, __b, __c);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_f32 (float32_t * __a, float32x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4sf ((__builtin_neon_sf *) __a, __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_u8 (uint8_t * __a, uint8x16_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_u16 (uint16_t * __a, uint16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_u32 (uint32_t * __a, uint32x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4si ((__builtin_neon_si *) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_p8 (poly8_t * __a, poly8x16_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_p16 (poly16_t * __a, poly16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_p64 (poly64_t * __a, poly64x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_s64 (int64_t * __a, int64x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_u64 (uint64_t * __a, uint64x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
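+/* vld2 performs a de-interleaving load of 2-element structures:
+   memory elements 0, 2, 4, ... go to val[0] and 1, 3, 5, ... to
+   val[1].  The builtins return an opaque 128-bit __builtin_neon_ti
+   value holding the register pair, which the union reinterprets as
+   the public two-vector struct.  */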
+__extension__ extern __inline int8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_s8 (const int8_t * __a)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_s16 (const int16_t * __a)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_s32 (const int32_t * __a)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_f16 (const float16_t * __a)
+{
+ union { float16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4hf (__a);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_f32 (const float32_t * __a)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_u8 (const uint8_t * __a)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_u16 (const uint16_t * __a)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_u32 (const uint32_t * __a)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_p8 (const poly8_t * __a)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_p16 (const poly16_t * __a)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_p64 (const poly64_t * __a)
+{
+ union { poly64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_s64 (const int64_t * __a)
+{
+ union { int64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_u64 (const uint64_t * __a)
+{
+ union { uint64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
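+/* vld2q: the Q-register form; here the register pair is carried in a
+   256-bit __builtin_neon_oi value.  */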
+__extension__ extern __inline int8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_s8 (const int8_t * __a)
+{
+ union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_s16 (const int16_t * __a)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_s32 (const int32_t * __a)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_f16 (const float16_t * __a)
+{
+ union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8hf (__a);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_f32 (const float32_t * __a)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_u8 (const uint8_t * __a)
+{
+ union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_u16 (const uint16_t * __a)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_u32 (const uint32_t * __a)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_p8 (const poly8_t * __a)
+{
+ union { poly8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_p16 (const poly16_t * __a)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
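+/* vld2_lane loads one 2-element structure into lane __c of the pair
+   __b, leaving the remaining lanes unchanged.  */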
+__extension__ extern __inline int8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_lane_s8 (const int8_t * __a, int8x8x2_t __b, const int __c)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_lane_s16 (const int16_t * __a, int16x4x2_t __b, const int __c)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_lane_s32 (const int32_t * __a, int32x2x2_t __b, const int __c)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_lane_f16 (const float16_t * __a, float16x4x2_t __b, const int __c)
+{
+ union { float16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { float16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4hf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_lane_f32 (const float32_t * __a, float32x2x2_t __b, const int __c)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_lane_u8 (const uint8_t * __a, uint8x8x2_t __b, const int __c)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_lane_u16 (const uint16_t * __a, uint16x4x2_t __b, const int __c)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_lane_u32 (const uint32_t * __a, uint32x2x2_t __b, const int __c)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_lane_p8 (const poly8_t * __a, poly8x8x2_t __b, const int __c)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_lane_p16 (const poly16_t * __a, poly16x4x2_t __b, const int __c)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
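+/* vld2q_lane exists only for 16- and 32-bit elements: AArch32 NEON
+   provides no Q-register per-lane form of VLD2 for 8-bit structures.  */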
+__extension__ extern __inline int16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_lane_s16 (const int16_t * __a, int16x8x2_t __b, const int __c)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_lane_s32 (const int32_t * __a, int32x4x2_t __b, const int __c)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_lane_f16 (const float16_t * __a, float16x8x2_t __b, const int __c)
+{
+ union { float16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8hf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_lane_f32 (const float32_t * __a, float32x4x2_t __b, const int __c)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_lane_u16 (const uint16_t * __a, uint16x8x2_t __b, const int __c)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_lane_u32 (const uint32_t * __a, uint32x4x2_t __b, const int __c)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_lane_p16 (const poly16_t * __a, poly16x8x2_t __b, const int __c)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
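+/* vld2_dup loads one 2-element structure and replicates element 0
+   across every lane of val[0] and element 1 across val[1].  */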
+__extension__ extern __inline int8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_s8 (const int8_t * __a)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_s16 (const int16_t * __a)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_s32 (const int32_t * __a)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_f16 (const float16_t * __a)
+{
+ union { float16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4hf (__a);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_f32 (const float32_t * __a)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_u8 (const uint8_t * __a)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_u16 (const uint16_t * __a)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_u32 (const uint32_t * __a)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_p8 (const poly8_t * __a)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_p16 (const poly16_t * __a)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_p64 (const poly64_t * __a)
+{
+ union { poly64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_s64 (const int64_t * __a)
+{
+ union { int64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_u64 (const uint64_t * __a)
+{
+ union { uint64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
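+/* vst2 is the interleaving store matching vld2: lanes of val[0] and
+   val[1] alternate in memory.  The union converts the public struct
+   into the opaque value the builtin expects.  */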
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_s8 (int8_t * __a, int8x8x2_t __b)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_s16 (int16_t * __a, int16x4x2_t __b)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_s32 (int32_t * __a, int32x2x2_t __b)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_f16 (float16_t * __a, float16x4x2_t __b)
+{
+ union { float16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v4hf (__a, __bu.__o);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_f32 (float32_t * __a, float32x2x2_t __b)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v2sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_u8 (uint8_t * __a, uint8x8x2_t __b)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_u16 (uint16_t * __a, uint16x4x2_t __b)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_u32 (uint32_t * __a, uint32x2x2_t __b)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_p8 (poly8_t * __a, poly8x8x2_t __b)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_p16 (poly16_t * __a, poly16x4x2_t __b)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_p64 (poly64_t * __a, poly64x1x2_t __b)
+{
+ union { poly64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_s64 (int64_t * __a, int64x1x2_t __b)
+{
+ union { int64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_u64 (uint64_t * __a, uint64x1x2_t __b)
+{
+ union { uint64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
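+/* vst2q: the Q-register form of vst2.  */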
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_s8 (int8_t * __a, int8x16x2_t __b)
+{
+ union { int8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_s16 (int16_t * __a, int16x8x2_t __b)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_s32 (int32_t * __a, int32x4x2_t __b)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_f16 (float16_t * __a, float16x8x2_t __b)
+{
+ union { float16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v8hf (__a, __bu.__o);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_f32 (float32_t * __a, float32x4x2_t __b)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v4sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_u8 (uint8_t * __a, uint8x16x2_t __b)
+{
+ union { uint8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_u16 (uint16_t * __a, uint16x8x2_t __b)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_u32 (uint32_t * __a, uint32x4x2_t __b)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_p8 (poly8_t * __a, poly8x16x2_t __b)
+{
+ union { poly8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_p16 (poly16_t * __a, poly16x8x2_t __b)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
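+/* Editorial note: the _lane variants store or load a single
+   structure at lane __c of each vector; __c must be an integer
+   constant expression in range for the vector type, as the builtin
+   compiles to an instruction with an immediate lane index.  */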
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_lane_s8 (int8_t * __a, int8x8x2_t __b, const int __c)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_lane_s16 (int16_t * __a, int16x4x2_t __b, const int __c)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_lane_s32 (int32_t * __a, int32x2x2_t __b, const int __c)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_lane_f16 (float16_t * __a, float16x4x2_t __b, const int __c)
+{
+ union { float16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4hf (__a, __bu.__o, __c);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_lane_f32 (float32_t * __a, float32x2x2_t __b, const int __c)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_lane_u8 (uint8_t * __a, uint8x8x2_t __b, const int __c)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_lane_u16 (uint16_t * __a, uint16x4x2_t __b, const int __c)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_lane_u32 (uint32_t * __a, uint32x2x2_t __b, const int __c)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_lane_p8 (poly8_t * __a, poly8x8x2_t __b, const int __c)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_lane_p16 (poly16_t * __a, poly16x4x2_t __b, const int __c)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_lane_s16 (int16_t * __a, int16x8x2_t __b, const int __c)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_lane_s32 (int32_t * __a, int32x4x2_t __b, const int __c)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_lane_f16 (float16_t * __a, float16x8x2_t __b, const int __c)
+{
+ union { float16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8hf (__a, __bu.__o, __c);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_lane_f32 (float32_t * __a, float32x4x2_t __b, const int __c)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_lane_u16 (uint16_t * __a, uint16x8x2_t __b, const int __c)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_lane_u32 (uint32_t * __a, uint32x4x2_t __b, const int __c)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_lane_p16 (poly16_t * __a, poly16x8x2_t __b, const int __c)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
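+/* Editorial example, not part of the upstream header: vld3/vst3
+   de-interleave and re-interleave 3-element records.  A minimal
+   sketch, assuming a packed RGB8 buffer whose pixel count is a
+   multiple of 8 (halve_green is a hypothetical helper):
+
+     void halve_green (uint8_t *rgb, unsigned pixels)
+     {
+       for (unsigned i = 0; i < pixels; i += 8)
+         {
+           uint8x8x3_t px = vld3_u8 (rgb + 3 * i); // split R/G/B planes
+           px.val[1] = vshr_n_u8 (px.val[1], 1);   // halve the G plane
+           vst3_u8 (rgb + 3 * i, px);              // re-interleave
+         }
+     }
+*/
+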
+__extension__ extern __inline int8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_s8 (const int8_t * __a)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_s16 (const int16_t * __a)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_s32 (const int32_t * __a)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_f16 (const float16_t * __a)
+{
+ union { float16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4hf (__a);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_f32 (const float32_t * __a)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_u8 (const uint8_t * __a)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_u16 (const uint16_t * __a)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_u32 (const uint32_t * __a)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_p8 (const poly8_t * __a)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_p16 (const poly16_t * __a)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_p64 (const poly64_t * __a)
+{
+ union { poly64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_s64 (const int64_t * __a)
+{
+ union { int64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_u64 (const uint64_t * __a)
+{
+ union { uint64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int8x16x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_s8 (const int8_t * __a)
+{
+ union { int8x16x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_s16 (const int16_t * __a)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_s32 (const int32_t * __a)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_f16 (const float16_t * __a)
+{
+ union { float16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8hf (__a);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_f32 (const float32_t * __a)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x16x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_u8 (const uint8_t * __a)
+{
+ union { uint8x16x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_u16 (const uint16_t * __a)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_u32 (const uint32_t * __a)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly8x16x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_p8 (const poly8_t * __a)
+{
+ union { poly8x16x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_p16 (const poly16_t * __a)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_lane_s8 (const int8_t * __a, int8x8x3_t __b, const int __c)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_lane_s16 (const int16_t * __a, int16x4x3_t __b, const int __c)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_lane_s32 (const int32_t * __a, int32x2x3_t __b, const int __c)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_lane_f16 (const float16_t * __a, float16x4x3_t __b, const int __c)
+{
+ union { float16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { float16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4hf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_lane_f32 (const float32_t * __a, float32x2x3_t __b, const int __c)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_lane_u8 (const uint8_t * __a, uint8x8x3_t __b, const int __c)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_lane_u16 (const uint16_t * __a, uint16x4x3_t __b, const int __c)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_lane_u32 (const uint32_t * __a, uint32x2x3_t __b, const int __c)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_lane_p8 (const poly8_t * __a, poly8x8x3_t __b, const int __c)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_lane_p16 (const poly16_t * __a, poly16x4x3_t __b, const int __c)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_lane_s16 (const int16_t * __a, int16x8x3_t __b, const int __c)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_lane_s32 (const int32_t * __a, int32x4x3_t __b, const int __c)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_lane_f16 (const float16_t * __a, float16x8x3_t __b, const int __c)
+{
+ union { float16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { float16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8hf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_lane_f32 (const float32_t * __a, float32x4x3_t __b, const int __c)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_lane_u16 (const uint16_t * __a, uint16x8x3_t __b, const int __c)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_lane_u32 (const uint32_t * __a, uint32x4x3_t __b, const int __c)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_lane_p16 (const poly16_t * __a, poly16x8x3_t __b, const int __c)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
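+/* Editorial note: the _dup variants load one 3-element structure and
+   replicate each of its elements across every lane of the matching
+   result vector (the VLD3 single-structure-to-all-lanes form).  */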
+__extension__ extern __inline int8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_s8 (const int8_t * __a)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_s16 (const int16_t * __a)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_s32 (const int32_t * __a)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_f16 (const float16_t * __a)
+{
+ union { float16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4hf (__a);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_f32 (const float32_t * __a)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_u8 (const uint8_t * __a)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_u16 (const uint16_t * __a)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_u32 (const uint32_t * __a)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_p8 (const poly8_t * __a)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_p16 (const poly16_t * __a)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_p64 (const poly64_t * __a)
+{
+ union { poly64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_s64 (const int64_t * __a)
+{
+ union { int64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_u64 (const uint64_t * __a)
+{
+ union { uint64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_s8 (int8_t * __a, int8x8x3_t __b)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_s16 (int16_t * __a, int16x4x3_t __b)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_s32 (int32_t * __a, int32x2x3_t __b)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_f16 (float16_t * __a, float16x4x3_t __b)
+{
+ union { float16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v4hf (__a, __bu.__o);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_f32 (float32_t * __a, float32x2x3_t __b)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v2sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_u8 (uint8_t * __a, uint8x8x3_t __b)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_u16 (uint16_t * __a, uint16x4x3_t __b)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_u32 (uint32_t * __a, uint32x2x3_t __b)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_p8 (poly8_t * __a, poly8x8x3_t __b)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_p16 (poly16_t * __a, poly16x4x3_t __b)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_p64 (poly64_t * __a, poly64x1x3_t __b)
+{
+ union { poly64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_s64 (int64_t * __a, int64x1x3_t __b)
+{
+ union { int64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_u64 (uint64_t * __a, uint64x1x3_t __b)
+{
+ union { uint64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_s8 (int8_t * __a, int8x16x3_t __b)
+{
+ union { int8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_s16 (int16_t * __a, int16x8x3_t __b)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_s32 (int32_t * __a, int32x4x3_t __b)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_f16 (float16_t * __a, float16x8x3_t __b)
+{
+ union { float16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v8hf (__a, __bu.__o);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_f32 (float32_t * __a, float32x4x3_t __b)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v4sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_u8 (uint8_t * __a, uint8x16x3_t __b)
+{
+ union { uint8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_u16 (uint16_t * __a, uint16x8x3_t __b)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_u32 (uint32_t * __a, uint32x4x3_t __b)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_p8 (poly8_t * __a, poly8x16x3_t __b)
+{
+ union { poly8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_p16 (poly16_t * __a, poly16x8x3_t __b)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_lane_s8 (int8_t * __a, int8x8x3_t __b, const int __c)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_lane_s16 (int16_t * __a, int16x4x3_t __b, const int __c)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_lane_s32 (int32_t * __a, int32x2x3_t __b, const int __c)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_lane_f16 (float16_t * __a, float16x4x3_t __b, const int __c)
+{
+ union { float16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4hf (__a, __bu.__o, __c);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_lane_f32 (float32_t * __a, float32x2x3_t __b, const int __c)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_lane_u8 (uint8_t * __a, uint8x8x3_t __b, const int __c)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_lane_u16 (uint16_t * __a, uint16x4x3_t __b, const int __c)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_lane_u32 (uint32_t * __a, uint32x2x3_t __b, const int __c)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_lane_p8 (poly8_t * __a, poly8x8x3_t __b, const int __c)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_lane_p16 (poly16_t * __a, poly16x4x3_t __b, const int __c)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_lane_s16 (int16_t * __a, int16x8x3_t __b, const int __c)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_lane_s32 (int32_t * __a, int32x4x3_t __b, const int __c)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_lane_f16 (float16_t * __a, float16x8x3_t __b, const int __c)
+{
+ union { float16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8hf (__a, __bu.__o, __c);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_lane_f32 (float32_t * __a, float32x4x3_t __b, const int __c)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_lane_u16 (uint16_t * __a, uint16x8x3_t __b, const int __c)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_lane_u32 (uint32_t * __a, uint32x4x3_t __b, const int __c)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_lane_p16 (poly16_t * __a, poly16x8x3_t __b, const int __c)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
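+/* Editorial example, not part of the upstream header: the 4-element
+   forms follow the same shape.  A minimal sketch swapping the R and B
+   channels of packed RGBA8 pixels, 8 at a time (swap_rb is a
+   hypothetical helper):
+
+     void swap_rb (uint8_t *rgba, unsigned pixels)
+     {
+       for (unsigned i = 0; i < pixels; i += 8)
+         {
+           uint8x8x4_t px = vld4_u8 (rgba + 4 * i); // split R/G/B/A
+           uint8x8_t r = px.val[0];
+           px.val[0] = px.val[2];                   // R <- B
+           px.val[2] = r;                           // B <- old R
+           vst4_u8 (rgba + 4 * i, px);              // re-interleave
+         }
+     }
+*/
+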
+__extension__ extern __inline int8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_s8 (const int8_t * __a)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_s16 (const int16_t * __a)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_s32 (const int32_t * __a)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_f16 (const float16_t * __a)
+{
+ union { float16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4hf (__a);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_f32 (const float32_t * __a)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_u8 (const uint8_t * __a)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_u16 (const uint16_t * __a)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_u32 (const uint32_t * __a)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_p8 (const poly8_t * __a)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_p16 (const poly16_t * __a)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_p64 (const poly64_t * __a)
+{
+ union { poly64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_s64 (const int64_t * __a)
+{
+ union { int64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_u64 (const uint64_t * __a)
+{
+ union { uint64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_s8 (const int8_t * __a)
+{
+ union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_s16 (const int16_t * __a)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_s32 (const int32_t * __a)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_f16 (const float16_t * __a)
+{
+ union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8hf (__a);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_f32 (const float32_t * __a)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_u8 (const uint8_t * __a)
+{
+ union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_u16 (const uint16_t * __a)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_u32 (const uint32_t * __a)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_p8 (const poly8_t * __a)
+{
+ union { poly8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_p16 (const poly16_t * __a)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_lane_s8 (const int8_t * __a, int8x8x4_t __b, const int __c)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_lane_s16 (const int16_t * __a, int16x4x4_t __b, const int __c)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_lane_s32 (const int32_t * __a, int32x2x4_t __b, const int __c)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_lane_f16 (const float16_t * __a, float16x4x4_t __b, const int __c)
+{
+ union { float16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { float16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4hf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_lane_f32 (const float32_t * __a, float32x2x4_t __b, const int __c)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_lane_u8 (const uint8_t * __a, uint8x8x4_t __b, const int __c)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_lane_u16 (const uint16_t * __a, uint16x4x4_t __b, const int __c)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_lane_u32 (const uint32_t * __a, uint32x2x4_t __b, const int __c)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_lane_p8 (const poly8_t * __a, poly8x8x4_t __b, const int __c)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_lane_p16 (const poly16_t * __a, poly16x4x4_t __b, const int __c)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_lane_s16 (const int16_t * __a, int16x8x4_t __b, const int __c)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_lane_s32 (const int32_t * __a, int32x4x4_t __b, const int __c)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_lane_f16 (const float16_t * __a, float16x8x4_t __b, const int __c)
+{
+ union { float16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8hf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_lane_f32 (const float32_t * __a, float32x4x4_t __b, const int __c)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_lane_u16 (const uint16_t * __a, uint16x8x4_t __b, const int __c)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_lane_u32 (const uint32_t * __a, uint32x4x4_t __b, const int __c)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_lane_p16 (const poly16_t * __a, poly16x8x4_t __b, const int __c)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_s8 (const int8_t * __a)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_s16 (const int16_t * __a)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline int32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_s32 (const int32_t * __a)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_f16 (const float16_t * __a)
+{
+ union { float16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4hf (__a);
+ return __rv.__i;
+}
+#endif
+
+__extension__ extern __inline float32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_f32 (const float32_t * __a)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_u8 (const uint8_t * __a)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_u16 (const uint16_t * __a)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_u32 (const uint32_t * __a)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_p8 (const poly8_t * __a)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline poly16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_p16 (const poly16_t * __a)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_p64 (const poly64_t * __a)
+{
+ union { poly64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_s64 (const int64_t * __a)
+{
+ union { int64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline uint64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_u64 (const uint64_t * __a)
+{
+ union { uint64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_s8 (int8_t * __a, int8x8x4_t __b)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_s16 (int16_t * __a, int16x4x4_t __b)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_s32 (int32_t * __a, int32x2x4_t __b)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_f16 (float16_t * __a, float16x4x4_t __b)
+{
+ union { float16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v4hf (__a, __bu.__o);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_f32 (float32_t * __a, float32x2x4_t __b)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v2sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_u8 (uint8_t * __a, uint8x8x4_t __b)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_u16 (uint16_t * __a, uint16x4x4_t __b)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_u32 (uint32_t * __a, uint32x2x4_t __b)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_p8 (poly8_t * __a, poly8x8x4_t __b)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_p16 (poly16_t * __a, poly16x4x4_t __b)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_p64 (poly64_t * __a, poly64x1x4_t __b)
+{
+ union { poly64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_s64 (int64_t * __a, int64x1x4_t __b)
+{
+ union { int64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_u64 (uint64_t * __a, uint64x1x4_t __b)
+{
+ union { uint64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
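+/* vst4q_*: the 128-bit (q-register) forms of the stores above; the
+   four q-registers travel as one 512-bit __builtin_neon_xi operand.  */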
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_s8 (int8_t * __a, int8x16x4_t __b)
+{
+ union { int8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_s16 (int16_t * __a, int16x8x4_t __b)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_s32 (int32_t * __a, int32x4x4_t __b)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_f16 (float16_t * __a, float16x8x4_t __b)
+{
+ union { float16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v8hf (__a, __bu.__o);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_f32 (float32_t * __a, float32x4x4_t __b)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v4sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_u8 (uint8_t * __a, uint8x16x4_t __b)
+{
+ union { uint8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_u16 (uint16_t * __a, uint16x8x4_t __b)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_u32 (uint32_t * __a, uint32x4x4_t __b)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_p8 (poly8_t * __a, poly8x16x4_t __b)
+{
+ union { poly8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_p16 (poly16_t * __a, poly16x8x4_t __b)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
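+/* vst4_lane_*: store lane __c of each of the four vectors, i.e. one
+   interleaved 4-element structure.  __c must be a compile-time
+   constant lane index.  */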
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_lane_s8 (int8_t * __a, int8x8x4_t __b, const int __c)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_lane_s16 (int16_t * __a, int16x4x4_t __b, const int __c)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_lane_s32 (int32_t * __a, int32x2x4_t __b, const int __c)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_lane_f16 (float16_t * __a, float16x4x4_t __b, const int __c)
+{
+ union { float16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4hf (__a, __bu.__o, __c);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_lane_f32 (float32_t * __a, float32x2x4_t __b, const int __c)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_lane_u8 (uint8_t * __a, uint8x8x4_t __b, const int __c)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_lane_u16 (uint16_t * __a, uint16x4x4_t __b, const int __c)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_lane_u32 (uint32_t * __a, uint32x2x4_t __b, const int __c)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_lane_p8 (poly8_t * __a, poly8x8x4_t __b, const int __c)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_lane_p16 (poly16_t * __a, poly16x4x4_t __b, const int __c)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
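+/* vst4q_lane_*: the q-register lane stores.  Only 16- and 32-bit
+   element variants exist, mirroring the VST4 single-structure
+   encodings available for quad registers.  */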
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_lane_s16 (int16_t * __a, int16x8x4_t __b, const int __c)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_lane_s32 (int32_t * __a, int32x4x4_t __b, const int __c)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_lane_f16 (float16_t * __a, float16x8x4_t __b, const int __c)
+{
+ union { float16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8hf (__a, __bu.__o, __c);
+}
+#endif
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_lane_f32 (float32_t * __a, float32x4x4_t __b, const int __c)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_lane_u16 (uint16_t * __a, uint16x8x4_t __b, const int __c)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_lane_u32 (uint32_t * __a, uint32x4x4_t __b, const int __c)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_lane_p16 (poly16_t * __a, poly16x8x4_t __b, const int __c)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
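+/* Bitwise operations.  vand/vorr/veor map directly to the C &, | and ^
+   operators on vector types; vbic_* computes __a & ~__b (bit clear)
+   and vorn_* computes __a | ~__b (OR-NOT).  These normally lower to
+   single VAND/VORR/VEOR/VBIC/VORN instructions.  Illustrative use,
+   not part of the upstream header:
+     uint8x8_t kept = vbic_u8 (data, mask);    (equals data & ~mask)  */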
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vand_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vand_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vand_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vand_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vand_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vand_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vand_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vand_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vandq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vandq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vandq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vandq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vandq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vandq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vandq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vandq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a & __b;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorr_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorr_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorr_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorr_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorr_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorr_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorr_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorr_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorrq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorrq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorrq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorrq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorrq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a | __b;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veor_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veor_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veor_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veor_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veor_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veor_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veor_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veor_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veorq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veorq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veorq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veorq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veorq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veorq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veorq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+veorq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a ^ __b;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbic_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbic_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbic_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbic_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbic_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbic_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbic_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbic_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbicq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbicq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbicq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbicq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbicq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a & ~__b;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorn_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorn_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorn_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorn_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorn_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorn_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorn_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vorn_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vornq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vornq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vornq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vornq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vornq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vornq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vornq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a | ~__b;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vornq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return __a | ~__b;
+}
+
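+/* vreinterpret_*: reinterpret the 64-bit pattern of one vector type as
+   another.  These are pure bit casts and generate no code.  The
+   poly64x1_t variants sit inside the crypto-neon-fp-armv8 pragma
+   because poly64 types require the crypto extension, and the
+   float16x4_t variants are guarded by the FP16 format macros.  */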
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_p16 (poly16x4_t __a)
+{
+ return (poly8x8_t) __a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_f16 (float16x4_t __a)
+{
+ return (poly8x8_t) __a;
+}
+#endif
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_f32 (float32x2_t __a)
+{
+ return (poly8x8_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_p64 (poly64x1_t __a)
+{
+ return (poly8x8_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_s64 (int64x1_t __a)
+{
+ return (poly8x8_t)__a;
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_u64 (uint64x1_t __a)
+{
+ return (poly8x8_t)__a;
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_s8 (int8x8_t __a)
+{
+ return (poly8x8_t)__a;
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_s16 (int16x4_t __a)
+{
+ return (poly8x8_t)__a;
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_s32 (int32x2_t __a)
+{
+ return (poly8x8_t)__a;
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_u8 (uint8x8_t __a)
+{
+ return (poly8x8_t)__a;
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_u16 (uint16x4_t __a)
+{
+ return (poly8x8_t)__a;
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_u32 (uint32x2_t __a)
+{
+ return (poly8x8_t)__a;
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_p8 (poly8x8_t __a)
+{
+ return (poly16x4_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_f16 (float16x4_t __a)
+{
+ return (poly16x4_t) __a;
+}
+#endif
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_f32 (float32x2_t __a)
+{
+ return (poly16x4_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_p64 (poly64x1_t __a)
+{
+ return (poly16x4_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_s64 (int64x1_t __a)
+{
+ return (poly16x4_t)__a;
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_u64 (uint64x1_t __a)
+{
+ return (poly16x4_t)__a;
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_s8 (int8x8_t __a)
+{
+ return (poly16x4_t)__a;
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_s16 (int16x4_t __a)
+{
+ return (poly16x4_t)__a;
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_s32 (int32x2_t __a)
+{
+ return (poly16x4_t)__a;
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_u8 (uint8x8_t __a)
+{
+ return (poly16x4_t)__a;
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_u16 (uint16x4_t __a)
+{
+ return (poly16x4_t)__a;
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_u32 (uint32x2_t __a)
+{
+ return (poly16x4_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_bf16 (bfloat16x4_t __a)
+{
+ return (float16x4_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_p8 (poly8x8_t __a)
+{
+ return (float16x4_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_p16 (poly16x4_t __a)
+{
+ return (float16x4_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_f32 (float32x2_t __a)
+{
+ return (float16x4_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_p64 (poly64x1_t __a)
+{
+ return (float16x4_t) __a;
+}
+#pragma GCC pop_options
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_s64 (int64x1_t __a)
+{
+ return (float16x4_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_u64 (uint64x1_t __a)
+{
+ return (float16x4_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_s8 (int8x8_t __a)
+{
+ return (float16x4_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_s16 (int16x4_t __a)
+{
+ return (float16x4_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_s32 (int32x2_t __a)
+{
+ return (float16x4_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_u8 (uint8x8_t __a)
+{
+ return (float16x4_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_u16 (uint16x4_t __a)
+{
+ return (float16x4_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_u32 (uint32x2_t __a)
+{
+ return (float16x4_t) __a;
+}
+#endif
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_p8 (poly8x8_t __a)
+{
+ return (float32x2_t)__a;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_p16 (poly16x4_t __a)
+{
+ return (float32x2_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_f16 (float16x4_t __a)
+{
+ return (float32x2_t) __a;
+}
+#endif
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_p64 (poly64x1_t __a)
+{
+ return (float32x2_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_s64 (int64x1_t __a)
+{
+ return (float32x2_t)__a;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_u64 (uint64x1_t __a)
+{
+ return (float32x2_t)__a;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_s8 (int8x8_t __a)
+{
+ return (float32x2_t)__a;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_s16 (int16x4_t __a)
+{
+ return (float32x2_t)__a;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_s32 (int32x2_t __a)
+{
+ return (float32x2_t)__a;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_u8 (uint8x8_t __a)
+{
+ return (float32x2_t)__a;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_u16 (uint16x4_t __a)
+{
+ return (float32x2_t)__a;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_u32 (uint32x2_t __a)
+{
+ return (float32x2_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_p8 (poly8x8_t __a)
+{
+ return (poly64x1_t)__a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_p16 (poly16x4_t __a)
+{
+ return (poly64x1_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_f16 (float16x4_t __a)
+{
+ return (poly64x1_t) __a;
+}
+#endif
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_f32 (float32x2_t __a)
+{
+ return (poly64x1_t)__a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_s64 (int64x1_t __a)
+{
+ return (poly64x1_t)__a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_u64 (uint64x1_t __a)
+{
+ return (poly64x1_t)__a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_s8 (int8x8_t __a)
+{
+ return (poly64x1_t)__a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_s16 (int16x4_t __a)
+{
+ return (poly64x1_t)__a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_s32 (int32x2_t __a)
+{
+ return (poly64x1_t)__a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_u8 (uint8x8_t __a)
+{
+ return (poly64x1_t)__a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_u16 (uint16x4_t __a)
+{
+ return (poly64x1_t)__a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_u32 (uint32x2_t __a)
+{
+ return (poly64x1_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_p8 (poly8x8_t __a)
+{
+ return (int64x1_t)__a;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_p16 (poly16x4_t __a)
+{
+ return (int64x1_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_f16 (float16x4_t __a)
+{
+ return (int64x1_t) __a;
+}
+#endif
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_f32 (float32x2_t __a)
+{
+ return (int64x1_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_p64 (poly64x1_t __a)
+{
+ return (int64x1_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_u64 (uint64x1_t __a)
+{
+ return (int64x1_t)__a;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_s8 (int8x8_t __a)
+{
+ return (int64x1_t)__a;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_s16 (int16x4_t __a)
+{
+ return (int64x1_t)__a;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_s32 (int32x2_t __a)
+{
+ return (int64x1_t)__a;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_u8 (uint8x8_t __a)
+{
+ return (int64x1_t)__a;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_u16 (uint16x4_t __a)
+{
+ return (int64x1_t)__a;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_u32 (uint32x2_t __a)
+{
+ return (int64x1_t)__a;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_p8 (poly8x8_t __a)
+{
+ return (uint64x1_t)__a;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_p16 (poly16x4_t __a)
+{
+ return (uint64x1_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_f16 (float16x4_t __a)
+{
+ return (uint64x1_t) __a;
+}
+#endif
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_f32 (float32x2_t __a)
+{
+ return (uint64x1_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_p64 (poly64x1_t __a)
+{
+ return (uint64x1_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_s64 (int64x1_t __a)
+{
+ return (uint64x1_t)__a;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_s8 (int8x8_t __a)
+{
+ return (uint64x1_t)__a;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_s16 (int16x4_t __a)
+{
+ return (uint64x1_t)__a;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_s32 (int32x2_t __a)
+{
+ return (uint64x1_t)__a;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_u8 (uint8x8_t __a)
+{
+ return (uint64x1_t)__a;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_u16 (uint16x4_t __a)
+{
+ return (uint64x1_t)__a;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_u32 (uint32x2_t __a)
+{
+ return (uint64x1_t)__a;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_p8 (poly8x8_t __a)
+{
+ return (int8x8_t)__a;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_p16 (poly16x4_t __a)
+{
+ return (int8x8_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_f16 (float16x4_t __a)
+{
+ return (int8x8_t) __a;
+}
+#endif
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_f32 (float32x2_t __a)
+{
+ return (int8x8_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_p64 (poly64x1_t __a)
+{
+ return (int8x8_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_s64 (int64x1_t __a)
+{
+ return (int8x8_t)__a;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_u64 (uint64x1_t __a)
+{
+ return (int8x8_t)__a;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_s16 (int16x4_t __a)
+{
+ return (int8x8_t)__a;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_s32 (int32x2_t __a)
+{
+ return (int8x8_t)__a;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_u8 (uint8x8_t __a)
+{
+ return (int8x8_t)__a;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_u16 (uint16x4_t __a)
+{
+ return (int8x8_t)__a;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_u32 (uint32x2_t __a)
+{
+ return (int8x8_t)__a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_p8 (poly8x8_t __a)
+{
+ return (int16x4_t)__a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_p16 (poly16x4_t __a)
+{
+ return (int16x4_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_f16 (float16x4_t __a)
+{
+ return (int16x4_t) __a;
+}
+#endif
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_f32 (float32x2_t __a)
+{
+ return (int16x4_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_p64 (poly64x1_t __a)
+{
+ return (int16x4_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_s64 (int64x1_t __a)
+{
+ return (int16x4_t)__a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_u64 (uint64x1_t __a)
+{
+ return (int16x4_t)__a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_s8 (int8x8_t __a)
+{
+ return (int16x4_t)__a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_s32 (int32x2_t __a)
+{
+ return (int16x4_t)__a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_u8 (uint8x8_t __a)
+{
+ return (int16x4_t)__a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_u16 (uint16x4_t __a)
+{
+ return (int16x4_t)__a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_u32 (uint32x2_t __a)
+{
+ return (int16x4_t)__a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_p8 (poly8x8_t __a)
+{
+ return (int32x2_t)__a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_p16 (poly16x4_t __a)
+{
+ return (int32x2_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_f16 (float16x4_t __a)
+{
+ return (int32x2_t) __a;
+}
+#endif
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_f32 (float32x2_t __a)
+{
+ return (int32x2_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_p64 (poly64x1_t __a)
+{
+ return (int32x2_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_s64 (int64x1_t __a)
+{
+ return (int32x2_t)__a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_u64 (uint64x1_t __a)
+{
+ return (int32x2_t)__a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_s8 (int8x8_t __a)
+{
+ return (int32x2_t)__a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_s16 (int16x4_t __a)
+{
+ return (int32x2_t)__a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_u8 (uint8x8_t __a)
+{
+ return (int32x2_t)__a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_u16 (uint16x4_t __a)
+{
+ return (int32x2_t)__a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_u32 (uint32x2_t __a)
+{
+ return (int32x2_t)__a;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_p8 (poly8x8_t __a)
+{
+ return (uint8x8_t)__a;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_p16 (poly16x4_t __a)
+{
+ return (uint8x8_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_f16 (float16x4_t __a)
+{
+ return (uint8x8_t) __a;
+}
+#endif
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_f32 (float32x2_t __a)
+{
+ return (uint8x8_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_p64 (poly64x1_t __a)
+{
+ return (uint8x8_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_s64 (int64x1_t __a)
+{
+ return (uint8x8_t)__a;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_u64 (uint64x1_t __a)
+{
+ return (uint8x8_t)__a;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_s8 (int8x8_t __a)
+{
+ return (uint8x8_t)__a;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_s16 (int16x4_t __a)
+{
+ return (uint8x8_t)__a;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_s32 (int32x2_t __a)
+{
+ return (uint8x8_t)__a;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_u16 (uint16x4_t __a)
+{
+ return (uint8x8_t)__a;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_u32 (uint32x2_t __a)
+{
+ return (uint8x8_t)__a;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_p8 (poly8x8_t __a)
+{
+ return (uint16x4_t)__a;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_p16 (poly16x4_t __a)
+{
+ return (uint16x4_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_f16 (float16x4_t __a)
+{
+ return (uint16x4_t) __a;
+}
+#endif
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_f32 (float32x2_t __a)
+{
+ return (uint16x4_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_p64 (poly64x1_t __a)
+{
+ return (uint16x4_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_s64 (int64x1_t __a)
+{
+ return (uint16x4_t)__a;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_u64 (uint64x1_t __a)
+{
+ return (uint16x4_t)__a;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_s8 (int8x8_t __a)
+{
+ return (uint16x4_t)__a;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_s16 (int16x4_t __a)
+{
+ return (uint16x4_t)__a;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_s32 (int32x2_t __a)
+{
+ return (uint16x4_t)__a;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_u8 (uint8x8_t __a)
+{
+ return (uint16x4_t)__a;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_u32 (uint32x2_t __a)
+{
+ return (uint16x4_t)__a;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_p8 (poly8x8_t __a)
+{
+ return (uint32x2_t)__a;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_p16 (poly16x4_t __a)
+{
+ return (uint32x2_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_f16 (float16x4_t __a)
+{
+ return (uint32x2_t) __a;
+}
+#endif
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_f32 (float32x2_t __a)
+{
+ return (uint32x2_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_p64 (poly64x1_t __a)
+{
+ return (uint32x2_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_s64 (int64x1_t __a)
+{
+ return (uint32x2_t)__a;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_u64 (uint64x1_t __a)
+{
+ return (uint32x2_t)__a;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_s8 (int8x8_t __a)
+{
+ return (uint32x2_t)__a;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_s16 (int16x4_t __a)
+{
+ return (uint32x2_t)__a;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_s32 (int32x2_t __a)
+{
+ return (uint32x2_t)__a;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_u8 (uint8x8_t __a)
+{
+ return (uint32x2_t)__a;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_u16 (uint16x4_t __a)
+{
+ return (uint32x2_t)__a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_p16 (poly16x8_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_f16 (float16x8_t __a)
+{
+ return (poly8x16_t) __a;
+}
+#endif
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_f32 (float32x4_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_p64 (poly64x2_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_p128 (poly128_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_s64 (int64x2_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_u64 (uint64x2_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_s8 (int8x16_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_s16 (int16x8_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_s32 (int32x4_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_u8 (uint8x16_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_u16 (uint16x8_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_u32 (uint32x4_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_p8 (poly8x16_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_f16 (float16x8_t __a)
+{
+ return (poly16x8_t) __a;
+}
+#endif
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_f32 (float32x4_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_p64 (poly64x2_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_p128 (poly128_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_s64 (int64x2_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_u64 (uint64x2_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_s8 (int8x16_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_s16 (int16x8_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_s32 (int32x4_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_u8 (uint8x16_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_u16 (uint16x8_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_u32 (uint32x4_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_p8 (poly8x16_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_p16 (poly16x8_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_bf16 (bfloat16x8_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_f32 (float32x4_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_p64 (poly64x2_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_p128 (poly128_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+#pragma GCC pop_options
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_s64 (int64x2_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_u64 (uint64x2_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_s8 (int8x16_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_s16 (int16x8_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_s32 (int32x4_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_u8 (uint8x16_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_u16 (uint16x8_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_u32 (uint32x4_t __a)
+{
+ return (float16x8_t) __a;
+}
+#endif
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_p8 (poly8x16_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_p16 (poly16x8_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_f16 (float16x8_t __a)
+{
+ return (float32x4_t) __a;
+}
+#endif
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_p64 (poly64x2_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_p128 (poly128_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_s64 (int64x2_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_u64 (uint64x2_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_s8 (int8x16_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_s16 (int16x8_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_s32 (int32x4_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_u8 (uint8x16_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_u16 (uint16x8_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_u32 (uint32x4_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_p8 (poly8x16_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_p16 (poly16x8_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_f16 (float16x8_t __a)
+{
+ return (poly64x2_t) __a;
+}
+#endif
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_f32 (float32x4_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_p128 (poly128_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_s64 (int64x2_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_u64 (uint64x2_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_s8 (int8x16_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_s16 (int16x8_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_s32 (int32x4_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_u8 (uint8x16_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_u16 (uint16x8_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_u32 (uint32x4_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_p8 (poly8x16_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_p16 (poly16x8_t __a)
+{
+ return (poly128_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_f16 (float16x8_t __a)
+{
+ return (poly128_t) __a;
+}
+#endif
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_f32 (float32x4_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_p64 (poly64x2_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_s64 (int64x2_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_u64 (uint64x2_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_s8 (int8x16_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_s16 (int16x8_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_s32 (int32x4_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_u8 (uint8x16_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_u16 (uint16x8_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_u32 (uint32x4_t __a)
+{
+ return (poly128_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_p8 (poly8x16_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_p16 (poly16x8_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_f16 (float16x8_t __a)
+{
+ return (int64x2_t) __a;
+}
+#endif
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_f32 (float32x4_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_p64 (poly64x2_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_p128 (poly128_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_u64 (uint64x2_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_s8 (int8x16_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_s16 (int16x8_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_s32 (int32x4_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_u8 (uint8x16_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_u16 (uint16x8_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_u32 (uint32x4_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_p8 (poly8x16_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_p16 (poly16x8_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_f16 (float16x8_t __a)
+{
+ return (uint64x2_t) __a;
+}
+#endif
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_f32 (float32x4_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_p64 (poly64x2_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_p128 (poly128_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_s64 (int64x2_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_s8 (int8x16_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_s16 (int16x8_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_s32 (int32x4_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_u8 (uint8x16_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_u16 (uint16x8_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_u32 (uint32x4_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_p8 (poly8x16_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_p16 (poly16x8_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_f16 (float16x8_t __a)
+{
+ return (int8x16_t) __a;
+}
+#endif
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_f32 (float32x4_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_p64 (poly64x2_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_p128 (poly128_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_s64 (int64x2_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_u64 (uint64x2_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_s16 (int16x8_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_s32 (int32x4_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_u8 (uint8x16_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_u16 (uint16x8_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_u32 (uint32x4_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_p8 (poly8x16_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_p16 (poly16x8_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_f16 (float16x8_t __a)
+{
+ return (int16x8_t) __a;
+}
+#endif
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_f32 (float32x4_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_p64 (poly64x2_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_p128 (poly128_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_s64 (int64x2_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_u64 (uint64x2_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_s8 (int8x16_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_s32 (int32x4_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_u8 (uint8x16_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_u16 (uint16x8_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_u32 (uint32x4_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_p8 (poly8x16_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_p16 (poly16x8_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_f16 (float16x8_t __a)
+{
+ return (int32x4_t)__a;
+}
+#endif
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_f32 (float32x4_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_p64 (poly64x2_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_p128 (poly128_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_s64 (int64x2_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_u64 (uint64x2_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_s8 (int8x16_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_s16 (int16x8_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_u8 (uint8x16_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_u16 (uint16x8_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_u32 (uint32x4_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_p8 (poly8x16_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_p16 (poly16x8_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_f16 (float16x8_t __a)
+{
+ return (uint8x16_t) __a;
+}
+#endif
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_f32 (float32x4_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_p64 (poly64x2_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_p128 (poly128_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_s64 (int64x2_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_u64 (uint64x2_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_s8 (int8x16_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_s16 (int16x8_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_s32 (int32x4_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_u16 (uint16x8_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_u32 (uint32x4_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_p8 (poly8x16_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_p16 (poly16x8_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_f16 (float16x8_t __a)
+{
+ return (uint16x8_t) __a;
+}
+#endif
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_f32 (float32x4_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_p64 (poly64x2_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_p128 (poly128_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_s64 (int64x2_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_u64 (uint64x2_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_s8 (int8x16_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_s16 (int16x8_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_s32 (int32x4_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_u8 (uint8x16_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_u32 (uint32x4_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_p8 (poly8x16_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_p16 (poly16x8_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_f16 (float16x8_t __a)
+{
+ return (uint32x4_t) __a;
+}
+#endif
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_f32 (float32x4_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_p64 (poly64x2_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_p128 (poly128_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_s64 (int64x2_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_u64 (uint64x2_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_s8 (int8x16_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_s16 (int16x8_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_s32 (int32x4_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_u8 (uint8x16_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_u16 (uint16x8_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vldrq_p128 (poly128_t const * __ptr)
+{
+#ifdef __ARM_BIG_ENDIAN
+ poly64_t* __ptmp = (poly64_t*) __ptr;
+ poly64_t __d0 = vld1_p64 (__ptmp);
+ poly64_t __d1 = vld1_p64 (__ptmp + 1);
+ return vreinterpretq_p128_p64 (vcombine_p64 (__d1, __d0));
+#else
+ return vreinterpretq_p128_p64 (vld1q_p64 ((poly64_t*) __ptr));
+#endif
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vstrq_p128 (poly128_t * __ptr, poly128_t __val)
+{
+#ifdef __ARM_BIG_ENDIAN
+ poly64x2_t __tmp = vreinterpretq_p64_p128 (__val);
+ poly64_t __d0 = vget_high_p64 (__tmp);
+ poly64_t __d1 = vget_low_p64 (__tmp);
+ vst1q_p64 ((poly64_t*) __ptr, vcombine_p64 (__d0, __d1));
+#else
+ vst1q_p64 ((poly64_t*) __ptr, vreinterpretq_p64_p128 (__val));
+#endif
+}
+
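+/* Illustrative sketch, not part of the upstream header: vldrq_p128 and
+   vstrq_p128 give an endian-safe round trip for a 128-bit poly value.
+   On big-endian targets the two 64-bit halves are swapped on load and
+   swapped back on store, so the logical value is preserved either way.  */
+#if 0
+static poly128_t
+copy_p128 (const poly128_t *__src, poly128_t *__dst)
+{
+  poly128_t __v = vldrq_p128 (__src);  /* Endian-aware 128-bit load.  */
+  vstrq_p128 (__dst, __v);             /* Endian-aware 128-bit store.  */
+  return __v;
+}
+#endif
+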
+/* The vceq_p64 intrinsic does not map to a single instruction.
+   Instead we emulate it by performing a 32-bit variant of the vceq
+   and applying a pairwise min reduction to the result.
+   vceq_u32 will produce two 32-bit halves, each of which will contain
+   either all ones or all zeroes depending on whether the corresponding
+   32-bit halves of the poly64_t were equal.  The whole poly64_t values
+   are equal if and only if both halves are equal, i.e. vceq_u32 returns
+   all ones.  If the result is all zeroes for either half then the whole
+   result is zeroes.  This is what the pairwise min reduction achieves.  */
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceq_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ uint32x2_t __t_a = vreinterpret_u32_p64 (__a);
+ uint32x2_t __t_b = vreinterpret_u32_p64 (__b);
+ uint32x2_t __c = vceq_u32 (__t_a, __t_b);
+ uint32x2_t __m = vpmin_u32 (__c, __c);
+ return vreinterpret_u64_u32 (__m);
+}
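+
+/* Illustrative sketch, not part of the upstream header: vceq_p64 yields
+   an all-ones uint64x1_t when the operands are bit-identical and an
+   all-zeroes one otherwise, so a scalar equality test falls out of a
+   single lane extraction.  */
+#if 0
+static int
+p64_equal (poly64x1_t __a, poly64x1_t __b)
+{
+  /* The lane is either all ones or all zeroes, so != 0 means equal.  */
+  return vget_lane_u64 (vceq_p64 (__a, __b), 0) != 0;
+}
+#endif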
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqz_p64 (poly64x1_t __a)
+{
+ poly64x1_t __b = vreinterpret_p64_u32 (vdup_n_u32 (0));
+ return vceq_p64 (__a, __b);
+}
+
+/* For vceqq_p64, we rely on vceq_p64 for each of the two elements. */
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqq_p64 (poly64x2_t __a, poly64x2_t __b)
+{
+ poly64_t __high_a = vget_high_p64 (__a);
+ poly64_t __high_b = vget_high_p64 (__b);
+ uint64x1_t __high = vceq_p64 (__high_a, __high_b);
+
+ poly64_t __low_a = vget_low_p64 (__a);
+ poly64_t __low_b = vget_low_p64 (__b);
+ uint64x1_t __low = vceq_p64 (__low_a, __low_b);
+ return vcombine_u64 (__low, __high);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqzq_p64 (poly64x2_t __a)
+{
+ poly64x2_t __b = vreinterpretq_p64_u32 (vdupq_n_u32 (0));
+ return vceqq_p64 (__a, __b);
+}
+
+/* The vtst_p64 intrinsic does not map to a single instruction.
+   We emulate it in a way similar to vceq_p64 above, but here we do
+   a reduction with max, since if any pair of corresponding bits
+   in the two poly64_t values are both set, then the whole result
+   must be all ones.  */
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtst_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ uint32x2_t __t_a = vreinterpret_u32_p64 (__a);
+ uint32x2_t __t_b = vreinterpret_u32_p64 (__b);
+ uint32x2_t __c = vtst_u32 (__t_a, __t_b);
+ uint32x2_t __m = vpmax_u32 (__c, __c);
+ return vreinterpret_u64_u32 (__m);
+}
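+
+/* Illustrative sketch, not part of the upstream header: vtst_p64
+   reports whether (__a & __b) has any set bit, as an all-ones or
+   all-zeroes mask, matching the other vtst intrinsics.  */
+#if 0
+static int
+p64_bits_overlap (poly64x1_t __a, poly64x1_t __b)
+{
+  return vget_lane_u64 (vtst_p64 (__a, __b), 0) != 0;
+}
+#endif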
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaeseq_u8 (uint8x16_t __data, uint8x16_t __key)
+{
+ return __builtin_arm_crypto_aese (__data, __key);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaesdq_u8 (uint8x16_t __data, uint8x16_t __key)
+{
+ return __builtin_arm_crypto_aesd (__data, __key);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaesmcq_u8 (uint8x16_t __data)
+{
+ return __builtin_arm_crypto_aesmc (__data);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaesimcq_u8 (uint8x16_t __data)
+{
+ return __builtin_arm_crypto_aesimc (__data);
+}
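+
+/* Illustrative sketch, not part of the upstream header: a full AES
+   encryption round is conventionally composed from these primitives as
+   AESE (AddRoundKey, SubBytes, ShiftRows) followed by AESMC
+   (MixColumns); the final round omits the vaesmcq_u8 step.  */
+#if 0
+static uint8x16_t
+aes_enc_round (uint8x16_t __state, uint8x16_t __round_key)
+{
+  return vaesmcq_u8 (vaeseq_u8 (__state, __round_key));
+}
+#endif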
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsha1h_u32 (uint32_t __hash_e)
+{
+ return vgetq_lane_u32 (__builtin_arm_crypto_sha1h (vdupq_n_u32 (__hash_e)),
+ 0);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsha1cq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
+{
+ return __builtin_arm_crypto_sha1c (__hash_abcd, vdupq_n_u32 (__hash_e),
+ __wk);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsha1pq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
+{
+ return __builtin_arm_crypto_sha1p (__hash_abcd, vdupq_n_u32 (__hash_e),
+ __wk);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsha1mq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
+{
+ return __builtin_arm_crypto_sha1m (__hash_abcd, vdupq_n_u32 (__hash_e),
+ __wk);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsha1su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7, uint32x4_t __w8_11)
+{
+ return __builtin_arm_crypto_sha1su0 (__w0_3, __w4_7, __w8_11);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsha1su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w12_15)
+{
+ return __builtin_arm_crypto_sha1su1 (__tw0_3, __w12_15);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsha256hq_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk)
+{
+ return __builtin_arm_crypto_sha256h (__hash_abcd, __hash_efgh, __wk);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsha256h2q_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk)
+{
+ return __builtin_arm_crypto_sha256h2 (__hash_abcd, __hash_efgh, __wk);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsha256su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7)
+{
+ return __builtin_arm_crypto_sha256su0 (__w0_3, __w4_7);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsha256su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w8_11, uint32x4_t __w12_15)
+{
+ return __builtin_arm_crypto_sha256su1 (__tw0_3, __w8_11, __w12_15);
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_p64 (poly64_t __a, poly64_t __b)
+{
+ return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __a, (uint64_t) __b);
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_high_p64 (poly64x2_t __a, poly64x2_t __b)
+{
+ poly64_t __t1 = vget_high_p64 (__a);
+ poly64_t __t2 = vget_high_p64 (__b);
+
+ return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __t1, (uint64_t) __t2);
+}
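+
+/* Illustrative sketch, not part of the upstream header: vmull_p64 is a
+   64x64 -> 128-bit carry-less (polynomial) multiply, the building block
+   of GHASH and CRC kernels; vmull_high_p64 applies it to the high
+   halves of two 128-bit vectors.  */
+#if 0
+static poly128_t
+clmul_low_and_high (poly64x2_t __a, poly64x2_t __b, poly128_t *__hi)
+{
+  *__hi = vmull_high_p64 (__a, __b);
+  return vmull_p64 (vget_low_p64 (__a), vget_low_p64 (__b));
+}
+#endif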
+
+#pragma GCC pop_options
+
+/* Intrinsics for FP16 instructions.  */
+#pragma GCC push_options
+#pragma GCC target ("fpu=neon-fp-armv8")
+#if defined (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabd_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vabdv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vabdv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabs_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vabsv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabsq_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vabsv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vadd_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a + __b;
+#else
+ return __builtin_neon_vaddv4hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddq_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a + __b;
+#else
+ return __builtin_neon_vaddv8hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcage_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint16x4_t) (vabs_f16 (__a) >= vabs_f16 (__b));
+#else
+ return (uint16x4_t)__builtin_neon_vcagev4hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcageq_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint16x8_t) (vabsq_f16 (__a) >= vabsq_f16 (__b));
+#else
+ return (uint16x8_t)__builtin_neon_vcagev8hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcagt_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint16x4_t) (vabs_f16 (__a) > vabs_f16 (__b));
+#else
+ return (uint16x4_t)__builtin_neon_vcagtv4hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcagtq_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint16x8_t) (vabsq_f16 (__a) > vabsq_f16 (__b));
+#else
+ return (uint16x8_t)__builtin_neon_vcagtv8hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcale_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint16x4_t) (vabs_f16 (__a) <= vabs_f16 (__b));
+#else
+ return (uint16x4_t)__builtin_neon_vcalev4hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaleq_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint16x8_t) (vabsq_f16 (__a) <= vabsq_f16 (__b));
+#else
+ return (uint16x8_t)__builtin_neon_vcalev8hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcalt_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint16x4_t) (vabs_f16 (__a) < vabs_f16 (__b));
+#else
+ return (uint16x4_t)__builtin_neon_vcaltv4hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaltq_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint16x8_t) (vabsq_f16 (__a) < vabsq_f16 (__b));
+#else
+ return (uint16x8_t)__builtin_neon_vcaltv8hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceq_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint16x4_t) (__a == __b);
+#else
+ return (uint16x4_t)__builtin_neon_vceqv4hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqq_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __FAST_MATH__
+ return (uint16x8_t) (__a == __b);
+#else
+ return (uint16x8_t)__builtin_neon_vceqv8hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqz_f16 (float16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vceqzv4hf (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqzq_f16 (float16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vceqzv8hf (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcge_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgev4hf (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgeq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgev8hf (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgez_f16 (float16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vcgezv4hf (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgezq_f16 (float16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vcgezv8hf (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgt_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgtv4hf (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgtq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgtv8hf (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgtz_f16 (float16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vcgtzv4hf (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgtzq_f16 (float16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vcgtzv8hf (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcle_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vclev4hf (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcleq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vclev8hf (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclez_f16 (float16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vclezv4hf (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclezq_f16 (float16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vclezv8hf (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclt_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcltv4hf (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcltq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcltv8hf (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcltz_f16 (float16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vcltzv4hf (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcltzq_f16 (float16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vcltzv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_f16_s16 (int16x4_t __a)
+{
+ return (float16x4_t)__builtin_neon_vcvtsv4hi (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_f16_u16 (uint16x4_t __a)
+{
+ return (float16x4_t)__builtin_neon_vcvtuv4hi ((int16x4_t)__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_s16_f16 (float16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vcvtsv4hf (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_u16_f16 (float16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vcvtuv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_f16_s16 (int16x8_t __a)
+{
+ return (float16x8_t)__builtin_neon_vcvtsv8hi (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_f16_u16 (uint16x8_t __a)
+{
+ return (float16x8_t)__builtin_neon_vcvtuv8hi ((int16x8_t)__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_s16_f16 (float16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vcvtsv8hf (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_u16_f16 (float16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vcvtuv8hf (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvta_s16_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vcvtasv4hf (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvta_u16_f16 (float16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vcvtauv4hf (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtaq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vcvtasv8hf (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtaq_u16_f16 (float16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vcvtauv8hf (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtm_s16_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vcvtmsv4hf (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtm_u16_f16 (float16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vcvtmuv4hf (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtmq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vcvtmsv8hf (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtmq_u16_f16 (float16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vcvtmuv8hf (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtn_s16_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vcvtnsv4hf (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtn_u16_f16 (float16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vcvtnuv4hf (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtnq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vcvtnsv8hf (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtnq_u16_f16 (float16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vcvtnuv8hf (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtp_s16_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vcvtpsv4hf (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtp_u16_f16 (float16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vcvtpuv4hf (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtpq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vcvtpsv8hf (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtpq_u16_f16 (float16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vcvtpuv8hf (__a);
+}
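+
+/* Rounding-mode legend for the vcvt{a,m,n,p} conversions above, per the
+   usual Arm suffix convention: 'a' rounds to nearest with ties away from
+   zero, 'm' toward minus infinity, 'n' to nearest with ties to even, and
+   'p' toward plus infinity.  For a lane holding -1.5 the s16 results are
+   -2 (a), -2 (m), -2 (n) and -1 (p).  */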
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_f16_s16 (int16x4_t __a, const int __b)
+{
+ return __builtin_neon_vcvts_nv4hi (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_f16_u16 (uint16x4_t __a, const int __b)
+{
+ return __builtin_neon_vcvtu_nv4hi ((int16x4_t)__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_f16_s16 (int16x8_t __a, const int __b)
+{
+ return __builtin_neon_vcvts_nv8hi (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_f16_u16 (uint16x8_t __a, const int __b)
+{
+ return __builtin_neon_vcvtu_nv8hi ((int16x8_t)__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_s16_f16 (float16x4_t __a, const int __b)
+{
+ return __builtin_neon_vcvts_nv4hf (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_u16_f16 (float16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vcvtu_nv4hf (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_s16_f16 (float16x8_t __a, const int __b)
+{
+ return __builtin_neon_vcvts_nv8hf (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_u16_f16 (float16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vcvtu_nv8hf (__a, __b);
+}
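+
+/* The _n conversions above interpret __b as a fraction-bit count
+   (1 <= __b <= 16): converting to float divides the fixed-point value by
+   2**__b; converting to integer multiplies by 2**__b and saturates.  A
+   Q8.8 round trip, with illustrative names:
+
+     int16x4_t __q = vcvt_n_s16_f16 (__v, 8);    // f16 -> Q8.8 fixed point
+     float16x4_t __f = vcvt_n_f16_s16 (__q, 8);  // Q8.8 -> f16
+  */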
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfma_f16 (float16x4_t __a, float16x4_t __b, float16x4_t __c)
+{
+ return __builtin_neon_vfmav4hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __builtin_neon_vfmav8hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfms_f16 (float16x4_t __a, float16x4_t __b, float16x4_t __c)
+{
+ return __builtin_neon_vfmsv4hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmsq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __builtin_neon_vfmsv8hf (__a, __b, __c);
+}
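+
+/* vfma_f16 (__a, __b, __c) computes __a + __b * __c with a single
+   rounding (fused); vfms_f16 computes __a - __b * __c.  A Horner step,
+   with illustrative names:
+
+     __acc = vfma_f16 (__coef, __acc, __x);   // acc = coef + acc * x
+  */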
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmax_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vmaxfv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vmaxfv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxnm_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vmaxnmv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxnmq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vmaxnmv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmin_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vminfv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vminfv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminnm_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vminnmv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminnmq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vminnmv8hf (__a, __b);
+}
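+
+/* vmax/vmin return NaN when either input lane is NaN, whereas the
+   vmaxnm/vminnm variants follow the IEEE 754-2008 maxNum/minNum rules:
+   if exactly one operand is a quiet NaN, the numeric operand is
+   returned.  Prefer the *nm forms when NaN lanes should be skipped.  */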
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a * __b;
+#else
+ return __builtin_neon_vmulfv4hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_lane_f16 (float16x4_t __a, float16x4_t __b, const int __c)
+{
+ return __builtin_neon_vmul_lanev4hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_n_f16 (float16x4_t __a, float16_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a * __b;
+#else
+ return __builtin_neon_vmul_nv4hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a * __b;
+#else
+ return __builtin_neon_vmulfv8hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_lane_f16 (float16x8_t __a, float16x4_t __b, const int __c)
+{
+ return __builtin_neon_vmul_lanev8hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_n_f16 (float16x8_t __a, float16_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a * __b;
+#else
+ return __builtin_neon_vmul_nv8hf (__a, __b);
+#endif
+}
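+
+/* A note on the __FAST_MATH__ split in the multiply and subtract
+   intrinsics here: under -ffast-math the generic C vector operator is
+   emitted, which the middle end may freely reassociate and fold;
+   otherwise the builtin is used, presumably to keep a one-to-one,
+   IEEE-conformant mapping to the NEON instruction.  Per-lane scaling
+   sketch:
+
+     float16x4_t __half = vmul_n_f16 (__v, (float16_t) 0.5);  // v[i] * 0.5
+  */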
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vneg_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vnegv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vnegq_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vnegv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadd_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vpaddv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmax_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vpmaxfv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmin_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vpminfv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecpe_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vrecpev4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecpeq_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vrecpev8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrnd_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vrndv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndq_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vrndv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrnda_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vrndav4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndaq_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vrndav8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndm_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vrndmv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndmq_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vrndmv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndn_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vrndnv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndnq_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vrndnv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndp_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vrndpv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndpq_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vrndpv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndx_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vrndxv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndxq_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vrndxv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrte_f16 (float16x4_t __a)
+{
+ return __builtin_neon_vrsqrtev4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrteq_f16 (float16x8_t __a)
+{
+ return __builtin_neon_vrsqrtev8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecps_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vrecpsv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecpsq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vrecpsv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrts_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vrsqrtsv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrtsq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vrsqrtsv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsub_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a - __b;
+#else
+ return __builtin_neon_vsubv4hf (__a, __b);
+#endif
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubq_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __FAST_MATH__
+ return __a - __b;
+#else
+ return __builtin_neon_vsubv8hf (__a, __b);
+#endif
+}
+
+#endif /* __ARM_FEATURE_VECTOR_FP16_ARITHMETIC. */
+#pragma GCC pop_options
+
+/* Half-precision data processing intrinsics. */
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_f16 (uint16x4_t __a, float16x4_t __b, float16x4_t __c)
+{
+ return __builtin_neon_vbslv4hf ((int16x4_t)__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_f16 (uint16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __builtin_neon_vbslv8hf ((int16x8_t)__a, __b, __c);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_f16 (float16_t __a)
+{
+ return (float16x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_f16 (float16_t __a)
+{
+ return (float16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_f16 (float16x4_t __a, const int __b)
+{
+ return __builtin_neon_vdup_lanev4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_f16 (float16x4_t __a, const int __b)
+{
+ return __builtin_neon_vdup_lanev8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_f16 (float16x4_t __a, float16x4_t __b, const int __c)
+{
+ return __builtin_neon_vextv4hf (__a, __b, __c);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_f16 (float16x8_t __a, float16x8_t __b, const int __c)
+{
+ return __builtin_neon_vextv8hf (__a, __b, __c);
+}
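+
+/* vext concatenates __a:__b and extracts four (or eight, for the q form)
+   consecutive elements starting at index __c, so e.g.
+
+     vext_f16 (__a, __b, 1);   // yields { a[1], a[2], a[3], b[0] }
+
+   with 0 <= __c <= 3 (0 <= __c <= 7 for vextq_f16).  */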
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_f16 (float16_t __a)
+{
+ return vdup_n_f16 (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_f16 (float16_t __a)
+{
+ return vdupq_n_f16 (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_f16 (float16x4_t __a)
+{
+ return (float16x4_t)__builtin_shuffle (__a, (uint16x4_t){ 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_f16 (float16x8_t __a)
+{
+ return
+ (float16x8_t)__builtin_shuffle (__a,
+ (uint16x8_t){ 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ extern __inline float16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_f16 (float16x4_t __a, float16x4_t __b)
+{
+ float16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t){ 5, 1, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t){ 4, 0, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t){ 0, 4, 2, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t){ 1, 5, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline float16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ float16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b,
+ (uint16x8_t){ 9, 1, 11, 3, 13, 5, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b,
+ (uint16x8_t){ 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b,
+ (uint16x8_t){ 0, 8, 2, 10, 4, 12, 6, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b,
+ (uint16x8_t){ 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline float16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp_f16 (float16x4_t __a, float16x4_t __b)
+{
+ float16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t){ 5, 7, 1, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t){ 4, 6, 0, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t){ 0, 2, 4, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t){ 1, 3, 5, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline float16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzpq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ float16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 5, 7, 1, 3, 13, 15, 9, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 4, 6, 0, 2, 12, 14, 8, 10 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b,
+ (uint16x8_t){ 0, 2, 4, 6, 8, 10, 12, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b,
+ (uint16x8_t){ 1, 3, 5, 7, 9, 11, 13, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline float16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip_f16 (float16x4_t __a, float16x4_t __b)
+{
+ float16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t){ 6, 2, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t){ 4, 0, 5, 1 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t){ 0, 4, 1, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t){ 2, 6, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ extern __inline float16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzipq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ float16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 10, 2, 11, 3, 8, 0, 9, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 14, 6, 15, 7, 12, 4, 13, 5 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b,
+ (uint16x8_t){ 0, 8, 1, 9, 2, 10, 3, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b,
+ (uint16x8_t){ 4, 12, 5, 13, 6, 14, 7, 15 });
+#endif
+ return __rv;
+}
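+
+/* Element-level effect of the permutes above, read off the little-endian
+   shuffle masks, for __a = {a0,a1,a2,a3} and __b = {b0,b1,b2,b3}:
+
+     vtrn_f16: val[0] = {a0,b0,a2,b2}, val[1] = {a1,b1,a3,b3}  // transpose
+     vuzp_f16: val[0] = {a0,a2,b0,b2}, val[1] = {a1,a3,b1,b3}  // de-interleave
+     vzip_f16: val[0] = {a0,b0,a1,b1}, val[1] = {a2,b2,a3,b3}  // interleave
+
+   The q forms extend the same idea to eight-element vectors.  */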
+
+#endif
+
+/* AdvSIMD Dot Product intrinsics. */
+
+#if __ARM_ARCH >= 8
+#pragma GCC push_options
+#pragma GCC target ("arch=armv8.2-a+dotprod")
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdot_u32 (uint32x2_t __r, uint8x8_t __a, uint8x8_t __b)
+{
+ return __builtin_neon_udotv8qi_uuuu (__r, __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdotq_u32 (uint32x4_t __r, uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_neon_udotv16qi_uuuu (__r, __a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdot_s32 (int32x2_t __r, int8x8_t __a, int8x8_t __b)
+{
+ return __builtin_neon_sdotv8qi (__r, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdotq_s32 (int32x4_t __r, int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_neon_sdotv16qi (__r, __a, __b);
+}
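+
+/* Dot-product semantics: each 32-bit lane of the accumulator gains the
+   sum of four widened 8-bit products, i.e. vdot_u32 (__r, __a, __b)
+   performs, for i = 0, 1:
+
+     r[i] += a[4*i+0]*b[4*i+0] + a[4*i+1]*b[4*i+1]
+           + a[4*i+2]*b[4*i+2] + a[4*i+3]*b[4*i+3];
+  */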
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdot_lane_u32 (uint32x2_t __r, uint8x8_t __a, uint8x8_t __b, const int __index)
+{
+ return __builtin_neon_udot_lanev8qi_uuuus (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdotq_lane_u32 (uint32x4_t __r, uint8x16_t __a, uint8x8_t __b,
+ const int __index)
+{
+ return __builtin_neon_udot_lanev16qi_uuuus (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdot_lane_s32 (int32x2_t __r, int8x8_t __a, int8x8_t __b, const int __index)
+{
+ return __builtin_neon_sdot_lanev8qi (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdotq_lane_s32 (int32x4_t __r, int8x16_t __a, int8x8_t __b, const int __index)
+{
+ return __builtin_neon_sdot_lanev16qi (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdot_laneq_u32 (uint32x2_t __r, uint8x8_t __a, uint8x16_t __b, const int __index)
+{
+ return __builtin_neon_udot_laneqv8qi_uuuus (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdotq_laneq_u32 (uint32x4_t __r, uint8x16_t __a, uint8x16_t __b,
+ const int __index)
+{
+ return __builtin_neon_udot_laneqv16qi_uuuus (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdot_laneq_s32 (int32x2_t __r, int8x8_t __a, int8x16_t __b, const int __index)
+{
+ return __builtin_neon_sdot_laneqv8qi (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdotq_laneq_s32 (int32x4_t __r, int8x16_t __a, int8x16_t __b, const int __index)
+{
+ return __builtin_neon_sdot_laneqv16qi (__r, __a, __b, __index);
+}
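+
+/* In the _lane/_laneq variants, __index picks one 32-bit group of four
+   8-bit elements from __b, and that single group is dotted against every
+   group of __a: _lane indexes a 64-bit __b (0..1), _laneq a 128-bit __b
+   (0..3).  */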
+
+#pragma GCC pop_options
+#endif
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#pragma GCC push_options
+#pragma GCC target ("arch=armv8.2-a+fp16fml")
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlal_low_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vfmal_lowv2sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlsl_low_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vfmsl_lowv2sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlal_high_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vfmal_highv2sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlsl_high_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vfmsl_highv2sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlalq_low_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vfmal_lowv4sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlslq_low_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vfmsl_lowv4sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlalq_high_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vfmal_highv4sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlslq_high_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vfmsl_highv4sf (__r, __a, __b);
+}
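+
+/* The FP16 FML intrinsics widen half-precision products into the f32
+   accumulator: _low forms use the bottom half of __a and __b, _high
+   forms the top half; vfmlal adds the products, vfmlsl subtracts them.
+   E.g. vfmlal_low_f16 (__r, __a, __b) performs, for i = 0, 1:
+
+     r[i] += (float32_t) a[i] * (float32_t) b[i];
+  */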
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlal_lane_low_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (4, __index);
+ return __builtin_neon_vfmal_lane_lowv2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlal_lane_high_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (4, __index);
+ return __builtin_neon_vfmal_lane_highv2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlalq_laneq_low_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (8, __index);
+ return __builtin_neon_vfmal_lane_lowv4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlalq_lane_low_f16 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (4, __index);
+ return __builtin_neon_vfmal_lane_lowv4hfv4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlal_laneq_low_f16 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (8, __index);
+ return __builtin_neon_vfmal_lane_lowv8hfv2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlalq_laneq_high_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (8, __index);
+ return __builtin_neon_vfmal_lane_highv4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlalq_lane_high_f16 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (4, __index);
+ return __builtin_neon_vfmal_lane_highv4hfv4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlal_laneq_high_f16 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (8, __index);
+ return __builtin_neon_vfmal_lane_highv8hfv2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlsl_lane_low_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (4, __index);
+ return __builtin_neon_vfmsl_lane_lowv2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlsl_lane_high_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (4, __index);
+ return __builtin_neon_vfmsl_lane_highv2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlslq_laneq_low_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (8, __index);
+ return __builtin_neon_vfmsl_lane_lowv4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlslq_lane_low_f16 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (4, __index);
+ return __builtin_neon_vfmsl_lane_lowv4hfv4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlsl_laneq_low_f16 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (8, __index);
+ return __builtin_neon_vfmsl_lane_lowv8hfv2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlslq_laneq_high_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (8, __index);
+ return __builtin_neon_vfmsl_lane_highv4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlslq_lane_high_f16 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (4, __index);
+ return __builtin_neon_vfmsl_lane_highv4hfv4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmlsl_laneq_high_f16 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
+ const int __index)
+{
+ __builtin_arm_lane_check (8, __index);
+ return __builtin_neon_vfmsl_lane_highv8hfv2sf (__r, __a, __b, __index);
+}
+
+#pragma GCC pop_options
+#endif
+
+/* AdvSIMD Complex Number intrinsics. */
+#if __ARM_ARCH >= 8
+#pragma GCC push_options
+#pragma GCC target ("arch=armv8.3-a")
+
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#pragma GCC push_options
+#pragma GCC target ("+fp16")
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcadd_rot90_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vcadd90v4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaddq_rot90_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vcadd90v8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcadd_rot270_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vcadd270v4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaddq_rot270_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vcadd270v8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vcmla0v4hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vcmla0v8hf (__r, __a, __b);
+}
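+
+/* The VCMLA intrinsics view each vector as packed complex numbers with
+   real and imaginary parts in adjacent lanes.  Rotation 0 accumulates
+   the partial products using the real part of __a, rotation 90 those
+   using the imaginary part, so a full complex multiply-accumulate
+   r += a * b is the usual two-step sketch:
+
+     __r = vcmla_f16 (__r, __a, __b);        // re += a.re*b.re; im += a.re*b.im
+     __r = vcmla_rot90_f16 (__r, __a, __b);  // re -= a.im*b.im; im += a.im*b.re
+  */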
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_lane_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane0v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_laneq_f16 (float16x4_t __r, float16x4_t __a, float16x8_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_laneq0v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_lane_f16 (float16x8_t __r, float16x8_t __a, float16x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmlaq_lane0v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_laneq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane0v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot90_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vcmla90v4hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot90_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vcmla90v8hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot90_lane_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane90v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot90_laneq_f16 (float16x4_t __r, float16x4_t __a, float16x8_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_laneq90v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot90_lane_f16 (float16x8_t __r, float16x8_t __a, float16x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmlaq_lane90v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot90_laneq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane90v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot180_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vcmla180v4hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot180_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vcmla180v8hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot180_lane_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane180v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot180_laneq_f16 (float16x4_t __r, float16x4_t __a, float16x8_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_laneq180v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot180_lane_f16 (float16x8_t __r, float16x8_t __a, float16x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmlaq_lane180v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot180_laneq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane180v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot270_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_neon_vcmla270v4hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot270_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_neon_vcmla270v8hf (__r, __a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot270_lane_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane270v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot270_laneq_f16 (float16x4_t __r, float16x4_t __a, float16x8_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_laneq270v4hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot270_lane_f16 (float16x8_t __r, float16x8_t __a, float16x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmlaq_lane270v8hf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot270_laneq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane270v8hf (__r, __a, __b, __index);
+}
+
+#pragma GCC pop_options
+#endif
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcadd_rot90_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_neon_vcadd90v2sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaddq_rot90_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_neon_vcadd90v4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcadd_rot270_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_neon_vcadd270v2sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaddq_rot270_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_neon_vcadd270v4sf (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_neon_vcmla0v2sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_neon_vcmla0v4sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_lane_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane0v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_laneq_f32 (float32x2_t __r, float32x2_t __a, float32x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_laneq0v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_lane_f32 (float32x4_t __r, float32x4_t __a, float32x2_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmlaq_lane0v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane0v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot90_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_neon_vcmla90v2sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot90_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_neon_vcmla90v4sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot90_lane_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane90v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot90_laneq_f32 (float32x2_t __r, float32x2_t __a, float32x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_laneq90v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot90_lane_f32 (float32x4_t __r, float32x4_t __a, float32x2_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmlaq_lane90v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot90_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane90v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot180_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_neon_vcmla180v2sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot180_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_neon_vcmla180v4sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot180_lane_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane180v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot180_laneq_f32 (float32x2_t __r, float32x2_t __a, float32x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_laneq180v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot180_lane_f32 (float32x4_t __r, float32x4_t __a, float32x2_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmlaq_lane180v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot180_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane180v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot270_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_neon_vcmla270v2sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot270_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_neon_vcmla270v4sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot270_lane_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane270v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmla_rot270_laneq_f32 (float32x2_t __r, float32x2_t __a, float32x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_laneq270v2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot270_lane_f32 (float32x4_t __r, float32x4_t __a, float32x2_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmlaq_lane270v4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcmlaq_rot270_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vcmla_lane270v4sf (__r, __a, __b, __index);
+}
+
+
+/* AdvSIMD Matrix Multiply-Accumulate and Dot Product intrinsics. */
+#pragma GCC push_options
+#pragma GCC target ("arch=armv8.2-a+i8mm")
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vusdot_s32 (int32x2_t __r, uint8x8_t __a, int8x8_t __b)
+{
+ return __builtin_neon_usdotv8qi_ssus (__r, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vusdotq_s32 (int32x4_t __r, uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_neon_usdotv16qi_ssus (__r, __a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vusdot_lane_s32 (int32x2_t __r, uint8x8_t __a,
+ int8x8_t __b, const int __index)
+{
+ return __builtin_neon_usdot_lanev8qi_ssuss (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vusdotq_lane_s32 (int32x4_t __r, uint8x16_t __a,
+ int8x8_t __b, const int __index)
+{
+ return __builtin_neon_usdot_lanev16qi_ssuss (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsudot_lane_s32 (int32x2_t __r, int8x8_t __a,
+ uint8x8_t __b, const int __index)
+{
+ return __builtin_neon_sudot_lanev8qi_sssus (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsudotq_lane_s32 (int32x4_t __r, int8x16_t __a,
+ uint8x8_t __b, const int __index)
+{
+ return __builtin_neon_sudot_lanev16qi_sssus (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vusdot_laneq_s32 (int32x2_t __r, uint8x8_t __a,
+ int8x16_t __b, const int __index)
+{
+ return __builtin_neon_usdot_laneqv8qi_ssuss (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vusdotq_laneq_s32 (int32x4_t __r, uint8x16_t __a,
+ int8x16_t __b, const int __index)
+{
+ return __builtin_neon_usdot_laneqv16qi_ssuss (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsudot_laneq_s32 (int32x2_t __r, int8x8_t __a,
+ uint8x16_t __b, const int __index)
+{
+ return __builtin_neon_sudot_laneqv8qi_sssus (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsudotq_laneq_s32 (int32x4_t __r, int8x16_t __a,
+ uint8x16_t __b, const int __index)
+{
+ return __builtin_neon_sudot_laneqv16qi_sssus (__r, __a, __b, __index);
+}
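+
+/* Mixed-signedness dot products: vusdot treats __a as unsigned and __b
+   as signed, accumulating into a signed result; the vsudot _lane/_laneq
+   forms swap the roles (signed __a, unsigned __b selected by lane).
+   Each 32-bit lane again sums four widened 8-bit products.  */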
+
+#pragma GCC pop_options
+
+#pragma GCC pop_options
+#endif
+
+/* AdvSIMD 8-bit Integer Matrix Multiply (I8MM) intrinsics. */
+
+#pragma GCC push_options
+#pragma GCC target ("arch=armv8.2-a+i8mm")
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmmlaq_s32 (int32x4_t __r, int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_neon_smmlav16qi (__r, __a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmmlaq_u32 (uint32x4_t __r, uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_neon_ummlav16qi_uuuu (__r, __a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vusmmlaq_s32 (int32x4_t __r, uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_neon_usmmlav16qi_ssus (__r, __a, __b);
+}
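+
+/* The MMLA intrinsics perform a widening 2x8 by 8x2 matrix multiply-
+   accumulate: with __a and __b viewed as row-major 2x8 matrices of
+   8-bit elements and __r as a row-major 2x2 matrix of 32-bit elements,
+
+     r[i][j] += dot (row i of __a, row j of __b);   // i, j in {0, 1}
+
+   i.e. __r += __a * transpose (__b); vusmmlaq_s32 mixes an unsigned __a
+   with a signed __b.  */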
+
+#pragma GCC pop_options
+
+/* AdvSIMD Brain half-precision floating-point (Bfloat16) intrinsics. */
+#pragma GCC push_options
+#pragma GCC target ("arch=armv8.2-a+bf16")
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_bf16 (uint64_t __a)
+{
+ return (bfloat16x4_t) __a;
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_bf16 (bfloat16_t __a)
+{
+ return (bfloat16x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_bf16 (bfloat16_t __a)
+{
+ return (bfloat16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_bf16 (bfloat16x4_t __a, const int __b)
+{
+ return __builtin_neon_vdup_lanev4bf (__a, __b);
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_bf16 (bfloat16x4_t __a, const int __b)
+{
+ return __builtin_neon_vdup_lanev8bf (__a, __b);
+}
+
+#define vset_lane_bf16(__e, __v, __idx) \
+ __extension__ \
+ ({ \
+ bfloat16_t __elem = (__e); \
+ bfloat16x4_t __vec = (__v); \
+ __builtin_arm_lane_check (4, __idx); \
+ __vec[__arm_lane(__vec, __idx)] = __elem; \
+ __vec; \
+ })
+
+#define vsetq_lane_bf16(__e, __v, __idx) \
+ __extension__ \
+ ({ \
+ bfloat16_t __elem = (__e); \
+ bfloat16x8_t __vec = (__v); \
+ __builtin_arm_lane_check (8, __idx); \
+ __vec[__arm_laneq(__vec, __idx)] = __elem; \
+ __vec; \
+ })
+
+#define vget_lane_bf16(__v, __idx) \
+ __extension__ \
+ ({ \
+ bfloat16x4_t __vec = (__v); \
+ __builtin_arm_lane_check (4, __idx); \
+ bfloat16_t __res = __vec[__arm_lane(__vec, __idx)]; \
+ __res; \
+ })
+
+#define vgetq_lane_bf16(__v, __idx) \
+ __extension__ \
+ ({ \
+ bfloat16x8_t __vec = (__v); \
+ __builtin_arm_lane_check (8, __idx); \
+ bfloat16_t __res = __vec[__arm_laneq(__vec, __idx)]; \
+ __res; \
+ })
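+
+/* The bfloat16 lane accessors are macros (GNU statement expressions)
+   rather than functions so that __idx remains a constant expression for
+   __builtin_arm_lane_check.  vset*_lane return a copy of the vector with
+   one element replaced; vget*_lane read one element:
+
+     bfloat16x4_t __w = vset_lane_bf16 (__x, __v, 0);  // __v with lane 0 = __x
+     bfloat16_t __e = vget_lane_bf16 (__w, 0);         // reads back __x
+  */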
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_laneq_bf16 (bfloat16x8_t __a, const int __b)
+{
+ return vdup_n_bf16 (vgetq_lane_bf16 (__a, __b));
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_laneq_bf16 (bfloat16x8_t __a, const int __b)
+{
+ return vdupq_n_bf16 (vgetq_lane_bf16 (__a, __b));
+}
+
+__extension__ extern __inline bfloat16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vduph_lane_bf16 (bfloat16x4_t __a, const int __b)
+{
+ return vget_lane_bf16 (__a, __b);
+}
+
+__extension__ extern __inline bfloat16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vduph_laneq_bf16 (bfloat16x8_t __a, const int __b)
+{
+ return vgetq_lane_bf16 (__a, __b);
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_bf16 (bfloat16x8_t __a)
+{
+ return __builtin_neon_vget_highv8bf (__a);
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_bf16 (bfloat16x8_t __a)
+{
+ return __builtin_neon_vget_lowv8bf (__a);
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_bf16 (bfloat16x4_t __a, bfloat16x4_t __b)
+{
+ return __builtin_neon_vcombinev4bf (__a, __b);
+}
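+
+/* The vreinterpret*_bf16 family below (and the bf16 -> other-type family
+   after it) are pure bit-pattern casts: no value conversion takes place,
+   only the element type changes.  E.g. the raw bits of each bfloat16
+   lane can be inspected with
+
+     uint16x4_t __bits = vreinterpret_u16_bf16 (__v);
+  */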
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_bf16_u8 (uint8x8_t __a)
+{
+ return (bfloat16x4_t)__a;
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_bf16_u16 (uint16x4_t __a)
+{
+ return (bfloat16x4_t)__a;
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_bf16_u32 (uint32x2_t __a)
+{
+ return (bfloat16x4_t)__a;
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_bf16_u64 (uint64x1_t __a)
+{
+ return (bfloat16x4_t)__a;
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_bf16_s8 (int8x8_t __a)
+{
+ return (bfloat16x4_t)__a;
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_bf16_s16 (int16x4_t __a)
+{
+ return (bfloat16x4_t)__a;
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_bf16_s32 (int32x2_t __a)
+{
+ return (bfloat16x4_t)__a;
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_bf16_s64 (int64x1_t __a)
+{
+ return (bfloat16x4_t)__a;
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_bf16_p8 (poly8x8_t __a)
+{
+ return (bfloat16x4_t)__a;
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_bf16_p16 (poly16x4_t __a)
+{
+ return (bfloat16x4_t)__a;
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_bf16_p64 (poly64x1_t __a)
+{
+ return (bfloat16x4_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_bf16_f16 (float16x4_t __a)
+{
+ return (bfloat16x4_t)__a;
+}
+#endif
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_bf16_f32 (float32x2_t __a)
+{
+ return (bfloat16x4_t)__a;
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_u8 (uint8x16_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_u16 (uint16x8_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_u32 (uint32x4_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_u64 (uint64x2_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_s8 (int8x16_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_s16 (int16x8_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_s32 (int32x4_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_s64 (int64x2_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_p8 (poly8x16_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_p16 (poly16x8_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_p64 (poly64x2_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_p128 (poly128_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_f16 (float16x8_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+#endif
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_bf16_f32 (float32x4_t __a)
+{
+ return (bfloat16x8_t)__a;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_bf16 (bfloat16x4_t __a)
+{
+ return (int8x8_t)__a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_bf16 (bfloat16x4_t __a)
+{
+ return (int16x4_t)__a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_bf16 (bfloat16x4_t __a)
+{
+ return (int32x2_t)__a;
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_bf16 (bfloat16x4_t __a)
+{
+ return (int64x1_t)__a;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_bf16 (bfloat16x4_t __a)
+{
+ return (uint8x8_t)__a;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_bf16 (bfloat16x4_t __a)
+{
+ return (uint16x4_t)__a;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_bf16 (bfloat16x4_t __a)
+{
+ return (uint32x2_t)__a;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_bf16 (bfloat16x4_t __a)
+{
+ return (uint64x1_t)__a;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_bf16 (bfloat16x4_t __a)
+{
+ return (float32x2_t)__a;
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_bf16 (bfloat16x4_t __a)
+{
+ return (poly8x8_t)__a;
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_bf16 (bfloat16x4_t __a)
+{
+ return (poly16x4_t)__a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_bf16 (bfloat16x4_t __a)
+{
+ return (poly64x1_t)__a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_bf16 (bfloat16x8_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_bf16 (bfloat16x8_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_bf16 (bfloat16x8_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_bf16 (bfloat16x8_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_bf16 (bfloat16x8_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_bf16 (bfloat16x8_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_bf16 (bfloat16x8_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_bf16 (bfloat16x8_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_bf16 (bfloat16x8_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_bf16 (bfloat16x8_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_bf16 (bfloat16x8_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_bf16 (bfloat16x8_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_bf16 (bfloat16x8_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbfdot_f32 (float32x2_t __r, bfloat16x4_t __a, bfloat16x4_t __b)
+{
+ return __builtin_neon_vbfdotv2sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbfdotq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b)
+{
+ return __builtin_neon_vbfdotv4sf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbfdot_lane_f32 (float32x2_t __r, bfloat16x4_t __a, bfloat16x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vbfdot_lanev4bfv2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbfdotq_laneq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b,
+ const int __index)
+{
+ return __builtin_neon_vbfdot_lanev8bfv4sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbfdot_laneq_f32 (float32x2_t __r, bfloat16x4_t __a, bfloat16x8_t __b,
+ const int __index)
+{
+ return __builtin_neon_vbfdot_lanev8bfv2sf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbfdotq_lane_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vbfdot_lanev4bfv4sf (__r, __a, __b, __index);
+}
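+
+/* Note: in the lane variants above, __index selects a pair of adjacent
+ bfloat16 elements of __b, so its range is [0,1] when __b is a
+ bfloat16x4_t and [0,3] when __b is a bfloat16x8_t. */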
+
+#pragma GCC pop_options
+
+#pragma GCC push_options
+#pragma GCC target ("arch=armv8.2-a+bf16")
+
+typedef struct bfloat16x4x2_t
+{
+ bfloat16x4_t val[2];
+} bfloat16x4x2_t;
+
+typedef struct bfloat16x8x2_t
+{
+ bfloat16x8_t val[2];
+} bfloat16x8x2_t;
+
+typedef struct bfloat16x4x3_t
+{
+ bfloat16x4_t val[3];
+} bfloat16x4x3_t;
+
+typedef struct bfloat16x8x3_t
+{
+ bfloat16x8_t val[3];
+} bfloat16x8x3_t;
+
+typedef struct bfloat16x4x4_t
+{
+ bfloat16x4_t val[4];
+} bfloat16x4x4_t;
+
+typedef struct bfloat16x8x4_t
+{
+ bfloat16x8_t val[4];
+} bfloat16x8x4_t;
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_f32_bf16 (bfloat16x4_t __a)
+{
+ return __builtin_neon_vbfcvtv4bf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_low_f32_bf16 (bfloat16x8_t __a)
+{
+ return __builtin_neon_vbfcvtv8bf (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_high_f32_bf16 (bfloat16x8_t __a)
+{
+ return __builtin_neon_vbfcvt_highv8bf (__a);
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_bf16_f32 (float32x4_t __a)
+{
+ return __builtin_neon_vbfcvtv4sfv4bf (__a);
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_low_bf16_f32 (float32x4_t __a)
+{
+ return __builtin_neon_vbfcvtv4sfv8bf (__a);
+}
+
+/* The '__inactive' operand is not converted, but it provides the
+ low 64 bits to assemble the final 128-bit result. */
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_high_bf16_f32 (bfloat16x8_t __inactive, float32x4_t __a)
+{
+ return __builtin_neon_vbfcvtv4sf_highv8bf (__inactive, __a);
+}
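+
+/* For illustration: eight floats held in two float32x4_t values __f_lo
+ and __f_hi (hypothetical names) can be narrowed into one bfloat16x8_t:
+
+ bfloat16x8_t __lo8 = vcvtq_low_bf16_f32 (__f_lo);
+ bfloat16x8_t __all = vcvtq_high_bf16_f32 (__lo8, __f_hi);
+
+ __all then carries __f_lo in its low half and __f_hi in its high half,
+ both rounded to bfloat16. */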
+
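+/* vbfmmlaq_f32 is a widening matrix multiply-accumulate: __a is read as
+ a 2x4 bfloat16 matrix, __b as a 4x2 bfloat16 matrix, and their product
+ is added to the 2x2 float32 matrix held in __r. */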
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbfmmlaq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b)
+{
+ return __builtin_neon_vmmlav8bf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbfmlalbq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b)
+{
+ return __builtin_neon_vfmabv8bf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbfmlaltq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b)
+{
+ return __builtin_neon_vfmatv8bf (__r, __a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbfmlalbq_lane_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vfmab_lanev8bf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbfmlaltq_lane_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x4_t __b,
+ const int __index)
+{
+ return __builtin_neon_vfmat_lanev8bf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbfmlalbq_laneq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b,
+ const int __index)
+{
+ return __builtin_neon_vfmab_laneqv8bf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbfmlaltq_laneq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b,
+ const int __index)
+{
+ return __builtin_neon_vfmat_laneqv8bf (__r, __a, __b, __index);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_bf16 (bfloat16_t * __a, bfloat16x4_t __b)
+{
+ __builtin_neon_vst1v4bf (__a, __b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_bf16 (bfloat16_t * __a, bfloat16x8_t __b)
+{
+ __builtin_neon_vst1v8bf (__a, __b);
+}
+
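+/* The vst2..vst4 and vld2..vld4 wrappers below type-pun through a union
+ to convert between the NEON structure types and the opaque wide integer
+ types (__builtin_neon_ti, _oi, _ci, _xi) that the builtins take. */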
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_bf16 (bfloat16_t * __ptr, bfloat16x4x2_t __val)
+{
+ union { bfloat16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __val };
+ return __builtin_neon_vst2v4bf (__ptr, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_bf16 (bfloat16_t * __ptr, bfloat16x8x2_t __val)
+{
+ union { bfloat16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __val };
+ return __builtin_neon_vst2v8bf (__ptr, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_bf16 (bfloat16_t * __ptr, bfloat16x4x3_t __val)
+{
+ union { bfloat16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __val };
+ return __builtin_neon_vst3v4bf (__ptr, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_bf16 (bfloat16_t * __ptr, bfloat16x8x3_t __val)
+{
+ union { bfloat16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __val };
+ return __builtin_neon_vst3v8bf (__ptr, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_bf16 (bfloat16_t * __ptr, bfloat16x4x4_t __val)
+{
+ union { bfloat16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __val };
+ return __builtin_neon_vst4v4bf (__ptr, __bu.__o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_bf16 (bfloat16_t * __ptr, bfloat16x8x4_t __val)
+{
+ union { bfloat16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __val };
+ return __builtin_neon_vst4v8bf (__ptr, __bu.__o);
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_bf16 (bfloat16_t const * __ptr)
+{
+ return __builtin_neon_vld1v4bf (__ptr);
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_bf16 (const bfloat16_t * __ptr)
+{
+ return __builtin_neon_vld1v8bf (__ptr);
+}
+
+__extension__ extern __inline bfloat16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_bf16 (bfloat16_t const * __ptr)
+{
+ union { bfloat16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_bf16 (const bfloat16_t * __ptr)
+{
+ union { bfloat16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_bf16 (const bfloat16_t * __ptr)
+{
+ union { bfloat16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_bf16 (const bfloat16_t * __ptr)
+{
+ union { bfloat16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_bf16 (const bfloat16_t * __ptr)
+{
+ union { bfloat16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_bf16 (const bfloat16_t * __ptr)
+{
+ union { bfloat16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_bf16 (const bfloat16_t * __ptr)
+{
+ union { bfloat16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_dup_bf16 (const bfloat16_t * __ptr)
+{
+ union { bfloat16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv8bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_bf16 (const bfloat16_t * __ptr)
+{
+ union { bfloat16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_dup_bf16 (const bfloat16_t * __ptr)
+{
+ union { bfloat16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv8bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_bf16 (const bfloat16_t * __ptr)
+{
+ union { bfloat16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_dup_bf16 (const bfloat16_t * __ptr)
+{
+ union { bfloat16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv8bf ((const __builtin_neon_bf *) __ptr);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_bf16 (const bfloat16_t * __a, bfloat16x4_t __b, const int __c)
+{
+ return __builtin_neon_vld1_lanev4bf (__a, __b, __c);
+}
+
+__extension__ extern __inline bfloat16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_bf16 (const bfloat16_t * __a, bfloat16x8_t __b, const int __c)
+{
+ return __builtin_neon_vld1_lanev8bf (__a, __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_bf16 (bfloat16_t * __a, bfloat16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4bf (__a, __b, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_bf16 (bfloat16_t * __a, bfloat16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8bf (__a, __b, __c);
+}
+
+__extension__ extern __inline bfloat16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_lane_bf16 (const bfloat16_t * __a, bfloat16x4x2_t __b, const int __c)
+{
+ union { bfloat16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { bfloat16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4bf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_lane_bf16 (const bfloat16_t * __a, bfloat16x8x2_t __b, const int __c)
+{
+ union { bfloat16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { bfloat16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8bf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_lane_bf16 (const bfloat16_t * __a, bfloat16x4x3_t __b, const int __c)
+{
+ union { bfloat16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { bfloat16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4bf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_lane_bf16 (const bfloat16_t * __a, bfloat16x8x3_t __b, const int __c)
+{
+ union { bfloat16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { bfloat16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8bf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_lane_bf16 (const bfloat16_t * __a, bfloat16x4x4_t __b, const int __c)
+{
+ union { bfloat16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { bfloat16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4bf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline bfloat16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_lane_bf16 (const bfloat16_t * __a, bfloat16x8x4_t __b, const int __c)
+{
+ union { bfloat16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { bfloat16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8bf (__a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_lane_bf16 (bfloat16_t * __a, bfloat16x4x2_t __b, const int __c)
+{
+ union { bfloat16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4bf (__a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_lane_bf16 (bfloat16_t * __a, bfloat16x8x2_t __b, const int __c)
+{
+ union { bfloat16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8bf (__a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_lane_bf16 (bfloat16_t * __a, bfloat16x4x3_t __b, const int __c)
+{
+ union { bfloat16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4bf (__a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_lane_bf16 (bfloat16_t * __a, bfloat16x8x3_t __b, const int __c)
+{
+ union { bfloat16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8bf (__a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_lane_bf16 (bfloat16_t * __a, bfloat16x4x4_t __b, const int __c)
+{
+ union { bfloat16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4bf (__a, __bu.__o, __c);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_lane_bf16 (bfloat16_t * __a, bfloat16x8x4_t __b, const int __c)
+{
+ union { bfloat16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8bf (__a, __bu.__o, __c);
+}
+
+#pragma GCC pop_options
+
+#ifdef __cplusplus
+}
+#endif
+
+#pragma GCC pop_options
+
+#endif
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/float.h b/lib/gcc/arm-none-eabi/13.2.1/include/float.h
new file mode 100644
index 0000000..45021e4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/float.h
@@ -0,0 +1,631 @@
+/* Copyright (C) 2002-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 5.2.4.2.2 Characteristics of floating types <float.h>
+ */
+
+#ifndef _FLOAT_H___
+#define _FLOAT_H___
+
+/* Radix of exponent representation, b. */
+#undef FLT_RADIX
+#define FLT_RADIX __FLT_RADIX__
+
+/* Number of base-FLT_RADIX digits in the significand, p. */
+#undef FLT_MANT_DIG
+#undef DBL_MANT_DIG
+#undef LDBL_MANT_DIG
+#define FLT_MANT_DIG __FLT_MANT_DIG__
+#define DBL_MANT_DIG __DBL_MANT_DIG__
+#define LDBL_MANT_DIG __LDBL_MANT_DIG__
+
+/* Number of decimal digits, q, such that any floating-point number with q
+ decimal digits can be rounded into a floating-point number with p radix b
+ digits and back again without change to the q decimal digits,
+
+ p * log10(b) if b is a power of 10
+ floor((p - 1) * log10(b)) otherwise
+*/
+#undef FLT_DIG
+#undef DBL_DIG
+#undef LDBL_DIG
+#define FLT_DIG __FLT_DIG__
+#define DBL_DIG __DBL_DIG__
+#define LDBL_DIG __LDBL_DIG__
+
+/* Minimum int x such that FLT_RADIX**(x-1) is a normalized float, emin */
+#undef FLT_MIN_EXP
+#undef DBL_MIN_EXP
+#undef LDBL_MIN_EXP
+#define FLT_MIN_EXP __FLT_MIN_EXP__
+#define DBL_MIN_EXP __DBL_MIN_EXP__
+#define LDBL_MIN_EXP __LDBL_MIN_EXP__
+
+/* Minimum negative integer such that 10 raised to that power is in the
+ range of normalized floating-point numbers,
+
+ ceil(log10(b) * (emin - 1))
+*/
+#undef FLT_MIN_10_EXP
+#undef DBL_MIN_10_EXP
+#undef LDBL_MIN_10_EXP
+#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
+#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
+#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
+
+/* Maximum int x such that FLT_RADIX**(x-1) is a representable float, emax. */
+#undef FLT_MAX_EXP
+#undef DBL_MAX_EXP
+#undef LDBL_MAX_EXP
+#define FLT_MAX_EXP __FLT_MAX_EXP__
+#define DBL_MAX_EXP __DBL_MAX_EXP__
+#define LDBL_MAX_EXP __LDBL_MAX_EXP__
+
+/* Maximum integer such that 10 raised to that power is in the range of
+ representable finite floating-point numbers,
+
+ floor(log10((1 - b**-p) * b**emax))
+*/
+#undef FLT_MAX_10_EXP
+#undef DBL_MAX_10_EXP
+#undef LDBL_MAX_10_EXP
+#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
+#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
+#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
+
+/* Maximum representable finite floating-point number,
+
+ (1 - b**-p) * b**emax
+*/
+#undef FLT_MAX
+#undef DBL_MAX
+#undef LDBL_MAX
+#define FLT_MAX __FLT_MAX__
+#define DBL_MAX __DBL_MAX__
+#define LDBL_MAX __LDBL_MAX__
+
+/* The difference between 1 and the least value greater than 1 that is
+ representable in the given floating point type, b**(1-p). */
+#undef FLT_EPSILON
+#undef DBL_EPSILON
+#undef LDBL_EPSILON
+#define FLT_EPSILON __FLT_EPSILON__
+#define DBL_EPSILON __DBL_EPSILON__
+#define LDBL_EPSILON __LDBL_EPSILON__
+
+/* Minimum normalized positive floating-point number, b**(emin - 1). */
+#undef FLT_MIN
+#undef DBL_MIN
+#undef LDBL_MIN
+#define FLT_MIN __FLT_MIN__
+#define DBL_MIN __DBL_MIN__
+#define LDBL_MIN __LDBL_MIN__
+
+/* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown. */
+/* ??? This is supposed to change with calls to fesetround in <fenv.h>. */
+#undef FLT_ROUNDS
+#define FLT_ROUNDS 1
+
+#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) \
+ || (defined (__cplusplus) && __cplusplus >= 201103L)
+/* The floating-point expression evaluation method. The precise
+ definitions of these values are generalised to include support for
+ the interchange and extended types defined in ISO/IEC TS 18661-3.
+ Prior to this (for C99/C11) the definitions were:
+
+ -1 indeterminate
+ 0 evaluate all operations and constants just to the range and
+ precision of the type
+ 1 evaluate operations and constants of type float and double
+ to the range and precision of the double type, evaluate
+ long double operations and constants to the range and
+ precision of the long double type
+ 2 evaluate all operations and constants to the range and
+ precision of the long double type
+
+ The TS 18661-3 definitions are:
+
+ -1 indeterminate
+ 0 evaluate all operations and constants, whose semantic type has
+ at most the range and precision of float, to the range and
+ precision of float; evaluate all other operations and constants
+ to the range and precision of the semantic type.
+ 1 evaluate all operations and constants, whose semantic type has
+ at most the range and precision of double, to the range and
+ precision of double; evaluate all other operations and constants
+ to the range and precision of the semantic type.
+ 2 evaluate all operations and constants, whose semantic type has
+ at most the range and precision of long double, to the range and
+ precision of long double; evaluate all other operations and
+ constants to the range and precision of the semantic type.
+ N where _FloatN is a supported interchange floating type
+ evaluate all operations and constants, whose semantic type has
+ at most the range and precision of the _FloatN type, to the
+ range and precision of the _FloatN type; evaluate all other
+ operations and constants to the range and precision of the
+ semantic type.
+ N + 1, where _FloatNx is a supported extended floating type
+ evaluate operations and constants, whose semantic type has at
+ most the range and precision of the _FloatNx type, to the range
+ and precision of the _FloatNx type; evaluate all other
+ operations and constants to the range and precision of the
+ semantic type.
+
+ The compiler predefines two macros:
+
+ __FLT_EVAL_METHOD__
+ Which, depending on the value given for
+ -fpermitted-flt-eval-methods, may be limited to only those values
+ for FLT_EVAL_METHOD defined in C99/C11.
+
+ __FLT_EVAL_METHOD_TS_18661_3__
+ Which always permits the values for FLT_EVAL_METHOD defined in
+ ISO/IEC TS 18661-3.
+
+ Here we want to use __FLT_EVAL_METHOD__, unless
+ __STDC_WANT_IEC_60559_TYPES_EXT__ is defined, in which case the user
+ is specifically asking for the ISO/IEC TS 18661-3 types, so we use
+ __FLT_EVAL_METHOD_TS_18661_3__.
+
+ ??? This ought to change with the setting of the fp control word;
+ the value provided by the compiler assumes the widest setting. */
+#undef FLT_EVAL_METHOD
+#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__
+#define FLT_EVAL_METHOD __FLT_EVAL_METHOD_TS_18661_3__
+#else
+#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+#endif
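+
+/* For illustration: a translation unit that requires float expressions to
+ be evaluated in float range and precision can guard on the reported
+ method, e.g.
+
+ #if FLT_EVAL_METHOD != 0
+ #error float arithmetic may carry excess precision here
+ #endif
+*/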
+
+/* Number of decimal digits, n, such that any floating-point number in the
+ widest supported floating type with pmax radix b digits can be rounded
+ to a floating-point number with n decimal digits and back again without
+ change to the value,
+
+ pmax * log10(b) if b is a power of 10
+ ceil(1 + pmax * log10(b)) otherwise
+*/
+#undef DECIMAL_DIG
+#define DECIMAL_DIG __DECIMAL_DIG__
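+
+/* For illustration: printing a value of the widest type with DECIMAL_DIG
+ significant digits is enough for it to survive a round trip through
+ decimal text, e.g. printf ("%.*Lg", DECIMAL_DIG, (long double) x). */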
+
+#endif /* C99 */
+
+#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) \
+ || (defined (__cplusplus) && __cplusplus >= 201703L)
+/* Versions of DECIMAL_DIG for each floating-point type. */
+#undef FLT_DECIMAL_DIG
+#undef DBL_DECIMAL_DIG
+#undef LDBL_DECIMAL_DIG
+#define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__
+#define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__
+#define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__
+
+/* Whether types support subnormal numbers. */
+#undef FLT_HAS_SUBNORM
+#undef DBL_HAS_SUBNORM
+#undef LDBL_HAS_SUBNORM
+#define FLT_HAS_SUBNORM __FLT_HAS_DENORM__
+#define DBL_HAS_SUBNORM __DBL_HAS_DENORM__
+#define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
+
+/* Minimum positive values, including subnormals. */
+#undef FLT_TRUE_MIN
+#undef DBL_TRUE_MIN
+#undef LDBL_TRUE_MIN
+#define FLT_TRUE_MIN __FLT_DENORM_MIN__
+#define DBL_TRUE_MIN __DBL_DENORM_MIN__
+#define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+
+#endif /* C11 */
+
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+/* Maximum finite positive value with MANT_DIG digits in the
+ significand taking their maximum value. */
+#undef FLT_NORM_MAX
+#undef DBL_NORM_MAX
+#undef LDBL_NORM_MAX
+#define FLT_NORM_MAX __FLT_NORM_MAX__
+#define DBL_NORM_MAX __DBL_NORM_MAX__
+#define LDBL_NORM_MAX __LDBL_NORM_MAX__
+
+/* Whether each type matches an IEC 60559 format. */
+#undef FLT_IS_IEC_60559
+#undef DBL_IS_IEC_60559
+#undef LDBL_IS_IEC_60559
+#define FLT_IS_IEC_60559 __FLT_IS_IEC_60559__
+#define DBL_IS_IEC_60559 __DBL_IS_IEC_60559__
+#define LDBL_IS_IEC_60559 __LDBL_IS_IEC_60559__
+
+/* Infinity in type float; not defined if infinity not supported. */
+#if __FLT_HAS_INFINITY__
+#undef INFINITY
+#define INFINITY (__builtin_inff ())
+#endif
+
+/* Quiet NaN, if supported for float. */
+#if __FLT_HAS_QUIET_NAN__
+#undef NAN
+#define NAN (__builtin_nanf (""))
+#endif
+
+/* Signaling NaN, if supported for each type. All formats supported
+ by GCC support either both quiet and signaling NaNs, or neither
+ kind of NaN. */
+#if __FLT_HAS_QUIET_NAN__
+#undef FLT_SNAN
+#define FLT_SNAN (__builtin_nansf (""))
+#endif
+#if __DBL_HAS_QUIET_NAN__
+#undef DBL_SNAN
+#define DBL_SNAN (__builtin_nans (""))
+#endif
+#if __LDBL_HAS_QUIET_NAN__
+#undef LDBL_SNAN
+#define LDBL_SNAN (__builtin_nansl (""))
+#endif
+
+#endif /* C2X */
+
+#if (defined __STDC_WANT_IEC_60559_BFP_EXT__ \
+ || defined __STDC_WANT_IEC_60559_EXT__)
+/* Number of decimal digits for which conversions between decimal
+ character strings and binary formats, in both directions, are
+ correctly rounded. */
+#define CR_DECIMAL_DIG __UINTMAX_MAX__
+#endif
+
+#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__
+/* Constants for _FloatN and _FloatNx types from TS 18661-3. See
+ comments above for their semantics. */
+
+#ifdef __FLT16_MANT_DIG__
+#undef FLT16_MANT_DIG
+#define FLT16_MANT_DIG __FLT16_MANT_DIG__
+#undef FLT16_DIG
+#define FLT16_DIG __FLT16_DIG__
+#undef FLT16_MIN_EXP
+#define FLT16_MIN_EXP __FLT16_MIN_EXP__
+#undef FLT16_MIN_10_EXP
+#define FLT16_MIN_10_EXP __FLT16_MIN_10_EXP__
+#undef FLT16_MAX_EXP
+#define FLT16_MAX_EXP __FLT16_MAX_EXP__
+#undef FLT16_MAX_10_EXP
+#define FLT16_MAX_10_EXP __FLT16_MAX_10_EXP__
+#undef FLT16_MAX
+#define FLT16_MAX __FLT16_MAX__
+#undef FLT16_EPSILON
+#define FLT16_EPSILON __FLT16_EPSILON__
+#undef FLT16_MIN
+#define FLT16_MIN __FLT16_MIN__
+#undef FLT16_DECIMAL_DIG
+#define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__
+#undef FLT16_TRUE_MIN
+#define FLT16_TRUE_MIN __FLT16_DENORM_MIN__
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+#undef FLT16_SNAN
+#define FLT16_SNAN (__builtin_nansf16 (""))
+#endif /* C2X */
+#endif /* __FLT16_MANT_DIG__. */
+
+#ifdef __FLT32_MANT_DIG__
+#undef FLT32_MANT_DIG
+#define FLT32_MANT_DIG __FLT32_MANT_DIG__
+#undef FLT32_DIG
+#define FLT32_DIG __FLT32_DIG__
+#undef FLT32_MIN_EXP
+#define FLT32_MIN_EXP __FLT32_MIN_EXP__
+#undef FLT32_MIN_10_EXP
+#define FLT32_MIN_10_EXP __FLT32_MIN_10_EXP__
+#undef FLT32_MAX_EXP
+#define FLT32_MAX_EXP __FLT32_MAX_EXP__
+#undef FLT32_MAX_10_EXP
+#define FLT32_MAX_10_EXP __FLT32_MAX_10_EXP__
+#undef FLT32_MAX
+#define FLT32_MAX __FLT32_MAX__
+#undef FLT32_EPSILON
+#define FLT32_EPSILON __FLT32_EPSILON__
+#undef FLT32_MIN
+#define FLT32_MIN __FLT32_MIN__
+#undef FLT32_DECIMAL_DIG
+#define FLT32_DECIMAL_DIG __FLT32_DECIMAL_DIG__
+#undef FLT32_TRUE_MIN
+#define FLT32_TRUE_MIN __FLT32_DENORM_MIN__
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+#undef FLT32_SNAN
+#define FLT32_SNAN (__builtin_nansf32 (""))
+#endif /* C2X */
+#endif /* __FLT32_MANT_DIG__. */
+
+#ifdef __FLT64_MANT_DIG__
+#undef FLT64_MANT_DIG
+#define FLT64_MANT_DIG __FLT64_MANT_DIG__
+#undef FLT64_DIG
+#define FLT64_DIG __FLT64_DIG__
+#undef FLT64_MIN_EXP
+#define FLT64_MIN_EXP __FLT64_MIN_EXP__
+#undef FLT64_MIN_10_EXP
+#define FLT64_MIN_10_EXP __FLT64_MIN_10_EXP__
+#undef FLT64_MAX_EXP
+#define FLT64_MAX_EXP __FLT64_MAX_EXP__
+#undef FLT64_MAX_10_EXP
+#define FLT64_MAX_10_EXP __FLT64_MAX_10_EXP__
+#undef FLT64_MAX
+#define FLT64_MAX __FLT64_MAX__
+#undef FLT64_EPSILON
+#define FLT64_EPSILON __FLT64_EPSILON__
+#undef FLT64_MIN
+#define FLT64_MIN __FLT64_MIN__
+#undef FLT64_DECIMAL_DIG
+#define FLT64_DECIMAL_DIG __FLT64_DECIMAL_DIG__
+#undef FLT64_TRUE_MIN
+#define FLT64_TRUE_MIN __FLT64_DENORM_MIN__
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+#undef FLT64_SNAN
+#define FLT64_SNAN (__builtin_nansf64 (""))
+#endif /* C2X */
+#endif /* __FLT64_MANT_DIG__. */
+
+#ifdef __FLT128_MANT_DIG__
+#undef FLT128_MANT_DIG
+#define FLT128_MANT_DIG __FLT128_MANT_DIG__
+#undef FLT128_DIG
+#define FLT128_DIG __FLT128_DIG__
+#undef FLT128_MIN_EXP
+#define FLT128_MIN_EXP __FLT128_MIN_EXP__
+#undef FLT128_MIN_10_EXP
+#define FLT128_MIN_10_EXP __FLT128_MIN_10_EXP__
+#undef FLT128_MAX_EXP
+#define FLT128_MAX_EXP __FLT128_MAX_EXP__
+#undef FLT128_MAX_10_EXP
+#define FLT128_MAX_10_EXP __FLT128_MAX_10_EXP__
+#undef FLT128_MAX
+#define FLT128_MAX __FLT128_MAX__
+#undef FLT128_EPSILON
+#define FLT128_EPSILON __FLT128_EPSILON__
+#undef FLT128_MIN
+#define FLT128_MIN __FLT128_MIN__
+#undef FLT128_DECIMAL_DIG
+#define FLT128_DECIMAL_DIG __FLT128_DECIMAL_DIG__
+#undef FLT128_TRUE_MIN
+#define FLT128_TRUE_MIN __FLT128_DENORM_MIN__
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+#undef FLT128_SNAN
+#define FLT128_SNAN (__builtin_nansf128 (""))
+#endif /* C2X */
+#endif /* __FLT128_MANT_DIG__. */
+
+#ifdef __FLT32X_MANT_DIG__
+#undef FLT32X_MANT_DIG
+#define FLT32X_MANT_DIG __FLT32X_MANT_DIG__
+#undef FLT32X_DIG
+#define FLT32X_DIG __FLT32X_DIG__
+#undef FLT32X_MIN_EXP
+#define FLT32X_MIN_EXP __FLT32X_MIN_EXP__
+#undef FLT32X_MIN_10_EXP
+#define FLT32X_MIN_10_EXP __FLT32X_MIN_10_EXP__
+#undef FLT32X_MAX_EXP
+#define FLT32X_MAX_EXP __FLT32X_MAX_EXP__
+#undef FLT32X_MAX_10_EXP
+#define FLT32X_MAX_10_EXP __FLT32X_MAX_10_EXP__
+#undef FLT32X_MAX
+#define FLT32X_MAX __FLT32X_MAX__
+#undef FLT32X_EPSILON
+#define FLT32X_EPSILON __FLT32X_EPSILON__
+#undef FLT32X_MIN
+#define FLT32X_MIN __FLT32X_MIN__
+#undef FLT32X_DECIMAL_DIG
+#define FLT32X_DECIMAL_DIG __FLT32X_DECIMAL_DIG__
+#undef FLT32X_TRUE_MIN
+#define FLT32X_TRUE_MIN __FLT32X_DENORM_MIN__
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+#undef FLT32X_SNAN
+#define FLT32X_SNAN (__builtin_nansf32x (""))
+#endif /* C2X */
+#endif /* __FLT32X_MANT_DIG__. */
+
+#ifdef __FLT64X_MANT_DIG__
+#undef FLT64X_MANT_DIG
+#define FLT64X_MANT_DIG __FLT64X_MANT_DIG__
+#undef FLT64X_DIG
+#define FLT64X_DIG __FLT64X_DIG__
+#undef FLT64X_MIN_EXP
+#define FLT64X_MIN_EXP __FLT64X_MIN_EXP__
+#undef FLT64X_MIN_10_EXP
+#define FLT64X_MIN_10_EXP __FLT64X_MIN_10_EXP__
+#undef FLT64X_MAX_EXP
+#define FLT64X_MAX_EXP __FLT64X_MAX_EXP__
+#undef FLT64X_MAX_10_EXP
+#define FLT64X_MAX_10_EXP __FLT64X_MAX_10_EXP__
+#undef FLT64X_MAX
+#define FLT64X_MAX __FLT64X_MAX__
+#undef FLT64X_EPSILON
+#define FLT64X_EPSILON __FLT64X_EPSILON__
+#undef FLT64X_MIN
+#define FLT64X_MIN __FLT64X_MIN__
+#undef FLT64X_DECIMAL_DIG
+#define FLT64X_DECIMAL_DIG __FLT64X_DECIMAL_DIG__
+#undef FLT64X_TRUE_MIN
+#define FLT64X_TRUE_MIN __FLT64X_DENORM_MIN__
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+#undef FLT64X_SNAN
+#define FLT64X_SNAN (__builtin_nansf64x (""))
+#endif /* C2X */
+#endif /* __FLT64X_MANT_DIG__. */
+
+#ifdef __FLT128X_MANT_DIG__
+#undef FLT128X_MANT_DIG
+#define FLT128X_MANT_DIG __FLT128X_MANT_DIG__
+#undef FLT128X_DIG
+#define FLT128X_DIG __FLT128X_DIG__
+#undef FLT128X_MIN_EXP
+#define FLT128X_MIN_EXP __FLT128X_MIN_EXP__
+#undef FLT128X_MIN_10_EXP
+#define FLT128X_MIN_10_EXP __FLT128X_MIN_10_EXP__
+#undef FLT128X_MAX_EXP
+#define FLT128X_MAX_EXP __FLT128X_MAX_EXP__
+#undef FLT128X_MAX_10_EXP
+#define FLT128X_MAX_10_EXP __FLT128X_MAX_10_EXP__
+#undef FLT128X_MAX
+#define FLT128X_MAX __FLT128X_MAX__
+#undef FLT128X_EPSILON
+#define FLT128X_EPSILON __FLT128X_EPSILON__
+#undef FLT128X_MIN
+#define FLT128X_MIN __FLT128X_MIN__
+#undef FLT128X_DECIMAL_DIG
+#define FLT128X_DECIMAL_DIG __FLT128X_DECIMAL_DIG__
+#undef FLT128X_TRUE_MIN
+#define FLT128X_TRUE_MIN __FLT128X_DENORM_MIN__
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+#undef FLT128X_SNAN
+#define FLT128X_SNAN (__builtin_nansf128x (""))
+#endif /* C2X */
+#endif /* __FLT128X_MANT_DIG__. */
+
+#endif /* __STDC_WANT_IEC_60559_TYPES_EXT__. */
+
+#ifdef __DEC32_MANT_DIG__
+#if (defined __STDC_WANT_DEC_FP__ \
+ || defined __STDC_WANT_IEC_60559_DFP_EXT__ \
+ || (defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L))
+/* C2X; formerly Technical Report 24732, extension for decimal
+ floating-point arithmetic: Characteristic of decimal floating types
+ <float.h>, and TS 18661-2. */
+
+/* Number of base-FLT_RADIX digits in the significand, p. */
+#undef DEC32_MANT_DIG
+#undef DEC64_MANT_DIG
+#undef DEC128_MANT_DIG
+#define DEC32_MANT_DIG __DEC32_MANT_DIG__
+#define DEC64_MANT_DIG __DEC64_MANT_DIG__
+#define DEC128_MANT_DIG __DEC128_MANT_DIG__
+
+/* Minimum exponent. */
+#undef DEC32_MIN_EXP
+#undef DEC64_MIN_EXP
+#undef DEC128_MIN_EXP
+#define DEC32_MIN_EXP __DEC32_MIN_EXP__
+#define DEC64_MIN_EXP __DEC64_MIN_EXP__
+#define DEC128_MIN_EXP __DEC128_MIN_EXP__
+
+/* Maximum exponent. */
+#undef DEC32_MAX_EXP
+#undef DEC64_MAX_EXP
+#undef DEC128_MAX_EXP
+#define DEC32_MAX_EXP __DEC32_MAX_EXP__
+#define DEC64_MAX_EXP __DEC64_MAX_EXP__
+#define DEC128_MAX_EXP __DEC128_MAX_EXP__
+
+/* Maximum representable finite decimal floating-point number
+ (there are 6, 15, and 33 9s after the decimal points respectively). */
+#undef DEC32_MAX
+#undef DEC64_MAX
+#undef DEC128_MAX
+#define DEC32_MAX __DEC32_MAX__
+#define DEC64_MAX __DEC64_MAX__
+#define DEC128_MAX __DEC128_MAX__
+
+/* The difference between 1 and the least value greater than 1 that is
+ representable in the given floating point type. */
+#undef DEC32_EPSILON
+#undef DEC64_EPSILON
+#undef DEC128_EPSILON
+#define DEC32_EPSILON __DEC32_EPSILON__
+#define DEC64_EPSILON __DEC64_EPSILON__
+#define DEC128_EPSILON __DEC128_EPSILON__
+
+/* Minimum normalized positive floating-point number. */
+#undef DEC32_MIN
+#undef DEC64_MIN
+#undef DEC128_MIN
+#define DEC32_MIN __DEC32_MIN__
+#define DEC64_MIN __DEC64_MIN__
+#define DEC128_MIN __DEC128_MIN__
+
+/* The floating-point expression evaluation method.
+ -1 indeterminate
+ 0 evaluate all operations and constants just to the range and
+ precision of the type
+ 1 evaluate operations and constants of type _Decimal32
+ and _Decimal64 to the range and precision of the _Decimal64
+ type, evaluate _Decimal128 operations and constants to the
+ range and precision of the _Decimal128 type;
+ 2 evaluate all operations and constants to the range and
+ precision of the _Decimal128 type. */
+
+#undef DEC_EVAL_METHOD
+#define DEC_EVAL_METHOD __DEC_EVAL_METHOD__
+
+#endif /* __STDC_WANT_DEC_FP__ || __STDC_WANT_IEC_60559_DFP_EXT__ || C2X. */
+
+#ifdef __STDC_WANT_DEC_FP__
+
+/* Minimum subnormal positive floating-point number. */
+#undef DEC32_SUBNORMAL_MIN
+#undef DEC64_SUBNORMAL_MIN
+#undef DEC128_SUBNORMAL_MIN
+#define DEC32_SUBNORMAL_MIN __DEC32_SUBNORMAL_MIN__
+#define DEC64_SUBNORMAL_MIN __DEC64_SUBNORMAL_MIN__
+#define DEC128_SUBNORMAL_MIN __DEC128_SUBNORMAL_MIN__
+
+#endif /* __STDC_WANT_DEC_FP__. */
+
+#if (defined __STDC_WANT_IEC_60559_DFP_EXT__ \
+ || (defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L))
+
+/* Minimum subnormal positive floating-point number. */
+#undef DEC32_TRUE_MIN
+#undef DEC64_TRUE_MIN
+#undef DEC128_TRUE_MIN
+#define DEC32_TRUE_MIN __DEC32_SUBNORMAL_MIN__
+#define DEC64_TRUE_MIN __DEC64_SUBNORMAL_MIN__
+#define DEC128_TRUE_MIN __DEC128_SUBNORMAL_MIN__
+
+#endif /* __STDC_WANT_IEC_60559_DFP_EXT__ || C2X. */
+
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+
+/* Infinity in type _Decimal32. */
+#undef DEC_INFINITY
+#define DEC_INFINITY (__builtin_infd32 ())
+
+/* Quiet NaN in type _Decimal32. */
+#undef DEC_NAN
+#define DEC_NAN (__builtin_nand32 (""))
+
+/* Signaling NaN in each decimal floating-point type. */
+#undef DEC32_SNAN
+#define DEC32_SNAN (__builtin_nansd32 (""))
+#undef DEC64_SNAN
+#define DEC64_SNAN (__builtin_nansd64 (""))
+#undef DEC128_SNAN
+#define DEC128_SNAN (__builtin_nansd128 (""))
+
+#endif /* C2X */
+
+#endif /* __DEC32_MANT_DIG__ */
+
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+#define __STDC_VERSION_FLOAT_H__ 202311L
+#endif
+
+#endif /* _FLOAT_H___ */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/gcov.h b/lib/gcc/arm-none-eabi/13.2.1/include/gcov.h
new file mode 100644
index 0000000..77c1046
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/gcov.h
@@ -0,0 +1,70 @@
+/* GCOV interface routines.
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GCOV_H
+#define GCC_GCOV_H
+
+struct gcov_info;
+
+/* Set all counters to zero. */
+
+extern void __gcov_reset (void);
+
+/* Write profile information to a file. */
+
+extern void __gcov_dump (void);
+
+/* Convert the gcov information referenced by INFO to a gcda data stream.
+ The FILENAME_FN callback is called exactly once with the filename
+ associated with the gcov information.  The filename may be NULL.
+ Afterwards, the DUMP_FN callback is called with chunks (the begin and
+ length of the chunk are passed as the first two callback parameters) of
+ the gcda data stream.  The ALLOCATE_FN callback shall allocate memory
+ with a size in characters specified by the first callback parameter.
+ The ARG parameter is a user-provided argument passed as the last
+ argument to the callback functions.  It is recommended to use
+ __gcov_filename_to_gcfn () in the filename callback function. */
+
+extern void
+__gcov_info_to_gcda (const struct gcov_info *__info,
+ void (*__filename_fn) (const char *, void *),
+ void (*__dump_fn) (const void *, unsigned, void *),
+ void *(*__allocate_fn) (unsigned, void *),
+ void *__arg);
+
+/* Convert the FILENAME to a gcfn data stream. The DUMP_FN callback is
+ subsequently called with chunks (the begin and length of the chunk are
+ passed as the first two callback parameters) of the gcfn data stream.
+ The ARG parameter is a user-provided argument passed as the last
+ argument to the DUMP_FN callback function. This function is intended
+ to be used by the filename callback of __gcov_info_to_gcda(). The gcfn
+ data stream is used by the merge-stream subcommand of the gcov-tool to
+ get the filename associated with a gcda data stream. */
+
+extern void
+__gcov_filename_to_gcfn (const char *__filename,
+ void (*__dump_fn) (const void *, unsigned, void *),
+ void *__arg);
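+
+/* For illustration, a freestanding program could stream its profile out
+ through these callbacks roughly as follows (dump_fn, filename_fn,
+ allocate_fn, write_bytes, get_scratch_buffer and info are hypothetical
+ names):
+
+ static void dump_fn (const void *d, unsigned n, void *arg)
+ { write_bytes (d, n); } // e.g. push the chunk over a serial port
+
+ static void filename_fn (const char *f, void *arg)
+ { __gcov_filename_to_gcfn (f, dump_fn, arg); }
+
+ static void *allocate_fn (unsigned n, void *arg)
+ { return get_scratch_buffer (n); } // no heap assumed
+
+ __gcov_info_to_gcda (info, filename_fn, dump_fn, allocate_fn, 0);
+*/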
+
+#endif /* GCC_GCOV_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/iso646.h b/lib/gcc/arm-none-eabi/13.2.1/include/iso646.h
new file mode 100644
index 0000000..44aea50
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/iso646.h
@@ -0,0 +1,45 @@
+/* Copyright (C) 1997-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.9 Alternative spellings <iso646.h>
+ */
+
+#ifndef _ISO646_H
+#define _ISO646_H
+
+#ifndef __cplusplus
+#define and &&
+#define and_eq &=
+#define bitand &
+#define bitor |
+#define compl ~
+#define not !
+#define not_eq !=
+#define or ||
+#define or_eq |=
+#define xor ^
+#define xor_eq ^=
+#endif
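+
+/* For illustration: with these spellings,
+
+ if (p not_eq 0 and not done) { }
+
+ is equivalent to
+
+ if (p != 0 && !done) { }
+*/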
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/limits.h b/lib/gcc/arm-none-eabi/13.2.1/include/limits.h
new file mode 100644
index 0000000..0cf9c39
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/limits.h
@@ -0,0 +1,208 @@
+/* Copyright (C) 1992-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* This administrivia gets added to the beginning of limits.h
+ if the system has its own version of limits.h. */
+
+/* We use _GCC_LIMITS_H_ because we want this not to match
+ any macros that the system's limits.h uses for its own purposes. */
+#ifndef _GCC_LIMITS_H_ /* Terminated in limity.h. */
+#define _GCC_LIMITS_H_
+
+#ifndef _LIBC_LIMITS_H_
+/* Use "..." so that we find syslimits.h only in this same directory. */
+#include "syslimits.h"
+#endif
+/* Copyright (C) 1991-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef _LIMITS_H___
+#define _LIMITS_H___
+
+/* Number of bits in a `char'. */
+#undef CHAR_BIT
+#define CHAR_BIT __CHAR_BIT__
+
+/* Maximum length of a multibyte character. */
+#ifndef MB_LEN_MAX
+#define MB_LEN_MAX 1
+#endif
+
+/* Minimum and maximum values a `signed char' can hold. */
+#undef SCHAR_MIN
+#define SCHAR_MIN (-SCHAR_MAX - 1)
+#undef SCHAR_MAX
+#define SCHAR_MAX __SCHAR_MAX__
+
+/* Maximum value an `unsigned char' can hold. (Minimum is 0). */
+#undef UCHAR_MAX
+#if __SCHAR_MAX__ == __INT_MAX__
+# define UCHAR_MAX (SCHAR_MAX * 2U + 1U)
+#else
+# define UCHAR_MAX (SCHAR_MAX * 2 + 1)
+#endif
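+
+/* The 2U + 1U form matters when a plain char is as wide as an int
+ (__SCHAR_MAX__ == __INT_MAX__): there SCHAR_MAX * 2 + 1 would overflow
+ in signed int arithmetic, so unsigned constants are used to give both
+ the computation and the macro an unsigned type. */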
+
+/* Minimum and maximum values a `char' can hold. */
+#ifdef __CHAR_UNSIGNED__
+# undef CHAR_MIN
+# if __SCHAR_MAX__ == __INT_MAX__
+# define CHAR_MIN 0U
+# else
+# define CHAR_MIN 0
+# endif
+# undef CHAR_MAX
+# define CHAR_MAX UCHAR_MAX
+#else
+# undef CHAR_MIN
+# define CHAR_MIN SCHAR_MIN
+# undef CHAR_MAX
+# define CHAR_MAX SCHAR_MAX
+#endif
+
+/* Minimum and maximum values a `signed short int' can hold. */
+#undef SHRT_MIN
+#define SHRT_MIN (-SHRT_MAX - 1)
+#undef SHRT_MAX
+#define SHRT_MAX __SHRT_MAX__
+
+/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */
+#undef USHRT_MAX
+#if __SHRT_MAX__ == __INT_MAX__
+# define USHRT_MAX (SHRT_MAX * 2U + 1U)
+#else
+# define USHRT_MAX (SHRT_MAX * 2 + 1)
+#endif
+
+/* Minimum and maximum values a `signed int' can hold. */
+#undef INT_MIN
+#define INT_MIN (-INT_MAX - 1)
+#undef INT_MAX
+#define INT_MAX __INT_MAX__
+
+/* Maximum value an `unsigned int' can hold. (Minimum is 0). */
+#undef UINT_MAX
+#define UINT_MAX (INT_MAX * 2U + 1U)
+
+/* Minimum and maximum values a `signed long int' can hold.
+ (Same as `int'). */
+#undef LONG_MIN
+#define LONG_MIN (-LONG_MAX - 1L)
+#undef LONG_MAX
+#define LONG_MAX __LONG_MAX__
+
+/* Maximum value an `unsigned long int' can hold. (Minimum is 0). */
+#undef ULONG_MAX
+#define ULONG_MAX (LONG_MAX * 2UL + 1UL)
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+/* Minimum and maximum values a `signed long long int' can hold. */
+# undef LLONG_MIN
+# define LLONG_MIN (-LLONG_MAX - 1LL)
+# undef LLONG_MAX
+# define LLONG_MAX __LONG_LONG_MAX__
+
+/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */
+# undef ULLONG_MAX
+# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL)
+#endif
+
+#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__)
+/* Minimum and maximum values a `signed long long int' can hold. */
+# undef LONG_LONG_MIN
+# define LONG_LONG_MIN (-LONG_LONG_MAX - 1LL)
+# undef LONG_LONG_MAX
+# define LONG_LONG_MAX __LONG_LONG_MAX__
+
+/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */
+# undef ULONG_LONG_MAX
+# define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1ULL)
+#endif
+
+#if (defined __STDC_WANT_IEC_60559_BFP_EXT__ \
+ || (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L))
+/* TS 18661-1 / C2X widths of integer types. */
+# undef CHAR_WIDTH
+# define CHAR_WIDTH __SCHAR_WIDTH__
+# undef SCHAR_WIDTH
+# define SCHAR_WIDTH __SCHAR_WIDTH__
+# undef UCHAR_WIDTH
+# define UCHAR_WIDTH __SCHAR_WIDTH__
+# undef SHRT_WIDTH
+# define SHRT_WIDTH __SHRT_WIDTH__
+# undef USHRT_WIDTH
+# define USHRT_WIDTH __SHRT_WIDTH__
+# undef INT_WIDTH
+# define INT_WIDTH __INT_WIDTH__
+# undef UINT_WIDTH
+# define UINT_WIDTH __INT_WIDTH__
+# undef LONG_WIDTH
+# define LONG_WIDTH __LONG_WIDTH__
+# undef ULONG_WIDTH
+# define ULONG_WIDTH __LONG_WIDTH__
+# undef LLONG_WIDTH
+# define LLONG_WIDTH __LONG_LONG_WIDTH__
+# undef ULLONG_WIDTH
+# define ULLONG_WIDTH __LONG_LONG_WIDTH__
+#endif
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L
+/* C2X width and limit of _Bool. */
+# undef BOOL_MAX
+# define BOOL_MAX 1
+# undef BOOL_WIDTH
+# define BOOL_WIDTH 1
+
+# define __STDC_VERSION_LIMITS_H__ 202311L
+#endif
+
+#endif /* _LIMITS_H___ */
+/* This administrivia gets added to the end of limits.h
+ if the system has its own version of limits.h. */
+
+#else /* not _GCC_LIMITS_H_ */
+
+#ifdef _GCC_NEXT_LIMITS_H
+#include_next <limits.h> /* recurse down to the real one */
+#endif
+
+#endif /* not _GCC_LIMITS_H_ */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/mmintrin.h b/lib/gcc/arm-none-eabi/13.2.1/include/mmintrin.h
new file mode 100644
index 0000000..0dfbbf8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/mmintrin.h
@@ -0,0 +1,1836 @@
+/* Copyright (C) 2002-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _MMINTRIN_H_INCLUDED
+#define _MMINTRIN_H_INCLUDED
+
+#ifndef __IWMMXT__
+#error mmintrin.h included without enabling WMMX/WMMX2 instructions (e.g. -march=iwmmxt or -march=iwmmxt2)
+#endif
+
+
+#if defined __cplusplus
+extern "C" {
+/* Intrinsics use C name-mangling. */
+#endif /* __cplusplus */
+
+/* The data type intended for user use. */
+typedef unsigned long long __m64, __int64;
+
+/* Internal data types for implementing the intrinsics. */
+typedef int __v2si __attribute__ ((vector_size (8)));
+typedef short __v4hi __attribute__ ((vector_size (8)));
+typedef signed char __v8qi __attribute__ ((vector_size (8)));
+
+/* Provided for source compatibility with MMX. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_empty (void)
+{
+}
+
+/* "Convert" __m64 and __int64 into each other. */
+static __inline __m64
+_mm_cvtsi64_m64 (__int64 __i)
+{
+ return __i;
+}
+
+static __inline __int64
+_mm_cvtm64_si64 (__m64 __i)
+{
+ return __i;
+}
+
+static __inline int
+_mm_cvtsi64_si32 (__int64 __i)
+{
+ return __i;
+}
+
+static __inline __int64
+_mm_cvtsi32_si64 (int __i)
+{
+ return (__i & 0xffffffff);
+}
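+
+/* Usage sketch (illustrative), assuming an iWMMXt-enabled compile:
+
+     __m64 v    = _mm_cvtsi32_si64 (42);    low 32 bits hold 42
+     int   back = _mm_cvtsi64_si32 (v);     back == 42
+
+   Because of the 0xffffffff mask above, _mm_cvtsi32_si64 zero-extends
+   its argument into the 64-bit type rather than sign-extending it.  */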
+
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with signed saturation. */
+static __inline __m64
+_mm_packs_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackhss ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
+ the result, and the two 32-bit values from M2 into the upper two 16-bit
+ values of the result, all with signed saturation. */
+static __inline __m64
+_mm_packs_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackwss ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Copy the 64-bit value from M1 into the lower 32-bits of the result, and
+ the 64-bit value from M2 into the upper 32-bits of the result, all with
+ signed saturation for values that do not fit exactly into 32-bits. */
+static __inline __m64
+_mm_packs_pi64 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackdss ((long long)__m1, (long long)__m2);
+}
+
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with unsigned saturation. */
+static __inline __m64
+_mm_packs_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackhus ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
+ the result, and the two 32-bit values from M2 into the upper two 16-bit
+ values of the result, all with unsigned saturation. */
+static __inline __m64
+_mm_packs_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackwus ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Copy the 64-bit value from M1 into the lower 32-bits of the result, and
+ the 64-bit value from M2 into the upper 32-bits of the result, all with
+ unsigned saturation for values that do not fit exactly into 32-bits. */
+static __inline __m64
+_mm_packs_pu64 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackdus ((long long)__m1, (long long)__m2);
+}
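+
+/* Worked example (illustrative) of signed saturation, using the
+   _mm_set_pi16 and _mm_setzero_si64 constructors defined later in this
+   header:
+
+     __m64 lo = _mm_set_pi16 (300, -200, 40, 7);
+     __m64 r  = _mm_packs_pi16 (lo, _mm_setzero_si64 ());
+
+   The low four bytes of r are 7, 40, -128, 127: the out-of-range inputs
+   -200 and 300 are clamped to [-128, 127].  The high four bytes are 0.  */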
+
+/* Interleave the four 8-bit values from the high half of M1 with the four
+ 8-bit values from the high half of M2. */
+static __inline __m64
+_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckihb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Interleave the two 16-bit values from the high half of M1 with the two
+ 16-bit values from the high half of M2. */
+static __inline __m64
+_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckihh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Interleave the 32-bit value from the high half of M1 with the 32-bit
+ value from the high half of M2. */
+static __inline __m64
+_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckihw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Interleave the four 8-bit values from the low half of M1 with the four
+ 8-bit values from the low half of M2. */
+static __inline __m64
+_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckilb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Interleave the two 16-bit values from the low half of M1 with the two
+ 16-bit values from the low half of M2. */
+static __inline __m64
+_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckilh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Interleave the 32-bit value from the low half of M1 with the 32-bit
+ value from the low half of M2. */
+static __inline __m64
+_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckilw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Take the four 8-bit values from the low half of M1, sign extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackel_pi8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelsb ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the low half of M1, sign extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackel_pi16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelsh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the low half of M1, and return it sign extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackel_pi32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelsw ((__v2si)__m1);
+}
+
+/* Take the four 8-bit values from the high half of M1, sign extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pi8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehsb ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the high half of M1, sign extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pi16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehsh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the high half of M1, and return it sign extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackeh_pi32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehsw ((__v2si)__m1);
+}
+
+/* Take the four 8-bit values from the low half of M1, zero extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackel_pu8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelub ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the low half of M1, zero extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackel_pu16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckeluh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the low half of M1, and return it zero extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackel_pu32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckeluw ((__v2si)__m1);
+}
+
+/* Take the four 8-bit values from the high half of M1, zero extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pu8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehub ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the high half of M1, zero extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pu16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehuh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the high half of M1, and return it zero extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackeh_pu32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehuw ((__v2si)__m1);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2. */
+static __inline __m64
+_mm_add_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2. */
+static __inline __m64
+_mm_add_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2. */
+static __inline __m64
+_mm_add_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddbss ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddhss ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2 using signed
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddwss ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddbus ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddhus ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2 using unsigned
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddwus ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
+static __inline __m64
+_mm_sub_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
+static __inline __m64
+_mm_sub_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
+static __inline __m64
+_mm_sub_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
+ saturating arithmetic. */
+static __inline __m64
+_mm_subs_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubbss ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ signed saturating arithmetic. */
+static __inline __m64
+_mm_subs_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubhss ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1 using
+ signed saturating arithmetic. */
+static __inline __m64
+_mm_subs_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubwss ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
+ unsigned saturating arithmetic. */
+static __inline __m64
+_mm_subs_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubbus ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ unsigned saturating arithmetic. */
+static __inline __m64
+_mm_subs_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubhus ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1 using
+ unsigned saturating arithmetic. */
+static __inline __m64
+_mm_subs_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubwus ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Multiply four signed 16-bit values in M1 by four signed 16-bit values
+   in M2, producing four 32-bit intermediate results, which are then
+   summed by pairs to produce two 32-bit results. */
+static __inline __m64
+_mm_madd_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmadds ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four unsigned 16-bit values in M1 by four unsigned 16-bit
+   values in M2, producing four 32-bit intermediate results, which are
+   then summed by pairs to produce two 32-bit results. */
+static __inline __m64
+_mm_madd_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmaddu ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
+ M2 and produce the high 16 bits of the 32-bit results. */
+static __inline __m64
+_mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmulsm ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four unsigned 16-bit values in M1 by four unsigned 16-bit values
+   in M2 and produce the high 16 bits of the 32-bit results. */
+static __inline __m64
+_mm_mulhi_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmulum ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
+ the low 16 bits of the results. */
+static __inline __m64
+_mm_mullo_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmulul ((__v4hi)__m1, (__v4hi)__m2);
+}
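+
+/* Worked example (illustrative): with 16-bit lanes
+     m1 = {a0, a1, a2, a3},  m2 = {b0, b1, b2, b3},
+   _mm_madd_pi16 returns the two 32-bit values
+     {a0*b0 + a1*b1,  a2*b2 + a3*b3}.
+   This is the usual dot-product building block, e.g. one tap of a FIR
+   filter:
+
+     acc = _mm_add_pi32 (acc, _mm_madd_pi16 (samples, coeffs));  */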
+
+/* Shift four 16-bit values in M left by COUNT. */
+static __inline __m64
+_mm_sll_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsllh ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_slli_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsllhi ((__v4hi)__m, __count);
+}
+
+/* Shift two 32-bit values in M left by COUNT. */
+static __inline __m64
+_mm_sll_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsllw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_slli_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsllwi ((__v2si)__m, __count);
+}
+
+/* Shift the 64-bit value in M left by COUNT. */
+static __inline __m64
+_mm_sll_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wslld (__m, __count);
+}
+
+static __inline __m64
+_mm_slli_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wslldi (__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
+static __inline __m64
+_mm_sra_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrah ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_srai_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrahi ((__v4hi)__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
+static __inline __m64
+_mm_sra_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsraw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_srai_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrawi ((__v2si)__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in the sign bit. */
+static __inline __m64
+_mm_sra_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrad (__m, __count);
+}
+
+static __inline __m64
+_mm_srai_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsradi (__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in zeros. */
+static __inline __m64
+_mm_srl_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrlh ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_srli_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrlhi ((__v4hi)__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in zeros. */
+static __inline __m64
+_mm_srl_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrlw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_srli_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrlwi ((__v2si)__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in zeros. */
+static __inline __m64
+_mm_srl_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrld (__m, __count);
+}
+
+static __inline __m64
+_mm_srli_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrldi (__m, __count);
+}
+
+/* Rotate four 16-bit values in M right by COUNT. */
+static __inline __m64
+_mm_ror_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wrorh ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_rori_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wrorhi ((__v4hi)__m, __count);
+}
+
+/* Rotate two 32-bit values in M right by COUNT. */
+static __inline __m64
+_mm_ror_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wrorw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_rori_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wrorwi ((__v2si)__m, __count);
+}
+
+/* Rotate the 64-bit value in M right by COUNT. */
+static __inline __m64
+_mm_ror_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wrord (__m, __count);
+}
+
+static __inline __m64
+_mm_rori_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wrordi (__m, __count);
+}
+
+/* Bit-wise AND the 64-bit values in M1 and M2. */
+static __inline __m64
+_mm_and_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wand (__m1, __m2);
+}
+
+/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+ 64-bit value in M2. */
+static __inline __m64
+_mm_andnot_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wandn (__m2, __m1);
+}
+
+/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
+static __inline __m64
+_mm_or_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wor (__m1, __m2);
+}
+
+/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
+static __inline __m64
+_mm_xor_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wxor (__m1, __m2);
+}
+
+/* Compare eight 8-bit values. The result of the comparison is 0xFF if the
+ test is true and zero if false. */
+static __inline __m64
+_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtsb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtub ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Compare four 16-bit values. The result of the comparison is 0xFFFF if
+ the test is true and zero if false. */
+static __inline __m64
+_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpeqh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtsh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtuh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
+ the test is true and zero if false. */
+static __inline __m64
+_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpeqw ((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtsw ((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtuw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Element-wise multiplication of the unsigned 16-bit values in __B and __C,
+   with the products then summed across all elements and added to __A. */
+static __inline __m64
+_mm_mac_pu16 (__m64 __A, __m64 __B, __m64 __C)
+{
+ return __builtin_arm_wmacu (__A, (__v4hi)__B, (__v4hi)__C);
+}
+
+/* Element-wise multiplication of the signed 16-bit values in __B and __C,
+   with the products then summed across all elements and added to __A. */
+static __inline __m64
+_mm_mac_pi16 (__m64 __A, __m64 __B, __m64 __C)
+{
+ return __builtin_arm_wmacs (__A, (__v4hi)__B, (__v4hi)__C);
+}
+
+/* Element-wise multiplication of the unsigned 16-bit values in __A and __B,
+   with the products then summed across all elements (zero accumulator). */
+static __inline __m64
+_mm_macz_pu16 (__m64 __A, __m64 __B)
+{
+ return __builtin_arm_wmacuz ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Element-wise multiplication of the signed 16-bit values in __A and __B,
+   with the products then summed across all elements (zero accumulator). */
+static __inline __m64
+_mm_macz_pi16 (__m64 __A, __m64 __B)
+{
+ return __builtin_arm_wmacsz ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Accumulate across all unsigned 8-bit values in __A. */
+static __inline __m64
+_mm_acc_pu8 (__m64 __A)
+{
+ return __builtin_arm_waccb ((__v8qi)__A);
+}
+
+/* Accumulate across all unsigned 16-bit values in __A. */
+static __inline __m64
+_mm_acc_pu16 (__m64 __A)
+{
+ return __builtin_arm_wacch ((__v4hi)__A);
+}
+
+/* Accumulate across all unsigned 32-bit values in __A. */
+static __inline __m64
+_mm_acc_pu32 (__m64 __A)
+{
+ return __builtin_arm_waccw ((__v2si)__A);
+}
+
+static __inline __m64
+_mm_mia_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmia (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miaph_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiaph (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miabb_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiabb (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miabt_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiabt (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miatb_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiatb (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miatt_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiatt (__A, __B, __C);
+}
+
+/* Extract one of the elements of A and sign extend.  The selector N must
+   be an immediate. */
+#define _mm_extract_pi8(A, N) __builtin_arm_textrmsb ((__v8qi)(A), (N))
+#define _mm_extract_pi16(A, N) __builtin_arm_textrmsh ((__v4hi)(A), (N))
+#define _mm_extract_pi32(A, N) __builtin_arm_textrmsw ((__v2si)(A), (N))
+
+/* Extract one of the elements of A and zero extend.  The selector N must
+   be an immediate. */
+#define _mm_extract_pu8(A, N) __builtin_arm_textrmub ((__v8qi)(A), (N))
+#define _mm_extract_pu16(A, N) __builtin_arm_textrmuh ((__v4hi)(A), (N))
+#define _mm_extract_pu32(A, N) __builtin_arm_textrmuw ((__v2si)(A), (N))
+
+/* Insert word D into one of the elements of A.  The selector N must be
+   an immediate. */
+#define _mm_insert_pi8(A, D, N) \
+ ((__m64) __builtin_arm_tinsrb ((__v8qi)(A), (D), (N)))
+#define _mm_insert_pi16(A, D, N) \
+ ((__m64) __builtin_arm_tinsrh ((__v4hi)(A), (D), (N)))
+#define _mm_insert_pi32(A, D, N) \
+ ((__m64) __builtin_arm_tinsrw ((__v2si)(A), (D), (N)))
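+
+/* Usage sketch (illustrative): the selector is encoded directly in the
+   instruction, so it must be a compile-time constant; a run-time value
+   will fail to compile.
+
+     __m64 v = _mm_set_pi16 (3, 2, 1, 0);
+     int   x = _mm_extract_pi16 (v, 2);       x == 2
+     v       = _mm_insert_pi16 (v, 99, 0);    lane 0 becomes 99  */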
+
+/* Compute the element-wise maximum of signed 8-bit values. */
+static __inline __m64
+_mm_max_pi8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxsb ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise maximum of signed 16-bit values. */
+static __inline __m64
+_mm_max_pi16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxsh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise maximum of signed 32-bit values. */
+static __inline __m64
+_mm_max_pi32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxsw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 8-bit values. */
+static __inline __m64
+_mm_max_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxub ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 16-bit values. */
+static __inline __m64
+_mm_max_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxuh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 32-bit values. */
+static __inline __m64
+_mm_max_pu32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxuw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise minimum of signed 8-bit values. */
+static __inline __m64
+_mm_min_pi8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminsb ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise minimum of signed 16-bit values. */
+static __inline __m64
+_mm_min_pi16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminsh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise minimum of signed 32-bit values. */
+static __inline __m64
+_mm_min_pi32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminsw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 8-bit values. */
+static __inline __m64
+_mm_min_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminub ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 16-bit values. */
+static __inline __m64
+_mm_min_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminuh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 32-bit values. */
+static __inline __m64
+_mm_min_pu32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminuw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Create an 8-bit mask of the signs of 8-bit values. */
+static __inline int
+_mm_movemask_pi8 (__m64 __A)
+{
+ return __builtin_arm_tmovmskb ((__v8qi)__A);
+}
+
+/* Create an 8-bit mask of the signs of 16-bit values. */
+static __inline int
+_mm_movemask_pi16 (__m64 __A)
+{
+ return __builtin_arm_tmovmskh ((__v4hi)__A);
+}
+
+/* Create an 8-bit mask of the signs of 32-bit values. */
+static __inline int
+_mm_movemask_pi32 (__m64 __A)
+{
+ return __builtin_arm_tmovmskw ((__v2si)__A);
+}
+
+/* Return a combination of the four 16-bit values in A. The selector
+ must be an immediate. */
+#define _mm_shuffle_pi16(A, N) \
+ ((__m64) __builtin_arm_wshufh ((__v4hi)(A), (N)))
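+
+/* Usage sketch (illustrative), assuming wshufh takes the same selector
+   encoding as the x86 pshufw it mirrors: two bits per destination lane,
+   lane 0 in bits 1:0 through lane 3 in bits 7:6.  Reversing the four
+   16-bit lanes is then:
+
+     __m64 r = _mm_shuffle_pi16 (v, 0x1B);    0x1B == 0b00011011  */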
+
+
+/* Compute the rounded averages of the unsigned 8-bit values in A and B. */
+static __inline __m64
+_mm_avg_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2br ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the rounded averages of the unsigned 16-bit values in A and B. */
+static __inline __m64
+_mm_avg_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2hr ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the averages of the unsigned 8-bit values in A and B. */
+static __inline __m64
+_mm_avg2_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2b ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the averages of the unsigned 16-bit values in A and B. */
+static __inline __m64
+_mm_avg2_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2h ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+ values in A and B. Return the value in the lower 16-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sad_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadbz ((__v8qi)__A, (__v8qi)__B);
+}
+
+static __inline __m64
+_mm_sada_pu8 (__m64 __A, __m64 __B, __m64 __C)
+{
+ return (__m64) __builtin_arm_wsadb ((__v2si)__A, (__v8qi)__B, (__v8qi)__C);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 16-bit
+ values in A and B. Return the value in the lower 32-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sad_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadhz ((__v4hi)__A, (__v4hi)__B);
+}
+
+static __inline __m64
+_mm_sada_pu16 (__m64 __A, __m64 __B, __m64 __C)
+{
+ return (__m64) __builtin_arm_wsadh ((__v2si)__A, (__v4hi)__B, (__v4hi)__C);
+}
+
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+ values in A and B. Return the value in the lower 16-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sadz_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadbz ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 16-bit
+ values in A and B. Return the value in the lower 32-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sadz_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadhz ((__v4hi)__A, (__v4hi)__B);
+}
+
+#define _mm_align_si64(__A,__B, N) \
+ (__m64) __builtin_arm_walign ((__v8qi) (__A),(__v8qi) (__B), (N))
+
+/* Creates a 64-bit zero. */
+static __inline __m64
+_mm_setzero_si64 (void)
+{
+ return __builtin_arm_wzero ();
+}
+
+/* Set and get arbitrary iWMMXt control registers.
+   Note that only registers 0-3 and 8-11 are currently defined;
+   the rest are reserved. */
+
+static __inline void
+_mm_setwcx (const int __value, const int __regno)
+{
+ switch (__regno)
+ {
+ case 0:
+ __asm __volatile ("tmcr wcid, %0" :: "r"(__value));
+ break;
+ case 1:
+ __asm __volatile ("tmcr wcon, %0" :: "r"(__value));
+ break;
+ case 2:
+ __asm __volatile ("tmcr wcssf, %0" :: "r"(__value));
+ break;
+ case 3:
+ __asm __volatile ("tmcr wcasf, %0" :: "r"(__value));
+ break;
+ case 8:
+ __builtin_arm_setwcgr0 (__value);
+ break;
+ case 9:
+ __builtin_arm_setwcgr1 (__value);
+ break;
+ case 10:
+ __builtin_arm_setwcgr2 (__value);
+ break;
+ case 11:
+ __builtin_arm_setwcgr3 (__value);
+ break;
+ default:
+ break;
+ }
+}
+
+static __inline int
+_mm_getwcx (const int __regno)
+{
+ int __value;
+ switch (__regno)
+ {
+ case 0:
+ __asm __volatile ("tmrc %0, wcid" : "=r"(__value));
+ break;
+ case 1:
+ __asm __volatile ("tmrc %0, wcon" : "=r"(__value));
+ break;
+ case 2:
+ __asm __volatile ("tmrc %0, wcssf" : "=r"(__value));
+ break;
+ case 3:
+ __asm __volatile ("tmrc %0, wcasf" : "=r"(__value));
+ break;
+ case 8:
+ return __builtin_arm_getwcgr0 ();
+ case 9:
+ return __builtin_arm_getwcgr1 ();
+ case 10:
+ return __builtin_arm_getwcgr2 ();
+ case 11:
+ return __builtin_arm_getwcgr3 ();
+ default:
+ break;
+ }
+ return __value;
+}
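+
+/* Usage sketch (illustrative): registers 8-11 are the general-purpose
+   control registers wCGR0-wCGR3, typically used for shift counts, while
+   registers 0-3 (wCID, wCon, wCSSF, wCASF) are ID and status registers
+   (wCID in particular is normally read-only).
+
+     _mm_setwcx (5, 8);         store 5 in wCGR0
+     int n = _mm_getwcx (8);    n == 5  */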
+
+/* Creates a vector of two 32-bit values; I0 is least significant. */
+static __inline __m64
+_mm_set_pi32 (int __i1, int __i0)
+{
+ union
+ {
+ __m64 __q;
+ struct
+ {
+ unsigned int __i0;
+ unsigned int __i1;
+ } __s;
+ } __u;
+
+ __u.__s.__i0 = __i0;
+ __u.__s.__i1 = __i1;
+
+ return __u.__q;
+}
+
+/* Creates a vector of four 16-bit values; W0 is least significant. */
+static __inline __m64
+_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
+{
+ unsigned int __i1 = (unsigned short) __w3 << 16 | (unsigned short) __w2;
+ unsigned int __i0 = (unsigned short) __w1 << 16 | (unsigned short) __w0;
+
+ return _mm_set_pi32 (__i1, __i0);
+}
+
+/* Creates a vector of eight 8-bit values; B0 is least significant. */
+static __inline __m64
+_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
+ char __b3, char __b2, char __b1, char __b0)
+{
+ unsigned int __i1, __i0;
+
+ __i1 = (unsigned char)__b7;
+ __i1 = __i1 << 8 | (unsigned char)__b6;
+ __i1 = __i1 << 8 | (unsigned char)__b5;
+ __i1 = __i1 << 8 | (unsigned char)__b4;
+
+ __i0 = (unsigned char)__b3;
+ __i0 = __i0 << 8 | (unsigned char)__b2;
+ __i0 = __i0 << 8 | (unsigned char)__b1;
+ __i0 = __i0 << 8 | (unsigned char)__b0;
+
+ return _mm_set_pi32 (__i1, __i0);
+}
+
+/* Similar, but with the arguments in reverse order. */
+static __inline __m64
+_mm_setr_pi32 (int __i0, int __i1)
+{
+ return _mm_set_pi32 (__i1, __i0);
+}
+
+static __inline __m64
+_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
+{
+ return _mm_set_pi16 (__w3, __w2, __w1, __w0);
+}
+
+static __inline __m64
+_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
+ char __b4, char __b5, char __b6, char __b7)
+{
+ return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+}
+
+/* Creates a vector of two 32-bit values, both elements containing I. */
+static __inline __m64
+_mm_set1_pi32 (int __i)
+{
+ return _mm_set_pi32 (__i, __i);
+}
+
+/* Creates a vector of four 16-bit values, all elements containing W. */
+static __inline __m64
+_mm_set1_pi16 (short __w)
+{
+ unsigned int __i = (unsigned short)__w << 16 | (unsigned short)__w;
+ return _mm_set1_pi32 (__i);
+}
+
+/* Creates a vector of eight 8-bit values, all elements containing B. */
+static __inline __m64
+_mm_set1_pi8 (char __b)
+{
+ unsigned int __w = (unsigned char)__b << 8 | (unsigned char)__b;
+ unsigned int __i = __w << 16 | __w;
+ return _mm_set1_pi32 (__i);
+}
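+
+/* Usage sketch (illustrative): the constructor families differ only in
+   argument order and replication.
+
+     __m64 a = _mm_set_pi16  (3, 2, 1, 0);    lane 0 == 0, lane 3 == 3
+     __m64 b = _mm_setr_pi16 (3, 2, 1, 0);    lane 0 == 3, lane 3 == 0
+     __m64 c = _mm_set1_pi16 (7);             all four lanes == 7  */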
+
+#ifdef __IWMMXT2__
+static __inline __m64
+_mm_abs_pi8 (__m64 m1)
+{
+ return (__m64) __builtin_arm_wabsb ((__v8qi)m1);
+}
+
+static __inline __m64
+_mm_abs_pi16 (__m64 m1)
+{
+ return (__m64) __builtin_arm_wabsh ((__v4hi)m1);
+}
+
+static __inline __m64
+_mm_abs_pi32 (__m64 m1)
+{
+ return (__m64) __builtin_arm_wabsw ((__v2si)m1);
+}
+
+static __inline __m64
+_mm_addsubhx_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_waddsubhx ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_absdiff_pu8 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wabsdiffb ((__v8qi)a, (__v8qi)b);
+}
+
+static __inline __m64
+_mm_absdiff_pu16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wabsdiffh ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_absdiff_pu32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wabsdiffw ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_addc_pu16 (__m64 a, __m64 b)
+{
+ __m64 result;
+ __asm__ __volatile__ ("waddhc %0, %1, %2" : "=y" (result) : "y" (a), "y" (b));
+ return result;
+}
+
+static __inline __m64
+_mm_addc_pu32 (__m64 a, __m64 b)
+{
+ __m64 result;
+ __asm__ __volatile__ ("waddwc %0, %1, %2" : "=y" (result) : "y" (a), "y" (b));
+ return result;
+}
+
+static __inline __m64
+_mm_avg4_pu8 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wavg4 ((__v8qi)a, (__v8qi)b);
+}
+
+static __inline __m64
+_mm_avg4r_pu8 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wavg4r ((__v8qi)a, (__v8qi)b);
+}
+
+static __inline __m64
+_mm_maddx_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmaddsx ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_maddx_pu16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmaddux ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_msub_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmaddsn ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_msub_pu16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmaddun ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_mulhi_pi32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulwsm ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_mulhi_pu32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulwum ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_mulhir_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulsmr ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_mulhir_pi32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulwsmr ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_mulhir_pu16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulumr ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_mulhir_pu32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulwumr ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_mullo_pi32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulwl ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_qmulm_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wqmulm ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_qmulm_pi32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wqmulwm ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_qmulmr_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wqmulmr ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_qmulmr_pi32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wqmulwmr ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_subaddhx_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wsubaddhx ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_addbhusl_pu8 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_waddbhusl ((__v4hi)a, (__v8qi)b);
+}
+
+static __inline __m64
+_mm_addbhusm_pu8 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_waddbhusm ((__v4hi)a, (__v8qi)b);
+}
+
+#define _mm_qmiabb_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiabb ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiabbn_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiabbn ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiabt_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiabt ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiabtn_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiabtn ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiatb_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiatb ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiatbn_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiatbn ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiatt_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiatt ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiattn_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiattn ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiabb_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiabb (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiabbn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiabbn (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiabt_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiabt (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiabtn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiabtn (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiatb_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiatb (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiatbn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiatbn (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiatt_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiatt (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiattn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiattn (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawbb_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawbb (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawbbn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawbbn (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawbt_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawbt (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawbtn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawbtn (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawtb_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawtb (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawtbn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawtbn (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawtt_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawtt (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawttn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawttn (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+/* The third argument must be an immediate. */
+#define _mm_merge_si64(a, b, n) \
+ ({\
+ __m64 result;\
+ result = (__m64) __builtin_arm_wmerge ((__m64) (a), (__m64) (b), (n));\
+ result;\
+ })
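+
+/* Usage sketch (illustrative), assuming WMERGE selects eight consecutive
+   bytes from the concatenation of the two operands starting at byte
+   offset n of the first:
+
+     __m64 r = _mm_merge_si64 (a, b, 3);
+
+   would yield bytes 3..7 of a followed by bytes 0..2 of b.  */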
+#endif /* __IWMMXT2__ */
+
+static __inline __m64
+_mm_alignr0_si64 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_walignr0 ((__v8qi) a, (__v8qi) b);
+}
+
+static __inline __m64
+_mm_alignr1_si64 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_walignr1 ((__v8qi) a, (__v8qi) b);
+}
+
+static __inline __m64
+_mm_alignr2_si64 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_walignr2 ((__v8qi) a, (__v8qi) b);
+}
+
+static __inline __m64
+_mm_alignr3_si64 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_walignr3 ((__v8qi) a, (__v8qi) b);
+}
+
+static __inline void
+_mm_tandcb ()
+{
+ __asm __volatile ("tandcb r15");
+}
+
+static __inline void
+_mm_tandch ()
+{
+ __asm __volatile ("tandch r15");
+}
+
+static __inline void
+_mm_tandcw ()
+{
+ __asm __volatile ("tandcw r15");
+}
+
+#define _mm_textrcb(n) \
+ ({\
+ __asm__ __volatile__ (\
+ "textrcb r15, %0" : : "i" (n));\
+ })
+
+#define _mm_textrch(n) \
+ ({\
+ __asm__ __volatile__ (\
+ "textrch r15, %0" : : "i" (n));\
+ })
+
+#define _mm_textrcw(n) \
+ ({\
+ __asm__ __volatile__ (\
+ "textrcw r15, %0" : : "i" (n));\
+ })
+
+static __inline void
+_mm_torcb ()
+{
+ __asm __volatile ("torcb r15");
+}
+
+static __inline void
+_mm_torch ()
+{
+ __asm __volatile ("torch r15");
+}
+
+static __inline void
+_mm_torcw ()
+{
+ __asm __volatile ("torcw r15");
+}
+
+#ifdef __IWMMXT2__
+static __inline void
+_mm_torvscb ()
+{
+ __asm __volatile ("torvscb r15");
+}
+
+static __inline void
+_mm_torvsch ()
+{
+ __asm __volatile ("torvsch r15");
+}
+
+static __inline void
+_mm_torvscw ()
+{
+ __asm __volatile ("torvscw r15");
+}
+#endif /* __IWMMXT2__ */
+
+static __inline __m64
+_mm_tbcst_pi8 (int value)
+{
+ return (__m64) __builtin_arm_tbcstb ((signed char) value);
+}
+
+static __inline __m64
+_mm_tbcst_pi16 (int value)
+{
+ return (__m64) __builtin_arm_tbcsth ((short) value);
+}
+
+static __inline __m64
+_mm_tbcst_pi32 (int value)
+{
+ return (__m64) __builtin_arm_tbcstw (value);
+}
+
+#define _m_empty _mm_empty
+#define _m_packsswb _mm_packs_pi16
+#define _m_packssdw _mm_packs_pi32
+#define _m_packuswb _mm_packs_pu16
+#define _m_packusdw _mm_packs_pu32
+#define _m_packssqd _mm_packs_pi64
+#define _m_packusqd _mm_packs_pu64
+#define _mm_packs_si64 _mm_packs_pi64
+#define _mm_packs_su64 _mm_packs_pu64
+#define _m_punpckhbw _mm_unpackhi_pi8
+#define _m_punpckhwd _mm_unpackhi_pi16
+#define _m_punpckhdq _mm_unpackhi_pi32
+#define _m_punpcklbw _mm_unpacklo_pi8
+#define _m_punpcklwd _mm_unpacklo_pi16
+#define _m_punpckldq _mm_unpacklo_pi32
+#define _m_punpckehsbw _mm_unpackeh_pi8
+#define _m_punpckehswd _mm_unpackeh_pi16
+#define _m_punpckehsdq _mm_unpackeh_pi32
+#define _m_punpckehubw _mm_unpackeh_pu8
+#define _m_punpckehuwd _mm_unpackeh_pu16
+#define _m_punpckehudq _mm_unpackeh_pu32
+#define _m_punpckelsbw _mm_unpackel_pi8
+#define _m_punpckelswd _mm_unpackel_pi16
+#define _m_punpckelsdq _mm_unpackel_pi32
+#define _m_punpckelubw _mm_unpackel_pu8
+#define _m_punpckeluwd _mm_unpackel_pu16
+#define _m_punpckeludq _mm_unpackel_pu32
+#define _m_paddb _mm_add_pi8
+#define _m_paddw _mm_add_pi16
+#define _m_paddd _mm_add_pi32
+#define _m_paddsb _mm_adds_pi8
+#define _m_paddsw _mm_adds_pi16
+#define _m_paddsd _mm_adds_pi32
+#define _m_paddusb _mm_adds_pu8
+#define _m_paddusw _mm_adds_pu16
+#define _m_paddusd _mm_adds_pu32
+#define _m_psubb _mm_sub_pi8
+#define _m_psubw _mm_sub_pi16
+#define _m_psubd _mm_sub_pi32
+#define _m_psubsb _mm_subs_pi8
+#define _m_psubsw _mm_subs_pi16
+#define _m_psubsd _mm_subs_pi32
+#define _m_psubusb _mm_subs_pu8
+#define _m_psubusw _mm_subs_pu16
+#define _m_psubusd _mm_subs_pu32
+#define _m_pmaddwd _mm_madd_pi16
+#define _m_pmadduwd _mm_madd_pu16
+#define _m_pmulhw _mm_mulhi_pi16
+#define _m_pmulhuw _mm_mulhi_pu16
+#define _m_pmullw _mm_mullo_pi16
+#define _m_pmacsw _mm_mac_pi16
+#define _m_pmacuw _mm_mac_pu16
+#define _m_pmacszw _mm_macz_pi16
+#define _m_pmacuzw _mm_macz_pu16
+#define _m_paccb _mm_acc_pu8
+#define _m_paccw _mm_acc_pu16
+#define _m_paccd _mm_acc_pu32
+#define _m_pmia _mm_mia_si64
+#define _m_pmiaph _mm_miaph_si64
+#define _m_pmiabb _mm_miabb_si64
+#define _m_pmiabt _mm_miabt_si64
+#define _m_pmiatb _mm_miatb_si64
+#define _m_pmiatt _mm_miatt_si64
+#define _m_psllw _mm_sll_pi16
+#define _m_psllwi _mm_slli_pi16
+#define _m_pslld _mm_sll_pi32
+#define _m_pslldi _mm_slli_pi32
+#define _m_psllq _mm_sll_si64
+#define _m_psllqi _mm_slli_si64
+#define _m_psraw _mm_sra_pi16
+#define _m_psrawi _mm_srai_pi16
+#define _m_psrad _mm_sra_pi32
+#define _m_psradi _mm_srai_pi32
+#define _m_psraq _mm_sra_si64
+#define _m_psraqi _mm_srai_si64
+#define _m_psrlw _mm_srl_pi16
+#define _m_psrlwi _mm_srli_pi16
+#define _m_psrld _mm_srl_pi32
+#define _m_psrldi _mm_srli_pi32
+#define _m_psrlq _mm_srl_si64
+#define _m_psrlqi _mm_srli_si64
+#define _m_prorw _mm_ror_pi16
+#define _m_prorwi _mm_rori_pi16
+#define _m_prord _mm_ror_pi32
+#define _m_prordi _mm_rori_pi32
+#define _m_prorq _mm_ror_si64
+#define _m_prorqi _mm_rori_si64
+#define _m_pand _mm_and_si64
+#define _m_pandn _mm_andnot_si64
+#define _m_por _mm_or_si64
+#define _m_pxor _mm_xor_si64
+#define _m_pcmpeqb _mm_cmpeq_pi8
+#define _m_pcmpeqw _mm_cmpeq_pi16
+#define _m_pcmpeqd _mm_cmpeq_pi32
+#define _m_pcmpgtb _mm_cmpgt_pi8
+#define _m_pcmpgtub _mm_cmpgt_pu8
+#define _m_pcmpgtw _mm_cmpgt_pi16
+#define _m_pcmpgtuw _mm_cmpgt_pu16
+#define _m_pcmpgtd _mm_cmpgt_pi32
+#define _m_pcmpgtud _mm_cmpgt_pu32
+#define _m_pextrb _mm_extract_pi8
+#define _m_pextrw _mm_extract_pi16
+#define _m_pextrd _mm_extract_pi32
+#define _m_pextrub _mm_extract_pu8
+#define _m_pextruw _mm_extract_pu16
+#define _m_pextrud _mm_extract_pu32
+#define _m_pinsrb _mm_insert_pi8
+#define _m_pinsrw _mm_insert_pi16
+#define _m_pinsrd _mm_insert_pi32
+#define _m_pmaxsb _mm_max_pi8
+#define _m_pmaxsw _mm_max_pi16
+#define _m_pmaxsd _mm_max_pi32
+#define _m_pmaxub _mm_max_pu8
+#define _m_pmaxuw _mm_max_pu16
+#define _m_pmaxud _mm_max_pu32
+#define _m_pminsb _mm_min_pi8
+#define _m_pminsw _mm_min_pi16
+#define _m_pminsd _mm_min_pi32
+#define _m_pminub _mm_min_pu8
+#define _m_pminuw _mm_min_pu16
+#define _m_pminud _mm_min_pu32
+#define _m_pmovmskb _mm_movemask_pi8
+#define _m_pmovmskw _mm_movemask_pi16
+#define _m_pmovmskd _mm_movemask_pi32
+#define _m_pshufw _mm_shuffle_pi16
+#define _m_pavgb _mm_avg_pu8
+#define _m_pavgw _mm_avg_pu16
+#define _m_pavg2b _mm_avg2_pu8
+#define _m_pavg2w _mm_avg2_pu16
+#define _m_psadbw _mm_sad_pu8
+#define _m_psadwd _mm_sad_pu16
+#define _m_psadzbw _mm_sadz_pu8
+#define _m_psadzwd _mm_sadz_pu16
+#define _m_paligniq _mm_align_si64
+#define _m_cvt_si2pi _mm_cvtsi64_m64
+#define _m_cvt_pi2si _mm_cvtm64_si64
+#define _m_from_int _mm_cvtsi32_si64
+#define _m_to_int _mm_cvtsi64_si32
+
+#if defined __cplusplus
+} /* End "C" */
+#endif /* __cplusplus */
+
+#endif /* _MMINTRIN_H_INCLUDED */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/stdalign.h b/lib/gcc/arm-none-eabi/13.2.1/include/stdalign.h
new file mode 100644
index 0000000..a85a37e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/stdalign.h
@@ -0,0 +1,40 @@
+/* Copyright (C) 2011-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* ISO C11: 7.15 Alignment <stdalign.h>. */
+
+#ifndef _STDALIGN_H
+#define _STDALIGN_H
+
+#if (!defined __cplusplus \
+ && !(defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L))
+
+#define alignas _Alignas
+#define alignof _Alignof
+
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+
+#endif
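+
+/* Usage sketch (illustrative):
+
+     alignas (16) static char buf[64];    16-byte aligned storage
+     _Static_assert (alignof (double) >= 4, "unexpected ABI");  */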
+
+#endif /* stdalign.h */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/stdarg.h b/lib/gcc/arm-none-eabi/13.2.1/include/stdarg.h
new file mode 100644
index 0000000..e386dbf
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/stdarg.h
@@ -0,0 +1,135 @@
+/* Copyright (C) 1989-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.15 Variable arguments <stdarg.h>
+ */
+
+#ifndef _STDARG_H
+#ifndef _ANSI_STDARG_H_
+#ifndef __need___va_list
+#define _STDARG_H
+#define _ANSI_STDARG_H_
+#endif /* not __need___va_list */
+#undef __need___va_list
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef __builtin_va_list __gnuc_va_list;
+#endif
+
+/* Define the standard macros for the user,
+ if this invocation was from the user program. */
+#ifdef _STDARG_H
+
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+#define va_start(v, ...) __builtin_va_start(v, 0)
+#else
+#define va_start(v,l) __builtin_va_start(v,l)
+#endif
+#define va_end(v) __builtin_va_end(v)
+#define va_arg(v,l) __builtin_va_arg(v,l)
+#if !defined(__STRICT_ANSI__) || __STDC_VERSION__ + 0 >= 199900L \
+ || __cplusplus + 0 >= 201103L
+#define va_copy(d,s) __builtin_va_copy(d,s)
+#endif
+#define __va_copy(d,s) __builtin_va_copy(d,s)
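+
+/* Usage sketch (illustrative): a typical variadic function built on these
+   macros.
+
+     int sum (int count, ...)
+     {
+       va_list ap;
+       int total = 0;
+       va_start (ap, count);
+       while (count-- > 0)
+         total += va_arg (ap, int);
+       va_end (ap);
+       return total;
+     }
+
+   In C23 mode the named parameter is no longer required, which is why the
+   C23 definition above discards everything after its first argument.  */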
+
+/* Define va_list, if desired, from __gnuc_va_list. */
+/* We deliberately do not define va_list when called from
+ stdio.h, because ANSI C says that stdio.h is not supposed to define
+ va_list. stdio.h needs to have access to that data type,
+ but must not use that name. It should use the name __gnuc_va_list,
+ which is safe because it is reserved for the implementation. */
+
+#ifdef _BSD_VA_LIST
+#undef _BSD_VA_LIST
+#endif
+
+#if defined(__svr4__) || (defined(_SCO_DS) && !defined(__VA_LIST))
+/* SVR4.2 uses _VA_LIST for an internal alias for va_list,
+ so we must avoid testing it and setting it here.
+ SVR4 uses _VA_LIST as a flag in stdarg.h, but we should
+ have no conflict with that. */
+#ifndef _VA_LIST_
+#define _VA_LIST_
+#ifdef __i860__
+#ifndef _VA_LIST
+#define _VA_LIST va_list
+#endif
+#endif /* __i860__ */
+typedef __gnuc_va_list va_list;
+#ifdef _SCO_DS
+#define __VA_LIST
+#endif
+#endif /* _VA_LIST_ */
+#else /* not __svr4__ || _SCO_DS */
+
+/* The macro _VA_LIST_ is the same thing used by this file in Ultrix.
+ But on BSD NET2 we must not test or define or undef it.
+ (Note that the comments in NET 2's ansi.h
+ are incorrect for _VA_LIST_--see stdio.h!) */
+#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__) || defined(WINNT)
+/* The macro _VA_LIST_DEFINED is used in Windows NT 3.5 */
+#ifndef _VA_LIST_DEFINED
+/* The macro _VA_LIST is used in SCO Unix 3.2. */
+#ifndef _VA_LIST
+/* The macro _VA_LIST_T_H is used in the Bull dpx2 */
+#ifndef _VA_LIST_T_H
+/* The macro __va_list__ is used by BeOS. */
+#ifndef __va_list__
+typedef __gnuc_va_list va_list;
+#endif /* not __va_list__ */
+#endif /* not _VA_LIST_T_H */
+#endif /* not _VA_LIST */
+#endif /* not _VA_LIST_DEFINED */
+#if !(defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__))
+#define _VA_LIST_
+#endif
+#ifndef _VA_LIST
+#define _VA_LIST
+#endif
+#ifndef _VA_LIST_DEFINED
+#define _VA_LIST_DEFINED
+#endif
+#ifndef _VA_LIST_T_H
+#define _VA_LIST_T_H
+#endif
+#ifndef __va_list__
+#define __va_list__
+#endif
+
+#endif /* not _VA_LIST_, except on certain systems */
+
+#endif /* not __svr4__ */
+
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+#define __STDC_VERSION_STDARG_H__ 202311L
+#endif
+
+#endif /* _STDARG_H */
+
+#endif /* not _ANSI_STDARG_H_ */
+#endif /* not _STDARG_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/stdatomic.h b/lib/gcc/arm-none-eabi/13.2.1/include/stdatomic.h
new file mode 100644
index 0000000..0c0e46d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/stdatomic.h
@@ -0,0 +1,255 @@
+/* Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* ISO C11 Standard: 7.17 Atomics <stdatomic.h>. */
+
+#ifndef _STDATOMIC_H
+#define _STDATOMIC_H
+
+typedef enum
+ {
+ memory_order_relaxed = __ATOMIC_RELAXED,
+ memory_order_consume = __ATOMIC_CONSUME,
+ memory_order_acquire = __ATOMIC_ACQUIRE,
+ memory_order_release = __ATOMIC_RELEASE,
+ memory_order_acq_rel = __ATOMIC_ACQ_REL,
+ memory_order_seq_cst = __ATOMIC_SEQ_CST
+ } memory_order;
+
+
+typedef _Atomic _Bool atomic_bool;
+typedef _Atomic char atomic_char;
+typedef _Atomic signed char atomic_schar;
+typedef _Atomic unsigned char atomic_uchar;
+typedef _Atomic short atomic_short;
+typedef _Atomic unsigned short atomic_ushort;
+typedef _Atomic int atomic_int;
+typedef _Atomic unsigned int atomic_uint;
+typedef _Atomic long atomic_long;
+typedef _Atomic unsigned long atomic_ulong;
+typedef _Atomic long long atomic_llong;
+typedef _Atomic unsigned long long atomic_ullong;
+#ifdef __CHAR8_TYPE__
+typedef _Atomic __CHAR8_TYPE__ atomic_char8_t;
+#endif
+typedef _Atomic __CHAR16_TYPE__ atomic_char16_t;
+typedef _Atomic __CHAR32_TYPE__ atomic_char32_t;
+typedef _Atomic __WCHAR_TYPE__ atomic_wchar_t;
+typedef _Atomic __INT_LEAST8_TYPE__ atomic_int_least8_t;
+typedef _Atomic __UINT_LEAST8_TYPE__ atomic_uint_least8_t;
+typedef _Atomic __INT_LEAST16_TYPE__ atomic_int_least16_t;
+typedef _Atomic __UINT_LEAST16_TYPE__ atomic_uint_least16_t;
+typedef _Atomic __INT_LEAST32_TYPE__ atomic_int_least32_t;
+typedef _Atomic __UINT_LEAST32_TYPE__ atomic_uint_least32_t;
+typedef _Atomic __INT_LEAST64_TYPE__ atomic_int_least64_t;
+typedef _Atomic __UINT_LEAST64_TYPE__ atomic_uint_least64_t;
+typedef _Atomic __INT_FAST8_TYPE__ atomic_int_fast8_t;
+typedef _Atomic __UINT_FAST8_TYPE__ atomic_uint_fast8_t;
+typedef _Atomic __INT_FAST16_TYPE__ atomic_int_fast16_t;
+typedef _Atomic __UINT_FAST16_TYPE__ atomic_uint_fast16_t;
+typedef _Atomic __INT_FAST32_TYPE__ atomic_int_fast32_t;
+typedef _Atomic __UINT_FAST32_TYPE__ atomic_uint_fast32_t;
+typedef _Atomic __INT_FAST64_TYPE__ atomic_int_fast64_t;
+typedef _Atomic __UINT_FAST64_TYPE__ atomic_uint_fast64_t;
+typedef _Atomic __INTPTR_TYPE__ atomic_intptr_t;
+typedef _Atomic __UINTPTR_TYPE__ atomic_uintptr_t;
+typedef _Atomic __SIZE_TYPE__ atomic_size_t;
+typedef _Atomic __PTRDIFF_TYPE__ atomic_ptrdiff_t;
+typedef _Atomic __INTMAX_TYPE__ atomic_intmax_t;
+typedef _Atomic __UINTMAX_TYPE__ atomic_uintmax_t;
+
+
+#if !(defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L)
+#define ATOMIC_VAR_INIT(VALUE) (VALUE)
+#endif
+
+/* Initialize an atomic object pointed to by PTR with VAL. */
+#define atomic_init(PTR, VAL) \
+ atomic_store_explicit (PTR, VAL, __ATOMIC_RELAXED)
+
+#define kill_dependency(Y) \
+ __extension__ \
+ ({ \
+ __auto_type __kill_dependency_tmp = (Y); \
+ __kill_dependency_tmp; \
+ })
+
+extern void atomic_thread_fence (memory_order);
+#define atomic_thread_fence(MO) __atomic_thread_fence (MO)
+extern void atomic_signal_fence (memory_order);
+#define atomic_signal_fence(MO) __atomic_signal_fence (MO)
+#define atomic_is_lock_free(OBJ) __atomic_is_lock_free (sizeof (*(OBJ)), (OBJ))
+
+#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
+#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
+#ifdef __GCC_ATOMIC_CHAR8_T_LOCK_FREE
+#define ATOMIC_CHAR8_T_LOCK_FREE __GCC_ATOMIC_CHAR8_T_LOCK_FREE
+#endif
+#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
+#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
+
+
+/* Note that these macros require __auto_type to remove
+   _Atomic qualifiers (and const qualifiers, if those are valid on
+   macro operands).
+
+   Also note that this header uses the generic form of the __atomic
+   builtins, which requires taking the address of the value parameter
+   and then passing that value on. This allows the macros to work for
+   any type, and the compiler is smart enough to convert these to
+   lock-free _N variants where possible and to discard the
+   temporaries. */
+
+#define atomic_store_explicit(PTR, VAL, MO) \
+ __extension__ \
+ ({ \
+ __auto_type __atomic_store_ptr = (PTR); \
+ __typeof__ ((void)0, *__atomic_store_ptr) __atomic_store_tmp = (VAL); \
+ __atomic_store (__atomic_store_ptr, &__atomic_store_tmp, (MO)); \
+ })
+
+#define atomic_store(PTR, VAL) \
+ atomic_store_explicit (PTR, VAL, __ATOMIC_SEQ_CST)
+
+
+#define atomic_load_explicit(PTR, MO) \
+ __extension__ \
+ ({ \
+ __auto_type __atomic_load_ptr = (PTR); \
+ __typeof__ ((void)0, *__atomic_load_ptr) __atomic_load_tmp; \
+ __atomic_load (__atomic_load_ptr, &__atomic_load_tmp, (MO)); \
+ __atomic_load_tmp; \
+ })
+
+#define atomic_load(PTR) atomic_load_explicit (PTR, __ATOMIC_SEQ_CST)
+
+
+#define atomic_exchange_explicit(PTR, VAL, MO) \
+ __extension__ \
+ ({ \
+ __auto_type __atomic_exchange_ptr = (PTR); \
+ __typeof__ ((void)0, *__atomic_exchange_ptr) __atomic_exchange_val = (VAL); \
+ __typeof__ ((void)0, *__atomic_exchange_ptr) __atomic_exchange_tmp; \
+ __atomic_exchange (__atomic_exchange_ptr, &__atomic_exchange_val, \
+ &__atomic_exchange_tmp, (MO)); \
+ __atomic_exchange_tmp; \
+ })
+
+#define atomic_exchange(PTR, VAL) \
+ atomic_exchange_explicit (PTR, VAL, __ATOMIC_SEQ_CST)
+
+
+#define atomic_compare_exchange_strong_explicit(PTR, VAL, DES, SUC, FAIL) \
+ __extension__ \
+ ({ \
+ __auto_type __atomic_compare_exchange_ptr = (PTR); \
+ __typeof__ ((void)0, *__atomic_compare_exchange_ptr) __atomic_compare_exchange_tmp \
+ = (DES); \
+ __atomic_compare_exchange (__atomic_compare_exchange_ptr, (VAL), \
+ &__atomic_compare_exchange_tmp, 0, \
+ (SUC), (FAIL)); \
+ })
+
+#define atomic_compare_exchange_strong(PTR, VAL, DES) \
+ atomic_compare_exchange_strong_explicit (PTR, VAL, DES, __ATOMIC_SEQ_CST, \
+ __ATOMIC_SEQ_CST)
+
+#define atomic_compare_exchange_weak_explicit(PTR, VAL, DES, SUC, FAIL) \
+ __extension__ \
+ ({ \
+ __auto_type __atomic_compare_exchange_ptr = (PTR); \
+ __typeof__ ((void)0, *__atomic_compare_exchange_ptr) __atomic_compare_exchange_tmp \
+ = (DES); \
+ __atomic_compare_exchange (__atomic_compare_exchange_ptr, (VAL), \
+ &__atomic_compare_exchange_tmp, 1, \
+ (SUC), (FAIL)); \
+ })
+
+#define atomic_compare_exchange_weak(PTR, VAL, DES) \
+ atomic_compare_exchange_weak_explicit (PTR, VAL, DES, __ATOMIC_SEQ_CST, \
+ __ATOMIC_SEQ_CST)
+
+
+
+#define atomic_fetch_add(PTR, VAL) __atomic_fetch_add ((PTR), (VAL), \
+ __ATOMIC_SEQ_CST)
+#define atomic_fetch_add_explicit(PTR, VAL, MO) \
+ __atomic_fetch_add ((PTR), (VAL), (MO))
+
+#define atomic_fetch_sub(PTR, VAL) __atomic_fetch_sub ((PTR), (VAL), \
+ __ATOMIC_SEQ_CST)
+#define atomic_fetch_sub_explicit(PTR, VAL, MO) \
+ __atomic_fetch_sub ((PTR), (VAL), (MO))
+
+#define atomic_fetch_or(PTR, VAL) __atomic_fetch_or ((PTR), (VAL), \
+ __ATOMIC_SEQ_CST)
+#define atomic_fetch_or_explicit(PTR, VAL, MO) \
+ __atomic_fetch_or ((PTR), (VAL), (MO))
+
+#define atomic_fetch_xor(PTR, VAL) __atomic_fetch_xor ((PTR), (VAL), \
+ __ATOMIC_SEQ_CST)
+#define atomic_fetch_xor_explicit(PTR, VAL, MO) \
+ __atomic_fetch_xor ((PTR), (VAL), (MO))
+
+#define atomic_fetch_and(PTR, VAL) __atomic_fetch_and ((PTR), (VAL), \
+ __ATOMIC_SEQ_CST)
+#define atomic_fetch_and_explicit(PTR, VAL, MO) \
+ __atomic_fetch_and ((PTR), (VAL), (MO))
+
+
+typedef _Atomic struct
+{
+#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
+ _Bool __val;
+#else
+ unsigned char __val;
+#endif
+} atomic_flag;
+
+#define ATOMIC_FLAG_INIT { 0 }
+
+
+extern _Bool atomic_flag_test_and_set (volatile atomic_flag *);
+#define atomic_flag_test_and_set(PTR) \
+ __atomic_test_and_set ((PTR), __ATOMIC_SEQ_CST)
+extern _Bool atomic_flag_test_and_set_explicit (volatile atomic_flag *,
+ memory_order);
+#define atomic_flag_test_and_set_explicit(PTR, MO) \
+ __atomic_test_and_set ((PTR), (MO))
+
+extern void atomic_flag_clear (volatile atomic_flag *);
+#define atomic_flag_clear(PTR) __atomic_clear ((PTR), __ATOMIC_SEQ_CST)
+extern void atomic_flag_clear_explicit (volatile atomic_flag *, memory_order);
+#define atomic_flag_clear_explicit(PTR, MO) __atomic_clear ((PTR), (MO))
+
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+#define __STDC_VERSION_STDATOMIC_H__ 202311L
+#endif
+
+#endif /* _STDATOMIC_H */
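
Since each macro above expands to a generic __atomic builtin, one definition serves every object type, and the compiler lowers it to a lock-free instruction sequence where the target allows. A small sketch of typical use (C11 or later; the function names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int counter;                    /* zero at file scope */
static atomic_flag lock = ATOMIC_FLAG_INIT;

void
count_event (void)
{
  /* Relaxed suffices for a plain statistics counter: atomicity is
     needed, ordering against other memory operations is not. */
  atomic_fetch_add_explicit (&counter, 1, memory_order_relaxed);
}

bool
try_lock (void)
{
  /* test_and_set returns the previous value, so false means the
     flag was clear and we now hold it. */
  return !atomic_flag_test_and_set (&lock);
}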
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/stdbool.h b/lib/gcc/arm-none-eabi/13.2.1/include/stdbool.h
new file mode 100644
index 0000000..13bdcdb
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/stdbool.h
@@ -0,0 +1,51 @@
+/* Copyright (C) 1998-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.16 Boolean type and values <stdbool.h>
+ */
+
+#ifndef _STDBOOL_H
+#define _STDBOOL_H
+
+#ifndef __cplusplus
+
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+/* bool, true and false are keywords. */
+#else
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+
+#else /* __cplusplus */
+
+/* Supporting _Bool in C++ is a GCC extension. */
+#define _Bool bool
+
+#endif /* __cplusplus */
+
+/* Signal that all the definitions are present. */
+#define __bool_true_false_are_defined 1
+
+#endif /* stdbool.h */
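
The practical upshot: code spelled with bool, true, and false compiles identically whether those spellings come from these macros (C99 through C17) or are keywords (C23 and C++). A trivial sketch:

#include <stdbool.h>

/* Works under every language mode the header handles. */
static bool
is_even (int n)
{
  return (n % 2) == 0;
}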
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/stddef.h b/lib/gcc/arm-none-eabi/13.2.1/include/stddef.h
new file mode 100644
index 0000000..12ceef3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/stddef.h
@@ -0,0 +1,463 @@
+/* Copyright (C) 1989-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.17 Common definitions <stddef.h>
+ */
+#if (!defined(_STDDEF_H) && !defined(_STDDEF_H_) && !defined(_ANSI_STDDEF_H) \
+ && !defined(__STDDEF_H__)) \
+ || defined(__need_wchar_t) || defined(__need_size_t) \
+ || defined(__need_ptrdiff_t) || defined(__need_NULL) \
+ || defined(__need_wint_t)
+
+/* Any one of these symbols __need_* means that GNU libc
+ wants us just to define one data type. So don't define
+ the symbols that indicate this file's entire job has been done. */
+#if (!defined(__need_wchar_t) && !defined(__need_size_t) \
+ && !defined(__need_ptrdiff_t) && !defined(__need_NULL) \
+ && !defined(__need_wint_t))
+#define _STDDEF_H
+#define _STDDEF_H_
+/* snaroff@next.com says the NeXT needs this. */
+#define _ANSI_STDDEF_H
+#endif
+
+#ifndef __sys_stdtypes_h
+/* This avoids lossage on SunOS but only if stdtypes.h comes first.
+ There's no way to win with the other order! Sun lossage. */
+
+#if defined(__NetBSD__)
+#include <machine/ansi.h>
+#endif
+
+#if defined (__FreeBSD__)
+#include <sys/_types.h>
+#endif
+
+#if defined(__NetBSD__)
+#if !defined(_SIZE_T_) && !defined(_BSD_SIZE_T_)
+#define _SIZE_T
+#endif
+#if !defined(_PTRDIFF_T_) && !defined(_BSD_PTRDIFF_T_)
+#define _PTRDIFF_T
+#endif
+/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_
+ instead of _WCHAR_T_. */
+#if !defined(_WCHAR_T_) && !defined(_BSD_WCHAR_T_)
+#ifndef _BSD_WCHAR_T_
+#define _WCHAR_T
+#endif
+#endif
+/* Undef _FOO_T_ if we are supposed to define foo_t. */
+#if defined (__need_ptrdiff_t) || defined (_STDDEF_H_)
+#undef _PTRDIFF_T_
+#undef _BSD_PTRDIFF_T_
+#endif
+#if defined (__need_size_t) || defined (_STDDEF_H_)
+#undef _SIZE_T_
+#undef _BSD_SIZE_T_
+#endif
+#if defined (__need_wchar_t) || defined (_STDDEF_H_)
+#undef _WCHAR_T_
+#undef _BSD_WCHAR_T_
+#endif
+#endif /* defined(__NetBSD__) */
+
+/* Sequent's header files use _PTRDIFF_T_ in some conflicting way.
+ Just ignore it. */
+#if defined (__sequent__) && defined (_PTRDIFF_T_)
+#undef _PTRDIFF_T_
+#endif
+
+/* On VxWorks, <type/vxTypesBase.h> may have defined macros like
+ _TYPE_size_t which will typedef size_t. fixincludes patched the
+ vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is
+ not defined, and so that defining this macro defines _GCC_SIZE_T.
+ If we find that the macros are still defined at this point, we must
+ invoke them so that the type is defined as expected. */
+#if defined (_TYPE_ptrdiff_t) && (defined (__need_ptrdiff_t) || defined (_STDDEF_H_))
+_TYPE_ptrdiff_t;
+#undef _TYPE_ptrdiff_t
+#endif
+#if defined (_TYPE_size_t) && (defined (__need_size_t) || defined (_STDDEF_H_))
+_TYPE_size_t;
+#undef _TYPE_size_t
+#endif
+#if defined (_TYPE_wchar_t) && (defined (__need_wchar_t) || defined (_STDDEF_H_))
+_TYPE_wchar_t;
+#undef _TYPE_wchar_t
+#endif
+
+/* In case nobody has defined these types, but we aren't running under
+   GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and
+   __WCHAR_TYPE__ have reasonable values. This can happen if parts of
+   GCC are compiled by an older compiler that actually includes
+   gstddef.h, such as collect2. */
+
+/* Signed type of difference of two pointers. */
+
+/* Define this type if we are doing the whole job,
+ or if we want this type in particular. */
+#if defined (_STDDEF_H) || defined (__need_ptrdiff_t)
+#ifndef _PTRDIFF_T /* in case <sys/types.h> has defined it. */
+#ifndef _T_PTRDIFF_
+#ifndef _T_PTRDIFF
+#ifndef __PTRDIFF_T
+#ifndef _PTRDIFF_T_
+#ifndef _BSD_PTRDIFF_T_
+#ifndef ___int_ptrdiff_t_h
+#ifndef _GCC_PTRDIFF_T
+#ifndef _PTRDIFF_T_DECLARED /* DragonFly */
+#ifndef __DEFINED_ptrdiff_t /* musl libc */
+#define _PTRDIFF_T
+#define _T_PTRDIFF_
+#define _T_PTRDIFF
+#define __PTRDIFF_T
+#define _PTRDIFF_T_
+#define _BSD_PTRDIFF_T_
+#define ___int_ptrdiff_t_h
+#define _GCC_PTRDIFF_T
+#define _PTRDIFF_T_DECLARED
+#define __DEFINED_ptrdiff_t
+#ifndef __PTRDIFF_TYPE__
+#define __PTRDIFF_TYPE__ long int
+#endif
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+#endif /* __DEFINED_ptrdiff_t */
+#endif /* _PTRDIFF_T_DECLARED */
+#endif /* _GCC_PTRDIFF_T */
+#endif /* ___int_ptrdiff_t_h */
+#endif /* _BSD_PTRDIFF_T_ */
+#endif /* _PTRDIFF_T_ */
+#endif /* __PTRDIFF_T */
+#endif /* _T_PTRDIFF */
+#endif /* _T_PTRDIFF_ */
+#endif /* _PTRDIFF_T */
+
+/* If this symbol has done its job, get rid of it. */
+#undef __need_ptrdiff_t
+
+#endif /* _STDDEF_H or __need_ptrdiff_t. */
+
+/* Unsigned type of `sizeof' something. */
+
+/* Define this type if we are doing the whole job,
+ or if we want this type in particular. */
+#if defined (_STDDEF_H) || defined (__need_size_t)
+#ifndef __size_t__ /* BeOS */
+#ifndef __SIZE_T__ /* Cray Unicos/Mk */
+#ifndef _SIZE_T /* in case <sys/types.h> has defined it. */
+#ifndef _SYS_SIZE_T_H
+#ifndef _T_SIZE_
+#ifndef _T_SIZE
+#ifndef __SIZE_T
+#ifndef _SIZE_T_
+#ifndef _BSD_SIZE_T_
+#ifndef _SIZE_T_DEFINED_
+#ifndef _SIZE_T_DEFINED
+#ifndef _BSD_SIZE_T_DEFINED_ /* Darwin */
+#ifndef _SIZE_T_DECLARED /* FreeBSD 5 */
+#ifndef __DEFINED_size_t /* musl libc */
+#ifndef ___int_size_t_h
+#ifndef _GCC_SIZE_T
+#ifndef _SIZET_
+#ifndef __size_t
+#define __size_t__ /* BeOS */
+#define __SIZE_T__ /* Cray Unicos/Mk */
+#define _SIZE_T
+#define _SYS_SIZE_T_H
+#define _T_SIZE_
+#define _T_SIZE
+#define __SIZE_T
+#define _SIZE_T_
+#define _BSD_SIZE_T_
+#define _SIZE_T_DEFINED_
+#define _SIZE_T_DEFINED
+#define _BSD_SIZE_T_DEFINED_ /* Darwin */
+#define _SIZE_T_DECLARED /* FreeBSD 5 */
+#define __DEFINED_size_t /* musl libc */
+#define ___int_size_t_h
+#define _GCC_SIZE_T
+#define _SIZET_
+#if defined (__FreeBSD__) \
+ || defined(__DragonFly__) \
+ || defined(__FreeBSD_kernel__) \
+ || defined(__VMS__)
+/* __size_t is a typedef, must not trash it. */
+#else
+#define __size_t
+#endif
+#ifndef __SIZE_TYPE__
+#define __SIZE_TYPE__ long unsigned int
+#endif
+#if !(defined (__GNUG__) && defined (size_t))
+typedef __SIZE_TYPE__ size_t;
+#ifdef __BEOS__
+typedef long ssize_t;
+#endif /* __BEOS__ */
+#endif /* !(defined (__GNUG__) && defined (size_t)) */
+#endif /* __size_t */
+#endif /* _SIZET_ */
+#endif /* _GCC_SIZE_T */
+#endif /* ___int_size_t_h */
+#endif /* __DEFINED_size_t */
+#endif /* _SIZE_T_DECLARED */
+#endif /* _BSD_SIZE_T_DEFINED_ */
+#endif /* _SIZE_T_DEFINED */
+#endif /* _SIZE_T_DEFINED_ */
+#endif /* _BSD_SIZE_T_ */
+#endif /* _SIZE_T_ */
+#endif /* __SIZE_T */
+#endif /* _T_SIZE */
+#endif /* _T_SIZE_ */
+#endif /* _SYS_SIZE_T_H */
+#endif /* _SIZE_T */
+#endif /* __SIZE_T__ */
+#endif /* __size_t__ */
+#undef __need_size_t
+#endif /* _STDDEF_H or __need_size_t. */
+
+
+/* Wide character type.
+ Locale-writers should change this as necessary to
+ be big enough to hold unique values not between 0 and 127,
+ and not (wchar_t) -1, for each defined multibyte character. */
+
+/* Define this type if we are doing the whole job,
+ or if we want this type in particular. */
+#if defined (_STDDEF_H) || defined (__need_wchar_t)
+#ifndef __wchar_t__ /* BeOS */
+#ifndef __WCHAR_T__ /* Cray Unicos/Mk */
+#ifndef _WCHAR_T
+#ifndef _T_WCHAR_
+#ifndef _T_WCHAR
+#ifndef __WCHAR_T
+#ifndef _WCHAR_T_
+#ifndef _BSD_WCHAR_T_
+#ifndef _BSD_WCHAR_T_DEFINED_ /* Darwin */
+#ifndef _BSD_RUNE_T_DEFINED_ /* Darwin */
+#ifndef _WCHAR_T_DECLARED /* FreeBSD 5 */
+#ifndef __DEFINED_wchar_t /* musl libc */
+#ifndef _WCHAR_T_DEFINED_
+#ifndef _WCHAR_T_DEFINED
+#ifndef _WCHAR_T_H
+#ifndef ___int_wchar_t_h
+#ifndef __INT_WCHAR_T_H
+#ifndef _GCC_WCHAR_T
+#define __wchar_t__ /* BeOS */
+#define __WCHAR_T__ /* Cray Unicos/Mk */
+#define _WCHAR_T
+#define _T_WCHAR_
+#define _T_WCHAR
+#define __WCHAR_T
+#define _WCHAR_T_
+#define _BSD_WCHAR_T_
+#define _WCHAR_T_DEFINED_
+#define _WCHAR_T_DEFINED
+#define _WCHAR_T_H
+#define ___int_wchar_t_h
+#define __INT_WCHAR_T_H
+#define _GCC_WCHAR_T
+#define _WCHAR_T_DECLARED
+#define __DEFINED_wchar_t
+
+/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_
+ instead of _WCHAR_T_, and _BSD_RUNE_T_ (which, unlike the other
+ symbols in the _FOO_T_ family, stays defined even after its
+ corresponding type is defined). If we define wchar_t, then we
+ must undef _WCHAR_T_; for BSD/386 1.1 (and perhaps others), if
+ we undef _WCHAR_T_, then we must also define rune_t, since
+ headers like runetype.h assume that if machine/ansi.h is included,
+ and _BSD_WCHAR_T_ is not defined, then rune_t is available.
+ machine/ansi.h says, "Note that _WCHAR_T_ and _RUNE_T_ must be of
+ the same type." */
+#ifdef _BSD_WCHAR_T_
+#undef _BSD_WCHAR_T_
+#ifdef _BSD_RUNE_T_
+#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE)
+typedef _BSD_RUNE_T_ rune_t;
+#define _BSD_WCHAR_T_DEFINED_
+#define _BSD_RUNE_T_DEFINED_ /* Darwin */
+#if defined (__FreeBSD__) && (__FreeBSD__ < 5)
+/* Why is this file so hard to maintain properly? In contrast to
+ the comment above regarding BSD/386 1.1, on FreeBSD for as long
+ as the symbol has existed, _BSD_RUNE_T_ must not stay defined or
+ redundant typedefs will occur when stdlib.h is included after this file. */
+#undef _BSD_RUNE_T_
+#endif
+#endif
+#endif
+#endif
+/* FreeBSD 5 can't be handled well using "traditional" logic above
+ since it no longer defines _BSD_RUNE_T_ yet still desires to export
+ rune_t in some cases... */
+#if defined (__FreeBSD__) && (__FreeBSD__ >= 5)
+#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE)
+#if __BSD_VISIBLE
+#ifndef _RUNE_T_DECLARED
+typedef __rune_t rune_t;
+#define _RUNE_T_DECLARED
+#endif
+#endif
+#endif
+#endif
+
+#ifndef __WCHAR_TYPE__
+#define __WCHAR_TYPE__ int
+#endif
+#ifndef __cplusplus
+typedef __WCHAR_TYPE__ wchar_t;
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif /* __DEFINED_wchar_t */
+#endif /* _WCHAR_T_DECLARED */
+#endif /* _BSD_RUNE_T_DEFINED_ */
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif /* __WCHAR_T__ */
+#endif /* __wchar_t__ */
+#undef __need_wchar_t
+#endif /* _STDDEF_H or __need_wchar_t. */
+
+#if defined (__need_wint_t)
+#ifndef _WINT_T
+#define _WINT_T
+
+#ifndef __WINT_TYPE__
+#define __WINT_TYPE__ unsigned int
+#endif
+typedef __WINT_TYPE__ wint_t;
+#endif
+#undef __need_wint_t
+#endif
+
+#if defined(__NetBSD__)
+/* The references to _GCC_PTRDIFF_T_, _GCC_SIZE_T_, and _GCC_WCHAR_T_
+ are probably typos and should be removed before 2.8 is released. */
+#ifdef _GCC_PTRDIFF_T_
+#undef _PTRDIFF_T_
+#undef _BSD_PTRDIFF_T_
+#endif
+#ifdef _GCC_SIZE_T_
+#undef _SIZE_T_
+#undef _BSD_SIZE_T_
+#endif
+#ifdef _GCC_WCHAR_T_
+#undef _WCHAR_T_
+#undef _BSD_WCHAR_T_
+#endif
+/* The following ones are the real ones. */
+#ifdef _GCC_PTRDIFF_T
+#undef _PTRDIFF_T_
+#undef _BSD_PTRDIFF_T_
+#endif
+#ifdef _GCC_SIZE_T
+#undef _SIZE_T_
+#undef _BSD_SIZE_T_
+#endif
+#ifdef _GCC_WCHAR_T
+#undef _WCHAR_T_
+#undef _BSD_WCHAR_T_
+#endif
+#endif /* __NetBSD__ */
+
+#endif /* __sys_stdtypes_h */
+
+/* A null pointer constant. */
+
+#if defined (_STDDEF_H) || defined (__need_NULL)
+#undef NULL /* in case <stdio.h> has defined it. */
+#ifdef __GNUG__
+#define NULL __null
+#else /* G++ */
+#ifndef __cplusplus
+#define NULL ((void *)0)
+#else /* C++ */
+#define NULL 0
+#endif /* C++ */
+#endif /* G++ */
+#endif /* NULL not defined and <stddef.h> or need NULL. */
+#undef __need_NULL
+
+#ifdef _STDDEF_H
+
+/* Offset of member MEMBER in a struct of type TYPE. */
+#undef offsetof /* in case a system header has defined it. */
+#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER)
+
+#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) \
+ || (defined(__cplusplus) && __cplusplus >= 201103L)
+#ifndef _GCC_MAX_ALIGN_T
+#define _GCC_MAX_ALIGN_T
+/* Type whose alignment is supported in every context and is at least
+ as great as that of any standard type not using alignment
+ specifiers. */
+typedef struct {
+ long long __max_align_ll __attribute__((__aligned__(__alignof__(long long))));
+ long double __max_align_ld __attribute__((__aligned__(__alignof__(long double))));
+ /* _Float128 is defined as a basic type, so max_align_t must be
+ sufficiently aligned for it. This code must work in C++, so we
+ use __float128 here; that is only available on some
+ architectures, but only on i386 is extra alignment needed for
+ __float128. */
+#ifdef __i386__
+ __float128 __max_align_f128 __attribute__((__aligned__(__alignof(__float128))));
+#endif
+} max_align_t;
+#endif
+#endif /* C11 or C++11. */
+
+#if defined(__cplusplus) && __cplusplus >= 201103L
+#ifndef _GXX_NULLPTR_T
+#define _GXX_NULLPTR_T
+ typedef decltype(nullptr) nullptr_t;
+#endif
+#endif /* C++11. */
+
+#if (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L)
+#ifndef _GCC_NULLPTR_T
+#define _GCC_NULLPTR_T
+ typedef __typeof__(nullptr) nullptr_t;
+/* ??? This doesn't define __STDC_VERSION_STDDEF_H__ yet. */
+#endif
+#endif /* C23. */
+
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+#define unreachable() (__builtin_unreachable ())
+#define __STDC_VERSION_STDDEF_H__ 202311L
+#endif
+
+#endif /* _STDDEF_H was defined this time */
+
+#endif /* !_STDDEF_H && !_STDDEF_H_ && !_ANSI_STDDEF_H && !__STDDEF_H__
+ || __need_XXX was not defined before */
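
For user code the load-bearing pieces of this header are offsetof, which expands to __builtin_offsetof and is therefore an integer constant expression, and max_align_t. A short sketch of both (C11 or later; the struct is illustrative):

#include <stddef.h>

struct packet
{
  char tag;
  long payload;
};

/* Usable in static assertions because it is a constant expression. */
_Static_assert (offsetof (struct packet, tag) == 0,
                "tag must come first");

/* An arena aligned strictly enough for any standard type. */
static _Alignas (max_align_t) unsigned char arena[1024];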
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/stdfix.h b/lib/gcc/arm-none-eabi/13.2.1/include/stdfix.h
new file mode 100644
index 0000000..d501418
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/stdfix.h
@@ -0,0 +1,204 @@
+/* Copyright (C) 2007-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* ISO/IEC JTC1 SC22 WG14 N1169
+ * Date: 2006-04-04
+ * ISO/IEC TR 18037
+ * Programming languages - C - Extensions to support embedded processors
+ */
+
+#ifndef _STDFIX_H
+#define _STDFIX_H
+
+/* 7.18a.1 Introduction. */
+
+#undef fract
+#undef accum
+#undef sat
+#define fract _Fract
+#define accum _Accum
+#define sat _Sat
+
+/* 7.18a.3 Precision macros. */
+
+#undef SFRACT_FBIT
+#undef SFRACT_MIN
+#undef SFRACT_MAX
+#undef SFRACT_EPSILON
+#define SFRACT_FBIT __SFRACT_FBIT__
+#define SFRACT_MIN __SFRACT_MIN__
+#define SFRACT_MAX __SFRACT_MAX__
+#define SFRACT_EPSILON __SFRACT_EPSILON__
+
+#undef USFRACT_FBIT
+#undef USFRACT_MIN
+#undef USFRACT_MAX
+#undef USFRACT_EPSILON
+#define USFRACT_FBIT __USFRACT_FBIT__
+#define USFRACT_MIN __USFRACT_MIN__ /* GCC extension. */
+#define USFRACT_MAX __USFRACT_MAX__
+#define USFRACT_EPSILON __USFRACT_EPSILON__
+
+#undef FRACT_FBIT
+#undef FRACT_MIN
+#undef FRACT_MAX
+#undef FRACT_EPSILON
+#define FRACT_FBIT __FRACT_FBIT__
+#define FRACT_MIN __FRACT_MIN__
+#define FRACT_MAX __FRACT_MAX__
+#define FRACT_EPSILON __FRACT_EPSILON__
+
+#undef UFRACT_FBIT
+#undef UFRACT_MIN
+#undef UFRACT_MAX
+#undef UFRACT_EPSILON
+#define UFRACT_FBIT __UFRACT_FBIT__
+#define UFRACT_MIN __UFRACT_MIN__ /* GCC extension. */
+#define UFRACT_MAX __UFRACT_MAX__
+#define UFRACT_EPSILON __UFRACT_EPSILON__
+
+#undef LFRACT_FBIT
+#undef LFRACT_MIN
+#undef LFRACT_MAX
+#undef LFRACT_EPSILON
+#define LFRACT_FBIT __LFRACT_FBIT__
+#define LFRACT_MIN __LFRACT_MIN__
+#define LFRACT_MAX __LFRACT_MAX__
+#define LFRACT_EPSILON __LFRACT_EPSILON__
+
+#undef ULFRACT_FBIT
+#undef ULFRACT_MIN
+#undef ULFRACT_MAX
+#undef ULFRACT_EPSILON
+#define ULFRACT_FBIT __ULFRACT_FBIT__
+#define ULFRACT_MIN __ULFRACT_MIN__ /* GCC extension. */
+#define ULFRACT_MAX __ULFRACT_MAX__
+#define ULFRACT_EPSILON __ULFRACT_EPSILON__
+
+#undef LLFRACT_FBIT
+#undef LLFRACT_MIN
+#undef LLFRACT_MAX
+#undef LLFRACT_EPSILON
+#define LLFRACT_FBIT __LLFRACT_FBIT__ /* GCC extension. */
+#define LLFRACT_MIN __LLFRACT_MIN__ /* GCC extension. */
+#define LLFRACT_MAX __LLFRACT_MAX__ /* GCC extension. */
+#define LLFRACT_EPSILON __LLFRACT_EPSILON__ /* GCC extension. */
+
+#undef ULLFRACT_FBIT
+#undef ULLFRACT_MIN
+#undef ULLFRACT_MAX
+#undef ULLFRACT_EPSILON
+#define ULLFRACT_FBIT __ULLFRACT_FBIT__ /* GCC extension. */
+#define ULLFRACT_MIN __ULLFRACT_MIN__ /* GCC extension. */
+#define ULLFRACT_MAX __ULLFRACT_MAX__ /* GCC extension. */
+#define ULLFRACT_EPSILON __ULLFRACT_EPSILON__ /* GCC extension. */
+
+#undef SACCUM_FBIT
+#undef SACCUM_IBIT
+#undef SACCUM_MIN
+#undef SACCUM_MAX
+#undef SACCUM_EPSILON
+#define SACCUM_FBIT __SACCUM_FBIT__
+#define SACCUM_IBIT __SACCUM_IBIT__
+#define SACCUM_MIN __SACCUM_MIN__
+#define SACCUM_MAX __SACCUM_MAX__
+#define SACCUM_EPSILON __SACCUM_EPSILON__
+
+#undef USACCUM_FBIT
+#undef USACCUM_IBIT
+#undef USACCUM_MIN
+#undef USACCUM_MAX
+#undef USACCUM_EPSILON
+#define USACCUM_FBIT __USACCUM_FBIT__
+#define USACCUM_IBIT __USACCUM_IBIT__
+#define USACCUM_MIN __USACCUM_MIN__ /* GCC extension. */
+#define USACCUM_MAX __USACCUM_MAX__
+#define USACCUM_EPSILON __USACCUM_EPSILON__
+
+#undef ACCUM_FBIT
+#undef ACCUM_IBIT
+#undef ACCUM_MIN
+#undef ACCUM_MAX
+#undef ACCUM_EPSILON
+#define ACCUM_FBIT __ACCUM_FBIT__
+#define ACCUM_IBIT __ACCUM_IBIT__
+#define ACCUM_MIN __ACCUM_MIN__
+#define ACCUM_MAX __ACCUM_MAX__
+#define ACCUM_EPSILON __ACCUM_EPSILON__
+
+#undef UACCUM_FBIT
+#undef UACCUM_IBIT
+#undef UACCUM_MIN
+#undef UACCUM_MAX
+#undef UACCUM_EPSILON
+#define UACCUM_FBIT __UACCUM_FBIT__
+#define UACCUM_IBIT __UACCUM_IBIT__
+#define UACCUM_MIN __UACCUM_MIN__ /* GCC extension. */
+#define UACCUM_MAX __UACCUM_MAX__
+#define UACCUM_EPSILON __UACCUM_EPSILON__
+
+#undef LACCUM_FBIT
+#undef LACCUM_IBIT
+#undef LACCUM_MIN
+#undef LACCUM_MAX
+#undef LACCUM_EPSILON
+#define LACCUM_FBIT __LACCUM_FBIT__
+#define LACCUM_IBIT __LACCUM_IBIT__
+#define LACCUM_MIN __LACCUM_MIN__
+#define LACCUM_MAX __LACCUM_MAX__
+#define LACCUM_EPSILON __LACCUM_EPSILON__
+
+#undef ULACCUM_FBIT
+#undef ULACCUM_IBIT
+#undef ULACCUM_MIN
+#undef ULACCUM_MAX
+#undef ULACCUM_EPSILON
+#define ULACCUM_FBIT __ULACCUM_FBIT__
+#define ULACCUM_IBIT __ULACCUM_IBIT__
+#define ULACCUM_MIN __ULACCUM_MIN__ /* GCC extension. */
+#define ULACCUM_MAX __ULACCUM_MAX__
+#define ULACCUM_EPSILON __ULACCUM_EPSILON__
+
+#undef LLACCUM_FBIT
+#undef LLACCUM_IBIT
+#undef LLACCUM_MIN
+#undef LLACCUM_MAX
+#undef LLACCUM_EPSILON
+#define LLACCUM_FBIT __LLACCUM_FBIT__ /* GCC extension. */
+#define LLACCUM_IBIT __LLACCUM_IBIT__ /* GCC extension. */
+#define LLACCUM_MIN __LLACCUM_MIN__ /* GCC extension. */
+#define LLACCUM_MAX __LLACCUM_MAX__ /* GCC extension. */
+#define LLACCUM_EPSILON __LLACCUM_EPSILON__ /* GCC extension. */
+
+#undef ULLACCUM_FBIT
+#undef ULLACCUM_IBIT
+#undef ULLACCUM_MIN
+#undef ULLACCUM_MAX
+#undef ULLACCUM_EPSILON
+#define ULLACCUM_FBIT __ULLACCUM_FBIT__ /* GCC extension. */
+#define ULLACCUM_IBIT __ULLACCUM_IBIT__ /* GCC extension. */
+#define ULLACCUM_MIN __ULLACCUM_MIN__ /* GCC extension. */
+#define ULLACCUM_MAX __ULLACCUM_MAX__ /* GCC extension. */
+#define ULLACCUM_EPSILON __ULLACCUM_EPSILON__ /* GCC extension. */
+
+#endif /* _STDFIX_H */
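
These macros simply map the TR 18037 spellings onto GCC's predefined fixed-point limits. A sketch of how the types read in source; fixed-point support is a GCC extension enabled only on some targets, so whether a given arm-none-eabi multilib accepts this is an assumption here:

#include <stdfix.h>

/* 'r' is the _Fract literal suffix, 'k' the _Accum suffix. */
static fract half = 0.5r;       /* pure fraction in [-1, 1)   */
static accum gain = 1.25k;      /* integer bits plus fraction */

/* sat types clamp to FRACT_MIN/FRACT_MAX instead of wrapping. */
static sat fract clamped = 0.75r;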
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/stdint-gcc.h b/lib/gcc/arm-none-eabi/13.2.1/include/stdint-gcc.h
new file mode 100644
index 0000000..f52a736
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/stdint-gcc.h
@@ -0,0 +1,369 @@
+/* Copyright (C) 2008-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.18 Integer types <stdint.h>
+ */
+
+#ifndef _GCC_STDINT_H
+#define _GCC_STDINT_H
+
+/* 7.8.1.1 Exact-width integer types */
+
+#ifdef __INT8_TYPE__
+typedef __INT8_TYPE__ int8_t;
+#endif
+#ifdef __INT16_TYPE__
+typedef __INT16_TYPE__ int16_t;
+#endif
+#ifdef __INT32_TYPE__
+typedef __INT32_TYPE__ int32_t;
+#endif
+#ifdef __INT64_TYPE__
+typedef __INT64_TYPE__ int64_t;
+#endif
+#ifdef __UINT8_TYPE__
+typedef __UINT8_TYPE__ uint8_t;
+#endif
+#ifdef __UINT16_TYPE__
+typedef __UINT16_TYPE__ uint16_t;
+#endif
+#ifdef __UINT32_TYPE__
+typedef __UINT32_TYPE__ uint32_t;
+#endif
+#ifdef __UINT64_TYPE__
+typedef __UINT64_TYPE__ uint64_t;
+#endif
+
+/* 7.8.1.2 Minimum-width integer types */
+
+typedef __INT_LEAST8_TYPE__ int_least8_t;
+typedef __INT_LEAST16_TYPE__ int_least16_t;
+typedef __INT_LEAST32_TYPE__ int_least32_t;
+typedef __INT_LEAST64_TYPE__ int_least64_t;
+typedef __UINT_LEAST8_TYPE__ uint_least8_t;
+typedef __UINT_LEAST16_TYPE__ uint_least16_t;
+typedef __UINT_LEAST32_TYPE__ uint_least32_t;
+typedef __UINT_LEAST64_TYPE__ uint_least64_t;
+
+/* 7.8.1.3 Fastest minimum-width integer types */
+
+typedef __INT_FAST8_TYPE__ int_fast8_t;
+typedef __INT_FAST16_TYPE__ int_fast16_t;
+typedef __INT_FAST32_TYPE__ int_fast32_t;
+typedef __INT_FAST64_TYPE__ int_fast64_t;
+typedef __UINT_FAST8_TYPE__ uint_fast8_t;
+typedef __UINT_FAST16_TYPE__ uint_fast16_t;
+typedef __UINT_FAST32_TYPE__ uint_fast32_t;
+typedef __UINT_FAST64_TYPE__ uint_fast64_t;
+
+/* 7.8.1.4 Integer types capable of holding object pointers */
+
+#ifdef __INTPTR_TYPE__
+typedef __INTPTR_TYPE__ intptr_t;
+#endif
+#ifdef __UINTPTR_TYPE__
+typedef __UINTPTR_TYPE__ uintptr_t;
+#endif
+
+/* 7.8.1.5 Greatest-width integer types */
+
+typedef __INTMAX_TYPE__ intmax_t;
+typedef __UINTMAX_TYPE__ uintmax_t;
+
+#if (!defined __cplusplus || __cplusplus >= 201103L \
+ || defined __STDC_LIMIT_MACROS)
+
+/* 7.18.2 Limits of specified-width integer types */
+
+#ifdef __INT8_MAX__
+# undef INT8_MAX
+# define INT8_MAX __INT8_MAX__
+# undef INT8_MIN
+# define INT8_MIN (-INT8_MAX - 1)
+#endif
+#ifdef __UINT8_MAX__
+# undef UINT8_MAX
+# define UINT8_MAX __UINT8_MAX__
+#endif
+#ifdef __INT16_MAX__
+# undef INT16_MAX
+# define INT16_MAX __INT16_MAX__
+# undef INT16_MIN
+# define INT16_MIN (-INT16_MAX - 1)
+#endif
+#ifdef __UINT16_MAX__
+# undef UINT16_MAX
+# define UINT16_MAX __UINT16_MAX__
+#endif
+#ifdef __INT32_MAX__
+# undef INT32_MAX
+# define INT32_MAX __INT32_MAX__
+# undef INT32_MIN
+# define INT32_MIN (-INT32_MAX - 1)
+#endif
+#ifdef __UINT32_MAX__
+# undef UINT32_MAX
+# define UINT32_MAX __UINT32_MAX__
+#endif
+#ifdef __INT64_MAX__
+# undef INT64_MAX
+# define INT64_MAX __INT64_MAX__
+# undef INT64_MIN
+# define INT64_MIN (-INT64_MAX - 1)
+#endif
+#ifdef __UINT64_MAX__
+# undef UINT64_MAX
+# define UINT64_MAX __UINT64_MAX__
+#endif
+
+#undef INT_LEAST8_MAX
+#define INT_LEAST8_MAX __INT_LEAST8_MAX__
+#undef INT_LEAST8_MIN
+#define INT_LEAST8_MIN (-INT_LEAST8_MAX - 1)
+#undef UINT_LEAST8_MAX
+#define UINT_LEAST8_MAX __UINT_LEAST8_MAX__
+#undef INT_LEAST16_MAX
+#define INT_LEAST16_MAX __INT_LEAST16_MAX__
+#undef INT_LEAST16_MIN
+#define INT_LEAST16_MIN (-INT_LEAST16_MAX - 1)
+#undef UINT_LEAST16_MAX
+#define UINT_LEAST16_MAX __UINT_LEAST16_MAX__
+#undef INT_LEAST32_MAX
+#define INT_LEAST32_MAX __INT_LEAST32_MAX__
+#undef INT_LEAST32_MIN
+#define INT_LEAST32_MIN (-INT_LEAST32_MAX - 1)
+#undef UINT_LEAST32_MAX
+#define UINT_LEAST32_MAX __UINT_LEAST32_MAX__
+#undef INT_LEAST64_MAX
+#define INT_LEAST64_MAX __INT_LEAST64_MAX__
+#undef INT_LEAST64_MIN
+#define INT_LEAST64_MIN (-INT_LEAST64_MAX - 1)
+#undef UINT_LEAST64_MAX
+#define UINT_LEAST64_MAX __UINT_LEAST64_MAX__
+
+#undef INT_FAST8_MAX
+#define INT_FAST8_MAX __INT_FAST8_MAX__
+#undef INT_FAST8_MIN
+#define INT_FAST8_MIN (-INT_FAST8_MAX - 1)
+#undef UINT_FAST8_MAX
+#define UINT_FAST8_MAX __UINT_FAST8_MAX__
+#undef INT_FAST16_MAX
+#define INT_FAST16_MAX __INT_FAST16_MAX__
+#undef INT_FAST16_MIN
+#define INT_FAST16_MIN (-INT_FAST16_MAX - 1)
+#undef UINT_FAST16_MAX
+#define UINT_FAST16_MAX __UINT_FAST16_MAX__
+#undef INT_FAST32_MAX
+#define INT_FAST32_MAX __INT_FAST32_MAX__
+#undef INT_FAST32_MIN
+#define INT_FAST32_MIN (-INT_FAST32_MAX - 1)
+#undef UINT_FAST32_MAX
+#define UINT_FAST32_MAX __UINT_FAST32_MAX__
+#undef INT_FAST64_MAX
+#define INT_FAST64_MAX __INT_FAST64_MAX__
+#undef INT_FAST64_MIN
+#define INT_FAST64_MIN (-INT_FAST64_MAX - 1)
+#undef UINT_FAST64_MAX
+#define UINT_FAST64_MAX __UINT_FAST64_MAX__
+
+#ifdef __INTPTR_MAX__
+# undef INTPTR_MAX
+# define INTPTR_MAX __INTPTR_MAX__
+# undef INTPTR_MIN
+# define INTPTR_MIN (-INTPTR_MAX - 1)
+#endif
+#ifdef __UINTPTR_MAX__
+# undef UINTPTR_MAX
+# define UINTPTR_MAX __UINTPTR_MAX__
+#endif
+
+#undef INTMAX_MAX
+#define INTMAX_MAX __INTMAX_MAX__
+#undef INTMAX_MIN
+#define INTMAX_MIN (-INTMAX_MAX - 1)
+#undef UINTMAX_MAX
+#define UINTMAX_MAX __UINTMAX_MAX__
+
+/* 7.18.3 Limits of other integer types */
+
+#undef PTRDIFF_MAX
+#define PTRDIFF_MAX __PTRDIFF_MAX__
+#undef PTRDIFF_MIN
+#define PTRDIFF_MIN (-PTRDIFF_MAX - 1)
+
+#undef SIG_ATOMIC_MAX
+#define SIG_ATOMIC_MAX __SIG_ATOMIC_MAX__
+#undef SIG_ATOMIC_MIN
+#define SIG_ATOMIC_MIN __SIG_ATOMIC_MIN__
+
+#undef SIZE_MAX
+#define SIZE_MAX __SIZE_MAX__
+
+#undef WCHAR_MAX
+#define WCHAR_MAX __WCHAR_MAX__
+#undef WCHAR_MIN
+#define WCHAR_MIN __WCHAR_MIN__
+
+#undef WINT_MAX
+#define WINT_MAX __WINT_MAX__
+#undef WINT_MIN
+#define WINT_MIN __WINT_MIN__
+
+#endif /* (!defined __cplusplus || __cplusplus >= 201103L
+ || defined __STDC_LIMIT_MACROS) */
+
+#if (!defined __cplusplus || __cplusplus >= 201103L \
+ || defined __STDC_CONSTANT_MACROS)
+
+#undef INT8_C
+#define INT8_C(c) __INT8_C(c)
+#undef INT16_C
+#define INT16_C(c) __INT16_C(c)
+#undef INT32_C
+#define INT32_C(c) __INT32_C(c)
+#undef INT64_C
+#define INT64_C(c) __INT64_C(c)
+#undef UINT8_C
+#define UINT8_C(c) __UINT8_C(c)
+#undef UINT16_C
+#define UINT16_C(c) __UINT16_C(c)
+#undef UINT32_C
+#define UINT32_C(c) __UINT32_C(c)
+#undef UINT64_C
+#define UINT64_C(c) __UINT64_C(c)
+#undef INTMAX_C
+#define INTMAX_C(c) __INTMAX_C(c)
+#undef UINTMAX_C
+#define UINTMAX_C(c) __UINTMAX_C(c)
+
+#endif /* (!defined __cplusplus || __cplusplus >= 201103L
+ || defined __STDC_CONSTANT_MACROS) */
+
+#if (defined __STDC_WANT_IEC_60559_BFP_EXT__ \
+ || (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L))
+/* TS 18661-1 / C2X widths of integer types. */
+
+#ifdef __INT8_TYPE__
+# undef INT8_WIDTH
+# define INT8_WIDTH 8
+#endif
+#ifdef __UINT8_TYPE__
+# undef UINT8_WIDTH
+# define UINT8_WIDTH 8
+#endif
+#ifdef __INT16_TYPE__
+# undef INT16_WIDTH
+# define INT16_WIDTH 16
+#endif
+#ifdef __UINT16_TYPE__
+# undef UINT16_WIDTH
+# define UINT16_WIDTH 16
+#endif
+#ifdef __INT32_TYPE__
+# undef INT32_WIDTH
+# define INT32_WIDTH 32
+#endif
+#ifdef __UINT32_TYPE__
+# undef UINT32_WIDTH
+# define UINT32_WIDTH 32
+#endif
+#ifdef __INT64_TYPE__
+# undef INT64_WIDTH
+# define INT64_WIDTH 64
+#endif
+#ifdef __UINT64_TYPE__
+# undef UINT64_WIDTH
+# define UINT64_WIDTH 64
+#endif
+
+#undef INT_LEAST8_WIDTH
+#define INT_LEAST8_WIDTH __INT_LEAST8_WIDTH__
+#undef UINT_LEAST8_WIDTH
+#define UINT_LEAST8_WIDTH __INT_LEAST8_WIDTH__
+#undef INT_LEAST16_WIDTH
+#define INT_LEAST16_WIDTH __INT_LEAST16_WIDTH__
+#undef UINT_LEAST16_WIDTH
+#define UINT_LEAST16_WIDTH __INT_LEAST16_WIDTH__
+#undef INT_LEAST32_WIDTH
+#define INT_LEAST32_WIDTH __INT_LEAST32_WIDTH__
+#undef UINT_LEAST32_WIDTH
+#define UINT_LEAST32_WIDTH __INT_LEAST32_WIDTH__
+#undef INT_LEAST64_WIDTH
+#define INT_LEAST64_WIDTH __INT_LEAST64_WIDTH__
+#undef UINT_LEAST64_WIDTH
+#define UINT_LEAST64_WIDTH __INT_LEAST64_WIDTH__
+
+#undef INT_FAST8_WIDTH
+#define INT_FAST8_WIDTH __INT_FAST8_WIDTH__
+#undef UINT_FAST8_WIDTH
+#define UINT_FAST8_WIDTH __INT_FAST8_WIDTH__
+#undef INT_FAST16_WIDTH
+#define INT_FAST16_WIDTH __INT_FAST16_WIDTH__
+#undef UINT_FAST16_WIDTH
+#define UINT_FAST16_WIDTH __INT_FAST16_WIDTH__
+#undef INT_FAST32_WIDTH
+#define INT_FAST32_WIDTH __INT_FAST32_WIDTH__
+#undef UINT_FAST32_WIDTH
+#define UINT_FAST32_WIDTH __INT_FAST32_WIDTH__
+#undef INT_FAST64_WIDTH
+#define INT_FAST64_WIDTH __INT_FAST64_WIDTH__
+#undef UINT_FAST64_WIDTH
+#define UINT_FAST64_WIDTH __INT_FAST64_WIDTH__
+
+#ifdef __INTPTR_TYPE__
+# undef INTPTR_WIDTH
+# define INTPTR_WIDTH __INTPTR_WIDTH__
+#endif
+#ifdef __UINTPTR_TYPE__
+# undef UINTPTR_WIDTH
+# define UINTPTR_WIDTH __INTPTR_WIDTH__
+#endif
+
+#undef INTMAX_WIDTH
+#define INTMAX_WIDTH __INTMAX_WIDTH__
+#undef UINTMAX_WIDTH
+#define UINTMAX_WIDTH __INTMAX_WIDTH__
+
+#undef PTRDIFF_WIDTH
+#define PTRDIFF_WIDTH __PTRDIFF_WIDTH__
+
+#undef SIG_ATOMIC_WIDTH
+#define SIG_ATOMIC_WIDTH __SIG_ATOMIC_WIDTH__
+
+#undef SIZE_WIDTH
+#define SIZE_WIDTH __SIZE_WIDTH__
+
+#undef WCHAR_WIDTH
+#define WCHAR_WIDTH __WCHAR_WIDTH__
+
+#undef WINT_WIDTH
+#define WINT_WIDTH __WINT_WIDTH__
+
+#endif
+
+#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
+#define __STDC_VERSION_STDINT_H__ 202311L
+#endif
+
+#endif /* _GCC_STDINT_H */
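
Everything in this header is derived from macros the compiler predefines for the target, which is what keeps the limits, the constant macros, and the typedefs mutually consistent. A small sketch (the *_WIDTH names need TS 18661-1 or C23, hence the guard):

#include <stdint.h>

/* INT64_C attaches whatever suffix the target's int64_t needs. */
static const int64_t ns_per_sec = INT64_C (1000000000);

_Static_assert (INT32_MIN == -INT32_MAX - 1,
                "limits are defined in two's-complement form");

#ifdef INT32_WIDTH
_Static_assert (INT32_WIDTH == 32, "exact-width type is exact");
#endif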
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/stdint.h b/lib/gcc/arm-none-eabi/13.2.1/include/stdint.h
new file mode 100644
index 0000000..83b6f70
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/stdint.h
@@ -0,0 +1,14 @@
+#ifndef _GCC_WRAP_STDINT_H
+#if __STDC_HOSTED__
+# if defined __cplusplus && __cplusplus >= 201103L
+# undef __STDC_LIMIT_MACROS
+# define __STDC_LIMIT_MACROS
+# undef __STDC_CONSTANT_MACROS
+# define __STDC_CONSTANT_MACROS
+# endif
+# include_next <stdint.h>
+#else
+# include "stdint-gcc.h"
+#endif
+#define _GCC_WRAP_STDINT_H
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/stdnoreturn.h b/lib/gcc/arm-none-eabi/13.2.1/include/stdnoreturn.h
new file mode 100644
index 0000000..4a3dc7f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/stdnoreturn.h
@@ -0,0 +1,35 @@
+/* Copyright (C) 2011-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* ISO C1X: 7.23 _Noreturn <stdnoreturn.h>. */
+
+#ifndef _STDNORETURN_H
+#define _STDNORETURN_H
+
+#ifndef __cplusplus
+
+#define noreturn _Noreturn
+
+#endif
+
+#endif /* stdnoreturn.h */
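
The header is nothing more than that one macro, giving C11 code the convenience spelling for the _Noreturn function specifier; used like so:

#include <stdlib.h>
#include <stdnoreturn.h>

noreturn void
fatal (int code)
{
  exit (code);   /* control must never return from this function */
}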
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/syslimits.h b/lib/gcc/arm-none-eabi/13.2.1/include/syslimits.h
new file mode 100644
index 0000000..a362802
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/syslimits.h
@@ -0,0 +1,8 @@
+/* syslimits.h stands for the system's own limits.h file.
+   If we can use it unmodified, then we install this text.
+   If fixincludes fixes it, then the fixed version is installed
+   instead of this text. */
+
+#define _GCC_NEXT_LIMITS_H /* tell gcc's limits.h to recurse */
+#include_next <limits.h>
+#undef _GCC_NEXT_LIMITS_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/tgmath.h b/lib/gcc/arm-none-eabi/13.2.1/include/tgmath.h
new file mode 100644
index 0000000..598d1ad
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/tgmath.h
@@ -0,0 +1,127 @@
+/* Copyright (C) 2004-2023 Free Software Foundation, Inc.
+ Contributed by Apple, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.22 Type-generic math <tgmath.h>
+ */
+
+#ifndef _TGMATH_H
+#define _TGMATH_H
+
+#include <math.h>
+
+#ifndef __cplusplus
+#include <complex.h>
+
+/* Naming convention: generic macros are defined using
+   __TGMATH_CPLX*, __TGMATH_REAL*, and __TGMATH_CPLX_ONLY. _CPLX
+   means the generic argument(s) may be real or complex, _REAL means
+   real only, _CPLX_ONLY means complex only. If there is no suffix, we
+   are defining a function of one argument. If the suffix is _n,
+   it is a function of n arguments. We only define these macros for
+   values of n that are needed. */
+
+#define __TGMATH_CPLX(z,R,C) \
+ __builtin_tgmath (R##f, R, R##l, C##f, C, C##l, (z))
+
+#define __TGMATH_CPLX_2(z1,z2,R,C) \
+ __builtin_tgmath (R##f, R, R##l, C##f, C, C##l, (z1), (z2))
+
+#define __TGMATH_REAL(x,R) \
+ __builtin_tgmath (R##f, R, R##l, (x))
+#define __TGMATH_REAL_2(x,y,R) \
+ __builtin_tgmath (R##f, R, R##l, (x), (y))
+#define __TGMATH_REAL_3(x,y,z,R) \
+ __builtin_tgmath (R##f, R, R##l, (x), (y), (z))
+#define __TGMATH_CPLX_ONLY(z,C) \
+ __builtin_tgmath (C##f, C, C##l, (z))
+
+/* Functions defined in both <math.h> and <complex.h> (7.22p4) */
+#define acos(z) __TGMATH_CPLX(z, acos, cacos)
+#define asin(z) __TGMATH_CPLX(z, asin, casin)
+#define atan(z) __TGMATH_CPLX(z, atan, catan)
+#define acosh(z) __TGMATH_CPLX(z, acosh, cacosh)
+#define asinh(z) __TGMATH_CPLX(z, asinh, casinh)
+#define atanh(z) __TGMATH_CPLX(z, atanh, catanh)
+#define cos(z) __TGMATH_CPLX(z, cos, ccos)
+#define sin(z) __TGMATH_CPLX(z, sin, csin)
+#define tan(z) __TGMATH_CPLX(z, tan, ctan)
+#define cosh(z) __TGMATH_CPLX(z, cosh, ccosh)
+#define sinh(z) __TGMATH_CPLX(z, sinh, csinh)
+#define tanh(z) __TGMATH_CPLX(z, tanh, ctanh)
+#define exp(z) __TGMATH_CPLX(z, exp, cexp)
+#define log(z) __TGMATH_CPLX(z, log, clog)
+#define pow(z1,z2) __TGMATH_CPLX_2(z1, z2, pow, cpow)
+#define sqrt(z) __TGMATH_CPLX(z, sqrt, csqrt)
+#define fabs(z) __TGMATH_CPLX(z, fabs, cabs)
+
+/* Functions defined in <math.h> only (7.22p5) */
+#define atan2(x,y) __TGMATH_REAL_2(x, y, atan2)
+#define cbrt(x) __TGMATH_REAL(x, cbrt)
+#define ceil(x) __TGMATH_REAL(x, ceil)
+#define copysign(x,y) __TGMATH_REAL_2(x, y, copysign)
+#define erf(x) __TGMATH_REAL(x, erf)
+#define erfc(x) __TGMATH_REAL(x, erfc)
+#define exp2(x) __TGMATH_REAL(x, exp2)
+#define expm1(x) __TGMATH_REAL(x, expm1)
+#define fdim(x,y) __TGMATH_REAL_2(x, y, fdim)
+#define floor(x) __TGMATH_REAL(x, floor)
+#define fma(x,y,z) __TGMATH_REAL_3(x, y, z, fma)
+#define fmax(x,y) __TGMATH_REAL_2(x, y, fmax)
+#define fmin(x,y) __TGMATH_REAL_2(x, y, fmin)
+#define fmod(x,y) __TGMATH_REAL_2(x, y, fmod)
+#define frexp(x,y) __TGMATH_REAL_2(x, y, frexp)
+#define hypot(x,y) __TGMATH_REAL_2(x, y, hypot)
+#define ilogb(x) __TGMATH_REAL(x, ilogb)
+#define ldexp(x,y) __TGMATH_REAL_2(x, y, ldexp)
+#define lgamma(x) __TGMATH_REAL(x, lgamma)
+#define llrint(x) __TGMATH_REAL(x, llrint)
+#define llround(x) __TGMATH_REAL(x, llround)
+#define log10(x) __TGMATH_REAL(x, log10)
+#define log1p(x) __TGMATH_REAL(x, log1p)
+#define log2(x) __TGMATH_REAL(x, log2)
+#define logb(x) __TGMATH_REAL(x, logb)
+#define lrint(x) __TGMATH_REAL(x, lrint)
+#define lround(x) __TGMATH_REAL(x, lround)
+#define nearbyint(x) __TGMATH_REAL(x, nearbyint)
+#define nextafter(x,y) __TGMATH_REAL_2(x, y, nextafter)
+#define nexttoward(x,y) __TGMATH_REAL_2(x, y, nexttoward)
+#define remainder(x,y) __TGMATH_REAL_2(x, y, remainder)
+#define remquo(x,y,z) __TGMATH_REAL_3(x, y, z, remquo)
+#define rint(x) __TGMATH_REAL(x, rint)
+#define round(x) __TGMATH_REAL(x, round)
+#define scalbn(x,y) __TGMATH_REAL_2(x, y, scalbn)
+#define scalbln(x,y) __TGMATH_REAL_2(x, y, scalbln)
+#define tgamma(x) __TGMATH_REAL(x, tgamma)
+#define trunc(x) __TGMATH_REAL(x, trunc)
+
+/* Functions defined in <complex.h> only (7.22p6) */
+#define carg(z) __TGMATH_CPLX_ONLY(z, carg)
+#define cimag(z) __TGMATH_CPLX_ONLY(z, cimag)
+#define conj(z) __TGMATH_CPLX_ONLY(z, conj)
+#define cproj(z) __TGMATH_CPLX_ONLY(z, cproj)
+#define creal(z) __TGMATH_CPLX_ONLY(z, creal)
+
+#endif /* __cplusplus */
+#endif /* _TGMATH_H */
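
With __builtin_tgmath performing the dispatch, one spelling resolves to the float, double, long double, or complex variant according to the argument types. A sketch (C only, since the generic macros are disabled under C++):

#include <tgmath.h>

void
demo (void)
{
  float f = 0.5f;
  double _Complex z = 1.0 + 1.0 * I;   /* I comes via <complex.h> */

  float rf = sin (f);                  /* dispatches to sinf */
  double _Complex rz = sin (z);        /* dispatches to csin */

  (void) rf;
  (void) rz;
}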
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/unwind-arm-common.h b/lib/gcc/arm-none-eabi/13.2.1/include/unwind-arm-common.h
new file mode 100644
index 0000000..3ee0a6b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/unwind-arm-common.h
@@ -0,0 +1,251 @@
+/* Header file for the ARM EABI and C6X unwinders
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Paul Brook
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Language-independent unwinder header public defines. This contains
+   both ABI-defined objects and GNU support routines. */
+
+#ifndef UNWIND_ARM_COMMON_H
+#define UNWIND_ARM_COMMON_H
+
+#define __ARM_EABI_UNWINDER__ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ typedef unsigned _Unwind_Word __attribute__((__mode__(__word__)));
+ typedef signed _Unwind_Sword __attribute__((__mode__(__word__)));
+ typedef unsigned _Unwind_Ptr __attribute__((__mode__(__pointer__)));
+ typedef unsigned _Unwind_Internal_Ptr __attribute__((__mode__(__pointer__)));
+ typedef _Unwind_Word _uw;
+ typedef unsigned _uw64 __attribute__((mode(__DI__)));
+ typedef unsigned _uw16 __attribute__((mode(__HI__)));
+ typedef unsigned _uw8 __attribute__((mode(__QI__)));
+
+ typedef enum
+ {
+ _URC_OK = 0, /* operation completed successfully */
+ _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+ _URC_END_OF_STACK = 5,
+ _URC_HANDLER_FOUND = 6,
+ _URC_INSTALL_CONTEXT = 7,
+ _URC_CONTINUE_UNWIND = 8,
+ _URC_FAILURE = 9 /* unspecified failure of some kind */
+ }
+ _Unwind_Reason_Code;
+
+ typedef enum
+ {
+ _US_VIRTUAL_UNWIND_FRAME = 0,
+ _US_UNWIND_FRAME_STARTING = 1,
+ _US_UNWIND_FRAME_RESUME = 2,
+ _US_ACTION_MASK = 3,
+ _US_FORCE_UNWIND = 8,
+ _US_END_OF_STACK = 16
+ }
+ _Unwind_State;
+
+ /* Provided only for compatibility with existing code. */
+ typedef int _Unwind_Action;
+#define _UA_SEARCH_PHASE 1
+#define _UA_CLEANUP_PHASE 2
+#define _UA_HANDLER_FRAME 4
+#define _UA_FORCE_UNWIND 8
+#define _UA_END_OF_STACK 16
+#define _URC_NO_REASON _URC_OK
+
+ typedef struct _Unwind_Control_Block _Unwind_Control_Block;
+ typedef struct _Unwind_Context _Unwind_Context;
+ typedef _uw _Unwind_EHT_Header;
+
+
+ /* UCB: */
+
+ struct _Unwind_Control_Block
+ {
+ char exception_class[8];
+ void (*exception_cleanup)(_Unwind_Reason_Code, _Unwind_Control_Block *);
+ /* Unwinder cache, private fields for the unwinder's use */
+ struct
+ {
+ _uw reserved1; /* Forced unwind stop fn, 0 if not forced */
+ _uw reserved2; /* Personality routine address */
+ _uw reserved3; /* Saved callsite address */
+ _uw reserved4; /* Forced unwind stop arg */
+ _uw reserved5; /* Personality routine GOT value in FDPIC mode. */
+ }
+ unwinder_cache;
+ /* Propagation barrier cache (valid after phase 1): */
+ struct
+ {
+ _uw sp;
+ _uw bitpattern[5];
+ }
+ barrier_cache;
+ /* Cleanup cache (preserved over cleanup): */
+ struct
+ {
+ _uw bitpattern[4];
+ }
+ cleanup_cache;
+  /* Personality routine (PR) cache (for the PR's benefit): */
+ struct
+ {
+ _uw fnstart; /* function start address */
+ _Unwind_EHT_Header *ehtp; /* pointer to EHT entry header word */
+ _uw additional; /* additional data */
+ _uw reserved1;
+ }
+ pr_cache;
+ long long int :0; /* Force alignment to 8-byte boundary */
+ };
+
+  /* Virtual Register Set. */
+
+ typedef enum
+ {
+ _UVRSC_CORE = 0, /* integer register */
+ _UVRSC_VFP = 1, /* vfp */
+ _UVRSC_FPA = 2, /* fpa */
+ _UVRSC_WMMXD = 3, /* Intel WMMX data register */
+ _UVRSC_WMMXC = 4, /* Intel WMMX control register */
+ _UVRSC_PAC = 5 /* Armv8.1-M Mainline PAC/AUTH pseudo-register */
+ }
+ _Unwind_VRS_RegClass;
+
+ typedef enum
+ {
+ _UVRSD_UINT32 = 0,
+ _UVRSD_VFPX = 1,
+ _UVRSD_FPAX = 2,
+ _UVRSD_UINT64 = 3,
+ _UVRSD_FLOAT = 4,
+ _UVRSD_DOUBLE = 5
+ }
+ _Unwind_VRS_DataRepresentation;
+
+ typedef enum
+ {
+ _UVRSR_OK = 0,
+ _UVRSR_NOT_IMPLEMENTED = 1,
+ _UVRSR_FAILED = 2
+ }
+ _Unwind_VRS_Result;
+
+ /* Frame unwinding state. */
+ typedef struct
+ {
+ /* The current word (bytes packed msb first). */
+ _uw data;
+ /* Pointer to the next word of data. */
+ _uw *next;
+ /* The number of bytes left in this word. */
+ _uw8 bytes_left;
+    /* The number of words pointed to by next. */
+ _uw8 words_left;
+ }
+ __gnu_unwind_state;
+
+ typedef _Unwind_Reason_Code (*personality_routine) (_Unwind_State,
+ _Unwind_Control_Block *, _Unwind_Context *);
+
+ _Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *, _Unwind_VRS_RegClass,
+ _uw, _Unwind_VRS_DataRepresentation,
+ void *);
+
+ _Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *, _Unwind_VRS_RegClass,
+ _uw, _Unwind_VRS_DataRepresentation,
+ void *);
+
+ _Unwind_VRS_Result _Unwind_VRS_Pop(_Unwind_Context *, _Unwind_VRS_RegClass,
+ _uw, _Unwind_VRS_DataRepresentation);
+
+
+ /* Support functions for the PR. */
+#define _Unwind_Exception _Unwind_Control_Block
+ typedef char _Unwind_Exception_Class[8];
+
+ void * _Unwind_GetLanguageSpecificData (_Unwind_Context *);
+ _Unwind_Ptr _Unwind_GetRegionStart (_Unwind_Context *);
+
+ _Unwind_Ptr _Unwind_GetDataRelBase (_Unwind_Context *);
+ /* This should never be used. */
+ _Unwind_Ptr _Unwind_GetTextRelBase (_Unwind_Context *);
+
+ /* Interface functions: */
+ _Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Control_Block *ucbp);
+ void __attribute__((noreturn)) _Unwind_Resume(_Unwind_Control_Block *ucbp);
+ _Unwind_Reason_Code _Unwind_Resume_or_Rethrow (_Unwind_Control_Block *ucbp);
+
+ typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)
+ (int, _Unwind_Action, _Unwind_Exception_Class,
+ _Unwind_Control_Block *, struct _Unwind_Context *, void *);
+ _Unwind_Reason_Code _Unwind_ForcedUnwind (_Unwind_Control_Block *,
+ _Unwind_Stop_Fn, void *);
+ /* @@@ Use unwind data to perform a stack backtrace. The trace callback
+ is called for every stack frame in the call chain, but no cleanup
+ actions are performed. */
+ typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) (_Unwind_Context *, void *);
+ _Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn,
+ void*);
+
+ _Unwind_Word _Unwind_GetCFA (struct _Unwind_Context *);
+ void _Unwind_Complete(_Unwind_Control_Block *ucbp);
+ void _Unwind_DeleteException (_Unwind_Exception *);
+
+ _Unwind_Reason_Code __gnu_unwind_frame (_Unwind_Control_Block *,
+ _Unwind_Context *);
+ _Unwind_Reason_Code __gnu_unwind_execute (_Unwind_Context *,
+ __gnu_unwind_state *);
+
+ static inline _Unwind_Word
+ _Unwind_GetGR (_Unwind_Context *context, int regno)
+ {
+ _uw val;
+ _Unwind_VRS_Get (context, _UVRSC_CORE, regno, _UVRSD_UINT32, &val);
+ return val;
+ }
+
+#define _Unwind_GetIPInfo(context, ip_before_insn) \
+ (*ip_before_insn = 0, _Unwind_GetIP (context))
+
+ static inline void
+ _Unwind_SetGR (_Unwind_Context *context, int regno, _Unwind_Word val)
+ {
+ _Unwind_VRS_Set (context, _UVRSC_CORE, regno, _UVRSD_UINT32, &val);
+ }
+
+ _Unwind_Ptr _Unwind_GetRegionStart (_Unwind_Context *);
+ void * _Unwind_GetLanguageSpecificData (_Unwind_Context *);
+
+/* leb128 type numbers have a potentially unlimited size.
+   The goal of the following definitions of _sleb128_t and _uleb128_t
+   is to provide efficient data types large enough to hold the leb128
+   type numbers used in the unwind code. */
+typedef long _sleb128_t;
+typedef unsigned long _uleb128_t;
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* defined UNWIND_ARM_COMMON_H */
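
Of the interface functions declared above, _Unwind_Backtrace is the one most commonly called directly by application code. A sketch of a frame printer built on it; it assumes the binary carries unwind tables (for example, compiled with -funwind-tables) and has a working printf, both assumptions rather than guarantees on a bare-metal target:

#include <unwind.h>
#include <stdio.h>

/* Called once per frame; returning _URC_NO_REASON continues the walk. */
static _Unwind_Reason_Code
trace_frame (_Unwind_Context *context, void *arg)
{
  int *depth = arg;
  printf ("#%d: %p\n", (*depth)++, (void *) _Unwind_GetIP (context));
  return _URC_NO_REASON;
}

void
print_backtrace (void)
{
  int depth = 0;
  _Unwind_Backtrace (trace_frame, &depth);
}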
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/unwind.h b/lib/gcc/arm-none-eabi/13.2.1/include/unwind.h
new file mode 100644
index 0000000..b5b27fb
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/unwind.h
@@ -0,0 +1,118 @@
+/* Header file for the ARM EABI unwinder
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Paul Brook
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Language-independent unwinder header public defines.  This contains both
+   ABI-defined objects and GNU support routines.  */
+
+#ifndef UNWIND_ARM_H
+#define UNWIND_ARM_H
+
+#include "unwind-arm-common.h"
+
+#define UNWIND_STACK_REG 13
+/* Use IP as a scratch register within the personality routine. */
+#define UNWIND_POINTER_REG 12
+
+#define FDPIC_REGNUM 9
+
+#define STR(x) #x
+#define XSTR(x) STR(x)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+_Unwind_Ptr __attribute__((weak)) __gnu_Unwind_Find_got (_Unwind_Ptr);
+
+static inline _Unwind_Ptr _Unwind_gnu_Find_got (_Unwind_Ptr ptr)
+{
+ _Unwind_Ptr res;
+
+ if (__gnu_Unwind_Find_got)
+ res = __gnu_Unwind_Find_got (ptr);
+ else
+ __asm volatile ("mov %[result], r" XSTR(FDPIC_REGNUM)
+ : [result] "=r" (res));
+
+ return res;
+}
+
+ /* Decode an R_ARM_TARGET2 relocation. */
+ static inline _Unwind_Word
+ _Unwind_decode_typeinfo_ptr (_Unwind_Word base __attribute__ ((unused)),
+ _Unwind_Word ptr)
+ {
+ _Unwind_Word tmp;
+
+ tmp = *(_Unwind_Word *) ptr;
+ /* Zero values are always NULL. */
+ if (!tmp)
+ return 0;
+
+#if __FDPIC__
+  /* For FDPIC, we store the offset of the GOT entry.  So first get the
+     GOT address from the dynamic linker, then use indirect access.  */
+ tmp += _Unwind_gnu_Find_got (ptr);
+ tmp = *(_Unwind_Word *) tmp;
+#elif (defined(linux) && !defined(__uClinux__)) || defined(__NetBSD__) \
+ || defined(__FreeBSD__) || defined(__fuchsia__)
+ /* Pc-relative indirect. */
+#define _GLIBCXX_OVERRIDE_TTYPE_ENCODING (DW_EH_PE_pcrel | DW_EH_PE_indirect)
+ tmp += ptr;
+ tmp = *(_Unwind_Word *) tmp;
+#elif defined(__symbian__) || defined(__uClinux__)
+#define _GLIBCXX_OVERRIDE_TTYPE_ENCODING (DW_EH_PE_absptr)
+ /* Absolute pointer. Nothing more to do. */
+#else
+#define _GLIBCXX_OVERRIDE_TTYPE_ENCODING (DW_EH_PE_pcrel)
+ /* Pc-relative pointer. */
+ tmp += ptr;
+#endif
+ return tmp;
+ }
+
+ static inline _Unwind_Reason_Code
+ __gnu_unwind_24bit (_Unwind_Context * context __attribute__ ((unused)),
+ _uw data __attribute__ ((unused)),
+ int compact __attribute__ ((unused)))
+ {
+ return _URC_FAILURE;
+ }
+#ifndef __FreeBSD__
+ /* Return the address of the instruction, not the actual IP value. */
+#define _Unwind_GetIP(context) \
+ (_Unwind_GetGR (context, 15) & ~(_Unwind_Word)1)
+
+#define _Unwind_SetIP(context, val) \
+ _Unwind_SetGR (context, 15, val | (_Unwind_GetGR (context, 15) & 1))
+#else
+ #undef _Unwind_GetIPInfo
+ _Unwind_Ptr _Unwind_GetIP (struct _Unwind_Context *);
+ _Unwind_Ptr _Unwind_GetIPInfo (struct _Unwind_Context *, int *);
+ void _Unwind_SetIP (struct _Unwind_Context *, _Unwind_Ptr);
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* defined UNWIND_ARM_H */
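
For orientation, a typical consumer of this header outside the C++ runtime
is a backtrace helper built on _Unwind_Backtrace, declared in
unwind-arm-common.h above. A minimal sketch (trace_cb and print_backtrace
are illustrative names; printf stands in for whatever output the target
provides, and frames are only visible where unwind tables exist, e.g. code
built with -funwind-tables):

    #include <unwind.h>
    #include <stdio.h>

    /* Called once per frame; print the frame's PC and keep unwinding.  */
    static _Unwind_Reason_Code
    trace_cb (_Unwind_Context *context, void *arg)
    {
      int *frame = (int *) arg;
      printf ("#%d  pc=%p\n", (*frame)++, (void *) _Unwind_GetIP (context));
      return _URC_NO_REASON;
    }

    void
    print_backtrace (void)
    {
      int frame = 0;
      _Unwind_Backtrace (trace_cb, &frame);
    }
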
diff --git a/lib/gcc/arm-none-eabi/13.2.1/include/varargs.h b/lib/gcc/arm-none-eabi/13.2.1/include/varargs.h
new file mode 100644
index 0000000..4b9803e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/include/varargs.h
@@ -0,0 +1,7 @@
+#ifndef _VARARGS_H
+#define _VARARGS_H
+
+#error "GCC no longer implements <varargs.h>."
+#error "Revise your code to use <stdarg.h>."
+
+#endif
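
Since the two #error lines are all this header contains, porting old code is
mechanical: drop <varargs.h> and the old va_alist/va_dcl declaration style in
favour of <stdarg.h> and a named fixed parameter. A small sketch of the
replacement style (sum_ints is illustrative):

    #include <stdarg.h>

    /* Sum COUNT ints passed as trailing arguments, <stdarg.h> style.  */
    int
    sum_ints (int count, ...)
    {
      va_list ap;
      int total = 0;

      va_start (ap, count);
      while (count-- > 0)
        total += va_arg (ap, int);
      va_end (ap);
      return total;
    }
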
diff --git a/lib/gcc/arm-none-eabi/13.2.1/install-tools/fixinc_list b/lib/gcc/arm-none-eabi/13.2.1/install-tools/fixinc_list
new file mode 100644
index 0000000..092bc2b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/install-tools/fixinc_list
@@ -0,0 +1 @@
+;
diff --git a/lib/gcc/arm-none-eabi/13.2.1/install-tools/gsyslimits.h b/lib/gcc/arm-none-eabi/13.2.1/install-tools/gsyslimits.h
new file mode 100644
index 0000000..a362802
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/install-tools/gsyslimits.h
@@ -0,0 +1,8 @@
+/* syslimits.h stands for the system's own limits.h file.
+   If we can use it unmodified, then we install this text.
+   If fixincludes fixes it, then the fixed version is installed
+   instead of this text.  */
+
+#define _GCC_NEXT_LIMITS_H /* tell gcc's limits.h to recurse */
+#include_next <limits.h>
+#undef _GCC_NEXT_LIMITS_H
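
Read together with the limits.h further below, the guard macros interlock
into a small protocol. This is a sketch of what the fragments themselves
state; where each #include_next actually lands depends on the installed
header layout:

    /* The chain, condensed -- each line paraphrases the fragment named.
       gcc's limits.h:   #define _GCC_LIMITS_H_  then  #include "syslimits.h"
       syslimits.h:      #define _GCC_NEXT_LIMITS_H  then
                         #include_next <limits.h>
       gcc's limits.h
       (trailing block): #ifdef _GCC_NEXT_LIMITS_H  then
                         #include_next <limits.h>  */

The trailing block is reached only when _GCC_LIMITS_H_ is already defined,
i.e. on re-entry, which is what lets the recursion bottom out at the real
system header.
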
diff --git a/lib/gcc/arm-none-eabi/13.2.1/install-tools/include/README b/lib/gcc/arm-none-eabi/13.2.1/install-tools/include/README
new file mode 100644
index 0000000..7086a77
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/install-tools/include/README
@@ -0,0 +1,14 @@
+This README file is copied into the directory for GCC-only header files
+when fixincludes is run by the makefile for GCC.
+
+Many of the files in this directory were automatically edited from the
+standard system header files by the fixincludes process. They are
+system-specific, and will not work on any other kind of system. They
+are also not part of GCC.  We have to do this because
+GCC requires ANSI C headers and many vendors supply ANSI-incompatible
+headers.
+
+Because this is an automated process, sometimes headers get "fixed"
+that do not, strictly speaking, need a fix. As long as nothing is broken
+by the process, it is just an unfortunate collateral inconvenience.
+We would like to rectify it, if it is not "too inconvenient".
diff --git a/lib/gcc/arm-none-eabi/13.2.1/install-tools/include/limits.h b/lib/gcc/arm-none-eabi/13.2.1/install-tools/include/limits.h
new file mode 100644
index 0000000..0cf9c39
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/install-tools/include/limits.h
@@ -0,0 +1,208 @@
+/* Copyright (C) 1992-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* This administrivia gets added to the beginning of limits.h
+ if the system has its own version of limits.h. */
+
+/* We use _GCC_LIMITS_H_ because we want this not to match
+ any macros that the system's limits.h uses for its own purposes. */
+#ifndef _GCC_LIMITS_H_ /* Terminated in limity.h. */
+#define _GCC_LIMITS_H_
+
+#ifndef _LIBC_LIMITS_H_
+/* Use "..." so that we find syslimits.h only in this same directory. */
+#include "syslimits.h"
+#endif
+/* Copyright (C) 1991-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef _LIMITS_H___
+#define _LIMITS_H___
+
+/* Number of bits in a `char'. */
+#undef CHAR_BIT
+#define CHAR_BIT __CHAR_BIT__
+
+/* Maximum length of a multibyte character. */
+#ifndef MB_LEN_MAX
+#define MB_LEN_MAX 1
+#endif
+
+/* Minimum and maximum values a `signed char' can hold. */
+#undef SCHAR_MIN
+#define SCHAR_MIN (-SCHAR_MAX - 1)
+#undef SCHAR_MAX
+#define SCHAR_MAX __SCHAR_MAX__
+
+/* Maximum value an `unsigned char' can hold. (Minimum is 0). */
+#undef UCHAR_MAX
+#if __SCHAR_MAX__ == __INT_MAX__
+# define UCHAR_MAX (SCHAR_MAX * 2U + 1U)
+#else
+# define UCHAR_MAX (SCHAR_MAX * 2 + 1)
+#endif
+
+/* Minimum and maximum values a `char' can hold. */
+#ifdef __CHAR_UNSIGNED__
+# undef CHAR_MIN
+# if __SCHAR_MAX__ == __INT_MAX__
+# define CHAR_MIN 0U
+# else
+# define CHAR_MIN 0
+# endif
+# undef CHAR_MAX
+# define CHAR_MAX UCHAR_MAX
+#else
+# undef CHAR_MIN
+# define CHAR_MIN SCHAR_MIN
+# undef CHAR_MAX
+# define CHAR_MAX SCHAR_MAX
+#endif
+
+/* Minimum and maximum values a `signed short int' can hold. */
+#undef SHRT_MIN
+#define SHRT_MIN (-SHRT_MAX - 1)
+#undef SHRT_MAX
+#define SHRT_MAX __SHRT_MAX__
+
+/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */
+#undef USHRT_MAX
+#if __SHRT_MAX__ == __INT_MAX__
+# define USHRT_MAX (SHRT_MAX * 2U + 1U)
+#else
+# define USHRT_MAX (SHRT_MAX * 2 + 1)
+#endif
+
+/* Minimum and maximum values a `signed int' can hold. */
+#undef INT_MIN
+#define INT_MIN (-INT_MAX - 1)
+#undef INT_MAX
+#define INT_MAX __INT_MAX__
+
+/* Maximum value an `unsigned int' can hold. (Minimum is 0). */
+#undef UINT_MAX
+#define UINT_MAX (INT_MAX * 2U + 1U)
+
+/* Minimum and maximum values a `signed long int' can hold.
+ (Same as `int'). */
+#undef LONG_MIN
+#define LONG_MIN (-LONG_MAX - 1L)
+#undef LONG_MAX
+#define LONG_MAX __LONG_MAX__
+
+/* Maximum value an `unsigned long int' can hold. (Minimum is 0). */
+#undef ULONG_MAX
+#define ULONG_MAX (LONG_MAX * 2UL + 1UL)
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+/* Minimum and maximum values a `signed long long int' can hold. */
+# undef LLONG_MIN
+# define LLONG_MIN (-LLONG_MAX - 1LL)
+# undef LLONG_MAX
+# define LLONG_MAX __LONG_LONG_MAX__
+
+/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */
+# undef ULLONG_MAX
+# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL)
+#endif
+
+#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__)
+/* Minimum and maximum values a `signed long long int' can hold. */
+# undef LONG_LONG_MIN
+# define LONG_LONG_MIN (-LONG_LONG_MAX - 1LL)
+# undef LONG_LONG_MAX
+# define LONG_LONG_MAX __LONG_LONG_MAX__
+
+/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */
+# undef ULONG_LONG_MAX
+# define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1ULL)
+#endif
+
+#if (defined __STDC_WANT_IEC_60559_BFP_EXT__ \
+ || (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L))
+/* TS 18661-1 / C2X widths of integer types. */
+# undef CHAR_WIDTH
+# define CHAR_WIDTH __SCHAR_WIDTH__
+# undef SCHAR_WIDTH
+# define SCHAR_WIDTH __SCHAR_WIDTH__
+# undef UCHAR_WIDTH
+# define UCHAR_WIDTH __SCHAR_WIDTH__
+# undef SHRT_WIDTH
+# define SHRT_WIDTH __SHRT_WIDTH__
+# undef USHRT_WIDTH
+# define USHRT_WIDTH __SHRT_WIDTH__
+# undef INT_WIDTH
+# define INT_WIDTH __INT_WIDTH__
+# undef UINT_WIDTH
+# define UINT_WIDTH __INT_WIDTH__
+# undef LONG_WIDTH
+# define LONG_WIDTH __LONG_WIDTH__
+# undef ULONG_WIDTH
+# define ULONG_WIDTH __LONG_WIDTH__
+# undef LLONG_WIDTH
+# define LLONG_WIDTH __LONG_LONG_WIDTH__
+# undef ULLONG_WIDTH
+# define ULLONG_WIDTH __LONG_LONG_WIDTH__
+#endif
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L
+/* C2X width and limit of _Bool. */
+# undef BOOL_MAX
+# define BOOL_MAX 1
+# undef BOOL_WIDTH
+# define BOOL_WIDTH 1
+
+# define __STDC_VERSION_LIMITS_H__ 202311L
+#endif
+
+#endif /* _LIMITS_H___ */
+/* This administrivia gets added to the end of limits.h
+ if the system has its own version of limits.h. */
+
+#else /* not _GCC_LIMITS_H_ */
+
+#ifdef _GCC_NEXT_LIMITS_H
+#include_next <limits.h> /* recurse down to the real one */
+#endif
+
+#endif /* not _GCC_LIMITS_H_ */
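
Everything this header defines is an integer constant expression, so it can
back compile-time checks. A minimal sketch using C11 static assertions (the
concrete expected values are typical of a 32-bit arm-none-eabi
configuration, not something the header itself promises):

    #include <limits.h>

    /* Compile-time sanity checks against the limits above.  */
    _Static_assert (CHAR_BIT == 8, "8-bit bytes expected");
    _Static_assert (INT_MAX == 2147483647, "32-bit int expected");
    _Static_assert (LONG_MAX >= INT_MAX, "guaranteed by the C standard");

    #if defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L
    /* The width macros only appear under C2X or the TS 18661-1 opt-in.  */
    _Static_assert (INT_WIDTH == 32, "32-bit int expected");
    #endif
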
diff --git a/lib/gcc/arm-none-eabi/13.2.1/install-tools/macro_list b/lib/gcc/arm-none-eabi/13.2.1/install-tools/macro_list
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/install-tools/macro_list
diff --git a/lib/gcc/arm-none-eabi/13.2.1/install-tools/mkheaders.conf b/lib/gcc/arm-none-eabi/13.2.1/install-tools/mkheaders.conf
new file mode 100644
index 0000000..d9cbb3a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/install-tools/mkheaders.conf
@@ -0,0 +1,3 @@
+SYSTEM_HEADER_DIR="/data/jenkins/workspace/GNU-toolchain/arm-13/build-arm-none-eabi/install/arm-none-eabi${sysroot_headers_suffix}/include"
+OTHER_FIXINCLUDES_DIRS=""
+STMP_FIXINC="stmp-fixinc"
diff --git a/lib/gcc/arm-none-eabi/13.2.1/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/libcaf_single.a
new file mode 100644
index 0000000..1149ab0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/libgcc.a
new file mode 100644
index 0000000..56aaa32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/libgcov.a
new file mode 100644
index 0000000..e0a751d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/gtype.state b/lib/gcc/arm-none-eabi/13.2.1/plugin/gtype.state
new file mode 100644
index 0000000..efea6e7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/gtype.state
@@ -0,0 +1,38925 @@
+;;;;@@@@ GCC gengtype state
+;;; DON'T EDIT THIS FILE, since it is generated by GCC's gengtype.
+;;; The format of this file is tied to a particular version of GCC.
+;;; Don't parse this file without knowing GCC gengtype internals.
+;;; This file should be parsed by the same gengtype which wrote it.
+
+(!version "13.2.1 20231009")
+(!srcdir "/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc")
+(!languages 12 ada c cp d fortran go jit lto m2 objc objcp rust)
+(!fileslist 242 0
+ (!srcfile 4095 "../libcpp/include/line-map.h")
+ (!srcfile 4095 "../libcpp/include/cpplib.h")
+ (!srcfile 4095 "input.h")
+ (!srcfile 4095 "coretypes.h")
+ (!file 4095 "auto-host.h")
+ (!srcfile 4095 "../include/ansidecl.h")
+ (!srcfile 4095 "config/arm/arm-opts.h")
+ (!srcfile 4095 "config/arm/aarch-common.h")
+ (!file 4095 "options.h")
+ (!srcfile 4095 "config/vxworks-dummy.h")
+ (!srcfile 4095 "config/elfos.h")
+ (!srcfile 4095 "config/arm/unknown-elf.h")
+ (!srcfile 4095 "config/arm/elf.h")
+ (!srcfile 4095 "config/arm/bpabi.h")
+ (!srcfile 4095 "config/newlib-stdint.h")
+ (!srcfile 4095 "config/arm/aout.h")
+ (!srcfile 4095 "config/arm/arm.h")
+ (!srcfile 4095 "config/arm/arm-mlib.h")
+ (!srcfile 4095 "config/initfini-array.h")
+ (!srcfile 4095 "defaults.h")
+ (!srcfile 4095 "../include/hashtab.h")
+ (!srcfile 4095 "../include/splay-tree.h")
+ (!srcfile 4095 "bitmap.h")
+ (!srcfile 4095 "wide-int.h")
+ (!srcfile 4095 "alias.h")
+ (!srcfile 4095 "coverage.cc")
+ (!srcfile 4095 "rtl.h")
+ (!srcfile 4095 "optabs.h")
+ (!srcfile 4095 "tree.h")
+ (!srcfile 4095 "tree-core.h")
+ (!srcfile 4095 "libfuncs.h")
+ (!srcfile 4095 "../libcpp/include/symtab.h")
+ (!srcfile 4095 "../include/obstack.h")
+ (!srcfile 4095 "real.h")
+ (!srcfile 4095 "function.h")
+ (!srcfile 4095 "insn-addr.h")
+ (!srcfile 4095 "hwint.h")
+ (!srcfile 4095 "fixed-value.h")
+ (!srcfile 4095 "function-abi.h")
+ (!srcfile 4095 "output.h")
+ (!srcfile 4095 "cfgloop.h")
+ (!srcfile 4095 "cfg.h")
+ (!srcfile 4095 "profile-count.h")
+ (!srcfile 4095 "cselib.h")
+ (!srcfile 4095 "basic-block.h")
+ (!srcfile 4095 "ipa-ref.h")
+ (!srcfile 4095 "cgraph.h")
+ (!srcfile 4095 "symtab-thunks.h")
+ (!srcfile 4095 "symtab-thunks.cc")
+ (!srcfile 4095 "symtab-clones.h")
+ (!srcfile 4095 "reload.h")
+ (!srcfile 4095 "caller-save.cc")
+ (!srcfile 4095 "symtab.cc")
+ (!srcfile 4095 "alias.cc")
+ (!srcfile 4095 "bitmap.cc")
+ (!srcfile 4095 "cselib.cc")
+ (!srcfile 4095 "cgraph.cc")
+ (!srcfile 4095 "ipa-prop.cc")
+ (!srcfile 4095 "ipa-cp.cc")
+ (!srcfile 4095 "ipa-utils.h")
+ (!srcfile 4095 "ipa-param-manipulation.h")
+ (!srcfile 4095 "ipa-sra.cc")
+ (!srcfile 4095 "ipa-modref.h")
+ (!srcfile 4095 "ipa-modref.cc")
+ (!srcfile 4095 "ipa-modref-tree.h")
+ (!srcfile 4095 "signop.h")
+ (!srcfile 4095 "diagnostic-spec.h")
+ (!srcfile 4095 "diagnostic-spec.cc")
+ (!srcfile 4095 "dwarf2out.h")
+ (!srcfile 4095 "dwarf2asm.cc")
+ (!srcfile 4095 "dwarf2cfi.cc")
+ (!srcfile 4095 "dwarf2ctf.cc")
+ (!srcfile 4095 "dwarf2out.cc")
+ (!srcfile 4095 "ctfc.h")
+ (!srcfile 4095 "ctfout.cc")
+ (!srcfile 4095 "btfout.cc")
+ (!srcfile 4095 "tree-vect-generic.cc")
+ (!srcfile 4095 "gimple-isel.cc")
+ (!srcfile 4095 "dojump.cc")
+ (!srcfile 4095 "emit-rtl.h")
+ (!srcfile 4095 "emit-rtl.cc")
+ (!srcfile 4095 "except.h")
+ (!srcfile 4095 "explow.cc")
+ (!srcfile 4095 "expr.cc")
+ (!srcfile 4095 "expr.h")
+ (!srcfile 4095 "function.cc")
+ (!srcfile 4095 "except.cc")
+ (!srcfile 4095 "ggc-tests.cc")
+ (!srcfile 4095 "gcse.cc")
+ (!srcfile 4095 "godump.cc")
+ (!srcfile 4095 "lists.cc")
+ (!srcfile 4095 "optabs-libfuncs.cc")
+ (!srcfile 4095 "profile.cc")
+ (!srcfile 4095 "mcf.cc")
+ (!srcfile 4095 "reg-stack.cc")
+ (!srcfile 4095 "cfgrtl.cc")
+ (!srcfile 4095 "stor-layout.cc")
+ (!srcfile 4095 "stringpool.cc")
+ (!srcfile 4095 "tree.cc")
+ (!srcfile 4095 "varasm.cc")
+ (!srcfile 4095 "gimple.h")
+ (!srcfile 4095 "gimple-ssa.h")
+ (!srcfile 4095 "tree-ssanames.cc")
+ (!srcfile 4095 "tree-eh.cc")
+ (!srcfile 4095 "tree-ssa-address.cc")
+ (!srcfile 4095 "tree-cfg.cc")
+ (!srcfile 4095 "tree-ssa-loop-ivopts.cc")
+ (!srcfile 4095 "tree-dfa.cc")
+ (!srcfile 4095 "tree-iterator.cc")
+ (!srcfile 4095 "gimple-expr.cc")
+ (!srcfile 4095 "tree-chrec.h")
+ (!srcfile 4095 "tree-scalar-evolution.cc")
+ (!srcfile 4095 "tree-ssa-operands.h")
+ (!srcfile 4095 "tree-profile.cc")
+ (!srcfile 4095 "tree-nested.cc")
+ (!srcfile 4095 "omp-offload.h")
+ (!srcfile 4095 "omp-general.cc")
+ (!srcfile 4095 "omp-low.cc")
+ (!srcfile 4095 "targhooks.cc")
+ (!srcfile 4095 "config/arm/arm.cc")
+ (!srcfile 4095 "passes.cc")
+ (!srcfile 4095 "cgraphclones.cc")
+ (!srcfile 4095 "tree-phinodes.cc")
+ (!srcfile 4095 "tree-ssa-alias.h")
+ (!srcfile 4095 "tree-ssanames.h")
+ (!srcfile 4095 "tree-vrp.h")
+ (!srcfile 4095 "value-range.h")
+ (!srcfile 4095 "value-range-storage.h")
+ (!srcfile 4095 "ipa-prop.h")
+ (!srcfile 4095 "trans-mem.cc")
+ (!srcfile 4095 "lto-streamer.h")
+ (!srcfile 4095 "target-globals.h")
+ (!srcfile 4095 "ipa-predicate.h")
+ (!srcfile 4095 "ipa-fnsummary.h")
+ (!srcfile 4095 "vtable-verify.cc")
+ (!srcfile 4095 "asan.cc")
+ (!srcfile 4095 "ubsan.cc")
+ (!srcfile 4095 "tsan.cc")
+ (!srcfile 4095 "sanopt.cc")
+ (!srcfile 4095 "sancov.cc")
+ (!srcfile 4095 "ipa-devirt.cc")
+ (!srcfile 4095 "internal-fn.h")
+ (!srcfile 4095 "calls.cc")
+ (!srcfile 4095 "omp-general.h")
+ (!srcfile 4095 "analyzer/analyzer-language.cc")
+ (!srcfile 4095 "config/arm/arm-builtins.cc")
+ (!srcfile 4095 "config/arm/arm-mve-builtins.h")
+ (!srcfile 4095 "config/arm/arm-mve-builtins.cc")
+ (!srcfile 1 "ada/gcc-interface/ada-tree.h")
+ (!srcfile 1 "ada/gcc-interface/gigi.h")
+ (!srcfile 1 "ada/gcc-interface/decl.cc")
+ (!srcfile 1 "ada/gcc-interface/trans.cc")
+ (!srcfile 1 "ada/gcc-interface/utils.cc")
+ (!srcfile 1 "ada/gcc-interface/misc.cc")
+ (!srcfile 2 "c/c-lang.cc")
+ (!srcfile 514 "c/c-tree.h")
+ (!srcfile 514 "c/c-decl.cc")
+ (!srcfile 1542 "c-family/c-common.cc")
+ (!srcfile 1542 "c-family/c-common.h")
+ (!srcfile 1542 "c-family/c-objc.h")
+ (!srcfile 1542 "c-family/c-cppbuiltin.cc")
+ (!srcfile 1542 "c-family/c-pragma.h")
+ (!srcfile 1542 "c-family/c-pragma.cc")
+ (!srcfile 1542 "c-family/c-format.cc")
+ (!srcfile 514 "c/c-objc-common.cc")
+ (!srcfile 514 "c/c-parser.h")
+ (!srcfile 514 "c/c-parser.cc")
+ (!srcfile 514 "c/c-lang.h")
+ (!srcfile 1028 "cp/name-lookup.h")
+ (!srcfile 1028 "cp/cp-tree.h")
+ (!srcfile 1028 "cp/decl.h")
+ (!srcfile 1028 "cp/parser.h")
+ (!srcfile 1028 "cp/call.cc")
+ (!srcfile 1028 "cp/class.cc")
+ (!srcfile 1028 "cp/constexpr.cc")
+ (!srcfile 1028 "cp/contracts.cc")
+ (!srcfile 1028 "cp/constraint.cc")
+ (!srcfile 1028 "cp/coroutines.cc")
+ (!srcfile 1028 "cp/cp-gimplify.cc")
+ (!srcfile 4 "cp/cp-lang.cc")
+ (!srcfile 1028 "cp/cp-objcp-common.cc")
+ (!srcfile 1028 "cp/decl.cc")
+ (!srcfile 1028 "cp/decl2.cc")
+ (!srcfile 1028 "cp/except.cc")
+ (!srcfile 1028 "cp/friend.cc")
+ (!srcfile 1028 "cp/init.cc")
+ (!srcfile 1028 "cp/lambda.cc")
+ (!srcfile 1028 "cp/lex.cc")
+ (!srcfile 1028 "cp/logic.cc")
+ (!srcfile 1028 "cp/mangle.cc")
+ (!srcfile 1028 "cp/method.cc")
+ (!srcfile 1028 "cp/module.cc")
+ (!srcfile 1028 "cp/name-lookup.cc")
+ (!srcfile 1028 "cp/parser.cc")
+ (!srcfile 1028 "cp/pt.cc")
+ (!srcfile 1028 "cp/rtti.cc")
+ (!srcfile 1028 "cp/semantics.cc")
+ (!srcfile 1028 "cp/tree.cc")
+ (!srcfile 1028 "cp/typeck2.cc")
+ (!srcfile 1028 "cp/vtable-class-hierarchy.cc")
+ (!srcfile 8 "d/d-tree.h")
+ (!srcfile 8 "d/d-builtins.cc")
+ (!srcfile 8 "d/d-lang.cc")
+ (!srcfile 8 "d/typeinfo.cc")
+ (!srcfile 16 "fortran/f95-lang.cc")
+ (!srcfile 16 "fortran/trans-decl.cc")
+ (!srcfile 16 "fortran/trans-intrinsic.cc")
+ (!srcfile 16 "fortran/trans-io.cc")
+ (!srcfile 16 "fortran/trans-stmt.cc")
+ (!srcfile 16 "fortran/trans-types.cc")
+ (!srcfile 16 "fortran/trans-types.h")
+ (!srcfile 16 "fortran/trans.h")
+ (!srcfile 16 "fortran/trans-const.h")
+ (!srcfile 32 "go/go-lang.cc")
+ (!srcfile 32 "go/go-c.h")
+ (!srcfile 64 "jit/dummy-frontend.cc")
+ (!srcfile 128 "lto/lto-tree.h")
+ (!srcfile 128 "lto/lto-lang.cc")
+ (!srcfile 128 "lto/lto.cc")
+ (!srcfile 128 "lto/lto.h")
+ (!srcfile 128 "lto/lto-common.h")
+ (!srcfile 128 "lto/lto-common.cc")
+ (!srcfile 128 "lto/lto-dump.cc")
+ (!srcfile 256 "m2/gm2-lang.cc")
+ (!srcfile 256 "m2/gm2-lang.h")
+ (!srcfile 256 "m2/gm2-gcc/rtegraph.cc")
+ (!srcfile 256 "m2/gm2-gcc/m2block.cc")
+ (!srcfile 256 "m2/gm2-gcc/m2builtins.cc")
+ (!srcfile 256 "m2/gm2-gcc/m2decl.cc")
+ (!srcfile 256 "m2/gm2-gcc/m2except.cc")
+ (!srcfile 256 "m2/gm2-gcc/m2expr.cc")
+ (!srcfile 256 "m2/gm2-gcc/m2statement.cc")
+ (!srcfile 256 "m2/gm2-gcc/m2type.cc")
+ (!srcfile 1536 "objc/objc-map.h")
+ (!srcfile 1536 "objc/objc-act.h")
+ (!srcfile 1536 "objc/objc-act.cc")
+ (!srcfile 1536 "objc/objc-runtime-shared-support.cc")
+ (!srcfile 1536 "objc/objc-gnu-runtime-abi-01.cc")
+ (!srcfile 1536 "objc/objc-next-runtime-abi-01.cc")
+ (!srcfile 1536 "objc/objc-next-runtime-abi-02.cc")
+ (!srcfile 1024 "objcp/objcp-lang.cc")
+ (!srcfile 2048 "rust/rust-lang.cc")
+)
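
A decoding note: the integer before each file name behaves like a bitmask
over the twelve languages in the (!languages ...) line above, in order
(ada=1, c=2, cp=4, d=8, fortran=16, go=32, jit=64, lto=128, m2=256,
objc=512, objcp=1024, rust=2048). That reading is an inference from the
values rather than anything stated in the file, but it decodes consistently:

    4095 = all twelve bits set     (shared headers such as tree-core.h)
    1542 = 2 + 4 + 512 + 1024      (c-family/*: c, cp, objc, objcp)
     514 = 2 + 512                 (c/*: the C and Objective-C front ends)
    1028 = 4 + 1024                (cp/*: C++ and Objective-C++)
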
+(!structures 1820
+
+ (!type struct 1 nil gc_used "source_range"
+ (!srcfileloc "../libcpp/include/line-map.h" 758)
+ (!fields 2
+ (!pair "m_start"
+ (!type scalar_nonchar 2
+ (!type pointer 3
+ (!type pointer 4 nil gc_unused
+ (!type already_seen 3)
+ )
+ gc_used
+ (!type already_seen 2)
+ )
+ gc_pointed_to)
+ (!srcfileloc "../libcpp/include/line-map.h" 324)
+ nil )
+ (!pair "m_finish"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 325)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 5 nil gc_used "line_map"
+ (!srcfileloc "../libcpp/include/line-map.h" 391)
+ (!fields 1
+ (!pair "start_location"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 388)
+ nil )
+ )
+ (!options
+ (!option desc string "MAP_ORDINARY_P (&%h) ? 1 : 2")
+ (!option tag string "0")
+ )
+ 4095 nil nil )
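
Each (!type struct ...) record here mirrors a GTY(())-annotated declaration
at the source location given by !srcfileloc, with one (!pair ...) per field
and (!option ...) entries for GTY options such as tag, desc and length.
Roughly, a declaration like the following is what gengtype digests into such
a record (demo_node and its fields are made up for illustration):

    /* Hypothetical GTY-marked struct: gengtype records each field, its
       declaration site, and any options the garbage collector needs.  */
    struct GTY(()) demo_node
    {
      tree decl;              /* pointer field: walked by the GC */
      unsigned int flags;     /* scalar field: stored, not walked */
      tree * GTY ((length ("%h.num_elts"))) elts;  /* counted tree array */
      size_t num_elts;
    };
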
+
+ (!type struct 6
+ (!type pointer 7 nil gc_unused
+ (!type already_seen 6)
+ )
+ gc_used "line_map_ordinary"
+ (!srcfileloc "../libcpp/include/line-map.h" 727)
+ (!fields 7
+ (!pair "reason"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 411)
+ nil )
+ (!pair "sysp"
+ (!type scalar_char 8
+ (!type pointer 9
+ (!type pointer 10 nil gc_unused
+ (!type already_seen 9)
+ )
+ gc_unused
+ (!type already_seen 8)
+ )
+ gc_used)
+ (!srcfileloc "../libcpp/include/line-map.h" 417)
+ nil )
+ (!pair "m_column_and_range_bits"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 421)
+ nil )
+ (!pair "m_range_bits"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 436)
+ nil )
+ (!pair "to_file"
+ (!type string 11 nil gc_used)
+ (!srcfileloc "../libcpp/include/line-map.h" 440)
+ nil )
+ (!pair "to_line"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 441)
+ nil )
+ (!pair "included_from"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 447)
+ nil )
+ )
+ (!options
+ (!option tag string "1")
+ )
+ 4095 nil
+ (!type already_seen 5)
+ )
+
+ (!type struct 12
+ (!type pointer 13
+ (!type pointer 14 nil gc_unused
+ (!type already_seen 13)
+ )
+ gc_unused
+ (!type already_seen 12)
+ )
+ gc_used "cpp_hashnode"
+ (!srcfileloc "../libcpp/include/cpplib.h" 1004)
+ (!fields 8
+ (!pair "ident"
+ (!type struct 15
+ (!type pointer 16
+ (!type pointer 17 nil gc_unused
+ (!type already_seen 16)
+ )
+ gc_unused
+ (!type already_seen 15)
+ )
+ gc_used "ht_identifier"
+ (!srcfileloc "../libcpp/include/symtab.h" 38)
+ (!fields 3
+ (!pair "str"
+ (!type already_seen 11)
+ (!srcfileloc "../libcpp/include/symtab.h" 35)
+ (!options
+ (!option string_length string "1 + %h.len")
+ )
+ )
+ (!pair "len"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/symtab.h" 36)
+ nil )
+ (!pair "hash_value"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/symtab.h" 37)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "../libcpp/include/cpplib.h" 986)
+ nil )
+ (!pair "is_directive"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 987)
+ nil )
+ (!pair "directive_index"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 988)
+ nil )
+ (!pair "rid_code"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 991)
+ nil )
+ (!pair "flags"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 992)
+ nil )
+ (!pair "type"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 993)
+ nil )
+ (!pair "deferred"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 1001)
+ nil )
+ (!pair "value"
+ (!type union 18 nil gc_used "_cpp_hashnode_value"
+ (!srcfileloc "../libcpp/include/cpplib.h" 983)
+ (!fields 4
+ (!pair "answers"
+ (!type pointer 19 nil gc_used
+ (!type struct 20
+ (!type already_seen 19)
+ gc_pointed_to "cpp_macro"
+ (!srcfileloc "../libcpp/include/cpplib.h" 913)
+ (!fields 13
+ (!pair "parm"
+ (!type union 21 nil gc_used "cpp_parm_u"
+ (!srcfileloc "../libcpp/include/cpplib.h" 863)
+ (!fields 2
+ (!pair "params"
+ (!type already_seen 14)
+ (!srcfileloc "../libcpp/include/cpplib.h" 859)
+ (!options
+ (!option length string "%1.paramc")
+ (!option nested_ptr nested
+ (!type union 22
+ (!type pointer 23
+ (!type pointer 24 nil gc_used
+ (!type already_seen 23)
+ )
+ gc_pointed_to
+ (!type already_seen 22)
+ )
+ gc_pointed_to "tree_node"
+ (!srcfileloc "tree-core.h" 2107)
+ (!fields 39
+ (!pair "base"
+ (!type struct 25 nil gc_used "tree_base"
+ (!srcfileloc "tree-core.h" 1145)
+ (!fields 18
+ (!pair "code"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1035)
+ nil )
+ (!pair "side_effects_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1037)
+ nil )
+ (!pair "constant_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1038)
+ nil )
+ (!pair "addressable_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1039)
+ nil )
+ (!pair "volatile_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1040)
+ nil )
+ (!pair "readonly_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1041)
+ nil )
+ (!pair "asm_written_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1042)
+ nil )
+ (!pair "nowarning_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1043)
+ nil )
+ (!pair "visited"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1044)
+ nil )
+ (!pair "used_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1046)
+ nil )
+ (!pair "nothrow_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1047)
+ nil )
+ (!pair "static_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1048)
+ nil )
+ (!pair "public_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1049)
+ nil )
+ (!pair "private_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1050)
+ nil )
+ (!pair "protected_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1051)
+ nil )
+ (!pair "deprecated_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1052)
+ nil )
+ (!pair "default_def_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1053)
+ nil )
+ (!pair "u"
+ (!type union 26 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/tree-core.h:1055"
+ (!srcfileloc "tree-core.h" 1144)
+ (!fields 9
+ (!pair "bits"
+ (!type struct 27 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/tree-core.h:1058"
+ (!srcfileloc "tree-core.h" 1083)
+ (!fields 17
+ (!pair "lang_flag_0"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1059)
+ nil )
+ (!pair "lang_flag_1"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1060)
+ nil )
+ (!pair "lang_flag_2"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1061)
+ nil )
+ (!pair "lang_flag_3"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1062)
+ nil )
+ (!pair "lang_flag_4"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1063)
+ nil )
+ (!pair "lang_flag_5"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1064)
+ nil )
+ (!pair "lang_flag_6"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1065)
+ nil )
+ (!pair "saturating_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1066)
+ nil )
+ (!pair "unsigned_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1068)
+ nil )
+ (!pair "packed_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1069)
+ nil )
+ (!pair "user_align"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1070)
+ nil )
+ (!pair "nameless_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1071)
+ nil )
+ (!pair "atomic_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1072)
+ nil )
+ (!pair "unavailable_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1073)
+ nil )
+ (!pair "spare0"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1074)
+ nil )
+ (!pair "spare1"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1076)
+ nil )
+ (!pair "address_space"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1082)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 1083)
+ nil )
+ (!pair "int_length"
+ (!type struct 28 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/tree-core.h:1090"
+ (!srcfileloc "tree-core.h" 1104)
+ (!fields 3
+ (!pair "unextended"
+ (!type already_seen 8)
+ (!srcfileloc "tree-core.h" 1093)
+ nil )
+ (!pair "extended"
+ (!type already_seen 8)
+ (!srcfileloc "tree-core.h" 1097)
+ nil )
+ (!pair "offset"
+ (!type already_seen 8)
+ (!srcfileloc "tree-core.h" 1103)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 1104)
+ nil )
+ (!pair "length"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1107)
+ nil )
+ (!pair "vector_cst"
+ (!type struct 29 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/tree-core.h:1110"
+ (!srcfileloc "tree-core.h" 1119)
+ (!fields 3
+ (!pair "log2_npatterns"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1112)
+ nil )
+ (!pair "nelts_per_pattern"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1115)
+ nil )
+ (!pair "unused"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1118)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 1119)
+ nil )
+ (!pair "version"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1122)
+ nil )
+ (!pair "chrec_var"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1125)
+ nil )
+ (!pair "ifn"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1128)
+ nil )
+ (!pair "omp_atomic_memory_order"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1131)
+ nil )
+ (!pair "dependence_info"
+ (!type struct 30 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/tree-core.h:1140"
+ (!srcfileloc "tree-core.h" 1143)
+ (!fields 2
+ (!pair "clique"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1141)
+ nil )
+ (!pair "base"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1142)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 1143)
+ nil )
+ )
+ nil 4095 nil )
+ (!srcfileloc "tree-core.h" 1144)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2064)
+ (!options
+ (!option tag string "TS_BASE")
+ )
+ )
+ (!pair "typed"
+ (!type struct 31 nil gc_used "tree_typed"
+ (!srcfileloc "tree-core.h" 1456)
+ (!fields 2
+ (!pair "base"
+ (!type already_seen 25)
+ (!srcfileloc "tree-core.h" 1454)
+ nil )
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1455)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2065)
+ (!options
+ (!option tag string "TS_TYPED")
+ )
+ )
+ (!pair "common"
+ (!type struct 32 nil gc_used "tree_common"
+ (!srcfileloc "tree-core.h" 1461)
+ (!fields 2
+ (!pair "typed"
+ (!type already_seen 31)
+ (!srcfileloc "tree-core.h" 1459)
+ nil )
+ (!pair "chain"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1460)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2066)
+ (!options
+ (!option tag string "TS_COMMON")
+ )
+ )
+ (!pair "int_cst"
+ (!type struct 33 nil gc_used "tree_int_cst"
+ (!srcfileloc "tree-core.h" 1466)
+ (!fields 2
+ (!pair "typed"
+ (!type already_seen 31)
+ (!srcfileloc "tree-core.h" 1464)
+ nil )
+ (!pair "val"
+ (!type array 34 nil gc_used "1"
+ (!type already_seen 2)
+ )
+ (!srcfileloc "tree-core.h" 1465)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2067)
+ (!options
+ (!option tag string "TS_INT_CST")
+ )
+ )
+ (!pair "poly_int_cst"
+ (!type struct 35 nil gc_used "tree_poly_int_cst"
+ (!srcfileloc "tree-core.h" 1499)
+ (!fields 2
+ (!pair "typed"
+ (!type already_seen 31)
+ (!srcfileloc "tree-core.h" 1497)
+ nil )
+ (!pair "coeffs"
+ (!type array 36 nil gc_used "NUM_POLY_INT_COEFFS"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "tree-core.h" 1498)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2068)
+ (!options
+ (!option tag string "TS_POLY_INT_CST")
+ )
+ )
+ (!pair "real_cst"
+ (!type struct 37 nil gc_used "tree_real_cst"
+ (!srcfileloc "tree-core.h" 1472)
+ (!fields 2
+ (!pair "typed"
+ (!type already_seen 31)
+ (!srcfileloc "tree-core.h" 1470)
+ nil )
+ (!pair "value"
+ (!type struct 38 nil gc_used "real_value"
+ (!srcfileloc "real.h" 57)
+ (!fields 7
+ (!pair "cl"
+ (!type already_seen 2)
+ (!srcfileloc "real.h" 43)
+ nil )
+ (!pair "decimal"
+ (!type already_seen 2)
+ (!srcfileloc "real.h" 45)
+ nil )
+ (!pair "sign"
+ (!type already_seen 2)
+ (!srcfileloc "real.h" 47)
+ nil )
+ (!pair "signalling"
+ (!type already_seen 2)
+ (!srcfileloc "real.h" 49)
+ nil )
+ (!pair "canonical"
+ (!type already_seen 2)
+ (!srcfileloc "real.h" 52)
+ nil )
+ (!pair "uexp"
+ (!type already_seen 2)
+ (!srcfileloc "real.h" 54)
+ nil )
+ (!pair "sig"
+ (!type array 39 nil gc_used "SIGSZ"
+ (!type already_seen 2)
+ )
+ (!srcfileloc "real.h" 56)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 1471)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2069)
+ (!options
+ (!option tag string "TS_REAL_CST")
+ )
+ )
+ (!pair "fixed_cst"
+ (!type struct 40 nil gc_used "tree_fixed_cst"
+ (!srcfileloc "tree-core.h" 1477)
+ (!fields 2
+ (!pair "typed"
+ (!type already_seen 31)
+ (!srcfileloc "tree-core.h" 1475)
+ nil )
+ (!pair "fixed_cst_ptr"
+ (!type pointer 41 nil gc_used
+ (!type struct 42
+ (!type already_seen 41)
+ gc_pointed_to "fixed_value"
+ (!srcfileloc "fixed-value.h" 27)
+ (!fields 2
+ (!pair "data"
+ (!type already_seen 2)
+ (!srcfileloc "fixed-value.h" 25)
+ nil )
+ (!pair "mode"
+ (!type user_struct 43 nil gc_used "pod_mode<scalar_mode>"
+ (!srcfileloc "coretypes.h" 69)
+ (!fields 1
+ (!pair "scalar_mode"
+ (!type struct 44 nil gc_used "scalar_mode"
+ (!srcfileloc "coretypes.h" 65)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "coretypes.h" 69)
+ nil )
+ )
+ )
+ (!srcfileloc "fixed-value.h" 26)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "tree-core.h" 1476)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2070)
+ (!options
+ (!option tag string "TS_FIXED_CST")
+ )
+ )
+ (!pair "vector"
+ (!type struct 45 nil gc_used "tree_vector"
+ (!srcfileloc "tree-core.h" 1494)
+ (!fields 2
+ (!pair "typed"
+ (!type already_seen 31)
+ (!srcfileloc "tree-core.h" 1492)
+ nil )
+ (!pair "elts"
+ (!type array 46 nil gc_used "1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "tree-core.h" 1493)
+ (!options
+ (!option length string "vector_cst_encoded_nelts ((tree) &%h)")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2071)
+ (!options
+ (!option tag string "TS_VECTOR")
+ )
+ )
+ (!pair "string"
+ (!type struct 47 nil gc_used "tree_string"
+ (!srcfileloc "tree-core.h" 1483)
+ (!fields 3
+ (!pair "typed"
+ (!type already_seen 31)
+ (!srcfileloc "tree-core.h" 1480)
+ nil )
+ (!pair "length"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1481)
+ nil )
+ (!pair "str"
+ (!type array 48 nil gc_used "1"
+ (!type already_seen 8)
+ )
+ (!srcfileloc "tree-core.h" 1482)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2072)
+ (!options
+ (!option tag string "TS_STRING")
+ )
+ )
+ (!pair "complex"
+ (!type struct 49 nil gc_used "tree_complex"
+ (!srcfileloc "tree-core.h" 1489)
+ (!fields 3
+ (!pair "typed"
+ (!type already_seen 31)
+ (!srcfileloc "tree-core.h" 1486)
+ nil )
+ (!pair "real"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1487)
+ nil )
+ (!pair "imag"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1488)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2073)
+ (!options
+ (!option tag string "TS_COMPLEX")
+ )
+ )
+ (!pair "identifier"
+ (!type struct 50 nil gc_used "tree_identifier"
+ (!srcfileloc "tree-core.h" 1504)
+ (!fields 2
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "tree-core.h" 1502)
+ nil )
+ (!pair "id"
+ (!type already_seen 15)
+ (!srcfileloc "tree-core.h" 1503)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2074)
+ (!options
+ (!option tag string "TS_IDENTIFIER")
+ )
+ )
+ (!pair "decl_minimal"
+ (!type struct 51 nil gc_used "tree_decl_minimal"
+ (!srcfileloc "tree-core.h" 1773)
+ (!fields 5
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "tree-core.h" 1768)
+ nil )
+ (!pair "locus"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1769)
+ nil )
+ (!pair "uid"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1770)
+ nil )
+ (!pair "name"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1771)
+ nil )
+ (!pair "context"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1772)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2075)
+ (!options
+ (!option tag string "TS_DECL_MINIMAL")
+ )
+ )
+ (!pair "decl_common"
+ (!type struct 52 nil gc_used "tree_decl_common"
+ (!srcfileloc "tree-core.h" 1849)
+ (!fields 37
+ (!pair "common"
+ (!type already_seen 51)
+ (!srcfileloc "tree-core.h" 1776)
+ nil )
+ (!pair "size"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1777)
+ nil )
+ (!pair "mode"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1779)
+ nil )
+ (!pair "nonlocal_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1781)
+ nil )
+ (!pair "virtual_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1782)
+ nil )
+ (!pair "ignored_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1783)
+ nil )
+ (!pair "abstract_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1784)
+ nil )
+ (!pair "artificial_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1785)
+ nil )
+ (!pair "preserve_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1786)
+ nil )
+ (!pair "debug_expr_is_from"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1787)
+ nil )
+ (!pair "lang_flag_0"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1789)
+ nil )
+ (!pair "lang_flag_1"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1790)
+ nil )
+ (!pair "lang_flag_2"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1791)
+ nil )
+ (!pair "lang_flag_3"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1792)
+ nil )
+ (!pair "lang_flag_4"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1793)
+ nil )
+ (!pair "lang_flag_5"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1794)
+ nil )
+ (!pair "lang_flag_6"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1795)
+ nil )
+ (!pair "lang_flag_7"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1796)
+ nil )
+ (!pair "lang_flag_8"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1797)
+ nil )
+ (!pair "decl_flag_0"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1802)
+ nil )
+ (!pair "decl_flag_1"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1806)
+ nil )
+ (!pair "decl_flag_2"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1810)
+ nil )
+ (!pair "decl_flag_3"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1812)
+ nil )
+ (!pair "not_gimple_reg_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1815)
+ nil )
+ (!pair "decl_by_reference_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1817)
+ nil )
+ (!pair "decl_read_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1819)
+ nil )
+ (!pair "decl_nonshareable_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1822)
+ nil )
+ (!pair "off_align"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1825)
+ nil )
+ (!pair "align"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1828)
+ nil )
+ (!pair "warn_if_not_align"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1832)
+ nil )
+ (!pair "decl_not_flexarray"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1835)
+ nil )
+ (!pair "pt_uid"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1840)
+ nil )
+ (!pair "size_unit"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1842)
+ nil )
+ (!pair "initial"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1843)
+ nil )
+ (!pair "attributes"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1844)
+ nil )
+ (!pair "abstract_origin"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1845)
+ nil )
+ (!pair "lang_specific"
+ (!type pointer 53 nil gc_used
+ (!type lang_struct 54
+ (!type already_seen 53)
+ gc_pointed_to "lang_decl"
+ (!srcfileloc "ada/gcc-interface/ada-tree.h" 37)
+ (!fields 0 )
+ nil 4095
+ (!homotypes 10
+ (!type struct 55 nil gc_pointed_to "lang_decl"
+ (!srcfileloc "rust/rust-lang.cc" 74)
+ (!fields 0 )
+ nil 2048
+ (!type already_seen 54)
+ nil )
+
+ (!type struct 56 nil gc_pointed_to "lang_decl"
+ (!srcfileloc "m2/gm2-lang.cc" 83)
+ (!fields 1
+ (!pair "dummy"
+ (!type already_seen 8)
+ (!srcfileloc "m2/gm2-lang.cc" 82)
+ nil )
+ )
+ nil 256
+ (!type already_seen 54)
+ nil )
+
+ (!type struct 57 nil gc_pointed_to "lang_decl"
+ (!srcfileloc "lto/lto-tree.h" 32)
+ (!fields 1
+ (!pair "dummy"
+ (!type already_seen 2)
+ (!srcfileloc "lto/lto-tree.h" 31)
+ nil )
+ )
+ nil 128
+ (!type already_seen 54)
+ nil )
+
+ (!type struct 58 nil gc_pointed_to "lang_decl"
+ (!srcfileloc "jit/dummy-frontend.cc" 497)
+ (!fields 1
+ (!pair "dummy"
+ (!type already_seen 8)
+ (!srcfileloc "jit/dummy-frontend.cc" 496)
+ nil )
+ )
+ (!options
+ (!option variable_size string "")
+ )
+ 64
+ (!type already_seen 54)
+ nil )
+
+ (!type struct 59 nil gc_pointed_to "lang_decl"
+ (!srcfileloc "go/go-lang.cc" 58)
+ (!fields 1
+ (!pair "dummy"
+ (!type already_seen 8)
+ (!srcfileloc "go/go-lang.cc" 57)
+ nil )
+ )
+ nil 32
+ (!type already_seen 54)
+ nil )
+
+ (!type struct 60 nil gc_pointed_to "lang_decl"
+ (!srcfileloc "fortran/trans.h" 1034)
+ (!fields 9
+ (!pair "saved_descriptor"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 1021)
+ nil )
+ (!pair "stringlen"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 1026)
+ nil )
+ (!pair "addr"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 1027)
+ nil )
+ (!pair "token"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 1029)
+ nil )
+ (!pair "caf_offset"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 1029)
+ nil )
+ (!pair "scalar_allocatable"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans.h" 1030)
+ nil )
+ (!pair "scalar_pointer"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans.h" 1031)
+ nil )
+ (!pair "scalar_target"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans.h" 1032)
+ nil )
+ (!pair "optional_arg"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans.h" 1033)
+ nil )
+ )
+ nil 16
+ (!type already_seen 54)
+ nil )
+
+ (!type struct 61 nil gc_pointed_to "lang_decl"
+ (!srcfileloc "d/d-tree.h" 284)
+ (!fields 7
+ (!pair "decl"
+ (!type pointer 62 nil gc_unused
+ (!type struct 63
+ (!type already_seen 62)
+ gc_unused "Declaration"
+ (!srcfileloc "d/d-tree.h" 219)
+ (!fields 0 )
+ nil 8 nil nil )
+ )
+ (!srcfileloc "d/d-tree.h" 265)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "frame_field"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-tree.h" 268)
+ nil )
+ (!pair "named_result"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-tree.h" 271)
+ nil )
+ (!pair "thunks"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-tree.h" 274)
+ nil )
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "d/d-tree.h" 277)
+ nil )
+ (!pair "intrinsic"
+ (!type already_seen 2)
+ (!srcfileloc "d/d-tree.h" 280)
+ nil )
+ (!pair "frame_info"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-tree.h" 283)
+ nil )
+ )
+ nil 8
+ (!type already_seen 54)
+ nil )
+
+ (!type struct 64 nil gc_pointed_to "lang_decl"
+ (!srcfileloc "cp/cp-tree.h" 3019)
+ (!fields 1
+ (!pair "u"
+ (!type union 65 nil gc_used "lang_decl_u"
+ (!srcfileloc "cp/cp-tree.h" 3018)
+ (!fields 6
+ (!pair "base"
+ (!type struct 66 nil gc_used "lang_decl_base"
+ (!srcfileloc "cp/cp-tree.h" 2876)
+ (!fields 18
+ (!pair "selector"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2847)
+ nil )
+ (!pair "language"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2848)
+ nil )
+ (!pair "use_template"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2849)
+ nil )
+ (!pair "not_really_extern"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2850)
+ nil )
+ (!pair "initialized_in_class"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2851)
+ nil )
+ (!pair "threadprivate_or_deleted_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2853)
+ nil )
+ (!pair "anticipated_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2857)
+ nil )
+ (!pair "friend_or_tls"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2858)
+ nil )
+ (!pair "unknown_bound_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2859)
+ nil )
+ (!pair "odr_used"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2860)
+ nil )
+ (!pair "concept_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2861)
+ nil )
+ (!pair "var_declared_inline_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2862)
+ nil )
+ (!pair "dependent_init_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2863)
+ nil )
+ (!pair "module_purview_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2867)
+ nil )
+ (!pair "module_attach_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2868)
+ nil )
+ (!pair "module_import_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2869)
+ nil )
+ (!pair "module_entity_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2870)
+ nil )
+ (!pair "module_keyed_decls_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2873)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 3012)
+ (!options
+ (!option default string "")
+ )
+ )
+ (!pair "min"
+ (!type struct 67 nil gc_used "lang_decl_min"
+ (!srcfileloc "cp/cp-tree.h" 2907)
+ (!fields 3
+ (!pair "base"
+ (!type already_seen 66)
+ (!srcfileloc "cp/cp-tree.h" 2891)
+ nil )
+ (!pair "template_info"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2898)
+ nil )
+ (!pair "access"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2906)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 3013)
+ (!options
+ (!option tag string "lds_min")
+ )
+ )
+ (!pair "fn"
+ (!type struct 68 nil gc_used "lang_decl_fn"
+ (!srcfileloc "cp/cp-tree.h" 2971)
+ (!fields 24
+ (!pair "min"
+ (!type already_seen 67)
+ (!srcfileloc "cp/cp-tree.h" 2912)
+ nil )
+ (!pair "ovl_op_code"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2915)
+ nil )
+ (!pair "global_ctor_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2916)
+ nil )
+ (!pair "global_dtor_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2917)
+ nil )
+ (!pair "static_function"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2919)
+ nil )
+ (!pair "pure_virtual"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2920)
+ nil )
+ (!pair "defaulted_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2921)
+ nil )
+ (!pair "has_in_charge_parm_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2922)
+ nil )
+ (!pair "has_vtt_parm_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2923)
+ nil )
+ (!pair "pending_inline_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2924)
+ nil )
+ (!pair "nonconverting"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2925)
+ nil )
+ (!pair "thunk_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2926)
+ nil )
+ (!pair "this_thunk_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2928)
+ nil )
+ (!pair "omp_declare_reduction_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2929)
+ nil )
+ (!pair "has_dependent_explicit_spec_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2930)
+ nil )
+ (!pair "immediate_fn_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2931)
+ nil )
+ (!pair "maybe_deleted"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2932)
+ nil )
+ (!pair "coroutine_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2933)
+ nil )
+ (!pair "implicit_constexpr"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2934)
+ nil )
+ (!pair "spare"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2936)
+ nil )
+ (!pair "befriending_classes"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2943)
+ nil )
+ (!pair "context"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2953)
+ nil )
+ (!pair "u5"
+ (!type union 69 nil gc_used "lang_decl_u5"
+ (!srcfileloc "cp/cp-tree.h" 2963)
+ (!fields 2
+ (!pair "cloned_function"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2958)
+ (!options
+ (!option tag string "0")
+ )
+ )
+ (!pair "fixed_offset"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2962)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ )
+ nil 1028 nil )
+ (!srcfileloc "cp/cp-tree.h" 2963)
+ (!options
+ (!option desc string "%1.thunk_p")
+ )
+ )
+ (!pair "u"
+ (!type union 70 nil gc_used "lang_decl_u3"
+ (!srcfileloc "cp/cp-tree.h" 2969)
+ (!fields 2
+ (!pair "pending_inline_info"
+ (!type pointer 71 nil gc_used
+ (!type struct 72
+ (!type already_seen 71)
+ gc_pointed_to "cp_token_cache"
+ (!srcfileloc "cp/parser.h" 141)
+ (!fields 2
+ (!pair "first"
+ (!type pointer 73 nil gc_unused
+ (!type struct 74
+ (!type already_seen 73)
+ gc_used "cp_token"
+ (!srcfileloc "cp/parser.h" 87)
+ (!fields 10
+ (!pair "type"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 44)
+ nil )
+ (!pair "keyword"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 47)
+ nil )
+ (!pair "flags"
+ (!type already_seen 8)
+ (!srcfileloc "cp/parser.h" 49)
+ nil )
+ (!pair "implicit_extern_c"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 51)
+ nil )
+ (!pair "error_reported"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 55)
+ nil )
+ (!pair "purged_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 59)
+ nil )
+ (!pair "tree_check_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 60)
+ nil )
+ (!pair "main_source_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 61)
+ nil )
+ (!pair "location"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 65)
+ nil )
+ (!pair "u"
+ (!type union 75 nil gc_used "cp_token_value"
+ (!srcfileloc "cp/parser.h" 72)
+ (!fields 2
+ (!pair "tree_check_value"
+ (!type pointer 76 nil gc_used
+ (!type struct 77
+ (!type already_seen 76)
+ gc_pointed_to "tree_check"
+ (!srcfileloc "cp/parser.h" 38)
+ (!fields 3
+ (!pair "value"
+ (!type already_seen 23)
+ (!srcfileloc "cp/parser.h" 32)
+ nil )
+ (!pair "checks"
+ (!type pointer 78 nil gc_used
+ (!type user_struct 79
+ (!type already_seen 78)
+ gc_pointed_to "vec<deferred_access_check,va_gc>"
+ (!srcfileloc "cp/cp-tree.h" 1564)
+ (!fields 2
+ (!pair "va_gc"
+ (!type undefined 80 nil gc_unused "va_gc"
+ (!srcfileloc "rtl.h" 267)
+ )
+ (!srcfileloc "cp/cp-tree.h" 1564)
+ nil )
+ (!pair "deferred_access_check"
+ (!type struct 81 nil gc_used "deferred_access_check"
+ (!srcfileloc "cp/cp-tree.h" 1564)
+ (!fields 4
+ (!pair "binfo"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1551)
+ nil )
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1553)
+ nil )
+ (!pair "diag_decl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1555)
+ nil )
+ (!pair "loc"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1557)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1564)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cp/parser.h" 34)
+ nil )
+ (!pair "qualifying_scope"
+ (!type already_seen 23)
+ (!srcfileloc "cp/parser.h" 37)
+ nil )
+ )
+ nil 1028 nil nil )
+ )
+ (!srcfileloc "cp/parser.h" 69)
+ (!options
+ (!option tag string "true")
+ )
+ )
+ (!pair "value"
+ (!type already_seen 23)
+ (!srcfileloc "cp/parser.h" 71)
+ (!options
+ (!option tag string "false")
+ )
+ )
+ )
+ nil 1028 nil )
+ (!srcfileloc "cp/parser.h" 72)
+ (!options
+ (!option desc string "%1.tree_check_p")
+ )
+ )
+ )
+ nil 1028 nil nil )
+ )
+ (!srcfileloc "cp/parser.h" 135)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "last"
+ (!type already_seen 73)
+ (!srcfileloc "cp/parser.h" 138)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 1028 nil nil )
+ )
+ (!srcfileloc "cp/cp-tree.h" 2967)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ (!pair "saved_auto_return_type"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2968)
+ (!options
+ (!option tag string "0")
+ )
+ )
+ )
+ nil 1028 nil )
+ (!srcfileloc "cp/cp-tree.h" 2969)
+ (!options
+ (!option desc string "%1.pending_inline_p")
+ )
+ )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 3014)
+ (!options
+ (!option tag string "lds_fn")
+ )
+ )
+ (!pair "ns"
+ (!type struct 82 nil gc_used "lang_decl_ns"
+ (!srcfileloc "cp/cp-tree.h" 2986)
+ (!fields 4
+ (!pair "base"
+ (!type already_seen 66)
+ (!srcfileloc "cp/cp-tree.h" 2976)
+ nil )
+ (!pair "level"
+ (!type pointer 83 nil gc_used
+ (!type struct 84
+ (!type already_seen 83)
+ gc_pointed_to "cp_binding_level"
+ (!srcfileloc "cp/name-lookup.h" 314)
+ (!fields 16
+ (!pair "names"
+ (!type already_seen 23)
+ (!srcfileloc "cp/name-lookup.h" 251)
+ nil )
+ (!pair "using_directives"
+ (!type pointer 85 nil gc_used
+ (!type user_struct 86
+ (!type already_seen 85)
+ gc_pointed_to "vec<tree,va_gc>"
+ (!srcfileloc "tree-core.h" 1670)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "tree-core.h" 1670)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1670)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cp/name-lookup.h" 254)
+ nil )
+ (!pair "class_shadowed"
+ (!type pointer 87 nil gc_used
+ (!type user_struct 88
+ (!type already_seen 87)
+ gc_pointed_to "vec<cp_class_binding,va_gc>"
+ (!srcfileloc "cp/name-lookup.h" 258)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/name-lookup.h" 258)
+ nil )
+ (!pair "cp_class_binding"
+ (!type struct 89 nil gc_used "cp_class_binding"
+ (!srcfileloc "cp/name-lookup.h" 258)
+ (!fields 2
+ (!pair "base"
+ (!type pointer 90 nil gc_used
+ (!type struct 91
+ (!type already_seen 90)
+ gc_pointed_to "cxx_binding"
+ (!srcfileloc "cp/name-lookup.h" 59)
+ (!fields 7
+ (!pair "previous"
+ (!type already_seen 90)
+ (!srcfileloc "cp/name-lookup.h" 48)
+ nil )
+ (!pair "value"
+ (!type already_seen 23)
+ (!srcfileloc "cp/name-lookup.h" 50)
+ nil )
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "cp/name-lookup.h" 52)
+ nil )
+ (!pair "scope"
+ (!type already_seen 83)
+ (!srcfileloc "cp/name-lookup.h" 54)
+ nil )
+ (!pair "value_is_inherited"
+ (!type already_seen 2)
+ (!srcfileloc "cp/name-lookup.h" 56)
+ nil )
+ (!pair "is_local"
+ (!type already_seen 2)
+ (!srcfileloc "cp/name-lookup.h" 57)
+ nil )
+ (!pair "type_is_hidden"
+ (!type already_seen 2)
+ (!srcfileloc "cp/name-lookup.h" 58)
+ nil )
+ )
+ nil 1028 nil nil )
+ )
+ (!srcfileloc "cp/name-lookup.h" 220)
+ nil )
+ (!pair "identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/name-lookup.h" 222)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/name-lookup.h" 258)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cp/name-lookup.h" 258)
+ nil )
+ (!pair "type_shadowed"
+ (!type already_seen 23)
+ (!srcfileloc "cp/name-lookup.h" 265)
+ nil )
+ (!pair "blocks"
+ (!type already_seen 23)
+ (!srcfileloc "cp/name-lookup.h" 270)
+ nil )
+ (!pair "this_entity"
+ (!type already_seen 23)
+ (!srcfileloc "cp/name-lookup.h" 274)
+ nil )
+ (!pair "level_chain"
+ (!type already_seen 83)
+ (!srcfileloc "cp/name-lookup.h" 277)
+ nil )
+ (!pair "statement_list"
+ (!type already_seen 23)
+ (!srcfileloc "cp/name-lookup.h" 281)
+ nil )
+ (!pair "binding_depth"
+ (!type already_seen 2)
+ (!srcfileloc "cp/name-lookup.h" 284)
+ nil )
+ (!pair "kind"
+ (!type already_seen 2)
+ (!srcfileloc "cp/name-lookup.h" 289)
+ nil )
+ (!pair "explicit_spec_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/name-lookup.h" 293)
+ nil )
+ (!pair "keep"
+ (!type already_seen 2)
+ (!srcfileloc "cp/name-lookup.h" 296)
+ nil )
+ (!pair "more_cleanups_ok"
+ (!type already_seen 2)
+ (!srcfileloc "cp/name-lookup.h" 300)
+ nil )
+ (!pair "have_cleanups"
+ (!type already_seen 2)
+ (!srcfileloc "cp/name-lookup.h" 301)
+ nil )
+ (!pair "defining_class_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/name-lookup.h" 308)
+ nil )
+ (!pair "requires_expression"
+ (!type already_seen 2)
+ (!srcfileloc "cp/name-lookup.h" 311)
+ nil )
+ )
+ nil 1028 nil nil )
+ )
+ (!srcfileloc "cp/cp-tree.h" 2977)
+ nil )
+ (!pair "inlinees"
+ (!type already_seen 85)
+ (!srcfileloc "cp/cp-tree.h" 2980)
+ nil )
+ (!pair "bindings"
+ (!type pointer 92 nil gc_used
+ (!type user_struct 93
+ (!type already_seen 92)
+ gc_pointed_to "hash_table<named_decl_hash>"
+ (!srcfileloc "cp/cp-tree.h" 2985)
+ (!fields 1
+ (!pair "named_decl_hash"
+ (!type struct 94 nil gc_used "named_decl_hash"
+ (!srcfileloc "cp/cp-tree.h" 2985)
+ (!fields 0 )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 2985)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cp/cp-tree.h" 2985)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 3015)
+ (!options
+ (!option tag string "lds_ns")
+ )
+ )
+ (!pair "parm"
+ (!type struct 95 nil gc_used "lang_decl_parm"
+ (!srcfileloc "cp/cp-tree.h" 2994)
+ (!fields 3
+ (!pair "base"
+ (!type already_seen 66)
+ (!srcfileloc "cp/cp-tree.h" 2991)
+ nil )
+ (!pair "level"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2992)
+ nil )
+ (!pair "index"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2993)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 3016)
+ (!options
+ (!option tag string "lds_parm")
+ )
+ )
+ (!pair "decomp"
+ (!type struct 96 nil gc_used "lang_decl_decomp"
+ (!srcfileloc "cp/cp-tree.h" 3003)
+ (!fields 2
+ (!pair "min"
+ (!type already_seen 67)
+ (!srcfileloc "cp/cp-tree.h" 2999)
+ nil )
+ (!pair "base"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 3002)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 3017)
+ (!options
+ (!option tag string "lds_decomp")
+ )
+ )
+ )
+ (!options
+ (!option desc string "%h.base.selector")
+ )
+ 1028 nil )
+ (!srcfileloc "cp/cp-tree.h" 3018)
+ nil )
+ )
+ nil 1028
+ (!type already_seen 54)
+ nil )
+
+ (!type struct 97 nil gc_pointed_to "lang_decl"
+ (!srcfileloc "c/c-lang.h" 46)
+ (!fields 1
+ (!pair "dummy"
+ (!type already_seen 8)
+ (!srcfileloc "c/c-lang.h" 45)
+ nil )
+ )
+ nil 514
+ (!type already_seen 54)
+ nil )
+
+ (!type struct 98 nil gc_pointed_to "lang_decl"
+ (!srcfileloc "ada/gcc-interface/ada-tree.h" 37)
+ (!fields 1
+ (!pair "t"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/ada-tree.h" 37)
+ nil )
+ )
+ nil 1
+ (!type already_seen 54)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "tree-core.h" 1848)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2076)
+ (!options
+ (!option tag string "TS_DECL_COMMON")
+ )
+ )
+ (!pair "decl_with_rtl"
+ (!type struct 99 nil gc_used "tree_decl_with_rtl"
+ (!srcfileloc "tree-core.h" 1854)
+ (!fields 2
+ (!pair "common"
+ (!type already_seen 52)
+ (!srcfileloc "tree-core.h" 1852)
+ nil )
+ (!pair "rtl"
+ (!type pointer 100
+ (!type pointer 101 nil gc_used
+ (!type already_seen 100)
+ )
+ gc_pointed_to
+ (!type struct 102
+ (!type already_seen 100)
+ gc_pointed_to "rtx_def"
+ (!srcfileloc "rtl.h" 453)
+ (!fields 12
+ (!pair "code"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 313)
+ nil )
+ (!pair "mode"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 316)
+ nil )
+ (!pair "jump"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 329)
+ nil )
+ (!pair "call"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 336)
+ nil )
+ (!pair "unchanging"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 348)
+ nil )
+ (!pair "volatil"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 362)
+ nil )
+ (!pair "in_struct"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 377)
+ nil )
+ (!pair "used"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 386)
+ nil )
+ (!pair "frame_related"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 395)
+ nil )
+ (!pair "return_val"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 402)
+ nil )
+ (!pair "u2"
+ (!type union 103 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/rtl.h:404"
+ (!srcfileloc "rtl.h" 438)
+ (!fields 6
+ (!pair "original_regno"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 411)
+ nil )
+ (!pair "insn_uid"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 414)
+ nil )
+ (!pair "symbol_ref_flags"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 417)
+ nil )
+ (!pair "var_location_status"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 420)
+ nil )
+ (!pair "num_elem"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 424)
+ nil )
+ (!pair "const_vector"
+ (!type struct 104 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/rtl.h:428"
+ (!srcfileloc "rtl.h" 437)
+ (!fields 3
+ (!pair "npatterns"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 430)
+ nil )
+ (!pair "nelts_per_pattern"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 433)
+ nil )
+ (!pair "unused"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 436)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "rtl.h" 437)
+ nil )
+ )
+ nil 4095 nil )
+ (!srcfileloc "rtl.h" 438)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "u"
+ (!type union 105 nil gc_used "rtx_def_subunion"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 152
+ (!pair ""
+ (!type struct 106 nil gc_used "rtx_def_debug_marker"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "DEBUG_MARKER")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 107 nil gc_used "rtx_def_debug_parameter_ref"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_tree"
+ (!type already_seen 23)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "DEBUG_PARAMETER_REF")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 108 nil gc_used "rtx_def_entry_value"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ENTRY_VALUE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 109 nil gc_used "rtx_def_debug_implicit_ptr"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_tree"
+ (!type already_seen 23)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "DEBUG_IMPLICIT_PTR")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 110 nil gc_used "rtx_def_var_location"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_tree"
+ (!type already_seen 23)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "VAR_LOCATION")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 111 nil gc_used "rtx_def_fma"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 3
+ (!pair ".fld[2].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "FMA")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 112 nil gc_used "rtx_def_us_truncate"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "US_TRUNCATE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 113 nil gc_used "rtx_def_ss_truncate"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SS_TRUNCATE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 114 nil gc_used "rtx_def_us_minus"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "US_MINUS")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 115 nil gc_used "rtx_def_us_ashift"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "US_ASHIFT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 116 nil gc_used "rtx_def_ss_ashift"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SS_ASHIFT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 117 nil gc_used "rtx_def_ss_abs"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SS_ABS")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 118 nil gc_used "rtx_def_us_neg"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "US_NEG")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 119 nil gc_used "rtx_def_ss_neg"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SS_NEG")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 120 nil gc_used "rtx_def_ss_minus"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SS_MINUS")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 121 nil gc_used "rtx_def_us_plus"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "US_PLUS")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 122 nil gc_used "rtx_def_ss_plus"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SS_PLUS")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 123 nil gc_used "rtx_def_vec_series"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "VEC_SERIES")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 124 nil gc_used "rtx_def_vec_duplicate"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "VEC_DUPLICATE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 125 nil gc_used "rtx_def_vec_concat"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "VEC_CONCAT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 126 nil gc_used "rtx_def_vec_select"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "VEC_SELECT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 127 nil gc_used "rtx_def_vec_merge"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 3
+ (!pair ".fld[2].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "VEC_MERGE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 128 nil gc_used "rtx_def_lo_sum"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "LO_SUM")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 129 nil gc_used "rtx_def_high"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "HIGH")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 130 nil gc_used "rtx_def_zero_extract"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 3
+ (!pair ".fld[2].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ZERO_EXTRACT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 131 nil gc_used "rtx_def_sign_extract"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 3
+ (!pair ".fld[2].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SIGN_EXTRACT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 132 nil gc_used "rtx_def_parity"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "PARITY")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 133 nil gc_used "rtx_def_popcount"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "POPCOUNT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 134 nil gc_used "rtx_def_ctz"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CTZ")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 135 nil gc_used "rtx_def_clz"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CLZ")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 136 nil gc_used "rtx_def_clrsb"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CLRSB")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 137 nil gc_used "rtx_def_ffs"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "FFS")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 138 nil gc_used "rtx_def_bswap"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "BSWAP")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 139 nil gc_used "rtx_def_sqrt"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SQRT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 140 nil gc_used "rtx_def_abs"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ABS")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 141 nil gc_used "rtx_def_unsigned_sat_fract"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UNSIGNED_SAT_FRACT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 142 nil gc_used "rtx_def_sat_fract"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SAT_FRACT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 143 nil gc_used "rtx_def_unsigned_fract_convert"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UNSIGNED_FRACT_CONVERT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 144 nil gc_used "rtx_def_fract_convert"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "FRACT_CONVERT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 145 nil gc_used "rtx_def_unsigned_fix"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UNSIGNED_FIX")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 146 nil gc_used "rtx_def_unsigned_float"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UNSIGNED_FLOAT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 147 nil gc_used "rtx_def_fix"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "FIX")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 148 nil gc_used "rtx_def_float"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "FLOAT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 149 nil gc_used "rtx_def_float_truncate"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "FLOAT_TRUNCATE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 150 nil gc_used "rtx_def_float_extend"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "FLOAT_EXTEND")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 151 nil gc_used "rtx_def_truncate"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "TRUNCATE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 152 nil gc_used "rtx_def_zero_extend"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ZERO_EXTEND")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 153 nil gc_used "rtx_def_sign_extend"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SIGN_EXTEND")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 154 nil gc_used "rtx_def_unlt"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UNLT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 155 nil gc_used "rtx_def_unle"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UNLE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 156 nil gc_used "rtx_def_ungt"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UNGT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 157 nil gc_used "rtx_def_unge"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UNGE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 158 nil gc_used "rtx_def_uneq"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UNEQ")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 159 nil gc_used "rtx_def_ordered"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ORDERED")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 160 nil gc_used "rtx_def_unordered"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UNORDERED")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 161 nil gc_used "rtx_def_ltu"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "LTU")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 162 nil gc_used "rtx_def_leu"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "LEU")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 163 nil gc_used "rtx_def_gtu"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "GTU")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 164 nil gc_used "rtx_def_geu"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "GEU")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 165 nil gc_used "rtx_def_ltgt"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "LTGT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 166 nil gc_used "rtx_def_lt"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "LT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 167 nil gc_used "rtx_def_le"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "LE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 168 nil gc_used "rtx_def_gt"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "GT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 169 nil gc_used "rtx_def_ge"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "GE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 170 nil gc_used "rtx_def_eq"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "EQ")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 171 nil gc_used "rtx_def_ne"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "NE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 172 nil gc_used "rtx_def_post_modify"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "POST_MODIFY")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 173 nil gc_used "rtx_def_pre_modify"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "PRE_MODIFY")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 174 nil gc_used "rtx_def_post_inc"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "POST_INC")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 175 nil gc_used "rtx_def_post_dec"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "POST_DEC")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 176 nil gc_used "rtx_def_pre_inc"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "PRE_INC")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 177 nil gc_used "rtx_def_pre_dec"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "PRE_DEC")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 178 nil gc_used "rtx_def_umax"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UMAX")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 179 nil gc_used "rtx_def_umin"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UMIN")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 180 nil gc_used "rtx_def_smax"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SMAX")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 181 nil gc_used "rtx_def_smin"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SMIN")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 182 nil gc_used "rtx_def_rotatert"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ROTATERT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 183 nil gc_used "rtx_def_lshiftrt"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "LSHIFTRT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 184 nil gc_used "rtx_def_ashiftrt"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ASHIFTRT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 185 nil gc_used "rtx_def_rotate"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ROTATE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 186 nil gc_used "rtx_def_ashift"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ASHIFT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 187 nil gc_used "rtx_def_not"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "NOT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 188 nil gc_used "rtx_def_xor"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "XOR")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 189 nil gc_used "rtx_def_ior"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "IOR")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 190 nil gc_used "rtx_def_and"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "AND")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 191 nil gc_used "rtx_def_umod"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UMOD")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 192 nil gc_used "rtx_def_udiv"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UDIV")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 193 nil gc_used "rtx_def_mod"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "MOD")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 194 nil gc_used "rtx_def_us_div"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "US_DIV")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 195 nil gc_used "rtx_def_ss_div"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SS_DIV")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 196 nil gc_used "rtx_def_div"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "DIV")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 197 nil gc_used "rtx_def_umul_highpart"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UMUL_HIGHPART")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 198 nil gc_used "rtx_def_smul_highpart"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SMUL_HIGHPART")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 199 nil gc_used "rtx_def_us_mult"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "US_MULT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 200 nil gc_used "rtx_def_ss_mult"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SS_MULT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 201 nil gc_used "rtx_def_mult"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "MULT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 202 nil gc_used "rtx_def_neg"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "NEG")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 203 nil gc_used "rtx_def_minus"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "MINUS")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 204 nil gc_used "rtx_def_plus"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "PLUS")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 205 nil gc_used "rtx_def_compare"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "COMPARE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 206 nil gc_used "rtx_def_if_then_else"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 3
+ (!pair ".fld[2].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "IF_THEN_ELSE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 207 nil gc_used "rtx_def_symbol_ref"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 3
+ (!pair "block_sym"
+ (!type union 208 nil gc_used "fake_union_1"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ""
+ (!type struct 209 nil gc_used "block_symbol"
+ (!srcfileloc "rtl.h" 245)
+ (!fields 3
+ (!pair "fld"
+ (!type array 210 nil gc_unused "2"
+ (!type undefined 211 nil gc_unused "rtunion"
+ (!srcfileloc "rtl.h" 237)
+ )
+ )
+ (!srcfileloc "rtl.h" 237)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "block"
+ (!type pointer 212 nil gc_used
+ (!type struct 213
+ (!type already_seen 212)
+ gc_pointed_to "object_block"
+ (!srcfileloc "rtl.h" 278)
+ (!fields 5
+ (!pair "sect"
+ (!type pointer 214 nil gc_used
+ (!type union 215
+ (!type already_seen 214)
+ gc_pointed_to "section"
+ (!srcfileloc "output.h" 511)
+ (!fields 4
+ (!pair "common"
+ (!type struct 216 nil gc_used "section_common"
+ (!srcfileloc "output.h" 453)
+ (!fields 1
+ (!pair "flags"
+ (!type already_seen 2)
+ (!srcfileloc "output.h" 452)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "output.h" 507)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "named"
+ (!type struct 217 nil gc_used "named_section"
+ (!srcfileloc "output.h" 465)
+ (!fields 3
+ (!pair "common"
+ (!type already_seen 216)
+ (!srcfileloc "output.h" 457)
+ nil )
+ (!pair "name"
+ (!type already_seen 11)
+ (!srcfileloc "output.h" 460)
+ nil )
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "output.h" 464)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "output.h" 508)
+ (!options
+ (!option tag string "SECTION_NAMED")
+ )
+ )
+ (!pair "unnamed"
+ (!type struct 218 nil gc_used "unnamed_section"
+ (!srcfileloc "output.h" 482)
+ (!fields 4
+ (!pair "common"
+ (!type already_seen 216)
+ (!srcfileloc "output.h" 473)
+ nil )
+ (!pair "callback"
+ (!type callback 219 nil gc_used)
+ (!srcfileloc "output.h" 477)
+ (!options
+ (!option callback string "")
+ )
+ )
+ (!pair "data"
+ (!type already_seen 11)
+ (!srcfileloc "output.h" 478)
+ nil )
+ (!pair "next"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 481)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "output.h" 509)
+ (!options
+ (!option tag string "SECTION_UNNAMED")
+ )
+ )
+ (!pair "noswitch"
+ (!type struct 220 nil gc_used "noswitch_section"
+ (!srcfileloc "output.h" 503)
+ (!fields 2
+ (!pair "common"
+ (!type already_seen 216)
+ (!srcfileloc "output.h" 499)
+ nil )
+ (!pair "callback"
+ (!type already_seen 219)
+ (!srcfileloc "output.h" 502)
+ (!options
+ (!option callback string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "output.h" 510)
+ (!options
+ (!option tag string "SECTION_NOSWITCH")
+ )
+ )
+ )
+ (!options
+ (!option for_user string "")
+ (!option desc string "SECTION_STYLE (&(%h))")
+ )
+ 4095 nil )
+ )
+ (!srcfileloc "rtl.h" 251)
+ nil )
+ (!pair "alignment"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 254)
+ nil )
+ (!pair "size"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 257)
+ nil )
+ (!pair "objects"
+ (!type pointer 221 nil gc_used
+ (!type user_struct 222
+ (!type already_seen 221)
+ gc_pointed_to "vec<rtx,va_gc>"
+ (!srcfileloc "rtl.h" 267)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "rtl.h" 267)
+ nil )
+ (!pair "rtx"
+ (!type already_seen 100)
+ (!srcfileloc "rtl.h" 267)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "rtl.h" 267)
+ nil )
+ (!pair "anchors"
+ (!type already_seen 221)
+ (!srcfileloc "rtl.h" 277)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "rtl.h" 240)
+ nil )
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 244)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1024)
+ (!options
+ (!option tag string "1")
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "gengtype.cc" 1369)
+ (!options
+ (!option desc string "SYMBOL_REF_HAS_BLOCK_INFO_P (&%0)")
+ )
+ )
+ (!pair ".fld[1]."
+ (!type union 223 nil gc_used "rtx_def_symbol_subunion"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair "rt_constant"
+ (!type pointer 224 nil gc_used
+ (!type struct 225
+ (!type already_seen 224)
+ gc_pointed_to "constant_descriptor_rtx"
+ (!srcfileloc "varasm.cc" 3719)
+ (!fields 10
+ (!pair "next"
+ (!type already_seen 224)
+ (!srcfileloc "varasm.cc" 3709)
+ nil )
+ (!pair "mem"
+ (!type already_seen 100)
+ (!srcfileloc "varasm.cc" 3710)
+ nil )
+ (!pair "sym"
+ (!type already_seen 100)
+ (!srcfileloc "varasm.cc" 3711)
+ nil )
+ (!pair "constant"
+ (!type already_seen 100)
+ (!srcfileloc "varasm.cc" 3712)
+ nil )
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "varasm.cc" 3713)
+ nil )
+ (!pair "hash"
+ (!type already_seen 2)
+ (!srcfileloc "varasm.cc" 3714)
+ nil )
+ (!pair "mode"
+ (!type already_seen 2)
+ (!srcfileloc "varasm.cc" 3715)
+ nil )
+ (!pair "align"
+ (!type already_seen 2)
+ (!srcfileloc "varasm.cc" 3716)
+ nil )
+ (!pair "labelno"
+ (!type already_seen 2)
+ (!srcfileloc "varasm.cc" 3717)
+ nil )
+ (!pair "mark"
+ (!type already_seen 2)
+ (!srcfileloc "varasm.cc" 3718)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ (!option chain_next string "%h.next")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "gengtype.cc" 1228)
+ (!options
+ (!option tag string "1")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_tree"
+ (!type already_seen 23)
+ (!srcfileloc "gengtype.cc" 1226)
+ (!options
+ (!option default string "")
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option desc string "CONSTANT_POOL_ADDRESS_P (&%0)")
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_str"
+ (!type already_seen 11)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SYMBOL_REF")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 226 nil gc_used "rtx_def_label_ref"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "LABEL_REF")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 227 nil gc_used "rtx_def_mem"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_mem"
+ (!type pointer 228 nil gc_used
+ (!type struct 229
+ (!type already_seen 228)
+ gc_pointed_to "mem_attrs"
+ (!srcfileloc "rtl.h" 184)
+ (!fields 8
+ (!pair "expr"
+ (!type already_seen 23)
+ (!srcfileloc "rtl.h" 158)
+ nil )
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 162)
+ nil )
+ (!pair "size"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 166)
+ nil )
+ (!pair "alias"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 169)
+ nil )
+ (!pair "align"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 174)
+ nil )
+ (!pair "addrspace"
+ (!type already_seen 8)
+ (!srcfileloc "rtl.h" 177)
+ nil )
+ (!pair "offset_known_p"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 180)
+ nil )
+ (!pair "size_known_p"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 183)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "MEM")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 230 nil gc_used "rtx_def_concatn"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtvec"
+ (!type pointer 231 nil gc_used
+ (!type struct 232
+ (!type already_seen 231)
+ gc_pointed_to "rtvec_def"
+ (!srcfileloc "rtl.h" 738)
+ (!fields 2
+ (!pair "num_elem"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 736)
+ nil )
+ (!pair "elem"
+ (!type array 233 nil gc_used "1"
+ (!type already_seen 100)
+ )
+ (!srcfileloc "rtl.h" 737)
+ (!options
+ (!option length string "%h.num_elem")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CONCATN")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 234 nil gc_used "rtx_def_concat"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CONCAT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 235 nil gc_used "rtx_def_strict_low_part"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "STRICT_LOW_PART")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 236 nil gc_used "rtx_def_subreg"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_subreg"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SUBREG")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 237 nil gc_used "rtx_def_scratch"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SCRATCH")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 238 nil gc_used "rtx_def_reg"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair "reg.attrs"
+ (!type pointer 239 nil gc_used
+ (!type struct 240
+ (!type already_seen 239)
+ gc_pointed_to "reg_attrs"
+ (!srcfileloc "rtl.h" 229)
+ (!fields 2
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "rtl.h" 195)
+ nil )
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 196)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "gengtype.cc" 1360)
+ nil )
+ (!pair ".fld[0].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "REG")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 241 nil gc_used "rtx_def_pc"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "PC")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 242 nil gc_used "rtx_def_const"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CONST")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 243 nil gc_used "rtx_def_const_string"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_str"
+ (!type already_seen 11)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CONST_STRING")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 244 nil gc_used "rtx_def_const_vector"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtvec"
+ (!type already_seen 231)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CONST_VECTOR")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 245 nil gc_used "rtx_def_const_double"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CONST_DOUBLE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 246 nil gc_used "rtx_def_const_fixed"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 3
+ (!pair ".fld[2].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CONST_FIXED")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 247 nil gc_used "rtx_def_const_poly_int"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CONST_POLY_INT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 248 nil gc_used "rtx_def_const_wide_int"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CONST_WIDE_INT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 249 nil gc_used "rtx_def_const_int"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CONST_INT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 250 nil gc_used "rtx_def_trap_if"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "TRAP_IF")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 251 nil gc_used "rtx_def_eh_return"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "EH_RETURN")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 252 nil gc_used "rtx_def_simple_return"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SIMPLE_RETURN")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 253 nil gc_used "rtx_def_return"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "RETURN")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 254 nil gc_used "rtx_def_call"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CALL")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 255 nil gc_used "rtx_def_clobber"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CLOBBER")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 256 nil gc_used "rtx_def_use"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "USE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 257 nil gc_used "rtx_def_set"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SET")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 258 nil gc_used "rtx_def_prefetch"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 3
+ (!pair ".fld[2].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "PREFETCH")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 259 nil gc_used "rtx_def_addr_diff_vec"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 5
+ (!pair ".fld[4].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[3].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[2].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtvec"
+ (!type already_seen 231)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ADDR_DIFF_VEC")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 260 nil gc_used "rtx_def_addr_vec"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtvec"
+ (!type already_seen 231)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ADDR_VEC")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 261 nil gc_used "rtx_def_unspec_volatile"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtvec"
+ (!type already_seen 231)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UNSPEC_VOLATILE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 262 nil gc_used "rtx_def_unspec"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtvec"
+ (!type already_seen 231)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UNSPEC")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 263 nil gc_used "rtx_def_asm_operands"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 7
+ (!pair ".fld[6].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[5].rt_rtvec"
+ (!type already_seen 231)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[4].rt_rtvec"
+ (!type already_seen 231)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[3].rt_rtvec"
+ (!type already_seen 231)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[2].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_str"
+ (!type already_seen 11)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_str"
+ (!type already_seen 11)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ASM_OPERANDS")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 264 nil gc_used "rtx_def_asm_input"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_str"
+ (!type already_seen 11)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ASM_INPUT")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 265 nil gc_used "rtx_def_parallel"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtvec"
+ (!type already_seen 231)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "PARALLEL")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 266 nil gc_used "rtx_def_cond_exec"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "COND_EXEC")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 267 nil gc_used "rtx_def_note"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 6
+ (!pair ".fld[5].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[4].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[3]."
+ (!type union 268 nil gc_used "rtx_def_note_subunion"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 19
+ (!pair "rt_str"
+ (!type already_seen 11)
+ (!srcfileloc "gengtype.cc" 1195)
+ (!options
+ (!option default string "")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1208)
+ (!options
+ (!option tag string "NOTE_INSN_UPDATE_SJLJ_CONTEXT")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1208)
+ (!options
+ (!option tag string "NOTE_INSN_CFI_LABEL")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1208)
+ (!options
+ (!option tag string "NOTE_INSN_CFI")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1208)
+ (!options
+ (!option tag string "NOTE_INSN_SWITCH_TEXT_SECTIONS")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1208)
+ (!options
+ (!option tag string "NOTE_INSN_BASIC_BLOCK")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1208)
+ (!options
+ (!option tag string "NOTE_INSN_INLINE_ENTRY")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1208)
+ (!options
+ (!option tag string "NOTE_INSN_BEGIN_STMT")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1204)
+ (!options
+ (!option tag string "NOTE_INSN_VAR_LOCATION")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1208)
+ (!options
+ (!option tag string "NOTE_INSN_EH_REGION_END")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1208)
+ (!options
+ (!option tag string "NOTE_INSN_EH_REGION_BEG")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1208)
+ (!options
+ (!option tag string "NOTE_INSN_EPILOGUE_BEG")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1208)
+ (!options
+ (!option tag string "NOTE_INSN_PROLOGUE_END")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1208)
+ (!options
+ (!option tag string "NOTE_INSN_FUNCTION_BEG")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_tree"
+ (!type already_seen 23)
+ (!srcfileloc "gengtype.cc" 1200)
+ (!options
+ (!option tag string "NOTE_INSN_BLOCK_END")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_tree"
+ (!type already_seen 23)
+ (!srcfileloc "gengtype.cc" 1200)
+ (!options
+ (!option tag string "NOTE_INSN_BLOCK_BEG")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_str"
+ (!type already_seen 11)
+ (!srcfileloc "gengtype.cc" 1195)
+ (!options
+ (!option tag string "NOTE_INSN_DELETED_DEBUG_LABEL")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_str"
+ (!type already_seen 11)
+ (!srcfileloc "gengtype.cc" 1195)
+ (!options
+ (!option tag string "NOTE_INSN_DELETED_LABEL")
+ (!option dot string "")
+ )
+ )
+ (!pair "rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1208)
+ (!options
+ (!option tag string "NOTE_INSN_DELETED")
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option desc string "NOTE_KIND (&%0)")
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[2].rt_bb"
+ (!type pointer 269 nil gc_used
+ (!type struct 270
+ (!type already_seen 269)
+ gc_pointed_to "basic_block_def"
+ (!srcfileloc "basic-block.h" 151)
+ (!fields 11
+ (!pair "preds"
+ (!type pointer 271 nil gc_used
+ (!type user_struct 272
+ (!type already_seen 271)
+ gc_pointed_to "vec<edge,va_gc>"
+ (!srcfileloc "basic-block.h" 119)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "basic-block.h" 119)
+ nil )
+ (!pair "edge"
+ (!type pointer 273 nil gc_used
+ (!type user_struct 274
+ (!type already_seen 273)
+ gc_pointed_to "edge_def"
+ (!srcfileloc "basic-block.h" 53)
+ (!fields 0 )
+ )
+ )
+ (!srcfileloc "basic-block.h" 119)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "basic-block.h" 119)
+ nil )
+ (!pair "succs"
+ (!type already_seen 271)
+ (!srcfileloc "basic-block.h" 120)
+ nil )
+ (!pair "aux"
+ (!type already_seen 3)
+ (!srcfileloc "basic-block.h" 123)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "loop_father"
+ (!type pointer 275 nil gc_used
+ (!type struct 276
+ (!type already_seen 275)
+ gc_pointed_to "loop"
+ (!srcfileloc "cfgloop.h" 275)
+ (!fields 37
+ (!pair "num"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 121)
+ nil )
+ (!pair "ninsns"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 124)
+ nil )
+ (!pair "header"
+ (!type already_seen 269)
+ (!srcfileloc "cfgloop.h" 127)
+ nil )
+ (!pair "latch"
+ (!type already_seen 269)
+ (!srcfileloc "cfgloop.h" 130)
+ nil )
+ (!pair "lpt_decision"
+ (!type struct 277 nil gc_used "lpt_decision"
+ (!srcfileloc "cfgloop.h" 37)
+ (!fields 2
+ (!pair "decision"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 35)
+ nil )
+ (!pair "times"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 36)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "cfgloop.h" 133)
+ nil )
+ (!pair "av_ninsns"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 136)
+ nil )
+ (!pair "num_nodes"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 139)
+ nil )
+ (!pair "superloops"
+ (!type pointer 278 nil gc_used
+ (!type user_struct 279
+ (!type already_seen 278)
+ gc_pointed_to "vec<loop_p,va_gc>"
+ (!srcfileloc "cfgloop.h" 142)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cfgloop.h" 142)
+ nil )
+ (!pair "loop_p"
+ (!type already_seen 275)
+ (!srcfileloc "cfgloop.h" 142)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cfgloop.h" 142)
+ nil )
+ (!pair "inner"
+ (!type already_seen 275)
+ (!srcfileloc "cfgloop.h" 145)
+ nil )
+ (!pair "next"
+ (!type already_seen 275)
+ (!srcfileloc "cfgloop.h" 148)
+ nil )
+ (!pair "aux"
+ (!type already_seen 3)
+ (!srcfileloc "cfgloop.h" 151)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "nb_iterations"
+ (!type already_seen 23)
+ (!srcfileloc "cfgloop.h" 160)
+ nil )
+ (!pair "nb_iterations_upper_bound"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 164)
+ nil )
+ (!pair "nb_iterations_likely_upper_bound"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 166)
+ nil )
+ (!pair "nb_iterations_estimate"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 171)
+ nil )
+ (!pair "safelen"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 177)
+ nil )
+ (!pair "simdlen"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 180)
+ nil )
+ (!pair "constraints"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 203)
+ nil )
+ (!pair "estimate_state"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 207)
+ nil )
+ (!pair "any_upper_bound"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 209)
+ nil )
+ (!pair "any_estimate"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 210)
+ nil )
+ (!pair "any_likely_upper_bound"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 211)
+ nil )
+ (!pair "can_be_parallel"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 214)
+ nil )
+ (!pair "warned_aggressive_loop_optimizations"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 218)
+ nil )
+ (!pair "dont_vectorize"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 221)
+ nil )
+ (!pair "force_vectorize"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 224)
+ nil )
+ (!pair "in_oacc_kernels_region"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 227)
+ nil )
+ (!pair "finite_p"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 231)
+ nil )
+ (!pair "unroll"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 237)
+ nil )
+ (!pair "owned_clique"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 241)
+ nil )
+ (!pair "simduid"
+ (!type already_seen 23)
+ (!srcfileloc "cfgloop.h" 246)
+ nil )
+ (!pair "orig_loop_num"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 256)
+ nil )
+ (!pair "bounds"
+ (!type pointer 280 nil gc_used
+ (!type struct 281
+ (!type already_seen 280)
+ gc_pointed_to "nb_iter_bound"
+ (!srcfileloc "cfgloop.h" 70)
+ (!fields 4
+ (!pair "stmt"
+ (!type pointer 282 nil gc_used
+ (!type struct 283
+ (!type already_seen 282)
+ gc_pointed_to "gimple"
+ (!srcfileloc "gimple.h" 288)
+ (!fields 15
+ (!pair "code"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 229)
+ nil )
+ (!pair "no_warning"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 232)
+ nil )
+ (!pair "visited"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 236)
+ nil )
+ (!pair "nontemporal_move"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 239)
+ nil )
+ (!pair "plf"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 246)
+ nil )
+ (!pair "modified"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 250)
+ nil )
+ (!pair "has_volatile_ops"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 253)
+ nil )
+ (!pair "pad"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 256)
+ nil )
+ (!pair "subcode"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 262)
+ nil )
+ (!pair "uid"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 267)
+ nil )
+ (!pair "location"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 271)
+ nil )
+ (!pair "num_ops"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 274)
+ nil )
+ (!pair "bb"
+ (!type already_seen 269)
+ (!srcfileloc "gimple.h" 278)
+ nil )
+ (!pair "next"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 286)
+ nil )
+ (!pair "prev"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 287)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ (!options
+ (!option variable_size string "")
+ (!option chain_next string "%h.next")
+ (!option tag string "GSS_BASE")
+ (!option desc string "gimple_statement_structure (&%h)")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "cfgloop.h" 52)
+ nil )
+ (!pair "bound"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 61)
+ nil )
+ (!pair "is_exit"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 66)
+ nil )
+ (!pair "next"
+ (!type already_seen 280)
+ (!srcfileloc "cfgloop.h" 69)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "cfgloop.h" 259)
+ nil )
+ (!pair "control_ivs"
+ (!type pointer 284 nil gc_used
+ (!type struct 285
+ (!type already_seen 284)
+ gc_pointed_to "control_iv"
+ (!srcfileloc "cfgloop.h" 114)
+ (!fields 3
+ (!pair "base"
+ (!type already_seen 23)
+ (!srcfileloc "cfgloop.h" 111)
+ nil )
+ (!pair "step"
+ (!type already_seen 23)
+ (!srcfileloc "cfgloop.h" 112)
+ nil )
+ (!pair "next"
+ (!type already_seen 284)
+ (!srcfileloc "cfgloop.h" 113)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "cfgloop.h" 262)
+ nil )
+ (!pair "exits"
+ (!type pointer 286 nil gc_used
+ (!type struct 287
+ (!type already_seen 286)
+ gc_pointed_to "loop_exit"
+ (!srcfileloc "cfgloop.h" 84)
+ (!fields 4
+ (!pair "e"
+ (!type already_seen 273)
+ (!srcfileloc "cfgloop.h" 76)
+ nil )
+ (!pair "prev"
+ (!type already_seen 286)
+ (!srcfileloc "cfgloop.h" 79)
+ nil )
+ (!pair "next"
+ (!type already_seen 286)
+ (!srcfileloc "cfgloop.h" 80)
+ nil )
+ (!pair "next_e"
+ (!type already_seen 286)
+ (!srcfileloc "cfgloop.h" 83)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "cfgloop.h" 265)
+ nil )
+ (!pair "simple_loop_desc"
+ (!type pointer 288 nil gc_used
+ (!type struct 289
+ (!type already_seen 288)
+ gc_pointed_to "niter_desc"
+ (!srcfileloc "cfgloop.h" 497)
+ (!fields 11
+ (!pair "out_edge"
+ (!type already_seen 273)
+ (!srcfileloc "cfgloop.h" 464)
+ nil )
+ (!pair "in_edge"
+ (!type already_seen 273)
+ (!srcfileloc "cfgloop.h" 467)
+ nil )
+ (!pair "simple_p"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 471)
+ nil )
+ (!pair "const_iter"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 474)
+ nil )
+ (!pair "niter"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 477)
+ nil )
+ (!pair "assumptions"
+ (!type already_seen 100)
+ (!srcfileloc "cfgloop.h" 480)
+ nil )
+ (!pair "noloop_assumptions"
+ (!type already_seen 100)
+ (!srcfileloc "cfgloop.h" 484)
+ nil )
+ (!pair "infinite"
+ (!type already_seen 100)
+ (!srcfileloc "cfgloop.h" 487)
+ nil )
+ (!pair "signed_p"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 490)
+ nil )
+ (!pair "mode"
+ (!type struct 290 nil gc_used "scalar_int_mode"
+ (!srcfileloc "coretypes.h" 66)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "cfgloop.h" 493)
+ nil )
+ (!pair "niter_expr"
+ (!type already_seen 100)
+ (!srcfileloc "cfgloop.h" 496)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "cfgloop.h" 268)
+ nil )
+ (!pair "former_header"
+ (!type already_seen 269)
+ (!srcfileloc "cfgloop.h" 274)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "basic-block.h" 126)
+ nil )
+ (!pair "dom"
+ (!type array 291 nil gc_unused "2"
+ (!type pointer 292 nil gc_unused
+ (!type struct 293
+ (!type already_seen 292)
+ gc_unused "et_node"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ )
+ (!srcfileloc "basic-block.h" 129)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "prev_bb"
+ (!type already_seen 269)
+ (!srcfileloc "basic-block.h" 132)
+ nil )
+ (!pair "next_bb"
+ (!type already_seen 269)
+ (!srcfileloc "basic-block.h" 133)
+ nil )
+ (!pair "il"
+ (!type union 294 nil gc_used "basic_block_il_dependent"
+ (!srcfileloc "basic-block.h" 141)
+ (!fields 2
+ (!pair "gimple"
+ (!type struct 295 nil gc_used "gimple_bb_info"
+ (!srcfileloc "basic-block.h" 89)
+ (!fields 2
+ (!pair "seq"
+ (!type already_seen 282)
+ (!srcfileloc "basic-block.h" 85)
+ nil )
+ (!pair "phi_nodes"
+ (!type already_seen 282)
+ (!srcfileloc "basic-block.h" 88)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "basic-block.h" 136)
+ (!options
+ (!option tag string "0")
+ )
+ )
+ (!pair "x"
+ (!type struct 296 nil gc_used "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/basic-block.h:137"
+ (!srcfileloc "basic-block.h" 140)
+ (!fields 2
+ (!pair "head_"
+ (!type pointer 297 nil gc_used
+ (!type struct 298
+ (!type already_seen 297)
+ gc_pointed_to "rtx_insn"
+ (!srcfileloc "rtl.h" 574)
+ (!fields 0 )
+ nil 4095 nil
+ (!type already_seen 102)
+ )
+ )
+ (!srcfileloc "basic-block.h" 138)
+ nil )
+ (!pair "rtl"
+ (!type pointer 299 nil gc_used
+ (!type struct 300
+ (!type already_seen 299)
+ gc_pointed_to "rtl_bb_info"
+ (!srcfileloc "basic-block.h" 81)
+ (!fields 3
+ (!pair "end_"
+ (!type already_seen 297)
+ (!srcfileloc "basic-block.h" 75)
+ nil )
+ (!pair "header_"
+ (!type already_seen 297)
+ (!srcfileloc "basic-block.h" 79)
+ nil )
+ (!pair "footer_"
+ (!type already_seen 297)
+ (!srcfileloc "basic-block.h" 80)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "basic-block.h" 139)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "basic-block.h" 140)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "basic-block.h" 141)
+ (!options
+ (!option desc string "((%1.flags & BB_RTL) != 0)")
+ )
+ )
+ (!pair "flags"
+ (!type already_seen 2)
+ (!srcfileloc "basic-block.h" 144)
+ nil )
+ (!pair "index"
+ (!type already_seen 2)
+ (!srcfileloc "basic-block.h" 147)
+ nil )
+ (!pair "count"
+ (!type struct 301 nil gc_used "profile_count"
+ (!srcfileloc "profile-count.h" 1293)
+ (!fields 2
+ (!pair "UINT64_BIT_FIELD_ALIGN"
+ (!type already_seen 2)
+ (!srcfileloc "profile-count.h" 736)
+ nil )
+ (!pair "m_quality"
+ (!type already_seen 2)
+ (!srcfileloc "profile-count.h" 738)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "basic-block.h" 150)
+ nil )
+ )
+ (!options
+ (!option chain_prev string "%h.prev_bb")
+ (!option chain_next string "%h.next_bb")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "NOTE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 302 nil gc_used "rtx_def_code_label"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 7
+ (!pair ".fld[6].rt_str"
+ (!type already_seen 11)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[5].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[4].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[3].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[2].rt_bb"
+ (!type already_seen 269)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CODE_LABEL")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 303 nil gc_used "rtx_def_barrier"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 7
+ (!pair ".fld[6].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[5].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[4].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[3].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[2].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "BARRIER")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 304 nil gc_used "rtx_def_jump_table_data"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 8
+ (!pair ".fld[7].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[6].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[5].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[4].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[3].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[2].rt_bb"
+ (!type already_seen 269)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "JUMP_TABLE_DATA")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 305 nil gc_used "rtx_def_call_insn"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 8
+ (!pair ".fld[7].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[6].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[5].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[4].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[3].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[2].rt_bb"
+ (!type already_seen 269)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "CALL_INSN")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 306 nil gc_used "rtx_def_jump_insn"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 8
+ (!pair ".fld[7].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[6].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[5].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[4].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[3].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[2].rt_bb"
+ (!type already_seen 269)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "JUMP_INSN")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 307 nil gc_used "rtx_def_insn"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 7
+ (!pair ".fld[6].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[5].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[4].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[3].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[2].rt_bb"
+ (!type already_seen 269)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "INSN")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 308 nil gc_used "rtx_def_debug_insn"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 7
+ (!pair ".fld[6].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[5].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[4].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[3].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[2].rt_bb"
+ (!type already_seen 269)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "DEBUG_INSN")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 309 nil gc_used "rtx_def_address"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "ADDRESS")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 310 nil gc_used "rtx_def_sequence"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_rtvec"
+ (!type already_seen 231)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "SEQUENCE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 311 nil gc_used "rtx_def_int_list"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "INT_LIST")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 312 nil gc_used "rtx_def_insn_list"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "INSN_LIST")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 313 nil gc_used "rtx_def_expr_list"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 2
+ (!pair ".fld[1].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ (!pair ".fld[0].rt_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "EXPR_LIST")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 314 nil gc_used "rtx_def_debug_expr"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_tree"
+ (!type already_seen 23)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "DEBUG_EXPR")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 315 nil gc_used "rtx_def_value"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "VALUE")
+ (!option dot string "")
+ )
+ )
+ (!pair ""
+ (!type struct 316 nil gc_used "rtx_def_UnKnown"
+ (!srcfileloc "rtl.h" 452)
+ (!fields 1
+ (!pair ".fld[0].rt_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 1347)
+ (!options
+ (!option dot string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gengtype.cc" 1379)
+ (!options
+ (!option tag string "UNKNOWN")
+ (!option dot string "")
+ )
+ )
+ )
+ (!options
+ (!option dot string "")
+ )
+ 4095 nil )
+ (!srcfileloc "rtl.h" 452)
+ (!options
+ (!option desc string "GET_CODE (&%0)")
+ (!option special string "rtx_def")
+ )
+ )
+ )
+ (!options
+ (!option chain_prev string "RTX_PREV (&%h)")
+ (!option chain_next string "RTX_NEXT (&%h)")
+ (!option tag string "0")
+ (!option desc string "0")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "tree-core.h" 1853)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2077)
+ (!options
+ (!option tag string "TS_DECL_WRTL")
+ )
+ )
+ (!pair "decl_non_common"
+ (!type struct 317 nil gc_used "tree_decl_non_common"
+ (!srcfileloc "tree-core.h" 1930)
+ (!fields 2
+ (!pair "common"
+ (!type struct 318 nil gc_used "tree_decl_with_vis"
+ (!srcfileloc "tree-core.h" 1920)
+ (!fields 20
+ (!pair "common"
+ (!type already_seen 99)
+ (!srcfileloc "tree-core.h" 1886)
+ nil )
+ (!pair "assembler_name"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1887)
+ nil )
+ (!pair "symtab_node"
+ (!type pointer 319 nil gc_used
+ (!type struct 320
+ (!type already_seen 319)
+ gc_pointed_to "symtab_node"
+ (!srcfileloc "cgraph.h" 680)
+ (!fields 42
+ (!pair "DEBUG_FUNCTION"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 165)
+ nil )
+ (!pair "DEBUG_FUNCTION"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 168)
+ nil )
+ (!pair "type"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 489)
+ nil )
+ (!pair "resolution"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 492)
+ nil )
+ (!pair "definition"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 498)
+ nil )
+ (!pair "alias"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 501)
+ nil )
+ (!pair "transparent_alias"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 523)
+ nil )
+ (!pair "weakref"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 525)
+ nil )
+ (!pair "cpp_implicit_alias"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 532)
+ nil )
+ (!pair "symver"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 534)
+ nil )
+ (!pair "analyzed"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 537)
+ nil )
+ (!pair "writeonly"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 539)
+ nil )
+ (!pair "refuse_visibility_changes"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 542)
+ nil )
+ (!pair "externally_visible"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 547)
+ nil )
+ (!pair "no_reorder"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 549)
+ nil )
+ (!pair "force_output"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 552)
+ nil )
+ (!pair "forced_by_abi"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 556)
+ nil )
+ (!pair "unique_name"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 558)
+ nil )
+ (!pair "implicit_section"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 561)
+ nil )
+ (!pair "body_removed"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 564)
+ nil )
+ (!pair "semantic_interposition"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 566)
+ nil )
+ (!pair "used_from_other_partition"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 573)
+ nil )
+ (!pair "in_other_partition"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 577)
+ nil )
+ (!pair "address_taken"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 584)
+ nil )
+ (!pair "in_init_priority_hash"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 586)
+ nil )
+ (!pair "need_lto_streaming"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 590)
+ nil )
+ (!pair "offloadable"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 593)
+ nil )
+ (!pair "ifunc_resolver"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 596)
+ nil )
+ (!pair "order"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 600)
+ nil )
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 603)
+ nil )
+ (!pair "next"
+ (!type already_seen 319)
+ (!srcfileloc "cgraph.h" 606)
+ nil )
+ (!pair "previous"
+ (!type already_seen 319)
+ (!srcfileloc "cgraph.h" 607)
+ nil )
+ (!pair "next_sharing_asm_name"
+ (!type already_seen 319)
+ (!srcfileloc "cgraph.h" 618)
+ nil )
+ (!pair "previous_sharing_asm_name"
+ (!type already_seen 319)
+ (!srcfileloc "cgraph.h" 619)
+ nil )
+ (!pair "same_comdat_group"
+ (!type already_seen 319)
+ (!srcfileloc "cgraph.h" 622)
+ nil )
+ (!pair "ref_list"
+ (!type struct 321 nil gc_unused "ipa_ref_list"
+ (!srcfileloc "cgraph.h" 625)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "cgraph.h" 625)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "alias_target"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 630)
+ nil )
+ (!pair "lto_file_data"
+ (!type pointer 322
+ (!type pointer 323 nil gc_used
+ (!type already_seen 322)
+ )
+ gc_pointed_to
+ (!type struct 324
+ (!type already_seen 322)
+ gc_pointed_to "lto_file_decl_data"
+ (!srcfileloc "lto-streamer.h" 607)
+ (!fields 18
+ (!pair "current_decl_state"
+ (!type pointer 325 nil gc_used
+ (!type struct 326
+ (!type already_seen 325)
+ gc_pointed_to "lto_in_decl_state"
+ (!srcfileloc "lto-streamer.h" 502)
+ (!fields 3
+ (!pair "streams"
+ (!type array 327 nil gc_used "LTO_N_DECL_STREAMS"
+ (!type already_seen 85)
+ )
+ (!srcfileloc "lto-streamer.h" 494)
+ nil )
+ (!pair "fn_decl"
+ (!type already_seen 23)
+ (!srcfileloc "lto-streamer.h" 498)
+ nil )
+ (!pair "compressed"
+ (!type already_seen 2)
+ (!srcfileloc "lto-streamer.h" 501)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "lto-streamer.h" 559)
+ nil )
+ (!pair "global_decl_state"
+ (!type already_seen 325)
+ (!srcfileloc "lto-streamer.h" 563)
+ nil )
+ (!pair "symtab_node_encoder"
+ (!type pointer 328 nil gc_unused
+ (!type struct 329
+ (!type already_seen 328)
+ gc_unused "lto_symtab_encoder_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "lto-streamer.h" 566)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "function_decl_states"
+ (!type pointer 330 nil gc_used
+ (!type user_struct 331
+ (!type already_seen 330)
+ gc_pointed_to "hash_table<decl_state_hasher>"
+ (!srcfileloc "lto-streamer.h" 569)
+ (!fields 1
+ (!pair "decl_state_hasher"
+ (!type struct 332 nil gc_used "decl_state_hasher"
+ (!srcfileloc "lto-streamer.h" 569)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "lto-streamer.h" 569)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "lto-streamer.h" 569)
+ nil )
+ (!pair "file_name"
+ (!type already_seen 11)
+ (!srcfileloc "lto-streamer.h" 572)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "section_hash_table"
+ (!type pointer 333 nil gc_unused
+ (!type struct 334
+ (!type already_seen 333)
+ gc_unused "htab"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "lto-streamer.h" 575)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "renaming_hash_table"
+ (!type already_seen 333)
+ (!srcfileloc "lto-streamer.h" 578)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "next"
+ (!type already_seen 322)
+ (!srcfileloc "lto-streamer.h" 581)
+ nil )
+ (!pair "order"
+ (!type already_seen 2)
+ (!srcfileloc "lto-streamer.h" 584)
+ nil )
+ (!pair "id"
+ (!type already_seen 2)
+ (!srcfileloc "lto-streamer.h" 587)
+ nil )
+ (!pair "respairs"
+ (!type user_struct 335 nil gc_unused "vec<res_pair>"
+ (!srcfileloc "lto-streamer.h" 590)
+ (!fields 1
+ (!pair "res_pair"
+ (!type struct 336 nil gc_unused "res_pair"
+ (!srcfileloc "lto-streamer.h" 590)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "lto-streamer.h" 590)
+ nil )
+ )
+ )
+ (!srcfileloc "lto-streamer.h" 590)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "max_index"
+ (!type already_seen 2)
+ (!srcfileloc "lto-streamer.h" 591)
+ nil )
+ (!pair "profile_info"
+ (!type undefined 337 nil gc_unused "gcov_summary"
+ (!srcfileloc "lto-streamer.h" 593)
+ )
+ (!srcfileloc "lto-streamer.h" 593)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "resolution_map"
+ (!type pointer 338 nil gc_unused
+ (!type user_struct 339
+ (!type already_seen 338)
+ gc_unused "hash_map<tree,ld_plugin_symbol_resolution>"
+ (!srcfileloc "lto-streamer.h" 596)
+ (!fields 2
+ (!pair "ld_plugin_symbol_resolution"
+ (!type undefined 340 nil gc_unused "ld_plugin_symbol_resolution"
+ (!srcfileloc "lto-streamer.h" 596)
+ )
+ (!srcfileloc "lto-streamer.h" 596)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "lto-streamer.h" 596)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "lto-streamer.h" 596)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "mode_table"
+ (!type already_seen 11)
+ (!srcfileloc "lto-streamer.h" 599)
+ nil )
+ (!pair "lto_section_header"
+ (!type struct 341 nil gc_used "lto_section"
+ (!srcfileloc "lto-streamer.h" 602)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "lto-streamer.h" 602)
+ nil )
+ (!pair "order_base"
+ (!type already_seen 2)
+ (!srcfileloc "lto-streamer.h" 604)
+ nil )
+ (!pair "unit_base"
+ (!type already_seen 2)
+ (!srcfileloc "lto-streamer.h" 606)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "cgraph.h" 633)
+ nil )
+ (!pair "aux"
+ (!type already_seen 3)
+ (!srcfileloc "cgraph.h" 635)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "x_comdat_group"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 638)
+ nil )
+ (!pair "x_section"
+ (!type pointer 342 nil gc_used
+ (!type struct 343
+ (!type already_seen 342)
+ gc_pointed_to "section_hash_entry"
+ (!srcfileloc "cgraph.h" 641)
+ (!fields 2
+ (!pair "ref_count"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 54)
+ nil )
+ (!pair "name"
+ (!type already_seen 11)
+ (!srcfileloc "cgraph.h" 55)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "cgraph.h" 641)
+ nil )
+ (!pair "DEBUG_FUNCTION"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 648)
+ nil )
+ )
+ (!options
+ (!option chain_prev string "%h.previous")
+ (!option chain_next string "%h.next")
+ (!option tag string "SYMTAB_SYMBOL")
+ (!option desc string "%h.type")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "tree-core.h" 1888)
+ nil )
+ (!pair "defer_output"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1891)
+ nil )
+ (!pair "hard_register"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1892)
+ nil )
+ (!pair "common_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1893)
+ nil )
+ (!pair "in_text_section"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1894)
+ nil )
+ (!pair "in_constant_pool"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1895)
+ nil )
+ (!pair "dllimport_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1896)
+ nil )
+ (!pair "weak_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1898)
+ nil )
+ (!pair "seen_in_bind_expr"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1900)
+ nil )
+ (!pair "comdat_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1901)
+ nil )
+ (!pair "visibility"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1903)
+ nil )
+ (!pair "visibility_specified"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1904)
+ nil )
+ (!pair "init_priority_p"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1907)
+ nil )
+ (!pair "shadowed_for_var_p"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1909)
+ nil )
+ (!pair "cxx_constructor"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1911)
+ nil )
+ (!pair "cxx_destructor"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1913)
+ nil )
+ (!pair "final"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1915)
+ nil )
+ (!pair "regdecl_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1917)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 1927)
+ nil )
+ (!pair "result"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1929)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2079)
+ (!options
+ (!option tag string "TS_DECL_NON_COMMON")
+ )
+ )
+ (!pair "parm_decl"
+ (!type struct 344 nil gc_used "tree_parm_decl"
+ (!srcfileloc "tree-core.h" 1883)
+ (!fields 2
+ (!pair "common"
+ (!type already_seen 99)
+ (!srcfileloc "tree-core.h" 1881)
+ nil )
+ (!pair "incoming_rtl"
+ (!type already_seen 100)
+ (!srcfileloc "tree-core.h" 1882)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2080)
+ (!options
+ (!option tag string "TS_PARM_DECL")
+ )
+ )
+ (!pair "decl_with_vis"
+ (!type already_seen 318)
+ (!srcfileloc "tree-core.h" 2081)
+ (!options
+ (!option tag string "TS_DECL_WITH_VIS")
+ )
+ )
+ (!pair "var_decl"
+ (!type struct 345 nil gc_used "tree_var_decl"
+ (!srcfileloc "tree-core.h" 1924)
+ (!fields 1
+ (!pair "common"
+ (!type already_seen 318)
+ (!srcfileloc "tree-core.h" 1923)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2082)
+ (!options
+ (!option tag string "TS_VAR_DECL")
+ )
+ )
+ (!pair "field_decl"
+ (!type struct 346 nil gc_used "tree_field_decl"
+ (!srcfileloc "tree-core.h" 1864)
+ (!fields 6
+ (!pair "common"
+ (!type already_seen 52)
+ (!srcfileloc "tree-core.h" 1857)
+ nil )
+ (!pair "offset"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1859)
+ nil )
+ (!pair "bit_field_type"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1860)
+ nil )
+ (!pair "qualifier"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1861)
+ nil )
+ (!pair "bit_offset"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1862)
+ nil )
+ (!pair "fcontext"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1863)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2083)
+ (!options
+ (!option tag string "TS_FIELD_DECL")
+ )
+ )
+ (!pair "label_decl"
+ (!type struct 347 nil gc_used "tree_label_decl"
+ (!srcfileloc "tree-core.h" 1870)
+ (!fields 3
+ (!pair "common"
+ (!type already_seen 99)
+ (!srcfileloc "tree-core.h" 1867)
+ nil )
+ (!pair "label_decl_uid"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1868)
+ nil )
+ (!pair "eh_landing_pad_nr"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1869)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2084)
+ (!options
+ (!option tag string "TS_LABEL_DECL")
+ )
+ )
+ (!pair "result_decl"
+ (!type struct 348 nil gc_used "tree_result_decl"
+ (!srcfileloc "tree-core.h" 1874)
+ (!fields 1
+ (!pair "common"
+ (!type already_seen 99)
+ (!srcfileloc "tree-core.h" 1873)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2085)
+ (!options
+ (!option tag string "TS_RESULT_DECL")
+ )
+ )
+ (!pair "const_decl"
+ (!type struct 349 nil gc_used "tree_const_decl"
+ (!srcfileloc "tree-core.h" 1878)
+ (!fields 1
+ (!pair "common"
+ (!type already_seen 52)
+ (!srcfileloc "tree-core.h" 1877)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2086)
+ (!options
+ (!option tag string "TS_CONST_DECL")
+ )
+ )
+ (!pair "type_decl"
+ (!type struct 350 nil gc_used "tree_type_decl"
+ (!srcfileloc "tree-core.h" 2009)
+ (!fields 1
+ (!pair "common"
+ (!type already_seen 317)
+ (!srcfileloc "tree-core.h" 2007)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2087)
+ (!options
+ (!option tag string "TS_TYPE_DECL")
+ )
+ )
+ (!pair "function_decl"
+ (!type struct 351 nil gc_used "tree_function_decl"
+ (!srcfileloc "tree-core.h" 1996)
+ (!fields 28
+ (!pair "common"
+ (!type already_seen 317)
+ (!srcfileloc "tree-core.h" 1950)
+ nil )
+ (!pair "f"
+ (!type pointer 352 nil gc_used
+ (!type struct 353
+ (!type already_seen 352)
+ gc_pointed_to "function"
+ (!srcfileloc "function.h" 445)
+ (!fields 52
+ (!pair "eh"
+ (!type pointer 354 nil gc_used
+ (!type struct 355
+ (!type already_seen 354)
+ gc_pointed_to "eh_status"
+ (!srcfileloc "except.h" 218)
+ (!fields 6
+ (!pair "region_tree"
+ (!type pointer 356 nil gc_used
+ (!type struct 357
+ (!type already_seen 356)
+ gc_pointed_to "eh_region_d"
+ (!srcfileloc "except.h" 180)
+ (!fields 10
+ (!pair "outer"
+ (!type already_seen 356)
+ (!srcfileloc "except.h" 121)
+ nil )
+ (!pair "inner"
+ (!type already_seen 356)
+ (!srcfileloc "except.h" 124)
+ nil )
+ (!pair "next_peer"
+ (!type already_seen 356)
+ (!srcfileloc "except.h" 125)
+ nil )
+ (!pair "index"
+ (!type already_seen 2)
+ (!srcfileloc "except.h" 128)
+ nil )
+ (!pair "type"
+ (!type already_seen 2)
+ (!srcfileloc "except.h" 131)
+ nil )
+ (!pair "u"
+ (!type union 358 nil gc_used "eh_region_u"
+ (!srcfileloc "except.h" 167)
+ (!fields 3
+ (!pair "eh_try"
+ (!type struct 359 nil gc_used "eh_region_u_try"
+ (!srcfileloc "except.h" 139)
+ (!fields 2
+ (!pair "first_catch"
+ (!type pointer 360 nil gc_used
+ (!type struct 361
+ (!type already_seen 360)
+ gc_pointed_to "eh_catch_d"
+ (!srcfileloc "except.h" 114)
+ (!fields 5
+ (!pair "next_catch"
+ (!type already_seen 360)
+ (!srcfileloc "except.h" 98)
+ nil )
+ (!pair "prev_catch"
+ (!type already_seen 360)
+ (!srcfileloc "except.h" 99)
+ nil )
+ (!pair "type_list"
+ (!type already_seen 23)
+ (!srcfileloc "except.h" 103)
+ nil )
+ (!pair "filter_list"
+ (!type already_seen 23)
+ (!srcfileloc "except.h" 108)
+ nil )
+ (!pair "label"
+ (!type already_seen 23)
+ (!srcfileloc "except.h" 113)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "except.h" 137)
+ nil )
+ (!pair "last_catch"
+ (!type already_seen 360)
+ (!srcfileloc "except.h" 138)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "except.h" 139)
+ (!options
+ (!option tag string "ERT_TRY")
+ )
+ )
+ (!pair "allowed"
+ (!type struct 362 nil gc_used "eh_region_u_allowed"
+ (!srcfileloc "except.h" 153)
+ (!fields 3
+ (!pair "type_list"
+ (!type already_seen 23)
+ (!srcfileloc "except.h" 143)
+ nil )
+ (!pair "label"
+ (!type already_seen 23)
+ (!srcfileloc "except.h" 147)
+ nil )
+ (!pair "filter"
+ (!type already_seen 2)
+ (!srcfileloc "except.h" 152)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "except.h" 153)
+ (!options
+ (!option tag string "ERT_ALLOWED_EXCEPTIONS")
+ )
+ )
+ (!pair "must_not_throw"
+ (!type struct 363 nil gc_used "eh_region_u_must_not_throw"
+ (!srcfileloc "except.h" 166)
+ (!fields 2
+ (!pair "failure_decl"
+ (!type already_seen 23)
+ (!srcfileloc "except.h" 163)
+ nil )
+ (!pair "failure_loc"
+ (!type already_seen 2)
+ (!srcfileloc "except.h" 165)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "except.h" 166)
+ (!options
+ (!option tag string "ERT_MUST_NOT_THROW")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "except.h" 167)
+ (!options
+ (!option desc string "%0.type")
+ )
+ )
+ (!pair "landing_pads"
+ (!type pointer 364 nil gc_used
+ (!type struct 365
+ (!type already_seen 364)
+ gc_pointed_to "eh_landing_pad_d"
+ (!srcfileloc "except.h" 91)
+ (!fields 5
+ (!pair "next_lp"
+ (!type already_seen 364)
+ (!srcfileloc "except.h" 72)
+ nil )
+ (!pair "region"
+ (!type already_seen 356)
+ (!srcfileloc "except.h" 75)
+ nil )
+ (!pair "post_landing_pad"
+ (!type already_seen 23)
+ (!srcfileloc "except.h" 80)
+ nil )
+ (!pair "landing_pad"
+ (!type pointer 366 nil gc_used
+ (!type struct 367
+ (!type already_seen 366)
+ gc_pointed_to "rtx_code_label"
+ (!srcfileloc "function.h" 140)
+ (!fields 0 )
+ nil 4095 nil
+ (!type already_seen 298)
+ )
+ )
+ (!srcfileloc "except.h" 87)
+ nil )
+ (!pair "index"
+ (!type already_seen 2)
+ (!srcfileloc "except.h" 90)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "except.h" 170)
+ nil )
+ (!pair "exc_ptr_reg"
+ (!type already_seen 100)
+ (!srcfileloc "except.h" 175)
+ nil )
+ (!pair "filter_reg"
+ (!type already_seen 100)
+ (!srcfileloc "except.h" 175)
+ nil )
+ (!pair "use_cxa_end_cleanup"
+ (!type already_seen 2)
+ (!srcfileloc "except.h" 179)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "except.h" 194)
+ nil )
+ (!pair "region_array"
+ (!type pointer 368 nil gc_used
+ (!type user_struct 369
+ (!type already_seen 368)
+ gc_pointed_to "vec<eh_region,va_gc>"
+ (!srcfileloc "except.h" 197)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "except.h" 197)
+ nil )
+ (!pair "eh_region"
+ (!type already_seen 356)
+ (!srcfileloc "except.h" 197)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "except.h" 197)
+ nil )
+ (!pair "lp_array"
+ (!type pointer 370 nil gc_used
+ (!type user_struct 371
+ (!type already_seen 370)
+ gc_pointed_to "vec<eh_landing_pad,va_gc>"
+ (!srcfileloc "except.h" 200)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "except.h" 200)
+ nil )
+ (!pair "eh_landing_pad"
+ (!type already_seen 364)
+ (!srcfileloc "except.h" 200)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "except.h" 200)
+ nil )
+ (!pair "throw_stmt_table"
+ (!type pointer 372 nil gc_used
+ (!type user_struct 373
+ (!type already_seen 372)
+ gc_pointed_to "hash_map<gimple*,int>"
+ (!srcfileloc "except.h" 204)
+ (!fields 2
+ (!pair "int"
+ (!type undefined 374 nil gc_unused "int"
+ (!srcfileloc "coretypes.h" 363)
+ )
+ (!srcfileloc "except.h" 204)
+ nil )
+ (!pair "gimple"
+ (!type already_seen 282)
+ (!srcfileloc "except.h" 204)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "except.h" 204)
+ nil )
+ (!pair "ttype_data"
+ (!type already_seen 85)
+ (!srcfileloc "except.h" 208)
+ nil )
+ (!pair "ehspec_data"
+ (!type union 375 nil gc_used "eh_status_u"
+ (!srcfileloc "except.h" 217)
+ (!fields 2
+ (!pair "arm_eabi"
+ (!type already_seen 85)
+ (!srcfileloc "except.h" 215)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ (!pair "other"
+ (!type pointer 376 nil gc_used
+ (!type user_struct 377
+ (!type already_seen 376)
+ gc_pointed_to "vec<uchar,va_gc>"
+ (!srcfileloc "function.h" 145)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "function.h" 145)
+ nil )
+ (!pair "uchar"
+ (!type already_seen 8)
+ (!srcfileloc "function.h" 145)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "except.h" 216)
+ (!options
+ (!option tag string "0")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "except.h" 217)
+ (!options
+ (!option desc string "targetm.arm_eabi_unwinder")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "function.h" 250)
+ nil )
+ (!pair "cfg"
+ (!type pointer 378 nil gc_used
+ (!type struct 379
+ (!type already_seen 378)
+ gc_pointed_to "control_flow_graph"
+ (!srcfileloc "cfg.h" 81)
+ (!fields 15
+ (!pair "x_entry_block_ptr"
+ (!type already_seen 269)
+ (!srcfileloc "cfg.h" 41)
+ nil )
+ (!pair "x_exit_block_ptr"
+ (!type already_seen 269)
+ (!srcfileloc "cfg.h" 42)
+ nil )
+ (!pair "x_basic_block_info"
+ (!type pointer 380 nil gc_used
+ (!type user_struct 381
+ (!type already_seen 380)
+ gc_pointed_to "vec<basic_block,va_gc>"
+ (!srcfileloc "cfg.h" 45)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cfg.h" 45)
+ nil )
+ (!pair "basic_block"
+ (!type already_seen 269)
+ (!srcfileloc "cfg.h" 45)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cfg.h" 45)
+ nil )
+ (!pair "x_n_basic_blocks"
+ (!type already_seen 2)
+ (!srcfileloc "cfg.h" 48)
+ nil )
+ (!pair "x_n_edges"
+ (!type already_seen 2)
+ (!srcfileloc "cfg.h" 51)
+ nil )
+ (!pair "x_last_basic_block"
+ (!type already_seen 2)
+ (!srcfileloc "cfg.h" 54)
+ nil )
+ (!pair "last_label_uid"
+ (!type already_seen 2)
+ (!srcfileloc "cfg.h" 57)
+ nil )
+ (!pair "x_label_to_block_map"
+ (!type already_seen 380)
+ (!srcfileloc "cfg.h" 61)
+ nil )
+ (!pair "x_profile_status"
+ (!type already_seen 2)
+ (!srcfileloc "cfg.h" 63)
+ nil )
+ (!pair "x_dom_computed"
+ (!type array 382 nil gc_used "2"
+ (!type already_seen 2)
+ )
+ (!srcfileloc "cfg.h" 66)
+ nil )
+ (!pair "x_n_bbs_in_dom_tree"
+ (!type array 383 nil gc_used "2"
+ (!type already_seen 2)
+ )
+ (!srcfileloc "cfg.h" 69)
+ nil )
+ (!pair "max_jumptable_ents"
+ (!type already_seen 2)
+ (!srcfileloc "cfg.h" 73)
+ nil )
+ (!pair "count_max"
+ (!type already_seen 301)
+ (!srcfileloc "cfg.h" 76)
+ nil )
+ (!pair "edge_flags_allocated"
+ (!type already_seen 2)
+ (!srcfileloc "cfg.h" 79)
+ nil )
+ (!pair "bb_flags_allocated"
+ (!type already_seen 2)
+ (!srcfileloc "cfg.h" 80)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "function.h" 253)
+ nil )
+ (!pair "gimple_body"
+ (!type already_seen 282)
+ (!srcfileloc "function.h" 256)
+ nil )
+ (!pair "gimple_df"
+ (!type pointer 384 nil gc_used
+ (!type struct 385
+ (!type already_seen 384)
+ gc_pointed_to "gimple_df"
+ (!srcfileloc "gimple-ssa.h" 115)
+ (!fields 13
+ (!pair "ssa_names"
+ (!type already_seen 85)
+ (!srcfileloc "gimple-ssa.h" 74)
+ nil )
+ (!pair "vop"
+ (!type already_seen 23)
+ (!srcfileloc "gimple-ssa.h" 77)
+ nil )
+ (!pair "escaped"
+ (!type struct 386 nil gc_used "pt_solution"
+ (!srcfileloc "tree-ssa-alias.h" 65)
+ (!fields 11
+ (!pair "anything"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssa-alias.h" 32)
+ nil )
+ (!pair "nonlocal"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssa-alias.h" 36)
+ nil )
+ (!pair "escaped"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssa-alias.h" 40)
+ nil )
+ (!pair "ipa_escaped"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssa-alias.h" 44)
+ nil )
+ (!pair "null"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssa-alias.h" 48)
+ nil )
+ (!pair "vars_contains_nonlocal"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssa-alias.h" 51)
+ nil )
+ (!pair "vars_contains_escaped"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssa-alias.h" 53)
+ nil )
+ (!pair "vars_contains_escaped_heap"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssa-alias.h" 56)
+ nil )
+ (!pair "vars_contains_restrict"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssa-alias.h" 59)
+ nil )
+ (!pair "vars_contains_interposable"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssa-alias.h" 61)
+ nil )
+ (!pair "vars"
+ (!type pointer 387 nil gc_used
+ (!type struct 388
+ (!type already_seen 387)
+ gc_pointed_to "bitmap_head"
+ (!srcfileloc "bitmap.h" 361)
+ (!fields 7
+ (!pair "indx"
+ (!type already_seen 2)
+ (!srcfileloc "bitmap.h" 335)
+ nil )
+ (!pair "tree_form"
+ (!type already_seen 2)
+ (!srcfileloc "bitmap.h" 338)
+ nil )
+ (!pair "padding"
+ (!type already_seen 2)
+ (!srcfileloc "bitmap.h" 340)
+ nil )
+ (!pair "alloc_descriptor"
+ (!type already_seen 2)
+ (!srcfileloc "bitmap.h" 342)
+ nil )
+ (!pair "first"
+ (!type pointer 389 nil gc_used
+ (!type struct 390
+ (!type already_seen 389)
+ gc_pointed_to "bitmap_element"
+ (!srcfileloc "bitmap.h" 345)
+ (!fields 4
+ (!pair "next"
+ (!type already_seen 389)
+ (!srcfileloc "bitmap.h" 313)
+ nil )
+ (!pair "prev"
+ (!type already_seen 389)
+ (!srcfileloc "bitmap.h" 316)
+ nil )
+ (!pair "indx"
+ (!type already_seen 2)
+ (!srcfileloc "bitmap.h" 318)
+ nil )
+ (!pair "bits"
+ (!type array 391 nil gc_used "BITMAP_ELEMENT_WORDS"
+ (!type already_seen 2)
+ )
+ (!srcfileloc "bitmap.h" 320)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "bitmap.h" 345)
+ nil )
+ (!pair "current"
+ (!type already_seen 389)
+ (!srcfileloc "bitmap.h" 347)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "obstack"
+ (!type pointer 392 nil gc_unused
+ (!type struct 393
+ (!type already_seen 392)
+ gc_unused "bitmap_obstack"
+ (!srcfileloc "bitmap.h" 349)
+ (!fields 0 )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "bitmap.h" 349)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "tree-ssa-alias.h" 64)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gimple-ssa.h" 80)
+ nil )
+ (!pair "decls_to_pointers"
+ (!type pointer 394 nil gc_used
+ (!type user_struct 395
+ (!type already_seen 394)
+ gc_pointed_to "hash_map<tree,tree>"
+ (!srcfileloc "gimple-ssa.h" 84)
+ (!fields 2
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "gimple-ssa.h" 84)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "gimple-ssa.h" 84)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "gimple-ssa.h" 84)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "free_ssanames"
+ (!type already_seen 85)
+ (!srcfileloc "gimple-ssa.h" 87)
+ nil )
+ (!pair "free_ssanames_queue"
+ (!type already_seen 85)
+ (!srcfileloc "gimple-ssa.h" 90)
+ nil )
+ (!pair "default_defs"
+ (!type pointer 396 nil gc_used
+ (!type user_struct 397
+ (!type already_seen 396)
+ gc_pointed_to "hash_table<ssa_name_hasher>"
+ (!srcfileloc "gimple-ssa.h" 96)
+ (!fields 1
+ (!pair "ssa_name_hasher"
+ (!type struct 398 nil gc_used "ssa_name_hasher"
+ (!srcfileloc "gimple-ssa.h" 96)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "gimple-ssa.h" 96)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "gimple-ssa.h" 96)
+ nil )
+ (!pair "ssa_renaming_needed"
+ (!type already_seen 2)
+ (!srcfileloc "gimple-ssa.h" 99)
+ nil )
+ (!pair "rename_vops"
+ (!type already_seen 2)
+ (!srcfileloc "gimple-ssa.h" 102)
+ nil )
+ (!pair "in_ssa_p"
+ (!type already_seen 2)
+ (!srcfileloc "gimple-ssa.h" 105)
+ nil )
+ (!pair "ipa_pta"
+ (!type already_seen 2)
+ (!srcfileloc "gimple-ssa.h" 108)
+ nil )
+ (!pair "ssa_operands"
+ (!type struct 399 nil gc_used "ssa_operands"
+ (!srcfileloc "tree-ssa-operands.h" 63)
+ (!fields 5
+ (!pair "operand_memory"
+ (!type pointer 400 nil gc_used
+ (!type struct 401
+ (!type already_seen 400)
+ gc_pointed_to "ssa_operand_memory_d"
+ (!srcfileloc "tree-ssa-operands.h" 51)
+ (!fields 2
+ (!pair "next"
+ (!type already_seen 400)
+ (!srcfileloc "tree-ssa-operands.h" 49)
+ nil )
+ (!pair "mem"
+ (!type array 402 nil gc_used "1"
+ (!type already_seen 8)
+ )
+ (!srcfileloc "tree-ssa-operands.h" 50)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "tree-ssa-operands.h" 55)
+ nil )
+ (!pair "operand_memory_index"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssa-operands.h" 56)
+ nil )
+ (!pair "ssa_operand_mem_size"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssa-operands.h" 58)
+ nil )
+ (!pair "ops_active"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssa-operands.h" 60)
+ nil )
+ (!pair "free_uses"
+ (!type pointer 403 nil gc_unused
+ (!type struct 404
+ (!type already_seen 403)
+ gc_unused "use_optype_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "tree-ssa-operands.h" 62)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "gimple-ssa.h" 110)
+ nil )
+ (!pair "tm_restart"
+ (!type pointer 405 nil gc_used
+ (!type user_struct 406
+ (!type already_seen 405)
+ gc_pointed_to "hash_table<tm_restart_hasher>"
+ (!srcfileloc "gimple-ssa.h" 114)
+ (!fields 1
+ (!pair "tm_restart_hasher"
+ (!type struct 407 nil gc_used "tm_restart_hasher"
+ (!srcfileloc "gimple-ssa.h" 114)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "gimple-ssa.h" 114)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "gimple-ssa.h" 114)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "function.h" 259)
+ nil )
+ (!pair "x_current_loops"
+ (!type pointer 408 nil gc_used
+ (!type struct 409
+ (!type already_seen 408)
+ gc_pointed_to "loops"
+ (!srcfileloc "cfgloop.h" 335)
+ (!fields 4
+ (!pair "state"
+ (!type already_seen 2)
+ (!srcfileloc "cfgloop.h" 323)
+ nil )
+ (!pair "larray"
+ (!type already_seen 278)
+ (!srcfileloc "cfgloop.h" 326)
+ nil )
+ (!pair "exits"
+ (!type pointer 410 nil gc_used
+ (!type user_struct 411
+ (!type already_seen 410)
+ gc_pointed_to "hash_table<loop_exit_hasher>"
+ (!srcfileloc "cfgloop.h" 331)
+ (!fields 1
+ (!pair "loop_exit_hasher"
+ (!type struct 412 nil gc_used "loop_exit_hasher"
+ (!srcfileloc "cfgloop.h" 331)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "cfgloop.h" 331)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cfgloop.h" 331)
+ nil )
+ (!pair "tree_root"
+ (!type already_seen 275)
+ (!srcfileloc "cfgloop.h" 334)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "function.h" 262)
+ nil )
+ (!pair "pass_startwith"
+ (!type already_seen 11)
+ (!srcfileloc "function.h" 265)
+ nil )
+ (!pair "su"
+ (!type pointer 413 nil gc_used
+ (!type struct 414
+ (!type already_seen 413)
+ gc_pointed_to "stack_usage"
+ (!srcfileloc "function.h" 235)
+ (!fields 6
+ (!pair "static_stack_size"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 214)
+ nil )
+ (!pair "dynamic_stack_size"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 218)
+ nil )
+ (!pair "pushed_stack_size"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 223)
+ nil )
+ (!pair "has_unbounded_dynamic_stack_size"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 227)
+ nil )
+ (!pair "callees"
+ (!type pointer 415 nil gc_used
+ (!type user_struct 416
+ (!type already_seen 415)
+ gc_pointed_to "vec<callinfo_callee,va_gc>"
+ (!srcfileloc "function.h" 230)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "function.h" 230)
+ nil )
+ (!pair "callinfo_callee"
+ (!type struct 417 nil gc_used "callinfo_callee"
+ (!srcfileloc "function.h" 230)
+ (!fields 2
+ (!pair "location"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 199)
+ nil )
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "function.h" 200)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "function.h" 230)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "function.h" 230)
+ nil )
+ (!pair "dallocs"
+ (!type pointer 418 nil gc_used
+ (!type user_struct 419
+ (!type already_seen 418)
+ gc_pointed_to "vec<callinfo_dalloc,va_gc>"
+ (!srcfileloc "function.h" 234)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "function.h" 234)
+ nil )
+ (!pair "callinfo_dalloc"
+ (!type struct 420 nil gc_used "callinfo_dalloc"
+ (!srcfileloc "function.h" 234)
+ (!fields 2
+ (!pair "location"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 206)
+ nil )
+ (!pair "name"
+ (!type already_seen 11)
+ (!srcfileloc "function.h" 207)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "function.h" 234)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "function.h" 234)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "function.h" 268)
+ nil )
+ (!pair "value_histograms"
+ (!type already_seen 333)
+ (!srcfileloc "function.h" 271)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "function.h" 276)
+ nil )
+ (!pair "static_chain_decl"
+ (!type already_seen 23)
+ (!srcfileloc "function.h" 280)
+ nil )
+ (!pair "nonlocal_goto_save_area"
+ (!type already_seen 23)
+ (!srcfileloc "function.h" 285)
+ nil )
+ (!pair "local_decls"
+ (!type already_seen 85)
+ (!srcfileloc "function.h" 288)
+ nil )
+ (!pair "machine"
+ (!type pointer 421 nil gc_unused
+ (!type struct 422
+ (!type already_seen 421)
+ gc_maybe_pointed_to "machine_function"
+ (!srcfileloc "config/arm/arm.h" 1641)
+ (!fields 18
+ (!pair "eh_epilogue_sp_ofs"
+ (!type already_seen 100)
+ (!srcfileloc "config/arm/arm.h" 1601)
+ nil )
+ (!pair "far_jump_used"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1603)
+ nil )
+ (!pair "arg_pointer_live"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1605)
+ nil )
+ (!pair "lr_save_eliminated"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1607)
+ nil )
+ (!pair "stack_offsets"
+ (!type struct 423 nil gc_used "arm_stack_offsets"
+ (!srcfileloc "config/arm/arm.h" 1592)
+ (!fields 7
+ (!pair "saved_args"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1585)
+ nil )
+ (!pair "frame"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1586)
+ nil )
+ (!pair "saved_regs"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1587)
+ nil )
+ (!pair "soft_frame"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1588)
+ nil )
+ (!pair "locals_base"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1589)
+ nil )
+ (!pair "outgoing_args"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1590)
+ nil )
+ (!pair "saved_regs_mask"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1591)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "config/arm/arm.h" 1609)
+ nil )
+ (!pair "func_type"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1611)
+ nil )
+ (!pair "uses_anonymous_args"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1613)
+ nil )
+ (!pair "sibcall_blocked"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1616)
+ nil )
+ (!pair "pic_reg"
+ (!type already_seen 100)
+ (!srcfileloc "config/arm/arm.h" 1618)
+ nil )
+ (!pair "call_via"
+ (!type array 424 nil gc_used "14"
+ (!type already_seen 100)
+ )
+ (!srcfileloc "config/arm/arm.h" 1622)
+ nil )
+ (!pair "return_used_this_function"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1625)
+ nil )
+ (!pair "thumb1_cc_insn"
+ (!type already_seen 100)
+ (!srcfileloc "config/arm/arm.h" 1628)
+ nil )
+ (!pair "thumb1_cc_op0"
+ (!type already_seen 100)
+ (!srcfileloc "config/arm/arm.h" 1629)
+ nil )
+ (!pair "thumb1_cc_op1"
+ (!type already_seen 100)
+ (!srcfileloc "config/arm/arm.h" 1630)
+ nil )
+ (!pair "thumb1_cc_mode"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1632)
+ nil )
+ (!pair "after_arm_reorg"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1634)
+ nil )
+ (!pair "static_chain_stack_bytes"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1637)
+ nil )
+ (!pair "pacspval_needed"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 1640)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "function.h" 293)
+ (!options
+ (!option maybe_undef string "")
+ )
+ )
+ (!pair "language"
+ (!type pointer 425 nil gc_used
+ (!type lang_struct 426
+ (!type already_seen 425)
+ gc_pointed_to "language_function"
+ (!srcfileloc "ada/gcc-interface/trans.cc" 121)
+ (!fields 0 )
+ nil 4095
+ (!homotypes 10
+ (!type struct 427 nil gc_pointed_to "language_function"
+ (!srcfileloc "rust/rust-lang.cc" 98)
+ (!fields 0 )
+ nil 2048
+ (!type already_seen 426)
+ nil )
+
+ (!type struct 428 nil gc_pointed_to "language_function"
+ (!srcfileloc "m2/gm2-lang.cc" 112)
+ (!fields 1
+ (!pair "stmt_tree"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-lang.cc" 111)
+ nil )
+ )
+ nil 256
+ (!type already_seen 426)
+ nil )
+
+ (!type struct 429 nil gc_pointed_to "language_function"
+ (!srcfileloc "lto/lto-tree.h" 42)
+ (!fields 1
+ (!pair "dummy"
+ (!type already_seen 2)
+ (!srcfileloc "lto/lto-tree.h" 41)
+ nil )
+ )
+ nil 128
+ (!type already_seen 426)
+ nil )
+
+ (!type struct 430 nil gc_pointed_to "language_function"
+ (!srcfileloc "jit/dummy-frontend.cc" 523)
+ (!fields 1
+ (!pair "dummy"
+ (!type already_seen 2)
+ (!srcfileloc "jit/dummy-frontend.cc" 522)
+ nil )
+ )
+ nil 64
+ (!type already_seen 426)
+ nil )
+
+ (!type struct 431 nil gc_pointed_to "language_function"
+ (!srcfileloc "go/go-lang.cc" 84)
+ (!fields 1
+ (!pair "dummy"
+ (!type already_seen 2)
+ (!srcfileloc "go/go-lang.cc" 83)
+ nil )
+ )
+ nil 32
+ (!type already_seen 426)
+ nil )
+
+ (!type struct 432 nil gc_pointed_to "language_function"
+ (!srcfileloc "fortran/f95-lang.cc" 68)
+ (!fields 1
+ (!pair "binding_level"
+ (!type pointer 433 nil gc_used
+ (!type lang_struct 434
+ (!type already_seen 433)
+ gc_pointed_to "binding_level"
+ (!srcfileloc "d/d-tree.h" 131)
+ (!fields 0 )
+ (!options
+ (!option chain_next string "%h.level_chain")
+ )
+ 280
+ (!homotypes 3
+ (!type struct 435 nil gc_pointed_to "binding_level"
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 93)
+ (!fields 13
+ (!pair "fndecl"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 44)
+ nil )
+ (!pair "names"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 48)
+ nil )
+ (!pair "is_global"
+ (!type already_seen 2)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 52)
+ nil )
+ (!pair "context"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 57)
+ nil )
+ (!pair "next"
+ (!type already_seen 433)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 61)
+ nil )
+ (!pair "list"
+ (!type already_seen 433)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 64)
+ nil )
+ (!pair "m2_statements"
+ (!type already_seen 85)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 68)
+ nil )
+ (!pair "constants"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 73)
+ nil )
+ (!pair "init_functions"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 76)
+ nil )
+ (!pair "types"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 81)
+ nil )
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 86)
+ nil )
+ (!pair "labels"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 89)
+ nil )
+ (!pair "count"
+ (!type already_seen 2)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 92)
+ nil )
+ )
+ nil 256
+ (!type already_seen 434)
+ nil )
+
+ (!type struct 436 nil gc_pointed_to "binding_level"
+ (!srcfileloc "fortran/f95-lang.cc" 323)
+ (!fields 4
+ (!pair "names"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/f95-lang.cc" 314)
+ nil )
+ (!pair "blocks"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/f95-lang.cc" 317)
+ nil )
+ (!pair "level_chain"
+ (!type already_seen 433)
+ (!srcfileloc "fortran/f95-lang.cc" 319)
+ nil )
+ (!pair "reversed"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/f95-lang.cc" 322)
+ nil )
+ )
+ nil 16
+ (!type already_seen 434)
+ nil )
+
+ (!type struct 437 nil gc_pointed_to "binding_level"
+ (!srcfileloc "d/d-tree.h" 131)
+ (!fields 4
+ (!pair "names"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-tree.h" 120)
+ nil )
+ (!pair "blocks"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-tree.h" 124)
+ nil )
+ (!pair "level_chain"
+ (!type already_seen 433)
+ (!srcfileloc "d/d-tree.h" 127)
+ nil )
+ (!pair "kind"
+ (!type already_seen 2)
+ (!srcfileloc "d/d-tree.h" 130)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.level_chain")
+ )
+ 8
+ (!type already_seen 434)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "fortran/f95-lang.cc" 67)
+ nil )
+ )
+ nil 16
+ (!type already_seen 426)
+ nil )
+
+ (!type struct 438 nil gc_pointed_to "language_function"
+ (!srcfileloc "d/d-tree.h" 258)
+ (!fields 6
+ (!pair "function"
+ (!type pointer 439 nil gc_unused
+ (!type struct 440
+ (!type already_seen 439)
+ gc_unused "FuncDeclaration"
+ (!srcfileloc "d/d-tree.h" 243)
+ (!fields 0 )
+ nil 8 nil nil )
+ )
+ (!srcfileloc "d/d-tree.h" 243)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "module"
+ (!type pointer 441 nil gc_unused
+ (!type struct 442
+ (!type already_seen 441)
+ gc_unused "Module"
+ (!srcfileloc "d/d-tree.h" 244)
+ (!fields 0 )
+ nil 8 nil nil )
+ )
+ (!srcfileloc "d/d-tree.h" 244)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "static_chain"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-tree.h" 247)
+ nil )
+ (!pair "stmt_list"
+ (!type already_seen 85)
+ (!srcfileloc "d/d-tree.h" 251)
+ nil )
+ (!pair "vars_in_scope"
+ (!type already_seen 85)
+ (!srcfileloc "d/d-tree.h" 254)
+ nil )
+ (!pair "labels"
+ (!type pointer 443 nil gc_used
+ (!type user_struct 444
+ (!type already_seen 443)
+ gc_pointed_to "hash_map<Statement*,d_label_entry>"
+ (!srcfileloc "d/d-tree.h" 257)
+ (!fields 2
+ (!pair "d_label_entry"
+ (!type struct 445 nil gc_used "d_label_entry"
+ (!srcfileloc "d/d-tree.h" 257)
+ (!fields 7
+ (!pair "label"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-tree.h" 160)
+ nil )
+ (!pair "statement"
+ (!type pointer 446 nil gc_used
+ (!type struct 447
+ (!type already_seen 446)
+ gc_pointed_to "Statement"
+ (!srcfileloc "d/d-tree.h" 145)
+ (!fields 0 )
+ nil 8 nil nil )
+ )
+ (!srcfileloc "d/d-tree.h" 163)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "level"
+ (!type already_seen 433)
+ (!srcfileloc "d/d-tree.h" 168)
+ nil )
+ (!pair "fwdrefs"
+ (!type pointer 448 nil gc_used
+ (!type struct 449
+ (!type already_seen 448)
+ gc_pointed_to "d_label_use_entry"
+ (!srcfileloc "d/d-tree.h" 151)
+ (!fields 3
+ (!pair "next"
+ (!type already_seen 448)
+ (!srcfileloc "d/d-tree.h" 142)
+ nil )
+ (!pair "statement"
+ (!type already_seen 446)
+ (!srcfileloc "d/d-tree.h" 145)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "level"
+ (!type already_seen 433)
+ (!srcfileloc "d/d-tree.h" 150)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 8 nil nil )
+ )
+ (!srcfileloc "d/d-tree.h" 171)
+ nil )
+ (!pair "in_try_scope"
+ (!type already_seen 2)
+ (!srcfileloc "d/d-tree.h" 176)
+ nil )
+ (!pair "in_catch_scope"
+ (!type already_seen 2)
+ (!srcfileloc "d/d-tree.h" 177)
+ nil )
+ (!pair "bc_label"
+ (!type already_seen 2)
+ (!srcfileloc "d/d-tree.h" 180)
+ nil )
+ )
+ nil 8 nil nil )
+ (!srcfileloc "d/d-tree.h" 257)
+ nil )
+ (!pair "Statement"
+ (!type already_seen 446)
+ (!srcfileloc "d/d-tree.h" 257)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "d/d-tree.h" 257)
+ nil )
+ )
+ nil 8
+ (!type already_seen 426)
+ nil )
+
+ (!type struct 450 nil gc_pointed_to "language_function"
+ (!srcfileloc "cp/cp-tree.h" 2123)
+ (!fields 20
+ (!pair "base"
+ (!type struct 451 nil gc_used "c_language_function"
+ (!srcfileloc "c-family/c-common.h" 599)
+ (!fields 2
+ (!pair "x_stmt_tree"
+ (!type struct 452
+ (!type pointer 453 nil gc_unused
+ (!type already_seen 452)
+ )
+ gc_used "stmt_tree_s"
+ (!srcfileloc "c-family/c-common.h" 584)
+ (!fields 2
+ (!pair "x_cur_stmt_list"
+ (!type already_seen 85)
+ (!srcfileloc "c-family/c-common.h" 572)
+ nil )
+ (!pair "stmts_are_full_exprs_p"
+ (!type already_seen 2)
+ (!srcfileloc "c-family/c-common.h" 583)
+ nil )
+ )
+ nil 1542 nil nil )
+ (!srcfileloc "c-family/c-common.h" 594)
+ nil )
+ (!pair "local_typedefs"
+ (!type already_seen 85)
+ (!srcfileloc "c-family/c-common.h" 598)
+ nil )
+ )
+ nil 1542 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 2093)
+ nil )
+ (!pair "x_current_class_ptr"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2095)
+ nil )
+ (!pair "x_current_class_ref"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2096)
+ nil )
+ (!pair "x_eh_spec_block"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2097)
+ nil )
+ (!pair "x_in_charge_parm"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2098)
+ nil )
+ (!pair "x_vtt_parm"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2099)
+ nil )
+ (!pair "x_return_value"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2100)
+ nil )
+ (!pair "returns_value"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2102)
+ nil )
+ (!pair "returns_null"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2103)
+ nil )
+ (!pair "returns_abnormally"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2104)
+ nil )
+ (!pair "infinite_loop"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2105)
+ nil )
+ (!pair "x_in_function_try_handler"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2106)
+ nil )
+ (!pair "x_in_base_initializer"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2107)
+ nil )
+ (!pair "can_throw"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2110)
+ nil )
+ (!pair "invalid_constexpr"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2112)
+ nil )
+ (!pair "throwing_cleanup"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2113)
+ nil )
+ (!pair "backward_goto"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2114)
+ nil )
+ (!pair "x_named_labels"
+ (!type pointer 454 nil gc_used
+ (!type user_struct 455
+ (!type already_seen 454)
+ gc_pointed_to "hash_table<named_label_hash>"
+ (!srcfileloc "cp/cp-tree.h" 2116)
+ (!fields 1
+ (!pair "named_label_hash"
+ (!type struct 456 nil gc_used "named_label_hash"
+ (!srcfileloc "cp/cp-tree.h" 2116)
+ (!fields 0 )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 2116)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cp/cp-tree.h" 2116)
+ nil )
+ (!pair "bindings"
+ (!type already_seen 83)
+ (!srcfileloc "cp/cp-tree.h" 2118)
+ nil )
+ (!pair "infinite_loops"
+ (!type already_seen 85)
+ (!srcfileloc "cp/cp-tree.h" 2122)
+ nil )
+ )
+ nil 1028
+ (!type already_seen 426)
+ nil )
+
+ (!type struct 457 nil gc_pointed_to "language_function"
+ (!srcfileloc "c/c-lang.h" 61)
+ (!fields 8
+ (!pair "base"
+ (!type already_seen 451)
+ (!srcfileloc "c/c-lang.h" 53)
+ nil )
+ (!pair "x_in_statement"
+ (!type already_seen 8)
+ (!srcfileloc "c/c-lang.h" 54)
+ nil )
+ (!pair "x_switch_stack"
+ (!type pointer 458 nil gc_unused
+ (!type struct 459
+ (!type already_seen 458)
+ gc_unused "c_switch"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "c/c-lang.h" 55)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "arg_info"
+ (!type pointer 460 nil gc_unused
+ (!type struct 461
+ (!type already_seen 460)
+ gc_unused "c_arg_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "c/c-lang.h" 56)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "returns_value"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-lang.h" 57)
+ nil )
+ (!pair "returns_null"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-lang.h" 58)
+ nil )
+ (!pair "returns_abnormally"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-lang.h" 59)
+ nil )
+ (!pair "warn_about_return_type"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-lang.h" 60)
+ nil )
+ )
+ nil 514
+ (!type already_seen 426)
+ nil )
+
+ (!type struct 462 nil gc_pointed_to "language_function"
+ (!srcfileloc "ada/gcc-interface/trans.cc" 121)
+ (!fields 4
+ (!pair "parm_attr_cache"
+ (!type pointer 463 nil gc_used
+ (!type user_struct 464
+ (!type already_seen 463)
+ gc_pointed_to "vec<parm_attr,va_gc>"
+ (!srcfileloc "ada/gcc-interface/trans.cc" 117)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 117)
+ nil )
+ (!pair "parm_attr"
+ (!type pointer 465 nil gc_used
+ (!type struct 466
+ (!type already_seen 465)
+ gc_pointed_to "parm_attr_d"
+ (!srcfileloc "ada/gcc-interface/trans.cc" 111)
+ (!fields 5
+ (!pair "id"
+ (!type already_seen 2)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 106)
+ nil )
+ (!pair "dim"
+ (!type already_seen 2)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 107)
+ nil )
+ (!pair "first"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 108)
+ nil )
+ (!pair "last"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 109)
+ nil )
+ (!pair "length"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 110)
+ nil )
+ )
+ nil 1 nil nil )
+ )
+ (!srcfileloc "ada/gcc-interface/trans.cc" 117)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ada/gcc-interface/trans.cc" 117)
+ nil )
+ (!pair "named_ret_val"
+ (!type already_seen 387)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 118)
+ nil )
+ (!pair "other_ret_val"
+ (!type already_seen 85)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 119)
+ nil )
+ (!pair "gnat_ret"
+ (!type already_seen 2)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 120)
+ nil )
+ )
+ nil 1
+ (!type already_seen 426)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "function.h" 296)
+ nil )
+ (!pair "used_types_hash"
+ (!type pointer 467 nil gc_used
+ (!type user_struct 468
+ (!type already_seen 467)
+ gc_pointed_to "hash_set<tree>"
+ (!srcfileloc "function.h" 299)
+ (!fields 1
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "function.h" 299)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "function.h" 299)
+ nil )
+ (!pair "fde"
+ (!type pointer 469 nil gc_used
+ (!type struct 470
+ (!type already_seen 469)
+ gc_pointed_to "dw_fde_node"
+ (!srcfileloc "dwarf2out.h" 119)
+ (!fields 24
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "dwarf2out.h" 79)
+ nil )
+ (!pair "dw_fde_begin"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 80)
+ nil )
+ (!pair "dw_fde_current_label"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 81)
+ nil )
+ (!pair "dw_fde_end"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 82)
+ nil )
+ (!pair "dw_fde_vms_end_prologue"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 83)
+ nil )
+ (!pair "dw_fde_vms_begin_epilogue"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 84)
+ nil )
+ (!pair "dw_fde_second_begin"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 85)
+ nil )
+ (!pair "dw_fde_second_end"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 86)
+ nil )
+ (!pair "dw_fde_cfi"
+ (!type pointer 471 nil gc_used
+ (!type user_struct 472
+ (!type already_seen 471)
+ gc_pointed_to "vec<dw_cfi_ref,va_gc>"
+ (!srcfileloc "dwarf2out.h" 68)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "dwarf2out.h" 68)
+ nil )
+ (!pair "dw_cfi_ref"
+ (!type pointer 473 nil gc_used
+ (!type struct 474
+ (!type already_seen 473)
+ gc_pointed_to "dw_cfi_node"
+ (!srcfileloc "dwarf2out.h" 65)
+ (!fields 3
+ (!pair "dw_cfi_opc"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 60)
+ nil )
+ (!pair "dw_cfi_oprnd1"
+ (!type union 475 nil gc_used "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/dwarf2out.h:50"
+ (!srcfileloc "dwarf2out.h" 57)
+ (!fields 5
+ (!pair "dw_cfi_reg_num"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 51)
+ (!options
+ (!option tag string "dw_cfi_oprnd_reg_num")
+ )
+ )
+ (!pair "dw_cfi_offset"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 52)
+ (!options
+ (!option tag string "dw_cfi_oprnd_offset")
+ )
+ )
+ (!pair "dw_cfi_addr"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 53)
+ (!options
+ (!option tag string "dw_cfi_oprnd_addr")
+ )
+ )
+ (!pair "dw_cfi_loc"
+ (!type pointer 476 nil gc_used
+ (!type struct 477
+ (!type already_seen 476)
+ gc_pointed_to "dw_loc_descr_node"
+ (!srcfileloc "dwarf2out.h" 299)
+ (!fields 7
+ (!pair "dw_loc_next"
+ (!type already_seen 476)
+ (!srcfileloc "dwarf2out.h" 287)
+ nil )
+ (!pair "dw_loc_opc"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 288)
+ nil )
+ (!pair "dtprel"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 291)
+ nil )
+ (!pair "frame_offset_rel"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 295)
+ nil )
+ (!pair "dw_loc_addr"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 296)
+ nil )
+ (!pair "dw_loc_oprnd1"
+ (!type struct 478
+ (!type pointer 479 nil gc_unused
+ (!type already_seen 478)
+ )
+ gc_used "dw_val_node"
+ (!srcfileloc "dwarf2out.h" 297)
+ (!fields 3
+ (!pair "val_class"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 242)
+ nil )
+ (!pair "val_entry"
+ (!type pointer 480 nil gc_used
+ (!type struct 481
+ (!type already_seen 480)
+ gc_pointed_to "addr_table_entry"
+ (!srcfileloc "dwarf2out.cc" 1364)
+ (!fields 4
+ (!pair "kind"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 1345)
+ nil )
+ (!pair "refcount"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 1346)
+ nil )
+ (!pair "index"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 1347)
+ nil )
+ (!pair "addr"
+ (!type union 482 nil gc_used "addr_table_entry_struct_union"
+ (!srcfileloc "dwarf2out.cc" 1352)
+ (!fields 2
+ (!pair "rtl"
+ (!type already_seen 100)
+ (!srcfileloc "dwarf2out.cc" 1350)
+ (!options
+ (!option tag string "0")
+ )
+ )
+ (!pair "label"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 1351)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "dwarf2out.cc" 1353)
+ (!options
+ (!option desc string "%1.kind")
+ )
+ )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "dwarf2out.h" 243)
+ nil )
+ (!pair "v"
+ (!type union 483 nil gc_used "dw_val_struct_union"
+ (!srcfileloc "dwarf2out.h" 279)
+ (!fields 23
+ (!pair "val_addr"
+ (!type already_seen 100)
+ (!srcfileloc "dwarf2out.h" 246)
+ (!options
+ (!option tag string "dw_val_class_addr")
+ )
+ )
+ (!pair "val_offset"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 247)
+ (!options
+ (!option tag string "dw_val_class_offset")
+ )
+ )
+ (!pair "val_loc_list"
+ (!type pointer 484 nil gc_used
+ (!type struct 485
+ (!type already_seen 484)
+ gc_pointed_to "dw_loc_list_struct"
+ (!srcfileloc "dwarf2out.cc" 1391)
+ (!fields 19
+ (!pair "dw_loc_next"
+ (!type already_seen 484)
+ (!srcfileloc "dwarf2out.cc" 1362)
+ nil )
+ (!pair "begin"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 1363)
+ nil )
+ (!pair "begin_entry"
+ (!type already_seen 480)
+ (!srcfileloc "dwarf2out.cc" 1364)
+ nil )
+ (!pair "end"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 1365)
+ nil )
+ (!pair "end_entry"
+ (!type already_seen 480)
+ (!srcfileloc "dwarf2out.cc" 1366)
+ nil )
+ (!pair "ll_symbol"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 1367)
+ nil )
+ (!pair "vl_symbol"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 1369)
+ nil )
+ (!pair "section"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 1370)
+ nil )
+ (!pair "expr"
+ (!type already_seen 476)
+ (!srcfileloc "dwarf2out.cc" 1371)
+ nil )
+ (!pair "vbegin"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 1372)
+ nil )
+ (!pair "vend"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 1372)
+ nil )
+ (!pair "hash"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 1373)
+ nil )
+ (!pair "resolved_addr"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 1376)
+ nil )
+ (!pair "replaced"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 1378)
+ nil )
+ (!pair "emitted"
+ (!type already_seen 8)
+ (!srcfileloc "dwarf2out.cc" 1381)
+ nil )
+ (!pair "num_assigned"
+ (!type already_seen 8)
+ (!srcfileloc "dwarf2out.cc" 1383)
+ nil )
+ (!pair "offset_emitted"
+ (!type already_seen 8)
+ (!srcfileloc "dwarf2out.cc" 1385)
+ nil )
+ (!pair "noted_variable_value"
+ (!type already_seen 8)
+ (!srcfileloc "dwarf2out.cc" 1387)
+ nil )
+ (!pair "force"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 1390)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "dwarf2out.h" 248)
+ (!options
+ (!option tag string "dw_val_class_loc_list")
+ )
+ )
+ (!pair "val_view_list"
+ (!type pointer 486 nil gc_used
+ (!type struct 487
+ (!type already_seen 486)
+ gc_pointed_to "die_struct"
+ (!srcfileloc "dwarf2out.cc" 3197)
+ (!fields 15
+ (!pair "die_id"
+ (!type union 488 nil gc_used "die_symbol_or_type_node"
+ (!srcfileloc "dwarf2out.cc" 3174)
+ (!fields 2
+ (!pair "die_symbol"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 3172)
+ (!options
+ (!option tag string "0")
+ )
+ )
+ (!pair "die_type_node"
+ (!type pointer 489 nil gc_used
+ (!type struct 490
+ (!type already_seen 489)
+ gc_pointed_to "comdat_type_node"
+ (!srcfileloc "dwarf2out.cc" 3273)
+ (!fields 5
+ (!pair "root_die"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 3268)
+ nil )
+ (!pair "type_die"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 3269)
+ nil )
+ (!pair "skeleton_die"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 3270)
+ nil )
+ (!pair "signature"
+ (!type array 491 nil gc_used "DWARF_TYPE_SIGNATURE_SIZE"
+ (!type already_seen 8)
+ )
+ (!srcfileloc "dwarf2out.cc" 3271)
+ nil )
+ (!pair "next"
+ (!type already_seen 489)
+ (!srcfileloc "dwarf2out.cc" 3272)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "dwarf2out.cc" 3173)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "dwarf2out.cc" 3175)
+ (!options
+ (!option desc string "%0.comdat_type_p")
+ )
+ )
+ (!pair "die_attr"
+ (!type pointer 492 nil gc_used
+ (!type user_struct 493
+ (!type already_seen 492)
+ gc_pointed_to "vec<dw_attr_node,va_gc>"
+ (!srcfileloc "dwarf2out.cc" 3176)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "dwarf2out.cc" 3176)
+ nil )
+ (!pair "dw_attr_node"
+ (!type struct 494 nil gc_used "dw_attr_struct"
+ (!srcfileloc "dwarf2out.h" 434)
+ (!fields 2
+ (!pair "dw_attr"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 432)
+ nil )
+ (!pair "dw_attr_val"
+ (!type already_seen 478)
+ (!srcfileloc "dwarf2out.h" 433)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "dwarf2out.cc" 3176)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "dwarf2out.cc" 3176)
+ nil )
+ (!pair "die_parent"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 3177)
+ nil )
+ (!pair "die_child"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 3178)
+ nil )
+ (!pair "die_sib"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 3179)
+ nil )
+ (!pair "die_definition"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 3180)
+ nil )
+ (!pair "die_offset"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3181)
+ nil )
+ (!pair "die_abbrev"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3182)
+ nil )
+ (!pair "die_mark"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3183)
+ nil )
+ (!pair "decl_id"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3184)
+ nil )
+ (!pair "die_tag"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3185)
+ nil )
+ (!pair "die_perennial_p"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3187)
+ nil )
+ (!pair "comdat_type_p"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3188)
+ nil )
+ (!pair "with_offset"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3191)
+ nil )
+ (!pair "removed"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3195)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ (!option chain_circular string "%h.die_sib")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "dwarf2out.h" 249)
+ (!options
+ (!option tag string "dw_val_class_view_list")
+ )
+ )
+ (!pair "val_loc"
+ (!type already_seen 476)
+ (!srcfileloc "dwarf2out.h" 250)
+ (!options
+ (!option tag string "dw_val_class_loc")
+ )
+ )
+ (!pair "val_int"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 251)
+ (!options
+ (!option default string "")
+ )
+ )
+ (!pair "val_unsigned"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 253)
+ (!options
+ (!option tag string "dw_val_class_unsigned_const")
+ )
+ )
+ (!pair "val_double"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 254)
+ (!options
+ (!option tag string "dw_val_class_const_double")
+ )
+ )
+ (!pair "val_wide"
+ (!type pointer 495 nil gc_used
+ (!type user_struct 496
+ (!type already_seen 495)
+ gc_pointed_to "generic_wide_int<wide_int_storage>"
+ (!srcfileloc "wide-int.h" 322)
+ (!fields 1
+ (!pair "wide_int_storage"
+ (!type struct 497 nil gc_used "wide_int_storage"
+ (!srcfileloc "wide-int.h" 1088)
+ (!fields 3
+ (!pair "val"
+ (!type array 498 nil gc_used "WIDE_INT_MAX_ELTS"
+ (!type already_seen 2)
+ )
+ (!srcfileloc "wide-int.h" 1062)
+ nil )
+ (!pair "len"
+ (!type already_seen 2)
+ (!srcfileloc "wide-int.h" 1063)
+ nil )
+ (!pair "precision"
+ (!type already_seen 2)
+ (!srcfileloc "wide-int.h" 1064)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "wide-int.h" 322)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "dwarf2out.h" 255)
+ (!options
+ (!option tag string "dw_val_class_wide_int")
+ )
+ )
+ (!pair "val_vec"
+ (!type struct 499 nil gc_used "dw_vec_const"
+ (!srcfileloc "dwarf2out.h" 256)
+ (!fields 3
+ (!pair "array"
+ (!type already_seen 3)
+ (!srcfileloc "dwarf2out.h" 212)
+ (!options
+ (!option atomic string "")
+ )
+ )
+ (!pair "length"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 213)
+ nil )
+ (!pair "elt_size"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 214)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "dwarf2out.h" 256)
+ (!options
+ (!option tag string "dw_val_class_vec")
+ )
+ )
+ (!pair "val_die_ref"
+ (!type struct 500 nil gc_used "dw_val_die_union"
+ (!srcfileloc "dwarf2out.h" 261)
+ (!fields 2
+ (!pair "die"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.h" 259)
+ nil )
+ (!pair "external"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 260)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "dwarf2out.h" 261)
+ (!options
+ (!option tag string "dw_val_class_die_ref")
+ )
+ )
+ (!pair "val_fde_index"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 262)
+ (!options
+ (!option tag string "dw_val_class_fde_ref")
+ )
+ )
+ (!pair "val_str"
+ (!type pointer 501 nil gc_used
+ (!type struct 502
+ (!type already_seen 501)
+ gc_pointed_to "indirect_string_node"
+ (!srcfileloc "dwarf2out.cc" 223)
+ (!fields 5
+ (!pair "str"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 218)
+ nil )
+ (!pair "refcount"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 219)
+ nil )
+ (!pair "form"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 220)
+ nil )
+ (!pair "label"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 221)
+ nil )
+ (!pair "index"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 222)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "dwarf2out.h" 263)
+ (!options
+ (!option tag string "dw_val_class_str")
+ )
+ )
+ (!pair "val_lbl_id"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 264)
+ (!options
+ (!option tag string "dw_val_class_lbl_id")
+ )
+ )
+ (!pair "val_flag"
+ (!type already_seen 8)
+ (!srcfileloc "dwarf2out.h" 265)
+ (!options
+ (!option tag string "dw_val_class_flag")
+ )
+ )
+ (!pair "val_file"
+ (!type pointer 503 nil gc_used
+ (!type struct 504
+ (!type already_seen 503)
+ gc_pointed_to "dwarf_file_data"
+ (!srcfileloc "dwarf2out.h" 465)
+ (!fields 3
+ (!pair "key"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 462)
+ nil )
+ (!pair "filename"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 463)
+ nil )
+ (!pair "emitted_number"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 464)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "dwarf2out.h" 266)
+ (!options
+ (!option tag string "dw_val_class_file")
+ )
+ )
+ (!pair "val_file_implicit"
+ (!type already_seen 503)
+ (!srcfileloc "dwarf2out.h" 268)
+ (!options
+ (!option tag string "dw_val_class_file_implicit")
+ )
+ )
+ (!pair "val_data8"
+ (!type array 505 nil gc_used "8"
+ (!type already_seen 8)
+ )
+ (!srcfileloc "dwarf2out.h" 269)
+ (!options
+ (!option tag string "dw_val_class_data8")
+ )
+ )
+ (!pair "val_decl_ref"
+ (!type already_seen 23)
+ (!srcfileloc "dwarf2out.h" 270)
+ (!options
+ (!option tag string "dw_val_class_decl_ref")
+ )
+ )
+ (!pair "val_vms_delta"
+ (!type struct 506 nil gc_used "dw_val_vms_delta_union"
+ (!srcfileloc "dwarf2out.h" 275)
+ (!fields 2
+ (!pair "lbl1"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 273)
+ nil )
+ (!pair "lbl2"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 274)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "dwarf2out.h" 275)
+ (!options
+ (!option tag string "dw_val_class_vms_delta")
+ )
+ )
+ (!pair "val_discr_value"
+ (!type struct 507 nil gc_used "dw_discr_value"
+ (!srcfileloc "dwarf2out.h" 276)
+ (!fields 2
+ (!pair "pos"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 227)
+ nil )
+ (!pair "v"
+ (!type union 508 nil gc_used "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/dwarf2out.h:229"
+ (!srcfileloc "dwarf2out.h" 232)
+ (!fields 2
+ (!pair "sval"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 230)
+ (!options
+ (!option tag string "0")
+ )
+ )
+ (!pair "uval"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 231)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "dwarf2out.h" 233)
+ (!options
+ (!option desc string "%1.pos")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "dwarf2out.h" 276)
+ (!options
+ (!option tag string "dw_val_class_discr_value")
+ )
+ )
+ (!pair "val_discr_list"
+ (!type pointer 509 nil gc_used
+ (!type struct 510
+ (!type already_seen 509)
+ gc_pointed_to "dw_discr_list_node"
+ (!srcfileloc "dwarf2out.h" 314)
+ (!fields 4
+ (!pair "dw_discr_next"
+ (!type already_seen 509)
+ (!srcfileloc "dwarf2out.h" 306)
+ nil )
+ (!pair "dw_discr_lower_bound"
+ (!type already_seen 507)
+ (!srcfileloc "dwarf2out.h" 308)
+ nil )
+ (!pair "dw_discr_upper_bound"
+ (!type already_seen 507)
+ (!srcfileloc "dwarf2out.h" 309)
+ nil )
+ (!pair "dw_discr_range"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 313)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "dwarf2out.h" 277)
+ (!options
+ (!option tag string "dw_val_class_discr_list")
+ )
+ )
+ (!pair "val_symbolic_view"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.h" 278)
+ (!options
+ (!option tag string "dw_val_class_symview")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "dwarf2out.h" 280)
+ (!options
+ (!option desc string "%1.val_class")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "dwarf2out.h" 297)
+ nil )
+ (!pair "dw_loc_oprnd2"
+ (!type already_seen 478)
+ (!srcfileloc "dwarf2out.h" 298)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.dw_loc_next")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "dwarf2out.h" 54)
+ (!options
+ (!option tag string "dw_cfi_oprnd_loc")
+ )
+ )
+ (!pair "dw_cfi_cfa_loc"
+ (!type pointer 511 nil gc_used
+ (!type struct 512
+ (!type already_seen 511)
+ gc_pointed_to "dw_cfa_location"
+ (!srcfileloc "dwarf2cfi.cc" 66)
+ (!fields 5
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 161)
+ nil )
+ (!pair "base_offset"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 162)
+ nil )
+ (!pair "reg"
+ (!type struct 513 nil gc_used "cfa_reg"
+ (!srcfileloc "dwarf2out.h" 153)
+ (!fields 3
+ (!pair "reg"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 129)
+ nil )
+ (!pair "span"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 130)
+ nil )
+ (!pair "span_width"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 131)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "dwarf2out.h" 164)
+ nil )
+ (!pair "indirect"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 165)
+ nil )
+ (!pair "in_use"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 166)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "dwarf2out.h" 56)
+ (!options
+ (!option tag string "dw_cfi_oprnd_cfa_loc")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "dwarf2out.h" 62)
+ (!options
+ (!option desc string "dw_cfi_oprnd1_desc (%1.dw_cfi_opc)")
+ )
+ )
+ (!pair "dw_cfi_oprnd2"
+ (!type already_seen 475)
+ (!srcfileloc "dwarf2out.h" 64)
+ (!options
+ (!option desc string "dw_cfi_oprnd2_desc (%1.dw_cfi_opc)")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "dwarf2out.h" 68)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "dwarf2out.h" 87)
+ nil )
+ (!pair "dw_fde_switch_cfi_index"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 88)
+ nil )
+ (!pair "stack_realignment"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 89)
+ nil )
+ (!pair "funcdef_number"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 91)
+ nil )
+ (!pair "fde_index"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 92)
+ nil )
+ (!pair "drap_reg"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 95)
+ nil )
+ (!pair "vdrap_reg"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 97)
+ nil )
+ (!pair "all_throwers_are_sibcalls"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 99)
+ nil )
+ (!pair "uses_eh_lsda"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 100)
+ nil )
+ (!pair "nothrow"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 101)
+ nil )
+ (!pair "stack_realign"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 103)
+ nil )
+ (!pair "drap_reg_saved"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 105)
+ nil )
+ (!pair "in_std_section"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 107)
+ nil )
+ (!pair "second_in_std_section"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 110)
+ nil )
+ (!pair "rule18"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 116)
+ nil )
+ (!pair "ignored_debug"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.h" 118)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "function.h" 304)
+ nil )
+ (!pair "x_range_query"
+ (!type pointer 514 nil gc_unused
+ (!type struct 515
+ (!type already_seen 514)
+ gc_unused "range_query"
+ (!srcfileloc "function.h" 310)
+ (!fields 0 )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "function.h" 310)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "last_stmt_uid"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 313)
+ nil )
+ (!pair "debug_marker_count"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 319)
+ nil )
+ (!pair "funcdef_no"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 322)
+ nil )
+ (!pair "function_start_locus"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 325)
+ nil )
+ (!pair "function_end_locus"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 328)
+ nil )
+ (!pair "curr_properties"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 331)
+ nil )
+ (!pair "last_verified"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 332)
+ nil )
+ (!pair "pending_TODOs"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 339)
+ nil )
+ (!pair "cannot_be_copied_reason"
+ (!type already_seen 11)
+ (!srcfileloc "function.h" 344)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "last_clique"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 347)
+ nil )
+ (!pair "va_list_gpr_size"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 354)
+ nil )
+ (!pair "va_list_fpr_size"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 358)
+ nil )
+ (!pair "calls_setjmp"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 361)
+ nil )
+ (!pair "calls_alloca"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 365)
+ nil )
+ (!pair "calls_eh_return"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 368)
+ nil )
+ (!pair "has_nonlocal_label"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 372)
+ nil )
+ (!pair "has_forced_label_in_static"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 376)
+ nil )
+ (!pair "cannot_be_copied_set"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 381)
+ nil )
+ (!pair "stdarg"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 384)
+ nil )
+ (!pair "after_inlining"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 386)
+ nil )
+ (!pair "always_inline_functions_inlined"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 387)
+ nil )
+ (!pair "can_throw_non_call_exceptions"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 391)
+ nil )
+ (!pair "can_delete_dead_exceptions"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 395)
+ nil )
+ (!pair "returns_struct"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 402)
+ nil )
+ (!pair "returns_pcc_struct"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 406)
+ nil )
+ (!pair "has_local_explicit_reg_vars"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 410)
+ nil )
+ (!pair "is_thunk"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 416)
+ nil )
+ (!pair "has_force_vectorize_loops"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 420)
+ nil )
+ (!pair "has_simduid_loops"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 424)
+ nil )
+ (!pair "tail_call_marked"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 427)
+ nil )
+ (!pair "has_unroll"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 430)
+ nil )
+ (!pair "debug_nonbind_markers"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 434)
+ nil )
+ (!pair "coroutine_component"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 437)
+ nil )
+ (!pair "has_omp_target"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 440)
+ nil )
+ (!pair "assume_function"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 444)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "tree-core.h" 1952)
+ nil )
+ (!pair "arguments"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1955)
+ nil )
+ (!pair "personality"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1957)
+ nil )
+ (!pair "function_specific_target"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1960)
+ nil )
+ (!pair "function_specific_optimization"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1961)
+ nil )
+ (!pair "saved_tree"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1964)
+ nil )
+ (!pair "vindex"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1966)
+ nil )
+ (!pair "function_code"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1969)
+ nil )
+ (!pair "built_in_class"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1971)
+ nil )
+ (!pair "static_ctor_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1972)
+ nil )
+ (!pair "static_dtor_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1973)
+ nil )
+ (!pair "uninlinable"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1974)
+ nil )
+ (!pair "possibly_inlined"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1975)
+ nil )
+ (!pair "novops_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1976)
+ nil )
+ (!pair "returns_twice_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1977)
+ nil )
+ (!pair "malloc_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1979)
+ nil )
+ (!pair "declared_inline_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1980)
+ nil )
+ (!pair "no_inline_warning_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1981)
+ nil )
+ (!pair "no_instrument_function_entry_exit"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1982)
+ nil )
+ (!pair "no_limit_stack"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1983)
+ nil )
+ (!pair "disregard_inline_limits"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1984)
+ nil )
+ (!pair "pure_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1985)
+ nil )
+ (!pair "looping_const_or_pure_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1986)
+ nil )
+ (!pair "decl_type"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1989)
+ nil )
+ (!pair "has_debug_args_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1990)
+ nil )
+ (!pair "versioned_function"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1991)
+ nil )
+ (!pair "replaceable_operator"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1992)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2088)
+ (!options
+ (!option tag string "TS_FUNCTION_DECL")
+ )
+ )
+ (!pair "translation_unit_decl"
+ (!type struct 516 nil gc_used "tree_translation_unit_decl"
+ (!srcfileloc "tree-core.h" 2004)
+ (!fields 2
+ (!pair "common"
+ (!type already_seen 52)
+ (!srcfileloc "tree-core.h" 1999)
+ nil )
+ (!pair "language"
+ (!type already_seen 11)
+ (!srcfileloc "tree-core.h" 2001)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2090)
+ (!options
+ (!option tag string "TS_TRANSLATION_UNIT_DECL")
+ )
+ )
+ (!pair "type_common"
+ (!type struct 517 nil gc_used "tree_type_common"
+ (!srcfileloc "tree-core.h" 1735)
+ (!fields 37
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "tree-core.h" 1683)
+ nil )
+ (!pair "size"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1684)
+ nil )
+ (!pair "size_unit"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1685)
+ nil )
+ (!pair "attributes"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1686)
+ nil )
+ (!pair "uid"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1687)
+ nil )
+ (!pair "precision"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1689)
+ nil )
+ (!pair "no_force_blk_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1690)
+ nil )
+ (!pair "needs_constructing_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1691)
+ nil )
+ (!pair "transparent_aggr_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1692)
+ nil )
+ (!pair "restrict_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1693)
+ nil )
+ (!pair "contains_placeholder_bits"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1694)
+ nil )
+ (!pair "mode"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1696)
+ nil )
+ (!pair "string_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1700)
+ nil )
+ (!pair "lang_flag_0"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1701)
+ nil )
+ (!pair "lang_flag_1"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1702)
+ nil )
+ (!pair "lang_flag_2"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1703)
+ nil )
+ (!pair "lang_flag_3"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1704)
+ nil )
+ (!pair "lang_flag_4"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1705)
+ nil )
+ (!pair "lang_flag_5"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1706)
+ nil )
+ (!pair "lang_flag_6"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1707)
+ nil )
+ (!pair "lang_flag_7"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1708)
+ nil )
+ (!pair "align"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1715)
+ nil )
+ (!pair "warn_if_not_align"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1716)
+ nil )
+ (!pair "typeless_storage"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1717)
+ nil )
+ (!pair "empty_flag"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1718)
+ nil )
+ (!pair "indivisible_p"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1719)
+ nil )
+ (!pair "no_named_args_stdarg_p"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1720)
+ nil )
+ (!pair "spare"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1721)
+ nil )
+ (!pair "alias_set"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1723)
+ nil )
+ (!pair "pointer_to"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1724)
+ nil )
+ (!pair "reference_to"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1725)
+ nil )
+ (!pair "symtab"
+ (!type union 518 nil gc_used "tree_type_symtab"
+ (!srcfileloc "tree-core.h" 1729)
+ (!fields 2
+ (!pair "address"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1727)
+ (!options
+ (!option tag string "TYPE_SYMTAB_IS_ADDRESS")
+ )
+ )
+ (!pair "die"
+ (!type already_seen 486)
+ (!srcfileloc "tree-core.h" 1728)
+ (!options
+ (!option tag string "TYPE_SYMTAB_IS_DIE")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "tree-core.h" 1729)
+ (!options
+ (!option desc string "debug_hooks->tree_type_symtab_field")
+ )
+ )
+ (!pair "canonical"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1730)
+ nil )
+ (!pair "next_variant"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1731)
+ nil )
+ (!pair "main_variant"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1732)
+ nil )
+ (!pair "context"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1733)
+ nil )
+ (!pair "name"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1734)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2091)
+ (!options
+ (!option tag string "TS_TYPE_COMMON")
+ )
+ )
+ (!pair "type_with_lang_specific"
+ (!type struct 519 nil gc_used "tree_type_with_lang_specific"
+ (!srcfileloc "tree-core.h" 1741)
+ (!fields 2
+ (!pair "common"
+ (!type already_seen 517)
+ (!srcfileloc "tree-core.h" 1738)
+ nil )
+ (!pair "lang_specific"
+ (!type pointer 520 nil gc_used
+ (!type lang_struct 521
+ (!type already_seen 520)
+ gc_pointed_to "lang_type"
+ (!srcfileloc "ada/gcc-interface/ada-tree.h" 36)
+ (!fields 0 )
+ nil 4095
+ (!homotypes 10
+ (!type struct 522 nil gc_pointed_to "lang_type"
+ (!srcfileloc "rust/rust-lang.cc" 69)
+ (!fields 0 )
+ nil 2048
+ (!type already_seen 521)
+ nil )
+
+ (!type struct 523 nil gc_pointed_to "lang_type"
+ (!srcfileloc "m2/gm2-lang.cc" 76)
+ (!fields 1
+ (!pair "dummy"
+ (!type already_seen 8)
+ (!srcfileloc "m2/gm2-lang.cc" 75)
+ nil )
+ )
+ nil 256
+ (!type already_seen 521)
+ nil )
+
+ (!type struct 524 nil gc_pointed_to "lang_type"
+ (!srcfileloc "lto/lto-tree.h" 37)
+ (!fields 1
+ (!pair "dummy"
+ (!type already_seen 2)
+ (!srcfileloc "lto/lto-tree.h" 36)
+ nil )
+ )
+ nil 128
+ (!type already_seen 521)
+ nil )
+
+ (!type struct 525 nil gc_pointed_to "lang_type"
+ (!srcfileloc "jit/dummy-frontend.cc" 490)
+ (!fields 1
+ (!pair "dummy"
+ (!type already_seen 8)
+ (!srcfileloc "jit/dummy-frontend.cc" 489)
+ nil )
+ )
+ nil 64
+ (!type already_seen 521)
+ nil )
+
+ (!type struct 526 nil gc_pointed_to "lang_type"
+ (!srcfileloc "go/go-lang.cc" 51)
+ (!fields 1
+ (!pair "dummy"
+ (!type already_seen 8)
+ (!srcfileloc "go/go-lang.cc" 50)
+ nil )
+ )
+ nil 32
+ (!type already_seen 521)
+ nil )
+
+ (!type struct 527 nil gc_pointed_to "lang_type"
+ (!srcfileloc "fortran/trans.h" 1017)
+ (!fields 14
+ (!pair "rank"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans.h" 1004)
+ nil )
+ (!pair "corank"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans.h" 1004)
+ nil )
+ (!pair "akind"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans.h" 1005)
+ nil )
+ (!pair "lbound"
+ (!type array 528 nil gc_used "GFC_MAX_DIMENSIONS"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans.h" 1006)
+ nil )
+ (!pair "ubound"
+ (!type array 529 nil gc_used "GFC_MAX_DIMENSIONS"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans.h" 1007)
+ nil )
+ (!pair "stride"
+ (!type array 530 nil gc_used "GFC_MAX_DIMENSIONS"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans.h" 1008)
+ nil )
+ (!pair "size"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 1009)
+ nil )
+ (!pair "offset"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 1010)
+ nil )
+ (!pair "dtype"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 1011)
+ nil )
+ (!pair "dataptr_type"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 1012)
+ nil )
+ (!pair "base_decl"
+ (!type array 531 nil gc_used "2"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans.h" 1013)
+ nil )
+ (!pair "nonrestricted_type"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 1014)
+ nil )
+ (!pair "caf_token"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 1015)
+ nil )
+ (!pair "caf_offset"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 1016)
+ nil )
+ )
+ nil 16
+ (!type already_seen 521)
+ nil )
+
+ (!type struct 532 nil gc_pointed_to "lang_type"
+ (!srcfileloc "d/d-tree.h" 326)
+ (!fields 1
+ (!pair "type"
+ (!type pointer 533 nil gc_unused
+ (!type struct 534
+ (!type already_seen 533)
+ gc_unused "Type"
+ (!srcfileloc "d/d-tree.h" 325)
+ (!fields 0 )
+ nil 8 nil nil )
+ )
+ (!srcfileloc "d/d-tree.h" 325)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 8
+ (!type already_seen 521)
+ nil )
+
+ (!type struct 535 nil gc_pointed_to "lang_type"
+ (!srcfileloc "cp/cp-tree.h" 2424)
+ (!fields 67
+ (!pair "align"
+ (!type already_seen 8)
+ (!srcfileloc "cp/cp-tree.h" 2336)
+ nil )
+ (!pair "has_type_conversion"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2338)
+ nil )
+ (!pair "has_copy_ctor"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2339)
+ nil )
+ (!pair "has_default_ctor"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2340)
+ nil )
+ (!pair "const_needs_init"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2341)
+ nil )
+ (!pair "ref_needs_init"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2342)
+ nil )
+ (!pair "has_const_copy_assign"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2343)
+ nil )
+ (!pair "use_template"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2344)
+ nil )
+ (!pair "has_mutable"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2346)
+ nil )
+ (!pair "com_interface"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2347)
+ nil )
+ (!pair "non_pod_class"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2348)
+ nil )
+ (!pair "nearly_empty_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2349)
+ nil )
+ (!pair "user_align"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2350)
+ nil )
+ (!pair "has_copy_assign"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2351)
+ nil )
+ (!pair "has_new"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2352)
+ nil )
+ (!pair "has_array_new"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2353)
+ nil )
+ (!pair "gets_delete"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2355)
+ nil )
+ (!pair "interface_only"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2356)
+ nil )
+ (!pair "interface_unknown"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2357)
+ nil )
+ (!pair "contains_empty_class_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2358)
+ nil )
+ (!pair "anon_aggr"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2359)
+ nil )
+ (!pair "non_zero_init"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2360)
+ nil )
+ (!pair "empty_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2361)
+ nil )
+ (!pair "vec_new_uses_cookie"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2364)
+ nil )
+ (!pair "declared_class"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2365)
+ nil )
+ (!pair "diamond_shaped"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2366)
+ nil )
+ (!pair "repeated_base"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2367)
+ nil )
+ (!pair "being_defined"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2368)
+ nil )
+ (!pair "debug_requested"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2369)
+ nil )
+ (!pair "fields_readonly"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2370)
+ nil )
+ (!pair "ptrmemfunc_flag"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2371)
+ nil )
+ (!pair "lazy_default_ctor"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2373)
+ nil )
+ (!pair "lazy_copy_ctor"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2374)
+ nil )
+ (!pair "lazy_copy_assign"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2375)
+ nil )
+ (!pair "lazy_destructor"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2376)
+ nil )
+ (!pair "has_const_copy_ctor"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2377)
+ nil )
+ (!pair "has_complex_copy_ctor"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2378)
+ nil )
+ (!pair "has_complex_copy_assign"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2379)
+ nil )
+ (!pair "non_aggregate"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2380)
+ nil )
+ (!pair "has_complex_dflt"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2382)
+ nil )
+ (!pair "has_list_ctor"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2383)
+ nil )
+ (!pair "non_std_layout"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2384)
+ nil )
+ (!pair "is_literal"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2385)
+ nil )
+ (!pair "lazy_move_ctor"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2386)
+ nil )
+ (!pair "lazy_move_assign"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2387)
+ nil )
+ (!pair "has_complex_move_ctor"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2388)
+ nil )
+ (!pair "has_complex_move_assign"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2389)
+ nil )
+ (!pair "has_constexpr_ctor"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2391)
+ nil )
+ (!pair "unique_obj_representations"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2392)
+ nil )
+ (!pair "unique_obj_representations_set"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2393)
+ nil )
+ (!pair "erroneous"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2394)
+ nil )
+ (!pair "non_pod_aggregate"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2395)
+ nil )
+ (!pair "dummy"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 2404)
+ nil )
+ (!pair "primary_base"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2406)
+ nil )
+ (!pair "vcall_indices"
+ (!type pointer 536 nil gc_used
+ (!type user_struct 537
+ (!type already_seen 536)
+ gc_pointed_to "vec<tree_pair_s,va_gc>"
+ (!srcfileloc "cp/cp-tree.h" 2407)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/cp-tree.h" 2407)
+ nil )
+ (!pair "tree_pair_s"
+ (!type struct 538
+ (!type pointer 539 nil gc_used
+ (!type already_seen 538)
+ )
+ gc_pointed_to "tree_pair_s"
+ (!srcfileloc "cp/cp-tree.h" 2320)
+ (!fields 2
+ (!pair "purpose"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2317)
+ nil )
+ (!pair "value"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2318)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 2407)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cp/cp-tree.h" 2407)
+ nil )
+ (!pair "vtables"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2408)
+ nil )
+ (!pair "typeinfo_var"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2409)
+ nil )
+ (!pair "vbases"
+ (!type already_seen 85)
+ (!srcfileloc "cp/cp-tree.h" 2410)
+ nil )
+ (!pair "as_base"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2411)
+ nil )
+ (!pair "pure_virtuals"
+ (!type already_seen 85)
+ (!srcfileloc "cp/cp-tree.h" 2412)
+ nil )
+ (!pair "friend_classes"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2413)
+ nil )
+ (!pair "members"
+ (!type already_seen 85)
+ (!srcfileloc "cp/cp-tree.h" 2414)
+ (!options
+ (!option reorder string "resort_type_member_vec")
+ )
+ )
+ (!pair "key_method"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2415)
+ nil )
+ (!pair "decl_list"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2416)
+ nil )
+ (!pair "befriending_classes"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2417)
+ nil )
+ (!pair "objc_info"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2421)
+ nil )
+ (!pair "lambda_expr"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 2423)
+ nil )
+ )
+ nil 1028
+ (!type already_seen 521)
+ nil )
+
+ (!type struct 540 nil gc_pointed_to "lang_type"
+ (!srcfileloc "c/c-lang.h" 42)
+ (!fields 4
+ (!pair "s"
+ (!type pointer 541 nil gc_used
+ (!type struct 542
+ (!type already_seen 541)
+ gc_pointed_to "sorted_fields_type"
+ (!srcfileloc "c/c-lang.h" 30)
+ (!fields 2
+ (!pair "len"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-lang.h" 28)
+ nil )
+ (!pair "elts"
+ (!type array 543 nil gc_used "1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "c/c-lang.h" 29)
+ (!options
+ (!option length string "%h.len")
+ )
+ )
+ )
+ nil 514 nil nil )
+ )
+ (!srcfileloc "c/c-lang.h" 34)
+ (!options
+ (!option reorder string "resort_sorted_fields")
+ )
+ )
+ (!pair "enum_min"
+ (!type already_seen 23)
+ (!srcfileloc "c/c-lang.h" 36)
+ nil )
+ (!pair "enum_max"
+ (!type already_seen 23)
+ (!srcfileloc "c/c-lang.h" 37)
+ nil )
+ (!pair "objc_info"
+ (!type already_seen 23)
+ (!srcfileloc "c/c-lang.h" 41)
+ nil )
+ )
+ nil 514
+ (!type already_seen 521)
+ nil )
+
+ (!type struct 544 nil gc_pointed_to "lang_type"
+ (!srcfileloc "ada/gcc-interface/ada-tree.h" 36)
+ (!fields 2
+ (!pair "t1"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/ada-tree.h" 36)
+ nil )
+ (!pair "t2"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/ada-tree.h" 36)
+ nil )
+ )
+ nil 1
+ (!type already_seen 521)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "tree-core.h" 1740)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2093)
+ (!options
+ (!option tag string "TS_TYPE_WITH_LANG_SPECIFIC")
+ )
+ )
+ (!pair "type_non_common"
+ (!type struct 545 nil gc_used "tree_type_non_common"
+ (!srcfileloc "tree-core.h" 1749)
+ (!fields 5
+ (!pair "with_lang_specific"
+ (!type already_seen 519)
+ (!srcfileloc "tree-core.h" 1744)
+ nil )
+ (!pair "values"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1745)
+ nil )
+ (!pair "minval"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1746)
+ nil )
+ (!pair "maxval"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1747)
+ nil )
+ (!pair "lang_1"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1748)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2095)
+ (!options
+ (!option tag string "TS_TYPE_NON_COMMON")
+ )
+ )
+ (!pair "list"
+ (!type struct 546 nil gc_used "tree_list"
+ (!srcfileloc "tree-core.h" 1510)
+ (!fields 3
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "tree-core.h" 1507)
+ nil )
+ (!pair "purpose"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1508)
+ nil )
+ (!pair "value"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1509)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2096)
+ (!options
+ (!option tag string "TS_LIST")
+ )
+ )
+ (!pair "vec"
+ (!type struct 547 nil gc_used "tree_vec"
+ (!srcfileloc "tree-core.h" 1515)
+ (!fields 2
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "tree-core.h" 1513)
+ nil )
+ (!pair "a"
+ (!type array 548 nil gc_used "1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "tree-core.h" 1514)
+ (!options
+ (!option length string "TREE_VEC_LENGTH ((tree)&%h)")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2097)
+ (!options
+ (!option tag string "TS_VEC")
+ )
+ )
+ (!pair "exp"
+ (!type struct 549 nil gc_used "tree_exp"
+ (!srcfileloc "tree-core.h" 1581)
+ (!fields 3
+ (!pair "typed"
+ (!type already_seen 31)
+ (!srcfileloc "tree-core.h" 1578)
+ nil )
+ (!pair "locus"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1579)
+ nil )
+ (!pair "operands"
+ (!type array 550 nil gc_used "1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "tree-core.h" 1580)
+ (!options
+ (!option length string "TREE_OPERAND_LENGTH ((tree)&%h)")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2098)
+ (!options
+ (!option tag string "TS_EXP")
+ )
+ )
+ (!pair "ssa_name"
+ (!type struct 551 nil gc_used "tree_ssa_name"
+ (!srcfileloc "tree-core.h" 1621)
+ (!fields 5
+ (!pair "typed"
+ (!type already_seen 31)
+ (!srcfileloc "tree-core.h" 1598)
+ nil )
+ (!pair "var"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1601)
+ nil )
+ (!pair "def_stmt"
+ (!type already_seen 282)
+ (!srcfileloc "tree-core.h" 1604)
+ nil )
+ (!pair "info"
+ (!type union 552 nil gc_used "ssa_name_info_type"
+ (!srcfileloc "tree-core.h" 1617)
+ (!fields 4
+ (!pair "irange_info"
+ (!type pointer 553 nil gc_used
+ (!type struct 554
+ (!type already_seen 553)
+ gc_pointed_to "irange_storage_slot"
+ (!srcfileloc "value-range-storage.h" 101)
+ (!fields 1
+ (!pair "m_ints"
+ (!type user_struct 555 nil gc_used "trailing_wide_ints<MAX_INTS>"
+ (!srcfileloc "value-range-storage.h" 100)
+ (!fields 1
+ (!pair "MAX_INTS"
+ (!type undefined 556 nil gc_unused "MAX_INTS"
+ (!srcfileloc "value-range-storage.h" 100)
+ )
+ (!srcfileloc "value-range-storage.h" 100)
+ nil )
+ )
+ )
+ (!srcfileloc "value-range-storage.h" 100)
+ nil )
+ )
+ (!options
+ (!option variable_size string "")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "tree-core.h" 1609)
+ (!options
+ (!option tag string "0")
+ )
+ )
+ (!pair "frange_info"
+ (!type pointer 557 nil gc_used
+ (!type struct 558
+ (!type already_seen 557)
+ gc_pointed_to "frange_storage_slot"
+ (!srcfileloc "value-range-storage.h" 121)
+ (!fields 5
+ (!pair "m_kind"
+ (!type already_seen 2)
+ (!srcfileloc "value-range-storage.h" 116)
+ nil )
+ (!pair "m_min"
+ (!type already_seen 2)
+ (!srcfileloc "value-range-storage.h" 117)
+ nil )
+ (!pair "m_max"
+ (!type already_seen 2)
+ (!srcfileloc "value-range-storage.h" 118)
+ nil )
+ (!pair "m_pos_nan"
+ (!type already_seen 2)
+ (!srcfileloc "value-range-storage.h" 119)
+ nil )
+ (!pair "m_neg_nan"
+ (!type already_seen 2)
+ (!srcfileloc "value-range-storage.h" 120)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "tree-core.h" 1611)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ (!pair "ptr_info"
+ (!type pointer 559 nil gc_used
+ (!type struct 560
+ (!type already_seen 559)
+ gc_pointed_to "ptr_info_def"
+ (!srcfileloc "tree-ssanames.h" 46)
+ (!fields 3
+ (!pair "pt"
+ (!type already_seen 386)
+ (!srcfileloc "tree-ssanames.h" 28)
+ nil )
+ (!pair "align"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssanames.h" 40)
+ nil )
+ (!pair "misalign"
+ (!type already_seen 2)
+ (!srcfileloc "tree-ssanames.h" 45)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "tree-core.h" 1613)
+ (!options
+ (!option tag string "2")
+ )
+ )
+ (!pair "range_info"
+ (!type already_seen 3)
+ (!srcfileloc "tree-core.h" 1616)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "tree-core.h" 1618)
+ (!options
+ (!option desc string "%1.typed.type ?(POINTER_TYPE_P (TREE_TYPE ((tree)&%1)) ? 2 : SCALAR_FLOAT_TYPE_P (TREE_TYPE ((tree)&%1))) : 3")
+ )
+ )
+ (!pair "imm_uses"
+ (!type struct 561
+ (!type pointer 562 nil gc_unused
+ (!type already_seen 561)
+ )
+ gc_used "ssa_use_operand_t"
+ (!srcfileloc "tree-ssa-operands.h" 30)
+ (!fields 4
+ (!pair "prev"
+ (!type already_seen 562)
+ (!srcfileloc "tree-core.h" 1586)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "next"
+ (!type already_seen 562)
+ (!srcfileloc "tree-core.h" 1587)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "loc"
+ (!type union 563 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/tree-core.h:1593"
+ (!srcfileloc "tree-core.h" 1593)
+ (!fields 2
+ (!pair "stmt"
+ (!type already_seen 282)
+ (!srcfileloc "tree-core.h" 1593)
+ nil )
+ (!pair "ssa_name"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1593)
+ nil )
+ )
+ nil 4095 nil )
+ (!srcfileloc "tree-core.h" 1593)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "use"
+ (!type already_seen 24)
+ (!srcfileloc "tree-core.h" 1594)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 1620)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2099)
+ (!options
+ (!option tag string "TS_SSA_NAME")
+ )
+ )
+ (!pair "block"
+ (!type struct 564 nil gc_used "tree_block"
+ (!srcfileloc "tree-core.h" 1680)
+ (!fields 13
+ (!pair "base"
+ (!type already_seen 25)
+ (!srcfileloc "tree-core.h" 1661)
+ nil )
+ (!pair "chain"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1662)
+ nil )
+ (!pair "block_num"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1664)
+ nil )
+ (!pair "locus"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1666)
+ nil )
+ (!pair "end_locus"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1667)
+ nil )
+ (!pair "vars"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1669)
+ nil )
+ (!pair "nonlocalized_vars"
+ (!type already_seen 85)
+ (!srcfileloc "tree-core.h" 1670)
+ nil )
+ (!pair "subblocks"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1672)
+ nil )
+ (!pair "supercontext"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1673)
+ nil )
+ (!pair "abstract_origin"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1674)
+ nil )
+ (!pair "fragment_origin"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1675)
+ nil )
+ (!pair "fragment_chain"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1676)
+ nil )
+ (!pair "die"
+ (!type already_seen 486)
+ (!srcfileloc "tree-core.h" 1679)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2100)
+ (!options
+ (!option tag string "TS_BLOCK")
+ )
+ )
+ (!pair "binfo"
+ (!type struct 565 nil gc_used "tree_binfo"
+ (!srcfileloc "tree-core.h" 1765)
+ (!fields 10
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "tree-core.h" 1752)
+ nil )
+ (!pair "offset"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1754)
+ nil )
+ (!pair "vtable"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1755)
+ nil )
+ (!pair "virtuals"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1756)
+ nil )
+ (!pair "vptr_field"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1757)
+ nil )
+ (!pair "base_accesses"
+ (!type already_seen 85)
+ (!srcfileloc "tree-core.h" 1758)
+ nil )
+ (!pair "inheritance"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1759)
+ nil )
+ (!pair "vtt_subvtt"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1761)
+ nil )
+ (!pair "vtt_vptr"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1762)
+ nil )
+ (!pair "base_binfos"
+ (!type already_seen 86)
+ (!srcfileloc "tree-core.h" 1764)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2101)
+ (!options
+ (!option tag string "TS_BINFO")
+ )
+ )
+ (!pair "stmt_list"
+ (!type struct 566 nil gc_used "tree_statement_list"
+ (!srcfileloc "tree-core.h" 2023)
+ (!fields 3
+ (!pair "typed"
+ (!type already_seen 31)
+ (!srcfileloc "tree-core.h" 2020)
+ nil )
+ (!pair "head"
+ (!type pointer 567 nil gc_used
+ (!type struct 568
+ (!type already_seen 567)
+ gc_pointed_to "tree_statement_list_node"
+ (!srcfileloc "tree-core.h" 2016)
+ (!fields 3
+ (!pair "prev"
+ (!type already_seen 567)
+ (!srcfileloc "tree-core.h" 2013)
+ nil )
+ (!pair "next"
+ (!type already_seen 567)
+ (!srcfileloc "tree-core.h" 2014)
+ nil )
+ (!pair "stmt"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 2015)
+ nil )
+ )
+ (!options
+ (!option chain_prev string "%h.prev")
+ (!option chain_next string "%h.next")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "tree-core.h" 2021)
+ nil )
+ (!pair "tail"
+ (!type already_seen 567)
+ (!srcfileloc "tree-core.h" 2022)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2102)
+ (!options
+ (!option tag string "TS_STATEMENT_LIST")
+ )
+ )
+ (!pair "constructor"
+ (!type struct 569 nil gc_used "tree_constructor"
+ (!srcfileloc "tree-core.h" 1529)
+ (!fields 2
+ (!pair "typed"
+ (!type already_seen 31)
+ (!srcfileloc "tree-core.h" 1527)
+ nil )
+ (!pair "elts"
+ (!type pointer 570 nil gc_used
+ (!type user_struct 571
+ (!type already_seen 570)
+ gc_pointed_to "vec<constructor_elt,va_gc>"
+ (!srcfileloc "tree-core.h" 1528)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "tree-core.h" 1528)
+ nil )
+ (!pair "constructor_elt"
+ (!type struct 572 nil gc_used "constructor_elt"
+ (!srcfileloc "tree-core.h" 1528)
+ (!fields 2
+ (!pair "index"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1522)
+ nil )
+ (!pair "value"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1523)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 1528)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "tree-core.h" 1528)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2103)
+ (!options
+ (!option tag string "TS_CONSTRUCTOR")
+ )
+ )
+ (!pair "omp_clause"
+ (!type struct 573 nil gc_used "tree_omp_clause"
+ (!srcfileloc "tree-core.h" 1658)
+ (!fields 7
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "tree-core.h" 1632)
+ nil )
+ (!pair "locus"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1633)
+ nil )
+ (!pair "code"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1634)
+ nil )
+ (!pair "subcode"
+ (!type union 574 nil gc_unused "omp_clause_subcode"
+ (!srcfileloc "tree-core.h" 1649)
+ (!fields 12
+ (!pair "default_kind"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1636)
+ nil )
+ (!pair "schedule_kind"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1637)
+ nil )
+ (!pair "depend_kind"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1638)
+ nil )
+ (!pair "doacross_kind"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1639)
+ nil )
+ (!pair "map_kind"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1641)
+ nil )
+ (!pair "proc_bind_kind"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1642)
+ nil )
+ (!pair "reduction_code"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1643)
+ nil )
+ (!pair "linear_kind"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1644)
+ nil )
+ (!pair "if_modifier"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1645)
+ nil )
+ (!pair "defaultmap_kind"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1646)
+ nil )
+ (!pair "bind_kind"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1647)
+ nil )
+ (!pair "device_type_kind"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1648)
+ nil )
+ )
+ nil 4095 nil )
+ (!srcfileloc "tree-core.h" 1649)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "gimple_reduction_init"
+ (!type already_seen 282)
+ (!srcfileloc "tree-core.h" 1653)
+ nil )
+ (!pair "gimple_reduction_merge"
+ (!type already_seen 282)
+ (!srcfileloc "tree-core.h" 1654)
+ nil )
+ (!pair "ops"
+ (!type array 575 nil gc_used "1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "tree-core.h" 1657)
+ (!options
+ (!option length string "omp_clause_num_ops[OMP_CLAUSE_CODE ((tree)&%h)]")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2104)
+ (!options
+ (!option tag string "TS_OMP_CLAUSE")
+ )
+ )
+ (!pair "optimization"
+ (!type struct 576 nil gc_used "tree_optimization_option"
+ (!srcfileloc "tree-core.h" 2041)
+ (!fields 4
+ (!pair "base"
+ (!type already_seen 25)
+ (!srcfileloc "tree-core.h" 2029)
+ nil )
+ (!pair "opts"
+ (!type pointer 577 nil gc_used
+ (!type struct 578
+ (!type already_seen 577)
+ gc_pointed_to "cl_optimization"
+ (!fileloc "options.h" 8795)
+ (!fields 499
+ (!pair "x_str_align_functions"
+ (!type already_seen 11)
+ (!fileloc "options.h" 8295)
+ nil )
+ (!pair "x_str_align_jumps"
+ (!type already_seen 11)
+ (!fileloc "options.h" 8296)
+ nil )
+ (!pair "x_str_align_labels"
+ (!type already_seen 11)
+ (!fileloc "options.h" 8297)
+ nil )
+ (!pair "x_str_align_loops"
+ (!type already_seen 11)
+ (!fileloc "options.h" 8298)
+ nil )
+ (!pair "x_flag_patchable_function_entry"
+ (!type already_seen 11)
+ (!fileloc "options.h" 8299)
+ nil )
+ (!pair "x_param_align_loop_iterations"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8300)
+ nil )
+ (!pair "x_param_align_threshold"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8301)
+ nil )
+ (!pair "x_param_asan_protect_allocas"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8302)
+ nil )
+ (!pair "x_param_asan_instrument_reads"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8303)
+ nil )
+ (!pair "x_param_asan_instrument_writes"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8304)
+ nil )
+ (!pair "x_param_asan_instrumentation_with_call_threshold"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8305)
+ nil )
+ (!pair "x_param_asan_kernel_mem_intrinsic_prefix"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8306)
+ nil )
+ (!pair "x_param_asan_memintrin"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8307)
+ nil )
+ (!pair "x_param_asan_stack"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8308)
+ nil )
+ (!pair "x_param_asan_use_after_return"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8309)
+ nil )
+ (!pair "x_param_avg_loop_niter"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8310)
+ nil )
+ (!pair "x_param_avoid_fma_max_bits"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8311)
+ nil )
+ (!pair "x_param_builtin_expect_probability"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8312)
+ nil )
+ (!pair "x_param_builtin_string_cmp_inline_length"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8313)
+ nil )
+ (!pair "x_param_case_values_threshold"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8314)
+ nil )
+ (!pair "x_param_comdat_sharing_probability"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8315)
+ nil )
+ (!pair "x_param_construct_interfere_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8316)
+ nil )
+ (!pair "x_param_destruct_interfere_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8317)
+ nil )
+ (!pair "x_param_dse_max_alias_queries_per_store"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8318)
+ nil )
+ (!pair "x_param_dse_max_object_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8319)
+ nil )
+ (!pair "x_param_early_inlining_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8320)
+ nil )
+ (!pair "x_param_evrp_sparse_threshold"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8321)
+ nil )
+ (!pair "x_param_evrp_switch_limit"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8322)
+ nil )
+ (!pair "x_param_fsm_scale_path_stmts"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8323)
+ nil )
+ (!pair "x_param_gcse_after_reload_critical_fraction"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8324)
+ nil )
+ (!pair "x_param_gcse_after_reload_partial_fraction"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8325)
+ nil )
+ (!pair "x_param_gcse_cost_distance_ratio"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8326)
+ nil )
+ (!pair "x_param_gcse_unrestricted_cost"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8327)
+ nil )
+ (!pair "x_param_graphite_max_arrays_per_scop"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8328)
+ nil )
+ (!pair "x_param_graphite_max_nb_scop_params"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8329)
+ nil )
+ (!pair "x_param_hwasan_instrument_allocas"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8330)
+ nil )
+ (!pair "x_param_hwasan_instrument_mem_intrinsics"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8331)
+ nil )
+ (!pair "x_param_hwasan_instrument_reads"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8332)
+ nil )
+ (!pair "x_param_hwasan_instrument_stack"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8333)
+ nil )
+ (!pair "x_param_hwasan_instrument_writes"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8334)
+ nil )
+ (!pair "x_param_hwasan_random_frame_tag"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8335)
+ nil )
+ (!pair "x_param_inline_heuristics_hint_percent"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8336)
+ nil )
+ (!pair "x_param_inline_min_speedup"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8337)
+ nil )
+ (!pair "x_param_inline_unit_growth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8338)
+ nil )
+ (!pair "x_param_ipa_cp_eval_threshold"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8339)
+ nil )
+ (!pair "x_param_ipa_cp_large_unit_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8340)
+ nil )
+ (!pair "x_param_ipa_cp_loop_hint_bonus"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8341)
+ nil )
+ (!pair "x_param_ipa_cp_max_recursive_depth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8342)
+ nil )
+ (!pair "x_param_ipa_cp_min_recursive_probability"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8343)
+ nil )
+ (!pair "x_param_ipa_cp_profile_count_base"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8344)
+ nil )
+ (!pair "x_param_ipa_cp_recursion_penalty"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8345)
+ nil )
+ (!pair "x_param_ipa_cp_recursive_freq_factor"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8346)
+ nil )
+ (!pair "x_param_ipa_cp_single_call_penalty"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8347)
+ nil )
+ (!pair "x_param_ipa_cp_unit_growth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8348)
+ nil )
+ (!pair "x_param_ipa_cp_value_list_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8349)
+ nil )
+ (!pair "x_param_ipa_jump_function_lookups"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8350)
+ nil )
+ (!pair "x_param_ipa_max_aa_steps"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8351)
+ nil )
+ (!pair "x_param_ipa_max_agg_items"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8352)
+ nil )
+ (!pair "x_param_ipa_max_loop_predicates"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8353)
+ nil )
+ (!pair "x_param_ipa_max_param_expr_ops"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8354)
+ nil )
+ (!pair "x_param_ipa_max_switch_predicate_bounds"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8355)
+ nil )
+ (!pair "x_param_ipa_sra_deref_prob_threshold"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8356)
+ nil )
+ (!pair "x_param_ipa_sra_max_replacements"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8357)
+ nil )
+ (!pair "x_param_ipa_sra_ptr_growth_factor"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8358)
+ nil )
+ (!pair "x_param_ipa_sra_ptrwrap_growth_factor"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8359)
+ nil )
+ (!pair "x_param_ira_consider_dup_in_all_alts"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8360)
+ nil )
+ (!pair "x_param_ira_loop_reserved_regs"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8361)
+ nil )
+ (!pair "x_param_ira_max_conflict_table_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8362)
+ nil )
+ (!pair "x_param_ira_max_loops_num"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8363)
+ nil )
+ (!pair "x_param_ira_simple_lra_insn_threshold"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8364)
+ nil )
+ (!pair "x_param_iv_always_prune_cand_set_bound"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8365)
+ nil )
+ (!pair "x_param_iv_consider_all_candidates_bound"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8366)
+ nil )
+ (!pair "x_param_iv_max_considered_uses"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8367)
+ nil )
+ (!pair "x_param_jump_table_max_growth_ratio_for_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8368)
+ nil )
+ (!pair "x_param_jump_table_max_growth_ratio_for_speed"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8369)
+ nil )
+ (!pair "x_param_l1_cache_line_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8370)
+ nil )
+ (!pair "x_param_l1_cache_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8371)
+ nil )
+ (!pair "x_param_l2_cache_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8372)
+ nil )
+ (!pair "x_param_large_function_growth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8373)
+ nil )
+ (!pair "x_param_large_function_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8374)
+ nil )
+ (!pair "x_param_stack_frame_growth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8375)
+ nil )
+ (!pair "x_param_large_stack_frame"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8376)
+ nil )
+ (!pair "x_param_large_unit_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8377)
+ nil )
+ (!pair "x_param_lim_expensive"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8378)
+ nil )
+ (!pair "x_param_loop_block_tile_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8379)
+ nil )
+ (!pair "x_param_loop_interchange_max_num_stmts"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8380)
+ nil )
+ (!pair "x_param_loop_interchange_stride_ratio"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8381)
+ nil )
+ (!pair "x_param_loop_invariant_max_bbs_in_loop"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8382)
+ nil )
+ (!pair "x_param_loop_max_datarefs_for_datadeps"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8383)
+ nil )
+ (!pair "x_param_loop_versioning_max_inner_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8384)
+ nil )
+ (!pair "x_param_loop_versioning_max_outer_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8385)
+ nil )
+ (!pair "x_param_lra_inheritance_ebb_probability_cutoff"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8386)
+ nil )
+ (!pair "x_param_lra_max_considered_reload_pseudos"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8387)
+ nil )
+ (!pair "x_param_max_average_unrolled_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8388)
+ nil )
+ (!pair "x_param_max_combine_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8389)
+ nil )
+ (!pair "x_param_max_unroll_iterations"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8390)
+ nil )
+ (!pair "x_param_max_completely_peel_times"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8391)
+ nil )
+ (!pair "x_param_max_completely_peeled_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8392)
+ nil )
+ (!pair "x_param_max_crossjump_edges"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8393)
+ nil )
+ (!pair "x_param_max_cse_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8394)
+ nil )
+ (!pair "x_param_max_cse_path_length"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8395)
+ nil )
+ (!pair "x_param_max_cselib_memory_locations"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8396)
+ nil )
+ (!pair "x_param_max_debug_marker_count"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8397)
+ nil )
+ (!pair "x_param_max_delay_slot_insn_search"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8398)
+ nil )
+ (!pair "x_param_max_delay_slot_live_search"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8399)
+ nil )
+ (!pair "x_param_max_dse_active_local_stores"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8400)
+ nil )
+ (!pair "x_param_early_inliner_max_iterations"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8401)
+ nil )
+ (!pair "x_param_max_find_base_term_values"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8402)
+ nil )
+ (!pair "x_param_max_fsm_thread_path_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8403)
+ nil )
+ (!pair "x_param_max_gcse_insertion_ratio"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8404)
+ nil )
+ (!pair "x_param_max_gcse_memory"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8405)
+ nil )
+ (!pair "x_param_max_goto_duplication_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8406)
+ nil )
+ (!pair "x_param_max_grow_copy_bb_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8407)
+ nil )
+ (!pair "x_param_max_hoist_depth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8408)
+ nil )
+ (!pair "x_param_inline_functions_called_once_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8409)
+ nil )
+ (!pair "x_param_inline_functions_called_once_loop_depth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8410)
+ nil )
+ (!pair "x_param_max_inline_insns_auto"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8411)
+ nil )
+ (!pair "x_param_max_inline_insns_recursive_auto"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8412)
+ nil )
+ (!pair "x_param_max_inline_insns_recursive"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8413)
+ nil )
+ (!pair "x_param_max_inline_insns_single"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8414)
+ nil )
+ (!pair "x_param_max_inline_insns_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8415)
+ nil )
+ (!pair "x_param_max_inline_insns_small"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8416)
+ nil )
+ (!pair "x_param_max_inline_recursive_depth_auto"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8417)
+ nil )
+ (!pair "x_param_max_inline_recursive_depth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8418)
+ nil )
+ (!pair "x_param_max_isl_operations"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8419)
+ nil )
+ (!pair "x_param_max_iterations_computation_cost"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8420)
+ nil )
+ (!pair "x_param_max_iterations_to_track"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8421)
+ nil )
+ (!pair "x_param_max_jump_thread_duplication_stmts"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8422)
+ nil )
+ (!pair "x_param_max_jump_thread_paths"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8423)
+ nil )
+ (!pair "x_param_max_last_value_rtl"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8424)
+ nil )
+ (!pair "x_param_max_loop_header_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8425)
+ nil )
+ (!pair "x_param_max_modulo_backtrack_attempts"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8426)
+ nil )
+ (!pair "x_param_max_partial_antic_length"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8427)
+ nil )
+ (!pair "x_param_max_peel_branches"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8428)
+ nil )
+ (!pair "x_param_max_peel_times"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8429)
+ nil )
+ (!pair "x_param_max_peeled_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8430)
+ nil )
+ (!pair "x_param_max_pending_list_length"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8431)
+ nil )
+ (!pair "x_param_max_pipeline_region_blocks"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8432)
+ nil )
+ (!pair "x_param_max_pipeline_region_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8433)
+ nil )
+ (!pair "x_param_max_pow_sqrt_depth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8434)
+ nil )
+ (!pair "x_param_max_predicted_iterations"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8435)
+ nil )
+ (!pair "x_param_max_reload_search_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8436)
+ nil )
+ (!pair "x_param_max_rtl_if_conversion_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8437)
+ nil )
+ (!pair "x_param_max_rtl_if_conversion_predictable_cost"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8438)
+ nil )
+ (!pair "x_param_max_rtl_if_conversion_unpredictable_cost"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8439)
+ nil )
+ (!pair "x_param_max_sched_extend_regions_iters"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8440)
+ nil )
+ (!pair "x_param_max_sched_insn_conflict_delay"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8441)
+ nil )
+ (!pair "x_param_max_sched_ready_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8442)
+ nil )
+ (!pair "x_param_max_sched_region_blocks"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8443)
+ nil )
+ (!pair "x_param_max_sched_region_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8444)
+ nil )
+ (!pair "x_param_max_slsr_candidate_scan"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8445)
+ nil )
+ (!pair "x_param_max_speculative_devirt_maydefs"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8446)
+ nil )
+ (!pair "x_param_max_stores_to_merge"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8447)
+ nil )
+ (!pair "x_param_max_stores_to_sink"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8448)
+ nil )
+ (!pair "x_param_max_tail_merge_comparisons"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8449)
+ nil )
+ (!pair "x_param_max_tail_merge_iterations"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8450)
+ nil )
+ (!pair "x_param_max_tracked_strlens"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8451)
+ nil )
+ (!pair "x_param_max_tree_if_conversion_phi_args"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8452)
+ nil )
+ (!pair "x_param_max_unroll_times"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8453)
+ nil )
+ (!pair "x_param_max_unrolled_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8454)
+ nil )
+ (!pair "x_param_max_unswitch_depth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8455)
+ nil )
+ (!pair "x_param_max_unswitch_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8456)
+ nil )
+ (!pair "x_param_max_variable_expansions"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8457)
+ nil )
+ (!pair "x_param_max_vartrack_expr_depth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8458)
+ nil )
+ (!pair "x_param_max_vartrack_reverse_op_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8459)
+ nil )
+ (!pair "x_param_max_vartrack_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8460)
+ nil )
+ (!pair "x_param_min_crossjump_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8461)
+ nil )
+ (!pair "x_param_min_inline_recursive_probability"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8462)
+ nil )
+ (!pair "x_param_min_insn_to_prefetch_ratio"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8463)
+ nil )
+ (!pair "x_param_min_loop_cond_split_prob"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8464)
+ nil )
+ (!pair "x_param_min_pagesize"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8465)
+ nil )
+ (!pair "x_param_min_size_for_stack_sharing"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8466)
+ nil )
+ (!pair "x_param_min_spec_prob"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8467)
+ nil )
+ (!pair "x_param_min_vect_loop_bound"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8468)
+ nil )
+ (!pair "x_param_modref_max_accesses"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8469)
+ nil )
+ (!pair "x_param_modref_max_adjustments"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8470)
+ nil )
+ (!pair "x_param_modref_max_bases"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8471)
+ nil )
+ (!pair "x_param_modref_max_depth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8472)
+ nil )
+ (!pair "x_param_modref_max_escape_points"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8473)
+ nil )
+ (!pair "x_param_modref_max_refs"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8474)
+ nil )
+ (!pair "x_param_modref_max_tests"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8475)
+ nil )
+ (!pair "x_param_parloops_chunk_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8476)
+ nil )
+ (!pair "x_param_parloops_min_per_thread"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8477)
+ nil )
+ (!pair "x_param_parloops_schedule"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8478)
+ nil )
+ (!pair "x_param_partial_inlining_entry_probability"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8479)
+ nil )
+ (!pair "x_param_predictable_branch_outcome"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8480)
+ nil )
+ (!pair "x_param_prefetch_dynamic_strides"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8481)
+ nil )
+ (!pair "x_param_prefetch_latency"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8482)
+ nil )
+ (!pair "x_param_prefetch_min_insn_to_mem_ratio"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8483)
+ nil )
+ (!pair "x_param_prefetch_minimum_stride"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8484)
+ nil )
+ (!pair "x_param_ranger_logical_depth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8485)
+ nil )
+ (!pair "x_param_ranger_recompute_depth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8486)
+ nil )
+ (!pair "x_param_relation_block_limit"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8487)
+ nil )
+ (!pair "x_param_rpo_vn_max_loop_depth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8488)
+ nil )
+ (!pair "x_param_sccvn_max_alias_queries_per_access"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8489)
+ nil )
+ (!pair "x_param_scev_max_expr_complexity"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8490)
+ nil )
+ (!pair "x_param_scev_max_expr_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8491)
+ nil )
+ (!pair "x_param_sched_autopref_queue_depth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8492)
+ nil )
+ (!pair "x_param_sched_mem_true_dep_cost"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8493)
+ nil )
+ (!pair "x_param_sched_pressure_algorithm"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8494)
+ nil )
+ (!pair "x_param_sched_spec_prob_cutoff"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8495)
+ nil )
+ (!pair "x_param_sched_state_edge_prob_cutoff"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8496)
+ nil )
+ (!pair "x_param_selsched_insns_to_rename"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8497)
+ nil )
+ (!pair "x_param_selsched_max_lookahead"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8498)
+ nil )
+ (!pair "x_param_selsched_max_sched_times"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8499)
+ nil )
+ (!pair "x_param_simultaneous_prefetches"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8500)
+ nil )
+ (!pair "x_param_sink_frequency_threshold"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8501)
+ nil )
+ (!pair "x_param_sms_dfa_history"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8502)
+ nil )
+ (!pair "x_param_sms_loop_average_count_threshold"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8503)
+ nil )
+ (!pair "x_param_sms_max_ii_factor"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8504)
+ nil )
+ (!pair "x_param_sms_min_sc"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8505)
+ nil )
+ (!pair "x_param_sra_max_propagations"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8506)
+ nil )
+ (!pair "x_param_sra_max_scalarization_size_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8507)
+ nil )
+ (!pair "x_param_sra_max_scalarization_size_speed"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8508)
+ nil )
+ (!pair "x_param_ssa_name_def_chain_limit"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8509)
+ nil )
+ (!pair "x_param_ssp_buffer_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8510)
+ nil )
+ (!pair "x_param_stack_clash_protection_guard_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8511)
+ nil )
+ (!pair "x_param_stack_clash_protection_probe_interval"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8512)
+ nil )
+ (!pair "x_param_store_merging_allow_unaligned"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8513)
+ nil )
+ (!pair "x_param_store_merging_max_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8514)
+ nil )
+ (!pair "x_param_switch_conversion_branch_ratio"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8515)
+ nil )
+ (!pair "x_param_tm_max_aggregate_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8516)
+ nil )
+ (!pair "x_param_tracer_dynamic_coverage_feedback"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8517)
+ nil )
+ (!pair "x_param_tracer_dynamic_coverage"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8518)
+ nil )
+ (!pair "x_param_tracer_max_code_growth"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8519)
+ nil )
+ (!pair "x_param_tracer_min_branch_probability_feedback"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8520)
+ nil )
+ (!pair "x_param_tracer_min_branch_probability"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8521)
+ nil )
+ (!pair "x_param_tracer_min_branch_ratio"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8522)
+ nil )
+ (!pair "x_param_tree_reassoc_width"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8523)
+ nil )
+ (!pair "x_param_uninit_control_dep_attempts"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8524)
+ nil )
+ (!pair "x_param_uninlined_function_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8525)
+ nil )
+ (!pair "x_param_uninlined_function_time"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8526)
+ nil )
+ (!pair "x_param_uninlined_function_thunk_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8527)
+ nil )
+ (!pair "x_param_uninlined_function_thunk_time"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8528)
+ nil )
+ (!pair "x_param_unlikely_bb_count_fraction"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8529)
+ nil )
+ (!pair "x_param_unroll_jam_max_unroll"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8530)
+ nil )
+ (!pair "x_param_unroll_jam_min_percent"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8531)
+ nil )
+ (!pair "x_param_use_after_scope_direct_emission_threshold"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8532)
+ nil )
+ (!pair "x_param_vect_epilogues_nomask"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8533)
+ nil )
+ (!pair "x_param_vect_induction_float"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8534)
+ nil )
+ (!pair "x_param_vect_inner_loop_cost_factor"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8535)
+ nil )
+ (!pair "x_param_vect_max_layout_candidates"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8536)
+ nil )
+ (!pair "x_param_vect_max_peeling_for_alignment"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8537)
+ nil )
+ (!pair "x_param_vect_max_version_for_alias_checks"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8538)
+ nil )
+ (!pair "x_param_vect_max_version_for_alignment_checks"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8539)
+ nil )
+ (!pair "x_param_vect_partial_vector_usage"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8540)
+ nil )
+ (!pair "x_flag_openmp_target_simd_clone"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8541)
+ nil )
+ (!pair "x_flag_sched_stalled_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8542)
+ nil )
+ (!pair "x_flag_sched_stalled_insns_dep"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8543)
+ nil )
+ (!pair "x_flag_tree_parallelize_loops"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8544)
+ nil )
+ (!pair "x_param_ranger_debug"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8545)
+ nil )
+ (!pair "x_param_threader_debug"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8546)
+ nil )
+ (!pair "x_flag_excess_precision"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8547)
+ nil )
+ (!pair "x_flag_fp_contract_mode"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8548)
+ nil )
+ (!pair "x_flag_ira_algorithm"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8549)
+ nil )
+ (!pair "x_flag_ira_region"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8550)
+ nil )
+ (!pair "x_flag_live_patching"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8551)
+ nil )
+ (!pair "x_flag_reorder_blocks_algorithm"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8552)
+ nil )
+ (!pair "x_flag_simd_cost_model"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8553)
+ nil )
+ (!pair "x_flag_stack_reuse"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8554)
+ nil )
+ (!pair "x_flag_auto_var_init"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8555)
+ nil )
+ (!pair "x_flag_vect_cost_model"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8556)
+ nil )
+ (!pair "x_optimize"
+ (!type already_seen 8)
+ (!fileloc "options.h" 8557)
+ nil )
+ (!pair "x_optimize_size"
+ (!type already_seen 8)
+ (!fileloc "options.h" 8558)
+ nil )
+ (!pair "x_optimize_debug"
+ (!type already_seen 8)
+ (!fileloc "options.h" 8559)
+ nil )
+ (!pair "x_optimize_fast"
+ (!type already_seen 8)
+ (!fileloc "options.h" 8560)
+ nil )
+ (!pair "x_warn_inline"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8561)
+ nil )
+ (!pair "x_flag_aggressive_loop_optimizations"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8562)
+ nil )
+ (!pair "x_flag_align_functions"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8563)
+ nil )
+ (!pair "x_flag_align_jumps"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8564)
+ nil )
+ (!pair "x_flag_align_labels"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8565)
+ nil )
+ (!pair "x_flag_align_loops"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8566)
+ nil )
+ (!pair "x_flag_allocation_dce"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8567)
+ nil )
+ (!pair "x_flag_store_data_races"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8568)
+ nil )
+ (!pair "x_flag_associative_math"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8569)
+ nil )
+ (!pair "x_flag_asynchronous_unwind_tables"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8570)
+ nil )
+ (!pair "x_flag_auto_inc_dec"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8571)
+ nil )
+ (!pair "x_flag_bit_tests"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8572)
+ nil )
+ (!pair "x_flag_branch_on_count_reg"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8573)
+ nil )
+ (!pair "x_flag_branch_probabilities"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8574)
+ nil )
+ (!pair "x_flag_caller_saves"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8575)
+ nil )
+ (!pair "x_flag_code_hoisting"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8576)
+ nil )
+ (!pair "x_flag_combine_stack_adjustments"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8577)
+ nil )
+ (!pair "x_flag_compare_elim_after_reload"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8578)
+ nil )
+ (!pair "x_flag_conserve_stack"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8579)
+ nil )
+ (!pair "x_flag_cprop_registers"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8580)
+ nil )
+ (!pair "x_flag_crossjumping"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8581)
+ nil )
+ (!pair "x_flag_cse_follow_jumps"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8582)
+ nil )
+ (!pair "x_flag_cx_fortran_rules"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8583)
+ nil )
+ (!pair "x_flag_cx_limited_range"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8584)
+ nil )
+ (!pair "x_flag_dce"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8585)
+ nil )
+ (!pair "x_flag_defer_pop"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8586)
+ nil )
+ (!pair "x_flag_delayed_branch"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8587)
+ nil )
+ (!pair "x_flag_delete_dead_exceptions"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8588)
+ nil )
+ (!pair "x_flag_delete_null_pointer_checks"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8589)
+ nil )
+ (!pair "x_flag_devirtualize"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8590)
+ nil )
+ (!pair "x_flag_devirtualize_speculatively"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8591)
+ nil )
+ (!pair "x_flag_dse"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8592)
+ nil )
+ (!pair "x_flag_early_inlining"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8593)
+ nil )
+ (!pair "x_flag_exceptions"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8594)
+ nil )
+ (!pair "x_flag_expensive_optimizations"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8595)
+ nil )
+ (!pair "x_flag_finite_loops"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8596)
+ nil )
+ (!pair "x_flag_finite_math_only"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8597)
+ nil )
+ (!pair "x_flag_float_store"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8598)
+ nil )
+ (!pair "x_flag_fold_simple_inlines"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8599)
+ nil )
+ (!pair "x_flag_forward_propagate"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8600)
+ nil )
+ (!pair "x_flag_fp_int_builtin_inexact"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8601)
+ nil )
+ (!pair "x_flag_no_function_cse"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8602)
+ nil )
+ (!pair "x_flag_gcse"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8603)
+ nil )
+ (!pair "x_flag_gcse_after_reload"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8604)
+ nil )
+ (!pair "x_flag_gcse_las"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8605)
+ nil )
+ (!pair "x_flag_gcse_lm"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8606)
+ nil )
+ (!pair "x_flag_gcse_sm"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8607)
+ nil )
+ (!pair "x_flag_graphite"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8608)
+ nil )
+ (!pair "x_flag_graphite_identity"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8609)
+ nil )
+ (!pair "x_flag_guess_branch_prob"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8610)
+ nil )
+ (!pair "x_flag_harden_compares"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8611)
+ nil )
+ (!pair "x_flag_harden_conditional_branches"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8612)
+ nil )
+ (!pair "x_flag_hoist_adjacent_loads"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8613)
+ nil )
+ (!pair "x_flag_if_conversion"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8614)
+ nil )
+ (!pair "x_flag_if_conversion2"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8615)
+ nil )
+ (!pair "x_flag_indirect_inlining"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8616)
+ nil )
+ (!pair "x_flag_no_inline"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8617)
+ nil )
+ (!pair "x_flag_inline_atomics"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8618)
+ nil )
+ (!pair "x_flag_inline_functions"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8619)
+ nil )
+ (!pair "x_flag_inline_functions_called_once"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8620)
+ nil )
+ (!pair "x_flag_inline_small_functions"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8621)
+ nil )
+ (!pair "x_flag_ipa_bit_cp"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8622)
+ nil )
+ (!pair "x_flag_ipa_cp"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8623)
+ nil )
+ (!pair "x_flag_ipa_cp_clone"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8624)
+ nil )
+ (!pair "x_flag_ipa_icf"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8625)
+ nil )
+ (!pair "x_flag_ipa_icf_functions"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8626)
+ nil )
+ (!pair "x_flag_ipa_icf_variables"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8627)
+ nil )
+ (!pair "x_flag_ipa_modref"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8628)
+ nil )
+ (!pair "x_flag_ipa_profile"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8629)
+ nil )
+ (!pair "x_flag_ipa_pta"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8630)
+ nil )
+ (!pair "x_flag_ipa_pure_const"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8631)
+ nil )
+ (!pair "x_flag_ipa_ra"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8632)
+ nil )
+ (!pair "x_flag_ipa_reference"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8633)
+ nil )
+ (!pair "x_flag_ipa_reference_addressable"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8634)
+ nil )
+ (!pair "x_flag_ipa_sra"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8635)
+ nil )
+ (!pair "x_flag_ipa_stack_alignment"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8636)
+ nil )
+ (!pair "x_flag_ipa_strict_aliasing"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8637)
+ nil )
+ (!pair "x_flag_ipa_vrp"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8638)
+ nil )
+ (!pair "x_flag_ira_hoist_pressure"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8639)
+ nil )
+ (!pair "x_flag_ira_loop_pressure"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8640)
+ nil )
+ (!pair "x_flag_ira_share_save_slots"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8641)
+ nil )
+ (!pair "x_flag_ira_share_spill_slots"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8642)
+ nil )
+ (!pair "x_flag_isolate_erroneous_paths_attribute"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8643)
+ nil )
+ (!pair "x_flag_isolate_erroneous_paths_dereference"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8644)
+ nil )
+ (!pair "x_flag_ivopts"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8645)
+ nil )
+ (!pair "x_flag_jump_tables"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8646)
+ nil )
+ (!pair "x_flag_keep_gc_roots_live"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8647)
+ nil )
+ (!pair "x_flag_lifetime_dse"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8648)
+ nil )
+ (!pair "x_flag_limit_function_alignment"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8649)
+ nil )
+ (!pair "x_flag_live_range_shrinkage"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8650)
+ nil )
+ (!pair "x_flag_loop_interchange"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8651)
+ nil )
+ (!pair "x_flag_loop_nest_optimize"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8652)
+ nil )
+ (!pair "x_flag_loop_parallelize_all"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8653)
+ nil )
+ (!pair "x_flag_unroll_jam"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8654)
+ nil )
+ (!pair "x_flag_lra_remat"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8655)
+ nil )
+ (!pair "x_flag_errno_math"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8656)
+ nil )
+ (!pair "x_flag_modulo_sched"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8657)
+ nil )
+ (!pair "x_flag_modulo_sched_allow_regmoves"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8658)
+ nil )
+ (!pair "x_flag_move_loop_invariants"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8659)
+ nil )
+ (!pair "x_flag_move_loop_stores"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8660)
+ nil )
+ (!pair "x_flag_non_call_exceptions"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8661)
+ nil )
+ (!pair "x_flag_nothrow_opt"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8662)
+ nil )
+ (!pair "x_flag_omit_frame_pointer"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8663)
+ nil )
+ (!pair "x_flag_opt_info"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8664)
+ nil )
+ (!pair "x_flag_optimize_sibling_calls"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8665)
+ nil )
+ (!pair "x_flag_optimize_strlen"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8666)
+ nil )
+ (!pair "x_flag_pack_struct"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8667)
+ nil )
+ (!pair "x_flag_partial_inlining"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8668)
+ nil )
+ (!pair "x_flag_peel_loops"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8669)
+ nil )
+ (!pair "x_flag_no_peephole"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8670)
+ nil )
+ (!pair "x_flag_peephole2"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8671)
+ nil )
+ (!pair "x_flag_plt"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8672)
+ nil )
+ (!pair "x_flag_predictive_commoning"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8673)
+ nil )
+ (!pair "x_flag_prefetch_loop_arrays"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8674)
+ nil )
+ (!pair "x_flag_printf_return_value"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8675)
+ nil )
+ (!pair "x_flag_profile_partial_training"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8676)
+ nil )
+ (!pair "x_flag_profile_reorder_functions"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8677)
+ nil )
+ (!pair "x_flag_reciprocal_math"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8678)
+ nil )
+ (!pair "x_flag_ree"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8679)
+ nil )
+ (!pair "x_flag_pcc_struct_return"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8680)
+ nil )
+ (!pair "x_flag_rename_registers"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8681)
+ nil )
+ (!pair "x_flag_reorder_blocks"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8682)
+ nil )
+ (!pair "x_flag_reorder_blocks_and_partition"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8683)
+ nil )
+ (!pair "x_flag_reorder_functions"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8684)
+ nil )
+ (!pair "x_flag_rerun_cse_after_loop"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8685)
+ nil )
+ (!pair "x_flag_resched_modulo_sched"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8686)
+ nil )
+ (!pair "x_flag_rounding_math"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8687)
+ nil )
+ (!pair "x_flag_rtti"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8688)
+ nil )
+ (!pair "x_flag_save_optimization_record"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8689)
+ nil )
+ (!pair "x_flag_sched_critical_path_heuristic"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8690)
+ nil )
+ (!pair "x_flag_sched_dep_count_heuristic"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8691)
+ nil )
+ (!pair "x_flag_sched_group_heuristic"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8692)
+ nil )
+ (!pair "x_flag_schedule_interblock"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8693)
+ nil )
+ (!pair "x_flag_sched_last_insn_heuristic"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8694)
+ nil )
+ (!pair "x_flag_sched_pressure"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8695)
+ nil )
+ (!pair "x_flag_sched_rank_heuristic"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8696)
+ nil )
+ (!pair "x_flag_schedule_speculative"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8697)
+ nil )
+ (!pair "x_flag_sched_spec_insn_heuristic"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8698)
+ nil )
+ (!pair "x_flag_schedule_speculative_load"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8699)
+ nil )
+ (!pair "x_flag_schedule_speculative_load_dangerous"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8700)
+ nil )
+ (!pair "x_flag_sched2_use_superblocks"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8701)
+ nil )
+ (!pair "x_flag_schedule_fusion"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8702)
+ nil )
+ (!pair "x_flag_schedule_insns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8703)
+ nil )
+ (!pair "x_flag_schedule_insns_after_reload"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8704)
+ nil )
+ (!pair "x_flag_section_anchors"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8705)
+ nil )
+ (!pair "x_flag_sel_sched_pipelining"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8706)
+ nil )
+ (!pair "x_flag_sel_sched_pipelining_outer_loops"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8707)
+ nil )
+ (!pair "x_flag_sel_sched_reschedule_pipelined"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8708)
+ nil )
+ (!pair "x_flag_selective_scheduling"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8709)
+ nil )
+ (!pair "x_flag_selective_scheduling2"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8710)
+ nil )
+ (!pair "x_flag_semantic_interposition"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8711)
+ nil )
+ (!pair "x_flag_short_enums"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8712)
+ nil )
+ (!pair "x_flag_short_wchar"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8713)
+ nil )
+ (!pair "x_flag_shrink_wrap"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8714)
+ nil )
+ (!pair "x_flag_shrink_wrap_separate"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8715)
+ nil )
+ (!pair "x_flag_signaling_nans"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8716)
+ nil )
+ (!pair "x_flag_signed_zeros"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8717)
+ nil )
+ (!pair "x_flag_single_precision_constant"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8718)
+ nil )
+ (!pair "x_flag_split_ivs_in_unroller"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8719)
+ nil )
+ (!pair "x_flag_split_loops"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8720)
+ nil )
+ (!pair "x_flag_split_paths"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8721)
+ nil )
+ (!pair "x_flag_split_wide_types"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8722)
+ nil )
+ (!pair "x_flag_split_wide_types_early"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8723)
+ nil )
+ (!pair "x_flag_ssa_backprop"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8724)
+ nil )
+ (!pair "x_flag_ssa_phiopt"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8725)
+ nil )
+ (!pair "x_flag_stack_clash_protection"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8726)
+ nil )
+ (!pair "x_flag_stack_protect"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8727)
+ nil )
+ (!pair "x_flag_stdarg_opt"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8728)
+ nil )
+ (!pair "x_flag_store_merging"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8729)
+ nil )
+ (!pair "x_flag_strict_aliasing"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8730)
+ nil )
+ (!pair "x_flag_strict_enums"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8731)
+ nil )
+ (!pair "x_flag_strict_volatile_bitfields"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8732)
+ nil )
+ (!pair "x_flag_thread_jumps"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8733)
+ nil )
+ (!pair "x_flag_threadsafe_statics"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8734)
+ nil )
+ (!pair "x_flag_toplevel_reorder"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8735)
+ nil )
+ (!pair "x_flag_tracer"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8736)
+ nil )
+ (!pair "x_flag_trapping_math"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8737)
+ nil )
+ (!pair "x_flag_trapv"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8738)
+ nil )
+ (!pair "x_flag_tree_bit_ccp"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8739)
+ nil )
+ (!pair "x_flag_tree_builtin_call_dce"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8740)
+ nil )
+ (!pair "x_flag_tree_ccp"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8741)
+ nil )
+ (!pair "x_flag_tree_ch"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8742)
+ nil )
+ (!pair "x_flag_tree_coalesce_vars"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8743)
+ nil )
+ (!pair "x_flag_tree_copy_prop"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8744)
+ nil )
+ (!pair "x_flag_tree_cselim"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8745)
+ nil )
+ (!pair "x_flag_tree_dce"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8746)
+ nil )
+ (!pair "x_flag_tree_dom"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8747)
+ nil )
+ (!pair "x_flag_tree_dse"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8748)
+ nil )
+ (!pair "x_flag_tree_forwprop"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8749)
+ nil )
+ (!pair "x_flag_tree_fre"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8750)
+ nil )
+ (!pair "x_flag_tree_loop_distribute_patterns"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8751)
+ nil )
+ (!pair "x_flag_tree_loop_distribution"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8752)
+ nil )
+ (!pair "x_flag_tree_loop_if_convert"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8753)
+ nil )
+ (!pair "x_flag_tree_loop_im"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8754)
+ nil )
+ (!pair "x_flag_tree_loop_ivcanon"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8755)
+ nil )
+ (!pair "x_flag_tree_loop_optimize"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8756)
+ nil )
+ (!pair "x_flag_tree_loop_vectorize"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8757)
+ nil )
+ (!pair "x_flag_tree_live_range_split"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8758)
+ nil )
+ (!pair "x_flag_tree_partial_pre"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8759)
+ nil )
+ (!pair "x_flag_tree_phiprop"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8760)
+ nil )
+ (!pair "x_flag_tree_pre"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8761)
+ nil )
+ (!pair "x_flag_tree_pta"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8762)
+ nil )
+ (!pair "x_flag_tree_reassoc"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8763)
+ nil )
+ (!pair "x_flag_tree_scev_cprop"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8764)
+ nil )
+ (!pair "x_flag_tree_sink"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8765)
+ nil )
+ (!pair "x_flag_tree_slp_vectorize"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8766)
+ nil )
+ (!pair "x_flag_tree_slsr"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8767)
+ nil )
+ (!pair "x_flag_tree_sra"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8768)
+ nil )
+ (!pair "x_flag_tree_switch_conversion"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8769)
+ nil )
+ (!pair "x_flag_tree_tail_merge"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8770)
+ nil )
+ (!pair "x_flag_tree_ter"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8771)
+ nil )
+ (!pair "x_flag_tree_vectorize"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8772)
+ nil )
+ (!pair "x_flag_tree_vrp"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8773)
+ nil )
+ (!pair "x_flag_unconstrained_commons"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8774)
+ nil )
+ (!pair "x_flag_unreachable_traps"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8775)
+ nil )
+ (!pair "x_flag_unroll_all_loops"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8776)
+ nil )
+ (!pair "x_flag_cunroll_grow_size"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8777)
+ nil )
+ (!pair "x_flag_unroll_loops"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8778)
+ nil )
+ (!pair "x_flag_unsafe_math_optimizations"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8779)
+ nil )
+ (!pair "x_flag_unswitch_loops"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8780)
+ nil )
+ (!pair "x_flag_unwind_tables"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8781)
+ nil )
+ (!pair "x_flag_var_tracking"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8782)
+ nil )
+ (!pair "x_flag_var_tracking_assignments"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8783)
+ nil )
+ (!pair "x_flag_var_tracking_assignments_toggle"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8784)
+ nil )
+ (!pair "x_flag_var_tracking_uninit"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8785)
+ nil )
+ (!pair "x_flag_variable_expansion_in_unroller"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8786)
+ nil )
+ (!pair "x_flag_version_loops_for_strides"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8787)
+ nil )
+ (!pair "x_flag_value_profile_transformations"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8788)
+ nil )
+ (!pair "x_flag_web"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8789)
+ nil )
+ (!pair "x_flag_wrapv"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8790)
+ nil )
+ (!pair "x_flag_wrapv_pointer"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8791)
+ nil )
+ (!pair "x_debug_nonbind_markers_p"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8792)
+ nil )
+ (!pair "explicit_mask"
+ (!type array 579 nil gc_used "8"
+ (!type already_seen 2)
+ )
+ (!fileloc "options.h" 8794)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "tree-core.h" 2032)
+ nil )
+ (!pair "optabs"
+ (!type already_seen 3)
+ (!srcfileloc "tree-core.h" 2036)
+ (!options
+ (!option atomic string "")
+ )
+ )
+ (!pair "base_optabs"
+ (!type pointer 580 nil gc_unused
+ (!type struct 581
+ (!type already_seen 580)
+ gc_unused "target_optabs"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "tree-core.h" 2040)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2105)
+ (!options
+ (!option tag string "TS_OPTIMIZATION")
+ )
+ )
+ (!pair "target_option"
+ (!type struct 582 nil gc_used "tree_target_option"
+ (!srcfileloc "tree-core.h" 2057)
+ (!fields 3
+ (!pair "base"
+ (!type already_seen 25)
+ (!srcfileloc "tree-core.h" 2050)
+ nil )
+ (!pair "globals"
+ (!type pointer 583 nil gc_used
+ (!type struct 584
+ (!type already_seen 583)
+ gc_pointed_to "target_globals"
+ (!srcfileloc "target-globals.h" 64)
+ (!fields 17
+ (!pair "flag_state"
+ (!type pointer 585 nil gc_unused
+ (!type struct 586
+ (!type already_seen 585)
+ gc_unused "target_flag_state"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 47)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "regs"
+ (!type pointer 587 nil gc_unused
+ (!type struct 588
+ (!type already_seen 587)
+ gc_unused "target_regs"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 48)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "rtl"
+ (!type pointer 589 nil gc_used
+ (!type struct 590
+ (!type already_seen 589)
+ gc_pointed_to "target_rtl"
+ (!srcfileloc "rtl.h" 3920)
+ (!fields 8
+ (!pair "x_global_rtl"
+ (!type array 591 nil gc_used "GR_MAX"
+ (!type already_seen 100)
+ )
+ (!srcfileloc "rtl.h" 3892)
+ nil )
+ (!pair "x_pic_offset_table_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "rtl.h" 3895)
+ nil )
+ (!pair "x_return_address_pointer_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "rtl.h" 3900)
+ nil )
+ (!pair "x_initial_regno_reg_rtx"
+ (!type array 592 nil gc_used "FIRST_PSEUDO_REGISTER"
+ (!type already_seen 100)
+ )
+ (!srcfileloc "rtl.h" 3906)
+ nil )
+ (!pair "x_top_of_stack"
+ (!type array 593 nil gc_used "MAX_MACHINE_MODE"
+ (!type already_seen 100)
+ )
+ (!srcfileloc "rtl.h" 3909)
+ nil )
+ (!pair "x_static_reg_base_value"
+ (!type array 594 nil gc_used "FIRST_PSEUDO_REGISTER"
+ (!type already_seen 100)
+ )
+ (!srcfileloc "rtl.h" 3913)
+ nil )
+ (!pair "x_mode_mem_attrs"
+ (!type array 595 nil gc_used "(int) MAX_MACHINE_MODE"
+ (!type already_seen 228)
+ )
+ (!srcfileloc "rtl.h" 3916)
+ nil )
+ (!pair "target_specific_initialized"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 3919)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 49)
+ nil )
+ (!pair "recog"
+ (!type pointer 596 nil gc_unused
+ (!type struct 597
+ (!type already_seen 596)
+ gc_unused "target_recog"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 50)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "hard_regs"
+ (!type pointer 598 nil gc_unused
+ (!type struct 599
+ (!type already_seen 598)
+ gc_unused "target_hard_regs"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 51)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "function_abi_info"
+ (!type pointer 600 nil gc_unused
+ (!type struct 601
+ (!type already_seen 600)
+ gc_unused "target_function_abi_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 52)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "reload"
+ (!type pointer 602 nil gc_unused
+ (!type struct 603
+ (!type already_seen 602)
+ gc_unused "target_reload"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 53)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "expmed"
+ (!type pointer 604 nil gc_unused
+ (!type struct 605
+ (!type already_seen 604)
+ gc_unused "target_expmed"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 54)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "optabs"
+ (!type already_seen 580)
+ (!srcfileloc "target-globals.h" 55)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "libfuncs"
+ (!type pointer 606 nil gc_used
+ (!type struct 607
+ (!type already_seen 606)
+ gc_pointed_to "target_libfuncs"
+ (!srcfileloc "libfuncs.h" 62)
+ (!fields 2
+ (!pair "x_libfunc_table"
+ (!type array 608 nil gc_used "LTI_MAX"
+ (!type already_seen 100)
+ )
+ (!srcfileloc "libfuncs.h" 58)
+ nil )
+ (!pair "x_libfunc_hash"
+ (!type pointer 609 nil gc_used
+ (!type user_struct 610
+ (!type already_seen 609)
+ gc_pointed_to "hash_table<libfunc_hasher>"
+ (!srcfileloc "libfuncs.h" 61)
+ (!fields 1
+ (!pair "libfunc_hasher"
+ (!type struct 611 nil gc_used "libfunc_hasher"
+ (!srcfileloc "libfuncs.h" 61)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "libfuncs.h" 61)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "libfuncs.h" 61)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 56)
+ nil )
+ (!pair "cfgloop"
+ (!type pointer 612 nil gc_unused
+ (!type struct 613
+ (!type already_seen 612)
+ gc_unused "target_cfgloop"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 57)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "ira"
+ (!type pointer 614 nil gc_unused
+ (!type struct 615
+ (!type already_seen 614)
+ gc_unused "target_ira"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 58)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "ira_int"
+ (!type pointer 616 nil gc_unused
+ (!type struct 617
+ (!type already_seen 616)
+ gc_unused "target_ira_int"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 59)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "builtins"
+ (!type pointer 618 nil gc_unused
+ (!type struct 619
+ (!type already_seen 618)
+ gc_unused "target_builtins"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 60)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "gcse"
+ (!type pointer 620 nil gc_unused
+ (!type struct 621
+ (!type already_seen 620)
+ gc_unused "target_gcse"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 61)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "bb_reorder"
+ (!type pointer 622 nil gc_unused
+ (!type struct 623
+ (!type already_seen 622)
+ gc_unused "target_bb_reorder"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 62)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "lower_subreg"
+ (!type pointer 624 nil gc_unused
+ (!type struct 625
+ (!type already_seen 624)
+ gc_unused "target_lower_subreg"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "target-globals.h" 63)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "tree-core.h" 2053)
+ nil )
+ (!pair "opts"
+ (!type pointer 626 nil gc_used
+ (!type struct 627
+ (!type already_seen 626)
+ gc_pointed_to "cl_target_option"
+ (!srcfileloc "config/arm/arm.cc" 28501)
+ (!fields 16
+ (!pair "x_arm_stack_protector_guard_offset"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8800)
+ nil )
+ (!pair "x_aarch_enable_bti"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8801)
+ nil )
+ (!pair "x_arm_arch_string"
+ (!type already_seen 11)
+ (!fileloc "options.h" 8802)
+ nil )
+ (!pair "x_arm_branch_protection_string"
+ (!type already_seen 11)
+ (!fileloc "options.h" 8803)
+ nil )
+ (!pair "x_arm_cpu_string"
+ (!type already_seen 11)
+ (!fileloc "options.h" 8804)
+ nil )
+ (!pair "x_arm_fpu_index"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8805)
+ nil )
+ (!pair "x_target_flags"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8806)
+ nil )
+ (!pair "x_arm_tune_string"
+ (!type already_seen 11)
+ (!fileloc "options.h" 8807)
+ nil )
+ (!pair "x_aarch_ra_sign_scope"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8808)
+ nil )
+ (!pair "x_aarch_ra_sign_key"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8809)
+ nil )
+ (!pair "x_inline_asm_unified"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8810)
+ nil )
+ (!pair "x_fix_aes_erratum_1742098"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8811)
+ nil )
+ (!pair "x_arm_restrict_it"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8812)
+ nil )
+ (!pair "x_unaligned_access"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8813)
+ nil )
+ (!pair "explicit_mask"
+ (!type array 628 nil gc_used "1"
+ (!type already_seen 2)
+ )
+ (!fileloc "options.h" 8815)
+ nil )
+ (!pair "explicit_mask_target_flags"
+ (!type already_seen 2)
+ (!fileloc "options.h" 8816)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "tree-core.h" 2056)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "tree-core.h" 2106)
+ (!options
+ (!option tag string "TS_TARGET_OPTION")
+ )
+ )
+ )
+ (!options
+ (!option variable_size string "")
+ (!option desc string "tree_node_structure (&%h)")
+ (!option ptr_alias type
+ (!type lang_struct 629 nil gc_pointed_to "lang_tree_node"
+ (!srcfileloc "ada/gcc-interface/ada-tree.h" 33)
+ (!fields 0 )
+ (!options
+ (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL")
+ (!option desc string "0")
+ )
+ 4095
+ (!homotypes 10
+ (!type union 630 nil gc_pointed_to "lang_tree_node"
+ (!srcfileloc "rust/rust-lang.cc" 93)
+ (!fields 2
+ (!pair "generic"
+ (!type already_seen 22)
+ (!srcfileloc "rust/rust-lang.cc" 91)
+ (!options
+ (!option desc string "tree_node_structure (&%h)")
+ (!option tag string "0")
+ )
+ )
+ (!pair "identifier"
+ (!type lang_struct 631 nil gc_used "lang_identifier"
+ (!srcfileloc "d/d-tree.h" 351)
+ (!fields 0 )
+ nil 2552
+ (!homotypes 9
+ (!type struct 632 nil gc_used "lang_identifier"
+ (!srcfileloc "rust/rust-lang.cc" 81)
+ (!fields 1
+ (!pair "common"
+ (!type already_seen 50)
+ (!srcfileloc "rust/rust-lang.cc" 80)
+ nil )
+ )
+ nil 2048
+ (!type already_seen 631)
+ nil )
+
+ (!type struct 633 nil gc_used "lang_identifier"
+ (!srcfileloc "m2/gm2-lang.cc" 91)
+ (!fields 1
+ (!pair "common"
+ (!type already_seen 50)
+ (!srcfileloc "m2/gm2-lang.cc" 90)
+ nil )
+ )
+ nil 256
+ (!type already_seen 631)
+ nil )
+
+ (!type struct 634 nil gc_used "lang_identifier"
+ (!srcfileloc "lto/lto-tree.h" 27)
+ (!fields 1
+ (!pair "base"
+ (!type already_seen 50)
+ (!srcfileloc "lto/lto-tree.h" 26)
+ nil )
+ )
+ nil 128
+ (!type already_seen 631)
+ nil )
+
+ (!type struct 635 nil gc_used "lang_identifier"
+ (!srcfileloc "jit/dummy-frontend.cc" 505)
+ (!fields 1
+ (!pair "common"
+ (!type already_seen 50)
+ (!srcfileloc "jit/dummy-frontend.cc" 504)
+ nil )
+ )
+ nil 64
+ (!type already_seen 631)
+ nil )
+
+ (!type struct 636 nil gc_used "lang_identifier"
+ (!srcfileloc "go/go-lang.cc" 66)
+ (!fields 1
+ (!pair "common"
+ (!type already_seen 50)
+ (!srcfileloc "go/go-lang.cc" 65)
+ nil )
+ )
+ nil 32
+ (!type already_seen 631)
+ nil )
+
+ (!type struct 637 nil gc_used "lang_identifier"
+ (!srcfileloc "fortran/f95-lang.cc" 48)
+ (!fields 1
+ (!pair "common"
+ (!type already_seen 50)
+ (!srcfileloc "fortran/f95-lang.cc" 47)
+ nil )
+ )
+ nil 16
+ (!type already_seen 631)
+ nil )
+
+ (!type struct 638 nil gc_used "lang_identifier"
+ (!srcfileloc "d/d-tree.h" 221)
+ (!fields 5
+ (!pair "common"
+ (!type already_seen 50)
+ (!srcfileloc "d/d-tree.h" 210)
+ nil )
+ (!pair "pretty_ident"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-tree.h" 213)
+ nil )
+ (!pair "decl_tree"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-tree.h" 216)
+ nil )
+ (!pair "dsymbol"
+ (!type already_seen 62)
+ (!srcfileloc "d/d-tree.h" 219)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "daggregate"
+ (!type pointer 639 nil gc_unused
+ (!type struct 640
+ (!type already_seen 639)
+ gc_unused "AggregateDeclaration"
+ (!srcfileloc "d/d-tree.h" 220)
+ (!fields 0 )
+ nil 8 nil nil )
+ )
+ (!srcfileloc "d/d-tree.h" 220)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 8
+ (!type already_seen 631)
+ nil )
+
+ (!type struct 641 nil gc_used "lang_identifier"
+ (!srcfileloc "cp/cp-tree.h" 683)
+ (!fields 2
+ (!pair "c_common"
+ (!type struct 642 nil gc_used "c_common_identifier"
+ (!srcfileloc "c-family/c-common.h" 392)
+ (!fields 2
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "c-family/c-common.h" 390)
+ nil )
+ (!pair "node"
+ (!type already_seen 12)
+ (!srcfileloc "c-family/c-common.h" 391)
+ nil )
+ )
+ nil 1542 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 681)
+ nil )
+ (!pair "bindings"
+ (!type already_seen 90)
+ (!srcfileloc "cp/cp-tree.h" 682)
+ nil )
+ )
+ nil 1028
+ (!type already_seen 631)
+ nil )
+
+ (!type struct 643 nil gc_used "lang_identifier"
+ (!srcfileloc "c/c-decl.cc" 230)
+ (!fields 4
+ (!pair "common_id"
+ (!type already_seen 642)
+ (!srcfileloc "c/c-decl.cc" 226)
+ nil )
+ (!pair "symbol_binding"
+ (!type pointer 644 nil gc_used
+ (!type struct 645
+ (!type already_seen 644)
+ gc_pointed_to "c_binding"
+ (!srcfileloc "c/c-decl.cc" 215)
+ (!fields 11
+ (!pair "u"
+ (!type union 646 nil gc_used "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/c/c-decl.cc:201"
+ (!srcfileloc "c/c-decl.cc" 204)
+ (!fields 2
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "c/c-decl.cc" 202)
+ (!options
+ (!option tag string "0")
+ )
+ )
+ (!pair "label"
+ (!type pointer 647 nil gc_used
+ (!type struct 648
+ (!type already_seen 647)
+ gc_pointed_to "c_label_vars"
+ (!srcfileloc "c/c-decl.cc" 394)
+ (!fields 4
+ (!pair "shadowed"
+ (!type already_seen 647)
+ (!srcfileloc "c/c-decl.cc" 382)
+ nil )
+ (!pair "label_bindings"
+ (!type struct 649 nil gc_used "c_spot_bindings"
+ (!srcfileloc "c/c-decl.cc" 361)
+ (!fields 4
+ (!pair "scope"
+ (!type pointer 650 nil gc_used
+ (!type struct 651
+ (!type already_seen 650)
+ gc_pointed_to "c_scope"
+ (!srcfileloc "c/c-decl.cc" 496)
+ (!fields 14
+ (!pair "outer"
+ (!type already_seen 650)
+ (!srcfileloc "c/c-decl.cc" 443)
+ nil )
+ (!pair "outer_function"
+ (!type already_seen 650)
+ (!srcfileloc "c/c-decl.cc" 446)
+ nil )
+ (!pair "bindings"
+ (!type already_seen 644)
+ (!srcfileloc "c/c-decl.cc" 449)
+ nil )
+ (!pair "blocks"
+ (!type already_seen 23)
+ (!srcfileloc "c/c-decl.cc" 453)
+ nil )
+ (!pair "blocks_last"
+ (!type already_seen 23)
+ (!srcfileloc "c/c-decl.cc" 454)
+ nil )
+ (!pair "depth"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 458)
+ nil )
+ (!pair "parm_flag"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 462)
+ nil )
+ (!pair "had_vla_unspec"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 466)
+ nil )
+ (!pair "warned_forward_parm_decls"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 471)
+ nil )
+ (!pair "function_body"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 477)
+ nil )
+ (!pair "keep"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 480)
+ nil )
+ (!pair "float_const_decimal64"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 483)
+ nil )
+ (!pair "has_label_bindings"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 488)
+ nil )
+ (!pair "has_jump_unsafe_decl"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 495)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.outer")
+ )
+ 514 nil nil )
+ )
+ (!srcfileloc "c/c-decl.cc" 346)
+ nil )
+ (!pair "bindings_in_scope"
+ (!type already_seen 644)
+ (!srcfileloc "c/c-decl.cc" 350)
+ nil )
+ (!pair "stmt_exprs"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 357)
+ nil )
+ (!pair "left_stmt_expr"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 360)
+ nil )
+ )
+ nil 514 nil nil )
+ (!srcfileloc "c/c-decl.cc" 384)
+ nil )
+ (!pair "decls_in_scope"
+ (!type already_seen 85)
+ (!srcfileloc "c/c-decl.cc" 389)
+ nil )
+ (!pair "gotos"
+ (!type pointer 652 nil gc_used
+ (!type user_struct 653
+ (!type already_seen 652)
+ gc_pointed_to "vec<c_goto_bindings_p,va_gc>"
+ (!srcfileloc "c/c-decl.cc" 393)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "c/c-decl.cc" 393)
+ nil )
+ (!pair "c_goto_bindings_p"
+ (!type pointer 654 nil gc_used
+ (!type struct 655
+ (!type already_seen 654)
+ gc_pointed_to "c_goto_bindings"
+ (!srcfileloc "c/c-decl.cc" 372)
+ (!fields 2
+ (!pair "loc"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 369)
+ nil )
+ (!pair "goto_bindings"
+ (!type already_seen 649)
+ (!srcfileloc "c/c-decl.cc" 371)
+ nil )
+ )
+ nil 514 nil nil )
+ )
+ (!srcfileloc "c/c-decl.cc" 393)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "c/c-decl.cc" 393)
+ nil )
+ )
+ nil 514 nil nil )
+ )
+ (!srcfileloc "c/c-decl.cc" 203)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ )
+ nil 514 nil )
+ (!srcfileloc "c/c-decl.cc" 204)
+ (!options
+ (!option desc string "TREE_CODE (%0.decl) == LABEL_DECL")
+ )
+ )
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "c/c-decl.cc" 205)
+ nil )
+ (!pair "id"
+ (!type already_seen 23)
+ (!srcfileloc "c/c-decl.cc" 206)
+ nil )
+ (!pair "prev"
+ (!type already_seen 644)
+ (!srcfileloc "c/c-decl.cc" 207)
+ nil )
+ (!pair "shadowed"
+ (!type already_seen 644)
+ (!srcfileloc "c/c-decl.cc" 208)
+ nil )
+ (!pair "depth"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 209)
+ nil )
+ (!pair "invisible"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 210)
+ nil )
+ (!pair "nested"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 211)
+ nil )
+ (!pair "inner_comp"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 212)
+ nil )
+ (!pair "in_struct"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 213)
+ nil )
+ (!pair "locus"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 214)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.prev")
+ )
+ 514 nil nil )
+ )
+ (!srcfileloc "c/c-decl.cc" 227)
+ nil )
+ (!pair "tag_binding"
+ (!type already_seen 644)
+ (!srcfileloc "c/c-decl.cc" 228)
+ nil )
+ (!pair "label_binding"
+ (!type already_seen 644)
+ (!srcfileloc "c/c-decl.cc" 229)
+ nil )
+ )
+ nil 514
+ (!type already_seen 631)
+ nil )
+ )
+ )
+ (!srcfileloc "rust/rust-lang.cc" 92)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ )
+ (!options
+ (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL")
+ (!option desc string "TREE_CODE (&%h.generic) == IDENTIFIER_NODE")
+ )
+ 2048
+ (!type already_seen 629)
+ )
+
+ (!type union 656 nil gc_pointed_to "lang_tree_node"
+ (!srcfileloc "m2/gm2-lang.cc" 103)
+ (!fields 2
+ (!pair "generic"
+ (!type already_seen 22)
+ (!srcfileloc "m2/gm2-lang.cc" 101)
+ (!options
+ (!option desc string "tree_node_structure (&%h)")
+ (!option tag string "0")
+ )
+ )
+ (!pair "identifier"
+ (!type already_seen 631)
+ (!srcfileloc "m2/gm2-lang.cc" 102)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ )
+ (!options
+ (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL")
+ (!option desc string "TREE_CODE (&%h.generic) == IDENTIFIER_NODE")
+ )
+ 256
+ (!type already_seen 629)
+ )
+
+ (!type union 657 nil gc_pointed_to "lang_tree_node"
+ (!srcfileloc "lto/lto-tree.h" 54)
+ (!fields 1
+ (!pair "generic"
+ (!type already_seen 22)
+ (!srcfileloc "lto/lto-tree.h" 53)
+ (!options
+ (!option desc string "tree_node_structure (&%h)")
+ (!option tag string "TS_LTO_GENERIC")
+ )
+ )
+ )
+ (!options
+ (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_TYPE_COMMON) ? ((union lang_tree_node *) %h.generic.type_common.next_variant) : CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) %h.generic.common.chain) : NULL")
+ (!option desc string "lto_tree_node_structure (&%h)")
+ )
+ 128
+ (!type already_seen 629)
+ )
+
+ (!type union 658 nil gc_pointed_to "lang_tree_node"
+ (!srcfileloc "jit/dummy-frontend.cc" 516)
+ (!fields 2
+ (!pair "generic"
+ (!type already_seen 22)
+ (!srcfileloc "jit/dummy-frontend.cc" 514)
+ (!options
+ (!option desc string "tree_node_structure (&%h)")
+ (!option tag string "0")
+ )
+ )
+ (!pair "identifier"
+ (!type already_seen 631)
+ (!srcfileloc "jit/dummy-frontend.cc" 515)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ )
+ (!options
+ (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL")
+ (!option desc string "TREE_CODE (&%h.generic) == IDENTIFIER_NODE")
+ )
+ 64
+ (!type already_seen 629)
+ )
+
+ (!type union 659 nil gc_pointed_to "lang_tree_node"
+ (!srcfileloc "go/go-lang.cc" 77)
+ (!fields 2
+ (!pair "generic"
+ (!type already_seen 22)
+ (!srcfileloc "go/go-lang.cc" 75)
+ (!options
+ (!option desc string "tree_node_structure (&%h)")
+ (!option tag string "0")
+ )
+ )
+ (!pair "identifier"
+ (!type already_seen 631)
+ (!srcfileloc "go/go-lang.cc" 76)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ )
+ (!options
+ (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL")
+ (!option desc string "TREE_CODE (&%h.generic) == IDENTIFIER_NODE")
+ )
+ 32
+ (!type already_seen 629)
+ )
+
+ (!type union 660 nil gc_pointed_to "lang_tree_node"
+ (!srcfileloc "fortran/f95-lang.cc" 58)
+ (!fields 2
+ (!pair "generic"
+ (!type already_seen 22)
+ (!srcfileloc "fortran/f95-lang.cc" 56)
+ (!options
+ (!option desc string "tree_node_structure (&%h)")
+ (!option tag string "0")
+ )
+ )
+ (!pair "identifier"
+ (!type already_seen 631)
+ (!srcfileloc "fortran/f95-lang.cc" 57)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ )
+ (!options
+ (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL")
+ (!option desc string "TREE_CODE (&%h.generic) == IDENTIFIER_NODE")
+ )
+ 16
+ (!type already_seen 629)
+ )
+
+ (!type union 661 nil gc_pointed_to "lang_tree_node"
+ (!srcfileloc "d/d-tree.h" 353)
+ (!fields 3
+ (!pair "generic"
+ (!type already_seen 22)
+ (!srcfileloc "d/d-tree.h" 350)
+ (!options
+ (!option desc string "tree_node_structure (&%h)")
+ (!option tag string "TS_D_GENERIC")
+ )
+ )
+ (!pair "identifier"
+ (!type already_seen 631)
+ (!srcfileloc "d/d-tree.h" 351)
+ (!options
+ (!option tag string "TS_D_IDENTIFIER")
+ )
+ )
+ (!pair "frameinfo"
+ (!type struct 662 nil gc_used "tree_frame_info"
+ (!srcfileloc "d/d-tree.h" 352)
+ (!fields 2
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "d/d-tree.h" 187)
+ nil )
+ (!pair "frame_type"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-tree.h" 188)
+ nil )
+ )
+ nil 8 nil nil )
+ (!srcfileloc "d/d-tree.h" 352)
+ (!options
+ (!option tag string "TS_D_FRAMEINFO")
+ )
+ )
+ )
+ (!options
+ (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL")
+ (!option desc string "d_tree_node_structure (&%h)")
+ )
+ 8
+ (!type already_seen 629)
+ )
+
+ (!type union 663 nil gc_pointed_to "lang_tree_node"
+ (!srcfileloc "cp/cp-tree.h" 1822)
+ (!fields 17
+ (!pair "generic"
+ (!type already_seen 22)
+ (!srcfileloc "cp/cp-tree.h" 1798)
+ (!options
+ (!option desc string "tree_node_structure (&%h)")
+ (!option tag string "TS_CP_GENERIC")
+ )
+ )
+ (!pair "tpi"
+ (!type struct 664 nil gc_used "template_parm_index"
+ (!srcfileloc "cp/cp-tree.h" 704)
+ (!fields 5
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "cp/cp-tree.h" 699)
+ nil )
+ (!pair "index"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 700)
+ nil )
+ (!pair "level"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 701)
+ nil )
+ (!pair "orig_level"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 702)
+ nil )
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 703)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1799)
+ (!options
+ (!option tag string "TS_CP_TPI")
+ )
+ )
+ (!pair "ptrmem"
+ (!type struct 665
+ (!type pointer 666 nil gc_unused
+ (!type already_seen 665)
+ )
+ gc_used "ptrmem_cst"
+ (!srcfileloc "cp/cp-tree.h" 710)
+ (!fields 3
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "cp/cp-tree.h" 707)
+ nil )
+ (!pair "member"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 708)
+ nil )
+ (!pair "locus"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 709)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1800)
+ (!options
+ (!option tag string "TS_CP_PTRMEM")
+ )
+ )
+ (!pair "overload"
+ (!type struct 667 nil gc_used "tree_overload"
+ (!srcfileloc "cp/cp-tree.h" 825)
+ (!fields 2
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "cp/cp-tree.h" 823)
+ nil )
+ (!pair "function"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 824)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1801)
+ (!options
+ (!option tag string "TS_CP_OVERLOAD")
+ )
+ )
+ (!pair "binding_vec"
+ (!type struct 668 nil gc_used "tree_binding_vec"
+ (!srcfileloc "cp/name-lookup.h" 149)
+ (!fields 3
+ (!pair "base"
+ (!type already_seen 25)
+ (!srcfileloc "cp/name-lookup.h" 146)
+ nil )
+ (!pair "name"
+ (!type already_seen 23)
+ (!srcfileloc "cp/name-lookup.h" 147)
+ nil )
+ (!pair "vec"
+ (!type array 669 nil gc_used "1"
+ (!type struct 670 nil gc_used "binding_cluster"
+ (!srcfileloc "cp/name-lookup.h" 148)
+ (!fields 2
+ (!pair "indices"
+ (!type array 671 nil gc_unused "BINDING_VECTOR_SLOTS_PER_CLUSTER"
+ (!type struct 672 nil gc_unused "binding_index"
+ (!srcfileloc "cp/name-lookup.h" 129)
+ (!fields 0 )
+ nil 1028 nil nil )
+ )
+ (!srcfileloc "cp/name-lookup.h" 129)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "slots"
+ (!type array 673 nil gc_used "BINDING_VECTOR_SLOTS_PER_CLUSTER"
+ (!type struct 674 nil gc_used "binding_slot"
+ (!srcfileloc "cp/name-lookup.h" 111)
+ (!fields 1
+ (!pair "u"
+ (!type union 675 nil gc_used "binding_slot_lazy"
+ (!srcfileloc "cp/name-lookup.h" 80)
+ (!fields 1
+ (!pair "binding"
+ (!type already_seen 23)
+ (!srcfileloc "cp/name-lookup.h" 79)
+ (!options
+ (!option tag string "false")
+ )
+ )
+ )
+ (!options
+ (!option desc string "%1.is_lazy ()")
+ )
+ 1028 nil )
+ (!srcfileloc "cp/name-lookup.h" 80)
+ nil )
+ )
+ nil 1028 nil nil )
+ )
+ (!srcfileloc "cp/name-lookup.h" 130)
+ nil )
+ )
+ nil 1028 nil nil )
+ )
+ (!srcfileloc "cp/name-lookup.h" 148)
+ (!options
+ (!option length string "%h.base.u.dependence_info.base")
+ )
+ )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1802)
+ (!options
+ (!option tag string "TS_CP_BINDING_VECTOR")
+ )
+ )
+ (!pair "baselink"
+ (!type struct 676 nil gc_used "tree_baselink"
+ (!srcfileloc "cp/cp-tree.h" 1093)
+ (!fields 4
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "cp/cp-tree.h" 1089)
+ nil )
+ (!pair "binfo"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1090)
+ nil )
+ (!pair "functions"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1091)
+ nil )
+ (!pair "access_binfo"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1092)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1803)
+ (!options
+ (!option tag string "TS_CP_BASELINK")
+ )
+ )
+ (!pair "template_decl"
+ (!type struct 677 nil gc_used "tree_template_decl"
+ (!srcfileloc "cp/cp-tree.h" 1051)
+ (!fields 3
+ (!pair "common"
+ (!type already_seen 52)
+ (!srcfileloc "cp/cp-tree.h" 1048)
+ nil )
+ (!pair "arguments"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1049)
+ nil )
+ (!pair "result"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1050)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1804)
+ (!options
+ (!option tag string "TS_CP_TEMPLATE_DECL")
+ )
+ )
+ (!pair "deferred_parse"
+ (!type struct 678 nil gc_used "tree_deferred_parse"
+ (!srcfileloc "cp/cp-tree.h" 1321)
+ (!fields 3
+ (!pair "base"
+ (!type already_seen 25)
+ (!srcfileloc "cp/cp-tree.h" 1318)
+ nil )
+ (!pair "tokens"
+ (!type already_seen 71)
+ (!srcfileloc "cp/cp-tree.h" 1319)
+ nil )
+ (!pair "instantiations"
+ (!type already_seen 85)
+ (!srcfileloc "cp/cp-tree.h" 1320)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1805)
+ (!options
+ (!option tag string "TS_CP_DEFERRED_PARSE")
+ )
+ )
+ (!pair "deferred_noexcept"
+ (!type struct 679 nil gc_used "tree_deferred_noexcept"
+ (!srcfileloc "cp/cp-tree.h" 1342)
+ (!fields 3
+ (!pair "base"
+ (!type already_seen 25)
+ (!srcfileloc "cp/cp-tree.h" 1339)
+ nil )
+ (!pair "pattern"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1340)
+ nil )
+ (!pair "args"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1341)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1806)
+ (!options
+ (!option tag string "TS_CP_DEFERRED_NOEXCEPT")
+ )
+ )
+ (!pair "identifier"
+ (!type already_seen 631)
+ (!srcfileloc "cp/cp-tree.h" 1807)
+ (!options
+ (!option tag string "TS_CP_IDENTIFIER")
+ )
+ )
+ (!pair "static_assertion"
+ (!type struct 680 nil gc_used "tree_static_assert"
+ (!srcfileloc "cp/cp-tree.h" 1365)
+ (!fields 4
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "cp/cp-tree.h" 1361)
+ nil )
+ (!pair "condition"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1362)
+ nil )
+ (!pair "message"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1363)
+ nil )
+ (!pair "location"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1364)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1809)
+ (!options
+ (!option tag string "TS_CP_STATIC_ASSERT")
+ )
+ )
+ (!pair "argument_pack_select"
+ (!type struct 681 nil gc_used "tree_argument_pack_select"
+ (!srcfileloc "cp/cp-tree.h" 1371)
+ (!fields 3
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "cp/cp-tree.h" 1368)
+ nil )
+ (!pair "argument_pack"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1369)
+ nil )
+ (!pair "index"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1370)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1811)
+ (!options
+ (!option tag string "TS_CP_ARGUMENT_PACK_SELECT")
+ )
+ )
+ (!pair "trait_expression"
+ (!type struct 682 nil gc_used "tree_trait_expr"
+ (!srcfileloc "cp/cp-tree.h" 1403)
+ (!fields 5
+ (!pair "common"
+ (!type already_seen 32)
+ (!srcfileloc "cp/cp-tree.h" 1398)
+ nil )
+ (!pair "type1"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1399)
+ nil )
+ (!pair "type2"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1400)
+ nil )
+ (!pair "locus"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1401)
+ nil )
+ (!pair "kind"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1402)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1813)
+ (!options
+ (!option tag string "TS_CP_TRAIT_EXPR")
+ )
+ )
+ (!pair "lambda_expression"
+ (!type struct 683 nil gc_used "tree_lambda_expr"
+ (!srcfileloc "cp/cp-tree.h" 1531)
+ (!fields 10
+ (!pair "typed"
+ (!type already_seen 31)
+ (!srcfileloc "cp/cp-tree.h" 1521)
+ nil )
+ (!pair "capture_list"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1522)
+ nil )
+ (!pair "this_capture"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1523)
+ nil )
+ (!pair "extra_scope"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1524)
+ nil )
+ (!pair "regen_info"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1525)
+ nil )
+ (!pair "pending_proxies"
+ (!type already_seen 85)
+ (!srcfileloc "cp/cp-tree.h" 1526)
+ nil )
+ (!pair "locus"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1527)
+ nil )
+ (!pair "default_capture_mode"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1528)
+ nil )
+ (!pair "discriminator_scope"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1529)
+ nil )
+ (!pair "discriminator_sig"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1530)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1815)
+ (!options
+ (!option tag string "TS_CP_LAMBDA_EXPR")
+ )
+ )
+ (!pair "template_info"
+ (!type struct 684 nil gc_used "tree_template_info"
+ (!srcfileloc "cp/cp-tree.h" 1565)
+ (!fields 4
+ (!pair "base"
+ (!type already_seen 25)
+ (!srcfileloc "cp/cp-tree.h" 1561)
+ nil )
+ (!pair "tmpl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1562)
+ nil )
+ (!pair "args"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1563)
+ nil )
+ (!pair "deferred_access_checks"
+ (!type already_seen 78)
+ (!srcfileloc "cp/cp-tree.h" 1564)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1817)
+ (!options
+ (!option tag string "TS_CP_TEMPLATE_INFO")
+ )
+ )
+ (!pair "constraint_info"
+ (!type struct 685 nil gc_used "tree_constraint_info"
+ (!srcfileloc "cp/cp-tree.h" 1582)
+ (!fields 4
+ (!pair "base"
+ (!type already_seen 25)
+ (!srcfileloc "cp/cp-tree.h" 1578)
+ nil )
+ (!pair "template_reqs"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1579)
+ nil )
+ (!pair "declarator_reqs"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1580)
+ nil )
+ (!pair "associated_constr"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1581)
+ nil )
+ )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1819)
+ (!options
+ (!option tag string "TS_CP_CONSTRAINT_INFO")
+ )
+ )
+ (!pair "userdef_literal"
+ (!type struct 686 nil gc_used "tree_userdef_literal"
+ (!srcfileloc "c-family/c-common.h" 1375)
+ (!fields 5
+ (!pair "base"
+ (!type already_seen 25)
+ (!srcfileloc "c-family/c-common.h" 1370)
+ nil )
+ (!pair "suffix_id"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-common.h" 1371)
+ nil )
+ (!pair "value"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-common.h" 1372)
+ nil )
+ (!pair "num_string"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-common.h" 1373)
+ nil )
+ (!pair "overflow"
+ (!type already_seen 2)
+ (!srcfileloc "c-family/c-common.h" 1374)
+ nil )
+ )
+ nil 1542 nil nil )
+ (!srcfileloc "cp/cp-tree.h" 1821)
+ (!options
+ (!option tag string "TS_CP_USERDEF_LITERAL")
+ )
+ )
+ )
+ (!options
+ (!option chain_next string "(union lang_tree_node *) c_tree_chain_next (&%h.generic)")
+ (!option desc string "cp_tree_node_structure (&%h)")
+ )
+ 1028
+ (!type already_seen 629)
+ )
+
+ (!type union 687 nil gc_pointed_to "lang_tree_node"
+ (!srcfileloc "c/c-decl.cc" 335)
+ (!fields 2
+ (!pair "generic"
+ (!type already_seen 22)
+ (!srcfileloc "c/c-decl.cc" 333)
+ (!options
+ (!option desc string "tree_node_structure (&%h)")
+ (!option tag string "0")
+ )
+ )
+ (!pair "identifier"
+ (!type already_seen 631)
+ (!srcfileloc "c/c-decl.cc" 334)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ )
+ (!options
+ (!option chain_next string "(union lang_tree_node *) c_tree_chain_next (&%h.generic)")
+ (!option desc string "TREE_CODE (&%h.generic) == IDENTIFIER_NODE")
+ )
+ 514
+ (!type already_seen 629)
+ )
+
+ (!type union 688 nil gc_pointed_to "lang_tree_node"
+ (!srcfileloc "ada/gcc-interface/ada-tree.h" 33)
+ (!fields 1
+ (!pair "generic"
+ (!type already_seen 22)
+ (!srcfileloc "ada/gcc-interface/ada-tree.h" 32)
+ (!options
+ (!option desc string "tree_node_structure (&%h)")
+ (!option tag string "0")
+ )
+ )
+ )
+ (!options
+ (!option chain_next string "CODE_CONTAINS_STRUCT (TREE_CODE (&%h.generic), TS_COMMON) ? ((union lang_tree_node *) TREE_CHAIN (&%h.generic)) : NULL")
+ (!option desc string "0")
+ )
+ 1
+ (!type already_seen 629)
+ )
+ )
+ )
+ )
+ )
+ 4095 nil )
+ "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL" "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL")
+ (!option tag string "false")
+ )
+ )
+ (!pair "next"
+ (!type already_seen 19)
+ (!srcfileloc "../libcpp/include/cpplib.h" 862)
+ (!options
+ (!option tag string "true")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "../libcpp/include/cpplib.h" 863)
+ (!options
+ (!option desc string "%1.kind == cmk_assert")
+ )
+ )
+ (!pair "line"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 866)
+ nil )
+ (!pair "count"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 870)
+ nil )
+ (!pair "paramc"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 873)
+ nil )
+ (!pair "lazy"
+ (!type already_seen 8)
+ (!srcfileloc "../libcpp/include/cpplib.h" 876)
+ nil )
+ (!pair "kind"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 879)
+ nil )
+ (!pair "fun_like"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 882)
+ nil )
+ (!pair "variadic"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 885)
+ nil )
+ (!pair "syshdr"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 888)
+ nil )
+ (!pair "used"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 891)
+ nil )
+ (!pair "extra_tokens"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 896)
+ nil )
+ (!pair "imported_p"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 899)
+ nil )
+ (!pair "exp"
+ (!type union 689 nil gc_used "cpp_exp_u"
+ (!srcfileloc "../libcpp/include/cpplib.h" 912)
+ (!fields 2
+ (!pair "tokens"
+ (!type array 690 nil gc_used "1"
+ (!type struct 691
+ (!type pointer 692 nil gc_used
+ (!type already_seen 691)
+ )
+ gc_pointed_to "cpp_token"
+ (!srcfileloc "../libcpp/include/cpplib.h" 279)
+ (!fields 4
+ (!pair "src_loc"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 253)
+ nil )
+ (!pair "type"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 255)
+ nil )
+ (!pair "flags"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 256)
+ nil )
+ (!pair "val"
+ (!type union 693 nil gc_used "cpp_token_u"
+ (!srcfileloc "../libcpp/include/cpplib.h" 278)
+ (!fields 6
+ (!pair "node"
+ (!type struct 694 nil gc_used "cpp_identifier"
+ (!srcfileloc "../libcpp/include/cpplib.h" 246)
+ (!fields 2
+ (!pair "node"
+ (!type already_seen 13)
+ (!srcfileloc "../libcpp/include/cpplib.h" 239)
+ (!options
+ (!option nested_ptr nested
+ (!type already_seen 22)
+ "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL" "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL")
+ )
+ )
+ (!pair "spelling"
+ (!type already_seen 13)
+ (!srcfileloc "../libcpp/include/cpplib.h" 245)
+ (!options
+ (!option nested_ptr nested
+ (!type already_seen 22)
+ "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL" "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "../libcpp/include/cpplib.h" 261)
+ (!options
+ (!option tag string "CPP_TOKEN_FLD_NODE")
+ )
+ )
+ (!pair "source"
+ (!type already_seen 692)
+ (!srcfileloc "../libcpp/include/cpplib.h" 264)
+ (!options
+ (!option tag string "CPP_TOKEN_FLD_SOURCE")
+ )
+ )
+ (!pair "str"
+ (!type struct 695 nil gc_used "cpp_string"
+ (!srcfileloc "../libcpp/include/cpplib.h" 187)
+ (!fields 2
+ (!pair "len"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 181)
+ nil )
+ (!pair "text"
+ (!type already_seen 11)
+ (!srcfileloc "../libcpp/include/cpplib.h" 186)
+ (!options
+ (!option string_length string "1 + %h.len")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "../libcpp/include/cpplib.h" 267)
+ (!options
+ (!option tag string "CPP_TOKEN_FLD_STR")
+ )
+ )
+ (!pair "macro_arg"
+ (!type struct 696 nil gc_used "cpp_macro_arg"
+ (!srcfileloc "../libcpp/include/cpplib.h" 230)
+ (!fields 2
+ (!pair "arg_no"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 223)
+ nil )
+ (!pair "spelling"
+ (!type already_seen 13)
+ (!srcfileloc "../libcpp/include/cpplib.h" 229)
+ (!options
+ (!option nested_ptr nested
+ (!type already_seen 22)
+ "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL" "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "../libcpp/include/cpplib.h" 270)
+ (!options
+ (!option tag string "CPP_TOKEN_FLD_ARG_NO")
+ )
+ )
+ (!pair "token_no"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 274)
+ (!options
+ (!option tag string "CPP_TOKEN_FLD_TOKEN_NO")
+ )
+ )
+ (!pair "pragma"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 277)
+ (!options
+ (!option tag string "CPP_TOKEN_FLD_PRAGMA")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "../libcpp/include/cpplib.h" 278)
+ (!options
+ (!option desc string "cpp_token_val_index (&%1)")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "../libcpp/include/cpplib.h" 906)
+ (!options
+ (!option length string "%1.count")
+ (!option tag string "false")
+ )
+ )
+ (!pair "text"
+ (!type already_seen 11)
+ (!srcfileloc "../libcpp/include/cpplib.h" 911)
+ (!options
+ (!option tag string "true")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "../libcpp/include/cpplib.h" 912)
+ (!options
+ (!option desc string "%1.kind == cmk_traditional")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "../libcpp/include/cpplib.h" 976)
+ (!options
+ (!option tag string "NT_VOID")
+ )
+ )
+ (!pair "macro"
+ (!type already_seen 19)
+ (!srcfileloc "../libcpp/include/cpplib.h" 978)
+ (!options
+ (!option tag string "NT_USER_MACRO")
+ )
+ )
+ (!pair "builtin"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 980)
+ (!options
+ (!option tag string "NT_BUILTIN_MACRO")
+ )
+ )
+ (!pair "arg_index"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 982)
+ (!options
+ (!option tag string "NT_MACRO_ARG")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "../libcpp/include/cpplib.h" 1003)
+ (!options
+ (!option desc string "%1.type")
+ )
+ )
+ )
+ nil 4095 nil nil )
+
+ (!type already_seen 22)
+
+ (!type struct 697
+ (!type pointer 698 nil gc_unused
+ (!type already_seen 697)
+ )
+ gc_used "line_map_macro"
+ (!srcfileloc "../libcpp/include/line-map.h" 742)
+ (!fields 4
+ (!pair "n_tokens"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 467)
+ nil )
+ (!pair "macro"
+ (!type already_seen 13)
+ (!srcfileloc "../libcpp/include/line-map.h" 476)
+ (!options
+ (!option nested_ptr nested
+ (!type already_seen 22)
+ "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL" "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL")
+ )
+ )
+ (!pair "macro_locations"
+ (!type already_seen 3)
+ (!srcfileloc "../libcpp/include/line-map.h" 531)
+ (!options
+ (!option atomic string "")
+ )
+ )
+ (!pair "expansion"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 538)
+ nil )
+ )
+ (!options
+ (!option tag string "2")
+ )
+ 4095 nil
+ (!type already_seen 5)
+ )
+
+ (!type struct 699 nil gc_used "maps_info_ordinary"
+ (!srcfileloc "../libcpp/include/line-map.h" 788)
+ (!fields 3
+ (!pair "maps"
+ (!type already_seen 7)
+ (!srcfileloc "../libcpp/include/line-map.h" 727)
+ (!options
+ (!option length string "%h.used")
+ )
+ )
+ (!pair "allocated"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 730)
+ nil )
+ (!pair "used"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 734)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 700 nil gc_used "maps_info_macro"
+ (!srcfileloc "../libcpp/include/line-map.h" 790)
+ (!fields 3
+ (!pair "maps"
+ (!type already_seen 698)
+ (!srcfileloc "../libcpp/include/line-map.h" 742)
+ (!options
+ (!option length string "%h.used")
+ )
+ )
+ (!pair "allocated"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 745)
+ nil )
+ (!pair "used"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 749)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 701
+ (!type pointer 702 nil gc_unused
+ (!type already_seen 701)
+ )
+ gc_used "location_adhoc_data"
+ (!srcfileloc "../libcpp/include/line-map.h" 761)
+ (!fields 4
+ (!pair "locus"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 757)
+ nil )
+ (!pair "src_range"
+ (!type already_seen 1)
+ (!srcfileloc "../libcpp/include/line-map.h" 758)
+ nil )
+ (!pair "data"
+ (!type already_seen 3)
+ (!srcfileloc "../libcpp/include/line-map.h" 759)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "discriminator"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 760)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type already_seen 334)
+
+ (!type struct 703 nil gc_used "location_adhoc_data_map"
+ (!srcfileloc "../libcpp/include/line-map.h" 780)
+ (!fields 4
+ (!pair "htab"
+ (!type already_seen 333)
+ (!srcfileloc "../libcpp/include/line-map.h" 776)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "curr_loc"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 777)
+ nil )
+ (!pair "allocated"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 778)
+ nil )
+ (!pair "data"
+ (!type already_seen 702)
+ (!srcfileloc "../libcpp/include/line-map.h" 779)
+ (!options
+ (!option length string "%h.allocated")
+ )
+ )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 704
+ (!type pointer 705 nil gc_used
+ (!type already_seen 704)
+ )
+ gc_pointed_to "line_maps"
+ (!srcfileloc "../libcpp/include/line-map.h" 829)
+ (!fields 15
+ (!pair "info_ordinary"
+ (!type already_seen 699)
+ (!srcfileloc "../libcpp/include/line-map.h" 788)
+ nil )
+ (!pair "info_macro"
+ (!type already_seen 700)
+ (!srcfileloc "../libcpp/include/line-map.h" 790)
+ nil )
+ (!pair "depth"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 793)
+ nil )
+ (!pair "trace_includes"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 796)
+ nil )
+ (!pair "seen_line_directive"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 799)
+ nil )
+ (!pair "highest_location"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 802)
+ nil )
+ (!pair "highest_line"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 805)
+ nil )
+ (!pair "max_column_hint"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 809)
+ nil )
+ (!pair "reallocator"
+ (!type already_seen 219)
+ (!srcfileloc "../libcpp/include/line-map.h" 812)
+ (!options
+ (!option callback string "")
+ )
+ )
+ (!pair "round_alloc_size"
+ (!type already_seen 219)
+ (!srcfileloc "../libcpp/include/line-map.h" 816)
+ (!options
+ (!option callback string "")
+ )
+ )
+ (!pair "location_adhoc_data_map"
+ (!type already_seen 703)
+ (!srcfileloc "../libcpp/include/line-map.h" 818)
+ nil )
+ (!pair "builtin_location"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 822)
+ nil )
+ (!pair "default_range_bits"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 825)
+ nil )
+ (!pair "num_optimized_ranges"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 827)
+ nil )
+ (!pair "num_unoptimized_ranges"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 828)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 706 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/../libcpp/include/line-map.h:1306"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 707 nil gc_unused "range_label"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 708 nil gc_unused "location_range"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 709 nil gc_unused "semi_embedded_vec"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 710 nil gc_unused "fixit_hint"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 711 nil gc_unused "diagnostic_path"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 712 nil gc_unused "rich_location"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 713 nil gc_unused "label_text"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 714 nil gc_unused "linemap_stats"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 715 nil gc_unused "cpp_reader"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 716 nil gc_unused "cpp_buffer"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 717 nil gc_unused "cpp_options"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 691)
+
+ (!type already_seen 695)
+
+ (!type already_seen 20)
+
+ (!type struct 718 nil gc_unused "cpp_callbacks"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 719 nil gc_unused "cpp_dir"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 720 nil gc_unused "_cpp_file"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 696)
+
+ (!type already_seen 694)
+
+ (!type already_seen 693)
+
+ (!type already_seen 21)
+
+ (!type already_seen 689)
+
+ (!type already_seen 18)
+
+ (!type already_seen 15)
+
+ (!type struct 721 nil gc_unused "cpp_string_location_reader"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 722 nil gc_unused "cpp_substring_ranges"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 723 nil gc_unused "cpp_num"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 724 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/../libcpp/include/cpplib.h:1394"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 725 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/../libcpp/include/cpplib.h:1405"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 726 nil gc_unused "cpp_converted_source"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 727 nil gc_unused "save_macro_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 728 nil gc_unused "cpp_decoded_char"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 729 nil gc_unused "cpp_char_column_policy"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 730 nil gc_unused "cpp_display_width_computation"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 731 nil gc_unused "char_span"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 732 nil gc_unused "file_cache_slot"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 733 nil gc_unused "file_cache"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 734
+ (!type pointer 735 nil gc_used
+ (!type already_seen 734)
+ )
+ gc_pointed_to "string_concat"
+ (!srcfileloc "input.h" 265)
+ (!fields 2
+ (!pair "m_num"
+ (!type already_seen 2)
+ (!srcfileloc "input.h" 263)
+ nil )
+ (!pair "m_locs"
+ (!type already_seen 3)
+ (!srcfileloc "input.h" 264)
+ (!options
+ (!option atomic string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 736
+ (!type pointer 737 nil gc_used
+ (!type already_seen 736)
+ )
+ gc_pointed_to "string_concat_db"
+ (!srcfileloc "input.h" 290)
+ (!fields 1
+ (!pair "m_table"
+ (!type pointer 738 nil gc_used
+ (!type user_struct 739
+ (!type already_seen 738)
+ gc_pointed_to "hash_map<location_hash,string_concat*>"
+ (!srcfileloc "input.h" 289)
+ (!fields 2
+ (!pair "string_concat"
+ (!type already_seen 735)
+ (!srcfileloc "input.h" 289)
+ nil )
+ (!pair "location_hash"
+ (!type undefined 740 nil gc_unused "location_hash"
+ (!srcfileloc "input.h" 289)
+ )
+ (!srcfileloc "input.h" 289)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "input.h" 289)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type already_seen 739)
+
+ (!type already_seen 740)
+
+ (!type already_seen 393)
+
+ (!type already_seen 388)
+
+ (!type struct 741
+ (!type pointer 742 nil gc_unused
+ (!type already_seen 741)
+ )
+ gc_unused "simple_bitmap_def"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 102)
+
+ (!type already_seen 44)
+
+ (!type already_seen 290)
+
+ (!type struct 743 nil gc_unused "scalar_float_mode"
+ (!srcfileloc "coretypes.h" 67)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 744 nil gc_unused "complex_mode"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 745 nil gc_unused "fixed_size_mode"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 746 nil gc_unused "opt_mode<scalar_mode>"
+ (!srcfileloc "coretypes.h" 65)
+ (!fields 1
+ (!pair "scalar_mode"
+ (!type already_seen 44)
+ (!srcfileloc "coretypes.h" 65)
+ nil )
+ )
+ )
+
+ (!type user_struct 747 nil gc_unused "opt_mode<scalar_int_mode>"
+ (!srcfileloc "coretypes.h" 66)
+ (!fields 1
+ (!pair "scalar_int_mode"
+ (!type already_seen 290)
+ (!srcfileloc "coretypes.h" 66)
+ nil )
+ )
+ )
+
+ (!type user_struct 748 nil gc_unused "opt_mode<scalar_float_mode>"
+ (!srcfileloc "coretypes.h" 67)
+ (!fields 1
+ (!pair "scalar_float_mode"
+ (!type already_seen 743)
+ (!srcfileloc "coretypes.h" 67)
+ nil )
+ )
+ )
+
+ (!type already_seen 43)
+
+ (!type user_struct 749 nil gc_unused "pod_mode<scalar_int_mode>"
+ (!srcfileloc "coretypes.h" 70)
+ (!fields 1
+ (!pair "scalar_int_mode"
+ (!type already_seen 290)
+ (!srcfileloc "coretypes.h" 70)
+ nil )
+ )
+ )
+
+ (!type user_struct 750 nil gc_unused "pod_mode<fixed_size_mode>"
+ (!srcfileloc "coretypes.h" 71)
+ (!fields 1
+ (!pair "fixed_size_mode"
+ (!type already_seen 2)
+ (!srcfileloc "coretypes.h" 71)
+ nil )
+ )
+ )
+
+ (!type struct 751
+ (!type pointer 752 nil gc_unused
+ (!type already_seen 751)
+ )
+ gc_pointed_to "rtx_expr_list"
+ (!srcfileloc "rtl.h" 468)
+ (!fields 0 )
+ nil 4095 nil
+ (!type already_seen 102)
+ )
+
+ (!type struct 753
+ (!type pointer 754 nil gc_used
+ (!type already_seen 753)
+ )
+ gc_pointed_to "rtx_insn_list"
+ (!srcfileloc "rtl.h" 498)
+ (!fields 0 )
+ nil 4095 nil
+ (!type already_seen 102)
+ )
+
+ (!type struct 755 nil gc_pointed_to "rtx_sequence"
+ (!srcfileloc "rtl.h" 526)
+ (!fields 0 )
+ nil 4095 nil
+ (!type already_seen 102)
+ )
+
+ (!type already_seen 298)
+
+ (!type struct 756 nil gc_pointed_to "rtx_debug_insn"
+ (!srcfileloc "rtl.h" 587)
+ (!fields 0 )
+ nil 4095 nil
+ (!type already_seen 298)
+ )
+
+ (!type struct 757 nil gc_pointed_to "rtx_nonjump_insn"
+ (!srcfileloc "rtl.h" 598)
+ (!fields 0 )
+ nil 4095 nil
+ (!type already_seen 298)
+ )
+
+ (!type struct 758 nil gc_pointed_to "rtx_jump_insn"
+ (!srcfileloc "rtl.h" 625)
+ (!fields 0 )
+ nil 4095 nil
+ (!type already_seen 298)
+ )
+
+ (!type struct 759 nil gc_pointed_to "rtx_call_insn"
+ (!srcfileloc "rtl.h" 638)
+ (!fields 0 )
+ nil 4095 nil
+ (!type already_seen 298)
+ )
+
+ (!type struct 760 nil gc_pointed_to "rtx_jump_table_data"
+ (!srcfileloc "rtl.h" 664)
+ (!fields 0 )
+ nil 4095 nil
+ (!type already_seen 298)
+ )
+
+ (!type struct 761 nil gc_pointed_to "rtx_barrier"
+ (!srcfileloc "rtl.h" 675)
+ (!fields 0 )
+ nil 4095 nil
+ (!type already_seen 298)
+ )
+
+ (!type already_seen 367)
+
+ (!type struct 762
+ (!type pointer 763 nil gc_used
+ (!type already_seen 762)
+ )
+ gc_pointed_to "rtx_note"
+ (!srcfileloc "emit-rtl.h" 128)
+ (!fields 0 )
+ nil 4095 nil
+ (!type already_seen 298)
+ )
+
+ (!type already_seen 232)
+
+ (!type struct 764
+ (!type pointer 765 nil gc_unused
+ (!type already_seen 764)
+ )
+ gc_unused "hwivec_def"
+ (!srcfileloc "rtl.h" 282)
+ (!fields 1
+ (!pair "elem"
+ (!type array 766 nil gc_unused "1"
+ (!type already_seen 2)
+ )
+ (!srcfileloc "rtl.h" 281)
+ nil )
+ )
+ (!options
+ (!option variable_size string "")
+ )
+ 4095 nil nil )
+
+ (!type already_seen 283)
+
+ (!type struct 767 nil gc_unused "gimple_stmt_iterator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 768 nil gc_pointed_to "gcond"
+ (!srcfileloc "gimple.h" 900)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_WITH_OPS")
+ )
+ 4095 nil
+ (!type struct 769 nil gc_pointed_to "gimple_statement_with_ops"
+ (!srcfileloc "gimple.h" 320)
+ (!fields 1
+ (!pair "op"
+ (!type array 770 nil gc_used "1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "gimple.h" 319)
+ (!options
+ (!option length string "%h.num_ops")
+ )
+ )
+ )
+ (!options
+ (!option tag string "GSS_WITH_OPS")
+ )
+ 4095 nil
+ (!type struct 771 nil gc_pointed_to "gimple_statement_with_ops_base"
+ (!srcfileloc "gimple.h" 305)
+ (!fields 1
+ (!pair "use_ops"
+ (!type already_seen 403)
+ (!srcfileloc "gimple.h" 304)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 4095 nil
+ (!type already_seen 283)
+ )
+ )
+ )
+
+ (!type struct 772 nil gc_pointed_to "gdebug"
+ (!srcfileloc "gimple.h" 910)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_WITH_OPS")
+ )
+ 4095 nil
+ (!type already_seen 769)
+ )
+
+ (!type struct 773 nil gc_pointed_to "ggoto"
+ (!srcfileloc "gimple.h" 920)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_WITH_OPS")
+ )
+ 4095 nil
+ (!type already_seen 769)
+ )
+
+ (!type struct 774 nil gc_pointed_to "glabel"
+ (!srcfileloc "gimple.h" 930)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_WITH_OPS")
+ )
+ 4095 nil
+ (!type already_seen 769)
+ )
+
+ (!type struct 775 nil gc_pointed_to "gswitch"
+ (!srcfileloc "gimple.h" 940)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_WITH_OPS")
+ )
+ 4095 nil
+ (!type already_seen 769)
+ )
+
+ (!type struct 776 nil gc_pointed_to "gassign"
+ (!srcfileloc "gimple.h" 951)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_WITH_MEM_OPS")
+ )
+ 4095 nil
+ (!type struct 777 nil gc_pointed_to "gimple_statement_with_memory_ops"
+ (!srcfileloc "gimple.h" 351)
+ (!fields 1
+ (!pair "op"
+ (!type array 778 nil gc_used "1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "gimple.h" 350)
+ (!options
+ (!option length string "%h.num_ops")
+ )
+ )
+ )
+ (!options
+ (!option tag string "GSS_WITH_MEM_OPS")
+ )
+ 4095 nil
+ (!type struct 779 nil gc_pointed_to "gimple_statement_with_memory_ops_base"
+ (!srcfileloc "gimple.h" 335)
+ (!fields 2
+ (!pair "vdef"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 333)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "vuse"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 334)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ (!options
+ (!option tag string "GSS_WITH_MEM_OPS_BASE")
+ )
+ 4095 nil
+ (!type already_seen 771)
+ )
+ )
+ )
+
+ (!type struct 780 nil gc_pointed_to "gasm"
+ (!srcfileloc "gimple.h" 587)
+ (!fields 6
+ (!pair "string"
+ (!type already_seen 11)
+ (!srcfileloc "gimple.h" 573)
+ nil )
+ (!pair "ni"
+ (!type already_seen 8)
+ (!srcfileloc "gimple.h" 577)
+ nil )
+ (!pair "no"
+ (!type already_seen 8)
+ (!srcfileloc "gimple.h" 578)
+ nil )
+ (!pair "nc"
+ (!type already_seen 8)
+ (!srcfileloc "gimple.h" 579)
+ nil )
+ (!pair "nl"
+ (!type already_seen 8)
+ (!srcfileloc "gimple.h" 580)
+ nil )
+ (!pair "op"
+ (!type array 781 nil gc_used "1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "gimple.h" 586)
+ (!options
+ (!option length string "%h.num_ops")
+ )
+ )
+ )
+ (!options
+ (!option tag string "GSS_ASM")
+ )
+ 4095 nil
+ (!type already_seen 779)
+ )
+
+ (!type struct 782
+ (!type pointer 783 nil gc_used
+ (!type already_seen 782)
+ )
+ gc_pointed_to "gcall"
+ (!srcfileloc "gimple.h" 378)
+ (!fields 4
+ (!pair "call_used"
+ (!type already_seen 386)
+ (!srcfileloc "gimple.h" 362)
+ nil )
+ (!pair "call_clobbered"
+ (!type already_seen 386)
+ (!srcfileloc "gimple.h" 363)
+ nil )
+ (!pair "u"
+ (!type union 784 nil gc_used "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/gimple.h:366"
+ (!srcfileloc "gimple.h" 369)
+ (!fields 2
+ (!pair "fntype"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 367)
+ (!options
+ (!option tag string "0")
+ )
+ )
+ (!pair "internal_fn"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 368)
+ (!options
+ (!option tag string "GF_CALL_INTERNAL")
+ )
+ )
+ )
+ (!options
+ (!option desc string "%1.subcode & GF_CALL_INTERNAL")
+ )
+ 4095 nil )
+ (!srcfileloc "gimple.h" 369)
+ nil )
+ (!pair "op"
+ (!type array 785 nil gc_used "1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "gimple.h" 375)
+ (!options
+ (!option length string "%h.num_ops")
+ )
+ )
+ )
+ (!options
+ (!option tag string "GSS_CALL")
+ )
+ 4095 nil
+ (!type already_seen 779)
+ )
+
+ (!type struct 786 nil gc_pointed_to "gtransaction"
+ (!srcfileloc "gimple.h" 882)
+ (!fields 4
+ (!pair "body"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 876)
+ nil )
+ (!pair "label_norm"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 879)
+ nil )
+ (!pair "label_uninst"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 880)
+ nil )
+ (!pair "label_over"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 881)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_TRANSACTION")
+ )
+ 4095 nil
+ (!type already_seen 779)
+ )
+
+ (!type struct 787 nil gc_pointed_to "greturn"
+ (!srcfileloc "gimple.h" 961)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_WITH_MEM_OPS")
+ )
+ 4095 nil
+ (!type already_seen 777)
+ )
+
+ (!type struct 788 nil gc_pointed_to "gbind"
+ (!srcfileloc "gimple.h" 414)
+ (!fields 3
+ (!pair "vars"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 402)
+ nil )
+ (!pair "block"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 410)
+ nil )
+ (!pair "body"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 413)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_BIND")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+
+ (!type struct 789 nil gc_pointed_to "gcatch"
+ (!srcfileloc "gimple.h" 429)
+ (!fields 2
+ (!pair "types"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 425)
+ nil )
+ (!pair "handler"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 428)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_CATCH")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+
+ (!type struct 790 nil gc_pointed_to "geh_filter"
+ (!srcfileloc "gimple.h" 446)
+ (!fields 2
+ (!pair "types"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 441)
+ nil )
+ (!pair "failure"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 445)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_EH_FILTER")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+
+ (!type struct 791 nil gc_pointed_to "geh_mnt"
+ (!srcfileloc "gimple.h" 468)
+ (!fields 1
+ (!pair "fndecl"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 467)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_EH_MNT")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+
+ (!type struct 792 nil gc_pointed_to "geh_else"
+ (!srcfileloc "gimple.h" 457)
+ (!fields 2
+ (!pair "n_body"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 456)
+ nil )
+ (!pair "e_body"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 456)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_EH_ELSE")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+
+ (!type struct 793 nil gc_pointed_to "gresx"
+ (!srcfileloc "gimple.h" 506)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_EH_CTRL")
+ )
+ 4095 nil
+ (!type struct 794 nil gc_pointed_to "gimple_statement_eh_ctrl"
+ (!srcfileloc "gimple.h" 499)
+ (!fields 1
+ (!pair "region"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 498)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_EH_CTRL")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+ )
+
+ (!type struct 795 nil gc_pointed_to "geh_dispatch"
+ (!srcfileloc "gimple.h" 513)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_EH_CTRL")
+ )
+ 4095 nil
+ (!type already_seen 794)
+ )
+
+ (!type struct 796 nil gc_pointed_to "gphi"
+ (!srcfileloc "gimple.h" 486)
+ (!fields 4
+ (!pair "capacity"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 478)
+ nil )
+ (!pair "nargs"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 479)
+ nil )
+ (!pair "result"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 482)
+ nil )
+ (!pair "args"
+ (!type array 797 nil gc_used "1"
+ (!type struct 798 nil gc_used "phi_arg_d"
+ (!srcfileloc "tree-core.h" 1629)
+ (!fields 3
+ (!pair "imm_use"
+ (!type already_seen 561)
+ (!srcfileloc "tree-core.h" 1626)
+ nil )
+ (!pair "def"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 1627)
+ nil )
+ (!pair "locus"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1628)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "gimple.h" 485)
+ (!options
+ (!option length string "%h.nargs")
+ )
+ )
+ )
+ (!options
+ (!option tag string "GSS_PHI")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+
+ (!type struct 799 nil gc_pointed_to "gtry"
+ (!srcfileloc "gimple.h" 530)
+ (!fields 2
+ (!pair "eval"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 525)
+ nil )
+ (!pair "cleanup"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 529)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_TRY")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+
+ (!type struct 800 nil gc_pointed_to "gomp_atomic_load"
+ (!srcfileloc "gimple.h" 798)
+ (!fields 2
+ (!pair "rhs"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 797)
+ nil )
+ (!pair "lhs"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 797)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_OMP_ATOMIC_LOAD")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+
+ (!type struct 801 nil gc_pointed_to "gomp_atomic_store"
+ (!srcfileloc "gimple.h" 818)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_OMP_ATOMIC_STORE_LAYOUT")
+ )
+ 4095 nil
+ (!type struct 802 nil gc_pointed_to "gimple_statement_omp_atomic_store_layout"
+ (!srcfileloc "gimple.h" 810)
+ (!fields 1
+ (!pair "val"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 809)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_OMP_ATOMIC_STORE_LAYOUT")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+ )
+
+ (!type struct 803 nil gc_pointed_to "gomp_continue"
+ (!srcfileloc "gimple.h" 744)
+ (!fields 2
+ (!pair "control_def"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 740)
+ nil )
+ (!pair "control_use"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 743)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_OMP_CONTINUE")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+
+ (!type struct 804 nil gc_pointed_to "gomp_critical"
+ (!srcfileloc "gimple.h" 602)
+ (!fields 2
+ (!pair "clauses"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 597)
+ nil )
+ (!pair "name"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 601)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_OMP_CRITICAL")
+ )
+ 4095 nil
+ (!type struct 805 nil gc_pointed_to "gimple_statement_omp"
+ (!srcfileloc "gimple.h" 390)
+ (!fields 1
+ (!pair "body"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 389)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_OMP")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+ )
+
+ (!type struct 806 nil gc_pointed_to "gomp_ordered"
+ (!srcfileloc "gimple.h" 777)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_OMP_SINGLE_LAYOUT")
+ )
+ 4095 nil
+ (!type struct 807 nil gc_pointed_to "gimple_statement_omp_single_layout"
+ (!srcfileloc "gimple.h" 756)
+ (!fields 1
+ (!pair "clauses"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 755)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_OMP_SINGLE_LAYOUT")
+ )
+ 4095 nil
+ (!type already_seen 805)
+ )
+ )
+
+ (!type struct 808 nil gc_pointed_to "gomp_for"
+ (!srcfileloc "gimple.h" 642)
+ (!fields 4
+ (!pair "clauses"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 630)
+ nil )
+ (!pair "collapse"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 634)
+ nil )
+ (!pair "iter"
+ (!type pointer 809 nil gc_unused
+ (!type struct 810
+ (!type already_seen 809)
+ gc_used "gimple_omp_for_iter"
+ (!srcfileloc "gimple.h" 620)
+ (!fields 5
+ (!pair "cond"
+ (!type already_seen 2)
+ (!srcfileloc "gimple.h" 607)
+ nil )
+ (!pair "index"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 610)
+ nil )
+ (!pair "initial"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 613)
+ nil )
+ (!pair "final"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 616)
+ nil )
+ (!pair "incr"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 619)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "gimple.h" 637)
+ (!options
+ (!option length string "%h.collapse")
+ )
+ )
+ (!pair "pre_body"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 641)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_OMP_FOR")
+ )
+ 4095 nil
+ (!type already_seen 805)
+ )
+
+ (!type struct 811 nil gc_pointed_to "gomp_parallel"
+ (!srcfileloc "gimple.h" 681)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_OMP_PARALLEL_LAYOUT")
+ )
+ 4095 nil
+ (!type struct 812 nil gc_pointed_to "gimple_statement_omp_taskreg"
+ (!srcfileloc "gimple.h" 673)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_OMP_PARALLEL_LAYOUT")
+ )
+ 4095 nil
+ (!type struct 813 nil gc_pointed_to "gimple_statement_omp_parallel_layout"
+ (!srcfileloc "gimple.h" 663)
+ (!fields 3
+ (!pair "clauses"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 654)
+ nil )
+ (!pair "child_fn"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 658)
+ nil )
+ (!pair "data_arg"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 662)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_OMP_PARALLEL_LAYOUT")
+ )
+ 4095 nil
+ (!type already_seen 805)
+ )
+ )
+ )
+
+ (!type struct 814 nil gc_pointed_to "gomp_task"
+ (!srcfileloc "gimple.h" 706)
+ (!fields 3
+ (!pair "copy_fn"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 700)
+ nil )
+ (!pair "arg_size"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 704)
+ nil )
+ (!pair "arg_align"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 705)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_OMP_TASK")
+ )
+ 4095 nil
+ (!type already_seen 812)
+ )
+
+ (!type struct 815 nil gc_pointed_to "gomp_sections"
+ (!srcfileloc "gimple.h" 727)
+ (!fields 2
+ (!pair "clauses"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 721)
+ nil )
+ (!pair "control"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 726)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_OMP_SECTIONS")
+ )
+ 4095 nil
+ (!type already_seen 805)
+ )
+
+ (!type struct 816 nil gc_pointed_to "gomp_single"
+ (!srcfileloc "gimple.h" 763)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_OMP_SINGLE_LAYOUT")
+ )
+ 4095 nil
+ (!type already_seen 807)
+ )
+
+ (!type struct 817 nil gc_pointed_to "gomp_target"
+ (!srcfileloc "gimple.h" 689)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_OMP_PARALLEL_LAYOUT")
+ )
+ 4095 nil
+ (!type already_seen 813)
+ )
+
+ (!type struct 818 nil gc_pointed_to "gomp_teams"
+ (!srcfileloc "gimple.h" 770)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_OMP_PARALLEL_LAYOUT")
+ )
+ 4095 nil
+ (!type already_seen 812)
+ )
+
+ (!type already_seen 320)
+
+ (!type struct 819
+ (!type pointer 820 nil gc_used
+ (!type already_seen 819)
+ )
+ gc_pointed_to "cgraph_node"
+ (!srcfileloc "cgraph.h" 1506)
+ (!fields 49
+ (!pair "DEBUG_FUNCTION"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1078)
+ nil )
+ (!pair "DEBUG_FUNCTION"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1090)
+ nil )
+ (!pair "callees"
+ (!type pointer 821 nil gc_used
+ (!type struct 822
+ (!type already_seen 821)
+ gc_pointed_to "cgraph_edge"
+ (!srcfileloc "cgraph.h" 1977)
+ (!fields 22
+ (!pair "count"
+ (!type already_seen 301)
+ (!srcfileloc "cgraph.h" 1888)
+ nil )
+ (!pair "caller"
+ (!type already_seen 820)
+ (!srcfileloc "cgraph.h" 1889)
+ nil )
+ (!pair "callee"
+ (!type already_seen 820)
+ (!srcfileloc "cgraph.h" 1890)
+ nil )
+ (!pair "prev_caller"
+ (!type already_seen 821)
+ (!srcfileloc "cgraph.h" 1891)
+ nil )
+ (!pair "next_caller"
+ (!type already_seen 821)
+ (!srcfileloc "cgraph.h" 1892)
+ nil )
+ (!pair "prev_callee"
+ (!type already_seen 821)
+ (!srcfileloc "cgraph.h" 1893)
+ nil )
+ (!pair "next_callee"
+ (!type already_seen 821)
+ (!srcfileloc "cgraph.h" 1894)
+ nil )
+ (!pair "call_stmt"
+ (!type already_seen 783)
+ (!srcfileloc "cgraph.h" 1895)
+ nil )
+ (!pair "indirect_info"
+ (!type pointer 823 nil gc_used
+ (!type struct 824
+ (!type already_seen 823)
+ gc_pointed_to "cgraph_indirect_call_info"
+ (!srcfileloc "cgraph.h" 1898)
+ (!fields 13
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1651)
+ nil )
+ (!pair "context"
+ (!type struct 825 nil gc_used "ipa_polymorphic_call_context"
+ (!srcfileloc "cgraph.h" 1642)
+ (!fields 10
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1552)
+ nil )
+ (!pair "speculative_offset"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1553)
+ nil )
+ (!pair "outer_type"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 1554)
+ nil )
+ (!pair "speculative_outer_type"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 1555)
+ nil )
+ (!pair "maybe_in_construction"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1557)
+ nil )
+ (!pair "maybe_derived_type"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1559)
+ nil )
+ (!pair "speculative_maybe_derived_type"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1562)
+ nil )
+ (!pair "invalid"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1565)
+ nil )
+ (!pair "dynamic"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1567)
+ nil )
+ (!pair "DEBUG_FUNCTION"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1629)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "cgraph.h" 1653)
+ nil )
+ (!pair "otr_token"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1655)
+ nil )
+ (!pair "otr_type"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 1657)
+ nil )
+ (!pair "param_index"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1659)
+ nil )
+ (!pair "ecf_flags"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1661)
+ nil )
+ (!pair "num_speculative_call_targets"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1664)
+ nil )
+ (!pair "polymorphic"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1668)
+ nil )
+ (!pair "agg_contents"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1671)
+ nil )
+ (!pair "member_ptr"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1673)
+ nil )
+ (!pair "by_ref"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1676)
+ nil )
+ (!pair "guaranteed_unmodified"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1681)
+ nil )
+ (!pair "vptr_changed"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1684)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "cgraph.h" 1898)
+ nil )
+ (!pair "aux"
+ (!type already_seen 3)
+ (!srcfileloc "cgraph.h" 1899)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "inline_failed"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1902)
+ nil )
+ (!pair "lto_stmt_uid"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1905)
+ nil )
+ (!pair "speculative_id"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1908)
+ nil )
+ (!pair "indirect_inlining_edge"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1910)
+ nil )
+ (!pair "indirect_unknown_callee"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1913)
+ nil )
+ (!pair "call_stmt_cannot_inline_p"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1916)
+ nil )
+ (!pair "can_throw_external"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1918)
+ nil )
+ (!pair "speculative"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1936)
+ nil )
+ (!pair "in_polymorphic_cdtor"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1939)
+ nil )
+ (!pair "m_uid"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1953)
+ nil )
+ (!pair "m_summary_id"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1956)
+ nil )
+ (!pair "DEBUG_FUNCTION"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1972)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ (!option chain_prev string "%h.prev_caller")
+ (!option chain_next string "%h.next_caller")
+ )
+ 4095 nil nil )
+ )
+ (!srcfileloc "cgraph.h" 1383)
+ nil )
+ (!pair "callers"
+ (!type already_seen 821)
+ (!srcfileloc "cgraph.h" 1384)
+ nil )
+ (!pair "indirect_calls"
+ (!type already_seen 821)
+ (!srcfileloc "cgraph.h" 1387)
+ nil )
+ (!pair "next_sibling_clone"
+ (!type already_seen 820)
+ (!srcfileloc "cgraph.h" 1388)
+ nil )
+ (!pair "prev_sibling_clone"
+ (!type already_seen 820)
+ (!srcfileloc "cgraph.h" 1389)
+ nil )
+ (!pair "clones"
+ (!type already_seen 820)
+ (!srcfileloc "cgraph.h" 1390)
+ nil )
+ (!pair "clone_of"
+ (!type already_seen 820)
+ (!srcfileloc "cgraph.h" 1391)
+ nil )
+ (!pair "call_site_hash"
+ (!type pointer 826 nil gc_used
+ (!type user_struct 827
+ (!type already_seen 826)
+ gc_pointed_to "hash_table<cgraph_edge_hasher>"
+ (!srcfileloc "cgraph.h" 1394)
+ (!fields 1
+ (!pair "cgraph_edge_hasher"
+ (!type struct 828 nil gc_used "cgraph_edge_hasher"
+ (!srcfileloc "cgraph.h" 1394)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "cgraph.h" 1394)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cgraph.h" 1394)
+ nil )
+ (!pair "former_clone_of"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 1396)
+ nil )
+ (!pair "simdclone"
+ (!type pointer 829 nil gc_used
+ (!type struct 830
+ (!type already_seen 829)
+ gc_pointed_to "cgraph_simd_clone"
+ (!srcfileloc "cgraph.h" 1400)
+ (!fields 11
+ (!pair "simdlen"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 785)
+ nil )
+ (!pair "nargs"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 789)
+ nil )
+ (!pair "vecsize_int"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 792)
+ nil )
+ (!pair "vecsize_float"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 795)
+ nil )
+ (!pair "mask_mode"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 800)
+ nil )
+ (!pair "vecsize_mangle"
+ (!type already_seen 8)
+ (!srcfileloc "cgraph.h" 805)
+ nil )
+ (!pair "inbranch"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 809)
+ nil )
+ (!pair "prev_clone"
+ (!type already_seen 820)
+ (!srcfileloc "cgraph.h" 812)
+ nil )
+ (!pair "next_clone"
+ (!type already_seen 820)
+ (!srcfileloc "cgraph.h" 812)
+ nil )
+ (!pair "origin"
+ (!type already_seen 820)
+ (!srcfileloc "cgraph.h" 815)
+ nil )
+ (!pair "args"
+ (!type array 831 nil gc_used "1"
+ (!type struct 832 nil gc_used "cgraph_simd_clone_arg"
+ (!srcfileloc "cgraph.h" 818)
+ (!fields 8
+ (!pair "orig_arg"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 741)
+ nil )
+ (!pair "orig_type"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 745)
+ nil )
+ (!pair "vector_arg"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 753)
+ nil )
+ (!pair "vector_type"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 756)
+ nil )
+ (!pair "simd_array"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 765)
+ nil )
+ (!pair "arg_type"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 769)
+ nil )
+ (!pair "alignment"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 772)
+ nil )
+ (!pair "linear_step"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 778)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "cgraph.h" 818)
+ (!options
+ (!option length string "%h.nargs")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "cgraph.h" 1400)
+ nil )
+ (!pair "simd_clones"
+ (!type already_seen 820)
+ (!srcfileloc "cgraph.h" 1402)
+ nil )
+ (!pair "ipa_transforms_to_apply"
+ (!type user_struct 833 nil gc_unused "vec<ipa_opt_pass,va_heap,vl_ptr>"
+ (!srcfileloc "cgraph.h" 1407)
+ (!fields 3
+ (!pair "vl_ptr"
+ (!type undefined 834 nil gc_unused "vl_ptr"
+ (!srcfileloc "cgraph.h" 1407)
+ )
+ (!srcfileloc "cgraph.h" 1407)
+ nil )
+ (!pair "va_heap"
+ (!type undefined 835 nil gc_unused "va_heap"
+ (!srcfileloc "cgraph.h" 1407)
+ )
+ (!srcfileloc "cgraph.h" 1407)
+ nil )
+ (!pair "ipa_opt_pass"
+ (!type pointer 836 nil gc_unused
+ (!type struct 837
+ (!type already_seen 836)
+ gc_unused "ipa_opt_pass_d"
+ (!srcfileloc "cgraph.h" 38)
+ (!fields 0 )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "cgraph.h" 1407)
+ nil )
+ )
+ )
+ (!srcfileloc "cgraph.h" 1407)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "inlined_to"
+ (!type already_seen 820)
+ (!srcfileloc "cgraph.h" 1411)
+ nil )
+ (!pair "rtl"
+ (!type pointer 838 nil gc_used
+ (!type struct 839
+ (!type already_seen 838)
+ gc_pointed_to "cgraph_rtl_info"
+ (!srcfileloc "rtl.h" 4558)
+ (!fields 2
+ (!pair "preferred_incoming_stack_boundary"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 4553)
+ nil )
+ (!pair "function_used_regs"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 4557)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "cgraph.h" 1413)
+ nil )
+ (!pair "count"
+ (!type already_seen 301)
+ (!srcfileloc "cgraph.h" 1416)
+ nil )
+ (!pair "count_materialization_scale"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1419)
+ nil )
+ (!pair "profile_id"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1421)
+ nil )
+ (!pair "unit_id"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1423)
+ nil )
+ (!pair "tp_first_run"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1425)
+ nil )
+ (!pair "thunk"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1428)
+ nil )
+ (!pair "used_as_abstract_origin"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1431)
+ nil )
+ (!pair "lowered"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1433)
+ nil )
+ (!pair "process"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1436)
+ nil )
+ (!pair "frequency"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1439)
+ nil )
+ (!pair "only_called_at_startup"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1441)
+ nil )
+ (!pair "only_called_at_exit"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1443)
+ nil )
+ (!pair "tm_clone"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1448)
+ nil )
+ (!pair "dispatcher_function"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1450)
+ nil )
+ (!pair "calls_comdat_local"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1453)
+ nil )
+ (!pair "icf_merged"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1455)
+ nil )
+ (!pair "nonfreeing_fn"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1459)
+ nil )
+ (!pair "merged_comdat"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1461)
+ nil )
+ (!pair "merged_extern_inline"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1463)
+ nil )
+ (!pair "parallelized_function"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1465)
+ nil )
+ (!pair "split_part"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1467)
+ nil )
+ (!pair "indirect_call_target"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1469)
+ nil )
+ (!pair "local"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1472)
+ nil )
+ (!pair "versionable"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1474)
+ nil )
+ (!pair "can_change_signature"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1477)
+ nil )
+ (!pair "redefined_extern_inline"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1480)
+ nil )
+ (!pair "tm_may_enter_irr"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1482)
+ nil )
+ (!pair "ipcp_clone"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1484)
+ nil )
+ (!pair "declare_variant_alt"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1487)
+ nil )
+ (!pair "calls_declare_variant_alt"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1489)
+ nil )
+ (!pair "gc_candidate"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1493)
+ nil )
+ (!pair "m_uid"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1497)
+ nil )
+ (!pair "m_summary_id"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1500)
+ nil )
+ )
+ (!options
+ (!option tag string "SYMTAB_FUNCTION")
+ )
+ 4095 nil
+ (!type already_seen 320)
+ )
+
+ (!type struct 840 nil gc_pointed_to "varpool_node"
+ (!srcfileloc "cgraph.h" 2118)
+ (!fields 5
+ (!pair "DEBUG_FUNCTION"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 1997)
+ nil )
+ (!pair "output"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2096)
+ nil )
+ (!pair "dynamically_initialized"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2100)
+ nil )
+ (!pair "tls_model"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2102)
+ nil )
+ (!pair "used_by_single_function"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2108)
+ nil )
+ )
+ (!options
+ (!option tag string "SYMTAB_VARIABLE")
+ )
+ 4095 nil
+ (!type already_seen 320)
+ )
+
+ (!type already_seen 822)
+
+ (!type already_seen 215)
+
+ (!type struct 841
+ (!type pointer 842 nil gc_unused
+ (!type already_seen 841)
+ )
+ gc_unused "gcc_options"
+ (!srcfileloc "c-family/c-pragma.cc" 1236)
+ (!fields 0 )
+ nil 1542 nil nil )
+
+ (!type already_seen 627)
+
+ (!type already_seen 578)
+
+ (!type struct 843 nil gc_unused "cl_option"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 844 nil gc_unused "cl_decoded_option"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 845 nil gc_unused "cl_option_handlers"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 846 nil gc_unused "diagnostic_context"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 847 nil gc_unused "pretty_printer"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 848 nil gc_unused "diagnostic_event_id_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 849 nil gc_unused "bitmap_view"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 274)
+
+ (!type already_seen 270)
+
+ (!type struct 850 nil gc_unused "rtl_opt_pass"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 851 nil gc_unused "context"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 852 nil gc_unused "std::pair<tree,tree>"
+ (!srcfileloc "coretypes.h" 362)
+ (!fields 2
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "coretypes.h" 362)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "coretypes.h" 362)
+ nil )
+ )
+ )
+
+ (!type user_struct 853 nil gc_unused "std::pair<char*,int>"
+ (!srcfileloc "coretypes.h" 363)
+ (!fields 1
+ (!pair "int"
+ (!type already_seen 374)
+ (!srcfileloc "coretypes.h" 363)
+ nil )
+ )
+ )
+
+ (!type already_seen 374)
+
+ (!type struct 854 nil gc_unused "kv_pair"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 855 nil gc_unused "_dont_use_rtx_here_"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 856 nil gc_unused "_dont_use_rtvec_here_"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 857 nil gc_unused "_dont_use_rtx_insn_here_"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type union 858 nil gc_unused "_dont_use_tree_here_"nil
+ (!fields 0 )
+ nil 0 nil )
+
+ (!type struct 859 nil gc_unused "aarch_branch_protect_type"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 860 nil gc_unused "cpp_reason_option_codes_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 423)
+
+ (!type already_seen 422)
+
+ (!type struct 861 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/config/arm/arm.h:1676"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type undefined 862 nil gc_unused "TARGET_UNIT"
+ (!srcfileloc "defaults.h" 1461)
+ )
+
+ (!type struct 863
+ (!type pointer 864 nil gc_unused
+ (!type already_seen 863)
+ )
+ gc_unused "splay_tree_node_s"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 865
+ (!type pointer 866 nil gc_unused
+ (!type already_seen 865)
+ )
+ gc_unused "splay_tree_s"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 867 nil gc_unused "bitmap_usage"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 390)
+
+ (!type struct 868 nil gc_unused "bitmap_iterator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 869 nil gc_unused "auto_bitmap"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 870 nil gc_unused "base_bitmap_view"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 497)
+
+ (!type already_seen 496)
+
+ (!type undefined 871 nil gc_unused "FIXED_WIDE_INT"
+ (!srcfileloc "wide-int.h" 323)
+ )
+
+ (!type user_struct 872 nil gc_unused "generic_wide_int<fixed_wide_int_storage<WIDE_INT_MAX_PRECISION*2>>"
+ (!srcfileloc "wide-int.h" 327)
+ (!fields 1
+ (!pair "fixed_wide_int_storage<WIDE_INT_MAX_PRECISION"
+ (!type pointer 873 nil gc_unused
+ (!type struct 874
+ (!type already_seen 873)
+ gc_unused "fixed_wide_int_storage<WIDE_INT_MAX_PRECISION"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "wide-int.h" 327)
+ nil )
+ )
+ )
+
+ (!type already_seen 874)
+
+ (!type struct 875 nil gc_unused "wide_int_ref_storage"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 876 nil gc_unused "generic_wide_int<wide_int_ref_storage<false>>"
+ (!srcfileloc "wide-int.h" 334)
+ (!fields 1
+ (!pair "wide_int_ref_storage<false"
+ (!type user_struct 877 nil gc_unused "wide_int_ref_storage<false"
+ (!srcfileloc "wide-int.h" 334)
+ (!fields 1
+ (!pair "false"
+ (!type undefined 878 nil gc_unused "false"
+ (!srcfileloc "wide-int.h" 334)
+ )
+ (!srcfileloc "wide-int.h" 334)
+ nil )
+ )
+ )
+ (!srcfileloc "wide-int.h" 334)
+ nil )
+ )
+ )
+
+ (!type already_seen 877)
+
+ (!type already_seen 878)
+
+ (!type struct 879 nil gc_unused "binary_traits"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 880 nil gc_unused "generic_wide_int<fixed_wide_int_storage<int_traits<T2>precision>>"
+ (!srcfileloc "wide-int.h" 438)
+ (!fields 2
+ (!pair "precision"
+ (!type undefined 881 nil gc_unused "precision"
+ (!srcfileloc "wide-int.h" 438)
+ )
+ (!srcfileloc "wide-int.h" 438)
+ nil )
+ (!pair "fixed_wide_int_storage<int_traits<T2"
+ (!type user_struct 882 nil gc_unused "fixed_wide_int_storage<int_traits<T2"
+ (!srcfileloc "wide-int.h" 438)
+ (!fields 1
+ (!pair "int_traits<T2"
+ (!type user_struct 883 nil gc_unused "int_traits<T2"
+ (!srcfileloc "wide-int.h" 438)
+ (!fields 1
+ (!pair "T2"
+ (!type undefined 884 nil gc_unused "T2"
+ (!srcfileloc "wide-int.h" 438)
+ )
+ (!srcfileloc "wide-int.h" 438)
+ nil )
+ )
+ )
+ (!srcfileloc "wide-int.h" 438)
+ nil )
+ )
+ )
+ (!srcfileloc "wide-int.h" 438)
+ nil )
+ )
+ )
+
+ (!type already_seen 882)
+
+ (!type already_seen 883)
+
+ (!type already_seen 884)
+
+ (!type already_seen 881)
+
+ (!type user_struct 885 nil gc_unused "generic_wide_int<fixed_wide_int_storage<int_traits<T1>precision>>"
+ (!srcfileloc "wide-int.h" 459)
+ (!fields 2
+ (!pair "precision"
+ (!type already_seen 881)
+ (!srcfileloc "wide-int.h" 459)
+ nil )
+ (!pair "fixed_wide_int_storage<int_traits<T1"
+ (!type user_struct 886 nil gc_unused "fixed_wide_int_storage<int_traits<T1"
+ (!srcfileloc "wide-int.h" 459)
+ (!fields 1
+ (!pair "int_traits<T1"
+ (!type user_struct 887 nil gc_unused "int_traits<T1"
+ (!srcfileloc "wide-int.h" 459)
+ (!fields 1
+ (!pair "T1"
+ (!type undefined 888 nil gc_unused "T1"
+ (!srcfileloc "wide-int.h" 459)
+ )
+ (!srcfileloc "wide-int.h" 459)
+ nil )
+ )
+ )
+ (!srcfileloc "wide-int.h" 459)
+ nil )
+ )
+ )
+ (!srcfileloc "wide-int.h" 459)
+ nil )
+ )
+ )
+
+ (!type already_seen 886)
+
+ (!type already_seen 887)
+
+ (!type already_seen 888)
+
+ (!type struct 889 nil gc_unused "storage_ref"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 890 nil gc_unused "storage"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 891 nil gc_unused "generic_wide_int"
+ (!srcfileloc "wide-int.h" 774)
+ (!fields 0 )
+ nil 4095 nil
+ (!type already_seen 890)
+ )
+
+ (!type undefined 892 nil gc_unused "ASSIGNMENT_OPERATOR"
+ (!srcfileloc "wide-int.h" 754)
+ )
+
+ (!type struct 893 nil gc_unused "int_traits"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 894 nil gc_unused "fixed_wide_int_storage"
+ (!srcfileloc "wide-int.h" 1235)
+ (!fields 2
+ (!pair "val"
+ (!type array 895 nil gc_unused "(N + HOST_BITS_PER_WIDE_INT + 1) / HOST_BITS_PER_WIDE_INT"
+ (!type already_seen 2)
+ )
+ (!srcfileloc "wide-int.h" 1217)
+ nil )
+ (!pair "len"
+ (!type already_seen 2)
+ (!srcfileloc "wide-int.h" 1218)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 896 nil gc_unused "trailing_wide_int_storage"
+ (!srcfileloc "wide-int.h" 1366)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 897 nil gc_unused "generic_wide_int<trailing_wide_int_storage>"
+ (!srcfileloc "wide-int.h" 1366)
+ (!fields 1
+ (!pair "trailing_wide_int_storage"
+ (!type already_seen 896)
+ (!srcfileloc "wide-int.h" 1366)
+ nil )
+ )
+ )
+
+ (!type user_struct 898 nil gc_unused "trailing_wide_ints"
+ (!srcfileloc "wide-int.h" 1417)
+ (!fields 0 )
+ )
+
+ (!type struct 899 nil gc_unused "primitive_int_traits"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 900 nil gc_unused "hwi_with_prec"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 901 nil gc_unused "ints_for"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 902 nil gc_unused "never_used1"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 903 nil gc_unused "never_used2"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 904
+ (!type pointer 905 nil gc_used
+ (!type already_seen 904)
+ )
+ gc_pointed_to "coverage_data"
+ (!srcfileloc "coverage.cc" 66)
+ (!fields 6
+ (!pair "next"
+ (!type already_seen 905)
+ (!srcfileloc "coverage.cc" 60)
+ nil )
+ (!pair "ident"
+ (!type already_seen 2)
+ (!srcfileloc "coverage.cc" 61)
+ nil )
+ (!pair "lineno_checksum"
+ (!type already_seen 2)
+ (!srcfileloc "coverage.cc" 62)
+ nil )
+ (!pair "cfg_checksum"
+ (!type already_seen 2)
+ (!srcfileloc "coverage.cc" 63)
+ nil )
+ (!pair "fn_decl"
+ (!type already_seen 23)
+ (!srcfileloc "coverage.cc" 64)
+ nil )
+ (!pair "ctr_vars"
+ (!type array 906 nil gc_used "GCOV_COUNTERS"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "coverage.cc" 65)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 4095 nil nil )
+
+ (!type struct 907 nil gc_unused "counts_entry"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 908
+ (!type pointer 909 nil gc_unused
+ (!type already_seen 908)
+ )
+ gc_unused "predefined_function_abi"
+ (!srcfileloc "emit-rtl.h" 75)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 910 nil gc_unused "addr_diff_vec_flags"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 229)
+
+ (!type already_seen 240)
+
+ (!type union 911 nil gc_unused "rtunion"nil
+ (!fields 0 )
+ nil 0 nil )
+
+ (!type struct 912 nil gc_unused "reg_info"
+ (!srcfileloc "rtl.h" 230)
+ (!fields 4
+ (!pair "regno"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 222)
+ nil )
+ (!pair "nregs"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 225)
+ nil )
+ (!pair "unused"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 226)
+ nil )
+ (!pair "attrs"
+ (!type already_seen 239)
+ (!srcfileloc "rtl.h" 229)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type already_seen 211)
+
+ (!type already_seen 213)
+
+ (!type already_seen 209)
+
+ (!type already_seen 222)
+
+ (!type already_seen 80)
+
+ (!type user_struct 913 nil gc_unused "trailing_wide_ints<NUM_POLY_INT_COEFFS>"
+ (!srcfileloc "rtl.h" 291)
+ (!fields 1
+ (!pair "NUM_POLY_INT_COEFFS"
+ (!type undefined 914 nil gc_unused "NUM_POLY_INT_COEFFS"
+ (!srcfileloc "rtl.h" 291)
+ )
+ (!srcfileloc "rtl.h" 291)
+ nil )
+ )
+ )
+
+ (!type already_seen 914)
+
+ (!type struct 915 nil gc_unused "const_poly_int_def"
+ (!srcfileloc "rtl.h" 292)
+ (!fields 1
+ (!pair "coeffs"
+ (!type already_seen 913)
+ (!srcfileloc "rtl.h" 291)
+ nil )
+ )
+ (!options
+ (!option variable_size string "")
+ )
+ 4095 nil nil )
+
+ (!type already_seen 104)
+
+ (!type already_seen 103)
+
+ (!type already_seen 38)
+
+ (!type already_seen 42)
+
+ (!type union 916 nil gc_used "u"
+ (!srcfileloc "ggc-tests.cc" 129)
+ (!fields 2
+ (!pair "u_test_struct"
+ (!type pointer 917 nil gc_used
+ (!type struct 918
+ (!type already_seen 917)
+ gc_pointed_to "test_struct"
+ (!srcfileloc "ggc-tests.cc" 42)
+ (!fields 1
+ (!pair "other"
+ (!type already_seen 917)
+ (!srcfileloc "ggc-tests.cc" 39)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "ggc-tests.cc" 127)
+ (!options
+ (!option tag string "WHICH_FIELD_USE_TEST_STRUCT")
+ )
+ )
+ (!pair "u_test_other"
+ (!type pointer 919 nil gc_used
+ (!type struct 920
+ (!type already_seen 919)
+ gc_pointed_to "test_other"
+ (!srcfileloc "ggc-tests.cc" 128)
+ (!fields 2
+ (!pair "dummy"
+ (!type array 921 nil gc_used "256"
+ (!type already_seen 8)
+ )
+ (!srcfileloc "ggc-tests.cc" 96)
+ nil )
+ (!pair "m_ptr"
+ (!type already_seen 917)
+ (!srcfileloc "ggc-tests.cc" 97)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "ggc-tests.cc" 128)
+ (!options
+ (!option tag string "WHICH_FIELD_USE_TEST_OTHER")
+ )
+ )
+ )
+ nil 4095 nil )
+
+ (!type already_seen 225)
+
+ (!type already_seen 268)
+
+ (!type already_seen 223)
+
+ (!type already_seen 316)
+
+ (!type already_seen 315)
+
+ (!type already_seen 314)
+
+ (!type already_seen 313)
+
+ (!type already_seen 312)
+
+ (!type already_seen 311)
+
+ (!type already_seen 310)
+
+ (!type already_seen 309)
+
+ (!type already_seen 308)
+
+ (!type already_seen 307)
+
+ (!type already_seen 306)
+
+ (!type already_seen 305)
+
+ (!type already_seen 304)
+
+ (!type already_seen 303)
+
+ (!type already_seen 302)
+
+ (!type already_seen 267)
+
+ (!type already_seen 266)
+
+ (!type already_seen 265)
+
+ (!type already_seen 264)
+
+ (!type already_seen 263)
+
+ (!type already_seen 262)
+
+ (!type already_seen 261)
+
+ (!type already_seen 260)
+
+ (!type already_seen 259)
+
+ (!type already_seen 258)
+
+ (!type already_seen 257)
+
+ (!type already_seen 256)
+
+ (!type already_seen 255)
+
+ (!type already_seen 254)
+
+ (!type already_seen 253)
+
+ (!type already_seen 252)
+
+ (!type already_seen 251)
+
+ (!type already_seen 250)
+
+ (!type already_seen 249)
+
+ (!type already_seen 248)
+
+ (!type already_seen 247)
+
+ (!type already_seen 246)
+
+ (!type already_seen 245)
+
+ (!type already_seen 244)
+
+ (!type already_seen 243)
+
+ (!type already_seen 242)
+
+ (!type already_seen 241)
+
+ (!type already_seen 238)
+
+ (!type already_seen 237)
+
+ (!type already_seen 236)
+
+ (!type already_seen 235)
+
+ (!type already_seen 234)
+
+ (!type already_seen 230)
+
+ (!type already_seen 227)
+
+ (!type already_seen 226)
+
+ (!type already_seen 208)
+
+ (!type already_seen 207)
+
+ (!type already_seen 206)
+
+ (!type already_seen 205)
+
+ (!type already_seen 204)
+
+ (!type already_seen 203)
+
+ (!type already_seen 202)
+
+ (!type already_seen 201)
+
+ (!type already_seen 200)
+
+ (!type already_seen 199)
+
+ (!type already_seen 198)
+
+ (!type already_seen 197)
+
+ (!type already_seen 196)
+
+ (!type already_seen 195)
+
+ (!type already_seen 194)
+
+ (!type already_seen 193)
+
+ (!type already_seen 192)
+
+ (!type already_seen 191)
+
+ (!type already_seen 190)
+
+ (!type already_seen 189)
+
+ (!type already_seen 188)
+
+ (!type already_seen 187)
+
+ (!type already_seen 186)
+
+ (!type already_seen 185)
+
+ (!type already_seen 184)
+
+ (!type already_seen 183)
+
+ (!type already_seen 182)
+
+ (!type already_seen 181)
+
+ (!type already_seen 180)
+
+ (!type already_seen 179)
+
+ (!type already_seen 178)
+
+ (!type already_seen 177)
+
+ (!type already_seen 176)
+
+ (!type already_seen 175)
+
+ (!type already_seen 174)
+
+ (!type already_seen 173)
+
+ (!type already_seen 172)
+
+ (!type already_seen 171)
+
+ (!type already_seen 170)
+
+ (!type already_seen 169)
+
+ (!type already_seen 168)
+
+ (!type already_seen 167)
+
+ (!type already_seen 166)
+
+ (!type already_seen 165)
+
+ (!type already_seen 164)
+
+ (!type already_seen 163)
+
+ (!type already_seen 162)
+
+ (!type already_seen 161)
+
+ (!type already_seen 160)
+
+ (!type already_seen 159)
+
+ (!type already_seen 158)
+
+ (!type already_seen 157)
+
+ (!type already_seen 156)
+
+ (!type already_seen 155)
+
+ (!type already_seen 154)
+
+ (!type already_seen 153)
+
+ (!type already_seen 152)
+
+ (!type already_seen 151)
+
+ (!type already_seen 150)
+
+ (!type already_seen 149)
+
+ (!type already_seen 148)
+
+ (!type already_seen 147)
+
+ (!type already_seen 146)
+
+ (!type already_seen 145)
+
+ (!type already_seen 144)
+
+ (!type already_seen 143)
+
+ (!type already_seen 142)
+
+ (!type already_seen 141)
+
+ (!type already_seen 140)
+
+ (!type already_seen 139)
+
+ (!type already_seen 138)
+
+ (!type already_seen 137)
+
+ (!type already_seen 136)
+
+ (!type already_seen 135)
+
+ (!type already_seen 134)
+
+ (!type already_seen 133)
+
+ (!type already_seen 132)
+
+ (!type already_seen 131)
+
+ (!type already_seen 130)
+
+ (!type already_seen 129)
+
+ (!type already_seen 128)
+
+ (!type already_seen 127)
+
+ (!type already_seen 126)
+
+ (!type already_seen 125)
+
+ (!type already_seen 124)
+
+ (!type already_seen 123)
+
+ (!type already_seen 122)
+
+ (!type already_seen 121)
+
+ (!type already_seen 120)
+
+ (!type already_seen 119)
+
+ (!type already_seen 118)
+
+ (!type already_seen 117)
+
+ (!type already_seen 116)
+
+ (!type already_seen 115)
+
+ (!type already_seen 114)
+
+ (!type already_seen 113)
+
+ (!type already_seen 112)
+
+ (!type already_seen 111)
+
+ (!type already_seen 110)
+
+ (!type already_seen 109)
+
+ (!type already_seen 108)
+
+ (!type already_seen 107)
+
+ (!type already_seen 106)
+
+ (!type already_seen 105)
+
+ (!type struct 922 nil gc_unused "full_rtx_costs"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 923 nil gc_unused "subreg_shape"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 924 nil gc_unused "address_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 925 nil gc_unused "std::pair<rtx,machine_mode>"
+ (!srcfileloc "rtl.h" 2252)
+ (!fields 2
+ (!pair "machine_mode"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 2252)
+ nil )
+ (!pair "rtx"
+ (!type already_seen 100)
+ (!srcfileloc "rtl.h" 2252)
+ nil )
+ )
+ )
+
+ (!type user_struct 926 nil gc_unused "poly_int<NUM_POLY_INT_COEFFS,generic_wide_int<wide_int_ref_storage<false,false>>>"
+ (!srcfileloc "rtl.h" 2340)
+ (!fields 3
+ (!pair "false"
+ (!type already_seen 878)
+ (!srcfileloc "rtl.h" 2340)
+ nil )
+ (!pair "generic_wide_int<wide_int_ref_storage<false"
+ (!type user_struct 927 nil gc_unused "generic_wide_int<wide_int_ref_storage<false"
+ (!srcfileloc "rtl.h" 2340)
+ (!fields 1
+ (!pair "wide_int_ref_storage<false"
+ (!type already_seen 877)
+ (!srcfileloc "rtl.h" 2340)
+ nil )
+ )
+ )
+ (!srcfileloc "rtl.h" 2340)
+ nil )
+ (!pair "NUM_POLY_INT_COEFFS"
+ (!type already_seen 914)
+ (!srcfileloc "rtl.h" 2340)
+ nil )
+ )
+ )
+
+ (!type already_seen 927)
+
+ (!type already_seen 353)
+
+ (!type struct 928 nil gc_unused "simplify_context"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 929 nil gc_unused "subreg_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 590)
+
+ (!type struct 930 nil gc_unused "rtl_hooks"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 839)
+
+ (!type struct 931 nil gc_unused "rtx_comparison"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 932 nil gc_unused "expand_operand"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 933 nil gc_unused "code_helper"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 934 nil gc_unused "auto_suppress_location_wrappers"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 935 nil gc_unused "tree_vec_range"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 936 nil gc_used "tree_decl_map_cache_hasher"
+ (!srcfileloc "tree.cc" 218)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 937 nil gc_used "tree_vec_map_cache_hasher"
+ (!srcfileloc "tree.cc" 224)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 938 nil gc_unused "tree_decl_hash"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 939 nil gc_unused "tree_type_hash"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 940 nil gc_unused "tree_ssa_name_hash"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 941 nil gc_used "tree_hash"
+ (!srcfileloc "except.cc" 151)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 942 nil gc_unused "tree_cache_traits"
+ (!srcfileloc "tree.h" 5768)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 943 nil gc_unused "hash_map<tree,tree,tree_cache_traits>"
+ (!srcfileloc "tree.h" 5768)
+ (!fields 3
+ (!pair "tree_cache_traits"
+ (!type already_seen 942)
+ (!srcfileloc "tree.h" 5768)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "tree.h" 5768)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "tree.h" 5768)
+ nil )
+ )
+ )
+
+ (!type struct 944 nil gc_used "decl_tree_cache_traits"
+ (!srcfileloc "tree.h" 5774)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 945
+ (!type pointer 946 nil gc_used
+ (!type already_seen 945)
+ )
+ gc_pointed_to "hash_map<tree,tree,decl_tree_cache_traits>"
+ (!srcfileloc "tree.h" 5774)
+ (!fields 3
+ (!pair "decl_tree_cache_traits"
+ (!type already_seen 944)
+ (!srcfileloc "tree.h" 5774)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "tree.h" 5774)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "tree.h" 5774)
+ nil )
+ )
+ )
+
+ (!type struct 947 nil gc_used "type_tree_cache_traits"
+ (!srcfileloc "tree.h" 5780)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 948
+ (!type pointer 949 nil gc_used
+ (!type already_seen 948)
+ )
+ gc_pointed_to "hash_map<tree,tree,type_tree_cache_traits>"
+ (!srcfileloc "tree.h" 5780)
+ (!fields 3
+ (!pair "type_tree_cache_traits"
+ (!type already_seen 947)
+ (!srcfileloc "tree.h" 5780)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "tree.h" 5780)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "tree.h" 5780)
+ nil )
+ )
+ )
+
+ (!type struct 950 nil gc_used "decl_tree_traits"
+ (!srcfileloc "tree.h" 5785)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 951
+ (!type pointer 952 nil gc_used
+ (!type already_seen 951)
+ )
+ gc_pointed_to "hash_map<tree,tree,decl_tree_traits>"
+ (!srcfileloc "tree.h" 5785)
+ (!fields 3
+ (!pair "decl_tree_traits"
+ (!type already_seen 950)
+ (!srcfileloc "tree.h" 5785)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "tree.h" 5785)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "tree.h" 5785)
+ nil )
+ )
+ )
+
+ (!type struct 953 nil gc_unused "unextended_tree"
+ (!srcfileloc "tree.h" 6138)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 954 nil gc_unused "extended_tree"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 955 nil gc_unused "extended_tree<WIDE_INT_MAX_PRECISION>"
+ (!srcfileloc "tree.h" 6118)
+ (!fields 1
+ (!pair "WIDE_INT_MAX_PRECISION"
+ (!type already_seen 871)
+ (!srcfileloc "tree.h" 6118)
+ nil )
+ )
+ )
+
+ (!type user_struct 956 nil gc_unused "extended_tree<ADDR_MAX_PRECISION>"
+ (!srcfileloc "tree.h" 6119)
+ (!fields 1
+ (!pair "ADDR_MAX_PRECISION"
+ (!type already_seen 871)
+ (!srcfileloc "tree.h" 6119)
+ nil )
+ )
+ )
+
+ (!type user_struct 957 nil gc_unused "generic_wide_int<widest_extended_tree>"
+ (!srcfileloc "tree.h" 6121)
+ (!fields 1
+ (!pair "widest_extended_tree"
+ (!type already_seen 955)
+ (!srcfileloc "tree.h" 6121)
+ nil )
+ )
+ )
+
+ (!type user_struct 958 nil gc_unused "generic_wide_int<offset_extended_tree>"
+ (!srcfileloc "tree.h" 6122)
+ (!fields 1
+ (!pair "offset_extended_tree"
+ (!type already_seen 956)
+ (!srcfileloc "tree.h" 6122)
+ nil )
+ )
+ )
+
+ (!type user_struct 959 nil gc_unused "generic_wide_int<wide_int_ref_storage<false,false>>"
+ (!srcfileloc "tree.h" 6123)
+ (!fields 2
+ (!pair "false"
+ (!type already_seen 878)
+ (!srcfileloc "tree.h" 6123)
+ nil )
+ (!pair "wide_int_ref_storage<false"
+ (!type already_seen 877)
+ (!srcfileloc "tree.h" 6123)
+ nil )
+ )
+ )
+
+ (!type user_struct 960 nil gc_unused "poly_int<NUM_POLY_INT_COEFFS,generic_wide_int<widest_extended_tree>>"
+ (!srcfileloc "tree.h" 6132)
+ (!fields 2
+ (!pair "generic_wide_int<widest_extended_tree"
+ (!type user_struct 961 nil gc_unused "generic_wide_int<widest_extended_tree"
+ (!srcfileloc "tree.h" 6132)
+ (!fields 1
+ (!pair "widest_extended_tree"
+ (!type already_seen 955)
+ (!srcfileloc "tree.h" 6132)
+ nil )
+ )
+ )
+ (!srcfileloc "tree.h" 6132)
+ nil )
+ (!pair "NUM_POLY_INT_COEFFS"
+ (!type already_seen 914)
+ (!srcfileloc "tree.h" 6132)
+ nil )
+ )
+ )
+
+ (!type already_seen 961)
+
+ (!type user_struct 962 nil gc_unused "poly_int<NUM_POLY_INT_COEFFS,generic_wide_int<offset_extended_tree>>"
+ (!srcfileloc "tree.h" 6135)
+ (!fields 2
+ (!pair "generic_wide_int<offset_extended_tree"
+ (!type user_struct 963 nil gc_unused "generic_wide_int<offset_extended_tree"
+ (!srcfileloc "tree.h" 6135)
+ (!fields 1
+ (!pair "offset_extended_tree"
+ (!type already_seen 956)
+ (!srcfileloc "tree.h" 6135)
+ nil )
+ )
+ )
+ (!srcfileloc "tree.h" 6135)
+ nil )
+ (!pair "NUM_POLY_INT_COEFFS"
+ (!type already_seen 914)
+ (!srcfileloc "tree.h" 6135)
+ nil )
+ )
+ )
+
+ (!type already_seen 963)
+
+ (!type user_struct 964 nil gc_unused "poly_int<NUM_POLY_INT_COEFFS,generic_wide_int<unextended_tree>>"
+ (!srcfileloc "tree.h" 6138)
+ (!fields 2
+ (!pair "generic_wide_int<unextended_tree"
+ (!type user_struct 965 nil gc_unused "generic_wide_int<unextended_tree"
+ (!srcfileloc "tree.h" 6138)
+ (!fields 1
+ (!pair "unextended_tree"
+ (!type already_seen 953)
+ (!srcfileloc "tree.h" 6138)
+ nil )
+ )
+ )
+ (!srcfileloc "tree.h" 6138)
+ nil )
+ (!pair "NUM_POLY_INT_COEFFS"
+ (!type already_seen 914)
+ (!srcfileloc "tree.h" 6138)
+ nil )
+ )
+ )
+
+ (!type already_seen 965)
+
+ (!type user_struct 966 nil gc_unused "generic_wide_int<extended_tree<N>>"
+ (!srcfileloc "tree.h" 6148)
+ (!fields 1
+ (!pair "extended_tree<N"
+ (!type user_struct 967 nil gc_unused "extended_tree<N"
+ (!srcfileloc "tree.h" 6148)
+ (!fields 1
+ (!pair "N"
+ (!type undefined 968 nil gc_unused "N"
+ (!srcfileloc "tree.h" 6148)
+ )
+ (!srcfileloc "tree.h" 6148)
+ nil )
+ )
+ )
+ (!srcfileloc "tree.h" 6148)
+ nil )
+ )
+ )
+
+ (!type already_seen 967)
+
+ (!type already_seen 968)
+
+ (!type user_struct 969 nil gc_unused "generic_wide_int<unextended_tree>"
+ (!srcfileloc "tree.h" 6155)
+ (!fields 1
+ (!pair "unextended_tree"
+ (!type already_seen 953)
+ (!srcfileloc "tree.h" 6155)
+ nil )
+ )
+ )
+
+ (!type user_struct 970 nil gc_unused "generic_wide_int<wi::extended_tree<WIDE_INT_MAX_PRECISION*2>>"
+ (!srcfileloc "tree.h" 6162)
+ (!fields 1
+ (!pair "wi::extended_tree<WIDE_INT_MAX_PRECISION"
+ (!type pointer 971 nil gc_unused
+ (!type struct 972
+ (!type already_seen 971)
+ gc_unused "wi::extended_tree<WIDE_INT_MAX_PRECISION"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "tree.h" 6162)
+ nil )
+ )
+ )
+
+ (!type already_seen 972)
+
+ (!type undefined 973 nil gc_unused "typename"
+ (!srcfileloc "tree.h" 6413)
+ )
+
+ (!type struct 974 nil gc_used "int_n_trees_t"
+ (!srcfileloc "tree.h" 6498)
+ (!fields 2
+ (!pair "signed_type"
+ (!type already_seen 23)
+ (!srcfileloc "tree.h" 6496)
+ nil )
+ (!pair "unsigned_type"
+ (!type already_seen 23)
+ (!srcfileloc "tree.h" 6497)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 975 nil gc_unused "builtin_structptr_type"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 976 nil gc_unused "op_location_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 560)
+
+ (!type already_seen 554)
+
+ (!type already_seen 487)
+
+ (!type struct 977 nil gc_used "alias_pair"
+ (!srcfileloc "tree-core.h" 2339)
+ (!fields 2
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 995)
+ nil )
+ (!pair "target"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 996)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type already_seen 27)
+
+ (!type already_seen 28)
+
+ (!type already_seen 29)
+
+ (!type already_seen 30)
+
+ (!type already_seen 26)
+
+ (!type already_seen 25)
+
+ (!type already_seen 31)
+
+ (!type already_seen 32)
+
+ (!type already_seen 33)
+
+ (!type already_seen 37)
+
+ (!type already_seen 40)
+
+ (!type already_seen 47)
+
+ (!type already_seen 49)
+
+ (!type already_seen 45)
+
+ (!type already_seen 35)
+
+ (!type already_seen 50)
+
+ (!type already_seen 546)
+
+ (!type already_seen 547)
+
+ (!type already_seen 572)
+
+ (!type already_seen 571)
+
+ (!type already_seen 569)
+
+ (!type already_seen 549)
+
+ (!type already_seen 561)
+
+ (!type already_seen 563)
+
+ (!type already_seen 558)
+
+ (!type already_seen 552)
+
+ (!type already_seen 551)
+
+ (!type already_seen 798)
+
+ (!type already_seen 574)
+
+ (!type already_seen 573)
+
+ (!type already_seen 86)
+
+ (!type already_seen 564)
+
+ (!type already_seen 518)
+
+ (!type already_seen 517)
+
+ (!type already_seen 521)
+
+ (!type already_seen 519)
+
+ (!type already_seen 545)
+
+ (!type already_seen 565)
+
+ (!type already_seen 51)
+
+ (!type already_seen 54)
+
+ (!type already_seen 52)
+
+ (!type already_seen 99)
+
+ (!type already_seen 346)
+
+ (!type already_seen 347)
+
+ (!type already_seen 348)
+
+ (!type already_seen 349)
+
+ (!type already_seen 344)
+
+ (!type already_seen 318)
+
+ (!type already_seen 345)
+
+ (!type already_seen 317)
+
+ (!type already_seen 351)
+
+ (!type already_seen 516)
+
+ (!type already_seen 350)
+
+ (!type already_seen 568)
+
+ (!type already_seen 566)
+
+ (!type already_seen 581)
+
+ (!type already_seen 576)
+
+ (!type already_seen 584)
+
+ (!type already_seen 582)
+
+ (!type already_seen 629)
+
+ (!type struct 978 nil gc_unused "attribute_spec"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 979
+ (!type pointer 980 nil gc_unused
+ (!type already_seen 979)
+ )
+ gc_unused "record_layout_info_s"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 981 nil gc_unused "function_args_iterator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 982 nil gc_used "tree_map_base"
+ (!srcfileloc "tree-core.h" 2214)
+ (!fields 1
+ (!pair "from"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 2213)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 983 nil gc_pointed_to "tree_map"
+ (!srcfileloc "tree-core.h" 2222)
+ (!fields 3
+ (!pair "base"
+ (!type already_seen 982)
+ (!srcfileloc "tree-core.h" 2219)
+ nil )
+ (!pair "hash"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 2220)
+ nil )
+ (!pair "to"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 2221)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 984 nil gc_pointed_to "tree_decl_map"
+ (!srcfileloc "tree-core.h" 2228)
+ (!fields 2
+ (!pair "base"
+ (!type already_seen 982)
+ (!srcfileloc "tree-core.h" 2226)
+ nil )
+ (!pair "to"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 2227)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 985 nil gc_pointed_to "tree_int_map"
+ (!srcfileloc "tree-core.h" 2234)
+ (!fields 2
+ (!pair "base"
+ (!type already_seen 982)
+ (!srcfileloc "tree-core.h" 2232)
+ nil )
+ (!pair "to"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 2233)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 986 nil gc_pointed_to "tree_vec_map"
+ (!srcfileloc "tree-core.h" 2240)
+ (!fields 2
+ (!pair "base"
+ (!type already_seen 982)
+ (!srcfileloc "tree-core.h" 2238)
+ nil )
+ (!pair "to"
+ (!type already_seen 85)
+ (!srcfileloc "tree-core.h" 2239)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 987 nil gc_unused "call_expr_arg_iterator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 988 nil gc_unused "const_call_expr_arg_iterator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 989 nil gc_used "builtin_info_type"
+ (!srcfileloc "tree-core.h" 2375)
+ (!fields 3
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 2262)
+ nil )
+ (!pair "implicit_p"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 2265)
+ nil )
+ (!pair "declared_p"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 2267)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 990 nil gc_unused "floatn_type_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 991 nil gc_unused "tree_code_type_tmpl"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 992 nil gc_unused "tree_code_length_tmpl"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 993
+ (!type pointer 994 nil gc_used
+ (!type already_seen 993)
+ )
+ gc_pointed_to "vec<alias_pair,va_gc>"
+ (!srcfileloc "tree-core.h" 2339)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "tree-core.h" 2339)
+ nil )
+ (!pair "alias_pair"
+ (!type already_seen 977)
+ (!srcfileloc "tree-core.h" 2339)
+ nil )
+ )
+ )
+
+ (!type struct 995 nil gc_pointed_to "libfunc_entry"
+ (!srcfileloc "libfuncs.h" 44)
+ (!fields 4
+ (!pair "op"
+ (!type already_seen 2)
+ (!srcfileloc "libfuncs.h" 42)
+ nil )
+ (!pair "mode1"
+ (!type already_seen 2)
+ (!srcfileloc "libfuncs.h" 42)
+ nil )
+ (!pair "mode2"
+ (!type already_seen 2)
+ (!srcfileloc "libfuncs.h" 42)
+ nil )
+ (!pair "libfunc"
+ (!type already_seen 100)
+ (!srcfileloc "libfuncs.h" 43)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type already_seen 611)
+
+ (!type already_seen 610)
+
+ (!type already_seen 607)
+
+ (!type struct 996 nil gc_unused "ht"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 997 nil gc_unused "_obstack_chunk"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 998 nil gc_unused "obstack"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 999 nil gc_unused "real_format"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1000 nil gc_unused "format_helper"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1001
+ (!type pointer 1002 nil gc_used
+ (!type already_seen 1001)
+ )
+ gc_pointed_to "sequence_stack"
+ (!srcfileloc "function.h" 34)
+ (!fields 3
+ (!pair "first"
+ (!type already_seen 297)
+ (!srcfileloc "function.h" 31)
+ nil )
+ (!pair "last"
+ (!type already_seen 297)
+ (!srcfileloc "function.h" 32)
+ nil )
+ (!pair "next"
+ (!type already_seen 1002)
+ (!srcfileloc "function.h" 33)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1003 nil gc_used "emit_status"
+ (!srcfileloc "function.h" 76)
+ (!fields 7
+ (!pair "x_reg_rtx_no"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 41)
+ nil )
+ (!pair "x_first_label_num"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 44)
+ nil )
+ (!pair "seq"
+ (!type already_seen 1001)
+ (!srcfileloc "function.h" 56)
+ nil )
+ (!pair "x_cur_insn_uid"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 60)
+ nil )
+ (!pair "x_cur_debug_insn_uid"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 64)
+ nil )
+ (!pair "regno_pointer_align_length"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 70)
+ nil )
+ (!pair "regno_pointer_align"
+ (!type already_seen 11)
+ (!srcfileloc "function.h" 75)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+
+ (!type user_struct 1004
+ (!type pointer 1005 nil gc_used
+ (!type already_seen 1004)
+ )
+ gc_pointed_to "vec<rtx_insn*,va_gc>"
+ (!srcfileloc "function.h" 131)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "function.h" 131)
+ nil )
+ (!pair "rtx_insn"
+ (!type already_seen 297)
+ (!srcfileloc "function.h" 131)
+ nil )
+ )
+ )
+
+ (!type struct 1006 nil gc_used "expr_status"
+ (!srcfileloc "function.h" 132)
+ (!fields 6
+ (!pair "x_pending_stack_adjust"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 97)
+ nil )
+ (!pair "x_inhibit_defer_pop"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 114)
+ nil )
+ (!pair "x_stack_pointer_delta"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 120)
+ nil )
+ (!pair "x_saveregs_value"
+ (!type already_seen 100)
+ (!srcfileloc "function.h" 125)
+ nil )
+ (!pair "x_apply_args_value"
+ (!type already_seen 100)
+ (!srcfileloc "function.h" 128)
+ nil )
+ (!pair "x_forced_labels"
+ (!type already_seen 1005)
+ (!srcfileloc "function.h" 131)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1007
+ (!type pointer 1008 nil gc_used
+ (!type already_seen 1007)
+ )
+ gc_pointed_to "call_site_record_d"
+ (!srcfileloc "except.cc" 168)
+ (!fields 2
+ (!pair "landing_pad"
+ (!type already_seen 100)
+ (!srcfileloc "except.cc" 166)
+ nil )
+ (!pair "action"
+ (!type already_seen 2)
+ (!srcfileloc "except.cc" 167)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type already_seen 377)
+
+ (!type user_struct 1009
+ (!type pointer 1010 nil gc_used
+ (!type already_seen 1009)
+ )
+ gc_pointed_to "vec<call_site_record,va_gc>"
+ (!srcfileloc "function.h" 147)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "function.h" 147)
+ nil )
+ (!pair "call_site_record"
+ (!type already_seen 1008)
+ (!srcfileloc "function.h" 147)
+ nil )
+ )
+ )
+
+ (!type struct 1011 nil gc_used "rtl_eh"
+ (!srcfileloc "function.h" 148)
+ (!fields 7
+ (!pair "ehr_stackadj"
+ (!type already_seen 100)
+ (!srcfileloc "function.h" 138)
+ nil )
+ (!pair "ehr_handler"
+ (!type already_seen 100)
+ (!srcfileloc "function.h" 139)
+ nil )
+ (!pair "ehr_label"
+ (!type already_seen 366)
+ (!srcfileloc "function.h" 140)
+ nil )
+ (!pair "sjlj_fc"
+ (!type already_seen 100)
+ (!srcfileloc "function.h" 142)
+ nil )
+ (!pair "sjlj_exit_after"
+ (!type already_seen 297)
+ (!srcfileloc "function.h" 143)
+ nil )
+ (!pair "action_record_data"
+ (!type already_seen 376)
+ (!srcfileloc "function.h" 145)
+ nil )
+ (!pair "call_site_record_v"
+ (!type array 1012 nil gc_used "2"
+ (!type already_seen 1010)
+ )
+ (!srcfileloc "function.h" 147)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type already_seen 385)
+
+ (!type already_seen 470)
+
+ (!type already_seen 515)
+
+ (!type struct 1013
+ (!type pointer 1014 nil gc_used
+ (!type already_seen 1013)
+ )
+ gc_pointed_to "rtx_constant_pool"
+ (!srcfileloc "varasm.cc" 3748)
+ (!fields 4
+ (!pair "first"
+ (!type already_seen 224)
+ (!srcfileloc "varasm.cc" 3736)
+ nil )
+ (!pair "last"
+ (!type already_seen 224)
+ (!srcfileloc "varasm.cc" 3737)
+ nil )
+ (!pair "const_rtx_htab"
+ (!type pointer 1015 nil gc_used
+ (!type user_struct 1016
+ (!type already_seen 1015)
+ gc_pointed_to "hash_table<const_rtx_desc_hasher>"
+ (!srcfileloc "varasm.cc" 3743)
+ (!fields 1
+ (!pair "const_rtx_desc_hasher"
+ (!type struct 1017 nil gc_used "const_rtx_desc_hasher"
+ (!srcfileloc "varasm.cc" 3743)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "varasm.cc" 3743)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "varasm.cc" 3743)
+ nil )
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "varasm.cc" 3747)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1018 nil gc_used "varasm_status"
+ (!srcfileloc "function.h" 169)
+ (!fields 2
+ (!pair "pool"
+ (!type already_seen 1014)
+ (!srcfileloc "function.h" 164)
+ nil )
+ (!pair "deferred_constants"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 168)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1019 nil gc_used "function_subsections"
+ (!srcfileloc "function.h" 182)
+ (!fields 4
+ (!pair "hot_section_label"
+ (!type already_seen 11)
+ (!srcfileloc "function.h" 178)
+ nil )
+ (!pair "cold_section_label"
+ (!type already_seen 11)
+ (!srcfileloc "function.h" 179)
+ nil )
+ (!pair "hot_section_end_label"
+ (!type already_seen 11)
+ (!srcfileloc "function.h" 180)
+ nil )
+ (!pair "cold_section_end_label"
+ (!type already_seen 11)
+ (!srcfileloc "function.h" 181)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1020
+ (!type pointer 1021 nil gc_used
+ (!type already_seen 1020)
+ )
+ gc_pointed_to "frame_space"
+ (!srcfileloc "function.h" 194)
+ (!fields 3
+ (!pair "next"
+ (!type already_seen 1021)
+ (!srcfileloc "function.h" 190)
+ nil )
+ (!pair "start"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 192)
+ nil )
+ (!pair "length"
+ (!type already_seen 2)
+ (!srcfileloc "function.h" 193)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type already_seen 417)
+
+ (!type already_seen 420)
+
+ (!type already_seen 416)
+
+ (!type already_seen 419)
+
+ (!type already_seen 414)
+
+ (!type already_seen 355)
+
+ (!type already_seen 379)
+
+ (!type already_seen 409)
+
+ (!type already_seen 426)
+
+ (!type already_seen 468)
+
+ (!type struct 1022 nil gc_pointed_to "types_used_by_vars_entry"
+ (!srcfileloc "function.h" 482)
+ (!fields 2
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "function.h" 480)
+ nil )
+ (!pair "var_decl"
+ (!type already_seen 23)
+ (!srcfileloc "function.h" 481)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1023 nil gc_used "used_type_hasher"
+ (!srcfileloc "function.h" 495)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1024
+ (!type pointer 1025 nil gc_used
+ (!type already_seen 1024)
+ )
+ gc_pointed_to "hash_table<used_type_hasher>"
+ (!srcfileloc "function.h" 495)
+ (!fields 1
+ (!pair "used_type_hasher"
+ (!type already_seen 1023)
+ (!srcfileloc "function.h" 495)
+ nil )
+ )
+ )
+
+ (!type struct 1026 nil gc_unused "args_size"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1027 nil gc_unused "locate_and_pad_arg_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1028 nil gc_unused "function_abi"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1029 nil gc_unused "function_abi_aggregator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 601)
+
+ (!type already_seen 216)
+
+ (!type already_seen 217)
+
+ (!type already_seen 218)
+
+ (!type already_seen 220)
+
+ (!type already_seen 277)
+
+ (!type already_seen 281)
+
+ (!type already_seen 287)
+
+ (!type already_seen 412)
+
+ (!type already_seen 276)
+
+ (!type already_seen 285)
+
+ (!type already_seen 279)
+
+ (!type already_seen 289)
+
+ (!type already_seen 411)
+
+ (!type struct 1030 nil gc_unused "eni_weights"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1031 nil gc_unused "rtx_iv"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1032 nil gc_unused "loops_list"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 613)
+
+ (!type already_seen 381)
+
+ (!type already_seen 301)
+
+ (!type struct 1033 nil gc_unused "auto_flag"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1034 nil gc_unused "auto_edge_flag"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1035 nil gc_unused "auto_bb_flag"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1036 nil gc_used "sreal"
+ (!srcfileloc "profile-count.h" 1253)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1037 nil gc_unused "profile_probability"
+ (!srcfileloc "profile-count.h" 659)
+ (!fields 0 )
+ )
+
+ (!type struct 1038 nil gc_unused "cselib_val"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1039 nil gc_unused "elt_loc_list"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1040 nil gc_unused "cselib_set"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 300)
+
+ (!type already_seen 295)
+
+ (!type already_seen 272)
+
+ (!type already_seen 293)
+
+ (!type already_seen 296)
+
+ (!type already_seen 294)
+
+ (!type struct 1041 nil gc_unused "edge_iterator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1042
+ (!type pointer 1043 nil gc_unused
+ (!type already_seen 1042)
+ )
+ gc_unused "ipa_ref"
+ (!srcfileloc "cgraph.h" 173)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type already_seen 321)
+
+ (!type already_seen 837)
+
+ (!type already_seen 343)
+
+ (!type struct 1044 nil gc_used "section_name_hasher"
+ (!srcfileloc "cgraph.h" 2492)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type undefined 1045 nil gc_unused "explicit"
+ (!srcfileloc "cgraph.h" 113)
+ )
+
+ (!type already_seen 324)
+
+ (!type struct 1046
+ (!type pointer 1047 nil gc_unused
+ (!type already_seen 1046)
+ )
+ gc_used "symbol_priority_map"
+ (!srcfileloc "cgraph.h" 2498)
+ (!fields 2
+ (!pair "init"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2165)
+ nil )
+ (!pair "fini"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2166)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1048
+ (!type pointer 1049 nil gc_used
+ (!type already_seen 1048)
+ )
+ gc_pointed_to "ipa_replace_map"
+ (!srcfileloc "cgraph.h" 715)
+ (!fields 3
+ (!pair "new_tree"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 708)
+ nil )
+ (!pair "parm_num"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 710)
+ nil )
+ (!pair "force_load_ref"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 714)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type already_seen 832)
+
+ (!type already_seen 830)
+
+ (!type struct 1050
+ (!type pointer 1051 nil gc_used
+ (!type already_seen 1050)
+ )
+ gc_pointed_to "cgraph_function_version_info"
+ (!srcfileloc "cgraph.h" 843)
+ (!fields 4
+ (!pair "this_node"
+ (!type already_seen 820)
+ (!srcfileloc "cgraph.h" 824)
+ nil )
+ (!pair "prev"
+ (!type already_seen 1051)
+ (!srcfileloc "cgraph.h" 828)
+ nil )
+ (!pair "next"
+ (!type already_seen 1051)
+ (!srcfileloc "cgraph.h" 832)
+ nil )
+ (!pair "dispatcher_resolver"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 842)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type already_seen 828)
+
+ (!type user_struct 1052 nil gc_unused "auto_vec<cgraph_edge*>"
+ (!srcfileloc "cgraph.h" 1150)
+ (!fields 1
+ (!pair "cgraph_edge"
+ (!type already_seen 821)
+ (!srcfileloc "cgraph.h" 1150)
+ nil )
+ )
+ )
+
+ (!type already_seen 827)
+
+ (!type already_seen 833)
+
+ (!type already_seen 835)
+
+ (!type already_seen 834)
+
+ (!type struct 1053
+ (!type pointer 1054 nil gc_unused
+ (!type already_seen 1053)
+ )
+ gc_unused "cgraph_node_set_def"
+ (!srcfileloc "cgraph.h" 1516)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 1055
+ (!type pointer 1056 nil gc_unused
+ (!type already_seen 1055)
+ )
+ gc_unused "varpool_node_set_def"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1057 nil gc_unused "cgraph_node_set_iterator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1058 nil gc_unused "varpool_node_set_iterator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 825)
+
+ (!type already_seen 824)
+
+ (!type undefined 1059 nil gc_unused "availability"
+ (!srcfileloc "cgraph.h" 2008)
+ )
+
+ (!type struct 1060
+ (!type pointer 1061 nil gc_used
+ (!type already_seen 1060)
+ )
+ gc_pointed_to "asm_node"
+ (!srcfileloc "cgraph.h" 2129)
+ (!fields 3
+ (!pair "next"
+ (!type already_seen 1061)
+ (!srcfileloc "cgraph.h" 2124)
+ nil )
+ (!pair "asm_str"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 2126)
+ nil )
+ (!pair "order"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2128)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1062
+ (!type pointer 1063 nil gc_unused
+ (!type already_seen 1062)
+ )
+ gc_unused "cgraph_edge_hook_list"
+ (!srcfileloc "cgraph.h" 2341)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 1064
+ (!type pointer 1065 nil gc_unused
+ (!type already_seen 1064)
+ )
+ gc_unused "cgraph_node_hook_list"
+ (!srcfileloc "cgraph.h" 2348)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 1066
+ (!type pointer 1067 nil gc_unused
+ (!type already_seen 1066)
+ )
+ gc_unused "varpool_node_hook_list"
+ (!srcfileloc "cgraph.h" 2355)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 1068
+ (!type pointer 1069 nil gc_unused
+ (!type already_seen 1068)
+ )
+ gc_unused "cgraph_2edge_hook_list"
+ (!srcfileloc "cgraph.h" 2376)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 1070
+ (!type pointer 1071 nil gc_unused
+ (!type already_seen 1070)
+ )
+ gc_unused "cgraph_2node_hook_list"
+ (!srcfileloc "cgraph.h" 2382)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 1072 nil gc_used "asmname_hasher"
+ (!srcfileloc "cgraph.h" 2495)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 1073
+ (!type pointer 1074 nil gc_used
+ (!type already_seen 1073)
+ )
+ gc_pointed_to "thunk_info"
+ (!srcfileloc "symtab-thunks.h" 145)
+ (!fields 6
+ (!pair "fixed_offset"
+ (!type already_seen 2)
+ (!srcfileloc "symtab-thunks.h" 87)
+ nil )
+ (!pair "virtual_value"
+ (!type already_seen 2)
+ (!srcfileloc "symtab-thunks.h" 91)
+ nil )
+ (!pair "indirect_offset"
+ (!type already_seen 2)
+ (!srcfileloc "symtab-thunks.h" 95)
+ nil )
+ (!pair "alias"
+ (!type already_seen 23)
+ (!srcfileloc "symtab-thunks.h" 99)
+ nil )
+ (!pair "this_adjusting"
+ (!type already_seen 2)
+ (!srcfileloc "symtab-thunks.h" 103)
+ nil )
+ (!pair "virtual_offset_p"
+ (!type already_seen 2)
+ (!srcfileloc "symtab-thunks.h" 111)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type user_struct 1075
+ (!type pointer 1076 nil gc_used
+ (!type already_seen 1075)
+ )
+ gc_pointed_to "function_summary<thunk_info*>"
+ (!srcfileloc "cgraph.h" 2202)
+ (!fields 1
+ (!pair "thunk_info"
+ (!type already_seen 1074)
+ (!srcfileloc "cgraph.h" 2202)
+ nil )
+ )
+ )
+
+ (!type struct 1077
+ (!type pointer 1078 nil gc_used
+ (!type already_seen 1077)
+ )
+ gc_pointed_to "clone_info"
+ (!srcfileloc "symtab-clones.h" 49)
+ (!fields 2
+ (!pair "tree_map"
+ (!type pointer 1079 nil gc_used
+ (!type user_struct 1080
+ (!type already_seen 1079)
+ gc_pointed_to "vec<ipa_replace_map*,va_gc>"
+ (!srcfileloc "symtab-clones.h" 34)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "symtab-clones.h" 34)
+ nil )
+ (!pair "ipa_replace_map"
+ (!type already_seen 1049)
+ (!srcfileloc "symtab-clones.h" 34)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "symtab-clones.h" 34)
+ nil )
+ (!pair "param_adjustments"
+ (!type pointer 1081 nil gc_used
+ (!type struct 1082
+ (!type already_seen 1081)
+ gc_pointed_to "ipa_param_adjustments"
+ (!srcfileloc "ipa-param-manipulation.h" 264)
+ (!fields 3
+ (!pair "m_adj_params"
+ (!type pointer 1083 nil gc_used
+ (!type user_struct 1084
+ (!type already_seen 1083)
+ gc_pointed_to "vec<ipa_adjusted_param,va_gc>"
+ (!srcfileloc "ipa-param-manipulation.h" 247)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-param-manipulation.h" 247)
+ nil )
+ (!pair "ipa_adjusted_param"
+ (!type struct 1085 nil gc_used "ipa_adjusted_param"
+ (!srcfileloc "ipa-param-manipulation.h" 247)
+ (!fields 10
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-param-manipulation.h" 162)
+ nil )
+ (!pair "alias_ptr_type"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-param-manipulation.h" 166)
+ nil )
+ (!pair "unit_offset"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-param-manipulation.h" 171)
+ nil )
+ (!pair "base_index"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-param-manipulation.h" 177)
+ nil )
+ (!pair "prev_clone_index"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-param-manipulation.h" 181)
+ nil )
+ (!pair "op"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-param-manipulation.h" 184)
+ nil )
+ (!pair "prev_clone_adjustment"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-param-manipulation.h" 188)
+ nil )
+ (!pair "param_prefix_index"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-param-manipulation.h" 192)
+ nil )
+ (!pair "reverse"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-param-manipulation.h" 196)
+ nil )
+ (!pair "user_flag"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-param-manipulation.h" 199)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-param-manipulation.h" 247)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-param-manipulation.h" 247)
+ nil )
+ (!pair "m_always_copy_start"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-param-manipulation.h" 253)
+ nil )
+ (!pair "m_skip_return"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-param-manipulation.h" 255)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "symtab-clones.h" 36)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type user_struct 1086
+ (!type pointer 1087 nil gc_used
+ (!type already_seen 1086)
+ )
+ gc_pointed_to "function_summary<clone_info*>"
+ (!srcfileloc "cgraph.h" 2206)
+ (!fields 1
+ (!pair "clone_info"
+ (!type already_seen 1078)
+ (!srcfileloc "cgraph.h" 2206)
+ nil )
+ )
+ )
+
+ (!type struct 1088
+ (!type pointer 1089 nil gc_used
+ (!type already_seen 1088)
+ )
+ gc_pointed_to "symbol_table"
+ (!srcfileloc "cgraph.h" 2552)
+ (!fields 33
+ (!pair "DEBUG_FUNCTION"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2425)
+ nil )
+ (!pair "cgraph_count"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2453)
+ nil )
+ (!pair "cgraph_max_uid"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2454)
+ nil )
+ (!pair "cgraph_max_summary_id"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2455)
+ nil )
+ (!pair "edges_count"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2457)
+ nil )
+ (!pair "edges_max_uid"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2458)
+ nil )
+ (!pair "edges_max_summary_id"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2459)
+ nil )
+ (!pair "cgraph_released_summary_ids"
+ (!type user_struct 1090 nil gc_unused "vec<int>"
+ (!srcfileloc "cgraph.h" 2462)
+ (!fields 1
+ (!pair "int"
+ (!type already_seen 374)
+ (!srcfileloc "cgraph.h" 2462)
+ nil )
+ )
+ )
+ (!srcfileloc "cgraph.h" 2462)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "edge_released_summary_ids"
+ (!type already_seen 1090)
+ (!srcfileloc "cgraph.h" 2465)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "nodes"
+ (!type already_seen 319)
+ (!srcfileloc "cgraph.h" 2470)
+ nil )
+ (!pair "asmnodes"
+ (!type already_seen 1061)
+ (!srcfileloc "cgraph.h" 2471)
+ nil )
+ (!pair "asm_last_node"
+ (!type already_seen 1061)
+ (!srcfileloc "cgraph.h" 2472)
+ nil )
+ (!pair "order"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2477)
+ nil )
+ (!pair "max_unit"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2480)
+ nil )
+ (!pair "global_info_ready"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2483)
+ nil )
+ (!pair "state"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2485)
+ nil )
+ (!pair "function_flags_ready"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2487)
+ nil )
+ (!pair "cpp_implicit_aliases_done"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2489)
+ nil )
+ (!pair "section_hash"
+ (!type pointer 1091 nil gc_used
+ (!type user_struct 1092
+ (!type already_seen 1091)
+ gc_pointed_to "hash_table<section_name_hasher>"
+ (!srcfileloc "cgraph.h" 2492)
+ (!fields 1
+ (!pair "section_name_hasher"
+ (!type already_seen 1044)
+ (!srcfileloc "cgraph.h" 2492)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cgraph.h" 2492)
+ nil )
+ (!pair "assembler_name_hash"
+ (!type pointer 1093 nil gc_used
+ (!type user_struct 1094
+ (!type already_seen 1093)
+ gc_pointed_to "hash_table<asmname_hasher>"
+ (!srcfileloc "cgraph.h" 2495)
+ (!fields 1
+ (!pair "asmname_hasher"
+ (!type already_seen 1072)
+ (!srcfileloc "cgraph.h" 2495)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cgraph.h" 2495)
+ nil )
+ (!pair "init_priority_hash"
+ (!type pointer 1095 nil gc_used
+ (!type user_struct 1096
+ (!type already_seen 1095)
+ gc_pointed_to "hash_map<symtab_node*,symbol_priority_map>"
+ (!srcfileloc "cgraph.h" 2498)
+ (!fields 2
+ (!pair "symbol_priority_map"
+ (!type already_seen 1046)
+ (!srcfileloc "cgraph.h" 2498)
+ nil )
+ (!pair "symtab_node"
+ (!type already_seen 319)
+ (!srcfileloc "cgraph.h" 2498)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cgraph.h" 2498)
+ nil )
+ (!pair "dump_file"
+ (!type pointer 1097 nil gc_unused
+ (!type undefined 1098
+ (!type already_seen 1097)
+ gc_unused "FILE"
+ (!srcfileloc "cgraph.h" 2500)
+ )
+ )
+ (!srcfileloc "cgraph.h" 2500)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "ipa_clones_dump_file"
+ (!type already_seen 1097)
+ (!srcfileloc "cgraph.h" 2502)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "cloned_nodes"
+ (!type user_struct 1099 nil gc_unused "hash_set<cgraph_node*>"
+ (!srcfileloc "cgraph.h" 2504)
+ (!fields 1
+ (!pair "cgraph_node"
+ (!type already_seen 820)
+ (!srcfileloc "cgraph.h" 2504)
+ nil )
+ )
+ )
+ (!srcfileloc "cgraph.h" 2504)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "m_thunks"
+ (!type already_seen 1076)
+ (!srcfileloc "cgraph.h" 2507)
+ nil )
+ (!pair "m_clones"
+ (!type already_seen 1087)
+ (!srcfileloc "cgraph.h" 2510)
+ nil )
+ (!pair "m_first_edge_removal_hook"
+ (!type already_seen 1063)
+ (!srcfileloc "cgraph.h" 2539)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "m_first_cgraph_removal_hook"
+ (!type already_seen 1065)
+ (!srcfileloc "cgraph.h" 2541)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "m_first_edge_duplicated_hook"
+ (!type already_seen 1069)
+ (!srcfileloc "cgraph.h" 2543)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "m_first_cgraph_duplicated_hook"
+ (!type already_seen 1071)
+ (!srcfileloc "cgraph.h" 2545)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "m_first_cgraph_insertion_hook"
+ (!type already_seen 1065)
+ (!srcfileloc "cgraph.h" 2547)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "m_first_varpool_insertion_hook"
+ (!type already_seen 1067)
+ (!srcfileloc "cgraph.h" 2549)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "m_first_varpool_removal_hook"
+ (!type already_seen 1067)
+ (!srcfileloc "cgraph.h" 2551)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ (!options
+ (!option tag string "SYMTAB")
+ )
+ 4095 nil nil )
+
+ (!type already_seen 1090)
+
+ (!type already_seen 1092)
+
+ (!type already_seen 1094)
+
+ (!type already_seen 1096)
+
+ (!type already_seen 1098)
+
+ (!type already_seen 1099)
+
+ (!type struct 1100 nil gc_pointed_to "constant_descriptor_tree"
+ (!srcfileloc "cgraph.h" 3093)
+ (!fields 3
+ (!pair "rtl"
+ (!type already_seen 100)
+ (!srcfileloc "cgraph.h" 3084)
+ nil )
+ (!pair "value"
+ (!type already_seen 23)
+ (!srcfileloc "cgraph.h" 3087)
+ nil )
+ (!pair "hash"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 3092)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1101 nil gc_used "tree_descriptor_hasher"
+ (!srcfileloc "varasm.cc" 3072)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 1102 nil gc_unused "symbol_table_test"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1103 nil gc_used "unprocessed_thunk"
+ (!srcfileloc "symtab-thunks.cc" 62)
+ (!fields 2
+ (!pair "node"
+ (!type already_seen 820)
+ (!srcfileloc "symtab-thunks.cc" 57)
+ nil )
+ (!pair "info"
+ (!type already_seen 1074)
+ (!srcfileloc "symtab-thunks.cc" 58)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type user_struct 1104
+ (!type pointer 1105 nil gc_used
+ (!type already_seen 1104)
+ )
+ gc_pointed_to "vec<unprocessed_thunk,va_gc>"
+ (!srcfileloc "symtab-thunks.cc" 62)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "symtab-thunks.cc" 62)
+ nil )
+ (!pair "unprocessed_thunk"
+ (!type already_seen 1103)
+ (!srcfileloc "symtab-thunks.cc" 62)
+ nil )
+ )
+ )
+
+ (!type user_struct 1106 nil gc_unused "thunk_infos_t"
+ (!srcfileloc "symtab-thunks.cc" 78)
+ (!fields 0 )
+ )
+
+ (!type already_seen 1080)
+
+ (!type already_seen 1082)
+
+ (!type struct 1107 nil gc_unused "reload"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 603)
+
+ (!type struct 1108 nil gc_unused "reg_equivs_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1109 nil gc_unused "insn_chain"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1110 nil gc_unused "saved_hard_reg"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1111 nil gc_used "alias_set_hash"
+ (!srcfileloc "alias.cc" 148)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1112
+ (!type pointer 1113 nil gc_used
+ (!type already_seen 1112)
+ )
+ gc_pointed_to "hash_map<alias_set_hash,int>"
+ (!srcfileloc "alias.cc" 148)
+ (!fields 2
+ (!pair "int"
+ (!type already_seen 374)
+ (!srcfileloc "alias.cc" 148)
+ nil )
+ (!pair "alias_set_hash"
+ (!type already_seen 1111)
+ (!srcfileloc "alias.cc" 148)
+ nil )
+ )
+ )
+
+ (!type struct 1114
+ (!type pointer 1115 nil gc_used
+ (!type already_seen 1114)
+ )
+ gc_pointed_to "alias_set_entry"
+ (!srcfileloc "alias.cc" 149)
+ (!fields 5
+ (!pair "alias_set"
+ (!type already_seen 2)
+ (!srcfileloc "alias.cc" 128)
+ nil )
+ (!pair "has_zero_child"
+ (!type already_seen 2)
+ (!srcfileloc "alias.cc" 132)
+ nil )
+ (!pair "is_pointer"
+ (!type already_seen 2)
+ (!srcfileloc "alias.cc" 137)
+ nil )
+ (!pair "has_pointer"
+ (!type already_seen 2)
+ (!srcfileloc "alias.cc" 139)
+ nil )
+ (!pair "children"
+ (!type already_seen 1113)
+ (!srcfileloc "alias.cc" 148)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type user_struct 1116
+ (!type pointer 1117 nil gc_used
+ (!type already_seen 1116)
+ )
+ gc_pointed_to "vec<alias_set_entry*,va_gc>"
+ (!srcfileloc "alias.cc" 280)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "alias.cc" 280)
+ nil )
+ (!pair "alias_set_entry"
+ (!type already_seen 1115)
+ (!srcfileloc "alias.cc" 280)
+ nil )
+ )
+ )
+
+ (!type struct 1118 nil gc_unused "elt_list"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1119 nil gc_unused "expand_value_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1120 nil gc_unused "cselib_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1121 nil gc_unused "cselib_record_autoinc_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1122 nil gc_used "function_version_hasher"
+ (!srcfileloc "cgraph.cc" 122)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1123
+ (!type pointer 1124 nil gc_used
+ (!type already_seen 1123)
+ )
+ gc_pointed_to "hash_table<function_version_hasher>"
+ (!srcfileloc "cgraph.cc" 122)
+ (!fields 1
+ (!pair "function_version_hasher"
+ (!type already_seen 1122)
+ (!srcfileloc "cgraph.cc" 122)
+ nil )
+ )
+ )
+
+ (!type struct 1125 nil gc_unused "set_pure_flag_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 326)
+
+ (!type struct 1126 nil gc_used "ipa_bit_ggc_hash_traits"
+ (!srcfileloc "ipa-prop.cc" 109)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1127
+ (!type pointer 1128 nil gc_used
+ (!type already_seen 1127)
+ )
+ gc_pointed_to "hash_table<ipa_bit_ggc_hash_traits>"
+ (!srcfileloc "ipa-prop.cc" 109)
+ (!fields 1
+ (!pair "ipa_bit_ggc_hash_traits"
+ (!type already_seen 1126)
+ (!srcfileloc "ipa-prop.cc" 109)
+ nil )
+ )
+ )
+
+ (!type struct 1129 nil gc_used "ipa_vr_ggc_hash_traits"
+ (!srcfileloc "ipa-prop.cc" 156)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1130
+ (!type pointer 1131 nil gc_used
+ (!type already_seen 1130)
+ )
+ gc_pointed_to "hash_table<ipa_vr_ggc_hash_traits>"
+ (!srcfileloc "ipa-prop.cc" 156)
+ (!fields 1
+ (!pair "ipa_vr_ggc_hash_traits"
+ (!type already_seen 1129)
+ (!srcfileloc "ipa-prop.cc" 156)
+ nil )
+ )
+ )
+
+ (!type struct 1132
+ (!type pointer 1133 nil gc_unused
+ (!type already_seen 1132)
+ )
+ gc_unused "ipa_cst_ref_desc"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1134 nil gc_used "ipa_jump_func"
+ (!srcfileloc "ipa-prop.h" 970)
+ (!fields 5
+ (!pair "agg"
+ (!type struct 1135 nil gc_used "ipa_agg_jump_function"
+ (!srcfileloc "ipa-prop.h" 193)
+ (!fields 2
+ (!pair "items"
+ (!type pointer 1136 nil gc_used
+ (!type user_struct 1137
+ (!type already_seen 1136)
+ gc_pointed_to "vec<ipa_agg_jf_item,va_gc>"
+ (!srcfileloc "ipa-prop.h" 190)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-prop.h" 190)
+ nil )
+ (!pair "ipa_agg_jf_item"
+ (!type struct 1138 nil gc_used "ipa_agg_jf_item"
+ (!srcfileloc "ipa-prop.h" 190)
+ (!fields 4
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 165)
+ nil )
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-prop.h" 168)
+ nil )
+ (!pair "jftype"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 171)
+ nil )
+ (!pair "value"
+ (!type union 1139 nil gc_used "jump_func_agg_value"
+ (!srcfileloc "ipa-prop.h" 182)
+ (!fields 3
+ (!pair "constant"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-prop.h" 179)
+ (!options
+ (!option tag string "IPA_JF_CONST")
+ )
+ )
+ (!pair "pass_through"
+ (!type struct 1140 nil gc_used "ipa_pass_through_data"
+ (!srcfileloc "ipa-prop.h" 121)
+ (!fields 5
+ (!pair "operand"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-prop.h" 98)
+ nil )
+ (!pair "formal_id"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 100)
+ nil )
+ (!pair "operation"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 110)
+ nil )
+ (!pair "agg_preserved"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 117)
+ nil )
+ (!pair "refdesc_decremented"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 120)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-prop.h" 180)
+ (!options
+ (!option tag string "IPA_JF_PASS_THROUGH")
+ )
+ )
+ (!pair "load_agg"
+ (!type struct 1141 nil gc_used "ipa_load_agg_data"
+ (!srcfileloc "ipa-prop.h" 141)
+ (!fields 4
+ (!pair "pass_through"
+ (!type already_seen 1140)
+ (!srcfileloc "ipa-prop.h" 132)
+ nil )
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-prop.h" 134)
+ nil )
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 136)
+ nil )
+ (!pair "by_ref"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 140)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-prop.h" 181)
+ (!options
+ (!option tag string "IPA_JF_LOAD_AGG")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "ipa-prop.h" 182)
+ (!options
+ (!option desc string "%1.jftype")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-prop.h" 190)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-prop.h" 190)
+ nil )
+ (!pair "by_ref"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 192)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-prop.h" 327)
+ nil )
+ (!pair "bits"
+ (!type pointer 1142 nil gc_used
+ (!type struct 1143
+ (!type already_seen 1142)
+ gc_pointed_to "ipa_bits"
+ (!srcfileloc "ipa-prop.h" 305)
+ (!fields 2
+ (!pair "value"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 300)
+ nil )
+ (!pair "mask"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 304)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "ipa-prop.h" 332)
+ nil )
+ (!pair "m_vr"
+ (!type pointer 1144 nil gc_used
+ (!type user_struct 1145
+ (!type already_seen 1144)
+ gc_pointed_to "int_range<1>"
+ (!srcfileloc "value-range.h" 532)
+ (!fields 1
+ (!pair "1"
+ (!type undefined 1146 nil gc_unused "1"
+ (!srcfileloc "value-range.h" 532)
+ )
+ (!srcfileloc "value-range.h" 532)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-prop.h" 337)
+ nil )
+ (!pair "type"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 339)
+ nil )
+ (!pair "value"
+ (!type union 1147 nil gc_used "jump_func_value"
+ (!srcfileloc "ipa-prop.h" 348)
+ (!fields 3
+ (!pair "constant"
+ (!type struct 1148 nil gc_used "ipa_constant_data"
+ (!srcfileloc "ipa-prop.h" 90)
+ (!fields 2
+ (!pair "value"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-prop.h" 87)
+ nil )
+ (!pair "rdesc"
+ (!type already_seen 1133)
+ (!srcfileloc "ipa-prop.h" 89)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-prop.h" 345)
+ (!options
+ (!option tag string "IPA_JF_CONST")
+ )
+ )
+ (!pair "pass_through"
+ (!type already_seen 1140)
+ (!srcfileloc "ipa-prop.h" 346)
+ (!options
+ (!option tag string "IPA_JF_PASS_THROUGH")
+ )
+ )
+ (!pair "ancestor"
+ (!type struct 1149 nil gc_used "ipa_ancestor_jf_data"
+ (!srcfileloc "ipa-prop.h" 156)
+ (!fields 4
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 149)
+ nil )
+ (!pair "formal_id"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 151)
+ nil )
+ (!pair "agg_preserved"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 153)
+ nil )
+ (!pair "keep_null"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 155)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-prop.h" 347)
+ (!options
+ (!option tag string "IPA_JF_ANCESTOR")
+ )
+ )
+ )
+ nil 4095 nil )
+ (!srcfileloc "ipa-prop.h" 348)
+ (!options
+ (!option desc string "%1.type")
+ )
+ )
+ )
+ nil 4095 nil nil )
+
+ (!type already_seen 1138)
+
+ (!type struct 1150 nil gc_unused "prop_type_change_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1151 nil gc_unused "ipa_bb_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1152 nil gc_unused "ipa_param_aa_status"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1153 nil gc_pointed_to "ipa_node_params"
+ (!srcfileloc "ipa-prop.h" 658)
+ (!fields 14
+ (!pair "descriptors"
+ (!type pointer 1154 nil gc_used
+ (!type user_struct 1155
+ (!type already_seen 1154)
+ gc_pointed_to "vec<ipa_param_descriptor,va_gc>"
+ (!srcfileloc "ipa-prop.h" 624)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-prop.h" 624)
+ nil )
+ (!pair "ipa_param_descriptor"
+ (!type struct 1156 nil gc_used "ipa_param_descriptor"
+ (!srcfileloc "ipa-prop.h" 624)
+ (!fields 8
+ (!pair "decl_or_type"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-prop.h" 589)
+ nil )
+ (!pair "controlled_uses"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 594)
+ nil )
+ (!pair "move_cost"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 595)
+ nil )
+ (!pair "used"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 597)
+ nil )
+ (!pair "used_by_ipa_predicates"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 598)
+ nil )
+ (!pair "used_by_indirect_call"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 599)
+ nil )
+ (!pair "used_by_polymorphic_call"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 600)
+ nil )
+ (!pair "load_dereferenced"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 606)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-prop.h" 624)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-prop.h" 624)
+ nil )
+ (!pair "lattices"
+ (!type pointer 1157 nil gc_unused
+ (!type struct 1158
+ (!type already_seen 1157)
+ gc_unused "ipcp_param_lattices"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "ipa-prop.h" 627)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "ipcp_orig_node"
+ (!type already_seen 820)
+ (!srcfileloc "ipa-prop.h" 630)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "known_csts"
+ (!type user_struct 1159 nil gc_unused "vec<tree>"
+ (!srcfileloc "ipa-prop.h" 633)
+ (!fields 1
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-prop.h" 633)
+ nil )
+ )
+ )
+ (!srcfileloc "ipa-prop.h" 633)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "known_contexts"
+ (!type user_struct 1160 nil gc_unused "vec<ipa_polymorphic_call_context>"
+ (!srcfileloc "ipa-prop.h" 636)
+ (!fields 1
+ (!pair "ipa_polymorphic_call_context"
+ (!type already_seen 825)
+ (!srcfileloc "ipa-prop.h" 636)
+ nil )
+ )
+ )
+ (!srcfileloc "ipa-prop.h" 636)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "analysis_done"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 639)
+ nil )
+ (!pair "node_enqueued"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 641)
+ nil )
+ (!pair "do_clone_for_all_contexts"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 644)
+ nil )
+ (!pair "is_all_contexts_clone"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 646)
+ nil )
+ (!pair "node_dead"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 649)
+ nil )
+ (!pair "node_within_scc"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 651)
+ nil )
+ (!pair "node_is_self_scc"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 653)
+ nil )
+ (!pair "node_calling_single_call"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 655)
+ nil )
+ (!pair "versionable"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 657)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1161 nil gc_unused "ipa_known_agg_contents_list"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 1141)
+
+ (!type struct 1162 nil gc_unused "analysis_dom_walker"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1163 nil gc_unused "ipa_func_body_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1164 nil gc_pointed_to "ipa_edge_args"
+ (!srcfileloc "ipa-prop.h" 972)
+ (!fields 2
+ (!pair "jump_functions"
+ (!type pointer 1165 nil gc_used
+ (!type user_struct 1166
+ (!type already_seen 1165)
+ gc_pointed_to "vec<ipa_jump_func,va_gc>"
+ (!srcfileloc "ipa-prop.h" 970)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-prop.h" 970)
+ nil )
+ (!pair "ipa_jump_func"
+ (!type already_seen 1134)
+ (!srcfileloc "ipa-prop.h" 970)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-prop.h" 970)
+ nil )
+ (!pair "polymorphic_call_contexts"
+ (!type pointer 1167 nil gc_used
+ (!type user_struct 1168
+ (!type already_seen 1167)
+ gc_pointed_to "vec<ipa_polymorphic_call_context,va_gc>"
+ (!srcfileloc "ipa-prop.h" 971)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-prop.h" 971)
+ nil )
+ (!pair "ipa_polymorphic_call_context"
+ (!type already_seen 825)
+ (!srcfileloc "ipa-prop.h" 971)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-prop.h" 971)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1169 nil gc_unused "bitpack_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1170 nil gc_unused "data_in"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1171 nil gc_unused "output_block"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1172 nil gc_unused "ipcp_modif_dom_walker"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1173 nil gc_unused "ipcp_value_source"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1174 nil gc_unused "ipcp_value_base"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1175 nil gc_unused "ipcp_value"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1176 nil gc_unused "ipcp_lattice"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1177 nil gc_unused "ipcp_agg_lattice"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1178 nil gc_unused "ipcp_bits_lattice"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1179 nil gc_unused "ipcp_vr_lattice"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 1158)
+
+ (!type struct 1180 nil gc_unused "caller_statistics"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1181 nil gc_unused "value_topo_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1182 nil gc_unused "ipa_topo_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1183
+ (!type pointer 1184 nil gc_used
+ (!type already_seen 1183)
+ )
+ gc_pointed_to "ipa_fn_summary"
+ (!srcfileloc "ipa-fnsummary.h" 219)
+ (!fields 15
+ (!pair "min_size"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-fnsummary.h" 159)
+ nil )
+ (!pair "inlinable"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-fnsummary.h" 162)
+ nil )
+ (!pair "single_caller"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-fnsummary.h" 165)
+ nil )
+ (!pair "fp_expressions"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-fnsummary.h" 167)
+ nil )
+ (!pair "target_info"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-fnsummary.h" 171)
+ nil )
+ (!pair "estimated_stack_size"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-fnsummary.h" 178)
+ nil )
+ (!pair "time"
+ (!type already_seen 1036)
+ (!srcfileloc "ipa-fnsummary.h" 180)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "conds"
+ (!type pointer 1185 nil gc_used
+ (!type user_struct 1186
+ (!type already_seen 1185)
+ gc_pointed_to "vec<condition,va_gc>"
+ (!srcfileloc "ipa-predicate.h" 94)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-predicate.h" 94)
+ nil )
+ (!pair "condition"
+ (!type struct 1187 nil gc_used "condition"
+ (!srcfileloc "ipa-predicate.h" 94)
+ (!fields 8
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-predicate.h" 52)
+ nil )
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-predicate.h" 54)
+ nil )
+ (!pair "val"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-predicate.h" 55)
+ nil )
+ (!pair "operand_num"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-predicate.h" 56)
+ nil )
+ (!pair "code"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-predicate.h" 57)
+ nil )
+ (!pair "agg_contents"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-predicate.h" 60)
+ nil )
+ (!pair "by_ref"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-predicate.h" 63)
+ nil )
+ (!pair "param_ops"
+ (!type pointer 1188 nil gc_used
+ (!type user_struct 1189
+ (!type already_seen 1188)
+ gc_pointed_to "vec<expr_eval_op,va_gc>"
+ (!srcfileloc "ipa-predicate.h" 46)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-predicate.h" 46)
+ nil )
+ (!pair "expr_eval_op"
+ (!type struct 1190 nil gc_used "expr_eval_op"
+ (!srcfileloc "ipa-predicate.h" 46)
+ (!fields 4
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-predicate.h" 37)
+ nil )
+ (!pair "val"
+ (!type array 1191 nil gc_used "2"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "ipa-predicate.h" 39)
+ nil )
+ (!pair "index"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-predicate.h" 41)
+ nil )
+ (!pair "code"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-predicate.h" 43)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-predicate.h" 46)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-predicate.h" 66)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-predicate.h" 94)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-fnsummary.h" 184)
+ nil )
+ (!pair "size_time_table"
+ (!type user_struct 1192 nil gc_unused "auto_vec<size_time_entry>"
+ (!srcfileloc "ipa-fnsummary.h" 189)
+ (!fields 1
+ (!pair "size_time_entry"
+ (!type struct 1193 nil gc_unused "size_time_entry"
+ (!srcfileloc "ipa-fnsummary.h" 189)
+ (!fields 0 )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-fnsummary.h" 189)
+ nil )
+ )
+ )
+ (!srcfileloc "ipa-fnsummary.h" 189)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "call_size_time_table"
+ (!type user_struct 1194 nil gc_unused "vec<size_time_entry,va_heap,vl_ptr>"
+ (!srcfileloc "ipa-fnsummary.h" 193)
+ (!fields 3
+ (!pair "vl_ptr"
+ (!type already_seen 834)
+ (!srcfileloc "ipa-fnsummary.h" 193)
+ nil )
+ (!pair "va_heap"
+ (!type already_seen 835)
+ (!srcfileloc "ipa-fnsummary.h" 193)
+ nil )
+ (!pair "size_time_entry"
+ (!type already_seen 1193)
+ (!srcfileloc "ipa-fnsummary.h" 193)
+ nil )
+ )
+ )
+ (!srcfileloc "ipa-fnsummary.h" 193)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "loop_iterations"
+ (!type pointer 1195 nil gc_used
+ (!type user_struct 1196
+ (!type already_seen 1195)
+ gc_pointed_to "vec<ipa_freqcounting_predicate,va_gc>"
+ (!srcfileloc "ipa-fnsummary.h" 196)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-fnsummary.h" 196)
+ nil )
+ (!pair "ipa_freqcounting_predicate"
+ (!type struct 1197 nil gc_used "ipa_freqcounting_predicate"
+ (!srcfileloc "ipa-fnsummary.h" 196)
+ (!fields 2
+ (!pair "freq"
+ (!type already_seen 1036)
+ (!srcfileloc "ipa-fnsummary.h" 115)
+ nil )
+ (!pair "predicate"
+ (!type pointer 1198 nil gc_unused
+ (!type struct 1199
+ (!type already_seen 1198)
+ gc_unused "ipa_predicate"
+ (!srcfileloc "ipa-fnsummary.h" 117)
+ (!fields 0 )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "ipa-fnsummary.h" 117)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-fnsummary.h" 196)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-fnsummary.h" 196)
+ nil )
+ (!pair "loop_strides"
+ (!type already_seen 1195)
+ (!srcfileloc "ipa-fnsummary.h" 198)
+ nil )
+ (!pair "builtin_constant_p_parms"
+ (!type user_struct 1200 nil gc_unused "vec<int,va_heap,vl_ptr>"
+ (!srcfileloc "ipa-fnsummary.h" 200)
+ (!fields 3
+ (!pair "vl_ptr"
+ (!type already_seen 834)
+ (!srcfileloc "ipa-fnsummary.h" 200)
+ nil )
+ (!pair "va_heap"
+ (!type already_seen 835)
+ (!srcfileloc "ipa-fnsummary.h" 200)
+ nil )
+ (!pair "int"
+ (!type already_seen 374)
+ (!srcfileloc "ipa-fnsummary.h" 200)
+ nil )
+ )
+ )
+ (!srcfileloc "ipa-fnsummary.h" 200)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "growth"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-fnsummary.h" 206)
+ nil )
+ (!pair "scc_no"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-fnsummary.h" 208)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1201 nil gc_unused "edge_clone_summary"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1202 nil gc_unused "edge_clone_summary_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1203 nil gc_unused "gather_other_count_struct"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1204 nil gc_unused "desc_incoming_count_struct"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1205 nil gc_unused "symbol_and_index_together"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1206 nil gc_used "ipa_argagg_value"
+ (!srcfileloc "ipa-prop.h" 916)
+ (!fields 4
+ (!pair "value"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-prop.h" 207)
+ nil )
+ (!pair "unit_offset"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 209)
+ nil )
+ (!pair "index"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 213)
+ nil )
+ (!pair "by_ref"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 215)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1207 nil gc_unused "pass_ipa_cp"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1208 nil gc_unused "ipa_dfs_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1209
+ (!type pointer 1210 nil gc_used
+ (!type already_seen 1209)
+ )
+ gc_pointed_to "odr_type_d"
+ (!srcfileloc "ipa-devirt.cc" 228)
+ (!fields 11
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-devirt.cc" 204)
+ nil )
+ (!pair "bases"
+ (!type user_struct 1211 nil gc_unused "vec<odr_type>"
+ (!srcfileloc "ipa-devirt.cc" 206)
+ (!fields 1
+ (!pair "odr_type"
+ (!type already_seen 1210)
+ (!srcfileloc "ipa-devirt.cc" 206)
+ nil )
+ )
+ )
+ (!srcfileloc "ipa-devirt.cc" 206)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "derived_types"
+ (!type already_seen 1211)
+ (!srcfileloc "ipa-devirt.cc" 209)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "types"
+ (!type already_seen 85)
+ (!srcfileloc "ipa-devirt.cc" 212)
+ nil )
+ (!pair "types_set"
+ (!type already_seen 467)
+ (!srcfileloc "ipa-devirt.cc" 214)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "id"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-devirt.cc" 217)
+ nil )
+ (!pair "anonymous_namespace"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-devirt.cc" 219)
+ nil )
+ (!pair "all_derivations_known"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-devirt.cc" 221)
+ nil )
+ (!pair "odr_violated"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-devirt.cc" 223)
+ nil )
+ (!pair "rtti_broken"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-devirt.cc" 225)
+ nil )
+ (!pair "tbaa_enabled"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-devirt.cc" 227)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type already_seen 1085)
+
+ (!type already_seen 1084)
+
+ (!type struct 1212 nil gc_unused "ipa_param_body_replacement"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1213 nil gc_unused "ipa_param_body_adjustments"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1214
+ (!type pointer 1215 nil gc_used
+ (!type already_seen 1214)
+ )
+ gc_pointed_to "param_access"
+ (!srcfileloc "ipa-sra.cc" 126)
+ (!fields 6
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-sra.cc" 109)
+ nil )
+ (!pair "alias_ptr_type"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-sra.cc" 113)
+ nil )
+ (!pair "unit_offset"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 117)
+ nil )
+ (!pair "unit_size"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 118)
+ nil )
+ (!pair "certain"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 123)
+ nil )
+ (!pair "reverse"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 125)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1216 nil gc_unused "gensum_param_access"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1217
+ (!type pointer 1218 nil gc_used
+ (!type already_seen 1217)
+ )
+ gc_pointed_to "vec<param_access*,va_gc>"
+ (!srcfileloc "ipa-sra.cc" 170)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-sra.cc" 170)
+ nil )
+ (!pair "param_access"
+ (!type already_seen 1215)
+ (!srcfileloc "ipa-sra.cc" 170)
+ nil )
+ )
+ )
+
+ (!type struct 1219 nil gc_used "isra_param_desc"
+ (!srcfileloc "ipa-sra.cc" 282)
+ (!fields 10
+ (!pair "accesses"
+ (!type already_seen 1218)
+ (!srcfileloc "ipa-sra.cc" 170)
+ nil )
+ (!pair "param_size_limit"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 173)
+ nil )
+ (!pair "size_reached"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 175)
+ nil )
+ (!pair "safe_size"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 179)
+ nil )
+ (!pair "locally_unused"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 183)
+ nil )
+ (!pair "split_candidate"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 185)
+ nil )
+ (!pair "by_ref"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 187)
+ nil )
+ (!pair "not_specially_constructed"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 191)
+ nil )
+ (!pair "conditionally_dereferenceable"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 195)
+ nil )
+ (!pair "safe_size_set"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 197)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1220 nil gc_unused "gensum_param_desc"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1221 nil gc_pointed_to "isra_func_summary"
+ (!srcfileloc "ipa-sra.cc" 300)
+ (!fields 5
+ (!pair "m_parameters"
+ (!type pointer 1222 nil gc_used
+ (!type user_struct 1223
+ (!type already_seen 1222)
+ gc_pointed_to "vec<isra_param_desc,va_gc>"
+ (!srcfileloc "ipa-sra.cc" 282)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-sra.cc" 282)
+ nil )
+ (!pair "isra_param_desc"
+ (!type already_seen 1219)
+ (!srcfileloc "ipa-sra.cc" 282)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-sra.cc" 282)
+ nil )
+ (!pair "m_candidate"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 286)
+ nil )
+ (!pair "m_returns_value"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 289)
+ nil )
+ (!pair "m_return_ignored"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 294)
+ nil )
+ (!pair "m_queued"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-sra.cc" 299)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type already_seen 1223)
+
+ (!type struct 1224 nil gc_unused "isra_param_flow"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1225 nil gc_unused "isra_call_summary"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1226
+ (!type pointer 1227 nil gc_used
+ (!type already_seen 1226)
+ )
+ gc_pointed_to "ipa_sra_function_summaries"
+ (!srcfileloc "ipa-sra.cc" 415)
+ (!fields 0 )
+ )
+
+ (!type struct 1228 nil gc_unused "ipa_sra_call_summaries"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1229 nil gc_unused "scan_call_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1230 nil gc_unused "caller_issues"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1231 nil gc_unused "pass_ipa_sra"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1232
+ (!type pointer 1233 nil gc_used
+ (!type already_seen 1232)
+ )
+ gc_pointed_to "modref_tree<alias_set_type>"
+ (!srcfileloc "ipa-modref.h" 23)
+ (!fields 1
+ (!pair "alias_set_type"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.h" 23)
+ nil )
+ )
+ )
+
+ (!type user_struct 1234 nil gc_unused "auto_vec<modref_access_node>"
+ (!srcfileloc "ipa-modref.h" 33)
+ (!fields 1
+ (!pair "modref_access_node"
+ (!type struct 1235 nil gc_unused "modref_access_node"
+ (!srcfileloc "ipa-modref-tree.h" 138)
+ (!fields 7
+ (!pair "offset"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref-tree.h" 67)
+ nil )
+ (!pair "size"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref-tree.h" 68)
+ nil )
+ (!pair "max_size"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref-tree.h" 69)
+ nil )
+ (!pair "parm_offset"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref-tree.h" 72)
+ nil )
+ (!pair "parm_index"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref-tree.h" 76)
+ nil )
+ (!pair "parm_offset_known"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref-tree.h" 77)
+ nil )
+ (!pair "adjustments"
+ (!type already_seen 8)
+ (!srcfileloc "ipa-modref-tree.h" 80)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-modref.h" 33)
+ nil )
+ )
+ )
+
+ (!type already_seen 1235)
+
+ (!type user_struct 1236 nil gc_unused "auto_vec<eaf_flags_t>"
+ (!srcfileloc "ipa-modref.h" 34)
+ (!fields 1
+ (!pair "eaf_flags_t"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.h" 34)
+ nil )
+ )
+ )
+
+ (!type struct 1237
+ (!type pointer 1238 nil gc_used
+ (!type already_seen 1237)
+ )
+ gc_pointed_to "modref_summary"
+ (!srcfileloc "ipa-modref.h" 72)
+ (!fields 14
+ (!pair "loads"
+ (!type already_seen 1233)
+ (!srcfileloc "ipa-modref.h" 31)
+ nil )
+ (!pair "stores"
+ (!type already_seen 1233)
+ (!srcfileloc "ipa-modref.h" 32)
+ nil )
+ (!pair "kills"
+ (!type already_seen 1234)
+ (!srcfileloc "ipa-modref.h" 33)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "arg_flags"
+ (!type already_seen 1236)
+ (!srcfileloc "ipa-modref.h" 34)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "retslot_flags"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.h" 36)
+ nil )
+ (!pair "static_chain_flags"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.h" 37)
+ nil )
+ (!pair "writes_errno"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.h" 39)
+ nil )
+ (!pair "side_effects"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.h" 42)
+ nil )
+ (!pair "nondeterministic"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.h" 45)
+ nil )
+ (!pair "calls_interposable"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.h" 52)
+ nil )
+ (!pair "load_accesses"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.h" 57)
+ nil )
+ (!pair "global_memory_read"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.h" 62)
+ nil )
+ (!pair "global_memory_written"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.h" 63)
+ nil )
+ (!pair "try_dse"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.h" 64)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1239 nil gc_unused "fnspec_summary"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1240 nil gc_unused "fnspec_summaries_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1241 nil gc_unused "escape_entry"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1242 nil gc_unused "escape_summary"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1243 nil gc_unused "escape_summaries_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1244 nil gc_unused "modref_summaries"
+ (!srcfileloc "ipa-modref.cc" 230)
+ (!fields 0 )
+ )
+
+ (!type struct 1245
+ (!type pointer 1246 nil gc_used
+ (!type already_seen 1245)
+ )
+ gc_pointed_to "modref_summary_lto"
+ (!srcfileloc "ipa-modref.cc" 372)
+ (!fields 10
+ (!pair "loads"
+ (!type pointer 1247 nil gc_used
+ (!type user_struct 1248
+ (!type already_seen 1247)
+ gc_pointed_to "modref_tree<tree>"
+ (!srcfileloc "ipa-modref.cc" 350)
+ (!fields 1
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "ipa-modref.cc" 350)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-modref.cc" 357)
+ nil )
+ (!pair "stores"
+ (!type already_seen 1247)
+ (!srcfileloc "ipa-modref.cc" 358)
+ nil )
+ (!pair "kills"
+ (!type already_seen 1234)
+ (!srcfileloc "ipa-modref.cc" 359)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "arg_flags"
+ (!type already_seen 1236)
+ (!srcfileloc "ipa-modref.cc" 360)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "retslot_flags"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.cc" 361)
+ nil )
+ (!pair "static_chain_flags"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.cc" 362)
+ nil )
+ (!pair "writes_errno"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.cc" 363)
+ nil )
+ (!pair "side_effects"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.cc" 364)
+ nil )
+ (!pair "nondeterministic"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.cc" 365)
+ nil )
+ (!pair "calls_interposable"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.cc" 366)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type user_struct 1249 nil gc_unused "modref_summaries_lto"
+ (!srcfileloc "ipa-modref.cc" 255)
+ (!fields 0 )
+ )
+
+ (!type user_struct 1250
+ (!type pointer 1251 nil gc_used
+ (!type already_seen 1250)
+ )
+ gc_pointed_to "fast_function_summary<modref_summary*,va_gc>"
+ (!srcfileloc "ipa-modref.cc" 260)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-modref.cc" 260)
+ nil )
+ (!pair "modref_summary"
+ (!type already_seen 1238)
+ (!srcfileloc "ipa-modref.cc" 260)
+ nil )
+ )
+ )
+
+ (!type user_struct 1252
+ (!type pointer 1253 nil gc_used
+ (!type already_seen 1252)
+ )
+ gc_pointed_to "fast_function_summary<modref_summary_lto*,va_gc>"
+ (!srcfileloc "ipa-modref.cc" 272)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-modref.cc" 272)
+ nil )
+ (!pair "modref_summary_lto"
+ (!type already_seen 1246)
+ (!srcfileloc "ipa-modref.cc" 272)
+ nil )
+ )
+ )
+
+ (!type already_seen 1248)
+
+ (!type struct 1254 nil gc_unused "modref_parm_map"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1255 nil gc_unused "modref_access_analysis"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1256 nil gc_unused "escape_point"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1257 nil gc_unused "modref_lattice"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1258 nil gc_unused "modref_eaf_analysis"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1259 nil gc_unused "stack_entry"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1260 nil gc_unused "pass_modref"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1261 nil gc_unused "pass_ipa_modref"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1262 nil gc_unused "ipa_call_summary"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1263 nil gc_unused "escape_map"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1264 nil gc_unused "ipa_modref_summary"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1265 nil gc_unused "modref_ref_node"
+ (!srcfileloc "ipa-modref-tree.h" 205)
+ (!fields 0 )
+ )
+
+ (!type user_struct 1266 nil gc_unused "modref_base_node"
+ (!srcfileloc "ipa-modref-tree.h" 288)
+ (!fields 0 )
+ )
+
+ (!type user_struct 1267 nil gc_unused "modref_tree"
+ (!srcfileloc "ipa-modref-tree.h" 738)
+ (!fields 0 )
+ )
+
+ (!type struct 1268 nil gc_used "nowarn_spec_t"
+ (!srcfileloc "diagnostic-spec.h" 137)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1269
+ (!type pointer 1270 nil gc_used
+ (!type already_seen 1269)
+ )
+ gc_pointed_to "hash_map<location_hash,nowarn_spec_t>"
+ (!srcfileloc "diagnostic-spec.h" 137)
+ (!fields 2
+ (!pair "nowarn_spec_t"
+ (!type already_seen 1268)
+ (!srcfileloc "diagnostic-spec.h" 137)
+ nil )
+ (!pair "location_hash"
+ (!type already_seen 740)
+ (!srcfileloc "diagnostic-spec.h" 137)
+ nil )
+ )
+ )
+
+ (!type already_seen 478)
+
+ (!type already_seen 474)
+
+ (!type already_seen 477)
+
+ (!type already_seen 485)
+
+ (!type already_seen 510)
+
+ (!type already_seen 512)
+
+ (!type already_seen 475)
+
+ (!type already_seen 472)
+
+ (!type already_seen 513)
+
+ (!type already_seen 499)
+
+ (!type already_seen 508)
+
+ (!type already_seen 507)
+
+ (!type already_seen 481)
+
+ (!type already_seen 500)
+
+ (!type already_seen 502)
+
+ (!type already_seen 504)
+
+ (!type already_seen 506)
+
+ (!type already_seen 483)
+
+ (!type struct 1271 nil gc_unused "array_descr_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1272 nil gc_unused "fixed_point_type_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 494)
+
+ (!type user_struct 1273
+ (!type pointer 1274 nil gc_used
+ (!type already_seen 1273)
+ )
+ gc_pointed_to "hash_map<char*,tree>"
+ (!srcfileloc "dwarf2asm.cc" 911)
+ (!fields 1
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "dwarf2asm.cc" 911)
+ nil )
+ )
+ )
+
+ (!type struct 1275
+ (!type pointer 1276 nil gc_used
+ (!type already_seen 1275)
+ )
+ gc_pointed_to "dw_cfi_row"
+ (!srcfileloc "dwarf2cfi.cc" 193)
+ (!fields 5
+ (!pair "cfa"
+ (!type already_seen 512)
+ (!srcfileloc "dwarf2cfi.cc" 66)
+ nil )
+ (!pair "cfa_cfi"
+ (!type already_seen 473)
+ (!srcfileloc "dwarf2cfi.cc" 67)
+ nil )
+ (!pair "reg_save"
+ (!type already_seen 471)
+ (!srcfileloc "dwarf2cfi.cc" 70)
+ nil )
+ (!pair "window_save"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2cfi.cc" 73)
+ nil )
+ (!pair "ra_mangled"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2cfi.cc" 76)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1277
+ (!type pointer 1278 nil gc_used
+ (!type already_seen 1277)
+ )
+ gc_pointed_to "reg_saved_in_data"
+ (!srcfileloc "dwarf2cfi.cc" 195)
+ (!fields 2
+ (!pair "orig_reg"
+ (!type already_seen 100)
+ (!srcfileloc "dwarf2cfi.cc" 81)
+ nil )
+ (!pair "saved_in_reg"
+ (!type already_seen 100)
+ (!srcfileloc "dwarf2cfi.cc" 82)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1279 nil gc_unused "dw_trace_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1280 nil gc_unused "trace_info_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1281 nil gc_unused "queued_reg_save"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1282 nil gc_unused "init_one_dwarf_reg_state"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1283 nil gc_unused "pass_dwarf2_frame"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1284
+ (!type pointer 1285 nil gc_used
+ (!type already_seen 1284)
+ )
+ gc_pointed_to "vec<dw_fde_ref,va_gc>"
+ (!srcfileloc "dwarf2out.cc" 215)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "dwarf2out.cc" 215)
+ nil )
+ (!pair "dw_fde_ref"
+ (!type already_seen 469)
+ (!srcfileloc "dwarf2out.cc" 215)
+ nil )
+ )
+ )
+
+ (!type struct 1286 nil gc_used "indirect_string_hasher"
+ (!srcfileloc "dwarf2out.cc" 233)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1287
+ (!type pointer 1288 nil gc_used
+ (!type already_seen 1287)
+ )
+ gc_pointed_to "hash_table<indirect_string_hasher>"
+ (!srcfileloc "dwarf2out.cc" 233)
+ (!fields 1
+ (!pair "indirect_string_hasher"
+ (!type already_seen 1286)
+ (!srcfileloc "dwarf2out.cc" 233)
+ nil )
+ )
+ )
+
+ (!type user_struct 1289
+ (!type pointer 1290 nil gc_used
+ (!type already_seen 1289)
+ )
+ gc_pointed_to "vec<char*,va_gc>"
+ (!srcfileloc "dwarf2out.cc" 275)
+ (!fields 1
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "dwarf2out.cc" 275)
+ nil )
+ )
+ )
+
+ (!type already_seen 482)
+
+ (!type already_seen 490)
+
+ (!type struct 1291 nil gc_used "dw_line_info_struct"
+ (!srcfileloc "dwarf2out.cc" 3081)
+ (!fields 2
+ (!pair "opcode"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3079)
+ nil )
+ (!pair "val"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3080)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type user_struct 1292
+ (!type pointer 1293 nil gc_used
+ (!type already_seen 1292)
+ )
+ gc_pointed_to "vec<dw_line_info_entry,va_gc>"
+ (!srcfileloc "dwarf2out.cc" 3123)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "dwarf2out.cc" 3123)
+ nil )
+ (!pair "dw_line_info_entry"
+ (!type already_seen 1291)
+ (!srcfileloc "dwarf2out.cc" 3123)
+ nil )
+ )
+ )
+
+ (!type struct 1294
+ (!type pointer 1295 nil gc_used
+ (!type already_seen 1294)
+ )
+ gc_pointed_to "dw_line_info_table"
+ (!srcfileloc "dwarf2out.cc" 3654)
+ (!fields 10
+ (!pair "end_label"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 3086)
+ nil )
+ (!pair "file_num"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3090)
+ nil )
+ (!pair "line_num"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3091)
+ nil )
+ (!pair "column_num"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3092)
+ nil )
+ (!pair "discrim_num"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3093)
+ nil )
+ (!pair "is_stmt"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3094)
+ nil )
+ (!pair "in_use"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3095)
+ nil )
+ (!pair "view"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3111)
+ nil )
+ (!pair "symviews_since_reset"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3116)
+ nil )
+ (!pair "entries"
+ (!type already_seen 1293)
+ (!srcfileloc "dwarf2out.cc" 3123)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type already_seen 488)
+
+ (!type already_seen 493)
+
+ (!type struct 1296 nil gc_unused "set_early_dwarf"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1297 nil gc_used "pubname_struct"
+ (!srcfileloc "dwarf2out.cc" 3228)
+ (!fields 2
+ (!pair "die"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 3226)
+ nil )
+ (!pair "name"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 3227)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1298 nil gc_used "dw_ranges"
+ (!srcfileloc "dwarf2out.cc" 3687)
+ (!fields 6
+ (!pair "label"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 3233)
+ nil )
+ (!pair "num"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3236)
+ nil )
+ (!pair "idx"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3242)
+ nil )
+ (!pair "maybe_new_sec"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3245)
+ nil )
+ (!pair "begin_entry"
+ (!type already_seen 480)
+ (!srcfileloc "dwarf2out.cc" 3246)
+ nil )
+ (!pair "end_entry"
+ (!type already_seen 480)
+ (!srcfileloc "dwarf2out.cc" 3247)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1299 nil gc_used "macinfo_struct"
+ (!srcfileloc "dwarf2out.cc" 3256)
+ (!fields 3
+ (!pair "code"
+ (!type already_seen 8)
+ (!srcfileloc "dwarf2out.cc" 3253)
+ nil )
+ (!pair "lineno"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3254)
+ nil )
+ (!pair "info"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 3255)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1300 nil gc_used "dw_ranges_by_label"
+ (!srcfileloc "dwarf2out.cc" 3690)
+ (!fields 2
+ (!pair "begin"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 3261)
+ nil )
+ (!pair "end"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 3262)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1301
+ (!type pointer 1302 nil gc_used
+ (!type already_seen 1301)
+ )
+ gc_pointed_to "limbo_die_struct"
+ (!srcfileloc "dwarf2out.cc" 3284)
+ (!fields 3
+ (!pair "die"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 3279)
+ nil )
+ (!pair "created_for"
+ (!type already_seen 23)
+ (!srcfileloc "dwarf2out.cc" 3282)
+ nil )
+ (!pair "next"
+ (!type already_seen 1302)
+ (!srcfileloc "dwarf2out.cc" 3283)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1303 nil gc_unused "skeleton_chain_struct"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1304 nil gc_used "dwarf_file_hasher"
+ (!srcfileloc "dwarf2out.cc" 3501)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1305
+ (!type pointer 1306 nil gc_used
+ (!type already_seen 1305)
+ )
+ gc_pointed_to "hash_table<dwarf_file_hasher>"
+ (!srcfileloc "dwarf2out.cc" 3501)
+ (!fields 1
+ (!pair "dwarf_file_hasher"
+ (!type already_seen 1304)
+ (!srcfileloc "dwarf2out.cc" 3501)
+ nil )
+ )
+ )
+
+ (!type struct 1307 nil gc_used "decl_die_hasher"
+ (!srcfileloc "dwarf2out.cc" 3512)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1308
+ (!type pointer 1309 nil gc_used
+ (!type already_seen 1308)
+ )
+ gc_pointed_to "hash_table<decl_die_hasher>"
+ (!srcfileloc "dwarf2out.cc" 3512)
+ (!fields 1
+ (!pair "decl_die_hasher"
+ (!type already_seen 1307)
+ (!srcfileloc "dwarf2out.cc" 3512)
+ nil )
+ )
+ )
+
+ (!type user_struct 1310
+ (!type pointer 1311 nil gc_used
+ (!type already_seen 1310)
+ )
+ gc_pointed_to "vec<dw_die_ref,va_gc>"
+ (!srcfileloc "dwarf2out.cc" 3516)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "dwarf2out.cc" 3516)
+ nil )
+ (!pair "dw_die_ref"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 3516)
+ nil )
+ )
+ )
+
+ (!type struct 1312 nil gc_pointed_to "variable_value_struct"
+ (!srcfileloc "dwarf2out.cc" 3517)
+ (!fields 2
+ (!pair "decl_id"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3515)
+ nil )
+ (!pair "dies"
+ (!type already_seen 1311)
+ (!srcfileloc "dwarf2out.cc" 3516)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1313 nil gc_used "variable_value_hasher"
+ (!srcfileloc "dwarf2out.cc" 3529)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1314
+ (!type pointer 1315 nil gc_used
+ (!type already_seen 1314)
+ )
+ gc_pointed_to "hash_table<variable_value_hasher>"
+ (!srcfileloc "dwarf2out.cc" 3529)
+ (!fields 1
+ (!pair "variable_value_hasher"
+ (!type already_seen 1313)
+ (!srcfileloc "dwarf2out.cc" 3529)
+ nil )
+ )
+ )
+
+ (!type struct 1316 nil gc_used "block_die_hasher"
+ (!srcfileloc "dwarf2out.cc" 3539)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1317
+ (!type pointer 1318 nil gc_used
+ (!type already_seen 1317)
+ )
+ gc_pointed_to "hash_table<block_die_hasher>"
+ (!srcfileloc "dwarf2out.cc" 3539)
+ (!fields 1
+ (!pair "block_die_hasher"
+ (!type already_seen 1316)
+ (!srcfileloc "dwarf2out.cc" 3539)
+ nil )
+ )
+ )
+
+ (!type struct 1319 nil gc_used "die_arg_entry_struct"
+ (!srcfileloc "dwarf2out.cc" 3544)
+ (!fields 2
+ (!pair "die"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 3542)
+ nil )
+ (!pair "arg"
+ (!type already_seen 23)
+ (!srcfileloc "dwarf2out.cc" 3543)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1320
+ (!type pointer 1321 nil gc_used
+ (!type already_seen 1320)
+ )
+ gc_pointed_to "var_loc_node"
+ (!srcfileloc "dwarf2out.cc" 3561)
+ (!fields 4
+ (!pair "loc"
+ (!type already_seen 100)
+ (!srcfileloc "dwarf2out.cc" 3557)
+ nil )
+ (!pair "label"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 3558)
+ nil )
+ (!pair "next"
+ (!type already_seen 1321)
+ (!srcfileloc "dwarf2out.cc" 3559)
+ nil )
+ (!pair "view"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3560)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 4095 nil nil )
+
+ (!type struct 1322 nil gc_pointed_to "var_loc_list_def"
+ (!srcfileloc "dwarf2out.cc" 3582)
+ (!fields 4
+ (!pair "first"
+ (!type already_seen 1321)
+ (!srcfileloc "dwarf2out.cc" 3565)
+ nil )
+ (!pair "last"
+ (!type already_seen 1321)
+ (!srcfileloc "dwarf2out.cc" 3573)
+ (!options
+ (!option skip string "%h")
+ )
+ )
+ (!pair "last_before_switch"
+ (!type already_seen 1321)
+ (!srcfileloc "dwarf2out.cc" 3578)
+ (!options
+ (!option skip string "%h")
+ )
+ )
+ (!pair "decl_id"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3581)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1323
+ (!type pointer 1324 nil gc_used
+ (!type already_seen 1323)
+ )
+ gc_pointed_to "call_arg_loc_node"
+ (!srcfileloc "dwarf2out.cc" 3593)
+ (!fields 6
+ (!pair "call_arg_loc_note"
+ (!type already_seen 100)
+ (!srcfileloc "dwarf2out.cc" 3587)
+ nil )
+ (!pair "label"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 3588)
+ nil )
+ (!pair "block"
+ (!type already_seen 23)
+ (!srcfileloc "dwarf2out.cc" 3589)
+ nil )
+ (!pair "tail_call_p"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3590)
+ nil )
+ (!pair "symbol_ref"
+ (!type already_seen 100)
+ (!srcfileloc "dwarf2out.cc" 3591)
+ nil )
+ (!pair "next"
+ (!type already_seen 1324)
+ (!srcfileloc "dwarf2out.cc" 3592)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 4095 nil nil )
+
+ (!type struct 1325 nil gc_used "decl_loc_hasher"
+ (!srcfileloc "dwarf2out.cc" 3605)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1326
+ (!type pointer 1327 nil gc_used
+ (!type already_seen 1326)
+ )
+ gc_pointed_to "hash_table<decl_loc_hasher>"
+ (!srcfileloc "dwarf2out.cc" 3605)
+ (!fields 1
+ (!pair "decl_loc_hasher"
+ (!type already_seen 1325)
+ (!srcfileloc "dwarf2out.cc" 3605)
+ nil )
+ )
+ )
+
+ (!type struct 1328 nil gc_pointed_to "cached_dw_loc_list_def"
+ (!srcfileloc "dwarf2out.cc" 3623)
+ (!fields 2
+ (!pair "decl_id"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3619)
+ nil )
+ (!pair "loc_list"
+ (!type already_seen 484)
+ (!srcfileloc "dwarf2out.cc" 3622)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1329 nil gc_used "dw_loc_list_hasher"
+ (!srcfileloc "dwarf2out.cc" 3636)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1330
+ (!type pointer 1331 nil gc_used
+ (!type already_seen 1330)
+ )
+ gc_pointed_to "hash_table<dw_loc_list_hasher>"
+ (!srcfileloc "dwarf2out.cc" 3636)
+ (!fields 1
+ (!pair "dw_loc_list_hasher"
+ (!type already_seen 1329)
+ (!srcfileloc "dwarf2out.cc" 3636)
+ nil )
+ )
+ )
+
+ (!type user_struct 1332
+ (!type pointer 1333 nil gc_used
+ (!type already_seen 1332)
+ )
+ gc_pointed_to "vec<dw_line_info_table*,va_gc>"
+ (!srcfileloc "dwarf2out.cc" 3661)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "dwarf2out.cc" 3661)
+ nil )
+ (!pair "dw_line_info_table"
+ (!type already_seen 1295)
+ (!srcfileloc "dwarf2out.cc" 3661)
+ nil )
+ )
+ )
+
+ (!type user_struct 1334
+ (!type pointer 1335 nil gc_used
+ (!type already_seen 1334)
+ )
+ gc_pointed_to "vec<pubname_entry,va_gc>"
+ (!srcfileloc "dwarf2out.cc" 3669)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "dwarf2out.cc" 3669)
+ nil )
+ (!pair "pubname_entry"
+ (!type already_seen 1297)
+ (!srcfileloc "dwarf2out.cc" 3669)
+ nil )
+ )
+ )
+
+ (!type user_struct 1336
+ (!type pointer 1337 nil gc_used
+ (!type already_seen 1336)
+ )
+ gc_pointed_to "vec<macinfo_entry,va_gc>"
+ (!srcfileloc "dwarf2out.cc" 3677)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "dwarf2out.cc" 3677)
+ nil )
+ (!pair "macinfo_entry"
+ (!type already_seen 1299)
+ (!srcfileloc "dwarf2out.cc" 3677)
+ nil )
+ )
+ )
+
+ (!type user_struct 1338
+ (!type pointer 1339 nil gc_used
+ (!type already_seen 1338)
+ )
+ gc_pointed_to "vec<dw_ranges,va_gc>"
+ (!srcfileloc "dwarf2out.cc" 3687)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "dwarf2out.cc" 3687)
+ nil )
+ (!pair "dw_ranges"
+ (!type already_seen 1298)
+ (!srcfileloc "dwarf2out.cc" 3687)
+ nil )
+ )
+ )
+
+ (!type user_struct 1340
+ (!type pointer 1341 nil gc_used
+ (!type already_seen 1340)
+ )
+ gc_pointed_to "vec<dw_ranges_by_label,va_gc>"
+ (!srcfileloc "dwarf2out.cc" 3690)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "dwarf2out.cc" 3690)
+ nil )
+ (!pair "dw_ranges_by_label"
+ (!type already_seen 1300)
+ (!srcfileloc "dwarf2out.cc" 3690)
+ nil )
+ )
+ )
+
+ (!type user_struct 1342
+ (!type pointer 1343 nil gc_used
+ (!type already_seen 1342)
+ )
+ gc_pointed_to "vec<die_arg_entry,va_gc>"
+ (!srcfileloc "dwarf2out.cc" 3707)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "dwarf2out.cc" 3707)
+ nil )
+ (!pair "die_arg_entry"
+ (!type already_seen 1319)
+ (!srcfileloc "dwarf2out.cc" 3707)
+ nil )
+ )
+ )
+
+ (!type struct 1344 nil gc_unused "md5_ctx"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1345 nil gc_unused "checksum_attributes"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1346 nil gc_unused "loc_descr_context"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1347 nil gc_unused "vlr_context"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1348 nil gc_used "addr_hasher"
+ (!srcfileloc "dwarf2out.cc" 5072)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1349
+ (!type pointer 1350 nil gc_used
+ (!type already_seen 1349)
+ )
+ gc_pointed_to "hash_table<addr_hasher>"
+ (!srcfileloc "dwarf2out.cc" 5072)
+ (!fields 1
+ (!pair "addr_hasher"
+ (!type already_seen 1348)
+ (!srcfileloc "dwarf2out.cc" 5072)
+ nil )
+ )
+ )
+
+ (!type struct 1351 nil gc_used "sym_off_pair"
+ (!srcfileloc "dwarf2out.cc" 5950)
+ (!fields 2
+ (!pair "sym"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 5947)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "off"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 5948)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type user_struct 1352
+ (!type pointer 1353 nil gc_used
+ (!type already_seen 1352)
+ )
+ gc_pointed_to "hash_map<tree,sym_off_pair>"
+ (!srcfileloc "dwarf2out.cc" 5950)
+ (!fields 2
+ (!pair "sym_off_pair"
+ (!type already_seen 1351)
+ (!srcfileloc "dwarf2out.cc" 5950)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "dwarf2out.cc" 5950)
+ nil )
+ )
+ )
+
+ (!type struct 1354 nil gc_unused "decl_table_entry"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1355 nil gc_unused "decl_table_entry_hasher"
+ (!srcfileloc "dwarf2out.cc" 8338)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1356 nil gc_unused "hash_table<decl_table_entry_hasher>"
+ (!srcfileloc "dwarf2out.cc" 8338)
+ (!fields 1
+ (!pair "decl_table_entry_hasher"
+ (!type already_seen 1355)
+ (!srcfileloc "dwarf2out.cc" 8338)
+ nil )
+ )
+ )
+
+ (!type struct 1357 nil gc_unused "external_ref"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1358 nil gc_unused "external_ref_hasher"
+ (!srcfileloc "dwarf2out.cc" 9096)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1359 nil gc_unused "hash_table<external_ref_hasher>"
+ (!srcfileloc "dwarf2out.cc" 9096)
+ (!fields 1
+ (!pair "external_ref_hasher"
+ (!type already_seen 1358)
+ (!srcfileloc "dwarf2out.cc" 9096)
+ nil )
+ )
+ )
+
+ (!type struct 1360 nil gc_unused "file_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1361 nil gc_unused "dir_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1362 nil gc_unused "file_name_acquire_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1363 nil gc_unused "dwarf_qual_info_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1364 nil gc_unused "dwarf_procedure_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1365 nil gc_pointed_to "inline_entry_data"
+ (!srcfileloc "dwarf2out.cc" 24286)
+ (!fields 4
+ (!pair "block"
+ (!type already_seen 23)
+ (!srcfileloc "dwarf2out.cc" 24278)
+ nil )
+ (!pair "label_pfx"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 24281)
+ nil )
+ (!pair "label_num"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 24282)
+ nil )
+ (!pair "view"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 24285)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1366 nil gc_used "inline_entry_data_hasher"
+ (!srcfileloc "dwarf2out.cc" 24312)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1367
+ (!type pointer 1368 nil gc_used
+ (!type already_seen 1367)
+ )
+ gc_pointed_to "hash_table<inline_entry_data_hasher>"
+ (!srcfileloc "dwarf2out.cc" 24312)
+ (!fields 1
+ (!pair "inline_entry_data_hasher"
+ (!type already_seen 1366)
+ (!srcfileloc "dwarf2out.cc" 24312)
+ nil )
+ )
+ )
+
+ (!type struct 1369 nil gc_unused "macinfo_entry_hasher"
+ (!srcfileloc "dwarf2out.cc" 28878)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1370 nil gc_unused "hash_table<macinfo_entry_hasher>"
+ (!srcfileloc "dwarf2out.cc" 28878)
+ (!fields 1
+ (!pair "macinfo_entry_hasher"
+ (!type already_seen 1369)
+ (!srcfileloc "dwarf2out.cc" 28878)
+ nil )
+ )
+ )
+
+ (!type struct 1371 nil gc_unused "comdat_type_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1372 nil gc_unused "loc_list_hasher"
+ (!srcfileloc "dwarf2out.cc" 31940)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1373 nil gc_unused "hash_table<loc_list_hasher>"
+ (!srcfileloc "dwarf2out.cc" 31940)
+ (!fields 1
+ (!pair "loc_list_hasher"
+ (!type already_seen 1372)
+ (!srcfileloc "dwarf2out.cc" 31940)
+ nil )
+ )
+ )
+
+ (!type struct 1374
+ (!type pointer 1375 nil gc_used
+ (!type already_seen 1374)
+ )
+ gc_pointed_to "ctf_string"
+ (!srcfileloc "ctfc.h" 57)
+ (!fields 2
+ (!pair "cts_str"
+ (!type already_seen 11)
+ (!srcfileloc "ctfc.h" 55)
+ nil )
+ (!pair "cts_next"
+ (!type already_seen 1375)
+ (!srcfileloc "ctfc.h" 56)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.cts_next")
+ )
+ 4095 nil nil )
+
+ (!type struct 1376 nil gc_used "ctf_strtable"
+ (!srcfileloc "ctfc.h" 68)
+ (!fields 5
+ (!pair "ctstab_head"
+ (!type already_seen 1375)
+ (!srcfileloc "ctfc.h" 63)
+ nil )
+ (!pair "ctstab_tail"
+ (!type already_seen 1375)
+ (!srcfileloc "ctfc.h" 64)
+ nil )
+ (!pair "ctstab_num"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 65)
+ nil )
+ (!pair "ctstab_len"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 66)
+ nil )
+ (!pair "ctstab_estr"
+ (!type already_seen 11)
+ (!srcfileloc "ctfc.h" 67)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1377 nil gc_used "ctf_encoding"
+ (!srcfileloc "ctfc.h" 78)
+ (!fields 3
+ (!pair "cte_format"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 75)
+ nil )
+ (!pair "cte_offset"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 76)
+ nil )
+ (!pair "cte_bits"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 77)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1378 nil gc_used "ctf_arinfo"
+ (!srcfileloc "ctfc.h" 87)
+ (!fields 3
+ (!pair "ctr_contents"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 84)
+ nil )
+ (!pair "ctr_index"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 85)
+ nil )
+ (!pair "ctr_nelems"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 86)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1379 nil gc_unused "ctf_funcinfo"
+ (!srcfileloc "ctfc.h" 96)
+ (!fields 3
+ (!pair "ctc_return"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 93)
+ nil )
+ (!pair "ctc_argc"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 94)
+ nil )
+ (!pair "ctc_flags"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 95)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1380 nil gc_used "ctf_sliceinfo"
+ (!srcfileloc "ctfc.h" 103)
+ (!fields 3
+ (!pair "cts_type"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 100)
+ nil )
+ (!pair "cts_offset"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 101)
+ nil )
+ (!pair "cts_bits"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 102)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type union 1381 nil gc_used "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/ctfc.h:113"
+ (!srcfileloc "ctfc.h" 116)
+ (!fields 2
+ (!pair "_size"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 114)
+ (!options
+ (!option tag string "0")
+ )
+ )
+ (!pair "_type"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 115)
+ (!options
+ (!option tag string "1")
+ )
+ )
+ )
+ (!options
+ (!option desc string "0")
+ )
+ 4095 nil )
+
+ (!type struct 1382 nil gc_used "ctf_itype"
+ (!srcfileloc "ctfc.h" 119)
+ (!fields 5
+ (!pair "ctti_name"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 110)
+ nil )
+ (!pair "ctti_info"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 111)
+ nil )
+ (!pair "_u"
+ (!type already_seen 1381)
+ (!srcfileloc "ctfc.h" 116)
+ nil )
+ (!pair "ctti_lsizehi"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 117)
+ nil )
+ (!pair "ctti_lsizelo"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 118)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1383
+ (!type pointer 1384 nil gc_used
+ (!type already_seen 1383)
+ )
+ gc_pointed_to "ctf_dmdef"
+ (!srcfileloc "ctfc.h" 138)
+ (!fields 6
+ (!pair "dmd_name"
+ (!type already_seen 11)
+ (!srcfileloc "ctfc.h" 132)
+ nil )
+ (!pair "dmd_type"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 133)
+ nil )
+ (!pair "dmd_name_offset"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 134)
+ nil )
+ (!pair "dmd_offset"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 135)
+ nil )
+ (!pair "dmd_value"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 136)
+ nil )
+ (!pair "dmd_next"
+ (!type already_seen 1384)
+ (!srcfileloc "ctfc.h" 137)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.dmd_next")
+ )
+ 4095 nil nil )
+
+ (!type struct 1385
+ (!type pointer 1386 nil gc_used
+ (!type already_seen 1385)
+ )
+ gc_pointed_to "ctf_func_arg"
+ (!srcfileloc "ctfc.h" 150)
+ (!fields 4
+ (!pair "farg_type"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 146)
+ nil )
+ (!pair "farg_name"
+ (!type already_seen 11)
+ (!srcfileloc "ctfc.h" 147)
+ nil )
+ (!pair "farg_name_offset"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 148)
+ nil )
+ (!pair "farg_next"
+ (!type already_seen 1386)
+ (!srcfileloc "ctfc.h" 149)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type union 1387 nil gc_used "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/ctfc.h:167"
+ (!srcfileloc "ctfc.h" 178)
+ (!fields 5
+ (!pair "dtu_members"
+ (!type already_seen 1384)
+ (!srcfileloc "ctfc.h" 169)
+ (!options
+ (!option tag string "CTF_DTU_D_MEMBERS")
+ )
+ )
+ (!pair "dtu_arr"
+ (!type already_seen 1378)
+ (!srcfileloc "ctfc.h" 171)
+ (!options
+ (!option tag string "CTF_DTU_D_ARRAY")
+ )
+ )
+ (!pair "dtu_enc"
+ (!type already_seen 1377)
+ (!srcfileloc "ctfc.h" 173)
+ (!options
+ (!option tag string "CTF_DTU_D_ENCODING")
+ )
+ )
+ (!pair "dtu_argv"
+ (!type already_seen 1386)
+ (!srcfileloc "ctfc.h" 175)
+ (!options
+ (!option tag string "CTF_DTU_D_ARGUMENTS")
+ )
+ )
+ (!pair "dtu_slice"
+ (!type already_seen 1380)
+ (!srcfileloc "ctfc.h" 177)
+ (!options
+ (!option tag string "CTF_DTU_D_SLICE")
+ )
+ )
+ )
+ (!options
+ (!option desc string "ctf_dtu_d_union_selector (&%1)")
+ )
+ 4095 nil )
+
+ (!type struct 1388
+ (!type pointer 1389
+ (!type pointer 1390 nil gc_unused
+ (!type already_seen 1389)
+ )
+ gc_used
+ (!type already_seen 1388)
+ )
+ gc_pointed_to "ctf_dtdef"
+ (!srcfileloc "ctfc.h" 179)
+ (!fields 8
+ (!pair "dtd_key"
+ (!type already_seen 486)
+ (!srcfileloc "ctfc.h" 158)
+ nil )
+ (!pair "dtd_name"
+ (!type already_seen 11)
+ (!srcfileloc "ctfc.h" 159)
+ nil )
+ (!pair "dtd_type"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 160)
+ nil )
+ (!pair "dtd_data"
+ (!type already_seen 1382)
+ (!srcfileloc "ctfc.h" 161)
+ nil )
+ (!pair "from_global_func"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 162)
+ nil )
+ (!pair "linkage"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 164)
+ nil )
+ (!pair "dtd_enum_unsigned"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 165)
+ nil )
+ (!pair "dtd_u"
+ (!type already_seen 1387)
+ (!srcfileloc "ctfc.h" 178)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1391
+ (!type pointer 1392
+ (!type pointer 1393 nil gc_unused
+ (!type already_seen 1392)
+ )
+ gc_used
+ (!type already_seen 1391)
+ )
+ gc_pointed_to "ctf_dvdef"
+ (!srcfileloc "ctfc.h" 192)
+ (!fields 5
+ (!pair "dvd_key"
+ (!type already_seen 486)
+ (!srcfileloc "ctfc.h" 187)
+ nil )
+ (!pair "dvd_name"
+ (!type already_seen 11)
+ (!srcfileloc "ctfc.h" 188)
+ nil )
+ (!pair "dvd_name_offset"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 189)
+ nil )
+ (!pair "dvd_visibility"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 190)
+ nil )
+ (!pair "dvd_type"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 191)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1394
+ (!type pointer 1395 nil gc_unused
+ (!type already_seen 1394)
+ )
+ gc_unused "ctf_srcloc"
+ (!srcfileloc "ctfc.h" 206)
+ (!fields 3
+ (!pair "ctsloc_file"
+ (!type already_seen 11)
+ (!srcfileloc "ctfc.h" 203)
+ nil )
+ (!pair "ctsloc_line"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 204)
+ nil )
+ (!pair "ctsloc_col"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 205)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1396 nil gc_used "ctfc_dtd_hasher"
+ (!srcfileloc "ctfc.h" 276)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 1397 nil gc_used "ctfc_dvd_hasher"
+ (!srcfileloc "ctfc.h" 278)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1398
+ (!type pointer 1399 nil gc_used
+ (!type already_seen 1398)
+ )
+ gc_pointed_to "hash_table<ctfc_dtd_hasher>"
+ (!srcfileloc "ctfc.h" 276)
+ (!fields 1
+ (!pair "ctfc_dtd_hasher"
+ (!type already_seen 1396)
+ (!srcfileloc "ctfc.h" 276)
+ nil )
+ )
+ )
+
+ (!type user_struct 1400
+ (!type pointer 1401 nil gc_used
+ (!type already_seen 1400)
+ )
+ gc_pointed_to "hash_table<ctfc_dvd_hasher>"
+ (!srcfileloc "ctfc.h" 278)
+ (!fields 1
+ (!pair "ctfc_dvd_hasher"
+ (!type already_seen 1397)
+ (!srcfileloc "ctfc.h" 278)
+ nil )
+ )
+ )
+
+ (!type struct 1402
+ (!type pointer 1403 nil gc_used
+ (!type already_seen 1402)
+ )
+ gc_pointed_to "ctf_container"
+ (!srcfileloc "ctfc.h" 334)
+ (!fields 23
+ (!pair "ctfc_magic"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 270)
+ nil )
+ (!pair "ctfc_version"
+ (!type already_seen 8)
+ (!srcfileloc "ctfc.h" 271)
+ nil )
+ (!pair "ctfc_flags"
+ (!type already_seen 8)
+ (!srcfileloc "ctfc.h" 272)
+ nil )
+ (!pair "ctfc_cuname_offset"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 273)
+ nil )
+ (!pair "ctfc_types"
+ (!type already_seen 1399)
+ (!srcfileloc "ctfc.h" 276)
+ nil )
+ (!pair "ctfc_vars"
+ (!type already_seen 1401)
+ (!srcfileloc "ctfc.h" 278)
+ nil )
+ (!pair "ctfc_ignore_vars"
+ (!type already_seen 1401)
+ (!srcfileloc "ctfc.h" 280)
+ nil )
+ (!pair "ctfc_strtable"
+ (!type already_seen 1376)
+ (!srcfileloc "ctfc.h" 283)
+ nil )
+ (!pair "ctfc_aux_strtable"
+ (!type already_seen 1376)
+ (!srcfileloc "ctfc.h" 286)
+ nil )
+ (!pair "ctfc_num_types"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 288)
+ nil )
+ (!pair "ctfc_num_stypes"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 289)
+ nil )
+ (!pair "ctfc_num_global_funcs"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 290)
+ nil )
+ (!pair "ctfc_num_global_objts"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 291)
+ nil )
+ (!pair "ctfc_num_vlen_bytes"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 296)
+ nil )
+ (!pair "ctfc_nextid"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 299)
+ nil )
+ (!pair "ctfc_vars_list"
+ (!type already_seen 1393)
+ (!srcfileloc "ctfc.h" 307)
+ (!options
+ (!option length string "0")
+ )
+ )
+ (!pair "ctfc_vars_list_count"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 309)
+ nil )
+ (!pair "ctfc_types_list"
+ (!type already_seen 1390)
+ (!srcfileloc "ctfc.h" 313)
+ (!options
+ (!option length string "0")
+ )
+ )
+ (!pair "ctfc_gfuncs_list"
+ (!type already_seen 1390)
+ (!srcfileloc "ctfc.h" 317)
+ (!options
+ (!option length string "0")
+ )
+ )
+ (!pair "ctfc_gobjts_list"
+ (!type already_seen 1393)
+ (!srcfileloc "ctfc.h" 320)
+ (!options
+ (!option length string "0")
+ )
+ )
+ (!pair "ctfc_numbytes_asm"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 328)
+ nil )
+ (!pair "ctfc_strlen"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 330)
+ nil )
+ (!pair "ctfc_aux_strlen"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 332)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1404 nil gc_unused "ctf_dtd_preprocess_arg"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1405 nil gc_unused "ctf_dvd_preprocess_arg"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1406
+ (!type pointer 1407 nil gc_used
+ (!type already_seen 1406)
+ )
+ gc_pointed_to "hash_map<ctf_dvdef_ref,unsigned>"
+ (!srcfileloc "btfout.cc" 73)
+ (!fields 2
+ (!pair "unsigned"
+ (!type undefined 1408 nil gc_unused "unsigned"
+ (!srcfileloc "btfout.cc" 73)
+ )
+ (!srcfileloc "btfout.cc" 73)
+ nil )
+ (!pair "ctf_dvdef_ref"
+ (!type already_seen 1392)
+ (!srcfileloc "btfout.cc" 73)
+ nil )
+ )
+ )
+
+ (!type already_seen 1408)
+
+ (!type struct 1409 nil gc_unused "btf_datasec"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1410
+ (!type pointer 1411 nil gc_used
+ (!type already_seen 1410)
+ )
+ gc_pointed_to "vec<ctf_dtdef_ref,va_gc>"
+ (!srcfileloc "btfout.cc" 105)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "btfout.cc" 105)
+ nil )
+ (!pair "ctf_dtdef_ref"
+ (!type already_seen 1389)
+ (!srcfileloc "btfout.cc" 105)
+ nil )
+ )
+ )
+
+ (!type struct 1412 nil gc_unused "btf_var_secinfo"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1413 nil gc_unused "pass_lower_vector"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1414 nil gc_unused "pass_lower_vector_ssa"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1415 nil gc_unused "pass_gimple_isel"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1416
+ (!type pointer 1417 nil gc_used
+ (!type already_seen 1416)
+ )
+ gc_pointed_to "temp_slot"
+ (!srcfileloc "function.cc" 591)
+ (!fields 10
+ (!pair "next"
+ (!type already_seen 1417)
+ (!srcfileloc "function.cc" 567)
+ nil )
+ (!pair "prev"
+ (!type already_seen 1417)
+ (!srcfileloc "function.cc" 569)
+ nil )
+ (!pair "slot"
+ (!type already_seen 100)
+ (!srcfileloc "function.cc" 571)
+ nil )
+ (!pair "size"
+ (!type already_seen 2)
+ (!srcfileloc "function.cc" 573)
+ nil )
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "function.cc" 578)
+ nil )
+ (!pair "align"
+ (!type already_seen 2)
+ (!srcfileloc "function.cc" 580)
+ nil )
+ (!pair "in_use"
+ (!type already_seen 8)
+ (!srcfileloc "function.cc" 582)
+ nil )
+ (!pair "level"
+ (!type already_seen 2)
+ (!srcfileloc "function.cc" 584)
+ nil )
+ (!pair "base_offset"
+ (!type already_seen 2)
+ (!srcfileloc "function.cc" 587)
+ nil )
+ (!pair "full_size"
+ (!type already_seen 2)
+ (!srcfileloc "function.cc" 590)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1418 nil gc_used "incoming_args"
+ (!srcfileloc "emit-rtl.h" 55)
+ (!fields 6
+ (!pair "pops_args"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 33)
+ nil )
+ (!pair "size"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 38)
+ nil )
+ (!pair "pretend_args_size"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 43)
+ nil )
+ (!pair "arg_offset_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "emit-rtl.h" 47)
+ nil )
+ (!pair "info"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 51)
+ nil )
+ (!pair "internal_arg_pointer"
+ (!type already_seen 100)
+ (!srcfileloc "emit-rtl.h" 54)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type undefined 1419
+ (!type pointer 1420 nil gc_unused
+ (!type already_seen 1419)
+ )
+ gc_unused "rtl_ssa::function_info"
+ (!srcfileloc "emit-rtl.h" 77)
+ )
+
+ (!type struct 1421
+ (!type pointer 1422 nil gc_used
+ (!type already_seen 1421)
+ )
+ gc_pointed_to "initial_value_struct"
+ (!srcfileloc "function.cc" 1266)
+ (!fields 3
+ (!pair "num_entries"
+ (!type already_seen 2)
+ (!srcfileloc "function.cc" 1263)
+ nil )
+ (!pair "max_entries"
+ (!type already_seen 2)
+ (!srcfileloc "function.cc" 1264)
+ nil )
+ (!pair "entries"
+ (!type pointer 1423 nil gc_unused
+ (!type struct 1424
+ (!type already_seen 1423)
+ gc_used "initial_value_pair"
+ (!srcfileloc "function.cc" 1265)
+ (!fields 2
+ (!pair "hard_reg"
+ (!type already_seen 100)
+ (!srcfileloc "function.cc" 1256)
+ nil )
+ (!pair "pseudo"
+ (!type already_seen 100)
+ (!srcfileloc "function.cc" 1257)
+ nil )
+ )
+ nil 4095 nil nil )
+ )
+ (!srcfileloc "function.cc" 1265)
+ (!options
+ (!option length string "%h.num_entries")
+ )
+ )
+ )
+ nil 4095 nil nil )
+
+ (!type user_struct 1425
+ (!type pointer 1426 nil gc_used
+ (!type already_seen 1425)
+ )
+ gc_pointed_to "vec<temp_slot_p,va_gc>"
+ (!srcfileloc "emit-rtl.h" 148)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "emit-rtl.h" 148)
+ nil )
+ (!pair "temp_slot_p"
+ (!type already_seen 1417)
+ (!srcfileloc "emit-rtl.h" 148)
+ nil )
+ )
+ )
+
+ (!type struct 1427 nil gc_used "rtl_data"
+ (!srcfileloc "emit-rtl.h" 321)
+ (!fields 66
+ (!pair "expr"
+ (!type already_seen 1006)
+ (!srcfileloc "emit-rtl.h" 62)
+ nil )
+ (!pair "emit"
+ (!type already_seen 1003)
+ (!srcfileloc "emit-rtl.h" 63)
+ nil )
+ (!pair "varasm"
+ (!type already_seen 1018)
+ (!srcfileloc "emit-rtl.h" 64)
+ nil )
+ (!pair "args"
+ (!type already_seen 1418)
+ (!srcfileloc "emit-rtl.h" 65)
+ nil )
+ (!pair "subsections"
+ (!type already_seen 1019)
+ (!srcfileloc "emit-rtl.h" 66)
+ nil )
+ (!pair "eh"
+ (!type already_seen 1011)
+ (!srcfileloc "emit-rtl.h" 67)
+ nil )
+ (!pair "abi"
+ (!type already_seen 909)
+ (!srcfileloc "emit-rtl.h" 75)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "ssa"
+ (!type already_seen 1420)
+ (!srcfileloc "emit-rtl.h" 77)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "outgoing_args_size"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 83)
+ nil )
+ (!pair "return_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "emit-rtl.h" 89)
+ nil )
+ (!pair "hard_reg_initial_vals"
+ (!type already_seen 1422)
+ (!srcfileloc "emit-rtl.h" 96)
+ nil )
+ (!pair "stack_protect_guard"
+ (!type already_seen 23)
+ (!srcfileloc "emit-rtl.h" 100)
+ nil )
+ (!pair "stack_protect_guard_decl"
+ (!type already_seen 23)
+ (!srcfileloc "emit-rtl.h" 104)
+ nil )
+ (!pair "x_nonlocal_goto_handler_labels"
+ (!type already_seen 754)
+ (!srcfileloc "emit-rtl.h" 108)
+ nil )
+ (!pair "x_return_label"
+ (!type already_seen 366)
+ (!srcfileloc "emit-rtl.h" 113)
+ nil )
+ (!pair "x_naked_return_label"
+ (!type already_seen 366)
+ (!srcfileloc "emit-rtl.h" 118)
+ nil )
+ (!pair "x_stack_slot_list"
+ (!type already_seen 221)
+ (!srcfileloc "emit-rtl.h" 122)
+ nil )
+ (!pair "frame_space_list"
+ (!type already_seen 1021)
+ (!srcfileloc "emit-rtl.h" 125)
+ nil )
+ (!pair "x_stack_check_probe_note"
+ (!type already_seen 763)
+ (!srcfileloc "emit-rtl.h" 128)
+ nil )
+ (!pair "x_arg_pointer_save_area"
+ (!type already_seen 100)
+ (!srcfileloc "emit-rtl.h" 134)
+ nil )
+ (!pair "drap_reg"
+ (!type already_seen 100)
+ (!srcfileloc "emit-rtl.h" 137)
+ nil )
+ (!pair "x_frame_offset"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 142)
+ nil )
+ (!pair "x_parm_birth_insn"
+ (!type already_seen 297)
+ (!srcfileloc "emit-rtl.h" 145)
+ nil )
+ (!pair "x_used_temp_slots"
+ (!type already_seen 1426)
+ (!srcfileloc "emit-rtl.h" 148)
+ nil )
+ (!pair "x_avail_temp_slots"
+ (!type already_seen 1417)
+ (!srcfileloc "emit-rtl.h" 151)
+ nil )
+ (!pair "x_temp_slot_level"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 154)
+ nil )
+ (!pair "stack_alignment_needed"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 158)
+ nil )
+ (!pair "preferred_stack_boundary"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 162)
+ nil )
+ (!pair "parm_stack_boundary"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 165)
+ nil )
+ (!pair "max_used_stack_slot_alignment"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 168)
+ nil )
+ (!pair "stack_alignment_estimated"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 177)
+ nil )
+ (!pair "patch_area_size"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 180)
+ nil )
+ (!pair "patch_area_entry"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 183)
+ nil )
+ (!pair "accesses_prior_frames"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 189)
+ nil )
+ (!pair "calls_eh_return"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 192)
+ nil )
+ (!pair "saves_all_registers"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 196)
+ nil )
+ (!pair "has_nonlocal_goto"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 200)
+ nil )
+ (!pair "has_asm_statement"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 203)
+ nil )
+ (!pair "all_throwers_are_sibcalls"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 210)
+ nil )
+ (!pair "limit_stack"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 214)
+ nil )
+ (!pair "profile"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 217)
+ nil )
+ (!pair "uses_const_pool"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 220)
+ nil )
+ (!pair "uses_pic_offset_table"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 223)
+ nil )
+ (!pair "uses_eh_lsda"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 226)
+ nil )
+ (!pair "tail_call_emit"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 229)
+ nil )
+ (!pair "arg_pointer_save_area_init"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 232)
+ nil )
+ (!pair "frame_pointer_needed"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 237)
+ nil )
+ (!pair "maybe_hot_insn_p"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 240)
+ nil )
+ (!pair "stack_realign_needed"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 247)
+ nil )
+ (!pair "stack_realign_tried"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 252)
+ nil )
+ (!pair "need_drap"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 256)
+ nil )
+ (!pair "stack_realign_processed"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 261)
+ nil )
+ (!pair "stack_realign_finalized"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 265)
+ nil )
+ (!pair "dbr_scheduled_p"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 268)
+ nil )
+ (!pair "nothrow"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 273)
+ nil )
+ (!pair "shrink_wrapped"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 276)
+ nil )
+ (!pair "shrink_wrapped_separate"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 280)
+ nil )
+ (!pair "sp_is_unchanging"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 285)
+ nil )
+ (!pair "sp_is_clobbered_by_asm"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 288)
+ nil )
+ (!pair "is_leaf"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 294)
+ nil )
+ (!pair "uses_only_leaf_regs"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 299)
+ nil )
+ (!pair "has_bb_partition"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 304)
+ nil )
+ (!pair "bb_reorder_complete"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 308)
+ nil )
+ (!pair "asm_clobbers"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 314)
+ nil )
+ (!pair "must_be_zero_on_return"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 317)
+ nil )
+ (!pair "max_insn_address"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.h" 320)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1428 nil gc_used "const_int_hasher"
+ (!srcfileloc "emit-rtl.cc" 145)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1429
+ (!type pointer 1430 nil gc_used
+ (!type already_seen 1429)
+ )
+ gc_pointed_to "hash_table<const_int_hasher>"
+ (!srcfileloc "emit-rtl.cc" 145)
+ (!fields 1
+ (!pair "const_int_hasher"
+ (!type already_seen 1428)
+ (!srcfileloc "emit-rtl.cc" 145)
+ nil )
+ )
+ )
+
+ (!type struct 1431 nil gc_used "const_wide_int_hasher"
+ (!srcfileloc "emit-rtl.cc" 153)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1432
+ (!type pointer 1433 nil gc_used
+ (!type already_seen 1432)
+ )
+ gc_pointed_to "hash_table<const_wide_int_hasher>"
+ (!srcfileloc "emit-rtl.cc" 153)
+ (!fields 1
+ (!pair "const_wide_int_hasher"
+ (!type already_seen 1431)
+ (!srcfileloc "emit-rtl.cc" 153)
+ nil )
+ )
+ )
+
+ (!type struct 1434 nil gc_used "const_poly_int_hasher"
+ (!srcfileloc "emit-rtl.cc" 163)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1435
+ (!type pointer 1436 nil gc_used
+ (!type already_seen 1435)
+ )
+ gc_pointed_to "hash_table<const_poly_int_hasher>"
+ (!srcfileloc "emit-rtl.cc" 163)
+ (!fields 1
+ (!pair "const_poly_int_hasher"
+ (!type already_seen 1434)
+ (!srcfileloc "emit-rtl.cc" 163)
+ nil )
+ )
+ )
+
+ (!type struct 1437 nil gc_used "reg_attr_hasher"
+ (!srcfileloc "emit-rtl.cc" 172)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1438
+ (!type pointer 1439 nil gc_used
+ (!type already_seen 1438)
+ )
+ gc_pointed_to "hash_table<reg_attr_hasher>"
+ (!srcfileloc "emit-rtl.cc" 172)
+ (!fields 1
+ (!pair "reg_attr_hasher"
+ (!type already_seen 1437)
+ (!srcfileloc "emit-rtl.cc" 172)
+ nil )
+ )
+ )
+
+ (!type struct 1440 nil gc_used "const_double_hasher"
+ (!srcfileloc "emit-rtl.cc" 181)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1441
+ (!type pointer 1442 nil gc_used
+ (!type already_seen 1441)
+ )
+ gc_pointed_to "hash_table<const_double_hasher>"
+ (!srcfileloc "emit-rtl.cc" 181)
+ (!fields 1
+ (!pair "const_double_hasher"
+ (!type already_seen 1440)
+ (!srcfileloc "emit-rtl.cc" 181)
+ nil )
+ )
+ )
+
+ (!type struct 1443 nil gc_used "const_fixed_hasher"
+ (!srcfileloc "emit-rtl.cc" 190)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1444
+ (!type pointer 1445 nil gc_used
+ (!type already_seen 1444)
+ )
+ gc_pointed_to "hash_table<const_fixed_hasher>"
+ (!srcfileloc "emit-rtl.cc" 190)
+ (!fields 1
+ (!pair "const_fixed_hasher"
+ (!type already_seen 1443)
+ (!srcfileloc "emit-rtl.cc" 190)
+ nil )
+ )
+ )
+
+ (!type already_seen 357)
+
+ (!type already_seen 365)
+
+ (!type already_seen 361)
+
+ (!type already_seen 359)
+
+ (!type already_seen 362)
+
+ (!type already_seen 363)
+
+ (!type already_seen 358)
+
+ (!type already_seen 369)
+
+ (!type already_seen 371)
+
+ (!type already_seen 373)
+
+ (!type already_seen 375)
+
+ (!type struct 1446 nil gc_unused "throw_stmt_node"
+ (!srcfileloc "except.h" 289)
+ (!fields 2
+ (!pair "stmt"
+ (!type already_seen 282)
+ (!srcfileloc "except.h" 287)
+ nil )
+ (!pair "lp_nr"
+ (!type already_seen 2)
+ (!srcfileloc "except.h" 288)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1447 nil gc_unused "pieces_addr"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1448 nil gc_unused "op_by_pieces_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1449 nil gc_unused "move_by_pieces_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1450 nil gc_unused "store_by_pieces_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1451 nil gc_unused "compare_by_pieces_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1452 nil gc_unused "algorithm"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1453
+ (!type pointer 1454 nil gc_unused
+ (!type already_seen 1453)
+ )
+ gc_unused "separate_ops"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1455 nil gc_unused "by_pieces_prev"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1456 nil gc_used "insn_cache_hasher"
+ (!srcfileloc "function.cc" 131)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1457
+ (!type pointer 1458 nil gc_used
+ (!type already_seen 1457)
+ )
+ gc_pointed_to "hash_table<insn_cache_hasher>"
+ (!srcfileloc "function.cc" 131)
+ (!fields 1
+ (!pair "insn_cache_hasher"
+ (!type already_seen 1456)
+ (!srcfileloc "function.cc" 131)
+ nil )
+ )
+ )
+
+ (!type struct 1459 nil gc_pointed_to "temp_slot_address_entry"
+ (!srcfileloc "function.cc" 598)
+ (!fields 3
+ (!pair "hash"
+ (!type already_seen 2)
+ (!srcfileloc "function.cc" 595)
+ nil )
+ (!pair "address"
+ (!type already_seen 100)
+ (!srcfileloc "function.cc" 596)
+ nil )
+ (!pair "temp_slot"
+ (!type already_seen 1417)
+ (!srcfileloc "function.cc" 597)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1460 nil gc_used "temp_address_hasher"
+ (!srcfileloc "function.cc" 608)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1461
+ (!type pointer 1462 nil gc_used
+ (!type already_seen 1461)
+ )
+ gc_pointed_to "hash_table<temp_address_hasher>"
+ (!srcfileloc "function.cc" 608)
+ (!fields 1
+ (!pair "temp_address_hasher"
+ (!type already_seen 1460)
+ (!srcfileloc "function.cc" 608)
+ nil )
+ )
+ )
+
+ (!type already_seen 1424)
+
+ (!type struct 1463 nil gc_unused "pass_instantiate_virtual_regs"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1464 nil gc_unused "assign_parm_data_all"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1465 nil gc_unused "assign_parm_data_one"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1466 nil gc_unused "pass_leaf_regs"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1467 nil gc_unused "pass_thread_prologue_and_epilogue"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1468 nil gc_unused "pass_zero_call_used_regs"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1469 nil gc_unused "pass_match_asm_constraints"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1470
+ (!type pointer 1471 nil gc_used
+ (!type already_seen 1470)
+ )
+ gc_pointed_to "hash_map<tree_hash,tree>"
+ (!srcfileloc "except.cc" 151)
+ (!fields 2
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "except.cc" 151)
+ nil )
+ (!pair "tree_hash"
+ (!type already_seen 941)
+ (!srcfileloc "except.cc" 151)
+ nil )
+ )
+ )
+
+ (!type struct 1472 nil gc_unused "action_record"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1473 nil gc_unused "action_record_hasher"
+ (!srcfileloc "except.cc" 210)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1474 nil gc_unused "hash_table<action_record_hasher>"
+ (!srcfileloc "except.cc" 210)
+ (!fields 1
+ (!pair "action_record_hasher"
+ (!type already_seen 1473)
+ (!srcfileloc "except.cc" 210)
+ nil )
+ )
+ )
+
+ (!type struct 1475 nil gc_unused "duplicate_eh_regions_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1476 nil gc_unused "ttypes_filter"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1477 nil gc_unused "ttypes_filter_hasher"
+ (!srcfileloc "except.cc" 729)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1478 nil gc_unused "hash_table<ttypes_filter_hasher>"
+ (!srcfileloc "except.cc" 729)
+ (!fields 1
+ (!pair "ttypes_filter_hasher"
+ (!type already_seen 1477)
+ (!srcfileloc "except.cc" 729)
+ nil )
+ )
+ )
+
+ (!type struct 1479 nil gc_unused "ehspec_hasher"
+ (!srcfileloc "except.cc" 764)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1480 nil gc_unused "hash_table<ehspec_hasher>"
+ (!srcfileloc "except.cc" 764)
+ (!fields 1
+ (!pair "ehspec_hasher"
+ (!type already_seen 1479)
+ (!srcfileloc "except.cc" 764)
+ nil )
+ )
+ )
+
+ (!type struct 1481 nil gc_unused "pass_set_nothrow_function_flags"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1482 nil gc_unused "pass_convert_to_eh_region_ranges"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 918)
+
+ (!type struct 1483
+ (!type pointer 1484 nil gc_used
+ (!type already_seen 1483)
+ )
+ gc_pointed_to "test_of_length"
+ (!srcfileloc "ggc-tests.cc" 68)
+ (!fields 2
+ (!pair "num_elem"
+ (!type already_seen 2)
+ (!srcfileloc "ggc-tests.cc" 64)
+ nil )
+ (!pair "elem"
+ (!type array 1485 nil gc_used "1"
+ (!type already_seen 1484)
+ )
+ (!srcfileloc "ggc-tests.cc" 65)
+ (!options
+ (!option length string "%h.num_elem")
+ )
+ )
+ )
+ nil 4095 nil nil )
+
+ (!type already_seen 920)
+
+ (!type struct 1486
+ (!type pointer 1487 nil gc_used
+ (!type already_seen 1486)
+ )
+ gc_pointed_to "test_of_union"
+ (!srcfileloc "ggc-tests.cc" 134)
+ (!fields 2
+ (!pair "m_kind"
+ (!type already_seen 2)
+ (!srcfileloc "ggc-tests.cc" 125)
+ nil )
+ (!pair "m_u"
+ (!type already_seen 916)
+ (!srcfileloc "ggc-tests.cc" 129)
+ (!options
+ (!option desc string "calc_desc (%0.m_kind)")
+ )
+ )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1488 nil gc_unused "test_struct_with_dtor"
+ (!srcfileloc "ggc-tests.cc" 176)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 1489
+ (!type pointer 1490 nil gc_used
+ (!type already_seen 1489)
+ )
+ gc_pointed_to "example_base"
+ (!srcfileloc "ggc-tests.cc" 245)
+ (!fields 2
+ (!pair "m_kind"
+ (!type already_seen 2)
+ (!srcfileloc "ggc-tests.cc" 243)
+ nil )
+ (!pair "m_a"
+ (!type already_seen 917)
+ (!srcfileloc "ggc-tests.cc" 244)
+ nil )
+ )
+ (!options
+ (!option tag string "0")
+ (!option desc string "%h.m_kind")
+ )
+ 4095 nil nil )
+
+ (!type struct 1491
+ (!type pointer 1492 nil gc_used
+ (!type already_seen 1491)
+ )
+ gc_pointed_to "some_subclass"
+ (!srcfileloc "ggc-tests.cc" 256)
+ (!fields 1
+ (!pair "m_b"
+ (!type already_seen 917)
+ (!srcfileloc "ggc-tests.cc" 255)
+ nil )
+ )
+ (!options
+ (!option tag string "1")
+ )
+ 4095 nil
+ (!type already_seen 1489)
+ )
+
+ (!type struct 1493
+ (!type pointer 1494 nil gc_used
+ (!type already_seen 1493)
+ )
+ gc_pointed_to "some_other_subclass"
+ (!srcfileloc "ggc-tests.cc" 267)
+ (!fields 1
+ (!pair "m_c"
+ (!type already_seen 917)
+ (!srcfileloc "ggc-tests.cc" 266)
+ nil )
+ )
+ (!options
+ (!option tag string "2")
+ )
+ 4095 nil
+ (!type already_seen 1489)
+ )
+
+ (!type struct 1495
+ (!type pointer 1496 nil gc_used
+ (!type already_seen 1495)
+ )
+ gc_pointed_to "test_node"
+ (!srcfileloc "ggc-tests.cc" 326)
+ (!fields 3
+ (!pair "m_prev"
+ (!type already_seen 1496)
+ (!srcfileloc "ggc-tests.cc" 323)
+ nil )
+ (!pair "m_next"
+ (!type already_seen 1496)
+ (!srcfileloc "ggc-tests.cc" 324)
+ nil )
+ (!pair "m_idx"
+ (!type already_seen 2)
+ (!srcfileloc "ggc-tests.cc" 325)
+ nil )
+ )
+ (!options
+ (!option chain_prev string "%h.m_prev")
+ (!option chain_next string "%h.m_next")
+ )
+ 4095 nil nil )
+
+ (!type user_struct 1497
+ (!type pointer 1498 nil gc_used
+ (!type already_seen 1497)
+ )
+ gc_pointed_to "user_struct"
+ (!srcfileloc "ggc-tests.cc" 388)
+ (!fields 0 )
+ )
+
+ (!type already_seen 621)
+
+ (!type struct 1499 nil gc_unused "gcse_expr"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1500
+ (!type pointer 1501 nil gc_unused
+ (!type already_seen 1500)
+ )
+ gc_unused "gcse_occr"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1502 nil gc_unused "gcse_hash_table_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1503 nil gc_unused "ls_expr"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1504 nil gc_unused "pre_ldst_expr_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1505 nil gc_unused "bb_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1506 nil gc_unused "vec<rtx_insn*>"
+ (!srcfileloc "gcse.cc" 620)
+ (!fields 1
+ (!pair "rtx_insn"
+ (!type already_seen 297)
+ (!srcfileloc "gcse.cc" 620)
+ nil )
+ )
+ )
+
+ (!type user_struct 1507 nil gc_unused "vec<modify_pair>"
+ (!srcfileloc "gcse.cc" 621)
+ (!fields 1
+ (!pair "modify_pair"
+ (!type undefined 1508 nil gc_unused "modify_pair"
+ (!srcfileloc "gcse.cc" 621)
+ )
+ (!srcfileloc "gcse.cc" 621)
+ nil )
+ )
+ )
+
+ (!type already_seen 1508)
+
+ (!type struct 1509 nil gc_unused "reg_avail_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1510 nil gc_unused "mem_conflict_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1511 nil gc_unused "edge_list"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1512 nil gc_unused "set_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1513 nil gc_unused "pass_rtl_pre"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1514 nil gc_unused "pass_rtl_hoist"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1515 nil gc_unused "godump_str_hash"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1516 nil gc_unused "macro_hash_value"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1517 nil gc_unused "godump_container"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1518 nil gc_used "libfunc_decl_hasher"
+ (!srcfileloc "optabs-libfuncs.cc" 720)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1519
+ (!type pointer 1520 nil gc_used
+ (!type already_seen 1519)
+ )
+ gc_pointed_to "hash_table<libfunc_decl_hasher>"
+ (!srcfileloc "optabs-libfuncs.cc" 720)
+ (!fields 1
+ (!pair "libfunc_decl_hasher"
+ (!type already_seen 1518)
+ (!srcfileloc "optabs-libfuncs.cc" 720)
+ nil )
+ )
+ )
+
+ (!type struct 1521 nil gc_unused "bb_profile_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1522 nil gc_unused "edge_profile_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1523 nil gc_unused "bb_stats"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1524 nil gc_unused "location_triplet"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1525 nil gc_unused "location_triplet_hash"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1526
+ (!type pointer 1527 nil gc_unused
+ (!type already_seen 1526)
+ )
+ gc_unused "fixup_edge_type"
+ (!srcfileloc "mcf.cc" 94)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 1528
+ (!type pointer 1529 nil gc_unused
+ (!type already_seen 1528)
+ )
+ gc_unused "fixup_vertex_type"
+ (!srcfileloc "mcf.cc" 103)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type struct 1530 nil gc_unused "fixup_graph_type"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1531 nil gc_unused "queue_type"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1532 nil gc_unused "augmenting_path_type"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type union 1533 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/mcf.cc:343"nil
+ (!fields 0 )
+ nil 0 nil )
+
+ (!type struct 1534
+ (!type pointer 1535 nil gc_unused
+ (!type already_seen 1534)
+ )
+ gc_unused "stack_def"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1536
+ (!type pointer 1537 nil gc_unused
+ (!type already_seen 1536)
+ )
+ gc_unused "block_info_def"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1538 nil gc_unused "pass_stack_regs"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1539 nil gc_unused "pass_stack_regs_run"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1540 nil gc_unused "pass_free_cfg"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1541 nil gc_unused "pass_into_cfg_layout_mode"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1542 nil gc_unused "pass_outof_cfg_layout_mode"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1543 nil gc_unused "cfg_hooks"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1544
+ (!type pointer 1545 nil gc_used
+ (!type already_seen 1544)
+ )
+ gc_pointed_to "string_pool_data"
+ (!srcfileloc "stringpool.cc" 251)
+ (!fields 3
+ (!pair "entries"
+ (!type already_seen 17)
+ (!srcfileloc "stringpool.cc" 248)
+ (!options
+ (!option nested_ptr nested
+ (!type already_seen 22)
+ "%h ? HT_IDENT_TO_GCC_IDENT (%h) : NULL" "%h ? GCC_IDENT_TO_HT_IDENT (%h) : NULL")
+ (!option length string "%h.nslots")
+ )
+ )
+ (!pair "nslots"
+ (!type already_seen 2)
+ (!srcfileloc "stringpool.cc" 249)
+ nil )
+ (!pair "nelements"
+ (!type already_seen 2)
+ (!srcfileloc "stringpool.cc" 250)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1546 nil gc_pointed_to "type_hash"
+ (!srcfileloc "tree.cc" 150)
+ (!fields 2
+ (!pair "hash"
+ (!type already_seen 2)
+ (!srcfileloc "tree.cc" 148)
+ nil )
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "tree.cc" 149)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1547 nil gc_used "type_cache_hasher"
+ (!srcfileloc "tree.cc" 174)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1548
+ (!type pointer 1549 nil gc_used
+ (!type already_seen 1548)
+ )
+ gc_pointed_to "hash_table<type_cache_hasher>"
+ (!srcfileloc "tree.cc" 174)
+ (!fields 1
+ (!pair "type_cache_hasher"
+ (!type already_seen 1547)
+ (!srcfileloc "tree.cc" 174)
+ nil )
+ )
+ )
+
+ (!type struct 1550 nil gc_used "int_cst_hasher"
+ (!srcfileloc "tree.cc" 185)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1551
+ (!type pointer 1552 nil gc_used
+ (!type already_seen 1551)
+ )
+ gc_pointed_to "hash_table<int_cst_hasher>"
+ (!srcfileloc "tree.cc" 185)
+ (!fields 1
+ (!pair "int_cst_hasher"
+ (!type already_seen 1550)
+ (!srcfileloc "tree.cc" 185)
+ nil )
+ )
+ )
+
+ (!type struct 1553 nil gc_used "poly_int_cst_hasher"
+ (!srcfileloc "tree.cc" 196)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1554
+ (!type pointer 1555 nil gc_used
+ (!type already_seen 1554)
+ )
+ gc_pointed_to "hash_table<poly_int_cst_hasher>"
+ (!srcfileloc "tree.cc" 196)
+ (!fields 1
+ (!pair "poly_int_cst_hasher"
+ (!type already_seen 1553)
+ (!srcfileloc "tree.cc" 196)
+ nil )
+ )
+ )
+
+ (!type struct 1556 nil gc_used "cl_option_hasher"
+ (!srcfileloc "tree.cc" 212)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1557
+ (!type pointer 1558 nil gc_used
+ (!type already_seen 1557)
+ )
+ gc_pointed_to "hash_table<cl_option_hasher>"
+ (!srcfileloc "tree.cc" 212)
+ (!fields 1
+ (!pair "cl_option_hasher"
+ (!type already_seen 1556)
+ (!srcfileloc "tree.cc" 212)
+ nil )
+ )
+ )
+
+ (!type user_struct 1559
+ (!type pointer 1560 nil gc_used
+ (!type already_seen 1559)
+ )
+ gc_pointed_to "hash_table<tree_decl_map_cache_hasher>"
+ (!srcfileloc "tree.cc" 218)
+ (!fields 1
+ (!pair "tree_decl_map_cache_hasher"
+ (!type already_seen 936)
+ (!srcfileloc "tree.cc" 218)
+ nil )
+ )
+ )
+
+ (!type user_struct 1561
+ (!type pointer 1562 nil gc_used
+ (!type already_seen 1561)
+ )
+ gc_pointed_to "hash_table<tree_vec_map_cache_hasher>"
+ (!srcfileloc "tree.cc" 224)
+ (!fields 1
+ (!pair "tree_vec_map_cache_hasher"
+ (!type already_seen 937)
+ (!srcfileloc "tree.cc" 224)
+ nil )
+ )
+ )
+
+ (!type struct 1563 nil gc_unused "addr_const"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1564 nil gc_used "section_hasher"
+ (!srcfileloc "varasm.cc" 189)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1565
+ (!type pointer 1566 nil gc_used
+ (!type already_seen 1565)
+ )
+ gc_pointed_to "hash_table<section_hasher>"
+ (!srcfileloc "varasm.cc" 189)
+ (!fields 1
+ (!pair "section_hasher"
+ (!type already_seen 1564)
+ (!srcfileloc "varasm.cc" 189)
+ nil )
+ )
+ )
+
+ (!type struct 1567 nil gc_used "object_block_hasher"
+ (!srcfileloc "varasm.cc" 200)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1568
+ (!type pointer 1569 nil gc_used
+ (!type already_seen 1568)
+ )
+ gc_pointed_to "hash_table<object_block_hasher>"
+ (!srcfileloc "varasm.cc" 200)
+ (!fields 1
+ (!pair "object_block_hasher"
+ (!type already_seen 1567)
+ (!srcfileloc "varasm.cc" 200)
+ nil )
+ )
+ )
+
+ (!type struct 1570 nil gc_unused "asm_int_op"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1571
+ (!type pointer 1572 nil gc_used
+ (!type already_seen 1571)
+ )
+ gc_pointed_to "hash_table<tree_descriptor_hasher>"
+ (!srcfileloc "varasm.cc" 3072)
+ (!fields 1
+ (!pair "tree_descriptor_hasher"
+ (!type already_seen 1101)
+ (!srcfileloc "varasm.cc" 3072)
+ nil )
+ )
+ )
+
+ (!type already_seen 1017)
+
+ (!type already_seen 1016)
+
+ (!type struct 1573 nil gc_unused "constant_descriptor_rtx_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1574 nil gc_unused "const_rtx_data_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1575 nil gc_unused "oc_outer_state"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1576 nil gc_unused "oc_local_state"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1577 nil gc_used "tm_clone_hasher"
+ (!srcfileloc "varasm.cc" 6353)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1578
+ (!type pointer 1579 nil gc_used
+ (!type already_seen 1578)
+ )
+ gc_pointed_to "hash_table<tm_clone_hasher>"
+ (!srcfileloc "varasm.cc" 6353)
+ (!fields 1
+ (!pair "tm_clone_hasher"
+ (!type already_seen 1577)
+ (!srcfileloc "varasm.cc" 6353)
+ nil )
+ )
+ )
+
+ (!type struct 1580 nil gc_unused "tm_alias_pair"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 404)
+
+ (!type already_seen 771)
+
+ (!type already_seen 769)
+
+ (!type already_seen 779)
+
+ (!type already_seen 777)
+
+ (!type already_seen 386)
+
+ (!type already_seen 784)
+
+ (!type already_seen 805)
+
+ (!type already_seen 794)
+
+ (!type struct 1581 nil gc_pointed_to "gimple_statement_wce"
+ (!srcfileloc "gimple.h" 561)
+ (!fields 1
+ (!pair "cleanup"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 560)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_WCE")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+
+ (!type already_seen 810)
+
+ (!type already_seen 813)
+
+ (!type already_seen 812)
+
+ (!type already_seen 807)
+
+ (!type struct 1582 nil gc_pointed_to "gomp_scan"
+ (!srcfileloc "gimple.h" 784)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_OMP_SINGLE_LAYOUT")
+ )
+ 4095 nil
+ (!type already_seen 807)
+ )
+
+ (!type already_seen 802)
+
+ (!type struct 1583 nil gc_pointed_to "gimple_statement_omp_return"
+ (!srcfileloc "gimple.h" 826)
+ (!fields 0 )
+ (!options
+ (!option tag string "GSS_OMP_ATOMIC_STORE_LAYOUT")
+ )
+ 4095 nil
+ (!type already_seen 802)
+ )
+
+ (!type struct 1584 nil gc_pointed_to "gimple_statement_assume"
+ (!srcfileloc "gimple.h" 840)
+ (!fields 2
+ (!pair "guard"
+ (!type already_seen 23)
+ (!srcfileloc "gimple.h" 836)
+ nil )
+ (!pair "body"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 839)
+ nil )
+ )
+ (!options
+ (!option tag string "GSS_ASSUME")
+ )
+ 4095 nil
+ (!type already_seen 283)
+ )
+
+ (!type struct 1585 nil gc_unused "gimple_temp_hash_elt"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1586 nil gc_pointed_to "tm_restart_node"
+ (!srcfileloc "gimple-ssa.h" 32)
+ (!fields 2
+ (!pair "stmt"
+ (!type already_seen 282)
+ (!srcfileloc "gimple-ssa.h" 30)
+ nil )
+ (!pair "label_or_list"
+ (!type already_seen 23)
+ (!srcfileloc "gimple-ssa.h" 31)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type already_seen 407)
+
+ (!type already_seen 398)
+
+ (!type already_seen 395)
+
+ (!type already_seen 397)
+
+ (!type already_seen 399)
+
+ (!type already_seen 406)
+
+ (!type struct 1587 nil gc_unused "pass_release_ssa_names"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type union 1588 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/tree-eh.cc:53"nil
+ (!fields 0 )
+ nil 0 nil )
+
+ (!type struct 1589 nil gc_unused "finally_tree_node"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1590 nil gc_unused "finally_tree_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1591 nil gc_unused "goto_queue_node"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1592 nil gc_unused "leh_state"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1593 nil gc_unused "leh_tf_state"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1594 nil gc_unused "labels_s"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1595 nil gc_unused "pass_lower_eh"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1596 nil gc_unused "pass_refactor_eh"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1597 nil gc_unused "pass_lower_resx"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1598 nil gc_unused "pass_lower_eh_dispatch"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1599 nil gc_unused "pass_cleanup_eh"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1600 nil gc_used "mem_addr_template"
+ (!srcfileloc "tree-ssa-address.cc" 95)
+ (!fields 3
+ (!pair "ref"
+ (!type already_seen 100)
+ (!srcfileloc "tree-ssa-address.cc" 83)
+ nil )
+ (!pair "step_p"
+ (!type already_seen 101)
+ (!srcfileloc "tree-ssa-address.cc" 84)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "off_p"
+ (!type already_seen 101)
+ (!srcfileloc "tree-ssa-address.cc" 86)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 4095 nil nil )
+
+ (!type user_struct 1601
+ (!type pointer 1602 nil gc_used
+ (!type already_seen 1601)
+ )
+ gc_pointed_to "vec<mem_addr_template,va_gc>"
+ (!srcfileloc "tree-ssa-address.cc" 95)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "tree-ssa-address.cc" 95)
+ nil )
+ (!pair "mem_addr_template"
+ (!type already_seen 1600)
+ (!srcfileloc "tree-ssa-address.cc" 95)
+ nil )
+ )
+ )
+
+ (!type struct 1603 nil gc_unused "mem_address"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1604 nil gc_unused "cfg_stats_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1605 nil gc_unused "replace_decls_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1606 nil gc_unused "locus_discrim_map"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1607 nil gc_unused "locus_discrim_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1608 nil gc_unused "pass_build_cfg"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1609 nil gc_unused "omp_region"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1610 nil gc_unused "label_record"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1611 nil gc_unused "walk_stmt_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1612 nil gc_unused "move_stmt_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1613 nil gc_unused "profile_record"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1614 nil gc_unused "pass_split_crit_edges"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1615 nil gc_unused "pass_warn_function_return"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1616 nil gc_unused "pass_warn_unused_result"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1617 nil gc_unused "pass_fixup_cfg"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1618 nil gc_unused "iv_use"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1619 nil gc_unused "iv"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1620 nil gc_unused "version_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1621 nil gc_unused "comp_cost"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1622 nil gc_unused "iv_inv_expr_ent"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1623 nil gc_unused "cost_pair"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1624 nil gc_unused "iv_group"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1625 nil gc_unused "iv_cand"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1626 nil gc_unused "iv_common_cand"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1627 nil gc_unused "iv_common_cand_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1628 nil gc_unused "iv_inv_expr_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1629 nil gc_unused "ivopts_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1630 nil gc_unused "iv_ca"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1631 nil gc_unused "iv_ca_delta"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1632 nil gc_unused "tree_niter_desc"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1633 nil gc_unused "ifs_ivopts_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1634 nil gc_unused "walk_tree_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1635 nil gc_unused "aff_tree"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1636 nil gc_unused "ainc_cost_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1637 nil gc_unused "dfa_stats_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1638 nil gc_unused "numbered_tree"
+ (!srcfileloc "tree-dfa.cc" 960)
+ (!fields 2
+ (!pair "t"
+ (!type already_seen 23)
+ (!srcfileloc "tree-dfa.cc" 958)
+ nil )
+ (!pair "num"
+ (!type already_seen 2)
+ (!srcfileloc "tree-dfa.cc" 959)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1639 nil gc_pointed_to "scev_info_str"
+ (!srcfileloc "tree-scalar-evolution.cc" 300)
+ (!fields 3
+ (!pair "name_version"
+ (!type already_seen 2)
+ (!srcfileloc "tree-scalar-evolution.cc" 297)
+ nil )
+ (!pair "instantiated_below"
+ (!type already_seen 2)
+ (!srcfileloc "tree-scalar-evolution.cc" 298)
+ nil )
+ (!pair "chrec"
+ (!type already_seen 23)
+ (!srcfileloc "tree-scalar-evolution.cc" 299)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1640 nil gc_used "scev_info_hasher"
+ (!srcfileloc "tree-scalar-evolution.cc" 312)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1641
+ (!type pointer 1642 nil gc_used
+ (!type already_seen 1641)
+ )
+ gc_pointed_to "hash_table<scev_info_hasher>"
+ (!srcfileloc "tree-scalar-evolution.cc" 312)
+ (!fields 1
+ (!pair "scev_info_hasher"
+ (!type already_seen 1640)
+ (!srcfileloc "tree-scalar-evolution.cc" 312)
+ nil )
+ )
+ )
+
+ (!type struct 1643 nil gc_unused "instantiate_cache_type"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1644 nil gc_unused "scev_dfs"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1645 nil gc_unused "chrec_stats"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 401)
+
+ (!type struct 1646 nil gc_unused "pass_ipa_tree_profile"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1647 nil gc_unused "nesting_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1648 nil gc_unused "nesting_copy_body_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1649 nil gc_unused "omp_for_data_loop"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1650 nil gc_unused "known_properties"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1651 nil gc_unused "declare_variant_simd_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1652 nil gc_used "omp_declare_variant_entry"
+ (!srcfileloc "omp-general.cc" 2071)
+ (!fields 5
+ (!pair "variant"
+ (!type already_seen 820)
+ (!srcfileloc "omp-general.cc" 2052)
+ nil )
+ (!pair "score"
+ (!type already_seen 2)
+ (!srcfileloc "omp-general.cc" 2054)
+ nil )
+ (!pair "score_in_declare_simd_clone"
+ (!type already_seen 2)
+ (!srcfileloc "omp-general.cc" 2056)
+ nil )
+ (!pair "ctx"
+ (!type already_seen 23)
+ (!srcfileloc "omp-general.cc" 2058)
+ nil )
+ (!pair "matches"
+ (!type already_seen 2)
+ (!srcfileloc "omp-general.cc" 2060)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type user_struct 1653
+ (!type pointer 1654 nil gc_used
+ (!type already_seen 1653)
+ )
+ gc_pointed_to "vec<omp_declare_variant_entry,va_gc>"
+ (!srcfileloc "omp-general.cc" 2071)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "omp-general.cc" 2071)
+ nil )
+ (!pair "omp_declare_variant_entry"
+ (!type already_seen 1652)
+ (!srcfileloc "omp-general.cc" 2071)
+ nil )
+ )
+ )
+
+ (!type struct 1655 nil gc_pointed_to "omp_declare_variant_base_entry"
+ (!srcfileloc "omp-general.cc" 2072)
+ (!fields 3
+ (!pair "base"
+ (!type already_seen 820)
+ (!srcfileloc "omp-general.cc" 2066)
+ nil )
+ (!pair "node"
+ (!type already_seen 820)
+ (!srcfileloc "omp-general.cc" 2069)
+ nil )
+ (!pair "variants"
+ (!type already_seen 1654)
+ (!srcfileloc "omp-general.cc" 2071)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1656 nil gc_used "omp_declare_variant_hasher"
+ (!srcfileloc "omp-general.cc" 2120)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1657
+ (!type pointer 1658 nil gc_used
+ (!type already_seen 1657)
+ )
+ gc_pointed_to "hash_table<omp_declare_variant_hasher>"
+ (!srcfileloc "omp-general.cc" 2120)
+ (!fields 1
+ (!pair "omp_declare_variant_hasher"
+ (!type already_seen 1656)
+ (!srcfileloc "omp-general.cc" 2120)
+ nil )
+ )
+ )
+
+ (!type struct 1659 nil gc_used "omp_declare_variant_alt_hasher"
+ (!srcfileloc "omp-general.cc" 2142)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1660
+ (!type pointer 1661 nil gc_used
+ (!type already_seen 1660)
+ )
+ gc_pointed_to "hash_table<omp_declare_variant_alt_hasher>"
+ (!srcfileloc "omp-general.cc" 2142)
+ (!fields 1
+ (!pair "omp_declare_variant_alt_hasher"
+ (!type already_seen 1659)
+ (!srcfileloc "omp-general.cc" 2142)
+ nil )
+ )
+ )
+
+ (!type struct 1662 nil gc_unused "omp_context"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1663 nil gc_unused "omp_for_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1664 nil gc_unused "omplow_simd_context"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1665 nil gc_unused "omp_taskcopy_context"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1666 nil gc_unused "lower_omp_regimplify_operands_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1667 nil gc_unused "pass_lower_omp"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1668 nil gc_unused "pass_diagnose_omp_blocks"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1669 nil gc_unused "cl_option_state"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1670 nil gc_unused "minipool_node"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1671 nil gc_unused "minipool_fixup"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1672 nil gc_unused "four_ints"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1673 nil gc_unused "arm_build_target"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1674 nil gc_unused "cpu_tune"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1675 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/config/arm/arm.cc:2492"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1676 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/config/arm/arm.cc:4045"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1677 nil gc_unused "libcall_hasher"
+ (!srcfileloc "config/arm/arm.cc" 5872)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1678 nil gc_unused "hash_table<libcall_hasher>"
+ (!srcfileloc "config/arm/arm.cc" 5872)
+ (!fields 1
+ (!pair "libcall_hasher"
+ (!type already_seen 1677)
+ (!srcfileloc "config/arm/arm.cc" 5872)
+ nil )
+ )
+ )
+
+ (!type struct 1679 nil gc_unused "scratch_reg"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1680 nil gc_unused "thumb1_const_rtl"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1681 nil gc_unused "thumb1_const_print"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1682 nil gc_unused "expand_vec_perm_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1683 nil gc_unused "gcc_target"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1684 nil gc_unused "dump_file_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1685 nil gc_unused "pass_build_ssa_passes"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1686 nil gc_unused "pass_local_optimization_passes"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1687 nil gc_unused "pass_ipa_remove_symbols"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1688 nil gc_unused "pass_all_early_optimizations"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1689 nil gc_unused "pass_all_optimizations"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1690 nil gc_unused "pass_all_optimizations_g"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1691 nil gc_unused "pass_rest_of_compilation"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1692 nil gc_unused "pass_postreload"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1693 nil gc_unused "pass_late_compilation"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1694 nil gc_unused "pass_pre_slp_scalar_cleanup"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1695
+ (!type pointer 1696 nil gc_unused
+ (!type already_seen 1695)
+ )
+ gc_unused "uid_range"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1697 nil gc_unused "pass_list_node"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1698
+ (!type pointer 1699 nil gc_unused
+ (!type already_seen 1698)
+ )
+ gc_unused "lto_out_decl_state"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1700
+ (!type pointer 1701 nil gc_used
+ (!type already_seen 1700)
+ )
+ gc_pointed_to "hash_map<char*,unsigned>"
+ (!srcfileloc "cgraphclones.cc" 479)
+ (!fields 1
+ (!pair "unsigned"
+ (!type already_seen 1408)
+ (!srcfileloc "cgraphclones.cc" 479)
+ nil )
+ )
+ )
+
+ (!type user_struct 1702
+ (!type pointer 1703 nil gc_used
+ (!type already_seen 1702)
+ )
+ gc_pointed_to "vec<gimple*,va_gc>"
+ (!srcfileloc "tree-phinodes.cc" 70)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "tree-phinodes.cc" 70)
+ nil )
+ (!pair "gimple"
+ (!type already_seen 282)
+ (!srcfileloc "tree-phinodes.cc" 70)
+ nil )
+ )
+ )
+
+ (!type struct 1704 nil gc_unused "ao_ref"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1705 nil gc_unused "irange"
+ (!srcfileloc "value-range.h" 225)
+ (!fields 0 )
+ )
+
+ (!type struct 1706 nil gc_unused "vrange"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1707 nil gc_unused "int_range"
+ (!srcfileloc "value-range.h" 260)
+ (!fields 0 )
+ )
+
+ (!type struct 1708 nil gc_unused "unsupported_range"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1709 nil gc_unused "nan_state"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1710 nil gc_unused "frange"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1711 nil gc_unused "int_range<3,true>"
+ (!srcfileloc "value-range.h" 514)
+ (!fields 2
+ (!pair "true"
+ (!type undefined 1712 nil gc_unused "true"
+ (!srcfileloc "value-range.h" 514)
+ )
+ (!srcfileloc "value-range.h" 514)
+ nil )
+ (!pair "3"
+ (!type undefined 1713 nil gc_unused "3"
+ (!srcfileloc "value-range.h" 514)
+ )
+ (!srcfileloc "value-range.h" 514)
+ nil )
+ )
+ )
+
+ (!type already_seen 1713)
+
+ (!type already_seen 1712)
+
+ (!type struct 1714 nil gc_unused "vrange_visitor"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 1145)
+
+ (!type already_seen 1146)
+
+ (!type struct 1715 nil gc_unused "Value_Range"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1716 nil gc_unused "vrange_allocator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1717 nil gc_unused "vrange_storage"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type undefined 1718 nil gc_unused "DISABLE_COPY_AND_ASSIGN"
+ (!srcfileloc "value-range-storage.h" 78)
+ )
+
+ (!type already_seen 555)
+
+ (!type already_seen 556)
+
+ (!type struct 1719 nil gc_unused "obstack_vrange_allocator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1720 nil gc_unused "ggc_vrange_allocator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 1148)
+
+ (!type already_seen 1140)
+
+ (!type already_seen 1149)
+
+ (!type already_seen 1139)
+
+ (!type already_seen 1137)
+
+ (!type already_seen 1135)
+
+ (!type struct 1721
+ (!type pointer 1722 nil gc_used
+ (!type already_seen 1721)
+ )
+ gc_pointed_to "ipcp_transformation"
+ (!srcfileloc "ipa-prop.h" 934)
+ (!fields 3
+ (!pair "m_agg_values"
+ (!type pointer 1723 nil gc_used
+ (!type user_struct 1724
+ (!type already_seen 1723)
+ gc_pointed_to "vec<ipa_argagg_value,va_gc>"
+ (!srcfileloc "ipa-prop.h" 916)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-prop.h" 916)
+ nil )
+ (!pair "ipa_argagg_value"
+ (!type already_seen 1206)
+ (!srcfileloc "ipa-prop.h" 916)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-prop.h" 916)
+ nil )
+ (!pair "bits"
+ (!type pointer 1725 nil gc_used
+ (!type user_struct 1726
+ (!type already_seen 1725)
+ gc_pointed_to "vec<ipa_bits*,va_gc>"
+ (!srcfileloc "ipa-prop.h" 918)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-prop.h" 918)
+ nil )
+ (!pair "ipa_bits"
+ (!type already_seen 1142)
+ (!srcfileloc "ipa-prop.h" 918)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-prop.h" 918)
+ nil )
+ (!pair "m_vr"
+ (!type pointer 1727 nil gc_used
+ (!type user_struct 1728
+ (!type already_seen 1727)
+ gc_pointed_to "vec<ipa_vr,va_gc>"
+ (!srcfileloc "ipa-prop.h" 920)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-prop.h" 920)
+ nil )
+ (!pair "ipa_vr"
+ (!type struct 1729 nil gc_used "ipa_vr"
+ (!srcfileloc "ipa-prop.h" 920)
+ (!fields 4
+ (!pair "known"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 313)
+ nil )
+ (!pair "type"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-prop.h" 314)
+ nil )
+ (!pair "min"
+ (!type already_seen 496)
+ (!srcfileloc "ipa-prop.h" 315)
+ nil )
+ (!pair "max"
+ (!type already_seen 496)
+ (!srcfileloc "ipa-prop.h" 316)
+ nil )
+ )
+ nil 4095 nil nil )
+ (!srcfileloc "ipa-prop.h" 920)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "ipa-prop.h" 920)
+ nil )
+ )
+ nil 4095 nil nil )
+
+ (!type struct 1730 nil gc_unused "ipa_auto_call_arg_values"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1731 nil gc_unused "ipa_call_arg_values"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1732 nil gc_unused "ipa_argagg_value_list"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 1143)
+
+ (!type already_seen 1729)
+
+ (!type already_seen 1147)
+
+ (!type already_seen 1156)
+
+ (!type already_seen 1155)
+
+ (!type already_seen 1159)
+
+ (!type already_seen 1160)
+
+ (!type already_seen 1724)
+
+ (!type already_seen 1726)
+
+ (!type already_seen 1728)
+
+ (!type already_seen 1166)
+
+ (!type already_seen 1168)
+
+ (!type user_struct 1733
+ (!type pointer 1734 nil gc_used
+ (!type already_seen 1733)
+ )
+ gc_pointed_to "ipa_node_params_t"
+ (!srcfileloc "ipa-prop.h" 1020)
+ (!fields 0 )
+ )
+
+ (!type user_struct 1735
+ (!type pointer 1736 nil gc_used
+ (!type already_seen 1735)
+ )
+ gc_pointed_to "ipa_edge_args_sum_t"
+ (!srcfileloc "ipa-prop.h" 1042)
+ (!fields 0 )
+ )
+
+ (!type struct 1737 nil gc_unused "ipcp_transformation_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1738
+ (!type pointer 1739 nil gc_used
+ (!type already_seen 1738)
+ )
+ gc_pointed_to "function_summary<ipcp_transformation*>"
+ (!srcfileloc "ipa-prop.h" 1074)
+ (!fields 1
+ (!pair "ipcp_transformation"
+ (!type already_seen 1722)
+ (!srcfileloc "ipa-prop.h" 1074)
+ nil )
+ )
+ )
+
+ (!type struct 1740 nil gc_used "tm_wrapper_hasher"
+ (!srcfileloc "trans-mem.cc" 468)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1741
+ (!type pointer 1742 nil gc_used
+ (!type already_seen 1741)
+ )
+ gc_pointed_to "hash_table<tm_wrapper_hasher>"
+ (!srcfileloc "trans-mem.cc" 468)
+ (!fields 1
+ (!pair "tm_wrapper_hasher"
+ (!type already_seen 1740)
+ (!srcfileloc "trans-mem.cc" 468)
+ nil )
+ )
+ )
+
+ (!type struct 1743 nil gc_unused "diagnose_tm"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1744 nil gc_unused "pass_diagnose_tm_blocks"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1745 nil gc_unused "tm_log_entry"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1746 nil gc_unused "log_entry_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1747 nil gc_unused "tm_new_mem_map"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1748 nil gc_unused "tm_mem_map_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1749 nil gc_unused "pass_lower_tm"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1750 nil gc_unused "tm_region"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1751 nil gc_unused "pass_tm_init"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1752 nil gc_unused "bb2reg_stuff"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1753 nil gc_unused "pass_tm_mark"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1754 nil gc_unused "pass_tm_edges"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1755 nil gc_unused "tm_memop"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1756 nil gc_unused "tm_memop_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1757 nil gc_unused "tm_memopt_bitmaps"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1758 nil gc_unused "pass_tm_memopt"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1759 nil gc_unused "tm_ipa_cg_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1760 nil gc_unused "vec<cgraph_node*>"
+ (!srcfileloc "trans-mem.cc" 4196)
+ (!fields 1
+ (!pair "cgraph_node"
+ (!type already_seen 820)
+ (!srcfileloc "trans-mem.cc" 4196)
+ nil )
+ )
+ )
+
+ (!type struct 1761 nil gc_unused "demangle_component"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1762 nil gc_unused "create_version_alias_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1763 nil gc_unused "pass_ipa_tm"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1764 nil gc_unused "lto_location_cache"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1765 nil gc_unused "lto_input_block"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 341)
+
+ (!type struct 1766 nil gc_unused "lto_simple_header"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1767 nil gc_unused "lto_simple_header_with_strings"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1768 nil gc_unused "lto_function_header"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1769 nil gc_unused "lto_decl_header"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1770 nil gc_unused "lto_stats_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1771 nil gc_unused "lto_encoder_entry"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 329)
+
+ (!type struct 1772 nil gc_unused "lto_symtab_encoder_iterator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1773 nil gc_unused "lto_tree_ref_encoder"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 332)
+
+ (!type already_seen 336)
+
+ (!type already_seen 331)
+
+ (!type already_seen 335)
+
+ (!type already_seen 337)
+
+ (!type already_seen 339)
+
+ (!type already_seen 340)
+
+ (!type struct 1774 nil gc_unused "lto_char_ptr_base"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1775 nil gc_unused "lto_output_stream"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1776 nil gc_unused "lto_simple_output_block"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1777 nil gc_unused "string_slot"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1778 nil gc_unused "string_slot_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1779 nil gc_unused "dref_entry"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 586)
+
+ (!type already_seen 588)
+
+ (!type already_seen 597)
+
+ (!type already_seen 599)
+
+ (!type already_seen 605)
+
+ (!type already_seen 615)
+
+ (!type already_seen 617)
+
+ (!type already_seen 619)
+
+ (!type already_seen 623)
+
+ (!type already_seen 625)
+
+ (!type already_seen 1190)
+
+ (!type already_seen 1189)
+
+ (!type already_seen 1187)
+
+ (!type struct 1780 nil gc_unused "inline_param_summary"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 1186)
+
+ (!type already_seen 1199)
+
+ (!type struct 1781 nil gc_unused "agg_position_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 1193)
+
+ (!type struct 1782 nil gc_unused "ipa_size_summary"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 1197)
+
+ (!type already_seen 1192)
+
+ (!type already_seen 1194)
+
+ (!type already_seen 1196)
+
+ (!type already_seen 1200)
+
+ (!type user_struct 1783 nil gc_unused "ipa_fn_summary_t"
+ (!srcfileloc "ipa-fnsummary.h" 248)
+ (!fields 0 )
+ )
+
+ (!type user_struct 1784
+ (!type pointer 1785 nil gc_used
+ (!type already_seen 1784)
+ )
+ gc_pointed_to "fast_function_summary<ipa_fn_summary*,va_gc>"
+ (!srcfileloc "ipa-fnsummary.h" 250)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-fnsummary.h" 250)
+ nil )
+ (!pair "ipa_fn_summary"
+ (!type already_seen 1184)
+ (!srcfileloc "ipa-fnsummary.h" 250)
+ nil )
+ )
+ )
+
+ (!type struct 1786 nil gc_unused "ipa_size_summary_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1787 nil gc_unused "ipa_call_summary_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1788 nil gc_unused "ipa_call_estimates"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1789 nil gc_unused "ipa_cached_call_context"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1790 nil gc_unused "ipa_call_context"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1791 nil gc_unused "vtable_registration"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1792 nil gc_unused "vtbl_map_hasher"
+ (!srcfileloc "vtable-verify.cc" 298)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1793 nil gc_unused "hash_table<vtbl_map_hasher>"
+ (!srcfileloc "vtable-verify.cc" 298)
+ (!fields 1
+ (!pair "vtbl_map_hasher"
+ (!type already_seen 1792)
+ (!srcfileloc "vtable-verify.cc" 298)
+ nil )
+ )
+ )
+
+ (!type undefined 1794 nil gc_unused "vtbl_map_table_type::iterator"
+ (!srcfileloc "vtable-verify.cc" 299)
+ )
+
+ (!type struct 1795 nil gc_unused "vtbl_map_node"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1796 nil gc_unused "pass_vtable_verify"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1797 nil gc_unused "hwasan_stack_var"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1798 nil gc_unused "asan_mem_ref"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1799 nil gc_unused "asan_mem_ref_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1800 nil gc_unused "asan_redzone_buffer"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1801 nil gc_unused "asan_add_string_csts_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1802 nil gc_unused "pass_asan"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1803 nil gc_unused "pass_asan_O0"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1804 nil gc_pointed_to "tree_type_map"
+ (!srcfileloc "ubsan.cc" 58)
+ (!fields 2
+ (!pair "type"
+ (!type already_seen 982)
+ (!srcfileloc "ubsan.cc" 56)
+ nil )
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "ubsan.cc" 57)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 4095 nil nil )
+
+ (!type struct 1805 nil gc_used "tree_type_map_cache_hasher"
+ (!srcfileloc "ubsan.cc" 82)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1806
+ (!type pointer 1807 nil gc_used
+ (!type already_seen 1806)
+ )
+ gc_pointed_to "hash_table<tree_type_map_cache_hasher>"
+ (!srcfileloc "ubsan.cc" 82)
+ (!fields 1
+ (!pair "tree_type_map_cache_hasher"
+ (!type already_seen 1805)
+ (!srcfileloc "ubsan.cc" 82)
+ nil )
+ )
+ )
+
+ (!type struct 1808 nil gc_unused "pass_ubsan"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1809 nil gc_unused "pass_tsan"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1810 nil gc_unused "pass_tsan_O0"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1811 nil gc_unused "sanopt_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1812 nil gc_unused "sanopt_tree_triplet"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1813 nil gc_unused "sanopt_tree_triplet_hash"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1814 nil gc_unused "sanopt_tree_couple"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1815 nil gc_unused "sanopt_tree_couple_hash"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1816 nil gc_unused "sanopt_ctx"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1817 nil gc_unused "pass_sanopt"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1818 nil gc_unused "type_pair"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1819 nil gc_unused "default_hash_traits"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 1211)
+
+ (!type struct 1820 nil gc_unused "odr_name_hasher"
+ (!srcfileloc "ipa-devirt.cc" 505)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1821 nil gc_unused "hash_table<odr_name_hasher>"
+ (!srcfileloc "ipa-devirt.cc" 505)
+ (!fields 1
+ (!pair "odr_name_hasher"
+ (!type already_seen 1820)
+ (!srcfileloc "ipa-devirt.cc" 505)
+ nil )
+ )
+ )
+
+ (!type user_struct 1822
+ (!type pointer 1823 nil gc_used
+ (!type already_seen 1822)
+ )
+ gc_pointed_to "vec<odr_type,va_gc>"
+ (!srcfileloc "ipa-devirt.cc" 512)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ipa-devirt.cc" 512)
+ nil )
+ (!pair "odr_type"
+ (!type already_seen 1210)
+ (!srcfileloc "ipa-devirt.cc" 512)
+ nil )
+ )
+ )
+
+ (!type struct 1824 nil gc_unused "odr_enum_val"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1825 nil gc_unused "odr_enum"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1826 nil gc_unused "polymorphic_call_target_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1827 nil gc_unused "polymorphic_call_target_hasher"
+ (!srcfileloc "ipa-devirt.cc" 2746)
+ (!fields 0 )
+ nil 4095 nil nil )
+
+ (!type user_struct 1828 nil gc_unused "hash_table<polymorphic_call_target_hasher>"
+ (!srcfileloc "ipa-devirt.cc" 2746)
+ (!fields 1
+ (!pair "polymorphic_call_target_hasher"
+ (!type already_seen 1827)
+ (!srcfileloc "ipa-devirt.cc" 2746)
+ nil )
+ )
+ )
+
+ (!type struct 1829 nil gc_unused "odr_type_warn_count"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1830 nil gc_unused "decl_warn_count"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1831 nil gc_unused "final_warning_record"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1832 nil gc_unused "pass_ipa_devirt"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1833 nil gc_unused "pass_ipa_odr"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1834 nil gc_unused "direct_internal_fn_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1835 nil gc_unused "arg_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1836 nil gc_unused "arg"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1837 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/config/arm/arm-builtins.cc:903"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1838 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/config/arm/arm-builtins.cc:999"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1839 nil gc_unused "arm_simd_type_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1840 nil gc_unused "builtin_description"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1841 nil gc_unused "vector_type_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1842 nil gc_unused "File_Info_Type"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1843 nil gc_unused "Elist_Header"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1844 nil gc_unused "Elmt_Item"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1845 nil gc_unused "String_Entry"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1846 nil gc_unused "List_Header"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1847 nil gc_unused "attrib"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1848 nil gc_unused "incomplete"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1849 nil gc_unused "subst_pair_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1850 nil gc_unused "variant_desc_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1851 nil gc_used "value_annotation_hasher"
+ (!srcfileloc "ada/gcc-interface/decl.cc" 157)
+ (!fields 0 )
+ nil 1 nil nil )
+
+ (!type user_struct 1852
+ (!type pointer 1853 nil gc_used
+ (!type already_seen 1852)
+ )
+ gc_pointed_to "hash_table<value_annotation_hasher>"
+ (!srcfileloc "ada/gcc-interface/decl.cc" 157)
+ (!fields 1
+ (!pair "value_annotation_hasher"
+ (!type already_seen 1851)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 157)
+ nil )
+ )
+ )
+
+ (!type user_struct 1854
+ (!type pointer 1855 nil gc_used
+ (!type already_seen 1854)
+ )
+ gc_pointed_to "vec<Entity_Id,va_gc_atomic>"
+ (!srcfileloc "ada/gcc-interface/decl.cc" 163)
+ (!fields 2
+ (!pair "va_gc_atomic"
+ (!type undefined 1856 nil gc_unused "va_gc_atomic"
+ (!srcfileloc "ada/gcc-interface/decl.cc" 163)
+ )
+ (!srcfileloc "ada/gcc-interface/decl.cc" 163)
+ nil )
+ (!pair "Entity_Id"
+ (!type undefined 1857 nil gc_unused "Entity_Id"
+ (!srcfileloc "ada/gcc-interface/decl.cc" 163)
+ )
+ (!srcfileloc "ada/gcc-interface/decl.cc" 163)
+ nil )
+ )
+ )
+
+ (!type already_seen 1857)
+
+ (!type already_seen 1856)
+
+ (!type struct 1858 nil gc_pointed_to "tree_entity_vec_map"
+ (!srcfileloc "ada/gcc-interface/decl.cc" 164)
+ (!fields 2
+ (!pair "base"
+ (!type already_seen 982)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 162)
+ nil )
+ (!pair "to"
+ (!type already_seen 1855)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 163)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 1 nil nil )
+
+ (!type struct 1859 nil gc_used "dummy_type_hasher"
+ (!srcfileloc "ada/gcc-interface/decl.cc" 198)
+ (!fields 0 )
+ nil 1 nil nil )
+
+ (!type user_struct 1860
+ (!type pointer 1861 nil gc_used
+ (!type already_seen 1860)
+ )
+ gc_pointed_to "hash_table<dummy_type_hasher>"
+ (!srcfileloc "ada/gcc-interface/decl.cc" 198)
+ (!fields 1
+ (!pair "dummy_type_hasher"
+ (!type already_seen 1859)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 198)
+ nil )
+ )
+ )
+
+ (!type struct 1862 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/ada/gcc-interface/decl.cc:262"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1863 nil gc_unused "er_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1864 nil gc_unused "vinfo"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 466)
+
+ (!type already_seen 464)
+
+ (!type struct 1865
+ (!type pointer 1866 nil gc_used
+ (!type already_seen 1865)
+ )
+ gc_pointed_to "stmt_group"
+ (!srcfileloc "ada/gcc-interface/trans.cc" 145)
+ (!fields 4
+ (!pair "previous"
+ (!type already_seen 1866)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 141)
+ nil )
+ (!pair "stmt_list"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 142)
+ nil )
+ (!pair "block"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 143)
+ nil )
+ (!pair "cleanups"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 144)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.previous")
+ )
+ 1 nil nil )
+
+ (!type struct 1867
+ (!type pointer 1868 nil gc_used
+ (!type already_seen 1867)
+ )
+ gc_pointed_to "elab_info"
+ (!srcfileloc "ada/gcc-interface/trans.cc" 161)
+ (!fields 3
+ (!pair "next"
+ (!type already_seen 1868)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 158)
+ nil )
+ (!pair "elab_proc"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 159)
+ nil )
+ (!pair "gnat_node"
+ (!type already_seen 2)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 160)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 1 nil nil )
+
+ (!type struct 1869
+ (!type pointer 1870 nil gc_used
+ (!type already_seen 1869)
+ )
+ gc_pointed_to "range_check_info_d"
+ (!srcfileloc "ada/gcc-interface/trans.cc" 193)
+ (!fields 7
+ (!pair "low_bound"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 186)
+ nil )
+ (!pair "high_bound"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 187)
+ nil )
+ (!pair "disp"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 188)
+ nil )
+ (!pair "neg_p"
+ (!type already_seen 2)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 189)
+ nil )
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 190)
+ nil )
+ (!pair "invariant_cond"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 191)
+ nil )
+ (!pair "inserted_cond"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 192)
+ nil )
+ )
+ nil 1 nil nil )
+
+ (!type user_struct 1871
+ (!type pointer 1872 nil gc_used
+ (!type already_seen 1871)
+ )
+ gc_pointed_to "vec<range_check_info,va_gc>"
+ (!srcfileloc "ada/gcc-interface/trans.cc" 207)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 207)
+ nil )
+ (!pair "range_check_info"
+ (!type already_seen 1870)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 207)
+ nil )
+ )
+ )
+
+ (!type struct 1873
+ (!type pointer 1874 nil gc_used
+ (!type already_seen 1873)
+ )
+ gc_pointed_to "loop_info_d"
+ (!srcfileloc "ada/gcc-interface/trans.cc" 209)
+ (!fields 10
+ (!pair "fndecl"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 199)
+ nil )
+ (!pair "stmt"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 200)
+ nil )
+ (!pair "loop_var"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 201)
+ nil )
+ (!pair "low_bound"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 202)
+ nil )
+ (!pair "high_bound"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 203)
+ nil )
+ (!pair "omp_loop_clauses"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 204)
+ nil )
+ (!pair "omp_construct_clauses"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 205)
+ nil )
+ (!pair "omp_code"
+ (!type already_seen 2)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 206)
+ nil )
+ (!pair "checks"
+ (!type already_seen 1872)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 207)
+ nil )
+ (!pair "invariants"
+ (!type already_seen 85)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 208)
+ nil )
+ )
+ nil 1 nil nil )
+
+ (!type user_struct 1875
+ (!type pointer 1876 nil gc_used
+ (!type already_seen 1875)
+ )
+ gc_pointed_to "vec<loop_info,va_gc>"
+ (!srcfileloc "ada/gcc-interface/trans.cc" 214)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 214)
+ nil )
+ (!pair "loop_info"
+ (!type already_seen 1874)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 214)
+ nil )
+ )
+ )
+
+ (!type struct 1877 nil gc_unused "nrv_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1878
+ (!type pointer 1879 nil gc_used
+ (!type already_seen 1878)
+ )
+ gc_pointed_to "gnat_binding_level"
+ (!srcfileloc "ada/gcc-interface/utils.cc" 267)
+ (!fields 2
+ (!pair "chain"
+ (!type already_seen 1879)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 264)
+ nil )
+ (!pair "block"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 266)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.chain")
+ )
+ 1 nil nil )
+
+ (!type struct 1880 nil gc_pointed_to "packable_type_hash"
+ (!srcfileloc "ada/gcc-interface/utils.cc" 294)
+ (!fields 2
+ (!pair "hash"
+ (!type already_seen 2)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 292)
+ nil )
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 293)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 1 nil nil )
+
+ (!type struct 1881 nil gc_used "packable_type_hasher"
+ (!srcfileloc "ada/gcc-interface/utils.cc" 308)
+ (!fields 0 )
+ nil 1 nil nil )
+
+ (!type user_struct 1882
+ (!type pointer 1883 nil gc_used
+ (!type already_seen 1882)
+ )
+ gc_pointed_to "hash_table<packable_type_hasher>"
+ (!srcfileloc "ada/gcc-interface/utils.cc" 308)
+ (!fields 1
+ (!pair "packable_type_hasher"
+ (!type already_seen 1881)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 308)
+ nil )
+ )
+ )
+
+ (!type struct 1884 nil gc_pointed_to "pad_type_hash"
+ (!srcfileloc "ada/gcc-interface/utils.cc" 317)
+ (!fields 2
+ (!pair "hash"
+ (!type already_seen 2)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 315)
+ nil )
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 316)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 1 nil nil )
+
+ (!type struct 1885 nil gc_used "pad_type_hasher"
+ (!srcfileloc "ada/gcc-interface/utils.cc" 331)
+ (!fields 0 )
+ nil 1 nil nil )
+
+ (!type user_struct 1886
+ (!type pointer 1887 nil gc_used
+ (!type already_seen 1886)
+ )
+ gc_pointed_to "hash_table<pad_type_hasher>"
+ (!srcfileloc "ada/gcc-interface/utils.cc" 331)
+ (!fields 1
+ (!pair "pad_type_hasher"
+ (!type already_seen 1885)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 331)
+ nil )
+ )
+ )
+
+ (!type struct 1888 nil gc_unused "deferred_decl_context_node"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1889 nil gc_unused "lang_hooks"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1890 nil gc_unused "c_expr"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1891 nil gc_unused "c_typespec"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1892 nil gc_unused "c_declspecs"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1893 nil gc_unused "c_arg_tag"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 461)
+
+ (!type struct 1894 nil gc_unused "c_declarator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1895 nil gc_unused "c_type_name"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1896 nil gc_unused "c_parm"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1897 nil gc_unused "c_enum_contents"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 649)
+
+ (!type struct 1898 nil gc_unused "c_struct_parse_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 452)
+
+ (!type already_seen 648)
+
+ (!type already_seen 646)
+
+ (!type already_seen 645)
+
+ (!type already_seen 642)
+
+ (!type already_seen 631)
+
+ (!type already_seen 651)
+
+ (!type already_seen 655)
+
+ (!type already_seen 653)
+
+ (!type struct 1899
+ (!type pointer 1900 nil gc_used
+ (!type already_seen 1899)
+ )
+ gc_pointed_to "c_inline_static"
+ (!srcfileloc "c/c-decl.cc" 567)
+ (!fields 5
+ (!pair "location"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 554)
+ nil )
+ (!pair "function"
+ (!type already_seen 23)
+ (!srcfileloc "c/c-decl.cc" 557)
+ nil )
+ (!pair "static_decl"
+ (!type already_seen 23)
+ (!srcfileloc "c/c-decl.cc" 560)
+ nil )
+ (!pair "type"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-decl.cc" 563)
+ nil )
+ (!pair "next"
+ (!type already_seen 1900)
+ (!srcfileloc "c/c-decl.cc" 566)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 514 nil nil )
+
+ (!type already_seen 542)
+
+ (!type struct 1901 nil gc_unused "fname_var_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1902 nil gc_unused "visibility_flags"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1903 nil gc_unused "tlist"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1904 nil gc_unused "tlist_cache"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1905 nil gc_unused "disabled_builtin"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1906 nil gc_unused "nonnull_arg_ctx"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 1907
+ (!type pointer 1908 nil gc_used
+ (!type already_seen 1907)
+ )
+ gc_pointed_to "vec<const_char_p,va_gc>"
+ (!srcfileloc "c-family/c-common.cc" 5869)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "c-family/c-common.cc" 5869)
+ nil )
+ (!pair "const_char_p"
+ (!type already_seen 11)
+ (!srcfileloc "c-family/c-common.cc" 5869)
+ nil )
+ )
+ )
+
+ (!type user_struct 1909
+ (!type pointer 1910 nil gc_used
+ (!type already_seen 1909)
+ )
+ gc_pointed_to "vec<tree_gc_vec,va_gc>"
+ (!srcfileloc "c-family/c-common.cc" 8285)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "c-family/c-common.cc" 8285)
+ nil )
+ (!pair "tree_gc_vec"
+ (!type already_seen 85)
+ (!srcfileloc "c-family/c-common.cc" 8285)
+ nil )
+ )
+ )
+
+ (!type user_struct 1911 nil gc_unused "hash_set<char*,false,nofree_string_hash>"
+ (!srcfileloc "c-family/c-common.cc" 9258)
+ (!fields 2
+ (!pair "nofree_string_hash"
+ (!type undefined 1912 nil gc_unused "nofree_string_hash"
+ (!srcfileloc "c-family/c-common.cc" 9258)
+ )
+ (!srcfileloc "c-family/c-common.cc" 9258)
+ nil )
+ (!pair "false"
+ (!type already_seen 878)
+ (!srcfileloc "c-family/c-common.cc" 9258)
+ nil )
+ )
+ )
+
+ (!type already_seen 1912)
+
+ (!type user_struct 1913 nil gc_unused "hash_map<char*,per_file_includes_t*>"
+ (!srcfileloc "c-family/c-common.cc" 9263)
+ (!fields 1
+ (!pair "per_file_includes_t"
+ (!type pointer 1914 nil gc_unused
+ (!type struct 1915
+ (!type already_seen 1914)
+ gc_unused "per_file_includes_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "c-family/c-common.cc" 9263)
+ nil )
+ )
+ )
+
+ (!type already_seen 1915)
+
+ (!type struct 1916 nil gc_unused "c_common_resword"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 451)
+
+ (!type struct 1917 nil gc_unused "c_fileinfo"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1918 nil gc_unused "substring_loc"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1919 nil gc_unused "bc_state"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type undefined 1920 nil gc_unused "wide_int_bitmask"
+ (!srcfileloc "c-family/c-common.h" 1248)
+ )
+
+ (!type struct 1921 nil gc_unused "c_omp_directive"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 686)
+
+ (!type struct 1922 nil gc_unused "property_attribute_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1923 nil gc_used "lazy_hex_fp_value_struct"
+ (!srcfileloc "c-family/c-cppbuiltin.cc" 1781)
+ (!fields 4
+ (!pair "hex_str"
+ (!type already_seen 11)
+ (!srcfileloc "c-family/c-cppbuiltin.cc" 1777)
+ nil )
+ (!pair "mode"
+ (!type already_seen 2)
+ (!srcfileloc "c-family/c-cppbuiltin.cc" 1778)
+ nil )
+ (!pair "digits"
+ (!type already_seen 2)
+ (!srcfileloc "c-family/c-cppbuiltin.cc" 1779)
+ nil )
+ (!pair "fp_suffix"
+ (!type already_seen 11)
+ (!srcfileloc "c-family/c-cppbuiltin.cc" 1780)
+ nil )
+ )
+ nil 1542 nil nil )
+
+ (!type union 1924 nil gc_unused "gen_pragma_handler"nil
+ (!fields 0 )
+ nil 0 nil )
+
+ (!type struct 1925 nil gc_unused "internal_pragma_handler"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1926
+ (!type pointer 1927 nil gc_used
+ (!type already_seen 1926)
+ )
+ gc_pointed_to "align_stack"
+ (!srcfileloc "c-family/c-pragma.cc" 51)
+ (!fields 3
+ (!pair "alignment"
+ (!type already_seen 2)
+ (!srcfileloc "c-family/c-pragma.cc" 48)
+ nil )
+ (!pair "id"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-pragma.cc" 49)
+ nil )
+ (!pair "prev"
+ (!type already_seen 1927)
+ (!srcfileloc "c-family/c-pragma.cc" 50)
+ nil )
+ )
+ nil 1542 nil nil )
+
+ (!type struct 1928 nil gc_used "pending_weak"
+ (!srcfileloc "c-family/c-pragma.cc" 248)
+ (!fields 2
+ (!pair "name"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-pragma.cc" 243)
+ nil )
+ (!pair "value"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-pragma.cc" 244)
+ nil )
+ )
+ nil 1542 nil nil )
+
+ (!type user_struct 1929
+ (!type pointer 1930 nil gc_used
+ (!type already_seen 1929)
+ )
+ gc_pointed_to "vec<pending_weak,va_gc>"
+ (!srcfileloc "c-family/c-pragma.cc" 248)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "c-family/c-pragma.cc" 248)
+ nil )
+ (!pair "pending_weak"
+ (!type already_seen 1928)
+ (!srcfileloc "c-family/c-pragma.cc" 248)
+ nil )
+ )
+ )
+
+ (!type struct 1931 nil gc_used "pending_redefinition"
+ (!srcfileloc "c-family/c-pragma.cc" 496)
+ (!fields 2
+ (!pair "oldname"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-pragma.cc" 491)
+ nil )
+ (!pair "newname"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-pragma.cc" 492)
+ nil )
+ )
+ nil 1542 nil nil )
+
+ (!type user_struct 1932
+ (!type pointer 1933 nil gc_used
+ (!type already_seen 1932)
+ )
+ gc_pointed_to "vec<pending_redefinition,va_gc>"
+ (!srcfileloc "c-family/c-pragma.cc" 496)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "c-family/c-pragma.cc" 496)
+ nil )
+ (!pair "pending_redefinition"
+ (!type already_seen 1931)
+ (!srcfileloc "c-family/c-pragma.cc" 496)
+ nil )
+ )
+ )
+
+ (!type struct 1934 nil gc_unused "pragma_diagnostic_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1935
+ (!type pointer 1936 nil gc_used
+ (!type already_seen 1935)
+ )
+ gc_pointed_to "opt_stack"
+ (!srcfileloc "c-family/c-pragma.cc" 1237)
+ (!fields 6
+ (!pair "prev"
+ (!type already_seen 1936)
+ (!srcfileloc "c-family/c-pragma.cc" 1231)
+ nil )
+ (!pair "target_binary"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-pragma.cc" 1232)
+ nil )
+ (!pair "target_strings"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-pragma.cc" 1233)
+ nil )
+ (!pair "optimize_binary"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-pragma.cc" 1234)
+ nil )
+ (!pair "optimize_strings"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-pragma.cc" 1235)
+ nil )
+ (!pair "saved_global_options"
+ (!type already_seen 842)
+ (!srcfileloc "c-family/c-pragma.cc" 1236)
+ (!options
+ (!option skip string "")
+ )
+ )
+ )
+ nil 1542 nil nil )
+
+ (!type struct 1937 nil gc_unused "pragma_pp_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1938 nil gc_unused "omp_pragma_def"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1939 nil gc_unused "function_format_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1940 nil gc_unused "format_wanted_type"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1941 nil gc_unused "format_check_results"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1942 nil gc_unused "format_check_context"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1943 nil gc_unused "flag_chars_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1944 nil gc_unused "length_modifier"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1945 nil gc_unused "argument_parser"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1946 nil gc_unused "baltoks_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1947 nil gc_unused "token_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1948 nil gc_unused "indirection_suffix"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1949 nil gc_unused "range_label_for_format_type_mismatch"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1950
+ (!type pointer 1951 nil gc_unused
+ (!type already_seen 1950)
+ )
+ gc_used "c_token"
+ (!srcfileloc "c/c-parser.cc" 190)
+ (!fields 7
+ (!pair "type"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.h" 55)
+ nil )
+ (!pair "id_kind"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.h" 58)
+ nil )
+ (!pair "keyword"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.h" 61)
+ nil )
+ (!pair "pragma_kind"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.h" 64)
+ nil )
+ (!pair "location"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.h" 66)
+ nil )
+ (!pair "value"
+ (!type already_seen 23)
+ (!srcfileloc "c/c-parser.h" 68)
+ nil )
+ (!pair "flags"
+ (!type already_seen 8)
+ (!srcfileloc "c/c-parser.h" 70)
+ nil )
+ )
+ nil 514 nil nil )
+
+ (!type struct 1952
+ (!type pointer 1953 nil gc_used
+ (!type already_seen 1952)
+ )
+ gc_pointed_to "c_parser"
+ (!srcfileloc "c/c-parser.cc" 282)
+ (!fields 17
+ (!pair "tokens"
+ (!type already_seen 1951)
+ (!srcfileloc "c/c-parser.cc" 190)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "tokens_buf"
+ (!type array 1954 nil gc_used "4"
+ (!type already_seen 1950)
+ )
+ (!srcfileloc "c/c-parser.cc" 192)
+ nil )
+ (!pair "tokens_avail"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 195)
+ nil )
+ (!pair "raw_tokens"
+ (!type pointer 1955 nil gc_used
+ (!type user_struct 1956
+ (!type already_seen 1955)
+ gc_pointed_to "vec<c_token,va_gc>"
+ (!srcfileloc "c/c-parser.cc" 198)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "c/c-parser.cc" 198)
+ nil )
+ (!pair "c_token"
+ (!type already_seen 1950)
+ (!srcfileloc "c/c-parser.cc" 198)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "c/c-parser.cc" 198)
+ nil )
+ (!pair "raw_tokens_used"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 201)
+ nil )
+ (!pair "error"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 205)
+ nil )
+ (!pair "in_pragma"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 208)
+ nil )
+ (!pair "in_if_block"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 210)
+ nil )
+ (!pair "lex_joined_string"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 215)
+ nil )
+ (!pair "translate_strings_p"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 219)
+ nil )
+ (!pair "objc_pq_context"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 225)
+ nil )
+ (!pair "objc_could_be_foreach_context"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 230)
+ nil )
+ (!pair "objc_need_raw_identifier"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 235)
+ nil )
+ (!pair "in_transaction"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 238)
+ nil )
+ (!pair "objc_property_attr_context"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 241)
+ nil )
+ (!pair "seen_string_literal"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 248)
+ nil )
+ (!pair "last_token_location"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-parser.cc" 251)
+ nil )
+ )
+ nil 514 nil nil )
+
+ (!type already_seen 1956)
+
+ (!type struct 1957 nil gc_unused "token_pair"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1958 nil gc_unused "matching_paren_traits"
+ (!srcfileloc "c/c-parser.cc" 1148)
+ (!fields 0 )
+ nil 514 nil nil )
+
+ (!type user_struct 1959 nil gc_unused "token_pair<matching_paren_traits>"
+ (!srcfileloc "c/c-parser.cc" 1148)
+ (!fields 1
+ (!pair "matching_paren_traits"
+ (!type already_seen 1958)
+ (!srcfileloc "c/c-parser.cc" 1148)
+ nil )
+ )
+ )
+
+ (!type struct 1960 nil gc_unused "matching_brace_traits"
+ (!srcfileloc "c/c-parser.cc" 1166)
+ (!fields 0 )
+ nil 514 nil nil )
+
+ (!type user_struct 1961 nil gc_unused "token_pair<matching_brace_traits>"
+ (!srcfileloc "c/c-parser.cc" 1166)
+ (!fields 1
+ (!pair "matching_brace_traits"
+ (!type already_seen 1960)
+ (!srcfileloc "c/c-parser.cc" 1166)
+ nil )
+ )
+ )
+
+ (!type struct 1962 nil gc_unused "oacc_routine_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1963 nil gc_unused "c_translation_unit"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1964 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/c/c-parser.cc:8331"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1965 nil gc_unused "c_generic_association"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1966 nil gc_unused "omp_dim"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 459)
+
+ (!type struct 1967 nil gc_used "c_omp_declare_target_attr"
+ (!srcfileloc "c/c-lang.h" 69)
+ (!fields 1
+ (!pair "device_type"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-lang.h" 64)
+ nil )
+ )
+ nil 514 nil nil )
+
+ (!type user_struct 1968
+ (!type pointer 1969 nil gc_used
+ (!type already_seen 1968)
+ )
+ gc_pointed_to "vec<c_omp_declare_target_attr,va_gc>"
+ (!srcfileloc "c/c-lang.h" 69)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "c/c-lang.h" 69)
+ nil )
+ (!pair "c_omp_declare_target_attr"
+ (!type already_seen 1967)
+ (!srcfileloc "c/c-lang.h" 69)
+ nil )
+ )
+ )
+
+ (!type already_seen 84)
+
+ (!type already_seen 91)
+
+ (!type struct 1970 nil gc_used "cxx_saved_binding"
+ (!srcfileloc "cp/cp-tree.h" 1837)
+ (!fields 3
+ (!pair "identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/name-lookup.h" 65)
+ nil )
+ (!pair "binding"
+ (!type already_seen 90)
+ (!srcfileloc "cp/name-lookup.h" 67)
+ nil )
+ (!pair "real_type_value"
+ (!type already_seen 23)
+ (!srcfileloc "cp/name-lookup.h" 68)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type already_seen 675)
+
+ (!type already_seen 674)
+
+ (!type already_seen 672)
+
+ (!type already_seen 670)
+
+ (!type already_seen 668)
+
+ (!type already_seen 89)
+
+ (!type already_seen 88)
+
+ (!type struct 1971 nil gc_unused "cp_expr"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 664)
+
+ (!type already_seen 665)
+
+ (!type already_seen 667)
+
+ (!type struct 1972 nil gc_unused "ovl_iterator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1973 nil gc_unused "ovl_range"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1974 nil gc_unused "lkp_iterator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1975 nil gc_unused "lkp_range"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 94)
+
+ (!type struct 1976 nil gc_unused "releasing_vec"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 677)
+
+ (!type already_seen 676)
+
+ (!type already_seen 72)
+
+ (!type already_seen 678)
+
+ (!type already_seen 679)
+
+ (!type already_seen 680)
+
+ (!type already_seen 681)
+
+ (!type already_seen 682)
+
+ (!type already_seen 683)
+
+ (!type already_seen 81)
+
+ (!type already_seen 79)
+
+ (!type already_seen 684)
+
+ (!type already_seen 685)
+
+ (!type struct 1977 nil gc_used "cp_omp_declare_target_attr"
+ (!srcfileloc "cp/cp-tree.h" 1881)
+ (!fields 2
+ (!pair "attr_syntax"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1826)
+ nil )
+ (!pair "device_type"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1827)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type struct 1978 nil gc_used "cp_omp_begin_assumes_data"
+ (!srcfileloc "cp/cp-tree.h" 1882)
+ (!fields 1
+ (!pair "attr_syntax"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1831)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type user_struct 1979
+ (!type pointer 1980 nil gc_used
+ (!type already_seen 1979)
+ )
+ gc_pointed_to "vec<cxx_saved_binding,va_gc>"
+ (!srcfileloc "cp/cp-tree.h" 1837)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/cp-tree.h" 1837)
+ nil )
+ (!pair "cxx_saved_binding"
+ (!type already_seen 1970)
+ (!srcfileloc "cp/cp-tree.h" 1837)
+ nil )
+ )
+ )
+
+ (!type user_struct 1981
+ (!type pointer 1982 nil gc_used
+ (!type already_seen 1981)
+ )
+ gc_pointed_to "vec<cp_omp_declare_target_attr,va_gc>"
+ (!srcfileloc "cp/cp-tree.h" 1881)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/cp-tree.h" 1881)
+ nil )
+ (!pair "cp_omp_declare_target_attr"
+ (!type already_seen 1977)
+ (!srcfileloc "cp/cp-tree.h" 1881)
+ nil )
+ )
+ )
+
+ (!type user_struct 1983
+ (!type pointer 1984 nil gc_used
+ (!type already_seen 1983)
+ )
+ gc_pointed_to "vec<cp_omp_begin_assumes_data,va_gc>"
+ (!srcfileloc "cp/cp-tree.h" 1882)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/cp-tree.h" 1882)
+ nil )
+ (!pair "cp_omp_begin_assumes_data"
+ (!type already_seen 1978)
+ (!srcfileloc "cp/cp-tree.h" 1882)
+ nil )
+ )
+ )
+
+ (!type struct 1985
+ (!type pointer 1986 nil gc_used
+ (!type already_seen 1985)
+ )
+ gc_pointed_to "saved_scope"
+ (!srcfileloc "cp/cp-tree.h" 1885)
+ (!fields 34
+ (!pair "old_bindings"
+ (!type already_seen 1980)
+ (!srcfileloc "cp/cp-tree.h" 1837)
+ nil )
+ (!pair "old_namespace"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1838)
+ nil )
+ (!pair "decl_ns_list"
+ (!type already_seen 85)
+ (!srcfileloc "cp/cp-tree.h" 1839)
+ nil )
+ (!pair "class_name"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1840)
+ nil )
+ (!pair "class_type"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1841)
+ nil )
+ (!pair "access_specifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1842)
+ nil )
+ (!pair "function_decl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1843)
+ nil )
+ (!pair "lang_base"
+ (!type already_seen 85)
+ (!srcfileloc "cp/cp-tree.h" 1844)
+ nil )
+ (!pair "lang_name"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1845)
+ nil )
+ (!pair "template_parms"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1846)
+ nil )
+ (!pair "x_previous_class_level"
+ (!type already_seen 83)
+ (!srcfileloc "cp/cp-tree.h" 1847)
+ nil )
+ (!pair "x_saved_tree"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1848)
+ nil )
+ (!pair "x_current_class_ptr"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1851)
+ nil )
+ (!pair "x_current_class_ref"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 1852)
+ nil )
+ (!pair "x_processing_template_decl"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1854)
+ nil )
+ (!pair "x_processing_specialization"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1855)
+ nil )
+ (!pair "x_processing_constraint"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1856)
+ nil )
+ (!pair "x_processing_contract_condition"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1857)
+ nil )
+ (!pair "suppress_location_wrappers"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1858)
+ nil )
+ (!pair "x_processing_explicit_instantiation"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1859)
+ nil )
+ (!pair "need_pop_function_context"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1860)
+ nil )
+ (!pair "discarded_stmt"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1864)
+ nil )
+ (!pair "consteval_if_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1868)
+ nil )
+ (!pair "unevaluated_operand"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1870)
+ nil )
+ (!pair "inhibit_evaluation_warnings"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1871)
+ nil )
+ (!pair "noexcept_operand"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1872)
+ nil )
+ (!pair "ref_temp_count"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 1873)
+ nil )
+ (!pair "x_stmt_tree"
+ (!type already_seen 452)
+ (!srcfileloc "cp/cp-tree.h" 1875)
+ nil )
+ (!pair "class_bindings"
+ (!type already_seen 83)
+ (!srcfileloc "cp/cp-tree.h" 1877)
+ nil )
+ (!pair "bindings"
+ (!type already_seen 83)
+ (!srcfileloc "cp/cp-tree.h" 1878)
+ nil )
+ (!pair "x_local_specializations"
+ (!type already_seen 394)
+ (!srcfileloc "cp/cp-tree.h" 1880)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "omp_declare_target_attribute"
+ (!type already_seen 1982)
+ (!srcfileloc "cp/cp-tree.h" 1881)
+ nil )
+ (!pair "omp_begin_assumes"
+ (!type already_seen 1984)
+ (!srcfileloc "cp/cp-tree.h" 1882)
+ nil )
+ (!pair "prev"
+ (!type already_seen 1986)
+ (!srcfileloc "cp/cp-tree.h" 1884)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type struct 1987 nil gc_unused "processing_template_decl_sentinel"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1988 nil gc_unused "warning_sentinel"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1989 nil gc_unused "iloc_sentinel"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1990 nil gc_unused "temp_override"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1991 nil gc_unused "type_identity"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1992 nil gc_unused "in_consteval_if_p_temp_override"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1993
+ (!type pointer 1994 nil gc_used
+ (!type already_seen 1993)
+ )
+ gc_pointed_to "named_label_entry"
+ (!srcfileloc "cp/decl.cc" 227)
+ (!fields 14
+ (!pair "name"
+ (!type already_seen 23)
+ (!srcfileloc "cp/decl.cc" 194)
+ nil )
+ (!pair "label_decl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/decl.cc" 196)
+ nil )
+ (!pair "outer"
+ (!type already_seen 1994)
+ (!srcfileloc "cp/decl.cc" 198)
+ nil )
+ (!pair "binding_level"
+ (!type already_seen 83)
+ (!srcfileloc "cp/decl.cc" 203)
+ nil )
+ (!pair "names_in_scope"
+ (!type already_seen 23)
+ (!srcfileloc "cp/decl.cc" 208)
+ nil )
+ (!pair "bad_decls"
+ (!type already_seen 85)
+ (!srcfileloc "cp/decl.cc" 212)
+ nil )
+ (!pair "uses"
+ (!type pointer 1995 nil gc_used
+ (!type struct 1996
+ (!type already_seen 1995)
+ gc_pointed_to "named_label_use_entry"
+ (!srcfileloc "cp/decl.cc" 215)
+ (!fields 5
+ (!pair "next"
+ (!type already_seen 1995)
+ (!srcfileloc "cp/decl.cc" 171)
+ nil )
+ (!pair "binding_level"
+ (!type already_seen 83)
+ (!srcfileloc "cp/decl.cc" 175)
+ nil )
+ (!pair "names_in_scope"
+ (!type already_seen 23)
+ (!srcfileloc "cp/decl.cc" 179)
+ nil )
+ (!pair "o_goto_locus"
+ (!type already_seen 2)
+ (!srcfileloc "cp/decl.cc" 181)
+ nil )
+ (!pair "in_omp_scope"
+ (!type already_seen 2)
+ (!srcfileloc "cp/decl.cc" 185)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 1028 nil nil )
+ )
+ (!srcfileloc "cp/decl.cc" 215)
+ nil )
+ (!pair "in_try_scope"
+ (!type already_seen 2)
+ (!srcfileloc "cp/decl.cc" 220)
+ nil )
+ (!pair "in_catch_scope"
+ (!type already_seen 2)
+ (!srcfileloc "cp/decl.cc" 221)
+ nil )
+ (!pair "in_omp_scope"
+ (!type already_seen 2)
+ (!srcfileloc "cp/decl.cc" 222)
+ nil )
+ (!pair "in_transaction_scope"
+ (!type already_seen 2)
+ (!srcfileloc "cp/decl.cc" 223)
+ nil )
+ (!pair "in_constexpr_if"
+ (!type already_seen 2)
+ (!srcfileloc "cp/decl.cc" 224)
+ nil )
+ (!pair "in_consteval_if"
+ (!type already_seen 2)
+ (!srcfileloc "cp/decl.cc" 225)
+ nil )
+ (!pair "in_stmt_expr"
+ (!type already_seen 2)
+ (!srcfileloc "cp/decl.cc" 226)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 1028 nil nil )
+
+ (!type already_seen 456)
+
+ (!type already_seen 455)
+
+ (!type already_seen 538)
+
+ (!type already_seen 537)
+
+ (!type already_seen 66)
+
+ (!type already_seen 67)
+
+ (!type already_seen 69)
+
+ (!type already_seen 70)
+
+ (!type already_seen 68)
+
+ (!type already_seen 93)
+
+ (!type already_seen 82)
+
+ (!type already_seen 95)
+
+ (!type already_seen 96)
+
+ (!type already_seen 65)
+
+ (!type struct 1997 nil gc_unused "aggr_init_expr_arg_iterator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1998 nil gc_unused "cp_unevaluated"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 1999 nil gc_unused "cp_evaluated"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2000 nil gc_unused "local_specialization_stack"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2001 nil gc_pointed_to "spec_entry"
+ (!srcfileloc "cp/cp-tree.h" 5727)
+ (!fields 3
+ (!pair "tmpl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 5724)
+ nil )
+ (!pair "args"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 5725)
+ nil )
+ (!pair "spec"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 5726)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 1028 nil nil )
+
+ (!type struct 2002 nil gc_used "ovl_op_info_t"
+ (!srcfileloc "cp/cp-tree.h" 6149)
+ (!fields 6
+ (!pair "identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 6135)
+ nil )
+ (!pair "name"
+ (!type already_seen 11)
+ (!srcfileloc "cp/cp-tree.h" 6137)
+ nil )
+ (!pair "mangled_name"
+ (!type already_seen 11)
+ (!srcfileloc "cp/cp-tree.h" 6139)
+ nil )
+ (!pair "tree_code"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 6141)
+ nil )
+ (!pair "ovl_op_code"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 6143)
+ nil )
+ (!pair "flags"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 6145)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type struct 2003 nil gc_unused "cp_decl_specifier_seq"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2004 nil gc_unused "cp_declarator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2005 nil gc_unused "cp_parameter_declarator"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2006
+ (!type pointer 2007 nil gc_used
+ (!type already_seen 2006)
+ )
+ gc_pointed_to "tinst_level"
+ (!srcfileloc "cp/pt.cc" 9550)
+ (!fields 8
+ (!pair "next"
+ (!type already_seen 2007)
+ (!srcfileloc "cp/cp-tree.h" 6431)
+ nil )
+ (!pair "tldcl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 6443)
+ nil )
+ (!pair "targs"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 6443)
+ nil )
+ (!pair "path"
+ (!type already_seen 387)
+ (!srcfileloc "cp/cp-tree.h" 6449)
+ nil )
+ (!pair "visible"
+ (!type already_seen 387)
+ (!srcfileloc "cp/cp-tree.h" 6450)
+ nil )
+ (!pair "locus"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 6495)
+ nil )
+ (!pair "errors"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 6498)
+ nil )
+ (!pair "refcount"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 6506)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 1028 nil nil )
+
+ (!type struct 2008 nil gc_unused "access_failure_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2009
+ (!type pointer 2010 nil gc_used
+ (!type already_seen 2009)
+ )
+ gc_pointed_to "module_state"
+ (!srcfileloc "cp/module.cc" 3762)
+ (!fields 28
+ (!pair "imports"
+ (!type already_seen 387)
+ (!srcfileloc "cp/module.cc" 3510)
+ nil )
+ (!pair "exports"
+ (!type already_seen 387)
+ (!srcfileloc "cp/module.cc" 3511)
+ nil )
+ (!pair "parent"
+ (!type already_seen 2010)
+ (!srcfileloc "cp/module.cc" 3513)
+ nil )
+ (!pair "name"
+ (!type already_seen 23)
+ (!srcfileloc "cp/module.cc" 3514)
+ nil )
+ (!pair "slurp"
+ (!type pointer 2011 nil gc_used
+ (!type struct 2012
+ (!type already_seen 2011)
+ gc_pointed_to "slurping"
+ (!srcfileloc "cp/module.cc" 3439)
+ (!fields 9
+ (!pair "remap"
+ (!type pointer 2013 nil gc_unused
+ (!type user_struct 2014
+ (!type already_seen 2013)
+ gc_unused "vec<unsigned,va_heap,vl_embed>"
+ (!srcfileloc "cp/module.cc" 3371)
+ (!fields 3
+ (!pair "vl_embed"
+ (!type undefined 2015 nil gc_unused "vl_embed"
+ (!srcfileloc "cp/module.cc" 3371)
+ )
+ (!srcfileloc "cp/module.cc" 3371)
+ nil )
+ (!pair "va_heap"
+ (!type already_seen 835)
+ (!srcfileloc "cp/module.cc" 3371)
+ nil )
+ (!pair "unsigned"
+ (!type already_seen 1408)
+ (!srcfileloc "cp/module.cc" 3371)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "cp/module.cc" 3372)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "from"
+ (!type pointer 2016 nil gc_unused
+ (!type struct 2017
+ (!type already_seen 2016)
+ gc_unused "elf_in"
+ (!srcfileloc "cp/module.cc" 3374)
+ (!fields 0 )
+ nil 1028 nil nil )
+ )
+ (!srcfileloc "cp/module.cc" 3374)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "headers"
+ (!type already_seen 387)
+ (!srcfileloc "cp/module.cc" 3378)
+ nil )
+ (!pair "macro_defs"
+ (!type struct 2018 nil gc_used "bytes_in"
+ (!srcfileloc "cp/module.cc" 3385)
+ (!fields 0 )
+ nil 1028 nil nil )
+ (!srcfileloc "cp/module.cc" 3385)
+ nil )
+ (!pair "macro_tbl"
+ (!type already_seen 2018)
+ (!srcfileloc "cp/module.cc" 3386)
+ nil )
+ (!pair "loc_deltas"
+ (!type user_struct 2019 nil gc_unused "std::pair<unsigned,unsigned>"
+ (!srcfileloc "cp/module.cc" 3154)
+ (!fields 2
+ (!pair "unsigned"
+ (!type already_seen 1408)
+ (!srcfileloc "cp/module.cc" 3154)
+ nil )
+ (!pair "unsigned"
+ (!type already_seen 1408)
+ (!srcfileloc "cp/module.cc" 3154)
+ nil )
+ )
+ )
+ (!srcfileloc "cp/module.cc" 3389)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "current"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3391)
+ nil )
+ (!pair "remaining"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3392)
+ nil )
+ (!pair "lru"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3393)
+ nil )
+ )
+ nil 1028 nil nil )
+ )
+ (!srcfileloc "cp/module.cc" 3516)
+ nil )
+ (!pair "flatname"
+ (!type already_seen 11)
+ (!srcfileloc "cp/module.cc" 3518)
+ nil )
+ (!pair "filename"
+ (!type already_seen 11)
+ (!srcfileloc "cp/module.cc" 3519)
+ nil )
+ (!pair "entity_lwm"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3522)
+ nil )
+ (!pair "entity_num"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3523)
+ nil )
+ (!pair "ordinary_locs"
+ (!type user_struct 2020 nil gc_unused "std::pair<location_t,location_t>"
+ (!srcfileloc "cp/module.cc" 3157)
+ (!fields 2
+ (!pair "location_t"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3157)
+ nil )
+ (!pair "location_t"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3157)
+ nil )
+ )
+ )
+ (!srcfileloc "cp/module.cc" 3527)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "macro_locs"
+ (!type already_seen 2020)
+ (!srcfileloc "cp/module.cc" 3528)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "loc"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3533)
+ nil )
+ (!pair "crc"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3534)
+ nil )
+ (!pair "mod"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3536)
+ nil )
+ (!pair "remap"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3537)
+ nil )
+ (!pair "subst"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3539)
+ nil )
+ (!pair "loadedness"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3542)
+ nil )
+ (!pair "module_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3544)
+ nil )
+ (!pair "header_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3545)
+ nil )
+ (!pair "interface_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3546)
+ nil )
+ (!pair "partition_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3547)
+ nil )
+ (!pair "directness"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3550)
+ nil )
+ (!pair "exported_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3552)
+ nil )
+ (!pair "cmi_noted_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3553)
+ nil )
+ (!pair "active_init_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3555)
+ nil )
+ (!pair "inform_cmi_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3557)
+ nil )
+ (!pair "visited_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3558)
+ nil )
+ (!pair "extensions"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 3560)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ (!option chain_next string "%h.parent")
+ )
+ 1028 nil nil )
+
+ (!type struct 2021 nil gc_unused "conversion"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2022 nil gc_unused "deferring_access_check_sentinel"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2023 nil gc_unused "diagnostic_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2024 nil gc_unused "diagnosing_failed_constraint"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2025 nil gc_unused "processing_constraint_expression_sentinel"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2026 nil gc_used "atom_hasher"
+ (!srcfileloc "cp/constraint.cc" 814)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type struct 2027
+ (!type pointer 2028 nil gc_used
+ (!type already_seen 2027)
+ )
+ gc_pointed_to "constexpr_fundef"
+ (!srcfileloc "cp/constexpr.cc" 1112)
+ (!fields 4
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 8451)
+ nil )
+ (!pair "body"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 8452)
+ nil )
+ (!pair "parms"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 8453)
+ nil )
+ (!pair "result"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 8454)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 1028 nil nil )
+
+ (!type struct 2029 nil gc_unused "constexpr_ctx"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2030 nil gc_unused "uid_sensitive_constexpr_evaluation_sentinel"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2031 nil gc_unused "uid_sensitive_constexpr_evaluation_checker"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2032 nil gc_unused "push_access_scope_guard"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 77)
+
+ (!type already_seen 75)
+
+ (!type already_seen 74)
+
+ (!type user_struct 2033
+ (!type pointer 2034 nil gc_used
+ (!type already_seen 2033)
+ )
+ gc_pointed_to "vec<cp_token,va_gc>"
+ (!srcfileloc "cp/parser.h" 87)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/parser.h" 87)
+ nil )
+ (!pair "cp_token"
+ (!type already_seen 74)
+ (!srcfileloc "cp/parser.h" 87)
+ nil )
+ )
+ )
+
+ (!type user_struct 2035 nil gc_unused "vec<cp_token_position>"
+ (!srcfileloc "cp/parser.h" 101)
+ (!fields 1
+ (!pair "cp_token_position"
+ (!type already_seen 73)
+ (!srcfileloc "cp/parser.h" 101)
+ nil )
+ )
+ )
+
+ (!type struct 2036
+ (!type pointer 2037 nil gc_used
+ (!type already_seen 2036)
+ )
+ gc_pointed_to "cp_lexer"
+ (!srcfileloc "cp/parser.h" 236)
+ (!fields 11
+ (!pair "buffer"
+ (!type already_seen 2034)
+ (!srcfileloc "cp/parser.h" 87)
+ nil )
+ (!pair "last_token"
+ (!type already_seen 73)
+ (!srcfileloc "cp/parser.h" 91)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "next_token"
+ (!type already_seen 73)
+ (!srcfileloc "cp/parser.h" 95)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "saved_tokens"
+ (!type already_seen 2035)
+ (!srcfileloc "cp/parser.h" 101)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "saved_type"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 104)
+ nil )
+ (!pair "saved_keyword"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 105)
+ nil )
+ (!pair "next"
+ (!type already_seen 2037)
+ (!srcfileloc "cp/parser.h" 108)
+ nil )
+ (!pair "debugging_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 111)
+ nil )
+ (!pair "in_pragma"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 115)
+ nil )
+ (!pair "in_omp_attribute_pragma"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 119)
+ nil )
+ (!pair "orphan_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 123)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type struct 2038 nil gc_unused "cp_token_ident"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2039 nil gc_used "cp_default_arg_entry"
+ (!srcfileloc "cp/parser.h" 169)
+ (!fields 2
+ (!pair "class_type"
+ (!type already_seen 23)
+ (!srcfileloc "cp/parser.h" 157)
+ nil )
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/parser.h" 160)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type user_struct 2040
+ (!type pointer 2041 nil gc_used
+ (!type already_seen 2040)
+ )
+ gc_pointed_to "vec<cp_default_arg_entry,va_gc>"
+ (!srcfileloc "cp/parser.h" 169)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/parser.h" 169)
+ nil )
+ (!pair "cp_default_arg_entry"
+ (!type already_seen 2039)
+ (!srcfileloc "cp/parser.h" 169)
+ nil )
+ )
+ )
+
+ (!type struct 2042 nil gc_used "cp_unparsed_functions_entry"
+ (!srcfileloc "cp/parser.h" 380)
+ (!fields 5
+ (!pair "funs_with_default_args"
+ (!type already_seen 2041)
+ (!srcfileloc "cp/parser.h" 169)
+ nil )
+ (!pair "funs_with_definitions"
+ (!type already_seen 85)
+ (!srcfileloc "cp/parser.h" 173)
+ nil )
+ (!pair "nsdmis"
+ (!type already_seen 85)
+ (!srcfileloc "cp/parser.h" 177)
+ nil )
+ (!pair "noexcepts"
+ (!type already_seen 85)
+ (!srcfileloc "cp/parser.h" 180)
+ nil )
+ (!pair "contracts"
+ (!type already_seen 85)
+ (!srcfileloc "cp/parser.h" 183)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type struct 2043
+ (!type pointer 2044 nil gc_used
+ (!type already_seen 2043)
+ )
+ gc_pointed_to "cp_parser_context"
+ (!srcfileloc "cp/parser.h" 270)
+ (!fields 3
+ (!pair "status"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 205)
+ nil )
+ (!pair "object_type"
+ (!type already_seen 23)
+ (!srcfileloc "cp/parser.h" 210)
+ nil )
+ (!pair "next"
+ (!type already_seen 2044)
+ (!srcfileloc "cp/parser.h" 213)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type struct 2045
+ (!type pointer 2046 nil gc_unused
+ (!type already_seen 2045)
+ )
+ gc_unused "cp_omp_declare_simd_data"
+ (!srcfileloc "cp/parser.h" 392)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type struct 2047
+ (!type pointer 2048 nil gc_unused
+ (!type already_seen 2047)
+ )
+ gc_unused "cp_oacc_routine_data"
+ (!srcfileloc "cp/parser.h" 396)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2049
+ (!type pointer 2050 nil gc_used
+ (!type already_seen 2049)
+ )
+ gc_pointed_to "vec<cp_unparsed_functions_entry,va_gc>"
+ (!srcfileloc "cp/parser.h" 380)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/parser.h" 380)
+ nil )
+ (!pair "cp_unparsed_functions_entry"
+ (!type already_seen 2042)
+ (!srcfileloc "cp/parser.h" 380)
+ nil )
+ )
+ )
+
+ (!type struct 2051
+ (!type pointer 2052 nil gc_used
+ (!type already_seen 2051)
+ )
+ gc_pointed_to "cp_parser"
+ (!srcfileloc "cp/parser.cc" 695)
+ (!fields 39
+ (!pair "lexer"
+ (!type already_seen 2037)
+ (!srcfileloc "cp/parser.h" 236)
+ nil )
+ (!pair "scope"
+ (!type already_seen 23)
+ (!srcfileloc "cp/parser.h" 251)
+ nil )
+ (!pair "object_scope"
+ (!type already_seen 23)
+ (!srcfileloc "cp/parser.h" 258)
+ nil )
+ (!pair "qualifying_scope"
+ (!type already_seen 23)
+ (!srcfileloc "cp/parser.h" 259)
+ nil )
+ (!pair "context"
+ (!type already_seen 2044)
+ (!srcfileloc "cp/parser.h" 270)
+ nil )
+ (!pair "allow_gnu_extensions_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 274)
+ nil )
+ (!pair "greater_than_is_operator_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 281)
+ nil )
+ (!pair "default_arg_ok_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 286)
+ nil )
+ (!pair "integral_constant_expression_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 290)
+ nil )
+ (!pair "allow_non_integral_constant_expression_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 296)
+ nil )
+ (!pair "non_integral_constant_expression_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 300)
+ nil )
+ (!pair "local_variables_forbidden_p"
+ (!type already_seen 8)
+ (!srcfileloc "cp/parser.h" 307)
+ nil )
+ (!pair "in_unbraced_linkage_specification_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 312)
+ nil )
+ (!pair "in_declarator_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 316)
+ nil )
+ (!pair "in_template_argument_list_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 319)
+ nil )
+ (!pair "in_statement"
+ (!type already_seen 8)
+ (!srcfileloc "cp/parser.h" 331)
+ nil )
+ (!pair "in_switch_statement_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 337)
+ nil )
+ (!pair "in_type_id_in_expr_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 342)
+ nil )
+ (!pair "translate_strings_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 346)
+ nil )
+ (!pair "in_function_body"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 350)
+ nil )
+ (!pair "in_transaction"
+ (!type already_seen 8)
+ (!srcfileloc "cp/parser.h" 354)
+ nil )
+ (!pair "colon_corrects_to_scope_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 357)
+ nil )
+ (!pair "colon_doesnt_start_class_def_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 363)
+ nil )
+ (!pair "objective_c_message_context_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 367)
+ nil )
+ (!pair "type_definition_forbidden_message"
+ (!type already_seen 11)
+ (!srcfileloc "cp/parser.h" 372)
+ nil )
+ (!pair "type_definition_forbidden_message_arg"
+ (!type already_seen 11)
+ (!srcfileloc "cp/parser.h" 375)
+ nil )
+ (!pair "unparsed_queues"
+ (!type already_seen 2050)
+ (!srcfileloc "cp/parser.h" 380)
+ nil )
+ (!pair "num_classes_being_defined"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 384)
+ nil )
+ (!pair "num_template_parameter_lists"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 388)
+ nil )
+ (!pair "omp_declare_simd"
+ (!type already_seen 2046)
+ (!srcfileloc "cp/parser.h" 392)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "oacc_routine"
+ (!type already_seen 2048)
+ (!srcfileloc "cp/parser.h" 396)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "auto_is_implicit_function_template_parm_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 400)
+ nil )
+ (!pair "fully_implicit_function_template_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 405)
+ nil )
+ (!pair "omp_attrs_forbidden_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 408)
+ nil )
+ (!pair "implicit_template_parms"
+ (!type already_seen 23)
+ (!srcfileloc "cp/parser.h" 416)
+ nil )
+ (!pair "implicit_template_scope"
+ (!type already_seen 83)
+ (!srcfileloc "cp/parser.h" 423)
+ nil )
+ (!pair "in_result_type_constraint_p"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 428)
+ nil )
+ (!pair "prevent_constrained_type_specifiers"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 432)
+ nil )
+ (!pair "innermost_linkage_specification_location"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.h" 436)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type struct 2053 nil gc_unused "rejection_reason"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2054 nil gc_unused "z_candidate"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2055 nil gc_unused "candidate_warning"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2056 nil gc_unused "conversion_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2057 nil gc_unused "conversion_obstack_sentinel"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2058 nil gc_unused "dealloc_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2059 nil gc_unused "NonPublicField"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2060 nil gc_unused "NonTrivialField"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2061
+ (!type pointer 2062 nil gc_unused
+ (!type already_seen 2061)
+ )
+ gc_unused "class_stack_node"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2063 nil gc_unused "vtbl_init_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2064 nil gc_unused "flexmems_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2065 nil gc_unused "abi_tag_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2066 nil gc_unused "find_final_overrider_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 2067
+ (!type pointer 2068 nil gc_used
+ (!type already_seen 2067)
+ )
+ gc_pointed_to "hash_map<tree,int>"
+ (!srcfileloc "cp/class.cc" 3407)
+ (!fields 2
+ (!pair "int"
+ (!type already_seen 374)
+ (!srcfileloc "cp/class.cc" 3407)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "cp/class.cc" 3407)
+ nil )
+ )
+ )
+
+ (!type struct 2069 nil gc_unused "secondary_vptr_vtt_init_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2070 nil gc_used "constexpr_fundef_hasher"
+ (!srcfileloc "cp/constexpr.cc" 189)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2071
+ (!type pointer 2072 nil gc_used
+ (!type already_seen 2071)
+ )
+ gc_pointed_to "hash_table<constexpr_fundef_hasher>"
+ (!srcfileloc "cp/constexpr.cc" 189)
+ (!fields 1
+ (!pair "constexpr_fundef_hasher"
+ (!type already_seen 2070)
+ (!srcfileloc "cp/constexpr.cc" 189)
+ nil )
+ )
+ )
+
+ (!type struct 2073 nil gc_pointed_to "constexpr_call"
+ (!srcfileloc "cp/constexpr.cc" 1125)
+ (!fields 5
+ (!pair "fundef"
+ (!type already_seen 2028)
+ (!srcfileloc "cp/constexpr.cc" 1112)
+ nil )
+ (!pair "bindings"
+ (!type already_seen 23)
+ (!srcfileloc "cp/constexpr.cc" 1114)
+ nil )
+ (!pair "result"
+ (!type already_seen 23)
+ (!srcfileloc "cp/constexpr.cc" 1119)
+ nil )
+ (!pair "hash"
+ (!type already_seen 2)
+ (!srcfileloc "cp/constexpr.cc" 1122)
+ nil )
+ (!pair "manifestly_const_eval"
+ (!type already_seen 2)
+ (!srcfileloc "cp/constexpr.cc" 1124)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 1028 nil nil )
+
+ (!type struct 2074 nil gc_used "constexpr_call_hasher"
+ (!srcfileloc "cp/constexpr.cc" 1317)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type struct 2075 nil gc_unused "constexpr_global_ctx"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2076 nil gc_unused "modifiable_tracker"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 2077
+ (!type pointer 2078 nil gc_used
+ (!type already_seen 2077)
+ )
+ gc_pointed_to "hash_table<constexpr_call_hasher>"
+ (!srcfileloc "cp/constexpr.cc" 1317)
+ (!fields 1
+ (!pair "constexpr_call_hasher"
+ (!type already_seen 2074)
+ (!srcfileloc "cp/constexpr.cc" 1317)
+ nil )
+ )
+ )
+
+ (!type struct 2079 nil gc_unused "replace_decl_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2080 nil gc_unused "free_bindings"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2081 nil gc_unused "check_for_return_continue_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2082 nil gc_unused "subst_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2083 nil gc_unused "sat_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2084 nil gc_unused "norm_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2085 nil gc_pointed_to "norm_entry"
+ (!srcfileloc "cp/constraint.cc" 712)
+ (!fields 3
+ (!pair "tmpl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/constraint.cc" 707)
+ nil )
+ (!pair "args"
+ (!type already_seen 23)
+ (!srcfileloc "cp/constraint.cc" 709)
+ nil )
+ (!pair "norm"
+ (!type already_seen 23)
+ (!srcfileloc "cp/constraint.cc" 711)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 1028 nil nil )
+
+ (!type struct 2086 nil gc_used "norm_hasher"
+ (!srcfileloc "cp/constraint.cc" 735)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2087
+ (!type pointer 2088 nil gc_used
+ (!type already_seen 2087)
+ )
+ gc_pointed_to "hash_table<norm_hasher>"
+ (!srcfileloc "cp/constraint.cc" 735)
+ (!fields 1
+ (!pair "norm_hasher"
+ (!type already_seen 2086)
+ (!srcfileloc "cp/constraint.cc" 735)
+ nil )
+ )
+ )
+
+ (!type user_struct 2089
+ (!type pointer 2090 nil gc_used
+ (!type already_seen 2089)
+ )
+ gc_pointed_to "hash_table<atom_hasher>"
+ (!srcfileloc "cp/constraint.cc" 814)
+ (!fields 1
+ (!pair "atom_hasher"
+ (!type already_seen 2026)
+ (!srcfileloc "cp/constraint.cc" 814)
+ nil )
+ )
+ )
+
+ (!type struct 2091 nil gc_pointed_to "sat_entry"
+ (!srcfileloc "cp/constraint.cc" 2536)
+ (!fields 8
+ (!pair "atom"
+ (!type already_seen 23)
+ (!srcfileloc "cp/constraint.cc" 2506)
+ nil )
+ (!pair "args"
+ (!type already_seen 23)
+ (!srcfileloc "cp/constraint.cc" 2509)
+ nil )
+ (!pair "result"
+ (!type already_seen 23)
+ (!srcfileloc "cp/constraint.cc" 2516)
+ nil )
+ (!pair "location"
+ (!type already_seen 2)
+ (!srcfileloc "cp/constraint.cc" 2520)
+ nil )
+ (!pair "ftc_begin"
+ (!type already_seen 2)
+ (!srcfileloc "cp/constraint.cc" 2525)
+ nil )
+ (!pair "ftc_end"
+ (!type already_seen 2)
+ (!srcfileloc "cp/constraint.cc" 2525)
+ nil )
+ (!pair "diagnose_instability"
+ (!type already_seen 2)
+ (!srcfileloc "cp/constraint.cc" 2530)
+ nil )
+ (!pair "evaluating"
+ (!type already_seen 2)
+ (!srcfileloc "cp/constraint.cc" 2535)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 1028 nil nil )
+
+ (!type struct 2092 nil gc_used "sat_hasher"
+ (!srcfileloc "cp/constraint.cc" 2614)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2093
+ (!type pointer 2094 nil gc_used
+ (!type already_seen 2093)
+ )
+ gc_pointed_to "hash_table<sat_hasher>"
+ (!srcfileloc "cp/constraint.cc" 2614)
+ (!fields 1
+ (!pair "sat_hasher"
+ (!type already_seen 2092)
+ (!srcfileloc "cp/constraint.cc" 2614)
+ nil )
+ )
+ )
+
+ (!type struct 2095 nil gc_unused "satisfaction_cache"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2096 nil gc_pointed_to "coroutine_info"
+ (!srcfileloc "cp/coroutines.cc" 100)
+ (!fields 12
+ (!pair "function_decl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 84)
+ nil )
+ (!pair "actor_decl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 85)
+ nil )
+ (!pair "destroy_decl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 86)
+ nil )
+ (!pair "promise_type"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 87)
+ nil )
+ (!pair "handle_type"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 88)
+ nil )
+ (!pair "self_h_proxy"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 89)
+ nil )
+ (!pair "promise_proxy"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 92)
+ nil )
+ (!pair "return_void"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 93)
+ nil )
+ (!pair "first_coro_keyword"
+ (!type already_seen 2)
+ (!srcfileloc "cp/coroutines.cc" 94)
+ nil )
+ (!pair "coro_ret_type_error_emitted"
+ (!type already_seen 2)
+ (!srcfileloc "cp/coroutines.cc" 97)
+ nil )
+ (!pair "coro_promise_error_emitted"
+ (!type already_seen 2)
+ (!srcfileloc "cp/coroutines.cc" 98)
+ nil )
+ (!pair "coro_co_return_error_emitted"
+ (!type already_seen 2)
+ (!srcfileloc "cp/coroutines.cc" 99)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 1028 nil nil )
+
+ (!type struct 2097 nil gc_used "coroutine_info_hasher"
+ (!srcfileloc "cp/coroutines.cc" 114)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2098
+ (!type pointer 2099 nil gc_used
+ (!type already_seen 2098)
+ )
+ gc_pointed_to "hash_table<coroutine_info_hasher>"
+ (!srcfileloc "cp/coroutines.cc" 114)
+ (!fields 1
+ (!pair "coroutine_info_hasher"
+ (!type already_seen 2097)
+ (!srcfileloc "cp/coroutines.cc" 114)
+ nil )
+ )
+ )
+
+ (!type struct 2100 nil gc_unused "proxy_replace"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2101 nil gc_unused "coro_aw_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2102 nil gc_unused "suspend_point_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2103 nil gc_unused "await_xform_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2104 nil gc_unused "param_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2105 nil gc_unused "local_var_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2106 nil gc_unused "local_vars_transform"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2107 nil gc_unused "susp_frame_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2108 nil gc_unused "coro_interesting_subtree"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2109 nil gc_unused "var_nest_node"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2110 nil gc_unused "truth_if_transform"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2111 nil gc_unused "param_frame_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2112 nil gc_unused "local_vars_frame_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2113 nil gc_unused "cp_fold_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2114 nil gc_unused "cp_genericize_omp_taskreg"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2115 nil gc_unused "cp_genericize_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2116 nil gc_pointed_to "source_location_table_entry"
+ (!srcfileloc "cp/cp-gimplify.cc" 3387)
+ (!fields 3
+ (!pair "loc"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-gimplify.cc" 3384)
+ nil )
+ (!pair "uid"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-gimplify.cc" 3385)
+ nil )
+ (!pair "var"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-gimplify.cc" 3386)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 1028 nil nil )
+
+ (!type struct 2117 nil gc_used "source_location_table_entry_hash"
+ (!srcfileloc "cp/cp-gimplify.cc" 3463)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2118
+ (!type pointer 2119 nil gc_used
+ (!type already_seen 2118)
+ )
+ gc_pointed_to "hash_table<source_location_table_entry_hash>"
+ (!srcfileloc "cp/cp-gimplify.cc" 3463)
+ (!fields 1
+ (!pair "source_location_table_entry_hash"
+ (!type already_seen 2117)
+ (!srcfileloc "cp/cp-gimplify.cc" 3463)
+ nil )
+ )
+ )
+
+ (!type already_seen 1996)
+
+ (!type struct 2120 nil gc_used "incomplete_var"
+ (!srcfileloc "cp/decl.cc" 255)
+ (!fields 2
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/decl.cc" 250)
+ nil )
+ (!pair "incomplete_type"
+ (!type already_seen 23)
+ (!srcfileloc "cp/decl.cc" 251)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type user_struct 2121
+ (!type pointer 2122 nil gc_used
+ (!type already_seen 2121)
+ )
+ gc_pointed_to "vec<incomplete_var,va_gc>"
+ (!srcfileloc "cp/decl.cc" 255)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/decl.cc" 255)
+ nil )
+ (!pair "incomplete_var"
+ (!type already_seen 2120)
+ (!srcfileloc "cp/decl.cc" 255)
+ nil )
+ )
+ )
+
+ (!type struct 2123 nil gc_unused "cp_switch"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2124 nil gc_unused "typename_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2125 nil gc_used "typename_hasher"
+ (!srcfileloc "cp/decl.cc" 4178)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2126
+ (!type pointer 2127 nil gc_used
+ (!type already_seen 2126)
+ )
+ gc_pointed_to "hash_table<typename_hasher>"
+ (!srcfileloc "cp/decl.cc" 4178)
+ (!fields 1
+ (!pair "typename_hasher"
+ (!type already_seen 2125)
+ (!srcfileloc "cp/decl.cc" 4178)
+ nil )
+ )
+ )
+
+ (!type struct 2128 nil gc_unused "predefined_identifier"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2129 nil gc_unused "reshape_iter"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2130 nil gc_used "mangled_decl_hash"
+ (!srcfileloc "cp/decl2.cc" 125)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2131
+ (!type pointer 2132 nil gc_used
+ (!type already_seen 2131)
+ )
+ gc_pointed_to "hash_table<mangled_decl_hash>"
+ (!srcfileloc "cp/decl2.cc" 125)
+ (!fields 1
+ (!pair "mangled_decl_hash"
+ (!type already_seen 2130)
+ (!srcfileloc "cp/decl2.cc" 125)
+ nil )
+ )
+ )
+
+ (!type struct 2133 nil gc_used "priority_map_traits"
+ (!srcfileloc "cp/decl2.cc" 166)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2134
+ (!type pointer 2135 nil gc_used
+ (!type already_seen 2134)
+ )
+ gc_pointed_to "hash_map<unsigned,tree,priority_map_traits>"
+ (!srcfileloc "cp/decl2.cc" 166)
+ (!fields 3
+ (!pair "priority_map_traits"
+ (!type already_seen 2133)
+ (!srcfileloc "cp/decl2.cc" 166)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "cp/decl2.cc" 166)
+ nil )
+ (!pair "unsigned"
+ (!type already_seen 1408)
+ (!srcfileloc "cp/decl2.cc" 166)
+ nil )
+ )
+ )
+
+ (!type struct 2136 nil gc_used "pending_noexcept"
+ (!srcfileloc "cp/except.cc" 1102)
+ (!fields 2
+ (!pair "fn"
+ (!type already_seen 23)
+ (!srcfileloc "cp/except.cc" 1099)
+ nil )
+ (!pair "loc"
+ (!type already_seen 2)
+ (!srcfileloc "cp/except.cc" 1100)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type user_struct 2137
+ (!type pointer 2138 nil gc_used
+ (!type already_seen 2137)
+ )
+ gc_pointed_to "vec<pending_noexcept,va_gc>"
+ (!srcfileloc "cp/except.cc" 1102)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/except.cc" 1102)
+ nil )
+ (!pair "pending_noexcept"
+ (!type already_seen 2136)
+ (!srcfileloc "cp/except.cc" 1102)
+ nil )
+ )
+ )
+
+ (!type struct 2139 nil gc_unused "find_uninit_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2140 nil gc_used "lambda_sig_count"
+ (!srcfileloc "cp/lambda.cc" 1464)
+ (!fields 2
+ (!pair "fn"
+ (!type already_seen 23)
+ (!srcfileloc "cp/lambda.cc" 1455)
+ nil )
+ (!pair "count"
+ (!type already_seen 2)
+ (!srcfileloc "cp/lambda.cc" 1456)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type user_struct 2141
+ (!type pointer 2142 nil gc_used
+ (!type already_seen 2141)
+ )
+ gc_pointed_to "vec<lambda_sig_count,va_gc>"
+ (!srcfileloc "cp/lambda.cc" 1464)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/lambda.cc" 1464)
+ nil )
+ (!pair "lambda_sig_count"
+ (!type already_seen 2140)
+ (!srcfileloc "cp/lambda.cc" 1464)
+ nil )
+ )
+ )
+
+ (!type struct 2143 nil gc_used "lambda_discriminator"
+ (!srcfileloc "cp/lambda.cc" 1467)
+ (!fields 4
+ (!pair "scope"
+ (!type already_seen 23)
+ (!srcfileloc "cp/lambda.cc" 1460)
+ nil )
+ (!pair "nesting"
+ (!type already_seen 2)
+ (!srcfileloc "cp/lambda.cc" 1461)
+ nil )
+ (!pair "count"
+ (!type already_seen 2)
+ (!srcfileloc "cp/lambda.cc" 1463)
+ nil )
+ (!pair "discriminators"
+ (!type already_seen 2142)
+ (!srcfileloc "cp/lambda.cc" 1464)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type user_struct 2144
+ (!type pointer 2145 nil gc_used
+ (!type already_seen 2144)
+ )
+ gc_pointed_to "vec<lambda_discriminator,va_gc>"
+ (!srcfileloc "cp/lambda.cc" 1469)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/lambda.cc" 1469)
+ nil )
+ (!pair "lambda_discriminator"
+ (!type already_seen 2143)
+ (!srcfileloc "cp/lambda.cc" 1469)
+ nil )
+ )
+ )
+
+ (!type struct 2146 nil gc_unused "impl_files"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2147 nil gc_unused "module_token_filter"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2148 nil gc_used "conv_type_hasher"
+ (!srcfileloc "cp/lex.cc" 821)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2149
+ (!type pointer 2150 nil gc_used
+ (!type already_seen 2149)
+ )
+ gc_pointed_to "hash_table<conv_type_hasher>"
+ (!srcfileloc "cp/lex.cc" 821)
+ (!fields 1
+ (!pair "conv_type_hasher"
+ (!type already_seen 2148)
+ (!srcfileloc "cp/lex.cc" 821)
+ nil )
+ )
+ )
+
+ (!type struct 2151 nil gc_unused "clause"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2152 nil gc_unused "formula"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2153 nil gc_pointed_to "subsumption_entry"
+ (!srcfileloc "cp/logic.cc" 725)
+ (!fields 3
+ (!pair "lhs"
+ (!type already_seen 23)
+ (!srcfileloc "cp/logic.cc" 722)
+ nil )
+ (!pair "rhs"
+ (!type already_seen 23)
+ (!srcfileloc "cp/logic.cc" 723)
+ nil )
+ (!pair "result"
+ (!type already_seen 2)
+ (!srcfileloc "cp/logic.cc" 724)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 1028 nil nil )
+
+ (!type struct 2154 nil gc_used "subsumption_hasher"
+ (!srcfileloc "cp/logic.cc" 751)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2155
+ (!type pointer 2156 nil gc_used
+ (!type already_seen 2155)
+ )
+ gc_pointed_to "hash_table<subsumption_hasher>"
+ (!srcfileloc "cp/logic.cc" 751)
+ (!fields 1
+ (!pair "subsumption_hasher"
+ (!type already_seen 2154)
+ (!srcfileloc "cp/logic.cc" 751)
+ nil )
+ )
+ )
+
+ (!type struct 2157 nil gc_used "globals"
+ (!srcfileloc "cp/mangle.cc" 126)
+ (!fields 6
+ (!pair "substitutions"
+ (!type already_seen 85)
+ (!srcfileloc "cp/mangle.cc" 107)
+ nil )
+ (!pair "entity"
+ (!type already_seen 23)
+ (!srcfileloc "cp/mangle.cc" 110)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "parm_depth"
+ (!type already_seen 2)
+ (!srcfileloc "cp/mangle.cc" 113)
+ nil )
+ (!pair "need_abi_warning"
+ (!type already_seen 2)
+ (!srcfileloc "cp/mangle.cc" 117)
+ nil )
+ (!pair "need_cxx17_warning"
+ (!type already_seen 2)
+ (!srcfileloc "cp/mangle.cc" 120)
+ nil )
+ (!pair "mod"
+ (!type already_seen 2)
+ (!srcfileloc "cp/mangle.cc" 123)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type struct 2158 nil gc_unused "comp_cat_info_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2159 nil gc_unused "comp_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2160 nil gc_unused "nodel_ptr_hash"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 2161 nil gc_unused "simple_hashmap_traits<nodel_ptr_hash<void>,int>"
+ (!srcfileloc "cp/module.cc" 335)
+ (!fields 2
+ (!pair "int"
+ (!type already_seen 374)
+ (!srcfileloc "cp/module.cc" 335)
+ nil )
+ (!pair "nodel_ptr_hash<void"
+ (!type user_struct 2162 nil gc_unused "nodel_ptr_hash<void"
+ (!srcfileloc "cp/module.cc" 335)
+ (!fields 1
+ (!pair "void"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 335)
+ nil )
+ )
+ )
+ (!srcfileloc "cp/module.cc" 335)
+ nil )
+ )
+ )
+
+ (!type already_seen 2162)
+
+ (!type user_struct 2163 nil gc_unused "hash_map<void*,signed,ptr_int_traits>"
+ (!srcfileloc "cp/module.cc" 336)
+ (!fields 3
+ (!pair "ptr_int_traits"
+ (!type already_seen 2161)
+ (!srcfileloc "cp/module.cc" 336)
+ nil )
+ (!pair "signed"
+ (!type undefined 2164 nil gc_unused "signed"
+ (!srcfileloc "cp/module.cc" 336)
+ )
+ (!srcfileloc "cp/module.cc" 336)
+ nil )
+ (!pair "void"
+ (!type pointer 2165 nil gc_unused
+ (!type struct 2166
+ (!type already_seen 2165)
+ gc_unused "void"nil
+ (!fields 0 )
+ nil 0 nil nil )
+ )
+ (!srcfileloc "cp/module.cc" 336)
+ nil )
+ )
+ )
+
+ (!type already_seen 2166)
+
+ (!type already_seen 2164)
+
+ (!type struct 2167 nil gc_unused "data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2168 nil gc_unused "bytes"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 2017)
+
+ (!type already_seen 2018)
+
+ (!type struct 2169 nil gc_unused "elf_out"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2170 nil gc_unused "bytes_out"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2171 nil gc_unused "elf"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2172 nil gc_unused "stat"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2173 nil gc_unused "depset"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2174 nil gc_unused "pending_key"
+ (!srcfileloc "cp/module.cc" 2695)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2175 nil gc_unused "hash_map<pending_key,auto_vec<unsigned>>"
+ (!srcfileloc "cp/module.cc" 2695)
+ (!fields 2
+ (!pair "auto_vec<unsigned"
+ (!type user_struct 2176 nil gc_unused "auto_vec<unsigned"
+ (!srcfileloc "cp/module.cc" 2695)
+ (!fields 1
+ (!pair "unsigned"
+ (!type already_seen 1408)
+ (!srcfileloc "cp/module.cc" 2695)
+ nil )
+ )
+ )
+ (!srcfileloc "cp/module.cc" 2695)
+ nil )
+ (!pair "pending_key"
+ (!type already_seen 2174)
+ (!srcfileloc "cp/module.cc" 2695)
+ nil )
+ )
+ )
+
+ (!type already_seen 2176)
+
+ (!type user_struct 2177 nil gc_unused "hash_map<tree,auto_vec<tree>>"
+ (!srcfileloc "cp/module.cc" 2708)
+ (!fields 2
+ (!pair "auto_vec<tree"
+ (!type user_struct 2178 nil gc_unused "auto_vec<tree"
+ (!srcfileloc "cp/module.cc" 2708)
+ (!fields 1
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "cp/module.cc" 2708)
+ nil )
+ )
+ )
+ (!srcfileloc "cp/module.cc" 2708)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "cp/module.cc" 2708)
+ nil )
+ )
+ )
+
+ (!type already_seen 2178)
+
+ (!type struct 2179 nil gc_unused "merge_key"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 2180 nil gc_unused "hash_map<tree,uintptr_t,simple_hashmap_traits<nodel_ptr_hash<tree_node>,uintptr_t>>"
+ (!srcfileloc "cp/module.cc" 2837)
+ (!fields 4
+ (!pair "uintptr_t"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 2837)
+ nil )
+ (!pair "simple_hashmap_traits<nodel_ptr_hash<tree_node"
+ (!type user_struct 2181 nil gc_unused "simple_hashmap_traits<nodel_ptr_hash<tree_node"
+ (!srcfileloc "cp/module.cc" 2837)
+ (!fields 1
+ (!pair "nodel_ptr_hash<tree_node"
+ (!type user_struct 2182 nil gc_unused "nodel_ptr_hash<tree_node"
+ (!srcfileloc "cp/module.cc" 2837)
+ (!fields 1
+ (!pair "tree_node"
+ (!type undefined 2183 nil gc_unused "tree_node"
+ (!srcfileloc "cp/module.cc" 2837)
+ )
+ (!srcfileloc "cp/module.cc" 2837)
+ nil )
+ )
+ )
+ (!srcfileloc "cp/module.cc" 2837)
+ nil )
+ )
+ )
+ (!srcfileloc "cp/module.cc" 2837)
+ nil )
+ (!pair "uintptr_t"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 2837)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "cp/module.cc" 2837)
+ nil )
+ )
+ )
+
+ (!type already_seen 2181)
+
+ (!type already_seen 2182)
+
+ (!type already_seen 2183)
+
+ (!type struct 2184 nil gc_unused "trees_in"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2185 nil gc_unused "trees_out"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 2019)
+
+ (!type already_seen 2020)
+
+ (!type struct 2186 nil gc_unused "loc_spans"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2187 nil gc_unused "ord_loc_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2188 nil gc_unused "ord_loc_traits"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2189 nil gc_unused "macro_loc_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2190 nil gc_unused "macro_loc_traits"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 2014)
+
+ (!type already_seen 2015)
+
+ (!type already_seen 2012)
+
+ (!type struct 2191 nil gc_unused "module_state_config"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 2192
+ (!type pointer 2193 nil gc_unused
+ (!type already_seen 2192)
+ )
+ gc_unused "vec<cpp_hashnode*>"
+ (!srcfileloc "cp/module.cc" 3734)
+ (!fields 1
+ (!pair "cpp_hashnode"
+ (!type already_seen 13)
+ (!srcfileloc "cp/module.cc" 3734)
+ nil )
+ )
+ )
+
+ (!type struct 2194 nil gc_used "module_state_hash"
+ (!srcfileloc "cp/module.cc" 3935)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2195
+ (!type pointer 2196 nil gc_used
+ (!type already_seen 2195)
+ )
+ gc_pointed_to "vec<module_state*,va_gc>"
+ (!srcfileloc "cp/module.cc" 3932)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/module.cc" 3932)
+ nil )
+ (!pair "module_state"
+ (!type already_seen 2010)
+ (!srcfileloc "cp/module.cc" 3932)
+ nil )
+ )
+ )
+
+ (!type user_struct 2197
+ (!type pointer 2198 nil gc_used
+ (!type already_seen 2197)
+ )
+ gc_pointed_to "hash_table<module_state_hash>"
+ (!srcfileloc "cp/module.cc" 3935)
+ (!fields 1
+ (!pair "module_state_hash"
+ (!type already_seen 2194)
+ (!srcfileloc "cp/module.cc" 3935)
+ nil )
+ )
+ )
+
+ (!type user_struct 2199 nil gc_unused "hash_map<unsigned,unsigned,simple_hashmap_traits<int_hash<unsigned,0>,unsigned>>"
+ (!srcfileloc "cp/module.cc" 3941)
+ (!fields 5
+ (!pair "unsigned"
+ (!type already_seen 1408)
+ (!srcfileloc "cp/module.cc" 3941)
+ nil )
+ (!pair "0"
+ (!type undefined 2200 nil gc_unused "0"
+ (!srcfileloc "cp/module.cc" 3941)
+ )
+ (!srcfileloc "cp/module.cc" 3941)
+ nil )
+ (!pair "simple_hashmap_traits<int_hash<unsigned"
+ (!type user_struct 2201 nil gc_unused "simple_hashmap_traits<int_hash<unsigned"
+ (!srcfileloc "cp/module.cc" 3941)
+ (!fields 1
+ (!pair "int_hash<unsigned"
+ (!type user_struct 2202 nil gc_unused "int_hash<unsigned"
+ (!srcfileloc "cp/module.cc" 3941)
+ (!fields 1
+ (!pair "unsigned"
+ (!type already_seen 1408)
+ (!srcfileloc "cp/module.cc" 3941)
+ nil )
+ )
+ )
+ (!srcfileloc "cp/module.cc" 3941)
+ nil )
+ )
+ )
+ (!srcfileloc "cp/module.cc" 3941)
+ nil )
+ (!pair "unsigned"
+ (!type already_seen 1408)
+ (!srcfileloc "cp/module.cc" 3941)
+ nil )
+ (!pair "unsigned"
+ (!type already_seen 1408)
+ (!srcfileloc "cp/module.cc" 3941)
+ nil )
+ )
+ )
+
+ (!type already_seen 2201)
+
+ (!type already_seen 2202)
+
+ (!type already_seen 2200)
+
+ (!type struct 2203 nil gc_unused "dumper"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2204 nil gc_used "note_def_cache_hasher"
+ (!srcfileloc "cp/module.cc" 4599)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2205
+ (!type pointer 2206 nil gc_used
+ (!type already_seen 2205)
+ )
+ gc_pointed_to "hash_table<note_def_cache_hasher>"
+ (!srcfileloc "cp/module.cc" 4599)
+ (!fields 1
+ (!pair "note_def_cache_hasher"
+ (!type already_seen 2204)
+ (!srcfileloc "cp/module.cc" 4599)
+ nil )
+ )
+ )
+
+ (!type struct 2207 nil gc_unused "add_binding_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2208 nil gc_unused "tm"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2209 nil gc_used "macro_export"
+ (!srcfileloc "cp/module.cc" 16778)
+ (!fields 2
+ (!pair "def"
+ (!type already_seen 19)
+ (!srcfileloc "cp/module.cc" 16771)
+ nil )
+ (!pair "undef_loc"
+ (!type already_seen 2)
+ (!srcfileloc "cp/module.cc" 16772)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type struct 2210 nil gc_unused "macro_import"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 2211
+ (!type pointer 2212 nil gc_used
+ (!type already_seen 2211)
+ )
+ gc_pointed_to "vec<macro_export,va_gc>"
+ (!srcfileloc "cp/module.cc" 16935)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/module.cc" 16935)
+ nil )
+ (!pair "macro_export"
+ (!type already_seen 2209)
+ (!srcfileloc "cp/module.cc" 16935)
+ nil )
+ )
+ )
+
+ (!type struct 2213 nil gc_unused "rlimit"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2214 nil gc_unused "module_processing_cookie"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2215 nil gc_unused "name_lookup"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2216 nil gc_unused "namespace_limit_reached"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2217 nil gc_unused "show_candidate_location"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2218 nil gc_unused "suggest_alternatives"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2219 nil gc_unused "namespace_hints"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2220 nil gc_unused "missing_std_header"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2221 nil gc_unused "macro_use_before_def"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2222 nil gc_unused "type_id_in_expr_sentinel"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2223 nil gc_unused "saved_token_sentinel"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2224 nil gc_unused "cp_parser_binary_operations_map_node"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2225 nil gc_unused "cp_parser_expression_stack_entry"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2226 nil gc_unused "tentative_firewall"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2227 nil gc_unused "cp_omp_attribute_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2228 nil gc_unused "scope_sentinel"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2229 nil gc_unused "class_decl_loc_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type undefined 2230 nil gc_unused "class_to_loc_map_t::iterator"
+ (!srcfileloc "cp/parser.cc" 34637)
+ )
+
+ (!type struct 2231
+ (!type pointer 2232 nil gc_used
+ (!type already_seen 2231)
+ )
+ gc_pointed_to "pending_template"
+ (!srcfileloc "cp/pt.cc" 9559)
+ (!fields 2
+ (!pair "next"
+ (!type already_seen 2232)
+ (!srcfileloc "cp/pt.cc" 59)
+ nil )
+ (!pair "tinst"
+ (!type already_seen 2007)
+ (!srcfileloc "cp/pt.cc" 60)
+ nil )
+ )
+ (!options
+ (!option chain_next string "%h.next")
+ )
+ 1028 nil nil )
+
+ (!type struct 2233 nil gc_used "spec_hasher"
+ (!srcfileloc "cp/pt.cc" 116)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2234
+ (!type pointer 2235 nil gc_used
+ (!type already_seen 2234)
+ )
+ gc_pointed_to "hash_table<spec_hasher>"
+ (!srcfileloc "cp/pt.cc" 116)
+ (!fields 1
+ (!pair "spec_hasher"
+ (!type already_seen 2233)
+ (!srcfileloc "cp/pt.cc" 116)
+ nil )
+ )
+ )
+
+ (!type struct 2236 nil gc_unused "find_parameter_pack_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2237 nil gc_used "ctp_hasher"
+ (!srcfileloc "cp/pt.cc" 4524)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2238
+ (!type pointer 2239 nil gc_used
+ (!type already_seen 2238)
+ )
+ gc_pointed_to "hash_table<ctp_hasher>"
+ (!srcfileloc "cp/pt.cc" 4524)
+ (!fields 1
+ (!pair "ctp_hasher"
+ (!type already_seen 2237)
+ (!srcfileloc "cp/pt.cc" 4524)
+ nil )
+ )
+ )
+
+ (!type struct 2240 nil gc_unused "template_parm_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2241 nil gc_unused "uses_all_template_parms_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2242 nil gc_unused "freelist"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2243 nil gc_unused "pair_fn_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2244 nil gc_unused "find_template_parameter_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2245 nil gc_unused "el_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2246 nil gc_unused "auto_hash"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 2247
+ (!type pointer 2248 nil gc_used
+ (!type already_seen 2247)
+ )
+ gc_pointed_to "hash_map<tree,tree_pair_p>"
+ (!srcfileloc "cp/pt.cc" 30455)
+ (!fields 2
+ (!pair "tree_pair_p"
+ (!type already_seen 539)
+ (!srcfileloc "cp/pt.cc" 30455)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "cp/pt.cc" 30455)
+ nil )
+ )
+ )
+
+ (!type struct 2249 nil gc_used "tinfo_s"
+ (!srcfileloc "cp/rtti.cc" 122)
+ (!fields 3
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "cp/rtti.cc" 65)
+ nil )
+ (!pair "vtable"
+ (!type already_seen 23)
+ (!srcfileloc "cp/rtti.cc" 67)
+ nil )
+ (!pair "name"
+ (!type already_seen 23)
+ (!srcfileloc "cp/rtti.cc" 70)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type user_struct 2250
+ (!type pointer 2251 nil gc_used
+ (!type already_seen 2250)
+ )
+ gc_pointed_to "vec<tinfo_s,va_gc>"
+ (!srcfileloc "cp/rtti.cc" 122)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/rtti.cc" 122)
+ nil )
+ (!pair "tinfo_s"
+ (!type already_seen 2249)
+ (!srcfileloc "cp/rtti.cc" 122)
+ nil )
+ )
+ )
+
+ (!type struct 2252 nil gc_used "deferred_access"
+ (!srcfileloc "cp/semantics.cc" 137)
+ (!fields 2
+ (!pair "deferred_access_checks"
+ (!type already_seen 78)
+ (!srcfileloc "cp/semantics.cc" 130)
+ nil )
+ (!pair "deferring_access_checks_kind"
+ (!type already_seen 2)
+ (!srcfileloc "cp/semantics.cc" 133)
+ nil )
+ )
+ nil 1028 nil nil )
+
+ (!type user_struct 2253
+ (!type pointer 2254 nil gc_used
+ (!type already_seen 2253)
+ )
+ gc_pointed_to "vec<deferred_access,va_gc>"
+ (!srcfileloc "cp/semantics.cc" 137)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "cp/semantics.cc" 137)
+ nil )
+ (!pair "deferred_access"
+ (!type already_seen 2252)
+ (!srcfileloc "cp/semantics.cc" 137)
+ nil )
+ )
+ )
+
+ (!type struct 2255 nil gc_unused "cp_check_omp_declare_reduction_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2256 nil gc_unused "omp_target_walk_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2257 nil gc_unused "cplus_array_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2258 nil gc_used "cplus_array_hasher"
+ (!srcfileloc "cp/tree.cc" 1042)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2259
+ (!type pointer 2260 nil gc_used
+ (!type already_seen 2259)
+ )
+ gc_pointed_to "hash_table<cplus_array_hasher>"
+ (!srcfileloc "cp/tree.cc" 1042)
+ (!fields 1
+ (!pair "cplus_array_hasher"
+ (!type already_seen 2258)
+ (!srcfileloc "cp/tree.cc" 1042)
+ nil )
+ )
+ )
+
+ (!type struct 2261 nil gc_unused "list_proxy"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2262 nil gc_used "list_hasher"
+ (!srcfileloc "cp/tree.cc" 2178)
+ (!fields 0 )
+ nil 1028 nil nil )
+
+ (!type user_struct 2263
+ (!type pointer 2264 nil gc_used
+ (!type already_seen 2263)
+ )
+ gc_pointed_to "hash_table<list_hasher>"
+ (!srcfileloc "cp/tree.cc" 2178)
+ (!fields 1
+ (!pair "list_hasher"
+ (!type already_seen 2262)
+ (!srcfileloc "cp/tree.cc" 2178)
+ nil )
+ )
+ )
+
+ (!type struct 2265 nil gc_unused "bot_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2266 nil gc_unused "replace_placeholders_t"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2267 nil gc_unused "work_node"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2268 nil gc_unused "vtv_graph_node"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2269 nil gc_unused "Dsymbol"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 63)
+
+ (!type already_seen 640)
+
+ (!type struct 2270 nil gc_unused "ClassDeclaration"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2271 nil gc_unused "EnumDeclaration"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 440)
+
+ (!type struct 2272 nil gc_unused "StructDeclaration"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2273 nil gc_unused "TypeInfoDeclaration"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2274 nil gc_unused "VarDeclaration"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2275
+ (!type pointer 2276 nil gc_unused
+ (!type already_seen 2275)
+ )
+ gc_unused "Expression"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2277 nil gc_unused "ClassReferenceExp"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2278 nil gc_unused "IndexExp"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2279 nil gc_unused "SliceExp"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 442)
+
+ (!type already_seen 447)
+
+ (!type already_seen 534)
+
+ (!type struct 2280 nil gc_unused "TypeFunction"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2281 nil gc_unused "Parameter"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2282 nil gc_unused "BaseClass"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2283 nil gc_unused "Scope"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2284 nil gc_unused "Loc"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 2285 nil gc_unused "Array<Expression*>"
+ (!srcfileloc "d/d-tree.h" 46)
+ (!fields 1
+ (!pair "Expression"
+ (!type already_seen 2276)
+ (!srcfileloc "d/d-tree.h" 46)
+ nil )
+ )
+ )
+
+ (!type already_seen 434)
+
+ (!type already_seen 449)
+
+ (!type already_seen 445)
+
+ (!type already_seen 662)
+
+ (!type already_seen 444)
+
+ (!type struct 2286 nil gc_unused "builtin_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2287 nil gc_unused "d_option_data"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2288 nil gc_unused "TypeInfoVisitor"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2289 nil gc_unused "TypeInfoDeclVisitor"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2290 nil gc_unused "SpeculativeTypeVisitor"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2291 nil gc_used "module_hasher"
+ (!srcfileloc "fortran/trans-decl.cc" 5114)
+ (!fields 0 )
+ nil 16 nil nil )
+
+ (!type user_struct 2292
+ (!type pointer 2293 nil gc_used
+ (!type already_seen 2292)
+ )
+ gc_pointed_to "hash_table<module_hasher>"
+ (!srcfileloc "fortran/trans-decl.cc" 5114)
+ (!fields 1
+ (!pair "module_hasher"
+ (!type already_seen 2291)
+ (!srcfileloc "fortran/trans-decl.cc" 5114)
+ nil )
+ )
+ )
+
+ (!type struct 2294 nil gc_pointed_to "module_htab_entry"
+ (!srcfileloc "fortran/trans.h" 711)
+ (!fields 3
+ (!pair "name"
+ (!type already_seen 11)
+ (!srcfileloc "fortran/trans.h" 708)
+ nil )
+ (!pair "namespace_decl"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 709)
+ nil )
+ (!pair "decls"
+ (!type pointer 2295 nil gc_used
+ (!type user_struct 2296
+ (!type already_seen 2295)
+ gc_pointed_to "hash_table<module_decl_hasher>"
+ (!srcfileloc "fortran/trans.h" 710)
+ (!fields 1
+ (!pair "module_decl_hasher"
+ (!type struct 2297 nil gc_used "module_decl_hasher"
+ (!srcfileloc "fortran/trans.h" 710)
+ (!fields 0 )
+ nil 16 nil nil )
+ (!srcfileloc "fortran/trans.h" 710)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "fortran/trans.h" 710)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 16 nil nil )
+
+ (!type struct 2298 nil gc_used "gfc_intrinsic_map_t"
+ (!srcfileloc "fortran/trans-intrinsic.cc" 87)
+ (!fields 19
+ (!pair "id"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 53)
+ nil )
+ (!pair "float_built_in"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 57)
+ nil )
+ (!pair "double_built_in"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 58)
+ nil )
+ (!pair "long_double_built_in"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 59)
+ nil )
+ (!pair "complex_float_built_in"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 60)
+ nil )
+ (!pair "complex_double_built_in"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 61)
+ nil )
+ (!pair "complex_long_double_built_in"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 62)
+ nil )
+ (!pair "libm_name"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 67)
+ nil )
+ (!pair "complex_available"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 70)
+ nil )
+ (!pair "is_constant"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 73)
+ nil )
+ (!pair "name"
+ (!type already_seen 11)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 76)
+ nil )
+ (!pair "real4_decl"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 79)
+ nil )
+ (!pair "real8_decl"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 80)
+ nil )
+ (!pair "real10_decl"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 81)
+ nil )
+ (!pair "real16_decl"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 82)
+ nil )
+ (!pair "complex4_decl"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 83)
+ nil )
+ (!pair "complex8_decl"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 84)
+ nil )
+ (!pair "complex10_decl"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 85)
+ nil )
+ (!pair "complex16_decl"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 86)
+ nil )
+ )
+ nil 16 nil nil )
+
+ (!type struct 2299 nil gc_used "gfc_st_parameter_field"
+ (!srcfileloc "fortran/trans-io.cc" 73)
+ (!fields 6
+ (!pair "name"
+ (!type already_seen 11)
+ (!srcfileloc "fortran/trans-io.cc" 67)
+ nil )
+ (!pair "mask"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans-io.cc" 68)
+ nil )
+ (!pair "param_type"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans-io.cc" 69)
+ nil )
+ (!pair "type"
+ (!type already_seen 2)
+ (!srcfileloc "fortran/trans-io.cc" 70)
+ nil )
+ (!pair "field"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-io.cc" 71)
+ nil )
+ (!pair "field_len"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-io.cc" 72)
+ nil )
+ )
+ nil 16 nil nil )
+
+ (!type struct 2300 nil gc_used "gfc_st_parameter"
+ (!srcfileloc "fortran/trans-io.cc" 79)
+ (!fields 2
+ (!pair "name"
+ (!type already_seen 11)
+ (!srcfileloc "fortran/trans-io.cc" 77)
+ nil )
+ (!pair "type"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-io.cc" 78)
+ nil )
+ )
+ nil 16 nil nil )
+
+ (!type struct 2301 nil gc_unused "iter_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2302 nil gc_unused "forall_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2303 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/fortran/trans.h:33"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2304 nil gc_unused "gfc_se"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2305 nil gc_unused "gfc_co_subroutines_args"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2306 nil gc_unused "gfc_array_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2307 nil gc_unused "gfc_ss_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2308 nil gc_unused "gfc_ss"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2309 nil gc_unused "gfc_loopinfo"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2310 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/fortran/trans.h:408"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2311 nil gc_unused "anonymous:/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/fortran/trans.h:420"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type already_seen 2297)
+
+ (!type already_seen 2296)
+
+ (!type struct 2312 nil gc_unused "gimplify_omp_ctx"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2313 nil gc_used "gfc_powdecl_list"
+ (!srcfileloc "fortran/trans.h" 917)
+ (!fields 3
+ (!pair "integer"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 914)
+ nil )
+ (!pair "real"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 915)
+ nil )
+ (!pair "cmplx"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 916)
+ nil )
+ )
+ nil 16 nil nil )
+
+ (!type struct 2314 nil gc_unused "gfc_interface_sym_mapping"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2315 nil gc_unused "gfc_interface_mapping"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2316 nil gc_unused "go_create_gogo_args"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2317 nil gc_unused "Linemap"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2318 nil gc_unused "Backend"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2319 nil gc_unused "ggc_root_tab"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2320 nil gc_unused "lto_file"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2321 nil gc_unused "lto_section_list"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2322 nil gc_unused "lto_section_slot"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2323 nil gc_unused "tree_scc"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2324 nil gc_unused "tree_scc_hasher"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2325 nil gc_unused "streamer_tree_cache_d"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type user_struct 2326 nil gc_unused "int_hash<unsigned,0,UINT_MAX>"
+ (!srcfileloc "lto/lto-common.cc" 1805)
+ (!fields 3
+ (!pair "UINT_MAX"
+ (!type undefined 2327 nil gc_unused "UINT_MAX"
+ (!srcfileloc "lto/lto-common.cc" 1805)
+ )
+ (!srcfileloc "lto/lto-common.cc" 1805)
+ nil )
+ (!pair "0"
+ (!type already_seen 2200)
+ (!srcfileloc "lto/lto-common.cc" 1805)
+ nil )
+ (!pair "unsigned"
+ (!type already_seen 1408)
+ (!srcfileloc "lto/lto-common.cc" 1805)
+ nil )
+ )
+ )
+
+ (!type already_seen 2327)
+
+ (!type struct 2328 nil gc_unused "file_data_list"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2329 nil gc_unused "symbol_entry"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2330 nil gc_unused "variable_entry"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2331 nil gc_unused "function_entry"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2332 nil gc_unused "named_path_s"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2333
+ (!type pointer 2334 nil gc_used
+ (!type already_seen 2333)
+ )
+ gc_pointed_to "rtenode"
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 61)
+ (!fields 11
+ (!pair "constructor_reachable"
+ (!type already_seen 2)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 39)
+ nil )
+ (!pair "export_reachable"
+ (!type already_seen 2)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 40)
+ nil )
+ (!pair "exception_routine"
+ (!type already_seen 2)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 41)
+ nil )
+ (!pair "constructor_final"
+ (!type already_seen 2)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 42)
+ nil )
+ (!pair "export_final"
+ (!type already_seen 2)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 43)
+ nil )
+ (!pair "is_call"
+ (!type already_seen 2)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 44)
+ nil )
+ (!pair "grtenode"
+ (!type already_seen 282)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 45)
+ nil )
+ (!pair "func"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 46)
+ nil )
+ (!pair "reachable_src"
+ (!type already_seen 2334)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 47)
+ nil )
+ (!pair "function_call"
+ (!type pointer 2335 nil gc_used
+ (!type user_struct 2336
+ (!type already_seen 2335)
+ gc_pointed_to "vec<rtenode*,va_gc>"
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 49)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 49)
+ nil )
+ (!pair "rtenode"
+ (!type already_seen 2334)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 49)
+ nil )
+ )
+ )
+ )
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 49)
+ nil )
+ (!pair "rts_call"
+ (!type already_seen 2335)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 50)
+ nil )
+ )
+ nil 256 nil nil )
+
+ (!type already_seen 2336)
+
+ (!type struct 2337 nil gc_unused "builtin_function_entry"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2338 nil gc_unused "builtin_type_info"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2339
+ (!type pointer 2340 nil gc_used
+ (!type already_seen 2339)
+ )
+ gc_pointed_to "struct_constructor"
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 60)
+ (!fields 5
+ (!pair "constructor_type"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 48)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "constructor_fields"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 51)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "constructor_element_list"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 54)
+ (!options
+ (!option skip string "")
+ )
+ )
+ (!pair "constructor_elements"
+ (!type already_seen 570)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 57)
+ nil )
+ (!pair "level"
+ (!type already_seen 2340)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 59)
+ nil )
+ )
+ nil 256 nil nil )
+
+ (!type struct 2341
+ (!type pointer 2342 nil gc_used
+ (!type already_seen 2341)
+ )
+ gc_pointed_to "array_desc"
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 70)
+ (!fields 4
+ (!pair "type"
+ (!type already_seen 2)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 66)
+ nil )
+ (!pair "index"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 67)
+ nil )
+ (!pair "array"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 68)
+ nil )
+ (!pair "next"
+ (!type already_seen 2342)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 69)
+ nil )
+ )
+ nil 256 nil nil )
+
+ (!type struct 2343
+ (!type pointer 2344 nil gc_used
+ (!type already_seen 2343)
+ )
+ gc_pointed_to "objc_map_private"
+ (!srcfileloc "objc/objc-map.h" 79)
+ (!fields 7
+ (!pair "number_of_slots"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-map.h" 54)
+ nil )
+ (!pair "mask"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-map.h" 57)
+ nil )
+ (!pair "number_of_non_empty_slots"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-map.h" 62)
+ nil )
+ (!pair "max_number_of_non_empty_slots"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-map.h" 68)
+ nil )
+ (!pair "maximum_load_factor"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-map.h" 71)
+ nil )
+ (!pair "slots"
+ (!type already_seen 24)
+ (!srcfileloc "objc/objc-map.h" 74)
+ (!options
+ (!option length string "%h.number_of_slots")
+ )
+ )
+ (!pair "values"
+ (!type already_seen 24)
+ (!srcfileloc "objc/objc-map.h" 78)
+ (!options
+ (!option length string "%h.number_of_slots")
+ )
+ )
+ )
+ nil 1536 nil nil )
+
+ (!type struct 2345
+ (!type pointer 2346
+ (!type pointer 2347 nil gc_used
+ (!type already_seen 2346)
+ )
+ gc_pointed_to
+ (!type already_seen 2345)
+ )
+ gc_pointed_to "hashed_entry"
+ (!srcfileloc "objc/objc-act.h" 286)
+ (!fields 3
+ (!pair "list"
+ (!type pointer 2348 nil gc_used
+ (!type struct 2349
+ (!type already_seen 2348)
+ gc_pointed_to "hashed_attribute"
+ (!srcfileloc "objc/objc-act.h" 280)
+ (!fields 2
+ (!pair "next"
+ (!type already_seen 2348)
+ (!srcfileloc "objc/objc-act.h" 278)
+ nil )
+ (!pair "value"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-act.h" 279)
+ nil )
+ )
+ nil 1536 nil nil )
+ )
+ (!srcfileloc "objc/objc-act.h" 283)
+ nil )
+ (!pair "next"
+ (!type already_seen 2346)
+ (!srcfileloc "objc/objc-act.h" 284)
+ nil )
+ (!pair "key"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-act.h" 285)
+ nil )
+ )
+ nil 1536 nil nil )
+
+ (!type already_seen 2349)
+
+ (!type struct 2350
+ (!type pointer 2351 nil gc_used
+ (!type already_seen 2350)
+ )
+ gc_pointed_to "imp_entry"
+ (!srcfileloc "objc/objc-act.h" 303)
+ (!fields 6
+ (!pair "next"
+ (!type already_seen 2351)
+ (!srcfileloc "objc/objc-act.h" 297)
+ nil )
+ (!pair "imp_context"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-act.h" 298)
+ nil )
+ (!pair "imp_template"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-act.h" 299)
+ nil )
+ (!pair "class_decl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-act.h" 300)
+ nil )
+ (!pair "meta_decl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-act.h" 301)
+ nil )
+ (!pair "has_cxx_cdtors"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-act.h" 302)
+ nil )
+ )
+ nil 1536 nil nil )
+
+ (!type struct 2352 nil gc_unused "objc_try_context"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2353 nil gc_pointed_to "string_descriptor"
+ (!srcfileloc "objc/objc-act.cc" 250)
+ (!fields 2
+ (!pair "literal"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-act.cc" 246)
+ nil )
+ (!pair "constructor"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-act.cc" 249)
+ nil )
+ )
+ (!options
+ (!option for_user string "")
+ )
+ 1536 nil nil )
+
+ (!type struct 2354 nil gc_used "objc_string_hasher"
+ (!srcfileloc "objc/objc-act.cc" 258)
+ (!fields 0 )
+ nil 1536 nil nil )
+
+ (!type user_struct 2355
+ (!type pointer 2356 nil gc_used
+ (!type already_seen 2355)
+ )
+ gc_pointed_to "hash_table<objc_string_hasher>"
+ (!srcfileloc "objc/objc-act.cc" 258)
+ (!fields 1
+ (!pair "objc_string_hasher"
+ (!type already_seen 2354)
+ (!srcfileloc "objc/objc-act.cc" 258)
+ nil )
+ )
+ )
+
+ (!type struct 2357 nil gc_unused "decl_name_hash"nil
+ (!fields 0 )
+ nil 0 nil nil )
+
+ (!type struct 2358 nil gc_used "ident_data_tuple"
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1092)
+ (!fields 2
+ (!pair "ident"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1068)
+ nil )
+ (!pair "data"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1069)
+ nil )
+ )
+ nil 1536 nil nil )
+
+ (!type user_struct 2359
+ (!type pointer 2360 nil gc_used
+ (!type already_seen 2359)
+ )
+ gc_pointed_to "vec<ident_data_tuple,va_gc>"
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1092)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1092)
+ nil )
+ (!pair "ident_data_tuple"
+ (!type already_seen 2358)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1092)
+ nil )
+ )
+ )
+
+ (!type struct 2361 nil gc_used "msgref_entry"
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1259)
+ (!fields 3
+ (!pair "func"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1254)
+ nil )
+ (!pair "selname"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1255)
+ nil )
+ (!pair "refdecl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1256)
+ nil )
+ )
+ nil 1536 nil nil )
+
+ (!type user_struct 2362
+ (!type pointer 2363 nil gc_used
+ (!type already_seen 2362)
+ )
+ gc_pointed_to "vec<msgref_entry,va_gc>"
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1259)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1259)
+ nil )
+ (!pair "msgref_entry"
+ (!type already_seen 2361)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1259)
+ nil )
+ )
+ )
+
+ (!type struct 2364 nil gc_used "prot_list_entry"
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1313)
+ (!fields 2
+ (!pair "id"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1310)
+ nil )
+ (!pair "refdecl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1311)
+ nil )
+ )
+ nil 1536 nil nil )
+
+ (!type user_struct 2365
+ (!type pointer 2366 nil gc_used
+ (!type already_seen 2365)
+ )
+ gc_pointed_to "vec<prot_list_entry,va_gc>"
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1313)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1313)
+ nil )
+ (!pair "prot_list_entry"
+ (!type already_seen 2364)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1313)
+ nil )
+ )
+ )
+
+ (!type struct 2367 nil gc_used "ivarref_entry"
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2997)
+ (!fields 2
+ (!pair "decl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2993)
+ nil )
+ (!pair "offset"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2994)
+ nil )
+ )
+ nil 1536 nil nil )
+
+ (!type user_struct 2368
+ (!type pointer 2369 nil gc_used
+ (!type already_seen 2368)
+ )
+ gc_pointed_to "vec<ivarref_entry,va_gc>"
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2997)
+ (!fields 2
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2997)
+ nil )
+ (!pair "ivarref_entry"
+ (!type already_seen 2367)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2997)
+ nil )
+ )
+ )
+)
+(!typedefs 956
+ (!pair "ivarref_entry"
+ (!type already_seen 2367)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2997)
+ nil )
+ (!pair "vec<ivarref_entry,va_gc>"
+ (!type already_seen 2368)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2997)
+ nil )
+ (!pair "prot_list_entry"
+ (!type already_seen 2364)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1313)
+ nil )
+ (!pair "vec<prot_list_entry,va_gc>"
+ (!type already_seen 2365)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1313)
+ nil )
+ (!pair "msgref_entry"
+ (!type already_seen 2361)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1259)
+ nil )
+ (!pair "vec<msgref_entry,va_gc>"
+ (!type already_seen 2362)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1259)
+ nil )
+ (!pair "ident_data_tuple"
+ (!type already_seen 2358)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1092)
+ nil )
+ (!pair "vec<ident_data_tuple,va_gc>"
+ (!type already_seen 2359)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1092)
+ nil )
+ (!pair "objc_string_hasher"
+ (!type already_seen 2354)
+ (!srcfileloc "objc/objc-act.cc" 258)
+ nil )
+ (!pair "hash_table<objc_string_hasher>"
+ (!type already_seen 2355)
+ (!srcfileloc "objc/objc-act.cc" 258)
+ nil )
+ (!pair "attr"
+ (!type already_seen 2348)
+ (!srcfileloc "objc/objc-act.h" 275)
+ nil )
+ (!pair "hash"
+ (!type already_seen 2346)
+ (!srcfileloc "objc/objc-act.h" 274)
+ nil )
+ (!pair "objc_map_iterator_t"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-map.h" 241)
+ nil )
+ (!pair "objc_map_t"
+ (!type already_seen 2344)
+ (!srcfileloc "objc/objc-map.h" 90)
+ nil )
+ (!pair "objc_map_private_hash_t"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-map.h" 40)
+ nil )
+ (!pair "array_desc"
+ (!type already_seen 2341)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 70)
+ nil )
+ (!pair "builtin_prototype"
+ (!type already_seen 2)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 108)
+ nil )
+ (!pair "stmt_tree_t"
+ (!type already_seen 453)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 110)
+ nil )
+ (!pair "rtevec"
+ (!type already_seen 2336)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 64)
+ nil )
+ (!pair "vec<rtenode*,va_gc>"
+ (!type already_seen 2336)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 49)
+ nil )
+ (!pair "rtenode"
+ (!type already_seen 2333)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 47)
+ nil )
+ (!pair "named_path"
+ (!type already_seen 2332)
+ (!srcfileloc "m2/gm2-lang.cc" 55)
+ nil )
+ (!pair "code_id_hash"
+ (!type already_seen 2326)
+ (!srcfileloc "lto/lto-common.cc" 1805)
+ nil )
+ (!pair "UINT_MAX"
+ (!type already_seen 2327)
+ (!srcfileloc "lto/lto-common.cc" 1805)
+ nil )
+ (!pair "int_hash<unsigned,0,UINT_MAX>"
+ (!type already_seen 2326)
+ (!srcfileloc "lto/lto-common.cc" 1805)
+ nil )
+ (!pair "go_char_p"
+ (!type already_seen 9)
+ (!srcfileloc "go/go-lang.cc" 196)
+ nil )
+ (!pair "gfc_interface_mapping"
+ (!type already_seen 2315)
+ (!srcfileloc "fortran/trans.h" 1167)
+ nil )
+ (!pair "gfc_interface_sym_mapping"
+ (!type already_seen 2314)
+ (!srcfileloc "fortran/trans.h" 1152)
+ nil )
+ (!pair "gfc_powdecl_list"
+ (!type already_seen 2313)
+ (!srcfileloc "fortran/trans.h" 918)
+ nil )
+ (!pair "module_decl_hasher"
+ (!type already_seen 2297)
+ (!srcfileloc "fortran/trans.h" 710)
+ nil )
+ (!pair "hash_table<module_decl_hasher>"
+ (!type already_seen 2296)
+ (!srcfileloc "fortran/trans.h" 710)
+ nil )
+ (!pair "gfc_wrapped_block"
+ (!type already_seen 2311)
+ (!srcfileloc "fortran/trans.h" 425)
+ nil )
+ (!pair "gfc_saved_var"
+ (!type already_seen 2310)
+ (!srcfileloc "fortran/trans.h" 412)
+ nil )
+ (!pair "gfc_loopinfo"
+ (!type already_seen 2309)
+ (!srcfileloc "fortran/trans.h" 402)
+ nil )
+ (!pair "gfc_ss"
+ (!type already_seen 2308)
+ (!srcfileloc "fortran/trans.h" 353)
+ nil )
+ (!pair "gfc_ss_info"
+ (!type already_seen 2307)
+ (!srcfileloc "fortran/trans.h" 312)
+ nil )
+ (!pair "gfc_array_info"
+ (!type already_seen 2306)
+ (!srcfileloc "fortran/trans.h" 213)
+ nil )
+ (!pair "gfc_co_subroutines_args"
+ (!type already_seen 2305)
+ (!srcfileloc "fortran/trans.h" 124)
+ nil )
+ (!pair "gfc_se"
+ (!type already_seen 2304)
+ (!srcfileloc "fortran/trans.h" 115)
+ nil )
+ (!pair "stmtblock_t"
+ (!type already_seen 2303)
+ (!srcfileloc "fortran/trans.h" 37)
+ nil )
+ (!pair "forall_info"
+ (!type already_seen 2302)
+ (!srcfileloc "fortran/trans-stmt.cc" 58)
+ nil )
+ (!pair "iter_info"
+ (!type already_seen 2301)
+ (!srcfileloc "fortran/trans-stmt.cc" 46)
+ nil )
+ (!pair "gfc_st_parameter"
+ (!type already_seen 2300)
+ (!srcfileloc "fortran/trans-io.cc" 80)
+ nil )
+ (!pair "gfc_st_parameter_field"
+ (!type already_seen 2299)
+ (!srcfileloc "fortran/trans-io.cc" 74)
+ nil )
+ (!pair "gfc_intrinsic_map_t"
+ (!type already_seen 2298)
+ (!srcfileloc "fortran/trans-intrinsic.cc" 88)
+ nil )
+ (!pair "module_hasher"
+ (!type already_seen 2291)
+ (!srcfileloc "fortran/trans-decl.cc" 5114)
+ nil )
+ (!pair "hash_table<module_hasher>"
+ (!type already_seen 2292)
+ (!srcfileloc "fortran/trans-decl.cc" 5114)
+ nil )
+ (!pair "tree_frame_info"
+ (!type already_seen 662)
+ (!srcfileloc "d/d-tree.h" 352)
+ nil )
+ (!pair "lang_identifier"
+ (!type already_seen 631)
+ (!srcfileloc "d/d-tree.h" 351)
+ nil )
+ (!pair "Type"
+ (!type already_seen 534)
+ (!srcfileloc "d/d-tree.h" 325)
+ nil )
+ (!pair "d_label_entry"
+ (!type already_seen 445)
+ (!srcfileloc "d/d-tree.h" 257)
+ nil )
+ (!pair "hash_map<Statement*,d_label_entry>"
+ (!type already_seen 444)
+ (!srcfileloc "d/d-tree.h" 257)
+ nil )
+ (!pair "Module"
+ (!type already_seen 442)
+ (!srcfileloc "d/d-tree.h" 244)
+ nil )
+ (!pair "FuncDeclaration"
+ (!type already_seen 440)
+ (!srcfileloc "d/d-tree.h" 243)
+ nil )
+ (!pair "AggregateDeclaration"
+ (!type already_seen 640)
+ (!srcfileloc "d/d-tree.h" 220)
+ nil )
+ (!pair "Declaration"
+ (!type already_seen 63)
+ (!srcfileloc "d/d-tree.h" 219)
+ nil )
+ (!pair "Statement"
+ (!type already_seen 447)
+ (!srcfileloc "d/d-tree.h" 145)
+ nil )
+ (!pair "d_label_use_entry"
+ (!type already_seen 449)
+ (!srcfileloc "d/d-tree.h" 142)
+ nil )
+ (!pair "binding_level"
+ (!type already_seen 434)
+ (!srcfileloc "d/d-tree.h" 127)
+ nil )
+ (!pair "Expressions"
+ (!type already_seen 2285)
+ (!srcfileloc "d/d-tree.h" 46)
+ nil )
+ (!pair "Array<Expression*>"
+ (!type already_seen 2285)
+ (!srcfileloc "d/d-tree.h" 46)
+ nil )
+ (!pair "list_hasher"
+ (!type already_seen 2262)
+ (!srcfileloc "cp/tree.cc" 2178)
+ nil )
+ (!pair "hash_table<list_hasher>"
+ (!type already_seen 2263)
+ (!srcfileloc "cp/tree.cc" 2178)
+ nil )
+ (!pair "cplus_array_hasher"
+ (!type already_seen 2258)
+ (!srcfileloc "cp/tree.cc" 1042)
+ nil )
+ (!pair "hash_table<cplus_array_hasher>"
+ (!type already_seen 2259)
+ (!srcfileloc "cp/tree.cc" 1042)
+ nil )
+ (!pair "deferred_access"
+ (!type already_seen 2252)
+ (!srcfileloc "cp/semantics.cc" 137)
+ nil )
+ (!pair "vec<deferred_access,va_gc>"
+ (!type already_seen 2253)
+ (!srcfileloc "cp/semantics.cc" 137)
+ nil )
+ (!pair "tinfo_s"
+ (!type already_seen 2249)
+ (!srcfileloc "cp/rtti.cc" 122)
+ nil )
+ (!pair "vec<tinfo_s,va_gc>"
+ (!type already_seen 2250)
+ (!srcfileloc "cp/rtti.cc" 122)
+ nil )
+ (!pair "hash_map<tree,tree_pair_p>"
+ (!type already_seen 2247)
+ (!srcfileloc "cp/pt.cc" 30455)
+ nil )
+ (!pair "pending_template"
+ (!type already_seen 2231)
+ (!srcfileloc "cp/pt.cc" 9559)
+ nil )
+ (!pair "tinst_level"
+ (!type already_seen 2006)
+ (!srcfileloc "cp/pt.cc" 9550)
+ nil )
+ (!pair "ctp_hasher"
+ (!type already_seen 2237)
+ (!srcfileloc "cp/pt.cc" 4524)
+ nil )
+ (!pair "hash_table<ctp_hasher>"
+ (!type already_seen 2238)
+ (!srcfileloc "cp/pt.cc" 4524)
+ nil )
+ (!pair "spec_hash_table"
+ (!type already_seen 2234)
+ (!srcfileloc "cp/pt.cc" 116)
+ nil )
+ (!pair "spec_hasher"
+ (!type already_seen 2233)
+ (!srcfileloc "cp/pt.cc" 116)
+ nil )
+ (!pair "hash_table<spec_hasher>"
+ (!type already_seen 2234)
+ (!srcfileloc "cp/pt.cc" 116)
+ nil )
+ (!pair "tree_fn_t"
+ (!type already_seen 2)
+ (!srcfileloc "cp/pt.cc" 52)
+ nil )
+ (!pair "iter_t"
+ (!type already_seen 2230)
+ (!srcfileloc "cp/parser.cc" 34637)
+ nil )
+ (!pair "class_to_loc_map_t::iterator"
+ (!type already_seen 2230)
+ (!srcfileloc "cp/parser.cc" 34637)
+ nil )
+ (!pair "cp_parser_expression_stack"
+ (!type array 2370 nil gc_unused "NUM_PREC_VALUES"
+ (!type already_seen 2225)
+ )
+ (!srcfileloc "cp/parser.cc" 2092)
+ nil )
+ (!pair "cp_parser_flags"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.cc" 2021)
+ nil )
+ (!pair "cp_parser"
+ (!type already_seen 2051)
+ (!srcfileloc "cp/parser.cc" 695)
+ nil )
+ (!pair "vec<macro_export,va_gc>"
+ (!type already_seen 2211)
+ (!srcfileloc "cp/module.cc" 16935)
+ nil )
+ (!pair "macro_export"
+ (!type already_seen 2209)
+ (!srcfileloc "cp/module.cc" 16774)
+ nil )
+ (!pair "note_defs_table_t"
+ (!type already_seen 2205)
+ (!srcfileloc "cp/module.cc" 4599)
+ nil )
+ (!pair "note_def_cache_hasher"
+ (!type already_seen 2204)
+ (!srcfileloc "cp/module.cc" 4599)
+ nil )
+ (!pair "hash_table<note_def_cache_hasher>"
+ (!type already_seen 2205)
+ (!srcfileloc "cp/module.cc" 4599)
+ nil )
+ (!pair "entity_map_t"
+ (!type already_seen 2199)
+ (!srcfileloc "cp/module.cc" 3941)
+ nil )
+ (!pair "0"
+ (!type already_seen 2200)
+ (!srcfileloc "cp/module.cc" 3941)
+ nil )
+ (!pair "int_hash<unsigned"
+ (!type already_seen 2202)
+ (!srcfileloc "cp/module.cc" 3941)
+ nil )
+ (!pair "simple_hashmap_traits<int_hash<unsigned"
+ (!type already_seen 2201)
+ (!srcfileloc "cp/module.cc" 3941)
+ nil )
+ (!pair "hash_map<unsigned,unsigned,simple_hashmap_traits<int_hash<unsigned,0>,unsigned>>"
+ (!type already_seen 2199)
+ (!srcfileloc "cp/module.cc" 3941)
+ nil )
+ (!pair "module_state_hash"
+ (!type already_seen 2194)
+ (!srcfileloc "cp/module.cc" 3935)
+ nil )
+ (!pair "hash_table<module_state_hash>"
+ (!type already_seen 2197)
+ (!srcfileloc "cp/module.cc" 3935)
+ nil )
+ (!pair "vec<module_state*,va_gc>"
+ (!type already_seen 2195)
+ (!srcfileloc "cp/module.cc" 3932)
+ nil )
+ (!pair "vec<cpp_hashnode*>"
+ (!type already_seen 2192)
+ (!srcfileloc "cp/module.cc" 3734)
+ nil )
+ (!pair "module_state"
+ (!type already_seen 2009)
+ (!srcfileloc "cp/module.cc" 3513)
+ nil )
+ (!pair "slurping"
+ (!type already_seen 2012)
+ (!srcfileloc "cp/module.cc" 3396)
+ nil )
+ (!pair "bytes_in"
+ (!type already_seen 2018)
+ (!srcfileloc "cp/module.cc" 3385)
+ nil )
+ (!pair "elf_in"
+ (!type already_seen 2017)
+ (!srcfileloc "cp/module.cc" 3374)
+ nil )
+ (!pair "vl_embed"
+ (!type already_seen 2015)
+ (!srcfileloc "cp/module.cc" 3371)
+ nil )
+ (!pair "vec<unsigned,va_heap,vl_embed>"
+ (!type already_seen 2014)
+ (!srcfileloc "cp/module.cc" 3371)
+ nil )
+ (!pair "loc_range_t"
+ (!type already_seen 2020)
+ (!srcfileloc "cp/module.cc" 3157)
+ nil )
+ (!pair "std::pair<location_t,location_t>"
+ (!type already_seen 2020)
+ (!srcfileloc "cp/module.cc" 3157)
+ nil )
+ (!pair "range_t"
+ (!type already_seen 2019)
+ (!srcfileloc "cp/module.cc" 3154)
+ nil )
+ (!pair "std::pair<unsigned,unsigned>"
+ (!type already_seen 2019)
+ (!srcfileloc "cp/module.cc" 3154)
+ nil )
+ (!pair "duplicate_hash_map"
+ (!type already_seen 2180)
+ (!srcfileloc "cp/module.cc" 2838)
+ nil )
+ (!pair "tree_node"
+ (!type already_seen 2183)
+ (!srcfileloc "cp/module.cc" 2837)
+ nil )
+ (!pair "nodel_ptr_hash<tree_node"
+ (!type already_seen 2182)
+ (!srcfileloc "cp/module.cc" 2837)
+ nil )
+ (!pair "simple_hashmap_traits<nodel_ptr_hash<tree_node"
+ (!type already_seen 2181)
+ (!srcfileloc "cp/module.cc" 2837)
+ nil )
+ (!pair "hash_map<tree,uintptr_t,simple_hashmap_traits<nodel_ptr_hash<tree_node>,uintptr_t>>"
+ (!type already_seen 2180)
+ (!srcfileloc "cp/module.cc" 2837)
+ nil )
+ (!pair "keyed_map_t"
+ (!type already_seen 2177)
+ (!srcfileloc "cp/module.cc" 2708)
+ nil )
+ (!pair "auto_vec<tree"
+ (!type already_seen 2178)
+ (!srcfileloc "cp/module.cc" 2708)
+ nil )
+ (!pair "hash_map<tree,auto_vec<tree>>"
+ (!type already_seen 2177)
+ (!srcfileloc "cp/module.cc" 2708)
+ nil )
+ (!pair "pending_map_t"
+ (!type already_seen 2175)
+ (!srcfileloc "cp/module.cc" 2695)
+ nil )
+ (!pair "auto_vec<unsigned"
+ (!type already_seen 2176)
+ (!srcfileloc "cp/module.cc" 2695)
+ nil )
+ (!pair "pending_key"
+ (!type already_seen 2174)
+ (!srcfileloc "cp/module.cc" 2695)
+ nil )
+ (!pair "hash_map<pending_key,auto_vec<unsigned>>"
+ (!type already_seen 2175)
+ (!srcfileloc "cp/module.cc" 2695)
+ nil )
+ (!pair "ptr_int_hash_map"
+ (!type already_seen 2163)
+ (!srcfileloc "cp/module.cc" 336)
+ nil )
+ (!pair "signed"
+ (!type already_seen 2164)
+ (!srcfileloc "cp/module.cc" 336)
+ nil )
+ (!pair "hash_map<void*,signed,ptr_int_traits>"
+ (!type already_seen 2163)
+ (!srcfileloc "cp/module.cc" 336)
+ nil )
+ (!pair "ptr_int_traits"
+ (!type already_seen 2161)
+ (!srcfileloc "cp/module.cc" 335)
+ nil )
+ (!pair "nodel_ptr_hash<void"
+ (!type already_seen 2162)
+ (!srcfileloc "cp/module.cc" 335)
+ nil )
+ (!pair "simple_hashmap_traits<nodel_ptr_hash<void>,int>"
+ (!type already_seen 2161)
+ (!srcfileloc "cp/module.cc" 335)
+ nil )
+ (!pair "verstr_t"
+ (!type array 2371 nil gc_unused "32"
+ (!type already_seen 8)
+ )
+ (!srcfileloc "cp/module.cc" 303)
+ nil )
+ (!pair "substitution_identifier_index_t"
+ (!type already_seen 2)
+ (!srcfileloc "cp/mangle.cc" 151)
+ nil )
+ (!pair "globals"
+ (!type already_seen 2157)
+ (!srcfileloc "cp/mangle.cc" 126)
+ nil )
+ (!pair "subsumption_hasher"
+ (!type already_seen 2154)
+ (!srcfileloc "cp/logic.cc" 751)
+ nil )
+ (!pair "hash_table<subsumption_hasher>"
+ (!type already_seen 2155)
+ (!srcfileloc "cp/logic.cc" 751)
+ nil )
+ (!pair "conv_type_hasher"
+ (!type already_seen 2148)
+ (!srcfileloc "cp/lex.cc" 821)
+ nil )
+ (!pair "hash_table<conv_type_hasher>"
+ (!type already_seen 2149)
+ (!srcfileloc "cp/lex.cc" 821)
+ nil )
+ (!pair "vec<lambda_discriminator,va_gc>"
+ (!type already_seen 2144)
+ (!srcfileloc "cp/lambda.cc" 1469)
+ nil )
+ (!pair "lambda_discriminator"
+ (!type already_seen 2143)
+ (!srcfileloc "cp/lambda.cc" 1467)
+ nil )
+ (!pair "lambda_sig_count"
+ (!type already_seen 2140)
+ (!srcfileloc "cp/lambda.cc" 1464)
+ nil )
+ (!pair "vec<lambda_sig_count,va_gc>"
+ (!type already_seen 2141)
+ (!srcfileloc "cp/lambda.cc" 1464)
+ nil )
+ (!pair "pending_noexcept"
+ (!type already_seen 2136)
+ (!srcfileloc "cp/except.cc" 1102)
+ nil )
+ (!pair "vec<pending_noexcept,va_gc>"
+ (!type already_seen 2137)
+ (!srcfileloc "cp/except.cc" 1102)
+ nil )
+ (!pair "priority_map_t"
+ (!type already_seen 2134)
+ (!srcfileloc "cp/decl2.cc" 166)
+ nil )
+ (!pair "priority_map_traits"
+ (!type already_seen 2133)
+ (!srcfileloc "cp/decl2.cc" 166)
+ nil )
+ (!pair "hash_map<unsigned,tree,priority_map_traits>"
+ (!type already_seen 2134)
+ (!srcfileloc "cp/decl2.cc" 166)
+ nil )
+ (!pair "mangled_decl_hash"
+ (!type already_seen 2130)
+ (!srcfileloc "cp/decl2.cc" 125)
+ nil )
+ (!pair "hash_table<mangled_decl_hash>"
+ (!type already_seen 2131)
+ (!srcfileloc "cp/decl2.cc" 125)
+ nil )
+ (!pair "typename_hasher"
+ (!type already_seen 2125)
+ (!srcfileloc "cp/decl.cc" 4178)
+ nil )
+ (!pair "hash_table<typename_hasher>"
+ (!type already_seen 2126)
+ (!srcfileloc "cp/decl.cc" 4178)
+ nil )
+ (!pair "incomplete_var"
+ (!type already_seen 2120)
+ (!srcfileloc "cp/decl.cc" 255)
+ nil )
+ (!pair "vec<incomplete_var,va_gc>"
+ (!type already_seen 2121)
+ (!srcfileloc "cp/decl.cc" 255)
+ nil )
+ (!pair "named_label_use_entry"
+ (!type already_seen 1996)
+ (!srcfileloc "cp/decl.cc" 215)
+ nil )
+ (!pair "named_label_entry"
+ (!type already_seen 1993)
+ (!srcfileloc "cp/decl.cc" 198)
+ nil )
+ (!pair "source_location_table_entry_hash"
+ (!type already_seen 2117)
+ (!srcfileloc "cp/cp-gimplify.cc" 3463)
+ nil )
+ (!pair "hash_table<source_location_table_entry_hash>"
+ (!type already_seen 2118)
+ (!srcfileloc "cp/cp-gimplify.cc" 3463)
+ nil )
+ (!pair "coroutine_info_hasher"
+ (!type already_seen 2097)
+ (!srcfileloc "cp/coroutines.cc" 114)
+ nil )
+ (!pair "hash_table<coroutine_info_hasher>"
+ (!type already_seen 2098)
+ (!srcfileloc "cp/coroutines.cc" 114)
+ nil )
+ (!pair "sat_hasher"
+ (!type already_seen 2092)
+ (!srcfileloc "cp/constraint.cc" 2614)
+ nil )
+ (!pair "hash_table<sat_hasher>"
+ (!type already_seen 2093)
+ (!srcfileloc "cp/constraint.cc" 2614)
+ nil )
+ (!pair "atom_hasher"
+ (!type already_seen 2026)
+ (!srcfileloc "cp/constraint.cc" 814)
+ nil )
+ (!pair "hash_table<atom_hasher>"
+ (!type already_seen 2089)
+ (!srcfileloc "cp/constraint.cc" 814)
+ nil )
+ (!pair "norm_hasher"
+ (!type already_seen 2086)
+ (!srcfileloc "cp/constraint.cc" 735)
+ nil )
+ (!pair "hash_table<norm_hasher>"
+ (!type already_seen 2087)
+ (!srcfileloc "cp/constraint.cc" 735)
+ nil )
+ (!pair "constexpr_call_hasher"
+ (!type already_seen 2074)
+ (!srcfileloc "cp/constexpr.cc" 1317)
+ nil )
+ (!pair "hash_table<constexpr_call_hasher>"
+ (!type already_seen 2077)
+ (!srcfileloc "cp/constexpr.cc" 1317)
+ nil )
+ (!pair "constexpr_fundef"
+ (!type already_seen 2027)
+ (!srcfileloc "cp/constexpr.cc" 1112)
+ nil )
+ (!pair "constexpr_fundef_hasher"
+ (!type already_seen 2070)
+ (!srcfileloc "cp/constexpr.cc" 189)
+ nil )
+ (!pair "hash_table<constexpr_fundef_hasher>"
+ (!type already_seen 2071)
+ (!srcfileloc "cp/constexpr.cc" 189)
+ nil )
+ (!pair "hash_map<tree,int>"
+ (!type already_seen 2067)
+ (!srcfileloc "cp/class.cc" 3407)
+ nil )
+ (!pair "subobject_offset_fn"
+ (!type already_seen 2)
+ (!srcfileloc "cp/class.cc" 104)
+ nil )
+ (!pair "class_stack_node_t"
+ (!type already_seen 2062)
+ (!srcfileloc "cp/class.cc" 70)
+ nil )
+ (!pair "cp_oacc_routine_data"
+ (!type already_seen 2047)
+ (!srcfileloc "cp/parser.h" 396)
+ nil )
+ (!pair "cp_omp_declare_simd_data"
+ (!type already_seen 2045)
+ (!srcfileloc "cp/parser.h" 392)
+ nil )
+ (!pair "cp_unparsed_functions_entry"
+ (!type already_seen 2042)
+ (!srcfileloc "cp/parser.h" 380)
+ nil )
+ (!pair "vec<cp_unparsed_functions_entry,va_gc>"
+ (!type already_seen 2049)
+ (!srcfileloc "cp/parser.h" 380)
+ nil )
+ (!pair "cp_parser_context"
+ (!type already_seen 2043)
+ (!srcfileloc "cp/parser.h" 270)
+ nil )
+ (!pair "cp_lexer"
+ (!type already_seen 2036)
+ (!srcfileloc "cp/parser.h" 236)
+ nil )
+ (!pair "cp_default_arg_entry"
+ (!type already_seen 2039)
+ (!srcfileloc "cp/parser.h" 169)
+ nil )
+ (!pair "vec<cp_default_arg_entry,va_gc>"
+ (!type already_seen 2040)
+ (!srcfileloc "cp/parser.h" 169)
+ nil )
+ (!pair "cp_token_cache_ptr"
+ (!type already_seen 71)
+ (!srcfileloc "cp/parser.h" 141)
+ nil )
+ (!pair "cp_token_cache"
+ (!type already_seen 72)
+ (!srcfileloc "cp/parser.h" 141)
+ nil )
+ (!pair "vec<cp_token_position>"
+ (!type already_seen 2035)
+ (!srcfileloc "cp/parser.h" 101)
+ nil )
+ (!pair "cp_token"
+ (!type already_seen 74)
+ (!srcfileloc "cp/parser.h" 87)
+ nil )
+ (!pair "vec<cp_token,va_gc>"
+ (!type already_seen 2033)
+ (!srcfileloc "cp/parser.h" 87)
+ nil )
+ (!pair "cp_token_position"
+ (!type already_seen 73)
+ (!srcfileloc "cp/parser.h" 77)
+ nil )
+ (!pair "cp_parameter_declarator"
+ (!type already_seen 2005)
+ (!srcfileloc "cp/cp-tree.h" 6328)
+ nil )
+ (!pair "cp_declarator"
+ (!type already_seen 2004)
+ (!srcfileloc "cp/cp-tree.h" 6326)
+ nil )
+ (!pair "cp_virt_specifiers"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 6192)
+ nil )
+ (!pair "cp_cv_quals"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 6176)
+ nil )
+ (!pair "ovl_op_info_t"
+ (!type already_seen 2002)
+ (!srcfileloc "cp/cp-tree.h" 6149)
+ nil )
+ (!pair "base_access"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 5613)
+ nil )
+ (!pair "tsubst_flags_t"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 5599)
+ nil )
+ (!pair "cp_lvalue_kind"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-tree.h" 5473)
+ nil )
+ (!pair "named_decl_hash"
+ (!type already_seen 94)
+ (!srcfileloc "cp/cp-tree.h" 2985)
+ nil )
+ (!pair "hash_table<named_decl_hash>"
+ (!type already_seen 93)
+ (!srcfileloc "cp/cp-tree.h" 2985)
+ nil )
+ (!pair "vec<tree_pair_s,va_gc>"
+ (!type already_seen 537)
+ (!srcfileloc "cp/cp-tree.h" 2407)
+ nil )
+ (!pair "tree_pair_p"
+ (!type already_seen 539)
+ (!srcfileloc "cp/cp-tree.h" 2320)
+ nil )
+ (!pair "tree_pair_s"
+ (!type already_seen 538)
+ (!srcfileloc "cp/cp-tree.h" 2320)
+ nil )
+ (!pair "named_label_hash"
+ (!type already_seen 456)
+ (!srcfileloc "cp/cp-tree.h" 2116)
+ nil )
+ (!pair "hash_table<named_label_hash>"
+ (!type already_seen 455)
+ (!srcfileloc "cp/cp-tree.h" 2116)
+ nil )
+ (!pair "cp_omp_begin_assumes_data"
+ (!type already_seen 1978)
+ (!srcfileloc "cp/cp-tree.h" 1882)
+ nil )
+ (!pair "vec<cp_omp_begin_assumes_data,va_gc>"
+ (!type already_seen 1983)
+ (!srcfileloc "cp/cp-tree.h" 1882)
+ nil )
+ (!pair "cp_omp_declare_target_attr"
+ (!type already_seen 1977)
+ (!srcfileloc "cp/cp-tree.h" 1881)
+ nil )
+ (!pair "vec<cp_omp_declare_target_attr,va_gc>"
+ (!type already_seen 1981)
+ (!srcfileloc "cp/cp-tree.h" 1881)
+ nil )
+ (!pair "cxx_saved_binding"
+ (!type already_seen 1970)
+ (!srcfileloc "cp/cp-tree.h" 1837)
+ nil )
+ (!pair "vec<cxx_saved_binding,va_gc>"
+ (!type already_seen 1979)
+ (!srcfileloc "cp/cp-tree.h" 1837)
+ nil )
+ (!pair "deferred_access_check"
+ (!type already_seen 81)
+ (!srcfileloc "cp/cp-tree.h" 1564)
+ nil )
+ (!pair "vec<deferred_access_check,va_gc>"
+ (!type already_seen 79)
+ (!srcfileloc "cp/cp-tree.h" 1564)
+ nil )
+ (!pair "ptrmem_cst_t"
+ (!type already_seen 666)
+ (!srcfileloc "cp/cp-tree.h" 711)
+ nil )
+ (!pair "cp_class_binding"
+ (!type already_seen 89)
+ (!srcfileloc "cp/name-lookup.h" 258)
+ nil )
+ (!pair "vec<cp_class_binding,va_gc>"
+ (!type already_seen 88)
+ (!srcfileloc "cp/name-lookup.h" 258)
+ nil )
+ (!pair "binding_cluster"
+ (!type already_seen 670)
+ (!srcfileloc "cp/name-lookup.h" 148)
+ nil )
+ (!pair "binding_index"
+ (!type already_seen 672)
+ (!srcfileloc "cp/name-lookup.h" 129)
+ nil )
+ (!pair "binding_slot"
+ (!type already_seen 674)
+ (!srcfileloc "cp/name-lookup.h" 87)
+ nil )
+ (!pair "cp_binding_level"
+ (!type already_seen 84)
+ (!srcfileloc "cp/name-lookup.h" 54)
+ nil )
+ (!pair "cxx_binding"
+ (!type already_seen 91)
+ (!srcfileloc "cp/name-lookup.h" 48)
+ nil )
+ (!pair "c_omp_declare_target_attr"
+ (!type already_seen 1967)
+ (!srcfileloc "c/c-lang.h" 69)
+ nil )
+ (!pair "vec<c_omp_declare_target_attr,va_gc>"
+ (!type already_seen 1968)
+ (!srcfileloc "c/c-lang.h" 69)
+ nil )
+ (!pair "matching_braces"
+ (!type already_seen 1961)
+ (!srcfileloc "c/c-parser.cc" 1166)
+ nil )
+ (!pair "matching_brace_traits"
+ (!type already_seen 1960)
+ (!srcfileloc "c/c-parser.cc" 1166)
+ nil )
+ (!pair "token_pair<matching_brace_traits>"
+ (!type already_seen 1961)
+ (!srcfileloc "c/c-parser.cc" 1166)
+ nil )
+ (!pair "matching_parens"
+ (!type already_seen 1959)
+ (!srcfileloc "c/c-parser.cc" 1148)
+ nil )
+ (!pair "matching_paren_traits"
+ (!type already_seen 1958)
+ (!srcfileloc "c/c-parser.cc" 1148)
+ nil )
+ (!pair "token_pair<matching_paren_traits>"
+ (!type already_seen 1959)
+ (!srcfileloc "c/c-parser.cc" 1148)
+ nil )
+ (!pair "c_parser"
+ (!type already_seen 1952)
+ (!srcfileloc "c/c-parser.cc" 282)
+ nil )
+ (!pair "vec<c_token,va_gc>"
+ (!type already_seen 1956)
+ (!srcfileloc "c/c-parser.cc" 198)
+ nil )
+ (!pair "c_token"
+ (!type already_seen 1950)
+ (!srcfileloc "c/c-parser.cc" 190)
+ nil )
+ (!pair "gcc_options"
+ (!type already_seen 841)
+ (!srcfileloc "c-family/c-pragma.cc" 1236)
+ nil )
+ (!pair "pending_redefinition"
+ (!type already_seen 1931)
+ (!srcfileloc "c-family/c-pragma.cc" 496)
+ nil )
+ (!pair "vec<pending_redefinition,va_gc>"
+ (!type already_seen 1932)
+ (!srcfileloc "c-family/c-pragma.cc" 496)
+ nil )
+ (!pair "pending_weak"
+ (!type already_seen 1928)
+ (!srcfileloc "c-family/c-pragma.cc" 248)
+ nil )
+ (!pair "vec<pending_weak,va_gc>"
+ (!type already_seen 1929)
+ (!srcfileloc "c-family/c-pragma.cc" 248)
+ nil )
+ (!pair "pragma_handler_2arg"
+ (!type already_seen 2)
+ (!srcfileloc "c-family/c-pragma.h" 217)
+ nil )
+ (!pair "pragma_handler_1arg"
+ (!type already_seen 2)
+ (!srcfileloc "c-family/c-pragma.h" 214)
+ nil )
+ (!pair "omp_clause_mask"
+ (!type already_seen 1920)
+ (!srcfileloc "c-family/c-common.h" 1248)
+ nil )
+ (!pair "wide_int_bitmask"
+ (!type already_seen 1920)
+ (!srcfileloc "c-family/c-common.h" 1248)
+ nil )
+ (!pair "bc_state_t"
+ (!type already_seen 1919)
+ (!srcfileloc "c-family/c-common.h" 1203)
+ nil )
+ (!pair "stmt_tree"
+ (!type already_seen 453)
+ (!srcfileloc "c-family/c-common.h" 586)
+ nil )
+ (!pair "added_includes_t"
+ (!type already_seen 1913)
+ (!srcfileloc "c-family/c-common.cc" 9263)
+ nil )
+ (!pair "hash_map<char*,per_file_includes_t*>"
+ (!type already_seen 1913)
+ (!srcfileloc "c-family/c-common.cc" 9263)
+ nil )
+ (!pair "per_file_includes_t"
+ (!type already_seen 1911)
+ (!srcfileloc "c-family/c-common.cc" 9258)
+ nil )
+ (!pair "nofree_string_hash"
+ (!type already_seen 1912)
+ (!srcfileloc "c-family/c-common.cc" 9258)
+ nil )
+ (!pair "hash_set<char*,false,nofree_string_hash>"
+ (!type already_seen 1911)
+ (!srcfileloc "c-family/c-common.cc" 9258)
+ nil )
+ (!pair "vec<tree_gc_vec,va_gc>"
+ (!type already_seen 1909)
+ (!srcfileloc "c-family/c-common.cc" 8285)
+ nil )
+ (!pair "tree_gc_vec"
+ (!type already_seen 85)
+ (!srcfileloc "c-family/c-common.cc" 8284)
+ nil )
+ (!pair "vec<const_char_p,va_gc>"
+ (!type already_seen 1907)
+ (!srcfileloc "c-family/c-common.cc" 5869)
+ nil )
+ (!pair "const_char_p"
+ (!type already_seen 9)
+ (!srcfileloc "c-family/c-common.cc" 5868)
+ nil )
+ (!pair "c_binding_ptr"
+ (!type already_seen 644)
+ (!srcfileloc "c/c-decl.cc" 585)
+ nil )
+ (!pair "vec<c_goto_bindings_p,va_gc>"
+ (!type already_seen 653)
+ (!srcfileloc "c/c-decl.cc" 393)
+ nil )
+ (!pair "c_goto_bindings_p"
+ (!type already_seen 654)
+ (!srcfileloc "c/c-decl.cc" 374)
+ nil )
+ (!pair "c_expr_t"
+ (!type already_seen 1890)
+ (!srcfileloc "c/c-tree.h" 204)
+ nil )
+ (!pair "builtin_type"
+ (!type already_seen 2)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 6152)
+ nil )
+ (!pair "pad_type_hasher"
+ (!type already_seen 1885)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 331)
+ nil )
+ (!pair "hash_table<pad_type_hasher>"
+ (!type already_seen 1886)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 331)
+ nil )
+ (!pair "packable_type_hasher"
+ (!type already_seen 1881)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 308)
+ nil )
+ (!pair "hash_table<packable_type_hasher>"
+ (!type already_seen 1882)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 308)
+ nil )
+ (!pair "atomic_acces_t"
+ (!type already_seen 2)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 4177)
+ nil )
+ (!pair "vec<loop_info,va_gc>"
+ (!type already_seen 1875)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 214)
+ nil )
+ (!pair "loop_info"
+ (!type already_seen 1874)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 211)
+ nil )
+ (!pair "vec<range_check_info,va_gc>"
+ (!type already_seen 1871)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 207)
+ nil )
+ (!pair "range_check_info"
+ (!type already_seen 1870)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 195)
+ nil )
+ (!pair "vec<parm_attr,va_gc>"
+ (!type already_seen 464)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 117)
+ nil )
+ (!pair "parm_attr"
+ (!type already_seen 465)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 113)
+ nil )
+ (!pair "vinfo_t"
+ (!type already_seen 1864)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 7923)
+ nil )
+ (!pair "intrin_binding_t"
+ (!type already_seen 1862)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 266)
+ nil )
+ (!pair "dummy_type_hasher"
+ (!type already_seen 1859)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 198)
+ nil )
+ (!pair "hash_table<dummy_type_hasher>"
+ (!type already_seen 1860)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 198)
+ nil )
+ (!pair "va_gc_atomic"
+ (!type already_seen 1856)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 163)
+ nil )
+ (!pair "Entity_Id"
+ (!type already_seen 1857)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 163)
+ nil )
+ (!pair "vec<Entity_Id,va_gc_atomic>"
+ (!type already_seen 1854)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 163)
+ nil )
+ (!pair "value_annotation_hasher"
+ (!type already_seen 1851)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 157)
+ nil )
+ (!pair "hash_table<value_annotation_hasher>"
+ (!type already_seen 1852)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 157)
+ nil )
+ (!pair "variant_desc"
+ (!type already_seen 1850)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 132)
+ nil )
+ (!pair "subst_pair"
+ (!type already_seen 1849)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 114)
+ nil )
+ (!pair "rewrite_fn"
+ (!type already_seen 2)
+ (!srcfileloc "ada/gcc-interface/gigi.h" 965)
+ nil )
+ (!pair "builtin_arg"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm-builtins.cc" 2857)
+ nil )
+ (!pair "arm_builtin_cde_datum"
+ (!type already_seen 1838)
+ (!srcfileloc "config/arm/arm-builtins.cc" 1003)
+ nil )
+ (!pair "arm_builtin_datum"
+ (!type already_seen 1837)
+ (!srcfileloc "config/arm/arm-builtins.cc" 909)
+ nil )
+ (!pair "polymorphic_call_target_hash_type"
+ (!type already_seen 1828)
+ (!srcfileloc "ipa-devirt.cc" 2747)
+ nil )
+ (!pair "polymorphic_call_target_hasher"
+ (!type already_seen 1827)
+ (!srcfileloc "ipa-devirt.cc" 2746)
+ nil )
+ (!pair "hash_table<polymorphic_call_target_hasher>"
+ (!type already_seen 1828)
+ (!srcfileloc "ipa-devirt.cc" 2746)
+ nil )
+ (!pair "vec<odr_type,va_gc>"
+ (!type already_seen 1822)
+ (!srcfileloc "ipa-devirt.cc" 512)
+ nil )
+ (!pair "odr_hash_type"
+ (!type already_seen 1821)
+ (!srcfileloc "ipa-devirt.cc" 505)
+ nil )
+ (!pair "odr_name_hasher"
+ (!type already_seen 1820)
+ (!srcfileloc "ipa-devirt.cc" 505)
+ nil )
+ (!pair "hash_table<odr_name_hasher>"
+ (!type already_seen 1821)
+ (!srcfileloc "ipa-devirt.cc" 505)
+ nil )
+ (!pair "vec<odr_type>"
+ (!type already_seen 1211)
+ (!srcfileloc "ipa-devirt.cc" 206)
+ nil )
+ (!pair "tree_type_map_cache_hasher"
+ (!type already_seen 1805)
+ (!srcfileloc "ubsan.cc" 82)
+ nil )
+ (!pair "hash_table<tree_type_map_cache_hasher>"
+ (!type already_seen 1806)
+ (!srcfileloc "ubsan.cc" 82)
+ nil )
+ (!pair "vtbl_map_iterator_type"
+ (!type already_seen 1794)
+ (!srcfileloc "vtable-verify.cc" 299)
+ nil )
+ (!pair "vtbl_map_table_type::iterator"
+ (!type already_seen 1794)
+ (!srcfileloc "vtable-verify.cc" 299)
+ nil )
+ (!pair "vtbl_map_table_type"
+ (!type already_seen 1793)
+ (!srcfileloc "vtable-verify.cc" 298)
+ nil )
+ (!pair "vtbl_map_hasher"
+ (!type already_seen 1792)
+ (!srcfileloc "vtable-verify.cc" 298)
+ nil )
+ (!pair "hash_table<vtbl_map_hasher>"
+ (!type already_seen 1793)
+ (!srcfileloc "vtable-verify.cc" 298)
+ nil )
+ (!pair "fast_function_summary<ipa_fn_summary*,va_gc>"
+ (!type already_seen 1784)
+ (!srcfileloc "ipa-fnsummary.h" 250)
+ nil )
+ (!pair "ipa_fn_summary_t"
+ (!type already_seen 1783)
+ (!srcfileloc "ipa-fnsummary.h" 248)
+ nil )
+ (!pair "vec<int,va_heap,vl_ptr>"
+ (!type already_seen 1200)
+ (!srcfileloc "ipa-fnsummary.h" 200)
+ nil )
+ (!pair "ipa_freqcounting_predicate"
+ (!type already_seen 1197)
+ (!srcfileloc "ipa-fnsummary.h" 196)
+ nil )
+ (!pair "vec<ipa_freqcounting_predicate,va_gc>"
+ (!type already_seen 1196)
+ (!srcfileloc "ipa-fnsummary.h" 196)
+ nil )
+ (!pair "vec<size_time_entry,va_heap,vl_ptr>"
+ (!type already_seen 1194)
+ (!srcfileloc "ipa-fnsummary.h" 193)
+ nil )
+ (!pair "size_time_entry"
+ (!type already_seen 1193)
+ (!srcfileloc "ipa-fnsummary.h" 189)
+ nil )
+ (!pair "auto_vec<size_time_entry>"
+ (!type already_seen 1192)
+ (!srcfileloc "ipa-fnsummary.h" 189)
+ nil )
+ (!pair "ipa_fn_summary"
+ (!type already_seen 1183)
+ (!srcfileloc "ipa-fnsummary.h" 126)
+ nil )
+ (!pair "ipa_predicate"
+ (!type already_seen 1199)
+ (!srcfileloc "ipa-fnsummary.h" 117)
+ nil )
+ (!pair "ipa_hints"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-fnsummary.h" 58)
+ nil )
+ (!pair "clause_t"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-predicate.h" 113)
+ nil )
+ (!pair "conditions"
+ (!type already_seen 1185)
+ (!srcfileloc "ipa-predicate.h" 94)
+ nil )
+ (!pair "condition"
+ (!type already_seen 1187)
+ (!srcfileloc "ipa-predicate.h" 94)
+ nil )
+ (!pair "vec<condition,va_gc>"
+ (!type already_seen 1186)
+ (!srcfileloc "ipa-predicate.h" 94)
+ nil )
+ (!pair "expr_eval_ops"
+ (!type already_seen 1188)
+ (!srcfileloc "ipa-predicate.h" 46)
+ nil )
+ (!pair "expr_eval_op"
+ (!type already_seen 1190)
+ (!srcfileloc "ipa-predicate.h" 46)
+ nil )
+ (!pair "vec<expr_eval_op,va_gc>"
+ (!type already_seen 1189)
+ (!srcfileloc "ipa-predicate.h" 46)
+ nil )
+ (!pair "lto_file_decl_data_ptr"
+ (!type already_seen 322)
+ (!srcfileloc "lto-streamer.h" 609)
+ nil )
+ (!pair "lto_section"
+ (!type already_seen 341)
+ (!srcfileloc "lto-streamer.h" 602)
+ nil )
+ (!pair "ld_plugin_symbol_resolution"
+ (!type already_seen 340)
+ (!srcfileloc "lto-streamer.h" 596)
+ nil )
+ (!pair "hash_map<tree,ld_plugin_symbol_resolution>"
+ (!type already_seen 339)
+ (!srcfileloc "lto-streamer.h" 596)
+ nil )
+ (!pair "gcov_summary"
+ (!type already_seen 337)
+ (!srcfileloc "lto-streamer.h" 593)
+ nil )
+ (!pair "res_pair"
+ (!type already_seen 336)
+ (!srcfileloc "lto-streamer.h" 590)
+ nil )
+ (!pair "vec<res_pair>"
+ (!type already_seen 335)
+ (!srcfileloc "lto-streamer.h" 590)
+ nil )
+ (!pair "decl_state_hasher"
+ (!type already_seen 332)
+ (!srcfileloc "lto-streamer.h" 569)
+ nil )
+ (!pair "hash_table<decl_state_hasher>"
+ (!type already_seen 331)
+ (!srcfileloc "lto-streamer.h" 569)
+ nil )
+ (!pair "lto_out_decl_state_ptr"
+ (!type already_seen 1699)
+ (!srcfileloc "lto-streamer.h" 540)
+ nil )
+ (!pair "lto_in_decl_state_ptr"
+ (!type already_seen 325)
+ (!srcfileloc "lto-streamer.h" 504)
+ nil )
+ (!pair "lto_symtab_encoder_t"
+ (!type already_seen 328)
+ (!srcfileloc "lto-streamer.h" 470)
+ nil )
+ (!pair "lto_free_section_data_f"
+ (!type already_seen 2)
+ (!srcfileloc "lto-streamer.h" 263)
+ nil )
+ (!pair "lto_get_section_data_f"
+ (!type already_seen 2)
+ (!srcfileloc "lto-streamer.h" 254)
+ nil )
+ (!pair "ld_plugin_symbol_resolution_t"
+ (!type already_seen 2)
+ (!srcfileloc "lto-streamer.h" 242)
+ nil )
+ (!pair "lto_decl_flags_t"
+ (!type already_seen 8)
+ (!srcfileloc "lto-streamer.h" 127)
+ nil )
+ (!pair "cgraph_node_queue"
+ (!type already_seen 1760)
+ (!srcfileloc "trans-mem.cc" 4196)
+ nil )
+ (!pair "vec<cgraph_node*>"
+ (!type already_seen 1760)
+ (!srcfileloc "trans-mem.cc" 4196)
+ nil )
+ (!pair "tm_wrapper_hasher"
+ (!type already_seen 1740)
+ (!srcfileloc "trans-mem.cc" 468)
+ nil )
+ (!pair "hash_table<tm_wrapper_hasher>"
+ (!type already_seen 1741)
+ (!srcfileloc "trans-mem.cc" 468)
+ nil )
+ (!pair "function_summary<ipcp_transformation*>"
+ (!type already_seen 1738)
+ (!srcfileloc "ipa-prop.h" 1074)
+ nil )
+ (!pair "ipa_edge_args_sum_t"
+ (!type already_seen 1735)
+ (!srcfileloc "ipa-prop.h" 1042)
+ nil )
+ (!pair "ipa_node_params_t"
+ (!type already_seen 1733)
+ (!srcfileloc "ipa-prop.h" 1020)
+ nil )
+ (!pair "vec<ipa_polymorphic_call_context,va_gc>"
+ (!type already_seen 1168)
+ (!srcfileloc "ipa-prop.h" 971)
+ nil )
+ (!pair "ipa_jump_func"
+ (!type already_seen 1134)
+ (!srcfileloc "ipa-prop.h" 970)
+ nil )
+ (!pair "vec<ipa_jump_func,va_gc>"
+ (!type already_seen 1166)
+ (!srcfileloc "ipa-prop.h" 970)
+ nil )
+ (!pair "ipa_edge_args"
+ (!type already_seen 1164)
+ (!srcfileloc "ipa-prop.h" 954)
+ nil )
+ (!pair "ipcp_transformation"
+ (!type already_seen 1721)
+ (!srcfileloc "ipa-prop.h" 923)
+ nil )
+ (!pair "ipa_vr"
+ (!type already_seen 1729)
+ (!srcfileloc "ipa-prop.h" 920)
+ nil )
+ (!pair "vec<ipa_vr,va_gc>"
+ (!type already_seen 1728)
+ (!srcfileloc "ipa-prop.h" 920)
+ nil )
+ (!pair "vec<ipa_bits*,va_gc>"
+ (!type already_seen 1726)
+ (!srcfileloc "ipa-prop.h" 918)
+ nil )
+ (!pair "ipa_argagg_value"
+ (!type already_seen 1206)
+ (!srcfileloc "ipa-prop.h" 916)
+ nil )
+ (!pair "vec<ipa_argagg_value,va_gc>"
+ (!type already_seen 1724)
+ (!srcfileloc "ipa-prop.h" 916)
+ nil )
+ (!pair "vec<ipa_polymorphic_call_context>"
+ (!type already_seen 1160)
+ (!srcfileloc "ipa-prop.h" 636)
+ nil )
+ (!pair "vec<tree>"
+ (!type already_seen 1159)
+ (!srcfileloc "ipa-prop.h" 633)
+ nil )
+ (!pair "ipa_param_descriptor"
+ (!type already_seen 1156)
+ (!srcfileloc "ipa-prop.h" 624)
+ nil )
+ (!pair "vec<ipa_param_descriptor,va_gc>"
+ (!type already_seen 1155)
+ (!srcfileloc "ipa-prop.h" 624)
+ nil )
+ (!pair "ipa_node_params"
+ (!type already_seen 1153)
+ (!srcfileloc "ipa-prop.h" 617)
+ nil )
+ (!pair "ipa_agg_jf_item"
+ (!type already_seen 1138)
+ (!srcfileloc "ipa-prop.h" 190)
+ nil )
+ (!pair "vec<ipa_agg_jf_item,va_gc>"
+ (!type already_seen 1137)
+ (!srcfileloc "ipa-prop.h" 190)
+ nil )
+ (!pair "frange_storage_slot"
+ (!type already_seen 558)
+ (!srcfileloc "value-range-storage.h" 113)
+ nil )
+ (!pair "MAX_INTS"
+ (!type already_seen 556)
+ (!srcfileloc "value-range-storage.h" 100)
+ nil )
+ (!pair "trailing_wide_ints<MAX_INTS>"
+ (!type already_seen 555)
+ (!srcfileloc "value-range-storage.h" 100)
+ nil )
+ (!pair "irange_storage_slot"
+ (!type already_seen 554)
+ (!srcfileloc "value-range-storage.h" 96)
+ nil )
+ (!pair "DISABLE_COPY_AND_ASSIGN"
+ (!type already_seen 1718)
+ (!srcfileloc "value-range-storage.h" 78)
+ nil )
+ (!pair "value_range"
+ (!type already_seen 1145)
+ (!srcfileloc "value-range.h" 532)
+ nil )
+ (!pair "1"
+ (!type already_seen 1146)
+ (!srcfileloc "value-range.h" 532)
+ nil )
+ (!pair "int_range<1>"
+ (!type already_seen 1145)
+ (!srcfileloc "value-range.h" 532)
+ nil )
+ (!pair "int_range_max"
+ (!type already_seen 1711)
+ (!srcfileloc "value-range.h" 514)
+ nil )
+ (!pair "true"
+ (!type already_seen 1712)
+ (!srcfileloc "value-range.h" 514)
+ nil )
+ (!pair "3"
+ (!type already_seen 1713)
+ (!srcfileloc "value-range.h" 514)
+ nil )
+ (!pair "int_range<3,true>"
+ (!type already_seen 1711)
+ (!srcfileloc "value-range.h" 514)
+ nil )
+ (!pair "int_range"
+ (!type already_seen 1707)
+ (!srcfileloc "value-range.h" 260)
+ nil )
+ (!pair "irange"
+ (!type already_seen 1705)
+ (!srcfileloc "value-range.h" 225)
+ nil )
+ (!pair "vec<gimple*,va_gc>"
+ (!type already_seen 1702)
+ (!srcfileloc "tree-phinodes.cc" 70)
+ nil )
+ (!pair "hash_map<char*,unsigned>"
+ (!type already_seen 1700)
+ (!srcfileloc "cgraphclones.cc" 479)
+ nil )
+ (!pair "uid_range_p"
+ (!type already_seen 1696)
+ (!srcfileloc "passes.cc" 1043)
+ nil )
+ (!pair "char_ptr"
+ (!type already_seen 9)
+ (!srcfileloc "passes.cc" 923)
+ nil )
+ (!pair "cl_target_option"
+ (!type already_seen 627)
+ (!srcfileloc "config/arm/arm.cc" 28501)
+ nil )
+ (!pair "arm_pragma_enum"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.cc" 7377)
+ nil )
+ (!pair "libcall_table_type"
+ (!type already_seen 1678)
+ (!srcfileloc "config/arm/arm.cc" 5872)
+ nil )
+ (!pair "libcall_hasher"
+ (!type already_seen 1677)
+ (!srcfileloc "config/arm/arm.cc" 5872)
+ nil )
+ (!pair "hash_table<libcall_hasher>"
+ (!type already_seen 1678)
+ (!srcfileloc "config/arm/arm.cc" 5872)
+ nil )
+ (!pair "isr_attribute_arg"
+ (!type already_seen 1676)
+ (!srcfileloc "config/arm/arm.cc" 4049)
+ nil )
+ (!pair "arm_fixed_mode_set"
+ (!type already_seen 1675)
+ (!srcfileloc "config/arm/arm.cc" 2495)
+ nil )
+ (!pair "Mfix"
+ (!type already_seen 1671)
+ (!srcfileloc "config/arm/arm.cc" 83)
+ nil )
+ (!pair "Mnode"
+ (!type already_seen 1670)
+ (!srcfileloc "config/arm/arm.cc" 82)
+ nil )
+ (!pair "omp_declare_variant_alt_hasher"
+ (!type already_seen 1659)
+ (!srcfileloc "omp-general.cc" 2142)
+ nil )
+ (!pair "hash_table<omp_declare_variant_alt_hasher>"
+ (!type already_seen 1660)
+ (!srcfileloc "omp-general.cc" 2142)
+ nil )
+ (!pair "omp_declare_variant_hasher"
+ (!type already_seen 1656)
+ (!srcfileloc "omp-general.cc" 2120)
+ nil )
+ (!pair "hash_table<omp_declare_variant_hasher>"
+ (!type already_seen 1657)
+ (!srcfileloc "omp-general.cc" 2120)
+ nil )
+ (!pair "omp_declare_variant_entry"
+ (!type already_seen 1652)
+ (!srcfileloc "omp-general.cc" 2071)
+ nil )
+ (!pair "vec<omp_declare_variant_entry,va_gc>"
+ (!type already_seen 1653)
+ (!srcfileloc "omp-general.cc" 2071)
+ nil )
+ (!pair "use_optype_p"
+ (!type already_seen 403)
+ (!srcfileloc "tree-ssa-operands.h" 42)
+ nil )
+ (!pair "use_operand_p"
+ (!type already_seen 562)
+ (!srcfileloc "tree-ssa-operands.h" 30)
+ nil )
+ (!pair "ssa_use_operand_t"
+ (!type already_seen 561)
+ (!srcfileloc "tree-ssa-operands.h" 30)
+ nil )
+ (!pair "def_operand_p"
+ (!type already_seen 24)
+ (!srcfileloc "tree-ssa-operands.h" 27)
+ nil )
+ (!pair "scev_info_hasher"
+ (!type already_seen 1640)
+ (!srcfileloc "tree-scalar-evolution.cc" 312)
+ nil )
+ (!pair "hash_table<scev_info_hasher>"
+ (!type already_seen 1641)
+ (!srcfileloc "tree-scalar-evolution.cc" 312)
+ nil )
+ (!pair "mem_addr_template"
+ (!type already_seen 1600)
+ (!srcfileloc "tree-ssa-address.cc" 95)
+ nil )
+ (!pair "vec<mem_addr_template,va_gc>"
+ (!type already_seen 1601)
+ (!srcfileloc "tree-ssa-address.cc" 95)
+ nil )
+ (!pair "treemple"
+ (!type already_seen 1588)
+ (!srcfileloc "tree-eh.cc" 53)
+ nil )
+ (!pair "tm_restart_hasher"
+ (!type already_seen 407)
+ (!srcfileloc "gimple-ssa.h" 114)
+ nil )
+ (!pair "hash_table<tm_restart_hasher>"
+ (!type already_seen 406)
+ (!srcfileloc "gimple-ssa.h" 114)
+ nil )
+ (!pair "ssa_name_hasher"
+ (!type already_seen 398)
+ (!srcfileloc "gimple-ssa.h" 96)
+ nil )
+ (!pair "hash_table<ssa_name_hasher>"
+ (!type already_seen 397)
+ (!srcfileloc "gimple-ssa.h" 96)
+ nil )
+ (!pair "hash_map<tree,tree>"
+ (!type already_seen 395)
+ (!srcfileloc "gimple-ssa.h" 84)
+ nil )
+ (!pair "elt_t"
+ (!type already_seen 1585)
+ (!srcfileloc "gimple.h" 1701)
+ nil )
+ (!pair "gimple_seq_node"
+ (!type already_seen 282)
+ (!srcfileloc "gimple.h" 28)
+ nil )
+ (!pair "tm_clone_hasher"
+ (!type already_seen 1577)
+ (!srcfileloc "varasm.cc" 6353)
+ nil )
+ (!pair "hash_table<tm_clone_hasher>"
+ (!type already_seen 1578)
+ (!srcfileloc "varasm.cc" 6353)
+ nil )
+ (!pair "const_rtx_desc_hasher"
+ (!type already_seen 1017)
+ (!srcfileloc "varasm.cc" 3743)
+ nil )
+ (!pair "hash_table<const_rtx_desc_hasher>"
+ (!type already_seen 1016)
+ (!srcfileloc "varasm.cc" 3743)
+ nil )
+ (!pair "tree_descriptor_hasher"
+ (!type already_seen 1101)
+ (!srcfileloc "varasm.cc" 3072)
+ nil )
+ (!pair "hash_table<tree_descriptor_hasher>"
+ (!type already_seen 1571)
+ (!srcfileloc "varasm.cc" 3072)
+ nil )
+ (!pair "object_block_hasher"
+ (!type already_seen 1567)
+ (!srcfileloc "varasm.cc" 200)
+ nil )
+ (!pair "hash_table<object_block_hasher>"
+ (!type already_seen 1568)
+ (!srcfileloc "varasm.cc" 200)
+ nil )
+ (!pair "section_hasher"
+ (!type already_seen 1564)
+ (!srcfileloc "varasm.cc" 189)
+ nil )
+ (!pair "hash_table<section_hasher>"
+ (!type already_seen 1565)
+ (!srcfileloc "varasm.cc" 189)
+ nil )
+ (!pair "tree_vec_map_cache_hasher"
+ (!type already_seen 937)
+ (!srcfileloc "tree.cc" 224)
+ nil )
+ (!pair "hash_table<tree_vec_map_cache_hasher>"
+ (!type already_seen 1561)
+ (!srcfileloc "tree.cc" 224)
+ nil )
+ (!pair "tree_decl_map_cache_hasher"
+ (!type already_seen 936)
+ (!srcfileloc "tree.cc" 218)
+ nil )
+ (!pair "hash_table<tree_decl_map_cache_hasher>"
+ (!type already_seen 1559)
+ (!srcfileloc "tree.cc" 218)
+ nil )
+ (!pair "cl_option_hasher"
+ (!type already_seen 1556)
+ (!srcfileloc "tree.cc" 212)
+ nil )
+ (!pair "hash_table<cl_option_hasher>"
+ (!type already_seen 1557)
+ (!srcfileloc "tree.cc" 212)
+ nil )
+ (!pair "poly_int_cst_hasher"
+ (!type already_seen 1553)
+ (!srcfileloc "tree.cc" 196)
+ nil )
+ (!pair "hash_table<poly_int_cst_hasher>"
+ (!type already_seen 1554)
+ (!srcfileloc "tree.cc" 196)
+ nil )
+ (!pair "int_cst_hasher"
+ (!type already_seen 1550)
+ (!srcfileloc "tree.cc" 185)
+ nil )
+ (!pair "hash_table<int_cst_hasher>"
+ (!type already_seen 1551)
+ (!srcfileloc "tree.cc" 185)
+ nil )
+ (!pair "type_cache_hasher"
+ (!type already_seen 1547)
+ (!srcfileloc "tree.cc" 174)
+ nil )
+ (!pair "hash_table<type_cache_hasher>"
+ (!type already_seen 1548)
+ (!srcfileloc "tree.cc" 174)
+ nil )
+ (!pair "block_info"
+ (!type already_seen 1537)
+ (!srcfileloc "reg-stack.cc" 220)
+ nil )
+ (!pair "stack_ptr"
+ (!type already_seen 1535)
+ (!srcfileloc "reg-stack.cc" 207)
+ nil )
+ (!pair "fixup_vertex_p"
+ (!type already_seen 1529)
+ (!srcfileloc "mcf.cc" 103)
+ nil )
+ (!pair "fixup_vertex_type"
+ (!type already_seen 1528)
+ (!srcfileloc "mcf.cc" 103)
+ nil )
+ (!pair "fixup_edge_p"
+ (!type already_seen 1527)
+ (!srcfileloc "mcf.cc" 94)
+ nil )
+ (!pair "fixup_edge_type"
+ (!type already_seen 1526)
+ (!srcfileloc "mcf.cc" 94)
+ nil )
+ (!pair "libfunc_decl_hasher"
+ (!type already_seen 1518)
+ (!srcfileloc "optabs-libfuncs.cc" 720)
+ nil )
+ (!pair "hash_table<libfunc_decl_hasher>"
+ (!type already_seen 1519)
+ (!srcfileloc "optabs-libfuncs.cc" 720)
+ nil )
+ (!pair "vec_modify_pair_heap"
+ (!type already_seen 1507)
+ (!srcfileloc "gcse.cc" 621)
+ nil )
+ (!pair "modify_pair"
+ (!type already_seen 1508)
+ (!srcfileloc "gcse.cc" 621)
+ nil )
+ (!pair "vec<modify_pair>"
+ (!type already_seen 1507)
+ (!srcfileloc "gcse.cc" 621)
+ nil )
+ (!pair "vec_rtx_heap"
+ (!type already_seen 1506)
+ (!srcfileloc "gcse.cc" 620)
+ nil )
+ (!pair "vec<rtx_insn*>"
+ (!type already_seen 1506)
+ (!srcfileloc "gcse.cc" 620)
+ nil )
+ (!pair "occr_t"
+ (!type already_seen 1501)
+ (!srcfileloc "gcse.cc" 306)
+ nil )
+ (!pair "user_struct"
+ (!type already_seen 1497)
+ (!srcfileloc "ggc-tests.cc" 388)
+ nil )
+ (!pair "test_node"
+ (!type already_seen 1495)
+ (!srcfileloc "ggc-tests.cc" 323)
+ nil )
+ (!pair "some_other_subclass"
+ (!type already_seen 1493)
+ (!srcfileloc "ggc-tests.cc" 261)
+ nil )
+ (!pair "some_subclass"
+ (!type already_seen 1491)
+ (!srcfileloc "ggc-tests.cc" 250)
+ nil )
+ (!pair "example_base"
+ (!type already_seen 1489)
+ (!srcfileloc "ggc-tests.cc" 225)
+ nil )
+ (!pair "test_of_union"
+ (!type already_seen 1486)
+ (!srcfileloc "ggc-tests.cc" 134)
+ nil )
+ (!pair "test_other"
+ (!type already_seen 920)
+ (!srcfileloc "ggc-tests.cc" 128)
+ nil )
+ (!pair "test_of_length"
+ (!type already_seen 1483)
+ (!srcfileloc "ggc-tests.cc" 68)
+ nil )
+ (!pair "test_struct"
+ (!type already_seen 918)
+ (!srcfileloc "ggc-tests.cc" 42)
+ nil )
+ (!pair "ehspec_hash_type"
+ (!type already_seen 1480)
+ (!srcfileloc "except.cc" 764)
+ nil )
+ (!pair "ehspec_hasher"
+ (!type already_seen 1479)
+ (!srcfileloc "except.cc" 764)
+ nil )
+ (!pair "hash_table<ehspec_hasher>"
+ (!type already_seen 1480)
+ (!srcfileloc "except.cc" 764)
+ nil )
+ (!pair "ttypes_hash_type"
+ (!type already_seen 1478)
+ (!srcfileloc "except.cc" 729)
+ nil )
+ (!pair "ttypes_filter_hasher"
+ (!type already_seen 1477)
+ (!srcfileloc "except.cc" 729)
+ nil )
+ (!pair "hash_table<ttypes_filter_hasher>"
+ (!type already_seen 1478)
+ (!srcfileloc "except.cc" 729)
+ nil )
+ (!pair "action_hash_type"
+ (!type already_seen 1474)
+ (!srcfileloc "except.cc" 210)
+ nil )
+ (!pair "action_record_hasher"
+ (!type already_seen 1473)
+ (!srcfileloc "except.cc" 210)
+ nil )
+ (!pair "hash_table<action_record_hasher>"
+ (!type already_seen 1474)
+ (!srcfileloc "except.cc" 210)
+ nil )
+ (!pair "tree_hash"
+ (!type already_seen 941)
+ (!srcfileloc "except.cc" 151)
+ nil )
+ (!pair "hash_map<tree_hash,tree>"
+ (!type already_seen 1470)
+ (!srcfileloc "except.cc" 151)
+ nil )
+ (!pair "initial_value_pair"
+ (!type already_seen 1424)
+ (!srcfileloc "function.cc" 1265)
+ nil )
+ (!pair "temp_address_hasher"
+ (!type already_seen 1460)
+ (!srcfileloc "function.cc" 608)
+ nil )
+ (!pair "hash_table<temp_address_hasher>"
+ (!type already_seen 1461)
+ (!srcfileloc "function.cc" 608)
+ nil )
+ (!pair "insn_cache_hasher"
+ (!type already_seen 1456)
+ (!srcfileloc "function.cc" 131)
+ nil )
+ (!pair "hash_table<insn_cache_hasher>"
+ (!type already_seen 1457)
+ (!srcfileloc "function.cc" 131)
+ nil )
+ (!pair "by_pieces_constfn"
+ (!type already_seen 2)
+ (!srcfileloc "expr.h" 120)
+ nil )
+ (!pair "sepops"
+ (!type already_seen 1454)
+ (!srcfileloc "expr.h" 55)
+ nil )
+ (!pair "duplicate_eh_regions_map"
+ (!type already_seen 2)
+ (!srcfileloc "except.h" 247)
+ nil )
+ (!pair "hash_map<gimple*,int>"
+ (!type already_seen 373)
+ (!srcfileloc "except.h" 204)
+ nil )
+ (!pair "vec<eh_landing_pad,va_gc>"
+ (!type already_seen 371)
+ (!srcfileloc "except.h" 200)
+ nil )
+ (!pair "vec<eh_region,va_gc>"
+ (!type already_seen 369)
+ (!srcfileloc "except.h" 197)
+ nil )
+ (!pair "eh_region"
+ (!type already_seen 356)
+ (!srcfileloc "except.h" 184)
+ nil )
+ (!pair "eh_catch"
+ (!type already_seen 360)
+ (!srcfileloc "except.h" 183)
+ nil )
+ (!pair "eh_landing_pad"
+ (!type already_seen 364)
+ (!srcfileloc "except.h" 182)
+ nil )
+ (!pair "twi"
+ (!type already_seen 913)
+ (!srcfileloc "emit-rtl.cc" 774)
+ nil )
+ (!pair "const_fixed_hasher"
+ (!type already_seen 1443)
+ (!srcfileloc "emit-rtl.cc" 190)
+ nil )
+ (!pair "hash_table<const_fixed_hasher>"
+ (!type already_seen 1444)
+ (!srcfileloc "emit-rtl.cc" 190)
+ nil )
+ (!pair "const_double_hasher"
+ (!type already_seen 1440)
+ (!srcfileloc "emit-rtl.cc" 181)
+ nil )
+ (!pair "hash_table<const_double_hasher>"
+ (!type already_seen 1441)
+ (!srcfileloc "emit-rtl.cc" 181)
+ nil )
+ (!pair "reg_attr_hasher"
+ (!type already_seen 1437)
+ (!srcfileloc "emit-rtl.cc" 172)
+ nil )
+ (!pair "hash_table<reg_attr_hasher>"
+ (!type already_seen 1438)
+ (!srcfileloc "emit-rtl.cc" 172)
+ nil )
+ (!pair "const_poly_int_hasher"
+ (!type already_seen 1434)
+ (!srcfileloc "emit-rtl.cc" 163)
+ nil )
+ (!pair "hash_table<const_poly_int_hasher>"
+ (!type already_seen 1435)
+ (!srcfileloc "emit-rtl.cc" 163)
+ nil )
+ (!pair "const_wide_int_hasher"
+ (!type already_seen 1431)
+ (!srcfileloc "emit-rtl.cc" 153)
+ nil )
+ (!pair "hash_table<const_wide_int_hasher>"
+ (!type already_seen 1432)
+ (!srcfileloc "emit-rtl.cc" 153)
+ nil )
+ (!pair "const_int_hasher"
+ (!type already_seen 1428)
+ (!srcfileloc "emit-rtl.cc" 145)
+ nil )
+ (!pair "hash_table<const_int_hasher>"
+ (!type already_seen 1429)
+ (!srcfileloc "emit-rtl.cc" 145)
+ nil )
+ (!pair "vec<temp_slot_p,va_gc>"
+ (!type already_seen 1425)
+ (!srcfileloc "emit-rtl.h" 148)
+ nil )
+ (!pair "rtx_note"
+ (!type already_seen 762)
+ (!srcfileloc "emit-rtl.h" 128)
+ nil )
+ (!pair "rtl_ssa::function_info"
+ (!type already_seen 1419)
+ (!srcfileloc "emit-rtl.h" 77)
+ nil )
+ (!pair "predefined_function_abi"
+ (!type already_seen 908)
+ (!srcfileloc "emit-rtl.h" 75)
+ nil )
+ (!pair "temp_slot_p"
+ (!type already_seen 1417)
+ (!srcfileloc "emit-rtl.h" 24)
+ nil )
+ (!pair "elem_op_func"
+ (!type already_seen 2)
+ (!srcfileloc "tree-vect-generic.cc" 159)
+ nil )
+ (!pair "vec<ctf_dtdef_ref,va_gc>"
+ (!type already_seen 1410)
+ (!srcfileloc "btfout.cc" 105)
+ nil )
+ (!pair "btf_datasec_t"
+ (!type already_seen 1409)
+ (!srcfileloc "btfout.cc" 86)
+ nil )
+ (!pair "unsigned"
+ (!type already_seen 1408)
+ (!srcfileloc "btfout.cc" 73)
+ nil )
+ (!pair "hash_map<ctf_dvdef_ref,unsigned>"
+ (!type already_seen 1406)
+ (!srcfileloc "btfout.cc" 73)
+ nil )
+ (!pair "ctf_dvd_preprocess_arg_t"
+ (!type already_seen 1405)
+ (!srcfileloc "ctfout.cc" 72)
+ nil )
+ (!pair "ctf_dtd_preprocess_arg_t"
+ (!type already_seen 1404)
+ (!srcfileloc "ctfout.cc" 66)
+ nil )
+ (!pair "ctf_container_ref"
+ (!type already_seen 1403)
+ (!srcfileloc "ctfc.h" 341)
+ nil )
+ (!pair "ctf_container_t"
+ (!type already_seen 1402)
+ (!srcfileloc "ctfc.h" 334)
+ nil )
+ (!pair "ctfc_dvd_hasher"
+ (!type already_seen 1397)
+ (!srcfileloc "ctfc.h" 278)
+ nil )
+ (!pair "hash_table<ctfc_dvd_hasher>"
+ (!type already_seen 1400)
+ (!srcfileloc "ctfc.h" 278)
+ nil )
+ (!pair "ctfc_dtd_hasher"
+ (!type already_seen 1396)
+ (!srcfileloc "ctfc.h" 276)
+ nil )
+ (!pair "hash_table<ctfc_dtd_hasher>"
+ (!type already_seen 1398)
+ (!srcfileloc "ctfc.h" 276)
+ nil )
+ (!pair "ctf_srcloc_ref"
+ (!type already_seen 1395)
+ (!srcfileloc "ctfc.h" 208)
+ nil )
+ (!pair "ctf_srcloc_t"
+ (!type already_seen 1394)
+ (!srcfileloc "ctfc.h" 206)
+ nil )
+ (!pair "ctf_dtdef_ref"
+ (!type already_seen 1389)
+ (!srcfileloc "ctfc.h" 197)
+ nil )
+ (!pair "ctf_dvdef_ref"
+ (!type already_seen 1392)
+ (!srcfileloc "ctfc.h" 196)
+ nil )
+ (!pair "ctf_dvdef_t"
+ (!type already_seen 1391)
+ (!srcfileloc "ctfc.h" 194)
+ nil )
+ (!pair "ctf_dtdef_t"
+ (!type already_seen 1388)
+ (!srcfileloc "ctfc.h" 181)
+ nil )
+ (!pair "ctf_func_arg_t"
+ (!type already_seen 1385)
+ (!srcfileloc "ctfc.h" 150)
+ nil )
+ (!pair "ctf_dmdef_t"
+ (!type already_seen 1383)
+ (!srcfileloc "ctfc.h" 138)
+ nil )
+ (!pair "ctf_itype_t"
+ (!type already_seen 1382)
+ (!srcfileloc "ctfc.h" 119)
+ nil )
+ (!pair "ctf_sliceinfo_t"
+ (!type already_seen 1380)
+ (!srcfileloc "ctfc.h" 103)
+ nil )
+ (!pair "ctf_funcinfo_t"
+ (!type already_seen 1379)
+ (!srcfileloc "ctfc.h" 96)
+ nil )
+ (!pair "ctf_arinfo_t"
+ (!type already_seen 1378)
+ (!srcfileloc "ctfc.h" 87)
+ nil )
+ (!pair "ctf_encoding_t"
+ (!type already_seen 1377)
+ (!srcfileloc "ctfc.h" 78)
+ nil )
+ (!pair "ctf_strtable_t"
+ (!type already_seen 1376)
+ (!srcfileloc "ctfc.h" 68)
+ nil )
+ (!pair "ctf_string_t"
+ (!type already_seen 1374)
+ (!srcfileloc "ctfc.h" 57)
+ nil )
+ (!pair "ctf_id_t"
+ (!type already_seen 2)
+ (!srcfileloc "ctfc.h" 49)
+ nil )
+ (!pair "loc_list_hash_type"
+ (!type already_seen 1373)
+ (!srcfileloc "dwarf2out.cc" 31940)
+ nil )
+ (!pair "loc_list_hasher"
+ (!type already_seen 1372)
+ (!srcfileloc "dwarf2out.cc" 31940)
+ nil )
+ (!pair "hash_table<loc_list_hasher>"
+ (!type already_seen 1373)
+ (!srcfileloc "dwarf2out.cc" 31940)
+ nil )
+ (!pair "macinfo_hash_type"
+ (!type already_seen 1370)
+ (!srcfileloc "dwarf2out.cc" 28878)
+ nil )
+ (!pair "macinfo_entry_hasher"
+ (!type already_seen 1369)
+ (!srcfileloc "dwarf2out.cc" 28878)
+ nil )
+ (!pair "hash_table<macinfo_entry_hasher>"
+ (!type already_seen 1370)
+ (!srcfileloc "dwarf2out.cc" 28878)
+ nil )
+ (!pair "inline_entry_data_hasher"
+ (!type already_seen 1366)
+ (!srcfileloc "dwarf2out.cc" 24312)
+ nil )
+ (!pair "hash_table<inline_entry_data_hasher>"
+ (!type already_seen 1367)
+ (!srcfileloc "dwarf2out.cc" 24312)
+ nil )
+ (!pair "external_ref_hash_type"
+ (!type already_seen 1359)
+ (!srcfileloc "dwarf2out.cc" 9096)
+ nil )
+ (!pair "external_ref_hasher"
+ (!type already_seen 1358)
+ (!srcfileloc "dwarf2out.cc" 9096)
+ nil )
+ (!pair "hash_table<external_ref_hasher>"
+ (!type already_seen 1359)
+ (!srcfileloc "dwarf2out.cc" 9096)
+ nil )
+ (!pair "decl_hash_type"
+ (!type already_seen 1356)
+ (!srcfileloc "dwarf2out.cc" 8338)
+ nil )
+ (!pair "decl_table_entry_hasher"
+ (!type already_seen 1355)
+ (!srcfileloc "dwarf2out.cc" 8338)
+ nil )
+ (!pair "hash_table<decl_table_entry_hasher>"
+ (!type already_seen 1356)
+ (!srcfileloc "dwarf2out.cc" 8338)
+ nil )
+ (!pair "sym_off_pair"
+ (!type already_seen 1351)
+ (!srcfileloc "dwarf2out.cc" 5950)
+ nil )
+ (!pair "hash_map<tree,sym_off_pair>"
+ (!type already_seen 1352)
+ (!srcfileloc "dwarf2out.cc" 5950)
+ nil )
+ (!pair "addr_hasher"
+ (!type already_seen 1348)
+ (!srcfileloc "dwarf2out.cc" 5072)
+ nil )
+ (!pair "hash_table<addr_hasher>"
+ (!type already_seen 1349)
+ (!srcfileloc "dwarf2out.cc" 5072)
+ nil )
+ (!pair "vec<die_arg_entry,va_gc>"
+ (!type already_seen 1342)
+ (!srcfileloc "dwarf2out.cc" 3707)
+ nil )
+ (!pair "dw_ranges_by_label"
+ (!type already_seen 1300)
+ (!srcfileloc "dwarf2out.cc" 3690)
+ nil )
+ (!pair "vec<dw_ranges_by_label,va_gc>"
+ (!type already_seen 1340)
+ (!srcfileloc "dwarf2out.cc" 3690)
+ nil )
+ (!pair "dw_ranges"
+ (!type already_seen 1298)
+ (!srcfileloc "dwarf2out.cc" 3687)
+ nil )
+ (!pair "vec<dw_ranges,va_gc>"
+ (!type already_seen 1338)
+ (!srcfileloc "dwarf2out.cc" 3687)
+ nil )
+ (!pair "vec<macinfo_entry,va_gc>"
+ (!type already_seen 1336)
+ (!srcfileloc "dwarf2out.cc" 3677)
+ nil )
+ (!pair "vec<pubname_entry,va_gc>"
+ (!type already_seen 1334)
+ (!srcfileloc "dwarf2out.cc" 3669)
+ nil )
+ (!pair "vec<dw_line_info_table*,va_gc>"
+ (!type already_seen 1332)
+ (!srcfileloc "dwarf2out.cc" 3661)
+ nil )
+ (!pair "dw_line_info_table"
+ (!type already_seen 1294)
+ (!srcfileloc "dwarf2out.cc" 3654)
+ nil )
+ (!pair "dw_loc_list_hasher"
+ (!type already_seen 1329)
+ (!srcfileloc "dwarf2out.cc" 3636)
+ nil )
+ (!pair "hash_table<dw_loc_list_hasher>"
+ (!type already_seen 1330)
+ (!srcfileloc "dwarf2out.cc" 3636)
+ nil )
+ (!pair "cached_dw_loc_list"
+ (!type already_seen 1328)
+ (!srcfileloc "dwarf2out.cc" 3624)
+ nil )
+ (!pair "decl_loc_hasher"
+ (!type already_seen 1325)
+ (!srcfileloc "dwarf2out.cc" 3605)
+ nil )
+ (!pair "hash_table<decl_loc_hasher>"
+ (!type already_seen 1326)
+ (!srcfileloc "dwarf2out.cc" 3605)
+ nil )
+ (!pair "var_loc_list"
+ (!type already_seen 1322)
+ (!srcfileloc "dwarf2out.cc" 3583)
+ nil )
+ (!pair "die_arg_entry"
+ (!type already_seen 1319)
+ (!srcfileloc "dwarf2out.cc" 3544)
+ nil )
+ (!pair "block_die_hasher"
+ (!type already_seen 1316)
+ (!srcfileloc "dwarf2out.cc" 3539)
+ nil )
+ (!pair "hash_table<block_die_hasher>"
+ (!type already_seen 1317)
+ (!srcfileloc "dwarf2out.cc" 3539)
+ nil )
+ (!pair "variable_value_hasher"
+ (!type already_seen 1313)
+ (!srcfileloc "dwarf2out.cc" 3529)
+ nil )
+ (!pair "hash_table<variable_value_hasher>"
+ (!type already_seen 1314)
+ (!srcfileloc "dwarf2out.cc" 3529)
+ nil )
+ (!pair "vec<dw_die_ref,va_gc>"
+ (!type already_seen 1310)
+ (!srcfileloc "dwarf2out.cc" 3516)
+ nil )
+ (!pair "decl_die_hasher"
+ (!type already_seen 1307)
+ (!srcfileloc "dwarf2out.cc" 3512)
+ nil )
+ (!pair "hash_table<decl_die_hasher>"
+ (!type already_seen 1308)
+ (!srcfileloc "dwarf2out.cc" 3512)
+ nil )
+ (!pair "dwarf_file_hasher"
+ (!type already_seen 1304)
+ (!srcfileloc "dwarf2out.cc" 3501)
+ nil )
+ (!pair "hash_table<dwarf_file_hasher>"
+ (!type already_seen 1305)
+ (!srcfileloc "dwarf2out.cc" 3501)
+ nil )
+ (!pair "skeleton_chain_node"
+ (!type already_seen 1303)
+ (!srcfileloc "dwarf2out.cc" 3293)
+ nil )
+ (!pair "limbo_die_node"
+ (!type already_seen 1301)
+ (!srcfileloc "dwarf2out.cc" 3285)
+ nil )
+ (!pair "macinfo_entry"
+ (!type already_seen 1299)
+ (!srcfileloc "dwarf2out.cc" 3257)
+ nil )
+ (!pair "pubname_entry"
+ (!type already_seen 1297)
+ (!srcfileloc "dwarf2out.cc" 3229)
+ nil )
+ (!pair "die_node"
+ (!type already_seen 487)
+ (!srcfileloc "dwarf2out.cc" 3198)
+ nil )
+ (!pair "vec<dw_attr_node,va_gc>"
+ (!type already_seen 493)
+ (!srcfileloc "dwarf2out.cc" 3176)
+ nil )
+ (!pair "comdat_type_node"
+ (!type already_seen 490)
+ (!srcfileloc "dwarf2out.cc" 3173)
+ nil )
+ (!pair "vec<dw_line_info_entry,va_gc>"
+ (!type already_seen 1292)
+ (!srcfileloc "dwarf2out.cc" 3123)
+ nil )
+ (!pair "dw_line_info_entry"
+ (!type already_seen 1291)
+ (!srcfileloc "dwarf2out.cc" 3081)
+ nil )
+ (!pair "dw_offset"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3035)
+ nil )
+ (!pair "dw_loc_list_node"
+ (!type already_seen 485)
+ (!srcfileloc "dwarf2out.cc" 1391)
+ nil )
+ (!pair "addr_table_entry"
+ (!type already_seen 481)
+ (!srcfileloc "dwarf2out.cc" 1364)
+ nil )
+ (!pair "var_loc_view"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 1356)
+ nil )
+ (!pair "vec<char*,va_gc>"
+ (!type already_seen 1289)
+ (!srcfileloc "dwarf2out.cc" 275)
+ nil )
+ (!pair "indirect_string_hasher"
+ (!type already_seen 1286)
+ (!srcfileloc "dwarf2out.cc" 233)
+ nil )
+ (!pair "hash_table<indirect_string_hasher>"
+ (!type already_seen 1287)
+ (!srcfileloc "dwarf2out.cc" 233)
+ nil )
+ (!pair "vec<dw_fde_ref,va_gc>"
+ (!type already_seen 1284)
+ (!srcfileloc "dwarf2out.cc" 215)
+ nil )
+ (!pair "reg_saved_in_data"
+ (!type already_seen 1277)
+ (!srcfileloc "dwarf2cfi.cc" 195)
+ nil )
+ (!pair "dw_cfi_row"
+ (!type already_seen 1275)
+ (!srcfileloc "dwarf2cfi.cc" 193)
+ nil )
+ (!pair "dw_cfa_location"
+ (!type already_seen 512)
+ (!srcfileloc "dwarf2cfi.cc" 66)
+ nil )
+ (!pair "hash_map<char*,tree>"
+ (!type already_seen 1273)
+ (!srcfileloc "dwarf2asm.cc" 911)
+ nil )
+ (!pair "dw_attr_node"
+ (!type already_seen 494)
+ (!srcfileloc "dwarf2out.h" 435)
+ nil )
+ (!pair "dw_val_node"
+ (!type already_seen 478)
+ (!srcfileloc "dwarf2out.h" 297)
+ nil )
+ (!pair "dw_discr_value"
+ (!type already_seen 507)
+ (!srcfileloc "dwarf2out.h" 276)
+ nil )
+ (!pair "dw_vec_const"
+ (!type already_seen 499)
+ (!srcfileloc "dwarf2out.h" 256)
+ nil )
+ (!pair "cfa_reg"
+ (!type already_seen 513)
+ (!srcfileloc "dwarf2out.h" 133)
+ nil )
+ (!pair "dw_fde_ref"
+ (!type already_seen 469)
+ (!srcfileloc "dwarf2out.h" 70)
+ nil )
+ (!pair "cfi_vec"
+ (!type already_seen 471)
+ (!srcfileloc "dwarf2out.h" 68)
+ nil )
+ (!pair "vec<dw_cfi_ref,va_gc>"
+ (!type already_seen 472)
+ (!srcfileloc "dwarf2out.h" 68)
+ nil )
+ (!pair "dw_cfi_oprnd"
+ (!type already_seen 475)
+ (!srcfileloc "dwarf2out.h" 57)
+ nil )
+ (!pair "wide_int_ptr"
+ (!type already_seen 495)
+ (!srcfileloc "dwarf2out.h" 33)
+ nil )
+ (!pair "dw_discr_list_ref"
+ (!type already_seen 509)
+ (!srcfileloc "dwarf2out.h" 32)
+ nil )
+ (!pair "dw_loc_list_ref"
+ (!type already_seen 484)
+ (!srcfileloc "dwarf2out.h" 31)
+ nil )
+ (!pair "dw_loc_descr_ref"
+ (!type already_seen 476)
+ (!srcfileloc "dwarf2out.h" 30)
+ nil )
+ (!pair "dw_cfi_ref"
+ (!type already_seen 473)
+ (!srcfileloc "dwarf2out.h" 29)
+ nil )
+ (!pair "dw_val_ref"
+ (!type already_seen 479)
+ (!srcfileloc "dwarf2out.h" 28)
+ nil )
+ (!pair "const_dw_die_ref"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.h" 26)
+ nil )
+ (!pair "dw_die_ref"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.h" 25)
+ nil )
+ (!pair "nowarn_map_t"
+ (!type already_seen 1269)
+ (!srcfileloc "diagnostic-spec.h" 137)
+ nil )
+ (!pair "nowarn_spec_t"
+ (!type already_seen 1268)
+ (!srcfileloc "diagnostic-spec.h" 137)
+ nil )
+ (!pair "hash_map<location_hash,nowarn_spec_t>"
+ (!type already_seen 1269)
+ (!srcfileloc "diagnostic-spec.h" 137)
+ nil )
+ (!pair "modref_tree"
+ (!type already_seen 1267)
+ (!srcfileloc "ipa-modref-tree.h" 738)
+ nil )
+ (!pair "modref_base_node"
+ (!type already_seen 1266)
+ (!srcfileloc "ipa-modref-tree.h" 288)
+ nil )
+ (!pair "modref_ref_node"
+ (!type already_seen 1265)
+ (!srcfileloc "ipa-modref-tree.h" 205)
+ nil )
+ (!pair "modref_summary_lto"
+ (!type already_seen 1245)
+ (!srcfileloc "ipa-modref.cc" 368)
+ nil )
+ (!pair "modref_records_lto"
+ (!type already_seen 1248)
+ (!srcfileloc "ipa-modref.cc" 350)
+ nil )
+ (!pair "modref_tree<tree>"
+ (!type already_seen 1248)
+ (!srcfileloc "ipa-modref.cc" 350)
+ nil )
+ (!pair "fast_function_summary<modref_summary_lto*,va_gc>"
+ (!type already_seen 1252)
+ (!srcfileloc "ipa-modref.cc" 272)
+ nil )
+ (!pair "fast_function_summary<modref_summary*,va_gc>"
+ (!type already_seen 1250)
+ (!srcfileloc "ipa-modref.cc" 260)
+ nil )
+ (!pair "modref_summaries_lto"
+ (!type already_seen 1249)
+ (!srcfileloc "ipa-modref.cc" 255)
+ nil )
+ (!pair "modref_summaries"
+ (!type already_seen 1244)
+ (!srcfileloc "ipa-modref.cc" 230)
+ nil )
+ (!pair "modref_summary"
+ (!type already_seen 1237)
+ (!srcfileloc "ipa-modref.h" 67)
+ nil )
+ (!pair "auto_vec<eaf_flags_t>"
+ (!type already_seen 1236)
+ (!srcfileloc "ipa-modref.h" 34)
+ nil )
+ (!pair "modref_access_node"
+ (!type already_seen 1235)
+ (!srcfileloc "ipa-modref.h" 33)
+ nil )
+ (!pair "auto_vec<modref_access_node>"
+ (!type already_seen 1234)
+ (!srcfileloc "ipa-modref.h" 33)
+ nil )
+ (!pair "eaf_flags_t"
+ (!type already_seen 2)
+ (!srcfileloc "ipa-modref.h" 24)
+ nil )
+ (!pair "modref_records"
+ (!type already_seen 1232)
+ (!srcfileloc "ipa-modref.h" 23)
+ nil )
+ (!pair "modref_tree<alias_set_type>"
+ (!type already_seen 1232)
+ (!srcfileloc "ipa-modref.h" 23)
+ nil )
+ (!pair "ipa_sra_function_summaries"
+ (!type already_seen 1226)
+ (!srcfileloc "ipa-sra.cc" 415)
+ nil )
+ (!pair "isra_param_desc"
+ (!type already_seen 1219)
+ (!srcfileloc "ipa-sra.cc" 282)
+ nil )
+ (!pair "vec<isra_param_desc,va_gc>"
+ (!type already_seen 1223)
+ (!srcfileloc "ipa-sra.cc" 282)
+ nil )
+ (!pair "isra_func_summary"
+ (!type already_seen 1221)
+ (!srcfileloc "ipa-sra.cc" 266)
+ nil )
+ (!pair "vec<param_access*,va_gc>"
+ (!type already_seen 1217)
+ (!srcfileloc "ipa-sra.cc" 170)
+ nil )
+ (!pair "ipa_adjusted_param"
+ (!type already_seen 1085)
+ (!srcfileloc "ipa-param-manipulation.h" 247)
+ nil )
+ (!pair "vec<ipa_adjusted_param,va_gc>"
+ (!type already_seen 1084)
+ (!srcfileloc "ipa-param-manipulation.h" 247)
+ nil )
+ (!pair "odr_type"
+ (!type already_seen 1210)
+ (!srcfileloc "ipa-utils.h" 64)
+ nil )
+ (!pair "odr_type_d"
+ (!type already_seen 1209)
+ (!srcfileloc "ipa-utils.h" 64)
+ nil )
+ (!pair "ipa_vr_ggc_hash_traits"
+ (!type already_seen 1129)
+ (!srcfileloc "ipa-prop.cc" 156)
+ nil )
+ (!pair "hash_table<ipa_vr_ggc_hash_traits>"
+ (!type already_seen 1130)
+ (!srcfileloc "ipa-prop.cc" 156)
+ nil )
+ (!pair "ipa_bit_ggc_hash_traits"
+ (!type already_seen 1126)
+ (!srcfileloc "ipa-prop.cc" 109)
+ nil )
+ (!pair "hash_table<ipa_bit_ggc_hash_traits>"
+ (!type already_seen 1127)
+ (!srcfileloc "ipa-prop.cc" 109)
+ nil )
+ (!pair "function_version_hasher"
+ (!type already_seen 1122)
+ (!srcfileloc "cgraph.cc" 122)
+ nil )
+ (!pair "hash_table<function_version_hasher>"
+ (!type already_seen 1123)
+ (!srcfileloc "cgraph.cc" 122)
+ nil )
+ (!pair "vec<alias_set_entry*,va_gc>"
+ (!type already_seen 1116)
+ (!srcfileloc "alias.cc" 280)
+ nil )
+ (!pair "alias_set_hash"
+ (!type already_seen 1111)
+ (!srcfileloc "alias.cc" 148)
+ nil )
+ (!pair "hash_map<alias_set_hash,int>"
+ (!type already_seen 1112)
+ (!srcfileloc "alias.cc" 148)
+ nil )
+ (!pair "ipa_param_adjustments"
+ (!type already_seen 1082)
+ (!srcfileloc "symtab-clones.h" 36)
+ nil )
+ (!pair "vec<ipa_replace_map*,va_gc>"
+ (!type already_seen 1080)
+ (!srcfileloc "symtab-clones.h" 34)
+ nil )
+ (!pair "clone_info"
+ (!type already_seen 1077)
+ (!srcfileloc "symtab-clones.h" 27)
+ nil )
+ (!pair "thunk_infos_t"
+ (!type already_seen 1106)
+ (!srcfileloc "symtab-thunks.cc" 78)
+ nil )
+ (!pair "unprocessed_thunk"
+ (!type already_seen 1103)
+ (!srcfileloc "symtab-thunks.cc" 62)
+ nil )
+ (!pair "vec<unprocessed_thunk,va_gc>"
+ (!type already_seen 1104)
+ (!srcfileloc "symtab-thunks.cc" 62)
+ nil )
+ (!pair "thunk_info"
+ (!type already_seen 1073)
+ (!srcfileloc "symtab-thunks.h" 38)
+ nil )
+ (!pair "hash_set<cgraph_node*>"
+ (!type already_seen 1099)
+ (!srcfileloc "cgraph.h" 2504)
+ nil )
+ (!pair "FILE"
+ (!type already_seen 1098)
+ (!srcfileloc "cgraph.h" 2500)
+ nil )
+ (!pair "symbol_priority_map"
+ (!type already_seen 1046)
+ (!srcfileloc "cgraph.h" 2498)
+ nil )
+ (!pair "hash_map<symtab_node*,symbol_priority_map>"
+ (!type already_seen 1096)
+ (!srcfileloc "cgraph.h" 2498)
+ nil )
+ (!pair "asmname_hasher"
+ (!type already_seen 1072)
+ (!srcfileloc "cgraph.h" 2495)
+ nil )
+ (!pair "hash_table<asmname_hasher>"
+ (!type already_seen 1094)
+ (!srcfileloc "cgraph.h" 2495)
+ nil )
+ (!pair "section_name_hasher"
+ (!type already_seen 1044)
+ (!srcfileloc "cgraph.h" 2492)
+ nil )
+ (!pair "hash_table<section_name_hasher>"
+ (!type already_seen 1092)
+ (!srcfileloc "cgraph.h" 2492)
+ nil )
+ (!pair "vec<int>"
+ (!type already_seen 1090)
+ (!srcfileloc "cgraph.h" 2462)
+ nil )
+ (!pair "cgraph_2node_hook_list"
+ (!type already_seen 1070)
+ (!srcfileloc "cgraph.h" 2382)
+ nil )
+ (!pair "cgraph_2edge_hook_list"
+ (!type already_seen 1068)
+ (!srcfileloc "cgraph.h" 2376)
+ nil )
+ (!pair "varpool_node_hook_list"
+ (!type already_seen 1066)
+ (!srcfileloc "cgraph.h" 2355)
+ nil )
+ (!pair "cgraph_node_hook_list"
+ (!type already_seen 1064)
+ (!srcfileloc "cgraph.h" 2348)
+ nil )
+ (!pair "cgraph_edge_hook_list"
+ (!type already_seen 1062)
+ (!srcfileloc "cgraph.h" 2341)
+ nil )
+ (!pair "symbol_table"
+ (!type already_seen 1088)
+ (!srcfileloc "cgraph.h" 2215)
+ nil )
+ (!pair "clone_summary"
+ (!type already_seen 1086)
+ (!srcfileloc "cgraph.h" 2206)
+ nil )
+ (!pair "function_summary<clone_info*>"
+ (!type already_seen 1086)
+ (!srcfileloc "cgraph.h" 2206)
+ nil )
+ (!pair "thunk_summary"
+ (!type already_seen 1075)
+ (!srcfileloc "cgraph.h" 2202)
+ nil )
+ (!pair "function_summary<thunk_info*>"
+ (!type already_seen 1075)
+ (!srcfileloc "cgraph.h" 2202)
+ nil )
+ (!pair "cgraph_2node_hook"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2155)
+ nil )
+ (!pair "cgraph_2edge_hook"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2154)
+ nil )
+ (!pair "varpool_node_hook"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2153)
+ nil )
+ (!pair "cgraph_node_hook"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2152)
+ nil )
+ (!pair "cgraph_edge_hook"
+ (!type already_seen 2)
+ (!srcfileloc "cgraph.h" 2151)
+ nil )
+ (!pair "asm_node"
+ (!type already_seen 1060)
+ (!srcfileloc "cgraph.h" 2124)
+ nil )
+ (!pair "availability"
+ (!type already_seen 1059)
+ (!srcfileloc "cgraph.h" 2008)
+ nil )
+ (!pair "cgraph_indirect_call_info"
+ (!type already_seen 824)
+ (!srcfileloc "cgraph.h" 1898)
+ nil )
+ (!pair "gcall"
+ (!type already_seen 782)
+ (!srcfileloc "cgraph.h" 1895)
+ nil )
+ (!pair "ipa_polymorphic_call_context"
+ (!type already_seen 825)
+ (!srcfileloc "cgraph.h" 1570)
+ nil )
+ (!pair "varpool_node_set"
+ (!type already_seen 1056)
+ (!srcfileloc "cgraph.h" 1517)
+ nil )
+ (!pair "cgraph_node_set"
+ (!type already_seen 1054)
+ (!srcfileloc "cgraph.h" 1516)
+ nil )
+ (!pair "cgraph_node_set_def"
+ (!type already_seen 1053)
+ (!srcfileloc "cgraph.h" 1516)
+ nil )
+ (!pair "vl_ptr"
+ (!type already_seen 834)
+ (!srcfileloc "cgraph.h" 1407)
+ nil )
+ (!pair "va_heap"
+ (!type already_seen 835)
+ (!srcfileloc "cgraph.h" 1407)
+ nil )
+ (!pair "vec<ipa_opt_pass,va_heap,vl_ptr>"
+ (!type already_seen 833)
+ (!srcfileloc "cgraph.h" 1407)
+ nil )
+ (!pair "cgraph_simd_clone"
+ (!type already_seen 830)
+ (!srcfileloc "cgraph.h" 1400)
+ nil )
+ (!pair "cgraph_edge_hasher"
+ (!type already_seen 828)
+ (!srcfileloc "cgraph.h" 1394)
+ nil )
+ (!pair "hash_table<cgraph_edge_hasher>"
+ (!type already_seen 827)
+ (!srcfileloc "cgraph.h" 1394)
+ nil )
+ (!pair "auto_vec<cgraph_edge*>"
+ (!type already_seen 1052)
+ (!srcfileloc "cgraph.h" 1150)
+ nil )
+ (!pair "cgraph_edge"
+ (!type already_seen 822)
+ (!srcfileloc "cgraph.h" 1125)
+ nil )
+ (!pair "cgraph_function_version_info"
+ (!type already_seen 1050)
+ (!srcfileloc "cgraph.h" 828)
+ nil )
+ (!pair "cgraph_simd_clone_arg"
+ (!type already_seen 832)
+ (!srcfileloc "cgraph.h" 818)
+ nil )
+ (!pair "cgraph_node"
+ (!type already_seen 819)
+ (!srcfileloc "cgraph.h" 812)
+ nil )
+ (!pair "section_hash_entry"
+ (!type already_seen 343)
+ (!srcfileloc "cgraph.h" 641)
+ nil )
+ (!pair "ipa_ref_list"
+ (!type already_seen 321)
+ (!srcfileloc "cgraph.h" 625)
+ nil )
+ (!pair "symtab_node"
+ (!type already_seen 320)
+ (!srcfileloc "cgraph.h" 296)
+ nil )
+ (!pair "ipa_ref"
+ (!type already_seen 1042)
+ (!srcfileloc "cgraph.h" 173)
+ nil )
+ (!pair "explicit"
+ (!type already_seen 1045)
+ (!srcfileloc "cgraph.h" 113)
+ nil )
+ (!pair "ipa_opt_pass"
+ (!type already_seen 836)
+ (!srcfileloc "cgraph.h" 38)
+ nil )
+ (!pair "ipa_opt_pass_d"
+ (!type already_seen 837)
+ (!srcfileloc "cgraph.h" 38)
+ nil )
+ (!pair "ipa_ref_t"
+ (!type already_seen 1042)
+ (!srcfileloc "ipa-ref.h" 70)
+ nil )
+ (!pair "vec<edge,va_gc>"
+ (!type already_seen 272)
+ (!srcfileloc "basic-block.h" 119)
+ nil )
+ (!pair "edge_def"
+ (!type already_seen 274)
+ (!srcfileloc "basic-block.h" 53)
+ nil )
+ (!pair "cselib_expand_callback"
+ (!type already_seen 2)
+ (!srcfileloc "cselib.h" 89)
+ nil )
+ (!pair "sreal"
+ (!type already_seen 1036)
+ (!srcfileloc "profile-count.h" 1253)
+ nil )
+ (!pair "profile_probability"
+ (!type already_seen 1037)
+ (!srcfileloc "profile-count.h" 659)
+ nil )
+ (!pair "profile_count"
+ (!type already_seen 301)
+ (!srcfileloc "cfg.h" 76)
+ nil )
+ (!pair "vec<basic_block,va_gc>"
+ (!type already_seen 381)
+ (!srcfileloc "cfg.h" 45)
+ nil )
+ (!pair "loop_exit_hasher"
+ (!type already_seen 412)
+ (!srcfileloc "cfgloop.h" 331)
+ nil )
+ (!pair "hash_table<loop_exit_hasher>"
+ (!type already_seen 411)
+ (!srcfileloc "cfgloop.h" 331)
+ nil )
+ (!pair "vec<loop_p,va_gc>"
+ (!type already_seen 279)
+ (!srcfileloc "cfgloop.h" 142)
+ nil )
+ (!pair "loop_p"
+ (!type already_seen 275)
+ (!srcfileloc "cfgloop.h" 95)
+ nil )
+ (!pair "noswitch_section_callback"
+ (!type already_seen 2)
+ (!srcfileloc "output.h" 495)
+ nil )
+ (!pair "unnamed_section_callback"
+ (!type already_seen 2)
+ (!srcfileloc "output.h" 469)
+ nil )
+ (!pair "__gcc_host_wide_int__"
+ (!type already_seen 2)
+ (!srcfileloc "hwint.h" 77)
+ nil )
+ (!pair "used_type_hasher"
+ (!type already_seen 1023)
+ (!srcfileloc "function.h" 495)
+ nil )
+ (!pair "hash_table<used_type_hasher>"
+ (!type already_seen 1024)
+ (!srcfileloc "function.h" 495)
+ nil )
+ (!pair "range_query"
+ (!type already_seen 515)
+ (!srcfileloc "function.h" 310)
+ nil )
+ (!pair "hash_set<tree>"
+ (!type already_seen 468)
+ (!srcfileloc "function.h" 299)
+ nil )
+ (!pair "callinfo_dalloc"
+ (!type already_seen 420)
+ (!srcfileloc "function.h" 234)
+ nil )
+ (!pair "vec<callinfo_dalloc,va_gc>"
+ (!type already_seen 419)
+ (!srcfileloc "function.h" 234)
+ nil )
+ (!pair "callinfo_callee"
+ (!type already_seen 417)
+ (!srcfileloc "function.h" 230)
+ nil )
+ (!pair "vec<callinfo_callee,va_gc>"
+ (!type already_seen 416)
+ (!srcfileloc "function.h" 230)
+ nil )
+ (!pair "vec<call_site_record,va_gc>"
+ (!type already_seen 1009)
+ (!srcfileloc "function.h" 147)
+ nil )
+ (!pair "vec<uchar,va_gc>"
+ (!type already_seen 377)
+ (!srcfileloc "function.h" 145)
+ nil )
+ (!pair "rtx_code_label"
+ (!type already_seen 367)
+ (!srcfileloc "function.h" 140)
+ nil )
+ (!pair "call_site_record"
+ (!type already_seen 1008)
+ (!srcfileloc "function.h" 134)
+ nil )
+ (!pair "vec<rtx_insn*,va_gc>"
+ (!type already_seen 1004)
+ (!srcfileloc "function.h" 131)
+ nil )
+ (!pair "ht_cb"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/symtab.h" 92)
+ nil )
+ (!pair "hashnode"
+ (!type already_seen 16)
+ (!srcfileloc "../libcpp/include/symtab.h" 44)
+ nil )
+ (!pair "cpp_hash_table"
+ (!type already_seen 996)
+ (!srcfileloc "../libcpp/include/symtab.h" 43)
+ nil )
+ (!pair "ht_identifier_ptr"
+ (!type already_seen 16)
+ (!srcfileloc "../libcpp/include/symtab.h" 30)
+ nil )
+ (!pair "ht_identifier"
+ (!type already_seen 15)
+ (!srcfileloc "../libcpp/include/symtab.h" 29)
+ nil )
+ (!pair "libfunc_hasher"
+ (!type already_seen 611)
+ (!srcfileloc "libfuncs.h" 61)
+ nil )
+ (!pair "hash_table<libfunc_hasher>"
+ (!type already_seen 610)
+ (!srcfileloc "libfuncs.h" 61)
+ nil )
+ (!pair "builtin_info_type"
+ (!type already_seen 989)
+ (!srcfileloc "tree-core.h" 2375)
+ nil )
+ (!pair "alias_pair"
+ (!type already_seen 977)
+ (!srcfileloc "tree-core.h" 2339)
+ nil )
+ (!pair "vec<alias_pair,va_gc>"
+ (!type already_seen 993)
+ (!srcfileloc "tree-core.h" 2339)
+ nil )
+ (!pair "record_layout_info"
+ (!type already_seen 980)
+ (!srcfileloc "tree-core.h" 2204)
+ nil )
+ (!pair "vec<tree,va_gc>"
+ (!type already_seen 86)
+ (!srcfileloc "tree-core.h" 1670)
+ nil )
+ (!pair "constructor_elt"
+ (!type already_seen 572)
+ (!srcfileloc "tree-core.h" 1528)
+ nil )
+ (!pair "vec<constructor_elt,va_gc>"
+ (!type already_seen 571)
+ (!srcfileloc "tree-core.h" 1528)
+ nil )
+ (!pair "walk_tree_lh"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1007)
+ nil )
+ (!pair "walk_tree_fn"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1003)
+ nil )
+ (!pair "priority_type"
+ (!type already_seen 2)
+ (!srcfileloc "tree-core.h" 1000)
+ nil )
+ (!pair "poly_int_traits"
+ (!type already_seen 973)
+ (!srcfileloc "tree.h" 6413)
+ nil )
+ (!pair "typename"
+ (!type already_seen 973)
+ (!srcfileloc "tree.h" 6413)
+ nil )
+ (!pair "widest2_int_cst"
+ (!type already_seen 970)
+ (!srcfileloc "tree.h" 6163)
+ nil )
+ (!pair "generic_wide_int<wi::extended_tree<WIDE_INT_MAX_PRECISION*2>>"
+ (!type already_seen 970)
+ (!srcfileloc "tree.h" 6162)
+ nil )
+ (!pair "unextended"
+ (!type already_seen 969)
+ (!srcfileloc "tree.h" 6155)
+ nil )
+ (!pair "generic_wide_int<unextended_tree>"
+ (!type already_seen 969)
+ (!srcfileloc "tree.h" 6155)
+ nil )
+ (!pair "extended"
+ (!type already_seen 966)
+ (!srcfileloc "tree.h" 6148)
+ nil )
+ (!pair "N"
+ (!type already_seen 968)
+ (!srcfileloc "tree.h" 6148)
+ nil )
+ (!pair "extended_tree<N"
+ (!type already_seen 967)
+ (!srcfileloc "tree.h" 6148)
+ nil )
+ (!pair "generic_wide_int<extended_tree<N>>"
+ (!type already_seen 966)
+ (!srcfileloc "tree.h" 6148)
+ nil )
+ (!pair "tree_to_poly_wide_ref"
+ (!type already_seen 964)
+ (!srcfileloc "tree.h" 6139)
+ nil )
+ (!pair "unextended_tree"
+ (!type already_seen 953)
+ (!srcfileloc "tree.h" 6138)
+ nil )
+ (!pair "generic_wide_int<unextended_tree"
+ (!type already_seen 965)
+ (!srcfileloc "tree.h" 6138)
+ nil )
+ (!pair "poly_int<NUM_POLY_INT_COEFFS,generic_wide_int<unextended_tree>>"
+ (!type already_seen 964)
+ (!srcfileloc "tree.h" 6138)
+ nil )
+ (!pair "tree_to_poly_offset_ref"
+ (!type already_seen 962)
+ (!srcfileloc "tree.h" 6136)
+ nil )
+ (!pair "generic_wide_int<offset_extended_tree"
+ (!type already_seen 963)
+ (!srcfileloc "tree.h" 6135)
+ nil )
+ (!pair "poly_int<NUM_POLY_INT_COEFFS,generic_wide_int<offset_extended_tree>>"
+ (!type already_seen 962)
+ (!srcfileloc "tree.h" 6135)
+ nil )
+ (!pair "tree_to_poly_widest_ref"
+ (!type already_seen 960)
+ (!srcfileloc "tree.h" 6133)
+ nil )
+ (!pair "generic_wide_int<widest_extended_tree"
+ (!type already_seen 961)
+ (!srcfileloc "tree.h" 6132)
+ nil )
+ (!pair "poly_int<NUM_POLY_INT_COEFFS,generic_wide_int<widest_extended_tree>>"
+ (!type already_seen 960)
+ (!srcfileloc "tree.h" 6132)
+ nil )
+ (!pair "tree_to_wide_ref"
+ (!type already_seen 959)
+ (!srcfileloc "tree.h" 6124)
+ nil )
+ (!pair "generic_wide_int<wide_int_ref_storage<false,false>>"
+ (!type already_seen 959)
+ (!srcfileloc "tree.h" 6123)
+ nil )
+ (!pair "tree_to_offset_ref"
+ (!type already_seen 958)
+ (!srcfileloc "tree.h" 6122)
+ nil )
+ (!pair "generic_wide_int<offset_extended_tree>"
+ (!type already_seen 958)
+ (!srcfileloc "tree.h" 6122)
+ nil )
+ (!pair "tree_to_widest_ref"
+ (!type already_seen 957)
+ (!srcfileloc "tree.h" 6121)
+ nil )
+ (!pair "generic_wide_int<widest_extended_tree>"
+ (!type already_seen 957)
+ (!srcfileloc "tree.h" 6121)
+ nil )
+ (!pair "offset_extended_tree"
+ (!type already_seen 956)
+ (!srcfileloc "tree.h" 6119)
+ nil )
+ (!pair "extended_tree<ADDR_MAX_PRECISION>"
+ (!type already_seen 956)
+ (!srcfileloc "tree.h" 6119)
+ nil )
+ (!pair "widest_extended_tree"
+ (!type already_seen 955)
+ (!srcfileloc "tree.h" 6118)
+ nil )
+ (!pair "extended_tree<WIDE_INT_MAX_PRECISION>"
+ (!type already_seen 955)
+ (!srcfileloc "tree.h" 6118)
+ nil )
+ (!pair "decl_tree_map"
+ (!type already_seen 951)
+ (!srcfileloc "tree.h" 5785)
+ nil )
+ (!pair "decl_tree_traits"
+ (!type already_seen 950)
+ (!srcfileloc "tree.h" 5785)
+ nil )
+ (!pair "hash_map<tree,tree,decl_tree_traits>"
+ (!type already_seen 951)
+ (!srcfileloc "tree.h" 5785)
+ nil )
+ (!pair "type_tree_cache_map"
+ (!type already_seen 948)
+ (!srcfileloc "tree.h" 5780)
+ nil )
+ (!pair "type_tree_cache_traits"
+ (!type already_seen 947)
+ (!srcfileloc "tree.h" 5780)
+ nil )
+ (!pair "hash_map<tree,tree,type_tree_cache_traits>"
+ (!type already_seen 948)
+ (!srcfileloc "tree.h" 5780)
+ nil )
+ (!pair "decl_tree_cache_map"
+ (!type already_seen 945)
+ (!srcfileloc "tree.h" 5774)
+ nil )
+ (!pair "decl_tree_cache_traits"
+ (!type already_seen 944)
+ (!srcfileloc "tree.h" 5774)
+ nil )
+ (!pair "hash_map<tree,tree,decl_tree_cache_traits>"
+ (!type already_seen 945)
+ (!srcfileloc "tree.h" 5774)
+ nil )
+ (!pair "tree_cache_map"
+ (!type already_seen 943)
+ (!srcfileloc "tree.h" 5768)
+ nil )
+ (!pair "tree_cache_traits"
+ (!type already_seen 942)
+ (!srcfileloc "tree.h" 5768)
+ nil )
+ (!pair "hash_map<tree,tree,tree_cache_traits>"
+ (!type already_seen 943)
+ (!srcfileloc "tree.h" 5768)
+ nil )
+ (!pair "hash_rtx_callback_function"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 3709)
+ nil )
+ (!pair "rtx_equal_p_callback_function"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 3704)
+ nil )
+ (!pair "for_each_inc_dec_fn"
+ (!type already_seen 2)
+ (!srcfileloc "rtl.h" 3700)
+ nil )
+ (!pair "rtx_to_poly_wide_ref"
+ (!type already_seen 926)
+ (!srcfileloc "rtl.h" 2341)
+ nil )
+ (!pair "generic_wide_int<wide_int_ref_storage<false"
+ (!type already_seen 927)
+ (!srcfileloc "rtl.h" 2340)
+ nil )
+ (!pair "poly_int<NUM_POLY_INT_COEFFS,generic_wide_int<wide_int_ref_storage<false,false>>>"
+ (!type already_seen 926)
+ (!srcfileloc "rtl.h" 2340)
+ nil )
+ (!pair "rtx_mode_t"
+ (!type already_seen 925)
+ (!srcfileloc "rtl.h" 2252)
+ nil )
+ (!pair "std::pair<rtx,machine_mode>"
+ (!type already_seen 925)
+ (!srcfileloc "rtl.h" 2252)
+ nil )
+ (!pair "rtx_insn"
+ (!type already_seen 298)
+ (!srcfileloc "rtl.h" 496)
+ nil )
+ (!pair "rtx_insn_list"
+ (!type already_seen 753)
+ (!srcfileloc "rtl.h" 493)
+ nil )
+ (!pair "rtx_expr_list"
+ (!type already_seen 751)
+ (!srcfileloc "rtl.h" 464)
+ nil )
+ (!pair "NUM_POLY_INT_COEFFS"
+ (!type already_seen 914)
+ (!srcfileloc "rtl.h" 291)
+ nil )
+ (!pair "trailing_wide_ints<NUM_POLY_INT_COEFFS>"
+ (!type already_seen 913)
+ (!srcfileloc "rtl.h" 291)
+ nil )
+ (!pair "va_gc"
+ (!type already_seen 80)
+ (!srcfileloc "rtl.h" 267)
+ nil )
+ (!pair "vec<rtx,va_gc>"
+ (!type already_seen 222)
+ (!srcfileloc "rtl.h" 267)
+ nil )
+ (!pair "rtunion"
+ (!type already_seen 211)
+ (!srcfileloc "rtl.h" 237)
+ nil )
+ (!pair "reg_attrs"
+ (!type already_seen 240)
+ (!srcfileloc "rtl.h" 229)
+ nil )
+ (!pair "mem_attrs"
+ (!type already_seen 229)
+ (!srcfileloc "rtl.h" 153)
+ nil )
+ (!pair "trailing_wide_ints"
+ (!type already_seen 898)
+ (!srcfileloc "wide-int.h" 1417)
+ nil )
+ (!pair "trailing_wide_int"
+ (!type already_seen 897)
+ (!srcfileloc "wide-int.h" 1366)
+ nil )
+ (!pair "trailing_wide_int_storage"
+ (!type already_seen 896)
+ (!srcfileloc "wide-int.h" 1366)
+ nil )
+ (!pair "generic_wide_int<trailing_wide_int_storage>"
+ (!type already_seen 897)
+ (!srcfileloc "wide-int.h" 1366)
+ nil )
+ (!pair "fixed_wide_int_storage"
+ (!type already_seen 894)
+ (!srcfileloc "wide-int.h" 1221)
+ nil )
+ (!pair "ASSIGNMENT_OPERATOR"
+ (!type already_seen 892)
+ (!srcfileloc "wide-int.h" 754)
+ nil )
+ (!pair "generic_wide_int"
+ (!type already_seen 891)
+ (!srcfileloc "wide-int.h" 715)
+ nil )
+ (!pair "T1"
+ (!type already_seen 888)
+ (!srcfileloc "wide-int.h" 459)
+ nil )
+ (!pair "int_traits<T1"
+ (!type already_seen 887)
+ (!srcfileloc "wide-int.h" 459)
+ nil )
+ (!pair "fixed_wide_int_storage<int_traits<T1"
+ (!type already_seen 886)
+ (!srcfileloc "wide-int.h" 459)
+ nil )
+ (!pair "generic_wide_int<fixed_wide_int_storage<int_traits<T1>precision>>"
+ (!type already_seen 885)
+ (!srcfileloc "wide-int.h" 459)
+ nil )
+ (!pair "signed_predicate_result"
+ (!type already_seen 2)
+ (!srcfileloc "wide-int.h" 442)
+ nil )
+ (!pair "signed_shift_result_type"
+ (!type already_seen 2)
+ (!srcfileloc "wide-int.h" 441)
+ nil )
+ (!pair "precision"
+ (!type already_seen 881)
+ (!srcfileloc "wide-int.h" 438)
+ nil )
+ (!pair "T2"
+ (!type already_seen 884)
+ (!srcfileloc "wide-int.h" 438)
+ nil )
+ (!pair "int_traits<T2"
+ (!type already_seen 883)
+ (!srcfileloc "wide-int.h" 438)
+ nil )
+ (!pair "fixed_wide_int_storage<int_traits<T2"
+ (!type already_seen 882)
+ (!srcfileloc "wide-int.h" 438)
+ nil )
+ (!pair "generic_wide_int<fixed_wide_int_storage<int_traits<T2>precision>>"
+ (!type already_seen 880)
+ (!srcfileloc "wide-int.h" 438)
+ nil )
+ (!pair "predicate_result"
+ (!type already_seen 2)
+ (!srcfileloc "wide-int.h" 429)
+ nil )
+ (!pair "operator_result"
+ (!type already_seen 2)
+ (!srcfileloc "wide-int.h" 428)
+ nil )
+ (!pair "result_type"
+ (!type already_seen 2)
+ (!srcfileloc "wide-int.h" 420)
+ nil )
+ (!pair "wide_int_ref"
+ (!type already_seen 876)
+ (!srcfileloc "wide-int.h" 334)
+ nil )
+ (!pair "false"
+ (!type already_seen 878)
+ (!srcfileloc "wide-int.h" 334)
+ nil )
+ (!pair "wide_int_ref_storage<false"
+ (!type already_seen 877)
+ (!srcfileloc "wide-int.h" 334)
+ nil )
+ (!pair "generic_wide_int<wide_int_ref_storage<false>>"
+ (!type already_seen 876)
+ (!srcfileloc "wide-int.h" 334)
+ nil )
+ (!pair "widest2_int"
+ (!type already_seen 872)
+ (!srcfileloc "wide-int.h" 327)
+ nil )
+ (!pair "generic_wide_int<fixed_wide_int_storage<WIDE_INT_MAX_PRECISION*2>>"
+ (!type already_seen 872)
+ (!srcfileloc "wide-int.h" 327)
+ nil )
+ (!pair "WIDE_INT_MAX_PRECISION"
+ (!type already_seen 871)
+ (!srcfileloc "wide-int.h" 324)
+ nil )
+ (!pair "ADDR_MAX_PRECISION"
+ (!type already_seen 871)
+ (!srcfileloc "wide-int.h" 323)
+ nil )
+ (!pair "FIXED_WIDE_INT"
+ (!type already_seen 871)
+ (!srcfileloc "wide-int.h" 323)
+ nil )
+ (!pair "wide_int"
+ (!type already_seen 496)
+ (!srcfileloc "wide-int.h" 322)
+ nil )
+ (!pair "wide_int_storage"
+ (!type already_seen 497)
+ (!srcfileloc "wide-int.h" 322)
+ nil )
+ (!pair "generic_wide_int<wide_int_storage>"
+ (!type already_seen 496)
+ (!srcfileloc "wide-int.h" 322)
+ nil )
+ (!pair "bitmap_obstack"
+ (!type already_seen 393)
+ (!srcfileloc "bitmap.h" 349)
+ nil )
+ (!pair "bitmap_element"
+ (!type already_seen 390)
+ (!srcfileloc "bitmap.h" 345)
+ nil )
+ (!pair "BITMAP_WORD"
+ (!type already_seen 2)
+ (!srcfileloc "bitmap.h" 276)
+ nil )
+ (!pair "splay_tree"
+ (!type already_seen 866)
+ (!srcfileloc "../include/splay-tree.h" 127)
+ nil )
+ (!pair "splay_tree_deallocate_fn"
+ (!type already_seen 2)
+ (!srcfileloc "../include/splay-tree.h" 88)
+ nil )
+ (!pair "splay_tree_allocate_fn"
+ (!type already_seen 2)
+ (!srcfileloc "../include/splay-tree.h" 82)
+ nil )
+ (!pair "splay_tree_foreach_fn"
+ (!type already_seen 2)
+ (!srcfileloc "../include/splay-tree.h" 76)
+ nil )
+ (!pair "splay_tree_delete_value_fn"
+ (!type already_seen 2)
+ (!srcfileloc "../include/splay-tree.h" 73)
+ nil )
+ (!pair "splay_tree_delete_key_fn"
+ (!type already_seen 2)
+ (!srcfileloc "../include/splay-tree.h" 67)
+ nil )
+ (!pair "splay_tree_compare_fn"
+ (!type already_seen 2)
+ (!srcfileloc "../include/splay-tree.h" 58)
+ nil )
+ (!pair "splay_tree_node"
+ (!type already_seen 864)
+ (!srcfileloc "../include/splay-tree.h" 54)
+ nil )
+ (!pair "splay_tree_value"
+ (!type already_seen 2)
+ (!srcfileloc "../include/splay-tree.h" 51)
+ nil )
+ (!pair "splay_tree_key"
+ (!type already_seen 2)
+ (!srcfileloc "../include/splay-tree.h" 50)
+ nil )
+ (!pair "htab_t"
+ (!type already_seen 333)
+ (!srcfileloc "../include/hashtab.h" 139)
+ nil )
+ (!pair "htab_free_with_arg"
+ (!type already_seen 2)
+ (!srcfileloc "../include/hashtab.h" 78)
+ nil )
+ (!pair "htab_alloc_with_arg"
+ (!type already_seen 2)
+ (!srcfileloc "../include/hashtab.h" 77)
+ nil )
+ (!pair "htab_free"
+ (!type already_seen 2)
+ (!srcfileloc "../include/hashtab.h" 73)
+ nil )
+ (!pair "htab_alloc"
+ (!type already_seen 2)
+ (!srcfileloc "../include/hashtab.h" 70)
+ nil )
+ (!pair "htab_trav"
+ (!type already_seen 2)
+ (!srcfileloc "../include/hashtab.h" 64)
+ nil )
+ (!pair "htab_del"
+ (!type already_seen 2)
+ (!srcfileloc "../include/hashtab.h" 58)
+ nil )
+ (!pair "htab_eq"
+ (!type already_seen 2)
+ (!srcfileloc "../include/hashtab.h" 54)
+ nil )
+ (!pair "htab_hash"
+ (!type already_seen 2)
+ (!srcfileloc "../include/hashtab.h" 47)
+ nil )
+ (!pair "hashval_t"
+ (!type already_seen 2)
+ (!srcfileloc "../include/hashtab.h" 42)
+ nil )
+ (!pair "target_unit"
+ (!type already_seen 862)
+ (!srcfileloc "defaults.h" 1461)
+ nil )
+ (!pair "TARGET_UNIT"
+ (!type already_seen 862)
+ (!srcfileloc "defaults.h" 1461)
+ nil )
+ (!pair "machine_function"
+ (!type already_seen 422)
+ (!srcfileloc "config/arm/arm.h" 1642)
+ nil )
+ (!pair "arm_stack_offsets"
+ (!type already_seen 423)
+ (!srcfileloc "config/arm/arm.h" 1593)
+ nil )
+ (!pair "arm_cc"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.h" 60)
+ nil )
+ (!pair "uchar"
+ (!type already_seen 8)
+ (!srcfileloc "coretypes.h" 453)
+ nil )
+ (!pair "gt_pointer_operator"
+ (!type already_seen 2)
+ (!srcfileloc "coretypes.h" 450)
+ nil )
+ (!pair "complex_mode"
+ (!type already_seen 744)
+ (!srcfileloc "coretypes.h" 390)
+ nil )
+ (!pair "string_int_pair"
+ (!type already_seen 853)
+ (!srcfileloc "coretypes.h" 363)
+ nil )
+ (!pair "int"
+ (!type already_seen 374)
+ (!srcfileloc "coretypes.h" 363)
+ nil )
+ (!pair "std::pair<char*,int>"
+ (!type already_seen 853)
+ (!srcfileloc "coretypes.h" 363)
+ nil )
+ (!pair "tree_pair"
+ (!type already_seen 852)
+ (!srcfileloc "coretypes.h" 362)
+ nil )
+ (!pair "std::pair<tree,tree>"
+ (!type already_seen 852)
+ (!srcfileloc "coretypes.h" 362)
+ nil )
+ (!pair "reg_class_t"
+ (!type already_seen 2)
+ (!srcfileloc "coretypes.h" 354)
+ nil )
+ (!pair "const_basic_block"
+ (!type already_seen 269)
+ (!srcfileloc "coretypes.h" 334)
+ nil )
+ (!pair "basic_block"
+ (!type already_seen 269)
+ (!srcfileloc "coretypes.h" 333)
+ nil )
+ (!pair "const_edge"
+ (!type already_seen 273)
+ (!srcfileloc "coretypes.h" 331)
+ nil )
+ (!pair "edge"
+ (!type already_seen 273)
+ (!srcfileloc "coretypes.h" 330)
+ nil )
+ (!pair "alias_set_type"
+ (!type already_seen 2)
+ (!srcfileloc "coretypes.h" 327)
+ nil )
+ (!pair "addr_space_t"
+ (!type already_seen 8)
+ (!srcfileloc "coretypes.h" 168)
+ nil )
+ (!pair "diagnostic_input_charset_callback"
+ (!type already_seen 2)
+ (!srcfileloc "coretypes.h" 157)
+ nil )
+ (!pair "section"
+ (!type already_seen 215)
+ (!srcfileloc "coretypes.h" 147)
+ nil )
+ (!pair "gimple_seq"
+ (!type already_seen 282)
+ (!srcfileloc "coretypes.h" 100)
+ nil )
+ (!pair "gimple"
+ (!type already_seen 283)
+ (!srcfileloc "coretypes.h" 100)
+ nil )
+ (!pair "const_tree"
+ (!type already_seen 23)
+ (!srcfileloc "coretypes.h" 98)
+ nil )
+ (!pair "tree"
+ (!type already_seen 23)
+ (!srcfileloc "coretypes.h" 97)
+ nil )
+ (!pair "const_hwivec"
+ (!type already_seen 765)
+ (!srcfileloc "coretypes.h" 95)
+ nil )
+ (!pair "hwivec"
+ (!type already_seen 765)
+ (!srcfileloc "coretypes.h" 94)
+ nil )
+ (!pair "const_rtvec"
+ (!type already_seen 231)
+ (!srcfileloc "coretypes.h" 92)
+ nil )
+ (!pair "rtvec"
+ (!type already_seen 231)
+ (!srcfileloc "coretypes.h" 91)
+ nil )
+ (!pair "fixed_size_mode_pod"
+ (!type already_seen 750)
+ (!srcfileloc "coretypes.h" 71)
+ nil )
+ (!pair "pod_mode<fixed_size_mode>"
+ (!type already_seen 750)
+ (!srcfileloc "coretypes.h" 71)
+ nil )
+ (!pair "scalar_int_mode_pod"
+ (!type already_seen 749)
+ (!srcfileloc "coretypes.h" 70)
+ nil )
+ (!pair "pod_mode<scalar_int_mode>"
+ (!type already_seen 749)
+ (!srcfileloc "coretypes.h" 70)
+ nil )
+ (!pair "scalar_mode_pod"
+ (!type already_seen 43)
+ (!srcfileloc "coretypes.h" 69)
+ nil )
+ (!pair "pod_mode<scalar_mode>"
+ (!type already_seen 43)
+ (!srcfileloc "coretypes.h" 69)
+ nil )
+ (!pair "opt_scalar_float_mode"
+ (!type already_seen 748)
+ (!srcfileloc "coretypes.h" 67)
+ nil )
+ (!pair "scalar_float_mode"
+ (!type already_seen 743)
+ (!srcfileloc "coretypes.h" 67)
+ nil )
+ (!pair "opt_mode<scalar_float_mode>"
+ (!type already_seen 748)
+ (!srcfileloc "coretypes.h" 67)
+ nil )
+ (!pair "opt_scalar_int_mode"
+ (!type already_seen 747)
+ (!srcfileloc "coretypes.h" 66)
+ nil )
+ (!pair "scalar_int_mode"
+ (!type already_seen 290)
+ (!srcfileloc "coretypes.h" 66)
+ nil )
+ (!pair "opt_mode<scalar_int_mode>"
+ (!type already_seen 747)
+ (!srcfileloc "coretypes.h" 66)
+ nil )
+ (!pair "opt_scalar_mode"
+ (!type already_seen 746)
+ (!srcfileloc "coretypes.h" 65)
+ nil )
+ (!pair "scalar_mode"
+ (!type already_seen 44)
+ (!srcfileloc "coretypes.h" 65)
+ nil )
+ (!pair "opt_mode<scalar_mode>"
+ (!type already_seen 746)
+ (!srcfileloc "coretypes.h" 65)
+ nil )
+ (!pair "const_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "coretypes.h" 58)
+ nil )
+ (!pair "rtx"
+ (!type already_seen 100)
+ (!srcfileloc "coretypes.h" 57)
+ nil )
+ (!pair "const_sbitmap"
+ (!type already_seen 742)
+ (!srcfileloc "coretypes.h" 55)
+ nil )
+ (!pair "sbitmap"
+ (!type already_seen 742)
+ (!srcfileloc "coretypes.h" 54)
+ nil )
+ (!pair "const_bitmap"
+ (!type already_seen 387)
+ (!srcfileloc "coretypes.h" 52)
+ nil )
+ (!pair "bitmap"
+ (!type already_seen 387)
+ (!srcfileloc "coretypes.h" 51)
+ nil )
+ (!pair "gcov_type_unsigned"
+ (!type already_seen 2)
+ (!srcfileloc "coretypes.h" 47)
+ nil )
+ (!pair "gcov_type"
+ (!type already_seen 2)
+ (!srcfileloc "coretypes.h" 46)
+ nil )
+ (!pair "location_hash"
+ (!type already_seen 740)
+ (!srcfileloc "input.h" 289)
+ nil )
+ (!pair "hash_map<location_hash,string_concat*>"
+ (!type already_seen 739)
+ (!srcfileloc "input.h" 289)
+ nil )
+ (!pair "string_concat_db"
+ (!type already_seen 736)
+ (!srcfileloc "input.h" 270)
+ nil )
+ (!pair "string_concat"
+ (!type already_seen 734)
+ (!srcfileloc "input.h" 261)
+ nil )
+ (!pair "cpp_cb"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 1427)
+ nil )
+ (!pair "cpp_comment_table"
+ (!type already_seen 725)
+ (!srcfileloc "../libcpp/include/cpplib.h" 1414)
+ nil )
+ (!pair "cpp_comment"
+ (!type already_seen 724)
+ (!srcfileloc "../libcpp/include/cpplib.h" 1400)
+ nil )
+ (!pair "cpp_num"
+ (!type already_seen 723)
+ (!srcfileloc "../libcpp/include/cpplib.h" 1236)
+ nil )
+ (!pair "cpp_num_part"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 1235)
+ nil )
+ (!pair "missing_header_cb"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 703)
+ nil )
+ (!pair "cppchar_signed_t"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 300)
+ nil )
+ (!pair "cppchar_t"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/cpplib.h" 299)
+ nil )
+ (!pair "cpp_dir"
+ (!type already_seen 719)
+ (!srcfileloc "../libcpp/include/cpplib.h" 37)
+ nil )
+ (!pair "cpp_callbacks"
+ (!type already_seen 718)
+ (!srcfileloc "../libcpp/include/cpplib.h" 36)
+ nil )
+ (!pair "cpp_macro"
+ (!type already_seen 20)
+ (!srcfileloc "../libcpp/include/cpplib.h" 35)
+ nil )
+ (!pair "cpp_hashnode"
+ (!type already_seen 12)
+ (!srcfileloc "../libcpp/include/cpplib.h" 34)
+ nil )
+ (!pair "cpp_string"
+ (!type already_seen 695)
+ (!srcfileloc "../libcpp/include/cpplib.h" 33)
+ nil )
+ (!pair "cpp_token"
+ (!type already_seen 691)
+ (!srcfileloc "../libcpp/include/cpplib.h" 32)
+ nil )
+ (!pair "cpp_options"
+ (!type already_seen 717)
+ (!srcfileloc "../libcpp/include/cpplib.h" 31)
+ nil )
+ (!pair "cpp_buffer"
+ (!type already_seen 716)
+ (!srcfileloc "../libcpp/include/cpplib.h" 30)
+ nil )
+ (!pair "cpp_reader"
+ (!type already_seen 715)
+ (!srcfileloc "../libcpp/include/cpplib.h" 29)
+ nil )
+ (!pair "expanded_location"
+ (!type already_seen 706)
+ (!srcfileloc "../libcpp/include/line-map.h" 1319)
+ nil )
+ (!pair "maps_info_macro"
+ (!type already_seen 700)
+ (!srcfileloc "../libcpp/include/line-map.h" 790)
+ nil )
+ (!pair "maps_info_ordinary"
+ (!type already_seen 699)
+ (!srcfileloc "../libcpp/include/line-map.h" 788)
+ nil )
+ (!pair "source_range"
+ (!type already_seen 1)
+ (!srcfileloc "../libcpp/include/line-map.h" 758)
+ nil )
+ (!pair "line_map_macro"
+ (!type already_seen 697)
+ (!srcfileloc "../libcpp/include/line-map.h" 742)
+ nil )
+ (!pair "line_map_ordinary"
+ (!type already_seen 6)
+ (!srcfileloc "../libcpp/include/line-map.h" 727)
+ nil )
+ (!pair "line_map_round_alloc_size_func"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 356)
+ nil )
+ (!pair "line_map_realloc"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 352)
+ nil )
+ (!pair "location_t"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 294)
+ nil )
+ (!pair "linenum_arith_t"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 55)
+ nil )
+ (!pair "linenum_type"
+ (!type already_seen 2)
+ (!srcfileloc "../libcpp/include/line-map.h" 52)
+ nil )
+ (!pair "void *"
+ (!type already_seen 3)
+ (!srcfileloc "gengtype.cc" 5250)
+ nil )
+ (!pair "CONSTEXPR"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5247)
+ nil )
+ (!pair "fixed_size_mode"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5246)
+ nil )
+ (!pair "machine_mode"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5245)
+ nil )
+ (!pair "void"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5244)
+ nil )
+ (!pair "JCF_u2"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5243)
+ nil )
+ (!pair "jword"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5242)
+ nil )
+ (!pair "uintptr_t"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5241)
+ nil )
+ (!pair "uint8"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5240)
+ nil )
+ (!pair "uint32_t"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5239)
+ nil )
+ (!pair "uint64_t"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5238)
+ nil )
+ (!pair "poly_uint64"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5237)
+ nil )
+ (!pair "poly_int64"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5236)
+ nil )
+ (!pair "int64_t"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5235)
+ nil )
+ (!pair "widest_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5234)
+ nil )
+ (!pair "offset_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5233)
+ nil )
+ (!pair "poly_int64_pod"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5232)
+ nil )
+ (!pair "double_int"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5231)
+ nil )
+ (!pair "FIXED_VALUE_TYPE"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5230)
+ nil )
+ (!pair "REAL_VALUE_TYPE"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5229)
+ nil )
+ (!pair "CUMULATIVE_ARGS"
+ (!type already_seen 2)
+ (!srcfileloc "gengtype.cc" 5228)
+ nil )
+)
+(!variables 832
+ (!pair "rust_gc_root"
+ (!type already_seen 23)
+ (!srcfileloc "rust/rust-lang.cc" 385)
+ nil )
+ (!pair "objc_eh_personality_decl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 3840)
+ nil )
+ (!pair "next_v2_EHTYPE_id_decl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 3630)
+ nil )
+ (!pair "next_v2_ehvtable_decl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 3629)
+ nil )
+ (!pair "objc_v2_ehtype_template"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 3628)
+ nil )
+ (!pair "ehtype_list"
+ (!type already_seen 2360)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 3287)
+ nil )
+ (!pair "ivar_offset_refs"
+ (!type already_seen 2369)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2997)
+ nil )
+ (!pair "protlist"
+ (!type already_seen 2366)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2277)
+ nil )
+ (!pair "nonlazy_category_list"
+ (!type already_seen 85)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2189)
+ nil )
+ (!pair "category_list"
+ (!type already_seen 85)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2178)
+ nil )
+ (!pair "nonlazy_class_list"
+ (!type already_seen 85)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2167)
+ nil )
+ (!pair "class_list"
+ (!type already_seen 85)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 2159)
+ nil )
+ (!pair "metaclass_super_refs"
+ (!type already_seen 2360)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1485)
+ nil )
+ (!pair "class_super_refs"
+ (!type already_seen 2360)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1484)
+ nil )
+ (!pair "protrefs"
+ (!type already_seen 2366)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1313)
+ nil )
+ (!pair "msgrefs"
+ (!type already_seen 2363)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1259)
+ nil )
+ (!pair "classrefs"
+ (!type already_seen 2360)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 1092)
+ nil )
+ (!pair "extern_names"
+ (!type already_seen 2347)
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 235)
+ (!options
+ (!option length string "SIZEHASHTABLE")
+ )
+ )
+ (!pair "objc_v2_global_trees"
+ (!type array 2372 nil gc_used "OCTI_V2_MAX"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "objc/objc-next-runtime-abi-02.cc" 185)
+ nil )
+ (!pair "objc_eh_personality_decl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 2788)
+ nil )
+ (!pair "V1_Property_decl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1518)
+ nil )
+ (!pair "V1_ProtocolExt_decl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1517)
+ nil )
+ (!pair "V1_Protocol_OPT_CLS_METHODS_decl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1516)
+ nil )
+ (!pair "V1_Protocol_OPT_NST_METHODS_decl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1515)
+ nil )
+ (!pair "objc_class_ext_template"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1110)
+ nil )
+ (!pair "objc_protocol_extension_template"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1076)
+ nil )
+ (!pair "objc_v1_property_template"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 1046)
+ nil )
+ (!pair "class_reference_idx"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-next-runtime-abi-01.cc" 678)
+ nil )
+ (!pair "objc_eh_personality_decl"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-gnu-runtime-abi-01.cc" 2129)
+ nil )
+ (!pair "num_static_inst"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-gnu-runtime-abi-01.cc" 870)
+ nil )
+ (!pair "meta_base"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-gnu-runtime-abi-01.cc" 193)
+ nil )
+ (!pair "objc_meta"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-gnu-runtime-abi-01.cc" 192)
+ nil )
+ (!pair "property_name_attr_idx"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-runtime-shared-support.cc" 290)
+ nil )
+ (!pair "meth_var_types_idx"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-runtime-shared-support.cc" 289)
+ nil )
+ (!pair "meth_var_names_idx"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-runtime-shared-support.cc" 288)
+ nil )
+ (!pair "objc_rt_trees"
+ (!type array 2373 nil gc_used "OCTI_RT_META_MAX"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "objc/objc-runtime-shared-support.cc" 51)
+ nil )
+ (!pair "objc_parmlist"
+ (!type already_seen 23)
+ (!srcfileloc "objc/objc-act.cc" 8434)
+ nil )
+ (!pair "interface_map"
+ (!type already_seen 2344)
+ (!srcfileloc "objc/objc-act.cc" 3940)
+ nil )
+ (!pair "string_layout_checked"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-act.cc" 3226)
+ nil )
+ (!pair "string_htab"
+ (!type already_seen 2356)
+ (!srcfileloc "objc/objc-act.cc" 258)
+ nil )
+ (!pair "alias_name_map"
+ (!type already_seen 2344)
+ (!srcfileloc "objc/objc-act.cc" 163)
+ nil )
+ (!pair "class_name_map"
+ (!type already_seen 2344)
+ (!srcfileloc "objc/objc-act.cc" 162)
+ nil )
+ (!pair "class_method_map"
+ (!type already_seen 2344)
+ (!srcfileloc "objc/objc-act.cc" 158)
+ nil )
+ (!pair "instance_method_map"
+ (!type already_seen 2344)
+ (!srcfileloc "objc/objc-act.cc" 157)
+ nil )
+ (!pair "objc_global_trees"
+ (!type array 2374 nil gc_used "OCTI_MAX"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "objc/objc-act.h" 438)
+ nil )
+ (!pair "objc_ivar_visibility"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-act.h" 309)
+ nil )
+ (!pair "cat_count"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-act.h" 307)
+ nil )
+ (!pair "imp_count"
+ (!type already_seen 2)
+ (!srcfileloc "objc/objc-act.h" 306)
+ nil )
+ (!pair "imp_list"
+ (!type already_seen 2351)
+ (!srcfileloc "objc/objc-act.h" 305)
+ nil )
+ (!pair "local_variables_to_volatilize"
+ (!type already_seen 85)
+ (!srcfileloc "objc/objc-act.h" 292)
+ nil )
+ (!pair "m2_cardinal_address_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 120)
+ nil )
+ (!pair "m2_packed_boolean_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 119)
+ nil )
+ (!pair "m2_complex128_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 118)
+ nil )
+ (!pair "m2_complex96_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 117)
+ nil )
+ (!pair "m2_complex64_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 116)
+ nil )
+ (!pair "m2_complex32_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 115)
+ nil )
+ (!pair "m2_c_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 114)
+ nil )
+ (!pair "m2_short_complex_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 113)
+ nil )
+ (!pair "m2_long_complex_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 112)
+ nil )
+ (!pair "m2_complex_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 111)
+ nil )
+ (!pair "m2_real128_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 110)
+ nil )
+ (!pair "m2_real96_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 109)
+ nil )
+ (!pair "m2_real64_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 108)
+ nil )
+ (!pair "m2_real32_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 107)
+ nil )
+ (!pair "m2_bitset32_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 106)
+ nil )
+ (!pair "m2_bitset16_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 105)
+ nil )
+ (!pair "m2_bitset8_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 104)
+ nil )
+ (!pair "m2_word64_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 103)
+ nil )
+ (!pair "m2_word32_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 102)
+ nil )
+ (!pair "m2_word16_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 101)
+ nil )
+ (!pair "m2_cardinal64_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 100)
+ nil )
+ (!pair "m2_cardinal32_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 99)
+ nil )
+ (!pair "m2_cardinal16_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 98)
+ nil )
+ (!pair "m2_cardinal8_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 97)
+ nil )
+ (!pair "m2_integer64_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 96)
+ nil )
+ (!pair "m2_integer32_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 95)
+ nil )
+ (!pair "m2_integer16_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 94)
+ nil )
+ (!pair "m2_integer8_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 93)
+ nil )
+ (!pair "m2_iso_word_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 92)
+ nil )
+ (!pair "m2_iso_byte_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 91)
+ nil )
+ (!pair "m2_iso_loc_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 90)
+ nil )
+ (!pair "m2_z_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 89)
+ nil )
+ (!pair "m2_short_card_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 88)
+ nil )
+ (!pair "m2_short_int_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 87)
+ nil )
+ (!pair "m2_long_card_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 86)
+ nil )
+ (!pair "m2_long_int_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 85)
+ nil )
+ (!pair "m2_long_real_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 84)
+ nil )
+ (!pair "m2_real_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 83)
+ nil )
+ (!pair "m2_short_real_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 82)
+ nil )
+ (!pair "m2_cardinal_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 81)
+ nil )
+ (!pair "m2_integer_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 80)
+ nil )
+ (!pair "m2_char_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 79)
+ nil )
+ (!pair "bitnum_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 78)
+ nil )
+ (!pair "bitset_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 77)
+ nil )
+ (!pair "proc_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 76)
+ nil )
+ (!pair "param_type_list"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 74)
+ nil )
+ (!pair "list_of_arrays"
+ (!type already_seen 2342)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 72)
+ nil )
+ (!pair "top_constructor"
+ (!type already_seen 2340)
+ (!srcfileloc "m2/gm2-gcc/m2type.cc" 62)
+ nil )
+ (!pair "last_function"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2statement.cc" 42)
+ nil )
+ (!pair "param_list"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2statement.cc" 40)
+ nil )
+ (!pair "set_full_complement"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2expr.cc" 58)
+ nil )
+ (!pair "gm2_eh_int_type"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2except.cc" 77)
+ nil )
+ (!pair "fn_free_exception_tree"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2except.cc" 76)
+ nil )
+ (!pair "fn_allocate_exception_tree"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2except.cc" 75)
+ nil )
+ (!pair "cleanup_type"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2except.cc" 74)
+ nil )
+ (!pair "fn_rethrow_tree"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2except.cc" 73)
+ nil )
+ (!pair "fn_throw_tree"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2except.cc" 72)
+ nil )
+ (!pair "fn_end_catch_tree"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2except.cc" 71)
+ nil )
+ (!pair "fn_begin_catch_tree"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2except.cc" 70)
+ nil )
+ (!pair "param_list"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2decl.cc" 41)
+ nil )
+ (!pair "param_type_list"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2decl.cc" 40)
+ nil )
+ (!pair "current_function_decl"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2decl.cc" 37)
+ nil )
+ (!pair "builtin_ftype_int_var"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 406)
+ nil )
+ (!pair "floatptr_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 405)
+ nil )
+ (!pair "doubleptr_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 404)
+ nil )
+ (!pair "long_doubleptr_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 403)
+ nil )
+ (!pair "gm2_huge_vall_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 402)
+ nil )
+ (!pair "gm2_huge_val_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 401)
+ nil )
+ (!pair "gm2_huge_valf_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 400)
+ nil )
+ (!pair "gm2_isfinite_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 399)
+ nil )
+ (!pair "gm2_memcpy_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 398)
+ nil )
+ (!pair "gm2_alloca_node"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 397)
+ nil )
+ (!pair "ldouble_ftype_ldouble"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 396)
+ nil )
+ (!pair "double_ftype_double"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 395)
+ nil )
+ (!pair "float_ftype_float"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 394)
+ nil )
+ (!pair "ldouble_ftype_void"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 393)
+ nil )
+ (!pair "float_ftype_void"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 392)
+ nil )
+ (!pair "double_ftype_void"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 391)
+ nil )
+ (!pair "const_ptr_endlink"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 390)
+ nil )
+ (!pair "ptr_endlink"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 389)
+ nil )
+ (!pair "int_endlink"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 388)
+ nil )
+ (!pair "math_endlink"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 387)
+ nil )
+ (!pair "endlink"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 386)
+ nil )
+ (!pair "unsigned_endlink"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 385)
+ nil )
+ (!pair "sizetype_endlink"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-gcc/m2builtins.cc" 384)
+ nil )
+ (!pair "head_binding_level"
+ (!type already_seen 433)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 106)
+ nil )
+ (!pair "global_binding_level"
+ (!type already_seen 433)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 103)
+ nil )
+ (!pair "current_binding_level"
+ (!type already_seen 433)
+ (!srcfileloc "m2/gm2-gcc/m2block.cc" 97)
+ nil )
+ (!pair "rtegraph_current_function"
+ (!type already_seen 2334)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 77)
+ nil )
+ (!pair "constructors"
+ (!type already_seen 2335)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 69)
+ nil )
+ (!pair "externs"
+ (!type already_seen 2335)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 68)
+ nil )
+ (!pair "candidates"
+ (!type already_seen 2335)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 67)
+ nil )
+ (!pair "allnodes"
+ (!type already_seen 2335)
+ (!srcfileloc "m2/gm2-gcc/rtegraph.cc" 66)
+ nil )
+ (!pair "gm2_eh_personality_decl"
+ (!type already_seen 23)
+ (!srcfileloc "m2/gm2-lang.cc" 1011)
+ nil )
+ (!pair "real_file_decl_data"
+ (!type already_seen 323)
+ (!srcfileloc "lto/lto-common.cc" 2714)
+ (!options
+ (!option length string "real_file_count + 1")
+ )
+ )
+ (!pair "all_file_decl_data"
+ (!type already_seen 323)
+ (!srcfileloc "lto/lto-common.cc" 2681)
+ (!options
+ (!option length string "lto_stats.num_input_files + 1")
+ )
+ )
+ (!pair "types_to_register"
+ (!type already_seen 85)
+ (!srcfileloc "lto/lto-common.cc" 228)
+ nil )
+ (!pair "tree_with_vars"
+ (!type already_seen 85)
+ (!srcfileloc "lto/lto-common.h" 28)
+ nil )
+ (!pair "registered_builtin_types"
+ (!type already_seen 23)
+ (!srcfileloc "lto/lto-lang.cc" 790)
+ nil )
+ (!pair "signed_size_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "lto/lto-lang.cc" 244)
+ nil )
+ (!pair "uintmax_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "lto/lto-lang.cc" 243)
+ nil )
+ (!pair "intmax_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "lto/lto-lang.cc" 242)
+ nil )
+ (!pair "wint_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "lto/lto-lang.cc" 241)
+ nil )
+ (!pair "const_string_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "lto/lto-lang.cc" 240)
+ nil )
+ (!pair "string_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "lto/lto-lang.cc" 239)
+ nil )
+ (!pair "builtin_types"
+ (!type array 2375 nil gc_used "(int) BT_LAST + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "lto/lto-lang.cc" 237)
+ nil )
+ (!pair "built_in_attributes"
+ (!type array 2376 nil gc_used "(int) ATTR_LAST"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "lto/lto-lang.cc" 172)
+ nil )
+ (!pair "go_non_zero_struct"
+ (!type already_seen 23)
+ (!srcfileloc "go/go-c.h" 81)
+ nil )
+ (!pair "go_gc_root"
+ (!type already_seen 23)
+ (!srcfileloc "go/go-lang.cc" 592)
+ nil )
+ (!pair "gfc_rank_cst"
+ (!type array 2377 nil gc_used "GFC_MAX_DIMENSIONS + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans-const.h" 65)
+ nil )
+ (!pair "gfor_fndecl_caf_random_init"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 976)
+ nil )
+ (!pair "gfor_fndecl_random_init"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 975)
+ nil )
+ (!pair "gfor_fndecl_ieee_procedure_exit"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 972)
+ nil )
+ (!pair "gfor_fndecl_ieee_procedure_entry"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 971)
+ nil )
+ (!pair "gfor_fndecl_sr_kind"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 968)
+ nil )
+ (!pair "gfor_fndecl_si_kind"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 967)
+ nil )
+ (!pair "gfor_fndecl_sc_kind"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 966)
+ nil )
+ (!pair "gfor_fndecl_is_contiguous0"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 963)
+ nil )
+ (!pair "gfor_fndecl_kill_sub"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 962)
+ nil )
+ (!pair "gfor_fndecl_kill"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 961)
+ nil )
+ (!pair "gfor_fndecl_iargc"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 960)
+ nil )
+ (!pair "gfor_fndecl_convert_char4_to_char1"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 957)
+ nil )
+ (!pair "gfor_fndecl_convert_char1_to_char4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 956)
+ nil )
+ (!pair "gfor_fndecl_select_string_char4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 953)
+ nil )
+ (!pair "gfor_fndecl_adjustr_char4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 952)
+ nil )
+ (!pair "gfor_fndecl_adjustl_char4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 951)
+ nil )
+ (!pair "gfor_fndecl_string_minmax_char4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 950)
+ nil )
+ (!pair "gfor_fndecl_string_trim_char4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 949)
+ nil )
+ (!pair "gfor_fndecl_string_verify_char4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 948)
+ nil )
+ (!pair "gfor_fndecl_string_scan_char4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 947)
+ nil )
+ (!pair "gfor_fndecl_string_index_char4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 946)
+ nil )
+ (!pair "gfor_fndecl_string_len_trim_char4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 945)
+ nil )
+ (!pair "gfor_fndecl_concat_string_char4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 944)
+ nil )
+ (!pair "gfor_fndecl_compare_string_char4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 943)
+ nil )
+ (!pair "gfor_fndecl_select_string"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 942)
+ nil )
+ (!pair "gfor_fndecl_adjustr"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 941)
+ nil )
+ (!pair "gfor_fndecl_adjustl"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 940)
+ nil )
+ (!pair "gfor_fndecl_string_minmax"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 939)
+ nil )
+ (!pair "gfor_fndecl_string_trim"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 938)
+ nil )
+ (!pair "gfor_fndecl_string_verify"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 937)
+ nil )
+ (!pair "gfor_fndecl_string_scan"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 936)
+ nil )
+ (!pair "gfor_fndecl_string_index"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 935)
+ nil )
+ (!pair "gfor_fndecl_string_len_trim"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 934)
+ nil )
+ (!pair "gfor_fndecl_concat_string"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 933)
+ nil )
+ (!pair "gfor_fndecl_compare_string"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 932)
+ nil )
+ (!pair "gfor_fndecl_zgemm"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 929)
+ nil )
+ (!pair "gfor_fndecl_cgemm"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 928)
+ nil )
+ (!pair "gfor_fndecl_dgemm"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 927)
+ nil )
+ (!pair "gfor_fndecl_sgemm"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 926)
+ nil )
+ (!pair "gfor_fndecl_math_ishftc16"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 923)
+ nil )
+ (!pair "gfor_fndecl_math_ishftc8"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 922)
+ nil )
+ (!pair "gfor_fndecl_math_ishftc4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 921)
+ nil )
+ (!pair "gfor_fndecl_math_powi"
+ (!type array 2378 nil gc_used "4"
+ (!type array 2379 nil gc_used "3"
+ (!type already_seen 2313)
+ )
+ )
+ (!srcfileloc "fortran/trans.h" 920)
+ nil )
+ (!pair "gfor_fndecl_caf_is_present"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 908)
+ nil )
+ (!pair "gfor_fndecl_co_sum"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 907)
+ nil )
+ (!pair "gfor_fndecl_co_reduce"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 906)
+ nil )
+ (!pair "gfor_fndecl_co_min"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 905)
+ nil )
+ (!pair "gfor_fndecl_co_max"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 904)
+ nil )
+ (!pair "gfor_fndecl_co_broadcast"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 903)
+ nil )
+ (!pair "gfor_fndecl_caf_team_number"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 902)
+ nil )
+ (!pair "gfor_fndecl_caf_sync_team"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 901)
+ nil )
+ (!pair "gfor_fndecl_caf_get_team"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 900)
+ nil )
+ (!pair "gfor_fndecl_caf_end_team"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 899)
+ nil )
+ (!pair "gfor_fndecl_caf_change_team"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 898)
+ nil )
+ (!pair "gfor_fndecl_caf_form_team"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 897)
+ nil )
+ (!pair "gfor_fndecl_caf_stopped_images"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 896)
+ nil )
+ (!pair "gfor_fndecl_caf_image_status"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 895)
+ nil )
+ (!pair "gfor_fndecl_caf_failed_images"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 894)
+ nil )
+ (!pair "gfor_fndecl_caf_fail_image"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 893)
+ nil )
+ (!pair "gfor_fndecl_caf_event_query"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 892)
+ nil )
+ (!pair "gfor_fndecl_caf_event_wait"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 891)
+ nil )
+ (!pair "gfor_fndecl_caf_event_post"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 890)
+ nil )
+ (!pair "gfor_fndecl_caf_unlock"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 889)
+ nil )
+ (!pair "gfor_fndecl_caf_lock"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 888)
+ nil )
+ (!pair "gfor_fndecl_caf_atomic_op"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 887)
+ nil )
+ (!pair "gfor_fndecl_caf_atomic_cas"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 886)
+ nil )
+ (!pair "gfor_fndecl_caf_atomic_ref"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 885)
+ nil )
+ (!pair "gfor_fndecl_caf_atomic_def"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 884)
+ nil )
+ (!pair "gfor_fndecl_caf_error_stop_str"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 883)
+ nil )
+ (!pair "gfor_fndecl_caf_error_stop"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 882)
+ nil )
+ (!pair "gfor_fndecl_caf_stop_str"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 881)
+ nil )
+ (!pair "gfor_fndecl_caf_stop_numeric"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 880)
+ nil )
+ (!pair "gfor_fndecl_caf_sync_images"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 879)
+ nil )
+ (!pair "gfor_fndecl_caf_sync_memory"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 878)
+ nil )
+ (!pair "gfor_fndecl_caf_sync_all"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 877)
+ nil )
+ (!pair "gfor_fndecl_caf_sendget_by_ref"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 876)
+ nil )
+ (!pair "gfor_fndecl_caf_send_by_ref"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 875)
+ nil )
+ (!pair "gfor_fndecl_caf_get_by_ref"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 874)
+ nil )
+ (!pair "gfor_fndecl_caf_sendget"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 873)
+ nil )
+ (!pair "gfor_fndecl_caf_send"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 872)
+ nil )
+ (!pair "gfor_fndecl_caf_get"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 871)
+ nil )
+ (!pair "gfor_fndecl_caf_deregister"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 870)
+ nil )
+ (!pair "gfor_fndecl_caf_register"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 869)
+ nil )
+ (!pair "gfor_fndecl_caf_num_images"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 868)
+ nil )
+ (!pair "gfor_fndecl_caf_this_image"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 867)
+ nil )
+ (!pair "gfor_fndecl_caf_finalize"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 866)
+ nil )
+ (!pair "gfor_fndecl_caf_init"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 865)
+ nil )
+ (!pair "gfor_fndecl_system_clock8"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 861)
+ nil )
+ (!pair "gfor_fndecl_system_clock4"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 860)
+ nil )
+ (!pair "gfor_fndecl_associated"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 859)
+ nil )
+ (!pair "gfor_fndecl_in_unpack"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 858)
+ nil )
+ (!pair "gfor_fndecl_in_pack"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 857)
+ nil )
+ (!pair "gfor_fndecl_fdate"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 856)
+ nil )
+ (!pair "gfor_fndecl_ctime"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 855)
+ nil )
+ (!pair "gfor_fndecl_ttynam"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 854)
+ nil )
+ (!pair "gfor_fndecl_set_options"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 853)
+ nil )
+ (!pair "gfor_fndecl_set_fpe"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 852)
+ nil )
+ (!pair "gfor_fndecl_generate_error"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 851)
+ nil )
+ (!pair "gfor_fndecl_os_error_at"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 850)
+ nil )
+ (!pair "gfor_fndecl_runtime_warning_at"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 849)
+ nil )
+ (!pair "gfor_fndecl_runtime_error_at"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 848)
+ nil )
+ (!pair "gfor_fndecl_runtime_error"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 847)
+ nil )
+ (!pair "gfor_fndecl_error_stop_string"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 846)
+ nil )
+ (!pair "gfor_fndecl_error_stop_numeric"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 845)
+ nil )
+ (!pair "gfor_fndecl_stop_string"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 844)
+ nil )
+ (!pair "gfor_fndecl_stop_numeric"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 843)
+ nil )
+ (!pair "gfor_fndecl_pause_string"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 842)
+ nil )
+ (!pair "gfor_fndecl_pause_numeric"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 841)
+ nil )
+ (!pair "gfc_static_ctors"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans.h" 722)
+ nil )
+ (!pair "gfc_charlen_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.h" 53)
+ nil )
+ (!pair "logical_false_node"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.h" 49)
+ nil )
+ (!pair "logical_true_node"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.h" 48)
+ nil )
+ (!pair "logical_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.h" 47)
+ nil )
+ (!pair "gfc_complex_float128_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.h" 35)
+ nil )
+ (!pair "gfc_float128_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.h" 34)
+ nil )
+ (!pair "pchar_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.h" 33)
+ nil )
+ (!pair "prvoid_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.h" 32)
+ nil )
+ (!pair "pvoid_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.h" 31)
+ nil )
+ (!pair "ppvoid_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.h" 30)
+ nil )
+ (!pair "gfc_character1_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.h" 29)
+ nil )
+ (!pair "gfc_array_range_type"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.h" 28)
+ nil )
+ (!pair "gfc_array_index_type"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.h" 27)
+ nil )
+ (!pair "gfc_pcharacter_types"
+ (!type array 2380 nil gc_used "MAX_CHARACTER_KINDS + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans-types.cc" 100)
+ nil )
+ (!pair "gfc_character_types"
+ (!type array 2381 nil gc_used "MAX_CHARACTER_KINDS + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans-types.cc" 99)
+ nil )
+ (!pair "gfc_complex_types"
+ (!type array 2382 nil gc_used "MAX_REAL_KINDS + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans-types.cc" 95)
+ nil )
+ (!pair "gfc_real_types"
+ (!type array 2383 nil gc_used "MAX_REAL_KINDS + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans-types.cc" 94)
+ nil )
+ (!pair "gfc_logical_types"
+ (!type array 2384 nil gc_used "MAX_INT_KINDS + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans-types.cc" 90)
+ nil )
+ (!pair "gfc_integer_types"
+ (!type array 2385 nil gc_used "MAX_INT_KINDS + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans-types.cc" 89)
+ nil )
+ (!pair "gfc_cfi_descriptor_base"
+ (!type array 2386 nil gc_used "2 * (CFI_MAX_RANK + 2)"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans-types.cc" 81)
+ nil )
+ (!pair "gfc_array_descriptor_base_caf"
+ (!type array 2387 nil gc_used "2 * (GFC_MAX_DIMENSIONS+1)"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans-types.cc" 80)
+ nil )
+ (!pair "gfc_array_descriptor_base"
+ (!type array 2388 nil gc_used "2 * (GFC_MAX_DIMENSIONS+1)"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans-types.cc" 79)
+ nil )
+ (!pair "gfc_max_array_element_size"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.cc" 78)
+ nil )
+ (!pair "gfc_desc_dim_type"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-types.cc" 77)
+ nil )
+ (!pair "select_struct"
+ (!type array 2389 nil gc_used "2"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans-stmt.cc" 3290)
+ nil )
+ (!pair "dt_parm"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-io.cc" 162)
+ nil )
+ (!pair "iocall"
+ (!type array 2390 nil gc_used "IOCALL_NUM"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "fortran/trans-io.cc" 153)
+ nil )
+ (!pair "st_parameter_field"
+ (!type array 2391 nil gc_used ""
+ (!type already_seen 2299)
+ )
+ (!srcfileloc "fortran/trans-io.cc" 101)
+ nil )
+ (!pair "st_parameter"
+ (!type array 2392 nil gc_used ""
+ (!type already_seen 2300)
+ )
+ (!srcfileloc "fortran/trans-io.cc" 90)
+ nil )
+ (!pair "gfc_intrinsic_map"
+ (!type array 2393 nil gc_used ""
+ (!type already_seen 2298)
+ )
+ (!srcfileloc "fortran/trans-intrinsic.cc" 117)
+ nil )
+ (!pair "module_htab"
+ (!type already_seen 2293)
+ (!srcfileloc "fortran/trans-decl.cc" 5114)
+ nil )
+ (!pair "saved_local_decls"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-decl.cc" 68)
+ nil )
+ (!pair "saved_parent_function_decls"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-decl.cc" 64)
+ nil )
+ (!pair "saved_function_decls"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-decl.cc" 63)
+ nil )
+ (!pair "parent_fake_result_decl"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-decl.cc" 58)
+ nil )
+ (!pair "current_fake_result_decl"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/trans-decl.cc" 57)
+ nil )
+ (!pair "global_binding_level"
+ (!type already_seen 433)
+ (!srcfileloc "fortran/f95-lang.cc" 330)
+ nil )
+ (!pair "current_binding_level"
+ (!type already_seen 433)
+ (!srcfileloc "fortran/f95-lang.cc" 326)
+ nil )
+ (!pair "current_translation_unit"
+ (!type already_seen 23)
+ (!srcfileloc "fortran/f95-lang.cc" 204)
+ nil )
+ (!pair "free_binding_level"
+ (!type already_seen 433)
+ (!srcfileloc "fortran/f95-lang.cc" 198)
+ nil )
+ (!pair "tinfo_types"
+ (!type array 2394 nil gc_used "TK_END"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "d/typeinfo.cc" 95)
+ nil )
+ (!pair "d_eh_personality_decl"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-lang.cc" 1863)
+ nil )
+ (!pair "d_keep_list"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-lang.cc" 1852)
+ nil )
+ (!pair "global_declarations"
+ (!type already_seen 85)
+ (!srcfileloc "d/d-lang.cc" 95)
+ nil )
+ (!pair "global_context"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-lang.cc" 92)
+ nil )
+ (!pair "builtin_types"
+ (!type array 2395 nil gc_used "(int) BT_LAST + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "d/d-builtins.cc" 1136)
+ nil )
+ (!pair "built_in_attributes"
+ (!type array 2396 nil gc_used "(int) ATTR_LAST"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "d/d-builtins.cc" 1040)
+ nil )
+ (!pair "signed_size_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-builtins.cc" 889)
+ nil )
+ (!pair "uintmax_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-builtins.cc" 888)
+ nil )
+ (!pair "intmax_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-builtins.cc" 887)
+ nil )
+ (!pair "wint_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-builtins.cc" 886)
+ nil )
+ (!pair "const_string_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-builtins.cc" 885)
+ nil )
+ (!pair "string_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "d/d-builtins.cc" 884)
+ nil )
+ (!pair "gcc_builtins_types"
+ (!type already_seen 85)
+ (!srcfileloc "d/d-builtins.cc" 48)
+ nil )
+ (!pair "gcc_builtins_functions"
+ (!type already_seen 85)
+ (!srcfileloc "d/d-builtins.cc" 47)
+ nil )
+ (!pair "d_global_trees"
+ (!type array 2397 nil gc_used "DTI_MAX"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "d/d-tree.h" 444)
+ nil )
+ (!pair "global_binding_level"
+ (!type already_seen 433)
+ (!srcfileloc "d/d-tree.h" 135)
+ nil )
+ (!pair "current_binding_level"
+ (!type already_seen 433)
+ (!srcfileloc "d/d-tree.h" 134)
+ nil )
+ (!pair "vlt_register_set_fndecl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/vtable-class-hierarchy.cc" 133)
+ nil )
+ (!pair "vlt_register_pairs_fndecl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/vtable-class-hierarchy.cc" 132)
+ nil )
+ (!pair "vlt_saved_class_info"
+ (!type already_seen 85)
+ (!srcfileloc "cp/vtable-class-hierarchy.cc" 131)
+ nil )
+ (!pair "deleted_copy_types"
+ (!type already_seen 467)
+ (!srcfileloc "cp/tree.cc" 4522)
+ nil )
+ (!pair "list_hash_table"
+ (!type already_seen 2264)
+ (!srcfileloc "cp/tree.cc" 2178)
+ nil )
+ (!pair "cplus_array_htab"
+ (!type already_seen 2260)
+ (!srcfileloc "cp/tree.cc" 1042)
+ nil )
+ (!pair "deferred_access_no_check"
+ (!type already_seen 2)
+ (!srcfileloc "cp/semantics.cc" 138)
+ nil )
+ (!pair "deferred_access_stack"
+ (!type already_seen 2254)
+ (!srcfileloc "cp/semantics.cc" 137)
+ nil )
+ (!pair "tinfo_descs"
+ (!type already_seen 2251)
+ (!srcfileloc "cp/rtti.cc" 122)
+ nil )
+ (!pair "dguide_cache"
+ (!type already_seen 2248)
+ (!srcfileloc "cp/pt.cc" 30455)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "explicit_specifier_map"
+ (!type already_seen 946)
+ (!srcfileloc "cp/pt.cc" 14202)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "defarg_inst"
+ (!type already_seen 1562)
+ (!srcfileloc "cp/pt.cc" 14064)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "last_error_tinst_level"
+ (!type already_seen 2007)
+ (!srcfileloc "cp/pt.cc" 11100)
+ nil )
+ (!pair "pending_template_freelist_head"
+ (!type already_seen 2232)
+ (!srcfileloc "cp/pt.cc" 9559)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "tinst_level_freelist_head"
+ (!type already_seen 2007)
+ (!srcfileloc "cp/pt.cc" 9550)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "tree_list_freelist_head"
+ (!type already_seen 23)
+ (!srcfileloc "cp/pt.cc" 9541)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "defaulted_ttp_cache"
+ (!type already_seen 394)
+ (!srcfileloc "cp/pt.cc" 7903)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "tparm_obj_values"
+ (!type already_seen 394)
+ (!srcfileloc "cp/pt.cc" 7267)
+ nil )
+ (!pair "ctp_table"
+ (!type already_seen 2239)
+ (!srcfileloc "cp/pt.cc" 4524)
+ nil )
+ (!pair "canonical_template_parms"
+ (!type already_seen 85)
+ (!srcfileloc "cp/pt.cc" 124)
+ nil )
+ (!pair "type_specializations"
+ (!type already_seen 2235)
+ (!srcfileloc "cp/pt.cc" 118)
+ nil )
+ (!pair "decl_specializations"
+ (!type already_seen 2235)
+ (!srcfileloc "cp/pt.cc" 117)
+ nil )
+ (!pair "saved_access_scope"
+ (!type already_seen 85)
+ (!srcfileloc "cp/pt.cc" 73)
+ nil )
+ (!pair "current_tinst_level"
+ (!type already_seen 2007)
+ (!srcfileloc "cp/pt.cc" 71)
+ nil )
+ (!pair "last_pending_template"
+ (!type already_seen 2232)
+ (!srcfileloc "cp/pt.cc" 64)
+ nil )
+ (!pair "pending_templates"
+ (!type already_seen 2232)
+ (!srcfileloc "cp/pt.cc" 63)
+ nil )
+ (!pair "generic_parm_count"
+ (!type already_seen 2)
+ (!srcfileloc "cp/parser.cc" 49593)
+ nil )
+ (!pair "cp_parser_decl_specs_attrs"
+ (!type already_seen 85)
+ (!srcfileloc "cp/parser.cc" 19407)
+ nil )
+ (!pair "cp_parser_context_free_list"
+ (!type already_seen 2044)
+ (!srcfileloc "cp/parser.cc" 2103)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "the_parser"
+ (!type already_seen 2052)
+ (!srcfileloc "cp/parser.cc" 695)
+ nil )
+ (!pair "free_saved_scope"
+ (!type already_seen 1986)
+ (!srcfileloc "cp/name-lookup.cc" 8109)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "free_binding_level"
+ (!type already_seen 83)
+ (!srcfileloc "cp/name-lookup.cc" 4236)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "extern_c_decls"
+ (!type already_seen 92)
+ (!srcfileloc "cp/name-lookup.cc" 2986)
+ nil )
+ (!pair "free_bindings"
+ (!type already_seen 90)
+ (!srcfileloc "cp/name-lookup.cc" 2376)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "headers"
+ (!type already_seen 387)
+ (!srcfileloc "cp/module.cc" 16939)
+ nil )
+ (!pair "macro_exports"
+ (!type already_seen 2212)
+ (!srcfileloc "cp/module.cc" 16935)
+ nil )
+ (!pair "note_defs"
+ (!type already_seen 2206)
+ (!srcfileloc "cp/module.cc" 4600)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "partial_specializations"
+ (!type already_seen 85)
+ (!srcfileloc "cp/module.cc" 3965)
+ nil )
+ (!pair "class_members"
+ (!type already_seen 85)
+ (!srcfileloc "cp/module.cc" 3956)
+ nil )
+ (!pair "modules_hash"
+ (!type already_seen 2198)
+ (!srcfileloc "cp/module.cc" 3935)
+ nil )
+ (!pair "modules"
+ (!type already_seen 2196)
+ (!srcfileloc "cp/module.cc" 3932)
+ nil )
+ (!pair "fixed_trees"
+ (!type already_seen 85)
+ (!srcfileloc "cp/module.cc" 3913)
+ nil )
+ (!pair "comp_cat_cache"
+ (!type array 2398 nil gc_used "cc_last"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "cp/method.cc" 933)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "thunk_labelno"
+ (!type already_seen 2)
+ (!srcfileloc "cp/method.cc" 183)
+ nil )
+ (!pair "subst_identifiers"
+ (!type array 2399 nil gc_used "SUBID_MAX"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "cp/mangle.cc" 155)
+ nil )
+ (!pair "G"
+ (!type already_seen 2157)
+ (!srcfileloc "cp/mangle.cc" 126)
+ nil )
+ (!pair "subsumption_cache"
+ (!type already_seen 2156)
+ (!srcfileloc "cp/logic.cc" 751)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "conv_type_names"
+ (!type already_seen 2150)
+ (!srcfileloc "cp/lex.cc" 821)
+ nil )
+ (!pair "lambda_scope_stack"
+ (!type already_seen 2145)
+ (!srcfileloc "cp/lambda.cc" 1469)
+ nil )
+ (!pair "lambda_scope"
+ (!type already_seen 2143)
+ (!srcfileloc "cp/lambda.cc" 1467)
+ nil )
+ (!pair "max_id"
+ (!type already_seen 23)
+ (!srcfileloc "cp/lambda.cc" 488)
+ nil )
+ (!pair "ptr_id"
+ (!type already_seen 23)
+ (!srcfileloc "cp/lambda.cc" 487)
+ nil )
+ (!pair "nsdmi_inst"
+ (!type already_seen 946)
+ (!srcfileloc "cp/init.cc" 574)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "fn"
+ (!type already_seen 23)
+ (!srcfileloc "cp/init.cc" 53)
+ nil )
+ (!pair "global_friend"
+ (!type already_seen 23)
+ (!srcfileloc "cp/friend.cc" 35)
+ nil )
+ (!pair "pending_noexcept_checks"
+ (!type already_seen 2138)
+ (!srcfileloc "cp/except.cc" 1102)
+ nil )
+ (!pair "static_init_fini_fns"
+ (!type array 2400 nil gc_used "2"
+ (!type already_seen 2135)
+ )
+ (!srcfileloc "cp/decl2.cc" 170)
+ nil )
+ (!pair "mangled_decls"
+ (!type already_seen 2132)
+ (!srcfileloc "cp/decl2.cc" 125)
+ nil )
+ (!pair "mangling_aliases"
+ (!type already_seen 85)
+ (!srcfileloc "cp/decl2.cc" 89)
+ nil )
+ (!pair "no_linkage_decls"
+ (!type already_seen 85)
+ (!srcfileloc "cp/decl2.cc" 85)
+ nil )
+ (!pair "deferred_fns"
+ (!type already_seen 85)
+ (!srcfileloc "cp/decl2.cc" 81)
+ nil )
+ (!pair "pending_statics"
+ (!type already_seen 85)
+ (!srcfileloc "cp/decl2.cc" 77)
+ nil )
+ (!pair "start_cleanup_cnt"
+ (!type already_seen 2)
+ (!srcfileloc "cp/decl.cc" 9581)
+ nil )
+ (!pair "decomp_type_table"
+ (!type already_seen 946)
+ (!srcfileloc "cp/decl.cc" 9040)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "typename_htab"
+ (!type already_seen 2127)
+ (!srcfileloc "cp/decl.cc" 4178)
+ nil )
+ (!pair "local_entities"
+ (!type already_seen 85)
+ (!srcfileloc "cp/decl.cc" 906)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "incomplete_vars"
+ (!type already_seen 2122)
+ (!srcfileloc "cp/decl.cc" 255)
+ nil )
+ (!pair "debug_type_map"
+ (!type already_seen 949)
+ (!srcfileloc "cp/cp-objcp-common.cc" 133)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "cp_eh_personality_decl"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-lang.cc" 149)
+ nil )
+ (!pair "source_location_id"
+ (!type already_seen 2)
+ (!srcfileloc "cp/cp-gimplify.cc" 3465)
+ nil )
+ (!pair "source_location_table"
+ (!type already_seen 2119)
+ (!srcfileloc "cp/cp-gimplify.cc" 3464)
+ nil )
+ (!pair "fold_caches"
+ (!type array 2401 nil gc_used "2"
+ (!type already_seen 394)
+ )
+ (!srcfileloc "cp/cp-gimplify.cc" 2514)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "to_ramp"
+ (!type already_seen 394)
+ (!srcfileloc "cp/coroutines.cc" 560)
+ nil )
+ (!pair "void_coro_handle_type"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 272)
+ nil )
+ (!pair "coro_handle_templ"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 271)
+ nil )
+ (!pair "coro_traits_templ"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 270)
+ nil )
+ (!pair "coro_frame_i_a_r_c_id"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 227)
+ nil )
+ (!pair "coro_actor_continue_id"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 226)
+ nil )
+ (!pair "coro_self_handle_id"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 225)
+ nil )
+ (!pair "coro_resume_index_id"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 224)
+ nil )
+ (!pair "coro_frame_needs_free_id"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 223)
+ nil )
+ (!pair "coro_promise_id"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 222)
+ nil )
+ (!pair "coro_destroy_fn_id"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 221)
+ nil )
+ (!pair "coro_resume_fn_id"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 220)
+ nil )
+ (!pair "coro_await_resume_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 216)
+ nil )
+ (!pair "coro_await_suspend_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 215)
+ nil )
+ (!pair "coro_await_ready_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 214)
+ nil )
+ (!pair "coro_unhandled_exception_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 210)
+ nil )
+ (!pair "coro_gro_on_allocation_fail_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 209)
+ nil )
+ (!pair "coro_get_return_object_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 208)
+ nil )
+ (!pair "coro_from_address_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 207)
+ nil )
+ (!pair "coro_address_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 206)
+ nil )
+ (!pair "coro_resume_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 205)
+ nil )
+ (!pair "coro_yield_value_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 204)
+ nil )
+ (!pair "coro_return_value_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 203)
+ nil )
+ (!pair "coro_return_void_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 202)
+ nil )
+ (!pair "coro_final_suspend_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 201)
+ nil )
+ (!pair "coro_initial_suspend_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 200)
+ nil )
+ (!pair "coro_await_transform_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 199)
+ nil )
+ (!pair "coro_promise_type_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 195)
+ nil )
+ (!pair "coro_handle_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 194)
+ nil )
+ (!pair "coro_traits_identifier"
+ (!type already_seen 23)
+ (!srcfileloc "cp/coroutines.cc" 193)
+ nil )
+ (!pair "coroutine_info_table"
+ (!type already_seen 2099)
+ (!srcfileloc "cp/coroutines.cc" 114)
+ nil )
+ (!pair "decl_satisfied_cache"
+ (!type already_seen 394)
+ (!srcfileloc "cp/constraint.cc" 2617)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "sat_cache"
+ (!type already_seen 2094)
+ (!srcfileloc "cp/constraint.cc" 2614)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "failed_type_completions"
+ (!type already_seen 85)
+ (!srcfileloc "cp/constraint.cc" 2466)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "decl_constraints"
+ (!type already_seen 946)
+ (!srcfileloc "cp/constraint.cc" 1289)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "normalized_map"
+ (!type already_seen 394)
+ (!srcfileloc "cp/constraint.cc" 903)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "atom_cache"
+ (!type already_seen 2090)
+ (!srcfileloc "cp/constraint.cc" 814)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "norm_cache"
+ (!type already_seen 2088)
+ (!srcfileloc "cp/constraint.cc" 735)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "decl_post_fn"
+ (!type already_seen 394)
+ (!srcfileloc "cp/contracts.cc" 1301)
+ nil )
+ (!pair "decl_pre_fn"
+ (!type already_seen 394)
+ (!srcfileloc "cp/contracts.cc" 1300)
+ nil )
+ (!pair "cv_cache"
+ (!type already_seen 394)
+ (!srcfileloc "cp/constexpr.cc" 8574)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "fundef_copies_table"
+ (!type already_seen 952)
+ (!srcfileloc "cp/constexpr.cc" 1366)
+ nil )
+ (!pair "constexpr_call_table"
+ (!type already_seen 2078)
+ (!srcfileloc "cp/constexpr.cc" 1317)
+ nil )
+ (!pair "constexpr_fundef_table"
+ (!type already_seen 2072)
+ (!srcfileloc "cp/constexpr.cc" 189)
+ nil )
+ (!pair "dvirt_fn"
+ (!type already_seen 23)
+ (!srcfileloc "cp/class.cc" 9919)
+ nil )
+ (!pair "abort_fndecl_addr"
+ (!type already_seen 23)
+ (!srcfileloc "cp/class.cc" 9918)
+ nil )
+ (!pair "enum_to_min_precision"
+ (!type already_seen 2068)
+ (!srcfileloc "cp/class.cc" 3407)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "sizeof_biggest_empty_class"
+ (!type already_seen 23)
+ (!srcfileloc "cp/class.cc" 112)
+ nil )
+ (!pair "default_arg_context"
+ (!type already_seen 85)
+ (!srcfileloc "cp/call.cc" 9162)
+ nil )
+ (!pair "unemitted_tinfo_decls"
+ (!type already_seen 85)
+ (!srcfileloc "cp/cp-tree.h" 7499)
+ nil )
+ (!pair "ovl_op_alternate"
+ (!type array 2402 nil gc_used "OVL_OP_MAX"
+ (!type already_seen 8)
+ )
+ (!srcfileloc "cp/cp-tree.h" 6153)
+ nil )
+ (!pair "ovl_op_mapping"
+ (!type array 2403 nil gc_used "MAX_TREE_CODES"
+ (!type already_seen 8)
+ )
+ (!srcfileloc "cp/cp-tree.h" 6151)
+ nil )
+ (!pair "ovl_op_info"
+ (!type array 2404 nil gc_used "2"
+ (!type array 2405 nil gc_used "OVL_OP_MAX"
+ (!type already_seen 2002)
+ )
+ )
+ (!srcfileloc "cp/cp-tree.h" 6149)
+ nil )
+ (!pair "dynamic_initializers"
+ (!type already_seen 952)
+ (!srcfileloc "cp/cp-tree.h" 5841)
+ nil )
+ (!pair "tls_aggregates"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 5837)
+ nil )
+ (!pair "static_aggregates"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 5835)
+ nil )
+ (!pair "keyed_classes"
+ (!type already_seen 85)
+ (!srcfileloc "cp/cp-tree.h" 5740)
+ nil )
+ (!pair "static_decls"
+ (!type already_seen 85)
+ (!srcfileloc "cp/cp-tree.h" 5736)
+ nil )
+ (!pair "integer_two_node"
+ (!type already_seen 23)
+ (!srcfileloc "cp/cp-tree.h" 5641)
+ nil )
+ (!pair "scope_chain"
+ (!type already_seen 1986)
+ (!srcfileloc "cp/cp-tree.h" 1887)
+ nil )
+ (!pair "cp_global_trees"
+ (!type array 2406 nil gc_used "CPTI_MAX"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "cp/cp-tree.h" 241)
+ nil )
+ (!pair "current_omp_begin_assumes"
+ (!type already_seen 2)
+ (!srcfileloc "c/c-lang.h" 73)
+ nil )
+ (!pair "current_omp_declare_target_attribute"
+ (!type already_seen 1969)
+ (!srcfileloc "c/c-lang.h" 70)
+ nil )
+ (!pair "the_parser"
+ (!type already_seen 1953)
+ (!srcfileloc "c/c-parser.cc" 282)
+ nil )
+ (!pair "locus"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-format.cc" 71)
+ nil )
+ (!pair "local_cgraph_node_ptr_node"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-format.cc" 70)
+ nil )
+ (!pair "local_gimple_ptr_node"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-format.cc" 69)
+ nil )
+ (!pair "local_event_ptr_node"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-format.cc" 68)
+ nil )
+ (!pair "local_tree_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-format.cc" 67)
+ nil )
+ (!pair "options_stack"
+ (!type already_seen 1936)
+ (!srcfileloc "c-family/c-pragma.cc" 1239)
+ nil )
+ (!pair "pending_redefine_extname"
+ (!type already_seen 1933)
+ (!srcfileloc "c-family/c-pragma.cc" 496)
+ nil )
+ (!pair "pending_weaks"
+ (!type already_seen 1930)
+ (!srcfileloc "c-family/c-pragma.cc" 248)
+ nil )
+ (!pair "alignment_stack"
+ (!type already_seen 1927)
+ (!srcfileloc "c-family/c-pragma.cc" 53)
+ nil )
+ (!pair "pragma_extern_prefix"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-pragma.h" 284)
+ nil )
+ (!pair "lazy_hex_fp_value_count"
+ (!type already_seen 2)
+ (!srcfileloc "c-family/c-cppbuiltin.cc" 1790)
+ nil )
+ (!pair "lazy_hex_fp_values"
+ (!type array 2407 nil gc_used "LAZY_HEX_FP_VALUES_CNT"
+ (!type already_seen 1923)
+ )
+ (!srcfileloc "c-family/c-cppbuiltin.cc" 1789)
+ nil )
+ (!pair "g_string_concat_db"
+ (!type already_seen 737)
+ (!srcfileloc "c-family/c-common.h" 1193)
+ nil )
+ (!pair "registered_builtin_types"
+ (!type already_seen 23)
+ (!srcfileloc "c-family/c-common.h" 1176)
+ nil )
+ (!pair "pending_lang_change"
+ (!type already_seen 2)
+ (!srcfileloc "c-family/c-common.h" 1129)
+ nil )
+ (!pair "c_global_trees"
+ (!type array 2408 nil gc_used "CTI_MAX"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "c-family/c-common.h" 531)
+ nil )
+ (!pair "ridpointers"
+ (!type already_seen 24)
+ (!srcfileloc "c-family/c-common.h" 299)
+ (!options
+ (!option length string "(int) RID_MAX")
+ )
+ )
+ (!pair "tree_vector_cache"
+ (!type already_seen 1910)
+ (!srcfileloc "c-family/c-common.cc" 8285)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "optimize_args"
+ (!type already_seen 1908)
+ (!srcfileloc "c-family/c-common.cc" 5869)
+ nil )
+ (!pair "compound_literal_number"
+ (!type already_seen 2)
+ (!srcfileloc "c-family/c-common.cc" 4737)
+ nil )
+ (!pair "built_in_attributes"
+ (!type array 2409 nil gc_used "(int) ATTR_LAST"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "c-family/c-common.cc" 4002)
+ nil )
+ (!pair "ext_block"
+ (!type already_seen 23)
+ (!srcfileloc "c/c-decl.cc" 12873)
+ nil )
+ (!pair "last_structptr_types"
+ (!type array 2410 nil gc_used "builtin_structptr_type_count"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "c/c-decl.cc" 1740)
+ nil )
+ (!pair "c_inline_statics"
+ (!type already_seen 1900)
+ (!srcfileloc "c/c-decl.cc" 571)
+ nil )
+ (!pair "binding_freelist"
+ (!type already_seen 644)
+ (!srcfileloc "c/c-decl.cc" 523)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "scope_freelist"
+ (!type already_seen 650)
+ (!srcfileloc "c/c-decl.cc" 519)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "external_scope"
+ (!type already_seen 650)
+ (!srcfileloc "c/c-decl.cc" 515)
+ nil )
+ (!pair "file_scope"
+ (!type already_seen 650)
+ (!srcfileloc "c/c-decl.cc" 510)
+ nil )
+ (!pair "current_function_scope"
+ (!type already_seen 650)
+ (!srcfileloc "c/c-decl.cc" 506)
+ nil )
+ (!pair "current_scope"
+ (!type already_seen 650)
+ (!srcfileloc "c/c-decl.cc" 500)
+ nil )
+ (!pair "visible_builtins"
+ (!type already_seen 23)
+ (!srcfileloc "c/c-decl.cc" 129)
+ nil )
+ (!pair "c_stmt_tree"
+ (!type already_seen 452)
+ (!srcfileloc "c/c-decl.cc" 122)
+ nil )
+ (!pair "gnat_eh_personality_decl"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/misc.cc" 1283)
+ nil )
+ (!pair "built_in_attributes"
+ (!type array 2411 nil gc_used "(int) ATTR_LAST"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "ada/gcc-interface/utils.cc" 6322)
+ nil )
+ (!pair "builtin_types"
+ (!type array 2412 nil gc_used "(int) BT_LAST + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "ada/gcc-interface/utils.cc" 6155)
+ nil )
+ (!pair "dummy_global"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 5908)
+ nil )
+ (!pair "pad_type_hash_table"
+ (!type already_seen 1887)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 331)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "packable_type_hash_table"
+ (!type already_seen 1883)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 308)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "free_block_chain"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 285)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "builtin_decls"
+ (!type already_seen 85)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 282)
+ nil )
+ (!pair "global_decls"
+ (!type already_seen 85)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 279)
+ nil )
+ (!pair "global_context"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 276)
+ nil )
+ (!pair "free_binding_level"
+ (!type already_seen 1879)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 273)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "current_binding_level"
+ (!type already_seen 1879)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 270)
+ nil )
+ (!pair "float_types"
+ (!type array 2413 nil gc_used "NUM_MACHINE_MODES"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "ada/gcc-interface/utils.cc" 257)
+ nil )
+ (!pair "signed_and_unsigned_types"
+ (!type array 2414 nil gc_used "2 * MAX_BITS_PER_WORD + 1"
+ (!type array 2415 nil gc_used "2"
+ (!type already_seen 23)
+ )
+ )
+ (!srcfileloc "ada/gcc-interface/utils.cc" 254)
+ nil )
+ (!pair "dummy_node_table"
+ (!type already_seen 24)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 237)
+ (!options
+ (!option length string "max_gnat_nodes")
+ )
+ )
+ (!pair "associate_gnat_to_gnu"
+ (!type already_seen 24)
+ (!srcfileloc "ada/gcc-interface/utils.cc" 225)
+ (!options
+ (!option length string "max_gnat_nodes")
+ )
+ )
+ (!pair "thunk_labelno"
+ (!type already_seen 2)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 10889)
+ nil )
+ (!pair "gnu_loop_stack"
+ (!type already_seen 1876)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 214)
+ nil )
+ (!pair "gnu_return_var_stack"
+ (!type already_seen 85)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 182)
+ nil )
+ (!pair "gnu_return_label_stack"
+ (!type already_seen 85)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 178)
+ nil )
+ (!pair "gnu_elab_proc_stack"
+ (!type already_seen 85)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 174)
+ nil )
+ (!pair "gnu_incoming_exc_ptr"
+ (!type already_seen 23)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 171)
+ nil )
+ (!pair "gnu_except_ptr_stack"
+ (!type already_seen 85)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 168)
+ nil )
+ (!pair "elab_info_list"
+ (!type already_seen 1868)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 163)
+ nil )
+ (!pair "stmt_group_free_list"
+ (!type already_seen 1866)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 150)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "current_stmt_group"
+ (!type already_seen 1866)
+ (!srcfileloc "ada/gcc-interface/trans.cc" 147)
+ nil )
+ (!pair "dummy_to_subprog_map"
+ (!type already_seen 1861)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 198)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "annotate_value_cache"
+ (!type already_seen 1853)
+ (!srcfileloc "ada/gcc-interface/decl.cc" 157)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "gnat_raise_decls_ext"
+ (!type array 2416 nil gc_used "(int) LAST_REASON_CODE + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "ada/gcc-interface/gigi.h" 443)
+ nil )
+ (!pair "gnat_raise_decls"
+ (!type array 2417 nil gc_used "(int) LAST_REASON_CODE + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "ada/gcc-interface/gigi.h" 442)
+ nil )
+ (!pair "gnat_std_decls"
+ (!type array 2418 nil gc_used "(int) ADT_LAST"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "ada/gcc-interface/gigi.h" 441)
+ nil )
+ (!pair "abi_vector_types"
+ (!type array 2419 nil gc_used "NUM_VECTOR_TYPES + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "config/arm/arm-mve-builtins.cc" 66)
+ nil )
+ (!pair "arm_builtin_decls"
+ (!type array 2420 nil gc_used "ARM_BUILTIN_MAX"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "config/arm/arm-builtins.cc" 1354)
+ nil )
+ (!pair "analyzer_stashed_constants"
+ (!type already_seen 394)
+ (!srcfileloc "analyzer/analyzer-language.cc" 33)
+ nil )
+ (!pair "omp_requires_mask"
+ (!type already_seen 2)
+ (!srcfileloc "omp-general.h" 138)
+ nil )
+ (!pair "internal_fn_fnspec_array"
+ (!type array 2421 nil gc_used "IFN_LAST + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "internal-fn.h" 128)
+ nil )
+ (!pair "odr_enums"
+ (!type already_seen 85)
+ (!srcfileloc "ipa-devirt.cc" 516)
+ nil )
+ (!pair "odr_types_ptr"
+ (!type already_seen 1823)
+ (!srcfileloc "ipa-devirt.cc" 512)
+ nil )
+ (!pair "ubsan_vptr_type_cache_decl"
+ (!type already_seen 23)
+ (!srcfileloc "ubsan.cc" 1218)
+ nil )
+ (!pair "ubsan_ids"
+ (!type array 2422 nil gc_used "2"
+ (!type already_seen 2)
+ )
+ (!srcfileloc "ubsan.cc" 344)
+ nil )
+ (!pair "ubsan_source_location_type"
+ (!type already_seen 23)
+ (!srcfileloc "ubsan.cc" 239)
+ nil )
+ (!pair "ubsan_type_descriptor_type"
+ (!type already_seen 23)
+ (!srcfileloc "ubsan.cc" 189)
+ nil )
+ (!pair "decl_tree_for_type"
+ (!type already_seen 1807)
+ (!srcfileloc "ubsan.cc" 82)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "hwasan_ctor_statements"
+ (!type already_seen 23)
+ (!srcfileloc "asan.cc" 4648)
+ nil )
+ (!pair "asan_ctor_statements"
+ (!type already_seen 23)
+ (!srcfileloc "asan.cc" 3596)
+ nil )
+ (!pair "asan_detect_stack_use_after_return"
+ (!type already_seen 23)
+ (!srcfileloc "asan.cc" 474)
+ nil )
+ (!pair "shadow_ptr_types"
+ (!type array 2423 nil gc_used "3"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "asan.cc" 471)
+ nil )
+ (!pair "asan_memfn_rtls"
+ (!type array 2424 nil gc_used "3"
+ (!type already_seen 100)
+ )
+ (!srcfileloc "asan.cc" 395)
+ nil )
+ (!pair "hwasan_frame_base_init_seq"
+ (!type already_seen 297)
+ (!srcfileloc "asan.cc" 284)
+ nil )
+ (!pair "hwasan_frame_base_ptr"
+ (!type already_seen 100)
+ (!srcfileloc "asan.cc" 273)
+ nil )
+ (!pair "vtbl_mangled_name_ids"
+ (!type already_seen 85)
+ (!srcfileloc "vtable-verify.cc" 309)
+ nil )
+ (!pair "vtbl_mangled_name_types"
+ (!type already_seen 85)
+ (!srcfileloc "vtable-verify.cc" 308)
+ nil )
+ (!pair "verify_vtbl_ptr_fndecl"
+ (!type already_seen 23)
+ (!srcfileloc "vtable-verify.cc" 151)
+ nil )
+ (!pair "ipa_fn_summaries"
+ (!type already_seen 1785)
+ (!srcfileloc "ipa-fnsummary.h" 251)
+ nil )
+ (!pair "tm_wrap_map"
+ (!type already_seen 1742)
+ (!srcfileloc "trans-mem.cc" 468)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "ipcp_transformation_sum"
+ (!type already_seen 1739)
+ (!srcfileloc "ipa-prop.h" 1074)
+ nil )
+ (!pair "ipa_edge_args_sum"
+ (!type already_seen 1736)
+ (!srcfileloc "ipa-prop.h" 1047)
+ nil )
+ (!pair "ipa_node_params_sum"
+ (!type already_seen 1734)
+ (!srcfileloc "ipa-prop.h" 1045)
+ nil )
+ (!pair "ipa_escaped_pt"
+ (!type already_seen 386)
+ (!srcfileloc "tree-ssa-alias.h" 183)
+ nil )
+ (!pair "free_phinodes"
+ (!type array 2425 nil gc_used "NUM_BUCKETS - 2"
+ (!type already_seen 1703)
+ )
+ (!srcfileloc "tree-phinodes.cc" 70)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "clone_fn_ids"
+ (!type already_seen 1701)
+ (!srcfileloc "cgraphclones.cc" 479)
+ nil )
+ (!pair "arm_previous_fndecl"
+ (!type already_seen 23)
+ (!srcfileloc "config/arm/arm.cc" 33362)
+ nil )
+ (!pair "last_asm_targ_options"
+ (!type already_seen 626)
+ (!srcfileloc "config/arm/arm.cc" 28501)
+ nil )
+ (!pair "tls_get_addr_libfunc"
+ (!type already_seen 100)
+ (!srcfileloc "config/arm/arm.cc" 9184)
+ nil )
+ (!pair "pic_labelno"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.cc" 8316)
+ nil )
+ (!pair "init_optimize"
+ (!type already_seen 23)
+ (!srcfileloc "config/arm/arm.cc" 3055)
+ nil )
+ (!pair "thumb_flipper"
+ (!type already_seen 2)
+ (!srcfileloc "config/arm/arm.cc" 3052)
+ nil )
+ (!pair "va_list_type"
+ (!type already_seen 23)
+ (!srcfileloc "config/arm/arm.cc" 2848)
+ nil )
+ (!pair "speculation_barrier_libfunc"
+ (!type already_seen 100)
+ (!srcfileloc "config/arm/arm.cc" 2534)
+ nil )
+ (!pair "stack_chk_fail_decl"
+ (!type already_seen 23)
+ (!srcfileloc "targhooks.cc" 916)
+ nil )
+ (!pair "stack_chk_guard_decl"
+ (!type already_seen 23)
+ (!srcfileloc "targhooks.cc" 883)
+ nil )
+ (!pair "critical_name_mutexes"
+ (!type already_seen 394)
+ (!srcfileloc "omp-low.cc" 10393)
+ nil )
+ (!pair "omp_declare_variant_alt"
+ (!type already_seen 1661)
+ (!srcfileloc "omp-general.cc" 2143)
+ nil )
+ (!pair "omp_declare_variants"
+ (!type already_seen 1658)
+ (!srcfileloc "omp-general.cc" 2120)
+ nil )
+ (!pair "offload_vars"
+ (!type already_seen 85)
+ (!srcfileloc "omp-offload.h" 30)
+ nil )
+ (!pair "offload_funcs"
+ (!type already_seen 85)
+ (!srcfileloc "omp-offload.h" 29)
+ nil )
+ (!pair "descriptor_type"
+ (!type already_seen 23)
+ (!srcfileloc "tree-nested.cc" 643)
+ nil )
+ (!pair "trampoline_type"
+ (!type already_seen 23)
+ (!srcfileloc "tree-nested.cc" 603)
+ nil )
+ (!pair "ic_tuple_callee_field"
+ (!type already_seen 23)
+ (!srcfileloc "tree-profile.cc" 74)
+ nil )
+ (!pair "ic_tuple_counters_field"
+ (!type already_seen 23)
+ (!srcfileloc "tree-profile.cc" 73)
+ nil )
+ (!pair "ic_tuple_var"
+ (!type already_seen 23)
+ (!srcfileloc "tree-profile.cc" 72)
+ nil )
+ (!pair "tree_time_profiler_counter"
+ (!type already_seen 23)
+ (!srcfileloc "tree-profile.cc" 69)
+ nil )
+ (!pair "tree_ior_profiler_fn"
+ (!type already_seen 23)
+ (!srcfileloc "tree-profile.cc" 68)
+ nil )
+ (!pair "tree_average_profiler_fn"
+ (!type already_seen 23)
+ (!srcfileloc "tree-profile.cc" 67)
+ nil )
+ (!pair "tree_indirect_call_profiler_fn"
+ (!type already_seen 23)
+ (!srcfileloc "tree-profile.cc" 66)
+ nil )
+ (!pair "tree_topn_values_profiler_fn"
+ (!type already_seen 23)
+ (!srcfileloc "tree-profile.cc" 65)
+ nil )
+ (!pair "tree_pow2_profiler_fn"
+ (!type already_seen 23)
+ (!srcfileloc "tree-profile.cc" 64)
+ nil )
+ (!pair "tree_interval_profiler_fn"
+ (!type already_seen 23)
+ (!srcfileloc "tree-profile.cc" 63)
+ nil )
+ (!pair "gcov_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "tree-profile.cc" 62)
+ nil )
+ (!pair "scalar_evolution_info"
+ (!type already_seen 1642)
+ (!srcfileloc "tree-scalar-evolution.cc" 312)
+ nil )
+ (!pair "tmp_var_id_num"
+ (!type already_seen 2)
+ (!srcfileloc "gimple-expr.cc" 415)
+ nil )
+ (!pair "stmt_list_cache"
+ (!type already_seen 85)
+ (!srcfileloc "tree-iterator.cc" 31)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "addr_list"
+ (!type already_seen 221)
+ (!srcfileloc "tree-ssa-loop-ivopts.cc" 2594)
+ nil )
+ (!pair "mem_addr_template_list"
+ (!type already_seen 1602)
+ (!srcfileloc "tree-ssa-address.cc" 95)
+ nil )
+ (!pair "elf_fini_array_section"
+ (!type already_seen 214)
+ (!srcfileloc "varasm.cc" 8397)
+ nil )
+ (!pair "elf_init_array_section"
+ (!type already_seen 214)
+ (!srcfileloc "varasm.cc" 8396)
+ nil )
+ (!pair "tm_clone_hash"
+ (!type already_seen 1579)
+ (!srcfileloc "varasm.cc" 6353)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "weakref_targets"
+ (!type already_seen 23)
+ (!srcfileloc "varasm.cc" 6008)
+ nil )
+ (!pair "const_desc_htab"
+ (!type already_seen 1572)
+ (!srcfileloc "varasm.cc" 3072)
+ nil )
+ (!pair "initial_trampoline"
+ (!type already_seen 100)
+ (!srcfileloc "varasm.cc" 2736)
+ nil )
+ (!pair "weak_decls"
+ (!type already_seen 23)
+ (!srcfileloc "varasm.cc" 2531)
+ nil )
+ (!pair "pending_assemble_externals"
+ (!type already_seen 23)
+ (!srcfileloc "varasm.cc" 2462)
+ nil )
+ (!pair "shared_constant_pool"
+ (!type already_seen 1014)
+ (!srcfileloc "varasm.cc" 206)
+ nil )
+ (!pair "anchor_labelno"
+ (!type already_seen 2)
+ (!srcfileloc "varasm.cc" 203)
+ nil )
+ (!pair "object_block_htab"
+ (!type already_seen 1569)
+ (!srcfileloc "varasm.cc" 200)
+ nil )
+ (!pair "section_htab"
+ (!type already_seen 1566)
+ (!srcfileloc "varasm.cc" 189)
+ nil )
+ (!pair "unnamed_sections"
+ (!type already_seen 214)
+ (!srcfileloc "varasm.cc" 174)
+ nil )
+ (!pair "const_labelno"
+ (!type already_seen 2)
+ (!srcfileloc "varasm.cc" 81)
+ nil )
+ (!pair "weak_global_object_name"
+ (!type already_seen 11)
+ (!srcfileloc "varasm.cc" 67)
+ nil )
+ (!pair "first_global_object_name"
+ (!type already_seen 11)
+ (!srcfileloc "varasm.cc" 66)
+ nil )
+ (!pair "gcc_eh_personality_decl"
+ (!type already_seen 23)
+ (!srcfileloc "tree.cc" 12099)
+ nil )
+ (!pair "anon_cnt"
+ (!type already_seen 2)
+ (!srcfileloc "tree.cc" 8737)
+ nil )
+ (!pair "nonstandard_boolean_type_cache"
+ (!type array 2426 nil gc_used "MAX_BOOL_CACHED_PREC + 1"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "tree.cc" 7101)
+ nil )
+ (!pair "nonstandard_integer_type_cache"
+ (!type array 2427 nil gc_used "2 * MAX_INT_CACHED_PREC + 2"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "tree.cc" 7052)
+ nil )
+ (!pair "debug_args_for_decl"
+ (!type already_seen 1562)
+ (!srcfileloc "tree.cc" 224)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "value_expr_for_decl"
+ (!type already_seen 1560)
+ (!srcfileloc "tree.cc" 221)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "debug_expr_for_decl"
+ (!type already_seen 1560)
+ (!srcfileloc "tree.cc" 218)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "cl_option_hash_table"
+ (!type already_seen 1558)
+ (!srcfileloc "tree.cc" 212)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "cl_target_option_node"
+ (!type already_seen 23)
+ (!srcfileloc "tree.cc" 204)
+ nil )
+ (!pair "cl_optimization_node"
+ (!type already_seen 23)
+ (!srcfileloc "tree.cc" 203)
+ nil )
+ (!pair "poly_int_cst_hash_table"
+ (!type already_seen 1555)
+ (!srcfileloc "tree.cc" 196)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "int_cst_hash_table"
+ (!type already_seen 1552)
+ (!srcfileloc "tree.cc" 185)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "int_cst_node"
+ (!type already_seen 23)
+ (!srcfileloc "tree.cc" 177)
+ nil )
+ (!pair "type_hash_table"
+ (!type already_seen 1549)
+ (!srcfileloc "tree.cc" 174)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "next_debug_decl_uid"
+ (!type already_seen 2)
+ (!srcfileloc "tree.cc" 142)
+ nil )
+ (!pair "next_type_uid"
+ (!type already_seen 2)
+ (!srcfileloc "tree.cc" 139)
+ nil )
+ (!pair "next_decl_uid"
+ (!type already_seen 2)
+ (!srcfileloc "tree.cc" 137)
+ nil )
+ (!pair "spd"
+ (!type already_seen 1545)
+ (!srcfileloc "stringpool.cc" 253)
+ nil )
+ (!pair "size_functions"
+ (!type already_seen 85)
+ (!srcfileloc "stor-layout.cc" 88)
+ nil )
+ (!pair "cfg_layout_function_header"
+ (!type already_seen 297)
+ (!srcfileloc "cfgrtl.cc" 78)
+ nil )
+ (!pair "cfg_layout_function_footer"
+ (!type already_seen 297)
+ (!srcfileloc "cfgrtl.cc" 77)
+ nil )
+ (!pair "libfunc_decls"
+ (!type already_seen 1520)
+ (!srcfileloc "optabs-libfuncs.cc" 720)
+ nil )
+ (!pair "unused_expr_list"
+ (!type already_seen 100)
+ (!srcfileloc "lists.cc" 34)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "unused_insn_list"
+ (!type already_seen 100)
+ (!srcfileloc "lists.cc" 31)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "queue"
+ (!type already_seen 85)
+ (!srcfileloc "godump.cc" 57)
+ nil )
+ (!pair "test_insn"
+ (!type already_seen 297)
+ (!srcfileloc "gcse.cc" 823)
+ nil )
+ (!pair "dummy_unittesting_tree"
+ (!type already_seen 23)
+ (!srcfileloc "ggc-tests.cc" 443)
+ nil )
+ (!pair "root_user_struct_ptr"
+ (!type already_seen 1498)
+ (!srcfileloc "ggc-tests.cc" 390)
+ nil )
+ (!pair "root_test_node"
+ (!type already_seen 1496)
+ (!srcfileloc "ggc-tests.cc" 328)
+ nil )
+ (!pair "test_some_other_subclass_as_base_ptr"
+ (!type already_seen 1490)
+ (!srcfileloc "ggc-tests.cc" 275)
+ nil )
+ (!pair "test_some_subclass_as_base_ptr"
+ (!type already_seen 1490)
+ (!srcfileloc "ggc-tests.cc" 274)
+ nil )
+ (!pair "test_some_other_subclass"
+ (!type already_seen 1494)
+ (!srcfileloc "ggc-tests.cc" 273)
+ nil )
+ (!pair "test_some_subclass"
+ (!type already_seen 1492)
+ (!srcfileloc "ggc-tests.cc" 272)
+ nil )
+ (!pair "test_example_base"
+ (!type already_seen 1490)
+ (!srcfileloc "ggc-tests.cc" 271)
+ nil )
+ (!pair "test_of_deletable"
+ (!type already_seen 917)
+ (!srcfileloc "ggc-tests.cc" 205)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "root_test_of_union_2"
+ (!type already_seen 1487)
+ (!srcfileloc "ggc-tests.cc" 135)
+ nil )
+ (!pair "root_test_of_union_1"
+ (!type already_seen 1487)
+ (!srcfileloc "ggc-tests.cc" 134)
+ nil )
+ (!pair "root_test_of_length"
+ (!type already_seen 1484)
+ (!srcfileloc "ggc-tests.cc" 68)
+ nil )
+ (!pair "root_test_struct"
+ (!type already_seen 917)
+ (!srcfileloc "ggc-tests.cc" 42)
+ nil )
+ (!pair "sjlj_fc_type_node"
+ (!type already_seen 23)
+ (!srcfileloc "except.cc" 156)
+ nil )
+ (!pair "setjmp_fn"
+ (!type already_seen 23)
+ (!srcfileloc "except.cc" 153)
+ nil )
+ (!pair "type_to_runtime_map"
+ (!type already_seen 1471)
+ (!srcfileloc "except.cc" 151)
+ nil )
+ (!pair "call_site_base"
+ (!type already_seen 2)
+ (!srcfileloc "except.cc" 149)
+ nil )
+ (!pair "next_block_index"
+ (!type already_seen 2)
+ (!srcfileloc "function.cc" 4640)
+ nil )
+ (!pair "temp_slot_address_table"
+ (!type already_seen 1462)
+ (!srcfileloc "function.cc" 608)
+ nil )
+ (!pair "epilogue_insn_hash"
+ (!type already_seen 1458)
+ (!srcfileloc "function.cc" 133)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "prologue_insn_hash"
+ (!type already_seen 1458)
+ (!srcfileloc "function.cc" 131)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "funcdef_no"
+ (!type already_seen 2)
+ (!srcfileloc "function.cc" 113)
+ nil )
+ (!pair "stack_check_libfunc"
+ (!type already_seen 100)
+ (!srcfileloc "explow.cc" 1643)
+ nil )
+ (!pair "hard_reg_clobbers"
+ (!type array 2428 nil gc_used "NUM_MACHINE_MODES"
+ (!type array 2429 nil gc_used "FIRST_PSEUDO_REGISTER"
+ (!type already_seen 100)
+ )
+ )
+ (!srcfileloc "emit-rtl.cc" 6479)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "free_sequence_stack"
+ (!type already_seen 1002)
+ (!srcfileloc "emit-rtl.cc" 5466)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "spill_slot_decl"
+ (!type already_seen 23)
+ (!srcfileloc "emit-rtl.cc" 2652)
+ nil )
+ (!pair "const_fixed_htab"
+ (!type already_seen 1445)
+ (!srcfileloc "emit-rtl.cc" 190)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "const_double_htab"
+ (!type already_seen 1442)
+ (!srcfileloc "emit-rtl.cc" 181)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "reg_attrs_htab"
+ (!type already_seen 1439)
+ (!srcfileloc "emit-rtl.cc" 172)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "const_poly_int_htab"
+ (!type already_seen 1436)
+ (!srcfileloc "emit-rtl.cc" 163)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "const_wide_int_htab"
+ (!type already_seen 1433)
+ (!srcfileloc "emit-rtl.cc" 153)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "const_int_htab"
+ (!type already_seen 1430)
+ (!srcfileloc "emit-rtl.cc" 145)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "label_num"
+ (!type already_seen 2)
+ (!srcfileloc "emit-rtl.cc" 94)
+ nil )
+ (!pair "x_rtl"
+ (!type already_seen 1427)
+ (!srcfileloc "emit-rtl.h" 338)
+ nil )
+ (!pair "shift_test"
+ (!type already_seen 100)
+ (!srcfileloc "dojump.cc" 127)
+ nil )
+ (!pair "and_test"
+ (!type already_seen 100)
+ (!srcfileloc "dojump.cc" 126)
+ nil )
+ (!pair "and_reg"
+ (!type already_seen 100)
+ (!srcfileloc "dojump.cc" 125)
+ nil )
+ (!pair "vector_last_nunits"
+ (!type already_seen 2)
+ (!srcfileloc "tree-vect-generic.cc" 137)
+ nil )
+ (!pair "vector_last_type"
+ (!type already_seen 23)
+ (!srcfileloc "tree-vect-generic.cc" 136)
+ nil )
+ (!pair "vector_inner_type"
+ (!type already_seen 23)
+ (!srcfileloc "tree-vect-generic.cc" 135)
+ nil )
+ (!pair "funcs"
+ (!type already_seen 1411)
+ (!srcfileloc "btfout.cc" 105)
+ nil )
+ (!pair "btf_var_ids"
+ (!type already_seen 1407)
+ (!srcfileloc "btfout.cc" 73)
+ nil )
+ (!pair "btf_info_section"
+ (!type already_seen 214)
+ (!srcfileloc "btfout.cc" 42)
+ nil )
+ (!pair "ctf_info_section"
+ (!type already_seen 214)
+ (!srcfileloc "ctfout.cc" 36)
+ nil )
+ (!pair "tu_ctfc"
+ (!type already_seen 1403)
+ (!srcfileloc "ctfc.h" 343)
+ nil )
+ (!pair "inline_entry_data_table"
+ (!type already_seen 1368)
+ (!srcfileloc "dwarf2out.cc" 24312)
+ nil )
+ (!pair "external_die_map"
+ (!type already_seen 1353)
+ (!srcfileloc "dwarf2out.cc" 5950)
+ nil )
+ (!pair "addr_index_table"
+ (!type already_seen 1350)
+ (!srcfileloc "dwarf2out.cc" 5072)
+ nil )
+ (!pair "generic_type_instances"
+ (!type already_seen 85)
+ (!srcfileloc "dwarf2out.cc" 3713)
+ nil )
+ (!pair "tmpl_value_parm_die_table"
+ (!type already_seen 1343)
+ (!srcfileloc "dwarf2out.cc" 3707)
+ nil )
+ (!pair "label_num"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3705)
+ nil )
+ (!pair "last_emitted_file"
+ (!type already_seen 503)
+ (!srcfileloc "dwarf2out.cc" 3702)
+ nil )
+ (!pair "poc_label_num"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3699)
+ nil )
+ (!pair "loclabel_num"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3696)
+ nil )
+ (!pair "have_location_lists"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 3693)
+ nil )
+ (!pair "ranges_by_label"
+ (!type already_seen 1341)
+ (!srcfileloc "dwarf2out.cc" 3690)
+ nil )
+ (!pair "ranges_table"
+ (!type already_seen 1339)
+ (!srcfileloc "dwarf2out.cc" 3687)
+ nil )
+ (!pair "macinfo_table"
+ (!type already_seen 1337)
+ (!srcfileloc "dwarf2out.cc" 3677)
+ nil )
+ (!pair "pubtype_table"
+ (!type already_seen 1335)
+ (!srcfileloc "dwarf2out.cc" 3673)
+ nil )
+ (!pair "pubname_table"
+ (!type already_seen 1335)
+ (!srcfileloc "dwarf2out.cc" 3669)
+ nil )
+ (!pair "separate_line_info"
+ (!type already_seen 1333)
+ (!srcfileloc "dwarf2out.cc" 3661)
+ nil )
+ (!pair "cold_text_section_line_info"
+ (!type already_seen 1295)
+ (!srcfileloc "dwarf2out.cc" 3658)
+ nil )
+ (!pair "text_section_line_info"
+ (!type already_seen 1295)
+ (!srcfileloc "dwarf2out.cc" 3657)
+ nil )
+ (!pair "cur_line_info_table"
+ (!type already_seen 1295)
+ (!srcfileloc "dwarf2out.cc" 3654)
+ nil )
+ (!pair "abbrev_die_table"
+ (!type already_seen 1311)
+ (!srcfileloc "dwarf2out.cc" 3640)
+ nil )
+ (!pair "cached_dw_loc_list_table"
+ (!type already_seen 1331)
+ (!srcfileloc "dwarf2out.cc" 3636)
+ nil )
+ (!pair "call_arg_locations"
+ (!type already_seen 1324)
+ (!srcfileloc "dwarf2out.cc" 3608)
+ nil )
+ (!pair "decl_loc_table"
+ (!type already_seen 1327)
+ (!srcfileloc "dwarf2out.cc" 3605)
+ nil )
+ (!pair "common_block_die_table"
+ (!type already_seen 1318)
+ (!srcfileloc "dwarf2out.cc" 3539)
+ nil )
+ (!pair "variable_value_hash"
+ (!type already_seen 1315)
+ (!srcfileloc "dwarf2out.cc" 3529)
+ nil )
+ (!pair "decl_die_table"
+ (!type already_seen 1309)
+ (!srcfileloc "dwarf2out.cc" 3512)
+ nil )
+ (!pair "file_table"
+ (!type already_seen 1306)
+ (!srcfileloc "dwarf2out.cc" 3501)
+ nil )
+ (!pair "deferred_asm_name"
+ (!type already_seen 1302)
+ (!srcfileloc "dwarf2out.cc" 3490)
+ nil )
+ (!pair "limbo_die_list"
+ (!type already_seen 1302)
+ (!srcfileloc "dwarf2out.cc" 3486)
+ nil )
+ (!pair "cu_die_list"
+ (!type already_seen 1302)
+ (!srcfileloc "dwarf2out.cc" 3483)
+ nil )
+ (!pair "comdat_type_list"
+ (!type already_seen 489)
+ (!srcfileloc "dwarf2out.cc" 3480)
+ nil )
+ (!pair "single_comp_unit_die"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 3477)
+ nil )
+ (!pair "zero_view_p"
+ (!type already_seen 387)
+ (!srcfileloc "dwarf2out.cc" 3400)
+ nil )
+ (!pair "do_eh_frame"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 293)
+ nil )
+ (!pair "current_unit_personality"
+ (!type already_seen 100)
+ (!srcfileloc "dwarf2out.cc" 290)
+ nil )
+ (!pair "decltype_auto_die"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 282)
+ nil )
+ (!pair "auto_die"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2out.cc" 279)
+ nil )
+ (!pair "switch_cold_ranges"
+ (!type already_seen 1290)
+ (!srcfileloc "dwarf2out.cc" 276)
+ nil )
+ (!pair "switch_text_ranges"
+ (!type already_seen 1290)
+ (!srcfileloc "dwarf2out.cc" 275)
+ nil )
+ (!pair "last_cold_label"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 271)
+ nil )
+ (!pair "last_text_label"
+ (!type already_seen 11)
+ (!srcfileloc "dwarf2out.cc" 270)
+ nil )
+ (!pair "in_text_section_p"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 267)
+ nil )
+ (!pair "cold_text_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 264)
+ nil )
+ (!pair "have_multiple_function_sections"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 261)
+ nil )
+ (!pair "dw2_string_counter"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2out.cc" 258)
+ nil )
+ (!pair "skeleton_debug_str_hash"
+ (!type already_seen 1288)
+ (!srcfileloc "dwarf2out.cc" 256)
+ nil )
+ (!pair "debug_line_str_hash"
+ (!type already_seen 1288)
+ (!srcfileloc "dwarf2out.cc" 235)
+ nil )
+ (!pair "debug_str_hash"
+ (!type already_seen 1288)
+ (!srcfileloc "dwarf2out.cc" 233)
+ nil )
+ (!pair "fde_vec"
+ (!type already_seen 1285)
+ (!srcfileloc "dwarf2out.cc" 215)
+ nil )
+ (!pair "debug_frame_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 176)
+ nil )
+ (!pair "debug_ranges_dwo_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 175)
+ nil )
+ (!pair "debug_ranges_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 174)
+ nil )
+ (!pair "debug_str_offsets_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 173)
+ nil )
+ (!pair "debug_str_dwo_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 172)
+ nil )
+ (!pair "debug_line_str_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 171)
+ nil )
+ (!pair "debug_str_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 170)
+ nil )
+ (!pair "debug_pubtypes_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 169)
+ nil )
+ (!pair "debug_pubnames_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 168)
+ nil )
+ (!pair "debug_loc_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 167)
+ nil )
+ (!pair "debug_skeleton_line_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 166)
+ nil )
+ (!pair "debug_line_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 165)
+ nil )
+ (!pair "debug_macinfo_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 162)
+ nil )
+ (!pair "debug_addr_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 161)
+ nil )
+ (!pair "debug_aranges_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 160)
+ nil )
+ (!pair "debug_skeleton_abbrev_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 159)
+ nil )
+ (!pair "debug_abbrev_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 158)
+ nil )
+ (!pair "debug_skeleton_info_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 157)
+ nil )
+ (!pair "debug_info_section"
+ (!type already_seen 214)
+ (!srcfileloc "dwarf2out.cc" 156)
+ nil )
+ (!pair "incomplete_types"
+ (!type already_seen 85)
+ (!srcfileloc "dwarf2out.cc" 153)
+ nil )
+ (!pair "used_rtx_array"
+ (!type already_seen 221)
+ (!srcfileloc "dwarf2out.cc" 147)
+ nil )
+ (!pair "ctf_unknown_die"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2ctf.cc" 56)
+ nil )
+ (!pair "ctf_array_index_die"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2ctf.cc" 55)
+ nil )
+ (!pair "ctf_void_die"
+ (!type already_seen 486)
+ (!srcfileloc "dwarf2ctf.cc" 54)
+ nil )
+ (!pair "saved_do_cfi_asm"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2cfi.cc" 3699)
+ nil )
+ (!pair "dwarf2out_cfi_label_num"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2cfi.cc" 197)
+ nil )
+ (!pair "cie_return_save"
+ (!type already_seen 1278)
+ (!srcfileloc "dwarf2cfi.cc" 195)
+ nil )
+ (!pair "cie_cfi_row"
+ (!type already_seen 1276)
+ (!srcfileloc "dwarf2cfi.cc" 193)
+ nil )
+ (!pair "dw2_const_labelno"
+ (!type already_seen 2)
+ (!srcfileloc "dwarf2asm.cc" 913)
+ nil )
+ (!pair "indirect_pool"
+ (!type already_seen 1274)
+ (!srcfileloc "dwarf2asm.cc" 911)
+ nil )
+ (!pair "cie_cfi_vec"
+ (!type already_seen 471)
+ (!srcfileloc "dwarf2out.h" 339)
+ nil )
+ (!pair "nowarn_map"
+ (!type already_seen 1270)
+ (!srcfileloc "diagnostic-spec.h" 140)
+ nil )
+ (!pair "summaries_lto"
+ (!type already_seen 1253)
+ (!srcfileloc "ipa-modref.cc" 273)
+ nil )
+ (!pair "optimization_summaries"
+ (!type already_seen 1251)
+ (!srcfileloc "ipa-modref.cc" 267)
+ nil )
+ (!pair "summaries"
+ (!type already_seen 1251)
+ (!srcfileloc "ipa-modref.cc" 261)
+ nil )
+ (!pair "func_sums"
+ (!type already_seen 1227)
+ (!srcfileloc "ipa-sra.cc" 471)
+ nil )
+ (!pair "ipa_vr_hash_table"
+ (!type already_seen 1131)
+ (!srcfileloc "ipa-prop.cc" 156)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "ipa_bits_hash_table"
+ (!type already_seen 1128)
+ (!srcfileloc "ipa-prop.cc" 109)
+ (!options
+ (!option cache string "")
+ )
+ )
+ (!pair "version_info_node"
+ (!type already_seen 1051)
+ (!srcfileloc "cgraph.cc" 142)
+ nil )
+ (!pair "cgraph_fnver_htab"
+ (!type already_seen 1124)
+ (!srcfileloc "cgraph.cc" 122)
+ nil )
+ (!pair "callmem"
+ (!type already_seen 100)
+ (!srcfileloc "cselib.cc" 252)
+ nil )
+ (!pair "bitmap_ggc_free"
+ (!type already_seen 389)
+ (!srcfileloc "bitmap.cc" 70)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "frame_set"
+ (!type already_seen 2)
+ (!srcfileloc "alias.cc" 1373)
+ nil )
+ (!pair "varargs_set"
+ (!type already_seen 2)
+ (!srcfileloc "alias.cc" 1351)
+ nil )
+ (!pair "alias_sets"
+ (!type already_seen 1117)
+ (!srcfileloc "alias.cc" 280)
+ nil )
+ (!pair "reg_known_value"
+ (!type already_seen 221)
+ (!srcfileloc "alias.cc" 258)
+ nil )
+ (!pair "old_reg_base_value"
+ (!type already_seen 221)
+ (!srcfileloc "alias.cc" 239)
+ (!options
+ (!option deletable string "")
+ )
+ )
+ (!pair "arg_base_value"
+ (!type already_seen 100)
+ (!srcfileloc "alias.cc" 231)
+ nil )
+ (!pair "reg_base_value"
+ (!type already_seen 221)
+ (!srcfileloc "alias.cc" 226)
+ nil )
+ (!pair "restinsn"
+ (!type already_seen 297)
+ (!srcfileloc "caller-save.cc" 107)
+ nil )
+ (!pair "saveinsn"
+ (!type already_seen 297)
+ (!srcfileloc "caller-save.cc" 106)
+ nil )
+ (!pair "test_mem"
+ (!type already_seen 100)
+ (!srcfileloc "caller-save.cc" 105)
+ nil )
+ (!pair "test_reg"
+ (!type already_seen 100)
+ (!srcfileloc "caller-save.cc" 104)
+ nil )
+ (!pair "restpat"
+ (!type already_seen 100)
+ (!srcfileloc "caller-save.cc" 103)
+ nil )
+ (!pair "savepat"
+ (!type already_seen 100)
+ (!srcfileloc "caller-save.cc" 102)
+ nil )
+ (!pair "thunks"
+ (!type already_seen 1105)
+ (!srcfileloc "symtab-thunks.cc" 62)
+ nil )
+ (!pair "vtable_entry_type"
+ (!type already_seen 23)
+ (!srcfileloc "symtab-thunks.cc" 54)
+ nil )
+ (!pair "saved_symtab"
+ (!type already_seen 1089)
+ (!srcfileloc "cgraph.h" 3553)
+ nil )
+ (!pair "symtab"
+ (!type already_seen 1089)
+ (!srcfileloc "cgraph.h" 2554)
+ nil )
+ (!pair "in_cold_section_p"
+ (!type already_seen 2)
+ (!srcfileloc "output.h" 535)
+ nil )
+ (!pair "in_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 534)
+ nil )
+ (!pair "bss_noswitch_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 532)
+ nil )
+ (!pair "lcomm_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 531)
+ nil )
+ (!pair "comm_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 530)
+ nil )
+ (!pair "tls_comm_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 529)
+ nil )
+ (!pair "eh_frame_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 528)
+ nil )
+ (!pair "exception_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 527)
+ nil )
+ (!pair "sbss_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 526)
+ nil )
+ (!pair "bss_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 525)
+ nil )
+ (!pair "dtors_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 524)
+ nil )
+ (!pair "ctors_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 523)
+ nil )
+ (!pair "sdata_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 522)
+ nil )
+ (!pair "readonly_data_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 521)
+ nil )
+ (!pair "data_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 520)
+ nil )
+ (!pair "text_section"
+ (!type already_seen 214)
+ (!srcfileloc "output.h" 519)
+ nil )
+ (!pair "types_used_by_cur_var_decl"
+ (!type already_seen 85)
+ (!srcfileloc "function.h" 501)
+ nil )
+ (!pair "types_used_by_vars_hash"
+ (!type already_seen 1025)
+ (!srcfileloc "function.h" 495)
+ nil )
+ (!pair "cfun"
+ (!type already_seen 352)
+ (!srcfileloc "function.h" 466)
+ nil )
+ (!pair "regno_reg_rtx"
+ (!type already_seen 101)
+ (!srcfileloc "function.h" 87)
+ (!options
+ (!option length string "crtl->emit.x_reg_rtx_no")
+ )
+ )
+ (!pair "default_target_libfuncs"
+ (!type already_seen 607)
+ (!srcfileloc "libfuncs.h" 64)
+ nil )
+ (!pair "current_function_func_begin_label"
+ (!type already_seen 11)
+ (!srcfileloc "tree-core.h" 2384)
+ nil )
+ (!pair "current_function_decl"
+ (!type already_seen 23)
+ (!srcfileloc "tree-core.h" 2381)
+ nil )
+ (!pair "builtin_info"
+ (!type array 2430 nil gc_used "(int)END_BUILTINS"
+ (!type already_seen 989)
+ )
+ (!srcfileloc "tree-core.h" 2375)
+ nil )
+ (!pair "sizetype_tab"
+ (!type array 2431 nil gc_used "(int) stk_type_kind_last"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "tree-core.h" 2363)
+ nil )
+ (!pair "integer_types"
+ (!type array 2432 nil gc_used "itk_none"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "tree-core.h" 2360)
+ nil )
+ (!pair "global_trees"
+ (!type array 2433 nil gc_used "TI_MAX"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "tree-core.h" 2356)
+ nil )
+ (!pair "all_translation_units"
+ (!type already_seen 85)
+ (!srcfileloc "tree-core.h" 2353)
+ nil )
+ (!pair "alias_pairs"
+ (!type already_seen 994)
+ (!srcfileloc "tree-core.h" 2339)
+ nil )
+ (!pair "int_n_trees"
+ (!type array 2434 nil gc_used "NUM_INT_N_ENTS"
+ (!type already_seen 974)
+ )
+ (!srcfileloc "tree.h" 6502)
+ nil )
+ (!pair "stack_limit_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "rtl.h" 4488)
+ nil )
+ (!pair "default_target_rtl"
+ (!type already_seen 590)
+ (!srcfileloc "rtl.h" 3922)
+ nil )
+ (!pair "invalid_insn_rtx"
+ (!type already_seen 297)
+ (!srcfileloc "rtl.h" 3820)
+ nil )
+ (!pair "simple_return_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "rtl.h" 3819)
+ nil )
+ (!pair "ret_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "rtl.h" 3818)
+ nil )
+ (!pair "pc_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "rtl.h" 3817)
+ nil )
+ (!pair "const_tiny_rtx"
+ (!type array 2435 nil gc_used "4"
+ (!type array 2436 nil gc_used "(int) MAX_MACHINE_MODE"
+ (!type already_seen 100)
+ )
+ )
+ (!srcfileloc "rtl.h" 3804)
+ nil )
+ (!pair "const_true_rtx"
+ (!type already_seen 100)
+ (!srcfileloc "rtl.h" 3802)
+ nil )
+ (!pair "const_int_rtx"
+ (!type array 2437 nil gc_used "MAX_SAVED_CONST_INT * 2 + 1"
+ (!type already_seen 100)
+ )
+ (!srcfileloc "rtl.h" 3796)
+ nil )
+ (!pair "gcov_fn_info_ptr_type"
+ (!type already_seen 23)
+ (!srcfileloc "coverage.cc" 103)
+ nil )
+ (!pair "gcov_fn_info_type"
+ (!type already_seen 23)
+ (!srcfileloc "coverage.cc" 102)
+ nil )
+ (!pair "gcov_info_var"
+ (!type already_seen 23)
+ (!srcfileloc "coverage.cc" 101)
+ nil )
+ (!pair "fn_v_ctrs"
+ (!type array 2438 nil gc_used "GCOV_COUNTERS"
+ (!type already_seen 23)
+ )
+ (!srcfileloc "coverage.cc" 96)
+ nil )
+ (!pair "functions_head"
+ (!type already_seen 905)
+ (!srcfileloc "coverage.cc" 87)
+ nil )
+ (!pair "thumb_call_via_label"
+ (!type array 2439 nil gc_used "14"
+ (!type already_seen 100)
+ )
+ (!srcfileloc "config/arm/arm.h" 1650)
+ nil )
+ (!pair "arm_target_insn"
+ (!type already_seen 100)
+ (!srcfileloc "config/arm/arm.h" 73)
+ nil )
+ (!pair "saved_line_table"
+ (!type already_seen 705)
+ (!srcfileloc "input.h" 27)
+ nil )
+ (!pair "line_table"
+ (!type already_seen 705)
+ (!srcfileloc "input.h" 26)
+ nil )
+)
+
+(!endfile)
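The (!pair ...) records above are gengtype's serialized view of the compiler's GC roots; each entry traces back to a GTY marker on a global in the named source file. As a hedged sketch, the three marker shapes that produce the plain, "deletable", and "length" entries look roughly like this (the variable names are made up for illustration):

    /* A plain root: scanned and kept alive across collections.  */
    static GTY(()) tree sketch_root;

    /* (!option deletable ...) entries come from GTY((deletable)):
       the root is a cache that is simply cleared at GC time instead
       of being scanned.  */
    static GTY((deletable)) tree sketch_free_list;

    /* (!option length ...) entries mark dynamically sized arrays;
       the string is an expression giving the element count
       (sketch_len is hypothetical here).  */
    static GTY((length ("sketch_len"))) tree *sketch_array;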
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ada/gcc-interface/ada-tree.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ada/gcc-interface/ada-tree.def
new file mode 100644
index 0000000..ff88e54
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ada/gcc-interface/ada-tree.def
@@ -0,0 +1,83 @@
+/****************************************************************************
+ * *
+ * GNAT COMPILER COMPONENTS *
+ * *
+ * GNAT-SPECIFIC GCC TREE CODES *
+ * *
+ * Specification *
+ * *
+ * Copyright (C) 1992-2023, Free Software Foundation, Inc. *
+ * *
+ * GNAT is free software; you can redistribute it and/or modify it under *
+ * terms of the GNU General Public License as published by the Free Soft- *
+ * ware Foundation; either version 3, or (at your option) any later ver- *
+ * sion. GNAT is distributed in the hope that it will be useful, but WITH- *
+ * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. You should have received a copy of the GNU General *
+ * Public License along with GCC; see the file COPYING3. If not see *
+ * <http://www.gnu.org/licenses/>. *
+ * *
+ * GNAT was originally developed by the GNAT team at New York University. *
+ * Extensive contributions were provided by Ada Core Technologies Inc. *
+ * *
+ ****************************************************************************/
+
+/* A type that is an unconstrained array. This node is never passed to GCC.
+ TREE_TYPE is the type of the fat pointer and TYPE_OBJECT_RECORD_TYPE is
+ the type of a record containing the template and data. */
+DEFTREECODE (UNCONSTRAINED_ARRAY_TYPE, "unconstrained_array_type", tcc_type, 0)
+
+/* A reference to an unconstrained array. This node only exists as an
+ intermediate node during the translation of a GNAT tree to a GCC tree;
+ it is never passed to GCC. The only field used is operand 0, which
+ is the fat pointer object. */
+DEFTREECODE (UNCONSTRAINED_ARRAY_REF, "unconstrained_array_ref",
+ tcc_reference, 1)
+
+/* Same as SAVE_EXPR, but operand 1 contains the statement used to initialize
+ the temporary instead of using the value of operand 0 directly. */
+DEFTREECODE (LOAD_EXPR, "load_expr", tcc_expression, 2)
+
+/* An expression that returns an RTL suitable for its type. Operand 0
+ is an expression to be evaluated for side effects only. */
+DEFTREECODE (NULL_EXPR, "null_expr", tcc_expression, 1)
+
+/* Same as PLUS_EXPR, except that no modulo reduction is applied.
+ This is used for loops and never shows up in the tree. */
+DEFTREECODE (PLUS_NOMOD_EXPR, "plus_nomod_expr", tcc_binary, 2)
+
+/* Same as MINUS_EXPR, except that no modulo reduction is applied.
+ This is used for loops and never shows up in the tree. */
+DEFTREECODE (MINUS_NOMOD_EXPR, "minus_nomod_expr", tcc_binary, 2)
+
+/* An expression that computes an exponentiation. Operand 0 is the base and
+ Operand 1 is the exponent. This node is never passed to GCC: it is only
+   used internally to describe the scale factors of fixed-point types.  */
+DEFTREECODE (POWER_EXPR, "power_expr", tcc_binary, 2)
+
+/* Same as ADDR_EXPR, except that if the operand represents a bit field,
+ return the address of the byte containing the bit. This is used
+ for the Address attribute and never shows up in the tree. */
+DEFTREECODE (ATTR_ADDR_EXPR, "attr_addr_expr", tcc_reference, 1)
+
+/* Here are the tree codes for the statement types known to Ada. These
+ must be at the end of this file to allow IS_ADA_STMT to work. */
+
+/* This is how record_code_position and insert_code_for work. The former
+ makes this tree node, whose operand is a statement. The latter inserts
+ the actual statements into this node. Gimplification consists of
+ just returning the inner statement. */
+DEFTREECODE (STMT_STMT, "stmt_stmt", tcc_statement, 1)
+
+/* A loop. LOOP_STMT_COND is the test to exit the loop. LOOP_STMT_UPDATE
+ is the statement to update the loop iteration variable at the continue
+ point. LOOP_STMT_BODY are the statements in the body of the loop. And
+ LOOP_STMT_LABEL points to the LABEL_DECL of the end label of the loop. */
+DEFTREECODE (LOOP_STMT, "loop_stmt", tcc_statement, 4)
+
+/* Conditionally exit a loop. EXIT_STMT_COND is the condition, which, if
+ true, will cause the loop to be exited. If no condition is specified,
+ the loop is unconditionally exited. EXIT_STMT_LABEL is the end label
+ corresponding to the loop to exit. */
+DEFTREECODE (EXIT_STMT, "exit_stmt", tcc_statement, 2)
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/addresses.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/addresses.h
new file mode 100644
index 0000000..3519c24
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/addresses.h
@@ -0,0 +1,90 @@
+/* Inline functions to test validity of reg classes for addressing modes.
+ Copyright (C) 2006-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Wrapper function to unify target macros MODE_CODE_BASE_REG_CLASS,
+ MODE_BASE_REG_REG_CLASS, MODE_BASE_REG_CLASS and BASE_REG_CLASS.
+ Arguments as for the MODE_CODE_BASE_REG_CLASS macro. */
+
+#ifndef GCC_ADDRESSES_H
+#define GCC_ADDRESSES_H
+
+inline enum reg_class
+base_reg_class (machine_mode mode ATTRIBUTE_UNUSED,
+ addr_space_t as ATTRIBUTE_UNUSED,
+ enum rtx_code outer_code ATTRIBUTE_UNUSED,
+ enum rtx_code index_code ATTRIBUTE_UNUSED)
+{
+#ifdef MODE_CODE_BASE_REG_CLASS
+ return MODE_CODE_BASE_REG_CLASS (MACRO_MODE (mode), as, outer_code,
+ index_code);
+#else
+#ifdef MODE_BASE_REG_REG_CLASS
+ if (index_code == REG)
+ return MODE_BASE_REG_REG_CLASS (MACRO_MODE (mode));
+#endif
+#ifdef MODE_BASE_REG_CLASS
+ return MODE_BASE_REG_CLASS (MACRO_MODE (mode));
+#else
+ return BASE_REG_CLASS;
+#endif
+#endif
+}
+
+/* Wrapper function to unify target macros REGNO_MODE_CODE_OK_FOR_BASE_P,
+ REGNO_MODE_OK_FOR_REG_BASE_P, REGNO_MODE_OK_FOR_BASE_P and
+ REGNO_OK_FOR_BASE_P.
+ Arguments as for the REGNO_MODE_CODE_OK_FOR_BASE_P macro. */
+
+inline bool
+ok_for_base_p_1 (unsigned regno ATTRIBUTE_UNUSED,
+ machine_mode mode ATTRIBUTE_UNUSED,
+ addr_space_t as ATTRIBUTE_UNUSED,
+ enum rtx_code outer_code ATTRIBUTE_UNUSED,
+ enum rtx_code index_code ATTRIBUTE_UNUSED)
+{
+#ifdef REGNO_MODE_CODE_OK_FOR_BASE_P
+ return REGNO_MODE_CODE_OK_FOR_BASE_P (regno, MACRO_MODE (mode), as,
+ outer_code, index_code);
+#else
+#ifdef REGNO_MODE_OK_FOR_REG_BASE_P
+ if (index_code == REG)
+ return REGNO_MODE_OK_FOR_REG_BASE_P (regno, MACRO_MODE (mode));
+#endif
+#ifdef REGNO_MODE_OK_FOR_BASE_P
+ return REGNO_MODE_OK_FOR_BASE_P (regno, MACRO_MODE (mode));
+#else
+ return REGNO_OK_FOR_BASE_P (regno);
+#endif
+#endif
+}
+
+/* Wrapper around ok_for_base_p_1, for use after register allocation is
+ complete. Arguments as for the called function. */
+
+inline bool
+regno_ok_for_base_p (unsigned regno, machine_mode mode, addr_space_t as,
+ enum rtx_code outer_code, enum rtx_code index_code)
+{
+ if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0)
+ regno = reg_renumber[regno];
+
+ return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
+}
+
+#endif /* GCC_ADDRESSES_H */
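For orientation, a minimal sketch of how a caller typically uses these wrappers (the function name, SImode, and the generic address space below are illustrative assumptions, not taken from this header):

    /* Would REGNO be a valid base register in a reg+reg SImode
       address?  base_reg_class () picks the target's register class
       for this address shape; regno_ok_for_base_p () additionally
       maps an allocated pseudo through reg_renumber before asking
       the target.  */
    static bool
    sketch_reg_ok_as_base_p (unsigned int regno)
    {
      enum reg_class rclass
        = base_reg_class (SImode, ADDR_SPACE_GENERIC, PLUS, REG);
      return rclass != NO_REGS
             && regno_ok_for_base_p (regno, SImode, ADDR_SPACE_GENERIC,
                                     PLUS, REG);
    }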
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/alias.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/alias.h
new file mode 100644
index 0000000..0f0787d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/alias.h
@@ -0,0 +1,51 @@
+/* Exported functions from alias.cc
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ALIAS_H
+#define GCC_ALIAS_H
+
+extern alias_set_type new_alias_set (void);
+extern alias_set_type get_alias_set (tree);
+extern alias_set_type get_deref_alias_set (tree);
+extern alias_set_type get_varargs_alias_set (void);
+extern alias_set_type get_frame_alias_set (void);
+extern tree component_uses_parent_alias_set_from (const_tree);
+extern bool ends_tbaa_access_path_p (const_tree);
+extern bool alias_set_subset_of (alias_set_type, alias_set_type);
+extern void record_alias_subset (alias_set_type, alias_set_type);
+extern void record_component_aliases (tree);
+extern int alias_sets_conflict_p (alias_set_type, alias_set_type);
+extern int alias_sets_must_conflict_p (alias_set_type, alias_set_type);
+extern int objects_must_conflict_p (tree, tree);
+extern int nonoverlapping_memrefs_p (const_rtx, const_rtx, bool);
+extern void dump_alias_stats_in_alias_c (FILE *s);
+tree reference_alias_ptr_type (tree);
+tree reference_alias_ptr_type_1 (tree *);
+bool alias_ptr_types_compatible_p (tree, tree);
+int compare_base_decls (tree, tree);
+bool refs_same_for_tbaa_p (tree, tree);
+bool mems_same_for_tbaa_p (rtx, rtx);
+
+/* This alias set can be used to force a memory to conflict with all
+ other memories, creating a barrier across which no memory reference
+ can move. Note that there are other legacy ways to create such
+ memory barriers, including an address of SCRATCH. */
+#define ALIAS_SET_MEMORY_BARRIER ((alias_set_type) -1)
+
+#endif /* GCC_ALIAS_H */
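As a rough guide to this API, a hedged sketch of the usual TBAA bookkeeping (the function name is illustrative; the calls are the ones declared above):

    /* Record that accesses through a member type may alias accesses
       through its containing type, then query the relation.  */
    static void
    sketch_record_and_query (tree container_type, tree member_type)
    {
      alias_set_type outer = get_alias_set (container_type);
      alias_set_type inner = get_alias_set (member_type);
      record_alias_subset (outer, inner);
      if (!alias_sets_conflict_p (outer, inner))
        ; /* Not taken: a recorded subset always conflicts with its
             superset.  */
    }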
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/align.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/align.h
new file mode 100644
index 0000000..5e94bd3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/align.h
@@ -0,0 +1,83 @@
+/* Alignment-related classes.
+ Copyright (C) 2018-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Align flags tuple with alignment in log form and with a maximum skip. */
+
+struct align_flags_tuple
+{
+ /* Values of the -falign-* flags: how much to align labels in code.
+ log is "align to 2^log" (so 0 means no alignment).
+ maxskip is the maximum allowed amount of padding to insert. */
+ int log;
+ int maxskip;
+
+ /* Normalize filled values so that maxskip is not bigger than 1 << log. */
+ void normalize ()
+ {
+ int n = (1 << log);
+ if (maxskip > n)
+ maxskip = n - 1;
+ }
+
+ /* Return original value of an alignment flag. */
+ int get_value ()
+ {
+ return maxskip + 1;
+ }
+};
+
+/* Alignment flags is a structure used as the value of the -falign-*
+   options.  It's used in target-dependent code.  */
+
+class align_flags
+{
+public:
+ /* Default constructor. */
+ align_flags (int log0 = 0, int maxskip0 = 0, int log1 = 0, int maxskip1 = 0)
+ {
+ levels[0].log = log0;
+ levels[0].maxskip = maxskip0;
+ levels[1].log = log1;
+ levels[1].maxskip = maxskip1;
+ normalize ();
+ }
+
+ /* Normalize both components of align_flags. */
+ void normalize ()
+ {
+ for (unsigned i = 0; i < 2; i++)
+ levels[i].normalize ();
+ }
+
+  /* Return the component-wise maximum of alignments F0 and F1.  */
+ static align_flags max (const align_flags f0, const align_flags f1)
+ {
+ int log0 = MAX (f0.levels[0].log, f1.levels[0].log);
+ int maxskip0 = MAX (f0.levels[0].maxskip, f1.levels[0].maxskip);
+ int log1 = MAX (f0.levels[1].log, f1.levels[1].log);
+ int maxskip1 = MAX (f0.levels[1].maxskip, f1.levels[1].maxskip);
+ return align_flags (log0, maxskip0, log1, maxskip1);
+ }
+
+ align_flags_tuple levels[2];
+};
+
+/* Define maximum supported code alignment. */
+#define MAX_CODE_ALIGN 16
+#define MAX_CODE_ALIGN_VALUE (1 << MAX_CODE_ALIGN)
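A worked example of the clamping above (the values are made up; the semantics follow directly from normalize () and get_value ()):

    /* A -falign-loops=32:7 style request: log = 5 (2^5 = 32 bytes),
       maxskip = 7.  Since 7 <= (1 << 5), normalize () keeps it.  */
    align_flags loops (5, 7);
    int v = loops.levels[0].get_value ();  /* maxskip + 1 == 8.  */

    /* An over-large skip is clamped: maxskip 100 with log = 2 becomes
       (1 << 2) - 1 = 3.  */
    align_flags tight (2, 100);  /* tight.levels[0].maxskip == 3.  */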
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/all-tree.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/all-tree.def
new file mode 100644
index 0000000..791c7a8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/all-tree.def
@@ -0,0 +1,8 @@
+#include "tree.def"
+END_OF_BASE_TREE_CODES
+#include "c-family/c-common.def"
+#include "ada/gcc-interface/ada-tree.def"
+#include "cp/cp-tree.def"
+#include "d/d-tree.def"
+#include "m2/m2-tree.def"
+#include "objc/objc-tree.def"
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/alloc-pool.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/alloc-pool.h
new file mode 100644
index 0000000..e56709b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/alloc-pool.h
@@ -0,0 +1,576 @@
+/* Functions to support a pool of allocatable objects
+ Copyright (C) 1997-2023 Free Software Foundation, Inc.
+ Contributed by Daniel Berlin <dan@cgsoftware.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+#ifndef ALLOC_POOL_H
+#define ALLOC_POOL_H
+
+#include "memory-block.h"
+#include "options.h" // for flag_checking
+
+extern void dump_alloc_pool_statistics (void);
+
+/* Flag indicating that memory statistics are no longer gathered.  */
+extern bool after_memory_report;
+
+typedef unsigned long ALLOC_POOL_ID_TYPE;
+
+/* Last used ID. */
+extern ALLOC_POOL_ID_TYPE last_id;
+
+/* Pool allocator memory usage. */
+class pool_usage: public mem_usage
+{
+public:
+  /* Default constructor.  */
+ pool_usage (): m_element_size (0), m_pool_name ("") {}
+ /* Constructor. */
+ pool_usage (size_t allocated, size_t times, size_t peak,
+ size_t instances, size_t element_size,
+ const char *pool_name)
+ : mem_usage (allocated, times, peak, instances),
+ m_element_size (element_size),
+ m_pool_name (pool_name) {}
+
+  /* Return the sum of this usage and the SECOND usage.  */
+ pool_usage
+ operator+ (const pool_usage &second)
+ {
+ return pool_usage (m_allocated + second.m_allocated,
+ m_times + second.m_times,
+ m_peak + second.m_peak,
+ m_instances + second.m_instances,
+ m_element_size, m_pool_name);
+ }
+
+  /* Dump the usage for location LOC, where TOTAL is the sum of all rows.  */
+ inline void
+ dump (mem_location *loc, const mem_usage &total) const
+ {
+ char *location_string = loc->to_string ();
+
+ fprintf (stderr, "%-32s%-48s " PRsa(5) PRsa(9) ":%5.1f%%"
+ PRsa(9) PRsa(9) ":%5.1f%%%12" PRIu64 "\n",
+ m_pool_name, location_string,
+ SIZE_AMOUNT (m_instances),
+ SIZE_AMOUNT (m_allocated),
+ get_percent (m_allocated, total.m_allocated),
+ SIZE_AMOUNT (m_peak),
+ SIZE_AMOUNT (m_times),
+ get_percent (m_times, total.m_times),
+ (uint64_t)m_element_size);
+
+ free (location_string);
+ }
+
+ /* Dump header with NAME. */
+ static inline void
+ dump_header (const char *name)
+ {
+ fprintf (stderr, "%-32s%-48s %6s%11s%16s%17s%12s\n", "Pool name", name,
+ "Pools", "Leak", "Peak", "Times", "Elt size");
+ }
+
+ /* Dump footer. */
+ inline void
+ dump_footer ()
+ {
+ fprintf (stderr, "%s" PRsa(82) PRsa(10) "\n", "Total",
+ SIZE_AMOUNT (m_instances), SIZE_AMOUNT (m_allocated));
+ }
+
+ /* Element size. */
+ size_t m_element_size;
+ /* Pool name. */
+ const char *m_pool_name;
+};
+
+extern mem_alloc_description<pool_usage> pool_allocator_usage;
+
+#if 0
+/* If a pool with custom block size is needed, one might use the following
+ template. An instance of this template can be used as a parameter for
+ instantiating base_pool_allocator template:
+
+ typedef custom_block_allocator <128*1024> huge_block_allocator;
+ ...
+ static base_pool_allocator <huge_block_allocator>
+ value_pool ("value", 16384);
+
+   Right now it's not used anywhere in the code, and is given here only as
+   an example.  */
+
+template <size_t BlockSize>
+class custom_block_allocator
+{
+public:
+ static const size_t block_size = BlockSize;
+
+ static inline void *
+ allocate () ATTRIBUTE_MALLOC
+ {
+ return XNEWVEC (char, BlockSize);
+ }
+
+ static inline void
+ release (void *block)
+ {
+ XDELETEVEC (block);
+ }
+};
+#endif
+
+/* Generic pool allocator. */
+
+template <typename TBlockAllocator>
+class base_pool_allocator
+{
+public:
+ /* Default constructor for pool allocator called NAME. */
+ base_pool_allocator (const char *name, size_t size CXX_MEM_STAT_INFO);
+ ~base_pool_allocator ();
+ void release ();
+ void release_if_empty ();
+ void *allocate () ATTRIBUTE_MALLOC;
+ void remove (void *object);
+ size_t num_elts_current ();
+
+private:
+ struct allocation_pool_list
+ {
+ allocation_pool_list *next;
+ };
+
+ /* Initialize a pool allocator. */
+ void initialize ();
+
+ struct allocation_object
+ {
+#if CHECKING_P
+ /* The ID of alloc pool which the object was allocated from. */
+ ALLOC_POOL_ID_TYPE id;
+#endif
+
+ union
+ {
+ /* The data of the object. */
+ char data[1];
+
+ /* Because we want any type of data to be well aligned after the ID,
+ the following elements are here. They are never accessed so
+ the allocated object may be even smaller than this structure.
+ We do not care about alignment for floating-point types. */
+ char *align_p;
+ int64_t align_i;
+ } u;
+
+#if CHECKING_P
+ static inline allocation_object*
+ get_instance (void *data_ptr)
+ {
+ return (allocation_object *)(((char *)(data_ptr))
+ - offsetof (allocation_object,
+ u.data));
+ }
+#endif
+
+ static inline void*
+ get_data (void *instance_ptr)
+ {
+ return (void*)(((allocation_object *) instance_ptr)->u.data);
+ }
+ };
+
+ /* Align X to 8. */
+ static inline size_t
+ align_eight (size_t x)
+ {
+ return (((x+7) >> 3) << 3);
+ }
+
+ const char *m_name;
+ ALLOC_POOL_ID_TYPE m_id;
+ size_t m_elts_per_block;
+
+ /* These are the elements that have been allocated at least once
+ and freed. */
+ allocation_pool_list *m_returned_free_list;
+
+ /* These are the elements that have not yet been allocated out of
+ the last block obtained from XNEWVEC. */
+ char* m_virgin_free_list;
+
+ /* The number of elements in the virgin_free_list that can be
+ allocated before needing another block. */
+ size_t m_virgin_elts_remaining;
+ /* The number of elements that are allocated. */
+ size_t m_elts_allocated;
+ /* The number of elements that are released. */
+ size_t m_elts_free;
+ /* The number of allocated blocks. */
+ size_t m_blocks_allocated;
+ /* List of blocks that are used to allocate new objects. */
+ allocation_pool_list *m_block_list;
+  /* Size of a pool element in bytes. */
+ size_t m_elt_size;
+ /* Size in bytes that should be allocated for each element. */
+ size_t m_size;
+  /* Flag indicating whether the pool allocator is initialized. */
+ bool m_initialized;
+ /* Memory allocation location. */
+ mem_location m_location;
+};
+
+template <typename TBlockAllocator>
+inline
+base_pool_allocator <TBlockAllocator>::base_pool_allocator (
+ const char *name, size_t size MEM_STAT_DECL):
+ m_name (name), m_id (0), m_elts_per_block (0), m_returned_free_list (NULL),
+ m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
+ m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL), m_elt_size (0),
+ m_size (size), m_initialized (false),
+ m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}
+
+/* Initialize a pool allocator. */
+
+template <typename TBlockAllocator>
+inline void
+base_pool_allocator <TBlockAllocator>::initialize ()
+{
+ gcc_checking_assert (!m_initialized);
+ m_initialized = true;
+
+ size_t size = m_size;
+
+ gcc_checking_assert (m_name);
+ gcc_checking_assert (m_size);
+
+ /* Make size large enough to store the list header. */
+ if (size < sizeof (allocation_pool_list*))
+ size = sizeof (allocation_pool_list*);
+
+ /* Now align the size to a multiple of 8. */
+ size = align_eight (size);
+
+ /* Add the aligned size of ID. */
+ size += offsetof (allocation_object, u.data);
+
+ m_elt_size = size;
+
+ if (GATHER_STATISTICS)
+ {
+ pool_usage *u = pool_allocator_usage.register_descriptor
+ (this, new mem_location (m_location));
+
+ u->m_element_size = m_elt_size;
+ u->m_pool_name = m_name;
+ }
+
+ /* List header size should be a multiple of 8. */
+ size_t header_size = align_eight (sizeof (allocation_pool_list));
+
+ m_elts_per_block = (TBlockAllocator::block_size - header_size) / size;
+ gcc_checking_assert (m_elts_per_block != 0);
+
+  /* Increase the last used ID and use it for this pool.
+     ID == 0 is used for free elements of the pool, so skip it.  */
+ last_id++;
+ if (last_id == 0)
+ last_id++;
+
+ m_id = last_id;
+}
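+
+/* A worked example of the size computation above (illustrative, not part
+   of the original header; it assumes 8-byte pointers and CHECKING_P, so
+   the allocation_object header occupies 8 bytes): a pool created with
+   size 12 first rounds up to 16, then adds 8 for the header, giving
+   m_elt_size == 24.  With a 64 KB block and an 8-byte aligned list header,
+   that yields (65536 - 8) / 24 == 2730 elements per block.  */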
+
+/* Free all memory allocated for the given memory pool. */
+template <typename TBlockAllocator>
+inline void
+base_pool_allocator <TBlockAllocator>::release ()
+{
+ if (!m_initialized)
+ return;
+
+ allocation_pool_list *block, *next_block;
+
+ /* Free each block allocated to the pool. */
+ for (block = m_block_list; block != NULL; block = next_block)
+ {
+ next_block = block->next;
+ TBlockAllocator::release (block);
+ }
+
+ if (GATHER_STATISTICS && !after_memory_report)
+ {
+ pool_allocator_usage.release_instance_overhead
+ (this, (m_elts_allocated - m_elts_free) * m_elt_size);
+ }
+
+ m_returned_free_list = NULL;
+ m_virgin_free_list = NULL;
+ m_virgin_elts_remaining = 0;
+ m_elts_allocated = 0;
+ m_elts_free = 0;
+ m_blocks_allocated = 0;
+ m_block_list = NULL;
+}
+
+template <typename TBlockAllocator>
+inline void
+base_pool_allocator <TBlockAllocator>::release_if_empty ()
+{
+ if (m_elts_free == m_elts_allocated)
+ release ();
+}
+
+template <typename TBlockAllocator>
+inline base_pool_allocator <TBlockAllocator>::~base_pool_allocator ()
+{
+ release ();
+}
+
+/* Allocates one element from the pool specified. */
+template <typename TBlockAllocator>
+inline void*
+base_pool_allocator <TBlockAllocator>::allocate ()
+{
+ if (!m_initialized)
+ initialize ();
+
+ allocation_pool_list *header;
+#ifdef ENABLE_VALGRIND_ANNOTATIONS
+ int size;
+#endif
+
+ if (GATHER_STATISTICS)
+ {
+ pool_allocator_usage.register_instance_overhead (m_elt_size, this);
+ }
+
+#ifdef ENABLE_VALGRIND_ANNOTATIONS
+ size = m_elt_size - offsetof (allocation_object, u.data);
+#endif
+
+  /* If there are no more free elements, make some more! */
+ if (!m_returned_free_list)
+ {
+ char *block;
+ if (!m_virgin_elts_remaining)
+ {
+ allocation_pool_list *block_header;
+
+ /* Make the block. */
+ block = reinterpret_cast<char *> (TBlockAllocator::allocate ());
+ block_header = new (block) allocation_pool_list;
+ block += align_eight (sizeof (allocation_pool_list));
+
+ /* Throw it on the block list. */
+ block_header->next = m_block_list;
+ m_block_list = block_header;
+
+ /* Make the block available for allocation. */
+ m_virgin_free_list = block;
+ m_virgin_elts_remaining = m_elts_per_block;
+
+ /* Also update the number of elements we have free/allocated, and
+ increment the allocated block count. */
+ m_elts_allocated += m_elts_per_block;
+ m_elts_free += m_elts_per_block;
+ m_blocks_allocated += 1;
+ }
+
+ /* We now know that we can take the first elt off the virgin list and
+ put it on the returned list. */
+ block = m_virgin_free_list;
+ header = (allocation_pool_list*) allocation_object::get_data (block);
+ header->next = NULL;
+
+ /* Mark the element to be free. */
+#if CHECKING_P
+ ((allocation_object*) block)->id = 0;
+#endif
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header,size));
+ m_returned_free_list = header;
+ m_virgin_free_list += m_elt_size;
+ m_virgin_elts_remaining--;
+
+ }
+
+ /* Pull the first free element from the free list, and return it. */
+ header = m_returned_free_list;
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (header, sizeof (*header)));
+ m_returned_free_list = header->next;
+ m_elts_free--;
+
+  /* Set the ID for the element. */
+#if CHECKING_P
+ allocation_object::get_instance (header)->id = m_id;
+#endif
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));
+
+ return (void *)(header);
+}
+
+/* Put OBJECT back on the pool's free list. */
+template <typename TBlockAllocator>
+inline void
+base_pool_allocator <TBlockAllocator>::remove (void *object)
+{
+ int size = m_elt_size - offsetof (allocation_object, u.data);
+
+ if (flag_checking)
+ {
+ gcc_assert (m_initialized);
+ gcc_assert (object
+ /* Check if we free more than we allocated. */
+ && m_elts_free < m_elts_allocated);
+#if CHECKING_P
+      /* Check whether OBJECT was allocated from this pool. */
+ gcc_assert (m_id == allocation_object::get_instance (object)->id);
+#endif
+
+ memset (object, 0xaf, size);
+ }
+
+#if CHECKING_P
+ /* Mark the element to be free. */
+ allocation_object::get_instance (object)->id = 0;
+#endif
+
+ allocation_pool_list *header = new (object) allocation_pool_list;
+ header->next = m_returned_free_list;
+ m_returned_free_list = header;
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
+ m_elts_free++;
+
+ if (GATHER_STATISTICS)
+ {
+ pool_allocator_usage.release_instance_overhead (this, m_elt_size);
+ }
+}
+
+/* Number of elements currently active (not returned to pool). Used for cheap
+ consistency checks. */
+template <typename TBlockAllocator>
+inline size_t
+base_pool_allocator <TBlockAllocator>::num_elts_current ()
+{
+ return m_elts_allocated - m_elts_free;
+}
+
+/* Specialization of base_pool_allocator which should be used in most cases.
+   Another specialization may be needed if the object size is greater than
+   memory_block_pool::block_size (64 KB). */
+typedef base_pool_allocator <memory_block_pool> pool_allocator;
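+
+/* A minimal usage sketch (not part of the original header): a pool that
+   hands out fixed-size chunks.  The pool name and element size here are
+   hypothetical; allocate () lazily initializes the pool on first use and
+   remove () returns a chunk to the free list.
+
+     static pool_allocator chunk_pool ("chunks", 16);
+     void *p = chunk_pool.allocate ();   // grabs one 16-byte element
+     chunk_pool.remove (p);              // returns it to the free list
+     chunk_pool.release ();              // frees all blocks at once
+*/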
+
+/* Type based memory pool allocator. */
+template <typename T>
+class object_allocator
+{
+public:
+ /* Default constructor for pool allocator called NAME. */
+ object_allocator (const char *name CXX_MEM_STAT_INFO):
+ m_allocator (name, sizeof (T) PASS_MEM_STAT) {}
+
+ inline void
+ release ()
+ {
+ m_allocator.release ();
+ }
+
+ inline void release_if_empty ()
+ {
+ m_allocator.release_if_empty ();
+ }
+
+ /* Allocate memory for instance of type T and call a default constructor. */
+
+ inline T *
+ allocate () ATTRIBUTE_MALLOC
+ {
+ return ::new (m_allocator.allocate ()) T;
+ }
+
+ /* Allocate memory for instance of type T and return void * that
+ could be used in situations where a default constructor is not provided
+ by the class T. */
+
+ inline void *
+ allocate_raw () ATTRIBUTE_MALLOC
+ {
+ return m_allocator.allocate ();
+ }
+
+ inline void
+ remove (T *object)
+ {
+ /* Call destructor. */
+ object->~T ();
+
+ m_allocator.remove (object);
+ }
+
+ inline void
+ remove_raw (void *object)
+ {
+ m_allocator.remove (object);
+ }
+
+ inline size_t
+ num_elts_current ()
+ {
+ return m_allocator.num_elts_current ();
+ }
+
+private:
+ pool_allocator m_allocator;
+};
+
+/* Store information about each particular alloc_pool.  Note that this
+   will underestimate the amount of storage used by a small amount:
+   1) The overhead in a pool is not accounted for.
+   2) The unallocated elements in a block are not accounted for.  Note
+      that this can in the worst case be one element smaller than the
+      block size for that pool. */
+struct alloc_pool_descriptor
+{
+ /* Number of pools allocated. */
+ unsigned long created;
+ /* Gross allocated storage. */
+ unsigned long allocated;
+ /* Amount of currently active storage. */
+ unsigned long current;
+ /* Peak amount of storage used. */
+ unsigned long peak;
+ /* Size of element in the pool. */
+ int elt_size;
+};
+
+/* Helper for classes that do not provide default ctor. */
+
+template <typename T>
+inline void *
+operator new (size_t, object_allocator<T> &a)
+{
+ return a.allocate_raw ();
+}
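+
+/* Usage sketch (illustrative, not part of the original header): the
+   operator new above enables placement-new syntax for types without a
+   default constructor.  `edge_info' is a hypothetical type.
+
+     static object_allocator<edge_info> einfo_pool ("edge infos");
+     edge_info *e1 = einfo_pool.allocate ();          // default-constructed
+     edge_info *e2 = new (einfo_pool) edge_info (42); // custom constructor
+     einfo_pool.remove (e1);                          // runs ~edge_info ()
+     einfo_pool.remove (e2);
+*/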
+
+/* Hashtable mapping alloc_pool names to descriptors. */
+extern hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
+
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ansidecl.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ansidecl.h
new file mode 100644
index 0000000..39375e1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ansidecl.h
@@ -0,0 +1,354 @@
+/* Compiler compatibility macros
+ Copyright (C) 1991-2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* For ease of writing code which uses GCC extensions but needs to be
+ portable to other compilers, we provide the GCC_VERSION macro that
+ simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various
+ wrappers around __attribute__. Also, __extension__ will be #defined
+ to nothing if it doesn't work. See below. */
+
+#ifndef _ANSIDECL_H
+#define _ANSIDECL_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Every source file includes this file,
+ so they will all get the switch for lint. */
+/* LINTLIBRARY */
+
+/* Using MACRO(x,y) in cpp #if conditionals does not work with some
+ older preprocessors. Thus we can't define something like this:
+
+#define HAVE_GCC_VERSION(MAJOR, MINOR) \
+ (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR)))
+
+and then test "#if HAVE_GCC_VERSION(2,7)".
+
+So instead we use the macro below and test it against specific values. */
+
+/* This macro simplifies testing whether we are using gcc, and if it
+ is of a particular minimum version. (Both major & minor numbers are
+ significant.) This macro will evaluate to 0 if we are not using
+ gcc at all. */
+#ifndef GCC_VERSION
+#define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__)
+#endif /* GCC_VERSION */
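+
+/* For example, GCC 3.4 yields GCC_VERSION == 3004, so a test such as
+   "#if GCC_VERSION >= 3004" guards features introduced in that release
+   (and, per the above, evaluates to 0 when not compiling with GCC). */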
+
+/* inline requires special treatment; it's in C99, and GCC >=2.7 supports
+ it too, but it's not in C89. */
+#undef inline
+#if (!defined(__cplusplus) && __STDC_VERSION__ >= 199901L) || defined(__cplusplus) || (defined(__SUNPRO_C) && defined(__C99FEATURES__))
+/* it's a keyword */
+#else
+# if GCC_VERSION >= 2007
+# define inline __inline__ /* __inline__ prevents -pedantic warnings */
+# else
+# define inline /* nothing */
+# endif
+#endif
+
+/* Define macros for some gcc attributes. This permits us to use the
+ macros freely, and know that they will come into play for the
+ version of gcc in which they are supported. */
+
+#if (GCC_VERSION < 2007)
+# define __attribute__(x)
+#endif
+
+/* Attribute __malloc__ on functions was valid as of gcc 2.96. */
+#ifndef ATTRIBUTE_MALLOC
+# if (GCC_VERSION >= 2096)
+# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__))
+# else
+# define ATTRIBUTE_MALLOC
+# endif /* GNUC >= 2.96 */
+#endif /* ATTRIBUTE_MALLOC */
+
+/* Attributes on labels were valid as of gcc 2.93 and g++ 4.5. For
+ g++ an attribute on a label must be followed by a semicolon. */
+#ifndef ATTRIBUTE_UNUSED_LABEL
+# ifndef __cplusplus
+# if GCC_VERSION >= 2093
+# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED
+# else
+# define ATTRIBUTE_UNUSED_LABEL
+# endif
+# else
+# if GCC_VERSION >= 4005
+# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED ;
+# else
+# define ATTRIBUTE_UNUSED_LABEL
+# endif
+# endif
+#endif
+
+/* Similar to ARG_UNUSED below.  Prior to GCC 3.4, the C++ frontend
+   couldn't parse attributes placed after the identifier name, and now
+   the entire compiler is built with C++.  */
+#ifndef ATTRIBUTE_UNUSED
+#if GCC_VERSION >= 3004
+# define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
+#else
+#define ATTRIBUTE_UNUSED
+#endif
+#endif /* ATTRIBUTE_UNUSED */
+
+/* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the
+ identifier name. */
+#if ! defined(__cplusplus) || (GCC_VERSION >= 3004)
+# define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED
+#else /* !__cplusplus || GNUC >= 3.4 */
+# define ARG_UNUSED(NAME) NAME
+#endif /* !__cplusplus || GNUC >= 3.4 */
+
+#ifndef ATTRIBUTE_NORETURN
+#define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__))
+#endif /* ATTRIBUTE_NORETURN */
+
+/* Attribute `nonnull' was valid as of gcc 3.3. */
+#ifndef ATTRIBUTE_NONNULL
+# if (GCC_VERSION >= 3003)
+# define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m)))
+# else
+# define ATTRIBUTE_NONNULL(m)
+# endif /* GNUC >= 3.3 */
+#endif /* ATTRIBUTE_NONNULL */
+
+/* Attribute `returns_nonnull' was valid as of gcc 4.9. */
+#ifndef ATTRIBUTE_RETURNS_NONNULL
+# if (GCC_VERSION >= 4009)
+# define ATTRIBUTE_RETURNS_NONNULL __attribute__ ((__returns_nonnull__))
+# else
+# define ATTRIBUTE_RETURNS_NONNULL
+# endif /* GNUC >= 4.9 */
+#endif /* ATTRIBUTE_RETURNS_NONNULL */
+
+/* Attribute `pure' was valid as of gcc 3.0. */
+#ifndef ATTRIBUTE_PURE
+# if (GCC_VERSION >= 3000)
+# define ATTRIBUTE_PURE __attribute__ ((__pure__))
+# else
+# define ATTRIBUTE_PURE
+# endif /* GNUC >= 3.0 */
+#endif /* ATTRIBUTE_PURE */
+
+/* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL.
+ This was the case for the `printf' format attribute by itself
+ before GCC 3.3, but as of 3.3 we need to add the `nonnull'
+ attribute to retain this behavior. */
+#ifndef ATTRIBUTE_PRINTF
+#define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m)
+#define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2)
+#define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3)
+#define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4)
+#define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5)
+#define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6)
+#endif /* ATTRIBUTE_PRINTF */
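+
+/* For instance, a diagnostic routine whose second parameter is the format
+   string and whose variadic arguments start at position three would be
+   declared (the name is illustrative):
+
+     extern void report (int level, const char *fmt, ...) ATTRIBUTE_PRINTF_2;
+*/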
+
+/* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on
+ a function pointer. Format attributes were allowed on function
+ pointers as of gcc 3.1. */
+#ifndef ATTRIBUTE_FPTR_PRINTF
+# if (GCC_VERSION >= 3001)
+# define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n)
+# else
+# define ATTRIBUTE_FPTR_PRINTF(m, n)
+# endif /* GNUC >= 3.1 */
+# define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2)
+# define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3)
+# define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4)
+# define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5)
+# define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6)
+#endif /* ATTRIBUTE_FPTR_PRINTF */
+
+/* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A
+ NULL format specifier was allowed as of gcc 3.3. */
+#ifndef ATTRIBUTE_NULL_PRINTF
+# if (GCC_VERSION >= 3003)
+# define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n)))
+# else
+# define ATTRIBUTE_NULL_PRINTF(m, n)
+# endif /* GNUC >= 3.3 */
+# define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2)
+# define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3)
+# define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4)
+# define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5)
+# define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6)
+#endif /* ATTRIBUTE_NULL_PRINTF */
+
+/* Attribute `sentinel' was valid as of gcc 3.5. */
+#ifndef ATTRIBUTE_SENTINEL
+# if (GCC_VERSION >= 3005)
+# define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__))
+# else
+# define ATTRIBUTE_SENTINEL
+# endif /* GNUC >= 3.5 */
+#endif /* ATTRIBUTE_SENTINEL */
+
+
+#ifndef ATTRIBUTE_ALIGNED_ALIGNOF
+# if (GCC_VERSION >= 3000)
+# define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m))))
+# else
+# define ATTRIBUTE_ALIGNED_ALIGNOF(m)
+# endif /* GNUC >= 3.0 */
+#endif /* ATTRIBUTE_ALIGNED_ALIGNOF */
+
+/* Useful for structures whose layout must match some binary specification
+ regardless of the alignment and padding qualities of the compiler. */
+#ifndef ATTRIBUTE_PACKED
+# define ATTRIBUTE_PACKED __attribute__ ((packed))
+#endif
+
+/* Attributes `hot' and `cold' were valid as of gcc 4.3. */
+#ifndef ATTRIBUTE_COLD
+# if (GCC_VERSION >= 4003)
+# define ATTRIBUTE_COLD __attribute__ ((__cold__))
+# else
+# define ATTRIBUTE_COLD
+# endif /* GNUC >= 4.3 */
+#endif /* ATTRIBUTE_COLD */
+#ifndef ATTRIBUTE_HOT
+# if (GCC_VERSION >= 4003)
+# define ATTRIBUTE_HOT __attribute__ ((__hot__))
+# else
+# define ATTRIBUTE_HOT
+# endif /* GNUC >= 4.3 */
+#endif /* ATTRIBUTE_HOT */
+
+/* Attribute 'no_sanitize_undefined' was valid as of gcc 4.9. */
+#ifndef ATTRIBUTE_NO_SANITIZE_UNDEFINED
+# if (GCC_VERSION >= 4009)
+# define ATTRIBUTE_NO_SANITIZE_UNDEFINED __attribute__ ((no_sanitize_undefined))
+# else
+# define ATTRIBUTE_NO_SANITIZE_UNDEFINED
+# endif /* GNUC >= 4.9 */
+#endif /* ATTRIBUTE_NO_SANITIZE_UNDEFINED */
+
+/* Attribute 'nonstring' was valid as of gcc 8. */
+#ifndef ATTRIBUTE_NONSTRING
+# if GCC_VERSION >= 8000
+# define ATTRIBUTE_NONSTRING __attribute__ ((__nonstring__))
+# else
+# define ATTRIBUTE_NONSTRING
+# endif
+#endif
+
+/* Attribute `alloc_size' was valid as of gcc 4.3. */
+#ifndef ATTRIBUTE_RESULT_SIZE_1
+# if (GCC_VERSION >= 4003)
+# define ATTRIBUTE_RESULT_SIZE_1 __attribute__ ((alloc_size (1)))
+# else
+# define ATTRIBUTE_RESULT_SIZE_1
+#endif
+#endif
+
+#ifndef ATTRIBUTE_RESULT_SIZE_2
+# if (GCC_VERSION >= 4003)
+# define ATTRIBUTE_RESULT_SIZE_2 __attribute__ ((alloc_size (2)))
+# else
+# define ATTRIBUTE_RESULT_SIZE_2
+#endif
+#endif
+
+#ifndef ATTRIBUTE_RESULT_SIZE_1_2
+# if (GCC_VERSION >= 4003)
+# define ATTRIBUTE_RESULT_SIZE_1_2 __attribute__ ((alloc_size (1, 2)))
+# else
+# define ATTRIBUTE_RESULT_SIZE_1_2
+#endif
+#endif
+
+/* Attribute `warn_unused_result' was valid as of gcc 3.3. */
+#ifndef ATTRIBUTE_WARN_UNUSED_RESULT
+# if GCC_VERSION >= 3003
+# define ATTRIBUTE_WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__))
+# else
+# define ATTRIBUTE_WARN_UNUSED_RESULT
+# endif
+#endif
+
+/* We use __extension__ in some places to suppress -pedantic warnings
+ about GCC extensions. This feature didn't work properly before
+ gcc 2.8. */
+#if GCC_VERSION < 2008
+#define __extension__
+#endif
+
+/* This is used to declare a const variable which should be visible
+ outside of the current compilation unit. Use it as
+ EXPORTED_CONST int i = 1;
+ This is because the semantics of const are different in C and C++.
+ "extern const" is permitted in C but it looks strange, and gcc
+ warns about it when -Wc++-compat is not used. */
+#ifdef __cplusplus
+#define EXPORTED_CONST extern const
+#else
+#define EXPORTED_CONST const
+#endif
+
+/* Be conservative and only use enum bitfields with C++ or GCC.
+ FIXME: provide a complete autoconf test for buggy enum bitfields. */
+
+#ifdef __cplusplus
+#define ENUM_BITFIELD(TYPE) enum TYPE
+#elif (GCC_VERSION > 2000)
+#define ENUM_BITFIELD(TYPE) __extension__ enum TYPE
+#else
+#define ENUM_BITFIELD(TYPE) unsigned int
+#endif
+
+#if defined(__cplusplus) && __cpp_constexpr >= 200704
+#define CONSTEXPR constexpr
+#else
+#define CONSTEXPR
+#endif
+
+/* A macro to disable the copy constructor and assignment operator.
+ When building with C++11 and above, the methods are explicitly
+ deleted, causing a compile-time error if something tries to copy.
+   For C++03, this just declares the methods, causing a link-time
+   error if the methods end up being called (assuming you don't
+   define them).  For best results in C++03, place the macro
+   under the private: access specifier, like this,
+
+ class name_lookup
+ {
+ private:
+ DISABLE_COPY_AND_ASSIGN (name_lookup);
+ };
+
+ so that most attempts at copy are caught at compile-time. */
+
+#if defined(__cplusplus) && __cplusplus >= 201103
+#define DISABLE_COPY_AND_ASSIGN(TYPE) \
+ TYPE (const TYPE&) = delete; \
+ void operator= (const TYPE &) = delete
+#else
+#define DISABLE_COPY_AND_ASSIGN(TYPE) \
+ TYPE (const TYPE&); \
+ void operator= (const TYPE &)
+#endif /* __cplusplus >= 201103 */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ansidecl.h */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/arm-cpu.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/arm-cpu.h
new file mode 100644
index 0000000..2d8470a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/arm-cpu.h
@@ -0,0 +1,182 @@
+/* -*- buffer-read-only: t -*-
+ Generated automatically by parsecpu.awk from arm-cpus.in.
+ Do not edit.
+
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3,
+ or (at your option) any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+enum processor_type
+{
+ TARGET_CPU_arm8,
+ TARGET_CPU_arm810,
+ TARGET_CPU_strongarm,
+ TARGET_CPU_fa526,
+ TARGET_CPU_fa626,
+ TARGET_CPU_arm7tdmi,
+ TARGET_CPU_arm710t,
+ TARGET_CPU_arm9,
+ TARGET_CPU_arm9tdmi,
+ TARGET_CPU_arm920t,
+ TARGET_CPU_arm10tdmi,
+ TARGET_CPU_arm9e,
+ TARGET_CPU_arm10e,
+ TARGET_CPU_xscale,
+ TARGET_CPU_iwmmxt,
+ TARGET_CPU_iwmmxt2,
+ TARGET_CPU_fa606te,
+ TARGET_CPU_fa626te,
+ TARGET_CPU_fmp626,
+ TARGET_CPU_fa726te,
+ TARGET_CPU_arm926ejs,
+ TARGET_CPU_arm1026ejs,
+ TARGET_CPU_arm1136js,
+ TARGET_CPU_arm1136jfs,
+ TARGET_CPU_arm1176jzs,
+ TARGET_CPU_arm1176jzfs,
+ TARGET_CPU_mpcorenovfp,
+ TARGET_CPU_mpcore,
+ TARGET_CPU_arm1156t2s,
+ TARGET_CPU_arm1156t2fs,
+ TARGET_CPU_cortexm1,
+ TARGET_CPU_cortexm0,
+ TARGET_CPU_cortexm0plus,
+ TARGET_CPU_cortexm1smallmultiply,
+ TARGET_CPU_cortexm0smallmultiply,
+ TARGET_CPU_cortexm0plussmallmultiply,
+ TARGET_CPU_genericv7a,
+ TARGET_CPU_cortexa5,
+ TARGET_CPU_cortexa7,
+ TARGET_CPU_cortexa8,
+ TARGET_CPU_cortexa9,
+ TARGET_CPU_cortexa12,
+ TARGET_CPU_cortexa15,
+ TARGET_CPU_cortexa17,
+ TARGET_CPU_cortexr4,
+ TARGET_CPU_cortexr4f,
+ TARGET_CPU_cortexr5,
+ TARGET_CPU_cortexr7,
+ TARGET_CPU_cortexr8,
+ TARGET_CPU_cortexm7,
+ TARGET_CPU_cortexm4,
+ TARGET_CPU_cortexm3,
+ TARGET_CPU_marvell_pj4,
+ TARGET_CPU_cortexa15cortexa7,
+ TARGET_CPU_cortexa17cortexa7,
+ TARGET_CPU_cortexa32,
+ TARGET_CPU_cortexa35,
+ TARGET_CPU_cortexa53,
+ TARGET_CPU_cortexa57,
+ TARGET_CPU_cortexa72,
+ TARGET_CPU_cortexa73,
+ TARGET_CPU_exynosm1,
+ TARGET_CPU_xgene1,
+ TARGET_CPU_cortexa57cortexa53,
+ TARGET_CPU_cortexa72cortexa53,
+ TARGET_CPU_cortexa73cortexa35,
+ TARGET_CPU_cortexa73cortexa53,
+ TARGET_CPU_cortexa55,
+ TARGET_CPU_cortexa75,
+ TARGET_CPU_cortexa76,
+ TARGET_CPU_cortexa76ae,
+ TARGET_CPU_cortexa77,
+ TARGET_CPU_cortexa78,
+ TARGET_CPU_cortexa78ae,
+ TARGET_CPU_cortexa78c,
+ TARGET_CPU_cortexa710,
+ TARGET_CPU_cortexx1,
+ TARGET_CPU_cortexx1c,
+ TARGET_CPU_neoversen1,
+ TARGET_CPU_cortexa75cortexa55,
+ TARGET_CPU_cortexa76cortexa55,
+ TARGET_CPU_neoversev1,
+ TARGET_CPU_neoversen2,
+ TARGET_CPU_cortexm23,
+ TARGET_CPU_cortexm33,
+ TARGET_CPU_cortexm35p,
+ TARGET_CPU_cortexm55,
+ TARGET_CPU_starmc1,
+ TARGET_CPU_cortexm85,
+ TARGET_CPU_cortexr52,
+ TARGET_CPU_cortexr52plus,
+ TARGET_CPU_arm_none
+};
+
+enum arch_type
+{
+ TARGET_ARCH_armv4,
+ TARGET_ARCH_armv4t,
+ TARGET_ARCH_armv5t,
+ TARGET_ARCH_armv5te,
+ TARGET_ARCH_armv5tej,
+ TARGET_ARCH_armv6,
+ TARGET_ARCH_armv6j,
+ TARGET_ARCH_armv6k,
+ TARGET_ARCH_armv6z,
+ TARGET_ARCH_armv6kz,
+ TARGET_ARCH_armv6zk,
+ TARGET_ARCH_armv6t2,
+ TARGET_ARCH_armv6_m,
+ TARGET_ARCH_armv6s_m,
+ TARGET_ARCH_armv7,
+ TARGET_ARCH_armv7_a,
+ TARGET_ARCH_armv7ve,
+ TARGET_ARCH_armv7_r,
+ TARGET_ARCH_armv7_m,
+ TARGET_ARCH_armv7e_m,
+ TARGET_ARCH_armv8_a,
+ TARGET_ARCH_armv8_1_a,
+ TARGET_ARCH_armv8_2_a,
+ TARGET_ARCH_armv8_3_a,
+ TARGET_ARCH_armv8_4_a,
+ TARGET_ARCH_armv8_5_a,
+ TARGET_ARCH_armv8_6_a,
+ TARGET_ARCH_armv8_m_base,
+ TARGET_ARCH_armv8_m_main,
+ TARGET_ARCH_armv8_r,
+ TARGET_ARCH_armv8_1_m_main,
+ TARGET_ARCH_armv9_a,
+ TARGET_ARCH_iwmmxt,
+ TARGET_ARCH_iwmmxt2,
+ TARGET_ARCH_arm_none
+};
+
+enum fpu_type
+{
+ TARGET_FPU_vfp,
+ TARGET_FPU_vfpv2,
+ TARGET_FPU_vfpv3,
+ TARGET_FPU_vfpv3_fp16,
+ TARGET_FPU_vfpv3_d16,
+ TARGET_FPU_vfpv3_d16_fp16,
+ TARGET_FPU_vfpv3xd,
+ TARGET_FPU_vfpv3xd_fp16,
+ TARGET_FPU_neon,
+ TARGET_FPU_neon_vfpv3,
+ TARGET_FPU_neon_fp16,
+ TARGET_FPU_vfpv4,
+ TARGET_FPU_neon_vfpv4,
+ TARGET_FPU_vfpv4_d16,
+ TARGET_FPU_fpv4_sp_d16,
+ TARGET_FPU_fpv5_sp_d16,
+ TARGET_FPU_fpv5_d16,
+ TARGET_FPU_fp_armv8,
+ TARGET_FPU_neon_fp_armv8,
+ TARGET_FPU_crypto_neon_fp_armv8,
+ TARGET_FPU_vfp3,
+ TARGET_FPU_auto
+};
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/arm-isa.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/arm-isa.h
new file mode 100644
index 0000000..839b9e5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/arm-isa.h
@@ -0,0 +1,682 @@
+/* -*- buffer-read-only: t -*-
+ Generated automatically by parsecpu.awk from arm-cpus.in.
+ Do not edit.
+
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3,
+ or (at your option) any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public
+ License along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+enum isa_feature {
+ isa_nobit = 0,
+ isa_bit_quirk_vlldm,
+ isa_bit_fp16fml,
+ isa_bit_mve,
+ isa_bit_cmse,
+ isa_bit_quirk_armv6kz,
+ isa_bit_dotprod,
+ isa_bit_crc32,
+ isa_bit_xscale,
+ isa_bit_pacbti,
+ isa_bit_vfpv2,
+ isa_bit_vfpv3,
+ isa_bit_vfpv4,
+ isa_bit_lpae,
+ isa_bit_armv7em,
+ isa_bit_fp16,
+ isa_bit_adiv,
+ isa_bit_fp_d32,
+ isa_bit_be8,
+ isa_bit_fp16conv,
+ isa_bit_thumb2,
+ isa_bit_crypto,
+ isa_bit_mp,
+ isa_bit_sec,
+ isa_bit_sb,
+ isa_bit_bf16,
+ isa_bit_predres,
+ isa_bit_armv4,
+ isa_bit_quirk_cm3_ldrd,
+ isa_bit_smallmul,
+ isa_bit_armv5t,
+ isa_bit_armv8_1m_main,
+ isa_bit_armv6,
+ isa_bit_thumb,
+ isa_bit_quirk_no_asmcpu,
+ isa_bit_armv7,
+ isa_bit_armv8,
+ isa_bit_armv9,
+ isa_bit_i8mm,
+ isa_bit_fp_dbl,
+ isa_bit_armv5te,
+ isa_bit_fpv5,
+ isa_bit_iwmmxt2,
+ isa_bit_quirk_aes_1742098,
+ isa_bit_notm,
+ isa_bit_cdecp0,
+ isa_bit_cdecp1,
+ isa_bit_cdecp2,
+ isa_bit_cdecp3,
+ isa_bit_iwmmxt,
+ isa_bit_cdecp4,
+ isa_bit_cdecp5,
+ isa_bit_cdecp6,
+ isa_bit_cdecp7,
+ isa_bit_mve_float,
+ isa_bit_armv8_1,
+ isa_bit_armv8_2,
+ isa_bit_armv8_3,
+ isa_bit_tdiv,
+ isa_bit_armv8_4,
+ isa_bit_armv8_5,
+ isa_bit_armv8_6,
+ isa_bit_neon,
+ isa_bit_quirk_no_volatile_ce,
+ isa_bit_armv6k,
+ isa_bit_vfp_base,
+ isa_num_bits
+};
+
+#define ISA_ARMv8_6a \
+ isa_bit_tdiv, \
+ isa_bit_notm, \
+ isa_bit_mp, \
+ isa_bit_armv5t, \
+ isa_bit_thumb2, \
+ isa_bit_crc32, \
+ isa_bit_armv6k, \
+ isa_bit_armv5te, \
+ isa_bit_sec, \
+ isa_bit_be8, \
+ isa_bit_sb, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_armv8, \
+ isa_bit_adiv, \
+ isa_bit_armv8_1, \
+ isa_bit_armv8_2, \
+ isa_bit_armv8_3, \
+ isa_bit_armv8_4, \
+ isa_bit_armv8_5, \
+ isa_bit_armv8_6, \
+ isa_bit_lpae, \
+ isa_bit_thumb, \
+ isa_bit_predres
+
+#define ISA_ARMv8r \
+ isa_bit_adiv, \
+ isa_bit_mp, \
+ isa_bit_lpae, \
+ isa_bit_armv5te, \
+ isa_bit_tdiv, \
+ isa_bit_thumb2, \
+ isa_bit_notm, \
+ isa_bit_armv5t, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_armv8, \
+ isa_bit_thumb, \
+ isa_bit_armv6k, \
+ isa_bit_sec, \
+ isa_bit_be8
+
+#define ISA_ARMv6z \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_be8, \
+ isa_bit_thumb, \
+ isa_bit_armv5te, \
+ isa_bit_armv5t, \
+ isa_bit_notm
+
+#define ISA_DOTPROD \
+ isa_bit_dotprod, \
+ isa_bit_neon, \
+ isa_bit_fp_dbl, \
+ isa_bit_fp_d32
+
+#define ISA_ALL_CRYPTO \
+ isa_bit_crypto
+
+#define ISA_ALL_FPU_EXTERNAL \
+ isa_bit_fp16, \
+ isa_bit_bf16
+
+#define ISA_ALL_SIMD \
+ isa_bit_fp_d32, \
+ isa_bit_fp16fml, \
+ isa_bit_dotprod, \
+ isa_bit_neon, \
+ isa_bit_crypto, \
+ isa_bit_i8mm
+
+#define ISA_ALL_QUIRKS \
+ isa_bit_quirk_no_asmcpu, \
+ isa_bit_quirk_vlldm, \
+ isa_bit_xscale, \
+ isa_bit_quirk_cm3_ldrd, \
+ isa_bit_quirk_no_volatile_ce, \
+ isa_bit_quirk_armv6kz, \
+ isa_bit_quirk_aes_1742098
+
+#define ISA_CRYPTO \
+ isa_bit_neon, \
+ isa_bit_fp_dbl, \
+ isa_bit_fp_d32, \
+ isa_bit_crypto
+
+#define ISA_ARMv8m_base \
+ isa_bit_be8, \
+ isa_bit_thumb, \
+ isa_bit_tdiv, \
+ isa_bit_armv5t, \
+ isa_bit_armv5te, \
+ isa_bit_cmse, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv8
+
+#define ISA_ARMv6zk \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_thumb, \
+ isa_bit_notm, \
+ isa_bit_armv5t, \
+ isa_bit_armv5te, \
+ isa_bit_armv6k, \
+ isa_bit_be8
+
+#define ISA_VFPv2 \
+ isa_bit_vfpv2
+
+#define ISA_VFPv3 \
+ isa_bit_vfpv2, \
+ isa_bit_vfpv3
+
+#define ISA_VFPv4 \
+ isa_bit_fp16conv, \
+ isa_bit_vfpv2, \
+ isa_bit_vfpv3, \
+ isa_bit_vfpv4
+
+#define ISA_FP_D32 \
+ isa_bit_fp_dbl, \
+ isa_bit_fp_d32
+
+#define ISA_ARMv7ve \
+ isa_bit_armv5te, \
+ isa_bit_be8, \
+ isa_bit_thumb2, \
+ isa_bit_armv5t, \
+ isa_bit_adiv, \
+ isa_bit_lpae, \
+ isa_bit_tdiv, \
+ isa_bit_armv6k, \
+ isa_bit_notm, \
+ isa_bit_mp, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_thumb, \
+ isa_bit_sec
+
+#define ISA_ARMv7a \
+ isa_bit_armv5te, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_thumb, \
+ isa_bit_be8, \
+ isa_bit_thumb2, \
+ isa_bit_armv5t, \
+ isa_bit_notm, \
+ isa_bit_armv6k
+
+#define ISA_ALL_SIMD_INTERNAL \
+ isa_bit_fp_d32, \
+ isa_bit_crypto, \
+ isa_bit_neon
+
+#define ISA_ARMv8_1a \
+ isa_bit_crc32, \
+ isa_bit_tdiv, \
+ isa_bit_armv8_1, \
+ isa_bit_armv5t, \
+ isa_bit_notm, \
+ isa_bit_thumb2, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_armv8, \
+ isa_bit_armv6k, \
+ isa_bit_thumb, \
+ isa_bit_armv5te, \
+ isa_bit_sec, \
+ isa_bit_be8, \
+ isa_bit_adiv, \
+ isa_bit_lpae, \
+ isa_bit_mp
+
+#define ISA_ARMv7em \
+ isa_bit_armv5te, \
+ isa_bit_be8, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_thumb2, \
+ isa_bit_armv5t, \
+ isa_bit_thumb, \
+ isa_bit_armv7em, \
+ isa_bit_tdiv
+
+#define ISA_FP_DBL \
+ isa_bit_fp_dbl
+
+#define ISA_ARMv9a \
+ isa_bit_be8, \
+ isa_bit_sb, \
+ isa_bit_adiv, \
+ isa_bit_lpae, \
+ isa_bit_armv8_1, \
+ isa_bit_armv8_2, \
+ isa_bit_armv8_3, \
+ isa_bit_armv8_4, \
+ isa_bit_armv8_5, \
+ isa_bit_tdiv, \
+ isa_bit_predres, \
+ isa_bit_notm, \
+ isa_bit_crc32, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_armv8, \
+ isa_bit_armv9, \
+ isa_bit_mp, \
+ isa_bit_thumb2, \
+ isa_bit_armv5t, \
+ isa_bit_armv5te, \
+ isa_bit_thumb, \
+ isa_bit_armv6k, \
+ isa_bit_sec
+
+#define ISA_ARMv8_3a \
+ isa_bit_armv5te, \
+ isa_bit_thumb2, \
+ isa_bit_armv5t, \
+ isa_bit_sec, \
+ isa_bit_mp, \
+ isa_bit_be8, \
+ isa_bit_crc32, \
+ isa_bit_armv6k, \
+ isa_bit_adiv, \
+ isa_bit_lpae, \
+ isa_bit_armv8_1, \
+ isa_bit_armv8_2, \
+ isa_bit_armv8_3, \
+ isa_bit_tdiv, \
+ isa_bit_notm, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_armv8, \
+ isa_bit_thumb
+
+#define ISA_ARMv5t \
+ isa_bit_thumb, \
+ isa_bit_notm, \
+ isa_bit_armv5t, \
+ isa_bit_armv4
+
+#define ISA_ARMv7m \
+ isa_bit_be8, \
+ isa_bit_armv5te, \
+ isa_bit_tdiv, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_thumb2, \
+ isa_bit_armv5t, \
+ isa_bit_thumb
+
+#define ISA_ARMv4 \
+ isa_bit_notm, \
+ isa_bit_armv4
+
+#define ISA_ARMv6kz \
+ isa_bit_be8, \
+ isa_bit_armv5te, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv5t, \
+ isa_bit_quirk_armv6kz, \
+ isa_bit_notm, \
+ isa_bit_thumb, \
+ isa_bit_armv6k
+
+#define ISA_ARMv6 \
+ isa_bit_armv5t, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_thumb, \
+ isa_bit_armv5te, \
+ isa_bit_notm, \
+ isa_bit_be8
+
+#define ISA_ALL_FP \
+ isa_bit_fp_dbl, \
+ isa_bit_fp16conv, \
+ isa_bit_fp16, \
+ isa_bit_fp_d32, \
+ isa_bit_bf16, \
+ isa_bit_fpv5, \
+ isa_bit_neon, \
+ isa_bit_fp16fml, \
+ isa_bit_crypto, \
+ isa_bit_i8mm, \
+ isa_bit_dotprod, \
+ isa_bit_vfpv2, \
+ isa_bit_vfpv3, \
+ isa_bit_vfpv4
+
+#define ISA_ARMv7 \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_thumb, \
+ isa_bit_be8, \
+ isa_bit_armv5te, \
+ isa_bit_thumb2, \
+ isa_bit_armv5t
+
+#define ISA_ARMv8_5a \
+ isa_bit_adiv, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_armv8, \
+ isa_bit_lpae, \
+ isa_bit_armv8_1, \
+ isa_bit_armv8_2, \
+ isa_bit_armv8_3, \
+ isa_bit_armv8_4, \
+ isa_bit_armv8_5, \
+ isa_bit_thumb2, \
+ isa_bit_armv5t, \
+ isa_bit_thumb, \
+ isa_bit_predres, \
+ isa_bit_tdiv, \
+ isa_bit_notm, \
+ isa_bit_armv6k, \
+ isa_bit_mp, \
+ isa_bit_armv5te, \
+ isa_bit_crc32, \
+ isa_bit_sec, \
+ isa_bit_be8, \
+ isa_bit_sb
+
+#define ISA_ARMv7r \
+ isa_bit_be8, \
+ isa_bit_thumb2, \
+ isa_bit_armv5t, \
+ isa_bit_tdiv, \
+ isa_bit_armv5te, \
+ isa_bit_notm, \
+ isa_bit_armv6k, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_thumb
+
+#define ISA_ALL_FPU_INTERNAL \
+ isa_bit_fp16conv, \
+ isa_bit_crypto, \
+ isa_bit_vfpv2, \
+ isa_bit_vfpv3, \
+ isa_bit_vfpv4, \
+ isa_bit_fpv5, \
+ isa_bit_neon, \
+ isa_bit_fp_dbl, \
+ isa_bit_fp_d32
+
+#define ISA_FPv5 \
+ isa_bit_fpv5, \
+ isa_bit_fp16conv, \
+ isa_bit_vfpv2, \
+ isa_bit_vfpv3, \
+ isa_bit_vfpv4
+
+#define ISA_ARMv6t2 \
+ isa_bit_armv5te, \
+ isa_bit_thumb2, \
+ isa_bit_armv5t, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_notm, \
+ isa_bit_thumb, \
+ isa_bit_be8
+
+#define ISA_ARMv8m_main \
+ isa_bit_tdiv, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_armv8, \
+ isa_bit_be8, \
+ isa_bit_armv5t, \
+ isa_bit_thumb2, \
+ isa_bit_thumb, \
+ isa_bit_cmse, \
+ isa_bit_armv5te
+
+#define ISA_ARMv8_1m_main \
+ isa_bit_be8, \
+ isa_bit_armv5te, \
+ isa_bit_cmse, \
+ isa_bit_armv8_1m_main, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_armv8, \
+ isa_bit_thumb2, \
+ isa_bit_armv5t, \
+ isa_bit_thumb, \
+ isa_bit_tdiv
+
+#define ISA_NEON \
+ isa_bit_fp_d32, \
+ isa_bit_neon, \
+ isa_bit_fp_dbl
+
+#define ISA_ARMv5te \
+ isa_bit_armv4, \
+ isa_bit_thumb, \
+ isa_bit_notm, \
+ isa_bit_armv5te, \
+ isa_bit_armv5t
+
+#define ISA_FP_ARMv8 \
+ isa_bit_fp_dbl, \
+ isa_bit_fp_d32, \
+ isa_bit_vfpv2, \
+ isa_bit_vfpv3, \
+ isa_bit_vfpv4, \
+ isa_bit_fp16conv, \
+ isa_bit_fpv5
+
+#define ISA_ARMv8a \
+ isa_bit_armv5te, \
+ isa_bit_thumb, \
+ isa_bit_sec, \
+ isa_bit_adiv, \
+ isa_bit_be8, \
+ isa_bit_lpae, \
+ isa_bit_tdiv, \
+ isa_bit_notm, \
+ isa_bit_thumb2, \
+ isa_bit_armv5t, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_armv8, \
+ isa_bit_mp, \
+ isa_bit_armv6k
+
+#define ISA_MVE \
+ isa_bit_armv7em, \
+ isa_bit_mve
+
+#define ISA_ALL_SIMD_EXTERNAL \
+ isa_bit_fp16fml, \
+ isa_bit_dotprod, \
+ isa_bit_i8mm
+
+#define ISA_ARMv8_2a \
+ isa_bit_armv5t, \
+ isa_bit_thumb, \
+ isa_bit_thumb2, \
+ isa_bit_armv6k, \
+ isa_bit_armv5te, \
+ isa_bit_sec, \
+ isa_bit_be8, \
+ isa_bit_mp, \
+ isa_bit_crc32, \
+ isa_bit_adiv, \
+ isa_bit_lpae, \
+ isa_bit_tdiv, \
+ isa_bit_armv8_1, \
+ isa_bit_armv8_2, \
+ isa_bit_notm, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_armv8
+
+#define ISA_ARMv6j \
+ isa_bit_be8, \
+ isa_bit_armv5te, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_notm, \
+ isa_bit_thumb, \
+ isa_bit_armv5t
+
+#define ISA_ARMv6k \
+ isa_bit_be8, \
+ isa_bit_notm, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv5t, \
+ isa_bit_armv5te, \
+ isa_bit_thumb, \
+ isa_bit_armv6k
+
+#define ISA_ARMv4t \
+ isa_bit_armv4, \
+ isa_bit_thumb, \
+ isa_bit_notm
+
+#define ISA_ARMv6m \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_be8, \
+ isa_bit_thumb, \
+ isa_bit_armv5te, \
+ isa_bit_armv5t
+
+#define ISA_IGNORE_FOR_MULTILIB \
+ isa_bit_cdecp0, \
+ isa_bit_cdecp1, \
+ isa_bit_cdecp2, \
+ isa_bit_cdecp3, \
+ isa_bit_cdecp4, \
+ isa_bit_cdecp5, \
+ isa_bit_cdecp6, \
+ isa_bit_cdecp7
+
+#define ISA_MVE_FP \
+ isa_bit_fp16, \
+ isa_bit_armv7em, \
+ isa_bit_mve, \
+ isa_bit_fpv5, \
+ isa_bit_vfpv2, \
+ isa_bit_vfpv3, \
+ isa_bit_vfpv4, \
+ isa_bit_fp16conv, \
+ isa_bit_mve_float
+
+#define ISA_ARMv5tej \
+ isa_bit_armv4, \
+ isa_bit_armv5te, \
+ isa_bit_thumb, \
+ isa_bit_notm, \
+ isa_bit_armv5t
+
+#define ISA_ARMv8_4a \
+ isa_bit_sec, \
+ isa_bit_crc32, \
+ isa_bit_be8, \
+ isa_bit_thumb2, \
+ isa_bit_armv5t, \
+ isa_bit_adiv, \
+ isa_bit_lpae, \
+ isa_bit_armv4, \
+ isa_bit_armv6, \
+ isa_bit_armv7, \
+ isa_bit_armv8, \
+ isa_bit_tdiv, \
+ isa_bit_armv8_1, \
+ isa_bit_armv8_2, \
+ isa_bit_armv8_3, \
+ isa_bit_armv8_4, \
+ isa_bit_armv6k, \
+ isa_bit_notm, \
+ isa_bit_thumb, \
+ isa_bit_mp, \
+ isa_bit_armv5te
+
+struct fbit_implication {
+ /* Represents a feature implication, where:
+ ante IMPLIES cons
+ meaning that if ante is enabled then we should
+ also implicitly enable cons. */
+ enum isa_feature ante;
+ enum isa_feature cons;
+};
+
+static const struct fbit_implication all_implied_fbits[] =
+{
+ { isa_bit_neon, isa_bit_vfp_base },
+ { isa_bit_vfpv4, isa_bit_vfp_base },
+ { isa_bit_fp_d32, isa_bit_vfp_base },
+ { isa_bit_fp_dbl, isa_bit_vfp_base },
+ { isa_bit_mve_float, isa_bit_vfp_base },
+ { isa_bit_mve, isa_bit_vfp_base },
+ { isa_bit_dotprod, isa_bit_vfp_base },
+ { isa_bit_crypto, isa_bit_vfp_base },
+ { isa_bit_fp16, isa_bit_vfp_base },
+ { isa_bit_armv7em, isa_bit_vfp_base },
+ { isa_bit_i8mm, isa_bit_vfp_base },
+ { isa_bit_fp16conv, isa_bit_vfp_base },
+ { isa_bit_fpv5, isa_bit_vfp_base },
+ { isa_bit_fp16fml, isa_bit_vfp_base },
+ { isa_bit_bf16, isa_bit_vfp_base },
+ { isa_bit_vfpv2, isa_bit_vfp_base },
+ { isa_bit_vfpv3, isa_bit_vfp_base },
+ { isa_nobit, isa_nobit }
+};
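+
+/* Illustrative only (not part of the generated file): a consumer would
+   apply the implication table until a fixed point, e.g. with a bitset:
+
+     std::bitset<isa_num_bits> isa;
+     bool changed = true;
+     while (changed)
+       {
+	 changed = false;
+	 for (const fbit_implication *p = all_implied_fbits;
+	      p->ante != isa_nobit; p++)
+	   if (isa[p->ante] && !isa[p->cons])
+	     isa[p->cons] = changed = true;  // enable the implied feature
+       }
+*/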
+
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/array-traits.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/array-traits.h
new file mode 100644
index 0000000..4cc686f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/array-traits.h
@@ -0,0 +1,48 @@
+/* Descriptions of array-like objects.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ARRAY_TRAITS_H
+#define GCC_ARRAY_TRAITS_H
+
+/* Implementation for single integers (and similar types). */
+template<typename T, T zero = T (0)>
+struct scalar_array_traits
+{
+ typedef T element_type;
+ static const bool has_constant_size = true;
+ static const size_t constant_size = 1;
+ static const T *base (const T &x) { return &x; }
+ static size_t size (const T &) { return 1; }
+};
+
+template<typename T>
+struct array_traits : scalar_array_traits<T> {};
+
+/* Implementation for arrays with a static size. */
+template<typename T, size_t N>
+struct array_traits<T[N]>
+{
+ typedef T element_type;
+ static const bool has_constant_size = true;
+ static const size_t constant_size = N;
+ static const T *base (const T (&x)[N]) { return x; }
+ static size_t size (const T (&)[N]) { return N; }
+};
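+
+/* Usage sketch (illustrative): a scalar and a fixed-size array can be
+   walked through the same interface.
+
+     int x = 42;
+     int v[3] = { 1, 2, 3 };
+     const int *p = array_traits<int>::base (x);     // &x, size 1
+     const int *q = array_traits<int[3]>::base (v);  // v, size 3
+*/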
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/asan.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/asan.h
new file mode 100644
index 0000000..7d26b41
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/asan.h
@@ -0,0 +1,264 @@
+/* AddressSanitizer, a fast memory error detector.
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+ Contributed by Kostya Serebryany <kcc@google.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef TREE_ASAN
+#define TREE_ASAN
+
+extern void asan_function_start (void);
+extern void asan_finish_file (void);
+extern rtx_insn *asan_emit_stack_protection (rtx, rtx, unsigned int,
+ HOST_WIDE_INT *, tree *, int);
+extern rtx_insn *asan_emit_allocas_unpoison (rtx, rtx, rtx_insn *);
+extern bool asan_protect_global (tree, bool ignore_decl_rtl_set_p = false);
+extern void initialize_sanitizer_builtins (void);
+extern tree asan_dynamic_init_call (bool);
+extern bool asan_expand_check_ifn (gimple_stmt_iterator *, bool);
+extern bool asan_expand_mark_ifn (gimple_stmt_iterator *);
+extern bool asan_expand_poison_ifn (gimple_stmt_iterator *, bool *,
+ hash_map<tree, tree> &);
+extern rtx asan_memfn_rtl (tree);
+
+extern void hwasan_record_frame_init ();
+extern void hwasan_record_stack_var (rtx, rtx, poly_int64, poly_int64);
+extern void hwasan_emit_prologue ();
+extern rtx_insn *hwasan_emit_untag_frame (rtx, rtx);
+extern rtx hwasan_get_frame_extent ();
+extern rtx hwasan_frame_base ();
+extern void hwasan_maybe_emit_frame_base_init (void);
+extern bool stack_vars_base_reg_p (rtx);
+extern uint8_t hwasan_current_frame_tag ();
+extern void hwasan_increment_frame_tag ();
+extern rtx hwasan_truncate_to_tag_size (rtx, rtx);
+extern void hwasan_finish_file (void);
+extern bool hwasan_sanitize_p (void);
+extern bool hwasan_sanitize_stack_p (void);
+extern bool hwasan_sanitize_allocas_p (void);
+extern bool hwasan_expand_check_ifn (gimple_stmt_iterator *, bool);
+extern bool hwasan_expand_mark_ifn (gimple_stmt_iterator *);
+extern bool gate_hwasan (void);
+
+extern gimple_stmt_iterator create_cond_insert_point
+ (gimple_stmt_iterator *, bool, bool, bool, basic_block *, basic_block *);
+
+/* Alias set for accessing the shadow memory. */
+extern alias_set_type asan_shadow_set;
+
+/* Hash set of labels that are either used in a goto, or their address
+ has been taken. */
+extern hash_set <tree> *asan_used_labels;
+
+/* Shadow memory is found at
+ (address >> ASAN_SHADOW_SHIFT) + asan_shadow_offset (). */
+#define ASAN_SHADOW_SHIFT 3
+#define ASAN_SHADOW_GRANULARITY (1UL << ASAN_SHADOW_SHIFT)
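+
+/* For example, with ASAN_SHADOW_SHIFT == 3 one shadow byte describes an
+   8-byte granule of application memory, so the shadow byte for address
+   0x7fff8000 is at (0x7fff8000 >> 3) + asan_shadow_offset ()
+   == 0x0ffff000 + asan_shadow_offset (). */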
+
+/* Red zone size: stack and global variables are padded by between
+   ASAN_RED_ZONE_SIZE and 2 * ASAN_RED_ZONE_SIZE - 1 bytes. */
+#define ASAN_RED_ZONE_SIZE 32
+
+/* Stack variables use more compact red zones.  The size also includes
+   the size of the variable itself. */
+
+#define ASAN_MIN_RED_ZONE_SIZE 16
+
+/* Shadow memory values for stack protection.  "Left" is below the
+   protected variables; the first stack slot at that offset contains the
+   ASAN_STACK_FRAME_MAGIC word and the second a pointer to a string
+   describing the frame.  "Middle" is the padding in between variables,
+   "right" is above the last protected variable, and "partial" comes
+   immediately after the variables, up to ASAN_RED_ZONE_SIZE alignment. */
+#define ASAN_STACK_MAGIC_LEFT 0xf1
+#define ASAN_STACK_MAGIC_MIDDLE 0xf2
+#define ASAN_STACK_MAGIC_RIGHT 0xf3
+#define ASAN_STACK_MAGIC_USE_AFTER_RET 0xf5
+#define ASAN_STACK_MAGIC_USE_AFTER_SCOPE 0xf8
+
+#define ASAN_STACK_FRAME_MAGIC 0x41b58ab3
+#define ASAN_STACK_RETIRED_MAGIC 0x45e0360e
+
+#define ASAN_USE_AFTER_SCOPE_ATTRIBUTE "use after scope memory"
+
+/* NOTE: The values below and the hooks under targetm.memtag define an ABI and
+ are hard-coded to these values in libhwasan, hence they can't be changed
+ independently here. */
+/* How many bits are used to store a tag in a pointer.
+ The default version uses the entire top byte of a pointer (i.e. 8 bits). */
+#define HWASAN_TAG_SIZE targetm.memtag.tag_size ()
+/* Tag granule of the HWASAN shadow memory.
+ This is the size in real memory that each byte in the shadow memory refers
+ to. I.e. if a variable is X bytes long in memory then its tag in shadow
+ memory will span X / HWASAN_TAG_GRANULE_SIZE bytes.
+ Most variables will need to be aligned to this amount since two variables
+ that are neighbors in memory and share a tag granule would need to share the
+ same tag (the shared tag granule can only store one tag). */
+#define HWASAN_TAG_GRANULE_SIZE targetm.memtag.granule_size ()
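+/* For instance, with a 16-byte granule a 48-byte variable spans three
+   granules and is described by three shadow bytes, all of which must hold
+   the same tag (illustrative numbers; the granule size is a target hook). */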
+/* Define the tag for the stack background.
+ This defines what tag the stack pointer will be and hence what tag all
+ variables that are not given special tags are (e.g. spilled registers,
+ and parameters passed on the stack). */
+#define HWASAN_STACK_BACKGROUND gen_int_mode (0, QImode)
+
+/* Various flags for Asan builtins. */
+enum asan_check_flags
+{
+ ASAN_CHECK_STORE = 1 << 0,
+ ASAN_CHECK_SCALAR_ACCESS = 1 << 1,
+ ASAN_CHECK_NON_ZERO_LEN = 1 << 2,
+ ASAN_CHECK_LAST = 1 << 3
+};
+
+/* Flags for Asan check builtins. */
+#define IFN_ASAN_MARK_FLAGS DEF(POISON), DEF(UNPOISON)
+
+enum asan_mark_flags
+{
+#define DEF(X) ASAN_MARK_##X
+ IFN_ASAN_MARK_FLAGS
+#undef DEF
+};
+
+/* Return true if STMT is ASAN_MARK with FLAG as first argument. */
+extern bool asan_mark_p (gimple *stmt, enum asan_mark_flags flag);
+
+/* Return the size of padding needed to insert after a protected
+ decl of SIZE. */
+
+inline unsigned int
+asan_red_zone_size (unsigned int size)
+{
+ unsigned int c = size & (ASAN_RED_ZONE_SIZE - 1);
+ return c ? 2 * ASAN_RED_ZONE_SIZE - c : ASAN_RED_ZONE_SIZE;
+}
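+
+/* E.g. for SIZE == 20: c == 20, so the padding is 2*32 - 20 == 44 bytes,
+   rounding the decl plus red zone up to 64; for a 32-byte-aligned SIZE
+   the padding is exactly ASAN_RED_ZONE_SIZE. */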
+
+/* Return how much stack space a variable of SIZE bytes occupies,
+   including space for the red zone. */
+
+inline unsigned HOST_WIDE_INT
+asan_var_and_redzone_size (unsigned HOST_WIDE_INT size)
+{
+ if (size <= 4)
+ return 16;
+ else if (size <= 16)
+ return 32;
+ else if (size <= 128)
+ return size + 32;
+ else if (size <= 512)
+ return size + 64;
+ else if (size <= 4096)
+ return size + 128;
+ else
+ return size + 256;
+}
+
+extern bool set_asan_shadow_offset (const char *);
+
+extern bool asan_shadow_offset_set_p ();
+
+extern void set_sanitized_sections (const char *);
+
+extern bool asan_sanitize_stack_p (void);
+
+extern bool asan_sanitize_allocas_p (void);
+
+extern hash_set<tree> *asan_handled_variables;
+
+/* Return TRUE if builtin with given FCODE will be intercepted by
+ libasan. */
+
+inline bool
+asan_intercepted_p (enum built_in_function fcode)
+{
+ if (hwasan_sanitize_p ())
+ return false;
+
+ return fcode == BUILT_IN_INDEX
+ || fcode == BUILT_IN_MEMCHR
+ || fcode == BUILT_IN_MEMCMP
+ || fcode == BUILT_IN_MEMCPY
+ || fcode == BUILT_IN_MEMMOVE
+ || fcode == BUILT_IN_MEMSET
+ || fcode == BUILT_IN_STRCASECMP
+ || fcode == BUILT_IN_STRCAT
+ || fcode == BUILT_IN_STRCHR
+ || fcode == BUILT_IN_STRCMP
+ || fcode == BUILT_IN_STRCPY
+ || fcode == BUILT_IN_STRDUP
+ || fcode == BUILT_IN_STRLEN
+ || fcode == BUILT_IN_STRNCASECMP
+ || fcode == BUILT_IN_STRNCAT
+ || fcode == BUILT_IN_STRNCMP
+ || fcode == BUILT_IN_STRCSPN
+ || fcode == BUILT_IN_STRPBRK
+ || fcode == BUILT_IN_STRSPN
+ || fcode == BUILT_IN_STRSTR
+ || fcode == BUILT_IN_STRNCPY;
+}
+
+/* Return TRUE if we should instrument for use-after-scope sanity checking. */
+
+inline bool
+asan_sanitize_use_after_scope (void)
+{
+ return (flag_sanitize_address_use_after_scope
+ && (asan_sanitize_stack_p () || hwasan_sanitize_stack_p ()));
+}
+
+/* Return true if DECL should be guarded on the stack. */
+
+inline bool
+asan_protect_stack_decl (tree decl)
+{
+ return DECL_P (decl)
+ && (!DECL_ARTIFICIAL (decl)
+ || (asan_sanitize_use_after_scope () && TREE_ADDRESSABLE (decl)));
+}
+
+/* Return true when flag_sanitize & FLAG is non-zero. If FN is non-null,
+ remove all flags mentioned in "no_sanitize" of DECL_ATTRIBUTES. */
+
+inline bool
+sanitize_flags_p (unsigned int flag, const_tree fn = current_function_decl)
+{
+ unsigned int result_flags = flag_sanitize & flag;
+ if (result_flags == 0)
+ return false;
+
+ if (fn != NULL_TREE)
+ {
+ tree value = lookup_attribute ("no_sanitize", DECL_ATTRIBUTES (fn));
+ if (value)
+ result_flags &= ~tree_to_uhwi (TREE_VALUE (value));
+ }
+
+ return result_flags;
+}
+
+/* Return true when coverage sanitization should happen for function FN. */
+
+inline bool
+sanitize_coverage_p (const_tree fn = current_function_decl)
+{
+ return (flag_sanitize_coverage
+ && (fn == NULL_TREE
+ || lookup_attribute ("no_sanitize_coverage",
+ DECL_ATTRIBUTES (fn)) == NULL_TREE));
+}
+
+#endif /* TREE_ASAN */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/attr-fnspec.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/attr-fnspec.h
new file mode 100644
index 0000000..99d5f89
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/attr-fnspec.h
@@ -0,0 +1,304 @@
+/* Handling of fnspec attribute specifiers
+ Copyright (C) 2008-2023 Free Software Foundation, Inc.
+ Contributed by Richard Guenther <rguenther@suse.de>
+
+ This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Parse string of attribute "fn spec". This is an internal attribute
+ describing side effects of a function as follows:
+
+ character 0 specifies properties of return values as follows:
+   '1'...'4' specifies which argument (1-4) the function returns (as in memset)
+ 'm' specifies that returned value is noalias (as in malloc)
+ '.' specifies that nothing is known.
+ character 1 specifies additional function properties
+ ' ' specifies that nothing is known
+ 'p' or 'P' specifies that function is pure except for described side
+ effects.
+ 'c' or 'C' specifies that function is const except for described side
+ effects.
+ The uppercase letter in addition specifies that function clobbers errno.
+
+ character 2+2i specifies properties of argument number i as follows:
+ 'x' or 'X' specifies that parameter is unused.
+ 'r' or 'R' specifies that the memory pointed to by the parameter is only
+ read and does not escape
+ 'o' or 'O' specifies that the memory pointed to by the parameter is only
+ written and does not escape
+ 'w' or 'W' specifies that the memory pointed to by the parameter does not
+ escape
+ '1'....'9' specifies that the memory pointed to by the parameter is
+ copied to memory pointed to by different parameter
+ (as in memcpy).
+ '.' specifies that nothing is known.
+ The uppercase letter in addition specifies that the memory pointed to
+ by the parameter is not dereferenced. For 'r' only read applies
+ transitively to pointers read from the pointed-to memory.
+
+ character 3+2i specifies additional properties of argument number i
+ as follows:
+ ' ' nothing is known
+      't' the size of the value written/read corresponds to the size
+	  of the pointed-to type of the argument type
+ '1'...'9' specifies the size of value written/read is bound by the
+ specified argument
+ */
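+
+/* As a worked (hypothetical) example, the 8-character string "1 O3. . "
+   for a three-argument memset-like function decodes as: the function
+   returns its first argument ('1'); nothing more is known about it (' ');
+   the memory pointed to by argument 1 is only written, does not escape,
+   and is not dereferenced ('O'), with the size of the write bounded by
+   argument 3 ('3'); and nothing is known about arguments 2 and 3
+   ('.' ' ').  */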
+
+#ifndef ATTR_FNSPEC_H
+#define ATTR_FNSPEC_H
+
+class attr_fnspec
+{
+private:
+ /* fn spec attribute string. */
+ const char *str;
+ /* length of the fn spec string. */
+ const unsigned len;
+ /* Number of characters specifying return value. */
+ const unsigned int return_desc_size = 2;
+  /* Number of characters specifying each argument. */
+ const unsigned int arg_desc_size = 2;
+
+ /* Return start of specifier of arg i. */
+ unsigned int arg_idx (int i)
+ {
+ return return_desc_size + arg_desc_size * i;
+ }
+
+public:
+ attr_fnspec (const char *str, unsigned len)
+ : str (str), len (len)
+ {
+ if (flag_checking)
+ verify ();
+ }
+ attr_fnspec (const char *str)
+ : str (str), len (strlen (str))
+ {
+ if (flag_checking)
+ verify ();
+ }
+ attr_fnspec (const_tree identifier)
+ : str (TREE_STRING_POINTER (identifier)),
+ len (TREE_STRING_LENGTH (identifier))
+ {
+ if (flag_checking)
+ verify ();
+ }
+ attr_fnspec ()
+ : str (NULL), len (0)
+ {
+ }
+
+ /* Return true if fn spec is known. */
+ bool
+ known_p ()
+ {
+ return len;
+ }
+
+ /* Return true if arg I is specified. */
+ bool
+ arg_specified_p (unsigned int i)
+ {
+ return len >= arg_idx (i + 1);
+ }
+
+ /* True if the argument is not dereferenced recursively, thus only
+ directly reachable memory is read or written. */
+ bool
+ arg_direct_p (unsigned int i)
+ {
+ unsigned int idx = arg_idx (i);
+ gcc_checking_assert (arg_specified_p (i));
+ return str[idx] == 'R' || str[idx] == 'O'
+ || str[idx] == 'W' || (str[idx] >= '1' && str[idx] <= '9');
+ }
+
+ /* True if argument is used. */
+ bool
+ arg_used_p (unsigned int i)
+ {
+ unsigned int idx = arg_idx (i);
+ gcc_checking_assert (arg_specified_p (i));
+ return str[idx] != 'x' && str[idx] != 'X';
+ }
+
+ /* True if memory reached by the argument is readonly (not clobbered). */
+ bool
+ arg_readonly_p (unsigned int i)
+ {
+ unsigned int idx = arg_idx (i);
+ gcc_checking_assert (arg_specified_p (i));
+ return str[idx] == 'r' || str[idx] == 'R' || (str[idx] >= '1' && str[idx] <= '9');
+ }
+
+  /* True if memory reached by the argument is read (directly or indirectly). */
+ bool
+ arg_maybe_read_p (unsigned int i)
+ {
+ unsigned int idx = arg_idx (i);
+ gcc_checking_assert (arg_specified_p (i));
+ return str[idx] != 'o' && str[idx] != 'O'
+ && str[idx] != 'x' && str[idx] != 'X';
+ }
+
+  /* True if memory reached by the argument is written
+     (directly or indirectly). */
+ bool
+ arg_maybe_written_p (unsigned int i)
+ {
+ unsigned int idx = arg_idx (i);
+ gcc_checking_assert (arg_specified_p (i));
+ return str[idx] != 'r' && str[idx] != 'R'
+ && (str[idx] < '1' || str[idx] > '9')
+ && str[idx] != 'x' && str[idx] != 'X';
+ }
+
+  /* Return true if the load of memory pointed to by argument I is bounded
+     by another argument.  If so, set *ARG to that argument's index. */
+ bool
+ arg_max_access_size_given_by_arg_p (unsigned int i, unsigned int *arg)
+ {
+ unsigned int idx = arg_idx (i);
+ gcc_checking_assert (arg_specified_p (i));
+ if (str[idx + 1] >= '1' && str[idx + 1] <= '9')
+ {
+ *arg = str[idx + 1] - '1';
+ return true;
+ }
+ else
+ return false;
+ }
+
+  /* Return true if the pointed-to type of the argument corresponds to the
+     size of the memory access. */
+ bool
+ arg_access_size_given_by_type_p (unsigned int i)
+ {
+ unsigned int idx = arg_idx (i);
+ gcc_checking_assert (arg_specified_p (i));
+ return str[idx + 1] == 't';
+ }
+
+  /* Return true if the memory pointed to by the argument is copied to
+     memory pointed to by a different argument (as in memcpy).
+     If so, set *ARG to that argument's index. */
+ bool
+ arg_copied_to_arg_p (unsigned int i, unsigned int *arg)
+ {
+ unsigned int idx = arg_idx (i);
+ gcc_checking_assert (arg_specified_p (i));
+ if (str[idx] < '1' || str[idx] > '9')
+ return false;
+ *arg = str[idx] - '1';
+ return true;
+ }
+
+
+ /* True if the argument does not escape. */
+ bool
+ arg_noescape_p (unsigned int i)
+ {
+ unsigned int idx = arg_idx (i);
+ gcc_checking_assert (arg_specified_p (i));
+ return str[idx] == 'w' || str[idx] == 'W'
+ || str[idx] == 'r' || str[idx] == 'R'
+ || str[idx] == 'o' || str[idx] == 'O';
+ }
+
+  /* Return true if the function returns the value of one of its parameters.
+     If ARG_NO is non-NULL, initialize it to the index of the returned
+     argument. */
+ bool
+ returns_arg (unsigned int *arg_no)
+ {
+ if (str[0] >= '1' && str[0] <= '4')
+ {
+ if (arg_no)
+ *arg_no = str[0] - '1';
+ return true;
+ }
+ return false;
+ }
+
+ /* Nonzero if the return value does not alias with anything. Functions
+ with the malloc attribute have this set on their return value. */
+ bool
+ returns_noalias_p ()
+ {
+ return str[0] == 'm';
+ }
+
+  /* Return true if the function may read global memory, i.e. memory not
+     covered by the argument specifiers in the fnspec. */
+ bool
+ global_memory_read_p ()
+ {
+ return str[1] != 'c' && str[1] != 'C';
+ }
+
+  /* Return true if the function may write global memory, i.e. memory not
+     covered by the argument specifiers in the fnspec. */
+ bool
+ global_memory_written_p ()
+ {
+ return str[1] != 'c' && str[1] != 'C' && str[1] != 'p' && str[1] != 'P';
+ }
+
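+  /* Return true if the function may clobber errno (the uppercase 'C' or
+     'P' in character 1). */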
+ bool
+ errno_maybe_written_p ()
+ {
+ return str[1] == 'C' || str[1] == 'P';
+ }
+
+ /* Return EAF flags for arg I. */
+ int
+ arg_eaf_flags (unsigned int i)
+ {
+ int flags = 0;
+
+ if (!arg_specified_p (i))
+ ;
+ else if (!arg_used_p (i))
+ flags = EAF_UNUSED;
+ else
+ {
+ if (arg_direct_p (i))
+ flags |= EAF_NO_INDIRECT_READ | EAF_NO_INDIRECT_ESCAPE
+ | EAF_NOT_RETURNED_INDIRECTLY | EAF_NO_INDIRECT_CLOBBER;
+ if (arg_noescape_p (i))
+ flags |= EAF_NO_DIRECT_ESCAPE | EAF_NO_INDIRECT_ESCAPE;
+ if (arg_readonly_p (i))
+ flags |= EAF_NO_DIRECT_CLOBBER | EAF_NO_INDIRECT_CLOBBER;
+ }
+ return flags;
+ }
+
+ /* Check validity of the string. */
+ void verify ();
+
+ /* Return the fnspec string. */
+ const char *
+ get_str ()
+ {
+ return str;
+ }
+};
+
+extern attr_fnspec gimple_call_fnspec (const gcall *stmt);
+extern attr_fnspec builtin_fnspec (tree);
+
+#endif /* ATTR_FNSPEC_H */
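A minimal usage sketch of the class above, assuming a GCC plugin context where the usual tree headers are already included; the fnspec string ". R " is an illustrative assumption, not a string taken from GCC's tables:

    /* ". R ": nothing known about the return value or purity; the first
       argument's pointed-to memory is only read, does not escape, and is
       not dereferenced transitively.  */
    attr_fnspec spec (". R ");
    if (spec.known_p ()
	&& spec.arg_specified_p (0)
	&& spec.arg_readonly_p (0))
      {
	/* Argument 0 only reads the memory it points to.  */
      }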
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/attribs.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/attribs.h
new file mode 100644
index 0000000..84a4365
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/attribs.h
@@ -0,0 +1,401 @@
+/* Declarations and definitions dealing with attribute handling.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ATTRIBS_H
+#define GCC_ATTRIBS_H
+
+extern const struct attribute_spec *lookup_attribute_spec (const_tree);
+extern void free_attr_data ();
+extern void init_attributes (void);
+
+/* Process the attributes listed in ATTRIBUTES and install them in *NODE,
+ which is either a DECL (including a TYPE_DECL) or a TYPE. If a DECL,
+ it should be modified in place; if a TYPE, a copy should be created
+ unless ATTR_FLAG_TYPE_IN_PLACE is set in FLAGS. FLAGS gives further
+ information, in the form of a bitwise OR of flags in enum attribute_flags
+ from tree.h. Depending on these flags, some attributes may be
+ returned to be applied at a later stage (for example, to apply
+ a decl attribute to the declaration rather than to its type). */
+extern tree decl_attributes (tree *, tree, int, tree = NULL_TREE);
+
+extern bool cxx11_attribute_p (const_tree);
+extern tree get_attribute_name (const_tree);
+extern tree get_attribute_namespace (const_tree);
+extern void apply_tm_attr (tree, tree);
+extern tree make_attribute (const char *, const char *, tree);
+extern bool attribute_ignored_p (tree);
+extern bool attribute_ignored_p (const attribute_spec *const);
+
+extern struct scoped_attributes* register_scoped_attributes (const struct attribute_spec *,
+ const char *,
+ bool = false);
+
+extern char *sorted_attr_string (tree);
+extern bool common_function_versions (tree, tree);
+extern tree make_dispatcher_decl (const tree);
+extern bool is_function_default_version (const tree);
+extern void handle_ignored_attributes_option (vec<char *> *);
+
+/* Return a type like TTYPE except that its TYPE_ATTRIBUTES
+ is ATTRIBUTE.
+
+   Modified types that have already been made are recorded so that
+   duplicates are not created. */
+
+extern tree build_type_attribute_variant (tree, tree);
+extern tree build_decl_attribute_variant (tree, tree);
+extern tree build_type_attribute_qual_variant (tree, tree, int);
+
+extern bool simple_cst_list_equal (const_tree, const_tree);
+extern bool attribute_value_equal (const_tree, const_tree);
+
+/* Return 0 if the attributes for two types are incompatible, 1 if they
+ are compatible, and 2 if they are nearly compatible (which causes a
+ warning to be generated). */
+extern int comp_type_attributes (const_tree, const_tree);
+
+extern tree affects_type_identity_attributes (tree, bool = true);
+extern tree restrict_type_identity_attributes_to (tree, tree);
+
+/* Default versions of target-overridable functions. */
+extern tree merge_decl_attributes (tree, tree);
+extern tree merge_type_attributes (tree, tree);
+
+/* Remove any instances of attribute ATTR_NAME in LIST and return the
+ modified list. */
+
+extern tree remove_attribute (const char *, tree);
+
+/* Similarly but also with specific attribute namespace. */
+
+extern tree remove_attribute (const char *, const char *, tree);
+
+/* Given two attributes lists, return a list of their union. */
+
+extern tree merge_attributes (tree, tree);
+
+/* Duplicate all attributes with name NAME in ATTR list to *ATTRS if
+ they are missing there. */
+
+extern void duplicate_one_attribute (tree *, tree, const char *);
+
+/* Duplicate all attributes of user DECL that should be propagated to
+   the corresponding builtin. */
+
+extern void copy_attributes_to_builtin (tree);
+
+/* Given two Windows decl attributes lists, possibly including
+   dllimport, return a list of their union. */
+extern tree merge_dllimport_decl_attributes (tree, tree);
+
+/* Handle a "dllimport" or "dllexport" attribute. */
+extern tree handle_dll_attribute (tree *, tree, tree, int, bool *);
+
+extern int attribute_list_equal (const_tree, const_tree);
+extern int attribute_list_contained (const_tree, const_tree);
+
+/* The backbone of lookup_attribute(). ATTR_LEN is the string length
+ of ATTR_NAME, and LIST is not NULL_TREE.
+
+ The function is called from lookup_attribute in order to optimize
+ for size. */
+extern tree private_lookup_attribute (const char *attr_name, size_t attr_len,
+ tree list);
+extern tree private_lookup_attribute (const char *attr_ns,
+ const char *attr_name,
+ size_t attr_ns_len, size_t attr_len,
+ tree list);
+
+extern unsigned decls_mismatched_attributes (tree, tree, tree,
+ const char* const[],
+ pretty_printer*);
+
+extern void maybe_diag_alias_attributes (tree, tree);
+
+/* For a given string S of length L, strip leading and trailing '_' characters
+ so that we have a canonical form of attribute names. NB: This function may
+ change S and L. */
+
+template <typename T>
+inline bool
+canonicalize_attr_name (const char *&s, T &l)
+{
+ if (l > 4 && s[0] == '_' && s[1] == '_' && s[l - 1] == '_' && s[l - 2] == '_')
+ {
+ s += 2;
+ l -= 4;
+ return true;
+ }
+ return false;
+}
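A quick illustration of the template above; the attribute name is an arbitrary example:

    const char *s = "__noinline__";
    size_t l = strlen (s);	    /* 12 */
    if (canonicalize_attr_name (s, l))
      /* s now points past the leading underscores and l == 8, so the
	 canonical name is the 8-character prefix "noinline".  */
      ;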
+
+/* For a given IDENTIFIER_NODE, strip leading and trailing '_' characters
+ so that we have a canonical form of attribute names. */
+
+inline tree
+canonicalize_attr_name (tree attr_name)
+{
+ size_t l = IDENTIFIER_LENGTH (attr_name);
+ const char *s = IDENTIFIER_POINTER (attr_name);
+
+ if (canonicalize_attr_name (s, l))
+ return get_identifier_with_length (s, l);
+
+ return attr_name;
+}
+
+/* Compare attribute identifiers ATTR1 and ATTR2 with length ATTR1_LEN and
+ ATTR2_LEN. */
+
+inline bool
+cmp_attribs (const char *attr1, size_t attr1_len,
+ const char *attr2, size_t attr2_len)
+{
+ return attr1_len == attr2_len && strncmp (attr1, attr2, attr1_len) == 0;
+}
+
+/* Compare attribute identifiers ATTR1 and ATTR2. */
+
+inline bool
+cmp_attribs (const char *attr1, const char *attr2)
+{
+ return cmp_attribs (attr1, strlen (attr1), attr2, strlen (attr2));
+}
+
+/* Given an identifier node IDENT and a string ATTR_NAME, return true
+ if the identifier node is a valid attribute name for the string. */
+
+inline bool
+is_attribute_p (const char *attr_name, const_tree ident)
+{
+ return cmp_attribs (attr_name, strlen (attr_name),
+ IDENTIFIER_POINTER (ident), IDENTIFIER_LENGTH (ident));
+}
+
+/* Given an attribute ATTR and a string ATTR_NS, return true
+   if the attribute's namespace matches the string.  ATTR_NS "" stands
+   for a standard attribute (get_attribute_namespace returns NULL_TREE)
+   or the "gnu" namespace. */
+
+inline bool
+is_attribute_namespace_p (const char *attr_ns, const_tree attr)
+{
+ tree ident = get_attribute_namespace (attr);
+ if (attr_ns == NULL)
+ return ident == NULL_TREE;
+ if (attr_ns[0])
+ return ident && is_attribute_p (attr_ns, ident);
+ return ident == NULL_TREE || is_attribute_p ("gnu", ident);
+}
+
+/* Given an attribute name ATTR_NAME and a list of attributes LIST,
+ return a pointer to the attribute's list element if the attribute
+ is part of the list, or NULL_TREE if not found. If the attribute
+ appears more than once, this only returns the first occurrence; the
+ TREE_CHAIN of the return value should be passed back in if further
+ occurrences are wanted. ATTR_NAME must be in the form 'text' (not
+ '__text__'). */
+
+inline tree
+lookup_attribute (const char *attr_name, tree list)
+{
+ if (CHECKING_P && attr_name[0] != '_')
+ {
+ size_t attr_len = strlen (attr_name);
+ gcc_checking_assert (!canonicalize_attr_name (attr_name, attr_len));
+ }
+ /* In most cases, list is NULL_TREE. */
+ if (list == NULL_TREE)
+ return NULL_TREE;
+ else
+ {
+ size_t attr_len = strlen (attr_name);
+ /* Do the strlen() before calling the out-of-line implementation.
+ In most cases attr_name is a string constant, and the compiler
+ will optimize the strlen() away. */
+ return private_lookup_attribute (attr_name, attr_len, list);
+ }
+}
+
+/* Similar to lookup_attribute, but also match the attribute namespace.
+ ATTR_NS "" stands for either standard attribute or "gnu" namespace. */
+
+inline tree
+lookup_attribute (const char *attr_ns, const char *attr_name, tree list)
+{
+ if (CHECKING_P && attr_name[0] != '_')
+ {
+ size_t attr_len = strlen (attr_name);
+ gcc_checking_assert (!canonicalize_attr_name (attr_name, attr_len));
+ }
+ if (CHECKING_P && attr_ns && attr_ns[0] != '_')
+ {
+ size_t attr_ns_len = strlen (attr_ns);
+ gcc_checking_assert (!canonicalize_attr_name (attr_ns, attr_ns_len));
+ }
+ /* In most cases, list is NULL_TREE. */
+ if (list == NULL_TREE)
+ return NULL_TREE;
+ else
+ {
+ size_t attr_ns_len = attr_ns ? strlen (attr_ns) : 0;
+ size_t attr_len = strlen (attr_name);
+ /* Do the strlen() before calling the out-of-line implementation.
+ In most cases attr_name is a string constant, and the compiler
+ will optimize the strlen() away. */
+ return private_lookup_attribute (attr_ns, attr_name,
+ attr_ns_len, attr_len, list);
+ }
+}
+
+/* Given an attribute name ATTR_NAME and a list of attributes LIST,
+   return a pointer to the first list element whose attribute name
+   starts with ATTR_NAME.  ATTR_NAME must be in the form 'text' (not
+ '__text__'). */
+
+inline tree
+lookup_attribute_by_prefix (const char *attr_name, tree list)
+{
+ gcc_checking_assert (attr_name[0] != '_');
+ /* In most cases, list is NULL_TREE. */
+ if (list == NULL_TREE)
+ return NULL_TREE;
+ else
+ {
+ size_t attr_len = strlen (attr_name);
+ while (list)
+ {
+ tree name = get_attribute_name (list);
+ size_t ident_len = IDENTIFIER_LENGTH (name);
+
+ if (attr_len > ident_len)
+ {
+ list = TREE_CHAIN (list);
+ continue;
+ }
+
+ const char *p = IDENTIFIER_POINTER (name);
+ gcc_checking_assert (attr_len == 0 || p[0] != '_'
+ || (ident_len > 1 && p[1] != '_'));
+ if (strncmp (attr_name, p, attr_len) == 0)
+ break;
+
+ list = TREE_CHAIN (list);
+ }
+
+ return list;
+ }
+}
+
+/* Description of a function argument declared with attribute access.
+ Used as an "iterator" over all such arguments in a function declaration
+ or call. */
+
+struct attr_access
+{
+ /* The beginning and end of the internal string representation. */
+ const char *str, *end;
+ /* The attribute pointer argument. */
+ tree ptr;
+ /* For a declaration, a TREE_CHAIN of VLA bound expressions stored
+ in TREE_VALUE and their positions in the argument list (stored
+ in TREE_PURPOSE). Each expression may be a PARM_DECL or some
+ other DECL (for ordinary variables), or an EXPR for other
+     expressions (e.g., function calls). */
+ tree size;
+
+ /* The zero-based position of each of the formal function arguments.
+ For the optional SIZARG, UINT_MAX when not specified. For VLAs
+ with multiple variable bounds, SIZARG is the position corresponding
+ to the most significant bound in the argument list. Positions of
+ subsequent bounds are in the TREE_PURPOSE field of the SIZE chain. */
+ unsigned ptrarg;
+ unsigned sizarg;
+ /* For internal specifications only, the constant minimum size of
+ the array, zero if not specified, and HWI_M1U for the unspecified
+ VLA [*] notation. Meaningless for external (explicit) access
+ specifications. */
+ unsigned HOST_WIDE_INT minsize;
+
+ /* The access mode. */
+ access_mode mode;
+
+ /* Set for an attribute added internally rather than by an explicit
+ declaration. */
+ bool internal_p;
+ /* Set for the T[static MINSIZE] array notation for nonzero MINSIZE
+ less than HWI_M1U. */
+ bool static_p;
+
+ /* Return the number of specified VLA bounds. */
+ unsigned vla_bounds (unsigned *) const;
+
+ /* Return internal representation as STRING_CST. */
+ tree to_internal_string () const;
+
+ /* Return the human-readable representation of the external attribute
+ specification (as it might appear in the source code) as STRING_CST. */
+ tree to_external_string () const;
+
+ /* Return argument of array type formatted as a readable string. */
+ std::string array_as_string (tree) const;
+
+ /* Return the access mode corresponding to the character code. */
+ static access_mode from_mode_char (char);
+
+ /* Reset front end-specific attribute access data from attributes. */
+ static void free_lang_data (tree);
+
+ /* The character codes corresponding to all the access modes. */
+ static constexpr char mode_chars[5] = { '-', 'r', 'w', 'x', '^' };
+
+ /* The strings corresponding to just the external access modes. */
+ static constexpr char mode_names[4][11] =
+ {
+ "none", "read_only", "write_only", "read_write"
+ };
+};
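For context, the source-level declaration that one such record describes uses GCC's access function attribute; the declaration below is an illustrative example (positions in the attribute are 1-based, while ptrarg and sizarg are zero-based):

    /* Described by one attr_access record with mode = access_read_only,
       ptrarg = 0 and sizarg = 1.  */
    extern int sum (const int *buf, int n)
      __attribute__ ((access (read_only, 1, 2)));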
+
+inline access_mode
+attr_access::from_mode_char (char c)
+{
+ switch (c)
+ {
+ case mode_chars[access_none]: return access_none;
+ case mode_chars[access_read_only]: return access_read_only;
+ case mode_chars[access_write_only]: return access_write_only;
+ case mode_chars[access_read_write]: return access_read_write;
+ case mode_chars[access_deferred]: return access_deferred;
+ }
+ gcc_unreachable ();
+}
+
+/* Used to define rdwr_map below. */
+struct rdwr_access_hash: int_hash<int, -1> { };
+
+/* A mapping from the position of a function argument declared with an
+   attribute access mode (read_only, write_only, or read_write) to its
+   access description. */
+struct attr_access;
+typedef hash_map<rdwr_access_hash, attr_access> rdwr_map;
+
+extern void init_attr_rdwr_indices (rdwr_map *, tree);
+extern attr_access *get_parm_access (rdwr_map &, tree,
+ tree = current_function_decl);
+
+#endif // GCC_ATTRIBS_H
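A minimal sketch of the most common query above, assuming a plugin pass that already holds a FUNCTION_DECL in the hypothetical variable fndecl:

    /* Test whether fndecl was declared __attribute__ ((noinline)).
       The name is passed without underscores, per the comment on
       lookup_attribute.  */
    if (lookup_attribute ("noinline", DECL_ATTRIBUTES (fndecl)) != NULL_TREE)
      {
	/* Attribute present; the first occurrence was returned.  */
      }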
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/auto-host.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/auto-host.h
new file mode 100644
index 0000000..df1b3be
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/auto-host.h
@@ -0,0 +1,2693 @@
+/* auto-host.h. Generated from config.in by configure. */
+/* config.in. Generated from configure.ac by autoheader. */
+
+/* Define if this compiler should be built as the offload target compiler. */
+#ifndef USED_FOR_TARGET
+/* #undef ACCEL_COMPILER */
+#endif
+
+
+/* Define if building universal (internal helper macro) */
+#ifndef USED_FOR_TARGET
+/* #undef AC_APPLE_UNIVERSAL_BUILD */
+#endif
+
+
+/* Define to the assembler option to enable compressed debug sections. */
+#ifndef USED_FOR_TARGET
+#define AS_COMPRESS_DEBUG_OPTION "--compress-debug-sections"
+#endif
+
+
+/* Define to the assembler option to disable compressed debug sections. */
+#ifndef USED_FOR_TARGET
+#define AS_NO_COMPRESS_DEBUG_OPTION "--nocompress-debug-sections"
+#endif
+
+
+/* Define to the root for URLs about GCC changes. */
+#ifndef USED_FOR_TARGET
+#define CHANGES_ROOT_URL "https://gcc.gnu.org/"
+#endif
+
+
+/* Define as the number of bits in a byte, if `limits.h' doesn't. */
+#ifndef USED_FOR_TARGET
+/* #undef CHAR_BIT */
+#endif
+
+
+/* Define to 0/1 if you want more run-time sanity checks. This one gets a grab
+ bag of miscellaneous but relatively cheap checks. */
+#ifndef USED_FOR_TARGET
+#define CHECKING_P 0
+#endif
+
+
+/* Define 0/1 to force the choice for exception handling model. */
+#ifndef USED_FOR_TARGET
+/* #undef CONFIG_SJLJ_EXCEPTIONS */
+#endif
+
+
+/* Define to enable the use of a default assembler. */
+#ifndef USED_FOR_TARGET
+/* #undef DEFAULT_ASSEMBLER */
+#endif
+
+
+/* Define to enable the use of a default debug linker. */
+#ifndef USED_FOR_TARGET
+/* #undef DEFAULT_DSYMUTIL */
+#endif
+
+
+/* Define to enable the use of a default linker. */
+#ifndef USED_FOR_TARGET
+/* #undef DEFAULT_LINKER */
+#endif
+
+
+/* Define to a value larger than zero to set the default stack clash
+   protector size. */
+#ifndef USED_FOR_TARGET
+#define DEFAULT_STK_CLASH_GUARD_SIZE 0
+#endif
+
+
+/* Define if you want to use __cxa_atexit, rather than atexit, to register C++
+ destructors for local statics and global objects. This is essential for
+ fully standards-compliant handling of destructors, but requires
+ __cxa_atexit in libc. */
+#ifndef USED_FOR_TARGET
+#define DEFAULT_USE_CXA_ATEXIT 2
+#endif
+
+
+/* The default for -fdiagnostics-color option */
+#ifndef USED_FOR_TARGET
+#define DIAGNOSTICS_COLOR_DEFAULT DIAGNOSTICS_COLOR_AUTO
+#endif
+
+
+/* The default for -fdiagnostics-urls option */
+#ifndef USED_FOR_TARGET
+#define DIAGNOSTICS_URLS_DEFAULT DIAGNOSTICS_URL_AUTO
+#endif
+
+
+/* Define to the root for documentation URLs. */
+#ifndef USED_FOR_TARGET
+#define DOCUMENTATION_ROOT_URL "https://gcc.gnu.org/onlinedocs/"
+#endif
+
+
+/* Define to the dsymutil version. */
+#ifndef USED_FOR_TARGET
+/* #undef DSYMUTIL_VERSION */
+#endif
+
+
+/* Define 0/1 if static analyzer feature is enabled. */
+#ifndef USED_FOR_TARGET
+#define ENABLE_ANALYZER 1
+#endif
+
+
+/* Define if you want assertions enabled. This is a cheap check. */
+#ifndef USED_FOR_TARGET
+#define ENABLE_ASSERT_CHECKING 1
+#endif
+
+
+/* Define to 1 to specify that we are using the BID decimal floating point
+ format instead of DPD */
+#ifndef USED_FOR_TARGET
+#define ENABLE_DECIMAL_BID_FORMAT 0
+#endif
+
+
+/* Define to 1 to enable decimal float extension to C. */
+#ifndef USED_FOR_TARGET
+#define ENABLE_DECIMAL_FLOAT 0
+#endif
+
+
+/* Define if your target supports default PIE and it is enabled. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_DEFAULT_PIE */
+#endif
+
+
+/* Define if your target supports default stack protector and it is enabled.
+ */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_DEFAULT_SSP */
+#endif
+
+
+/* Define if you want more run-time sanity checks for dataflow. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_DF_CHECKING */
+#endif
+
+
+/* Define to 0/1 if you want extra run-time checking that might affect code
+ generation. */
+#ifndef USED_FOR_TARGET
+#define ENABLE_EXTRA_CHECKING 0
+#endif
+
+
+/* Define to 1 to enable fixed-point arithmetic extension to C. */
+#ifndef USED_FOR_TARGET
+#define ENABLE_FIXED_POINT 1
+#endif
+
+
+/* Define if you want fold checked to verify that it never destroys its
+   argument. This is quite expensive. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_FOLD_CHECKING */
+#endif
+
+
+/* Define if you want the garbage collector to operate in maximally paranoid
+ mode, validating the entire heap and collecting garbage at every
+ opportunity. This is extremely expensive. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_GC_ALWAYS_COLLECT */
+#endif
+
+
+/* Define if you want the garbage collector to do object poisoning and other
+ memory allocation checks. This is quite expensive. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_GC_CHECKING */
+#endif
+
+
+/* Define if you want operations on GIMPLE (the basic data structure of the
+ high-level optimizers) to be checked for dynamic type safety at runtime.
+ This is moderately expensive. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_GIMPLE_CHECKING */
+#endif
+
+
+/* Define if gcc should always pass --build-id to linker. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_LD_BUILDID */
+#endif
+
+
+/* Define to 1 to enable libquadmath support */
+#ifndef USED_FOR_TARGET
+#define ENABLE_LIBQUADMATH_SUPPORT 1
+#endif
+
+
+/* Define to enable LTO support. */
+#ifndef USED_FOR_TARGET
+#define ENABLE_LTO 1
+#endif
+
+
+/* If --with-multiarch option is used */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_MULTIARCH */
+#endif
+
+
+/* Define to 1 if translation of program messages to the user's native
+ language is requested. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_NLS */
+#endif
+
+
+/* Define this to enable support for offloading. */
+#ifndef USED_FOR_TARGET
+#define ENABLE_OFFLOADING 0
+#endif
+
+
+/* Define to enable plugin support. */
+#ifndef USED_FOR_TARGET
+#define ENABLE_PLUGIN 1
+#endif
+
+
+/* Define if you want all operations on RTL (the basic data structure of the
+ optimizer and back end) to be checked for dynamic type safety at runtime.
+ This is quite expensive. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_RTL_CHECKING */
+#endif
+
+
+/* Define if you want RTL flag accesses to be checked against the RTL codes
+ that are supported for each access macro. This is relatively cheap. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_RTL_FLAG_CHECKING */
+#endif
+
+
+/* Define if you want runtime assertions enabled. This is a cheap check. */
+#define ENABLE_RUNTIME_CHECKING 1
+
+/* Define to enable evaluating float expressions with double precision in
+ standards-compatible mode on s390 targets. */
+/* #undef ENABLE_S390_EXCESS_FLOAT_PRECISION */
+
+/* Define if the -stdlib= option should be enabled. */
+#ifndef USED_FOR_TARGET
+#define ENABLE_STDLIB_OPTION 0
+#endif
+
+
+/* Define if you want all operations on trees (the basic data structure of the
+ front ends) to be checked for dynamic type safety at runtime. This is
+ moderately expensive. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_TREE_CHECKING */
+#endif
+
+
+/* Define if you want all gimple types to be verified after gimplification.
+ This is cheap. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_TYPES_CHECKING */
+#endif
+
+
+/* Define to get calls to the valgrind runtime enabled. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_VALGRIND_ANNOTATIONS */
+#endif
+
+
+/* Define if you want to run subprograms and generated programs through
+ valgrind (a memory checker). This is extremely expensive. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_VALGRIND_CHECKING */
+#endif
+
+
+/* Define 0/1 if vtable verification feature is enabled. */
+#ifndef USED_FOR_TARGET
+#define ENABLE_VTABLE_VERIFY 0
+#endif
+
+
+/* Define to 1 if installation paths should be looked up in the Windows
+ Registry. Ignored on non-Windows hosts. */
+#ifndef USED_FOR_TARGET
+/* #undef ENABLE_WIN32_REGISTRY */
+#endif
+
+
+/* Define to the name of a file containing a list of extra machine modes for
+ this architecture. */
+#ifndef USED_FOR_TARGET
+#define EXTRA_MODES_FILE "config/arm/arm-modes.def"
+#endif
+
+
+/* Define to enable detailed memory allocation stats gathering. */
+#ifndef USED_FOR_TARGET
+#define GATHER_STATISTICS 0
+#endif
+
+
+/* Define to 1 if `TIOCGWINSZ' requires <sys/ioctl.h>. */
+#ifndef USED_FOR_TARGET
+#define GWINSZ_IN_SYS_IOCTL 1
+#endif
+
+
+/* mcontext_t fields start with __ */
+#ifndef USED_FOR_TARGET
+/* #undef HAS_MCONTEXT_T_UNDERSCORES */
+#endif
+
+
+/* Define if AF_INET6 supported. */
+#ifndef USED_FOR_TARGET
+#define HAVE_AF_INET6 1
+#endif
+
+
+/* Define if AF_UNIX supported. */
+#ifndef USED_FOR_TARGET
+#define HAVE_AF_UNIX 1
+#endif
+
+
+/* Define if your assembler supports architecture modifiers. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_ARCHITECTURE_MODIFIERS */
+#endif
+
+
+/* Define if your avr assembler supports -mgcc-isr option. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_AVR_MGCCISR_OPTION */
+#endif
+
+
+/* Define if your avr assembler supports --mlink-relax option. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_AVR_MLINK_RELAX_OPTION */
+#endif
+
+
+/* Define if your avr assembler supports -mrmw option. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_AVR_MRMW_OPTION */
+#endif
+
+
+/* Define to the level of your assembler's compressed debug section support.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_AS_COMPRESS_DEBUG 1
+#endif
+
+
+/* Define if your assembler supports the --debug-prefix-map option. */
+#ifndef USED_FOR_TARGET
+#define HAVE_AS_DEBUG_PREFIX_MAP 1
+#endif
+
+
+/* Define if your assembler supports .module. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_DOT_MODULE */
+#endif
+
+
+/* Define if your assembler supports DSPR1 mult. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_DSPR1_MULT */
+#endif
+
+
+/* Define if your assembler supports .dtprelword. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_DTPRELWORD */
+#endif
+
+
+/* Define if your assembler supports dwarf2 .file/.loc directives, and
+ preserves file table indices exactly as given. */
+#ifndef USED_FOR_TARGET
+#define HAVE_AS_DWARF2_DEBUG_LINE 1
+#endif
+
+
+/* Define if your assembler supports views in dwarf2 .loc directives. */
+#ifndef USED_FOR_TARGET
+#define HAVE_AS_DWARF2_DEBUG_VIEW 1
+#endif
+
+
+/* Define if your assembler supports eh_frame pcrel encoding. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT */
+#endif
+
+
+/* Define if your assembler supports the R_PPC64_ENTRY relocation. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_ENTRY_MARKERS */
+#endif
+
+
+/* Define if your assembler supports explicit relocation. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_EXPLICIT_RELOCS */
+#endif
+
+
+/* Define if your assembler supports FMAF, HPC, and VIS 3.0 instructions. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_FMAF_HPC_VIS3 */
+#endif
+
+
+/* Define if your assembler supports the --gdwarf2 option. */
+#ifndef USED_FOR_TARGET
+#define HAVE_AS_GDWARF2_DEBUG_FLAG 1
+#endif
+
+
+/* Define if your assembler supports the --gdwarf-5 option. */
+#ifndef USED_FOR_TARGET
+#define HAVE_AS_GDWARF_5_DEBUG_FLAG 1
+#endif
+
+
+/* Define if your assembler supports .gnu_attribute. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_GNU_ATTRIBUTE */
+#endif
+
+
+/* Define true if the assembler supports '.long foo@GOTOFF'. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_GOTOFF_IN_DATA */
+#endif
+
+
+/* Define if your assembler supports the Sun syntax for cmov. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_CMOV_SUN_SYNTAX */
+#endif
+
+
+/* Define if your assembler supports the subtraction of symbols in different
+ sections. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_DIFF_SECT_DELTA */
+#endif
+
+
+/* Define if your assembler supports the ffreep mnemonic. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_FFREEP */
+#endif
+
+
+/* Define if your assembler uses fildq and fistq mnemonics. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_FILDQ */
+#endif
+
+
+/* Define if your assembler uses filds and fists mnemonics. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_FILDS */
+#endif
+
+
+/* Define 0/1 if your assembler and linker support @GOT. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_GOT32X */
+#endif
+
+
+/* Define if your assembler supports HLE prefixes. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_HLE */
+#endif
+
+
+/* Define if your assembler supports interunit movq mnemonic. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_INTERUNIT_MOVQ */
+#endif
+
+
+/* Define if your assembler supports the .quad directive. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_QUAD */
+#endif
+
+
+/* Define if the assembler supports 'rep <insn>, lock <insn>'. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_REP_LOCK_PREFIX */
+#endif
+
+
+/* Define if your assembler supports the sahf mnemonic in 64bit mode. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_SAHF */
+#endif
+
+
+/* Define if your assembler supports the swap suffix. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_SWAP */
+#endif
+
+
+/* Define if your assembler and linker support @tlsgdplt. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_TLSGDPLT */
+#endif
+
+
+/* Define to 1 if your assembler and linker support @tlsldm. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_TLSLDM */
+#endif
+
+
+/* Define to 1 if your assembler and linker support @tlsldmplt. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_TLSLDMPLT */
+#endif
+
+
+/* Define 0/1 if your assembler and linker support calling ___tls_get_addr via
+ GOT. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_TLS_GET_ADDR_GOT */
+#endif
+
+
+/* Define if your assembler supports the 'ud2' mnemonic. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_IX86_UD2 */
+#endif
+
+
+/* Define if your assembler supports the lituse_jsrdirect relocation. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_JSRDIRECT_RELOCS */
+#endif
+
+
+/* Define if your assembler supports .sleb128 and .uleb128. */
+#ifndef USED_FOR_TARGET
+#define HAVE_AS_LEB128 1
+#endif
+
+
+/* Define if your assembler supports LEON instructions. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_LEON */
+#endif
+
+
+/* Define if the assembler won't complain about a line such as # 0 "" 2. */
+#ifndef USED_FOR_TARGET
+#define HAVE_AS_LINE_ZERO 1
+#endif
+
+
+/* Define if your assembler supports ltoffx and ldxmov relocations. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_LTOFFX_LDXMOV_RELOCS */
+#endif
+
+
+/* Define if your assembler supports the -mabi option. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_MABI_OPTION */
+#endif
+
+
+/* Define if your assembler supports .machine and .machinemode. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_MACHINE_MACHINEMODE */
+#endif
+
+
+/* Define if the assembler understands -march=rv*_zifencei. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_MARCH_ZIFENCEI */
+#endif
+
+
+/* Define if your assembler supports mfcr field. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_MFCRF */
+#endif
+
+
+/* Define if the assembler understands -misa-spec=. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_MISA_SPEC */
+#endif
+
+
+/* Define if your Mac OS X assembler supports -mllvm -x86-pad-for-align=false.
+ */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_MLLVM_X86_PAD_FOR_ALIGN */
+#endif
+
+
+/* Define if your Mac OS X assembler supports the -mmacos-version-min option.
+ */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_MMACOSX_VERSION_MIN_OPTION */
+#endif
+
+
+/* Define if your assembler supports .mspabi_attribute. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_MSPABI_ATTRIBUTE */
+#endif
+
+
+/* Define if the assembler understands -mnan=. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_NAN */
+#endif
+
+
+/* Define if your assembler supports %gotoff relocation syntax. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_NIOS2_GOTOFF_RELOCATION */
+#endif
+
+
+/* Define if your assembler supports the -no-mul-bug-abort option. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_NO_MUL_BUG_ABORT_OPTION */
+#endif
+
+
+/* Define if the assembler understands -mno-shared. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_NO_SHARED */
+#endif
+
+
+/* Define if your assembler supports offsetable %lo(). */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_OFFSETABLE_LO10 */
+#endif
+
+
+/* Define if your assembler supports R_PPC*_PLTSEQ relocations. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_PLTSEQ */
+#endif
+
+
+/* Define if your assembler supports .ref */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_REF */
+#endif
+
+
+/* Define if your assembler supports R_PPC_REL16 relocs. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_REL16 */
+#endif
+
+
+/* Define if your assembler supports -relax option. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_RELAX_OPTION */
+#endif
+
+
+/* Define if your assembler supports .attribute. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_RISCV_ATTRIBUTE */
+#endif
+
+
+/* Define if your assembler supports relocs needed by -fpic. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_SMALL_PIC_RELOCS */
+#endif
+
+
+/* Define if your assembler supports SPARC4 instructions. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_SPARC4 */
+#endif
+
+
+/* Define if your assembler supports SPARC5 and VIS 4.0 instructions. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_SPARC5_VIS4 */
+#endif
+
+
+/* Define if your assembler supports SPARC6 instructions. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_SPARC6 */
+#endif
+
+
+/* Define if your assembler and linker support GOTDATA_OP relocs. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_SPARC_GOTDATA_OP */
+#endif
+
+
+/* Define if your assembler and linker support unaligned PC relative relocs.
+ */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_SPARC_UA_PCREL */
+#endif
+
+
+/* Define if your assembler and linker support unaligned PC relative relocs
+ against hidden symbols. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_SPARC_UA_PCREL_HIDDEN */
+#endif
+
+
+/* Define if your assembler and linker support thread-local storage. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_TLS */
+#endif
+
+
+/* Define if your assembler supports vl/vst/vlm/vstm with an optional
+ alignment hint argument. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_VECTOR_LOADSTORE_ALIGNMENT_HINTS */
+#endif
+
+
+/* Define if your assembler supports vl/vst/vlm/vstm with an optional
+ alignment hint argument on z13. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_VECTOR_LOADSTORE_ALIGNMENT_HINTS_ON_Z13 */
+#endif
+
+
+/* Define if your assembler supports VSX instructions. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_VSX */
+#endif
+
+
+/* Define if your assembler supports --gdwarf-4/--gdwarf-5 even with compiler
+ generated .debug_line. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_WORKING_DWARF_N_FLAG */
+#endif
+
+
+/* Define if your assembler supports -xbrace_comment option. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_AS_XBRACE_COMMENT_OPTION */
+#endif
+
+
+/* Define to 1 if you have the `atoq' function. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_ATOQ */
+#endif
+
+
+/* Define to 1 if you have the `clearerr_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_CLEARERR_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `clock' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_CLOCK 1
+#endif
+
+
+/* Define if <time.h> defines clock_t. */
+#ifndef USED_FOR_TARGET
+#define HAVE_CLOCK_T 1
+#endif
+
+
+/* Define 0/1 if your assembler and linker support COMDAT groups. */
+#ifndef USED_FOR_TARGET
+#define HAVE_COMDAT_GROUP 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'abort', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_ABORT 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'asprintf', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_ASPRINTF 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'atof', otherwise define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_ATOF 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'atol', otherwise define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_ATOL 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'atoll', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_ATOLL 1
+#endif
+
+
+/* Define to 1 if you have the declaration of `basename(const char*)', and to
+ 0 if you don't. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_BASENAME 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'calloc', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_CALLOC 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'clearerr_unlocked', otherwise
+ define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_CLEARERR_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'clock', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_CLOCK 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'errno', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_ERRNO 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'feof_unlocked', otherwise define
+ to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_FEOF_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'ferror_unlocked', otherwise
+ define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_FERROR_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'fflush_unlocked', otherwise
+ define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_FFLUSH_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'ffs', otherwise define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_FFS 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'fgetc_unlocked', otherwise
+ define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_FGETC_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'fgets_unlocked', otherwise
+ define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_FGETS_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'fileno_unlocked', otherwise
+ define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_FILENO_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'fprintf_unlocked', otherwise
+ define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_FPRINTF_UNLOCKED 0
+#endif
+
+
+/* Define to 1 if we found a declaration for 'fputc_unlocked', otherwise
+ define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_FPUTC_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'fputs_unlocked', otherwise
+ define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_FPUTS_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'fread_unlocked', otherwise
+ define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_FREAD_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'free', otherwise define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_FREE 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'fwrite_unlocked', otherwise
+ define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_FWRITE_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'getchar_unlocked', otherwise
+ define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_GETCHAR_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'getcwd', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_GETCWD 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'getc_unlocked', otherwise define
+ to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_GETC_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'getenv', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_GETENV 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'getopt', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_GETOPT 0
+#endif
+
+
+/* Define to 1 if we found a declaration for 'getpagesize', otherwise define
+ to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_GETPAGESIZE 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'getrlimit', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_GETRLIMIT 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'getrusage', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_GETRUSAGE 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'getwd', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_GETWD 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'ldgetname', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_LDGETNAME 0
+#endif
+
+
+/* Define to 1 if we found a declaration for 'madvise', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_MADVISE 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'mallinfo', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_MALLINFO 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'mallinfo2', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_MALLINFO2 0
+#endif
+
+
+/* Define to 1 if we found a declaration for 'malloc', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_MALLOC 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'putchar_unlocked', otherwise
+ define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_PUTCHAR_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'putc_unlocked', otherwise define
+ to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_PUTC_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'realloc', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_REALLOC 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'sbrk', otherwise define to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_SBRK 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'setenv', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_SETENV 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'setrlimit', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_SETRLIMIT 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'sigaltstack', otherwise define
+ to 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_SIGALTSTACK 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'snprintf', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_SNPRINTF 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'stpcpy', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_STPCPY 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'strnlen', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_STRNLEN 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'strsignal', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_STRSIGNAL 1
+#endif
+
+
+/* Define to 1 if you have the declaration of `strstr(const char*,const
+ char*)', and to 0 if you don't. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_STRSTR 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'strtol', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_STRTOL 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'strtoll', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_STRTOLL 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'strtoul', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_STRTOUL 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'strtoull', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_STRTOULL 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'strverscmp', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_STRVERSCMP 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'times', otherwise define to 0.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_TIMES 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'unsetenv', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_UNSETENV 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'vasprintf', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_VASPRINTF 1
+#endif
+
+
+/* Define to 1 if we found a declaration for 'vsnprintf', otherwise define to
+ 0. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DECL_VSNPRINTF 1
+#endif
+
+
+/* Define to 1 if you have the <direct.h> header file. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_DIRECT_H */
+#endif
+
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_DLFCN_H 1
+#endif
+
+
+/* Define to 1 if you have the <ext/hash_map> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_EXT_HASH_MAP 1
+#endif
+
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FCNTL_H 1
+#endif
+
+
+/* Define to 1 if you have the `feof_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FEOF_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `ferror_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FERROR_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `fflush_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FFLUSH_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `fgetc_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FGETC_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `fgets_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FGETS_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `fileno_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FILENO_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `fork' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FORK 1
+#endif
+
+
+/* Define to 1 if you have the `fprintf_unlocked' function. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_FPRINTF_UNLOCKED */
+#endif
+
+
+/* Define to 1 if you have the `fputc_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FPUTC_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `fputs_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FPUTS_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `fread_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FREAD_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `fstatat' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FSTATAT 1
+#endif
+
+
+/* Define to 1 if you have the <ftw.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FTW_H 1
+#endif
+
+
+/* Define to 1 if you have the `fwrite_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_FWRITE_UNLOCKED 1
+#endif
+
+
+/* Define if your assembler supports specifying the alignment of objects
+ allocated using the GAS .comm command. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_GAS_ALIGNED_COMM */
+#endif
+
+
+/* Define if your Arm assembler permits context-specific feature extensions.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_GAS_ARM_EXTENDED_ARCH 1
+#endif
+
+
+/* Define if your assembler supports .balign and .p2align. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GAS_BALIGN_AND_P2ALIGN 1
+#endif
+
+
+/* Define 0/1 if your assembler supports CFI directives. */
+#define HAVE_GAS_CFI_DIRECTIVE 1
+
+/* Define 0/1 if your assembler supports .cfi_personality. */
+#define HAVE_GAS_CFI_PERSONALITY_DIRECTIVE 1
+
+/* Define 0/1 if your assembler supports .cfi_sections. */
+#define HAVE_GAS_CFI_SECTIONS_DIRECTIVE 1
+
+/* Define if your assembler supports the .loc discriminator sub-directive. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GAS_DISCRIMINATOR 1
+#endif
+
+
+/* Define if your assembler supports @gnu_unique_object. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_GAS_GNU_UNIQUE_OBJECT */
+#endif
+
+
+/* Define if your assembler and linker support .hidden. */
+#define HAVE_GAS_HIDDEN 1
+
+/* Define if your assembler supports .lcomm with an alignment field. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_GAS_LCOMM_WITH_ALIGNMENT */
+#endif
+
+
+/* Define if your assembler supports .literal16. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_GAS_LITERAL16 */
+#endif
+
+
+/* Define if your assembler supports the .loc is_stmt sub-directive. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GAS_LOC_STMT 1
+#endif
+
+
+/* Define if your assembler supports specifying the maximum number of bytes to
+ skip when using the GAS .p2align command. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GAS_MAX_SKIP_P2ALIGN 1
+#endif
+
+
+/* Define if your assembler supports the .set micromips directive */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_GAS_MICROMIPS */
+#endif
+
+
+/* Define if your assembler supports .nsubspa comdat option. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_GAS_NSUBSPA_COMDAT */
+#endif
+
+
+/* Define if your assembler and linker support 32-bit section relative relocs
+ via '.secrel32 label'. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_GAS_PE_SECREL32_RELOC */
+#endif
+
+
+/* Define if your assembler supports specifying the exclude section flag. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GAS_SECTION_EXCLUDE 1
+#endif
+
+
+/* Define 0/1 if your assembler supports 'o' flag in .section directive. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GAS_SECTION_LINK_ORDER 1
+#endif
+
+
+/* Define 0/1 if your assembler supports marking sections with SHF_GNU_RETAIN
+ flag. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GAS_SHF_GNU_RETAIN 1
+#endif
+
+
+/* Define 0/1 if your assembler supports marking sections with SHF_MERGE flag.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_GAS_SHF_MERGE 1
+#endif
+
+
+/* Define if your assembler supports .subsection, and .subsection -1 starts
+   emitting at the beginning of your section. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GAS_SUBSECTION_ORDERING 1
+#endif
+
+
+/* Define if your assembler supports .weak. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GAS_WEAK 1
+#endif
+
+
+/* Define if your assembler supports .weakref. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GAS_WEAKREF 1
+#endif
+
+
+/* Define to 1 if you have the `getauxval' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GETAUXVAL 1
+#endif
+
+
+/* Define to 1 if you have the `getchar_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GETCHAR_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `getc_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GETC_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `getrlimit' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GETRLIMIT 1
+#endif
+
+
+/* Define to 1 if you have the `getrusage' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GETRUSAGE 1
+#endif
+
+
+/* Define to 1 if you have the `gettimeofday' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GETTIMEOFDAY 1
+#endif
+
+
+/* Define to 1 if using GNU as. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GNU_AS 1
+#endif
+
+
+/* Define if your system supports gnu indirect functions. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GNU_INDIRECT_FUNCTION 0
+#endif
+
+
+/* Define to 1 if using GNU ld. */
+#ifndef USED_FOR_TARGET
+#define HAVE_GNU_LD 1
+#endif
+
+
+/* Define if the gold linker supports split stack and is available as a
+ non-default */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_GOLD_NON_DEFAULT_SPLIT_STACK */
+#endif
+
+
+/* Define if you have the iconv() function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_ICONV 1
+#endif
+
+
+/* Define to 1 if you have the <iconv.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_ICONV_H 1
+#endif
+
+
+/* Define 0/1 if .init_array/.fini_array sections are available and working.
+ */
+#ifndef USED_FOR_TARGET
+#define HAVE_INITFINI_ARRAY_SUPPORT 0
+#endif
+
+
+/* Define to 1 if the system has the type `intmax_t'. */
+#ifndef USED_FOR_TARGET
+#define HAVE_INTMAX_T 1
+#endif
+
+
+/* Define to 1 if the system has the type `intptr_t'. */
+#ifndef USED_FOR_TARGET
+#define HAVE_INTPTR_T 1
+#endif
+
+
+/* Define if you have a working <inttypes.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_INTTYPES_H 1
+#endif
+
+
+/* Define to 1 if you have the `kill' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_KILL 1
+#endif
+
+
+/* Define if you have <langinfo.h> and nl_langinfo(CODESET). */
+#ifndef USED_FOR_TARGET
+#define HAVE_LANGINFO_CODESET 1
+#endif
+
+
+/* Define to 1 if you have the <langinfo.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LANGINFO_H 1
+#endif
+
+
+/* Define if your <locale.h> file defines LC_MESSAGES. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LC_MESSAGES 1
+#endif
+
+
+/* Define to 1 if you have the <ldfcn.h> header file. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_LDFCN_H */
+#endif
+
+
+/* Define 0/1 if your linker supports the SHF_MERGE flag with section
+ alignment > 1. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LD_ALIGNED_SHF_MERGE 1
+#endif
+
+
+/* Define if your linker supports --as-needed/--no-as-needed or equivalent
+ options. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LD_AS_NEEDED 1
+#endif
+
+
+/* Define if your default avr linker script for avrxmega3 leaves .rodata in
+ flash. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_LD_AVR_AVRXMEGA3_RODATA_IN_FLASH */
+#endif
+
+
+/* Define if your linker supports -z bndplt */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_LD_BNDPLT_SUPPORT */
+#endif
+
+
+/* Define if the PE linker has broken DWARF 5 support. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_LD_BROKEN_PE_DWARF5 */
+#endif
+
+
+/* Define if your linker supports --build-id. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LD_BUILDID 1
+#endif
+
+
+/* Define if the linker supports clearing hardware capabilities via mapfile.
+ */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_LD_CLEARCAP */
+#endif
+
+
+/* Define to the level of your linker's compressed debug section support. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LD_COMPRESS_DEBUG 2
+#endif
+
+
+/* Define if your linker supports --demangle option. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LD_DEMANGLE 1
+#endif
+
+
+/* Define 0/1 if your linker supports CIE v3 in .eh_frame. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LD_EH_FRAME_CIEV3 1
+#endif
+
+
+/* Define if your linker supports .eh_frame_hdr. */
+#define HAVE_LD_EH_FRAME_HDR 1
+
+/* Define if your linker supports garbage collection of sections in presence
+ of EH frames. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_LD_EH_GC_SECTIONS */
+#endif
+
+
+/* Define if your linker has buggy garbage collection of sections support when
+   .text.startup.foo-like sections are used. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_LD_EH_GC_SECTIONS_BUG */
+#endif
+
+
+/* Define if your PowerPC64 linker supports a large TOC. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_LD_LARGE_TOC */
+#endif
+
+
+/* Define if your PowerPC64 linker only needs function descriptor syms. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_LD_NO_DOT_SYMS */
+#endif
+
+
+/* Define if your linker can relax absolute .eh_frame personality pointers
+ into PC-relative form. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_LD_PERSONALITY_RELAXATION */
+#endif
+
+
+/* Define if the PE linker supports --disable-dynamicbase option. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_LD_PE_DISABLE_DYNAMICBASE */
+#endif
+
+
+/* Define if your linker supports PIE option. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LD_PIE 1
+#endif
+
+
+/* Define 0/1 if your linker supports -pie option with copy reloc. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LD_PIE_COPYRELOC 0
+#endif
+
+
+/* Define if your PowerPC linker has .gnu.attributes long double support. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE */
+#endif
+
+
+/* Define if your linker supports --push-state/--pop-state */
+#ifndef USED_FOR_TARGET
+#define HAVE_LD_PUSHPOPSTATE_SUPPORT 1
+#endif
+
+
+/* Define if your linker links a mix of read-only and read-write sections into
+ a read-write section. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LD_RO_RW_SECTION_MIXING 1
+#endif
+
+
+/* Define if your linker supports the *_sol2 emulations. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_LD_SOL2_EMULATION */
+#endif
+
+
+/* Define if your linker supports -Bstatic/-Bdynamic or equivalent options. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LD_STATIC_DYNAMIC 1
+#endif
+
+
+/* Define if your linker supports --sysroot. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LD_SYSROOT 1
+#endif
+
+
+/* Define to 1 if you have the <limits.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LIMITS_H 1
+#endif
+
+
+/* Define to 1 if you have the <locale.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LOCALE_H 1
+#endif
+
+
+/* Define to 1 if the system has the type `long long'. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LONG_LONG 1
+#endif
+
+
+/* Define to 1 if the system has the type `long long int'. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LONG_LONG_INT 1
+#endif
+
+
+/* Define to the level of your linker's plugin support. */
+#ifndef USED_FOR_TARGET
+#define HAVE_LTO_PLUGIN 2
+#endif
+
+
+/* Define to 1 if you have the `madvise' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_MADVISE 1
+#endif
+
+
+/* Define to 1 if you have the `mallinfo' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_MALLINFO 1
+#endif
+
+
+/* Define to 1 if you have the `mallinfo2' function. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_MALLINFO2 */
+#endif
+
+
+/* Define to 1 if you have the <malloc.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_MALLOC_H 1
+#endif
+
+
+/* Define to 1 if you have the `mbstowcs' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_MBSTOWCS 1
+#endif
+
+
+/* Define if valgrind's memcheck.h header is installed. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_MEMCHECK_H */
+#endif
+
+
+/* Define to 1 if you have the <memory.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_MEMORY_H 1
+#endif
+
+
+/* Define to 1 if you have the `mmap' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_MMAP 1
+#endif
+
+
+/* Define if mmap with MAP_ANON(YMOUS) works. */
+#ifndef USED_FOR_TARGET
+#define HAVE_MMAP_ANON 1
+#endif
+
+
+/* Define if mmap of /dev/zero works. */
+#ifndef USED_FOR_TARGET
+#define HAVE_MMAP_DEV_ZERO 1
+#endif
+
+
+/* Define if read-only mmap of a plain file works. */
+#ifndef USED_FOR_TARGET
+#define HAVE_MMAP_FILE 1
+#endif
+
+
+/* Define if GCC has been configured with --enable-newlib-nano-formatted-io.
+ */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_NEWLIB_NANO_FORMATTED_IO */
+#endif
+
+
+/* Define to 1 if you have the `nl_langinfo' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_NL_LANGINFO 1
+#endif
+
+
+/* Define to 1 if you have the `popen' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_POPEN 1
+#endif
+
+
+/* Define to 1 if you have the `posix_fallocate' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_POSIX_FALLOCATE 1
+#endif
+
+
+/* Define to 1 if you have the `putchar_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_PUTCHAR_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `putc_unlocked' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_PUTC_UNLOCKED 1
+#endif
+
+
+/* Define to 1 if you have the `setlocale' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SETLOCALE 1
+#endif
+
+
+/* Define to 1 if you have the `setrlimit' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SETRLIMIT 1
+#endif
+
+
+/* Define if <sys/signal.h> defines sighandler_t */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_SIGHANDLER_T */
+#endif
+
+
+/* Define if the system-provided CRTs are present on Solaris. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_SOLARIS_CRTS */
+#endif
+
+
+/* Define to 1 if you have the <stddef.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_STDDEF_H 1
+#endif
+
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_STDINT_H 1
+#endif
+
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_STDLIB_H 1
+#endif
+
+
+/* Define to 1 if you have the <strings.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_STRINGS_H 1
+#endif
+
+
+/* Define to 1 if you have the <string.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_STRING_H 1
+#endif
+
+
+/* Define to 1 if you have the `strsignal' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_STRSIGNAL 1
+#endif
+
+
+/* Define if <sys/times.h> defines struct tms. */
+#ifndef USED_FOR_TARGET
+#define HAVE_STRUCT_TMS 1
+#endif
+
+
+/* Define if <utility> defines std::swap. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SWAP_IN_UTILITY 1
+#endif
+
+
+/* Define to 1 if you have the `sysconf' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SYSCONF 1
+#endif
+
+
+/* Define to 1 if you have the <sys/auxv.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SYS_AUXV_H 1
+#endif
+
+
+/* Define to 1 if you have the <sys/file.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SYS_FILE_H 1
+#endif
+
+
+/* Define to 1 if you have the <sys/locking.h> header file. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_SYS_LOCKING_H */
+#endif
+
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SYS_MMAN_H 1
+#endif
+
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SYS_PARAM_H 1
+#endif
+
+
+/* Define to 1 if you have the <sys/resource.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SYS_RESOURCE_H 1
+#endif
+
+
+/* Define if your target C library provides sys/sdt.h */
+/* #undef HAVE_SYS_SDT_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SYS_STAT_H 1
+#endif
+
+
+/* Define to 1 if you have the <sys/times.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SYS_TIMES_H 1
+#endif
+
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SYS_TIME_H 1
+#endif
+
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SYS_TYPES_H 1
+#endif
+
+
+/* Define to 1 if you have <sys/wait.h> that is POSIX.1 compatible. */
+#ifndef USED_FOR_TARGET
+#define HAVE_SYS_WAIT_H 1
+#endif
+
+
+/* Define to 1 if you have the `times' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_TIMES 1
+#endif
+
+
+/* Define to 1 if you have the <time.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_TIME_H 1
+#endif
+
+
+/* Define to 1 if you have the <tr1/unordered_map> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_TR1_UNORDERED_MAP 1
+#endif
+
+
+/* Define to 1 if the system has the type `uintmax_t'. */
+#ifndef USED_FOR_TARGET
+#define HAVE_UINTMAX_T 1
+#endif
+
+
+/* Define to 1 if the system has the type `uintptr_t'. */
+#ifndef USED_FOR_TARGET
+#define HAVE_UINTPTR_T 1
+#endif
+
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_UNISTD_H 1
+#endif
+
+
+/* Define to 1 if you have the <unordered_map> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_UNORDERED_MAP 1
+#endif
+
+
+/* Define to 1 if the system has the type `unsigned long long int'. */
+#ifndef USED_FOR_TARGET
+#define HAVE_UNSIGNED_LONG_LONG_INT 1
+#endif
+
+
+/* Define if valgrind's valgrind/memcheck.h header is installed. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_VALGRIND_MEMCHECK_H */
+#endif
+
+
+/* Define to 1 if you have the `vfork' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_VFORK 1
+#endif
+
+
+/* Define to 1 if you have the <vfork.h> header file. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_VFORK_H */
+#endif
+
+
+/* Define to 1 if you have the <wchar.h> header file. */
+#ifndef USED_FOR_TARGET
+#define HAVE_WCHAR_H 1
+#endif
+
+
+/* Define to 1 if you have the `wcswidth' function. */
+#ifndef USED_FOR_TARGET
+#define HAVE_WCSWIDTH 1
+#endif
+
+
+/* Define to 1 if `fork' works. */
+#ifndef USED_FOR_TARGET
+#define HAVE_WORKING_FORK 1
+#endif
+
+
+/* Define this macro if mbstowcs does not crash when its first argument is
+ NULL. */
+#ifndef USED_FOR_TARGET
+#define HAVE_WORKING_MBSTOWCS 1
+#endif
+
+
+/* Define to 1 if `vfork' works. */
+#ifndef USED_FOR_TARGET
+#define HAVE_WORKING_VFORK 1
+#endif
+
+
+/* Define if your assembler supports AIX debug frame section label reference.
+ */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_XCOFF_DWARF_EXTRAS */
+#endif
+
+
+/* Define if you have a working <zstd.h> header file. */
+#ifndef USED_FOR_TARGET
+/* #undef HAVE_ZSTD_H */
+#endif
+
+
+/* Define if isl is in use. */
+#ifndef USED_FOR_TARGET
+#define HAVE_isl 1
+#endif
+
+
+/* Define if F_SETLKW supported by fcntl. */
+#ifndef USED_FOR_TARGET
+#define HOST_HAS_F_SETLKW 1
+#endif
+
+
+/* Define if _LK_LOCK supported by _locking. */
+#ifndef USED_FOR_TARGET
+/* #undef HOST_HAS_LK_LOCK */
+#endif
+
+
+/* Define if O_CLOEXEC supported by fcntl. */
+#ifndef USED_FOR_TARGET
+#define HOST_HAS_O_CLOEXEC 1
+#endif
+
+
+/* Define if O_NONBLOCK supported by fcntl. */
+#ifndef USED_FOR_TARGET
+#define HOST_HAS_O_NONBLOCK 1
+#endif
+
+
+/* Define which stat syscall is able to handle 64-bit inodes. */
+#ifndef USED_FOR_TARGET
+/* #undef HOST_STAT_FOR_64BIT_INODES */
+#endif
+
+
+/* Define as const if the declaration of iconv() needs const. */
+#ifndef USED_FOR_TARGET
+#define ICONV_CONST
+#endif
+
+
+/* Define if int64_t uses long as underlying type. */
+#ifndef USED_FOR_TARGET
+#define INT64_T_IS_LONG 1
+#endif
+
+
+/* Define to 1 if ld64 supports '-export_dynamic'. */
+#ifndef USED_FOR_TARGET
+/* #undef LD64_HAS_EXPORT_DYNAMIC */
+#endif
+
+
+/* Define to 1 if ld64 supports '-platform_version'. */
+#ifndef USED_FOR_TARGET
+/* #undef LD64_HAS_PLATFORM_VERSION */
+#endif
+
+
+/* Define to ld64 version. */
+#ifndef USED_FOR_TARGET
+/* #undef LD64_VERSION */
+#endif
+
+
+/* Define to the linker option to ignore unused dependencies. */
+#ifndef USED_FOR_TARGET
+#define LD_AS_NEEDED_OPTION "--push-state --as-needed"
+#endif
+
+
+/* Define to the linker option to enable compressed debug sections. */
+#ifndef USED_FOR_TARGET
+#define LD_COMPRESS_DEBUG_OPTION "--compress-debug-sections"
+#endif
+
+
+/* Define to the linker option to enable use of shared objects. */
+#ifndef USED_FOR_TARGET
+#define LD_DYNAMIC_OPTION "-Bdynamic"
+#endif
+
+
+/* Define to the linker option to keep unused dependencies. */
+#ifndef USED_FOR_TARGET
+#define LD_NO_AS_NEEDED_OPTION "--pop-state"
+#endif
+
+
+/* Define to the linker option to disable use of shared objects. */
+#ifndef USED_FOR_TARGET
+#define LD_STATIC_OPTION "-Bstatic"
+#endif
+
+
+/* The linker hash style */
+#ifndef USED_FOR_TARGET
+/* #undef LINKER_HASH_STYLE */
+#endif
+
+
+/* Define to the name of the LTO plugin DSO that must be passed to the
+ linker's -plugin=LIB option. */
+#ifndef USED_FOR_TARGET
+#define LTOPLUGINSONAME "liblto_plugin.so"
+#endif
+
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+ */
+#ifndef USED_FOR_TARGET
+#define LT_OBJDIR ".libs/"
+#endif
+
+
+/* Define if we should link mingw executables with --large-address-aware */
+#ifndef USED_FOR_TARGET
+/* #undef MINGW_DEFAULT_LARGE_ADDR_AWARE */
+#endif
+
+
+/* Value to set mingw's _dowildcard to. */
+#ifndef USED_FOR_TARGET
+/* #undef MINGW_DOWILDCARD */
+#endif
+
+
+/* Define if host mkdir takes a single argument. */
+#ifndef USED_FOR_TARGET
+/* #undef MKDIR_TAKES_ONE_ARG */
+#endif
+
+
+/* Define to 1 if -foffload is defaulted. */
+#ifndef USED_FOR_TARGET
+/* #undef OFFLOAD_DEFAULTED */
+#endif
+
+
+/* Define to offload targets, separated by commas. */
+#ifndef USED_FOR_TARGET
+#define OFFLOAD_TARGETS ""
+#endif
+
+
+/* Define to the address where bug reports for this package should be sent. */
+#ifndef USED_FOR_TARGET
+#define PACKAGE_BUGREPORT ""
+#endif
+
+
+/* Define to the full name of this package. */
+#ifndef USED_FOR_TARGET
+#define PACKAGE_NAME ""
+#endif
+
+
+/* Define to the full name and version of this package. */
+#ifndef USED_FOR_TARGET
+#define PACKAGE_STRING ""
+#endif
+
+
+/* Define to the one symbol short name of this package. */
+#ifndef USED_FOR_TARGET
+#define PACKAGE_TARNAME ""
+#endif
+
+
+/* Define to the home page for this package. */
+#ifndef USED_FOR_TARGET
+#define PACKAGE_URL ""
+#endif
+
+
+/* Define to the version of this package. */
+#ifndef USED_FOR_TARGET
+#define PACKAGE_VERSION ""
+#endif
+
+
+/* Specify plugin linker */
+#ifndef USED_FOR_TARGET
+#define PLUGIN_LD_SUFFIX "ld"
+#endif
+
+
+/* Define to .TOC. alignment forced by your linker. */
+#ifndef USED_FOR_TARGET
+/* #undef POWERPC64_TOC_POINTER_ALIGNMENT */
+#endif
+
+
+/* Define to PREFIX/include if cpp should also search that directory. */
+#ifndef USED_FOR_TARGET
+/* #undef PREFIX_INCLUDE_DIR */
+#endif
+
+
+/* The size of `dev_t', as computed by sizeof. */
+#ifndef USED_FOR_TARGET
+#define SIZEOF_DEV_T 8
+#endif
+
+
+/* The size of `ino_t', as computed by sizeof. */
+#ifndef USED_FOR_TARGET
+#define SIZEOF_INO_T 8
+#endif
+
+
+/* The size of `int', as computed by sizeof. */
+#ifndef USED_FOR_TARGET
+#define SIZEOF_INT 4
+#endif
+
+
+/* The size of `long', as computed by sizeof. */
+#ifndef USED_FOR_TARGET
+#define SIZEOF_LONG 8
+#endif
+
+
+/* The size of `long long', as computed by sizeof. */
+#ifndef USED_FOR_TARGET
+#define SIZEOF_LONG_LONG 8
+#endif
+
+
+/* The size of `short', as computed by sizeof. */
+#ifndef USED_FOR_TARGET
+#define SIZEOF_SHORT 2
+#endif
+
+
+/* The size of `void *', as computed by sizeof. */
+#ifndef USED_FOR_TARGET
+#define SIZEOF_VOID_P 8
+#endif
+
+
+/* Define to 1 if you have the ANSI C header files. */
+#ifndef USED_FOR_TARGET
+#define STDC_HEADERS 1
+#endif
+
+
+/* Define if you can safely include both <string.h> and <strings.h>. */
+#ifndef USED_FOR_TARGET
+#define STRING_WITH_STRINGS 1
+#endif
+
+
+/* Define if TFmode long double should be the default */
+#ifndef USED_FOR_TARGET
+/* #undef TARGET_DEFAULT_LONG_DOUBLE_128 */
+#endif
+
+
+/* Define if your target C library provides the `dl_iterate_phdr' function. */
+/* #undef TARGET_DL_ITERATE_PHDR */
+
+/* GNU C Library major version number used on the target, or 0. */
+#ifndef USED_FOR_TARGET
+#define TARGET_GLIBC_MAJOR 0
+#endif
+
+
+/* GNU C Library minor version number used on the target, or 0. */
+#ifndef USED_FOR_TARGET
+#define TARGET_GLIBC_MINOR 0
+#endif
+
+
+/* Define if your target C library properly handles PT_GNU_STACK. */
+#ifndef USED_FOR_TARGET
+/* #undef TARGET_LIBC_GNUSTACK */
+#endif
+
+
+/* Define if your target C library provides the AT_HWCAP value in the TCB. */
+#ifndef USED_FOR_TARGET
+/* #undef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
+#endif
+
+
+/* Define if your target C library provides stack protector support */
+#ifndef USED_FOR_TARGET
+/* #undef TARGET_LIBC_PROVIDES_SSP */
+#endif
+
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#ifndef USED_FOR_TARGET
+#define TIME_WITH_SYS_TIME 1
+#endif
+
+
+/* Define to the flag used to mark TLS sections if the default (`T') doesn't
+ work. */
+#ifndef USED_FOR_TARGET
+/* #undef TLS_SECTION_ASM_FLAG */
+#endif
+
+
+/* Define if your assembler mis-optimizes .eh_frame data. */
+#ifndef USED_FOR_TARGET
+/* #undef USE_AS_TRADITIONAL_FORMAT */
+#endif
+
+
+/* Define if you want to generate code by default that assumes that the Cygwin
+ DLL exports wrappers to support libstdc++ function replacement. */
+#ifndef USED_FOR_TARGET
+/* #undef USE_CYGWIN_LIBSTDCXX_WRAPPERS */
+#endif
+
+
+/* Define 0/1 if your linker supports hidden thunks in linkonce sections. */
+#ifndef USED_FOR_TARGET
+/* #undef USE_HIDDEN_LINKONCE */
+#endif
+
+
+/* Define to 1 if the 'long long' type is wider than 'long' but still
+ efficiently supported by the host hardware. */
+#ifndef USED_FOR_TARGET
+/* #undef USE_LONG_LONG_FOR_WIDEST_FAST_INT */
+#endif
+
+
+/* Define if we should use leading underscore on 64 bit mingw targets */
+#ifndef USED_FOR_TARGET
+/* #undef USE_MINGW64_LEADING_UNDERSCORES */
+#endif
+
+
+/* Enable extensions on AIX 3, Interix. */
+#ifndef _ALL_SOURCE
+# define _ALL_SOURCE 1
+#endif
+/* Enable GNU extensions on systems that have them. */
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE 1
+#endif
+/* Enable threading extensions on Solaris. */
+#ifndef _POSIX_PTHREAD_SEMANTICS
+# define _POSIX_PTHREAD_SEMANTICS 1
+#endif
+/* Enable extensions on HP NonStop. */
+#ifndef _TANDEM_SOURCE
+# define _TANDEM_SOURCE 1
+#endif
+/* Enable general extensions on Solaris. */
+#ifndef __EXTENSIONS__
+# define __EXTENSIONS__ 1
+#endif
+
+
+/* Define to be the last component of the Windows registry key under which to
+ look for installation paths. The full key used will be
+ HKEY_LOCAL_MACHINE/SOFTWARE/Free Software Foundation/{WIN32_REGISTRY_KEY}.
+ The default is the GCC version number. */
+#ifndef USED_FOR_TARGET
+/* #undef WIN32_REGISTRY_KEY */
+#endif
+
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+ significant byte first (like Motorola and SPARC, unlike Intel). */
+#if defined AC_APPLE_UNIVERSAL_BUILD
+# if defined __BIG_ENDIAN__
+# define WORDS_BIGENDIAN 1
+# endif
+#else
+# ifndef WORDS_BIGENDIAN
+/* # undef WORDS_BIGENDIAN */
+# endif
+#endif
+
+/* Enable large inode numbers on Mac OS X 10.5. */
+#ifndef _DARWIN_USE_64_BIT_INODE
+# define _DARWIN_USE_64_BIT_INODE 1
+#endif
+
+/* Number of bits in a file offset, on hosts where this is settable. */
+#ifndef USED_FOR_TARGET
+/* #undef _FILE_OFFSET_BITS */
+#endif
+
+
+/* Define for large files, on AIX-style hosts. */
+#ifndef USED_FOR_TARGET
+/* #undef _LARGE_FILES */
+#endif
+
+
+/* Define to 1 if on MINIX. */
+#ifndef USED_FOR_TARGET
+/* #undef _MINIX */
+#endif
+
+
+/* Define to 2 if the system does not provide POSIX.1 features except with
+ this defined. */
+#ifndef USED_FOR_TARGET
+/* #undef _POSIX_1_SOURCE */
+#endif
+
+
+/* Define to 1 if you need to in order for `stat' and other things to work. */
+#ifndef USED_FOR_TARGET
+/* #undef _POSIX_SOURCE */
+#endif
+
+
+/* Define for Solaris 2.5.1 so the uint32_t typedef from <sys/synch.h>,
+ <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
+ #define below would cause a syntax error. */
+#ifndef USED_FOR_TARGET
+/* #undef _UINT32_T */
+#endif
+
+
+/* Define for Solaris 2.5.1 so the uint64_t typedef from <sys/synch.h>,
+ <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
+ #define below would cause a syntax error. */
+#ifndef USED_FOR_TARGET
+/* #undef _UINT64_T */
+#endif
+
+
+/* Define for Solaris 2.5.1 so the uint8_t typedef from <sys/synch.h>,
+ <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
+ #define below would cause a syntax error. */
+#ifndef USED_FOR_TARGET
+/* #undef _UINT8_T */
+#endif
+
+
+/* Define to `char *' if <sys/types.h> does not define. */
+#ifndef USED_FOR_TARGET
+/* #undef caddr_t */
+#endif
+
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+ calls it, or to nothing if 'inline' is not supported under any name. */
+#ifndef __cplusplus
+/* #undef inline */
+#endif
+
+/* Define to the type of a signed integer type of width exactly 16 bits if
+ such a type exists and the standard includes do not define it. */
+#ifndef USED_FOR_TARGET
+/* #undef int16_t */
+#endif
+
+
+/* Define to the type of a signed integer type of width exactly 32 bits if
+ such a type exists and the standard includes do not define it. */
+#ifndef USED_FOR_TARGET
+/* #undef int32_t */
+#endif
+
+
+/* Define to the type of a signed integer type of width exactly 64 bits if
+ such a type exists and the standard includes do not define it. */
+#ifndef USED_FOR_TARGET
+/* #undef int64_t */
+#endif
+
+
+/* Define to the type of a signed integer type of width exactly 8 bits if such
+ a type exists and the standard includes do not define it. */
+#ifndef USED_FOR_TARGET
+/* #undef int8_t */
+#endif
+
+
+/* Define to the widest signed integer type if <stdint.h> and <inttypes.h> do
+ not define. */
+#ifndef USED_FOR_TARGET
+/* #undef intmax_t */
+#endif
+
+
+/* Define to the type of a signed integer type wide enough to hold a pointer,
+ if such a type exists, and if the system does not define it. */
+#ifndef USED_FOR_TARGET
+/* #undef intptr_t */
+#endif
+
+
+/* Define to `int' if <sys/types.h> does not define. */
+#ifndef USED_FOR_TARGET
+/* #undef pid_t */
+#endif
+
+
+/* Define to `long' if <sys/resource.h> doesn't define. */
+#ifndef USED_FOR_TARGET
+/* #undef rlim_t */
+#endif
+
+
+/* Define to `int' if <sys/types.h> does not define. */
+#ifndef USED_FOR_TARGET
+/* #undef ssize_t */
+#endif
+
+
+/* Define to the type of an unsigned integer type of width exactly 16 bits if
+ such a type exists and the standard includes do not define it. */
+#ifndef USED_FOR_TARGET
+/* #undef uint16_t */
+#endif
+
+
+/* Define to the type of an unsigned integer type of width exactly 32 bits if
+ such a type exists and the standard includes do not define it. */
+#ifndef USED_FOR_TARGET
+/* #undef uint32_t */
+#endif
+
+
+/* Define to the type of an unsigned integer type of width exactly 64 bits if
+ such a type exists and the standard includes do not define it. */
+#ifndef USED_FOR_TARGET
+/* #undef uint64_t */
+#endif
+
+
+/* Define to the type of an unsigned integer type of width exactly 8 bits if
+ such a type exists and the standard includes do not define it. */
+#ifndef USED_FOR_TARGET
+/* #undef uint8_t */
+#endif
+
+
+/* Define to the widest unsigned integer type if <stdint.h> and <inttypes.h>
+ do not define. */
+#ifndef USED_FOR_TARGET
+/* #undef uintmax_t */
+#endif
+
+
+/* Define to the type of an unsigned integer type wide enough to hold a
+ pointer, if such a type exists, and if the system does not define it. */
+#ifndef USED_FOR_TARGET
+/* #undef uintptr_t */
+#endif
+
+
+/* Define as `fork' if `vfork' does not work. */
+#ifndef USED_FOR_TARGET
+/* #undef vfork */
+#endif
+
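
The HAVE_* and HOST_HAS_* macros above are consumed by host-side code through
ordinary conditional compilation: a source file tests the macro and falls back
when configure did not detect the feature. A minimal sketch, assuming the
macros are supplied by the build system via -D or a generated config header
(the helper report_self_time and the program around it are hypothetical,
written for illustration only):

  #include <stdio.h>
  #ifdef HAVE_SYS_RESOURCE_H
  # include <sys/resource.h>
  #endif

  /* Hypothetical helper: report CPU time used so far, falling back
     gracefully when getrusage () was not detected at configure time.  */
  static void
  report_self_time (void)
  {
  #ifdef HAVE_GETRUSAGE
    struct rusage ru;
    if (getrusage (RUSAGE_SELF, &ru) == 0)
      printf ("user time: %ld.%06ld s\n",
              (long) ru.ru_utime.tv_sec, (long) ru.ru_utime.tv_usec);
  #else
    /* No getrusage (): nothing portable to report.  */
    printf ("resource usage unavailable on this host\n");
  #endif
  }

  int
  main (void)
  {
    report_self_time ();
    return 0;
  }

Guarding the #include as well as the call keeps the same translation unit
compiling on hosts where configure left both macros undefined.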
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/auto-profile.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/auto-profile.h
new file mode 100644
index 0000000..8b4cd06
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/auto-profile.h
@@ -0,0 +1,31 @@
+/* auto-profile.h - Defines data exported from auto-profile.cc
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+ Contributed by Dehao Chen (dehao@google.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef AUTO_PROFILE_H
+#define AUTO_PROFILE_H
+
+/* Read, process, finalize AutoFDO data structures. */
+extern void read_autofdo_file (void);
+extern void end_auto_profile (void);
+
+/* Returns TRUE if EDGE is hot enough to be inlined early. */
+extern bool afdo_callsite_hot_enough_for_early_inline (struct cgraph_edge *);
+
+#endif /* AUTO_PROFILE_H */
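
These three declarations are the entire interface the rest of the compiler
sees for AutoFDO. A hedged sketch of the intended call order (the wrapper
below is invented for illustration and would compile only inside the GCC
source tree, where struct cgraph_edge is defined; only the three calls come
from this header):

  #include "auto-profile.h"

  /* Illustrative only: read the AutoFDO profile once, query call-site
     hotness while early inlining runs, then release the data.  */
  static void
  autofdo_lifecycle_sketch (struct cgraph_edge *edge)
  {
    read_autofdo_file ();  /* parse and finalize the profile data */

    if (afdo_callsite_hot_enough_for_early_inline (edge))
      {
        /* a hot call site: the early inliner would inline it here */
      }

    end_auto_profile ();   /* tear down the AutoFDO data structures */
  }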
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/b-header-vars b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/b-header-vars
new file mode 100644
index 0000000..7134043
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/b-header-vars
@@ -0,0 +1,95 @@
+USER_H=float.h iso646.h stdarg.h stdbool.h stddef.h varargs.h stdfix.h stdnoreturn.h stdalign.h stdatomic.h config/arm/mmintrin.h arm_neon.h arm_acle.h arm_fp16.h arm_cmse.h arm_bf16.h arm_mve_types.h arm_mve.h arm_cde.h tgmath.h unwind-arm-common.h
+T_GLIMITS_H=glimits.h
+T_STDINT_GCC_H=stdint-gcc.h
+HASHTAB_H=hashtab.h
+OBSTACK_H=obstack.h
+SPLAY_TREE_H=splay-tree.h
+MD5_H=md5.h
+XREGEX_H=xregex.h
+FNMATCH_H=fnmatch.h
+LINKER_PLUGIN_API_H=plugin-api.h
+BCONFIG_H=bconfig.h auto-host.h ansidecl.h
+CONFIG_H=config.h auto-host.h ansidecl.h
+TCONFIG_H=tconfig.h auto-host.h ansidecl.h
+TM_P_H=tm_p.h config/arm/arm-flags.h arm-protos.h aarch-common-protos.h tm-preds.h
+TM_D_H=tm_d.h config/arm/arm-d.h
+GTM_H=tm.h options.h config/vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h
+TM_H=tm.h options.h config/vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h
+DUMPFILE_H=line-map.h dumpfile.h
+VEC_H=vec.h statistics.h ggc.h gtype-desc.h statistics.h
+HASH_TABLE_H=hashtab.h hash-table.h ggc.h gtype-desc.h statistics.h
+EXCEPT_H=except.h hashtab.h
+TARGET_H=tm.h options.h config/vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h target.h target.def target-hooks-macros.h target-insns.def insn-modes.h insn-codes.h
+C_TARGET_H=c-family/c-target.h c-family/c-target.def target-hooks-macros.h
+COMMON_TARGET_H=common/common-target.h line-map.h input.h common/common-target.def target-hooks-macros.h
+D_TARGET_H=d/d-target.h d/d-target.def target-hooks-macros.h
+MACHMODE_H=machmode.h mode-classes.def
+HOOKS_H=hooks.h
+HOSTHOOKS_DEF_H=hosthooks-def.h hooks.h
+LANGHOOKS_DEF_H=langhooks-def.h hooks.h
+TARGET_DEF_H=target-def.h target-hooks-def.h hooks.h targhooks.h
+C_TARGET_DEF_H=c-family/c-target-def.h c-family/c-target-hooks-def.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/arm/arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h c-family/c-common.h c-family/c-common.def tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h splay-tree.h line-map.h cpplib.h ggc.h gtype-desc.h statistics.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def hooks.h common/common-targhooks.h
+CORETYPES_H=coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h
+RTL_BASE_H=coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h rtl.h rtl.def reg-notes.def insn-notes.def line-map.h input.h real.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h fixed-value.h alias.h hashtab.h
+FIXED_VALUE_H=fixed-value.h
+RTL_H=coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h rtl.h rtl.def reg-notes.def insn-notes.def line-map.h input.h real.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h fixed-value.h alias.h hashtab.h flags.h flag-types.h options.h flag-types.h config/arm/arm-opts.h aarch-common.h genrtl.h
+READ_MD_H=obstack.h hashtab.h read-md.h
+INTERNAL_FN_H=internal-fn.h internal-fn.def
+TREE_CORE_H=tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/arm/arm-opts.h aarch-common.h real.h fixed-value.h
+TREE_H=tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/arm/arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h
+REGSET_H=regset.h bitmap.h hashtab.h statistics.h hard-reg-set.h
+BASIC_BLOCK_H=basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h
+GIMPLE_H=gimple.h gimple.def gsstruct.def vec.h statistics.h ggc.h gtype-desc.h statistics.h ggc.h gtype-desc.h statistics.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h tree-ssa-operands.h tree-ssa-alias.h internal-fn.h internal-fn.def hashtab.h hash-table.h ggc.h gtype-desc.h statistics.h is-a.h
+GCOV_IO_H=gcov-io.h version.h auto-host.h gcov-counter.def
+RECOG_H=recog.h
+EMIT_RTL_H=emit-rtl.h
+FLAGS_H=flags.h flag-types.h options.h flag-types.h config/arm/arm-opts.h aarch-common.h
+OPTIONS_H=options.h flag-types.h config/arm/arm-opts.h aarch-common.h
+FUNCTION_H=function.h hashtab.h tm.h options.h config/vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h
+EXPR_H=expr.h insn-config.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h rtl.h rtl.def reg-notes.def insn-notes.def line-map.h input.h real.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h fixed-value.h alias.h hashtab.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h genrtl.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h emit-rtl.h
+OPTABS_H=optabs.h insn-codes.h insn-opinit.h
+REGS_H=regs.h hard-reg-set.h
+CFGLOOP_H=cfgloop.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h bitmap.h hashtab.h statistics.h sbitmap.h
+IPA_UTILS_H=ipa-utils.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/arm/arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h cgraph.h vec.h statistics.h ggc.h gtype-desc.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h function.h hashtab.h tm.h options.h vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cif-code.def ipa-ref.h plugin-api.h is-a.h
+IPA_REFERENCE_H=ipa-reference.h bitmap.h hashtab.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/arm/arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h
+CGRAPH_H=cgraph.h vec.h statistics.h ggc.h gtype-desc.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/arm/arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h function.h hashtab.h tm.h options.h vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cif-code.def ipa-ref.h plugin-api.h is-a.h
+DF_H=df.h bitmap.h hashtab.h statistics.h regset.h bitmap.h hashtab.h statistics.h hard-reg-set.h sbitmap.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h alloc-pool.h timevar.h timevar.def
+RESOURCE_H=resource.h hard-reg-set.h df.h bitmap.h hashtab.h statistics.h regset.h bitmap.h hashtab.h statistics.h hard-reg-set.h sbitmap.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h alloc-pool.h timevar.h timevar.def
+GCC_H=gcc.h version.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def
+GGC_H=ggc.h gtype-desc.h statistics.h
+TIMEVAR_H=timevar.h timevar.def
+INSN_ATTR_H=insn-attr.h insn-attr-common.h insn-addr.h
+INSN_ADDR_H=insn-addr.h
+C_COMMON_H=c-family/c-common.h c-family/c-common.def tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/arm/arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h splay-tree.h line-map.h cpplib.h ggc.h gtype-desc.h statistics.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def
+C_PRAGMA_H=c-family/c-pragma.h line-map.h cpplib.h
+C_TREE_H=c/c-tree.h c-family/c-common.h c-family/c-common.def tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/arm/arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h splay-tree.h line-map.h cpplib.h ggc.h gtype-desc.h statistics.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def diagnostic.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def pretty-print.h line-map.h input.h obstack.h wide-int-print.h
+SYSTEM_H=system.h hwint.h libiberty.h safe-ctype.h filenames.h hashtab.h
+PREDICT_H=predict.h predict.def
+CPPLIB_H=line-map.h cpplib.h
+CODYLIB_H=cody.hh
+INPUT_H=line-map.h input.h
+OPTS_H=line-map.h input.h vec.h statistics.h ggc.h gtype-desc.h statistics.h opts.h obstack.h
+SYMTAB_H=symtab.h obstack.h
+CPP_INTERNAL_H=internal.h
+TREE_DUMP_H=tree-dump.h splay-tree.h line-map.h dumpfile.h
+TREE_PASS_H=tree-pass.h timevar.h timevar.def line-map.h dumpfile.h
+TREE_SSA_H=tree-ssa.h tree-ssa-operands.h bitmap.h hashtab.h statistics.h sbitmap.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h config/vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h gimple.h gimple.def gsstruct.def vec.h statistics.h ggc.h gtype-desc.h statistics.h ggc.h gtype-desc.h statistics.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h tree-ssa-operands.h tree-ssa-alias.h internal-fn.h internal-fn.def hashtab.h hash-table.h ggc.h gtype-desc.h statistics.h is-a.h hashtab.h cgraph.h vec.h statistics.h ggc.h gtype-desc.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h function.h hashtab.h tm.h options.h vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cif-code.def ipa-ref.h plugin-api.h is-a.h ipa-reference.h bitmap.h hashtab.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h tree-ssa-alias.h
+PRETTY_PRINT_H=pretty-print.h line-map.h input.h obstack.h wide-int-print.h
+TREE_PRETTY_PRINT_H=tree-pretty-print.h pretty-print.h line-map.h input.h obstack.h wide-int-print.h
+GIMPLE_PRETTY_PRINT_H=gimple-pretty-print.h tree-pretty-print.h pretty-print.h line-map.h input.h obstack.h wide-int-print.h
+DIAGNOSTIC_CORE_H=diagnostic-core.h line-map.h input.h bversion.h diagnostic.def
+DIAGNOSTIC_H=diagnostic.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def pretty-print.h line-map.h input.h obstack.h wide-int-print.h
+C_PRETTY_PRINT_H=c-family/c-pretty-print.h pretty-print.h line-map.h input.h obstack.h wide-int-print.h c-family/c-common.h c-family/c-common.def tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/arm/arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h splay-tree.h line-map.h cpplib.h ggc.h gtype-desc.h statistics.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h
+TREE_INLINE_H=tree-inline.h
+REAL_H=real.h
+LTO_STREAMER_H=lto-streamer.h plugin-api.h tm.h options.h config/vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h target.h target.def target-hooks-macros.h target-insns.def insn-modes.h insn-codes.h cgraph.h vec.h statistics.h ggc.h gtype-desc.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h function.h hashtab.h tm.h options.h vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cif-code.def ipa-ref.h plugin-api.h is-a.h vec.h statistics.h ggc.h gtype-desc.h statistics.h hashtab.h hash-table.h ggc.h gtype-desc.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h gimple.h gimple.def gsstruct.def vec.h statistics.h ggc.h gtype-desc.h statistics.h ggc.h gtype-desc.h statistics.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h tree-ssa-operands.h tree-ssa-alias.h internal-fn.h internal-fn.def hashtab.h hash-table.h ggc.h gtype-desc.h statistics.h is-a.h gcov-io.h version.h auto-host.h gcov-counter.def diagnostic.h diagnostic-core.h line-map.h input.h bversion.h diagnostic.def pretty-print.h line-map.h input.h obstack.h wide-int-print.h alloc-pool.h
+IPA_PROP_H=ipa-prop.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h config/arm/arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h vec.h statistics.h ggc.h gtype-desc.h statistics.h cgraph.h vec.h statistics.h ggc.h gtype-desc.h statistics.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h function.h hashtab.h tm.h options.h vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cif-code.def ipa-ref.h plugin-api.h is-a.h gimple.h gimple.def gsstruct.def vec.h statistics.h ggc.h gtype-desc.h statistics.h ggc.h gtype-desc.h statistics.h basic-block.h predict.h predict.def vec.h statistics.h ggc.h gtype-desc.h statistics.h function.h hashtab.h tm.h options.h vxworks-dummy.h elfos.h unknown-elf.h elf.h bpabi.h newlib-stdint.h aout.h arm.h arm-mlib.h initfini-array.h defaults.h insn-constants.h arm-cpu.h arm-isa.h insn-flags.h options.h flag-types.h arm-opts.h aarch-common.h hard-reg-set.h vec.h statistics.h ggc.h gtype-desc.h statistics.h line-map.h input.h cfg-flags.def cfghooks.h profile-count.h tree.h tree-core.h coretypes.h insn-modes.h signop.h wide-int.h wide-int-print.h insn-modes-inline.h machmode.h mode-classes.def double-int.h align.h poly-int.h poly-int-types.h all-tree.def tree.def c-family/c-common.def ada-tree.def cp-tree.def d-tree.def m2-tree.def objc-tree.def builtins.def sync-builtins.def omp-builtins.def gtm-builtins.def sanitizer.def line-map.h input.h statistics.h vec.h statistics.h ggc.h gtype-desc.h statistics.h treestruct.def hashtab.h alias.h symtab.h obstack.h flags.h flag-types.h options.h flag-types.h arm-opts.h aarch-common.h real.h fixed-value.h tree-check.h tree-ssa-operands.h tree-ssa-alias.h internal-fn.h internal-fn.def hashtab.h hash-table.h ggc.h gtype-desc.h statistics.h is-a.h alloc-pool.h
+BITMAP_H=bitmap.h hashtab.h statistics.h
+GCC_PLUGIN_H=gcc-plugin.h highlev-plugin-common.h plugin.def config.h auto-host.h ansidecl.h system.h hwint.h libiberty.h safe-ctype.h filenames.h hashtab.h hashtab.h
+PLUGIN_H=plugin.h gcc-plugin.h highlev-plugin-common.h plugin.def config.h auto-host.h ansidecl.h system.h hwint.h libiberty.h safe-ctype.h filenames.h hashtab.h hashtab.h
+PLUGIN_VERSION_H=plugin-version.h configargs.h
+CONTEXT_H=context.h
+GENSUPPORT_H=gensupport.h read-md.h optabs.def
+RTL_SSA_H=pretty-print.h line-map.h input.h obstack.h wide-int-print.h insn-config.h splay-tree-utils.h recog.h regs.h hard-reg-set.h function-abi.h obstack-utils.h mux-utils.h rtlanal.h memmodel.h emit-rtl.h rtl-ssa/accesses.h rtl-ssa/insns.h rtl-ssa/blocks.h rtl-ssa/changes.h rtl-ssa/functions.h rtl-ssa/is-a.inl rtl-ssa/access-utils.h rtl-ssa/insn-utils.h rtl-ssa/movement.h rtl-ssa/change-utils.h rtl-ssa/member-fns.inl
+GTFILES_H=gt-coverage.h gt-symtab-thunks.h gt-caller-save.h gt-symtab.h gt-alias.h gt-bitmap.h gt-cselib.h gt-cgraph.h gt-ipa-prop.h gt-ipa-cp.h gt-ipa-sra.h gt-ipa-modref.h gt-diagnostic-spec.h gt-dwarf2asm.h gt-dwarf2cfi.h gt-dwarf2ctf.h gt-dwarf2out.h gt-ctfout.h gt-btfout.h gt-tree-vect-generic.h gt-gimple-isel.h gt-dojump.h gt-emit-rtl.h gt-explow.h gt-expr.h gt-function.h gt-except.h gt-ggc-tests.h gt-gcse.h gt-godump.h gt-lists.h gt-optabs-libfuncs.h gt-profile.h gt-mcf.h gt-reg-stack.h gt-cfgrtl.h gt-stor-layout.h gt-stringpool.h gt-tree.h gt-varasm.h gt-tree-ssanames.h gt-tree-eh.h gt-tree-ssa-address.h gt-tree-cfg.h gt-tree-ssa-loop-ivopts.h gt-tree-dfa.h gt-tree-iterator.h gt-gimple-expr.h gt-tree-scalar-evolution.h gt-tree-profile.h gt-tree-nested.h gt-omp-general.h gt-omp-low.h gt-targhooks.h gt-arm.h gt-passes.h gt-cgraphclones.h gt-tree-phinodes.h gt-trans-mem.h gt-vtable-verify.h gt-asan.h gt-ubsan.h gt-tsan.h gt-sanopt.h gt-sancov.h gt-ipa-devirt.h gt-calls.h gt-analyzer-analyzer-language.h gt-arm-builtins.h gt-arm-mve-builtins.h gt-ada-decl.h gt-ada-trans.h gt-ada-utils.h gt-ada-misc.h gt-c-c-lang.h gt-c-c-decl.h gt-c-family-c-common.h gt-c-family-c-cppbuiltin.h gt-c-family-c-pragma.h gt-c-family-c-format.h gt-c-c-objc-common.h gt-c-c-parser.h gt-c-family-c-common.h gt-c-family-c-format.h gt-c-family-c-cppbuiltin.h gt-c-family-c-pragma.h gt-cp-call.h gt-cp-class.h gt-cp-constexpr.h gt-cp-contracts.h gt-cp-constraint.h gt-cp-coroutines.h gt-cp-cp-gimplify.h gt-cp-cp-lang.h gt-cp-cp-objcp-common.h gt-cp-decl.h gt-cp-decl2.h gt-cp-except.h gt-cp-friend.h gt-cp-init.h gt-cp-lambda.h gt-cp-lex.h gt-cp-logic.h gt-cp-mangle.h gt-cp-method.h gt-cp-module.h gt-cp-name-lookup.h gt-cp-parser.h gt-cp-pt.h gt-cp-rtti.h gt-cp-semantics.h gt-cp-tree.h gt-cp-typeck2.h gt-cp-vtable-class-hierarchy.h gt-d-d-builtins.h gt-d-d-lang.h gt-d-typeinfo.h gt-fortran-f95-lang.h gt-fortran-trans-decl.h gt-fortran-trans-intrinsic.h gt-fortran-trans-io.h gt-fortran-trans-stmt.h gt-fortran-trans-types.h gt-go-go-lang.h gt-jit-dummy-frontend.h gt-lto-lto-lang.h gt-lto-lto.h gt-lto-lto-common.h gt-lto-lto-dump.h gt-m2-gm2-lang.h gt-m2-rtegraph.h gt-m2-m2block.h gt-m2-m2builtins.h gt-m2-m2decl.h gt-m2-m2except.h gt-m2-m2expr.h gt-m2-m2statement.h gt-m2-m2type.h gt-objc-objc-act.h gt-objc-objc-runtime-shared-support.h gt-objc-objc-gnu-runtime-abi-01.h gt-objc-objc-next-runtime-abi-01.h gt-objc-objc-next-runtime-abi-02.h gt-c-c-parser.h gt-c-c-decl.h gt-c-c-objc-common.h gt-c-family-c-common.h gt-c-family-c-cppbuiltin.h gt-c-family-c-pragma.h gt-c-family-c-format.h gt-c-family-c-common.h gt-c-family-c-format.h gt-c-family-c-cppbuiltin.h gt-c-family-c-pragma.h gt-cp-call.h gt-cp-class.h gt-cp-constexpr.h gt-cp-contracts.h gt-cp-constraint.h gt-cp-coroutines.h gt-cp-cp-gimplify.h gt-objcp-objcp-lang.h gt-cp-cp-objcp-common.h gt-cp-decl.h gt-cp-decl2.h gt-cp-except.h gt-cp-friend.h gt-cp-init.h gt-cp-lambda.h gt-cp-lex.h gt-cp-logic.h gt-cp-mangle.h gt-cp-method.h gt-cp-module.h gt-cp-name-lookup.h gt-cp-parser.h gt-cp-pt.h gt-cp-rtti.h gt-cp-semantics.h gt-cp-tree.h gt-cp-typeck2.h gt-cp-vtable-class-hierarchy.h gt-objc-objc-act.h gt-objc-objc-gnu-runtime-abi-01.h gt-objc-objc-next-runtime-abi-01.h gt-objc-objc-next-runtime-abi-02.h gt-objc-objc-runtime-shared-support.h gt-rust-rust-lang.h
+GTFILES_LANG_H=gtype-ada.h gtype-c.h gtype-cp.h gtype-d.h gtype-fortran.h gtype-go.h gtype-jit.h gtype-lto.h gtype-m2.h gtype-objc.h gtype-objcp.h gtype-rust.h
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/backend.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/backend.h
new file mode 100644
index 0000000..42c6b76
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/backend.h
@@ -0,0 +1,35 @@
+/* Common Backend requirements.
+
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+ Contributed by Andrew MacLeod <amacleod@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_BACKEND_H
+#define GCC_BACKEND_H
+
+/* This is an aggregation header file. This means it should contain only
+ other include files. */
+
+#include "tm.h"
+#include "function.h"
+#include "bitmap.h"
+#include "sbitmap.h"
+#include "basic-block.h"
+#include "cfg.h"
+
+#endif /* GCC_BACKEND_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/basic-block.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/basic-block.h
new file mode 100644
index 0000000..29191e5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/basic-block.h
@@ -0,0 +1,642 @@
+/* Define control flow data structures for the CFG.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_BASIC_BLOCK_H
+#define GCC_BASIC_BLOCK_H
+
+#include <profile-count.h>
+
+/* Control flow edge information. */
+class GTY((user)) edge_def {
+public:
+ /* The two blocks at the ends of the edge. */
+ basic_block src;
+ basic_block dest;
+
+ /* Instructions queued on the edge. */
+ union edge_def_insns {
+ gimple_seq g;
+ rtx_insn *r;
+ } insns;
+
+ /* Auxiliary info specific to a pass. */
+ void *aux;
+
+ /* Location of any goto implicit in the edge. */
+ location_t goto_locus;
+
+ /* The index number corresponding to this edge in the edge vector
+ dest->preds. */
+ unsigned int dest_idx;
+
+ int flags; /* see cfg-flags.def */
+ profile_probability probability;
+
+ /* Return count of edge E. */
+ inline profile_count count () const;
+};
+
+/* Masks for edge.flags. */
+#define DEF_EDGE_FLAG(NAME,IDX) EDGE_##NAME = 1 << IDX ,
+enum cfg_edge_flags {
+#include "cfg-flags.def"
+ LAST_CFG_EDGE_FLAG /* this is only used for EDGE_ALL_FLAGS */
+};
+#undef DEF_EDGE_FLAG
+
+/* Bit mask for all edge flags. */
+#define EDGE_ALL_FLAGS ((LAST_CFG_EDGE_FLAG - 1) * 2 - 1)
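+
+/* A worked example of the mask arithmetic above, with hypothetical flag
+   values: if cfg-flags.def defined five edge flags, their values would
+   be 1, 2, 4, 8 and 16, and LAST_CFG_EDGE_FLAG, as the next consecutive
+   enumerator, would be 17.  Then (17 - 1) * 2 - 1 == 31 == 0b11111,
+   a mask covering exactly the five defined flag bits.  */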
+
+/* The following four flags all indicate something special about an edge.
+ Test the edge flags on EDGE_COMPLEX to detect all forms of "strange"
+ control flow transfers. */
+#define EDGE_COMPLEX \
+ (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_EH | EDGE_PRESERVE)
+
+struct GTY(()) rtl_bb_info {
+ /* The first insn of the block is embedded into bb->il.x. */
+ /* The last insn of the block. */
+ rtx_insn *end_;
+
+  /* In CFGlayout mode, these point to insn notes/jumptables to be placed
+     just before and after the block.  */
+ rtx_insn *header_;
+ rtx_insn *footer_;
+};
+
+struct GTY(()) gimple_bb_info {
+ /* Sequence of statements in this block. */
+ gimple_seq seq;
+
+ /* PHI nodes for this block. */
+ gimple_seq phi_nodes;
+};
+
+/* A basic block is a sequence of instructions with only one entry and
+   only one exit.  If any one of the instructions is executed, they
+ will all be executed, and in sequence from first to last.
+
+ There may be COND_EXEC instructions in the basic block. The
+ COND_EXEC *instructions* will be executed -- but if the condition
+ is false the conditionally executed *expressions* will of course
+ not be executed. We don't consider the conditionally executed
+ expression (which might have side-effects) to be in a separate
+ basic block because the program counter will always be at the same
+ location after the COND_EXEC instruction, regardless of whether the
+ condition is true or not.
+
+ Basic blocks need not start with a label nor end with a jump insn.
+ For example, a previous basic block may just "conditionally fall"
+ into the succeeding basic block, and the last basic block need not
+ end with a jump insn. Block 0 is a descendant of the entry block.
+
+ A basic block beginning with two labels cannot have notes between
+ the labels.
+
+ Data for jump tables are stored in jump_insns that occur in no
+ basic block even though these insns can follow or precede insns in
+ basic blocks. */
+
+/* Basic block information indexed by block number. */
+struct GTY((chain_next ("%h.next_bb"), chain_prev ("%h.prev_bb"))) basic_block_def {
+ /* The edges into and out of the block. */
+ vec<edge, va_gc> *preds;
+ vec<edge, va_gc> *succs;
+
+ /* Auxiliary info specific to a pass. */
+ void *GTY ((skip (""))) aux;
+
+ /* Innermost loop containing the block. */
+ class loop *loop_father;
+
+ /* The dominance and postdominance information node. */
+ struct et_node * GTY ((skip (""))) dom[2];
+
+ /* Previous and next blocks in the chain. */
+ basic_block prev_bb;
+ basic_block next_bb;
+
+ union basic_block_il_dependent {
+ struct gimple_bb_info GTY ((tag ("0"))) gimple;
+ struct {
+ rtx_insn *head_;
+ struct rtl_bb_info * rtl;
+ } GTY ((tag ("1"))) x;
+ } GTY ((desc ("((%1.flags & BB_RTL) != 0)"))) il;
+
+ /* Various flags. See cfg-flags.def. */
+ int flags;
+
+ /* The index of this block. */
+ int index;
+
+ /* Expected number of executions: calculated in profile.cc. */
+ profile_count count;
+};
+
+/* This ensures that struct gimple_bb_info is no larger than
+ struct rtl_bb_info, so that inlining the former into basic_block_def
+ is the better choice. */
+STATIC_ASSERT (sizeof (rtl_bb_info) >= sizeof (gimple_bb_info));
+
+#define BB_FREQ_MAX 10000
+
+/* Masks for basic_block.flags. */
+#define DEF_BASIC_BLOCK_FLAG(NAME,IDX) BB_##NAME = 1 << IDX ,
+enum cfg_bb_flags
+{
+#include "cfg-flags.def"
+ LAST_CFG_BB_FLAG /* this is only used for BB_ALL_FLAGS */
+};
+#undef DEF_BASIC_BLOCK_FLAG
+
+/* Bit mask for all basic block flags. */
+#define BB_ALL_FLAGS ((LAST_CFG_BB_FLAG - 1) * 2 - 1)
+
+/* Bit mask for all basic block flags that must be preserved. These are
+ the bit masks that are *not* cleared by clear_bb_flags. */
+#define BB_FLAGS_TO_PRESERVE \
+ (BB_DISABLE_SCHEDULE | BB_RTL | BB_NON_LOCAL_GOTO_TARGET \
+ | BB_HOT_PARTITION | BB_COLD_PARTITION)
+
+/* Dummy bitmask for convenience in the hot/cold partitioning code. */
+#define BB_UNPARTITIONED 0
+
+/* Partitions, to be used when partitioning hot and cold basic blocks into
+ separate sections. */
+#define BB_PARTITION(bb) ((bb)->flags & (BB_HOT_PARTITION|BB_COLD_PARTITION))
+#define BB_SET_PARTITION(bb, part) do { \
+ basic_block bb_ = (bb); \
+ bb_->flags = ((bb_->flags & ~(BB_HOT_PARTITION|BB_COLD_PARTITION)) \
+ | (part)); \
+} while (0)
+
+#define BB_COPY_PARTITION(dstbb, srcbb) \
+ BB_SET_PARTITION (dstbb, BB_PARTITION (srcbb))
+
+/* Defines for accessing the fields of the CFG structure for function FN. */
+#define ENTRY_BLOCK_PTR_FOR_FN(FN) ((FN)->cfg->x_entry_block_ptr)
+#define EXIT_BLOCK_PTR_FOR_FN(FN) ((FN)->cfg->x_exit_block_ptr)
+#define basic_block_info_for_fn(FN) ((FN)->cfg->x_basic_block_info)
+#define n_basic_blocks_for_fn(FN) ((FN)->cfg->x_n_basic_blocks)
+#define n_edges_for_fn(FN) ((FN)->cfg->x_n_edges)
+#define last_basic_block_for_fn(FN) ((FN)->cfg->x_last_basic_block)
+#define label_to_block_map_for_fn(FN) ((FN)->cfg->x_label_to_block_map)
+#define profile_status_for_fn(FN) ((FN)->cfg->x_profile_status)
+
+#define BASIC_BLOCK_FOR_FN(FN,N) \
+ ((*basic_block_info_for_fn (FN))[(N)])
+#define SET_BASIC_BLOCK_FOR_FN(FN,N,BB) \
+ ((*basic_block_info_for_fn (FN))[(N)] = (BB))
+
+/* For iterating over basic blocks. */
+#define FOR_BB_BETWEEN(BB, FROM, TO, DIR) \
+ for (BB = FROM; BB != TO; BB = BB->DIR)
+
+#define FOR_EACH_BB_FN(BB, FN) \
+ FOR_BB_BETWEEN (BB, (FN)->cfg->x_entry_block_ptr->next_bb, (FN)->cfg->x_exit_block_ptr, next_bb)
+
+#define FOR_EACH_BB_REVERSE_FN(BB, FN) \
+ FOR_BB_BETWEEN (BB, (FN)->cfg->x_exit_block_ptr->prev_bb, (FN)->cfg->x_entry_block_ptr, prev_bb)
+
+/* For iterating over insns in basic block. */
+#define FOR_BB_INSNS(BB, INSN) \
+ for ((INSN) = BB_HEAD (BB); \
+ (INSN) && (INSN) != NEXT_INSN (BB_END (BB)); \
+ (INSN) = NEXT_INSN (INSN))
+
+/* For iterating over insns in basic block when we might remove the
+ current insn. */
+#define FOR_BB_INSNS_SAFE(BB, INSN, CURR) \
+ for ((INSN) = BB_HEAD (BB), (CURR) = (INSN) ? NEXT_INSN ((INSN)): NULL; \
+ (INSN) && (INSN) != NEXT_INSN (BB_END (BB)); \
+ (INSN) = (CURR), (CURR) = (INSN) ? NEXT_INSN ((INSN)) : NULL)
+
+#define FOR_BB_INSNS_REVERSE(BB, INSN) \
+ for ((INSN) = BB_END (BB); \
+ (INSN) && (INSN) != PREV_INSN (BB_HEAD (BB)); \
+ (INSN) = PREV_INSN (INSN))
+
+#define FOR_BB_INSNS_REVERSE_SAFE(BB, INSN, CURR) \
+ for ((INSN) = BB_END (BB),(CURR) = (INSN) ? PREV_INSN ((INSN)) : NULL; \
+ (INSN) && (INSN) != PREV_INSN (BB_HEAD (BB)); \
+ (INSN) = (CURR), (CURR) = (INSN) ? PREV_INSN ((INSN)) : NULL)
+
+/* Cycles through _all_ basic blocks, even the fake ones (entry and
+ exit block). */
+
+#define FOR_ALL_BB_FN(BB, FN) \
+ for (BB = ENTRY_BLOCK_PTR_FOR_FN (FN); BB; BB = BB->next_bb)
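+
+/* Illustrative sketch (not part of the header's interface): in an RTL
+   pass, the block and insn iterators compose naturally.  "cfun" and
+   "dump_file" are the usual GCC globals; assume dump_file is non-NULL.
+
+     basic_block bb;
+     rtx_insn *insn;
+     FOR_EACH_BB_FN (bb, cfun)
+       FOR_BB_INSNS (bb, insn)
+         if (INSN_P (insn))
+           fprintf (dump_file, "bb %d: insn %d\n",
+                    bb->index, INSN_UID (insn));
+*/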
+
+
+/* Stuff for recording basic block info. */
+
+/* For now, these will be functions (so that they can include checked casts
+   to rtx_insn).  Once the underlying fields are converted from rtx
+ to rtx_insn, these can be converted back to macros. */
+
+#define BB_HEAD(B) (B)->il.x.head_
+#define BB_END(B) (B)->il.x.rtl->end_
+#define BB_HEADER(B) (B)->il.x.rtl->header_
+#define BB_FOOTER(B) (B)->il.x.rtl->footer_
+
+/* Special block numbers [markers] for entry and exit.
+ Neither of them is supposed to hold actual statements. */
+#define ENTRY_BLOCK (0)
+#define EXIT_BLOCK (1)
+
+/* The two blocks that are always in the cfg. */
+#define NUM_FIXED_BLOCKS (2)
+
+/* This is the value which indicates no edge is present. */
+#define EDGE_INDEX_NO_EDGE -1
+
+/* EDGE_INDEX returns an integer index for an edge, or EDGE_INDEX_NO_EDGE
+ if there is no edge between the 2 basic blocks. */
+#define EDGE_INDEX(el, pred, succ) (find_edge_index ((el), (pred), (succ)))
+
+/* INDEX_EDGE_PRED_BB and INDEX_EDGE_SUCC_BB return a pointer to the basic
+ block which is either the pred or succ end of the indexed edge. */
+#define INDEX_EDGE_PRED_BB(el, index) ((el)->index_to_edge[(index)]->src)
+#define INDEX_EDGE_SUCC_BB(el, index) ((el)->index_to_edge[(index)]->dest)
+
+/* INDEX_EDGE returns a pointer to the edge. */
+#define INDEX_EDGE(el, index) ((el)->index_to_edge[(index)])
+
+/* Number of edges in the compressed edge list. */
+#define NUM_EDGES(el) ((el)->num_edges)
+
+/* BB is assumed to contain a conditional jump.  Return the fallthru edge.  */
+#define FALLTHRU_EDGE(bb) (EDGE_SUCC ((bb), 0)->flags & EDGE_FALLTHRU \
+ ? EDGE_SUCC ((bb), 0) : EDGE_SUCC ((bb), 1))
+
+/* BB is assumed to contain a conditional jump.  Return the branch edge.  */
+#define BRANCH_EDGE(bb) (EDGE_SUCC ((bb), 0)->flags & EDGE_FALLTHRU \
+ ? EDGE_SUCC ((bb), 1) : EDGE_SUCC ((bb), 0))
+
+/* Return expected execution frequency of the edge E. */
+#define EDGE_FREQUENCY(e) e->count ().to_frequency (cfun)
+
+/* Compute a scale factor (or probability) suitable for scaling of
+ gcov_type values via apply_probability() and apply_scale(). */
+#define GCOV_COMPUTE_SCALE(num,den) \
+ ((den) ? RDIV ((num) * REG_BR_PROB_BASE, (den)) : REG_BR_PROB_BASE)
+
+/* Return nonzero if edge is critical. */
+#define EDGE_CRITICAL_P(e) (EDGE_COUNT ((e)->src->succs) >= 2 \
+ && EDGE_COUNT ((e)->dest->preds) >= 2)
+
+#define EDGE_COUNT(ev) vec_safe_length (ev)
+#define EDGE_I(ev,i) (*ev)[(i)]
+#define EDGE_PRED(bb,i) (*(bb)->preds)[(i)]
+#define EDGE_SUCC(bb,i) (*(bb)->succs)[(i)]
+
+/* Returns true if BB has precisely one successor. */
+
+inline bool
+single_succ_p (const_basic_block bb)
+{
+ return EDGE_COUNT (bb->succs) == 1;
+}
+
+/* Returns true if BB has precisely one predecessor. */
+
+inline bool
+single_pred_p (const_basic_block bb)
+{
+ return EDGE_COUNT (bb->preds) == 1;
+}
+
+/* Returns the single successor edge of basic block BB. Aborts if
+ BB does not have exactly one successor. */
+
+inline edge
+single_succ_edge (const_basic_block bb)
+{
+ gcc_checking_assert (single_succ_p (bb));
+ return EDGE_SUCC (bb, 0);
+}
+
+/* Returns the single predecessor edge of basic block BB. Aborts
+ if BB does not have exactly one predecessor. */
+
+inline edge
+single_pred_edge (const_basic_block bb)
+{
+ gcc_checking_assert (single_pred_p (bb));
+ return EDGE_PRED (bb, 0);
+}
+
+/* Returns the single successor block of basic block BB. Aborts
+ if BB does not have exactly one successor. */
+
+inline basic_block
+single_succ (const_basic_block bb)
+{
+ return single_succ_edge (bb)->dest;
+}
+
+/* Returns the single predecessor block of basic block BB. Aborts
+ if BB does not have exactly one predecessor.*/
+
+inline basic_block
+single_pred (const_basic_block bb)
+{
+ return single_pred_edge (bb)->src;
+}
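+
+/* Illustrative sketch: these predicates and accessors make it easy to
+   walk a chain of straight-line blocks; the helper name is hypothetical.
+
+     static basic_block
+     skip_trivial_blocks (basic_block bb)
+     {
+       while (single_succ_p (bb) && single_pred_p (single_succ (bb)))
+         bb = single_succ (bb);
+       return bb;
+     }
+*/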
+
+/* Iterator object for edges. */
+
+struct edge_iterator {
+ unsigned index;
+ vec<edge, va_gc> **container;
+};
+
+inline vec<edge, va_gc> *
+ei_container (edge_iterator i)
+{
+ gcc_checking_assert (i.container);
+ return *i.container;
+}
+
+#define ei_start(iter) ei_start_1 (&(iter))
+#define ei_last(iter) ei_last_1 (&(iter))
+
+/* Return an iterator pointing to the start of an edge vector. */
+inline edge_iterator
+ei_start_1 (vec<edge, va_gc> **ev)
+{
+ edge_iterator i;
+
+ i.index = 0;
+ i.container = ev;
+
+ return i;
+}
+
+/* Return an iterator pointing to the last element of an edge
+ vector. */
+inline edge_iterator
+ei_last_1 (vec<edge, va_gc> **ev)
+{
+ edge_iterator i;
+
+ i.index = EDGE_COUNT (*ev) - 1;
+ i.container = ev;
+
+ return i;
+}
+
+/* Is the iterator `i' at the end of the sequence? */
+inline bool
+ei_end_p (edge_iterator i)
+{
+ return (i.index == EDGE_COUNT (ei_container (i)));
+}
+
+/* Is the iterator `i' at one position before the end of the
+ sequence? */
+inline bool
+ei_one_before_end_p (edge_iterator i)
+{
+ return (i.index + 1 == EDGE_COUNT (ei_container (i)));
+}
+
+/* Advance the iterator to the next element. */
+inline void
+ei_next (edge_iterator *i)
+{
+ gcc_checking_assert (i->index < EDGE_COUNT (ei_container (*i)));
+ i->index++;
+}
+
+/* Move the iterator to the previous element. */
+inline void
+ei_prev (edge_iterator *i)
+{
+ gcc_checking_assert (i->index > 0);
+ i->index--;
+}
+
+/* Return the edge pointed to by the iterator `i'. */
+inline edge
+ei_edge (edge_iterator i)
+{
+ return EDGE_I (ei_container (i), i.index);
+}
+
+/* Return an edge pointed to by the iterator. Do it safely so that
+ NULL is returned when the iterator is pointing at the end of the
+ sequence. */
+inline edge
+ei_safe_edge (edge_iterator i)
+{
+ return !ei_end_p (i) ? ei_edge (i) : NULL;
+}
+
+/* Return 1 if we should continue to iterate. Return 0 otherwise.
+   *P is set to the next edge if we are to continue to iterate
+   and to NULL otherwise.  */
+
+inline bool
+ei_cond (edge_iterator ei, edge *p)
+{
+ if (!ei_end_p (ei))
+ {
+ *p = ei_edge (ei);
+ return 1;
+ }
+ else
+ {
+ *p = NULL;
+ return 0;
+ }
+}
+
+/* This macro serves as a convenient way to iterate each edge in a
+   vector of predecessor or successor edges.  It must not be used when
+   an element might be removed during the traversal, otherwise
+   elements will be missed.  Instead, use a for-loop like the one shown
+   below:
+
+   for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
+     {
+       if (e != taken_edge)
+         remove_edge (e);
+       else
+         ei_next (&ei);
+     }
+*/
+
+#define FOR_EACH_EDGE(EDGE,ITER,EDGE_VEC) \
+ for ((ITER) = ei_start ((EDGE_VEC)); \
+ ei_cond ((ITER), &(EDGE)); \
+ ei_next (&(ITER)))
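+
+/* Typical read-only use, as a sketch: count the EH predecessor edges
+   of a block BB.
+
+     edge e;
+     edge_iterator ei;
+     unsigned n_eh = 0;
+     FOR_EACH_EDGE (e, ei, bb->preds)
+       if (e->flags & EDGE_EH)
+         n_eh++;
+*/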
+
+#define CLEANUP_EXPENSIVE 1 /* Do relatively expensive optimizations
+ except for edge forwarding */
+#define CLEANUP_CROSSJUMP 2 /* Do crossjumping. */
+#define CLEANUP_POST_REGSTACK 4 /* We run after reg-stack and need
+                                        to care about REG_DEAD notes.  */
+#define CLEANUP_THREADING 8 /* Do jump threading. */
+#define CLEANUP_NO_INSN_DEL 16 /* Do not try to delete trivially dead
+ insns. */
+#define CLEANUP_CFGLAYOUT 32 /* Do cleanup in cfglayout mode. */
+#define CLEANUP_CFG_CHANGED 64 /* The caller changed the CFG. */
+#define CLEANUP_NO_PARTITIONING 128 /* Do not try to fix partitions. */
+#define CLEANUP_FORCE_FAST_DCE 0x100 /* Force run_fast_dce to be called
+ at least once. */
+
+/* Return true if BB is in a transaction. */
+
+inline bool
+bb_in_transaction (basic_block bb)
+{
+ return bb->flags & BB_IN_TRANSACTION;
+}
+
+/* Return true when one of the predecessor edges of BB is marked with EDGE_EH. */
+inline bool
+bb_has_eh_pred (basic_block bb)
+{
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ if (e->flags & EDGE_EH)
+ return true;
+ }
+ return false;
+}
+
+/* Return true when one of the predecessor edges of BB is marked with EDGE_ABNORMAL. */
+inline bool
+bb_has_abnormal_pred (basic_block bb)
+{
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ if (e->flags & EDGE_ABNORMAL)
+ return true;
+ }
+ return false;
+}
+
+/* Return the fallthru edge in EDGES if it exists, NULL otherwise. */
+inline edge
+find_fallthru_edge (vec<edge, va_gc> *edges)
+{
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, edges)
+ if (e->flags & EDGE_FALLTHRU)
+ break;
+
+ return e;
+}
+
+/* Check that the probability is sane.  */
+
+inline void
+check_probability (int prob)
+{
+ gcc_checking_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
+}
+
+/* Given PROB1 and PROB2, return PROB1*PROB2/REG_BR_PROB_BASE.
+ Used to combine BB probabilities. */
+
+inline int
+combine_probabilities (int prob1, int prob2)
+{
+ check_probability (prob1);
+ check_probability (prob2);
+ return RDIV (prob1 * prob2, REG_BR_PROB_BASE);
+}
+
+/* Apply scale factor SCALE on frequency or count FREQ. Use this
+ interface when potentially scaling up, so that SCALE is not
+ constrained to be < REG_BR_PROB_BASE. */
+
+inline gcov_type
+apply_scale (gcov_type freq, gcov_type scale)
+{
+ return RDIV (freq * scale, REG_BR_PROB_BASE);
+}
+
+/* Apply probability PROB on frequency or count FREQ. */
+
+inline gcov_type
+apply_probability (gcov_type freq, int prob)
+{
+ check_probability (prob);
+ return apply_scale (freq, prob);
+}
+
+/* Return inverse probability for PROB. */
+
+inline int
+inverse_probability (int prob1)
+{
+ check_probability (prob1);
+ return REG_BR_PROB_BASE - prob1;
+}
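+
+/* A short worked example of the helpers above, assuming the usual
+   REG_BR_PROB_BASE of 10000 (so a probability of 30% is encoded as
+   3000):
+
+     combine_probabilities (3000, 5000)
+       == RDIV (3000 * 5000, 10000) == 1500   (i.e. 15%)
+     inverse_probability (3000) == 7000       (i.e. 70%)  */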
+
+/* Return true if BB has at least one abnormal or EH outgoing edge.  */
+
+inline bool
+has_abnormal_or_eh_outgoing_edge_p (basic_block bb)
+{
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (e->flags & (EDGE_ABNORMAL | EDGE_EH))
+ return true;
+
+ return false;
+}
+
+/* Return true when one of the predecessor edges of BB is marked with
+ EDGE_ABNORMAL_CALL or EDGE_EH. */
+
+inline bool
+has_abnormal_call_or_eh_pred_edge_p (basic_block bb)
+{
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
+ return true;
+
+ return false;
+}
+
+/* Return count of edge E. */
+inline profile_count edge_def::count () const
+{
+ return src->count.apply_probability (probability);
+}
+
+#endif /* GCC_BASIC_BLOCK_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/bb-reorder.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/bb-reorder.h
new file mode 100644
index 0000000..c24322d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/bb-reorder.h
@@ -0,0 +1,40 @@
+/* Basic block reordering routines for the GNU compiler.
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_BB_REORDER
+#define GCC_BB_REORDER
+
+/* Target-specific globals. */
+struct target_bb_reorder {
+ /* Length of unconditional jump instruction. */
+ int x_uncond_jump_length;
+};
+
+extern struct target_bb_reorder default_target_bb_reorder;
+#if SWITCHABLE_TARGET
+extern struct target_bb_reorder *this_target_bb_reorder;
+#else
+#define this_target_bb_reorder (&default_target_bb_reorder)
+#endif
+
+extern int get_uncond_jump_length (void);
+
+extern void insert_section_boundary_note (void);
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/bitmap.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/bitmap.h
new file mode 100644
index 0000000..43337d2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/bitmap.h
@@ -0,0 +1,1089 @@
+/* Functions to support general, open-ended bitmaps.
+ Copyright (C) 1997-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_BITMAP_H
+#define GCC_BITMAP_H
+
+/* Implementation of sparse integer sets as a linked list or tree.
+
+ This sparse set representation is suitable for sparse sets with an
+ unknown (a priori) universe.
+
+ Sets are represented as double-linked lists of container nodes of
+   type "struct bitmap_element" or as binary trees of the same
+ container nodes. Each container node consists of an index for the
+ first member that could be held in the container, a small array of
+ integers that represent the members in the container, and pointers
+ to the next and previous element in the linked list, or left and
+ right children in the tree. In linked-list form, the container
+ nodes in the list are sorted in ascending order, i.e. the head of
+ the list holds the element with the smallest member of the set.
+ In tree form, nodes to the left have a smaller container index.
+
+ For a given member I in the set:
+   - the element for I will have index I / (bits per element)
+   - the position for I within the element is I % (bits per element)
+
+ This representation is very space-efficient for large sparse sets, and
+ the size of the set can be changed dynamically without much overhead.
+ An important parameter is the number of bits per element. In this
+ implementation, there are 128 bits per element. This results in a
+ high storage overhead *per element*, but a small overall overhead if
+ the set is very sparse.
+
+ The storage requirements for linked-list sparse sets are O(E), with E->N
+ in the worst case (a sparse set with large distances between the values
+ of the set members).
+
+ This representation also works well for data flow problems where the size
+ of the set may grow dynamically, but care must be taken that the member_p,
+ add_member, and remove_member operations occur with a suitable access
+ pattern.
+
+ The linked-list set representation works well for problems involving very
+ sparse sets. The canonical example in GCC is, of course, the "set of
+ sets" for some CFG-based data flow problems (liveness analysis, dominance
+ frontiers, etc.).
+
+ For random-access sparse sets of unknown universe, the binary tree
+ representation is likely to be a more suitable choice. Theoretical
+ access times for the binary tree representation are better than those
+ for the linked-list, but in practice this is only true for truely
+   for the linked-list, but in practice this is only true for truly
+
+ Often the most suitable representation during construction of the set
+ is not the best choice for the usage of the set. For such cases, the
+ "view" of the set can be changed from one representation to the other.
+ This is an O(E) operation:
+
+ * from list to tree view : bitmap_tree_view
+ * from tree to list view : bitmap_list_view
+
+ Traversing linked lists or trees can be cache-unfriendly. Performance
+ can be improved by keeping container nodes in the set grouped together
+ in memory, using a dedicated obstack for a set (or group of related
+   sets).  Elements allocated on an obstack are released to a free list
+   and reused from it later.  If multiple sets are allocated on the same
+ obstack, elements freed from one set may be re-used for one of the other
+ sets. This usually helps avoid cache misses.
+
+ A single free-list is used for all sets allocated in GGC space. This is
+ bad for persistent sets, so persistent sets should be allocated on an
+ obstack whenever possible.
+
+ For random-access sets with a known, relatively small universe size, the
+ SparseSet or simple bitmap representations may be more efficient than a
+ linked-list set.
+
+
+ LINKED LIST FORM
+ ================
+
+ In linked-list form, in-order iterations of the set can be executed
+ efficiently. The downside is that many random-access operations are
+ relatively slow, because the linked list has to be traversed to test
+   membership (i.e. member_p/add_member/remove_member).
+
+ To improve the performance of this set representation, the last
+ accessed element and its index are cached. For membership tests on
+ members close to recently accessed members, the cached last element
+   makes the membership test a constant-time operation.
+
+ The following operations can always be performed in O(1) time in
+ list view:
+
+ * clear : bitmap_clear
+ * smallest_member : bitmap_first_set_bit
+ * choose_one : (not implemented, but could be
+ in constant time)
+
+ The following operations can be performed in O(E) time worst-case in
+ list view (with E the number of elements in the linked list), but in
+   O(1) time with a suitable access pattern:
+
+ * member_p : bitmap_bit_p
+ * add_member : bitmap_set_bit / bitmap_set_range
+ * remove_member : bitmap_clear_bit / bitmap_clear_range
+
+ The following operations can be performed in O(E) time in list view:
+
+ * cardinality : bitmap_count_bits
+   * largest_member       : bitmap_last_set_bit (but this could be done
+                            in constant time with a pointer to
+ the last element in the chain)
+ * set_size : bitmap_last_set_bit
+
+ In tree view the following operations can all be performed in O(log E)
+ amortized time with O(E) worst-case behavior.
+
+ * smallest_member
+ * largest_member
+ * set_size
+ * member_p
+ * add_member
+ * remove_member
+
+ Additionally, the linked-list sparse set representation supports
+ enumeration of the members in O(E) time:
+
+ * forall : EXECUTE_IF_SET_IN_BITMAP
+ * set_copy : bitmap_copy
+ * set_intersection : bitmap_intersect_p /
+ bitmap_and / bitmap_and_into /
+ EXECUTE_IF_AND_IN_BITMAP
+ * set_union : bitmap_ior / bitmap_ior_into
+ * set_difference : bitmap_intersect_compl_p /
+                            bitmap_and_compl / bitmap_and_compl_into /
+                            EXECUTE_IF_AND_COMPL_IN_BITMAP
+   * set_disjunction       : bitmap_xor / bitmap_xor_into
+ * set_compare : bitmap_equal_p
+
+ Some operations on 3 sets that occur frequently in data flow problems
+ are also implemented:
+
+ * A | (B & C) : bitmap_ior_and_into
+ * A | (B & ~C) : bitmap_ior_and_compl /
+ bitmap_ior_and_compl_into
+
+
+ BINARY TREE FORM
+ ================
+ An alternate "view" of a bitmap is its binary tree representation.
+ For this representation, splay trees are used because they can be
+ implemented using the same data structures as the linked list, with
+ no overhead for meta-data (like color, or rank) on the tree nodes.
+
+ In binary tree form, random-access to the set is much more efficient
+ than for the linked-list representation. Downsides are the high cost
+ of clearing the set, and the relatively large number of operations
+ necessary to balance the tree. Also, iterating the set members is
+ not supported.
+
+ As for the linked-list representation, the last accessed element and
+   its index are cached, so that membership tests on the most recently
+   accessed members are constant-time operations.  Other lookups take
+   O(log E) time amortized (but O(E) time worst-case).
+
+ The following operations can always be performed in O(1) time:
+
+ * choose_one : (not implemented, but could be
+ implemented in constant time)
+
+   The following operations can be performed in O(log E) time amortized
+   but O(E) time worst-case, and in O(1) time if the same element is
+   accessed again.
+
+ * member_p : bitmap_bit_p
+ * add_member : bitmap_set_bit
+ * remove_member : bitmap_clear_bit
+
+   The following operations can be performed in O(log E) time amortized
+ but O(E) time worst-case:
+
+ * smallest_member : bitmap_first_set_bit
+ * largest_member : bitmap_last_set_bit
+ * set_size : bitmap_last_set_bit
+
+ The following operations can be performed in O(E) time:
+
+ * clear : bitmap_clear
+
+ The binary tree sparse set representation does *not* support any form
+   of enumeration, and also does *not* support logical operations on sets.
+ The binary tree representation is only supposed to be used for sets
+ on which many random-access membership tests will happen. */
+
+#include "obstack.h"
+#include "array-traits.h"
+
+/* Bitmap memory usage. */
+class bitmap_usage: public mem_usage
+{
+public:
+  /* Default constructor.  */
+ bitmap_usage (): m_nsearches (0), m_search_iter (0) {}
+ /* Constructor. */
+ bitmap_usage (size_t allocated, size_t times, size_t peak,
+ uint64_t nsearches, uint64_t search_iter)
+ : mem_usage (allocated, times, peak),
+ m_nsearches (nsearches), m_search_iter (search_iter) {}
+
+ /* Sum the usage with SECOND usage. */
+ bitmap_usage
+ operator+ (const bitmap_usage &second)
+ {
+ return bitmap_usage (m_allocated + second.m_allocated,
+ m_times + second.m_times,
+ m_peak + second.m_peak,
+ m_nsearches + second.m_nsearches,
+ m_search_iter + second.m_search_iter);
+ }
+
+  /* Dump usage coupled to LOC location, where TOTAL is the sum of all rows.  */
+ inline void
+ dump (mem_location *loc, const mem_usage &total) const
+ {
+ char *location_string = loc->to_string ();
+
+ fprintf (stderr, "%-48s " PRsa (9) ":%5.1f%%"
+ PRsa (9) PRsa (9) ":%5.1f%%"
+ PRsa (11) PRsa (11) "%10s\n",
+ location_string, SIZE_AMOUNT (m_allocated),
+ get_percent (m_allocated, total.m_allocated),
+ SIZE_AMOUNT (m_peak), SIZE_AMOUNT (m_times),
+ get_percent (m_times, total.m_times),
+ SIZE_AMOUNT (m_nsearches), SIZE_AMOUNT (m_search_iter),
+ loc->m_ggc ? "ggc" : "heap");
+
+ free (location_string);
+ }
+
+ /* Dump header with NAME. */
+ static inline void
+ dump_header (const char *name)
+ {
+ fprintf (stderr, "%-48s %11s%16s%17s%12s%12s%10s\n", name, "Leak", "Peak",
+ "Times", "N searches", "Search iter", "Type");
+ }
+
+  /* Number of search operations.  */
+ uint64_t m_nsearches;
+ /* Number of search iterations. */
+ uint64_t m_search_iter;
+};
+
+/* Bitmap memory description. */
+extern mem_alloc_description<bitmap_usage> bitmap_mem_desc;
+
+/* Fundamental storage type for bitmap. */
+
+typedef unsigned long BITMAP_WORD;
+/* BITMAP_WORD_BITS needs to be unsigned, but cannot contain casts as
+ it is used in preprocessor directives -- hence the 1u. */
+#define BITMAP_WORD_BITS (CHAR_BIT * SIZEOF_LONG * 1u)
+
+/* Number of words to use for each element in the linked list. */
+
+#ifndef BITMAP_ELEMENT_WORDS
+#define BITMAP_ELEMENT_WORDS ((128 + BITMAP_WORD_BITS - 1) / BITMAP_WORD_BITS)
+#endif
+
+/* Number of bits in each actual element of a bitmap. */
+
+#define BITMAP_ELEMENT_ALL_BITS (BITMAP_ELEMENT_WORDS * BITMAP_WORD_BITS)
+
+/* Obstack for allocating bitmaps and elements from. */
+struct bitmap_obstack {
+ struct bitmap_element *elements;
+ bitmap_head *heads;
+ struct obstack obstack;
+};
+
+/* Bitmap set element. We use a linked list to hold only the bits that
+   are set.  This allows us to grow the bitset dynamically without
+ having to realloc and copy a giant bit array.
+
+ The free list is implemented as a list of lists. There is one
+ outer list connected together by prev fields. Each element of that
+   outer list is an inner list (that may consist of only the outer list
+   element) whose elements are connected by the next fields.  The prev pointer
+ is undefined for interior elements. This allows
+ bitmap_elt_clear_from to be implemented in unit time rather than
+ linear in the number of elements to be freed. */
+
+struct GTY((chain_next ("%h.next"))) bitmap_element {
+ /* In list form, the next element in the linked list;
+ in tree form, the left child node in the tree. */
+ struct bitmap_element *next;
+ /* In list form, the previous element in the linked list;
+ in tree form, the right child node in the tree. */
+ struct bitmap_element *prev;
+ /* regno/BITMAP_ELEMENT_ALL_BITS. */
+ unsigned int indx;
+ /* Bits that are set, counting from INDX, inclusive */
+ BITMAP_WORD bits[BITMAP_ELEMENT_WORDS];
+};
+
+/* Head of bitmap linked list. The 'current' member points to something
+ already pointed to by the chain started by first, so GTY((skip)) it. */
+
+class GTY(()) bitmap_head {
+public:
+ static bitmap_obstack crashme;
+  /* Poison the obstack so this is not mistaken for a valid initialized
+     GC bitmap.  */
+ CONSTEXPR bitmap_head()
+ : indx (0), tree_form (false), padding (0), alloc_descriptor (0), first (NULL),
+ current (NULL), obstack (&crashme)
+ {}
+ /* Index of last element looked at. */
+ unsigned int indx;
+ /* False if the bitmap is in list form; true if the bitmap is in tree form.
+ Bitmap iterators only work on bitmaps in list form. */
+ unsigned tree_form: 1;
+ /* Next integer is shifted, so padding is needed. */
+ unsigned padding: 2;
+ /* Bitmap UID used for memory allocation statistics. */
+ unsigned alloc_descriptor: 29;
+ /* In list form, the first element in the linked list;
+ in tree form, the root of the tree. */
+ bitmap_element *first;
+ /* Last element looked at. */
+ bitmap_element * GTY((skip(""))) current;
+ /* Obstack to allocate elements from. If NULL, then use GGC allocation. */
+ bitmap_obstack * GTY((skip(""))) obstack;
+
+ /* Dump bitmap. */
+ void dump ();
+
+  /* Get the bitmap descriptor UID cast to an unsigned integer pointer.
+     Shift the descriptor because pointer_hash<Type>::hash does
+     a >> 3 shift operation.  */
+ unsigned *get_descriptor ()
+ {
+ return (unsigned *)(ptrdiff_t)(alloc_descriptor << 3);
+ }
+};
+
+/* Global data */
+extern bitmap_element bitmap_zero_bits; /* Zero bitmap element */
+extern bitmap_obstack bitmap_default_obstack; /* Default bitmap obstack */
+
+/* Change the view of the bitmap to list or tree form.  */
+void bitmap_list_view (bitmap);
+void bitmap_tree_view (bitmap);
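+
+/* Illustrative sketch of the view switch described in the overview
+   above: build in list view, switch to tree view for heavy
+   random-access queries, then switch back before iterating.
+
+     bitmap_tree_view (map);              // O(E) conversion
+     bool p = bitmap_bit_p (map, i);      // amortized O(log E) lookup
+     bitmap_list_view (map);              // back to list form
+*/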
+
+/* Clear a bitmap by freeing up the linked list. */
+extern void bitmap_clear (bitmap);
+
+/* Copy a bitmap to another bitmap. */
+extern void bitmap_copy (bitmap, const_bitmap);
+
+/* Move a bitmap to another bitmap. */
+extern void bitmap_move (bitmap, bitmap);
+
+/* True if two bitmaps are identical. */
+extern bool bitmap_equal_p (const_bitmap, const_bitmap);
+
+/* True if the bitmaps intersect (their AND is non-empty). */
+extern bool bitmap_intersect_p (const_bitmap, const_bitmap);
+
+/* True if the complement of the second intersects the first (their
+ AND_COMPL is non-empty). */
+extern bool bitmap_intersect_compl_p (const_bitmap, const_bitmap);
+
+/* True if MAP is an empty bitmap. */
+inline bool bitmap_empty_p (const_bitmap map)
+{
+ return !map->first;
+}
+
+/* True if the bitmap has only a single bit set. */
+extern bool bitmap_single_bit_set_p (const_bitmap);
+
+/* Count the number of bits set in the bitmap. */
+extern unsigned long bitmap_count_bits (const_bitmap);
+
+/* Count the number of unique bits set across the two bitmaps. */
+extern unsigned long bitmap_count_unique_bits (const_bitmap, const_bitmap);
+
+/* Boolean operations on bitmaps. The _into variants are two operand
+ versions that modify the first source operand. The other variants
+   are three operand versions that do not destroy the source bitmaps.
+ The operations supported are &, & ~, |, ^. */
+extern void bitmap_and (bitmap, const_bitmap, const_bitmap);
+extern bool bitmap_and_into (bitmap, const_bitmap);
+extern bool bitmap_and_compl (bitmap, const_bitmap, const_bitmap);
+extern bool bitmap_and_compl_into (bitmap, const_bitmap);
+#define bitmap_compl_and(DST, A, B) bitmap_and_compl (DST, B, A)
+extern void bitmap_compl_and_into (bitmap, const_bitmap);
+extern void bitmap_clear_range (bitmap, unsigned int, unsigned int);
+extern void bitmap_set_range (bitmap, unsigned int, unsigned int);
+extern bool bitmap_ior (bitmap, const_bitmap, const_bitmap);
+extern bool bitmap_ior_into (bitmap, const_bitmap);
+extern bool bitmap_ior_into_and_free (bitmap, bitmap *);
+extern void bitmap_xor (bitmap, const_bitmap, const_bitmap);
+extern void bitmap_xor_into (bitmap, const_bitmap);
+
+/* DST = A | (B & C). Return true if DST changes. */
+extern bool bitmap_ior_and_into (bitmap DST, const_bitmap B, const_bitmap C);
+/* DST = A | (B & ~C). Return true if DST changes. */
+extern bool bitmap_ior_and_compl (bitmap DST, const_bitmap A,
+ const_bitmap B, const_bitmap C);
+/* A |= (B & ~C). Return true if A changes. */
+extern bool bitmap_ior_and_compl_into (bitmap A,
+ const_bitmap B, const_bitmap C);
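+
+/* Illustrative sketch of the two- and three-operand forms; a, b and c
+   are hypothetical, already-initialized bitmap_head objects:
+
+     bitmap_and (&a, &b, &c);                  // a = b & c
+     bool changed = bitmap_ior_into (&a, &b);  // a |= b
+*/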
+
+/* Clear a single bit in a bitmap. Return true if the bit changed. */
+extern bool bitmap_clear_bit (bitmap, int);
+
+/* Set a single bit in a bitmap. Return true if the bit changed. */
+extern bool bitmap_set_bit (bitmap, int);
+
+/* Return true if a bit is set in a bitmap. */
+extern bool bitmap_bit_p (const_bitmap, int);
+
+/* Set and get multiple bit values in a sparse bitmap.  This allows a bitmap
+   to function as a sparse array of bit patterns where the patterns are
+   aligned chunks whose size is a power of 2.  This is more efficient than
+   performing the same task as multiple individual operations.  */
+void bitmap_set_aligned_chunk (bitmap, unsigned int, unsigned int, BITMAP_WORD);
+BITMAP_WORD bitmap_get_aligned_chunk (const_bitmap, unsigned int, unsigned int);
+
+/* Debug functions to print a bitmap. */
+extern void debug_bitmap (const_bitmap);
+extern void debug_bitmap_file (FILE *, const_bitmap);
+
+/* Print a bitmap. */
+extern void bitmap_print (FILE *, const_bitmap, const char *, const char *);
+
+/* Initialize and release a bitmap obstack. */
+extern void bitmap_obstack_initialize (bitmap_obstack *);
+extern void bitmap_obstack_release (bitmap_obstack *);
+extern void bitmap_register (bitmap MEM_STAT_DECL);
+extern void dump_bitmap_statistics (void);
+
+/* Initialize a bitmap header. OBSTACK indicates the bitmap obstack
+ to allocate from, NULL for GC'd bitmap. */
+
+inline void
+bitmap_initialize (bitmap head, bitmap_obstack *obstack CXX_MEM_STAT_INFO)
+{
+ head->first = head->current = NULL;
+ head->indx = head->tree_form = 0;
+ head->padding = 0;
+ head->alloc_descriptor = 0;
+ head->obstack = obstack;
+ if (GATHER_STATISTICS)
+ bitmap_register (head PASS_MEM_STAT);
+}
+
+/* Release a bitmap (but not its head). This is suitable for pairing with
+ bitmap_initialize. */
+
+inline void
+bitmap_release (bitmap head)
+{
+ bitmap_clear (head);
+  /* Poison the obstack pointer so the obstack can be safely released.
+     Do not zero it, as the bitmap would then look like a valid
+     initialized GC bitmap.  */
+ head->obstack = &bitmap_head::crashme;
+}
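+
+/* Illustrative sketch of the initialize/release pairing on a local
+   obstack, a common pattern for pass-local bitmaps:
+
+     bitmap_obstack obstack;
+     bitmap_obstack_initialize (&obstack);
+     bitmap_head live;
+     bitmap_initialize (&live, &obstack);
+     bitmap_set_bit (&live, 42);
+     bitmap_release (&live);
+     bitmap_obstack_release (&obstack);
+*/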
+
+/* Allocate and free bitmaps from obstack, malloc and gc'd memory. */
+extern bitmap bitmap_alloc (bitmap_obstack *obstack CXX_MEM_STAT_INFO);
+#define BITMAP_ALLOC bitmap_alloc
+extern bitmap bitmap_gc_alloc (ALONE_CXX_MEM_STAT_INFO);
+#define BITMAP_GGC_ALLOC bitmap_gc_alloc
+extern void bitmap_obstack_free (bitmap);
+
+/* A few functions and macros for compatibility with sbitmaps.  */
+inline void dump_bitmap (FILE *file, const_bitmap map)
+{
+ bitmap_print (file, map, "", "\n");
+}
+extern void debug (const bitmap_head &ref);
+extern void debug (const bitmap_head *ptr);
+
+extern unsigned bitmap_first_set_bit (const_bitmap);
+extern unsigned bitmap_last_set_bit (const_bitmap);
+
+/* Compute bitmap hash (for purposes of hashing etc.) */
+extern hashval_t bitmap_hash (const_bitmap);
+
+/* Do any cleanup needed on a bitmap when it is no longer used. */
+#define BITMAP_FREE(BITMAP) \
+ ((void) (bitmap_obstack_free ((bitmap) BITMAP), (BITMAP) = (bitmap) NULL))
+
+/* Iterator for bitmaps. */
+
+struct bitmap_iterator
+{
+ /* Pointer to the current bitmap element. */
+ bitmap_element *elt1;
+
+ /* Pointer to 2nd bitmap element when two are involved. */
+ bitmap_element *elt2;
+
+ /* Word within the current element. */
+ unsigned word_no;
+
+  /* Contents of the word currently being processed.  When finding the
+     next bit it is shifted right, so that the current bit is always
+     the least significant bit of BITS.  */
+ BITMAP_WORD bits;
+};
+
+/* Initialize a single bitmap iterator. START_BIT is the first bit to
+ iterate from. */
+
+inline void
+bmp_iter_set_init (bitmap_iterator *bi, const_bitmap map,
+ unsigned start_bit, unsigned *bit_no)
+{
+ bi->elt1 = map->first;
+ bi->elt2 = NULL;
+
+ gcc_checking_assert (!map->tree_form);
+
+ /* Advance elt1 until it is not before the block containing start_bit. */
+ while (1)
+ {
+ if (!bi->elt1)
+ {
+ bi->elt1 = &bitmap_zero_bits;
+ break;
+ }
+
+ if (bi->elt1->indx >= start_bit / BITMAP_ELEMENT_ALL_BITS)
+ break;
+ bi->elt1 = bi->elt1->next;
+ }
+
+ /* We might have gone past the start bit, so reinitialize it. */
+ if (bi->elt1->indx != start_bit / BITMAP_ELEMENT_ALL_BITS)
+ start_bit = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS;
+
+ /* Initialize for what is now start_bit. */
+ bi->word_no = start_bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS;
+ bi->bits = bi->elt1->bits[bi->word_no];
+ bi->bits >>= start_bit % BITMAP_WORD_BITS;
+
+ /* If this word is zero, we must make sure we're not pointing at the
+ first bit, otherwise our incrementing to the next word boundary
+ will fail. It won't matter if this increment moves us into the
+ next word. */
+ start_bit += !bi->bits;
+
+ *bit_no = start_bit;
+}
+
+/* Initialize an iterator to iterate over the intersection of two
+ bitmaps. START_BIT is the bit to commence from. */
+
+inline void
+bmp_iter_and_init (bitmap_iterator *bi, const_bitmap map1, const_bitmap map2,
+ unsigned start_bit, unsigned *bit_no)
+{
+ bi->elt1 = map1->first;
+ bi->elt2 = map2->first;
+
+ gcc_checking_assert (!map1->tree_form && !map2->tree_form);
+
+ /* Advance elt1 until it is not before the block containing
+ start_bit. */
+ while (1)
+ {
+ if (!bi->elt1)
+ {
+ bi->elt2 = NULL;
+ break;
+ }
+
+ if (bi->elt1->indx >= start_bit / BITMAP_ELEMENT_ALL_BITS)
+ break;
+ bi->elt1 = bi->elt1->next;
+ }
+
+ /* Advance elt2 until it is not before elt1. */
+ while (1)
+ {
+ if (!bi->elt2)
+ {
+ bi->elt1 = bi->elt2 = &bitmap_zero_bits;
+ break;
+ }
+
+ if (bi->elt2->indx >= bi->elt1->indx)
+ break;
+ bi->elt2 = bi->elt2->next;
+ }
+
+ /* If we're at the same index, then we have some intersecting bits. */
+ if (bi->elt1->indx == bi->elt2->indx)
+ {
+ /* We might have advanced beyond the start_bit, so reinitialize
+ for that. */
+ if (bi->elt1->indx != start_bit / BITMAP_ELEMENT_ALL_BITS)
+ start_bit = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS;
+
+ bi->word_no = start_bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS;
+ bi->bits = bi->elt1->bits[bi->word_no] & bi->elt2->bits[bi->word_no];
+ bi->bits >>= start_bit % BITMAP_WORD_BITS;
+ }
+ else
+ {
+ /* Otherwise we must immediately advance elt1, so initialize for
+ that. */
+ bi->word_no = BITMAP_ELEMENT_WORDS - 1;
+ bi->bits = 0;
+ }
+
+ /* If this word is zero, we must make sure we're not pointing at the
+ first bit, otherwise our incrementing to the next word boundary
+ will fail. It won't matter if this increment moves us into the
+ next word. */
+ start_bit += !bi->bits;
+
+ *bit_no = start_bit;
+}
+
+/* Initialize an iterator to iterate over the bits in MAP1 & ~MAP2. */
+
+inline void
+bmp_iter_and_compl_init (bitmap_iterator *bi,
+ const_bitmap map1, const_bitmap map2,
+ unsigned start_bit, unsigned *bit_no)
+{
+ bi->elt1 = map1->first;
+ bi->elt2 = map2->first;
+
+ gcc_checking_assert (!map1->tree_form && !map2->tree_form);
+
+ /* Advance elt1 until it is not before the block containing start_bit. */
+ while (1)
+ {
+ if (!bi->elt1)
+ {
+ bi->elt1 = &bitmap_zero_bits;
+ break;
+ }
+
+ if (bi->elt1->indx >= start_bit / BITMAP_ELEMENT_ALL_BITS)
+ break;
+ bi->elt1 = bi->elt1->next;
+ }
+
+ /* Advance elt2 until it is not before elt1. */
+ while (bi->elt2 && bi->elt2->indx < bi->elt1->indx)
+ bi->elt2 = bi->elt2->next;
+
+ /* We might have advanced beyond the start_bit, so reinitialize for
+ that. */
+ if (bi->elt1->indx != start_bit / BITMAP_ELEMENT_ALL_BITS)
+ start_bit = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS;
+
+ bi->word_no = start_bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS;
+ bi->bits = bi->elt1->bits[bi->word_no];
+ if (bi->elt2 && bi->elt1->indx == bi->elt2->indx)
+ bi->bits &= ~bi->elt2->bits[bi->word_no];
+ bi->bits >>= start_bit % BITMAP_WORD_BITS;
+
+ /* If this word is zero, we must make sure we're not pointing at the
+ first bit, otherwise our incrementing to the next word boundary
+ will fail. It won't matter if this increment moves us into the
+ next word. */
+ start_bit += !bi->bits;
+
+ *bit_no = start_bit;
+}
+
+/* Advance to the next bit in BI. We don't advance to the next
+ nonzero bit yet. */
+
+inline void
+bmp_iter_next (bitmap_iterator *bi, unsigned *bit_no)
+{
+ bi->bits >>= 1;
+ *bit_no += 1;
+}
+
+/* Advance to first set bit in BI. */
+
+inline void
+bmp_iter_next_bit (bitmap_iterator * bi, unsigned *bit_no)
+{
+#if (GCC_VERSION >= 3004)
+ {
+ unsigned int n = __builtin_ctzl (bi->bits);
+ gcc_assert (sizeof (unsigned long) == sizeof (BITMAP_WORD));
+ bi->bits >>= n;
+ *bit_no += n;
+ }
+#else
+ while (!(bi->bits & 1))
+ {
+ bi->bits >>= 1;
+ *bit_no += 1;
+ }
+#endif
+}
+
+/* Advance to the next nonzero bit of a single bitmap; we will have
+ already advanced past the just iterated bit. Return true if there
+ is a bit to iterate. */
+
+inline bool
+bmp_iter_set (bitmap_iterator *bi, unsigned *bit_no)
+{
+ /* If our current word is nonzero, it contains the bit we want. */
+ if (bi->bits)
+ {
+ next_bit:
+ bmp_iter_next_bit (bi, bit_no);
+ return true;
+ }
+
+ /* Round up to the word boundary. We might have just iterated past
+ the end of the last word, hence the -1. It is not possible for
+ bit_no to point at the beginning of the now last word. */
+ *bit_no = ((*bit_no + BITMAP_WORD_BITS - 1)
+ / BITMAP_WORD_BITS * BITMAP_WORD_BITS);
+ bi->word_no++;
+
+ while (1)
+ {
+ /* Find the next nonzero word in this elt. */
+ while (bi->word_no != BITMAP_ELEMENT_WORDS)
+ {
+ bi->bits = bi->elt1->bits[bi->word_no];
+ if (bi->bits)
+ goto next_bit;
+ *bit_no += BITMAP_WORD_BITS;
+ bi->word_no++;
+ }
+
+ /* Make sure we didn't remove the element while iterating. */
+ gcc_checking_assert (bi->elt1->indx != -1U);
+
+ /* Advance to the next element. */
+ bi->elt1 = bi->elt1->next;
+ if (!bi->elt1)
+ return false;
+ *bit_no = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS;
+ bi->word_no = 0;
+ }
+}
+
+/* Advance to the next nonzero bit of an intersecting pair of
+ bitmaps. We will have already advanced past the just iterated bit.
+ Return true if there is a bit to iterate. */
+
+inline bool
+bmp_iter_and (bitmap_iterator *bi, unsigned *bit_no)
+{
+ /* If our current word is nonzero, it contains the bit we want. */
+ if (bi->bits)
+ {
+ next_bit:
+ bmp_iter_next_bit (bi, bit_no);
+ return true;
+ }
+
+ /* Round up to the word boundary. We might have just iterated past
+ the end of the last word, hence the -1. It is not possible for
+ bit_no to point at the beginning of the now last word. */
+ *bit_no = ((*bit_no + BITMAP_WORD_BITS - 1)
+ / BITMAP_WORD_BITS * BITMAP_WORD_BITS);
+ bi->word_no++;
+
+ while (1)
+ {
+ /* Find the next nonzero word in this elt. */
+ while (bi->word_no != BITMAP_ELEMENT_WORDS)
+ {
+ bi->bits = bi->elt1->bits[bi->word_no] & bi->elt2->bits[bi->word_no];
+ if (bi->bits)
+ goto next_bit;
+ *bit_no += BITMAP_WORD_BITS;
+ bi->word_no++;
+ }
+
+ /* Advance to the next identical element. */
+ do
+ {
+ /* Make sure we didn't remove the element while iterating. */
+ gcc_checking_assert (bi->elt1->indx != -1U);
+
+ /* Advance elt1 while it is less than elt2. We always want
+ to advance one elt. */
+ do
+ {
+ bi->elt1 = bi->elt1->next;
+ if (!bi->elt1)
+ return false;
+ }
+ while (bi->elt1->indx < bi->elt2->indx);
+
+ /* Make sure we didn't remove the element while iterating. */
+ gcc_checking_assert (bi->elt2->indx != -1U);
+
+ /* Advance elt2 to be no less than elt1. This might not
+ advance. */
+ while (bi->elt2->indx < bi->elt1->indx)
+ {
+ bi->elt2 = bi->elt2->next;
+ if (!bi->elt2)
+ return false;
+ }
+ }
+ while (bi->elt1->indx != bi->elt2->indx);
+
+ *bit_no = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS;
+ bi->word_no = 0;
+ }
+}
+
+/* Advance to the next nonzero bit in the intersection of
+ complemented bitmaps. We will have already advanced past the just
+ iterated bit. */
+
+inline bool
+bmp_iter_and_compl (bitmap_iterator *bi, unsigned *bit_no)
+{
+ /* If our current word is nonzero, it contains the bit we want. */
+ if (bi->bits)
+ {
+ next_bit:
+ bmp_iter_next_bit (bi, bit_no);
+ return true;
+ }
+
+ /* Round up to the word boundary. We might have just iterated past
+ the end of the last word, hence the -1. It is not possible for
+ bit_no to point at the beginning of the now last word. */
+ *bit_no = ((*bit_no + BITMAP_WORD_BITS - 1)
+ / BITMAP_WORD_BITS * BITMAP_WORD_BITS);
+ bi->word_no++;
+
+ while (1)
+ {
+ /* Find the next nonzero word in this elt. */
+ while (bi->word_no != BITMAP_ELEMENT_WORDS)
+ {
+ bi->bits = bi->elt1->bits[bi->word_no];
+ if (bi->elt2 && bi->elt2->indx == bi->elt1->indx)
+ bi->bits &= ~bi->elt2->bits[bi->word_no];
+ if (bi->bits)
+ goto next_bit;
+ *bit_no += BITMAP_WORD_BITS;
+ bi->word_no++;
+ }
+
+ /* Make sure we didn't remove the element while iterating. */
+ gcc_checking_assert (bi->elt1->indx != -1U);
+
+ /* Advance to the next element of elt1. */
+ bi->elt1 = bi->elt1->next;
+ if (!bi->elt1)
+ return false;
+
+ /* Make sure we didn't remove the element while iterating. */
+ gcc_checking_assert (! bi->elt2 || bi->elt2->indx != -1U);
+
+ /* Advance elt2 until it is no less than elt1. */
+ while (bi->elt2 && bi->elt2->indx < bi->elt1->indx)
+ bi->elt2 = bi->elt2->next;
+
+ *bit_no = bi->elt1->indx * BITMAP_ELEMENT_ALL_BITS;
+ bi->word_no = 0;
+ }
+}
+
+/* If you are modifying a bitmap you are currently iterating over,
+   note the following:
+   - you must never remove the current bit;
+   - if you set or clear a bit before the current bit, this operation
+     will not affect the set of bits you are visiting during the iteration;
+   - if you set or clear a bit after the current bit, it is unspecified
+ whether that affects the set of bits you are visiting during the
+ iteration.
+ If you want to remove the current bit you can delay this to the next
+ iteration (and after the iteration in case the last iteration is
+ affected). */
+
+/* Loop over all bits set in BITMAP, starting with MIN and setting
+ BITNUM to the bit number. ITER is a bitmap iterator. BITNUM
+ should be treated as a read-only variable as it contains loop
+ state. */
+
+#ifndef EXECUTE_IF_SET_IN_BITMAP
+/* See sbitmap.h for the other definition of EXECUTE_IF_SET_IN_BITMAP. */
+#define EXECUTE_IF_SET_IN_BITMAP(BITMAP, MIN, BITNUM, ITER) \
+ for (bmp_iter_set_init (&(ITER), (BITMAP), (MIN), &(BITNUM)); \
+ bmp_iter_set (&(ITER), &(BITNUM)); \
+ bmp_iter_next (&(ITER), &(BITNUM)))
+#endif
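+
+/* A minimal usage sketch (not compiled; the names B and PRED are
+   hypothetical).  It follows the modification rules above by delaying
+   removal of the just-visited bit to the next iteration.  */
+#if 0
+static void
+clear_matching_bits (bitmap b, bool (*pred) (unsigned))
+{
+  bitmap_iterator bi;
+  unsigned i;
+  int pending = -1;
+  EXECUTE_IF_SET_IN_BITMAP (b, 0, i, bi)
+    {
+      /* Clearing a bit before the current bit I is safe; clearing I
+         itself would not be.  */
+      if (pending >= 0)
+        bitmap_clear_bit (b, pending);
+      pending = pred (i) ? (int) i : -1;
+    }
+  if (pending >= 0)
+    bitmap_clear_bit (b, pending);
+}
+#endif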
+
+/* Loop over all the bits set in BITMAP1 & BITMAP2, starting with MIN
+ and setting BITNUM to the bit number. ITER is a bitmap iterator.
+ BITNUM should be treated as a read-only variable as it contains
+ loop state. */
+
+#define EXECUTE_IF_AND_IN_BITMAP(BITMAP1, BITMAP2, MIN, BITNUM, ITER) \
+ for (bmp_iter_and_init (&(ITER), (BITMAP1), (BITMAP2), (MIN), \
+ &(BITNUM)); \
+ bmp_iter_and (&(ITER), &(BITNUM)); \
+ bmp_iter_next (&(ITER), &(BITNUM)))
+
+/* Loop over all the bits set in BITMAP1 & ~BITMAP2, starting with MIN
+ and setting BITNUM to the bit number. ITER is a bitmap iterator.
+ BITNUM should be treated as a read-only variable as it contains
+ loop state. */
+
+#define EXECUTE_IF_AND_COMPL_IN_BITMAP(BITMAP1, BITMAP2, MIN, BITNUM, ITER) \
+ for (bmp_iter_and_compl_init (&(ITER), (BITMAP1), (BITMAP2), (MIN), \
+ &(BITNUM)); \
+ bmp_iter_and_compl (&(ITER), &(BITNUM)); \
+ bmp_iter_next (&(ITER), &(BITNUM)))
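+
+/* A sketch of the two-bitmap loops (A, B and the callbacks are
+   hypothetical): the first loop visits the bits of A & B, the second
+   the bits of A & ~B.  */
+#if 0
+static void
+walk_pair (bitmap a, bitmap b)
+{
+  bitmap_iterator bi;
+  unsigned i;
+  EXECUTE_IF_AND_IN_BITMAP (a, b, 0, i, bi)
+    note_common_bit (i);		/* hypothetical callback */
+  EXECUTE_IF_AND_COMPL_IN_BITMAP (a, b, 0, i, bi)
+    note_bit_only_in_a (i);		/* hypothetical callback */
+}
+#endif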
+
+/* A class that ties the lifetime of a bitmap to its scope. */
+class auto_bitmap
+{
+ public:
+ auto_bitmap (ALONE_CXX_MEM_STAT_INFO)
+ { bitmap_initialize (&m_bits, &bitmap_default_obstack PASS_MEM_STAT); }
+ explicit auto_bitmap (bitmap_obstack *o CXX_MEM_STAT_INFO)
+ { bitmap_initialize (&m_bits, o PASS_MEM_STAT); }
+ ~auto_bitmap () { bitmap_clear (&m_bits); }
+ // Allow calling bitmap functions on our bitmap.
+ operator bitmap () { return &m_bits; }
+
+ private:
+ // Prevent making a copy that references our bitmap.
+ auto_bitmap (const auto_bitmap &);
+ auto_bitmap &operator = (const auto_bitmap &);
+ auto_bitmap (auto_bitmap &&);
+ auto_bitmap &operator = (auto_bitmap &&);
+
+ bitmap_head m_bits;
+};
+
+extern void debug (const auto_bitmap &ref);
+extern void debug (const auto_bitmap *ptr);
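+
+/* A minimal sketch of auto_bitmap (the helper below is hypothetical):
+   the bitmap is initialized on construction and released when the
+   scope ends, so no explicit cleanup call is needed.  */
+#if 0
+static bool
+bitmaps_intersect_p (const_bitmap a, const_bitmap b)
+{
+  auto_bitmap tmp;
+  bitmap_and (tmp, a, b);	/* auto_bitmap converts to bitmap.  */
+  return !bitmap_empty_p (tmp);
+}
+#endif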
+
+/* Base class for bitmap_view; see there for details. */
+template<typename T, typename Traits = array_traits<T> >
+class base_bitmap_view
+{
+public:
+ typedef typename Traits::element_type array_element_type;
+
+ base_bitmap_view (const T &, bitmap_element *);
+ operator const_bitmap () const { return &m_head; }
+
+private:
+ base_bitmap_view (const base_bitmap_view &);
+
+ bitmap_head m_head;
+};
+
+/* Provides a read-only bitmap view of a single integer bitmask or a
+ constant-sized array of integer bitmasks, or of a wrapper around such
+ bitmasks. */
+template<typename T, typename Traits>
+class bitmap_view<T, Traits, true> : public base_bitmap_view<T, Traits>
+{
+public:
+ bitmap_view (const T &array)
+ : base_bitmap_view<T, Traits> (array, m_bitmap_elements) {}
+
+private:
+ /* How many bitmap_elements we need to hold a full T. */
+ static const size_t num_bitmap_elements
+ = CEIL (CHAR_BIT
+ * sizeof (typename Traits::element_type)
+ * Traits::constant_size,
+ BITMAP_ELEMENT_ALL_BITS);
+ bitmap_element m_bitmap_elements[num_bitmap_elements];
+};
+
+/* Initialize the view for array ARRAY, using the array of bitmap
+ elements in BITMAP_ELEMENTS (which is known to contain enough
+ entries). */
+template<typename T, typename Traits>
+base_bitmap_view<T, Traits>::base_bitmap_view (const T &array,
+ bitmap_element *bitmap_elements)
+{
+ m_head.obstack = NULL;
+
+ /* The code currently assumes that each element of ARRAY corresponds
+ to exactly one bitmap_element. */
+ const size_t array_element_bits = CHAR_BIT * sizeof (array_element_type);
+ STATIC_ASSERT (BITMAP_ELEMENT_ALL_BITS % array_element_bits == 0);
+ size_t array_step = BITMAP_ELEMENT_ALL_BITS / array_element_bits;
+ size_t array_size = Traits::size (array);
+
+ /* Process each potential bitmap_element in turn. The loop is written
+ this way rather than per array element because usually there are
+ only a small number of array elements per bitmap element (typically
+ two or four). The inner loops should therefore unroll completely. */
+ const array_element_type *array_elements = Traits::base (array);
+ unsigned int indx = 0;
+ for (size_t array_base = 0;
+ array_base < array_size;
+ array_base += array_step, indx += 1)
+ {
+ /* How many array elements are in this particular bitmap_element. */
+ unsigned int array_count
+ = (STATIC_CONSTANT_P (array_size % array_step == 0)
+ ? array_step : MIN (array_step, array_size - array_base));
+
+ /* See whether we need this bitmap element. */
+ array_element_type ior = array_elements[array_base];
+ for (size_t i = 1; i < array_count; ++i)
+ ior |= array_elements[array_base + i];
+ if (ior == 0)
+ continue;
+
+ /* Grab the next bitmap element and chain it. */
+ bitmap_element *bitmap_element = bitmap_elements++;
+ if (m_head.current)
+ m_head.current->next = bitmap_element;
+ else
+ m_head.first = bitmap_element;
+ bitmap_element->prev = m_head.current;
+ bitmap_element->next = NULL;
+ bitmap_element->indx = indx;
+ m_head.current = bitmap_element;
+ m_head.indx = indx;
+
+ /* Fill in the bits of the bitmap element. */
+ if (array_element_bits < BITMAP_WORD_BITS)
+ {
+ /* Multiple array elements fit in one element of
+ bitmap_element->bits. */
+ size_t array_i = array_base;
+ for (unsigned int word_i = 0; word_i < BITMAP_ELEMENT_WORDS;
+ ++word_i)
+ {
+ BITMAP_WORD word = 0;
+ for (unsigned int shift = 0;
+ shift < BITMAP_WORD_BITS && array_i < array_size;
+ shift += array_element_bits)
+ word |= array_elements[array_i++] << shift;
+ bitmap_element->bits[word_i] = word;
+ }
+ }
+ else
+ {
+ /* Array elements are the same size as elements of
+ bitmap_element->bits, or are an exact multiple of that size. */
+ unsigned int word_i = 0;
+ for (unsigned int i = 0; i < array_count; ++i)
+ for (unsigned int shift = 0; shift < array_element_bits;
+ shift += BITMAP_WORD_BITS)
+ bitmap_element->bits[word_i++]
+ = array_elements[array_base + i] >> shift;
+ while (word_i < BITMAP_ELEMENT_WORDS)
+ bitmap_element->bits[word_i++] = 0;
+ }
+ }
+}
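+
+/* A sketch of bitmap_view over a plain integer mask, assuming
+   array_traits supports the scalar type (PROCESS_BIT is a
+   hypothetical callback).  The view is read-only and performs no
+   allocation.  */
+#if 0
+static void
+walk_mask (unsigned int mask)
+{
+  bitmap_view<unsigned int> view (mask);
+  bitmap_iterator bi;
+  unsigned i;
+  EXECUTE_IF_SET_IN_BITMAP (view, 0, i, bi)
+    process_bit (i);
+}
+#endif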
+
+#endif /* GCC_BITMAP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtin-attrs.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtin-attrs.def
new file mode 100644
index 0000000..782e82d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtin-attrs.def
@@ -0,0 +1,427 @@
+/* Copyright (C) 2001-2023 Free Software Foundation, Inc.
+ Contributed by Joseph Myers <jsm28@cam.ac.uk>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This header provides a declarative way of describing the attributes
+ that are applied to some built-in functions by default. Attributes
+ that are meant to be used by user-defined functions but aren't used
+ by any built-ins, or attributes that apply to types or variables
+ but not to functions, need not and should not be defined here.
+
+ Before including this header, you must define the following macros.
+ In each case where there is an ENUM, it is an identifier used to
+ reference the tree in subsequent definitions.
+
+ DEF_ATTR_NULL_TREE (ENUM)
+
+ Constructs a NULL_TREE.
+
+ DEF_ATTR_INT (ENUM, VALUE)
+
+ Constructs an INTEGER_CST with value VALUE (an integer representable
+ in HOST_WIDE_INT).
+
+ DEF_ATTR_STRING (ENUM, VALUE)
+
+ Constructs a STRING_CST with value VALUE.
+
+ DEF_ATTR_IDENT (ENUM, STRING)
+
+ Constructs an IDENTIFIER_NODE for STRING.
+
+ DEF_ATTR_TREE_LIST (ENUM, PURPOSE, VALUE, CHAIN)
+
+ Constructs a TREE_LIST with given PURPOSE, VALUE and CHAIN (given
+ as previous ENUM names). */
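+
+/* As a sketch, a consumer can expand this file into an enumeration of
+   attribute handles roughly like so (GCC's real consumer may differ
+   in detail):
+
+     #define DEF_ATTR_NULL_TREE(ENUM) ENUM,
+     #define DEF_ATTR_INT(ENUM, VALUE) ENUM,
+     #define DEF_ATTR_STRING(ENUM, VALUE) ENUM,
+     #define DEF_ATTR_IDENT(ENUM, STRING) ENUM,
+     #define DEF_ATTR_TREE_LIST(ENUM, PURPOSE, VALUE, CHAIN) ENUM,
+     enum built_in_attribute {
+     #include "builtin-attrs.def"
+       ATTR_LAST
+     };  */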
+
+DEF_ATTR_NULL_TREE (ATTR_NULL)
+
+/* Construct a tree for a given integer and a list containing it. */
+#define DEF_ATTR_FOR_INT(VALUE) \
+ DEF_ATTR_INT (ATTR_##VALUE, VALUE) \
+ DEF_ATTR_TREE_LIST (ATTR_LIST_##VALUE, ATTR_NULL, \
+ ATTR_##VALUE, ATTR_NULL)
+DEF_ATTR_FOR_INT (0)
+DEF_ATTR_FOR_INT (1)
+DEF_ATTR_FOR_INT (2)
+DEF_ATTR_FOR_INT (3)
+DEF_ATTR_FOR_INT (4)
+DEF_ATTR_FOR_INT (5)
+DEF_ATTR_FOR_INT (6)
+#undef DEF_ATTR_FOR_INT
+
+/* Construct a tree for a given string and a list containing it. */
+#define DEF_ATTR_FOR_STRING(ENUM, VALUE) \
+ DEF_ATTR_STRING (ATTR_##ENUM, VALUE) \
+ DEF_ATTR_TREE_LIST (ATTR_LIST_##ENUM, ATTR_NULL, \
+ ATTR_##ENUM, ATTR_NULL)
+DEF_ATTR_FOR_STRING (STR1, "1 ")
+DEF_ATTR_FOR_STRING (STRERRNOC, ".C")
+DEF_ATTR_FOR_STRING (STRERRNOP, ".P")
+#undef DEF_ATTR_FOR_STRING
+
+/* Construct a tree for a list of two integers. */
+#define DEF_LIST_INT_INT(VALUE1, VALUE2) \
+ DEF_ATTR_TREE_LIST (ATTR_LIST_##VALUE1##_##VALUE2, ATTR_NULL, \
+ ATTR_##VALUE1, ATTR_LIST_##VALUE2)
+DEF_LIST_INT_INT (1,0)
+DEF_LIST_INT_INT (1,2)
+DEF_LIST_INT_INT (1,3)
+DEF_LIST_INT_INT (1,4)
+DEF_LIST_INT_INT (1,5)
+DEF_LIST_INT_INT (2,0)
+DEF_LIST_INT_INT (2,3)
+DEF_LIST_INT_INT (3,0)
+DEF_LIST_INT_INT (3,4)
+DEF_LIST_INT_INT (4,0)
+DEF_LIST_INT_INT (4,5)
+DEF_LIST_INT_INT (5,0)
+DEF_LIST_INT_INT (5,6)
+#undef DEF_LIST_INT_INT
+
+/* Construct trees for identifiers used in built-in function attributes.
+ The construction contributes to startup costs so only attributes that
+ are used to define built-ins should be defined here. */
+DEF_ATTR_IDENT (ATTR_ALLOC_SIZE, "alloc_size")
+DEF_ATTR_IDENT (ATTR_COLD, "cold")
+DEF_ATTR_IDENT (ATTR_CONST, "const")
+DEF_ATTR_IDENT (ATTR_FORMAT, "format")
+DEF_ATTR_IDENT (ATTR_FORMAT_ARG, "format_arg")
+DEF_ATTR_IDENT (ATTR_MALLOC, "malloc")
+DEF_ATTR_IDENT (ATTR_NONNULL, "nonnull")
+DEF_ATTR_IDENT (ATTR_NORETURN, "noreturn")
+DEF_ATTR_IDENT (ATTR_NOTHROW, "nothrow")
+DEF_ATTR_IDENT (ATTR_LEAF, "leaf")
+DEF_ATTR_IDENT (ATTR_FNSPEC, "fn spec")
+DEF_ATTR_IDENT (ATTR_PRINTF, "printf")
+DEF_ATTR_IDENT (ATTR_ASM_FPRINTF, "asm_fprintf")
+DEF_ATTR_IDENT (ATTR_GCC_DIAG, "gcc_diag")
+DEF_ATTR_IDENT (ATTR_GCC_CDIAG, "gcc_cdiag")
+DEF_ATTR_IDENT (ATTR_GCC_CXXDIAG, "gcc_cxxdiag")
+DEF_ATTR_IDENT (ATTR_PURE, "pure")
+DEF_ATTR_IDENT (ATTR_NOVOPS, "no vops")
+DEF_ATTR_IDENT (ATTR_SCANF, "scanf")
+DEF_ATTR_IDENT (ATTR_SENTINEL, "sentinel")
+DEF_ATTR_IDENT (ATTR_STRFMON, "strfmon")
+DEF_ATTR_IDENT (ATTR_STRFTIME, "strftime")
+DEF_ATTR_IDENT (ATTR_TYPEGENERIC, "type generic")
+DEF_ATTR_IDENT (ATTR_TM_REGPARM, "*tm regparm")
+DEF_ATTR_IDENT (ATTR_TM_TMPURE, "transaction_pure")
+DEF_ATTR_IDENT (ATTR_RETURNS_TWICE, "returns_twice")
+DEF_ATTR_IDENT (ATTR_RETURNS_NONNULL, "returns_nonnull")
+DEF_ATTR_IDENT (ATTR_WARN_UNUSED_RESULT, "warn_unused_result")
+
+DEF_ATTR_TREE_LIST (ATTR_NOVOPS_LIST, ATTR_NOVOPS, ATTR_NULL, ATTR_NULL)
+
+DEF_ATTR_TREE_LIST (ATTR_NOVOPS_LEAF_LIST, ATTR_LEAF, ATTR_NULL, ATTR_NOVOPS_LIST)
+
+DEF_ATTR_TREE_LIST (ATTR_LEAF_LIST, ATTR_LEAF, ATTR_NULL, ATTR_NULL)
+
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_LIST, ATTR_NOTHROW, ATTR_NULL, ATTR_NULL)
+
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_LEAF_LIST, ATTR_LEAF, ATTR_NULL, ATTR_NOTHROW_LIST)
+
+DEF_ATTR_TREE_LIST (ATTR_NOVOPS_NOTHROW_LEAF_LIST, ATTR_NOVOPS, \
+ ATTR_NULL, ATTR_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_LIST, ATTR_CONST, \
+ ATTR_NULL, ATTR_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_LEAF_LIST, ATTR_CONST, \
+ ATTR_NULL, ATTR_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_ERRNOCONST_NOTHROW_LEAF_LIST, ATTR_FNSPEC,\
+ ATTR_LIST_STRERRNOC, ATTR_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_ERRNOPURE_NOTHROW_LEAF_LIST, ATTR_FNSPEC,\
+ ATTR_LIST_STRERRNOP, ATTR_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_LIST, ATTR_PURE, \
+ ATTR_NULL, ATTR_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_LEAF_LIST, ATTR_PURE, \
+ ATTR_NULL, ATTR_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_NORETURN_NOTHROW_LIST, ATTR_NORETURN, \
+ ATTR_NULL, ATTR_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_NORETURN_NOTHROW_LEAF_LIST, ATTR_NORETURN,\
+ ATTR_NULL, ATTR_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_NORETURN_NOTHROW_LEAF_COLD_LIST, ATTR_COLD,\
+ ATTR_NULL, ATTR_NORETURN_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_RT_NOTHROW_LEAF_LIST, ATTR_RETURNS_TWICE,\
+ ATTR_NULL, ATTR_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_COLD_NOTHROW_LEAF_LIST, ATTR_COLD,\
+ ATTR_NULL, ATTR_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST, ATTR_COLD,\
+ ATTR_NULL, ATTR_NORETURN_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_CONST_NORETURN_NOTHROW_LEAF_LIST, ATTR_CONST,\
+ ATTR_NULL, ATTR_NORETURN_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_CONST_NORETURN_NOTHROW_LEAF_COLD_LIST, ATTR_COLD,\
+ ATTR_NULL, ATTR_CONST_NORETURN_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_MALLOC_NOTHROW_LIST, ATTR_MALLOC, \
+ ATTR_NULL, ATTR_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_WARN_UNUSED_RESULT_NOTHROW_LEAF_LIST, ATTR_WARN_UNUSED_RESULT, \
+ ATTR_NULL, ATTR_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_LEAF_LIST, ATTR_MALLOC, \
+ ATTR_NULL, ATTR_WARN_UNUSED_RESULT_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_SENTINEL_NOTHROW_LIST, ATTR_SENTINEL, \
+ ATTR_NULL, ATTR_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_SENTINEL_NOTHROW_LEAF_LIST, ATTR_SENTINEL, \
+ ATTR_NULL, ATTR_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_COLD_CONST_NORETURN_NOTHROW_LEAF_LIST, ATTR_CONST,\
+ ATTR_NULL, ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+
+/* Allocation functions like malloc and realloc, whose first argument
+ (in the _SIZE_1 variants) or second argument (in the _SIZE_2
+ variants) specifies the size of the allocated object. */
+DEF_ATTR_TREE_LIST (ATTR_MALLOC_SIZE_1_NOTHROW_LIST, ATTR_ALLOC_SIZE, \
+ ATTR_LIST_1, ATTR_MALLOC_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_LIST, ATTR_WARN_UNUSED_RESULT, \
+ ATTR_NULL, ATTR_MALLOC_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_ALLOC_WARN_UNUSED_RESULT_SIZE_2_NOTHROW_LIST, ATTR_ALLOC_SIZE, \
+ ATTR_LIST_2, ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_MALLOC_WARN_UNUSED_RESULT_SIZE_1_NOTHROW_LEAF_LIST, ATTR_ALLOC_SIZE, \
+ ATTR_LIST_1, ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_LEAF_LIST)
+/* Alloca is just like malloc except that it never returns null. */
+DEF_ATTR_TREE_LIST (ATTR_ALLOCA_WARN_UNUSED_RESULT_SIZE_1_NOTHROW_LEAF_LIST, ATTR_RETURNS_NONNULL,
+ ATTR_NULL, ATTR_MALLOC_WARN_UNUSED_RESULT_SIZE_1_NOTHROW_LEAF_LIST)
+
+/* Allocation functions like calloc, where the product of the first
+ two arguments specifies the size of the allocated object. */
+DEF_ATTR_TREE_LIST (ATTR_MALLOC_WARN_UNUSED_RESULT_SIZE_1_2_NOTHROW_LEAF_LIST, ATTR_ALLOC_SIZE, \
+ ATTR_LIST_1_2, ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_LEAF_LIST)
+
+/* Allocation functions like realloc whose second argument specifies
+ the size of the allocated object. */
+DEF_ATTR_TREE_LIST (ATTR_ALLOC_WARN_UNUSED_RESULT_SIZE_2_NOTHROW_LEAF_LIST, ATTR_ALLOC_SIZE, \
+ ATTR_LIST_2, ATTR_WARN_UNUSED_RESULT_NOTHROW_LEAF_LIST)
+
+/* Functions whose pointer parameter(s) are all nonnull. */
+DEF_ATTR_TREE_LIST (ATTR_NONNULL_LIST, ATTR_NONNULL, ATTR_NULL, ATTR_NULL)
+/* Functions whose first parameter is a nonnull pointer. */
+DEF_ATTR_TREE_LIST (ATTR_NONNULL_1, ATTR_NONNULL, ATTR_LIST_1, ATTR_NULL)
+/* Functions whose second parameter is a nonnull pointer. */
+DEF_ATTR_TREE_LIST (ATTR_NONNULL_2, ATTR_NONNULL, ATTR_LIST_2, ATTR_NULL)
+/* Functions whose third parameter is a nonnull pointer. */
+DEF_ATTR_TREE_LIST (ATTR_NONNULL_3, ATTR_NONNULL, ATTR_LIST_3, ATTR_NULL)
+/* Nothrow functions with the sentinel(1) attribute. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_SENTINEL_1, ATTR_SENTINEL, ATTR_LIST_1, \
+ ATTR_NOTHROW_LIST)
+/* Nothrow functions whose pointer parameter(s) are all nonnull. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL, ATTR_NONNULL, ATTR_NULL, \
+ ATTR_NOTHROW_LIST)
+/* Nothrow leaf functions whose pointer parameter(s) are all nonnull. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_LEAF, ATTR_NONNULL, ATTR_NULL, \
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_LEAF_LIST, ATTR_LEAF, ATTR_NULL, ATTR_NOTHROW_NONNULL_LEAF)
+/* Nothrow functions whose first parameter is a nonnull pointer. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1, ATTR_NONNULL, ATTR_LIST_1, \
+ ATTR_NOTHROW_LIST)
+/* Nothrow functions whose second parameter is a nonnull pointer. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_2, ATTR_NONNULL, ATTR_LIST_2, \
+ ATTR_NOTHROW_LIST)
+/* Nothrow functions whose third parameter is a nonnull pointer. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_3, ATTR_NONNULL, ATTR_LIST_3, \
+ ATTR_NOTHROW_LIST)
+/* Nothrow functions whose fourth parameter is a nonnull pointer. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_4, ATTR_NONNULL, ATTR_LIST_4, \
+ ATTR_NOTHROW_LIST)
+/* Nothrow functions whose fifth parameter is a nonnull pointer. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_5, ATTR_NONNULL, ATTR_LIST_5, \
+ ATTR_NOTHROW_LIST)
+
+/* Same as ATTR_NONNULL_1. */
+DEF_ATTR_TREE_LIST (ATTR_NONNULL_1_1, ATTR_NONNULL, ATTR_LIST_1, ATTR_NULL)
+/* Functions like {v,}fprintf whose first and second parameters are
+ nonnull pointers. As cancellation points the functions are not
+ nothrow. */
+DEF_ATTR_TREE_LIST (ATTR_NONNULL_1_2, ATTR_NONNULL, ATTR_LIST_1_2, ATTR_NULL)
+/* The following don't have {v,}fprintf forms. They exist only to
+ make it possible to declare {v,}{f,s}printf attributes using
+ the same macro. */
+DEF_ATTR_TREE_LIST (ATTR_NONNULL_1_3, ATTR_NONNULL, ATTR_LIST_1_3, ATTR_NULL)
+DEF_ATTR_TREE_LIST (ATTR_NONNULL_1_4, ATTR_NONNULL, ATTR_LIST_1_4, ATTR_NULL)
+DEF_ATTR_TREE_LIST (ATTR_NONNULL_1_5, ATTR_NONNULL, ATTR_LIST_1_5, ATTR_NULL)
+
+/* Same as ATTR_NOTHROW_NONNULL_1. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_1, ATTR_NONNULL, ATTR_LIST_1,
+ ATTR_NOTHROW_LIST)
+/* Nothrow functions like {v,}sprintf whose first and second parameters
+ are nonnull pointers. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_2, ATTR_NONNULL, ATTR_LIST_1_2, \
+ ATTR_NOTHROW_LIST)
+/* Nothrow functions like {v,}snprintf whose first and third parameters
+ are nonnull pointers. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_3, ATTR_NONNULL, ATTR_LIST_1_3, \
+ ATTR_NOTHROW_LIST)
+/* Nothrow functions like {v,}sprintf_chk whose first and fourth parameters
+ are nonnull pointers. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_4, ATTR_NONNULL, ATTR_LIST_1_4, \
+ ATTR_NOTHROW_LIST)
+/* Nothrow functions like {v,}snprintf_chk whose first and fifth parameters
+ are nonnull pointers. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_5, ATTR_NONNULL, ATTR_LIST_1_5, \
+ ATTR_NOTHROW_LIST)
+
+/* Nothrow leaf functions which are type-generic. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_TYPEGENERIC_LEAF, ATTR_TYPEGENERIC, ATTR_NULL, \
+ ATTR_NOTHROW_LEAF_LIST)
+/* Nothrow nonnull leaf functions that are type-generic. */
+DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_TYPEGENERIC_LEAF,
+ ATTR_TYPEGENERIC, ATTR_NULL,
+ ATTR_NOTHROW_NONNULL_LEAF)
+/* Nothrow const functions whose pointer parameter(s) are all nonnull. */
+DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_NONNULL, ATTR_CONST, ATTR_NULL, \
+ ATTR_NOTHROW_NONNULL)
+/* Nothrow leaf functions whose pointer parameter(s) are all nonnull,
+ and which return their first argument. */
+DEF_ATTR_TREE_LIST (ATTR_RET1_NOTHROW_NONNULL_LEAF, ATTR_FNSPEC, ATTR_LIST_STR1, \
+ ATTR_NOTHROW_NONNULL_LEAF)
+/* Nothrow leaf functions whose pointer parameter(s) are all nonnull,
+ and return value is also nonnull. */
+DEF_ATTR_TREE_LIST (ATTR_RETNONNULL_NOTHROW_LEAF, ATTR_RETURNS_NONNULL, ATTR_NULL, \
+ ATTR_NOTHROW_NONNULL_LEAF)
+/* Nothrow const leaf functions whose pointer parameter(s) are all nonnull. */
+DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_NONNULL_LEAF, ATTR_CONST, ATTR_NULL, \
+ ATTR_NOTHROW_NONNULL_LEAF)
+/* Nothrow const functions which are type-generic. */
+DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_TYPEGENERIC, ATTR_TYPEGENERIC, ATTR_NULL, \
+ ATTR_CONST_NOTHROW_LIST)
+/* Nothrow const leaf functions which are type-generic. */
+DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF, ATTR_TYPEGENERIC, ATTR_NULL, \
+ ATTR_CONST_NOTHROW_LEAF_LIST)
+/* Nothrow pure functions whose pointer parameter(s) are all nonnull. */
+DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_NONNULL, ATTR_PURE, ATTR_NULL, \
+ ATTR_NOTHROW_NONNULL)
+/* Nothrow pure leaf functions whose pointer parameter(s) are all nonnull. */
+DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_NONNULL_LEAF, ATTR_PURE, ATTR_NULL, \
+ ATTR_NOTHROW_NONNULL_LEAF)
+/* Nothrow malloc functions whose pointer parameter(s) are all nonnull. */
+DEF_ATTR_TREE_LIST (ATTR_WARN_UNUSED_RESULT_NOTHROW_NONNULL, ATTR_WARN_UNUSED_RESULT, ATTR_NULL, \
+ ATTR_NOTHROW_NONNULL)
+DEF_ATTR_TREE_LIST (ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_NONNULL, ATTR_MALLOC, ATTR_NULL, \
+ ATTR_WARN_UNUSED_RESULT_NOTHROW_NONNULL)
+/* Nothrow malloc leaf functions whose pointer parameter(s) are all nonnull. */
+DEF_ATTR_TREE_LIST (ATTR_WARN_UNUSED_RESULT_NOTHROW_NONNULL_LEAF, ATTR_WARN_UNUSED_RESULT, ATTR_NULL, \
+ ATTR_NOTHROW_NONNULL_LEAF)
+DEF_ATTR_TREE_LIST (ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_NONNULL_LEAF, ATTR_MALLOC, ATTR_NULL, \
+ ATTR_WARN_UNUSED_RESULT_NOTHROW_NONNULL_LEAF)
+
+/* Construct a tree for the format attribute (and implicitly nonnull). */
+#define DEF_FORMAT_ATTRIBUTE(TYPE, FA, VALUES) \
+ DEF_ATTR_TREE_LIST (ATTR_##TYPE##_##VALUES, ATTR_NULL, \
+ ATTR_##TYPE, ATTR_LIST_##VALUES) \
+ DEF_ATTR_TREE_LIST (ATTR_FORMAT_##TYPE##_##VALUES, ATTR_FORMAT, \
+ ATTR_##TYPE##_##VALUES, ATTR_NONNULL_##FA)
+
+/* Construct a tree for the format and nothrow attributes (format
+ implies nonnull). */
+#define DEF_FORMAT_ATTRIBUTE_NOTHROW(TYPE, FA, VALUES) \
+ DEF_ATTR_TREE_LIST (ATTR_##TYPE##_##VALUES, ATTR_NULL, \
+ ATTR_##TYPE, ATTR_LIST_##VALUES) \
+ DEF_ATTR_TREE_LIST (ATTR_FORMAT_##TYPE##_NOTHROW_##VALUES, ATTR_FORMAT,\
+ ATTR_##TYPE##_##VALUES, ATTR_NOTHROW_NONNULL_##FA)
+
+/* Construct one tree for the format attribute and another for the format
+ and nothrow attributes (in both cases format implies nonnull). */
+#define DEF_FORMAT_ATTRIBUTE_BOTH(TYPE, FA, VALUES) \
+ DEF_ATTR_TREE_LIST (ATTR_##TYPE##_##VALUES, ATTR_NULL, \
+ ATTR_##TYPE, ATTR_LIST_##VALUES) \
+ DEF_ATTR_TREE_LIST (ATTR_FORMAT_##TYPE##_##VALUES, ATTR_FORMAT, \
+ ATTR_##TYPE##_##VALUES, ATTR_NONNULL_##FA) \
+ DEF_ATTR_TREE_LIST (ATTR_FORMAT_##TYPE##_NOTHROW_##VALUES, ATTR_FORMAT,\
+ ATTR_##TYPE##_##VALUES, ATTR_NOTHROW_NONNULL_##FA)
+
+/* Construct a pair of trees combining the nonnull attribute for the
+ first argument with the format printf attribute (format implies
+ nonnull): the first tree is ordinary and the second nothrow. */
+#define DEF_FORMAT_ATTRIBUTE_NONNULL(TYPE, FA, VALUES) \
+ DEF_ATTR_TREE_LIST (ATTR_NONNULL_1_FORMAT_##TYPE##_##VALUES, \
+ ATTR_FORMAT, ATTR_##TYPE##_##VALUES, \
+ ATTR_NONNULL_1_##FA) \
+ DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_FORMAT_##TYPE##_##VALUES, \
+ ATTR_FORMAT, ATTR_##TYPE##_##VALUES, \
+ ATTR_NOTHROW_NONNULL_1_##FA)
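+
+/* As a sketch of the expansion, DEF_FORMAT_ATTRIBUTE (PRINTF, 1, 1_0)
+   below defines ATTR_PRINTF_1_0, representing format (printf, 1, 0)
+   (format string in argument 1, a va_list instead of checked
+   varargs), and ATTR_FORMAT_PRINTF_1_0, which chains it with
+   nonnull (1).  */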
+
+DEF_FORMAT_ATTRIBUTE(PRINTF,1,1_0)
+DEF_FORMAT_ATTRIBUTE(PRINTF,1,1_2)
+DEF_FORMAT_ATTRIBUTE_BOTH(PRINTF,2,2_0)
+DEF_FORMAT_ATTRIBUTE_BOTH(PRINTF,2,2_3)
+DEF_FORMAT_ATTRIBUTE_BOTH(PRINTF,3,3_0)
+DEF_FORMAT_ATTRIBUTE_BOTH(PRINTF,3,3_4)
+DEF_FORMAT_ATTRIBUTE_NOTHROW(PRINTF,4,4_0)
+DEF_FORMAT_ATTRIBUTE_NOTHROW(PRINTF,4,4_5)
+DEF_FORMAT_ATTRIBUTE_NOTHROW(PRINTF,5,5_0)
+DEF_FORMAT_ATTRIBUTE_NOTHROW(PRINTF,5,5_6)
+
+/* Attributes for fprintf(f, f, va). */
+DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,1,1_2)
+/* Attributes for v{f,s}printf(d, f, va). vsprintf is nothrow, vfprintf
+ is not. */
+DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,2,2_0)
+/* Attributes for {f,s}printf(d, f, ...). sprintf is nothrow, fprintf
+ is not. */
+DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,2,2_3)
+/* Attributes for vprintf_chk. */
+DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,3,3_0)
+/* Attributes for printf_chk. */
+DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,3,3_4)
+/* Attributes for v{f,s}printf_chk(d, t, bos, f, va). vsprintf_chk is
+ nothrow, vfprintf_chk is not. */
+DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,4,4_0)
+/* Attributes for {f,s}printf_chk(d, t, bos, f, ...). sprintf_chk is
+ nothrow, fprintf_chk is not. */
+DEF_FORMAT_ATTRIBUTE_NONNULL(PRINTF,4,4_5)
+
+DEF_FORMAT_ATTRIBUTE(SCANF,1,1_0)
+DEF_FORMAT_ATTRIBUTE(SCANF,1,1_2)
+DEF_FORMAT_ATTRIBUTE_BOTH(SCANF,2,2_0)
+DEF_FORMAT_ATTRIBUTE_BOTH(SCANF,2,2_3)
+DEF_FORMAT_ATTRIBUTE_NOTHROW(STRFTIME,3,3_0)
+DEF_FORMAT_ATTRIBUTE_NOTHROW(STRFMON,3,3_4)
+#undef DEF_FORMAT_ATTRIBUTE
+#undef DEF_FORMAT_ATTRIBUTE_NOTHROW
+#undef DEF_FORMAT_ATTRIBUTE_BOTH
+
+/* Transactional memory variants of the above. */
+
+DEF_ATTR_TREE_LIST (ATTR_TM_NOTHROW_LIST,
+ ATTR_TM_REGPARM, ATTR_NULL, ATTR_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_TM_TMPURE_NOTHROW_LIST,
+ ATTR_TM_TMPURE, ATTR_NULL, ATTR_TM_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_TM_PURE_TMPURE_NOTHROW_LIST,
+ ATTR_PURE, ATTR_NULL, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_TM_NORETURN_NOTHROW_LIST,
+ ATTR_TM_REGPARM, ATTR_NULL, ATTR_NORETURN_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_TM_CONST_NOTHROW_LIST,
+ ATTR_TM_REGPARM, ATTR_NULL, ATTR_CONST_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_TM_NOTHROW_RT_LIST,
+ ATTR_RETURNS_TWICE, ATTR_NULL, ATTR_TM_NOTHROW_LIST)
+
+/* Same attributes used for BUILT_IN_MALLOC except with TM_PURE thrown in. */
+DEF_ATTR_TREE_LIST (ATTR_TMPURE_MALLOC_NOTHROW_LIST,
+ ATTR_TM_TMPURE, ATTR_NULL, ATTR_MALLOC_NOTHROW_LIST)
+/* Same attributes used for BUILT_IN_FREE except with TM_PURE thrown in. */
+DEF_ATTR_TREE_LIST (ATTR_TMPURE_NOTHROW_LIST,
+ ATTR_TM_TMPURE, ATTR_NULL, ATTR_NOTHROW_LIST)
+
+DEF_ATTR_TREE_LIST (ATTR_TMPURE_NOTHROW_LEAF_LIST,
+ ATTR_TM_TMPURE, ATTR_NULL, ATTR_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST,
+ ATTR_TM_TMPURE, ATTR_NULL, ATTR_NORETURN_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_TMPURE_NORETURN_NOTHROW_LEAF_COLD_LIST,
+ ATTR_COLD, ATTR_NULL,
+ ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+
+/* Construct a tree for a format_arg attribute. */
+#define DEF_FORMAT_ARG_ATTRIBUTE(FA) \
+ DEF_ATTR_TREE_LIST (ATTR_FORMAT_ARG_##FA, ATTR_FORMAT_ARG, \
+ ATTR_LIST_##FA, ATTR_NOTHROW_NONNULL_##FA)
+DEF_FORMAT_ARG_ATTRIBUTE(1)
+DEF_FORMAT_ARG_ATTRIBUTE(2)
+#undef DEF_FORMAT_ARG_ATTRIBUTE
+
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtin-types.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtin-types.def
new file mode 100644
index 0000000..5cb25ab
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtin-types.def
@@ -0,0 +1,1062 @@
+/* Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This header provides a declarative way of describing the types that
+ are used when declaring builtin functions.
+
+ Before including this header, you must define the following macros:
+
+ DEF_PRIMITIVE_TYPE (ENUM, TYPE)
+
+ The ENUM is an identifier indicating which type is being defined.
+ TYPE is an expression for a `tree' that represents the type.
+
+ DEF_FUNCTION_TYPE_0 (ENUM, RETURN)
+ DEF_FUNCTION_TYPE_1 (ENUM, RETURN, ARG1)
+ DEF_FUNCTION_TYPE_2 (ENUM, RETURN, ARG1, ARG2)
+ [...]
+
+ These macros describe function types. ENUM is as above. The
+ RETURN type is one of the enumerals already defined. ARG1, ARG2,
+ etc., similarly give the types of the arguments.
+
+ DEF_FUNCTION_TYPE_VAR_0 (ENUM, RETURN)
+ DEF_FUNCTION_TYPE_VAR_1 (ENUM, RETURN, ARG1)
+ DEF_FUNCTION_TYPE_VAR_2 (ENUM, RETURN, ARG1, ARG2)
+ [...]
+
+ Similar, but for function types that take variable arguments.
+ For example:
+
+ DEF_FUNCTION_TYPE_1 (BT_INT_DOUBLE, BT_INT, BT_DOUBLE)
+
+ describes the type `int ()(double)', using the enumeral
+ BT_INT_DOUBLE, whereas:
+
+ DEF_FUNCTION_TYPE_VAR_1 (BT_INT_DOUBLE_VAR, BT_INT, BT_DOUBLE)
+
+ describes the type `int ()(double, ...)'.
+
+ DEF_POINTER_TYPE (ENUM, TYPE)
+
+ This macro describes a pointer type. ENUM is as above; TYPE is
+ the type pointed to. */
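+
+/* As a sketch, a consumer can turn this file into an enumeration of
+   type handles roughly like so (GCC's real consumer may differ in
+   detail; one DEF_FUNCTION_TYPE_N define is needed per arity used
+   below, plus the _VAR_N variants):
+
+     #define DEF_PRIMITIVE_TYPE(ENUM, TYPE) ENUM,
+     #define DEF_FUNCTION_TYPE_0(ENUM, RETURN) ENUM,
+     #define DEF_FUNCTION_TYPE_1(ENUM, RETURN, ARG1) ENUM,
+     #define DEF_POINTER_TYPE(ENUM, TYPE) ENUM,
+     enum built_in_type {
+     #include "builtin-types.def"
+       BT_LAST
+     };  */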
+
+DEF_PRIMITIVE_TYPE (BT_VOID, void_type_node)
+DEF_PRIMITIVE_TYPE (BT_BOOL, boolean_type_node)
+DEF_PRIMITIVE_TYPE (BT_INT, integer_type_node)
+DEF_PRIMITIVE_TYPE (BT_UINT, unsigned_type_node)
+DEF_PRIMITIVE_TYPE (BT_LONG, long_integer_type_node)
+DEF_PRIMITIVE_TYPE (BT_ULONG, long_unsigned_type_node)
+DEF_PRIMITIVE_TYPE (BT_LONGLONG, long_long_integer_type_node)
+DEF_PRIMITIVE_TYPE (BT_ULONGLONG, long_long_unsigned_type_node)
+DEF_PRIMITIVE_TYPE (BT_INTMAX, intmax_type_node)
+DEF_PRIMITIVE_TYPE (BT_UINTMAX, uintmax_type_node)
+DEF_PRIMITIVE_TYPE (BT_INT8, signed_char_type_node)
+DEF_PRIMITIVE_TYPE (BT_INT16, short_integer_type_node)
+DEF_PRIMITIVE_TYPE (BT_UINT8, unsigned_char_type_node)
+DEF_PRIMITIVE_TYPE (BT_UINT16, uint16_type_node)
+DEF_PRIMITIVE_TYPE (BT_UINT32, uint32_type_node)
+DEF_PRIMITIVE_TYPE (BT_UINT64, uint64_type_node)
+DEF_PRIMITIVE_TYPE (BT_UINT128, uint128_type_node
+ ? uint128_type_node
+ : error_mark_node)
+DEF_PRIMITIVE_TYPE (BT_WORD, (*lang_hooks.types.type_for_mode) (word_mode, 1))
+DEF_PRIMITIVE_TYPE (BT_UNWINDWORD, (*lang_hooks.types.type_for_mode)
+ (targetm.unwind_word_mode (), 1))
+DEF_PRIMITIVE_TYPE (BT_FLOAT, float_type_node)
+DEF_PRIMITIVE_TYPE (BT_DOUBLE, double_type_node)
+DEF_PRIMITIVE_TYPE (BT_LONGDOUBLE, long_double_type_node)
+DEF_PRIMITIVE_TYPE (BT_BFLOAT16, (bfloat16_type_node
+ ? bfloat16_type_node
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_FLOAT16, (float16_type_node
+ ? float16_type_node
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_FLOAT32, (float32_type_node
+ ? float32_type_node
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_FLOAT64, (float64_type_node
+ ? float64_type_node
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_FLOAT128, (float128_type_node
+ ? float128_type_node
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_FLOAT32X, (float32x_type_node
+ ? float32x_type_node
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_FLOAT64X, (float64x_type_node
+ ? float64x_type_node
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_FLOAT128X, (float128x_type_node
+ ? float128x_type_node
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_COMPLEX_FLOAT, complex_float_type_node)
+DEF_PRIMITIVE_TYPE (BT_COMPLEX_DOUBLE, complex_double_type_node)
+DEF_PRIMITIVE_TYPE (BT_COMPLEX_LONGDOUBLE, complex_long_double_type_node)
+DEF_PRIMITIVE_TYPE (BT_COMPLEX_FLOAT16, (float16_type_node
+ ? build_complex_type
+ (float16_type_node)
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_COMPLEX_FLOAT32, (float32_type_node
+ ? build_complex_type
+ (float32_type_node)
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_COMPLEX_FLOAT64, (float64_type_node
+ ? build_complex_type
+ (float64_type_node)
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_COMPLEX_FLOAT128, (float128_type_node
+ ? build_complex_type
+ (float128_type_node)
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_COMPLEX_FLOAT32X, (float32x_type_node
+ ? build_complex_type
+ (float32x_type_node)
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_COMPLEX_FLOAT64X, (float64x_type_node
+ ? build_complex_type
+ (float64x_type_node)
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_COMPLEX_FLOAT128X, (float128x_type_node
+ ? build_complex_type
+ (float128x_type_node)
+ : error_mark_node))
+
+DEF_PRIMITIVE_TYPE (BT_PTR, ptr_type_node)
+DEF_PRIMITIVE_TYPE (BT_FILEPTR, fileptr_type_node)
+DEF_PRIMITIVE_TYPE (BT_CONST_TM_PTR, const_tm_ptr_type_node)
+DEF_PRIMITIVE_TYPE (BT_FENV_T_PTR, fenv_t_ptr_type_node)
+DEF_PRIMITIVE_TYPE (BT_CONST_FENV_T_PTR, const_fenv_t_ptr_type_node)
+DEF_PRIMITIVE_TYPE (BT_FEXCEPT_T_PTR, fexcept_t_ptr_type_node)
+DEF_PRIMITIVE_TYPE (BT_CONST_FEXCEPT_T_PTR, const_fexcept_t_ptr_type_node)
+DEF_PRIMITIVE_TYPE (BT_CONST_PTR, const_ptr_type_node)
+DEF_PRIMITIVE_TYPE (BT_VOLATILE_PTR,
+ build_pointer_type
+ (build_qualified_type (void_type_node,
+ TYPE_QUAL_VOLATILE)))
+DEF_PRIMITIVE_TYPE (BT_CONST_VOLATILE_PTR,
+ build_pointer_type
+ (build_qualified_type (void_type_node,
+ TYPE_QUAL_VOLATILE|TYPE_QUAL_CONST)))
+DEF_PRIMITIVE_TYPE (BT_PTRMODE, (*lang_hooks.types.type_for_mode)(ptr_mode, 0))
+DEF_PRIMITIVE_TYPE (BT_INT_PTR, integer_ptr_type_node)
+DEF_PRIMITIVE_TYPE (BT_FLOAT_PTR, float_ptr_type_node)
+DEF_PRIMITIVE_TYPE (BT_DOUBLE_PTR, double_ptr_type_node)
+DEF_PRIMITIVE_TYPE (BT_CONST_DOUBLE_PTR,
+ build_pointer_type
+ (build_qualified_type (double_type_node,
+ TYPE_QUAL_CONST)))
+DEF_PRIMITIVE_TYPE (BT_LONGDOUBLE_PTR, long_double_ptr_type_node)
+DEF_PRIMITIVE_TYPE (BT_FLOAT16_PTR, (float16_type_node
+ ? build_pointer_type (float16_type_node)
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_FLOAT32_PTR, (float32_type_node
+ ? build_pointer_type (float32_type_node)
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_FLOAT64_PTR, (float64_type_node
+ ? build_pointer_type (float64_type_node)
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_FLOAT128_PTR, (float128_type_node
+ ? build_pointer_type (float128_type_node)
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_FLOAT32X_PTR, (float32x_type_node
+ ? build_pointer_type (float32x_type_node)
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_FLOAT64X_PTR, (float64x_type_node
+ ? build_pointer_type (float64x_type_node)
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_FLOAT128X_PTR, (float128x_type_node
+ ? build_pointer_type (float128x_type_node)
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_PID, pid_type_node)
+DEF_PRIMITIVE_TYPE (BT_SIZE, size_type_node)
+DEF_PRIMITIVE_TYPE (BT_CONST_SIZE,
+ build_qualified_type (size_type_node, TYPE_QUAL_CONST))
+DEF_PRIMITIVE_TYPE (BT_SSIZE, signed_size_type_node)
+DEF_PRIMITIVE_TYPE (BT_WINT, wint_type_node)
+DEF_PRIMITIVE_TYPE (BT_STRING, string_type_node)
+DEF_PRIMITIVE_TYPE (BT_CONST_STRING, const_string_type_node)
+
+DEF_PRIMITIVE_TYPE (BT_DFLOAT32, (dfloat32_type_node
+ ? dfloat32_type_node
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_DFLOAT64, (dfloat64_type_node
+ ? dfloat64_type_node
+ : error_mark_node))
+DEF_PRIMITIVE_TYPE (BT_DFLOAT128, (dfloat128_type_node
+ ? dfloat128_type_node
+ : error_mark_node))
+
+DEF_PRIMITIVE_TYPE (BT_VALIST_REF, va_list_ref_type_node)
+DEF_PRIMITIVE_TYPE (BT_VALIST_ARG, va_list_arg_type_node)
+
+DEF_PRIMITIVE_TYPE (BT_I1, builtin_type_for_size (BITS_PER_UNIT*1, 1))
+DEF_PRIMITIVE_TYPE (BT_I2, builtin_type_for_size (BITS_PER_UNIT*2, 1))
+DEF_PRIMITIVE_TYPE (BT_I4, builtin_type_for_size (BITS_PER_UNIT*4, 1))
+DEF_PRIMITIVE_TYPE (BT_I8, builtin_type_for_size (BITS_PER_UNIT*8, 1))
+DEF_PRIMITIVE_TYPE (BT_I16, builtin_type_for_size (BITS_PER_UNIT*16, 1))
+
+/* The C type `char * const *'. */
+DEF_PRIMITIVE_TYPE (BT_PTR_CONST_STRING,
+ build_pointer_type
+ (build_qualified_type (string_type_node,
+ TYPE_QUAL_CONST)))
+
+DEF_POINTER_TYPE (BT_PTR_UINT, BT_UINT)
+DEF_POINTER_TYPE (BT_PTR_LONG, BT_LONG)
+DEF_POINTER_TYPE (BT_PTR_ULONG, BT_ULONG)
+DEF_POINTER_TYPE (BT_PTR_LONGLONG, BT_LONGLONG)
+DEF_POINTER_TYPE (BT_PTR_ULONGLONG, BT_ULONGLONG)
+DEF_POINTER_TYPE (BT_PTR_PTR, BT_PTR)
+
+DEF_FUNCTION_TYPE_0 (BT_FN_VOID, BT_VOID)
+DEF_FUNCTION_TYPE_0 (BT_FN_BOOL, BT_BOOL)
+DEF_FUNCTION_TYPE_0 (BT_FN_PTR, BT_PTR)
+DEF_FUNCTION_TYPE_0 (BT_FN_CONST_STRING, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_0 (BT_FN_PID, BT_PID)
+DEF_FUNCTION_TYPE_0 (BT_FN_INT, BT_INT)
+DEF_FUNCTION_TYPE_0 (BT_FN_UINT, BT_UINT)
+DEF_FUNCTION_TYPE_0 (BT_FN_ULONG, BT_ULONG)
+DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT, BT_FLOAT)
+DEF_FUNCTION_TYPE_0 (BT_FN_DOUBLE, BT_DOUBLE)
+/* For "long double" we use LONGDOUBLE (not LONG_DOUBLE) to
+ distinguish it from two types in sequence, "long" followed by
+ "double". */
+DEF_FUNCTION_TYPE_0 (BT_FN_LONGDOUBLE, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT16, BT_FLOAT16)
+DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT32, BT_FLOAT32)
+DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT64, BT_FLOAT64)
+DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT128, BT_FLOAT128)
+DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT32X, BT_FLOAT32X)
+DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT64X, BT_FLOAT64X)
+DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT128X, BT_FLOAT128X)
+DEF_FUNCTION_TYPE_0 (BT_FN_DFLOAT32, BT_DFLOAT32)
+DEF_FUNCTION_TYPE_0 (BT_FN_DFLOAT64, BT_DFLOAT64)
+DEF_FUNCTION_TYPE_0 (BT_FN_DFLOAT128, BT_DFLOAT128)
+
+DEF_FUNCTION_TYPE_1 (BT_FN_LONG_LONG, BT_LONG, BT_LONG)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_LONGLONG, BT_LONGLONG, BT_LONGLONG)
+DEF_FUNCTION_TYPE_1 (BT_FN_INTMAX_INTMAX, BT_INTMAX, BT_INTMAX)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_FLOAT, BT_FLOAT, BT_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_DOUBLE, BT_DOUBLE, BT_DOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_LONGDOUBLE,
+ BT_LONGDOUBLE, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT16_FLOAT16, BT_FLOAT16, BT_FLOAT16)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT32_FLOAT32, BT_FLOAT32, BT_FLOAT32)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT64_FLOAT64, BT_FLOAT64, BT_FLOAT64)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT128_FLOAT128, BT_FLOAT128, BT_FLOAT128)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT32X_FLOAT32X, BT_FLOAT32X, BT_FLOAT32X)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT64X_FLOAT64X, BT_FLOAT64X, BT_FLOAT64X)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT128X_FLOAT128X, BT_FLOAT128X, BT_FLOAT128X)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT,
+ BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE,
+ BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE,
+ BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT16_COMPLEX_FLOAT16,
+ BT_COMPLEX_FLOAT16, BT_COMPLEX_FLOAT16)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT32_COMPLEX_FLOAT32,
+ BT_COMPLEX_FLOAT32, BT_COMPLEX_FLOAT32)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT64_COMPLEX_FLOAT64,
+ BT_COMPLEX_FLOAT64, BT_COMPLEX_FLOAT64)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT128_COMPLEX_FLOAT128,
+ BT_COMPLEX_FLOAT128, BT_COMPLEX_FLOAT128)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT32X_COMPLEX_FLOAT32X,
+ BT_COMPLEX_FLOAT32X, BT_COMPLEX_FLOAT32X)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT64X_COMPLEX_FLOAT64X,
+ BT_COMPLEX_FLOAT64X, BT_COMPLEX_FLOAT64X)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT128X_COMPLEX_FLOAT128X,
+ BT_COMPLEX_FLOAT128X, BT_COMPLEX_FLOAT128X)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_COMPLEX_FLOAT,
+ BT_FLOAT, BT_COMPLEX_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_COMPLEX_DOUBLE,
+ BT_DOUBLE, BT_COMPLEX_DOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE,
+ BT_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT16_COMPLEX_FLOAT16,
+ BT_FLOAT16, BT_COMPLEX_FLOAT16)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT32_COMPLEX_FLOAT32,
+ BT_FLOAT32, BT_COMPLEX_FLOAT32)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT64_COMPLEX_FLOAT64,
+ BT_FLOAT64, BT_COMPLEX_FLOAT64)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT128_COMPLEX_FLOAT128,
+ BT_FLOAT128, BT_COMPLEX_FLOAT128)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT32X_COMPLEX_FLOAT32X,
+ BT_FLOAT32X, BT_COMPLEX_FLOAT32X)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT64X_COMPLEX_FLOAT64X,
+ BT_FLOAT64X, BT_COMPLEX_FLOAT64X)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT128X_COMPLEX_FLOAT128X,
+ BT_FLOAT128X, BT_COMPLEX_FLOAT128X)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT_FLOAT,
+ BT_COMPLEX_FLOAT, BT_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_DOUBLE_DOUBLE,
+ BT_COMPLEX_DOUBLE, BT_DOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_LONGDOUBLE_LONGDOUBLE,
+ BT_COMPLEX_LONGDOUBLE, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_PTR_UINT, BT_PTR, BT_UINT)
+DEF_FUNCTION_TYPE_1 (BT_FN_PTR_SIZE, BT_PTR, BT_SIZE)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_INT, BT_INT, BT_INT)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_UINT, BT_INT, BT_UINT)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_LONG, BT_INT, BT_LONG)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_ULONG, BT_INT, BT_ULONG)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_LONGLONG, BT_INT, BT_LONGLONG)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_ULONGLONG, BT_INT, BT_ULONGLONG)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_INTMAX, BT_INT, BT_INTMAX)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_UINTMAX, BT_INT, BT_UINTMAX)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_PTR, BT_INT, BT_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_FLOAT, BT_INT, BT_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_DOUBLE, BT_INT, BT_DOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_LONGDOUBLE, BT_INT, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_FLOAT16, BT_INT, BT_FLOAT16)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_FLOAT32, BT_INT, BT_FLOAT32)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_FLOAT64, BT_INT, BT_FLOAT64)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_FLOAT128, BT_INT, BT_FLOAT128)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_FLOAT32X, BT_INT, BT_FLOAT32X)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_FLOAT64X, BT_INT, BT_FLOAT64X)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_FLOAT128X, BT_INT, BT_FLOAT128X)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_DFLOAT32, BT_INT, BT_DFLOAT32)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_DFLOAT64, BT_INT, BT_DFLOAT64)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_DFLOAT128, BT_INT, BT_DFLOAT128)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONG_FLOAT, BT_LONG, BT_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONG_DOUBLE, BT_LONG, BT_DOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONG_LONGDOUBLE, BT_LONG, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONG_FLOAT16, BT_LONG, BT_FLOAT16)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONG_FLOAT32, BT_LONG, BT_FLOAT32)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONG_FLOAT64, BT_LONG, BT_FLOAT64)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONG_FLOAT128, BT_LONG, BT_FLOAT128)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONG_FLOAT32X, BT_LONG, BT_FLOAT32X)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONG_FLOAT64X, BT_LONG, BT_FLOAT64X)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONG_FLOAT128X, BT_LONG, BT_FLOAT128X)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_FLOAT, BT_LONGLONG, BT_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_DOUBLE, BT_LONGLONG, BT_DOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_LONGDOUBLE, BT_LONGLONG, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_FLOAT16, BT_LONGLONG, BT_FLOAT16)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_FLOAT32, BT_LONGLONG, BT_FLOAT32)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_FLOAT64, BT_LONGLONG, BT_FLOAT64)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_FLOAT128, BT_LONGLONG, BT_FLOAT128)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_FLOAT32X, BT_LONGLONG, BT_FLOAT32X)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_FLOAT64X, BT_LONGLONG, BT_FLOAT64X)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_FLOAT128X, BT_LONGLONG, BT_FLOAT128X)
+DEF_FUNCTION_TYPE_1 (BT_FN_VOID_PTR, BT_VOID, BT_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_SIZE_CONST_STRING, BT_SIZE, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_CONST_STRING, BT_INT, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_PTR_PTR, BT_PTR, BT_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_VOID_VALIST_REF, BT_VOID, BT_VALIST_REF)
+DEF_FUNCTION_TYPE_1 (BT_FN_VOID_INT, BT_VOID, BT_INT)
+DEF_FUNCTION_TYPE_1 (BT_FN_VOID_BOOL, BT_VOID, BT_BOOL)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_CONST_STRING, BT_FLOAT, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_CONST_STRING, BT_DOUBLE, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_CONST_STRING,
+ BT_LONGDOUBLE, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_BFLOAT16_CONST_STRING, BT_BFLOAT16, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT16_CONST_STRING, BT_FLOAT16, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT32_CONST_STRING, BT_FLOAT32, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT64_CONST_STRING, BT_FLOAT64, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT128_CONST_STRING, BT_FLOAT128, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT32X_CONST_STRING, BT_FLOAT32X, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT64X_CONST_STRING, BT_FLOAT64X, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT128X_CONST_STRING, BT_FLOAT128X, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_DFLOAT32_CONST_STRING, BT_DFLOAT32, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_DFLOAT64_CONST_STRING, BT_DFLOAT64, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_DFLOAT128_CONST_STRING,
+ BT_DFLOAT128, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_STRING_CONST_STRING, BT_STRING, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_1 (BT_FN_UNWINDWORD_PTR, BT_UNWINDWORD, BT_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_WINT, BT_INT, BT_WINT)
+DEF_FUNCTION_TYPE_1 (BT_FN_WINT_WINT, BT_WINT, BT_WINT)
+DEF_FUNCTION_TYPE_1 (BT_FN_DFLOAT32_DFLOAT32, BT_DFLOAT32, BT_DFLOAT32)
+DEF_FUNCTION_TYPE_1 (BT_FN_DFLOAT64_DFLOAT64, BT_DFLOAT64, BT_DFLOAT64)
+DEF_FUNCTION_TYPE_1 (BT_FN_DFLOAT128_DFLOAT128, BT_DFLOAT128, BT_DFLOAT128)
+DEF_FUNCTION_TYPE_1 (BT_FN_VOID_VPTR, BT_VOID, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_VOID_PTRPTR, BT_VOID, BT_PTR_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_VOID_CONST_PTR, BT_VOID, BT_CONST_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT_UINT, BT_UINT, BT_UINT)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT_INT, BT_UINT, BT_INT)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT_ULONG, BT_UINT, BT_ULONG)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT_LONG, BT_UINT, BT_LONG)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT_PTR, BT_UINT, BT_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT_CONST_PTR, BT_UINT, BT_CONST_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_ULONG_PTR, BT_ULONG, BT_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_ULONG_CONST_PTR, BT_ULONG, BT_CONST_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_ULONG_ULONG, BT_ULONG, BT_ULONG)
+DEF_FUNCTION_TYPE_1 (BT_FN_ULONGLONG_ULONGLONG, BT_ULONGLONG, BT_ULONGLONG)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT8_FLOAT, BT_INT8, BT_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT16_FLOAT, BT_INT16, BT_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT32_FLOAT, BT_UINT32, BT_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT16_FLOAT, BT_UINT16, BT_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT8_FLOAT, BT_UINT8, BT_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT16_UINT16, BT_UINT16, BT_UINT16)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT32_UINT32, BT_UINT32, BT_UINT32)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT64_UINT64, BT_UINT64, BT_UINT64)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT128_UINT128, BT_UINT128, BT_UINT128)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT64_FLOAT, BT_UINT64, BT_FLOAT)
+DEF_FUNCTION_TYPE_1 (BT_FN_BOOL_INT, BT_BOOL, BT_INT)
+DEF_FUNCTION_TYPE_1 (BT_FN_BOOL_PTR, BT_BOOL, BT_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_PTR_CONST_PTR, BT_PTR, BT_CONST_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_CONST_PTR_CONST_PTR, BT_CONST_PTR, BT_CONST_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT16_UINT32, BT_UINT16, BT_UINT32)
+DEF_FUNCTION_TYPE_1 (BT_FN_UINT32_UINT16, BT_UINT32, BT_UINT16)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_FENV_T_PTR, BT_INT, BT_FENV_T_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_INT_CONST_FENV_T_PTR, BT_INT, BT_CONST_FENV_T_PTR)
+
+DEF_POINTER_TYPE (BT_PTR_FN_VOID_PTR, BT_FN_VOID_PTR)
+
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_INT, BT_VOID, BT_PTR, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_STRING_STRING_CONST_STRING,
+ BT_STRING, BT_STRING, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_CONST_STRING,
+ BT_INT, BT_CONST_STRING, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_2 (BT_FN_STRING_CONST_STRING_CONST_STRING,
+ BT_STRING, BT_CONST_STRING, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_2 (BT_FN_SIZE_CONST_STRING_CONST_STRING,
+ BT_SIZE, BT_CONST_STRING, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_2 (BT_FN_STRING_CONST_STRING_INT,
+ BT_STRING, BT_CONST_STRING, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_STRING_CONST_STRING_SIZE,
+ BT_STRING, BT_CONST_STRING, BT_SIZE)
+DEF_FUNCTION_TYPE_2 (BT_FN_SIZE_CONST_STRING_SIZE,
+ BT_SIZE, BT_CONST_STRING, BT_SIZE)
+DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_FILEPTR,
+ BT_INT, BT_CONST_STRING, BT_FILEPTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_INT_INT_FILEPTR,
+ BT_INT, BT_INT, BT_FILEPTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_UINT16_UINT16_UINT16,
+ BT_UINT16, BT_UINT16, BT_UINT16)
+DEF_FUNCTION_TYPE_2 (BT_FN_INT_PTR_INT,
+ BT_INT, BT_PTR, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_UINT_PTR_UINT,
+ BT_UINT, BT_PTR, BT_UINT)
+DEF_FUNCTION_TYPE_2 (BT_FN_LONG_PTR_LONG,
+ BT_LONG, BT_PTR, BT_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_ULONG_PTR_ULONG,
+ BT_ULONG, BT_PTR, BT_ULONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTRMODE_PTR,
+ BT_VOID, BT_PTRMODE, BT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_PTRMODE,
+ BT_VOID, BT_PTR, BT_PTRMODE)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_UINT8_UINT8,
+ BT_VOID, BT_UINT8, BT_UINT8)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_UINT16_UINT16,
+ BT_VOID, BT_UINT16, BT_UINT16)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_UINT32_UINT32,
+ BT_VOID, BT_UINT32, BT_UINT32)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_UINT64_UINT64,
+ BT_VOID, BT_UINT64, BT_UINT64)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_FLOAT_FLOAT,
+ BT_VOID, BT_FLOAT, BT_FLOAT)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_DOUBLE_DOUBLE,
+ BT_VOID, BT_DOUBLE, BT_DOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_UINT64_PTR,
+ BT_VOID, BT_UINT64, BT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VALIST_REF_VALIST_ARG,
+ BT_VOID, BT_VALIST_REF, BT_VALIST_ARG)
+DEF_FUNCTION_TYPE_2 (BT_FN_LONG_LONG_LONG,
+ BT_LONG, BT_LONG, BT_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_UINT8_UINT8_UINT8,
+ BT_UINT8, BT_UINT8, BT_UINT8)
+DEF_FUNCTION_TYPE_2 (BT_FN_INT8_INT8_INT8,
+ BT_INT8, BT_INT8, BT_INT8)
+DEF_FUNCTION_TYPE_2 (BT_FN_INT16_INT16_INT16,
+ BT_INT16, BT_INT16, BT_INT16)
+DEF_FUNCTION_TYPE_2 (BT_FN_INT_INT_INT,
+ BT_INT, BT_INT, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_UINT_FLOAT_UINT,
+ BT_UINT, BT_FLOAT, BT_UINT)
+DEF_FUNCTION_TYPE_2 (BT_FN_UINT_DOUBLE_UINT,
+ BT_UINT, BT_DOUBLE, BT_UINT)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_UINT_UINT,
+ BT_FLOAT, BT_UINT, BT_UINT)
+DEF_FUNCTION_TYPE_2 (BT_FN_ULONG_UINT_UINT,
+ BT_ULONG, BT_UINT, BT_UINT)
+DEF_FUNCTION_TYPE_2 (BT_FN_ULONG_UINT_PTR,
+ BT_ULONG, BT_UINT, BT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_ULONG_ULONG_ULONG,
+ BT_ULONG, BT_ULONG, BT_ULONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_UINT_UINT_UINT,
+ BT_UINT, BT_UINT, BT_UINT)
+DEF_FUNCTION_TYPE_2 (BT_FN_INT_PTR_CONST_STRING,
+ BT_INT, BT_PTR, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_SIZE,
+ BT_VOID, BT_PTR, BT_SIZE)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_UINT_PTR,
+ BT_VOID, BT_UINT, BT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_FLOAT,
+ BT_FLOAT, BT_FLOAT, BT_FLOAT)
+DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_DOUBLE,
+ BT_DOUBLE, BT_DOUBLE, BT_DOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE,
+ BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT16_FLOAT16_FLOAT16,
+ BT_FLOAT16, BT_FLOAT16, BT_FLOAT16)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT32_FLOAT32_FLOAT32,
+ BT_FLOAT32, BT_FLOAT32, BT_FLOAT32)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT64_FLOAT64_FLOAT64,
+ BT_FLOAT64, BT_FLOAT64, BT_FLOAT64)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT128_FLOAT128_FLOAT128,
+ BT_FLOAT128, BT_FLOAT128, BT_FLOAT128)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT32X_FLOAT32X_FLOAT32X,
+ BT_FLOAT32X, BT_FLOAT32X, BT_FLOAT32X)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT64X_FLOAT64X_FLOAT64X,
+ BT_FLOAT64X, BT_FLOAT64X, BT_FLOAT64X)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT128X_FLOAT128X_FLOAT128X,
+ BT_FLOAT128X, BT_FLOAT128X, BT_FLOAT128X)
+DEF_FUNCTION_TYPE_2 (BT_FN_BFLOAT16_BFLOAT16_BFLOAT16,
+ BT_BFLOAT16, BT_BFLOAT16, BT_BFLOAT16)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_FLOATPTR,
+ BT_FLOAT, BT_FLOAT, BT_FLOAT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_DOUBLEPTR,
+ BT_DOUBLE, BT_DOUBLE, BT_DOUBLE_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLEPTR,
+ BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT16_FLOAT16_FLOAT16PTR,
+ BT_FLOAT16, BT_FLOAT16, BT_FLOAT16_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT32_FLOAT32_FLOAT32PTR,
+ BT_FLOAT32, BT_FLOAT32, BT_FLOAT32_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT64_FLOAT64_FLOAT64PTR,
+ BT_FLOAT64, BT_FLOAT64, BT_FLOAT64_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT128_FLOAT128_FLOAT128PTR,
+ BT_FLOAT128, BT_FLOAT128, BT_FLOAT128_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT32X_FLOAT32X_FLOAT32XPTR,
+ BT_FLOAT32X, BT_FLOAT32X, BT_FLOAT32X_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT64X_FLOAT64X_FLOAT64XPTR,
+ BT_FLOAT64X, BT_FLOAT64X, BT_FLOAT64X_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT128X_FLOAT128X_FLOAT128XPTR,
+ BT_FLOAT128X, BT_FLOAT128X, BT_FLOAT128X_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_LONGDOUBLE,
+ BT_FLOAT, BT_FLOAT, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_LONGDOUBLE,
+ BT_DOUBLE, BT_DOUBLE, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_INT,
+ BT_FLOAT, BT_FLOAT, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_INT,
+ BT_DOUBLE, BT_DOUBLE, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_INT,
+ BT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT16_FLOAT16_INT,
+ BT_FLOAT16, BT_FLOAT16, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT32_FLOAT32_INT,
+ BT_FLOAT32, BT_FLOAT32, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT64_FLOAT64_INT,
+ BT_FLOAT64, BT_FLOAT64, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT128_FLOAT128_INT,
+ BT_FLOAT128, BT_FLOAT128, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT32X_FLOAT32X_INT,
+ BT_FLOAT32X, BT_FLOAT32X, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT64X_FLOAT64X_INT,
+ BT_FLOAT64X, BT_FLOAT64X, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT128X_FLOAT128X_INT,
+ BT_FLOAT128X, BT_FLOAT128X, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_INTPTR,
+ BT_FLOAT, BT_FLOAT, BT_INT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_INTPTR,
+ BT_DOUBLE, BT_DOUBLE, BT_INT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_INTPTR,
+ BT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT16_FLOAT16_INTPTR,
+ BT_FLOAT16, BT_FLOAT16, BT_INT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT32_FLOAT32_INTPTR,
+ BT_FLOAT32, BT_FLOAT32, BT_INT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT64_FLOAT64_INTPTR,
+ BT_FLOAT64, BT_FLOAT64, BT_INT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT128_FLOAT128_INTPTR,
+ BT_FLOAT128, BT_FLOAT128, BT_INT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT32X_FLOAT32X_INTPTR,
+ BT_FLOAT32X, BT_FLOAT32X, BT_INT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT64X_FLOAT64X_INTPTR,
+ BT_FLOAT64X, BT_FLOAT64X, BT_INT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT128X_FLOAT128X_INTPTR,
+ BT_FLOAT128X, BT_FLOAT128X, BT_INT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_INT_FLOAT,
+ BT_FLOAT, BT_INT, BT_FLOAT)
+DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_INT_DOUBLE,
+ BT_DOUBLE, BT_INT, BT_DOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_INT_LONGDOUBLE,
+ BT_LONGDOUBLE, BT_INT, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_LONG,
+ BT_FLOAT, BT_FLOAT, BT_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_LONG,
+ BT_DOUBLE, BT_DOUBLE, BT_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONG,
+ BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT16_FLOAT16_LONG,
+ BT_FLOAT16, BT_FLOAT16, BT_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT32_FLOAT32_LONG,
+ BT_FLOAT32, BT_FLOAT32, BT_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT64_FLOAT64_LONG,
+ BT_FLOAT64, BT_FLOAT64, BT_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT128_FLOAT128_LONG,
+ BT_FLOAT128, BT_FLOAT128, BT_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT32X_FLOAT32X_LONG,
+ BT_FLOAT32X, BT_FLOAT32X, BT_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT64X_FLOAT64X_LONG,
+ BT_FLOAT64X, BT_FLOAT64X, BT_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT128X_FLOAT128X_LONG,
+ BT_FLOAT128X, BT_FLOAT128X, BT_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_VALIST_ARG,
+ BT_INT, BT_CONST_STRING, BT_VALIST_ARG)
+DEF_FUNCTION_TYPE_2 (BT_FN_PTR_SIZE_SIZE,
+ BT_PTR, BT_SIZE, BT_SIZE)
+DEF_FUNCTION_TYPE_2 (BT_FN_PTR_PTR_SIZE,
+ BT_PTR, BT_PTR, BT_SIZE)
+DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT_COMPLEX_FLOAT,
+ BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT)
+DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE_COMPLEX_DOUBLE,
+ BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE,
+ BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_FLOAT16_COMPLEX_FLOAT16_COMPLEX_FLOAT16,
+ BT_COMPLEX_FLOAT16, BT_COMPLEX_FLOAT16, BT_COMPLEX_FLOAT16)
+DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_FLOAT32_COMPLEX_FLOAT32_COMPLEX_FLOAT32,
+ BT_COMPLEX_FLOAT32, BT_COMPLEX_FLOAT32, BT_COMPLEX_FLOAT32)
+DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_FLOAT64_COMPLEX_FLOAT64_COMPLEX_FLOAT64,
+ BT_COMPLEX_FLOAT64, BT_COMPLEX_FLOAT64, BT_COMPLEX_FLOAT64)
+DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_FLOAT128_COMPLEX_FLOAT128_COMPLEX_FLOAT128,
+ BT_COMPLEX_FLOAT128, BT_COMPLEX_FLOAT128, BT_COMPLEX_FLOAT128)
+DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_FLOAT32X_COMPLEX_FLOAT32X_COMPLEX_FLOAT32X,
+ BT_COMPLEX_FLOAT32X, BT_COMPLEX_FLOAT32X, BT_COMPLEX_FLOAT32X)
+DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_FLOAT64X_COMPLEX_FLOAT64X_COMPLEX_FLOAT64X,
+ BT_COMPLEX_FLOAT64X, BT_COMPLEX_FLOAT64X, BT_COMPLEX_FLOAT64X)
+DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_FLOAT128X_COMPLEX_FLOAT128X_COMPLEX_FLOAT128X,
+ BT_COMPLEX_FLOAT128X, BT_COMPLEX_FLOAT128X, BT_COMPLEX_FLOAT128X)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_PTR, BT_VOID, BT_PTR, BT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_PTR_CONST_STRING,
+ BT_INT, BT_CONST_STRING, BT_PTR_CONST_STRING)
+DEF_FUNCTION_TYPE_2 (BT_FN_SIZE_CONST_PTR_INT, BT_SIZE, BT_CONST_PTR, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_I1_VPTR_I1, BT_I1, BT_VOLATILE_PTR, BT_I1)
+DEF_FUNCTION_TYPE_2 (BT_FN_I2_VPTR_I2, BT_I2, BT_VOLATILE_PTR, BT_I2)
+DEF_FUNCTION_TYPE_2 (BT_FN_I4_VPTR_I4, BT_I4, BT_VOLATILE_PTR, BT_I4)
+DEF_FUNCTION_TYPE_2 (BT_FN_I8_VPTR_I8, BT_I8, BT_VOLATILE_PTR, BT_I8)
+DEF_FUNCTION_TYPE_2 (BT_FN_I16_VPTR_I16, BT_I16, BT_VOLATILE_PTR, BT_I16)
+DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_LONGPTR_LONGPTR,
+ BT_BOOL, BT_PTR_LONG, BT_PTR_LONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR,
+ BT_BOOL, BT_PTR_ULONGLONG, BT_PTR_ULONGLONG)
+DEF_FUNCTION_TYPE_2 (BT_FN_I1_CONST_VPTR_INT, BT_I1, BT_CONST_VOLATILE_PTR,
+ BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_I2_CONST_VPTR_INT, BT_I2, BT_CONST_VOLATILE_PTR,
+ BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_I4_CONST_VPTR_INT, BT_I4, BT_CONST_VOLATILE_PTR,
+ BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_I8_CONST_VPTR_INT, BT_I8, BT_CONST_VOLATILE_PTR,
+ BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_I16_CONST_VPTR_INT, BT_I16, BT_CONST_VOLATILE_PTR,
+ BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_INT, BT_VOID, BT_VOLATILE_PTR, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_VPTR_INT, BT_BOOL, BT_VOLATILE_PTR, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_SIZE_CONST_VPTR, BT_BOOL, BT_SIZE,
+ BT_CONST_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_INT_BOOL, BT_BOOL, BT_INT, BT_BOOL)
+DEF_FUNCTION_TYPE_2 (BT_FN_UINT_UINT_PTR, BT_UINT, BT_UINT, BT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_UINT_UINT_CONST_PTR, BT_UINT, BT_UINT, BT_CONST_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_PTR_CONST_PTR_SIZE, BT_PTR, BT_CONST_PTR, BT_SIZE)
+DEF_FUNCTION_TYPE_2 (BT_FN_PTR_CONST_PTR_CONST_PTR, BT_PTR, BT_CONST_PTR, BT_CONST_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTRPTR_CONST_PTR, BT_VOID, BT_PTR_PTR, BT_CONST_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_CONST_PTR_SIZE, BT_VOID, BT_CONST_PTR, BT_SIZE)
+DEF_FUNCTION_TYPE_2 (BT_FN_CONST_PTR_CONST_PTR_CONST_PTR, BT_CONST_PTR, BT_CONST_PTR, BT_CONST_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_UINT32_UINT64_PTR,
+ BT_UINT32, BT_UINT64, BT_PTR)
+DEF_FUNCTION_TYPE_2 (BT_FN_INT_FEXCEPT_T_PTR_INT, BT_INT, BT_FEXCEPT_T_PTR,
+ BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_FEXCEPT_T_PTR_INT, BT_INT,
+ BT_CONST_FEXCEPT_T_PTR, BT_INT)
+DEF_FUNCTION_TYPE_2 (BT_FN_PTR_CONST_PTR_UINT8, BT_PTR, BT_CONST_PTR, BT_UINT8)
+
+DEF_POINTER_TYPE (BT_PTR_FN_VOID_PTR_PTR, BT_FN_VOID_PTR_PTR)
+
+DEF_FUNCTION_TYPE_3 (BT_FN_STRING_STRING_CONST_STRING_SIZE,
+ BT_STRING, BT_STRING, BT_CONST_STRING, BT_SIZE)
+DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_STRING_CONST_STRING_SIZE,
+ BT_INT, BT_CONST_STRING, BT_CONST_STRING, BT_SIZE)
+DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_CONST_PTR_SIZE,
+ BT_PTR, BT_PTR, BT_CONST_PTR, BT_SIZE)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_CONST_PTR_SIZE,
+ BT_VOID, BT_PTR, BT_CONST_PTR, BT_SIZE)
+DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_PTR_CONST_PTR_SIZE,
+ BT_INT, BT_CONST_PTR, BT_CONST_PTR, BT_SIZE)
+DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_INT_SIZE,
+ BT_PTR, BT_PTR, BT_INT, BT_SIZE)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_INT_SIZE,
+ BT_VOID, BT_PTR, BT_INT, BT_SIZE)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_INT_INT,
+ BT_VOID, BT_PTR, BT_INT, BT_INT)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_CONST_PTR_PTR_SIZE,
+ BT_VOID, BT_CONST_PTR, BT_PTR, BT_SIZE)
+DEF_FUNCTION_TYPE_3 (BT_FN_INT_STRING_CONST_STRING_VALIST_ARG,
+ BT_INT, BT_STRING, BT_CONST_STRING, BT_VALIST_ARG)
+DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_STRING_CONST_STRING_VALIST_ARG,
+ BT_INT, BT_CONST_STRING, BT_CONST_STRING, BT_VALIST_ARG)
+DEF_FUNCTION_TYPE_3 (BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG,
+ BT_INT, BT_FILEPTR, BT_CONST_STRING, BT_VALIST_ARG)
+DEF_FUNCTION_TYPE_3 (BT_FN_INT_INT_UINT_UINT,
+ BT_INT, BT_INT, BT_UINT, BT_UINT)
+DEF_FUNCTION_TYPE_3 (BT_FN_UINT_UINT_UINT_UINT,
+ BT_UINT, BT_UINT, BT_UINT, BT_UINT)
+DEF_FUNCTION_TYPE_3 (BT_FN_UINT_UINT_UINT_PTR,
+ BT_UINT, BT_UINT, BT_UINT, BT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_UINT_ULONG_ULONG_UINT,
+ BT_UINT, BT_ULONG, BT_ULONG, BT_UINT)
+DEF_FUNCTION_TYPE_3 (BT_FN_ULONG_ULONG_ULONG_ULONG,
+ BT_ULONG, BT_ULONG, BT_ULONG, BT_ULONG)
+DEF_FUNCTION_TYPE_3 (BT_FN_LONG_LONG_UINT_UINT,
+ BT_LONG, BT_LONG, BT_UINT, BT_UINT)
+DEF_FUNCTION_TYPE_3 (BT_FN_LONG_LONG_LONG_DOUBLE,
+ BT_LONG, BT_LONG, BT_LONG, BT_DOUBLE)
+DEF_FUNCTION_TYPE_3 (BT_FN_ULONG_ULONG_UINT_UINT,
+ BT_ULONG, BT_ULONG, BT_UINT, BT_UINT)
+DEF_FUNCTION_TYPE_3 (BT_FN_STRING_CONST_STRING_CONST_STRING_INT,
+ BT_STRING, BT_CONST_STRING, BT_CONST_STRING, BT_INT)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT_FLOAT_FLOAT_FLOAT,
+ BT_FLOAT, BT_FLOAT, BT_FLOAT, BT_FLOAT)
+DEF_FUNCTION_TYPE_3 (BT_FN_DOUBLE_DOUBLE_DOUBLE_DOUBLE,
+ BT_DOUBLE, BT_DOUBLE, BT_DOUBLE, BT_DOUBLE)
+DEF_FUNCTION_TYPE_3 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE,
+ BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT16_FLOAT16_FLOAT16_FLOAT16,
+ BT_FLOAT16, BT_FLOAT16, BT_FLOAT16, BT_FLOAT16)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT32_FLOAT32_FLOAT32_FLOAT32,
+ BT_FLOAT32, BT_FLOAT32, BT_FLOAT32, BT_FLOAT32)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT64_FLOAT64_FLOAT64_FLOAT64,
+ BT_FLOAT64, BT_FLOAT64, BT_FLOAT64, BT_FLOAT64)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT128_FLOAT128_FLOAT128_FLOAT128,
+ BT_FLOAT128, BT_FLOAT128, BT_FLOAT128, BT_FLOAT128)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT32X_FLOAT32X_FLOAT32X_FLOAT32X,
+ BT_FLOAT32X, BT_FLOAT32X, BT_FLOAT32X, BT_FLOAT32X)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT64X_FLOAT64X_FLOAT64X_FLOAT64X,
+ BT_FLOAT64X, BT_FLOAT64X, BT_FLOAT64X, BT_FLOAT64X)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT128X_FLOAT128X_FLOAT128X_FLOAT128X,
+ BT_FLOAT128X, BT_FLOAT128X, BT_FLOAT128X, BT_FLOAT128X)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT_FLOAT_FLOAT_INTPTR,
+ BT_FLOAT, BT_FLOAT, BT_FLOAT, BT_INT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_DOUBLE_DOUBLE_DOUBLE_INTPTR,
+ BT_DOUBLE, BT_DOUBLE, BT_DOUBLE, BT_INT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_INTPTR,
+ BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT16_FLOAT16_FLOAT16_INTPTR,
+ BT_FLOAT16, BT_FLOAT16, BT_FLOAT16, BT_INT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT32_FLOAT32_FLOAT32_INTPTR,
+ BT_FLOAT32, BT_FLOAT32, BT_FLOAT32, BT_INT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT64_FLOAT64_FLOAT64_INTPTR,
+ BT_FLOAT64, BT_FLOAT64, BT_FLOAT64, BT_INT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT128_FLOAT128_FLOAT128_INTPTR,
+ BT_FLOAT128, BT_FLOAT128, BT_FLOAT128, BT_INT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT32X_FLOAT32X_FLOAT32X_INTPTR,
+ BT_FLOAT32X, BT_FLOAT32X, BT_FLOAT32X, BT_INT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT64X_FLOAT64X_FLOAT64X_INTPTR,
+ BT_FLOAT64X, BT_FLOAT64X, BT_FLOAT64X, BT_INT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT128X_FLOAT128X_FLOAT128X_INTPTR,
+ BT_FLOAT128X, BT_FLOAT128X, BT_FLOAT128X, BT_INT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_FLOAT_FLOATPTR_FLOATPTR,
+ BT_VOID, BT_FLOAT, BT_FLOAT_PTR, BT_FLOAT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_DOUBLE_DOUBLEPTR_DOUBLEPTR,
+ BT_VOID, BT_DOUBLE, BT_DOUBLE_PTR, BT_DOUBLE_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_LONGDOUBLE_LONGDOUBLEPTR_LONGDOUBLEPTR,
+ BT_VOID, BT_LONGDOUBLE, BT_LONGDOUBLE_PTR, BT_LONGDOUBLE_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_PTR_PTR, BT_VOID, BT_PTR, BT_PTR, BT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_PTR_UINT32, BT_VOID, BT_PTR, BT_PTR, BT_UINT32)
+DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_STRING_PTR_CONST_STRING_PTR_CONST_STRING,
+ BT_INT, BT_CONST_STRING, BT_PTR_CONST_STRING, BT_PTR_CONST_STRING)
+DEF_FUNCTION_TYPE_3 (BT_FN_INT_INT_CONST_STRING_VALIST_ARG,
+ BT_INT, BT_INT, BT_CONST_STRING, BT_VALIST_ARG)
+DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I1_I1, BT_BOOL, BT_VOLATILE_PTR,
+ BT_I1, BT_I1)
+DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I2_I2, BT_BOOL, BT_VOLATILE_PTR,
+ BT_I2, BT_I2)
+DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I4_I4, BT_BOOL, BT_VOLATILE_PTR,
+ BT_I4, BT_I4)
+DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I8_I8, BT_BOOL, BT_VOLATILE_PTR,
+ BT_I8, BT_I8)
+DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_VPTR_I16_I16, BT_BOOL, BT_VOLATILE_PTR,
+ BT_I16, BT_I16)
+DEF_FUNCTION_TYPE_3 (BT_FN_I1_VPTR_I1_I1, BT_I1, BT_VOLATILE_PTR, BT_I1, BT_I1)
+DEF_FUNCTION_TYPE_3 (BT_FN_I2_VPTR_I2_I2, BT_I2, BT_VOLATILE_PTR, BT_I2, BT_I2)
+DEF_FUNCTION_TYPE_3 (BT_FN_I4_VPTR_I4_I4, BT_I4, BT_VOLATILE_PTR, BT_I4, BT_I4)
+DEF_FUNCTION_TYPE_3 (BT_FN_I8_VPTR_I8_I8, BT_I8, BT_VOLATILE_PTR, BT_I8, BT_I8)
+DEF_FUNCTION_TYPE_3 (BT_FN_I16_VPTR_I16_I16, BT_I16, BT_VOLATILE_PTR,
+ BT_I16, BT_I16)
+DEF_FUNCTION_TYPE_3 (BT_FN_PTR_CONST_PTR_INT_SIZE, BT_PTR,
+ BT_CONST_PTR, BT_INT, BT_SIZE)
+DEF_FUNCTION_TYPE_3 (BT_FN_I1_VPTR_I1_INT, BT_I1, BT_VOLATILE_PTR, BT_I1, BT_INT)
+DEF_FUNCTION_TYPE_3 (BT_FN_I2_VPTR_I2_INT, BT_I2, BT_VOLATILE_PTR, BT_I2, BT_INT)
+DEF_FUNCTION_TYPE_3 (BT_FN_I4_VPTR_I4_INT, BT_I4, BT_VOLATILE_PTR, BT_I4, BT_INT)
+DEF_FUNCTION_TYPE_3 (BT_FN_I8_VPTR_I8_INT, BT_I8, BT_VOLATILE_PTR, BT_I8, BT_INT)
+DEF_FUNCTION_TYPE_3 (BT_FN_I16_VPTR_I16_INT, BT_I16, BT_VOLATILE_PTR, BT_I16, BT_INT)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_VPTR_I1_INT, BT_VOID, BT_VOLATILE_PTR, BT_I1, BT_INT)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_VPTR_I2_INT, BT_VOID, BT_VOLATILE_PTR, BT_I2, BT_INT)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_VPTR_I4_INT, BT_VOID, BT_VOLATILE_PTR, BT_I4, BT_INT)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_VPTR_I8_INT, BT_VOID, BT_VOLATILE_PTR, BT_I8, BT_INT)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_VPTR_I16_INT, BT_VOID, BT_VOLATILE_PTR, BT_I16, BT_INT)
+DEF_FUNCTION_TYPE_3 (BT_FN_INT_PTRPTR_SIZE_SIZE, BT_INT, BT_PTR_PTR, BT_SIZE, BT_SIZE)
+DEF_FUNCTION_TYPE_3 (BT_FN_PTR_CONST_PTR_CONST_PTR_SIZE, BT_PTR, BT_CONST_PTR, BT_CONST_PTR, BT_SIZE)
+DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_INT_INT_INTPTR, BT_BOOL, BT_INT, BT_INT,
+ BT_INT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_LONG_LONG_LONGPTR, BT_BOOL, BT_LONG, BT_LONG,
+ BT_PTR_LONG)
+DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_LONGLONG_LONGLONG_LONGLONGPTR, BT_BOOL,
+ BT_LONGLONG, BT_LONGLONG, BT_PTR_LONGLONG)
+DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_UINT_UINT_UINTPTR, BT_BOOL, BT_UINT, BT_UINT,
+ BT_PTR_UINT)
+DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_ULONG_ULONG_ULONGPTR, BT_BOOL, BT_ULONG,
+ BT_ULONG, BT_PTR_ULONG)
+DEF_FUNCTION_TYPE_3 (BT_FN_BOOL_ULONGLONG_ULONGLONG_ULONGLONGPTR, BT_BOOL,
+ BT_ULONGLONG, BT_ULONGLONG, BT_PTR_ULONGLONG)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_UINT32_UINT64_PTR,
+ BT_VOID, BT_UINT32, BT_UINT64, BT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_UINT32_UINT32_PTR,
+ BT_VOID, BT_UINT32, BT_UINT32, BT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_SIZE_SIZE_PTR, BT_VOID, BT_SIZE, BT_SIZE,
+ BT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_UINT_UINT_PTR_PTR, BT_UINT, BT_UINT, BT_PTR, BT_PTR)
+DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_CONST_SIZE_BOOL,
+ BT_PTR, BT_PTR, BT_CONST_SIZE, BT_BOOL)
+DEF_FUNCTION_TYPE_3 (BT_FN_PTR_SIZE_SIZE_PTRMODE,
+ BT_PTR, BT_SIZE, BT_SIZE, BT_PTRMODE)
+DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_UINT8_PTRMODE, BT_VOID, BT_PTR, BT_UINT8,
+ BT_PTRMODE)
+
+DEF_FUNCTION_TYPE_4 (BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR,
+ BT_SIZE, BT_CONST_PTR, BT_SIZE, BT_SIZE, BT_FILEPTR)
+DEF_FUNCTION_TYPE_4 (BT_FN_INT_STRING_SIZE_CONST_STRING_VALIST_ARG,
+ BT_INT, BT_STRING, BT_SIZE, BT_CONST_STRING, BT_VALIST_ARG)
+DEF_FUNCTION_TYPE_4 (BT_FN_SIZE_STRING_SIZE_CONST_STRING_CONST_TM_PTR,
+ BT_SIZE, BT_STRING, BT_SIZE, BT_CONST_STRING, BT_CONST_TM_PTR)
+DEF_FUNCTION_TYPE_4 (BT_FN_PTR_PTR_CONST_PTR_SIZE_SIZE,
+ BT_PTR, BT_PTR, BT_CONST_PTR, BT_SIZE, BT_SIZE)
+DEF_FUNCTION_TYPE_4 (BT_FN_PTR_PTR_INT_SIZE_SIZE,
+ BT_PTR, BT_PTR, BT_INT, BT_SIZE, BT_SIZE)
+DEF_FUNCTION_TYPE_4 (BT_FN_UINT_UINT_UINT_UINT_UINT,
+ BT_UINT, BT_UINT, BT_UINT, BT_UINT, BT_UINT)
+DEF_FUNCTION_TYPE_4 (BT_FN_UINT_FLOAT_FLOAT_FLOAT_FLOAT,
+ BT_UINT, BT_FLOAT, BT_FLOAT, BT_FLOAT, BT_FLOAT)
+DEF_FUNCTION_TYPE_4 (BT_FN_ULONG_ULONG_ULONG_UINT_UINT,
+ BT_ULONG, BT_ULONG, BT_ULONG, BT_UINT, BT_UINT)
+DEF_FUNCTION_TYPE_4 (BT_FN_STRING_STRING_CONST_STRING_SIZE_SIZE,
+ BT_STRING, BT_STRING, BT_CONST_STRING, BT_SIZE, BT_SIZE)
+DEF_FUNCTION_TYPE_4 (BT_FN_INT_FILEPTR_INT_CONST_STRING_VALIST_ARG,
+ BT_INT, BT_FILEPTR, BT_INT, BT_CONST_STRING, BT_VALIST_ARG)
+DEF_FUNCTION_TYPE_4 (BT_FN_VOID_OMPFN_PTR_UINT_UINT,
+ BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT, BT_UINT)
+DEF_FUNCTION_TYPE_4 (BT_FN_UINT_OMPFN_PTR_UINT_UINT,
+ BT_UINT, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT, BT_UINT)
+DEF_FUNCTION_TYPE_4 (BT_FN_VOID_PTR_WORD_WORD_PTR,
+ BT_VOID, BT_PTR, BT_WORD, BT_WORD, BT_PTR)
+DEF_FUNCTION_TYPE_4 (BT_FN_VOID_SIZE_VPTR_PTR_INT, BT_VOID, BT_SIZE,
+ BT_VOLATILE_PTR, BT_PTR, BT_INT)
+DEF_FUNCTION_TYPE_4 (BT_FN_VOID_SIZE_CONST_VPTR_PTR_INT, BT_VOID, BT_SIZE,
+ BT_CONST_VOLATILE_PTR, BT_PTR, BT_INT)
+DEF_FUNCTION_TYPE_4 (BT_FN_BOOL_UINT_LONGPTR_LONGPTR_LONGPTR,
+ BT_BOOL, BT_UINT, BT_PTR_LONG, BT_PTR_LONG, BT_PTR_LONG)
+DEF_FUNCTION_TYPE_4 (BT_FN_BOOL_UINT_ULLPTR_ULLPTR_ULLPTR,
+ BT_BOOL, BT_UINT, BT_PTR_ULONGLONG, BT_PTR_ULONGLONG,
+ BT_PTR_ULONGLONG)
+DEF_FUNCTION_TYPE_4 (BT_FN_VOID_UINT_PTR_INT_PTR, BT_VOID, BT_INT, BT_PTR,
+ BT_INT, BT_PTR)
+DEF_FUNCTION_TYPE_4 (BT_FN_BOOL_UINT_UINT_UINT_BOOL,
+ BT_BOOL, BT_UINT, BT_UINT, BT_UINT, BT_BOOL)
+
+DEF_FUNCTION_TYPE_5 (BT_FN_INT_STRING_INT_SIZE_CONST_STRING_VALIST_ARG,
+ BT_INT, BT_STRING, BT_INT, BT_SIZE, BT_CONST_STRING,
+ BT_VALIST_ARG)
+DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ BT_BOOL, BT_LONG, BT_LONG, BT_LONG,
+ BT_PTR_LONG, BT_PTR_LONG)
+DEF_FUNCTION_TYPE_5 (BT_FN_VOID_SIZE_VPTR_PTR_PTR_INT, BT_VOID, BT_SIZE,
+ BT_VOLATILE_PTR, BT_PTR, BT_PTR, BT_INT)
+DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_VPTR_PTR_I1_INT_INT,
+ BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I1, BT_INT, BT_INT)
+DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_VPTR_PTR_I2_INT_INT,
+ BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I2, BT_INT, BT_INT)
+DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_VPTR_PTR_I4_INT_INT,
+ BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I4, BT_INT, BT_INT)
+DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_VPTR_PTR_I8_INT_INT,
+ BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I8, BT_INT, BT_INT)
+DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_VPTR_PTR_I16_INT_INT,
+ BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I16, BT_INT, BT_INT)
+DEF_FUNCTION_TYPE_5 (BT_FN_VOID_INT_SIZE_PTR_PTR_PTR,
+ BT_VOID, BT_INT, BT_SIZE, BT_PTR, BT_PTR, BT_PTR)
+DEF_FUNCTION_TYPE_5 (BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT,
+ BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT, BT_UINT,
+ BT_UINT)
+DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_UINT_LONGPTR_LONG_LONGPTR_LONGPTR,
+ BT_BOOL, BT_UINT, BT_PTR_LONG, BT_LONG, BT_PTR_LONG,
+ BT_PTR_LONG)
+DEF_FUNCTION_TYPE_5 (BT_FN_BOOL_UINT_ULLPTR_ULL_ULLPTR_ULLPTR,
+ BT_BOOL, BT_UINT, BT_PTR_ULONGLONG, BT_ULONGLONG,
+ BT_PTR_ULONGLONG, BT_PTR_ULONGLONG)
+
+DEF_FUNCTION_TYPE_6 (BT_FN_INT_STRING_SIZE_INT_SIZE_CONST_STRING_VALIST_ARG,
+ BT_INT, BT_STRING, BT_SIZE, BT_INT, BT_SIZE,
+ BT_CONST_STRING, BT_VALIST_ARG)
+DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ BT_BOOL, BT_LONG, BT_LONG, BT_LONG, BT_LONG,
+ BT_PTR_LONG, BT_PTR_LONG)
+DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ BT_BOOL, BT_BOOL, BT_ULONGLONG, BT_ULONGLONG,
+ BT_ULONGLONG, BT_PTR_ULONGLONG, BT_PTR_ULONGLONG)
+DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I1_BOOL_INT_INT,
+ BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I1, BT_BOOL, BT_INT,
+ BT_INT)
+DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I2_BOOL_INT_INT,
+ BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I2, BT_BOOL, BT_INT,
+ BT_INT)
+DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I4_BOOL_INT_INT,
+ BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I4, BT_BOOL, BT_INT,
+ BT_INT)
+DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I8_BOOL_INT_INT,
+ BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I8, BT_BOOL, BT_INT,
+ BT_INT)
+DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_VPTR_PTR_I16_BOOL_INT_INT,
+ BT_BOOL, BT_VOLATILE_PTR, BT_PTR, BT_I16, BT_BOOL, BT_INT,
+ BT_INT)
+DEF_FUNCTION_TYPE_6 (BT_FN_BOOL_SIZE_VPTR_PTR_PTR_INT_INT, BT_BOOL, BT_SIZE,
+ BT_VOLATILE_PTR, BT_PTR, BT_PTR, BT_INT, BT_INT)
+
+DEF_FUNCTION_TYPE_7 (BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT,
+ BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT,
+ BT_LONG, BT_LONG, BT_LONG, BT_UINT)
+DEF_FUNCTION_TYPE_7 (BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ BT_BOOL, BT_BOOL, BT_ULONGLONG, BT_ULONGLONG,
+ BT_ULONGLONG, BT_ULONGLONG,
+ BT_PTR_ULONGLONG, BT_PTR_ULONGLONG)
+DEF_FUNCTION_TYPE_7 (BT_FN_VOID_INT_SIZE_PTR_PTR_PTR_UINT_PTR,
+ BT_VOID, BT_INT, BT_SIZE, BT_PTR, BT_PTR, BT_PTR, BT_UINT,
+ BT_PTR)
+
+DEF_FUNCTION_TYPE_8 (BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
+ BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR, BT_UINT,
+ BT_LONG, BT_LONG, BT_LONG, BT_LONG, BT_UINT)
+DEF_FUNCTION_TYPE_8 (BT_FN_BOOL_UINT_LONGPTR_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR,
+ BT_BOOL, BT_UINT, BT_PTR_LONG, BT_LONG, BT_LONG,
+ BT_PTR_LONG, BT_PTR_LONG, BT_PTR, BT_PTR)
+DEF_FUNCTION_TYPE_8 (BT_FN_BOOL_UINT_ULLPTR_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR,
+ BT_BOOL, BT_UINT, BT_PTR_ULONGLONG, BT_LONG, BT_ULONGLONG,
+ BT_PTR_ULONGLONG, BT_PTR_ULONGLONG, BT_PTR, BT_PTR)
+
+DEF_FUNCTION_TYPE_9 (BT_FN_VOID_INT_OMPFN_SIZE_PTR_PTR_PTR_UINT_PTR_PTR,
+ BT_VOID, BT_INT, BT_PTR_FN_VOID_PTR, BT_SIZE, BT_PTR,
+ BT_PTR, BT_PTR, BT_UINT, BT_PTR, BT_PTR)
+DEF_FUNCTION_TYPE_9 (BT_FN_BOOL_LONG_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR,
+ BT_BOOL, BT_LONG, BT_LONG, BT_LONG, BT_LONG, BT_LONG,
+ BT_PTR_LONG, BT_PTR_LONG, BT_PTR, BT_PTR)
+
+DEF_FUNCTION_TYPE_10 (BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR_INT_PTR,
+ BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR,
+ BT_PTR_FN_VOID_PTR_PTR, BT_LONG, BT_LONG,
+ BT_BOOL, BT_UINT, BT_PTR, BT_INT, BT_PTR)
+DEF_FUNCTION_TYPE_10 (BT_FN_BOOL_BOOL_ULL_ULL_ULL_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR,
+ BT_BOOL, BT_BOOL, BT_ULONGLONG, BT_ULONGLONG,
+ BT_ULONGLONG, BT_LONG, BT_ULONGLONG, BT_PTR_ULONGLONG,
+ BT_PTR_ULONGLONG, BT_PTR, BT_PTR)
+
+DEF_FUNCTION_TYPE_11 (BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_UINT_LONG_INT_LONG_LONG_LONG,
+ BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR,
+ BT_PTR_FN_VOID_PTR_PTR, BT_LONG, BT_LONG,
+ BT_UINT, BT_LONG, BT_INT, BT_LONG, BT_LONG, BT_LONG)
+DEF_FUNCTION_TYPE_11 (BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_UINT_LONG_INT_ULL_ULL_ULL,
+ BT_VOID, BT_PTR_FN_VOID_PTR, BT_PTR,
+ BT_PTR_FN_VOID_PTR_PTR, BT_LONG, BT_LONG,
+ BT_UINT, BT_LONG, BT_INT,
+ BT_ULONGLONG, BT_ULONGLONG, BT_ULONGLONG)
+
+DEF_FUNCTION_TYPE_VAR_0 (BT_FN_VOID_VAR, BT_VOID)
+DEF_FUNCTION_TYPE_VAR_0 (BT_FN_INT_VAR, BT_INT)
+DEF_FUNCTION_TYPE_VAR_0 (BT_FN_PTR_VAR, BT_PTR)
+DEF_FUNCTION_TYPE_VAR_0 (BT_FN_BOOL_VAR, BT_BOOL)
+
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_VALIST_REF_VAR,
+ BT_VOID, BT_VALIST_REF)
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_CONST_PTR_VAR,
+ BT_VOID, BT_CONST_PTR)
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_INT_CONST_STRING_VAR,
+ BT_INT, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_UINT32_UINT32_VAR,
+ BT_UINT32, BT_UINT32)
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_LONG_VAR,
+ BT_VOID, BT_LONG)
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_ULL_VAR,
+ BT_VOID, BT_ULONGLONG)
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_PTR_PTR_VAR, BT_PTR, BT_PTR)
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_I1_I1_VAR, BT_I1, BT_I1)
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_I2_I2_VAR, BT_I2, BT_I2)
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_I4_I4_VAR, BT_I4, BT_I4)
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_I8_I8_VAR, BT_I8, BT_I8)
+DEF_FUNCTION_TYPE_VAR_1 (BT_FN_I16_I16_VAR, BT_I16, BT_I16)
+
+DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_FILEPTR_CONST_STRING_VAR,
+ BT_INT, BT_FILEPTR, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_STRING_CONST_STRING_VAR,
+ BT_INT, BT_STRING, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_CONST_STRING_CONST_STRING_VAR,
+ BT_INT, BT_CONST_STRING, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_INT_CONST_STRING_VAR,
+ BT_INT, BT_INT, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_VAR_2 (BT_FN_PTR_CONST_PTR_SIZE_VAR, BT_PTR,
+ BT_CONST_PTR, BT_SIZE)
+DEF_FUNCTION_TYPE_VAR_2 (BT_FN_VOID_INT_INT_VAR, BT_VOID,
+ BT_INT, BT_INT)
+
+DEF_FUNCTION_TYPE_VAR_3 (BT_FN_INT_STRING_SIZE_CONST_STRING_VAR,
+ BT_INT, BT_STRING, BT_SIZE, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_VAR_3 (BT_FN_SSIZE_STRING_SIZE_CONST_STRING_VAR,
+ BT_SSIZE, BT_STRING, BT_SIZE, BT_CONST_STRING)
+DEF_FUNCTION_TYPE_VAR_3 (BT_FN_INT_FILEPTR_INT_CONST_STRING_VAR,
+ BT_INT, BT_FILEPTR, BT_INT, BT_CONST_STRING)
+
+DEF_FUNCTION_TYPE_VAR_4 (BT_FN_INT_STRING_INT_SIZE_CONST_STRING_VAR,
+ BT_INT, BT_STRING, BT_INT, BT_SIZE, BT_CONST_STRING)
+
+DEF_FUNCTION_TYPE_VAR_5 (BT_FN_INT_STRING_SIZE_INT_SIZE_CONST_STRING_VAR,
+ BT_INT, BT_STRING, BT_SIZE, BT_INT, BT_SIZE,
+ BT_CONST_STRING)
+
+DEF_FUNCTION_TYPE_VAR_5 (BT_FN_INT_INT_INT_INT_INT_INT_VAR,
+ BT_INT, BT_INT, BT_INT, BT_INT, BT_INT, BT_INT)
+
+DEF_FUNCTION_TYPE_VAR_6 (BT_FN_VOID_INT_OMPFN_SIZE_PTR_PTR_PTR_VAR,
+ BT_VOID, BT_INT, BT_PTR_FN_VOID_PTR, BT_SIZE,
+ BT_PTR, BT_PTR, BT_PTR)
+
+DEF_FUNCTION_TYPE_VAR_7 (BT_FN_VOID_INT_SIZE_PTR_PTR_PTR_INT_INT_VAR,
+ BT_VOID, BT_INT, BT_SIZE, BT_PTR, BT_PTR,
+ BT_PTR, BT_INT, BT_INT)
+
+DEF_POINTER_TYPE (BT_PTR_FN_VOID_VAR, BT_FN_VOID_VAR)
+DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE,
+ BT_PTR, BT_PTR_FN_VOID_VAR, BT_PTR, BT_SIZE)
+
+
+DEF_FUNCTION_TYPE_1 (BT_FN_I1_VPTR, BT_I1, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_I2_VPTR, BT_I2, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_I4_VPTR, BT_I4, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_I8_VPTR, BT_I8, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_VPTR, BT_FLOAT, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_CONST_DOUBLE_PTR, BT_DOUBLE, BT_DOUBLE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_LDOUBLE_VPTR, BT_LONGDOUBLE, BT_VOLATILE_PTR)
+
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I1, BT_VOID, BT_VOLATILE_PTR, BT_I1)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I2, BT_VOID, BT_VOLATILE_PTR, BT_I2)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I4, BT_VOID, BT_VOLATILE_PTR, BT_I4)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I8, BT_VOID, BT_VOLATILE_PTR, BT_I8)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_FLOAT, BT_VOID, BT_VOLATILE_PTR, BT_FLOAT)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_DOUBLE, BT_VOID,
+ BT_VOLATILE_PTR, BT_DOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_LDOUBLE, BT_VOID,
+ BT_VOLATILE_PTR, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_SIZE, BT_VOID,
+ BT_VOLATILE_PTR, BT_SIZE)
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtins.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtins.def
new file mode 100644
index 0000000..4ad95a1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtins.def
@@ -0,0 +1,1190 @@
+/* This file contains the definitions and documentation for the
+ builtins used in the GNU compiler.
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Before including this file, you should define a macro:
+
+ DEF_BUILTIN (ENUM, NAME, CLASS, TYPE, LIBTYPE, BOTH_P,
+ FALLBACK_P, NONANSI_P, ATTRS, IMPLICIT, COND)
+
+ This macro will be called once for each builtin function. The
+ ENUM will be of type `enum built_in_function', and will indicate
+ which builtin function is being processed. The NAME of the builtin
+ function (which will always start with `__builtin_') is a string
+ literal. The CLASS is of type `enum built_in_class' and indicates
+ what kind of builtin is being processed.
+
+ Some builtins are actually two separate functions. For example,
+ for `strcmp' there are two builtin functions: `__builtin_strcmp'
+ and `strcmp' itself. Both behave identically. Other builtins
+ define only the `__builtin' variant. If BOTH_P is TRUE, then this
+ builtin has both variants; otherwise, it has only the first
+ variant.
+
+ TYPE indicates the type of the function. The symbols correspond to
+ enumerators from builtin-types.def. If BOTH_P is true, then LIBTYPE
+ is the type of the non-`__builtin_' variant. Otherwise, LIBTYPE
+ should be ignored.
+
+ If FALLBACK_P is true and, for some reason, the compiler cannot
+ expand the builtin function directly, it will call the
+ corresponding library function (which does not have the
+ `__builtin_' prefix).
+
+ If NONANSI_P is true, then the non-`__builtin_' variant is not an
+ ANSI/ISO library function, and so we should pretend it does not
+ exist when compiling in ANSI-conformant mode.
+
+ ATTRS is an attribute list as defined in builtin-attrs.def that
+ describes the attributes of this builtin function.
+
+ IMPLICIT specifies the condition under which the builtin can be
+ produced implicitly by the compiler. For instance, C90 reserves the
+ name floorf but does not define its meaning. When the user calls
+ floorf we may assume it has the meaning we expect, but we can't
+ produce floorf implicitly by simplifying floor ((double) f) since
+ the runtime need not implement it.
+
+ The builtin is registered only if COND is true. */
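This is the classic X-macro pattern: the includer supplies DEF_BUILTIN, then pulls in this whole file, and every entry expands through that single macro. A simplified sketch of a consumer, modeled on how GCC's own tree-core.h builds `enum built_in_function' (END_BUILTINS follows that usage as a sentinel):

    #define DEF_BUILTIN(ENUM, NAME, CLASS, TYPE, LIBTYPE, BOTH_P, \
                        FALLBACK_P, NONANSI_P, ATTRS, IMPLICIT, COND) \
      ENUM,
    enum built_in_function {
    #include "builtins.def"
      END_BUILTINS  /* sentinel enumerator */
    };
    #undef DEF_BUILTIN

Because this expansion uses only ENUM, the other arguments, including COND (which may reference compiler-internal flags), are discarded during preprocessing, so the resulting enum is self-contained.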
+
+/* A GCC builtin (like __builtin_saveregs) is provided by the
+ compiler, but does not correspond to a function in the standard
+ library. */
+#undef DEF_GCC_BUILTIN
+#define DEF_GCC_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, BT_LAST, \
+ false, false, false, ATTRS, true, true)
+
+/* Like DEF_GCC_BUILTIN, except we don't prepend "__builtin_". */
+#undef DEF_SYNC_BUILTIN
+#define DEF_SYNC_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, NAME, BUILT_IN_NORMAL, TYPE, BT_LAST, \
+ false, false, false, ATTRS, true, true)
+
+/* A set of GCC builtins for _FloatN and _FloatNx types. TYPE_MACRO
+ is called with an argument such as FLOAT32 to produce the enum
+ value for the type. */
+#undef DEF_GCC_FLOATN_NX_BUILTINS
+#define DEF_GCC_FLOATN_NX_BUILTINS(ENUM, NAME, TYPE_MACRO, ATTRS) \
+ DEF_GCC_BUILTIN (ENUM ## F16, NAME "f16", TYPE_MACRO (FLOAT16), ATTRS) \
+ DEF_GCC_BUILTIN (ENUM ## F32, NAME "f32", TYPE_MACRO (FLOAT32), ATTRS) \
+ DEF_GCC_BUILTIN (ENUM ## F64, NAME "f64", TYPE_MACRO (FLOAT64), ATTRS) \
+ DEF_GCC_BUILTIN (ENUM ## F128, NAME "f128", TYPE_MACRO (FLOAT128), ATTRS) \
+ DEF_GCC_BUILTIN (ENUM ## F32X, NAME "f32x", TYPE_MACRO (FLOAT32X), ATTRS) \
+ DEF_GCC_BUILTIN (ENUM ## F64X, NAME "f64x", TYPE_MACRO (FLOAT64X), ATTRS) \
+ DEF_GCC_BUILTIN (ENUM ## F128X, NAME "f128x", TYPE_MACRO (FLOAT128X), ATTRS)
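For instance, the huge_val entry later in this file,

    #define HUGE_VAL_TYPE(F) BT_FN_##F
    DEF_GCC_FLOATN_NX_BUILTINS (BUILT_IN_HUGE_VAL, "huge_val",
                                HUGE_VAL_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)

expands into seven DEF_GCC_BUILTIN entries, the first being

    DEF_GCC_BUILTIN (BUILT_IN_HUGE_VALF16, "huge_valf16",
                     BT_FN_FLOAT16, ATTR_CONST_NOTHROW_LEAF_LIST)

and likewise for f32, f64, f128, f32x, f64x and f128x, yielding __builtin_huge_valf16 through __builtin_huge_valf128x.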
+
+/* A library builtin (like __builtin_strchr) is a builtin equivalent
+ of an ANSI/ISO standard library function. In addition to the
+ `__builtin' version, we will create an ordinary version (e.g.,
+ `strchr') as well. If we cannot compute the answer using the
+ builtin function, we will fall back to the standard library
+ version. */
+#undef DEF_LIB_BUILTIN
+#define DEF_LIB_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ true, true, false, ATTRS, true, true)
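For example, acos below is registered with this macro, so both spellings name the same builtin; per FALLBACK_P, a call the compiler cannot expand simply becomes a call to the libm routine. A minimal user-level sketch:

    #include <math.h>

    double f (void)
    {
      double a = __builtin_acos (0.5); /* builtin spelling, always present */
      double b = acos (0.5);           /* ordinary spelling, created because BOTH_P */
      return a + b;
    }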
+
+/* Like DEF_LIB_BUILTIN, except that the function is not one that is
+ specified by ANSI/ISO C. So, when we're being fully conformant we
+ ignore the version of these builtins that does not begin with
+ __builtin. */
+#undef DEF_EXT_LIB_BUILTIN
+#define DEF_EXT_LIB_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ true, true, true, ATTRS, false, true)
+
+/* A set of GCC builtins for _FloatN and _FloatNx types. TYPE_MACRO is called
+ with an argument such as FLOAT32 to produce the enum value for the type. If
+ we are compiling for the C language with GNU extensions, we enable the name
+ without the __builtin_ prefix as well as the name with the __builtin_
+ prefix. C++ does not enable these names by default because a class-based
+ library should use the __builtin_ names. */
+#undef DEF_FLOATN_BUILTIN
+#define DEF_FLOATN_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ targetm.floatn_builtin_p ((int) ENUM), true, true, ATTRS, \
+ false, true)
+#undef DEF_EXT_LIB_FLOATN_NX_BUILTINS
+#define DEF_EXT_LIB_FLOATN_NX_BUILTINS(ENUM, NAME, TYPE_MACRO, ATTRS) \
+ DEF_FLOATN_BUILTIN (ENUM ## F16, NAME "f16", TYPE_MACRO (FLOAT16), ATTRS) \
+ DEF_FLOATN_BUILTIN (ENUM ## F32, NAME "f32", TYPE_MACRO (FLOAT32), ATTRS) \
+ DEF_FLOATN_BUILTIN (ENUM ## F64, NAME "f64", TYPE_MACRO (FLOAT64), ATTRS) \
+ DEF_FLOATN_BUILTIN (ENUM ## F128, NAME "f128", TYPE_MACRO (FLOAT128), ATTRS) \
+ DEF_FLOATN_BUILTIN (ENUM ## F32X, NAME "f32x", TYPE_MACRO (FLOAT32X), ATTRS) \
+ DEF_FLOATN_BUILTIN (ENUM ## F64X, NAME "f64x", TYPE_MACRO (FLOAT64X), ATTRS) \
+ DEF_FLOATN_BUILTIN (ENUM ## F128X, NAME "f128x", TYPE_MACRO (FLOAT128X), \
+ ATTRS)
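The user-visible effect, as a sketch (assuming a target where _Float32 is supported; cbrt is registered through these macros later in this file):

    _Float32 f (_Float32 x)
    {
      _Float32 a = __builtin_cbrtf32 (x); /* always enabled */
      _Float32 b = cbrtf32 (x);           /* plain name enabled only in GNU C */
      return a + b;
    }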
+
+/* Like DEF_LIB_BUILTIN, except that the function is only a part of
+ the standard in C94 or above. */
+#undef DEF_C94_BUILTIN
+#define DEF_C94_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ true, true, !flag_isoc94, ATTRS, \
+ targetm.libc_has_function (function_c94, NULL_TREE), true)
+
+/* Like DEF_LIB_BUILTIN, except that the function is only a part of
+ the standard in C99 or above. */
+#undef DEF_C99_BUILTIN
+#define DEF_C99_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ true, true, !flag_isoc99, ATTRS, \
+ targetm.libc_has_function (function_c99_misc, NULL_TREE), true)
+
+/* Like DEF_LIB_BUILTIN, except that the function is only a part of
+ the standard in C11 or above. */
+#undef DEF_C11_BUILTIN
+#define DEF_C11_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ true, true, !flag_isoc11, ATTRS, \
+ targetm.libc_has_function (function_c11_misc, NULL_TREE), true)
+
+/* Like DEF_LIB_BUILTIN, except that the function is only a part of
+ the standard in C2x or above. */
+#undef DEF_C2X_BUILTIN
+#define DEF_C2X_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ true, true, !flag_isoc2x, ATTRS, \
+ targetm.libc_has_function (function_c2x_misc, NULL_TREE), true)
+
+/* Like DEF_C99_BUILTIN, but for complex math functions. */
+#undef DEF_C99_COMPL_BUILTIN
+#define DEF_C99_COMPL_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ true, true, !flag_isoc99, ATTRS, \
+ targetm.libc_has_function (function_c99_math_complex, \
+ NULL_TREE), \
+ true)
+
+/* Builtin that is specified by C99 and whose name C90 reserves for
+ future use. We can still recognize the builtin in C90 mode but we
+ can't produce it implicitly. */
+#undef DEF_C99_C90RES_BUILTIN
+#define DEF_C99_C90RES_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ true, true, !flag_isoc99, ATTRS, \
+ targetm.libc_has_function (function_c99_misc, NULL_TREE), true)
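floorf below is a typical user of this macro. Matching the IMPLICIT note at the top of the file, an explicit floorf call is recognized as the builtin even in C90 mode, but the compiler never synthesizes one (a sketch of the observable behavior):

    #include <math.h>

    float f (float x) { return floorf (x); }        /* explicit use is recognized */
    float g (float x) { return (float) floor (x); } /* not simplified to floorf in C90 */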
+
+/* Builtin whose name C99 reserves for future use. We can still recognize
+ the builtin in C99 mode but we can't produce it implicitly. */
+#undef DEF_EXT_C99RES_BUILTIN
+#define DEF_EXT_C99RES_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ true, true, true, ATTRS, false, true)
+
+/* Allocate the enum and the name for a builtin, but do not actually
+ define it here at all. */
+#undef DEF_BUILTIN_STUB
+#define DEF_BUILTIN_STUB(ENUM, NAME) \
+ DEF_BUILTIN (ENUM, NAME, BUILT_IN_NORMAL, BT_LAST, BT_LAST, false, false, \
+ false, ATTR_LAST, false, false)
+
+/* Builtins used in implementing coroutine support. */
+#undef DEF_COROUTINE_BUILTIN
+#define DEF_COROUTINE_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_coro_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ true, true, true, ATTRS, true, flag_coroutines)
+
+/* Builtins used by the implementation of OpenACC and OpenMP. Few of these are
+ actually implemented in the compiler; most are in libgomp. */
+#undef DEF_GOACC_BUILTIN
+#define DEF_GOACC_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ false, true, true, ATTRS, false, \
+ flag_openacc)
+#undef DEF_GOACC_BUILTIN_COMPILER
+#define DEF_GOACC_BUILTIN_COMPILER(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ flag_openacc, true, true, ATTRS, false, true)
+#undef DEF_GOACC_BUILTIN_ONLY
+#define DEF_GOACC_BUILTIN_ONLY(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, BT_LAST, \
+ false, false, true, ATTRS, false, flag_openacc)
+#undef DEF_GOMP_BUILTIN
+#define DEF_GOMP_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ false, true, true, ATTRS, false, \
+ (flag_openacc \
+ || flag_openmp \
+ || flag_tree_parallelize_loops > 1))
+
+/* Builtins used by the implementation of GNU TM. These
+ functions are mapped to the actual implementation of the STM library. */
+#undef DEF_TM_BUILTIN
+#define DEF_TM_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, BT_LAST, \
+ false, true, true, ATTRS, false, flag_tm)
+
+/* Builtins used by the implementation of libsanitizer. These
+ functions are mapped to the actual implementation of the
+ libtsan library. */
+#undef DEF_SANITIZER_BUILTIN
+#define DEF_SANITIZER_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ true, true, true, ATTRS, true, \
+ (flag_sanitize & (SANITIZE_ADDRESS | SANITIZE_THREAD \
+ | SANITIZE_HWADDRESS \
+ | SANITIZE_UNDEFINED \
+ | SANITIZE_UNDEFINED_NONDEFAULT) \
+ || flag_sanitize_coverage))
+
+/* Define an attribute list for math functions that are normally
+ "impure" because some of them may write into global memory for
+ `errno'. If !flag_errno_math, they are instead "const". */
+#undef ATTR_MATHFN_ERRNO
+#define ATTR_MATHFN_ERRNO (flag_errno_math ? \
+ ATTR_ERRNOCONST_NOTHROW_LEAF_LIST : ATTR_CONST_NOTHROW_LEAF_LIST)
+
+/* Define an attribute list for math functions that are normally
+ "const" but if flag_rounding_math is set they are instead "pure".
+ This distinction accounts for the fact that some math functions
+ check the rounding mode which is akin to examining global
+ memory. */
+#undef ATTR_MATHFN_FPROUNDING
+#define ATTR_MATHFN_FPROUNDING (flag_rounding_math ? \
+ ATTR_PURE_NOTHROW_LEAF_LIST : ATTR_CONST_NOTHROW_LEAF_LIST)
+
+/* Define an attribute list for math functions that are normally
+ "impure" because some of them may write into global memory for
+ `errno'. If !flag_errno_math, we can possibly use "pure" or
+ "const" depending on whether we care about FP rounding. */
+#undef ATTR_MATHFN_FPROUNDING_ERRNO
+#define ATTR_MATHFN_FPROUNDING_ERRNO (flag_errno_math ? \
+ (flag_rounding_math ? ATTR_ERRNOPURE_NOTHROW_LEAF_LIST \
+ : ATTR_ERRNOCONST_NOTHROW_LEAF_LIST) : ATTR_MATHFN_FPROUNDING)
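Taken together, the two flags select one of four attribute lists (derived directly from the three macros above):

    flag_errno_math  flag_rounding_math  resulting attribute list
    0                0                   ATTR_CONST_NOTHROW_LEAF_LIST
    0                1                   ATTR_PURE_NOTHROW_LEAF_LIST
    1                0                   ATTR_ERRNOCONST_NOTHROW_LEAF_LIST
    1                1                   ATTR_ERRNOPURE_NOTHROW_LEAF_LIST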
+
+/* Define an attribute list for math functions that need to mind FP
+ rounding, but because they store into memory they are never "const"
+ or "pure". Use of this macro is mainly for documentation and
+ maintenance purposes. */
+#undef ATTR_MATHFN_FPROUNDING_STORE
+#define ATTR_MATHFN_FPROUNDING_STORE ATTR_NOTHROW_LEAF_LIST
+
+/* Define an attribute list for leaf functions that do not throw
+ exceptions normally, but may throw exceptions when using
+ -fnon-call-exceptions. */
+#define ATTR_NOTHROWCALL_LEAF_LIST (flag_non_call_exceptions ? \
+ ATTR_LEAF_LIST : ATTR_NOTHROW_LEAF_LIST)
+
+/* Make sure 0 is not a legitimate builtin. */
+DEF_BUILTIN_STUB(BUILT_IN_NONE, (const char *)0)
+
+/* Category: math builtins. */
+DEF_LIB_BUILTIN (BUILT_IN_ACOS, "acos", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_ACOSF, "acosf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_ACOSH, "acosh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_ACOSHF, "acoshf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_ACOSHL, "acoshl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define ACOSH_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ACOSH, "acosh", ACOSH_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_ACOSL, "acosl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ACOS, "acos", ACOSH_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C11_BUILTIN (BUILT_IN_ALIGNED_ALLOC, "aligned_alloc", BT_FN_PTR_SIZE_SIZE, ATTR_ALLOC_WARN_UNUSED_RESULT_SIZE_2_NOTHROW_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_ASIN, "asin", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_ASINF, "asinf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_ASINH, "asinh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_BUILTIN (BUILT_IN_ASINHF, "asinhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_BUILTIN (BUILT_IN_ASINHL, "asinhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ASINH, "asinh", ACOSH_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_ASINL, "asinl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ASIN, "asin", ACOSH_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef ACOSH_TYPE
+DEF_LIB_BUILTIN (BUILT_IN_ATAN, "atan", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_LIB_BUILTIN (BUILT_IN_ATAN2, "atan2", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_ATAN2F, "atan2f", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_ATAN2L, "atan2l", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define ATAN2_TYPE(F) BT_FN_##F##_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ATAN2, "atan2", ATAN2_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef ATAN2_TYPE
+DEF_C99_C90RES_BUILTIN (BUILT_IN_ATANF, "atanf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_BUILTIN (BUILT_IN_ATANH, "atanh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_ATANHF, "atanhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_ATANHL, "atanhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define ATANH_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ATANH, "atanh", ATANH_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_ATANL, "atanl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ATAN, "atan", ATANH_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_BUILTIN (BUILT_IN_CBRT, "cbrt", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_BUILTIN (BUILT_IN_CBRTF, "cbrtf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_BUILTIN (BUILT_IN_CBRTL, "cbrtl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CBRT, "cbrt", ATANH_TYPE, ATTR_MATHFN_FPROUNDING)
+#undef ATANH_TYPE
+DEF_LIB_BUILTIN (BUILT_IN_CEIL, "ceil", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_CEILF, "ceilf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_CEILL, "ceill", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#define CEIL_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CEIL, "ceil", CEIL_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#undef CEIL_TYPE
+DEF_C99_BUILTIN (BUILT_IN_COPYSIGN, "copysign", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_COPYSIGNF, "copysignf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_COPYSIGNL, "copysignl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#define COPYSIGN_TYPE(F) BT_FN_##F##_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_COPYSIGN, "copysign", COPYSIGN_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#undef COPYSIGN_TYPE
+DEF_LIB_BUILTIN (BUILT_IN_COS, "cos", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_COSF, "cosf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_LIB_BUILTIN (BUILT_IN_COSH, "cosh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_COSHF, "coshf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_COSHL, "coshl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define COSH_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_COSH, "cosh", COSH_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_COSL, "cosl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_COS, "cos", COSH_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_DREM, "drem", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_DREMF, "dremf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_DREML, "dreml", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_ERF, "erf", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_BUILTIN (BUILT_IN_ERFC, "erfc", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_ERFCF, "erfcf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_ERFCL, "erfcl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ERFC, "erfc", COSH_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_ERFF, "erff", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_BUILTIN (BUILT_IN_ERFL, "erfl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ERF, "erf", COSH_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_LIB_BUILTIN (BUILT_IN_EXP, "exp", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C2X_BUILTIN (BUILT_IN_EXP10, "exp10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C2X_BUILTIN (BUILT_IN_EXP10F, "exp10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C2X_BUILTIN (BUILT_IN_EXP10L, "exp10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_EXP2, "exp2", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_EXP2F, "exp2f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_EXP2L, "exp2l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_EXP2, "exp2", COSH_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_EXPF, "expf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_EXPL, "expl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_EXP, "exp", COSH_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_EXPM1, "expm1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_EXPM1, "expm1", COSH_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef COSH_TYPE
+DEF_C99_BUILTIN (BUILT_IN_EXPM1F, "expm1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_EXPM1L, "expm1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_LIB_BUILTIN (BUILT_IN_FABS, "fabs", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_FABSF, "fabsf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_FABSL, "fabsl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#define FABS_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_FABS, "fabs", FABS_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#undef FABS_TYPE
+DEF_C2X_BUILTIN (BUILT_IN_FABSD32, "fabsd32", BT_FN_DFLOAT32_DFLOAT32, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C2X_BUILTIN (BUILT_IN_FABSD64, "fabsd64", BT_FN_DFLOAT64_DFLOAT64, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C2X_BUILTIN (BUILT_IN_FABSD128, "fabsd128", BT_FN_DFLOAT128_DFLOAT128, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_FDIM, "fdim", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_FDIMF, "fdimf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_FDIML, "fdiml", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define FDIM_TYPE(F) BT_FN_##F##_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_FDIM, "fdim", FDIM_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef FDIM_TYPE
+DEF_C99_BUILTIN (BUILT_IN_FECLEAREXCEPT, "feclearexcept", BT_FN_INT_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_FEGETENV, "fegetenv", BT_FN_INT_FENV_T_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_FEGETEXCEPTFLAG, "fegetexceptflag", BT_FN_INT_FEXCEPT_T_PTR_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_FEGETROUND, "fegetround", BT_FN_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_FEHOLDEXCEPT, "feholdexcept", BT_FN_INT_FENV_T_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_FERAISEEXCEPT, "feraiseexcept", BT_FN_INT_INT, ATTR_NULL)
+DEF_C99_BUILTIN (BUILT_IN_FESETENV, "fesetenv", BT_FN_INT_CONST_FENV_T_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_FESETEXCEPTFLAG, "fesetexceptflag", BT_FN_INT_CONST_FEXCEPT_T_PTR_INT, ATTR_NULL)
+DEF_C99_BUILTIN (BUILT_IN_FESETROUND, "fesetround", BT_FN_INT_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_FETESTEXCEPT, "fetestexcept", BT_FN_INT_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_FEUPDATEENV, "feupdateenv", BT_FN_INT_CONST_FENV_T_PTR, ATTR_NULL)
+DEF_LIB_BUILTIN (BUILT_IN_FLOOR, "floor", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_FLOORF, "floorf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_FLOORL, "floorl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#define FLOOR_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_FLOOR, "floor", FLOOR_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#undef FLOOR_TYPE
+DEF_C99_BUILTIN (BUILT_IN_FMA, "fma", BT_FN_DOUBLE_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_BUILTIN (BUILT_IN_FMAF, "fmaf", BT_FN_FLOAT_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_BUILTIN (BUILT_IN_FMAL, "fmal", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+#define FMA_TYPE(F) BT_FN_##F##_##F##_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_FMA, "fma", FMA_TYPE, ATTR_MATHFN_FPROUNDING)
+#undef FMA_TYPE
+DEF_C99_BUILTIN (BUILT_IN_FMAX, "fmax", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_FMAXF, "fmaxf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_FMAXL, "fmaxl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#define FMAX_TYPE(F) BT_FN_##F##_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_FMAX, "fmax", FMAX_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#undef FMAX_TYPE
+DEF_C99_BUILTIN (BUILT_IN_FMIN, "fmin", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_FMINF, "fminf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_FMINL, "fminl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#define FMIN_TYPE(F) BT_FN_##F##_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_FMIN, "fmin", FMIN_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_FMOD, "fmod", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_FMODF, "fmodf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_FMODL, "fmodl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_FMOD, "fmod", FMIN_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef FMIN_TYPE
+DEF_LIB_BUILTIN (BUILT_IN_FREXP, "frexp", BT_FN_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_FREXPF, "frexpf", BT_FN_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_FREXPL, "frexpl", BT_FN_LONGDOUBLE_LONGDOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE)
+#define FREXP_TYPE(F) BT_FN_##F##_##F##_INTPTR
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_FREXP, "frexp", FREXP_TYPE, ATTR_MATHFN_FPROUNDING_STORE)
+#undef FREXP_TYPE
+DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMA, "gamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAF, "gammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAL, "gammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMA_R, "gamma_r", BT_FN_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAF_R, "gammaf_r", BT_FN_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAL_R, "gammal_r", BT_FN_LONGDOUBLE_LONGDOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_GCC_BUILTIN (BUILT_IN_HUGE_VAL, "huge_val", BT_FN_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_HUGE_VALF, "huge_valf", BT_FN_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_HUGE_VALL, "huge_vall", BT_FN_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#define HUGE_VAL_TYPE(F) BT_FN_##F
+DEF_GCC_FLOATN_NX_BUILTINS (BUILT_IN_HUGE_VAL, "huge_val", HUGE_VAL_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#undef HUGE_VAL_TYPE
+DEF_C99_BUILTIN (BUILT_IN_HYPOT, "hypot", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_HYPOTF, "hypotf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_HYPOTL, "hypotl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define HYPOT_TYPE(F) BT_FN_##F##_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_HYPOT, "hypot", HYPOT_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef HYPOT_TYPE
+DEF_GCC_BUILTIN (BUILT_IN_ICEIL, "iceil", BT_FN_INT_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_ICEILF, "iceilf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_ICEILL, "iceill", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_IFLOOR, "ifloor", BT_FN_INT_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_IFLOORF, "ifloorf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_IFLOORL, "ifloorl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_ILOGB, "ilogb", BT_FN_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_ILOGBF, "ilogbf", BT_FN_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_ILOGBL, "ilogbl", BT_FN_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define ILOGB_TYPE(F) BT_FN_INT_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ILOGB, "ilogb", ILOGB_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef ILOGB_TYPE
+DEF_GCC_BUILTIN (BUILT_IN_INF, "inf", BT_FN_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_INFF, "inff", BT_FN_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_INFL, "infl", BT_FN_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#define INF_TYPE(F) BT_FN_##F
+DEF_GCC_FLOATN_NX_BUILTINS (BUILT_IN_INF, "inf", INF_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#undef INF_TYPE
+DEF_GCC_BUILTIN (BUILT_IN_INFD32, "infd32", BT_FN_DFLOAT32, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_INFD64, "infd64", BT_FN_DFLOAT64, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_INFD128, "infd128", BT_FN_DFLOAT128, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_IRINT, "irint", BT_FN_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_GCC_BUILTIN (BUILT_IN_IRINTF, "irintf", BT_FN_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_GCC_BUILTIN (BUILT_IN_IRINTL, "irintl", BT_FN_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_GCC_BUILTIN (BUILT_IN_IROUND, "iround", BT_FN_INT_DOUBLE, ATTR_MATHFN_ERRNO)
+DEF_GCC_BUILTIN (BUILT_IN_IROUNDF, "iroundf", BT_FN_INT_FLOAT, ATTR_MATHFN_ERRNO)
+DEF_GCC_BUILTIN (BUILT_IN_IROUNDL, "iroundl", BT_FN_INT_LONGDOUBLE, ATTR_MATHFN_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_J0, "j0", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_J0F, "j0f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_J0L, "j0l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_J1, "j1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_J1F, "j1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_J1L, "j1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_JN, "jn", BT_FN_DOUBLE_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_JNF, "jnf", BT_FN_FLOAT_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_JNL, "jnl", BT_FN_LONGDOUBLE_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_GCC_BUILTIN (BUILT_IN_LCEIL, "lceil", BT_FN_LONG_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_LCEILF, "lceilf", BT_FN_LONG_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_LCEILL, "lceill", BT_FN_LONG_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_LDEXP, "ldexp", BT_FN_DOUBLE_DOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_LDEXPF, "ldexpf", BT_FN_FLOAT_FLOAT_INT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_LDEXPL, "ldexpl", BT_FN_LONGDOUBLE_LONGDOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define LDEXP_TYPE(F) BT_FN_##F##_##F##_INT
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_LDEXP, "ldexp", LDEXP_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef LDEXP_TYPE
+DEF_GCC_BUILTIN (BUILT_IN_LFLOOR, "lfloor", BT_FN_LONG_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_LFLOORF, "lfloorf", BT_FN_LONG_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_LFLOORL, "lfloorl", BT_FN_LONG_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_LGAMMA, "lgamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_C99_BUILTIN (BUILT_IN_LGAMMAF, "lgammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_C99_BUILTIN (BUILT_IN_LGAMMAL, "lgammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_STORE)
+#define LGAMMA_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_LGAMMA, "lgamma", LGAMMA_TYPE, ATTR_MATHFN_FPROUNDING_STORE)
+#undef LGAMMA_TYPE
+DEF_EXT_LIB_BUILTIN (BUILT_IN_LGAMMA_R, "lgamma_r", BT_FN_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_LGAMMAF_R, "lgammaf_r", BT_FN_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_LGAMMAL_R, "lgammal_r", BT_FN_LONGDOUBLE_LONGDOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_GCC_BUILTIN (BUILT_IN_LLCEIL, "llceil", BT_FN_LONGLONG_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_LLCEILF, "llceilf", BT_FN_LONGLONG_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_LLCEILL, "llceill", BT_FN_LONGLONG_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_LLFLOOR, "llfloor", BT_FN_LONGLONG_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_LLFLOORF, "llfloorf", BT_FN_LONGLONG_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_LLFLOORL, "llfloorl", BT_FN_LONGLONG_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_LLRINT, "llrint", BT_FN_LONGLONG_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LLRINTF, "llrintf", BT_FN_LONGLONG_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LLRINTL, "llrintl", BT_FN_LONGLONG_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define LLRINT_TYPE(F) BT_FN_LONGLONG_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_LLRINT, "llrint", LLRINT_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LLROUND, "llround", BT_FN_LONGLONG_DOUBLE, ATTR_MATHFN_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LLROUNDF, "llroundf", BT_FN_LONGLONG_FLOAT, ATTR_MATHFN_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LLROUNDL, "llroundl", BT_FN_LONGLONG_LONGDOUBLE, ATTR_MATHFN_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_LLROUND, "llround", LLRINT_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef LLRINT_TYPE
+DEF_LIB_BUILTIN (BUILT_IN_LOG, "log", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_LIB_BUILTIN (BUILT_IN_LOG10, "log10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_LOG10F, "log10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_LOG10L, "log10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define LOG10_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_LOG10, "log10", LOG10_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LOG1P, "log1p", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LOG1PF, "log1pf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LOG1PL, "log1pl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_LOG1P, "log1p", LOG10_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LOG2, "log2", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LOG2F, "log2f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LOG2L, "log2l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_LOG2, "log2", LOG10_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LOGB, "logb", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LOGBF, "logbf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LOGBL, "logbl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_LOGB, "logb", LOG10_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_LOGF, "logf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_LOGL, "logl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_LOG, "log", LOG10_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef LOG10_TYPE
+DEF_C99_BUILTIN (BUILT_IN_LRINT, "lrint", BT_FN_LONG_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LRINTF, "lrintf", BT_FN_LONG_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LRINTL, "lrintl", BT_FN_LONG_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define LRINT_TYPE(F) BT_FN_LONG_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_LRINT, "lrint", LRINT_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LROUND, "lround", BT_FN_LONG_DOUBLE, ATTR_MATHFN_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LROUNDF, "lroundf", BT_FN_LONG_FLOAT, ATTR_MATHFN_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_LROUNDL, "lroundl", BT_FN_LONG_LONGDOUBLE, ATTR_MATHFN_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_LROUND, "lround", LRINT_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef LRINT_TYPE
+DEF_LIB_BUILTIN (BUILT_IN_MODF, "modf", BT_FN_DOUBLE_DOUBLE_DOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_MODFF, "modff", BT_FN_FLOAT_FLOAT_FLOATPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_MODFL, "modfl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE)
+#define MODF_TYPE(F) BT_FN_##F##_##F##_##F##PTR
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_MODF, "modf", MODF_TYPE, ATTR_MATHFN_FPROUNDING_STORE)
+#undef MODF_TYPE
+DEF_C99_BUILTIN (BUILT_IN_NAN, "nan", BT_FN_DOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL)
+DEF_C99_BUILTIN (BUILT_IN_NANF, "nanf", BT_FN_FLOAT_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL)
+DEF_C99_BUILTIN (BUILT_IN_NANL, "nanl", BT_FN_LONGDOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL)
+#define NAN_TYPE(F) BT_FN_##F##_CONST_STRING
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_NAN, "nan", NAN_TYPE, ATTR_CONST_NOTHROW_NONNULL)
+DEF_C2X_BUILTIN (BUILT_IN_NAND32, "nand32", BT_FN_DFLOAT32_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL)
+DEF_C2X_BUILTIN (BUILT_IN_NAND64, "nand64", BT_FN_DFLOAT64_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL)
+DEF_C2X_BUILTIN (BUILT_IN_NAND128, "nand128", BT_FN_DFLOAT128_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL)
+DEF_GCC_BUILTIN (BUILT_IN_NANS, "nans", BT_FN_DOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL)
+DEF_GCC_BUILTIN (BUILT_IN_NANSF, "nansf", BT_FN_FLOAT_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL)
+DEF_GCC_BUILTIN (BUILT_IN_NANSL, "nansl", BT_FN_LONGDOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL)
+DEF_GCC_FLOATN_NX_BUILTINS (BUILT_IN_NANS, "nans", NAN_TYPE, ATTR_CONST_NOTHROW_NONNULL)
+#undef NAN_TYPE
+DEF_GCC_BUILTIN (BUILT_IN_NANSF16B, "nansf16b", BT_FN_BFLOAT16_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL)
+DEF_GCC_BUILTIN (BUILT_IN_NANSD32, "nansd32", BT_FN_DFLOAT32_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL)
+DEF_GCC_BUILTIN (BUILT_IN_NANSD64, "nansd64", BT_FN_DFLOAT64_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL)
+DEF_GCC_BUILTIN (BUILT_IN_NANSD128, "nansd128", BT_FN_DFLOAT128_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL)
+DEF_C99_BUILTIN (BUILT_IN_NEARBYINT, "nearbyint", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_NEARBYINTF, "nearbyintf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_NEARBYINTL, "nearbyintl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#define NEARBYINT_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_NEARBYINT, "nearbyint", NEARBYINT_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#undef NEARBYINT_TYPE
+DEF_C99_BUILTIN (BUILT_IN_NEXTAFTER, "nextafter", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_NEXTAFTERF, "nextafterf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_NEXTAFTERL, "nextafterl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_ERRNO)
+#define NEXTAFTER_TYPE(F) BT_FN_##F##_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_NEXTAFTER, "nextafter", NEXTAFTER_TYPE, ATTR_MATHFN_ERRNO)
+DEF_GCC_BUILTIN (BUILT_IN_NEXTAFTERF16B, "nextafterf16b", BT_FN_BFLOAT16_BFLOAT16_BFLOAT16, ATTR_MATHFN_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARD, "nexttoward", BT_FN_DOUBLE_DOUBLE_LONGDOUBLE, ATTR_MATHFN_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARDF, "nexttowardf", BT_FN_FLOAT_FLOAT_LONGDOUBLE, ATTR_MATHFN_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARDL, "nexttowardl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_ERRNO)
+DEF_LIB_BUILTIN (BUILT_IN_POW, "pow", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10, "pow10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10F, "pow10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10L, "pow10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_POWF, "powf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_GCC_BUILTIN (BUILT_IN_POWI, "powi", BT_FN_DOUBLE_DOUBLE_INT, ATTR_MATHFN_FPROUNDING)
+DEF_GCC_BUILTIN (BUILT_IN_POWIF, "powif", BT_FN_FLOAT_FLOAT_INT, ATTR_MATHFN_FPROUNDING)
+DEF_GCC_BUILTIN (BUILT_IN_POWIL, "powil", BT_FN_LONGDOUBLE_LONGDOUBLE_INT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_POWL, "powl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_POW, "pow", NEXTAFTER_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_REMAINDER, "remainder", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_REMAINDERF, "remainderf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_REMAINDERL, "remainderl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_REMAINDER, "remainder", NEXTAFTER_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef NEXTAFTER_TYPE
+DEF_C99_BUILTIN (BUILT_IN_REMQUO, "remquo", BT_FN_DOUBLE_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_C99_BUILTIN (BUILT_IN_REMQUOF, "remquof", BT_FN_FLOAT_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_C99_BUILTIN (BUILT_IN_REMQUOL, "remquol", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE)
+#define REMQUO_TYPE(F) BT_FN_##F##_##F##_##F##_INTPTR
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_REMQUO, "remquo", REMQUO_TYPE, ATTR_MATHFN_FPROUNDING_STORE)
+#undef REMQUO_TYPE
+DEF_C99_BUILTIN (BUILT_IN_RINT, "rint", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_BUILTIN (BUILT_IN_RINTF, "rintf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_BUILTIN (BUILT_IN_RINTL, "rintl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+#define RINT_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_RINT, "rint", RINT_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#undef RINT_TYPE
+DEF_C2X_BUILTIN (BUILT_IN_ROUNDEVEN, "roundeven", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C2X_BUILTIN (BUILT_IN_ROUNDEVENF, "roundevenf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C2X_BUILTIN (BUILT_IN_ROUNDEVENL, "roundevenl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_ROUND, "round", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_ROUNDF, "roundf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_ROUNDL, "roundl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#define ROUND_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ROUND, "round", ROUND_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#undef ROUND_TYPE
+#define ROUNDEVEN_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_ROUNDEVEN, "roundeven", ROUNDEVEN_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#undef ROUNDEVEN_TYPE
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALB, "scalb", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALBF, "scalbf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALBL, "scalbl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_SCALBLN, "scalbln", BT_FN_DOUBLE_DOUBLE_LONG, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_SCALBLNF, "scalblnf", BT_FN_FLOAT_FLOAT_LONG, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_SCALBLNL, "scalblnl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONG, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define SCALBLN_TYPE(F) BT_FN_##F##_##F##_LONG
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_SCALBLN, "scalbln", SCALBLN_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef SCALBLN_TYPE
+DEF_C99_BUILTIN (BUILT_IN_SCALBN, "scalbn", BT_FN_DOUBLE_DOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_SCALBNF, "scalbnf", BT_FN_FLOAT_FLOAT_INT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_SCALBNL, "scalbnl", BT_FN_LONGDOUBLE_LONGDOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define SCALBN_TYPE(F) BT_FN_##F##_##F##_INT
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_SCALBN, "scalbn", SCALBN_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef SCALBN_TYPE
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBIT, "signbit", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITF, "signbitf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITL, "signbitl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITD32, "signbitd32", BT_FN_INT_DFLOAT32, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITD64, "signbitd64", BT_FN_INT_DFLOAT64, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITD128, "signbitd128", BT_FN_INT_DFLOAT128, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICAND, "significand", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICANDF, "significandf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICANDL, "significandl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_LIB_BUILTIN (BUILT_IN_SIN, "sin", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOS, "sincos", BT_FN_VOID_DOUBLE_DOUBLEPTR_DOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOSF, "sincosf", BT_FN_VOID_FLOAT_FLOATPTR_FLOATPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOSL, "sincosl", BT_FN_VOID_LONGDOUBLE_LONGDOUBLEPTR_LONGDOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_SINF, "sinf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_LIB_BUILTIN (BUILT_IN_SINH, "sinh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_SINHF, "sinhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_SINHL, "sinhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define SINH_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_SINH, "sinh", SINH_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_SINL, "sinl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_SIN, "sin", SINH_TYPE, ATTR_MATHFN_FPROUNDING)
+#undef SINH_TYPE
+DEF_LIB_BUILTIN (BUILT_IN_SQRT, "sqrt", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_SQRTF, "sqrtf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_SQRTL, "sqrtl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#define SQRT_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_SQRT, "sqrt", SQRT_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_LIB_BUILTIN (BUILT_IN_TAN, "tan", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_TANF, "tanf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_LIB_BUILTIN (BUILT_IN_TANH, "tanh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_TANHF, "tanhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_TANHL, "tanhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_TANH, "tanh", SQRT_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_TANL, "tanl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_TAN, "tan", SQRT_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_BUILTIN (BUILT_IN_TGAMMA, "tgamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_TGAMMAF, "tgammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_C99_BUILTIN (BUILT_IN_TGAMMAL, "tgammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_TGAMMA, "tgamma", SQRT_TYPE, ATTR_MATHFN_FPROUNDING_ERRNO)
+#undef SQRT_TYPE
+DEF_C99_BUILTIN (BUILT_IN_TRUNC, "trunc", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_TRUNCF, "truncf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_TRUNCL, "truncl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#define TRUNC_TYPE(F) BT_FN_##F##_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_TRUNC, "trunc", TRUNC_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#undef TRUNC_TYPE
+DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0, "y0", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0F, "y0f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0L, "y0l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1, "y1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1F, "y1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1L, "y1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_YN, "yn", BT_FN_DOUBLE_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_YNF, "ynf", BT_FN_FLOAT_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_YNL, "ynl", BT_FN_LONGDOUBLE_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO)
+
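+/* Editorial sketch (not an upstream definition): each
+   DEF_EXT_LIB_FLOATN_NX_BUILTINS line above is shorthand for one entry
+   per _FloatN/_FloatNx type, produced by token-pasting the given TYPE
+   macro.  Assuming the _Float128 instantiation of LDEXP_TYPE,
+
+     LDEXP_TYPE (FLOAT128)  =>  BT_FN_FLOAT128_FLOAT128_INT
+
+   the effect is roughly that of writing
+
+     DEF_EXT_LIB_BUILTIN (BUILT_IN_LDEXPF128, "ldexpf128",
+                          BT_FN_FLOAT128_FLOAT128_INT,
+                          ATTR_MATHFN_FPROUNDING_ERRNO)
+
+   out by hand for every supported type.  */
+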
+/* Category: _Complex math builtins. */
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CABS, "cabs", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CABSF, "cabsf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CABSL, "cabsl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+#define CABS_TYPE(F) BT_FN_##F##_COMPLEX_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CABS, "cabs", CABS_TYPE, ATTR_MATHFN_FPROUNDING)
+#undef CABS_TYPE
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CACOS, "cacos", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CACOSF, "cacosf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CACOSH, "cacosh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CACOSHF, "cacoshf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CACOSHL, "cacoshl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+#define CACOSH_TYPE(F) BT_FN_COMPLEX_##F##_COMPLEX_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CACOSH, "cacosh", CACOSH_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CACOSL, "cacosl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CACOS, "cacos", CACOSH_TYPE, ATTR_MATHFN_FPROUNDING)
+#undef CACOSH_TYPE
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CARG, "carg", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CARGF, "cargf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CARGL, "cargl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+#define CARG_TYPE(F) BT_FN_##F##_COMPLEX_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CARG, "carg", CARG_TYPE, ATTR_MATHFN_FPROUNDING)
+#undef CARG_TYPE
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CASIN, "casin", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CASINF, "casinf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CASINH, "casinh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CASINHF, "casinhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CASINHL, "casinhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+#define CASINH_TYPE(F) BT_FN_COMPLEX_##F##_COMPLEX_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CASINH, "casinh", CASINH_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CASINL, "casinl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CASIN, "casin", CASINH_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CATAN, "catan", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CATANF, "catanf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CATANH, "catanh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CATANHF, "catanhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CATANHL, "catanhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CATANH, "catanh", CASINH_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CATANL, "catanl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CATAN, "catan", CASINH_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CCOS, "ccos", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CCOSF, "ccosf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CCOSH, "ccosh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CCOSHF, "ccoshf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CCOSHL, "ccoshl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CCOSH, "ccosh", CASINH_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CCOSL, "ccosl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CCOS, "ccos", CASINH_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CEXP, "cexp", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CEXPF, "cexpf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CEXPL, "cexpl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CEXP, "cexp", CASINH_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_GCC_BUILTIN (BUILT_IN_CEXPI, "cexpi", BT_FN_COMPLEX_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_GCC_BUILTIN (BUILT_IN_CEXPIF, "cexpif", BT_FN_COMPLEX_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_GCC_BUILTIN (BUILT_IN_CEXPIL, "cexpil", BT_FN_COMPLEX_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CIMAG, "cimag", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CIMAGF, "cimagf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CIMAGL, "cimagl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CLOG, "clog", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CLOGF, "clogf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CLOGL, "clogl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CLOG, "clog", CASINH_TYPE, ATTR_MATHFN_FPROUNDING)
+#undef CASINH_TYPE
+DEF_EXT_C99RES_BUILTIN (BUILT_IN_CLOG10, "clog10", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_C99RES_BUILTIN (BUILT_IN_CLOG10F, "clog10f", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_C99RES_BUILTIN (BUILT_IN_CLOG10L, "clog10l", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CONJ, "conj", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CONJF, "conjf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CONJL, "conjl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CPOW, "cpow", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CPOWF, "cpowf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CPOWL, "cpowl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+#define CPOW_TYPE(F) BT_FN_COMPLEX_##F##_COMPLEX_##F##_COMPLEX_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CPOW, "cpow", CPOW_TYPE, ATTR_MATHFN_FPROUNDING)
+#undef CPOW_TYPE
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CPROJ, "cproj", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CPROJF, "cprojf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CPROJL, "cprojl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+#define CPROJ_TYPE(F) BT_FN_COMPLEX_##F##_COMPLEX_##F
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CPROJ, "cproj", CPROJ_TYPE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CREAL, "creal", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CREALF, "crealf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CREALL, "creall", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CSIN, "csin", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CSINF, "csinf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CSINH, "csinh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CSINHF, "csinhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CSINHL, "csinhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CSINH, "csinh", CPROJ_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CSINL, "csinl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CSIN, "csin", CPROJ_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CSQRT, "csqrt", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CSQRTF, "csqrtf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CSQRTL, "csqrtl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CSQRT, "csqrt", CPROJ_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CTAN, "ctan", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CTANF, "ctanf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CTANH, "ctanh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CTANHF, "ctanhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CTANHL, "ctanhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CTANH, "ctanh", CPROJ_TYPE, ATTR_MATHFN_FPROUNDING)
+DEF_C99_COMPL_BUILTIN (BUILT_IN_CTANL, "ctanl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)
+DEF_EXT_LIB_FLOATN_NX_BUILTINS (BUILT_IN_CTAN, "ctan", CPROJ_TYPE, ATTR_MATHFN_FPROUNDING)
+#undef CPROJ_TYPE
+
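+/* Usage sketch (editorial, plain C99 <complex.h>): these entries let
+   GCC fold ordinary complex-math calls, e.g.
+
+     #include <complex.h>
+     double complex z = 3.0 + 4.0 * I;
+     double m = cabs (z);    5.0 for this z
+     double a = carg (z);    the phase angle of z
+
+   BUILT_IN_CEXPI above has no C library counterpart: it is a pure GCC
+   builtin that the middle end may synthesize when it combines sin and
+   cos calls on the same argument.  */
+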
+/* Category: string/memory builtins. */
+DEF_EXT_LIB_BUILTIN (BUILT_IN_BCMP, "bcmp", BT_FN_INT_CONST_PTR_CONST_PTR_SIZE, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_BCOPY, "bcopy", BT_FN_VOID_CONST_PTR_PTR_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_BZERO, "bzero", BT_FN_VOID_PTR_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_INDEX, "index", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_MEMCHR, "memchr", BT_FN_PTR_CONST_PTR_INT_SIZE, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_MEMCMP, "memcmp", BT_FN_INT_CONST_PTR_CONST_PTR_SIZE, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_MEMCPY, "memcpy", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_MEMMOVE, "memmove", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_MEMPCPY, "mempcpy", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_RETNONNULL_NOTHROW_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_MEMSET, "memset", BT_FN_PTR_PTR_INT_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_RINDEX, "rindex", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_STPCPY, "stpcpy", BT_FN_STRING_STRING_CONST_STRING, ATTR_RETNONNULL_NOTHROW_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_STPNCPY, "stpncpy", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_RETNONNULL_NOTHROW_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_STRCASECMP, "strcasecmp", BT_FN_INT_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_STRCAT, "strcat", BT_FN_STRING_STRING_CONST_STRING, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_STRCHR, "strchr", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_STRCMP, "strcmp", BT_FN_INT_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_STRCPY, "strcpy", BT_FN_STRING_STRING_CONST_STRING, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_STRCSPN, "strcspn", BT_FN_SIZE_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_C2X_BUILTIN (BUILT_IN_STRDUP, "strdup", BT_FN_STRING_CONST_STRING, ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_NONNULL_LEAF)
+DEF_C2X_BUILTIN (BUILT_IN_STRNDUP, "strndup", BT_FN_STRING_CONST_STRING_SIZE, ATTR_MALLOC_WARN_UNUSED_RESULT_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_STRLEN, "strlen", BT_FN_SIZE_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_STRNCASECMP, "strncasecmp", BT_FN_INT_CONST_STRING_CONST_STRING_SIZE, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_STRNCAT, "strncat", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_STRNCMP, "strncmp", BT_FN_INT_CONST_STRING_CONST_STRING_SIZE, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_STRNCPY, "strncpy", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_STRNLEN, "strnlen", BT_FN_SIZE_CONST_STRING_SIZE, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_STRPBRK, "strpbrk", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_STRRCHR, "strrchr", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_STRSPN, "strspn", BT_FN_SIZE_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_STRSTR, "strstr", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_LEAF)
+
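+/* Usage sketch (editorial): mempcpy, stpcpy and stpncpy differ from
+   their ISO C counterparts only in returning a pointer to the end of
+   the copied data, which lets chained copies skip a strlen pass:
+
+     char buf[16];
+     char *p = buf;
+     p = stpcpy (p, "foo");
+     p = stpcpy (p, "bar");
+
+   leaves p at the terminating NUL of "foobar".  */
+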
+/* Category: stdio builtins. */
+DEF_LIB_BUILTIN (BUILT_IN_FPRINTF, "fprintf", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_NONNULL_1_FORMAT_PRINTF_2_3)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FPRINTF_UNLOCKED, "fprintf_unlocked", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_NONNULL_1_FORMAT_PRINTF_2_3)
+DEF_LIB_BUILTIN (BUILT_IN_PUTC, "putc", BT_FN_INT_INT_FILEPTR, ATTR_NONNULL_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_PUTC_UNLOCKED, "putc_unlocked", BT_FN_INT_INT_FILEPTR, ATTR_NONNULL_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_FPUTC, "fputc", BT_FN_INT_INT_FILEPTR, ATTR_NONNULL_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FPUTC_UNLOCKED, "fputc_unlocked", BT_FN_INT_INT_FILEPTR, ATTR_NONNULL_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_FPUTS, "fputs", BT_FN_INT_CONST_STRING_FILEPTR, ATTR_NONNULL_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FPUTS_UNLOCKED, "fputs_unlocked", BT_FN_INT_CONST_STRING_FILEPTR, ATTR_NONNULL_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_FSCANF, "fscanf", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_FORMAT_SCANF_2_3)
+DEF_LIB_BUILTIN (BUILT_IN_FWRITE, "fwrite", BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR, ATTR_NONNULL_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FWRITE_UNLOCKED, "fwrite_unlocked", BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR, ATTR_NONNULL_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_PRINTF, "printf", BT_FN_INT_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_1_2)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_PRINTF_UNLOCKED, "printf_unlocked", BT_FN_INT_CONST_STRING_VAR, ATTR_NONNULL_1_FORMAT_PRINTF_1_2)
+DEF_LIB_BUILTIN (BUILT_IN_PUTCHAR, "putchar", BT_FN_INT_INT, ATTR_NULL)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_PUTCHAR_UNLOCKED, "putchar_unlocked", BT_FN_INT_INT, ATTR_NULL)
+DEF_LIB_BUILTIN (BUILT_IN_PUTS, "puts", BT_FN_INT_CONST_STRING, ATTR_NONNULL_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_PUTS_UNLOCKED, "puts_unlocked", BT_FN_INT_CONST_STRING, ATTR_NONNULL_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_SCANF, "scanf", BT_FN_INT_CONST_STRING_VAR, ATTR_FORMAT_SCANF_1_2)
+DEF_C99_BUILTIN (BUILT_IN_SNPRINTF, "snprintf", BT_FN_INT_STRING_SIZE_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_NOTHROW_3_4)
+
+DEF_LIB_BUILTIN (BUILT_IN_SPRINTF, "sprintf", BT_FN_INT_STRING_CONST_STRING_VAR, ATTR_NOTHROW_NONNULL_1_FORMAT_PRINTF_2_3)
+DEF_LIB_BUILTIN (BUILT_IN_SSCANF, "sscanf", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_FORMAT_SCANF_NOTHROW_2_3)
+DEF_LIB_BUILTIN (BUILT_IN_VFPRINTF, "vfprintf", BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG, ATTR_NONNULL_1_FORMAT_PRINTF_2_0)
+DEF_C99_BUILTIN (BUILT_IN_VFSCANF, "vfscanf", BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_2_0)
+DEF_LIB_BUILTIN (BUILT_IN_VPRINTF, "vprintf", BT_FN_INT_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_1_0)
+DEF_C99_BUILTIN (BUILT_IN_VSCANF, "vscanf", BT_FN_INT_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_1_0)
+DEF_C99_BUILTIN (BUILT_IN_VSNPRINTF, "vsnprintf", BT_FN_INT_STRING_SIZE_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_NOTHROW_3_0)
+DEF_LIB_BUILTIN (BUILT_IN_VSPRINTF, "vsprintf", BT_FN_INT_STRING_CONST_STRING_VALIST_ARG, ATTR_NOTHROW_NONNULL_1_FORMAT_PRINTF_2_0)
+DEF_C99_BUILTIN (BUILT_IN_VSSCANF, "vsscanf", BT_FN_INT_CONST_STRING_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_NOTHROW_2_0)
+
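+/* Editorial note: several stdio entries exist mainly so the middle end
+   can rewrite one call into a cheaper one when the format string is a
+   known constant; the classic example (illustrative, not from this
+   file) is
+
+     printf ("hello\n");
+
+   which GCC may emit as
+
+     puts ("hello");
+
+   via BUILT_IN_PUTS above.  */
+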
+/* Category: ctype builtins. */
+DEF_LIB_BUILTIN (BUILT_IN_ISALNUM, "isalnum", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_ISALPHA, "isalpha", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_ISASCII, "isascii", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_ISBLANK, "isblank", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_ISCNTRL, "iscntrl", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_ISDIGIT, "isdigit", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_ISGRAPH, "isgraph", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_ISLOWER, "islower", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_ISPRINT, "isprint", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_ISPUNCT, "ispunct", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_ISSPACE, "isspace", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_ISUPPER, "isupper", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_ISXDIGIT, "isxdigit", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_TOASCII, "toascii", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_TOLOWER, "tolower", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_TOUPPER, "toupper", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+
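+/* Editorial note: the attribute split above is deliberate.  isdigit,
+   isxdigit, isascii and toascii are ATTR_CONST because ISO C defines
+   their results independently of the current locale, while the other
+   classifiers are only ATTR_PURE since they may read locale data.  */
+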
+/* Category: wctype builtins. */
+DEF_C94_BUILTIN (BUILT_IN_ISWALNUM, "iswalnum", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C94_BUILTIN (BUILT_IN_ISWALPHA, "iswalpha", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_ISWBLANK, "iswblank", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C94_BUILTIN (BUILT_IN_ISWCNTRL, "iswcntrl", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C94_BUILTIN (BUILT_IN_ISWDIGIT, "iswdigit", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C94_BUILTIN (BUILT_IN_ISWGRAPH, "iswgraph", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C94_BUILTIN (BUILT_IN_ISWLOWER, "iswlower", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C94_BUILTIN (BUILT_IN_ISWPRINT, "iswprint", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C94_BUILTIN (BUILT_IN_ISWPUNCT, "iswpunct", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C94_BUILTIN (BUILT_IN_ISWSPACE, "iswspace", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C94_BUILTIN (BUILT_IN_ISWUPPER, "iswupper", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C94_BUILTIN (BUILT_IN_ISWXDIGIT, "iswxdigit", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C94_BUILTIN (BUILT_IN_TOWLOWER, "towlower", BT_FN_WINT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_C94_BUILTIN (BUILT_IN_TOWUPPER, "towupper", BT_FN_WINT_WINT, ATTR_PURE_NOTHROW_LEAF_LIST)
+
+/* Category: integer overflow checking builtins. */
+DEF_GCC_BUILTIN (BUILT_IN_ADD_OVERFLOW, "add_overflow", BT_FN_BOOL_VAR, ATTR_NOTHROW_NONNULL_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_SUB_OVERFLOW, "sub_overflow", BT_FN_BOOL_VAR, ATTR_NOTHROW_NONNULL_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_MUL_OVERFLOW, "mul_overflow", BT_FN_BOOL_VAR, ATTR_NOTHROW_NONNULL_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_ADD_OVERFLOW_P, "add_overflow_p", BT_FN_BOOL_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_SUB_OVERFLOW_P, "sub_overflow_p", BT_FN_BOOL_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_MUL_OVERFLOW_P, "mul_overflow_p", BT_FN_BOOL_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+/* Clang compatibility. */
+DEF_GCC_BUILTIN (BUILT_IN_SADD_OVERFLOW, "sadd_overflow", BT_FN_BOOL_INT_INT_INTPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SADDL_OVERFLOW, "saddl_overflow", BT_FN_BOOL_LONG_LONG_LONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SADDLL_OVERFLOW, "saddll_overflow", BT_FN_BOOL_LONGLONG_LONGLONG_LONGLONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SSUB_OVERFLOW, "ssub_overflow", BT_FN_BOOL_INT_INT_INTPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SSUBL_OVERFLOW, "ssubl_overflow", BT_FN_BOOL_LONG_LONG_LONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SSUBLL_OVERFLOW, "ssubll_overflow", BT_FN_BOOL_LONGLONG_LONGLONG_LONGLONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SMUL_OVERFLOW, "smul_overflow", BT_FN_BOOL_INT_INT_INTPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SMULL_OVERFLOW, "smull_overflow", BT_FN_BOOL_LONG_LONG_LONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SMULLL_OVERFLOW, "smulll_overflow", BT_FN_BOOL_LONGLONG_LONGLONG_LONGLONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_UADD_OVERFLOW, "uadd_overflow", BT_FN_BOOL_UINT_UINT_UINTPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_UADDL_OVERFLOW, "uaddl_overflow", BT_FN_BOOL_ULONG_ULONG_ULONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_UADDLL_OVERFLOW, "uaddll_overflow", BT_FN_BOOL_ULONGLONG_ULONGLONG_ULONGLONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_USUB_OVERFLOW, "usub_overflow", BT_FN_BOOL_UINT_UINT_UINTPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_USUBL_OVERFLOW, "usubl_overflow", BT_FN_BOOL_ULONG_ULONG_ULONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_USUBLL_OVERFLOW, "usubll_overflow", BT_FN_BOOL_ULONGLONG_ULONGLONG_ULONGLONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_UMUL_OVERFLOW, "umul_overflow", BT_FN_BOOL_UINT_UINT_UINTPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_UMULL_OVERFLOW, "umull_overflow", BT_FN_BOOL_ULONG_ULONG_ULONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_UMULLL_OVERFLOW, "umulll_overflow", BT_FN_BOOL_ULONGLONG_ULONGLONG_ULONGLONGPTR, ATTR_NOTHROW_NONNULL_LEAF_LIST)
+
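+/* Usage sketch (editorial): the type-generic forms return true on
+   overflow and store the wrapped result through the final pointer:
+
+     int r;
+     if (__builtin_add_overflow (a, b, &r))
+       return -1;    overflowed; r holds the wrapped sum
+
+   The _p variants only test without storing a result, and the
+   sadd/uadd family fixes the operand types, per the
+   Clang-compatibility note above.  */
+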
+/* Category: miscellaneous builtins. */
+DEF_LIB_BUILTIN (BUILT_IN_ABORT, "abort", BT_FN_VOID, ATTR_TMPURE_NORETURN_NOTHROW_LEAF_COLD_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_ABS, "abs", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_AGGREGATE_INCOMING_ADDRESS, "aggregate_incoming_address", BT_FN_PTR_VAR, ATTR_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_ALLOCA, "alloca", BT_FN_PTR_SIZE, ATTR_ALLOCA_WARN_UNUSED_RESULT_SIZE_1_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_APPLY, "apply", BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE, ATTR_NULL)
+DEF_GCC_BUILTIN (BUILT_IN_APPLY_ARGS, "apply_args", BT_FN_PTR_VAR, ATTR_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_BSWAP16, "bswap16", BT_FN_UINT16_UINT16, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_BSWAP32, "bswap32", BT_FN_UINT32_UINT32, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_BSWAP64, "bswap64", BT_FN_UINT64_UINT64, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_BSWAP128, "bswap128", BT_FN_UINT128_UINT128, ATTR_CONST_NOTHROW_LEAF_LIST)
+
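+/* Usage sketch (editorial): the bswap entries reverse byte order,
+
+     uint32_t v = __builtin_bswap32 (0x11223344);    0x44332211
+
+   and are normally emitted as a single instruction where the target
+   has one.  */
+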
+DEF_EXT_LIB_BUILTIN (BUILT_IN_CLEAR_CACHE, "__clear_cache", BT_FN_VOID_PTR_PTR, ATTR_NOTHROW_LEAF_LIST)
+/* [trans-mem]: Adjust BUILT_IN_TM_CALLOC if BUILT_IN_CALLOC is changed. */
+DEF_LIB_BUILTIN (BUILT_IN_CALLOC, "calloc", BT_FN_PTR_SIZE_SIZE, ATTR_MALLOC_WARN_UNUSED_RESULT_SIZE_1_2_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CLASSIFY_TYPE, "classify_type", BT_FN_INT_VAR, ATTR_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CLEAR_PADDING, "clear_padding", BT_FN_VOID_VAR, ATTR_NOTHROW_NONNULL_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_CLZ, "clz", BT_FN_INT_UINT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CLZIMAX, "clzimax", BT_FN_INT_UINTMAX, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CLZL, "clzl", BT_FN_INT_ULONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CLZLL, "clzll", BT_FN_INT_ULONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CONSTANT_P, "constant_p", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CTZ, "ctz", BT_FN_INT_UINT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CTZIMAX, "ctzimax", BT_FN_INT_UINTMAX, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CTZL, "ctzl", BT_FN_INT_ULONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CTZLL, "ctzll", BT_FN_INT_ULONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CLRSB, "clrsb", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CLRSBIMAX, "clrsbimax", BT_FN_INT_INTMAX, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CLRSBL, "clrsbl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_CLRSBLL, "clrsbll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_DCGETTEXT, "dcgettext", BT_FN_STRING_CONST_STRING_CONST_STRING_INT, ATTR_FORMAT_ARG_2)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_DGETTEXT, "dgettext", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_FORMAT_ARG_2)
+DEF_GCC_BUILTIN (BUILT_IN_DWARF_CFA, "dwarf_cfa", BT_FN_PTR, ATTR_NULL)
+DEF_GCC_BUILTIN (BUILT_IN_DWARF_SP_COLUMN, "dwarf_sp_column", BT_FN_UINT, ATTR_NULL)
+DEF_GCC_BUILTIN (BUILT_IN_EH_RETURN, "eh_return", BT_FN_VOID_PTRMODE_PTR, ATTR_NORETURN_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_EH_RETURN_DATA_REGNO, "eh_return_data_regno", BT_FN_INT_INT, ATTR_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECL, "execl", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_SENTINEL_NOTHROW_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECLP, "execlp", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_SENTINEL_NOTHROW_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECLE, "execle", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_NOTHROW_SENTINEL_1)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECV, "execv", BT_FN_INT_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECVP, "execvp", BT_FN_INT_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECVE, "execve", BT_FN_INT_CONST_STRING_PTR_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_EXIT, "exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_EXPECT, "expect", BT_FN_LONG_LONG_LONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_EXPECT_WITH_PROBABILITY, "expect_with_probability", BT_FN_LONG_LONG_LONG_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_ASSUME_ALIGNED, "assume_aligned", BT_FN_PTR_CONST_PTR_SIZE_VAR, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_EXTEND_POINTER, "extend_pointer", BT_FN_UNWINDWORD_PTR, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_EXTRACT_RETURN_ADDR, "extract_return_addr", BT_FN_PTR_PTR, ATTR_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FFS, "ffs", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSIMAX, "ffsimax", BT_FN_INT_INTMAX, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSL, "ffsl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSLL, "ffsll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FORK, "fork", BT_FN_PID, ATTR_NOTHROW_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_FRAME_ADDRESS, "frame_address", BT_FN_PTR_UINT, ATTR_NULL)
+/* [trans-mem]: Adjust BUILT_IN_TM_FREE if BUILT_IN_FREE is changed. */
+DEF_LIB_BUILTIN (BUILT_IN_FREE, "free", BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_FROB_RETURN_ADDR, "frob_return_addr", BT_FN_PTR_PTR, ATTR_NULL)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_GETTEXT, "gettext", BT_FN_STRING_CONST_STRING, ATTR_FORMAT_ARG_1)
+DEF_C99_BUILTIN (BUILT_IN_IMAXABS, "imaxabs", BT_FN_INTMAX_INTMAX, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_INIT_DWARF_REG_SIZES, "init_dwarf_reg_size_table", BT_FN_VOID_PTR, ATTR_NULL)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITE, "finite", BT_FN_INT_DOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITEF, "finitef", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITEL, "finitel", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITED32, "finited32", BT_FN_INT_DFLOAT32, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITED64, "finited64", BT_FN_INT_DFLOAT64, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITED128, "finited128", BT_FN_INT_DFLOAT128, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_FPCLASSIFY, "fpclassify", BT_FN_INT_INT_INT_INT_INT_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_ISFINITE, "isfinite", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_ISINF_SIGN, "isinf_sign", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_ISINF, "isinf", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFF, "isinff", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFL, "isinfl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFD32, "isinfd32", BT_FN_INT_DFLOAT32, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFD64, "isinfd64", BT_FN_INT_DFLOAT64, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFD128, "isinfd128", BT_FN_INT_DFLOAT128, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_C90RES_BUILTIN (BUILT_IN_ISNAN, "isnan", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNANF, "isnanf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNANL, "isnanl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNAND32, "isnand32", BT_FN_INT_DFLOAT32, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNAND64, "isnand64", BT_FN_INT_DFLOAT64, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNAND128, "isnand128", BT_FN_INT_DFLOAT128, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_ISNORMAL, "isnormal", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_ISGREATER, "isgreater", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_ISGREATEREQUAL, "isgreaterequal", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_ISLESS, "isless", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_ISLESSEQUAL, "islessequal", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_ISLESSGREATER, "islessgreater", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_ISUNORDERED, "isunordered", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_ISSIGNALING, "issignaling", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_TYPEGENERIC_LEAF)
+DEF_LIB_BUILTIN (BUILT_IN_LABS, "labs", BT_FN_LONG_LONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN_LLABS, "llabs", BT_FN_LONGLONG_LONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_LONGJMP, "longjmp", BT_FN_VOID_PTR_INT, ATTR_NORETURN_NOTHROW_LIST)
+/* [trans-mem]: Adjust BUILT_IN_TM_MALLOC if BUILT_IN_MALLOC is changed. */
+DEF_LIB_BUILTIN (BUILT_IN_MALLOC, "malloc", BT_FN_PTR_SIZE, ATTR_MALLOC_WARN_UNUSED_RESULT_SIZE_1_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_NEXT_ARG, "next_arg", BT_FN_PTR_VAR, ATTR_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_PARITY, "parity", BT_FN_INT_UINT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_PARITYIMAX, "parityimax", BT_FN_INT_UINTMAX, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_PARITYL, "parityl", BT_FN_INT_ULONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_PARITYLL, "parityll", BT_FN_INT_ULONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_POPCOUNT, "popcount", BT_FN_INT_UINT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTIMAX, "popcountimax", BT_FN_INT_UINTMAX, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTL, "popcountl", BT_FN_INT_ULONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTLL, "popcountll", BT_FN_INT_ULONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_POSIX_MEMALIGN, "posix_memalign", BT_FN_INT_PTRPTR_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_GCC_BUILTIN (BUILT_IN_PREFETCH, "prefetch", BT_FN_VOID_CONST_PTR_VAR, ATTR_NOVOPS_LEAF_LIST)
+DEF_LIB_BUILTIN (BUILT_IN_REALLOC, "realloc", BT_FN_PTR_PTR_SIZE, ATTR_ALLOC_WARN_UNUSED_RESULT_SIZE_2_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_RETURN, "return", BT_FN_VOID_PTR, ATTR_NORETURN_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_RETURN_ADDRESS, "return_address", BT_FN_PTR_UINT, ATTR_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SAVEREGS, "saveregs", BT_FN_PTR_VAR, ATTR_NULL)
+DEF_GCC_BUILTIN (BUILT_IN_SETJMP, "setjmp", BT_FN_INT_PTR, ATTR_RT_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_STRFMON, "strfmon", BT_FN_SSIZE_STRING_SIZE_CONST_STRING_VAR, ATTR_FORMAT_STRFMON_NOTHROW_3_4)
+DEF_LIB_BUILTIN (BUILT_IN_STRFTIME, "strftime", BT_FN_SIZE_STRING_SIZE_CONST_STRING_CONST_TM_PTR, ATTR_FORMAT_STRFTIME_NOTHROW_3_0)
+DEF_GCC_BUILTIN (BUILT_IN_TRAP, "trap", BT_FN_VOID, ATTR_NORETURN_NOTHROW_LEAF_COLD_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_UNREACHABLE_TRAP, "unreachable trap", BT_FN_VOID, ATTR_CONST_NORETURN_NOTHROW_LEAF_COLD_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_UNREACHABLE, "unreachable", BT_FN_VOID, ATTR_CONST_NORETURN_NOTHROW_LEAF_COLD_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_UNWIND_INIT, "unwind_init", BT_FN_VOID, ATTR_NULL)
+DEF_GCC_BUILTIN (BUILT_IN_UPDATE_SETJMP_BUF, "update_setjmp_buf", BT_FN_VOID_PTR, ATTR_NULL)
+DEF_GCC_BUILTIN (BUILT_IN_VA_COPY, "va_copy", BT_FN_VOID_VALIST_REF_VALIST_ARG, ATTR_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_VA_END, "va_end", BT_FN_VOID_VALIST_REF, ATTR_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_VA_START, "va_start", BT_FN_VOID_VALIST_REF_VAR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_VA_ARG_PACK, "va_arg_pack", BT_FN_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_VA_ARG_PACK_LEN, "va_arg_pack_len", BT_FN_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN__EXIT, "_exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LEAF_LIST)
+DEF_C99_BUILTIN (BUILT_IN__EXIT2, "_Exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LEAF_LIST)
+
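+/* Usage sketch (editorial): BUILT_IN_EXPECT carries branch-prediction
+   hints,
+
+     if (__builtin_expect (ptr == NULL, 0))
+       handle_rare_error ();
+
+   marking the error path as unlikely; expect_with_probability takes an
+   explicit probability instead of the fixed hint.  */
+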
+/* Implementing nested functions. */
+DEF_BUILTIN_STUB (BUILT_IN_INIT_TRAMPOLINE, "__builtin_init_trampoline")
+DEF_BUILTIN_STUB (BUILT_IN_INIT_HEAP_TRAMPOLINE, "__builtin_init_heap_trampoline")
+DEF_BUILTIN_STUB (BUILT_IN_ADJUST_TRAMPOLINE, "__builtin_adjust_trampoline")
+DEF_BUILTIN_STUB (BUILT_IN_INIT_DESCRIPTOR, "__builtin_init_descriptor")
+DEF_BUILTIN_STUB (BUILT_IN_ADJUST_DESCRIPTOR, "__builtin_adjust_descriptor")
+DEF_BUILTIN_STUB (BUILT_IN_NONLOCAL_GOTO, "__builtin_nonlocal_goto")
+
+/* Implementing __builtin_setjmp. */
+DEF_BUILTIN_STUB (BUILT_IN_SETJMP_SETUP, "__builtin_setjmp_setup")
+DEF_BUILTIN_STUB (BUILT_IN_SETJMP_RECEIVER, "__builtin_setjmp_receiver")
+
+/* Implementing variable sized local variables. */
+DEF_BUILTIN_STUB (BUILT_IN_STACK_SAVE, "__builtin_stack_save")
+DEF_BUILTIN_STUB (BUILT_IN_STACK_RESTORE, "__builtin_stack_restore")
+DEF_BUILTIN_STUB (BUILT_IN_ALLOCA_WITH_ALIGN, "__builtin_alloca_with_align")
+DEF_BUILTIN_STUB (BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX, "__builtin_alloca_with_align_and_max")
+
+/* An internal version of memcmp, used when the result is only tested for
+ equality with zero. */
+DEF_BUILTIN_STUB (BUILT_IN_MEMCMP_EQ, "__builtin_memcmp_eq")
+
+/* An internal version of strcmp/strncmp, used when the result is only
+ tested for equality with zero. */
+DEF_BUILTIN_STUB (BUILT_IN_STRCMP_EQ, "__builtin_strcmp_eq")
+DEF_BUILTIN_STUB (BUILT_IN_STRNCMP_EQ, "__builtin_strncmp_eq")
+
+/* Object size checking builtins. */
+DEF_GCC_BUILTIN (BUILT_IN_OBJECT_SIZE, "object_size", BT_FN_SIZE_CONST_PTR_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_DYNAMIC_OBJECT_SIZE, "dynamic_object_size", BT_FN_SIZE_CONST_PTR_INT, ATTR_PURE_NOTHROW_LEAF_LIST)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_MEMCPY_CHK, "__memcpy_chk", BT_FN_PTR_PTR_CONST_PTR_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_MEMMOVE_CHK, "__memmove_chk", BT_FN_PTR_PTR_CONST_PTR_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_MEMPCPY_CHK, "__mempcpy_chk", BT_FN_PTR_PTR_CONST_PTR_SIZE_SIZE, ATTR_RETNONNULL_NOTHROW_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_MEMSET_CHK, "__memset_chk", BT_FN_PTR_PTR_INT_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_STPCPY_CHK, "__stpcpy_chk", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_RETNONNULL_NOTHROW_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_STPNCPY_CHK, "__stpncpy_chk", BT_FN_STRING_STRING_CONST_STRING_SIZE_SIZE, ATTR_RETNONNULL_NOTHROW_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_STRCAT_CHK, "__strcat_chk", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_STRCPY_CHK, "__strcpy_chk", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_STRNCAT_CHK, "__strncat_chk", BT_FN_STRING_STRING_CONST_STRING_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_STRNCPY_CHK, "__strncpy_chk", BT_FN_STRING_STRING_CONST_STRING_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SNPRINTF_CHK, "__snprintf_chk", BT_FN_INT_STRING_SIZE_INT_SIZE_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_NOTHROW_5_6)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_SPRINTF_CHK, "__sprintf_chk", BT_FN_INT_STRING_INT_SIZE_CONST_STRING_VAR, ATTR_NOTHROW_NONNULL_1_FORMAT_PRINTF_4_5)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_VSNPRINTF_CHK, "__vsnprintf_chk", BT_FN_INT_STRING_SIZE_INT_SIZE_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_NOTHROW_5_0)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_VSPRINTF_CHK, "__vsprintf_chk", BT_FN_INT_STRING_INT_SIZE_CONST_STRING_VALIST_ARG, ATTR_NOTHROW_NONNULL_1_FORMAT_PRINTF_4_0)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_FPRINTF_CHK, "__fprintf_chk", BT_FN_INT_FILEPTR_INT_CONST_STRING_VAR, ATTR_NONNULL_1_FORMAT_PRINTF_3_4)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_PRINTF_CHK, "__printf_chk", BT_FN_INT_INT_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_2_3)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_VFPRINTF_CHK, "__vfprintf_chk", BT_FN_INT_FILEPTR_INT_CONST_STRING_VALIST_ARG, ATTR_NONNULL_1_FORMAT_PRINTF_3_0)
+DEF_EXT_LIB_BUILTIN (BUILT_IN_VPRINTF_CHK, "__vprintf_chk", BT_FN_INT_INT_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_2_0)
+
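+/* Usage sketch (editorial): the _chk entries back _FORTIFY_SOURCE-style
+   wrappers driven by BUILT_IN_OBJECT_SIZE:
+
+     char buf[8];
+     size_t sz = __builtin_object_size (buf, 0);    folds to 8 here
+     __memcpy_chk (buf, src, n, sz);
+
+   where the checked copy aborts at run time if n exceeds sz, and
+   __builtin_object_size folds to (size_t) -1 when the size cannot be
+   determined.  */
+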
+/* Profiling hooks. */
+DEF_BUILTIN (BUILT_IN_PROFILE_FUNC_ENTER, "__cyg_profile_func_enter", BUILT_IN_NORMAL, BT_FN_VOID_PTR_PTR, BT_LAST,
+ false, false, false, ATTR_NULL, true, true)
+DEF_BUILTIN (BUILT_IN_PROFILE_FUNC_EXIT, "__cyg_profile_func_exit", BUILT_IN_NORMAL, BT_FN_VOID_PTR_PTR, BT_LAST,
+ false, false, false, ATTR_NULL, true, true)
+
+/* TLS thread pointer related builtins. */
+DEF_BUILTIN (BUILT_IN_THREAD_POINTER, "__builtin_thread_pointer",
+ BUILT_IN_NORMAL, BT_FN_PTR, BT_LAST,
+ false, false, true, ATTR_CONST_NOTHROW_LIST, true,
+ targetm.have_tls)
+
+DEF_BUILTIN (BUILT_IN_SET_THREAD_POINTER, "__builtin_set_thread_pointer",
+ BUILT_IN_NORMAL, BT_FN_VOID_PTR, BT_LAST,
+ false, false, true, ATTR_NOTHROW_LIST, true,
+ targetm.have_tls)
+
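+/* Usage sketch (editorial): on targets where targetm.have_tls holds,
+
+     void *tp = __builtin_thread_pointer ();
+
+   reads the thread-pointer register directly; note that both entries
+   above are registered only under that same condition.  */
+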
+/* TLS emulation. */
+DEF_BUILTIN (BUILT_IN_EMUTLS_GET_ADDRESS, targetm.emutls.get_address,
+ BUILT_IN_NORMAL,
+ BT_FN_PTR_PTR, BT_FN_PTR_PTR,
+ true, true, true, ATTR_CONST_NOTHROW_NONNULL_LEAF, false,
+ !targetm.have_tls)
+DEF_BUILTIN (BUILT_IN_EMUTLS_REGISTER_COMMON,
+ targetm.emutls.register_common, BUILT_IN_NORMAL,
+ BT_FN_VOID_PTR_WORD_WORD_PTR, BT_FN_VOID_PTR_WORD_WORD_PTR,
+ true, true, true, ATTR_NOTHROW_LEAF_LIST, false,
+ !targetm.have_tls)
+
+/* Suppressing speculation. Users are expected to use the first (N)
+ variant, which will be translated internally into one of the other
+ types. */
+
+DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_N, "speculation_safe_value",
+ BT_FN_VOID_VAR, ATTR_NOVOPS_NOTHROW_LEAF_LIST)
+
+DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_PTR,
+ "speculation_safe_value_ptr", BT_FN_PTR_PTR_VAR,
+ ATTR_NOVOPS_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_1, "speculation_safe_value_1",
+ BT_FN_I1_I1_VAR, ATTR_NOVOPS_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_2, "speculation_safe_value_2",
+ BT_FN_I2_I2_VAR, ATTR_NOVOPS_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_4, "speculation_safe_value_4",
+ BT_FN_I4_I4_VAR, ATTR_NOVOPS_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_8, "speculation_safe_value_8",
+ BT_FN_I8_I8_VAR, ATTR_NOVOPS_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_SPECULATION_SAFE_VALUE_16,
+ "speculation_safe_value_16", BT_FN_I16_I16_VAR,
+ ATTR_NOVOPS_NOTHROW_LEAF_LIST)
+
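+/* Usage sketch (editorial, after the GCC manual's example for
+   __builtin_speculation_safe_value): a Spectre-v1 style bounds check,
+
+     if (untrusted < limit)
+       val = array[__builtin_speculation_safe_value (untrusted)];
+
+   constrains the index to a safe value on mis-speculated paths; the
+   sized _1.._16 variants above are the internal forms the
+   type-generic entry is translated to.  */
+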
+/* Exception support. */
+DEF_BUILTIN_STUB (BUILT_IN_UNWIND_RESUME, "__builtin_unwind_resume")
+DEF_BUILTIN_STUB (BUILT_IN_CXA_END_CLEANUP, "__builtin_cxa_end_cleanup")
+DEF_BUILTIN_STUB (BUILT_IN_EH_POINTER, "__builtin_eh_pointer")
+DEF_BUILTIN_STUB (BUILT_IN_EH_FILTER, "__builtin_eh_filter")
+DEF_BUILTIN_STUB (BUILT_IN_EH_COPY_VALUES, "__builtin_eh_copy_values")
+
+/* __FILE__, __LINE__, __FUNCTION__ as builtins. */
+DEF_GCC_BUILTIN (BUILT_IN_FILE, "FILE", BT_FN_CONST_STRING, ATTR_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_FUNCTION, "FUNCTION", BT_FN_CONST_STRING, ATTR_NOTHROW_LEAF_LIST)
+DEF_GCC_BUILTIN (BUILT_IN_LINE, "LINE", BT_FN_INT, ATTR_NOTHROW_LEAF_LIST)
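+
+/* For illustration, an assumption based on GCC's documented behavior
+   rather than text from this file: the above surface to users as
+   __builtin_FILE (), __builtin_FUNCTION () and __builtin_LINE (),
+   which evaluate at the call site, e.g. as a C++ default argument:
+
+     void log_here (int line = __builtin_LINE ());
+
+   whereas the __LINE__ macro expands where it is written. */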
+
+/* Synchronization Primitives. */
+#include "sync-builtins.def"
+
+/* Offloading and Multi Processing builtins. */
+#include "omp-builtins.def"
+
+/* GTM builtins. */
+#include "gtm-builtins.def"
+
+/* Sanitizer builtins. */
+#include "sanitizer.def"
+
+/* Coroutine builtins. */
+#include "coroutine-builtins.def"
+
+#undef DEF_BUILTIN
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtins.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtins.h
new file mode 100644
index 0000000..6a43de4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/builtins.h
@@ -0,0 +1,160 @@
+/* Expand builtin functions.
+ Copyright (C) 1988-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_BUILTINS_H
+#define GCC_BUILTINS_H
+
+#include <mpc.h>
+
+/* Target-dependent globals. */
+struct target_builtins {
+ /* For each register that may be used for calling a function, this
+ gives a mode used to copy the register's value. VOIDmode indicates
+ the register is not used for calling a function. If the machine
+ has register windows, this gives only the outbound registers.
+ INCOMING_REGNO gives the corresponding inbound register. */
+ fixed_size_mode_pod x_apply_args_mode[FIRST_PSEUDO_REGISTER];
+
+ /* For each register that may be used for returning values, this gives
+ a mode used to copy the register's value. VOIDmode indicates the
+ register is not used for returning values. If the machine has
+ register windows, this gives only the outbound registers.
+ INCOMING_REGNO gives the corresponding inbound register. */
+ fixed_size_mode_pod x_apply_result_mode[FIRST_PSEUDO_REGISTER];
+};
+
+extern struct target_builtins default_target_builtins;
+#if SWITCHABLE_TARGET
+extern struct target_builtins *this_target_builtins;
+#else
+#define this_target_builtins (&default_target_builtins)
+#endif
+
+/* Non-zero if __builtin_constant_p should be folded right away. */
+extern bool force_folding_builtin_constant_p;
+
+extern bool called_as_built_in (tree);
+extern bool get_object_alignment_1 (tree, unsigned int *,
+ unsigned HOST_WIDE_INT *);
+extern bool get_object_alignment_2 (tree, unsigned int *,
+ unsigned HOST_WIDE_INT *, bool);
+extern unsigned int get_object_alignment (tree);
+extern bool get_pointer_alignment_1 (tree, unsigned int *,
+ unsigned HOST_WIDE_INT *);
+extern unsigned int get_pointer_alignment (tree);
+extern unsigned string_length (const void*, unsigned, unsigned);
+
+struct c_strlen_data
+{
+ /* [MINLEN, MAXBOUND, MAXLEN] is a range describing the length of
+ one or more strings of possibly unknown length. For a single
+ string of known length the range is a constant where
+ MINLEN == MAXBOUND == MAXLEN holds.
+ For other strings, MINLEN is the length of the shortest known
+ string. MAXBOUND is the length of a string that could be stored
+ in the largest array referenced by the expression. MAXLEN is
+ the length of the longest sequence of non-zero bytes
+ in an object referenced by the expression. For such strings,
+ MINLEN <= MAXBOUND <= MAXLEN holds. For example, given:
+ struct A { char a[7], b[]; };
+ extern struct A *p;
+ n = strlen (p->a);
+ the computed range will be [0, 6, ALL_ONES].
+ However, for a conditional expression involving a string
+ of known length and an array of unknown bound such as
+ n = strlen (i ? p->b : "123");
+ the range will be [3, 3, ALL_ONES].
+ MINLEN != 0 && MAXLEN == ALL_ONES indicates that MINLEN is
+ the length of the shortest known string and implies that
+ the shortest possible string referenced by the expression may
+ actually be the empty string. This distinction is useful for
+ diagnostics. get_range_strlen() return value distinguishes
+ between these two cases.
+ As the tighter (and more optimistic) bound, MAXBOUND is suitable
+ for diagnostics but not for optimization.
+ As the more conservative bound, MAXLEN is intended to be used
+ for optimization. */
+ tree minlen;
+ tree maxlen;
+ tree maxbound;
+ /* When non-null, DECL refers to the declaration known to store
+ an unterminated constant character array, as in:
+ const char s[] = { 'a', 'b', 'c' };
+ It is used to diagnose uses of such arrays in functions such as
+ strlen() that expect a nul-terminated string as an argument. */
+ tree decl;
+ /* Non-constant offset from the beginning of a string not accounted
+ for in the length range. Used to improve diagnostics. */
+ tree off;
+};
+
+extern tree c_strlen (tree, int, c_strlen_data * = NULL, unsigned = 1);
+extern rtx c_readstr (const char *, scalar_int_mode, bool = true);
+extern void expand_builtin_setjmp_setup (rtx, rtx);
+extern void expand_builtin_setjmp_receiver (rtx);
+extern void expand_builtin_update_setjmp_buf (rtx);
+extern tree mathfn_built_in (tree, enum built_in_function fn);
+extern tree mathfn_built_in (tree, combined_fn);
+extern tree mathfn_built_in_type (combined_fn);
+extern rtx builtin_strncpy_read_str (void *, void *, HOST_WIDE_INT,
+ fixed_size_mode);
+extern rtx builtin_memset_read_str (void *, void *, HOST_WIDE_INT,
+ fixed_size_mode);
+extern rtx expand_builtin_memset (tree, rtx, machine_mode);
+extern rtx expand_builtin_saveregs (void);
+extern tree std_build_builtin_va_list (void);
+extern tree std_fn_abi_va_list (tree);
+extern tree std_canonical_va_list_type (tree);
+extern void std_expand_builtin_va_start (tree, rtx);
+extern void expand_builtin_trap (void);
+extern void expand_ifn_atomic_bit_test_and (gcall *);
+extern void expand_ifn_atomic_compare_exchange (gcall *);
+extern void expand_ifn_atomic_op_fetch_cmp_0 (gcall *);
+extern rtx expand_builtin (tree, rtx, rtx, machine_mode, int);
+extern enum built_in_function builtin_mathfn_code (const_tree);
+extern tree fold_builtin_expect (location_t, tree, tree, tree, tree);
+extern bool avoid_folding_inline_builtin (tree);
+extern tree fold_call_expr (location_t, tree, bool);
+extern tree fold_builtin_call_array (location_t, tree, tree, int, tree *);
+extern bool validate_gimple_arglist (const gcall *, ...);
+extern rtx default_expand_builtin (tree, rtx, rtx, machine_mode, int);
+extern void maybe_emit_call_builtin___clear_cache (rtx, rtx);
+extern bool fold_builtin_next_arg (tree, bool);
+extern tree do_mpc_arg2 (tree, tree, tree, int, int (*)(mpc_ptr, mpc_srcptr, mpc_srcptr, mpc_rnd_t));
+extern tree fold_call_stmt (gcall *, bool);
+extern void set_builtin_user_assembler_name (tree decl, const char *asmspec);
+extern bool is_simple_builtin (tree);
+extern bool is_inexpensive_builtin (tree);
+extern bool readonly_data_expr (tree exp);
+extern bool init_target_chars (void);
+extern unsigned HOST_WIDE_INT target_newline;
+extern unsigned HOST_WIDE_INT target_percent;
+extern char target_percent_s[3];
+extern char target_percent_c[3];
+extern char target_percent_s_newline[4];
+extern bool target_char_cst_p (tree t, char *p);
+extern rtx get_memory_rtx (tree exp, tree len);
+
+extern internal_fn associated_internal_fn (combined_fn, tree);
+extern internal_fn associated_internal_fn (tree);
+extern internal_fn replacement_internal_fn (gcall *);
+
+extern bool builtin_with_linkage_p (tree);
+
+#endif /* GCC_BUILTINS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/bversion.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/bversion.h
new file mode 100644
index 0000000..a34321c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/bversion.h
@@ -0,0 +1,4 @@
+#define BUILDING_GCC_MAJOR 13
+#define BUILDING_GCC_MINOR 2
+#define BUILDING_GCC_PATCHLEVEL 1
+#define BUILDING_GCC_VERSION (BUILDING_GCC_MAJOR * 1000 + BUILDING_GCC_MINOR)
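+
+/* For illustration, not part of the upstream header: with the values
+   above, BUILDING_GCC_VERSION evaluates to 13 * 1000 + 2 == 13002; the
+   patchlevel does not participate in the encoding. A plugin might
+   guard version-specific code with, e.g.:
+
+     #if BUILDING_GCC_VERSION >= 13002
+     ...
+     #endif
+*/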
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-common.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-common.def
new file mode 100644
index 0000000..68ae964
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-common.def
@@ -0,0 +1,96 @@
+/* This file contains the definitions and documentation for the
+ additional tree codes used in the GNU C compiler (see tree.def
+ for the standard codes).
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+ Written by Benjamin Chelf <chelf@codesourcery.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Tree nodes used in the C frontend. These are also shared with the
+ C++ and Objective C frontends. */
+
+/* A C_MAYBE_CONST_EXPR, currently only used for C and Objective C,
+ tracks information about constancy of an expression and VLA type
+ sizes or VM expressions from typeof that need to be evaluated
+ before the main expression. It is used during parsing and removed
+ in c_fully_fold. C_MAYBE_CONST_EXPR_PRE is the expression to
+ evaluate first, if not NULL; C_MAYBE_CONST_EXPR_EXPR is the main
+ expression. If C_MAYBE_CONST_EXPR_INT_OPERANDS is set then the
+ expression may be used in an unevaluated part of an integer
+ constant expression, but not in an evaluated part. If
+ C_MAYBE_CONST_EXPR_NON_CONST is set then the expression contains
+ something that cannot occur in an evaluated part of a constant
+ expression (or outside of sizeof in C90 mode); otherwise it does
+ not. */
+DEFTREECODE (C_MAYBE_CONST_EXPR, "c_maybe_const_expr", tcc_expression, 2)
+
+/* An EXCESS_PRECISION_EXPR represents an expression evaluated in greater
+ range or precision than its type. The type of the EXCESS_PRECISION_EXPR
+ is the semantic type while the operand represents what is actually being
+ evaluated. */
+DEFTREECODE (EXCESS_PRECISION_EXPR, "excess_precision_expr", tcc_expression, 1)
+
+/* Used to represent a user-defined literal.
+ The operands are an IDENTIFIER for the suffix, the VALUE of the literal,
+ and for numeric literals the original string representation of the
+ number. */
+DEFTREECODE (USERDEF_LITERAL, "userdef_literal", tcc_exceptional, 3)
+
+/* Represents a 'sizeof' expression during C++ template expansion,
+ or for the purpose of -Wsizeof-pointer-memaccess warning. */
+DEFTREECODE (SIZEOF_EXPR, "sizeof_expr", tcc_expression, 1)
+
+/* Like above, but enclosed in parentheses. Used to suppress warnings. */
+DEFTREECODE (PAREN_SIZEOF_EXPR, "paren_sizeof_expr", tcc_expression, 1)
+
+/* Used to represent a `for' statement. The operands are
+ FOR_INIT_STMT, FOR_COND, FOR_EXPR, FOR_BODY, and FOR_SCOPE,
+ respectively. */
+DEFTREECODE (FOR_STMT, "for_stmt", tcc_statement, 5)
+
+/* Used to represent a 'while' statement. The operands are WHILE_COND
+ and WHILE_BODY, respectively. */
+DEFTREECODE (WHILE_STMT, "while_stmt", tcc_statement, 2)
+
+/* Used to represent a 'do' statement. The operands are DO_COND and
+ DO_BODY, respectively. */
+DEFTREECODE (DO_STMT, "do_stmt", tcc_statement, 2)
+
+/* Used to represent a 'break' statement. */
+DEFTREECODE (BREAK_STMT, "break_stmt", tcc_statement, 0)
+
+/* Used to represent a 'continue' statement. */
+DEFTREECODE (CONTINUE_STMT, "continue_stmt", tcc_statement, 0)
+
+/* Used to represent a 'switch' statement. The operands are
+ SWITCH_STMT_COND, SWITCH_STMT_BODY, SWITCH_STMT_TYPE, and
+ SWITCH_STMT_SCOPE, respectively. */
+DEFTREECODE (SWITCH_STMT, "switch_stmt", tcc_statement, 4)
+
+/* Extensions for C++ Concepts. */
+
+/* Concept definition. This is not entirely different from a VAR_DECL
+ except that a) it must be a template, and b) doesn't have the wide
+ range of value and linkage options available to variables. Used
+ by C++ FE and in c-family attribute handling. */
+DEFTREECODE (CONCEPT_DECL, "concept_decl", tcc_declaration, 0)
+
+/*
+Local variables:
+mode:c
+End:
+*/
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-common.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-common.h
new file mode 100644
index 0000000..f96350b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-common.h
@@ -0,0 +1,1587 @@
+/* Definitions for c-common.cc.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_C_COMMON_H
+#define GCC_C_COMMON_H
+
+#include "splay-tree.h"
+#include "cpplib.h"
+#include "alias.h"
+#include "tree.h"
+#include "fold-const.h"
+#include "wide-int-bitmask.h"
+
+/* In order for the format checking to accept the C frontend
+ diagnostic framework extensions, you must include this file before
+ diagnostic-core.h, not after. The C front end formats are a subset of those
+ for C++, so they are the appropriate set to use in common code;
+ cp-tree.h overrides this for C++. */
+#if defined(GCC_DIAGNOSTIC_CORE_H)
+#error \
+In order for the format checking to accept the C front end diagnostic \
+framework extensions, you must include this file before diagnostic-core.h, \
+never after.
+#endif
+#ifndef GCC_DIAG_STYLE
+#define GCC_DIAG_STYLE __gcc_cdiag__
+#endif
+#include "diagnostic-core.h"
+
+/* Usage of TREE_LANG_FLAG_?:
+ 0: IDENTIFIER_MARKED (used by search routines).
+ C_MAYBE_CONST_EXPR_INT_OPERANDS (in C_MAYBE_CONST_EXPR, for C)
+ 1: C_DECLARED_LABEL_FLAG (in LABEL_DECL)
+ STATEMENT_LIST_STMT_EXPR (in STATEMENT_LIST)
+ C_MAYBE_CONST_EXPR_NON_CONST (in C_MAYBE_CONST_EXPR, for C)
+ 2: unused
+ 3: STATEMENT_LIST_HAS_LABEL (in STATEMENT_LIST)
+ 4: unused
+*/
+
+/* Reserved identifiers. This is the union of all the keywords for C,
+ C++, and Objective-C. All the type modifiers have to be in one
+ block at the beginning, because they are used as mask bits. There
+ are 28 type modifiers; if we add many more we will have to redesign
+ the mask mechanism. */
+
+enum rid
+{
+ /* Modifiers: */
+ /* C, in empirical order of frequency. */
+ RID_STATIC = 0,
+ RID_UNSIGNED, RID_LONG, RID_CONST, RID_EXTERN,
+ RID_REGISTER, RID_TYPEDEF, RID_SHORT, RID_INLINE,
+ RID_VOLATILE, RID_SIGNED, RID_AUTO, RID_RESTRICT,
+ RID_NORETURN, RID_ATOMIC,
+
+ /* C extensions */
+ RID_COMPLEX, RID_THREAD, RID_SAT,
+
+ /* C++ */
+ RID_FRIEND, RID_VIRTUAL, RID_EXPLICIT, RID_EXPORT, RID_MUTABLE,
+
+ /* ObjC ("PQ" reserved words - they do not appear after a '@' and
+ are keywords only in specific contexts) */
+ RID_IN, RID_OUT, RID_INOUT, RID_BYCOPY, RID_BYREF, RID_ONEWAY,
+
+ /* ObjC ("PATTR" reserved words - they do not appear after a '@'
+ and are keywords only as property attributes) */
+ RID_GETTER, RID_SETTER,
+ RID_READONLY, RID_READWRITE,
+ RID_ASSIGN, RID_RETAIN, RID_COPY,
+ RID_PROPATOMIC, RID_NONATOMIC,
+
+ /* ObjC nullability support keywords that also can appear in the
+ property attribute context. These values should remain contiguous
+ with the other property attributes. */
+ RID_NULL_UNSPECIFIED, RID_NULLABLE, RID_NONNULL, RID_NULL_RESETTABLE,
+
+ /* C (reserved and imaginary types not implemented, so any use is a
+ syntax error) */
+ RID_IMAGINARY,
+
+ /* C */
+ RID_INT, RID_CHAR, RID_FLOAT, RID_DOUBLE, RID_VOID,
+ RID_ENUM, RID_STRUCT, RID_UNION, RID_IF, RID_ELSE,
+ RID_WHILE, RID_DO, RID_FOR, RID_SWITCH, RID_CASE,
+ RID_DEFAULT, RID_BREAK, RID_CONTINUE, RID_RETURN, RID_GOTO,
+ RID_SIZEOF,
+
+ /* C extensions */
+ RID_ASM, RID_TYPEOF, RID_TYPEOF_UNQUAL, RID_ALIGNOF, RID_ATTRIBUTE,
+ RID_VA_ARG,
+ RID_EXTENSION, RID_IMAGPART, RID_REALPART, RID_LABEL, RID_CHOOSE_EXPR,
+ RID_TYPES_COMPATIBLE_P, RID_BUILTIN_COMPLEX, RID_BUILTIN_SHUFFLE,
+ RID_BUILTIN_SHUFFLEVECTOR, RID_BUILTIN_CONVERTVECTOR, RID_BUILTIN_TGMATH,
+ RID_BUILTIN_HAS_ATTRIBUTE, RID_BUILTIN_ASSOC_BARRIER,
+ RID_DFLOAT32, RID_DFLOAT64, RID_DFLOAT128,
+
+ /* TS 18661-3 keywords, in the same sequence as the TI_* values. */
+ RID_FLOAT16,
+ RID_FLOATN_NX_FIRST = RID_FLOAT16,
+ RID_FLOAT32,
+ RID_FLOAT64,
+ RID_FLOAT128,
+ RID_FLOAT32X,
+ RID_FLOAT64X,
+ RID_FLOAT128X,
+#define CASE_RID_FLOATN_NX \
+ case RID_FLOAT16: case RID_FLOAT32: case RID_FLOAT64: case RID_FLOAT128: \
+ case RID_FLOAT32X: case RID_FLOAT64X: case RID_FLOAT128X
+
+ RID_FRACT, RID_ACCUM, RID_AUTO_TYPE, RID_BUILTIN_CALL_WITH_STATIC_CHAIN,
+
+ /* "__GIMPLE", for the GIMPLE-parsing extension to the C frontend. */
+ RID_GIMPLE,
+
+ /* "__PHI", for parsing PHI function in GIMPLE FE. */
+ RID_PHI,
+
+ /* "__RTL", for the RTL-parsing extension to the C frontend. */
+ RID_RTL,
+
+ /* C11 */
+ RID_ALIGNAS, RID_GENERIC,
+
+ /* This means to warn that this is a C++ keyword, and then treat it
+ as a normal identifier. */
+ RID_CXX_COMPAT_WARN,
+
+ /* GNU transactional memory extension */
+ RID_TRANSACTION_ATOMIC, RID_TRANSACTION_RELAXED, RID_TRANSACTION_CANCEL,
+
+ /* Too many ways of getting the name of a function as a string */
+ RID_FUNCTION_NAME, RID_PRETTY_FUNCTION_NAME, RID_C99_FUNCTION_NAME,
+
+ /* C++ (some of these are keywords in Objective-C as well, but only
+ if they appear after a '@') */
+ RID_BOOL, RID_WCHAR, RID_CLASS,
+ RID_PUBLIC, RID_PRIVATE, RID_PROTECTED,
+ RID_TEMPLATE, RID_NULL, RID_CATCH,
+ RID_DELETE, RID_FALSE, RID_NAMESPACE,
+ RID_NEW, RID_OFFSETOF, RID_OPERATOR,
+ RID_THIS, RID_THROW, RID_TRUE,
+ RID_TRY, RID_TYPENAME, RID_TYPEID,
+ RID_USING, RID_CHAR16, RID_CHAR32,
+
+ /* casts */
+ RID_CONSTCAST, RID_DYNCAST, RID_REINTCAST, RID_STATCAST,
+
+ /* C++ extensions */
+ RID_ADDRESSOF,
+ RID_BUILTIN_LAUNDER,
+ RID_BUILTIN_BIT_CAST,
+
+#define DEFTRAIT(TCC, CODE, NAME, ARITY) \
+ RID_##CODE,
+#include "cp/cp-trait.def"
+#undef DEFTRAIT
+
+ /* C++11 */
+ RID_CONSTEXPR, RID_DECLTYPE, RID_NOEXCEPT, RID_NULLPTR, RID_STATIC_ASSERT,
+
+ /* C++20 */
+ RID_CONSTINIT, RID_CONSTEVAL,
+
+ /* char8_t */
+ RID_CHAR8,
+
+ /* C++ concepts */
+ RID_CONCEPT, RID_REQUIRES,
+
+ /* C++ modules. */
+ RID__MODULE, RID__IMPORT, RID__EXPORT, /* Internal tokens. */
+
+ /* C++ coroutines */
+ RID_CO_AWAIT, RID_CO_YIELD, RID_CO_RETURN,
+
+ /* C++ transactional memory. */
+ RID_ATOMIC_NOEXCEPT, RID_ATOMIC_CANCEL, RID_SYNCHRONIZED,
+
+ /* Objective-C ("AT" reserved words - they are only keywords when
+ they follow '@') */
+ RID_AT_ENCODE, RID_AT_END,
+ RID_AT_CLASS, RID_AT_ALIAS, RID_AT_DEFS,
+ RID_AT_PRIVATE, RID_AT_PROTECTED, RID_AT_PUBLIC, RID_AT_PACKAGE,
+ RID_AT_PROTOCOL, RID_AT_SELECTOR,
+ RID_AT_THROW, RID_AT_TRY, RID_AT_CATCH,
+ RID_AT_FINALLY, RID_AT_SYNCHRONIZED,
+ RID_AT_OPTIONAL, RID_AT_REQUIRED, RID_AT_PROPERTY,
+ RID_AT_SYNTHESIZE, RID_AT_DYNAMIC,
+ RID_AT_INTERFACE,
+ RID_AT_IMPLEMENTATION,
+
+ /* OpenMP */
+ RID_OMP_ALL_MEMORY,
+
+ /* Named address support, mapping the keyword to a particular named address
+ number. Named address space 0 is reserved for the generic address. If
+ there are more than 254 named addresses, the addr_space_t type will need
+ to be grown from an unsigned char to unsigned short. */
+ RID_ADDR_SPACE_0, /* generic address */
+ RID_ADDR_SPACE_1,
+ RID_ADDR_SPACE_2,
+ RID_ADDR_SPACE_3,
+ RID_ADDR_SPACE_4,
+ RID_ADDR_SPACE_5,
+ RID_ADDR_SPACE_6,
+ RID_ADDR_SPACE_7,
+ RID_ADDR_SPACE_8,
+ RID_ADDR_SPACE_9,
+ RID_ADDR_SPACE_10,
+ RID_ADDR_SPACE_11,
+ RID_ADDR_SPACE_12,
+ RID_ADDR_SPACE_13,
+ RID_ADDR_SPACE_14,
+ RID_ADDR_SPACE_15,
+
+ RID_FIRST_ADDR_SPACE = RID_ADDR_SPACE_0,
+ RID_LAST_ADDR_SPACE = RID_ADDR_SPACE_15,
+
+ /* __intN keywords. The _N_M here doesn't correspond to the intN
+ in the keyword; use the bitsize in int_n_t_data_t[M] for that.
+ For example, if int_n_t_data_t[0].bitsize is 13, then RID_INT_N_0
+ is for __int13. */
+
+ /* Note that the range to use is RID_FIRST_INT_N through
+ RID_FIRST_INT_N + NUM_INT_N_ENTS - 1 and c-parser.cc has a list of
+ all RID_INT_N_* in a case statement. */
+
+ RID_INT_N_0,
+ RID_INT_N_1,
+ RID_INT_N_2,
+ RID_INT_N_3,
+
+ RID_FIRST_INT_N = RID_INT_N_0,
+ RID_LAST_INT_N = RID_INT_N_3,
+
+ RID_MAX,
+
+ RID_FIRST_MODIFIER = RID_STATIC,
+ RID_LAST_MODIFIER = RID_ONEWAY,
+
+ RID_FIRST_CXX11 = RID_CONSTEXPR,
+ RID_LAST_CXX11 = RID_STATIC_ASSERT,
+ RID_FIRST_CXX20 = RID_CONSTINIT,
+ RID_LAST_CXX20 = RID_CO_RETURN,
+ RID_FIRST_AT = RID_AT_ENCODE,
+ RID_LAST_AT = RID_AT_IMPLEMENTATION,
+ RID_FIRST_PQ = RID_IN,
+ RID_LAST_PQ = RID_ONEWAY,
+ RID_FIRST_PATTR = RID_GETTER,
+ RID_LAST_PATTR = RID_NULL_RESETTABLE
+};
+
+#define OBJC_IS_AT_KEYWORD(rid) \
+ ((unsigned int) (rid) >= (unsigned int) RID_FIRST_AT && \
+ (unsigned int) (rid) <= (unsigned int) RID_LAST_AT)
+
+#define OBJC_IS_PQ_KEYWORD(rid) \
+ ((unsigned int) (rid) >= (unsigned int) RID_FIRST_PQ && \
+ (unsigned int) (rid) <= (unsigned int) RID_LAST_PQ)
+
+/* Keywords permitted in an @property attribute context. */
+#define OBJC_IS_PATTR_KEYWORD(rid) \
+ ((((unsigned int) (rid) >= (unsigned int) RID_FIRST_PATTR && \
+ (unsigned int) (rid) <= (unsigned int) RID_LAST_PATTR)) \
+ || rid == RID_CLASS)
+
+/* OBJC_IS_CXX_KEYWORD recognizes the 'CXX_OBJC' keywords (such as
+ 'class') which are shared in a subtle way between Objective-C and
+ C++. When the lexer is lexing in Objective-C/Objective-C++, if it
+   finds '@' followed by one of these identifiers (e.g., '@class'), it
+ recognizes the whole as an Objective-C keyword. If the identifier
+ is found elsewhere, it follows the rules of the C/C++ language.
+ */
+#define OBJC_IS_CXX_KEYWORD(rid) \
+ (rid == RID_CLASS || rid == RID_SYNCHRONIZED \
+ || rid == RID_PUBLIC || rid == RID_PROTECTED || rid == RID_PRIVATE \
+ || rid == RID_TRY || rid == RID_THROW || rid == RID_CATCH)
+
+/* The elements of `ridpointers' are identifier nodes for the reserved
+ type names and storage classes. It is indexed by a RID_... value. */
+extern GTY ((length ("(int) RID_MAX"))) tree *ridpointers;
+
+/* Standard named or nameless data types of the C compiler. */
+
+enum c_tree_index
+{
+ CTI_CHAR8_TYPE,
+ CTI_CHAR16_TYPE,
+ CTI_CHAR32_TYPE,
+ CTI_WCHAR_TYPE,
+ CTI_UNDERLYING_WCHAR_TYPE,
+ CTI_WINT_TYPE,
+ CTI_SIGNED_SIZE_TYPE, /* For format checking only. */
+ CTI_UNSIGNED_PTRDIFF_TYPE, /* For format checking only. */
+ CTI_INTMAX_TYPE,
+ CTI_UINTMAX_TYPE,
+ CTI_WIDEST_INT_LIT_TYPE,
+ CTI_WIDEST_UINT_LIT_TYPE,
+
+ /* Types for <stdint.h>, that may not be defined on all
+ targets. */
+ CTI_SIG_ATOMIC_TYPE,
+ CTI_INT8_TYPE,
+ CTI_INT16_TYPE,
+ CTI_INT32_TYPE,
+ CTI_INT64_TYPE,
+ CTI_UINT8_TYPE,
+ CTI_UINT16_TYPE,
+ CTI_UINT32_TYPE,
+ CTI_UINT64_TYPE,
+ CTI_INT_LEAST8_TYPE,
+ CTI_INT_LEAST16_TYPE,
+ CTI_INT_LEAST32_TYPE,
+ CTI_INT_LEAST64_TYPE,
+ CTI_UINT_LEAST8_TYPE,
+ CTI_UINT_LEAST16_TYPE,
+ CTI_UINT_LEAST32_TYPE,
+ CTI_UINT_LEAST64_TYPE,
+ CTI_INT_FAST8_TYPE,
+ CTI_INT_FAST16_TYPE,
+ CTI_INT_FAST32_TYPE,
+ CTI_INT_FAST64_TYPE,
+ CTI_UINT_FAST8_TYPE,
+ CTI_UINT_FAST16_TYPE,
+ CTI_UINT_FAST32_TYPE,
+ CTI_UINT_FAST64_TYPE,
+ CTI_INTPTR_TYPE,
+ CTI_UINTPTR_TYPE,
+
+ CTI_CHAR_ARRAY_TYPE,
+ CTI_CHAR8_ARRAY_TYPE,
+ CTI_CHAR16_ARRAY_TYPE,
+ CTI_CHAR32_ARRAY_TYPE,
+ CTI_WCHAR_ARRAY_TYPE,
+ CTI_STRING_TYPE,
+ CTI_CONST_STRING_TYPE,
+
+ /* Type for boolean expressions (bool in C++, int in C). */
+ CTI_TRUTHVALUE_TYPE,
+ CTI_TRUTHVALUE_TRUE,
+ CTI_TRUTHVALUE_FALSE,
+
+ CTI_DEFAULT_FUNCTION_TYPE,
+
+ CTI_NULL,
+ CTI_NULLPTR,
+ CTI_NULLPTR_TYPE,
+
+ /* These are not types, but we have to look them up all the time. */
+ CTI_FUNCTION_NAME_DECL,
+ CTI_PRETTY_FUNCTION_NAME_DECL,
+ CTI_C99_FUNCTION_NAME_DECL,
+
+ CTI_MODULE_HWM,
+ /* Below here entities change during compilation. */
+
+ CTI_SAVED_FUNCTION_NAME_DECLS,
+
+ CTI_MAX
+};
+
+#define C_CPP_HASHNODE(id) \
+ (&(((struct c_common_identifier *) (id))->node))
+#define C_RID_CODE(id) \
+ ((enum rid) (((struct c_common_identifier *) (id))->node.rid_code))
+#define C_SET_RID_CODE(id, code) \
+ (((struct c_common_identifier *) (id))->node.rid_code = (unsigned char) code)
+
+/* Identifier part common to the C front ends. Inherits from
+ tree_identifier, despite appearances. */
+struct GTY(()) c_common_identifier {
+ struct tree_common common;
+ struct cpp_hashnode node;
+};
+
+/* An entry in the reserved keyword table. */
+
+struct c_common_resword
+{
+ const char *const word;
+ ENUM_BITFIELD(rid) const rid : 16;
+ const unsigned int disable : 32;
+};
+
+/* Mode used to build pointers (VOIDmode means ptr_mode). */
+
+extern machine_mode c_default_pointer_mode;
+
+/* Extra cpp_ttype values for C++. */
+
+/* A token type for template-ids. If a template-id is processed while
+ parsing tentatively, it is replaced with a CPP_TEMPLATE_ID token;
+ the value of the CPP_TEMPLATE_ID is whatever was returned by
+ cp_parser_template_id. */
+#define CPP_TEMPLATE_ID ((enum cpp_ttype) (CPP_KEYWORD + 1))
+
+/* A token type for nested-name-specifiers. If a
+ nested-name-specifier is processed while parsing tentatively, it is
+ replaced with a CPP_NESTED_NAME_SPECIFIER token; the value of the
+ CPP_NESTED_NAME_SPECIFIER is whatever was returned by
+ cp_parser_nested_name_specifier_opt. */
+#define CPP_NESTED_NAME_SPECIFIER ((enum cpp_ttype) (CPP_TEMPLATE_ID + 1))
+
+/* A token type for pre-parsed C++0x decltype. */
+#define CPP_DECLTYPE ((enum cpp_ttype) (CPP_NESTED_NAME_SPECIFIER + 1))
+
+/* A token type for pre-parsed primary-expression (lambda- or statement-). */
+#define CPP_PREPARSED_EXPR ((enum cpp_ttype) (CPP_DECLTYPE + 1))
+
+/* The number of token types, including C++-specific ones. */
+#define N_CP_TTYPES ((int) (CPP_PREPARSED_EXPR + 1))
+
+/* Disable mask. Keywords are disabled if (reswords[i].disable &
+ mask) is _true_. Thus for keywords which are present in all
+ languages the disable field is zero. */
+
+#define D_CONLY 0x0001 /* C only (not in C++). */
+#define D_CXXONLY 0x0002 /* C++ only (not in C). */
+#define D_C99 0x0004 /* In C, C99 only. */
+#define D_C2X 0x0008 /* In C, C2X only. */
+#define D_CXX11 0x0010 /* In C++, C++11 only. */
+#define D_EXT 0x0020 /* GCC extension. */
+#define D_EXT89 0x0040 /* GCC extension incorporated in C99. */
+#define D_EXT11 0x0080 /* GCC extension incorporated in C2X. */
+#define D_ASM 0x0100 /* Disabled by -fno-asm. */
+#define D_OBJC 0x0200 /* In Objective C and neither C nor C++. */
+#define D_CXX_OBJC 0x0400 /* In Objective C, and C++, but not C. */
+#define D_CXXWARN 0x0800 /* In C warn with -Wcxx-compat. */
+#define D_CXX_CONCEPTS 0x1000 /* In C++, only with concepts. */
+#define D_TRANSMEM 0x2000 /* C++ transactional memory TS. */
+#define D_CXX_CHAR8_T 0x4000 /* In C++, only with -fchar8_t. */
+#define D_CXX20 0x8000 /* In C++, C++20 only. */
+#define D_CXX_COROUTINES 0x10000 /* In C++, only with coroutines. */
+#define D_CXX_MODULES 0x20000 /* In C++, only with modules. */
+
+#define D_CXX_CONCEPTS_FLAGS (D_CXXONLY | D_CXX_CONCEPTS)
+#define D_CXX_CHAR8_T_FLAGS (D_CXXONLY | D_CXX_CHAR8_T)
+#define D_CXX_MODULES_FLAGS (D_CXXONLY | D_CXX_MODULES)
+#define D_CXX_COROUTINES_FLAGS (D_CXXONLY | D_CXX_COROUTINES)
+
+/* The reserved keyword table. */
+extern const struct c_common_resword c_common_reswords[];
+
+/* The number of items in the reserved keyword table. */
+extern const unsigned int num_c_common_reswords;
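+
+/* For illustration, a hypothetical table row rather than one taken
+   from this file: an entry of c_common_reswords such as
+
+     { "typeof", RID_TYPEOF, D_ASM | D_EXT }
+
+   is dropped whenever (reswords[i].disable & mask) is true for the
+   current language's disable mask, so such a keyword would stop being
+   recognized under, e.g., -fno-asm or a strict ISO dialect. */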
+
+#define char8_type_node c_global_trees[CTI_CHAR8_TYPE]
+#define char16_type_node c_global_trees[CTI_CHAR16_TYPE]
+#define char32_type_node c_global_trees[CTI_CHAR32_TYPE]
+#define wchar_type_node c_global_trees[CTI_WCHAR_TYPE]
+#define underlying_wchar_type_node c_global_trees[CTI_UNDERLYING_WCHAR_TYPE]
+#define wint_type_node c_global_trees[CTI_WINT_TYPE]
+#define signed_size_type_node c_global_trees[CTI_SIGNED_SIZE_TYPE]
+#define unsigned_ptrdiff_type_node c_global_trees[CTI_UNSIGNED_PTRDIFF_TYPE]
+#define intmax_type_node c_global_trees[CTI_INTMAX_TYPE]
+#define uintmax_type_node c_global_trees[CTI_UINTMAX_TYPE]
+#define widest_integer_literal_type_node c_global_trees[CTI_WIDEST_INT_LIT_TYPE]
+#define widest_unsigned_literal_type_node c_global_trees[CTI_WIDEST_UINT_LIT_TYPE]
+
+#define sig_atomic_type_node c_global_trees[CTI_SIG_ATOMIC_TYPE]
+#define int8_type_node c_global_trees[CTI_INT8_TYPE]
+#define int16_type_node c_global_trees[CTI_INT16_TYPE]
+#define int32_type_node c_global_trees[CTI_INT32_TYPE]
+#define int64_type_node c_global_trees[CTI_INT64_TYPE]
+#define uint8_type_node c_global_trees[CTI_UINT8_TYPE]
+#define c_uint16_type_node c_global_trees[CTI_UINT16_TYPE]
+#define c_uint32_type_node c_global_trees[CTI_UINT32_TYPE]
+#define c_uint64_type_node c_global_trees[CTI_UINT64_TYPE]
+#define int_least8_type_node c_global_trees[CTI_INT_LEAST8_TYPE]
+#define int_least16_type_node c_global_trees[CTI_INT_LEAST16_TYPE]
+#define int_least32_type_node c_global_trees[CTI_INT_LEAST32_TYPE]
+#define int_least64_type_node c_global_trees[CTI_INT_LEAST64_TYPE]
+#define uint_least8_type_node c_global_trees[CTI_UINT_LEAST8_TYPE]
+#define uint_least16_type_node c_global_trees[CTI_UINT_LEAST16_TYPE]
+#define uint_least32_type_node c_global_trees[CTI_UINT_LEAST32_TYPE]
+#define uint_least64_type_node c_global_trees[CTI_UINT_LEAST64_TYPE]
+#define int_fast8_type_node c_global_trees[CTI_INT_FAST8_TYPE]
+#define int_fast16_type_node c_global_trees[CTI_INT_FAST16_TYPE]
+#define int_fast32_type_node c_global_trees[CTI_INT_FAST32_TYPE]
+#define int_fast64_type_node c_global_trees[CTI_INT_FAST64_TYPE]
+#define uint_fast8_type_node c_global_trees[CTI_UINT_FAST8_TYPE]
+#define uint_fast16_type_node c_global_trees[CTI_UINT_FAST16_TYPE]
+#define uint_fast32_type_node c_global_trees[CTI_UINT_FAST32_TYPE]
+#define uint_fast64_type_node c_global_trees[CTI_UINT_FAST64_TYPE]
+#define intptr_type_node c_global_trees[CTI_INTPTR_TYPE]
+#define uintptr_type_node c_global_trees[CTI_UINTPTR_TYPE]
+
+#define truthvalue_type_node c_global_trees[CTI_TRUTHVALUE_TYPE]
+#define truthvalue_true_node c_global_trees[CTI_TRUTHVALUE_TRUE]
+#define truthvalue_false_node c_global_trees[CTI_TRUTHVALUE_FALSE]
+
+#define char_array_type_node c_global_trees[CTI_CHAR_ARRAY_TYPE]
+#define char8_array_type_node c_global_trees[CTI_CHAR8_ARRAY_TYPE]
+#define char16_array_type_node c_global_trees[CTI_CHAR16_ARRAY_TYPE]
+#define char32_array_type_node c_global_trees[CTI_CHAR32_ARRAY_TYPE]
+#define wchar_array_type_node c_global_trees[CTI_WCHAR_ARRAY_TYPE]
+#define string_type_node c_global_trees[CTI_STRING_TYPE]
+#define const_string_type_node c_global_trees[CTI_CONST_STRING_TYPE]
+
+#define default_function_type c_global_trees[CTI_DEFAULT_FUNCTION_TYPE]
+
+#define function_name_decl_node c_global_trees[CTI_FUNCTION_NAME_DECL]
+#define pretty_function_name_decl_node c_global_trees[CTI_PRETTY_FUNCTION_NAME_DECL]
+#define c99_function_name_decl_node c_global_trees[CTI_C99_FUNCTION_NAME_DECL]
+#define saved_function_name_decls c_global_trees[CTI_SAVED_FUNCTION_NAME_DECLS]
+
+/* The node for C++ `__null'. */
+#define null_node c_global_trees[CTI_NULL]
+/* The nodes for `nullptr'. */
+#define nullptr_node c_global_trees[CTI_NULLPTR]
+#define nullptr_type_node c_global_trees[CTI_NULLPTR_TYPE]
+
+extern GTY(()) tree c_global_trees[CTI_MAX];
+
+/* Mark which labels are explicitly declared.
+ These may be shadowed, and may be referenced from nested functions. */
+#define C_DECLARED_LABEL_FLAG(label) TREE_LANG_FLAG_1 (label)
+
+enum c_language_kind
+{
+ clk_c = 0, /* C90, C94, C99, C11 or C2X */
+ clk_objc = 1, /* clk_c with ObjC features. */
+ clk_cxx = 2, /* ANSI/ISO C++ */
+ clk_objcxx = 3 /* clk_cxx with ObjC features. */
+};
+
+/* To test for a specific language use c_language, defined by each
+ front end. For "ObjC features" or "not C++" use the macros. */
+extern c_language_kind c_language;
+
+#define c_dialect_cxx() ((c_language & clk_cxx) != 0)
+#define c_dialect_objc() ((c_language & clk_objc) != 0)
+
+/* The various names of operators that appear in error messages. */
+enum ref_operator {
+ /* NULL */
+ RO_NULL,
+ /* array indexing */
+ RO_ARRAY_INDEXING,
+ /* unary * */
+ RO_UNARY_STAR,
+ /* -> */
+ RO_ARROW,
+ /* implicit conversion */
+ RO_IMPLICIT_CONVERSION,
+ /* ->* */
+ RO_ARROW_STAR
+};
+
+/* Information about a statement tree. */
+
+struct GTY(()) stmt_tree_s {
+ /* A stack of statement lists being collected. */
+ vec<tree, va_gc> *x_cur_stmt_list;
+
+  /* In C++, nonzero if we should treat statements as full
+     expressions. In particular, this variable is nonzero if at the
+     end of a statement we should destroy any temporaries created
+     during that statement; similarly, at the end of a block we
+     should destroy any local variables in this block. Normally, this
+ variable is nonzero, since those are the normal semantics of
+ C++.
+
+ This flag has no effect in C. */
+ int stmts_are_full_exprs_p;
+};
+
+typedef struct stmt_tree_s *stmt_tree;
+
+/* Global state pertinent to the current function. Some C dialects
+ extend this structure with additional fields. */
+
+struct GTY(()) c_language_function {
+ /* While we are parsing the function, this contains information
+ about the statement-tree that we are building. */
+ struct stmt_tree_s x_stmt_tree;
+
+ /* Vector of locally defined typedefs, for
+ -Wunused-local-typedefs. */
+ vec<tree, va_gc> *local_typedefs;
+};
+
+#define stmt_list_stack (current_stmt_tree ()->x_cur_stmt_list)
+
+/* When building a statement-tree, this is the current statement list
+ being collected. */
+#define cur_stmt_list (stmt_list_stack->last ())
+
+#define building_stmt_list_p() (stmt_list_stack && !stmt_list_stack->is_empty())
+
+/* Language-specific hooks. */
+
+/* If non-NULL, this function is called after a precompiled header file
+ is loaded. */
+extern void (*lang_post_pch_load) (void);
+
+extern void push_file_scope (void);
+extern void pop_file_scope (void);
+extern stmt_tree current_stmt_tree (void);
+extern tree push_stmt_list (void);
+extern tree pop_stmt_list (tree);
+extern tree add_stmt (tree);
+extern void push_cleanup (tree, tree, bool);
+
+extern tree build_modify_expr (location_t, tree, tree, enum tree_code,
+ location_t, tree, tree);
+extern tree build_indirect_ref (location_t, tree, ref_operator);
+
+extern bool has_c_linkage (const_tree decl);
+extern bool c_decl_implicit (const_tree);
+
+/* Switches common to the C front ends. */
+
+/* Nonzero means don't output line number information. */
+
+extern char flag_no_line_commands;
+
+/* Nonzero causes -E output not to be done, but directives such as
+ #define that have side effects are still obeyed. */
+
+extern char flag_no_output;
+
+/* Nonzero means dump macros in some fashion; contains the 'D', 'M',
+ 'N' or 'U' of the command line switch. */
+
+extern char flag_dump_macros;
+
+/* Nonzero means pass #include lines through to the output. */
+
+extern char flag_dump_includes;
+
+/* Nonzero means process PCH files while preprocessing. */
+
+extern bool flag_pch_preprocess;
+
+/* The file name to which we should write a precompiled header, or
+ NULL if no header will be written in this compile. */
+
+extern const char *pch_file;
+
+/* Nonzero if an ISO standard was selected. It rejects macros in the
+ user's namespace. */
+
+extern int flag_iso;
+
+/* C/ObjC language option variables. */
+
+
+/* Nonzero means allow type mismatches in conditional expressions;
+ just make their values `void'. */
+
+extern int flag_cond_mismatch;
+
+/* Nonzero means enable C89 Amendment 1 features. */
+
+extern int flag_isoc94;
+
+/* Nonzero means use the ISO C99 (or later) dialect of C. */
+
+extern int flag_isoc99;
+
+/* Nonzero means use the ISO C11 (or later) dialect of C. */
+
+extern int flag_isoc11;
+
+/* Nonzero means use the ISO C2X dialect of C. */
+
+extern int flag_isoc2x;
+
+/* Nonzero means that we have builtin functions, and main is an int. */
+
+extern int flag_hosted;
+
+/* ObjC language option variables. */
+
+
+/* Tells the compiler that this is a special run. Do not perform any
+   compiling; instead we are to test some platform dependent features
+ and output a C header file with appropriate definitions. */
+
+extern int print_struct_values;
+
+/* Tells the compiler what the constant string class for ObjC is. */
+
+extern const char *constant_string_class_name;
+
+
+/* C++ language option variables. */
+
+/* The reference version of the ABI for -Wabi. */
+
+extern int warn_abi_version;
+
+/* Return TRUE if one of {flag_abi_version,flag_abi_compat_version} is
+ less than N and the other is at least N. */
+#define abi_compat_version_crosses(N) \
+ (abi_version_at_least(N) \
+ != (flag_abi_compat_version == 0 \
+ || flag_abi_compat_version >= (N)))
+
+/* Return TRUE if one of {flag_abi_version,warn_abi_version} is
+ less than N and the other is at least N, for use by -Wabi. */
+#define abi_version_crosses(N) \
+ (abi_version_at_least(N) \
+ != (warn_abi_version == 0 \
+ || warn_abi_version >= (N)))
+
+/* The supported C++ dialects. */
+
+enum cxx_dialect {
+ cxx_unset,
+ /* C++98 with TC1 */
+ cxx98,
+ cxx03 = cxx98,
+ /* C++11 */
+ cxx0x,
+ cxx11 = cxx0x,
+ /* C++14 */
+ cxx14,
+ /* C++17 */
+ cxx17,
+ /* C++20 */
+ cxx20,
+ /* C++23 */
+ cxx23
+};
+
+/* The C++ dialect being used. C++98 is the default. */
+extern enum cxx_dialect cxx_dialect;
+
+/* Maximum template instantiation depth. This limit is rather
+ arbitrary, but it exists to limit the time it takes to notice
+ excessively recursive template instantiations. */
+
+extern int max_tinst_depth;
+
+/* Nonzero means that we should not issue warnings about problems that
+ occur when the code is executed, because the code being processed
+ is not expected to be executed. This is set during parsing. This
+ is used for cases like sizeof() and "0 ? a : b". This is a count,
+ not a bool, because unexecuted expressions can nest. */
+
+extern int c_inhibit_evaluation_warnings;
+
+/* Depending on which phase of processing we are in, we may need
+ to prefer input_location to libcpp's locations. (Specifically,
+ after the C++ lexer is done lexing tokens, but prior to calling
+   cpp_finish (), we need to do so.) */
+
+extern bool override_libcpp_locations;
+
+/* C types are partitioned into three subsets: object, function, and
+ incomplete types. */
+#define C_TYPE_OBJECT_P(type) \
+ (TREE_CODE (type) != FUNCTION_TYPE && TYPE_SIZE (type))
+
+#define C_TYPE_INCOMPLETE_P(type) \
+ (TREE_CODE (type) != FUNCTION_TYPE && TYPE_SIZE (type) == 0)
+
+#define C_TYPE_FUNCTION_P(type) \
+ (TREE_CODE (type) == FUNCTION_TYPE)
+
+/* For convenience we define a single macro to identify the class of
+ object or incomplete types. */
+#define C_TYPE_OBJECT_OR_INCOMPLETE_P(type) \
+ (!C_TYPE_FUNCTION_P (type))
+
+/* Return true if TYPE is a vector type that should be subject to the GNU
+ vector extensions (as opposed to a vector type that is used only for
+ the purposes of defining target-specific built-in functions). */
+
+inline bool
+gnu_vector_type_p (const_tree type)
+{
+ return TREE_CODE (type) == VECTOR_TYPE && !TYPE_INDIVISIBLE_P (type);
+}
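+
+/* For illustration, an assumption rather than text from this file: a
+   type declared with the GNU vector_size extension, e.g.
+
+     typedef int v4si __attribute__ ((vector_size (16)));
+
+   is expected to satisfy gnu_vector_type_p, while vector types that
+   exist only to describe target-specific built-ins are marked
+   indivisible and are rejected. */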
+
+struct visibility_flags
+{
+ unsigned inpragma : 1; /* True when in #pragma GCC visibility. */
+  unsigned inlines_hidden : 1; /* True when -fvisibility-inlines-hidden is in effect. */
+};
+
+/* These enumerators are possible types of unsafe conversions. */
+enum conversion_safety {
+ /* The conversion is safe. */
+ SAFE_CONVERSION = 0,
+ /* Another type of conversion with problems. */
+ UNSAFE_OTHER,
+ /* Conversion between signed and unsigned integers. */
+ UNSAFE_SIGN,
+  /* Conversions that reduce the precision of reals, including conversions
+ from reals to integers. */
+ UNSAFE_REAL,
+  /* Conversions from complex to reals or integers that discard the
+     imaginary component. */
+ UNSAFE_IMAGINARY
+};
+
+/* Global visibility options. */
+extern struct visibility_flags visibility_options;
+
+/* Attribute table common to the C front ends. */
+extern const struct attribute_spec c_common_attribute_table[];
+extern const struct attribute_spec c_common_format_attribute_table[];
+
+/* Pointer to function to lazily generate the VAR_DECL for __FUNCTION__ etc.
+ ID is the identifier to use, NAME is the string.
+   TYPE_DEP indicates whether it depends on the type of the function or not
+ (i.e. __PRETTY_FUNCTION__). */
+
+extern tree (*make_fname_decl) (location_t, tree, int);
+
+/* In c-decl.cc and cp/tree.cc. FIXME. */
+extern void c_register_addr_space (const char *str, addr_space_t as);
+
+/* In c-common.cc. */
+extern bool in_late_binary_op;
+extern const char *c_addr_space_name (addr_space_t as);
+extern tree identifier_global_value (tree);
+extern tree identifier_global_tag (tree);
+extern bool names_builtin_p (const char *);
+extern tree c_linkage_bindings (tree);
+extern void record_builtin_type (enum rid, const char *, tree);
+extern void start_fname_decls (void);
+extern void finish_fname_decls (void);
+extern const char *fname_as_string (int);
+extern tree fname_decl (location_t, unsigned, tree);
+
+extern int check_user_alignment (const_tree, bool, bool);
+extern bool check_function_arguments (location_t loc, const_tree, const_tree,
+ int, tree *, vec<location_t> *);
+extern void check_function_arguments_recurse (void (*)
+ (void *, tree,
+ unsigned HOST_WIDE_INT),
+ void *, tree,
+ unsigned HOST_WIDE_INT,
+ opt_code);
+extern bool check_builtin_function_arguments (location_t, vec<location_t>,
+ tree, tree, int, tree *);
+extern void check_function_format (const_tree, tree, int, tree *,
+ vec<location_t> *);
+extern bool attribute_fallthrough_p (tree);
+extern tree handle_format_attribute (tree *, tree, tree, int, bool *);
+extern tree handle_format_arg_attribute (tree *, tree, tree, int, bool *);
+extern bool c_common_handle_option (size_t, const char *, HOST_WIDE_INT, int,
+ location_t,
+ const struct cl_option_handlers *);
+extern bool default_handle_c_option (size_t, const char *, int);
+extern tree c_common_type_for_mode (machine_mode, int);
+extern tree c_common_type_for_size (unsigned int, int);
+extern tree c_common_fixed_point_type_for_size (unsigned int, unsigned int,
+ int, int);
+extern tree c_common_unsigned_type (tree);
+extern tree c_common_signed_type (tree);
+extern tree c_common_signed_or_unsigned_type (int, tree);
+extern void c_common_init_ts (void);
+extern tree c_build_bitfield_integer_type (unsigned HOST_WIDE_INT, int);
+extern enum conversion_safety unsafe_conversion_p (tree, tree, tree, bool);
+extern bool decl_with_nonnull_addr_p (const_tree);
+extern tree c_fully_fold (tree, bool, bool *, bool = false);
+extern tree c_wrap_maybe_const (tree, bool);
+extern tree c_common_truthvalue_conversion (location_t, tree);
+extern void c_apply_type_quals_to_decl (int, tree);
+extern tree c_sizeof_or_alignof_type (location_t, tree, bool, bool, int);
+extern tree c_alignof_expr (location_t, tree);
+/* Print an error message for invalid operands to arith operation CODE.
+ NOP_EXPR is used as a special case (see truthvalue_conversion). */
+extern void binary_op_error (rich_location *, enum tree_code, tree, tree);
+extern tree fix_string_type (tree);
+extern tree convert_and_check (location_t, tree, tree, bool = false);
+extern bool c_determine_visibility (tree);
+extern bool vector_types_compatible_elements_p (tree, tree);
+extern void mark_valid_location_for_stdc_pragma (bool);
+extern bool valid_location_for_stdc_pragma_p (void);
+extern void set_float_const_decimal64 (void);
+extern void clear_float_const_decimal64 (void);
+extern bool float_const_decimal64_p (void);
+
+extern bool keyword_begins_type_specifier (enum rid);
+extern bool keyword_is_storage_class_specifier (enum rid);
+extern bool keyword_is_type_qualifier (enum rid);
+extern bool keyword_is_decl_specifier (enum rid);
+extern unsigned max_align_t_align (void);
+extern bool cxx_fundamental_alignment_p (unsigned);
+extern bool pointer_to_zero_sized_aggr_p (tree);
+extern bool bool_promoted_to_int_p (tree);
+extern tree fold_for_warn (tree);
+extern tree c_common_get_narrower (tree, int *);
+extern bool get_attribute_operand (tree, unsigned HOST_WIDE_INT *);
+extern void c_common_finalize_early_debug (void);
+extern unsigned int c_strict_flex_array_level_of (tree);
+extern bool c_option_is_from_cpp_diagnostics (int);
+
+/* Used by convert_and_check; in front ends. */
+extern tree convert_init (tree, tree);
+
+#define c_sizeof(LOC, T) c_sizeof_or_alignof_type (LOC, T, true, false, 1)
+#define c_alignof(LOC, T) c_sizeof_or_alignof_type (LOC, T, false, false, 1)
+
+/* Subroutine of build_binary_op, used for certain operations. */
+extern tree shorten_binary_op (tree result_type, tree op0, tree op1, bool bitwise);
+
+/* Return true if division or modulo op0 / op1 or op0 % op1 may be shortened.
+ We can shorten only if we can guarantee that op0 is not signed integral
+ minimum or op1 is not -1, because e.g. (long long) INT_MIN / -1 is
+   the well-defined value INT_MAX + 1LL if long long is wider than int,
+   but INT_MIN / -1 itself is UB. */
+inline bool
+may_shorten_divmod (tree op0, tree op1)
+{
+ tree type0 = TREE_TYPE (op0);
+ if (TYPE_UNSIGNED (type0))
+ return true;
+ /* A cast from narrower unsigned won't be signed integral minimum,
+ but cast from same or wider precision unsigned could be. */
+ if (TREE_CODE (op0) == NOP_EXPR
+ && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (op0, 0)))
+ && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op0, 0)))
+ && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op0, 0)))
+ < TYPE_PRECISION (type0)))
+ return true;
+ if (TREE_CODE (op1) == INTEGER_CST && !integer_all_onesp (op1))
+ return true;
+ return false;
+}
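+
+/* For illustration, an assumption about typical inputs rather than
+   text from this file: with unsigned short x, a division such as
+   (int) x / 2 can be shortened, since the first operand is a cast
+   from a narrower unsigned type and 2 is a constant other than -1,
+   so no INT_MIN / -1 style case can arise. */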
+
+/* Subroutine of build_binary_op, used for comparison operations.
+ See if the operands have both been converted from subword integer types
+ and, if so, perhaps change them both back to their original type. */
+extern tree shorten_compare (location_t, tree *, tree *, tree *,
+ enum tree_code *);
+
+extern tree pointer_int_sum (location_t, enum tree_code, tree, tree,
+ bool = true);
+
+/* Add qualifiers to a type, in the fashion for C. */
+extern tree c_build_qualified_type (tree, int, tree = NULL_TREE, size_t = 0);
+
+/* Build tree nodes and builtin functions common to both C and C++ language
+ frontends. */
+extern void c_common_nodes_and_builtins (void);
+
+extern void disable_builtin_function (const char *);
+
+extern void set_compound_literal_name (tree decl);
+
+extern tree build_va_arg (location_t, tree, tree);
+
+extern const unsigned int c_family_lang_mask;
+extern unsigned int c_common_option_lang_mask (void);
+extern void c_common_diagnostics_set_defaults (diagnostic_context *);
+extern bool c_common_complain_wrong_lang_p (const struct cl_option *);
+extern void c_common_init_options_struct (struct gcc_options *);
+extern void c_common_init_options (unsigned int, struct cl_decoded_option *);
+extern bool c_common_post_options (const char **);
+extern bool c_common_init (void);
+extern void c_common_finish (void);
+extern void c_common_parse_file (void);
+extern alias_set_type c_common_get_alias_set (tree);
+extern void c_register_builtin_type (tree, const char*);
+extern bool c_promoting_integer_type_p (const_tree);
+extern bool self_promoting_args_p (const_tree);
+extern tree strip_pointer_operator (tree);
+extern tree strip_pointer_or_array_types (tree);
+extern HOST_WIDE_INT c_common_to_target_charset (HOST_WIDE_INT);
+
+/* This is the basic parsing function. */
+extern void c_parse_file (void);
+
+extern void c_parse_final_cleanups (void);
+
+/* These macros provide convenient access to the various _STMT nodes. */
+
+/* Nonzero if a given STATEMENT_LIST represents the outermost binding
+   of a statement expression. */
+#define STATEMENT_LIST_STMT_EXPR(NODE) \
+ TREE_LANG_FLAG_1 (STATEMENT_LIST_CHECK (NODE))
+
+/* Nonzero if a label has been added to the statement list. */
+#define STATEMENT_LIST_HAS_LABEL(NODE) \
+ TREE_LANG_FLAG_3 (STATEMENT_LIST_CHECK (NODE))
+
+/* C_MAYBE_CONST_EXPR accessors. */
+#define C_MAYBE_CONST_EXPR_PRE(NODE) \
+ TREE_OPERAND (C_MAYBE_CONST_EXPR_CHECK (NODE), 0)
+#define C_MAYBE_CONST_EXPR_EXPR(NODE) \
+ TREE_OPERAND (C_MAYBE_CONST_EXPR_CHECK (NODE), 1)
+#define C_MAYBE_CONST_EXPR_INT_OPERANDS(NODE) \
+ TREE_LANG_FLAG_0 (C_MAYBE_CONST_EXPR_CHECK (NODE))
+#define C_MAYBE_CONST_EXPR_NON_CONST(NODE) \
+ TREE_LANG_FLAG_1 (C_MAYBE_CONST_EXPR_CHECK (NODE))
+#define EXPR_INT_CONST_OPERANDS(EXPR) \
+ (INTEGRAL_TYPE_P (TREE_TYPE (EXPR)) \
+ && (TREE_CODE (EXPR) == INTEGER_CST \
+ || (TREE_CODE (EXPR) == C_MAYBE_CONST_EXPR \
+ && C_MAYBE_CONST_EXPR_INT_OPERANDS (EXPR))))
+
+/* In a FIELD_DECL, nonzero if the decl was originally a bitfield. */
+#define DECL_C_BIT_FIELD(NODE) \
+ (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) == 1)
+#define SET_DECL_C_BIT_FIELD(NODE) \
+ (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) = 1)
+#define CLEAR_DECL_C_BIT_FIELD(NODE) \
+ (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) = 0)
+
+/* True if the decl was an unnamed bitfield. */
+#define DECL_UNNAMED_BIT_FIELD(NODE) \
+ (DECL_C_BIT_FIELD (NODE) && !DECL_NAME (NODE))
+
+/* True iff TYPE is cv decltype(nullptr). */
+#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE)
+
+/* Returns the underlying type of the given enumeration type. The
+ underlying type is determined in different ways, depending on the
+ properties of the enum:
+
+ - In C++0x or C2x, the underlying type can be explicitly specified, e.g.,
+
+ enum E1 : char { ... } // underlying type is char
+
+ - In a C++0x scoped enumeration, the underlying type is int
+     unless otherwise specified:
+
+ enum class E2 { ... } // underlying type is int
+
+ - Otherwise, the underlying type is determined based on the
+ values of the enumerators. In this case, the
+ ENUM_UNDERLYING_TYPE will not be set until after the definition
+ of the enumeration is completed by finish_enum. */
+#define ENUM_UNDERLYING_TYPE(TYPE) \
+ TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE))
+
+/* Determines whether an ENUMERAL_TYPE has an explicit
+ underlying type. */
+#define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE))
+
+extern tree do_case (location_t, tree, tree, tree);
+extern tree build_stmt (location_t, enum tree_code, ...);
+extern tree build_real_imag_expr (location_t, enum tree_code, tree);
+
+/* These functions must be defined by each front-end which implements
+ a variant of the C language. They are used in c-common.cc. */
+
+extern tree build_unary_op (location_t, enum tree_code, tree, bool);
+extern tree build_binary_op (location_t, enum tree_code, tree, tree, bool);
+extern tree perform_integral_promotions (tree);
+
+/* These functions must be defined by each front-end which implements
+ a variant of the C language. They are used by port files. */
+
+extern tree default_conversion (tree);
+
+/* Given two integer or real types, return the type for their sum.
+ Given two compatible ANSI C types, returns the merged type. */
+
+extern tree common_type (tree, tree);
+
+extern tree decl_constant_value (tree);
+
+/* Handle increment and decrement of boolean types. */
+extern tree boolean_increment (enum tree_code, tree);
+
+extern int case_compare (splay_tree_key, splay_tree_key);
+
+extern tree c_add_case_label (location_t, splay_tree, tree, tree, tree,
+ tree = NULL_TREE);
+extern bool c_switch_covers_all_cases_p (splay_tree, tree);
+extern bool c_block_may_fallthru (const_tree);
+
+extern tree build_function_call (location_t, tree, tree);
+
+extern tree build_function_call_vec (location_t, vec<location_t>, tree,
+ vec<tree, va_gc> *, vec<tree, va_gc> *,
+ tree = NULL_TREE);
+
+extern tree resolve_overloaded_builtin (location_t, tree, vec<tree, va_gc> *);
+
+extern tree finish_label_address_expr (tree, location_t);
+
+/* Same function prototype, but the C and C++ front ends have
+ different implementations. Used in c-common.cc. */
+extern tree lookup_label (tree);
+extern tree lookup_name (tree);
+extern bool lvalue_p (const_tree);
+extern int maybe_adjust_arg_pos_for_attribute (const_tree);
+extern bool instantiation_dependent_expression_p (tree);
+
+extern bool vector_targets_convertible_p (const_tree t1, const_tree t2);
+extern bool vector_types_convertible_p (const_tree t1, const_tree t2, bool emit_lax_note);
+extern tree c_build_vec_perm_expr (location_t, tree, tree, tree, bool = true);
+extern tree c_build_shufflevector (location_t, tree, tree,
+ const vec<tree> &, bool = true);
+extern tree c_build_vec_convert (location_t, tree, location_t, tree, bool = true);
+
+extern void init_c_lex (void);
+
+extern void c_cpp_builtins (cpp_reader *);
+extern void c_cpp_builtins_optimize_pragma (cpp_reader *, tree, tree);
+extern bool c_cpp_diagnostic (cpp_reader *, enum cpp_diagnostic_level,
+ enum cpp_warning_reason, rich_location *,
+ const char *, va_list *)
+ ATTRIBUTE_GCC_DIAG(5,0);
+extern int c_common_has_attribute (cpp_reader *, bool);
+extern int c_common_has_builtin (cpp_reader *);
+
+extern bool parse_optimize_options (tree, bool);
+
+/* Positive if an implicit `extern "C"' scope has just been entered;
+ negative if such a scope has just been exited. */
+extern GTY(()) int pending_lang_change;
+
+/* Information recorded about each file examined during compilation. */
+
+struct c_fileinfo
+{
+ int time; /* Time spent in the file. */
+
+ /* Flags used only by C++.
+ INTERFACE_ONLY nonzero means that we are in an "interface" section
+ of the compiler. INTERFACE_UNKNOWN nonzero means we cannot trust
+ the value of INTERFACE_ONLY. If INTERFACE_UNKNOWN is zero and
+ INTERFACE_ONLY is zero, it means that we are responsible for
+ exporting definitions that others might need. */
+ short interface_only;
+ short interface_unknown;
+};
+
+struct c_fileinfo *get_fileinfo (const char *);
+extern void dump_time_statistics (void);
+
+extern bool c_dump_tree (void *, tree);
+
+extern void verify_sequence_points (tree);
+
+extern tree fold_offsetof (tree, tree = size_type_node,
+ tree_code ctx = ERROR_MARK);
+
+extern int complete_array_type (tree *, tree, bool);
+extern void complete_flexible_array_elts (tree);
+
+extern tree builtin_type_for_size (int, bool);
+
+extern void c_common_mark_addressable_vec (tree);
+
+extern void set_underlying_type (tree);
+extern bool user_facing_original_type_p (const_tree);
+extern void record_types_used_by_current_var_decl (tree);
+extern vec<tree, va_gc> *make_tree_vector (void);
+extern void release_tree_vector (vec<tree, va_gc> *);
+extern vec<tree, va_gc> *make_tree_vector_single (tree);
+extern vec<tree, va_gc> *make_tree_vector_from_list (tree);
+extern vec<tree, va_gc> *make_tree_vector_from_ctor (tree);
+extern vec<tree, va_gc> *make_tree_vector_copy (const vec<tree, va_gc> *);
+
+/* Used for communication between c_common_type_for_mode and
+ c_register_builtin_type. */
+extern GTY(()) tree registered_builtin_types;
+
+/* Read SOURCE_DATE_EPOCH from the environment to obtain a deterministic
+   timestamp that replaces embedded current dates, for reproducible
+   results. Returns -1 if SOURCE_DATE_EPOCH is not defined. */
+extern time_t cb_get_source_date_epoch (cpp_reader *pfile);
+
+/* The value (as a unix timestamp) corresponds to the date
+ "Dec 31 9999 23:59:59 UTC", which is the latest date that __DATE__ and
+ __TIME__ can store. */
+#define MAX_SOURCE_DATE_EPOCH HOST_WIDE_INT_C (253402300799)
+
+/* Callback for libcpp for offering spelling suggestions for misspelled
+ directives. */
+extern const char *cb_get_suggestion (cpp_reader *, const char *,
+ const char *const *);
+
+extern GTY(()) string_concat_db *g_string_concat_db;
+
+class substring_loc;
+extern const char *c_get_substring_location (const substring_loc &substr_loc,
+ location_t *out_loc);
+
+/* In c-gimplify.cc. */
+typedef struct bc_state
+{
+ tree bc_label[2];
+} bc_state_t;
+extern void save_bc_state (bc_state_t *);
+extern void restore_bc_state (bc_state_t *);
+extern tree c_genericize_control_stmt (tree *, int *, void *,
+ walk_tree_fn, walk_tree_lh);
+extern void c_genericize (tree);
+extern int c_gimplify_expr (tree *, gimple_seq *, gimple_seq *);
+extern tree c_build_bind_expr (location_t, tree, tree);
+
+/* In c-lex.cc. */
+extern enum cpp_ttype
+conflict_marker_get_final_tok_kind (enum cpp_ttype tok1_kind);
+
+/* In c-pch.cc */
+extern void pch_init (void);
+extern void pch_cpp_save_state (void);
+extern int c_common_valid_pch (cpp_reader *pfile, const char *name, int fd);
+extern void c_common_read_pch (cpp_reader *pfile, const char *name, int fd,
+ const char *orig);
+extern void c_common_write_pch (void);
+extern void c_common_no_more_pch (void);
+extern void c_common_pch_pragma (cpp_reader *pfile, const char *);
+
+/* In *-checksum.c */
+extern const unsigned char executable_checksum[16];
+
+/* In c-cppbuiltin.cc */
+extern void builtin_define_std (const char *macro);
+extern void builtin_define_with_value (const char *, const char *, int);
+extern void builtin_define_with_int_value (const char *, HOST_WIDE_INT);
+extern void builtin_define_type_sizeof (const char *, tree);
+extern void c_stddef_cpp_builtins (void);
+extern void fe_file_change (const line_map_ordinary *);
+extern void c_parse_error (const char *, enum cpp_ttype, tree, unsigned char,
+ rich_location *richloc);
+
+/* In c-ppoutput.cc */
+extern void init_pp_output (FILE *);
+extern void preprocess_file (cpp_reader *);
+extern void pp_file_change (const line_map_ordinary *);
+extern void pp_dir_change (cpp_reader *, const char *);
+extern bool check_missing_format_attribute (tree, tree);
+extern void c_pp_stream_token (cpp_reader *, const cpp_token *, location_t loc);
+
+/* In c-omp.cc */
+typedef wide_int_bitmask omp_clause_mask;
+
+#define OMP_CLAUSE_MASK_1 omp_clause_mask (1)
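+
+/* Editorial note, not from the original source: the C and C++ parsers
+   build per-construct clause sets by shifting this one-bit mask by the
+   PRAGMA_OMP_CLAUSE_* values from c-pragma.h, roughly as in this
+   illustrative sketch:
+
+     omp_clause_mask mask = (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)
+                            | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF);
+     bool accepts_if
+       = (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)) != 0;  */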
+
+enum c_omp_clause_split
+{
+ C_OMP_CLAUSE_SPLIT_TARGET = 0,
+ C_OMP_CLAUSE_SPLIT_TEAMS,
+ C_OMP_CLAUSE_SPLIT_DISTRIBUTE,
+ C_OMP_CLAUSE_SPLIT_PARALLEL,
+ C_OMP_CLAUSE_SPLIT_FOR,
+ C_OMP_CLAUSE_SPLIT_SIMD,
+ C_OMP_CLAUSE_SPLIT_COUNT,
+ C_OMP_CLAUSE_SPLIT_SECTIONS = C_OMP_CLAUSE_SPLIT_FOR,
+ C_OMP_CLAUSE_SPLIT_TASKLOOP = C_OMP_CLAUSE_SPLIT_FOR,
+ C_OMP_CLAUSE_SPLIT_LOOP = C_OMP_CLAUSE_SPLIT_FOR,
+ C_OMP_CLAUSE_SPLIT_MASKED = C_OMP_CLAUSE_SPLIT_DISTRIBUTE
+};
+
+enum c_omp_region_type
+{
+ C_ORT_OMP = 1 << 0,
+ C_ORT_ACC = 1 << 1,
+ C_ORT_DECLARE_SIMD = 1 << 2,
+ C_ORT_TARGET = 1 << 3,
+ C_ORT_OMP_DECLARE_SIMD = C_ORT_OMP | C_ORT_DECLARE_SIMD,
+ C_ORT_OMP_TARGET = C_ORT_OMP | C_ORT_TARGET
+};
+
+extern tree c_finish_omp_master (location_t, tree);
+extern tree c_finish_omp_masked (location_t, tree, tree);
+extern tree c_finish_omp_taskgroup (location_t, tree, tree);
+extern tree c_finish_omp_critical (location_t, tree, tree, tree);
+extern tree c_finish_omp_ordered (location_t, tree, tree);
+extern void c_finish_omp_barrier (location_t);
+extern tree c_finish_omp_atomic (location_t, enum tree_code, enum tree_code,
+ tree, tree, tree, tree, tree, tree, bool,
+ enum omp_memory_order, bool, bool = false);
+extern bool c_omp_depend_t_p (tree);
+extern void c_finish_omp_depobj (location_t, tree, enum omp_clause_depend_kind,
+ tree);
+extern void c_finish_omp_flush (location_t, int);
+extern void c_finish_omp_taskwait (location_t);
+extern void c_finish_omp_taskyield (location_t);
+extern tree c_finish_omp_for (location_t, enum tree_code, tree, tree, tree,
+ tree, tree, tree, tree, bool);
+extern bool c_omp_check_loop_iv (tree, tree, walk_tree_lh);
+extern bool c_omp_check_loop_iv_exprs (location_t, enum tree_code, tree, int,
+ tree, tree, tree, walk_tree_lh);
+extern tree c_finish_oacc_wait (location_t, tree, tree);
+extern tree c_oacc_split_loop_clauses (tree, tree *, bool);
+extern void c_omp_split_clauses (location_t, enum tree_code, omp_clause_mask,
+ tree, tree *);
+extern tree c_omp_declare_simd_clauses_to_numbers (tree, tree);
+extern void c_omp_declare_simd_clauses_to_decls (tree, tree);
+extern bool c_omp_predefined_variable (tree);
+extern enum omp_clause_default_kind c_omp_predetermined_sharing (tree);
+extern enum omp_clause_defaultmap_kind c_omp_predetermined_mapping (tree);
+extern tree c_omp_check_context_selector (location_t, tree);
+extern void c_omp_mark_declare_variant (location_t, tree, tree);
+extern void c_omp_adjust_map_clauses (tree, bool);
+
+enum c_omp_directive_kind {
+ C_OMP_DIR_STANDALONE,
+ C_OMP_DIR_CONSTRUCT,
+ C_OMP_DIR_DECLARATIVE,
+ C_OMP_DIR_UTILITY,
+ C_OMP_DIR_INFORMATIONAL
+};
+
+struct c_omp_directive {
+ const char *first, *second, *third;
+ unsigned int id;
+ enum c_omp_directive_kind kind;
+ bool simd;
+};
+
+extern const struct c_omp_directive c_omp_directives[];
+extern const struct c_omp_directive *c_omp_categorize_directive (const char *,
+ const char *,
+ const char *);
+
+/* Return next tree in the chain for chain_next walking of tree nodes. */
+inline tree
+c_tree_chain_next (tree t)
+{
+  /* TREE_CHAIN of a type is TYPE_STUB_DECL, which is a different
+     kind of object, never a long chain of nodes.  Prefer
+     TYPE_NEXT_VARIANT for types. */
+ if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_TYPE_COMMON))
+ return TYPE_NEXT_VARIANT (t);
+ /* Otherwise, if there is TREE_CHAIN, return it. */
+ if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_COMMON))
+ return TREE_CHAIN (t);
+ return NULL;
+}
+
+/* Mask used by tm_stmt_attr. */
+#define TM_STMT_ATTR_OUTER 2
+#define TM_STMT_ATTR_ATOMIC 4
+#define TM_STMT_ATTR_RELAXED 8
+
+/* Mask used by tm_attr_to_mask and tm_mask_to_attr.  Note that these
+   are ordered specifically such that more restrictive attributes are
+   at lower bit positions.  The C++ tm attribute inheritance code relies
+   on this ordering so that least bit extraction (mask & -mask) results
+   in the most restrictive attribute; see the worked example below. */
+#define TM_ATTR_SAFE 1
+#define TM_ATTR_CALLABLE 2
+#define TM_ATTR_PURE 4
+#define TM_ATTR_IRREVOCABLE 8
+#define TM_ATTR_MAY_CANCEL_OUTER 16
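+
+/* Editorial worked example: for a mask of TM_ATTR_CALLABLE | TM_ATTR_PURE
+   (binary 0110), mask & -mask isolates the lowest set bit,
+   0010 = TM_ATTR_CALLABLE, which by the ordering above is the more
+   restrictive of the two attributes.  */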
+
+/* Whether a literal's value overflowed or underflowed during
+   parsing. */
+enum overflow_type {
+ OT_UNDERFLOW = -1,
+ OT_NONE,
+ OT_OVERFLOW
+};
+
+/* A suffix-identifier value doublet that represents user-defined literals
+   for C++11 and later. */
+struct GTY(()) tree_userdef_literal {
+ struct tree_base base;
+ tree suffix_id;
+ tree value;
+ tree num_string;
+ enum overflow_type overflow;
+};
+
+#define USERDEF_LITERAL_SUFFIX_ID(NODE) \
+ (((struct tree_userdef_literal *)USERDEF_LITERAL_CHECK (NODE))->suffix_id)
+
+#define USERDEF_LITERAL_VALUE(NODE) \
+ (((struct tree_userdef_literal *)USERDEF_LITERAL_CHECK (NODE))->value)
+
+#define USERDEF_LITERAL_OVERFLOW(NODE) \
+ (((struct tree_userdef_literal *)USERDEF_LITERAL_CHECK (NODE))->overflow)
+
+#define USERDEF_LITERAL_NUM_STRING(NODE) \
+ (((struct tree_userdef_literal *)USERDEF_LITERAL_CHECK (NODE))->num_string)
+
+#define USERDEF_LITERAL_TYPE(NODE) \
+ (TREE_TYPE (USERDEF_LITERAL_VALUE (NODE)))
+
+extern tree build_userdef_literal (tree suffix_id, tree value,
+ enum overflow_type overflow,
+ tree num_string);
+
+
+/* WHILE_STMT accessors. These give access to the condition of the
+ while statement and the body of the while statement, respectively. */
+#define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0)
+#define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1)
+
+/* DO_STMT accessors. These give access to the condition of the do
+ statement and the body of the do statement, respectively. */
+#define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0)
+#define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1)
+
+/* FOR_STMT accessors. These give access to the init statement,
+ condition, update expression, and body of the for statement,
+ respectively. */
+#define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0)
+#define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1)
+#define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2)
+#define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3)
+#define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4)
+
+#define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0)
+#define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1)
+#define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2)
+#define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3)
+/* True if there are case labels for all possible values of switch cond, either
+ because there is a default: case label or because the case label ranges cover
+ all values. */
+#define SWITCH_STMT_ALL_CASES_P(NODE) \
+ TREE_LANG_FLAG_0 (SWITCH_STMT_CHECK (NODE))
+/* True if the body of a switch stmt contains no BREAK_STMTs. */
+#define SWITCH_STMT_NO_BREAK_P(NODE) \
+ TREE_LANG_FLAG_2 (SWITCH_STMT_CHECK (NODE))
+
+
+/* Nonzero if NODE is the target for genericization of 'break' stmts. */
+#define LABEL_DECL_BREAK(NODE) \
+ DECL_LANG_FLAG_0 (LABEL_DECL_CHECK (NODE))
+
+/* Nonzero if NODE is the target for genericization of 'continue' stmts. */
+#define LABEL_DECL_CONTINUE(NODE) \
+ DECL_LANG_FLAG_1 (LABEL_DECL_CHECK (NODE))
+
+extern bool convert_vector_to_array_for_subscript (location_t, tree *, tree);
+
+/* Possible cases of scalar_to_vector conversion. */
+enum stv_conv {
+ stv_error, /* Error occurred. */
+ stv_nothing, /* Nothing happened. */
+ stv_firstarg, /* First argument must be expanded. */
+ stv_secondarg /* Second argument must be expanded. */
+};
+
+extern enum stv_conv scalar_to_vector (location_t loc, enum tree_code code,
+ tree op0, tree op1, bool);
+
+extern tree find_inv_trees (tree *, int *, void *);
+extern tree replace_inv_trees (tree *, int *, void *);
+
+extern bool reject_gcc_builtin (const_tree, location_t = UNKNOWN_LOCATION);
+extern bool valid_array_size_p (location_t, const_tree, tree, bool = true);
+extern void invalid_array_size_error (location_t, cst_size_error,
+ const_tree, const_tree);
+
+/* In c-warn.cc. */
+extern void constant_expression_warning (tree);
+extern void constant_expression_error (tree);
+extern void overflow_warning (location_t, tree, tree = NULL_TREE);
+extern void warn_logical_operator (location_t, enum tree_code, tree,
+ enum tree_code, tree, enum tree_code, tree);
+extern void warn_tautological_cmp (const op_location_t &, enum tree_code,
+ tree, tree);
+extern void warn_logical_not_parentheses (location_t, enum tree_code, tree,
+ tree);
+extern bool warn_if_unused_value (const_tree, location_t, bool = false);
+extern bool strict_aliasing_warning (location_t, tree, tree);
+extern void sizeof_pointer_memaccess_warning (location_t *, tree,
+ vec<tree, va_gc> *, tree *,
+ bool (*) (tree, tree));
+extern void check_main_parameter_types (tree decl);
+extern void warnings_for_convert_and_check (location_t, tree, tree, tree);
+extern void c_do_switch_warnings (splay_tree, location_t, tree, tree, bool);
+extern void warn_for_omitted_condop (location_t, tree);
+extern bool warn_for_restrict (unsigned, tree *, unsigned);
+extern void warn_for_address_or_pointer_of_packed_member (tree, tree);
+extern void warn_parm_array_mismatch (location_t, tree, tree);
+extern void maybe_warn_sizeof_array_div (location_t, tree, tree, tree, tree);
+extern void do_warn_array_compare (location_t, tree_code, tree, tree);
+
+/* Places where an lvalue, or modifiable lvalue, may be required.
+ Used to select diagnostic messages in lvalue_error and
+ readonly_error. */
+enum lvalue_use {
+ lv_assign,
+ lv_increment,
+ lv_decrement,
+ lv_addressof,
+ lv_asm
+};
+
+extern void lvalue_error (location_t, enum lvalue_use);
+extern void invalid_indirection_error (location_t, tree, ref_operator);
+extern void readonly_error (location_t, tree, enum lvalue_use);
+extern void warn_array_subscript_with_type_char (location_t, tree);
+extern void warn_about_parentheses (location_t,
+ enum tree_code,
+ enum tree_code, tree,
+ enum tree_code, tree);
+extern void warn_for_unused_label (tree label);
+extern void warn_for_div_by_zero (location_t, tree divisor);
+extern void warn_for_memset (location_t, tree, tree, int);
+extern void warn_for_sign_compare (location_t,
+ tree orig_op0, tree orig_op1,
+ tree op0, tree op1,
+ tree result_type,
+ enum tree_code resultcode);
+extern void do_warn_double_promotion (tree, tree, tree, const char *,
+ location_t);
+extern void do_warn_unused_parameter (tree);
+extern void record_locally_defined_typedef (tree);
+extern void maybe_record_typedef_use (tree);
+extern void maybe_warn_unused_local_typedefs (void);
+extern void maybe_warn_bool_compare (location_t, enum tree_code, tree, tree);
+extern bool maybe_warn_shift_overflow (location_t, tree, tree);
+extern void warn_duplicated_cond_add_or_warn (location_t, tree, vec<tree> **);
+extern bool diagnose_mismatched_attributes (tree, tree);
+extern tree do_warn_duplicated_branches_r (tree *, int *, void *);
+extern void warn_for_multistatement_macros (location_t, location_t,
+ location_t, enum rid);
+
+extern void check_for_xor_used_as_pow (location_t lhs_loc, tree lhs_val,
+ location_t operator_loc,
+ location_t rhs_loc, tree rhs_val);
+
+/* In c-attribs.cc. */
+extern bool attribute_takes_identifier_p (const_tree);
+extern tree handle_deprecated_attribute (tree *, tree, tree, int, bool *);
+extern tree handle_unused_attribute (tree *, tree, tree, int, bool *);
+extern tree handle_fallthrough_attribute (tree *, tree, tree, int, bool *);
+extern int parse_tm_stmt_attr (tree, int);
+extern int tm_attr_to_mask (tree);
+extern tree tm_mask_to_attr (int);
+extern tree find_tm_attribute (tree);
+extern const struct attribute_spec::exclusions attr_cold_hot_exclusions[];
+extern const struct attribute_spec::exclusions attr_noreturn_exclusions[];
+extern tree handle_noreturn_attribute (tree *, tree, tree, int, bool *);
+extern bool has_attribute (location_t, tree, tree, tree (*)(tree));
+extern tree build_attr_access_from_parms (tree, bool);
+
+/* In c-format.cc. */
+extern bool valid_format_string_type_p (tree);
+
+/* A bitmap of flags to positional_argument. */
+enum posargflags {
+ /* Consider positional attribute argument value zero valid. */
+ POSARG_ZERO = 1,
+ /* Consider positional attribute argument value valid if it refers
+ to the ellipsis (i.e., beyond the last typed argument). */
+ POSARG_ELLIPSIS = 2
+};
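+
+/* Editorial note: the flags above may be OR-ed together when passed to
+   positional_argument below; for instance (illustrative only),
+   POSARG_ZERO | POSARG_ELLIPSIS accepts both an argument number of zero
+   and one referring past the last typed argument.  */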
+
+extern tree positional_argument (const_tree, const_tree, tree &, tree_code,
+ int = 0, int = posargflags ());
+
+extern enum flt_eval_method
+excess_precision_mode_join (enum flt_eval_method, enum flt_eval_method);
+
+extern int c_flt_eval_method (bool ts18661_p);
+extern void add_no_sanitize_value (tree node, unsigned int flags);
+
+extern void maybe_add_include_fixit (rich_location *, const char *, bool);
+extern void maybe_suggest_missing_token_insertion (rich_location *richloc,
+ enum cpp_ttype token_type,
+ location_t prev_token_loc);
+extern tree braced_lists_to_strings (tree, tree);
+
+#if CHECKING_P
+namespace selftest {
+ /* Declarations for specific families of tests within c-family,
+ by source file, in alphabetical order. */
+ extern void c_diagnostic_cc_tests (void);
+ extern void c_format_cc_tests (void);
+ extern void c_indentation_cc_tests (void);
+ extern void c_opt_problem_cc_tests (void);
+ extern void c_pretty_print_cc_tests (void);
+ extern void c_spellcheck_cc_tests (void);
+
+ /* The entrypoint for running all of the above tests. */
+ extern void c_family_tests (void);
+} // namespace selftest
+#endif /* #if CHECKING_P */
+
+#endif /* ! GCC_C_COMMON_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-objc.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-objc.h
new file mode 100644
index 0000000..ee88a79
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-objc.h
@@ -0,0 +1,183 @@
+/* Definitions of Objective-C front-end entry points used for C and C++.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_C_COMMON_OBJC_H
+#define GCC_C_COMMON_OBJC_H
+
+/* ObjC ivar visibility types. */
+enum GTY(()) objc_ivar_visibility_kind {
+ OBJC_IVAR_VIS_PROTECTED = 0,
+ OBJC_IVAR_VIS_PUBLIC = 1,
+ OBJC_IVAR_VIS_PRIVATE = 2,
+ OBJC_IVAR_VIS_PACKAGE = 3
+};
+
+/* ObjC property attribute kinds.
+   These have two fields: a unique value (that identifies which attribute)
+   and a group key that indicates membership in an exclusion group.
+   Only one member of an exclusion group may be present in a given
+   attribute list.
+   Getters and setters have additional rules, since they are excluded from
+   non-overlapping group sets. */
+
+enum objc_property_attribute_group
+{
+ OBJC_PROPATTR_GROUP_UNKNOWN = 0,
+ OBJC_PROPATTR_GROUP_GETTER,
+ OBJC_PROPATTR_GROUP_SETTER,
+ OBJC_PROPATTR_GROUP_READWRITE,
+ OBJC_PROPATTR_GROUP_ASSIGN,
+ OBJC_PROPATTR_GROUP_ATOMIC,
+ OBJC_PROPATTR_GROUP_NULLABLE,
+ OBJC_PROPATTR_GROUP_CLASS,
+ OBJC_PROPATTR_GROUP_MAX
+};
+
+enum objc_property_attribute_kind
+{
+ OBJC_PROPERTY_ATTR_UNKNOWN = 0|OBJC_PROPATTR_GROUP_UNKNOWN,
+ OBJC_PROPERTY_ATTR_GETTER = ( 1 << 8)|OBJC_PROPATTR_GROUP_GETTER,
+ OBJC_PROPERTY_ATTR_SETTER = ( 2 << 8)|OBJC_PROPATTR_GROUP_SETTER,
+ OBJC_PROPERTY_ATTR_READONLY = ( 3 << 8)|OBJC_PROPATTR_GROUP_READWRITE,
+ OBJC_PROPERTY_ATTR_READWRITE = ( 4 << 8)|OBJC_PROPATTR_GROUP_READWRITE,
+ OBJC_PROPERTY_ATTR_ASSIGN = ( 5 << 8)|OBJC_PROPATTR_GROUP_ASSIGN,
+ OBJC_PROPERTY_ATTR_RETAIN = ( 6 << 8)|OBJC_PROPATTR_GROUP_ASSIGN,
+ OBJC_PROPERTY_ATTR_COPY = ( 7 << 8)|OBJC_PROPATTR_GROUP_ASSIGN,
+ OBJC_PROPERTY_ATTR_ATOMIC = ( 8 << 8)|OBJC_PROPATTR_GROUP_ATOMIC,
+ OBJC_PROPERTY_ATTR_NONATOMIC = ( 9 << 8)|OBJC_PROPATTR_GROUP_ATOMIC,
+ OBJC_PROPERTY_ATTR_NULL_UNSPECIFIED = (12 << 8)|OBJC_PROPATTR_GROUP_NULLABLE,
+ OBJC_PROPERTY_ATTR_NULLABLE = (13 << 8)|OBJC_PROPATTR_GROUP_NULLABLE,
+ OBJC_PROPERTY_ATTR_NONNULL = (14 << 8)|OBJC_PROPATTR_GROUP_NULLABLE,
+ OBJC_PROPERTY_ATTR_NULL_RESETTABLE = (15 << 8)|OBJC_PROPATTR_GROUP_NULLABLE,
+ OBJC_PROPERTY_ATTR_CLASS = (16 << 8)|OBJC_PROPATTR_GROUP_CLASS,
+  OBJC_PROPERTY_ATTR_MAX = (255 << 8)|OBJC_PROPATTR_GROUP_MAX
+};
+
+#define OBJC_PROPATTR_GROUP_MASK 0x0f
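+
+/* Editorial worked example: OBJC_PROPERTY_ATTR_READONLY is
+   (3 << 8)|OBJC_PROPATTR_GROUP_READWRITE = 0x303, and masking with
+   OBJC_PROPATTR_GROUP_MASK (0x0f) recovers the group key 3, i.e.
+   OBJC_PROPATTR_GROUP_READWRITE; the group () accessor below decodes
+   the exclusion group from a kind in exactly this way.  */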
+
+/* To contain parsed, but unverified, information about a single property
+ attribute. */
+struct property_attribute_info
+{
+ property_attribute_info () = default;
+ property_attribute_info (tree name, location_t loc,
+ enum objc_property_attribute_kind k)
+ : name (name), ident (NULL_TREE), prop_loc (loc), prop_kind (k),
+ parse_error (false) {}
+
+ enum objc_property_attribute_group group ()
+ {
+ return (enum objc_property_attribute_group)
+ ((unsigned)prop_kind & OBJC_PROPATTR_GROUP_MASK);
+ }
+
+ tree name; /* Name of the attribute. */
+ tree ident; /* For getter/setter cases, the method/selector name. */
+ location_t prop_loc; /* Extended location covering the parsed attr. */
+ enum objc_property_attribute_kind prop_kind : 16;
+ unsigned parse_error : 1; /* The C/C++ parser saw an error in this attr. */
+};
+
+extern enum objc_property_attribute_kind objc_prop_attr_kind_for_rid (enum rid);
+
+/* Objective-C / Objective-C++ entry points. */
+
+/* The following ObjC/ObjC++ functions are called by the C and/or C++
+ front-ends; they all must have corresponding stubs in stub-objc.cc. */
+extern void objc_write_global_declarations (void);
+extern tree objc_is_class_name (tree);
+extern tree objc_is_object_ptr (tree);
+extern void objc_check_decl (tree);
+extern void objc_check_global_decl (tree);
+extern tree objc_common_type (tree, tree);
+extern bool objc_compare_types (tree, tree, int, tree);
+extern bool objc_have_common_type (tree, tree, int, tree);
+extern bool objc_diagnose_private_ivar (tree);
+extern void objc_volatilize_decl (tree);
+extern tree objc_rewrite_function_call (tree, tree);
+extern tree objc_message_selector (void);
+extern tree objc_lookup_ivar (tree, tree);
+extern void objc_clear_super_receiver (void);
+extern int objc_is_public (tree, tree);
+extern tree objc_is_id (tree);
+extern void objc_declare_alias (tree, tree);
+extern void objc_declare_class (tree);
+extern void objc_declare_protocol (tree, tree);
+extern tree objc_build_message_expr (tree, tree);
+extern tree objc_finish_message_expr (tree, tree, tree, tree*);
+extern tree objc_build_selector_expr (location_t, tree);
+extern tree objc_build_protocol_expr (tree);
+extern tree objc_build_encode_expr (tree);
+extern tree objc_build_string_object (tree);
+extern tree objc_get_protocol_qualified_type (tree, tree);
+extern tree objc_get_class_reference (tree);
+extern tree objc_get_class_ivars (tree);
+extern bool objc_detect_field_duplicates (bool);
+extern void objc_start_class_interface (tree, location_t, tree, tree, tree);
+extern void objc_start_category_interface (tree, tree, tree, tree);
+extern void objc_start_protocol (tree, tree, tree);
+extern void objc_continue_interface (void);
+extern void objc_finish_interface (void);
+extern void objc_start_class_implementation (tree, tree);
+extern void objc_start_category_implementation (tree, tree);
+extern void objc_continue_implementation (void);
+extern void objc_finish_implementation (void);
+extern void objc_set_visibility (objc_ivar_visibility_kind);
+extern tree objc_build_method_signature (bool, tree, tree, tree, bool);
+extern void objc_add_method_declaration (bool, tree, tree);
+extern bool objc_start_method_definition (bool, tree, tree, tree);
+extern void objc_finish_method_definition (tree);
+extern void objc_add_instance_variable (tree);
+extern tree objc_build_keyword_decl (tree, tree, tree, tree);
+extern tree objc_build_throw_stmt (location_t, tree);
+extern void objc_begin_try_stmt (location_t, tree);
+extern tree objc_finish_try_stmt (void);
+extern void objc_begin_catch_clause (tree);
+extern void objc_finish_catch_clause (void);
+extern void objc_build_finally_clause (location_t, tree);
+extern tree objc_build_synchronized (location_t, tree, tree);
+extern int objc_static_init_needed_p (void);
+extern tree objc_generate_static_init_call (tree);
+extern tree objc_generate_write_barrier (tree, enum tree_code, tree);
+extern void objc_set_method_opt (bool);
+extern void objc_finish_foreach_loop (location_t, tree, tree, tree, tree, tree);
+extern bool objc_method_decl (enum tree_code);
+extern void objc_add_property_declaration (location_t, tree,
+ vec<property_attribute_info *>&);
+extern tree objc_maybe_build_component_ref (tree, tree);
+extern tree objc_build_class_component_ref (tree, tree);
+extern tree objc_maybe_build_modify_expr (tree, tree);
+extern tree objc_build_incr_expr_for_property_ref (location_t, enum tree_code,
+ tree, tree);
+extern void objc_add_synthesize_declaration (location_t, tree);
+extern void objc_add_dynamic_declaration (location_t, tree);
+extern const char * objc_maybe_printable_name (tree, int);
+extern bool objc_is_property_ref (tree);
+extern bool objc_non_constant_expr_p (tree);
+extern bool objc_string_ref_type_p (tree);
+extern void objc_check_format_arg (tree, tree);
+extern void objc_finish_function (void);
+extern void objc_maybe_warn_exceptions (location_t);
+
+/* The following are provided by the C and C++ front-ends, and called by
+ ObjC/ObjC++. */
+extern void *objc_get_current_scope (void);
+extern void objc_mark_locals_volatile (void *);
+
+#endif /* ! GCC_C_COMMON_OBJC_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-pragma.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-pragma.h
new file mode 100644
index 0000000..9cc95ab
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-pragma.h
@@ -0,0 +1,286 @@
+/* Pragma related interfaces.
+ Copyright (C) 1995-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_C_PRAGMA_H
+#define GCC_C_PRAGMA_H
+
+#include "cpplib.h" /* For enum cpp_ttype. */
+
+/* Pragma identifiers built in to the front end parsers. Identifiers
+ for ancillary handlers will follow these. */
+enum pragma_kind {
+ PRAGMA_NONE = 0,
+
+ PRAGMA_OACC_ATOMIC,
+ PRAGMA_OACC_CACHE,
+ PRAGMA_OACC_DATA,
+ PRAGMA_OACC_DECLARE,
+ PRAGMA_OACC_ENTER_DATA,
+ PRAGMA_OACC_EXIT_DATA,
+ PRAGMA_OACC_HOST_DATA,
+ PRAGMA_OACC_KERNELS,
+ PRAGMA_OACC_LOOP,
+ PRAGMA_OACC_PARALLEL,
+ PRAGMA_OACC_ROUTINE,
+ PRAGMA_OACC_SERIAL,
+ PRAGMA_OACC_UPDATE,
+ PRAGMA_OACC_WAIT,
+
+ /* PRAGMA_OMP__START_ should be equal to the first PRAGMA_OMP_* code. */
+ PRAGMA_OMP_ALLOCATE,
+ PRAGMA_OMP__START_ = PRAGMA_OMP_ALLOCATE,
+ PRAGMA_OMP_ASSUME,
+ PRAGMA_OMP_ASSUMES,
+ PRAGMA_OMP_ATOMIC,
+ PRAGMA_OMP_BARRIER,
+ PRAGMA_OMP_BEGIN,
+ PRAGMA_OMP_CANCEL,
+ PRAGMA_OMP_CANCELLATION_POINT,
+ PRAGMA_OMP_CRITICAL,
+ PRAGMA_OMP_DECLARE,
+ PRAGMA_OMP_DEPOBJ,
+ PRAGMA_OMP_DISTRIBUTE,
+ PRAGMA_OMP_ERROR,
+ PRAGMA_OMP_END,
+ PRAGMA_OMP_FLUSH,
+ PRAGMA_OMP_FOR,
+ PRAGMA_OMP_LOOP,
+ PRAGMA_OMP_NOTHING,
+ PRAGMA_OMP_MASKED,
+ PRAGMA_OMP_MASTER,
+ PRAGMA_OMP_ORDERED,
+ PRAGMA_OMP_PARALLEL,
+ PRAGMA_OMP_REQUIRES,
+ PRAGMA_OMP_SCAN,
+ PRAGMA_OMP_SCOPE,
+ PRAGMA_OMP_SECTION,
+ PRAGMA_OMP_SECTIONS,
+ PRAGMA_OMP_SIMD,
+ PRAGMA_OMP_SINGLE,
+ PRAGMA_OMP_TARGET,
+ PRAGMA_OMP_TASK,
+ PRAGMA_OMP_TASKGROUP,
+ PRAGMA_OMP_TASKLOOP,
+ PRAGMA_OMP_TASKWAIT,
+ PRAGMA_OMP_TASKYIELD,
+ PRAGMA_OMP_THREADPRIVATE,
+ PRAGMA_OMP_TEAMS,
+ /* PRAGMA_OMP__LAST_ should be equal to the last PRAGMA_OMP_* code. */
+ PRAGMA_OMP__LAST_ = PRAGMA_OMP_TEAMS,
+
+ PRAGMA_GCC_PCH_PREPROCESS,
+ PRAGMA_IVDEP,
+ PRAGMA_UNROLL,
+
+ PRAGMA_FIRST_EXTERNAL
+};
+
+
+/* All clauses defined by OpenACC 2.0 and OpenMP 2.5, 3.0, 3.1, 4.0, 4.5,
+   5.0, 5.1 and 5.2.  Used internally by both the C and C++ parsers. */
+enum pragma_omp_clause {
+ PRAGMA_OMP_CLAUSE_NONE = 0,
+
+ PRAGMA_OMP_CLAUSE_AFFINITY,
+ PRAGMA_OMP_CLAUSE_ALIGNED,
+ PRAGMA_OMP_CLAUSE_ALLOCATE,
+ PRAGMA_OMP_CLAUSE_BIND,
+ PRAGMA_OMP_CLAUSE_COLLAPSE,
+ PRAGMA_OMP_CLAUSE_COPYIN,
+ PRAGMA_OMP_CLAUSE_COPYPRIVATE,
+ PRAGMA_OMP_CLAUSE_DEFAULT,
+ PRAGMA_OMP_CLAUSE_DEFAULTMAP,
+ PRAGMA_OMP_CLAUSE_DEPEND,
+ PRAGMA_OMP_CLAUSE_DETACH,
+ PRAGMA_OMP_CLAUSE_DEVICE,
+ PRAGMA_OMP_CLAUSE_DEVICE_TYPE,
+ PRAGMA_OMP_CLAUSE_DIST_SCHEDULE,
+ PRAGMA_OMP_CLAUSE_DOACROSS,
+ PRAGMA_OMP_CLAUSE_ENTER,
+ PRAGMA_OMP_CLAUSE_FILTER,
+ PRAGMA_OMP_CLAUSE_FINAL,
+ PRAGMA_OMP_CLAUSE_FIRSTPRIVATE,
+ PRAGMA_OMP_CLAUSE_FOR,
+ PRAGMA_OMP_CLAUSE_FROM,
+ PRAGMA_OMP_CLAUSE_GRAINSIZE,
+ PRAGMA_OMP_CLAUSE_HAS_DEVICE_ADDR,
+ PRAGMA_OMP_CLAUSE_HINT,
+ PRAGMA_OMP_CLAUSE_IF,
+ PRAGMA_OMP_CLAUSE_IN_REDUCTION,
+ PRAGMA_OMP_CLAUSE_INBRANCH,
+ PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR,
+ PRAGMA_OMP_CLAUSE_LASTPRIVATE,
+ PRAGMA_OMP_CLAUSE_LINEAR,
+ PRAGMA_OMP_CLAUSE_LINK,
+ PRAGMA_OMP_CLAUSE_MAP,
+ PRAGMA_OMP_CLAUSE_MERGEABLE,
+ PRAGMA_OMP_CLAUSE_NOGROUP,
+ PRAGMA_OMP_CLAUSE_NONTEMPORAL,
+ PRAGMA_OMP_CLAUSE_NOTINBRANCH,
+ PRAGMA_OMP_CLAUSE_NOWAIT,
+ PRAGMA_OMP_CLAUSE_NUM_TASKS,
+ PRAGMA_OMP_CLAUSE_NUM_TEAMS,
+ PRAGMA_OMP_CLAUSE_NUM_THREADS,
+ PRAGMA_OMP_CLAUSE_ORDER,
+ PRAGMA_OMP_CLAUSE_ORDERED,
+ PRAGMA_OMP_CLAUSE_PARALLEL,
+ PRAGMA_OMP_CLAUSE_PRIORITY,
+ PRAGMA_OMP_CLAUSE_PRIVATE,
+ PRAGMA_OMP_CLAUSE_PROC_BIND,
+ PRAGMA_OMP_CLAUSE_REDUCTION,
+ PRAGMA_OMP_CLAUSE_SAFELEN,
+ PRAGMA_OMP_CLAUSE_SCHEDULE,
+ PRAGMA_OMP_CLAUSE_SECTIONS,
+ PRAGMA_OMP_CLAUSE_SHARED,
+ PRAGMA_OMP_CLAUSE_SIMD,
+ PRAGMA_OMP_CLAUSE_SIMDLEN,
+ PRAGMA_OMP_CLAUSE_TASK_REDUCTION,
+ PRAGMA_OMP_CLAUSE_TASKGROUP,
+ PRAGMA_OMP_CLAUSE_THREAD_LIMIT,
+ PRAGMA_OMP_CLAUSE_THREADS,
+ PRAGMA_OMP_CLAUSE_TO,
+ PRAGMA_OMP_CLAUSE_UNIFORM,
+ PRAGMA_OMP_CLAUSE_UNTIED,
+ PRAGMA_OMP_CLAUSE_USE_DEVICE_PTR,
+ PRAGMA_OMP_CLAUSE_USE_DEVICE_ADDR,
+
+ /* Clauses for OpenACC. */
+ PRAGMA_OACC_CLAUSE_ASYNC,
+ PRAGMA_OACC_CLAUSE_ATTACH,
+ PRAGMA_OACC_CLAUSE_AUTO,
+ PRAGMA_OACC_CLAUSE_COPY,
+ PRAGMA_OACC_CLAUSE_COPYOUT,
+ PRAGMA_OACC_CLAUSE_CREATE,
+ PRAGMA_OACC_CLAUSE_DELETE,
+ PRAGMA_OACC_CLAUSE_DEVICEPTR,
+ PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT,
+ PRAGMA_OACC_CLAUSE_FINALIZE,
+ PRAGMA_OACC_CLAUSE_GANG,
+ PRAGMA_OACC_CLAUSE_HOST,
+ PRAGMA_OACC_CLAUSE_INDEPENDENT,
+ PRAGMA_OACC_CLAUSE_NO_CREATE,
+ PRAGMA_OACC_CLAUSE_NOHOST,
+ PRAGMA_OACC_CLAUSE_NUM_GANGS,
+ PRAGMA_OACC_CLAUSE_NUM_WORKERS,
+ PRAGMA_OACC_CLAUSE_PRESENT,
+ PRAGMA_OACC_CLAUSE_SELF,
+ PRAGMA_OACC_CLAUSE_SEQ,
+ PRAGMA_OACC_CLAUSE_TILE,
+ PRAGMA_OACC_CLAUSE_VECTOR,
+ PRAGMA_OACC_CLAUSE_VECTOR_LENGTH,
+ PRAGMA_OACC_CLAUSE_WAIT,
+ PRAGMA_OACC_CLAUSE_WORKER,
+ PRAGMA_OACC_CLAUSE_IF_PRESENT,
+ PRAGMA_OACC_CLAUSE_COLLAPSE = PRAGMA_OMP_CLAUSE_COLLAPSE,
+ PRAGMA_OACC_CLAUSE_COPYIN = PRAGMA_OMP_CLAUSE_COPYIN,
+ PRAGMA_OACC_CLAUSE_DEVICE = PRAGMA_OMP_CLAUSE_DEVICE,
+ PRAGMA_OACC_CLAUSE_DEFAULT = PRAGMA_OMP_CLAUSE_DEFAULT,
+ PRAGMA_OACC_CLAUSE_DETACH = PRAGMA_OMP_CLAUSE_DETACH,
+ PRAGMA_OACC_CLAUSE_FIRSTPRIVATE = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE,
+ PRAGMA_OACC_CLAUSE_IF = PRAGMA_OMP_CLAUSE_IF,
+ PRAGMA_OACC_CLAUSE_PRIVATE = PRAGMA_OMP_CLAUSE_PRIVATE,
+ PRAGMA_OACC_CLAUSE_REDUCTION = PRAGMA_OMP_CLAUSE_REDUCTION,
+ PRAGMA_OACC_CLAUSE_LINK = PRAGMA_OMP_CLAUSE_LINK,
+ PRAGMA_OACC_CLAUSE_USE_DEVICE = PRAGMA_OMP_CLAUSE_USE_DEVICE_PTR
+};
+
+extern struct cpp_reader* parse_in;
+
+/* It's safe to always leave the visibility pragma enabled: if visibility
+   is not supported on the host OS platform, the statements are simply
+   ignored. */
+extern void push_visibility (const char *, int);
+extern bool pop_visibility (int);
+
+extern void init_pragma (void);
+
+/* Front-end wrappers for pragma registration. */
+typedef void (*pragma_handler_1arg)(struct cpp_reader *);
+/* A second pragma handler, which adds a void * argument so that extra
+   data can be passed to the handler. */
+typedef void (*pragma_handler_2arg)(struct cpp_reader *, void *);
+
+/* This union abstracts over the two kinds of pragma handler. */
+union gen_pragma_handler {
+ pragma_handler_1arg handler_1arg;
+ pragma_handler_2arg handler_2arg;
+};
+/* Used internally to record a handler and its associated data. */
+struct internal_pragma_handler {
+ union gen_pragma_handler handler, early_handler;
+  /* Indicates whether the handler is a pragma_handler_1arg (extra_data
+     is false) or a pragma_handler_2arg (extra_data is true). */
+ bool extra_data;
+ /* A data field which can be used when extra_data is true. */
+ void * data;
+};
+
+extern void c_register_pragma (const char *space, const char *name,
+ pragma_handler_1arg handler);
+extern void c_register_pragma_with_data (const char *space, const char *name,
+ pragma_handler_2arg handler,
+ void *data);
+
+extern void c_register_pragma_with_expansion (const char *space,
+ const char *name,
+ pragma_handler_1arg handler);
+extern void c_register_pragma_with_expansion_and_data (const char *space,
+ const char *name,
+ pragma_handler_2arg handler,
+ void *data);
+extern void c_invoke_pragma_handler (unsigned int);
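+
+/* Editorial sketch, not part of this interface: a front end or plugin
+   might register a handler for '#pragma GCCPLUGIN trace' roughly like
+   this, where GCCPLUGIN, trace and handle_pragma_trace are hypothetical
+   names:
+
+     static void
+     handle_pragma_trace (cpp_reader *)
+     {
+       tree name;
+       if (pragma_lex (&name) != CPP_NAME)
+         warning (OPT_Wpragmas,
+                  "missing name after %<#pragma GCCPLUGIN trace%>");
+     }
+
+     c_register_pragma ("GCCPLUGIN", "trace", handle_pragma_trace);
+
+   The handler is then dispatched through c_invoke_pragma_handler when
+   the parser reaches the pragma.  */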
+
+/* Early pragma handlers run in addition to the normal ones.  They can be
+   used by front ends such as C++ that may want to act on some pragmas
+   already during lexing, before the parser processes them. */
+extern void
+c_register_pragma_with_early_handler (const char *space, const char *name,
+ pragma_handler_1arg handler,
+ pragma_handler_1arg early_handler);
+extern void c_invoke_early_pragma_handler (unsigned int);
+extern void c_pp_invoke_early_pragma_handler (unsigned int);
+
+
+extern void maybe_apply_pragma_weak (tree);
+extern void maybe_apply_pending_pragma_weaks (void);
+extern tree maybe_apply_renaming_pragma (tree, tree);
+extern void maybe_apply_pragma_scalar_storage_order (tree);
+extern void add_to_renaming_pragma_list (tree, tree);
+
+extern enum cpp_ttype pragma_lex (tree *, location_t *loc = NULL);
+
+/* Flags for use with c_lex_with_flags. The values here were picked
+ so that 0 means to translate and join strings. */
+#define C_LEX_STRING_NO_TRANSLATE 1 /* Do not lex strings into
+ execution character set. */
+#define C_LEX_STRING_NO_JOIN 2 /* Do not concatenate strings
+ nor translate them into execution
+ character set. */
+
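+/* Editorial note: the flags above are OR-ed into the last argument of
+   c_lex_with_flags below; passing 0 requests the default behavior.
+   An illustrative call that reads one token without joining or
+   translating adjacent string literals:
+
+     tree val;
+     location_t loc;
+     unsigned char cpp_flags;
+     enum cpp_ttype kind
+       = c_lex_with_flags (&val, &loc, &cpp_flags, C_LEX_STRING_NO_JOIN);  */
+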
+/* This is not actually available to pragma parsers.  It's merely a
+   convenient location to declare this function for c-lex, now that
+   enum cpp_ttype has been declared. */
+extern enum cpp_ttype c_lex_with_flags (tree *, location_t *, unsigned char *,
+ int);
+
+extern void c_pp_lookup_pragma (unsigned int, const char **, const char **);
+
+extern GTY(()) tree pragma_extern_prefix;
+
+#endif /* GCC_C_PRAGMA_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-pretty-print.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-pretty-print.h
new file mode 100644
index 0000000..a7076f2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-family/c-pretty-print.h
@@ -0,0 +1,142 @@
+/* Various declarations for the C and C++ pretty-printers.
+ Copyright (C) 2002-2023 Free Software Foundation, Inc.
+ Contributed by Gabriel Dos Reis <gdr@integrable-solutions.net>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_C_PRETTY_PRINTER
+#define GCC_C_PRETTY_PRINTER
+
+#include "tree.h"
+#include "c-family/c-common.h"
+#include "pretty-print.h"
+
+
+enum pp_c_pretty_print_flags
+ {
+ pp_c_flag_abstract = 1 << 1,
+ pp_c_flag_gnu_v3 = 1 << 2,
+ pp_c_flag_last_bit = 3
+ };
+
+
+/* The data type used to bundle information necessary for pretty-printing
+ a C or C++ entity. */
+class c_pretty_printer;
+
+/* The type of a C pretty-printer 'member' function. */
+typedef void (*c_pretty_print_fn) (c_pretty_printer *, tree);
+
+/* The datatype that contains information necessary for pretty-printing
+ a tree that represents a C construct. Any pretty-printer for a
+ language using C syntax can derive from this datatype and reuse
+ facilities provided here. A derived pretty-printer can override
+ any function listed in the vtable below. See cp/cxx-pretty-print.h
+ and cp/cxx-pretty-print.cc for an example of derivation. */
+class c_pretty_printer : public pretty_printer
+{
+public:
+ c_pretty_printer ();
+ pretty_printer *clone () const override;
+
+ // Format string, possibly translated.
+ void translate_string (const char *);
+
+ virtual void constant (tree);
+ virtual void id_expression (tree);
+ virtual void primary_expression (tree);
+ virtual void postfix_expression (tree);
+ virtual void unary_expression (tree);
+ virtual void multiplicative_expression (tree);
+ virtual void conditional_expression (tree);
+ virtual void assignment_expression (tree);
+ virtual void expression (tree);
+
+ virtual void type_id (tree);
+ virtual void statement (tree);
+
+ virtual void declaration (tree);
+ virtual void declaration_specifiers (tree);
+ virtual void simple_type_specifier (tree);
+ virtual void function_specifier (tree);
+ virtual void storage_class_specifier (tree);
+ virtual void declarator (tree);
+ virtual void direct_declarator (tree);
+ virtual void abstract_declarator (tree);
+ virtual void direct_abstract_declarator (tree);
+
+ virtual void initializer (tree);
+  /* Points to the first element of an array of offset lists.
+     Not used yet. */
+ int *offset_list;
+
+ pp_flags flags;
+
+  /* These must be overridden by each of the C and C++ front ends to
+     reflect their understanding of syntactic productions when they differ. */
+ c_pretty_print_fn type_specifier_seq;
+ c_pretty_print_fn ptr_operator;
+ c_pretty_print_fn parameter_list;
+};
+
+#define pp_c_tree_identifier(PPI, ID) \
+ pp_c_identifier (PPI, IDENTIFIER_POINTER (ID))
+
+#define pp_type_specifier_seq(PP, D) (PP)->type_specifier_seq (PP, D)
+#define pp_ptr_operator(PP, D) (PP)->ptr_operator (PP, D)
+#define pp_parameter_list(PP, T) (PP)->parameter_list (PP, T)
+
+void pp_c_whitespace (c_pretty_printer *);
+void pp_c_left_paren (c_pretty_printer *);
+void pp_c_right_paren (c_pretty_printer *);
+void pp_c_left_brace (c_pretty_printer *);
+void pp_c_right_brace (c_pretty_printer *);
+void pp_c_left_bracket (c_pretty_printer *);
+void pp_c_right_bracket (c_pretty_printer *);
+void pp_c_dot (c_pretty_printer *);
+void pp_c_ampersand (c_pretty_printer *);
+void pp_c_star (c_pretty_printer *);
+void pp_c_arrow (c_pretty_printer *);
+void pp_c_semicolon (c_pretty_printer *);
+void pp_c_complement (c_pretty_printer *);
+void pp_c_exclamation (c_pretty_printer *);
+void pp_c_space_for_pointer_operator (c_pretty_printer *, tree);
+
+/* Declarations. */
+void pp_c_tree_decl_identifier (c_pretty_printer *, tree);
+void pp_c_function_definition (c_pretty_printer *, tree);
+void pp_c_attributes_display (c_pretty_printer *, tree);
+void pp_c_cv_qualifiers (c_pretty_printer *pp, int qualifiers, bool func_type);
+void pp_c_type_qualifier_list (c_pretty_printer *, tree);
+void pp_c_parameter_type_list (c_pretty_printer *, tree);
+void pp_c_specifier_qualifier_list (c_pretty_printer *, tree);
+/* Expressions. */
+void pp_c_logical_or_expression (c_pretty_printer *, tree);
+void pp_c_expression_list (c_pretty_printer *, tree);
+void pp_c_constructor_elts (c_pretty_printer *, vec<constructor_elt, va_gc> *);
+void pp_c_call_argument_list (c_pretty_printer *, tree);
+void pp_c_type_cast (c_pretty_printer *, tree);
+void pp_c_cast_expression (c_pretty_printer *, tree);
+void pp_c_init_declarator (c_pretty_printer *, tree);
+void pp_c_ws_string (c_pretty_printer *, const char *);
+void pp_c_identifier (c_pretty_printer *, const char *);
+void pp_c_string_literal (c_pretty_printer *, tree);
+void pp_c_integer_constant (c_pretty_printer *, tree);
+
+void print_c_tree (FILE *file, tree t);
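+
+/* Editorial note: print_c_tree is convenient for dumping a tree in C
+   syntax from a debugger or plugin, e.g. (illustrative only, fndecl
+   being any tree of interest):
+
+     print_c_tree (stderr, fndecl);  */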
+
+#endif /* GCC_C_PRETTY_PRINTER */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-tree.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-tree.h
new file mode 100644
index 0000000..e6b6fe9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/c-tree.h
@@ -0,0 +1,911 @@
+/* Definitions for C parsing and type checking.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_C_TREE_H
+#define GCC_C_TREE_H
+
+#include "c-family/c-common.h"
+#include "diagnostic.h"
+
+/* struct lang_identifier is private to c-decl.cc, but langhooks.cc needs to
+ know how big it is. This is sanity-checked in c-decl.cc. */
+#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
+ (sizeof (struct c_common_identifier) + 3 * sizeof (void *))
+
+/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
+#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
+
+/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
+#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
+
+/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is
+ volatile, restrict-qualified or atomic; that is, has a type not
+ permitted for a constexpr object. */
+#define C_TYPE_FIELDS_NON_CONSTEXPR(TYPE) TREE_LANG_FLAG_4 (TYPE)
+
+/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
+ nonzero if the definition of the type has already started. */
+#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
+
+/* In an incomplete RECORD_TYPE, UNION_TYPE or ENUMERAL_TYPE, a list of
+ variable declarations whose type would be completed by completing
+ that type. */
+#define C_TYPE_INCOMPLETE_VARS(TYPE) \
+ TYPE_LANG_SLOT_1 (TREE_CHECK4 (TYPE, RECORD_TYPE, UNION_TYPE, \
+ QUAL_UNION_TYPE, ENUMERAL_TYPE))
+
+/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
+ keyword. C_RID_CODE (node) is then the RID_* value of the keyword. */
+#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
+
+/* Record whether a type or decl was written with nonconstant size.
+ Note that TYPE_SIZE may have simplified to a constant. */
+#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
+#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
+
+/* Record whether a type is variably modified. */
+#define C_TYPE_VARIABLY_MODIFIED(TYPE) TYPE_LANG_FLAG_6 (TYPE)
+
+
+/* Record whether a type is defined inside a struct or union type.
+ This is used for -Wc++-compat. */
+#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
+
+/* Record whether a typedef for type `int' was actually `signed int'. */
+#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
+
+/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
+ return type. */
+#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
+
+/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
+#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
+
+/* For a PARM_DECL, nonzero if it was declared as an array. */
+#define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE)
+
+/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
+ been declared. */
+#define C_DECL_DECLARED_BUILTIN(EXP) \
+ DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
+
+/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
+ built-in prototype and does not have a non-built-in prototype. */
+#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
+ DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
+
+/* Record whether a decl was declared register. This is strictly a
+ front-end flag, whereas DECL_REGISTER is used for code generation;
+ they may differ for structures with volatile fields. */
+#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
+
+/* Record whether a decl was used in an expression anywhere except an
+ unevaluated operand of sizeof / typeof / alignof. This is only
+ used for functions declared static but not defined, though outside
+ sizeof and typeof it is set for other function decls as well. */
+#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
+
+/* Record whether a variable has been declared threadprivate by
+ #pragma omp threadprivate. */
+#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
+
+/* Set on VAR_DECLs for compound literals. */
+#define C_DECL_COMPOUND_LITERAL_P(DECL) \
+ DECL_LANG_FLAG_5 (VAR_DECL_CHECK (DECL))
+
+/* Set on decls used as placeholders for a C2x underspecified object
+ definition. */
+#define C_DECL_UNDERSPECIFIED(DECL) DECL_LANG_FLAG_7 (DECL)
+
+/* Set on VAR_DECLs declared as 'constexpr'. */
+#define C_DECL_DECLARED_CONSTEXPR(DECL) \
+ DECL_LANG_FLAG_8 (VAR_DECL_CHECK (DECL))
+
+/* Nonzero for a decl which either doesn't exist or isn't a prototype.
+ N.B. Could be simplified if all built-in decls had complete prototypes
+ (but this is presently difficult because some of them need FILE*). */
+#define C_DECL_ISNT_PROTOTYPE(EXP) \
+ (EXP == 0 \
+ || (!prototype_p (TREE_TYPE (EXP)) \
+ && !fndecl_built_in_p (EXP)))
+
+/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
+ TYPE_ARG_TYPES for functions with prototypes, but created for functions
+ without prototypes. */
+#define TYPE_ACTUAL_ARG_TYPES(NODE) \
+ TYPE_LANG_SLOT_1 (FUNCTION_TYPE_CHECK (NODE))
+
+/* For a CONSTRUCTOR, whether some initializer contains a
+ subexpression meaning it is not a constant expression. */
+#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
+
+/* For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already
+ been folded. */
+#define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1 (SAVE_EXPR_CHECK (EXP))
+
+/* Whether a type has boolean semantics: either a boolean type or an
+ enumeration type with a boolean type as its underlying type. */
+#define C_BOOLEAN_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == BOOLEAN_TYPE \
+ || (TREE_CODE (TYPE) == ENUMERAL_TYPE \
+ && ENUM_UNDERLYING_TYPE (TYPE) != NULL_TREE \
+ && TREE_CODE (ENUM_UNDERLYING_TYPE (TYPE)) == BOOLEAN_TYPE))
+
+/* Record parser information about an expression that is irrelevant
+ for code generation alongside a tree representing its value. */
+struct c_expr
+{
+ /* The value of the expression. */
+ tree value;
+ /* Record the original unary/binary operator of an expression, which may
+ have been changed by fold, STRING_CST for unparenthesized string
+ constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
+ (even if parenthesized), for subexpressions, and for non-constant
+ initializers, or ERROR_MARK for other expressions (including
+ parenthesized expressions). */
+ enum tree_code original_code;
+ /* If not NULL, the original type of an expression. This will
+ differ from the type of the value field for an enum constant.
+ The type of an enum constant is a plain integer type, but this
+ field will be the enum type. */
+ tree original_type;
+
+ /* The source range of this expression. This is redundant
+ for node values that have locations, but not all node kinds
+ have locations (e.g. constants, and references to params, locals,
+ etc), so we stash a copy here. */
+ source_range src_range;
+
+ /* True if this was directly from a decimal constant token. */
+ bool m_decimal : 1;
+
+ /* Access to the first and last locations within the source spelling
+ of this expression. */
+ location_t get_start () const { return src_range.m_start; }
+ location_t get_finish () const { return src_range.m_finish; }
+
+ location_t get_location () const
+ {
+ if (EXPR_HAS_LOCATION (value))
+ return EXPR_LOCATION (value);
+ else
+ return make_location (get_start (), get_start (), get_finish ());
+ }
+
+ /* Set the value to error_mark_node whilst ensuring that src_range
+ and m_decimal are initialized. */
+ void set_error ()
+ {
+ value = error_mark_node;
+ src_range.m_start = UNKNOWN_LOCATION;
+ src_range.m_finish = UNKNOWN_LOCATION;
+ m_decimal = 0;
+ }
+};
+
+/* Type alias for struct c_expr.  This allows the structure to be used
+   inside the VEC types. */
+typedef struct c_expr c_expr_t;
+
+/* A kind of type specifier. Note that this information is currently
+ only used to distinguish tag definitions, tag references and typeof
+ uses. */
+enum c_typespec_kind {
+ /* No typespec. This appears only in struct c_declspec. */
+ ctsk_none,
+ /* A reserved keyword type specifier. */
+ ctsk_resword,
+ /* A reference to a tag, previously declared, such as "struct foo".
+ This includes where the previous declaration was as a different
+ kind of tag, in which case this is only valid if shadowing that
+ tag in an inner scope. */
+ ctsk_tagref,
+ /* Likewise, with standard attributes present in the reference. */
+ ctsk_tagref_attrs,
+ /* A reference to a tag, not previously declared in a visible
+ scope. */
+ ctsk_tagfirstref,
+ /* Likewise, with standard attributes present in the reference. */
+ ctsk_tagfirstref_attrs,
+ /* A definition of a tag such as "struct foo { int a; }". */
+ ctsk_tagdef,
+ /* A typedef name. */
+ ctsk_typedef,
+ /* An ObjC-specific kind of type specifier. */
+ ctsk_objc,
+ /* A typeof specifier, or _Atomic ( type-name ). */
+ ctsk_typeof
+};
+
+/* A type specifier: this structure is created in the parser and
+ passed to declspecs_add_type only. */
+struct c_typespec {
+ /* What kind of type specifier this is. */
+ enum c_typespec_kind kind;
+ /* Whether the expression has operands suitable for use in constant
+ expressions. */
+ bool expr_const_operands;
+ /* Whether the type specifier includes an enum type specifier (that
+ is, ": specifier-qualifier-list" in a declaration using
+ "enum"). */
+ bool has_enum_type_specifier;
+ /* The specifier itself. */
+ tree spec;
+ /* An expression to be evaluated before the type specifier, in the
+     case of typeof specifiers; NULL otherwise, or if no such
+ expression is required for a particular typeof specifier. In
+ particular, when typeof is applied to an expression of variably
+ modified type, that expression must be evaluated in order to
+ determine array sizes that form part of the type, but the
+ expression itself (as opposed to the array sizes) forms no part
+ of the type and so needs to be recorded separately. */
+ tree expr;
+};
+
+/* A storage class specifier. */
+enum c_storage_class {
+ csc_none,
+ csc_auto,
+ csc_extern,
+ csc_register,
+ csc_static,
+ csc_typedef
+};
+
+/* A type specifier keyword "void", "_Bool", "char", "int", "float",
+ "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
+ or none of these. */
+enum c_typespec_keyword {
+ cts_none,
+ cts_void,
+ cts_bool,
+ cts_char,
+ cts_int,
+ cts_float,
+ cts_int_n,
+ cts_double,
+ cts_dfloat32,
+ cts_dfloat64,
+ cts_dfloat128,
+ cts_floatn_nx,
+ cts_fract,
+ cts_accum,
+ cts_auto_type
+};
+
+/* This enum lists all the possible declaration specifiers, storage
+   classes or attributes that a user can write.  There is at least one
+   enumerator per possible declaration specifier in the struct
+   c_declspecs below.
+
+ It is used to index the array of declspec locations in struct
+ c_declspecs. */
+enum c_declspec_word {
+ cdw_typespec /* A catch-all for a typespec. */,
+ cdw_storage_class /* A catch-all for a storage class */,
+ cdw_attributes,
+ cdw_typedef,
+ cdw_explicit_signed,
+ cdw_deprecated,
+ cdw_default_int,
+ cdw_long,
+ cdw_long_long,
+ cdw_short,
+ cdw_signed,
+ cdw_unsigned,
+ cdw_complex,
+ cdw_inline,
+ cdw_noreturn,
+ cdw_thread,
+ cdw_const,
+ cdw_volatile,
+ cdw_restrict,
+ cdw_atomic,
+ cdw_saturating,
+ cdw_alignas,
+ cdw_address_space,
+ cdw_gimple,
+ cdw_rtl,
+ cdw_number_of_elements /* This one must always be the last
+ enumerator. */
+};
+
+enum c_declspec_il {
+ cdil_none,
+ cdil_gimple, /* __GIMPLE */
+ cdil_gimple_cfg, /* __GIMPLE(cfg) */
+ cdil_gimple_ssa, /* __GIMPLE(ssa) */
+ cdil_rtl /* __RTL */
+};
+
+/* A sequence of declaration specifiers in C. When a new declaration
+ specifier is added, please update the enum c_declspec_word above
+ accordingly. */
+struct c_declspecs {
+ location_t locations[cdw_number_of_elements];
+ /* The type specified, if a single type specifier such as a struct,
+ union or enum specifier, typedef name or typeof specifies the
+ whole type, or NULL_TREE if none or a keyword such as "void" or
+ "char" is used. Does not include qualifiers. */
+ tree type;
+ /* Any expression to be evaluated before the type, from a typeof
+ specifier. */
+ tree expr;
+ /* The attributes from a typedef decl. */
+ tree decl_attr;
+ /* When parsing, the GNU attributes and prefix standard attributes.
+ Outside the parser, this will be NULL; attributes (possibly from
+ multiple lists) will be passed separately. */
+ tree attrs;
+ /* When parsing, postfix standard attributes (which appertain to the
+ type specified by the preceding declaration specifiers, unlike
+ prefix standard attributes which appertain to the declaration or
+ declarations as a whole). */
+ tree postfix_attrs;
+ /* The pass to start compiling a __GIMPLE or __RTL function with. */
+ char *gimple_or_rtl_pass;
+ /* ENTRY BB count. */
+ profile_count entry_bb_count;
+  /* The base-2 log of the greatest alignment required by an _Alignas
+     specifier, in bytes, or -1 if there are no such specifiers with
+     nonzero alignment. */
+ int align_log;
+ /* For the __intN declspec, this stores the index into the int_n_* arrays. */
+ int int_n_idx;
+ /* For the _FloatN and _FloatNx declspec, this stores the index into
+ the floatn_nx_types array. */
+ int floatn_nx_idx;
+ /* The storage class specifier, or csc_none if none. */
+ enum c_storage_class storage_class;
+ /* Any type specifier keyword used such as "int", not reflecting
+ modifiers such as "short", or cts_none if none. */
+ ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8;
+ /* The kind of type specifier if one has been seen, ctsk_none
+ otherwise. */
+ ENUM_BITFIELD (c_typespec_kind) typespec_kind : 4;
+ ENUM_BITFIELD (c_declspec_il) declspec_il : 3;
+ /* Whether any expressions in typeof specifiers may appear in
+ constant expressions. */
+ BOOL_BITFIELD expr_const_operands : 1;
+ /* Whether any declaration specifiers have been seen at all. */
+ BOOL_BITFIELD declspecs_seen_p : 1;
+ /* Whether any declaration specifiers other than standard attributes
+ have been seen at all. If only standard attributes have been
+ seen, this is an attribute-declaration. */
+ BOOL_BITFIELD non_std_attrs_seen_p : 1;
+ /* Whether something other than a storage class specifier or
+ attribute has been seen. This is used to warn for the
+ obsolescent usage of storage class specifiers other than at the
+ start of the list. (Doing this properly would require function
+ specifiers to be handled separately from storage class
+ specifiers.) */
+ BOOL_BITFIELD non_sc_seen_p : 1;
+ /* Whether the type is specified by a typedef or typeof name. */
+ BOOL_BITFIELD typedef_p : 1;
+ /* Whether the type is explicitly "signed" or specified by a typedef
+ whose type is explicitly "signed". */
+ BOOL_BITFIELD explicit_signed_p : 1;
+ /* Whether the specifiers include a deprecated typedef. */
+ BOOL_BITFIELD deprecated_p : 1;
+ /* Whether the specifiers include an unavailable typedef. */
+ BOOL_BITFIELD unavailable_p : 1;
+ /* Whether the type defaulted to "int" because there were no type
+ specifiers. */
+ BOOL_BITFIELD default_int_p : 1;
+ /* Whether "long" was specified. */
+ BOOL_BITFIELD long_p : 1;
+ /* Whether "long" was specified more than once. */
+ BOOL_BITFIELD long_long_p : 1;
+ /* Whether "short" was specified. */
+ BOOL_BITFIELD short_p : 1;
+ /* Whether "signed" was specified. */
+ BOOL_BITFIELD signed_p : 1;
+ /* Whether "unsigned" was specified. */
+ BOOL_BITFIELD unsigned_p : 1;
+ /* Whether "complex" was specified. */
+ BOOL_BITFIELD complex_p : 1;
+ /* Whether "inline" was specified. */
+ BOOL_BITFIELD inline_p : 1;
+ /* Whether "_Noreturn" was speciied. */
+ BOOL_BITFIELD noreturn_p : 1;
+ /* Whether "__thread" or "_Thread_local" was specified. */
+ BOOL_BITFIELD thread_p : 1;
+ /* Whether "__thread" rather than "_Thread_local" was specified. */
+ BOOL_BITFIELD thread_gnu_p : 1;
+ /* Whether "const" was specified. */
+ BOOL_BITFIELD const_p : 1;
+ /* Whether "volatile" was specified. */
+ BOOL_BITFIELD volatile_p : 1;
+ /* Whether "restrict" was specified. */
+ BOOL_BITFIELD restrict_p : 1;
+ /* Whether "_Atomic" was specified. */
+ BOOL_BITFIELD atomic_p : 1;
+ /* Whether "_Sat" was specified. */
+ BOOL_BITFIELD saturating_p : 1;
+ /* Whether any alignment specifier (even with zero alignment) was
+ specified. */
+ BOOL_BITFIELD alignas_p : 1;
+ /* Whether an enum type specifier (": specifier-qualifier-list") was
+ specified other than in a definition of that enum (if so, this is
+ invalid unless it is an empty declaration "enum identifier
+ enum-type-specifier;", but such an empty declaration is valid in
+ C2x when "enum identifier;" would not be). */
+ BOOL_BITFIELD enum_type_specifier_ref_p : 1;
+ /* Whether "auto" was specified in C2X (or later) mode and means the
+ type is to be deduced from an initializer, or would mean that if
+ no type specifier appears later in these declaration
+ specifiers. */
+ BOOL_BITFIELD c2x_auto_p : 1;
+ /* Whether "constexpr" was specified. */
+ BOOL_BITFIELD constexpr_p : 1;
+ /* The address space that the declaration belongs to. */
+ addr_space_t address_space;
+};
+
+/* The various kinds of declarators in C. */
+enum c_declarator_kind {
+ /* An identifier. */
+ cdk_id,
+ /* A function. */
+ cdk_function,
+ /* An array. */
+ cdk_array,
+ /* A pointer. */
+ cdk_pointer,
+ /* Parenthesized declarator with nested attributes. */
+ cdk_attrs
+};
+
+struct c_arg_tag {
+ /* The argument name. */
+ tree id;
+ /* The type of the argument. */
+ tree type;
+};
+
+
+/* Information about the parameters in a function declarator. */
+struct c_arg_info {
+ /* A list of parameter decls. */
+ tree parms;
+ /* A list of structure, union and enum tags defined. */
+ vec<c_arg_tag, va_gc> *tags;
+ /* A list of argument types to go in the FUNCTION_TYPE. */
+ tree types;
+ /* A list of non-parameter decls (notably enumeration constants)
+ defined with the parameters. */
+ tree others;
+ /* A compound expression of VLA sizes from the parameters, or NULL.
+ In a function definition, these are used to ensure that
+ side-effects in sizes of arrays converted to pointers (such as a
+ parameter int i[n++]) take place; otherwise, they are
+ ignored. */
+ tree pending_sizes;
+ /* True when these arguments had [*]. */
+ BOOL_BITFIELD had_vla_unspec : 1;
+ /* True when the arguments are a (...) prototype. */
+ BOOL_BITFIELD no_named_args_stdarg_p : 1;
+};
+
+/* A declarator. */
+struct c_declarator {
+ /* The kind of declarator. */
+ enum c_declarator_kind kind;
+ location_t id_loc; /* Currently only set for cdk_id, cdk_array. */
+ /* Except for cdk_id, the contained declarator. For cdk_id, NULL. */
+ struct c_declarator *declarator;
+ union {
+ /* For identifiers. */
+ struct {
+ /* An IDENTIFIER_NODE, or NULL_TREE if an abstract
+ declarator. */
+ tree id;
+ /* Any attributes (which apply to the declaration rather than to
+ the type described by the outer declarators). */
+ tree attrs;
+ } id;
+ /* For functions. */
+ struct c_arg_info *arg_info;
+ /* For arrays. */
+ struct {
+ /* The array dimension, or NULL for [] and [*]. */
+ tree dimen;
+ /* The qualifiers inside []. */
+ int quals;
+ /* The attributes (currently ignored) inside []. */
+ tree attrs;
+ /* Whether [static] was used. */
+ BOOL_BITFIELD static_p : 1;
+ /* Whether [*] was used. */
+ BOOL_BITFIELD vla_unspec_p : 1;
+ } array;
+ /* For pointers, the qualifiers on the pointer type. */
+ int pointer_quals;
+ /* For attributes. */
+ tree attrs;
+ } u;
+};
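+
+/* Editor's note (not part of the imported header): for a declaration
+ such as "int *a[5]" (an array of five pointers to int), the chain is
+
+ cdk_pointer -> cdk_array (dimen == 5) -> cdk_id ("a")
+
+ reading from the outermost c_declarator inward; grokdeclarator peels
+ these layers to build the type. The ordering is inferred from
+ c-decl.cc and is illustrative only. */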
+
+/* A type name. */
+struct c_type_name {
+ /* The declaration specifiers. */
+ struct c_declspecs *specs;
+ /* The declarator. */
+ struct c_declarator *declarator;
+};
+
+/* A parameter. */
+struct c_parm {
+ /* The declaration specifiers, minus any prefix attributes. */
+ struct c_declspecs *specs;
+ /* The attributes. */
+ tree attrs;
+ /* The declarator. */
+ struct c_declarator *declarator;
+ /* The location of the parameter. */
+ location_t loc;
+};
+
+/* Used when parsing an enum. Initialized by start_enum. */
+struct c_enum_contents
+{
+ /* While defining an enum type, this is 1 plus the last enumerator
+ constant value. */
+ tree enum_next_value;
+
+ /* The enumeration type itself. */
+ tree enum_type;
+
+ /* Nonzero means that there was overflow computing enum_next_value. */
+ int enum_overflow;
+};
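+
+/* Editor's sketch (not part of the imported header): how this struct
+ cooperates with the enum entry points declared further below
+ (start_enum, build_enumerator, finish_enum). Argument roles are
+ inferred from c-decl.cc and should be read as assumptions. */
+static tree
+sketch_parse_enum (location_t loc, tree name)
+{
+ struct c_enum_contents the_enum;
+ /* Begin the definition; the last argument is the fixed underlying
+ type, NULL_TREE when none was written. */
+ tree enumtype = start_enum (loc, &the_enum, name, NULL_TREE);
+ /* One enumerator with no explicit value: enum_next_value supplies
+ the value and is advanced; enum_overflow records wraparound. */
+ tree values = build_enumerator (loc, loc, &the_enum,
+ get_identifier ("A"), NULL_TREE);
+ return finish_enum (enumtype, values, NULL_TREE);
+}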
+
+/* A type of reference to a static identifier in an inline
+ function. */
+enum c_inline_static_type {
+ /* Identifier with internal linkage used in function that may be an
+ inline definition (i.e., file-scope static). */
+ csi_internal,
+ /* Modifiable object with static storage duration defined in
+ function that may be an inline definition (i.e., local
+ static). */
+ csi_modifiable
+};
+
+
+/* in c-parser.cc */
+extern void c_parse_init (void);
+extern bool c_keyword_starts_typename (enum rid keyword);
+
+/* in c-aux-info.cc */
+extern void gen_aux_info_record (tree, int, int, int);
+
+/* in c-decl.cc */
+struct c_spot_bindings;
+class c_struct_parse_info;
+extern struct obstack parser_obstack;
+/* Set to IN_ITERATION_STMT if parsing an iteration-statement,
+ to IN_OMP_BLOCK if parsing an OpenMP structured block and to
+ IN_OMP_FOR if parsing an OpenMP loop. If parsing a switch statement,
+ this is bitwise ORed with IN_SWITCH_STMT, unless parsing an
+ iteration-statement, OpenMP block or loop within that switch. */
+#define IN_SWITCH_STMT 1
+#define IN_ITERATION_STMT 2
+#define IN_OMP_BLOCK 4
+#define IN_OMP_FOR 8
+#define IN_OBJC_FOREACH 16
+extern unsigned char in_statement;
+
+extern bool switch_statement_break_seen_p;
+
+extern bool global_bindings_p (void);
+extern tree pushdecl (tree);
+extern unsigned int start_underspecified_init (location_t, tree);
+extern void finish_underspecified_init (tree, unsigned int);
+extern void push_scope (void);
+extern tree pop_scope (void);
+extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
+extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
+
+extern void record_inline_static (location_t, tree, tree,
+ enum c_inline_static_type);
+extern void c_init_decl_processing (void);
+extern void c_print_identifier (FILE *, tree, int);
+extern int quals_from_declspecs (const struct c_declspecs *);
+extern struct c_declarator *build_array_declarator (location_t, tree,
+ struct c_declspecs *,
+ bool, bool);
+extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
+ tree, tree);
+extern tree check_for_loop_decls (location_t, bool);
+extern void mark_forward_parm_decls (void);
+extern void declare_parm_level (void);
+extern void undeclared_variable (location_t, tree);
+extern tree lookup_label_for_goto (location_t, tree);
+extern tree declare_label (tree);
+extern tree define_label (location_t, tree);
+extern struct c_spot_bindings *c_get_switch_bindings (void);
+extern void c_release_switch_bindings (struct c_spot_bindings *);
+extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
+ location_t, location_t);
+extern void finish_decl (tree, location_t, tree, tree, tree);
+extern tree finish_enum (tree, tree, tree);
+extern void finish_function (location_t = input_location);
+extern tree finish_struct (location_t, tree, tree, tree,
+ class c_struct_parse_info *);
+extern tree c_simulate_enum_decl (location_t, const char *,
+ vec<string_int_pair> *);
+extern tree c_simulate_record_decl (location_t, const char *,
+ array_slice<const tree>);
+extern struct c_arg_info *build_arg_info (void);
+extern struct c_arg_info *get_parm_info (bool, tree);
+extern tree grokfield (location_t, struct c_declarator *,
+ struct c_declspecs *, tree, tree *);
+extern tree groktypename (struct c_type_name *, tree *, bool *);
+extern tree grokparm (const struct c_parm *, tree *);
+extern tree implicitly_declare (location_t, tree);
+extern void keep_next_level (void);
+extern void pending_xref_error (void);
+extern void c_push_function_context (void);
+extern void c_pop_function_context (void);
+extern void push_parm_decl (const struct c_parm *, tree *);
+extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
+ struct c_declarator *);
+extern tree c_builtin_function (tree);
+extern tree c_builtin_function_ext_scope (tree);
+extern tree c_simulate_builtin_function_decl (tree);
+extern void c_warn_unused_attributes (tree);
+extern tree c_warn_type_attributes (tree);
+extern void shadow_tag (const struct c_declspecs *);
+extern void shadow_tag_warned (const struct c_declspecs *, int);
+extern tree start_enum (location_t, struct c_enum_contents *, tree, tree);
+extern bool start_function (struct c_declspecs *, struct c_declarator *, tree);
+extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
+ tree, bool = true, location_t * = NULL);
+extern tree start_struct (location_t, enum tree_code, tree,
+ class c_struct_parse_info **);
+extern void store_parm_decls (void);
+extern void store_parm_decls_from (struct c_arg_info *);
+extern void temp_store_parm_decls (tree, tree);
+extern void temp_pop_parm_decls (void);
+extern tree xref_tag (enum tree_code, tree);
+extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree,
+ bool, tree, bool);
+extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
+ struct c_declarator *, location_t);
+extern struct c_declarator *build_attrs_declarator (tree,
+ struct c_declarator *);
+extern struct c_declarator *build_function_declarator (struct c_arg_info *,
+ struct c_declarator *);
+extern struct c_declarator *build_id_declarator (tree);
+extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
+ struct c_declarator *);
+extern struct c_declspecs *build_null_declspecs (void);
+extern struct c_declspecs *declspecs_add_qual (location_t,
+ struct c_declspecs *, tree);
+extern struct c_declspecs *declspecs_add_type (location_t,
+ struct c_declspecs *,
+ struct c_typespec);
+extern struct c_declspecs *declspecs_add_scspec (location_t,
+ struct c_declspecs *, tree);
+extern struct c_declspecs *declspecs_add_attrs (location_t,
+ struct c_declspecs *, tree);
+extern struct c_declspecs *declspecs_add_addrspace (location_t,
+ struct c_declspecs *,
+ addr_space_t);
+extern struct c_declspecs *declspecs_add_alignas (location_t,
+ struct c_declspecs *, tree);
+extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
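+
+/* Editor's sketch (not part of the imported header): how a parser
+ composes the builders declared above for a declaration such as
+ "const long x;". The c_typespec field conventions and the use of
+ ridpointers[] are inferred from c-parser.cc and are assumptions,
+ not quoted GCC code. */
+static struct c_declspecs *
+sketch_const_long_specs (location_t loc)
+{
+ struct c_declspecs *specs = build_null_declspecs ();
+ /* "const" is a qualifier; this sets specs->const_p. */
+ specs = declspecs_add_qual (loc, specs, ridpointers[RID_CONST]);
+ /* "long" is a type-specifier keyword wrapped in a c_typespec;
+ a second "long" would set long_long_p rather than long_p. */
+ struct c_typespec t = {};
+ t.kind = ctsk_resword;
+ t.spec = ridpointers[RID_LONG];
+ specs = declspecs_add_type (loc, specs, t);
+ /* Resolve defaults, e.g. plain "long" becomes "long int". */
+ return finish_declspecs (specs);
+}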
+
+/* in c-objc-common.cc */
+extern bool c_objc_common_init (void);
+extern bool c_missing_noreturn_ok_p (tree);
+extern bool c_warn_unused_global_decl (const_tree);
+extern void c_initialize_diagnostics (diagnostic_context *);
+extern bool c_var_mod_p (tree x, tree fn);
+extern alias_set_type c_get_alias_set (tree);
+
+/* in c-typeck.cc */
+extern int in_alignof;
+extern int in_sizeof;
+extern int in_typeof;
+extern bool c_in_omp_for;
+
+extern tree c_last_sizeof_arg;
+extern location_t c_last_sizeof_loc;
+
+extern struct c_switch *c_switch_stack;
+
+extern bool null_pointer_constant_p (const_tree);
+
+
+inline bool
+c_type_variably_modified_p (tree t)
+{
+ return error_mark_node != t && C_TYPE_VARIABLY_MODIFIED (t);
+}
+
+
+extern bool char_type_p (tree);
+extern tree c_objc_common_truthvalue_conversion (location_t, tree);
+extern tree require_complete_type (location_t, tree);
+extern bool same_translation_unit_p (const_tree, const_tree);
+extern int comptypes (tree, tree);
+extern int comptypes_check_different_types (tree, tree, bool *);
+extern int comptypes_check_enum_int (tree, tree, bool *);
+extern bool c_mark_addressable (tree, bool = false);
+extern void c_incomplete_type_error (location_t, const_tree, const_tree);
+extern tree c_type_promotes_to (tree);
+extern struct c_expr default_function_array_conversion (location_t,
+ struct c_expr);
+extern struct c_expr default_function_array_read_conversion (location_t,
+ struct c_expr);
+extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr,
+ bool, bool, bool = false);
+extern tree decl_constant_value_1 (tree, bool);
+extern void mark_exp_read (tree);
+extern tree composite_type (tree, tree);
+extern tree build_component_ref (location_t, tree, tree, location_t,
+ location_t);
+extern tree build_array_ref (location_t, tree, tree);
+extern tree build_external_ref (location_t, tree, bool, tree *);
+extern void pop_maybe_used (bool);
+extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
+extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
+extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
+ struct c_expr);
+extern struct c_expr parser_build_binary_op (location_t,
+ enum tree_code, struct c_expr,
+ struct c_expr);
+extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
+ location_t, tree, tree, location_t);
+extern tree build_compound_expr (location_t, tree, tree);
+extern tree c_cast_expr (location_t, struct c_type_name *, tree);
+extern tree build_c_cast (location_t, tree, tree);
+extern void store_init_value (location_t, tree, tree, tree);
+extern void maybe_warn_string_init (location_t, tree, struct c_expr);
+extern void start_init (tree, tree, bool, bool, rich_location *);
+extern void finish_init (void);
+extern void really_start_incremental_init (tree);
+extern void finish_implicit_inits (location_t, struct obstack *);
+extern void push_init_level (location_t, int, struct obstack *);
+extern struct c_expr pop_init_level (location_t, int, struct obstack *,
+ location_t);
+extern void set_init_index (location_t, tree, tree, struct obstack *);
+extern void set_init_label (location_t, tree, location_t, struct obstack *);
+extern void process_init_element (location_t, struct c_expr, bool,
+ struct obstack *);
+extern tree build_compound_literal (location_t, tree, tree, bool,
+ unsigned int, struct c_declspecs *);
+extern void check_compound_literal_type (location_t, struct c_type_name *);
+extern tree c_start_switch (location_t, location_t, tree, bool);
+extern void c_finish_switch (tree, tree);
+extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool,
+ bool);
+extern tree build_asm_stmt (bool, tree);
+extern int c_types_compatible_p (tree, tree);
+extern tree c_begin_compound_stmt (bool);
+extern tree c_end_compound_stmt (location_t, tree, bool);
+extern void c_finish_if_stmt (location_t, tree, tree, tree);
+extern void c_finish_loop (location_t, location_t, tree, location_t, tree,
+ tree, tree, tree, bool);
+extern tree c_begin_stmt_expr (void);
+extern tree c_finish_stmt_expr (location_t, tree);
+extern tree c_process_expr_stmt (location_t, tree);
+extern tree c_finish_expr_stmt (location_t, tree);
+extern tree c_finish_return (location_t, tree, tree);
+extern tree c_finish_bc_stmt (location_t, tree, bool);
+extern tree c_finish_goto_label (location_t, tree);
+extern tree c_finish_goto_ptr (location_t, c_expr val);
+extern tree c_expr_to_decl (tree, bool *, bool *);
+extern tree c_finish_omp_construct (location_t, enum tree_code, tree, tree);
+extern tree c_finish_oacc_data (location_t, tree, tree);
+extern tree c_finish_oacc_host_data (location_t, tree, tree);
+extern tree c_begin_omp_parallel (void);
+extern tree c_finish_omp_parallel (location_t, tree, tree);
+extern tree c_begin_omp_task (void);
+extern tree c_finish_omp_task (location_t, tree, tree);
+extern void c_finish_omp_cancel (location_t, tree);
+extern void c_finish_omp_cancellation_point (location_t, tree);
+extern tree c_finish_omp_clauses (tree, enum c_omp_region_type);
+extern tree c_build_va_arg (location_t, tree, location_t, tree);
+extern tree c_finish_transaction (location_t, tree, int);
+extern bool c_tree_equal (tree, tree);
+extern tree c_build_function_call_vec (location_t, const vec<location_t>&,
+ tree, vec<tree, va_gc> *,
+ vec<tree, va_gc> *);
+extern tree c_omp_clause_copy_ctor (tree, tree, tree);
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement that specifies a return value is seen. */
+
+extern int current_function_returns_value;
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement with no argument is seen. */
+
+extern int current_function_returns_null;
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a call to a noreturn function is seen. */
+
+extern int current_function_returns_abnormally;
+
+/* In c-decl.cc */
+
+/* Tell the binding oracle what kind of binding we are looking for. */
+
+enum c_oracle_request
+{
+ C_ORACLE_SYMBOL,
+ C_ORACLE_TAG,
+ C_ORACLE_LABEL
+};
+
+/* If this is non-NULL, then it is a "binding oracle" which can lazily
+ create bindings when needed by the C compiler. The oracle is told
+ the name and type of the binding to create. It can call pushdecl
+ or the like to ensure the binding is visible; or do nothing,
+ leaving the binding untouched. c-decl.cc takes note of when the
+ oracle has been called and will not call it again if it fails to
+ create a given binding. */
+
+typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier);
+
+extern c_binding_oracle_function *c_binding_oracle;
+
+extern void c_finish_incomplete_decl (tree);
+extern tree c_omp_reduction_id (enum tree_code, tree);
+extern tree c_omp_reduction_decl (tree);
+extern tree c_omp_reduction_lookup (tree, tree);
+extern tree c_check_omp_declare_reduction_r (tree *, int *, void *);
+extern bool c_check_in_current_scope (tree);
+extern void c_pushtag (location_t, tree, tree);
+extern void c_bind (location_t, tree, bool);
+extern bool tag_exists_p (enum tree_code, tree);
+
+/* In c-errors.cc */
+extern bool pedwarn_c90 (location_t, int opt, const char *, ...)
+ ATTRIBUTE_GCC_DIAG(3,4);
+extern bool pedwarn_c99 (location_t, int opt, const char *, ...)
+ ATTRIBUTE_GCC_DIAG(3,4);
+extern bool pedwarn_c11 (location_t, int opt, const char *, ...)
+ ATTRIBUTE_GCC_DIAG(3,4);
+
+extern void
+set_c_expr_source_range (c_expr *expr,
+ location_t start, location_t finish);
+
+extern void
+set_c_expr_source_range (c_expr *expr,
+ source_range src_range);
+
+/* In c-fold.cc */
+extern vec<tree> incomplete_record_decls;
+
+extern const char *c_get_sarif_source_language (const char *filename);
+
+#if CHECKING_P
+namespace selftest {
+ extern void run_c_tests (void);
+} // namespace selftest
+#endif /* #if CHECKING_P */
+
+
+#endif /* ! GCC_C_TREE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/calls.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/calls.h
new file mode 100644
index 0000000..c7f8c5e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/calls.h
@@ -0,0 +1,138 @@
+/* Declarations and data types for RTL call insn generation.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CALLS_H
+#define GCC_CALLS_H
+
+/* Describes a function argument.
+
+ Each argument conceptually has a gimple-level type. Usually this type
+ is available directly as a tree via the TYPE field, but when calling
+ libgcc support functions it might instead be inferred from a mode,
+ in which case the type isn't available directly.
+
+ This gimple-level type might go through promotion before being passed to
+ the target function. Depending on the context, the MODE field is either
+ the mode of the gimple-level type (whether explicitly given or not)
+ or the mode after promotion has been performed. */
+class function_arg_info
+{
+public:
+ function_arg_info ()
+ : type (NULL_TREE), mode (VOIDmode), named (false),
+ pass_by_reference (false)
+ {}
+
+ /* Initialize an argument of mode MODE, either before or after promotion. */
+ function_arg_info (machine_mode mode, bool named)
+ : type (NULL_TREE), mode (mode), named (named), pass_by_reference (false)
+ {}
+
+ /* Initialize an unpromoted argument of type TYPE. */
+ function_arg_info (tree type, bool named)
+ : type (type), mode (TYPE_MODE (type)), named (named),
+ pass_by_reference (false)
+ {}
+
+ /* Initialize an argument with explicit properties. */
+ function_arg_info (tree type, machine_mode mode, bool named)
+ : type (type), mode (mode), named (named), pass_by_reference (false)
+ {}
+
+ /* Return true if the gimple-level type is an aggregate. */
+ bool aggregate_type_p () const { return type && AGGREGATE_TYPE_P (type); }
+
+ /* Return the size of the gimple-level type, or -1 if the size is
+ variable or otherwise not representable as a poly_int64.
+
+ Use this function when MODE is the mode of the type before promotion,
+ or in any context if the target never promotes function arguments. */
+ poly_int64 type_size_in_bytes () const
+ {
+ if (type)
+ return int_size_in_bytes (type);
+ return GET_MODE_SIZE (mode);
+ }
+
+ /* Return the size of the argument after promotion, or -1 if the size
+ is variable or otherwise not representable as a poly_int64.
+
+ Use this function when MODE is the mode of the type after promotion. */
+ poly_int64 promoted_size_in_bytes () const
+ {
+ if (mode == BLKmode)
+ return int_size_in_bytes (type);
+ return GET_MODE_SIZE (mode);
+ }
+
+ /* True if the argument represents the end of the argument list,
+ as returned by end_marker (). */
+ bool end_marker_p () const { return mode == VOIDmode; }
+
+ /* Return a function_arg_info that represents the end of the
+ argument list. */
+ static function_arg_info end_marker ()
+ {
+ return function_arg_info (void_type_node, /*named=*/true);
+ }
+
+ /* The type of the argument, or null if not known (which is true for
+ libgcc support functions). */
+ tree type;
+
+ /* The mode of the argument. Depending on context, this might be
+ the mode of the argument type or the mode after promotion. */
+ machine_mode mode;
+
+ /* True if the argument is treated as a named argument, false if it is
+ treated as an unnamed variadic argument (i.e. one passed through
+ "..."). See also TARGET_STRICT_ARGUMENT_NAMING. */
+ unsigned int named : 1;
+
+ /* True if we have decided to pass the argument by reference, in which case
+ the function_arg_info describes a pointer to the original argument. */
+ unsigned int pass_by_reference : 1;
+};
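+
+/* Editor's sketch (not part of the imported header): constructing and
+ querying a function_arg_info. Everything used is declared above;
+ integer_type_node is GCC's built-in "int" type tree. */
+static void
+sketch_arg_info (void)
+{
+ /* An unpromoted, named "int" argument; MODE is derived from TYPE. */
+ function_arg_info arg (integer_type_node, /*named=*/true);
+ poly_int64 size = arg.type_size_in_bytes (); /* typically 4 */
+ gcc_assert (!arg.end_marker_p ());
+ /* The sentinel that terminates an argument scan. */
+ function_arg_info last = function_arg_info::end_marker ();
+ gcc_assert (last.end_marker_p ());
+ (void) size;
+}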
+
+extern int flags_from_decl_or_type (const_tree);
+extern int call_expr_flags (const_tree);
+extern int setjmp_call_p (const_tree);
+extern bool gimple_maybe_alloca_call_p (const gimple *);
+extern bool gimple_alloca_call_p (const gimple *);
+extern bool alloca_call_p (const_tree);
+extern bool must_pass_in_stack_var_size (const function_arg_info &);
+extern bool must_pass_in_stack_var_size_or_pad (const function_arg_info &);
+extern bool must_pass_va_arg_in_stack (tree);
+extern rtx prepare_call_address (tree, rtx, rtx, rtx *, int, int);
+extern bool shift_return_value (machine_mode, bool, rtx);
+extern rtx expand_call (tree, rtx, int);
+extern void fixup_tail_calls (void);
+
+extern bool pass_by_reference (CUMULATIVE_ARGS *, function_arg_info);
+extern bool pass_va_arg_by_reference (tree);
+extern bool apply_pass_by_reference_rules (CUMULATIVE_ARGS *,
+ function_arg_info &);
+extern bool reference_callee_copied (CUMULATIVE_ARGS *,
+ const function_arg_info &);
+extern void maybe_complain_about_tail_call (tree, const char *);
+
+extern rtx rtx_for_static_chain (const_tree, bool);
+extern bool cxx17_empty_base_field_p (const_tree);
+
+#endif // GCC_CALLS_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ccmp.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ccmp.h
new file mode 100644
index 0000000..f6f7389
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ccmp.h
@@ -0,0 +1,25 @@
+/* Conditional compare related functions.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CCMP_H
+#define GCC_CCMP_H
+
+extern rtx expand_ccmp_expr (gimple *, machine_mode);
+
+#endif /* GCC_CCMP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfg-flags.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfg-flags.def
new file mode 100644
index 0000000..2d8ccd9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfg-flags.def
@@ -0,0 +1,191 @@
+/* Flags on basic blocks and edges.
+ Copyright (C) 2012-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This file defines flags that may appear on basic blocks or on
+ edges. Source files define DEF_BASIC_BLOCK_FLAG or DEF_EDGE_FLAG
+ appropriately before including this file. */
+
+#if !defined(DEF_BASIC_BLOCK_FLAG) && !defined(DEF_EDGE_FLAG)
+#error "You must define DEF_BASIC_BLOCK_FLAG or DEF_EDGE_FLAG"
+#endif
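+
+/* Editor's note (not part of the imported file): a consumer supplies the
+ macro and includes this file, in the usual X-macro pattern. Roughly
+ what basic-block.h does (the enumerator shape below is reproduced from
+ memory and is an assumption):
+
+ enum cfg_bb_flags
+ {
+ #define DEF_BASIC_BLOCK_FLAG(NAME, IDX) BB_##NAME = 1 << IDX,
+ #include "cfg-flags.def"
+ #undef DEF_BASIC_BLOCK_FLAG
+ };
+*/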
+
+#ifdef DEF_BASIC_BLOCK_FLAG
+
+/* Masks for basic_block.flags.
+
+ The format of this file is: DEF_BASIC_BLOCK_FLAG(NAME, IDX).
+ NAME is the name of the basic block flag. A flag BB_#NAME will be
+ created and the name is used in dump_edge_info.
+ IDX is a sequence number that is used to determine the value
+ of the flag, which is 1 << IDX.
+
+ BB_HOT_PARTITION and BB_COLD_PARTITION should be preserved throughout
+ the compilation, so they are never cleared.
+
+ All other flags may be cleared by clear_bb_flags(). It is generally
+ a bad idea to rely on any flags being up-to-date. */
+
+/* Only set on blocks that have just been created by create_bb. */
+DEF_BASIC_BLOCK_FLAG(NEW, 0)
+
+/* Set by find_unreachable_blocks. Do not rely on this being set in any
+ pass. */
+DEF_BASIC_BLOCK_FLAG(REACHABLE, 1)
+
+/* Set for blocks in an irreducible loop by loop analysis. */
+DEF_BASIC_BLOCK_FLAG(IRREDUCIBLE_LOOP, 2)
+
+/* Set on blocks that may not actually be single-entry, single-exit blocks. */
+DEF_BASIC_BLOCK_FLAG(SUPERBLOCK, 3)
+
+/* Set on basic blocks that the scheduler should not touch. This is used
+ by SMS to prevent other schedulers from messing with the loop schedule. */
+DEF_BASIC_BLOCK_FLAG(DISABLE_SCHEDULE, 4)
+
+/* Set on blocks that should be put in a hot section. */
+DEF_BASIC_BLOCK_FLAG(HOT_PARTITION, 5)
+
+/* Set on blocks that should be put in a cold section. */
+DEF_BASIC_BLOCK_FLAG(COLD_PARTITION, 6)
+
+/* Set on block that was duplicated. */
+DEF_BASIC_BLOCK_FLAG(DUPLICATED, 7)
+
+/* Set if the label at the top of this block is the target of a non-local goto. */
+DEF_BASIC_BLOCK_FLAG(NON_LOCAL_GOTO_TARGET, 8)
+
+/* Set on blocks that are in RTL format. */
+DEF_BASIC_BLOCK_FLAG(RTL, 9)
+
+/* Set on blocks that are forwarder blocks.
+ Only used in cfgcleanup.cc. */
+DEF_BASIC_BLOCK_FLAG(FORWARDER_BLOCK, 10)
+
+/* Set on blocks that cannot be threaded through.
+ Only used for jump threading. */
+DEF_BASIC_BLOCK_FLAG(NONTHREADABLE_BLOCK, 11)
+
+/* Set on blocks that were modified in some way. This bit is set in
+ df_set_bb_dirty, but not cleared by df_analyze, so it can be used
+ to test whether a block has been modified prior to a df_analyze call. */
+DEF_BASIC_BLOCK_FLAG(MODIFIED, 12)
+
+/* A general visited flag for passes to use. */
+DEF_BASIC_BLOCK_FLAG(VISITED, 13)
+
+/* Set on blocks that are in a transaction. This is calculated on
+ demand, and is available after calling compute_transaction_bits(). */
+DEF_BASIC_BLOCK_FLAG(IN_TRANSACTION, 14)
+
+#endif
+
+#ifdef DEF_EDGE_FLAG
+
+/* Masks for edge.flags.
+
+ The format of this file is: DEF_EDGE_FLAG(NAME, IDX).
+ NAME is the name of the edge flag. A flag EDGE_#NAME will be
+ created and the name is used in dump_edge_info.
+ IDX is a sequence number that is used to determine the value
+ of the flag, which is 1 << IDX. */
+
+/* 'Straight line' flow. In GIMPLE and in cfglayout mode, all normal
+ edges are fallthru edges. In cfgrtl mode, this flag really means
+ that control flow falls through to the next basic block in the line. */
+DEF_EDGE_FLAG(FALLTHRU, 0)
+
+/* Strange flow, like a computed jump or exception handling. Usually
+ this means that the edge cannot be split. */
+DEF_EDGE_FLAG(ABNORMAL, 1)
+
+/* Edge out of a basic block that ends with a CALL_INSN with abnormal
+ exit, like an exception or a non-local goto.
+ ABNORMAL_CALL edges also have ABNORMAL set.
+ This flag is only used for the RTL CFG. */
+DEF_EDGE_FLAG(ABNORMAL_CALL, 2)
+
+/* Exception edge. Exception handling edges represent possible control
+ transfers from a trapping instruction to an exception handler.
+ EH edges also have ABNORMAL set for the RTL CFG. */
+DEF_EDGE_FLAG(EH, 3)
+
+/* Never merge blocks via this edge. This is used for exception handling,
+ to prevent merging away edges to the post-landing-pad basic block.
+ This flag is only used for the RTL CFG. */
+DEF_EDGE_FLAG(PRESERVE, 4)
+
+/* Not a real edge. This is used to connect parts of the CFG that do
+ not halt, such as infinite loops and noreturn functions, to the
+ EXIT_BLOCK, so that traversing of the reverse CFG is possible. */
+DEF_EDGE_FLAG(FAKE, 5)
+
+/* A back edge, marked in a depth-first search of the CFG. Back edges
+ are hints that this edge may be part of a loop in the CFG. */
+DEF_EDGE_FLAG(DFS_BACK, 6)
+
+/* Edge in a part of the CFG that is an irreducible loop. */
+DEF_EDGE_FLAG(IRREDUCIBLE_LOOP, 7)
+
+/* Edge taken when controlling predicate is nonzero.
+ This is only used for the GIMPLE CFG. */
+DEF_EDGE_FLAG(TRUE_VALUE, 8)
+
+/* Edge taken when controlling predicate is zero.
+ This is only used for the GIMPLE CFG. */
+DEF_EDGE_FLAG(FALSE_VALUE, 9)
+
+/* Edge is executable. This is only used in GIMPLE SSA-CCP and VRP.
+ This is only used for the GIMPLE CFG. */
+DEF_EDGE_FLAG(EXECUTABLE, 10)
+
+/* Edge crosses between hot and cold sections, when we do partitioning.
+ This flag is only used for the RTL CFG. */
+DEF_EDGE_FLAG(CROSSING, 11)
+
+/* Edge from a sibcall CALL_INSN to exit.
+ SIBCALL edges also have ABNORMAL set.
+ This flag is only used for the RTL CFG. */
+DEF_EDGE_FLAG(SIBCALL, 12)
+
+/* Candidate for straight line flow. Only used in bb-reorder.cc.
+ This flag is only used for the RTL CFG. */
+DEF_EDGE_FLAG(CAN_FALLTHRU, 13)
+
+/* Exit of a loop. This is only used in ifcvt.cc.
+ This flag is only used for the RTL CFG. */
+DEF_EDGE_FLAG(LOOP_EXIT, 14)
+
+/* Uninstrumented edge out of a GIMPLE_TRANSACTION statement. */
+DEF_EDGE_FLAG(TM_UNINSTRUMENTED, 15)
+
+/* Abort (over) edge out of a GIMPLE_TRANSACTION statement. */
+DEF_EDGE_FLAG(TM_ABORT, 16)
+
+/* An edge we should ignore. It should be entirely local to
+ passes; i.e., it is never set on any edge upon the completion
+ of any pass. */
+DEF_EDGE_FLAG(IGNORE, 17)
+
+#endif
+
+/*
+Local variables:
+mode:c
+End:
+*/
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfg.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfg.h
new file mode 100644
index 0000000..4cd2958
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfg.h
@@ -0,0 +1,186 @@
+/* Control flow graph manipulation code header file.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CFG_H
+#define GCC_CFG_H
+
+#include "dominance.h"
+
+/* What sort of profiling information we have. */
+enum profile_status_d
+{
+ PROFILE_ABSENT,
+ PROFILE_GUESSED,
+ PROFILE_READ,
+ PROFILE_LAST /* Last value, used by profile streaming. */
+};
+
+/* A structure to group all the per-function control flow graph data.
+ The x_* prefixing is necessary because otherwise references to the
+ fields of this struct would be captured by the backward-compatibility
+ macros defined after this struct. */
+struct GTY(()) control_flow_graph {
+ /* Block pointers for the exit and entry of a function.
+ These are always the head and tail of the basic block list. */
+ basic_block x_entry_block_ptr;
+ basic_block x_exit_block_ptr;
+
+ /* Index by basic block number, get basic block struct info. */
+ vec<basic_block, va_gc> *x_basic_block_info;
+
+ /* Number of basic blocks in this flow graph. */
+ int x_n_basic_blocks;
+
+ /* Number of edges in this flow graph. */
+ int x_n_edges;
+
+ /* The first free basic block number. */
+ int x_last_basic_block;
+
+ /* UIDs for LABEL_DECLs. */
+ int last_label_uid;
+
+ /* Mapping of labels to their associated blocks. At present
+ only used for the gimple CFG. */
+ vec<basic_block, va_gc> *x_label_to_block_map;
+
+ enum profile_status_d x_profile_status;
+
+ /* Whether the dominators and the postdominators are available. */
+ enum dom_state x_dom_computed[2];
+
+ /* Number of basic blocks in the dominance tree. */
+ unsigned x_n_bbs_in_dom_tree[2];
+
+ /* Maximal number of entities in a single jumptable. Used to estimate
+ final flowgraph size. */
+ int max_jumptable_ents;
+
+ /* Maximal profile count of any basic block in the function. */
+ profile_count count_max;
+
+ /* Dynamically allocated edge/bb flags. */
+ int edge_flags_allocated;
+ int bb_flags_allocated;
+};
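+
+/* Editor's note (not part of the imported header): the backward-
+ compatibility macros mentioned above live in basic-block.h and look
+ roughly like this (reproduced from memory, so treat as a sketch):
+
+ #define ENTRY_BLOCK_PTR_FOR_FN(FN) ((FN)->cfg->x_entry_block_ptr)
+ #define n_basic_blocks_for_fn(FN) ((FN)->cfg->x_n_basic_blocks)
+
+ Without the x_ prefix, the struct's own field names would be captured
+ by such macros. */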
+
+
+extern void init_flow (function *);
+extern void free_cfg (function *);
+extern basic_block alloc_block (void);
+extern void link_block (basic_block, basic_block);
+extern void unlink_block (basic_block);
+extern void compact_blocks (void);
+extern void expunge_block (basic_block);
+extern edge unchecked_make_edge (basic_block, basic_block, int);
+extern edge cached_make_edge (sbitmap, basic_block, basic_block, int);
+extern edge make_edge (basic_block, basic_block, int);
+extern edge make_single_succ_edge (basic_block, basic_block, int);
+extern void remove_edge_raw (edge);
+extern void redirect_edge_succ (edge, basic_block);
+extern void redirect_edge_pred (edge, basic_block);
+extern void clear_bb_flags (void);
+extern void dump_edge_info (FILE *, edge, dump_flags_t, int);
+extern void debug (edge_def &ref);
+extern void debug (edge_def *ptr);
+extern void alloc_aux_for_blocks (int);
+extern void clear_aux_for_blocks (void);
+extern void free_aux_for_blocks (void);
+extern void alloc_aux_for_edge (edge, int);
+extern void alloc_aux_for_edges (int);
+extern void clear_aux_for_edges (void);
+extern void free_aux_for_edges (void);
+extern void debug_bb (basic_block);
+extern basic_block debug_bb_n (int);
+extern void debug_bb (basic_block, dump_flags_t);
+extern basic_block debug_bb_n (int, dump_flags_t);
+extern void dump_bb_info (FILE *, basic_block, int, dump_flags_t, bool, bool);
+extern void brief_dump_cfg (FILE *, dump_flags_t);
+extern void update_bb_profile_for_threading (basic_block, profile_count, edge);
+extern void scale_bbs_frequencies_profile_count (basic_block *, int,
+ profile_count, profile_count);
+extern void scale_bbs_frequencies (basic_block *, int, profile_probability);
+extern void initialize_original_copy_tables (void);
+extern void reset_original_copy_tables (void);
+extern void free_original_copy_tables (void);
+extern bool original_copy_tables_initialized_p (void);
+extern void set_bb_original (basic_block, basic_block);
+extern basic_block get_bb_original (basic_block);
+extern void set_bb_copy (basic_block, basic_block);
+extern basic_block get_bb_copy (basic_block);
+void set_loop_copy (class loop *, class loop *);
+class loop *get_loop_copy (class loop *);
+
+/* Generic RAII class to allocate a bit from storage of integer type T.
+ The allocated bit is accessible as mask with the single bit set
+ via the conversion operator to T. */
+
+template <class T>
+class auto_flag
+{
+public:
+ /* T must be an integer type of at most HOST_WIDE_INT precision
+ (ideally enforced here with a static assertion). */
+ auto_flag (T *sptr)
+ {
+ m_sptr = sptr;
+ int free_bit = ffs_hwi (~*sptr);
+ /* If there are no unset bits... */
+ if (free_bit == 0)
+ gcc_unreachable ();
+ m_flag = HOST_WIDE_INT_1U << (free_bit - 1);
+ /* ...or if T is signed and thus the complement is sign-extended,
+ check if we ran out of bits. We could spare us this bit
+ if we could use C++11 std::make_unsigned<T>::type to pass
+ ~*sptr to ffs_hwi. */
+ if (m_flag == 0)
+ gcc_unreachable ();
+ gcc_checking_assert ((*sptr & m_flag) == 0);
+ *sptr |= m_flag;
+ }
+ ~auto_flag ()
+ {
+ gcc_checking_assert ((*m_sptr & m_flag) == m_flag);
+ *m_sptr &= ~m_flag;
+ }
+ operator T () const { return m_flag; }
+private:
+ T *m_sptr;
+ T m_flag;
+};
+
+/* RAII class to allocate an edge flag for temporary use. You have
+ to clear the flag from all edges when you are finished using it. */
+
+class auto_edge_flag : public auto_flag<int>
+{
+public:
+ auto_edge_flag (function *fun)
+ : auto_flag<int> (&fun->cfg->edge_flags_allocated) {}
+};
+
+/* RAII class to allocate a bb flag for temporary use. You have
+ to clear the flag from all basic blocks when you are finished using it. */
+class auto_bb_flag : public auto_flag<int>
+{
+public:
+ auto_bb_flag (function *fun)
+ : auto_flag<int> (&fun->cfg->bb_flags_allocated) {}
+};
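+
+/* Editor's sketch (not part of the imported header): typical RAII use of
+ auto_edge_flag inside a pass. FOR_EACH_EDGE, edge_iterator and cfun
+ are standard GCC internals; the pass logic is invented here. */
+static void
+sketch_mark_and_clear (basic_block bb)
+{
+ auto_edge_flag visited (cfun); /* grabs a free bit in edge flags */
+ edge e;
+ edge_iterator ei;
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ e->flags |= visited; /* converts to the single-bit mask */
+ /* Per the class comment, the user clears the bit from the edges;
+ the destructor only releases the bit in edge_flags_allocated. */
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ e->flags &= ~visited;
+}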
+
+#endif /* GCC_CFG_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfganal.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfganal.h
new file mode 100644
index 0000000..0b6c67d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfganal.h
@@ -0,0 +1,90 @@
+/* Control flow graph analysis header file.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_CFGANAL_H
+#define GCC_CFGANAL_H
+
+/* This structure maintains an edge list vector. */
+/* FIXME: Make this a vec<edge>. */
+struct edge_list
+{
+ int num_edges;
+ edge *index_to_edge;
+};
+
+
+/* Class to compute and manage control dependences on an edge-list. */
+class control_dependences
+{
+public:
+ control_dependences ();
+ ~control_dependences ();
+ bitmap get_edges_dependent_on (int);
+ basic_block get_edge_src (int);
+ basic_block get_edge_dest (int);
+
+private:
+ void set_control_dependence_map_bit (basic_block, int);
+ void clear_control_dependence_bitmap (basic_block);
+ void find_control_dependence (int);
+ vec<bitmap_head> control_dependence_map;
+ vec<std::pair<int, int> > m_el;
+ bitmap_obstack m_bitmaps;
+};
+
+extern bool mark_dfs_back_edges (struct function *);
+extern bool mark_dfs_back_edges (void);
+extern void verify_marked_backedges (struct function *);
+extern void find_unreachable_blocks (void);
+extern void verify_no_unreachable_blocks (void);
+struct edge_list * create_edge_list (void);
+void free_edge_list (struct edge_list *);
+void print_edge_list (FILE *, struct edge_list *);
+void verify_edge_list (FILE *, struct edge_list *);
+edge find_edge (basic_block, basic_block);
+int find_edge_index (struct edge_list *, basic_block, basic_block);
+extern void remove_fake_edges (void);
+extern void remove_fake_exit_edges (void);
+extern void add_noreturn_fake_exit_edges (void);
+extern void connect_infinite_loops_to_exit (void);
+extern int post_order_compute (int *, bool, bool);
+extern basic_block dfs_find_deadend (basic_block);
+extern void inverted_post_order_compute (vec<int> *postorder, sbitmap *start_points = 0);
+extern int pre_and_rev_post_order_compute_fn (struct function *,
+ int *, int *, bool);
+extern int pre_and_rev_post_order_compute (int *, int *, bool);
+extern int rev_post_order_and_mark_dfs_back_seme (struct function *, edge,
+ bitmap, bool, int *,
+ vec<std::pair<int, int> > *);
+extern int dfs_enumerate_from (basic_block, int,
+ bool (*)(const_basic_block, const void *),
+ basic_block *, int, const void *);
+extern void compute_dominance_frontiers (class bitmap_head *);
+extern bitmap compute_idf (bitmap, class bitmap_head *);
+extern void bitmap_intersection_of_succs (sbitmap, sbitmap *, basic_block);
+extern void bitmap_intersection_of_preds (sbitmap, sbitmap *, basic_block);
+extern void bitmap_union_of_succs (sbitmap, sbitmap *, basic_block);
+extern void bitmap_union_of_preds (sbitmap, sbitmap *, basic_block);
+extern basic_block * single_pred_before_succ_order (void);
+extern edge single_incoming_edge_ignoring_loop_edges (basic_block, bool);
+extern edge single_pred_edge_ignoring_loop_edges (basic_block, bool);
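+
+/* Editor's sketch (not part of the imported header): computing a post
+ order over the current function's CFG. XNEWVEC, free and
+ BASIC_BLOCK_FOR_FN are standard GCC internals; the roles of the two
+ boolean arguments are assumptions read off cfganal.cc. */
+static void
+sketch_walk_post_order (void)
+{
+ int *order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
+ /* false, false: exclude ENTRY/EXIT and do not delete unreachable
+ blocks (assumed meanings). */
+ int n = post_order_compute (order, false, false);
+ for (int i = 0; i < n; i++)
+ {
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, order[i]);
+ (void) bb; /* visit bb here */
+ }
+ free (order);
+}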
+
+
+#endif /* GCC_CFGANAL_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgbuild.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgbuild.h
new file mode 100644
index 0000000..51d3ecc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgbuild.h
@@ -0,0 +1,28 @@
+/* Control flow graph building header file.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CFGBUILD_H
+#define GCC_CFGBUILD_H
+
+extern bool inside_basic_block_p (const rtx_insn *);
+extern bool control_flow_insn_p (const rtx_insn *);
+extern void rtl_make_eh_edge (sbitmap, basic_block, rtx);
+extern void find_many_sub_basic_blocks (sbitmap);
+
+#endif /* GCC_CFGBUILD_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgcleanup.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgcleanup.h
new file mode 100644
index 0000000..46ce640
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgcleanup.h
@@ -0,0 +1,35 @@
+/* Control flow optimization header file.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_CFGCLEANUP_H
+#define GCC_CFGCLEANUP_H
+
+enum replace_direction { dir_none, dir_forward, dir_backward, dir_both };
+
+extern int flow_find_cross_jump (basic_block, basic_block, rtx_insn **,
+ rtx_insn **, enum replace_direction*);
+extern int flow_find_head_matching_sequence (basic_block, basic_block,
+ rtx_insn **, rtx_insn **, int);
+extern bool delete_unreachable_blocks (void);
+extern void delete_dead_jumptables (void);
+extern bool cleanup_cfg (int);
+extern bool bb_is_just_return (basic_block, rtx_insn **, rtx_insn **);
+
+#endif /* GCC_CFGCLEANUP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgexpand.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgexpand.h
new file mode 100644
index 0000000..0e551f6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgexpand.h
@@ -0,0 +1,28 @@
+/* Header file for lowering trees to RTL.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CFGEXPAND_H
+#define GCC_CFGEXPAND_H
+
+extern tree gimple_assign_rhs_to_tree (gimple *);
+extern HOST_WIDE_INT estimated_stack_frame_size (struct cgraph_node *);
+extern void set_parm_rtl (tree, rtx);
+
+
+#endif /* GCC_CFGEXPAND_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfghooks.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfghooks.h
new file mode 100644
index 0000000..7c87712
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfghooks.h
@@ -0,0 +1,289 @@
+/* Hooks for cfg representation specific functions.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Sebastian Pop <s.pop@laposte.net>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CFGHOOKS_H
+#define GCC_CFGHOOKS_H
+
+#include "predict.h"
+
+/* Structure to gather statistic about profile consistency, per pass.
+ An array of this structure, indexed by pass static number, is allocated
+ in passes.cc. The structure is defined here so that different CFG modes
+ can do their book-keeping via CFG hooks.
+
+ For every field[2], field[0] is the count before the pass runs, and
+ field[1] is the post-pass count. This allows us to monitor the effect
+ of each individual pass on the profile consistency.
+
+ This structure is not supposed to be used by anything other than passes.cc
+ and one CFG hook per CFG mode. */
+struct profile_record
+{
+ /* A weighted cost of the run-time of the function body. */
+ double time;
+ /* Frequency of execution of basic blocks where sum(prob) of the block's
+ predecessors doesn't reasonably match probability 1. */
+ double dyn_mismatched_prob_out;
+ /* Frequency of execution of basic blocks where sum(count) of the block's
+ predecessors doesn't match the incoming frequency reasonably well. */
+ double dyn_mismatched_count_in;
+ /* The number of basic blocks where sum(prob) of the block's predecessors
+ doesn't reasonably match probability 1. */
+ int num_mismatched_prob_out;
+ /* The number of basic blocks where sum(count) of the block's predecessors
+ doesn't match the incoming frequency reasonably well. */
+ int num_mismatched_count_in;
+ /* A weighted cost of the size of the function body. */
+ int size;
+ /* True iff this pass actually was run. */
+ bool run;
+ bool fdo;
+};
+
+typedef int_hash <unsigned short, 0> dependence_hash;
+
+/* Optional data for duplicate_block. */
+
+class copy_bb_data
+{
+public:
+ copy_bb_data() : dependence_map (NULL) {}
+ ~copy_bb_data () { delete dependence_map; }
+
+ /* A map from the copied BBs dependence info cliques to
+ equivalents in the BBs duplicated to. */
+ hash_map<dependence_hash, unsigned short> *dependence_map;
+};
+
+struct cfg_hooks
+{
+ /* Name of the corresponding ir. */
+ const char *name;
+
+ /* Debugging. */
+ int (*verify_flow_info) (void);
+ void (*dump_bb) (FILE *, basic_block, int, dump_flags_t);
+ void (*dump_bb_for_graph) (pretty_printer *, basic_block);
+
+ /* Basic CFG manipulation. */
+
+ /* Return new basic block. */
+ basic_block (*create_basic_block) (void *head, void *end, basic_block after);
+
+ /* Redirect edge E to the given basic block B and update underlying program
+ representation. Returns edge representing redirected branch (that may not
+ be equivalent to E in the case of duplicate edges being removed) or NULL
+ if edge is not easily redirectable for whatever reason. */
+ edge (*redirect_edge_and_branch) (edge e, basic_block b);
+
+ /* Same as the above, but allows redirecting of fallthru edges. In that
+ case the newly created forwarder basic block is returned. The edge
+ must not be abnormal. */
+ basic_block (*redirect_edge_and_branch_force) (edge, basic_block);
+
+ /* Returns true if it is possible to remove the edge by redirecting it
+ to the destination of the other edge going from its source. */
+ bool (*can_remove_branch_p) (const_edge);
+
+ /* Remove statements corresponding to a given basic block. */
+ void (*delete_basic_block) (basic_block);
+
+ /* Creates a new basic block just after basic block B by splitting
+ everything after specified instruction I. */
+ basic_block (*split_block) (basic_block b, void * i);
+
+ /* Move block B immediately after block A. */
+ bool (*move_block_after) (basic_block b, basic_block a);
+
+ /* Return true when blocks A and B can be merged into a single basic block. */
+ bool (*can_merge_blocks_p) (basic_block a, basic_block b);
+
+ /* Merge blocks A and B. */
+ void (*merge_blocks) (basic_block a, basic_block b);
+
+ /* Predict edge E using PREDICTOR to given PROBABILITY. */
+ void (*predict_edge) (edge e, enum br_predictor predictor, int probability);
+
+ /* Return true if one of the outgoing edges is already predicted by
+ PREDICTOR. */
+ bool (*predicted_by_p) (const_basic_block bb, enum br_predictor predictor);
+
+ /* Return true when block A can be duplicated. */
+ bool (*can_duplicate_block_p) (const_basic_block a);
+
+ /* Duplicate block A. */
+ basic_block (*duplicate_block) (basic_block a, copy_bb_data *);
+
+ /* Higher-level functions that would be representable by the primitive
+ operations above if we didn't have some oddities in the RTL and Tree
+ representations. */
+ basic_block (*split_edge) (edge);
+ void (*make_forwarder_block) (edge);
+
+ /* Try to make the edge fallthru. */
+ void (*tidy_fallthru_edge) (edge);
+
+ /* Make the edge non-fallthru. */
+ basic_block (*force_nonfallthru) (edge);
+
+ /* Say whether a block ends with a call, possibly followed by some
+ other code that must stay with the call. */
+ bool (*block_ends_with_call_p) (basic_block);
+
+ /* Say whether a block ends with a conditional branch. Switches
+ and unconditional branches do not qualify. */
+ bool (*block_ends_with_condjump_p) (const_basic_block);
+
+ /* Add fake edges to the function exit for any non-constant and
+ non-noreturn calls, and for volatile inline assembly, in the bitmap
+ of blocks specified by BLOCKS, or in the whole CFG if BLOCKS is zero.
+ Return the number of blocks that were split.
+
+ The goal is to expose cases in which entering a basic block does not imply
+ that all subsequent instructions must be executed. */
+ int (*flow_call_edges_add) (sbitmap);
+
+ /* This function is called immediately after edge E is added to the
+ edge vector E->dest->preds. */
+ void (*execute_on_growing_pred) (edge);
+
+ /* This function is called immediately before edge E is removed from
+ the edge vector E->dest->preds. */
+ void (*execute_on_shrinking_pred) (edge);
+
+ /* A hook for duplicating a loop in the CFG; currently this is used
+ in loop versioning. */
+ bool (*cfg_hook_duplicate_loop_body_to_header_edge) (class loop *, edge,
+ unsigned, sbitmap, edge,
+ vec<edge> *, int);
+
+ /* Add a condition to a new basic block and update the CFG; used in
+ loop versioning. */
+ void (*lv_add_condition_to_bb) (basic_block, basic_block, basic_block,
+ void *);
+ /* Update the PHI nodes in case of loop versioning. */
+ void (*lv_adjust_loop_header_phi) (basic_block, basic_block,
+ basic_block, edge);
+
+ /* Given a condition basic block, extract the true/false (taken/not-taken)
+ edges, depending on whether we are on trees or RTL. */
+ void (*extract_cond_bb_edges) (basic_block, edge *, edge *);
+
+
+ /* Add PHI arguments queued in the PENDING_STMT list on edge E to edge
+ E->dest (used only in tree-ssa loop versioning). */
+ void (*flush_pending_stmts) (edge);
+
+ /* True if a block contains no executable instructions. */
+ bool (*empty_block_p) (basic_block);
+
+ /* Split a basic block if it ends with a conditional branch and if
+ the other part of the block is not empty. */
+ basic_block (*split_block_before_cond_jump) (basic_block);
+
+ /* Do book-keeping of a basic block for the profile consistency checker. */
+ void (*account_profile_record) (basic_block, struct profile_record *);
+};
+
+extern void verify_flow_info (void);
+
+/* Check control flow invariants, if internal consistency checks are
+ enabled. */
+
+inline void
+checking_verify_flow_info (void)
+{
+ /* TODO: Add a separate option for -fchecking=cfg. */
+ if (flag_checking)
+ verify_flow_info ();
+}
+
+extern void dump_bb (FILE *, basic_block, int, dump_flags_t);
+extern void dump_bb_for_graph (pretty_printer *, basic_block);
+extern void dump_flow_info (FILE *, dump_flags_t);
+
+extern edge redirect_edge_and_branch (edge, basic_block);
+extern basic_block redirect_edge_and_branch_force (edge, basic_block);
+extern edge redirect_edge_succ_nodup (edge, basic_block);
+extern bool can_remove_branch_p (const_edge);
+extern void remove_branch (edge);
+extern void remove_edge (edge);
+extern edge split_block (basic_block, rtx);
+extern edge split_block (basic_block, gimple *);
+extern edge split_block_after_labels (basic_block);
+extern bool move_block_after (basic_block, basic_block);
+extern void delete_basic_block (basic_block);
+extern basic_block split_edge (edge);
+extern basic_block create_basic_block (rtx, rtx, basic_block);
+extern basic_block create_basic_block (gimple_seq, basic_block);
+extern basic_block create_empty_bb (basic_block);
+extern bool can_merge_blocks_p (basic_block, basic_block);
+extern void merge_blocks (basic_block, basic_block);
+extern edge make_forwarder_block (basic_block, bool (*)(edge),
+ void (*) (basic_block));
+extern basic_block force_nonfallthru (edge);
+extern void tidy_fallthru_edge (edge);
+extern void tidy_fallthru_edges (void);
+extern void predict_edge (edge e, enum br_predictor predictor, int probability);
+extern bool predicted_by_p (const_basic_block bb, enum br_predictor predictor);
+extern bool can_duplicate_block_p (const_basic_block);
+extern basic_block duplicate_block (basic_block, edge, basic_block,
+ copy_bb_data * = NULL);
+extern bool block_ends_with_call_p (basic_block bb);
+extern bool empty_block_p (basic_block);
+extern basic_block split_block_before_cond_jump (basic_block);
+extern bool block_ends_with_condjump_p (const_basic_block bb);
+extern int flow_call_edges_add (sbitmap);
+extern void execute_on_growing_pred (edge);
+extern void execute_on_shrinking_pred (edge);
+extern bool
+cfg_hook_duplicate_loop_body_to_header_edge (class loop *loop, edge,
+ unsigned int ndupl,
+ sbitmap wont_exit, edge orig,
+ vec<edge> *to_remove, int flags);
+
+extern void lv_flush_pending_stmts (edge);
+extern void extract_cond_bb_edges (basic_block, edge *, edge*);
+extern void lv_adjust_loop_header_phi (basic_block, basic_block, basic_block,
+ edge);
+extern void lv_add_condition_to_bb (basic_block, basic_block, basic_block,
+ void *);
+
+extern bool can_copy_bbs_p (basic_block *, unsigned);
+extern void copy_bbs (basic_block *, unsigned, basic_block *,
+ edge *, unsigned, edge *, class loop *,
+ basic_block, bool);
+
+void profile_record_check_consistency (profile_record *);
+void profile_record_account_profile (profile_record *);
+
+/* Hooks containers. */
+extern struct cfg_hooks gimple_cfg_hooks;
+extern struct cfg_hooks rtl_cfg_hooks;
+extern struct cfg_hooks cfg_layout_rtl_cfg_hooks;
+
+/* Declarations. */
+extern enum ir_type current_ir_type (void);
+extern void rtl_register_cfg_hooks (void);
+extern void cfg_layout_rtl_register_cfg_hooks (void);
+extern void gimple_register_cfg_hooks (void);
+extern struct cfg_hooks get_cfg_hooks (void);
+extern void set_cfg_hooks (struct cfg_hooks);
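+
+/* Editor's sketch (not part of the imported header): a pass installs the
+ hook set that matches the IR it works on before calling the generic
+ wrappers above; the surrounding pass context is invented here. */
+static void
+sketch_use_cfg_hooks (void)
+{
+ /* Make the generic entry points dispatch to the GIMPLE CFG hooks. */
+ gimple_register_cfg_hooks ();
+ /* Wrappers such as verify_flow_info or split_edge now route through
+ the installed cfg_hooks table. */
+ checking_verify_flow_info ();
+}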
+
+#endif /* GCC_CFGHOOKS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgloop.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgloop.h
new file mode 100644
index 0000000..e7ac2b5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgloop.h
@@ -0,0 +1,936 @@
+/* Natural loop functions
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CFGLOOP_H
+#define GCC_CFGLOOP_H
+
+#include "cfgloopmanip.h"
+
+/* Structure to hold decision about unrolling/peeling. */
+enum lpt_dec
+{
+ LPT_NONE,
+ LPT_UNROLL_CONSTANT,
+ LPT_UNROLL_RUNTIME,
+ LPT_UNROLL_STUPID
+};
+
+struct GTY (()) lpt_decision {
+ enum lpt_dec decision;
+ unsigned times;
+};
+
+/* The type of extend applied to an IV. */
+enum iv_extend_code
+{
+ IV_SIGN_EXTEND,
+ IV_ZERO_EXTEND,
+ IV_UNKNOWN_EXTEND
+};
+
+/* The structure describing a bound on number of iterations of a loop. */
+
+class GTY ((chain_next ("%h.next"))) nb_iter_bound {
+public:
+ /* The statement STMT is executed at most ... */
+ gimple *stmt;
+
+ /* ... BOUND + 1 times (BOUND must be an unsigned constant).
+ The + 1 is added for the following reasons:
+
+ a) 0 would otherwise be unused, while we would need to care more about
+ overflows (as MAX + 1 is sometimes produced as the estimate on number
+ of executions of STMT).
+ b) it is consistent with the result of number_of_iterations_exit. */
+ widest_int bound;
+
+ /* True if, after executing the statement BOUND + 1 times, we will
+ leave the loop; that is, all the statements after it are executed at most
+ BOUND times. */
+ bool is_exit;
+
+ /* The next bound in the list. */
+ class nb_iter_bound *next;
+};
+
+/* Description of the loop exit. */
+
+struct GTY ((for_user)) loop_exit {
+ /* The exit edge. */
+ edge e;
+
+ /* Previous and next exit in the list of the exits of the loop. */
+ struct loop_exit *prev;
+ struct loop_exit *next;
+
+ /* Next element in the list of loops from which E exits. */
+ struct loop_exit *next_e;
+};
+
+struct loop_exit_hasher : ggc_ptr_hash<loop_exit>
+{
+ typedef edge compare_type;
+
+ static hashval_t hash (loop_exit *);
+ static bool equal (loop_exit *, edge);
+ static void remove (loop_exit *);
+};
+
+typedef class loop *loop_p;
+
+/* An integer estimate of the number of iterations. Estimate_state
+ describes the state of the estimation. */
+enum loop_estimation
+{
+ /* Estimate was not computed yet. */
+ EST_NOT_COMPUTED,
+ /* Estimate is ready. */
+ EST_AVAILABLE,
+ EST_LAST
+};
+
+/* The structure describing a non-overflowing control induction variable for
+ a loop's exit edge. */
+struct GTY ((chain_next ("%h.next"))) control_iv {
+ tree base;
+ tree step;
+ struct control_iv *next;
+};
+
+/* Structure to hold information for each natural loop. */
+class GTY ((chain_next ("%h.next"))) loop {
+public:
+ /* Index into loops array. Note indices will never be reused after loop
+ is destroyed. */
+ int num;
+
+ /* Number of loop insns. */
+ unsigned ninsns;
+
+ /* Basic block of loop header. */
+ basic_block header;
+
+ /* Basic block of loop latch. */
+ basic_block latch;
+
+ /* For loop unrolling/peeling decision. */
+ struct lpt_decision lpt_decision;
+
+ /* Average number of executed insns per iteration. */
+ unsigned av_ninsns;
+
+ /* Number of blocks contained within the loop. */
+ unsigned num_nodes;
+
+ /* Superloops of the loop, starting with the outermost loop. */
+ vec<loop_p, va_gc> *superloops;
+
+ /* The first inner (child) loop or NULL if innermost loop. */
+ class loop *inner;
+
+ /* Link to the next (sibling) loop. */
+ class loop *next;
+
+ /* Auxiliary info specific to a pass. */
+ void *GTY ((skip (""))) aux;
+
+ /* The number of times the latch of the loop is executed. This can be an
+ INTEGER_CST, or a symbolic expression representing the number of
+ iterations like "N - 1", or a COND_EXPR containing the runtime
+ conditions under which the number of iterations is nonzero.
+
+ Don't access this field directly: number_of_latch_executions
+ computes and caches the computed information in this field. */
+ tree nb_iterations;
+
+ /* An integer guaranteed to be greater than or equal to nb_iterations. Only
+ valid if any_upper_bound is true. */
+ widest_int nb_iterations_upper_bound;
+
+ widest_int nb_iterations_likely_upper_bound;
+
+ /* An integer giving an estimate on nb_iterations. Unlike
+ nb_iterations_upper_bound, there is no guarantee that it is at least
+ nb_iterations. */
+ widest_int nb_iterations_estimate;
+
+ /* If > 0, an integer, where the user asserted that for any
+ I in [ 0, nb_iterations ) and for any J in
+ [ I, min ( I + safelen, nb_iterations ) ), the Ith and Jth iterations
+ of the loop can be safely evaluated concurrently. */
+ int safelen;
+
+ /* Preferred vectorization factor for the loop if non-zero. */
+ int simdlen;
+
+ /* Constraints are generally set by consumers and affect certain
+ semantics of niter analyzer APIs. Currently the APIs affected are
+ number_of_iterations_exit* functions and their callers. One typical
+ use case of constraints is to vectorize a possibly infinite loop:
+
+ 1) Compute niter->assumptions by calling niter analyzer API and
+ record it as possible condition for loop versioning.
+ 2) Clear buffered result of niter/scev analyzer.
+ 3) Set constraint LOOP_C_FINITE assuming the loop is finite.
+ 4) Analyze data references. Since data reference analysis depends
+ on niter/scev analyzer, the point is that niter/scev analysis
+ is done under the LOOP_C_FINITE constraint.
+ 5) Version the loop with niter->assumptions computed in step 1).
+ 6) Vectorize the versioned loop in which niter->assumptions is
+ checked to be true.
+ 7) Update constraints in versioned loops so that niter analyzer
+ in following passes can use it.
+
+ Note consumers are usually the loop optimizers and it is the consumers'
+ responsibility to set/clear constraints correctly. Failing to do
+ that might result in hard-to-track-down bugs in niter/scev consumers. */
+ unsigned constraints;
+
+ /* An integer estimate of the number of iterations. Estimate_state
+ describes the state of the estimation. */
+ ENUM_BITFIELD(loop_estimation) estimate_state : 8;
+
+ unsigned any_upper_bound : 1;
+ unsigned any_estimate : 1;
+ unsigned any_likely_upper_bound : 1;
+
+ /* True if the loop can be parallel. */
+ unsigned can_be_parallel : 1;
+
+ /* True if -Waggressive-loop-optimizations warned about this loop
+ already. */
+ unsigned warned_aggressive_loop_optimizations : 1;
+
+ /* True if this loop should never be vectorized. */
+ unsigned dont_vectorize : 1;
+
+ /* True if we should try harder to vectorize this loop. */
+ unsigned force_vectorize : 1;
+
+ /* True if the loop is part of an oacc kernels region. */
+ unsigned in_oacc_kernels_region : 1;
+
+ /* True if the loop is known to be finite. This is a localized form of
+ the flag_finite_loops state or of similar pragmas. */
+ unsigned finite_p : 1;
+
+ /* The number of times to unroll the loop. 0 means no information given,
+ just do what we always do. A value of 1 means do not unroll the loop.
+ A value of USHRT_MAX means unroll with no specific unrolling factor.
+ Other values mean unroll with the given unrolling factor. */
+ unsigned short unroll;
+
+ /* If this loop was inlined the main clique of the callee which does
+ not need remapping when copying the loop body. */
+ unsigned short owned_clique;
+
+ /* For SIMD loops, this is a unique identifier of the loop, referenced
+ by IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LANE and IFN_GOMP_SIMD_LAST_LANE
+ builtins. */
+ tree simduid;
+
+ /* In loop optimization, it's common to generate loops from the original
+ loop. This field records the index of the original loop which can be
+ used to track the original loop from newly generated loops. This can
+ be done by calling function get_loop (cfun, orig_loop_num). Note the
+ original loop could have been destroyed for various reasons and thus
+ no longer exist; in that case a call to get_loop returns a NULL
+ pointer, this field should not be used, and it needs to be cleared
+ whenever possible. */
+ int orig_loop_num;
+
+ /* Upper bound on number of iterations of a loop. */
+ class nb_iter_bound *bounds;
+
+ /* Non-overflow control ivs of a loop. */
+ struct control_iv *control_ivs;
+
+ /* Head of the cyclic list of the exits of the loop. */
+ struct loop_exit *exits;
+
+ /* Number-of-iterations analysis data for RTL. */
+ class niter_desc *simple_loop_desc;
+
+ /* For sanity checking during loop fixup we record here the former
+ loop header for loops marked for removal. Note that this prevents
+ the basic-block from being collected but its index can still be
+ reused. */
+ basic_block former_header;
+};
+
+/* Set if the loop is known to be infinite. */
+#define LOOP_C_INFINITE (1 << 0)
+/* Set if the loop is known to be finite without any assumptions. */
+#define LOOP_C_FINITE (1 << 1)
+
+/* Set constraint C in LOOP. */
+inline void
+loop_constraint_set (class loop *loop, unsigned c)
+{
+ loop->constraints |= c;
+}
+
+/* Clear constraint C from LOOP. */
+inline void
+loop_constraint_clear (class loop *loop, unsigned c)
+{
+ loop->constraints &= ~c;
+}
+
+/* Check whether constraint C is set in LOOP. */
+inline bool
+loop_constraint_set_p (class loop *loop, unsigned c)
+{
+ return (loop->constraints & c) == c;
+}
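+
+/* A short usage sketch (illustrative only, not part of the upstream header)
+   of the three helpers above, following the LOOP_C_FINITE protocol described
+   for the constraints field: assume finiteness around niter/scev analysis,
+   then drop the assumption again.
+
+     loop_constraint_set (loop, LOOP_C_FINITE);
+     gcc_assert (loop_constraint_set_p (loop, LOOP_C_FINITE));
+     ... run niter/scev analysis here ...
+     loop_constraint_clear (loop, LOOP_C_FINITE);  */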
+
+/* Flags for state of loop structure. */
+enum
+{
+ LOOPS_HAVE_PREHEADERS = 1,
+ LOOPS_HAVE_SIMPLE_LATCHES = 2,
+ LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS = 4,
+ LOOPS_HAVE_RECORDED_EXITS = 8,
+ LOOPS_MAY_HAVE_MULTIPLE_LATCHES = 16,
+ LOOP_CLOSED_SSA = 32,
+ LOOPS_NEED_FIXUP = 64,
+ LOOPS_HAVE_FALLTHRU_PREHEADERS = 128
+};
+
+#define LOOPS_NORMAL (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES \
+ | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
+#define AVOID_CFG_MODIFICATIONS (LOOPS_MAY_HAVE_MULTIPLE_LATCHES)
+
+/* Structure to hold CFG information about natural loops within a function. */
+struct GTY (()) loops {
+ /* State of loops. */
+ int state;
+
+ /* Array of the loops. */
+ vec<loop_p, va_gc> *larray;
+
+ /* Maps edges to the list of their descriptions as loop exits. Edges
+ whose sources or destinations have loop_father == NULL (which may
+ happen during the cfg manipulations) should not appear in EXITS. */
+ hash_table<loop_exit_hasher> *GTY(()) exits;
+
+ /* Pointer to root of loop hierarchy tree. */
+ class loop *tree_root;
+};
+
+/* Loop recognition. */
+bool bb_loop_header_p (basic_block);
+void init_loops_structure (struct function *, struct loops *, unsigned);
+extern struct loops *flow_loops_find (struct loops *);
+extern void disambiguate_loops_with_multiple_latches (void);
+extern void flow_loops_free (struct loops *);
+extern void flow_loops_dump (FILE *,
+ void (*)(const class loop *, FILE *, int), int);
+extern void flow_loop_dump (const class loop *, FILE *,
+ void (*)(const class loop *, FILE *, int), int);
+class loop *alloc_loop (void);
+extern void flow_loop_free (class loop *);
+int flow_loop_nodes_find (basic_block, class loop *);
+unsigned fix_loop_structure (bitmap changed_bbs);
+bool mark_irreducible_loops (void);
+void release_recorded_exits (function *);
+void record_loop_exits (void);
+void rescan_loop_exit (edge, bool, bool);
+void sort_sibling_loops (function *);
+
+/* Loop data structure manipulation/querying. */
+extern void flow_loop_tree_node_add (class loop *, class loop *,
+ class loop * = NULL);
+extern void flow_loop_tree_node_remove (class loop *);
+extern bool flow_loop_nested_p (const class loop *, const class loop *);
+extern bool flow_bb_inside_loop_p (const class loop *, const_basic_block);
+extern class loop * find_common_loop (class loop *, class loop *);
+class loop *superloop_at_depth (class loop *, unsigned);
+struct eni_weights;
+extern int num_loop_insns (const class loop *);
+extern int average_num_loop_insns (const class loop *);
+extern unsigned get_loop_level (const class loop *);
+extern bool loop_exit_edge_p (const class loop *, const_edge);
+extern bool loop_exits_to_bb_p (class loop *, basic_block);
+extern bool loop_exits_from_bb_p (class loop *, basic_block);
+extern void mark_loop_exit_edges (void);
+extern dump_user_location_t get_loop_location (class loop *loop);
+
+/* Loops & cfg manipulation. */
+extern basic_block *get_loop_body (const class loop *);
+extern unsigned get_loop_body_with_size (const class loop *, basic_block *,
+ unsigned);
+extern basic_block *get_loop_body_in_dom_order (const class loop *);
+extern basic_block *get_loop_body_in_bfs_order (const class loop *);
+extern basic_block *get_loop_body_in_custom_order (const class loop *,
+ int (*) (const void *, const void *));
+extern basic_block *get_loop_body_in_custom_order (const class loop *, void *,
+ int (*) (const void *, const void *, void *));
+
+extern auto_vec<edge> get_loop_exit_edges (const class loop *, basic_block * = NULL);
+extern edge single_exit (const class loop *);
+extern edge single_likely_exit (class loop *loop, const vec<edge> &);
+extern unsigned num_loop_branches (const class loop *);
+
+extern edge loop_preheader_edge (const class loop *);
+extern edge loop_latch_edge (const class loop *);
+
+extern void add_bb_to_loop (basic_block, class loop *);
+extern void remove_bb_from_loops (basic_block);
+
+extern void cancel_loop_tree (class loop *);
+extern void delete_loop (class loop *);
+
+
+extern void verify_loop_structure (void);
+
+/* Loop analysis. */
+extern bool just_once_each_iteration_p (const class loop *, const_basic_block);
+gcov_type expected_loop_iterations_unbounded (const class loop *,
+ bool *read_profile_p = NULL, bool by_profile_only = false);
+extern unsigned expected_loop_iterations (class loop *);
+extern rtx doloop_condition_get (rtx_insn *);
+
+void mark_loop_for_removal (loop_p);
+
+/* Induction variable analysis. */
+
+/* The description of an induction variable. Things are a bit complicated
+ due to the need to handle subregs and extends. The value of the object
+ described by it can be obtained as follows (all computations are done in
+ extend_mode):
+
+ Value in i-th iteration is
+ delta + mult * extend_{extend_mode} (subreg_{mode} (base + i * step)).
+
+ If first_special is true, the value in the first iteration is
+ delta + mult * base
+
+ If extend = UNKNOWN, first_special must be false, delta 0, mult 1, and the value is
+ subreg_{mode} (base + i * step)
+
+ The get_iv_value function can be used to obtain these expressions.
+
+ ??? Add a third mode field that would specify the mode in which the
+ inner computation is done, which would enable it to be different
+ from the outer one? */
+
+class rtx_iv
+{
+public:
+ /* Its base and step (mode of base and step is supposed to be extend_mode,
+ see the description above). */
+ rtx base, step;
+
+ /* The type of extend applied to it (IV_SIGN_EXTEND, IV_ZERO_EXTEND,
+ or IV_UNKNOWN_EXTEND). */
+ enum iv_extend_code extend;
+
+ /* Operations applied in the extended mode. */
+ rtx delta, mult;
+
+ /* The mode it is extended to. */
+ scalar_int_mode extend_mode;
+
+ /* The mode the variable iterates in. */
+ scalar_int_mode mode;
+
+ /* Whether the first iteration needs to be handled specially. */
+ unsigned first_special : 1;
+};
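+
+/* Illustrative instance of the value formula above (not from the upstream
+   sources): with delta = 0, mult = 1, base = (reg R), step = (const_int 4),
+   mode = SImode, extend_mode = DImode and extend = IV_SIGN_EXTEND, the
+   value in iteration I is sign_extend:DI (subreg:SI (R + I * 4)).  */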
+
+/* The description of an exit from the loop and of the number of iterations
+ until we take the exit. */
+
+class GTY(()) niter_desc
+{
+public:
+ /* The edge out of the loop. */
+ edge out_edge;
+
+ /* The other edge leading from the condition. */
+ edge in_edge;
+
+ /* True if we are able to say anything about the number of iterations of the
+ loop. */
+ bool simple_p;
+
+ /* True if the loop iterates a constant number of times. */
+ bool const_iter;
+
+ /* Number of iterations if constant. */
+ uint64_t niter;
+
+ /* Assumptions under which the rest of the information is valid. */
+ rtx assumptions;
+
+ /* Assumptions under which the loop ends before reaching the latch,
+ even if the value of niter_expr says otherwise. */
+ rtx noloop_assumptions;
+
+ /* Condition under which the loop is infinite. */
+ rtx infinite;
+
+ /* Whether the comparison is signed. */
+ bool signed_p;
+
+ /* The mode in which niter_expr should be computed. */
+ scalar_int_mode mode;
+
+ /* The number of iterations of the loop. */
+ rtx niter_expr;
+};
+
+extern void iv_analysis_loop_init (class loop *);
+extern bool iv_analyze (rtx_insn *, scalar_int_mode, rtx, class rtx_iv *);
+extern bool iv_analyze_result (rtx_insn *, rtx, class rtx_iv *);
+extern bool iv_analyze_expr (rtx_insn *, scalar_int_mode, rtx,
+ class rtx_iv *);
+extern rtx get_iv_value (class rtx_iv *, rtx);
+extern bool biv_p (rtx_insn *, scalar_int_mode, rtx);
+extern void iv_analysis_done (void);
+
+extern class niter_desc *get_simple_loop_desc (class loop *loop);
+extern void free_simple_loop_desc (class loop *loop);
+
+inline class niter_desc *
+simple_loop_desc (class loop *loop)
+{
+ return loop->simple_loop_desc;
+}
+
+/* Accessors for the loop structures. */
+
+/* Returns the loop with index NUM from FN's loop tree. */
+
+inline class loop *
+get_loop (struct function *fn, unsigned num)
+{
+ return (*loops_for_fn (fn)->larray)[num];
+}
+
+/* Returns the number of superloops of LOOP. */
+
+inline unsigned
+loop_depth (const class loop *loop)
+{
+ return vec_safe_length (loop->superloops);
+}
+
+/* Returns the immediate superloop of LOOP, or NULL if LOOP is the outermost
+ loop. */
+
+inline class loop *
+loop_outer (const class loop *loop)
+{
+ unsigned n = vec_safe_length (loop->superloops);
+
+ if (n == 0)
+ return NULL;
+
+ return (*loop->superloops)[n - 1];
+}
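+
+/* A short sketch (illustrative only, not part of the upstream header) of the
+   two accessors above: walk from LOOP out to the outermost loop, printing
+   each nesting level.
+
+     for (class loop *l = loop; l; l = loop_outer (l))
+       fprintf (stderr, "loop %d at depth %u\n", l->num, loop_depth (l));  */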
+
+/* Returns true if LOOP has at least one exit edge. */
+
+inline bool
+loop_has_exit_edges (const class loop *loop)
+{
+ return loop->exits->next->e != NULL;
+}
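+
+/* Sketch (illustrative only): when the test above succeeds, the recorded
+   exits can be visited via get_loop_exit_edges declared earlier;
+   handle_exit is a hypothetical callback.
+
+     if (loop_has_exit_edges (loop))
+       for (edge e : get_loop_exit_edges (loop))
+         handle_exit (e);  */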
+
+/* Returns the list of loops in FN. */
+
+inline vec<loop_p, va_gc> *
+get_loops (struct function *fn)
+{
+ struct loops *loops = loops_for_fn (fn);
+ if (!loops)
+ return NULL;
+
+ return loops->larray;
+}
+
+/* Returns the number of loops in FN (including the removed
+ ones and the fake loop that forms the root of the loop tree). */
+
+inline unsigned
+number_of_loops (struct function *fn)
+{
+ struct loops *loops = loops_for_fn (fn);
+ if (!loops)
+ return 0;
+
+ return vec_safe_length (loops->larray);
+}
+
+/* Returns true if the state of the loops satisfies all properties
+ described by FLAGS. */
+
+inline bool
+loops_state_satisfies_p (function *fn, unsigned flags)
+{
+ return (loops_for_fn (fn)->state & flags) == flags;
+}
+
+inline bool
+loops_state_satisfies_p (unsigned flags)
+{
+ return loops_state_satisfies_p (cfun, flags);
+}
+
+/* Sets FLAGS in the loops state. */
+
+inline void
+loops_state_set (function *fn, unsigned flags)
+{
+ loops_for_fn (fn)->state |= flags;
+}
+
+inline void
+loops_state_set (unsigned flags)
+{
+ loops_state_set (cfun, flags);
+}
+
+/* Clears FLAGS from the loops state. */
+
+inline void
+loops_state_clear (function *fn, unsigned flags)
+{
+ loops_for_fn (fn)->state &= ~flags;
+}
+
+inline void
+loops_state_clear (unsigned flags)
+{
+ if (!current_loops)
+ return;
+ loops_state_clear (cfun, flags);
+}
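+
+/* Sketch (illustrative only, not part of the upstream header) combining the
+   helpers above: record a property, query the combined LOOPS_NORMAL mask,
+   and clear a stale flag.
+
+     loops_state_set (LOOPS_HAVE_RECORDED_EXITS);
+     if (loops_state_satisfies_p (LOOPS_NORMAL))
+       ...;
+     loops_state_clear (LOOPS_NEED_FIXUP);  */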
+
+/* Check loop structure invariants, if internal consistency checks are
+ enabled. */
+
+inline void
+checking_verify_loop_structure (void)
+{
+ /* VERIFY_LOOP_STRUCTURE essentially asserts that no loops need fixups.
+
+ The loop optimizers should never make changes to the CFG which
+ require loop fixups. But the low level CFG manipulation code may
+ set the flag conservatively.
+
+ Go ahead and clear the flag here. That avoids the assert inside
+ VERIFY_LOOP_STRUCTURE, and if there is an inconsistency in the loop
+ structures VERIFY_LOOP_STRUCTURE will detect it.
+
+ This also avoids the compile-time cost of excessive fixups. */
+ loops_state_clear (LOOPS_NEED_FIXUP);
+ if (flag_checking)
+ verify_loop_structure ();
+}
+
+/* Loop iterators. */
+
+/* Flags for loop iteration. */
+
+enum li_flags
+{
+ LI_INCLUDE_ROOT = 1, /* Include the fake root of the loop tree. */
+ LI_FROM_INNERMOST = 2, /* Iterate over the loops in the reverse order,
+ starting from innermost ones. */
+ LI_ONLY_INNERMOST = 4 /* Iterate only over innermost loops. */
+};
+
+/* Provide the functionality of std::as_const to support range-based for
+ with a const iterator. (We can't use std::as_const itself because it's
+ a C++17 feature.) */
+template <typename T>
+constexpr const T &
+as_const (T &t)
+{
+ return t;
+}
+
+/* A list for visiting loops, which contains the loop numbers instead of
+ the loop pointers. If the loop ROOT is given (non-null), the visiting
+ starts from it, otherwise it starts from the tree_root of
+ loops_for_fn (FN). The scope is restricted to function FN and
+ the visiting order is specified by FLAGS. */
+
+class loops_list
+{
+public:
+ loops_list (function *fn, unsigned flags, class loop *root = nullptr);
+
+ template <typename T> class Iter
+ {
+ public:
+ Iter (const loops_list &l, unsigned idx) : list (l), curr_idx (idx)
+ {
+ fill_curr_loop ();
+ }
+
+ T operator* () const { return curr_loop; }
+
+ Iter &
+ operator++ ()
+ {
+ if (curr_idx < list.to_visit.length ())
+ {
+ /* Bump the index and fill a new one. */
+ curr_idx++;
+ fill_curr_loop ();
+ }
+ else
+ gcc_assert (!curr_loop);
+
+ return *this;
+ }
+
+ bool
+ operator!= (const Iter &rhs) const
+ {
+ return this->curr_idx != rhs.curr_idx;
+ }
+
+ private:
+ /* Fill the current loop starting from the current index. */
+ void fill_curr_loop ();
+
+ /* Reference to the loop list to visit. */
+ const loops_list &list;
+
+ /* The current index in the list to visit. */
+ unsigned curr_idx;
+
+ /* The loop implied by the current index. */
+ class loop *curr_loop;
+ };
+
+ using iterator = Iter<class loop *>;
+ using const_iterator = Iter<const class loop *>;
+
+ iterator
+ begin ()
+ {
+ return iterator (*this, 0);
+ }
+
+ iterator
+ end ()
+ {
+ return iterator (*this, to_visit.length ());
+ }
+
+ const_iterator
+ begin () const
+ {
+ return const_iterator (*this, 0);
+ }
+
+ const_iterator
+ end () const
+ {
+ return const_iterator (*this, to_visit.length ());
+ }
+
+private:
+ /* Walk the loop tree starting from ROOT in the visiting order specified
+ by FLAGS. */
+ void walk_loop_tree (class loop *root, unsigned flags);
+
+ /* The function we are visiting. */
+ function *fn;
+
+ /* The list of loops to visit. */
+ auto_vec<int, 16> to_visit;
+};
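+
+/* Usage sketch (illustrative only, not part of the upstream header): the
+   class above turns a loop walk into a range-based for; process_loop is a
+   hypothetical callback.
+
+     for (class loop *loop : loops_list (cfun, LI_ONLY_INNERMOST))
+       process_loop (loop);  */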
+
+/* Starting from the current index CURR_IDX (inclusive), find an index
+ that stands for a valid loop and record that loop as CURR_LOOP;
+ if we can't find one, set CURR_LOOP to null. */
+
+template <typename T>
+inline void
+loops_list::Iter<T>::fill_curr_loop ()
+{
+ int anum;
+
+ while (this->list.to_visit.iterate (this->curr_idx, &anum))
+ {
+ class loop *loop = get_loop (this->list.fn, anum);
+ if (loop)
+ {
+ curr_loop = loop;
+ return;
+ }
+ this->curr_idx++;
+ }
+
+ curr_loop = nullptr;
+}
+
+/* Set up the list of loops to visit according to the specified
+ function scope FN and iterating order FLAGS. If ROOT is
+ not null, the visiting starts from it, otherwise it
+ starts from the tree_root of loops_for_fn (FN). */
+
+inline loops_list::loops_list (function *fn, unsigned flags, class loop *root)
+{
+ struct loops *loops = loops_for_fn (fn);
+ gcc_assert (!root || loops);
+
+ /* Check that mutually exclusive flags do not co-exist. */
+ unsigned checked_flags = LI_ONLY_INNERMOST | LI_FROM_INNERMOST;
+ gcc_assert ((flags & checked_flags) != checked_flags);
+
+ this->fn = fn;
+ if (!loops)
+ return;
+
+ class loop *tree_root = root ? root : loops->tree_root;
+
+ this->to_visit.reserve_exact (number_of_loops (fn));
+
+ /* When root is tree_root of loops_for_fn (fn) and the visiting
+ order is LI_ONLY_INNERMOST, we would like to use linear
+ search here since it has a more stable bound than
+ walk_loop_tree. */
+ if (flags & LI_ONLY_INNERMOST && tree_root == loops->tree_root)
+ {
+ gcc_assert (tree_root->num == 0);
+ if (tree_root->inner == NULL)
+ {
+ if (flags & LI_INCLUDE_ROOT)
+ this->to_visit.quick_push (0);
+
+ return;
+ }
+
+ class loop *aloop;
+ unsigned int i;
+ for (i = 1; vec_safe_iterate (loops->larray, i, &aloop); i++)
+ if (aloop != NULL && aloop->inner == NULL)
+ this->to_visit.quick_push (aloop->num);
+ }
+ else
+ walk_loop_tree (tree_root, flags);
+}
+
+/* The properties of the target. */
+struct target_cfgloop {
+ /* Number of available registers. */
+ unsigned x_target_avail_regs;
+
+ /* Number of available registers that are call-clobbered. */
+ unsigned x_target_clobbered_regs;
+
+ /* Number of registers reserved for temporary expressions. */
+ unsigned x_target_res_regs;
+
+ /* The cost of a register when there is still some reserve, but we are
+ approaching the number of available registers. */
+ unsigned x_target_reg_cost[2];
+
+ /* The cost of a register when we need to spill. */
+ unsigned x_target_spill_cost[2];
+};
+
+extern struct target_cfgloop default_target_cfgloop;
+#if SWITCHABLE_TARGET
+extern struct target_cfgloop *this_target_cfgloop;
+#else
+#define this_target_cfgloop (&default_target_cfgloop)
+#endif
+
+#define target_avail_regs \
+ (this_target_cfgloop->x_target_avail_regs)
+#define target_clobbered_regs \
+ (this_target_cfgloop->x_target_clobbered_regs)
+#define target_res_regs \
+ (this_target_cfgloop->x_target_res_regs)
+#define target_reg_cost \
+ (this_target_cfgloop->x_target_reg_cost)
+#define target_spill_cost \
+ (this_target_cfgloop->x_target_spill_cost)
+
+/* Register pressure estimation for induction variable optimizations & loop
+ invariant motion. */
+extern unsigned estimate_reg_pressure_cost (unsigned, unsigned, bool, bool);
+extern void init_set_costs (void);
+
+/* Loop optimizer initialization. */
+extern void loop_optimizer_init (unsigned);
+extern void loop_optimizer_finalize (function *, bool = false);
+inline void
+loop_optimizer_finalize ()
+{
+ loop_optimizer_finalize (cfun);
+}
+
+/* Optimization passes. */
+enum
+{
+ UAP_UNROLL = 1, /* Enables unrolling of loops if it seems profitable. */
+ UAP_UNROLL_ALL = 2 /* Enables unrolling of all loops. */
+};
+
+extern void doloop_optimize_loops (void);
+extern void move_loop_invariants (void);
+extern auto_vec<basic_block> get_loop_hot_path (const class loop *loop);
+
+/* Returns the outermost loop of the loop nest that contains LOOP. */
+inline class loop *
+loop_outermost (class loop *loop)
+{
+ unsigned n = vec_safe_length (loop->superloops);
+
+ if (n <= 1)
+ return loop;
+
+ return (*loop->superloops)[1];
+}
+
+extern void record_niter_bound (class loop *, const widest_int &, bool, bool);
+extern HOST_WIDE_INT get_estimated_loop_iterations_int (class loop *);
+extern HOST_WIDE_INT get_max_loop_iterations_int (const class loop *);
+extern HOST_WIDE_INT get_likely_max_loop_iterations_int (class loop *);
+extern bool get_estimated_loop_iterations (class loop *loop, widest_int *nit);
+extern bool get_max_loop_iterations (const class loop *loop, widest_int *nit);
+extern bool get_likely_max_loop_iterations (class loop *loop, widest_int *nit);
+extern int bb_loop_depth (const_basic_block);
+
+/* Converts VAL to widest_int. */
+
+inline widest_int
+gcov_type_to_wide_int (gcov_type val)
+{
+ HOST_WIDE_INT a[2];
+
+ a[0] = (unsigned HOST_WIDE_INT) val;
+ /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
+ the size of type. */
+ val >>= HOST_BITS_PER_WIDE_INT - 1;
+ val >>= 1;
+ a[1] = (unsigned HOST_WIDE_INT) val;
+
+ return widest_int::from_array (a, 2);
+}
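+
+/* Illustrative example (not from the upstream sources): for val = -1 the
+   two stores yield a[0] = a[1] = ~(unsigned HOST_WIDE_INT) 0, i.e. -1 as
+   a widest_int. Splitting the shift into two steps avoids shifting by
+   the full width of the type, which would be undefined behavior.  */
+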
+#endif /* GCC_CFGLOOP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgloopmanip.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgloopmanip.h
new file mode 100644
index 0000000..93e417f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgloopmanip.h
@@ -0,0 +1,63 @@
+/* Loop manipulation header.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CFGLOOPMANIP_H
+#define GCC_CFGLOOPMANIP_H
+
+enum
+{
+ CP_SIMPLE_PREHEADERS = 1,
+ CP_FALLTHRU_PREHEADERS = 2
+};
+
+#define DLTHE_FLAG_UPDATE_FREQ 1 /* Update frequencies in
+ duplicate_loop_body_to_header_edge. */
+#define DLTHE_RECORD_COPY_NUMBER 2 /* Record copy number in the aux
+ field of a newly created BB. */
+#define DLTHE_FLAG_COMPLETTE_PEEL 4 /* Update frequencies expecting
+ a complete peeling. */
+extern edge mfb_kj_edge;
+
+extern bool remove_path (edge, bool * = NULL, bitmap = NULL);
+extern void place_new_loop (struct function *, class loop *);
+extern void add_loop (class loop *, class loop *);
+extern void scale_loop_frequencies (class loop *, profile_probability);
+extern void scale_loop_profile (class loop *, profile_probability, gcov_type);
+extern edge create_empty_if_region_on_edge (edge, tree);
+extern class loop *create_empty_loop_on_edge (edge, tree, tree, tree, tree,
+ tree *, tree *, class loop *);
+extern void unloop (class loop *, bool *, bitmap);
+extern void copy_loop_info (class loop *loop, class loop *target);
+extern class loop * duplicate_loop (class loop *, class loop *,
+ class loop * = NULL);
+extern void duplicate_subloops (class loop *, class loop *);
+extern bool can_duplicate_loop_p (const class loop *loop);
+extern bool
+duplicate_loop_body_to_header_edge (class loop *, edge, unsigned, sbitmap, edge,
+ vec<edge> *, int);
+extern bool mfb_keep_just (edge);
+basic_block create_preheader (class loop *, int);
+extern void create_preheaders (int);
+extern void force_single_succ_latches (void);
+class loop * loop_version (class loop *, void *,
+ basic_block *,
+ profile_probability, profile_probability,
+ profile_probability, profile_probability, bool);
+
+#endif /* GCC_CFGLOOPMANIP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgrtl.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgrtl.h
new file mode 100644
index 0000000..e295134
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cfgrtl.h
@@ -0,0 +1,61 @@
+/* Define control flow data structures for the CFG.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CFGRTL_H
+#define GCC_CFGRTL_H
+
+extern void delete_insn (rtx_insn *);
+extern bool delete_insn_and_edges (rtx_insn *);
+extern void delete_insn_chain (rtx, rtx_insn *, bool);
+extern basic_block create_basic_block_structure (rtx_insn *, rtx_insn *,
+ rtx_note *, basic_block);
+extern void compute_bb_for_insn (void);
+extern unsigned int free_bb_for_insn (void);
+extern rtx_insn *entry_of_function (void);
+extern void update_bb_for_insn (basic_block);
+extern bool contains_no_active_insn_p (const_basic_block);
+extern bool forwarder_block_p (const_basic_block);
+extern bool can_fallthru (basic_block, basic_block);
+extern rtx_note *bb_note (basic_block);
+extern rtx_code_label *block_label (basic_block);
+extern edge try_redirect_by_replacing_jump (edge, basic_block, bool);
+extern void emit_barrier_after_bb (basic_block bb);
+extern basic_block force_nonfallthru_and_redirect (edge, basic_block, rtx);
+extern void insert_insn_on_edge (rtx, edge);
+extern void commit_one_edge_insertion (edge e);
+extern void commit_edge_insertions (void);
+extern void print_rtl_with_bb (FILE *, const rtx_insn *, dump_flags_t);
+extern void update_br_prob_note (basic_block);
+extern rtx_insn *get_last_bb_insn (basic_block);
+extern void fixup_partitions (void);
+extern bool purge_dead_edges (basic_block);
+extern bool purge_all_dead_edges (void);
+extern bool fixup_abnormal_edges (void);
+extern void update_cfg_for_uncondjump (rtx_insn *);
+extern rtx_insn *unlink_insn_chain (rtx_insn *, rtx_insn *);
+extern void relink_block_chain (bool);
+extern rtx_insn *duplicate_insn_chain (rtx_insn *, rtx_insn *,
+ class loop *, class copy_bb_data *);
+extern void cfg_layout_initialize (int);
+extern void cfg_layout_finalize (void);
+extern void break_superblocks (void);
+extern void init_rtl_bb_info (basic_block);
+extern void find_bbs_reachable_by_hot_paths (hash_set <basic_block> *);
+
+#endif /* GCC_CFGRTL_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cgraph.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cgraph.h
new file mode 100644
index 0000000..f5f5476
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cgraph.h
@@ -0,0 +1,3576 @@
+/* Callgraph handling code.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Jan Hubicka
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CGRAPH_H
+#define GCC_CGRAPH_H
+
+#include "profile-count.h"
+#include "ipa-ref.h"
+#include "plugin-api.h"
+#include "ipa-param-manipulation.h"
+
+extern void debuginfo_early_init (void);
+extern void debuginfo_init (void);
+extern void debuginfo_fini (void);
+extern void debuginfo_start (void);
+extern void debuginfo_stop (void);
+extern void debuginfo_early_start (void);
+extern void debuginfo_early_stop (void);
+
+class ipa_opt_pass_d;
+typedef ipa_opt_pass_d *ipa_opt_pass;
+
+/* Symbol table consists of functions and variables.
+ TODO: add labels and CONST_DECLs. */
+enum symtab_type
+{
+ SYMTAB_SYMBOL,
+ SYMTAB_FUNCTION,
+ SYMTAB_VARIABLE
+};
+
+/* Section names are stored as reference-counted strings in a GGC-safe
+ hashtable (to make them survive through PCH). */
+
+struct GTY((for_user)) section_hash_entry
+{
+ int ref_count;
+ char *name; /* As long as this data structure stays in GGC, we cannot put
+ the string at the tail of the structure, or GGC dies in a
+ horrible way. */
+};
+
+struct section_name_hasher : ggc_ptr_hash<section_hash_entry>
+{
+ typedef const char *compare_type;
+
+ static hashval_t hash (section_hash_entry *);
+ static bool equal (section_hash_entry *, const char *);
+};
+
+enum availability
+{
+ /* Not yet set by cgraph_function_body_availability. */
+ AVAIL_UNSET,
+ /* Function body/variable initializer is unknown. */
+ AVAIL_NOT_AVAILABLE,
+ /* Function body/variable initializer is known but might be replaced
+ by a different one from another compilation unit and thus needs to
+ be dealt with care. Like AVAIL_NOT_AVAILABLE it can have
+ arbitrary side effects on escaping variables and functions, while
+ like AVAILABLE it might access static variables. */
+ AVAIL_INTERPOSABLE,
+ /* Function body/variable initializer is known and will be used in final
+ program. */
+ AVAIL_AVAILABLE,
+ /* Function body/variable initializer is known and all its uses are
+ explicitly visible within the current unit (i.e. its address is never
+ taken and it is not exported to other units). Currently used only for
+ functions. */
+ AVAIL_LOCAL
+};
+
+/* Classification of symbols WRT partitioning. */
+enum symbol_partitioning_class
+{
+ /* External declarations are ignored by partitioning algorithms and they are
+ added into the boundary later via compute_ltrans_boundary. */
+ SYMBOL_EXTERNAL,
+ /* Partitioned symbols are put into one of partitions. */
+ SYMBOL_PARTITION,
+ /* Duplicated symbols (such as comdat or constant pool references) are
+ copied into every node needing them via add_symbol_to_partition. */
+ SYMBOL_DUPLICATE
+};
+
+/* Base of all entries in the symbol table.
+ The symtab_node is inherited by cgraph and varpool nodes. */
+struct GTY((desc ("%h.type"), tag ("SYMTAB_SYMBOL"),
+ chain_next ("%h.next"), chain_prev ("%h.previous")))
+ symtab_node
+{
+public:
+ friend class symbol_table;
+
+ /* Constructor. */
+ explicit symtab_node (symtab_type t)
+ : type (t), resolution (LDPR_UNKNOWN), definition (false), alias (false),
+ transparent_alias (false), weakref (false), cpp_implicit_alias (false),
+ symver (false), analyzed (false), writeonly (false),
+ refuse_visibility_changes (false), externally_visible (false),
+ no_reorder (false), force_output (false), forced_by_abi (false),
+ unique_name (false), implicit_section (false), body_removed (false),
+ semantic_interposition (flag_semantic_interposition),
+ used_from_other_partition (false), in_other_partition (false),
+ address_taken (false), in_init_priority_hash (false),
+ need_lto_streaming (false), offloadable (false), ifunc_resolver (false),
+ order (false), next_sharing_asm_name (NULL),
+ previous_sharing_asm_name (NULL), same_comdat_group (NULL), ref_list (),
+ alias_target (NULL), lto_file_data (NULL), aux (NULL),
+ x_comdat_group (NULL_TREE), x_section (NULL)
+ {}
+
+ /* Return name. */
+ const char *name () const;
+
+ /* Return dump name. */
+ const char *dump_name () const;
+
+ /* Return asm name. */
+ const char *asm_name () const;
+
+ /* Return dump name with assembler name. */
+ const char *dump_asm_name () const;
+
+ /* Return visibility name. */
+ const char *get_visibility_string () const;
+
+ /* Return the symtab type name. */
+ const char *get_symtab_type_string () const;
+
+ /* Add node into symbol table. This function is not used directly, but via
+ cgraph/varpool node creation routines. */
+ void register_symbol (void);
+
+ /* Remove symbol from symbol table. */
+ void remove (void);
+
+ /* Undo any definition or use of the symbol. */
+ void reset (void);
+
+ /* Dump symtab node to F. */
+ void dump (FILE *f);
+
+ /* Dump symtab callgraph in graphviz format. */
+ void dump_graphviz (FILE *f);
+
+ /* Dump symtab node to stderr. */
+ void DEBUG_FUNCTION debug (void);
+
+ /* Verify consistency of node. */
+ void DEBUG_FUNCTION verify (void);
+
+ /* Return ipa reference from this symtab_node to
+ REFERRED_NODE or REFERRED_VARPOOL_NODE. USE_TYPE specifies the type
+ of the use and STMT the statement (if it exists). */
+ ipa_ref *create_reference (symtab_node *referred_node,
+ enum ipa_ref_use use_type);
+
+ /* Return ipa reference from this symtab_node to
+ REFERRED_NODE or REFERRED_VARPOOL_NODE. USE_TYPE specifies the type
+ of the use and STMT the statement (if it exists). */
+ ipa_ref *create_reference (symtab_node *referred_node,
+ enum ipa_ref_use use_type, gimple *stmt);
+
+ /* If VAL is a reference to a function or a variable, add a reference from
+ this symtab_node to the corresponding symbol table node. Return the new
+ reference or NULL if none was created. */
+ ipa_ref *maybe_create_reference (tree val, gimple *stmt);
+
+ /* Clone all references from symtab NODE to this symtab_node. */
+ void clone_references (symtab_node *node);
+
+ /* Clone all referring entries (i.e. references pointing at NODE) from
+ symtab NODE to this symtab_node. */
+ void clone_referring (symtab_node *node);
+
+ /* Clone reference REF to this symtab_node and set its stmt to STMT. */
+ ipa_ref *clone_reference (ipa_ref *ref, gimple *stmt);
+
+ /* Find the structure describing a reference to REFERRED_NODE of USE_TYPE and
+ associated with statement STMT or LTO_STMT_UID. */
+ ipa_ref *find_reference (symtab_node *referred_node, gimple *stmt,
+ unsigned int lto_stmt_uid,
+ enum ipa_ref_use use_type);
+
+ /* Remove all references that are associated with statement STMT. */
+ void remove_stmt_references (gimple *stmt);
+
+ /* Remove all stmt references in non-speculative references.
+ Those are not maintained during inlining & cloning.
+ The exceptions are speculative references that are updated along
+ with callgraph edges associated with them. */
+ void clear_stmts_in_references (void);
+
+ /* Remove all references in ref list. */
+ void remove_all_references (void);
+
+ /* Remove all referring items in ref list. */
+ void remove_all_referring (void);
+
+ /* Dump references in ref list to FILE. */
+ void dump_references (FILE *file);
+
+ /* Dump referring in list to FILE. */
+ void dump_referring (FILE *);
+
+ /* Get number of references for this node. */
+ inline unsigned num_references (void)
+ {
+ return ref_list.references.length ();
+ }
+
+ /* Iterates over the I-th reference in the list; REF is also set. */
+ ipa_ref *iterate_reference (unsigned i, ipa_ref *&ref);
+
+ /* Iterates over the I-th referring item in the list; REF is also set. */
+ ipa_ref *iterate_referring (unsigned i, ipa_ref *&ref);
+
+ /* Iterates over the I-th referring alias item in the list; REF is also set. */
+ ipa_ref *iterate_direct_aliases (unsigned i, ipa_ref *&ref);
+
+ /* Return true if symtab node and TARGET represent
+ semantically equivalent symbols. */
+ bool semantically_equivalent_p (symtab_node *target);
+
+ /* Classify this symtab node for partitioning. */
+ enum symbol_partitioning_class get_partitioning_class (void);
+
+ /* Return comdat group. */
+ tree get_comdat_group ()
+ {
+ return x_comdat_group;
+ }
+
+ /* Return comdat group as identifier_node. */
+ tree get_comdat_group_id ()
+ {
+ if (x_comdat_group && TREE_CODE (x_comdat_group) != IDENTIFIER_NODE)
+ x_comdat_group = DECL_ASSEMBLER_NAME (x_comdat_group);
+ return x_comdat_group;
+ }
+
+ /* Set comdat group. */
+ void set_comdat_group (tree group)
+ {
+ gcc_checking_assert (!group || TREE_CODE (group) == IDENTIFIER_NODE
+ || DECL_P (group));
+ x_comdat_group = group;
+ }
+
+ /* Return section as string. */
+ const char * get_section () const
+ {
+ if (!x_section)
+ return NULL;
+ return x_section->name;
+ }
+
+ /* Remove node from same comdat group. */
+ void remove_from_same_comdat_group (void);
+
+ /* Add this symtab_node to the same comdat group that OLD is in. */
+ void add_to_same_comdat_group (symtab_node *old_node);
+
+ /* Dissolve the same_comdat_group list in which NODE resides. */
+ void dissolve_same_comdat_group_list (void);
+
+ /* Return true when symtab_node is known to be used from another (non-LTO)
+ object file. Known only when doing LTO via linker plugin. */
+ bool used_from_object_file_p (void);
+
+ /* Walk the alias chain to return the symbol NODE is an alias of.
+ If NODE is not an alias, return NODE.
+ When AVAILABILITY is non-NULL, get minimal availability in the chain.
+ When REF is non-NULL, assume that reference happens in symbol REF
+ when determining the availability. */
+ symtab_node *ultimate_alias_target (enum availability *avail = NULL,
+ struct symtab_node *ref = NULL);
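+
+  /* Sketch (illustrative only, not part of the upstream header): resolve
+     the alias chain of a node and check how safe it is to use the body
+     found; node and use_body are hypothetical.
+
+       enum availability avail;
+       symtab_node *target = node->ultimate_alias_target (&avail);
+       if (avail >= AVAIL_AVAILABLE)
+         use_body (target);  */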
+
+ /* Return next reachable static symbol with initializer after NODE. */
+ inline symtab_node *next_defined_symbol (void);
+
+ /* Add reference recording that symtab node is alias of TARGET.
+ If TRANSPARENT is true make the alias a transparent alias.
+ The function can fail in the case of aliasing cycles; in this case
+ it returns false. */
+ bool resolve_alias (symtab_node *target, bool transparent = false);
+
+ /* The C++ FE sometimes changes linkage flags after producing
+ same-body aliases. */
+ void fixup_same_cpp_alias_visibility (symtab_node *target);
+
+ /* Call callback on symtab node and aliases associated with this node.
+ When INCLUDE_OVERWRITABLE is false, overwritable aliases and thunks are
+ skipped. */
+ bool call_for_symbol_and_aliases (bool (*callback) (symtab_node *, void *),
+ void *data,
+ bool include_overwrite);
+
+ /* If the node cannot be interposed by the static or dynamic linker to
+ point to a different definition, return this symbol. Otherwise look for
+ an alias with such a property and if none exists, introduce a new one. */
+ symtab_node *noninterposable_alias (void);
+
+ /* Return node that alias is aliasing. */
+ inline symtab_node *get_alias_target (void);
+
+ /* Return DECL that alias is aliasing. */
+ inline tree get_alias_target_tree ();
+
+ /* Set section for symbol and its aliases. */
+ void set_section (const char *section);
+
+ /* Like set_section, but copying the section name from another node. */
+ void set_section (const symtab_node &other);
+
+ /* Set section, do not recurse into aliases.
+ When one wants to change section of symbol and its aliases,
+ use set_section. */
+ void set_section_for_node (const char *section);
+
+ /* Like set_section_for_node, but copying the section name from another
+ node. */
+ void set_section_for_node (const symtab_node &other);
+
+ /* Set initialization priority to PRIORITY. */
+ void set_init_priority (priority_type priority);
+
+ /* Return the initialization priority. */
+ priority_type get_init_priority ();
+
+ /* Return availability of NODE when referenced from REF. */
+ enum availability get_availability (symtab_node *ref = NULL);
+
+ /* During LTO stream-in this predicate can be used to check whether the
+ node in question prevails in the linking, to save some memory usage. */
+ bool prevailing_p (void);
+
+ /* Return true if NODE binds to current definition in final executable
+ when referenced from REF. If REF is NULL return conservative value
+ for any reference. */
+ bool binds_to_current_def_p (symtab_node *ref = NULL);
+
+ /* Make DECL local. */
+ void make_decl_local (void);
+
+ /* Copy visibility from N. */
+ void copy_visibility_from (symtab_node *n);
+
+ /* Return desired alignment of the definition. This is NOT the alignment
+ useful to access THIS, because THIS may be interposable and DECL_ALIGN
+ should be used instead. It however must be guaranteed when outputting
+ the definition of THIS. */
+ unsigned int definition_alignment ();
+
+ /* Return true if alignment can be increased. */
+ bool can_increase_alignment_p ();
+
+ /* Increase alignment of symbol to ALIGN. */
+ void increase_alignment (unsigned int align);
+
+ /* Return true if list contains an alias. */
+ bool has_aliases_p (void);
+
+ /* Return true when the symbol is a real symbol, i.e. it is not an inline
+ clone or an abstract function kept for debug info purposes only. */
+ bool real_symbol_p (void);
+
+ /* Return true when the symbol needs to be output to the LTO symbol table. */
+ bool output_to_lto_symbol_table_p (void);
+
+ /* Determine if the symbol declaration is needed. That is, visible to
+ something either outside this translation unit or to something magic
+ in the system configury. This function is used just during symbol
+ creation. */
+ bool needed_p (void);
+
+ /* Return true if this symbol is a function from the C frontend specified
+ directly in RTL form (with "__RTL"). */
+ bool native_rtl_p () const;
+
+ /* Return true when there are references to the node. */
+ bool referred_to_p (bool include_self = true);
+
+ /* Return true if the symbol can be discarded by the linker from the
+ binary. Assume that the symbol is used (so there is no need to take
+ into account garbage collecting linkers).
+
+ This can happen for comdats, commons and weaks when they are prevailed
+ by another definition at static linking time. */
+ inline bool
+ can_be_discarded_p (void)
+ {
+ return ((DECL_EXTERNAL (decl)
+ && !in_other_partition)
+ || ((get_comdat_group ()
+ || DECL_COMMON (decl)
+ || (DECL_SECTION_NAME (decl) && DECL_WEAK (decl)))
+ && ((resolution != LDPR_PREVAILING_DEF
+ && resolution != LDPR_PREVAILING_DEF_IRONLY_EXP)
+ || flag_incremental_link)
+ && resolution != LDPR_PREVAILING_DEF_IRONLY));
+ }
+
+ /* Return true if NODE is local to a particular COMDAT group, and must not
+ be named from outside the COMDAT. This is used for C++ decloned
+ constructors. */
+ inline bool comdat_local_p (void)
+ {
+ return (same_comdat_group && !TREE_PUBLIC (decl));
+ }
+
+ /* Return true if ONE and TWO are part of the same COMDAT group. */
+ inline bool in_same_comdat_group_p (symtab_node *target);
+
+ /* Return true if symbol is known to be nonzero. */
+ bool nonzero_address ();
+
+ /* Return 0 if symbol is known to have a different address than S2,
+ return 1 if symbol is known to have the same address as S2,
+ and return 2 otherwise.
+
+ If MEMORY_ACCESSED is true, assume that both the memory pointed to by
+ THIS and by S2 are going to be accessed. This eliminates the situations when
+ either THIS or S2 is NULL and is useful for comparing bases when deciding
+ about memory aliasing. */
+ int equal_address_to (symtab_node *s2, bool memory_accessed = false);
+
+ /* Return true if the symbol's address may possibly be compared to another
+ symbol's address. */
+ bool address_matters_p ();
+
+ /* Return true if NODE's address can be compared. This uses properties
+ of NODE only and does not look at whether the address is actually taken
+ in an interesting way. For that use ADDRESS_MATTERS_P instead. */
+ bool address_can_be_compared_p (void);
+
+ /* Return symbol table node associated with DECL, if any,
+ and NULL otherwise. */
+ static inline symtab_node *get (const_tree decl)
+ {
+ /* Check that we are called for sane type of object - functions
+ and static or external variables. */
+ gcc_checking_assert (TREE_CODE (decl) == FUNCTION_DECL
+ || (TREE_CODE (decl) == VAR_DECL
+ && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)
+ || in_lto_p)));
+ /* Check that the mapping is sane - perhaps this check can go away,
+ but at the moment frontends tend to corrupt the mapping by calling
+ memcpy/memset on the tree nodes. */
+ gcc_checking_assert (!decl->decl_with_vis.symtab_node
+ || decl->decl_with_vis.symtab_node->decl == decl);
+ return decl->decl_with_vis.symtab_node;
+ }
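+
+  /* Sketch (illustrative only, not part of the upstream header) of the
+     lookup above; DECL is any FUNCTION_DECL or suitable VAR_DECL.
+
+       if (symtab_node *snode = symtab_node::get (decl))
+         snode->dump (stderr);  */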
+
+ /* Try to find a symtab node for declaration DECL and if it does not
+ exist or if it corresponds to an inline clone, create a new one. */
+ static inline symtab_node * get_create (tree node);
+
+ /* Return the cgraph node that has ASMNAME for its DECL_ASSEMBLER_NAME.
+ Return NULL if there's no such node. */
+ static symtab_node *get_for_asmname (const_tree asmname);
+
+ /* Verify symbol table for internal consistency. */
+ static DEBUG_FUNCTION void verify_symtab_nodes (void);
+
+ /* Perform internal consistency checks, if they are enabled. */
+ static inline void checking_verify_symtab_nodes (void);
+
+ /* Type of the symbol. */
+ ENUM_BITFIELD (symtab_type) type : 8;
+
+ /* The symbols resolution. */
+ ENUM_BITFIELD (ld_plugin_symbol_resolution) resolution : 8;
+
+ /*** Flags representing the symbol type. ***/
+
+ /* True when symbol corresponds to a definition in the current unit.
+ Set via finalize_function or finalize_decl. */
+ unsigned definition : 1;
+ /* True when symbol is an alias.
+ Set by assemble_alias. */
+ unsigned alias : 1;
+ /* When true the alias is translated into its target symbol either by GCC
+ or assembler (it also may just be a duplicate declaration of the same
+ linker name).
+
+ Currently transparent aliases come in three different flavors
+ - aliases having the same assembler name as their target (aka duplicated
+ declarations). In this case the assembler names compare via
+ assembler_names_equal_p and weakref is false
+ - aliases that are renamed at the time of being output to the final
+ file by varasm.cc. For those, DECL_ASSEMBLER_NAME has
+ IDENTIFIER_TRANSPARENT_ALIAS set and thus their assembler
+ name must also be unique.
+ Weakrefs belong to this category when we target assembler without
+ .weakref directive.
+ - weakrefs that are renamed by assembler via .weakref directive.
+ In this case the alias may or may not be a definition (depending on
+ whether the target declaration was seen by the compiler); weakref is set.
+ Unless we are before renaming statics, assembler names are different.
+
+ Given that we now support duplicate declarations, the second option is
+ redundant and will be removed. */
+ unsigned transparent_alias : 1;
+ /* True when alias is a weakref. */
+ unsigned weakref : 1;
+ /* The C++ frontend produces same-body aliases and extra name aliases for
+ virtual functions and vtables that are obviously equivalent.
+ Those aliases are a bit special, especially because the C++ frontend
+ visibility code is so ugly it cannot get them right the first time
+ and their visibility needs to be copied from their "masters" at
+ the end of parsing. */
+ unsigned cpp_implicit_alias : 1;
+ /* The alias is a symbol version. */
+ unsigned symver : 1;
+ /* Set once the definition was analyzed. The list of references and
+ other properties are built during analysis. */
+ unsigned analyzed : 1;
+ /* Set for write-only variables. */
+ unsigned writeonly : 1;
+ /* Visibility of symbol was used for further optimization; do not
+ permit further changes. */
+ unsigned refuse_visibility_changes : 1;
+
+ /*** Visibility and linkage flags. ***/
+
+ /* Set when function is visible by other units. */
+ unsigned externally_visible : 1;
+ /* Don't reorder to other symbols having this set. */
+ unsigned no_reorder : 1;
+ /* The symbol will be assumed to be used in an invisible way (like
+ by a toplevel asm statement). */
+ unsigned force_output : 1;
+ /* Like FORCE_OUTPUT, but in this case it is the ABI requiring the symbol
+ to be exported. Unlike FORCE_OUTPUT this flag gets cleared for symbols
+ promoted to static and it does not inhibit optimization. */
+ unsigned forced_by_abi : 1;
+ /* True when the name is known to be unique and thus it does not need mangling. */
+ unsigned unique_name : 1;
+ /* Specify whether the section was set by the user or by the
+ compiler via -ffunction-sections. */
+ unsigned implicit_section : 1;
+ /* True when body and other characteristics have been removed by
+ symtab_remove_unreachable_nodes. */
+ unsigned body_removed : 1;
+ /* True when symbol should comply with the -fsemantic-interposition flag. */
+ unsigned semantic_interposition : 1;
+
+ /*** WHOPR Partitioning flags.
+ These flags are used at ltrans stage when only part of the callgraph is
+ available. ***/
+
+ /* Set when variable is used from another LTRANS partition. */
+ unsigned used_from_other_partition : 1;
+ /* Set when function is available in the other LTRANS partition.
+ During WPA output it is used to mark nodes that are present in
+ multiple partitions. */
+ unsigned in_other_partition : 1;
+
+
+
+ /*** other flags. ***/
+
+ /* Set when symbol has address taken. */
+ unsigned address_taken : 1;
+ /* Set when init priority is set. */
+ unsigned in_init_priority_hash : 1;
+
+ /* Set when symbol needs to be streamed into LTO bytecode for LTO, or in case
+ of offloading, for separate compilation for a different target. */
+ unsigned need_lto_streaming : 1;
+
+ /* Set when symbol can be streamed into bytecode for offloading. */
+ unsigned offloadable : 1;
+
+ /* Set when symbol is an IFUNC resolver. */
+ unsigned ifunc_resolver : 1;
+
+
+ /* Ordering of all symtab entries. */
+ int order;
+
+ /* Declaration representing the symbol. */
+ tree decl;
+
+ /* Linked list of symbol table entries starting with symtab_nodes. */
+ symtab_node *next;
+ symtab_node *previous;
+
+ /* Linked list of symbols with the same asm name. There may be multiple
+ entries for a single symbol name during LTO, because symbols are renamed
+ only after partitioning.
+
+ Because inline clones are kept in the assembler name hash, they also produce
+ duplicate entries.
+
+ There are also several long-standing bugs where frontends and builtin
+ code produce duplicated decls. */
+ symtab_node *next_sharing_asm_name;
+ symtab_node *previous_sharing_asm_name;
+
+ /* Circular list of nodes in the same comdat group if non-NULL. */
+ symtab_node *same_comdat_group;
+
+ /* Vectors of referring and referenced entities. */
+ ipa_ref_list GTY((skip)) ref_list;
+
+ /* Alias target. May be either a DECL pointer or an ASSEMBLER_NAME pointer
+ depending on what was known to the frontend at creation time.
+ Once the alias is resolved, this pointer becomes NULL. */
+ tree alias_target;
+
+ /* File stream where this node is being written to. */
+ struct lto_file_decl_data * lto_file_data;
+
+ void *GTY ((skip)) aux;
+
+ /* Comdat group the symbol is in. Can be private if GGC allowed that. */
+ tree x_comdat_group;
+
+ /* Section name. Again can be private, if allowed. */
+ section_hash_entry *x_section;
+
+protected:
+ /* Dump base fields of symtab nodes to F. Not to be used directly. */
+ void dump_base (FILE *);
+
+ /* Verify common part of symtab node. */
+ bool DEBUG_FUNCTION verify_base (void);
+
+ /* Remove node from symbol table. This function is not used directly, but via
+ cgraph/varpool node removal routines. */
+ void unregister (struct clone_info *);
+
+ /* Return the initialization and finalization priority information for
+ DECL. If there is no previous priority information, a freshly
+ allocated structure is returned. */
+ struct symbol_priority_map *priority_info (void);
+
+ /* Worker for call_for_symbol_and_aliases_1. */
+ bool call_for_symbol_and_aliases_1 (bool (*callback) (symtab_node *, void *),
+ void *data,
+ bool include_overwrite);
+private:
+ /* Workers for set_section. */
+ static bool set_section_from_string (symtab_node *n, void *s);
+ static bool set_section_from_node (symtab_node *n, void *o);
+
+ /* Worker for symtab_resolve_alias. */
+ static bool set_implicit_section (symtab_node *n, void *);
+
+ /* Worker searching for a noninterposable alias. */
+ static bool noninterposable_alias (symtab_node *node, void *data);
+
+ /* Worker for ultimate_alias_target. */
+ symtab_node *ultimate_alias_target_1 (enum availability *avail = NULL,
+ symtab_node *ref = NULL);
+
+ /* Get dump name with normal or assembly name. */
+ const char *get_dump_name (bool asm_name_p) const;
+};
+
+inline void
+symtab_node::checking_verify_symtab_nodes (void)
+{
+ if (flag_checking)
+ symtab_node::verify_symtab_nodes ();
+}
+
+/* Walk all aliases for NODE. */
+#define FOR_EACH_ALIAS(NODE, ALIAS) \
+ for (unsigned ALIAS##_iter_ = 0; \
+ (NODE)->iterate_direct_aliases (ALIAS##_iter_, ALIAS); \
+ ALIAS##_iter_++)
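+
+/* An illustrative use of FOR_EACH_ALIAS (a sketch added for exposition, not
+   part of the original header; NODE is assumed to be an existing
+   symtab_node *, and handle_alias a hypothetical callback):
+
+     ipa_ref *alias;
+     FOR_EACH_ALIAS (node, alias)
+       handle_alias (alias->referring);
+
+   Here alias->referring is the aliasing symbol itself. */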
+
+/* This is the information that is put into the cgraph local structure
+ to recover a function. */
+struct lto_file_decl_data;
+
+extern const char * const cgraph_availability_names[];
+extern const char * const ld_plugin_symbol_resolution_names[];
+extern const char * const tls_model_names[];
+
+/* Represents which DECL tree (or reference to such a tree)
+ will be replaced by another tree while versioning. */
+struct GTY(()) ipa_replace_map
+{
+ /* The new (replacing) tree. */
+ tree new_tree;
+ /* Parameter number to replace, when old_tree is NULL. */
+ int parm_num;
+ /* Set if the newly added reference should not be an address one, but a load
+ one from the operand of the ADDR_EXPR in NEW_TREE. This is for cases when
+ the corresponding parameter p is used only as *p. */
+ unsigned force_load_ref : 1;
+};
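+
+/* An illustrative sketch (added for exposition, not part of the original
+   header): when IPA-CP clones
+
+     void f (int *p) { use (*p); }
+
+   for a known argument &g, the replace map carries NEW_TREE = &g, and since
+   p is only used as *p, FORCE_LOAD_REF is set so the recorded reference is
+   a load from g rather than its address. */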
+
+enum cgraph_simd_clone_arg_type
+{
+ SIMD_CLONE_ARG_TYPE_VECTOR,
+ SIMD_CLONE_ARG_TYPE_UNIFORM,
+ /* These are only for integer/pointer arguments passed by value. */
+ SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP,
+ SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP,
+ /* These 6 are only for reference type arguments or arguments passed
+ by reference. */
+ SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP,
+ SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP,
+ SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP,
+ SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP,
+ SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP,
+ SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP,
+ SIMD_CLONE_ARG_TYPE_MASK
+};
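+
+/* As an illustration (added for exposition, not from the original header):
+   for a declaration like
+
+     #pragma omp declare simd uniform(base) linear(idx:1)
+     int load (int *base, int idx);
+
+   BASE would be classified as SIMD_CLONE_ARG_TYPE_UNIFORM, while IDX, an
+   integer passed by value with a constant step, would be
+   SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP. */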
+
+/* Function arguments in the original function of a SIMD clone.
+ Supplementary data for `struct simd_clone'. */
+
+struct GTY(()) cgraph_simd_clone_arg {
+ /* Original function argument as it originally existed in
+ DECL_ARGUMENTS. */
+ tree orig_arg;
+
+ /* orig_arg's type (or for extern functions, the type from
+ TYPE_ARG_TYPES). */
+ tree orig_type;
+
+ /* If argument is a vector, this holds the vector version of
+ orig_arg that after adjusting the argument types will live in
+ DECL_ARGUMENTS. Otherwise, this is NULL.
+
+ This basically holds:
+ vector(simdlen) __typeof__(orig_arg) new_arg. */
+ tree vector_arg;
+
+ /* vector_arg's type (or for extern functions, the new vector type). */
+ tree vector_type;
+
+ /* If argument is a vector, this holds the array where the simd
+ argument is held while executing the simd clone function. This
+ is a local variable in the cloned function. Its content is
+ copied from vector_arg upon entry to the clone.
+
+ This basically holds:
+ __typeof__(orig_arg) simd_array[simdlen]. */
+ tree simd_array;
+
+ /* A SIMD clone's argument can be either linear (constant or
+ variable), uniform, or vector. */
+ enum cgraph_simd_clone_arg_type arg_type;
+
+ /* Variable alignment if available, otherwise 0. */
+ unsigned int alignment;
+
+ /* For arg_type SIMD_CLONE_ARG_TYPE_LINEAR_*CONSTANT_STEP this is
+ the constant linear step, if arg_type is
+ SIMD_CLONE_ARG_TYPE_LINEAR_*VARIABLE_STEP, this is index of
+ the uniform argument holding the step, otherwise 0. */
+ HOST_WIDE_INT linear_step;
+};
+
+/* Specific data for a SIMD function clone. */
+
+struct GTY(()) cgraph_simd_clone {
+ /* Number of words in the SIMD lane associated with this clone. */
+ poly_uint64 simdlen;
+
+ /* Number of annotated function arguments in `args'. This is
+ usually the number of named arguments in FNDECL. */
+ unsigned int nargs;
+
+ /* Max hardware vector size in bits for integral vectors. */
+ poly_uint64 vecsize_int;
+
+ /* Max hardware vector size in bits for floating point vectors. */
+ poly_uint64 vecsize_float;
+
+ /* Machine mode of the mask argument(s), if they are to be passed
+ as bitmasks in integer argument(s). VOIDmode if masks are passed
+ as vectors of characteristic type. */
+ machine_mode mask_mode;
+
+ /* The mangling character for a given vector size. This is used
+ to determine the ISA mangling bit as specified in the Intel
+ Vector ABI. */
+ unsigned char vecsize_mangle;
+
+ /* True if this is the masked, in-branch version of the clone,
+ otherwise false. */
+ unsigned int inbranch : 1;
+
+ /* Doubly linked list of SIMD clones. */
+ cgraph_node *prev_clone, *next_clone;
+
+ /* Original cgraph node the SIMD clones were created for. */
+ cgraph_node *origin;
+
+ /* Annotated function arguments for the original function. */
+ cgraph_simd_clone_arg GTY((length ("%h.nargs"))) args[1];
+};
+
+/* Function Multiversioning info. */
+struct GTY((for_user)) cgraph_function_version_info {
+ /* The cgraph_node for which the function version info is stored. */
+ cgraph_node *this_node;
+ /* Chains all the semantically identical function versions. The
+ first function in this chain is the version_info node of the
+ default function. */
+ cgraph_function_version_info *prev;
+ /* If this version node corresponds to a dispatcher for function
+ versions, this points to the version info node of the default
+ function, the first node in the chain. */
+ cgraph_function_version_info *next;
+ /* If this node corresponds to a function version, this points
+ to the dispatcher function decl, which is the function that must
+ be called to execute the right function version at run-time.
+
+ If this cgraph node is a dispatcher (if dispatcher_function is
+ true, in the cgraph_node struct) for function versions, this
+ points to resolver function, which holds the function body of the
+ dispatcher. The dispatcher decl is an alias to the resolver
+ function decl. */
+ tree dispatcher_resolver;
+};
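+
+/* For illustration (a sketch added for exposition, not part of the original
+   header): with
+
+     __attribute__ ((target_clones ("avx2", "default")))
+     int f (int);
+
+   each generated version of f gets its own cgraph_function_version_info,
+   chained through prev/next, and the dispatcher's node reaches the resolver
+   body through dispatcher_resolver. */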
+
+#define DEFCIFCODE(code, type, string) CIF_ ## code,
+/* Reasons for inlining failures. */
+
+enum cgraph_inline_failed_t {
+#include "cif-code.def"
+ CIF_N_REASONS
+};
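+
+/* This is the usual X-macro pattern: each DEFCIFCODE (FOO, type, string)
+   entry in cif-code.def expands to an enumerator CIF_FOO above, and
+   CIF_N_REASONS counts the total number of reasons. */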
+
+enum cgraph_inline_failed_type_t
+{
+ CIF_FINAL_NORMAL = 0,
+ CIF_FINAL_ERROR
+};
+
+struct cgraph_edge;
+
+struct cgraph_edge_hasher : ggc_ptr_hash<cgraph_edge>
+{
+ typedef gimple *compare_type;
+
+ static hashval_t hash (cgraph_edge *);
+ static hashval_t hash (gimple *);
+ static bool equal (cgraph_edge *, gimple *);
+};
+
+/* The cgraph data structure.
+ Each function decl has an assigned cgraph_node listing its callees and callers. */
+
+struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
+{
+ friend class symbol_table;
+
+ /* Constructor. */
+ explicit cgraph_node (int uid)
+ : symtab_node (SYMTAB_FUNCTION), callees (NULL), callers (NULL),
+ indirect_calls (NULL),
+ next_sibling_clone (NULL), prev_sibling_clone (NULL), clones (NULL),
+ clone_of (NULL), call_site_hash (NULL), former_clone_of (NULL),
+ simdclone (NULL), simd_clones (NULL), ipa_transforms_to_apply (vNULL),
+ inlined_to (NULL), rtl (NULL),
+ count (profile_count::uninitialized ()),
+ count_materialization_scale (REG_BR_PROB_BASE), profile_id (0),
+ unit_id (0), tp_first_run (0), thunk (false),
+ used_as_abstract_origin (false),
+ lowered (false), process (false), frequency (NODE_FREQUENCY_NORMAL),
+ only_called_at_startup (false), only_called_at_exit (false),
+ tm_clone (false), dispatcher_function (false), calls_comdat_local (false),
+ icf_merged (false), nonfreeing_fn (false), merged_comdat (false),
+ merged_extern_inline (false), parallelized_function (false),
+ split_part (false), indirect_call_target (false), local (false),
+ versionable (false), can_change_signature (false),
+ redefined_extern_inline (false), tm_may_enter_irr (false),
+ ipcp_clone (false), declare_variant_alt (false),
+ calls_declare_variant_alt (false), gc_candidate (false),
+ m_uid (uid), m_summary_id (-1)
+ {}
+
+ /* Remove the node from the cgraph and all inline clones inlined into it.
+ However, skip removal of FORBIDDEN_NODE and return true if it needs to be
+ removed. This allows calling the function from an outer loop walking the
+ clone tree. */
+ bool remove_symbol_and_inline_clones (cgraph_node *forbidden_node = NULL);
+
+ /* Record all references from cgraph_node that are taken
+ in statement STMT. */
+ void record_stmt_references (gimple *stmt);
+
+ /* Like cgraph_set_call_stmt but walk the clone tree and update all
+ clones sharing the same function body.
+ When UPDATE_SPECULATIVE is true, all three components of a
+ speculative edge get updated. Otherwise we update only the direct
+ call. */
+ void set_call_stmt_including_clones (gimple *old_stmt, gcall *new_stmt,
+ bool update_speculative = true);
+
+ /* Walk the alias chain to return the function this cgraph_node is an alias
+ of. Walk through thunks, too.
+ When AVAILABILITY is non-NULL, get minimal availability in the chain.
+ When REF is non-NULL, assume that reference happens in symbol REF
+ when determining the availability. */
+ cgraph_node *function_symbol (enum availability *avail = NULL,
+ struct symtab_node *ref = NULL);
+
+ /* Walk the alias chain to return the function this cgraph_node is an alias
+ of. Walk through non-virtual thunks, too. Thus we return either a function
+ or a virtual thunk node.
+ When AVAILABILITY is non-NULL, get minimal availability in the chain.
+ When REF is non-NULL, assume that reference happens in symbol REF
+ when determining the availability. */
+ cgraph_node *function_or_virtual_thunk_symbol
+ (enum availability *avail = NULL,
+ struct symtab_node *ref = NULL);
+
+ /* Create a node representing a clone of N executed COUNT times. Decrease
+ the execution counts from the original node too.
+ The new clone will have decl set to DECL, which may or may not be the same
+ as the decl of N.
+
+ When UPDATE_ORIGINAL is true, the counts are subtracted from the original
+ function's profile to reflect the fact that part of the execution is
+ handled by the new node.
+ When CALL_DUPLICATION_HOOK is true, the IPA passes are notified of
+ the new clone. Otherwise the caller is responsible for doing so later.
+
+ If the new node is being inlined into another one, NEW_INLINED_TO should be
+ the outline function the new one is (even indirectly) inlined to.
+ All hooks will see this in the node's inlined_to, when invoked.
+ Can be NULL if the node is not inlined. SUFFIX is a string that is appended
+ to the original name. */
+ cgraph_node *create_clone (tree decl, profile_count count,
+ bool update_original,
+ vec<cgraph_edge *> redirect_callers,
+ bool call_duplication_hook,
+ cgraph_node *new_inlined_to,
+ ipa_param_adjustments *param_adjustments,
+ const char *suffix = NULL);
+
+ /* Create callgraph node clone with new declaration. The actual body will be
+ copied later at compilation stage. The name of the new clone will be
+ constructed from the name of the original node, SUFFIX and NUM_SUFFIX. */
+ cgraph_node *create_virtual_clone (const vec<cgraph_edge *> &redirect_callers,
+ vec<ipa_replace_map *, va_gc> *tree_map,
+ ipa_param_adjustments *param_adjustments,
+ const char * suffix, unsigned num_suffix);
+
+ /* Remove the node from the tree of virtual and inline clones and make it a
+ standalone node - not a clone any more. */
+ void remove_from_clone_tree ();
+
+ /* The cgraph node is being removed from the symbol table; see if its entry
+ can be replaced by another inline clone. */
+ cgraph_node *find_replacement (struct clone_info *);
+
+ /* Create a new cgraph node which is the new version of the
+ callgraph node. REDIRECT_CALLERS holds the caller
+ edges which should be redirected to point to
+ NEW_VERSION. All the callee edges of the node
+ are cloned to the new version node. Return the new
+ version node.
+
+ If non-NULL, BBS_TO_COPY determines which basic blocks
+ are copied, to prevent duplication of calls that are dead
+ in the clone.
+
+ SUFFIX is a string that is appended to the original name. */
+
+ cgraph_node *create_version_clone (tree new_decl,
+ vec<cgraph_edge *> redirect_callers,
+ bitmap bbs_to_copy,
+ const char *suffix = NULL);
+
+ /* Perform function versioning.
+ Function versioning includes copying of the tree and
+ a callgraph update (creating a new cgraph node and updating
+ its callees and callers).
+
+ REDIRECT_CALLERS varray includes the edges to be redirected
+ to the new version.
+
+ TREE_MAP is a mapping of tree nodes we want to replace with
+ new ones (according to results of prior analysis).
+
+ If non-NULL, ARGS_TO_SKIP determines function parameters to remove
+ from the new version.
+ If SKIP_RETURN is true, the new version will return void.
+ If non-NULL, BBS_TO_COPY determines which basic blocks to copy.
+ If non-NULL, NEW_ENTRY determines the new entry BB of the clone.
+
+ If TARGET_ATTRIBUTES is non-NULL, when creating a new declaration,
+ add the attributes to DECL_ATTRIBUTES and call valid_attribute_p,
+ which will promote the value of the attribute
+ DECL_FUNCTION_SPECIFIC_TARGET of the declaration.
+
+ If VERSION_DECL is set true, use clone_function_name_numbered for the
+ function clone. Otherwise, use clone_function_name.
+
+ Return the new version's cgraph node. */
+ cgraph_node *create_version_clone_with_body
+ (vec<cgraph_edge *> redirect_callers,
+ vec<ipa_replace_map *, va_gc> *tree_map,
+ ipa_param_adjustments *param_adjustments,
+ bitmap bbs_to_copy, basic_block new_entry_block, const char *clone_name,
+ tree target_attributes = NULL_TREE, bool version_decl = true);
+
+ /* Insert a new cgraph_function_version_info node into cgraph_fnver_htab
+ corresponding to cgraph_node. */
+ cgraph_function_version_info *insert_new_function_version (void);
+
+ /* Get the cgraph_function_version_info node corresponding to node. */
+ cgraph_function_version_info *function_version (void);
+
+ /* Discover all functions and variables that are trivially needed, and
+ analyze them as well as all functions and variables referred to by them. */
+ void analyze (void);
+
+ /* Add a thunk alias into the callgraph. The alias declaration is ALIAS and
+ it aliases DECL with adjustments made to the first parameter.
+ See comments in symtab-thunks.h for details on the parameters. */
+ cgraph_node * create_thunk (tree alias, tree, bool this_adjusting,
+ HOST_WIDE_INT fixed_offset,
+ HOST_WIDE_INT virtual_value,
+ HOST_WIDE_INT indirect_offset,
+ tree virtual_offset,
+ tree real_alias);
+
+
+ /* Return the node this alias is aliasing. */
+ inline cgraph_node *get_alias_target (void);
+
+ /* Given a function symbol, walk the alias chain to return the function this
+ node is an alias of. Do not walk through thunks.
+ When AVAILABILITY is non-NULL, get minimal availability in the chain.
+ When REF is non-NULL, assume that reference happens in symbol REF
+ when determining the availability. */
+
+ cgraph_node *ultimate_alias_target (availability *availability = NULL,
+ symtab_node *ref = NULL);
+
+ /* Call expand_thunk on all callers that are thunks and analyze those
+ nodes that were expanded. */
+ void expand_all_artificial_thunks ();
+
+ /* Assemble thunks and aliases associated with the node. */
+ void assemble_thunks_and_aliases (void);
+
+ /* Expand function specified by node. */
+ void expand (void);
+
+ /* Create a wrapper from this cgraph_node to the TARGET node. A thunk is
+ used for this kind of wrapper method. */
+ void create_wrapper (cgraph_node *target);
+
+ /* Verify the consistency of this cgraph node. */
+ void DEBUG_FUNCTION verify_node (void);
+
+ /* Remove function from symbol table. */
+ void remove (void);
+
+ /* Dump call graph node to file F. */
+ void dump (FILE *f);
+
+ /* Dump call graph node to file F in graphviz format. */
+ void dump_graphviz (FILE *f);
+
+ /* Dump call graph node to stderr. */
+ void DEBUG_FUNCTION debug (void);
+
+ /* When doing LTO, read cgraph_node's body from disk if it is not already
+ present. */
+ bool get_untransformed_body ();
+
+ /* Prepare function body. When doing LTO, read cgraph_node's body from disk
+ if it is not already present. When some IPA transformations are scheduled,
+ apply them. */
+ bool get_body ();
+
+ void materialize_clone (void);
+
+ /* Release memory used to represent body of function.
+ Use this only for functions that are released before being translated to
+ target code (i.e. RTL). Functions that are compiled to RTL and beyond
+ are free'd in final.cc via free_after_compilation(). */
+ void release_body (bool keep_arguments = false);
+
+ /* Return the DECL_STRUCT_FUNCTION of the function. */
+ struct function *get_fun () const;
+
+ /* Bring cgraph node local. */
+ void make_local (void);
+
+ /* Mark the node as having its address taken. */
+ void mark_address_taken (void);
+
+ /* Set finalization priority to PRIORITY. */
+ void set_fini_priority (priority_type priority);
+
+ /* Return the finalization priority. */
+ priority_type get_fini_priority (void);
+
+ /* Create edge from a given function to CALLEE in the cgraph. */
+ cgraph_edge *create_edge (cgraph_node *callee,
+ gcall *call_stmt, profile_count count,
+ bool cloning_p = false);
+
+ /* Create an indirect edge with a yet-undetermined callee where the call
+ statement destination is a formal parameter of the caller with index
+ PARAM_INDEX. */
+ cgraph_edge *create_indirect_edge (gcall *call_stmt, int ecf_flags,
+ profile_count count,
+ bool cloning_p = false);
+
+ /* Like cgraph_create_edge, but walk the clone tree and update all clones
+ sharing the same function body. If clones already have an edge for OLD_STMT,
+ only update the edge the same way cgraph_set_call_stmt_including_clones does. */
+ void create_edge_including_clones (cgraph_node *callee,
+ gimple *old_stmt, gcall *stmt,
+ profile_count count,
+ cgraph_inline_failed_t reason);
+
+ /* Return the callgraph edge representing the GIMPLE_CALL statement
+ CALL_STMT. */
+ cgraph_edge *get_edge (gimple *call_stmt);
+
+ /* Collect all callers of cgraph_node and its aliases that are known to lead
+ to NODE (i.e. are not overwritable) and that are not thunks. */
+ auto_vec<cgraph_edge *> collect_callers (void);
+
+ /* Remove all callers from the node. */
+ void remove_callers (void);
+
+ /* Remove all callees from the node. */
+ void remove_callees (void);
+
+ /* Return function availability. See cgraph.h for description of individual
+ return values. */
+ enum availability get_availability (symtab_node *ref = NULL);
+
+ /* Set TREE_NOTHROW on cgraph_node's decl and on aliases of the node,
+ if any, to NOTHROW. */
+ bool set_nothrow_flag (bool nothrow);
+
+ /* Set DECL_IS_MALLOC on cgraph_node's decl and on aliases of the node,
+ if any. */
+ bool set_malloc_flag (bool malloc_p);
+
+ /* Set TREE_THIS_VOLATILE on cgraph_node's decl and on aliases of the node,
+ if any. */
+ bool set_noreturn_flag (bool noreturn_p);
+
+ /* If SET_CONST is true, mark the function, its aliases and thunks as
+ ECF_CONST. If SET_CONST is false, clear the flag.
+
+ When setting the flag, be careful about possible interposition:
+ do not set the flag for functions that can be interposed, and set the pure
+ flag instead for functions that can bind to another definition.
+
+ Return true if any change was done. */
+
+ bool set_const_flag (bool set_const, bool looping);
+
+ /* Set DECL_PURE_P on cgraph_node's decl and on aliases of the node,
+ if any, to PURE.
+
+ When setting the flag, be careful about possible interposition.
+ Return true if any change was done. */
+
+ bool set_pure_flag (bool pure, bool looping);
+
+ /* Call the callback on the function and aliases associated with the
+ function. When INCLUDE_OVERWRITABLE is false, overwritable aliases and
+ thunks are skipped. */
+
+ bool call_for_symbol_and_aliases (bool (*callback) (cgraph_node *,
+ void *),
+ void *data, bool include_overwritable);
+
+ /* Call the callback on this cgraph_node and the thunks and aliases
+ associated with it. When INCLUDE_OVERWRITABLE is false, overwritable
+ aliases and thunks are skipped. When EXCLUDE_VIRTUAL_THUNKS is true,
+ virtual thunks are skipped. */
+ bool call_for_symbol_thunks_and_aliases (bool (*callback) (cgraph_node *node,
+ void *data),
+ void *data,
+ bool include_overwritable,
+ bool exclude_virtual_thunks = false);
+
+ /* Mark the node as needed, i.e. reachable via some external means. */
+ inline void mark_force_output (void);
+
+ /* Return true when function can be marked local. */
+ bool local_p (void);
+
+ /* Return true if this cgraph_node can be made local for an API change.
+ Extern inline functions and C++ COMDAT functions can be made local
+ at the expense of possible code size growth if the function is used in
+ multiple compilation units. */
+ bool can_be_local_p (void);
+
+ /* Return true when cgraph_node cannot return or throw and thus
+ it is safe to ignore its side effects for IPA analysis. */
+ bool cannot_return_p (void);
+
+ /* Return true when the function and all its aliases are only called
+ directly, i.e. it is not externally visible, its address was not taken,
+ and it is not used in any other non-standard way. */
+ bool only_called_directly_p (void);
+
+ /* Return true when the function is only called directly or via an alias,
+ i.e. it is not externally visible, its address was not taken, and
+ it is not used in any other non-standard way. */
+ inline bool only_called_directly_or_aliased_p (void);
+
+ /* Return true when the function can be expected to be removed
+ from the program when direct calls in this compilation unit are removed.
+
+ As a special case, COMDAT functions are
+ cgraph_can_remove_if_no_direct_calls_p while they are not
+ cgraph_only_called_directly_p (it is possible they are called from
+ another unit).
+
+ This function behaves as cgraph_only_called_directly_p because eliminating
+ all uses of a COMDAT function does not necessarily make it disappear from
+ the program unless we are compiling the whole program or doing LTO. In
+ that case we know we win, since dynamic linking will not really discard
+ the linkonce section.
+
+ If WILL_INLINE is true, assume that function will be inlined into all the
+ direct calls. */
+ bool will_be_removed_from_program_if_no_direct_calls_p
+ (bool will_inline = false);
+
+ /* Return true when function can be removed from callgraph
+ if all direct calls and references are eliminated. The function does
+ not take into account comdat groups. */
+ bool can_remove_if_no_direct_calls_and_refs_p (void);
+
+ /* Return true when function cgraph_node and its aliases can be removed from
+ callgraph if all direct calls are eliminated.
+ If WILL_INLINE is true, assume that function will be inlined into all the
+ direct calls. */
+ bool can_remove_if_no_direct_calls_p (bool will_inline = false);
+
+ /* Return true when the callgraph node is a function with a Gimple body
+ defined in the current unit. Functions can also be defined externally,
+ or they can be thunks with no Gimple representation.
+
+ Note that at the WPA stage, the function body may not be present in
+ memory. */
+ inline bool has_gimple_body_p (void);
+
+ /* Return true if this node represents a former, i.e. an expanded, thunk. */
+ bool former_thunk_p (void);
+
+ /* Check whether the function calls a comdat-local function. This is used to
+ recompute the calls_comdat_local flag after function transformations. */
+ bool check_calls_comdat_local_p ();
+
+ /* Return true if function should be optimized for size. */
+ enum optimize_size_level optimize_for_size_p (void);
+
+ /* Dump the callgraph to file F. */
+ static void dump_cgraph (FILE *f);
+
+ /* Dump the call graph to stderr. */
+ static inline
+ void debug_cgraph (void)
+ {
+ dump_cgraph (stderr);
+ }
+
+ /* Get unique identifier of the node. */
+ inline int get_uid ()
+ {
+ return m_uid;
+ }
+
+ /* Get summary id of the node. */
+ inline int get_summary_id ()
+ {
+ return m_summary_id;
+ }
+
+ /* Record that DECL1 and DECL2 are semantically identical function
+ versions. */
+ static void record_function_versions (tree decl1, tree decl2);
+
+ /* Remove the cgraph_function_version_info and cgraph_node for DECL. This
+ DECL is a duplicate declaration. */
+ static void delete_function_version_by_decl (tree decl);
+
+ /* Add the function FNDECL to the call graph.
+ Unlike finalize_function, this function is intended to be used
+ by the middle end and allows insertion of a new function at an arbitrary
+ point of compilation. The function can be either in high, low or SSA form
+ GIMPLE.
+
+ The function is assumed to be reachable and to have its address taken (so
+ no API-breaking optimizations are performed on it).
+
+ The main work done by this function is to enqueue the function for later
+ processing, to avoid the need for the passes to be re-entrant. */
+ static void add_new_function (tree fndecl, bool lowered);
+
+ /* Return callgraph node for given symbol and check it is a function. */
+ static inline cgraph_node *get (const_tree decl)
+ {
+ gcc_checking_assert (TREE_CODE (decl) == FUNCTION_DECL);
+ return dyn_cast <cgraph_node *> (symtab_node::get (decl));
+ }
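+
+ /* An illustrative lookup (a sketch added for exposition, not part of the
+ original header; FNDECL is assumed to be an existing FUNCTION_DECL):
+
+ cgraph_node *node = cgraph_node::get (fndecl);
+ if (node)
+ node->dump (stderr);
+
+ get may return NULL when no node exists for FNDECL yet; get_create
+ below allocates one on demand. */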
+
+ /* DECL has been parsed. Take it, queue it, compile it at the whim of the
+ logic in effect. If NO_COLLECT is true, then our caller cannot stand to
+ have the garbage collector run at the moment. We would need to either
+ create a new GC context, or just not compile right now. */
+ static void finalize_function (tree, bool);
+
+ /* Return cgraph node assigned to DECL. Create new one when needed. */
+ static cgraph_node * create (tree decl);
+
+ /* Try to find a call graph node for declaration DECL and if it does not
+ exist or if it corresponds to an inline clone, create a new one. */
+ static cgraph_node * get_create (tree);
+
+ /* Return local info for the compiled function. */
+ static cgraph_node *local_info_node (tree decl);
+
+ /* Return RTL info for the compiled function. */
+ static struct cgraph_rtl_info *rtl_info (const_tree);
+
+ /* Return the cgraph node that has ASMNAME for its DECL_ASSEMBLER_NAME.
+ Return NULL if there's no such node. */
+ static cgraph_node *get_for_asmname (tree asmname);
+
+ /* Attempt to mark ALIAS as an alias to DECL. Return alias node if
+ successful and NULL otherwise.
+ Same body aliases are output whenever the body of DECL is output,
+ and cgraph_node::get (ALIAS) transparently
+ returns cgraph_node::get (DECL). */
+ static cgraph_node * create_same_body_alias (tree alias, tree decl);
+
+ /* Verify whole cgraph structure. */
+ static void DEBUG_FUNCTION verify_cgraph_nodes (void);
+
+ /* Verify cgraph, if consistency checking is enabled. */
+ static inline void checking_verify_cgraph_nodes (void);
+
+ /* Worker to bring NODE local. */
+ static bool make_local (cgraph_node *node, void *);
+
+ /* Mark ALIAS as an alias to DECL. DECL_NODE is the cgraph node the
+ function body is associated with (not necessarily cgraph_node (DECL)). */
+ static cgraph_node *create_alias (tree alias, tree target);
+
+ /* Return true if NODE has a thunk. */
+ static bool has_thunk_p (cgraph_node *node, void *);
+
+ cgraph_edge *callees;
+ cgraph_edge *callers;
+ /* List of edges representing indirect calls with a yet undetermined
+ callee. */
+ cgraph_edge *indirect_calls;
+ cgraph_node *next_sibling_clone;
+ cgraph_node *prev_sibling_clone;
+ cgraph_node *clones;
+ cgraph_node *clone_of;
+ /* For functions with many call sites it holds a map from call expression
+ to the edge, to speed up the get_edge function. */
+ hash_table<cgraph_edge_hasher> *GTY(()) call_site_hash;
+ /* Declaration this node used to be a clone of. */
+ tree former_clone_of;
+
+ /* If this is a SIMD clone, this points to the SIMD specific
+ information for it. */
+ cgraph_simd_clone *simdclone;
+ /* If this function has SIMD clones, this points to the first clone. */
+ cgraph_node *simd_clones;
+
+ /* Interprocedural passes scheduled to have their transform functions
+ applied next time we execute local pass on them. We maintain it
+ per-function in order to allow IPA passes to introduce new functions. */
+ vec<ipa_opt_pass, va_heap, vl_ptr> GTY((skip)) ipa_transforms_to_apply;
+
+ /* For inline clones this points to the function they will be
+ inlined into. */
+ cgraph_node *inlined_to;
+
+ struct cgraph_rtl_info *rtl;
+
+ /* Expected number of executions: calculated in profile.cc. */
+ profile_count count;
+ /* How to scale counts at materialization time; used to merge
+ LTO units with different numbers of profile runs. */
+ int count_materialization_scale;
+ /* ID assigned by the profiling. */
+ unsigned int profile_id;
+ /* ID of the translation unit. */
+ int unit_id;
+ /* Time profiler: first run of function. */
+ int tp_first_run;
+
+ /* True when symbol is a thunk. */
+ unsigned thunk : 1;
+ /* Set when decl is an abstract function pointed to by the
+ ABSTRACT_DECL_ORIGIN of a reachable function. */
+ unsigned used_as_abstract_origin : 1;
+ /* Set once the function is lowered (i.e. its CFG is built). */
+ unsigned lowered : 1;
+ /* Set once the function has been instantiated and its callee
+ lists created. */
+ unsigned process : 1;
+ /* How commonly executed the node is. Initialized during branch
+ probabilities pass. */
+ ENUM_BITFIELD (node_frequency) frequency : 2;
+ /* True when function can only be called at startup (from static ctor). */
+ unsigned only_called_at_startup : 1;
+ /* True when function can only be called at exit (from static dtor). */
+ unsigned only_called_at_exit : 1;
+ /* True when function is the transactional clone of a function which
+ is called only from inside transactions. */
+ /* ?? We should be able to remove this. We have enough bits in
+ cgraph to calculate it. */
+ unsigned tm_clone : 1;
+ /* True if this decl is a dispatcher for function versions. */
+ unsigned dispatcher_function : 1;
+ /* True if this decl calls a COMDAT-local function. This is set up in
+ compute_fn_summary and inline_call. */
+ unsigned calls_comdat_local : 1;
+ /* True if node has been created by merge operation in IPA-ICF. */
+ unsigned icf_merged: 1;
+ /* True if call to node can't result in a call to free, munmap or
+ other operation that could make previously non-trapping memory
+ accesses trapping. */
+ unsigned nonfreeing_fn : 1;
+ /* True if multiple COMDAT bodies were merged by lto-symtab. */
+ unsigned merged_comdat : 1;
+ /* True if this def was merged with extern inlines. */
+ unsigned merged_extern_inline : 1;
+ /* True if function was created to be executed in parallel. */
+ unsigned parallelized_function : 1;
+ /* True if function is part split out by ipa-split. */
+ unsigned split_part : 1;
+ /* True if the function appears as possible target of indirect call. */
+ unsigned indirect_call_target : 1;
+ /* Set when function is visible in current compilation unit only and
+ its address is never taken. */
+ unsigned local : 1;
+ /* False when something makes versioning impossible. */
+ unsigned versionable : 1;
+ /* False when function calling convention and signature cannot be changed.
+ This is the case when __builtin_apply_args is used. */
+ unsigned can_change_signature : 1;
+ /* True when the function has been originally extern inline, but it is
+ redefined now. */
+ unsigned redefined_extern_inline : 1;
+ /* True if the function may enter serial irrevocable mode. */
+ unsigned tm_may_enter_irr : 1;
+ /* True if this was a clone created by ipa-cp. */
+ unsigned ipcp_clone : 1;
+ /* True if this is the deferred declare variant resolution artificial
+ function. */
+ unsigned declare_variant_alt : 1;
+ /* True if the function calls declare_variant_alt functions. */
+ unsigned calls_declare_variant_alt : 1;
+ /* True if the function should only be emitted if it is used. This flag
+ is set for local SIMD clones when they are created and cleared if the
+ vectorizer uses them. */
+ unsigned gc_candidate : 1;
+
+private:
+ /* Unique id of the node. */
+ int m_uid;
+
+ /* Summary id that is recycled. */
+ int m_summary_id;
+
+ /* Worker for call_for_symbol_and_aliases. */
+ bool call_for_symbol_and_aliases_1 (bool (*callback) (cgraph_node *,
+ void *),
+ void *data, bool include_overwritable);
+};
+
+/* A cgraph node set is a collection of cgraph nodes. A cgraph node
+ can appear in multiple sets. */
+struct cgraph_node_set_def
+{
+ hash_map<cgraph_node *, size_t> *map;
+ vec<cgraph_node *> nodes;
+};
+
+typedef cgraph_node_set_def *cgraph_node_set;
+typedef struct varpool_node_set_def *varpool_node_set;
+
+struct varpool_node;
+
+/* A varpool node set is a collection of varpool nodes. A varpool node
+ can appear in multiple sets. */
+struct varpool_node_set_def
+{
+ hash_map<varpool_node *, size_t> * map;
+ vec<varpool_node *> nodes;
+};
+
+/* Iterator structure for cgraph node sets. */
+struct cgraph_node_set_iterator
+{
+ cgraph_node_set set;
+ unsigned index;
+};
+
+/* Iterator structure for varpool node sets. */
+struct varpool_node_set_iterator
+{
+ varpool_node_set set;
+ unsigned index;
+};
+
+/* Context of a polymorphic call. It represents information about the type of
+ the instance that may reach the call. This is used by ipa-devirt walkers of
+ the type inheritance graph. */
+
+class GTY(()) ipa_polymorphic_call_context {
+public:
+ /* The called object appears in an object of type OUTER_TYPE
+ at offset OFFSET. When information is not 100% reliable, we
+ use SPECULATIVE_OUTER_TYPE and SPECULATIVE_OFFSET. */
+ HOST_WIDE_INT offset;
+ HOST_WIDE_INT speculative_offset;
+ tree outer_type;
+ tree speculative_outer_type;
+ /* True if outer object may be in construction or destruction. */
+ unsigned maybe_in_construction : 1;
+ /* True if outer object may be of derived type. */
+ unsigned maybe_derived_type : 1;
+ /* True if speculative outer object may be of derived type. We always
+ speculate that construction does not happen. */
+ unsigned speculative_maybe_derived_type : 1;
+ /* True if the context is invalid and all calls should be redirected
+ to BUILTIN_UNREACHABLE. */
+ unsigned invalid : 1;
+ /* True if the outer type is dynamic. */
+ unsigned dynamic : 1;
+
+ /* Build empty "I know nothing" context. */
+ ipa_polymorphic_call_context ();
+ /* Build polymorphic call context for indirect call E. */
+ ipa_polymorphic_call_context (cgraph_edge *e);
+ /* Build polymorphic call context for IP invariant CST.
+ If specified, OTR_TYPE specifies the type of the polymorphic call
+ that takes CST+OFFSET as a parameter. */
+ ipa_polymorphic_call_context (tree cst, tree otr_type = NULL,
+ HOST_WIDE_INT offset = 0);
+ /* Build context for pointer REF contained in FNDECL at statement STMT.
+ If INSTANCE is non-NULL, return a pointer to the object described by
+ the context. */
+ ipa_polymorphic_call_context (tree fndecl, tree ref, gimple *stmt,
+ tree *instance = NULL);
+
+ /* Look for vtable stores or constructor calls to work out dynamic type
+ of memory location. */
+ bool get_dynamic_type (tree, tree, tree, gimple *, unsigned *);
+
+ /* Make context non-speculative. */
+ void clear_speculation ();
+
+ /* Produce context specifying all derived types of OTR_TYPE. If OTR_TYPE is
+ NULL, the context is set to dummy "I know nothing" setting. */
+ void clear_outer_type (tree otr_type = NULL);
+
+ /* Walk container types and modify context to point to actual class
+ containing OTR_TYPE (if non-NULL) as base class.
+ Return true if resulting context is valid.
+
+ When CONSIDER_PLACEMENT_NEW is false, reject contexts that may only be
+ made valid via allocation of a new polymorphic type inside, by means
+ of placement new.
+
+ When CONSIDER_BASES is false, only look for actual fields, not base types
+ of TYPE. */
+ bool restrict_to_inner_class (tree otr_type,
+ bool consider_placement_new = true,
+ bool consider_bases = true);
+
+ /* Adjust all offsets in contexts by given number of bits. */
+ void offset_by (HOST_WIDE_INT);
+ /* Use when we cannot track a dynamic type change. This speculatively
+ assumes that no type change is happening. */
+ void possible_dynamic_type_change (bool, tree otr_type = NULL);
+ /* Assume that both THIS and a given context are valid and strengthen THIS
+ if possible. Return true if any strengthening was made.
+ If the actual type the context is being used in is known, OTR_TYPE should
+ be set accordingly. This improves the quality of the combined result. */
+ bool combine_with (ipa_polymorphic_call_context, tree otr_type = NULL);
+ bool meet_with (ipa_polymorphic_call_context, tree otr_type = NULL);
+
+ /* Return TRUE if context is fully useless. */
+ bool useless_p () const;
+ /* Return TRUE if this context conveys the same information as X. */
+ bool equal_to (const ipa_polymorphic_call_context &x) const;
+
+ /* Dump human readable context to F. If NEWLINE is true, it will be
+ terminated by a newline. */
+ void dump (FILE *f, bool newline = true) const;
+ void DEBUG_FUNCTION debug () const;
+
+ /* LTO streaming. */
+ void stream_out (struct output_block *) const;
+ void stream_in (class lto_input_block *, class data_in *data_in);
+
+private:
+ bool combine_speculation_with (tree, HOST_WIDE_INT, bool, tree);
+ bool meet_speculation_with (tree, HOST_WIDE_INT, bool, tree);
+ void set_by_decl (tree, HOST_WIDE_INT);
+ bool set_by_invariant (tree, tree, HOST_WIDE_INT);
+ bool speculation_consistent_p (tree, HOST_WIDE_INT, bool, tree) const;
+ void make_speculative (tree otr_type = NULL);
+};
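+
+/* An illustrative use when analyzing an indirect call edge E (a sketch added
+   for exposition, not part of the original header):
+
+     ipa_polymorphic_call_context ctx (e);
+     if (!ctx.useless_p ())
+       ctx.dump (stderr);
+
+   dump prints a human-readable form of the context. */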
+
+/* Structure containing additional information about an indirect call. */
+
+class GTY(()) cgraph_indirect_call_info
+{
+public:
+ /* When agg_contents is set, an offset where the call pointer is located
+ within the aggregate. */
+ HOST_WIDE_INT offset;
+ /* Context of the polymorphic call; use only when POLYMORPHIC flag is set. */
+ ipa_polymorphic_call_context context;
+ /* OBJ_TYPE_REF_TOKEN of a polymorphic call (if polymorphic is set). */
+ HOST_WIDE_INT otr_token;
+ /* Type of the object from OBJ_TYPE_REF_OBJECT. */
+ tree otr_type;
+ /* Index of the parameter that is called. */
+ int param_index;
+ /* ECF flags determined from the caller. */
+ int ecf_flags;
+
+ /* Number of speculative call targets; it is less than GCOV_TOPN_VALUES. */
+ unsigned num_speculative_call_targets : 16;
+
+ /* Set when the call is a virtual call with the parameter being the
+ associated object pointer rather than a simple direct call. */
+ unsigned polymorphic : 1;
+ /* Set when the call is a call of a pointer loaded from contents of an
+ aggregate at offset. */
+ unsigned agg_contents : 1;
+ /* Set when this is a call through a member pointer. */
+ unsigned member_ptr : 1;
+ /* When the agg_contents bit is set, this one determines whether the
+ destination is loaded from a parameter passed by reference. */
+ unsigned by_ref : 1;
+ /* When the agg_contents bit is set, this one determines whether we can
+ deduce from the function body that the loaded value from the reference is
+ never modified between the invocation of the function and the load
+ point. */
+ unsigned guaranteed_unmodified : 1;
+ /* For polymorphic calls this specifies whether the virtual table pointer
+ may have changed between function entry and the call. */
+ unsigned vptr_changed : 1;
+};
+
+class GTY((chain_next ("%h.next_caller"), chain_prev ("%h.prev_caller"),
+ for_user)) cgraph_edge
+{
+public:
+ friend struct cgraph_node;
+ friend class symbol_table;
+
+ /* Remove EDGE from the cgraph. */
+ static void remove (cgraph_edge *edge);
+
+ /* Change field call_stmt of edge E to NEW_STMT. If UPDATE_SPECULATIVE and E
+ is any component of speculative edge, then update all components.
+ Speculations can be resolved in the process and EDGE can be removed and
+ deallocated. Return the edge that now represents the call. */
+ static cgraph_edge *set_call_stmt (cgraph_edge *e, gcall *new_stmt,
+ bool update_speculative = true);
+
+ /* Redirect callee of the edge to N. The function does not update underlying
+ call expression. */
+ void redirect_callee (cgraph_node *n);
+
+ /* If the edge does not lead to a thunk, simply redirect it to N. Otherwise
+ create one or more equivalent thunks for N and redirect E to the first in
+ the chain. Note that it is then necessary to call
+ n->expand_all_artificial_thunks once all callers are redirected. */
+ void redirect_callee_duplicating_thunks (cgraph_node *n);
+
+ /* Make an indirect edge with an unknown callee an ordinary edge leading to
+ CALLEE. Speculations can be resolved in the process and EDGE can be
+ removed and deallocated. Return the edge that now represents the
+ call. */
+ static cgraph_edge *make_direct (cgraph_edge *edge, cgraph_node *callee);
+
+ /* Turn the edge into a speculative call calling N2. Update
+ the profile so the direct call is taken DIRECT_COUNT times.
+ SPECULATIVE_ID is used to link direct calls with their
+ corresponding IPA_REF_ADDR references when representing speculative
+ calls. */
+ cgraph_edge *make_speculative (cgraph_node *n2, profile_count direct_count,
+ unsigned int speculative_id = 0);
+
+ /* A speculative call consists of an indirect edge and one or more
+ direct edge+ref pairs. It will expand to the following sequence:
+
+ if (call_dest == target1) // reference to target1
+ target1 (); // direct call to target1
+ else if (call_dest == target2) // reference to target2
+ target2 (); // direct call to target2
+ else
+ call_dest (); // indirect call
+
+ Before the expansion we will have the indirect call and the direct
+ call+ref pairs all linked to a single statement.
+
+ Note that a ref may point to a different symbol than the corresponding
+ call because the speculated edge may have been optimized (redirected to
+ a clone) or inlined.
+
+ Given an edge which is part of a speculative call, return the first
+ direct call edge in the speculative call sequence.
+
+ In the example above, called on any cgraph edge in the sequence, it will
+ return the direct call to target1. */
+ cgraph_edge *first_speculative_call_target ();
+
+ /* Return next speculative call target or NULL if there is none.
+ All targets are required to form an interval in the callee list.
+
+ In the example above, if called on the call to target1, it will return
+ the call to target2. */
+ cgraph_edge *next_speculative_call_target ()
+ {
+ cgraph_edge *e = this;
+ gcc_checking_assert (speculative && callee);
+
+ if (e->next_callee && e->next_callee->speculative
+ && e->next_callee->call_stmt == e->call_stmt
+ && e->next_callee->lto_stmt_uid == e->lto_stmt_uid)
+ return e->next_callee;
+ return NULL;
+ }
+
+ /* When called on any edge in the speculative call return the (unique)
+ indirect call edge in the speculative call sequence. */
+ cgraph_edge *speculative_call_indirect_edge ()
+ {
+ gcc_checking_assert (speculative);
+ if (!callee)
+ return this;
+ for (cgraph_edge *e2 = caller->indirect_calls;
+ true; e2 = e2->next_callee)
+ if (e2->speculative
+ && call_stmt == e2->call_stmt
+ && lto_stmt_uid == e2->lto_stmt_uid)
+ return e2;
+ }
+
+ /* When called on any edge in a speculative call, and given any target
+ of a ref speculated to, return the corresponding direct call.
+
+ In the example above, if called with function target2, it will return
+ the call to target2. */
+ cgraph_edge *speculative_call_for_target (cgraph_node *);
+
+ /* Return the REF corresponding to the direct call in the speculative call
+ sequence. */
+ ipa_ref *speculative_call_target_ref ()
+ {
+ ipa_ref *ref;
+
+ gcc_checking_assert (speculative);
+ for (unsigned int i = 0; caller->iterate_reference (i, ref); i++)
+ if (ref->speculative && ref->speculative_id == speculative_id
+ && ref->stmt == (gimple *)call_stmt
+ && ref->lto_stmt_uid == lto_stmt_uid)
+ return ref;
+ gcc_unreachable ();
+ }
+
+ /* The speculative call edge turned out to be a direct call to CALLEE_DECL.
+ Remove the speculative call sequence and return the edge representing the
+ call; the original EDGE can be removed and deallocated. It is up to the
+ caller to redirect the call as appropriate. Return the edge that now
+ represents the call.
+
+ For "speculative" indirect call that contains multiple "speculative"
+ targets (i.e. edge->indirect_info->num_speculative_call_targets > 1),
+ decrease the count and only remove current direct edge.
+
+ If no speculative direct call is left to the speculative indirect call,
+ remove the speculative flag of both the indirect call and the
+ corresponding direct edge.
+
+ It is up to caller to iteratively resolve each "speculative" direct call
+ and redirect the call as appropriate. */
+ static cgraph_edge *resolve_speculation (cgraph_edge *edge,
+ tree callee_decl = NULL);
+
+ /* If necessary, change the function declaration in the call statement
+ associated with edge E so that it corresponds to the edge callee.
+ Speculations can be resolved in the process and EDGE can be removed and
+ deallocated.
+
+ The edge could be one of speculative direct call generated from speculative
+ indirect call. In this circumstance, decrease the speculative targets
+ count (i.e. num_speculative_call_targets) and redirect call stmt to the
+ corresponding i-th target. If no speculative direct call is left to the
+ speculative indirect call, remove "speculative" from the indirect call and
+ also redirect the stmt to its final direct target.
+
+ It is up to caller to iteratively transform each "speculative"
+ direct call as appropriate. */
+ static gimple *redirect_call_stmt_to_callee (cgraph_edge *e);
+
+ /* Create a clone of the edge in node N, represented
+ by CALL_STMT, in the callgraph. */
+ cgraph_edge * clone (cgraph_node *n, gcall *call_stmt, unsigned stmt_uid,
+ profile_count num, profile_count den,
+ bool update_original);
+
+ /* Verify edge count and frequency. */
+ bool verify_count ();
+
+ /* Return true when call of edge cannot lead to return from caller
+ and thus it is safe to ignore its side effects for IPA analysis
+ when computing side effects of the caller. */
+ bool cannot_lead_to_return_p (void);
+
+ /* Return true when the edge represents a direct recursion. */
+ bool recursive_p (void);
+
+ /* Return true if the edge may be considered hot. */
+ bool maybe_hot_p (void);
+
+ /* Get unique identifier of the edge. */
+ inline int get_uid ()
+ {
+ return m_uid;
+ }
+
+ /* Get summary id of the edge. */
+ inline int get_summary_id ()
+ {
+ return m_summary_id;
+ }
+
+ /* Rebuild cgraph edges for current function node. This needs to be run after
+ passes that don't update the cgraph. */
+ static unsigned int rebuild_edges (void);
+
+ /* Rebuild cgraph references for current function node. This needs to be run
+ after passes that don't update the cgraph. */
+ static void rebuild_references (void);
+
+ /* During LTO stream-in this can be used to check whether the call can
+ possibly be internal to the current translation unit. */
+ bool possibly_call_in_translation_unit_p (void);
+
+ /* Return the number of speculative call targets of this edge. */
+ int num_speculative_call_targets_p (void);
+
+ /* Expected number of executions: calculated in profile.cc. */
+ profile_count count;
+ cgraph_node *caller;
+ cgraph_node *callee;
+ cgraph_edge *prev_caller;
+ cgraph_edge *next_caller;
+ cgraph_edge *prev_callee;
+ cgraph_edge *next_callee;
+ gcall *call_stmt;
+ /* Additional information about an indirect call. Not cleared when an edge
+ becomes direct. */
+ cgraph_indirect_call_info *indirect_info;
+ void *GTY ((skip (""))) aux;
+ /* When equal to CIF_OK, inline this call. Otherwise, points to the
+ explanation why function was not inlined. */
+ enum cgraph_inline_failed_t inline_failed;
+ /* The stmt_uid of call_stmt. This is used by LTO to recover the call_stmt
+ when the function is serialized in. */
+ unsigned int lto_stmt_uid;
+ /* speculative id is used to link direct calls with their corresponding
+ IPA_REF_ADDR references when representing speculative calls. */
+ unsigned int speculative_id : 16;
+ /* Whether this edge was made direct by indirect inlining. */
+ unsigned int indirect_inlining_edge : 1;
+ /* Whether this edge describes an indirect call with an undetermined
+ callee. */
+ unsigned int indirect_unknown_callee : 1;
+ /* True if the corresponding CALL stmt cannot be inlined. */
+ unsigned int call_stmt_cannot_inline_p : 1;
+ /* Can this call throw externally? */
+ unsigned int can_throw_external : 1;
+ /* Edges with the SPECULATIVE flag represent indirect calls that were
+ speculatively turned into direct ones (i.e. by profile feedback).
+ The final code sequence will have the form:
+
+ if (call_target == expected_fn)
+ expected_fn ();
+ else
+ call_target ();
+
+ Every speculative call is represented by three components attached
+ to the same call statement:
+ 1) a direct call (to expected_fn)
+ 2) an indirect call (to call_target)
+ 3) an IPA_REF_ADDR reference to expected_fn.
+
+ Optimizers may later redirect the direct call to a clone, so 1) and 3)
+ do not necessarily need to agree on the destination. */
+ unsigned int speculative : 1;
+ /* Set to true when the caller is a constructor or destructor of a
+ polymorphic type. */
+ unsigned in_polymorphic_cdtor : 1;
+
+ /* Return true if call must bind to current definition. */
+ bool binds_to_current_def_p ();
+
+ /* Expected frequency of executions within the function.
+ When set to CGRAPH_FREQ_BASE, the edge is expected to be called once
+ per function call. The range is 0 to CGRAPH_FREQ_MAX. */
+ int frequency ();
+
+ /* Expected frequency of executions within the function. */
+ sreal sreal_frequency ();
+private:
+ /* Unique id of the edge. */
+ int m_uid;
+
+ /* Summary id that is recycled. */
+ int m_summary_id;
+
+ /* Remove the edge from the list of the callers of the callee. */
+ void remove_caller (void);
+
+ /* Remove the edge from the list of the callees of the caller. */
+ void remove_callee (void);
+
+ /* Set callee N of call graph edge and add it to the corresponding set of
+ callers. */
+ void set_callee (cgraph_node *n);
+
+ /* Output flags of edge to a file F. */
+ void dump_edge_flags (FILE *f);
+
+ /* Dump edge to stderr. */
+ void DEBUG_FUNCTION debug (void);
+
+ /* Verify that call graph edge corresponds to DECL from the associated
+ statement. Return true if the verification should fail. */
+ bool verify_corresponds_to_fndecl (tree decl);
+};
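+
+/* An illustrative walk over the direct callees of a node (a sketch added for
+   exposition, not part of the original header; NODE is assumed to be an
+   existing cgraph_node *):
+
+     int hot_calls = 0;
+     for (cgraph_edge *e = node->callees; e; e = e->next_callee)
+       if (e->maybe_hot_p ())
+         hot_calls++;
+
+   counting the edges considered hot by the profile. */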
+
+#define CGRAPH_FREQ_BASE 1000
+#define CGRAPH_FREQ_MAX 100000
+
+/* The varpool data structure.
+ Each static variable decl has an assigned varpool_node. */
+
+struct GTY((tag ("SYMTAB_VARIABLE"))) varpool_node : public symtab_node
+{
+ /* Constructor. */
+ explicit varpool_node ()
+ : symtab_node (SYMTAB_VARIABLE), output (0), dynamically_initialized (0),
+ tls_model (TLS_MODEL_NONE), used_by_single_function (0)
+ {}
+
+ /* Dump given varpool node to F. */
+ void dump (FILE *f);
+
+ /* Dump given varpool node to stderr. */
+ void DEBUG_FUNCTION debug (void);
+
+ /* Remove variable from symbol table. */
+ void remove (void);
+
+ /* Remove node initializer when it is no longer needed. */
+ void remove_initializer (void);
+
+ void analyze (void);
+
+ /* Return variable availability. */
+ availability get_availability (symtab_node *ref = NULL);
+
+ /* When doing LTO, read variable's constructor from disk if
+ it is not already present. */
+ tree get_constructor (void);
+
+ /* Return true if the variable has a constructor usable for folding. */
+ bool ctor_useable_for_folding_p (void);
+
+ /* For the given variable pool node, walk the alias chain to return the node
+ the variable is an alias of. Do not walk through thunks.
+ When AVAILABILITY is non-NULL, get minimal availability in the chain.
+ When REF is non-NULL, assume that reference happens in symbol REF
+ when determining the availability. */
+ inline varpool_node *ultimate_alias_target
+ (availability *availability = NULL, symtab_node *ref = NULL);
+
+ /* Return the node this alias is aliasing. */
+ inline varpool_node *get_alias_target (void);
+
+ /* Output one variable, if necessary. Return whether we output it. */
+ bool assemble_decl (void);
+
+ /* For variables in named sections make sure get_variable_section
+ is called before we switch to those sections. Then conflicts between
+ read-only sections and read-only sections requiring relocations
+ can be resolved. */
+ void finalize_named_section_flags (void);
+
+ /* Call the callback on the varpool symbol and the aliases associated with
+ it. When INCLUDE_OVERWRITABLE is false, overwritable aliases and thunks
+ are skipped. */
+ bool call_for_symbol_and_aliases (bool (*callback) (varpool_node *, void *),
+ void *data,
+ bool include_overwritable);
+
+ /* Return true when variable should be considered externally visible. */
+ bool externally_visible_p (void);
+
+ /* Return true when all references to the variable must be visible
+ in the ipa_ref_list,
+ i.e. if the variable is not externally visible or not used in some magic
+ way (asm statement or such).
+ The magic uses are all summarized in the force_output flag. */
+ inline bool all_refs_explicit_p ();
+
+ /* Return true when the variable can be removed from the variable pool
+ if all direct references are eliminated. */
+ inline bool can_remove_if_no_refs_p (void);
+
+ /* Add the variable DECL to the varpool.
+ Unlike finalize_decl, this function is intended to be used
+ by the middle end and allows insertion of a new variable at an arbitrary
+ point of compilation. */
+ static void add (tree decl);
+
+ /* Return the varpool node for the given symbol and check it is a variable. */
+ static inline varpool_node *get (const_tree decl);
+
+ /* Mark DECL as finalized. By finalizing the declaration, the frontend
+ instructs the middle end to output the variable to the asm file, if needed
+ or externally visible. */
+ static void finalize_decl (tree decl);
+
+ /* Attempt to mark ALIAS as an alias to DECL. Return TRUE if successful.
+ Extra name aliases are output whenever DECL is output. */
+ static varpool_node * create_extra_name_alias (tree alias, tree decl);
+
+ /* Attempt to mark ALIAS as an alias to DECL. Return TRUE if successful.
+ Extra name aliases are output whenever DECL is output. */
+ static varpool_node * create_alias (tree, tree);
+
+ /* Dump the variable pool to F. */
+ static void dump_varpool (FILE *f);
+
+ /* Dump the variable pool to stderr. */
+ static void DEBUG_FUNCTION debug_varpool (void);
+
+ /* Allocate a new varpool node and insert it into basic data structures. */
+ static varpool_node *create_empty (void);
+
+ /* Return varpool node assigned to DECL. Create new one when needed. */
+ static varpool_node *get_create (tree decl);
+
+ /* Given an assembler name, look up the node. */
+ static varpool_node *get_for_asmname (tree asmname);
+
+ /* Set when variable is scheduled to be assembled. */
+ unsigned output : 1;
+
+ /* Set if the variable is dynamically initialized, except for
+ function local statics. */
+ unsigned dynamically_initialized : 1;
+
+ ENUM_BITFIELD(tls_model) tls_model : 3;
+
+ /* Set if the variable is known to be used by a single function only.
+ This is computed by the ipa_single_use pass and used by late optimizations
+ in places where the optimization would be valid for a local static variable
+ if we did not do any inter-procedural code movement. */
+ unsigned used_by_single_function : 1;
+
+private:
+ /* Assemble aliases associated with the varpool node. */
+ void assemble_aliases (void);
+
+ /* Worker for call_for_symbol_and_aliases. */
+ bool call_for_symbol_and_aliases_1 (bool (*callback) (varpool_node *, void *),
+ void *data,
+ bool include_overwritable);
+};
+
+/* Every top level asm statement is put into an asm_node.  */
+
+struct GTY(()) asm_node {
+ /* Next asm node. */
+ asm_node *next;
+ /* String for this asm node. */
+ tree asm_str;
+ /* Ordering of all cgraph nodes. */
+ int order;
+};
+
+/* Report whether or not THIS symtab node is a function, aka cgraph_node. */
+
+template <>
+template <>
+inline bool
+is_a_helper <cgraph_node *>::test (symtab_node *p)
+{
+ return p && p->type == SYMTAB_FUNCTION;
+}
+
+/* Report whether or not THIS symtab node is a variable, aka varpool_node. */
+
+template <>
+template <>
+inline bool
+is_a_helper <varpool_node *>::test (symtab_node *p)
+{
+ return p && p->type == SYMTAB_VARIABLE;
+}
+
+typedef void (*cgraph_edge_hook)(cgraph_edge *, void *);
+typedef void (*cgraph_node_hook)(cgraph_node *, void *);
+typedef void (*varpool_node_hook)(varpool_node *, void *);
+typedef void (*cgraph_2edge_hook)(cgraph_edge *, cgraph_edge *, void *);
+typedef void (*cgraph_2node_hook)(cgraph_node *, cgraph_node *, void *);
+
+struct cgraph_edge_hook_list;
+struct cgraph_node_hook_list;
+struct varpool_node_hook_list;
+struct cgraph_2edge_hook_list;
+struct cgraph_2node_hook_list;
+
+/* Map from a symbol to initialization/finalization priorities. */
+struct GTY(()) symbol_priority_map {
+ priority_type init;
+ priority_type fini;
+};
+
+enum symtab_state
+{
+ /* Frontend is parsing and finalizing functions. */
+ PARSING,
+ /* Callgraph is being constructed. It is safe to add new functions. */
+ CONSTRUCTION,
+ /* Callgraph is being streamed-in at LTO time. */
+ LTO_STREAMING,
+ /* Callgraph is built and early IPA passes are being run. */
+ IPA,
+ /* Callgraph is built and all functions are transformed to SSA form. */
+ IPA_SSA,
+ /* All inline decisions are done; it is now possible to remove extern inline
+ functions and virtual call targets. */
+ IPA_SSA_AFTER_INLINING,
+ /* Functions are now ordered and being passed to RTL expanders. */
+ EXPANSION,
+ /* All cgraph expansion is done. */
+ FINISHED
+};
+
+struct asmname_hasher : ggc_ptr_hash <symtab_node>
+{
+ typedef const_tree compare_type;
+
+ static hashval_t hash (symtab_node *n);
+ static bool equal (symtab_node *n, const_tree t);
+};
+
+/* Core summaries maintained about symbols. */
+
+struct thunk_info;
+template <class T> class function_summary;
+typedef function_summary <thunk_info *> thunk_summary;
+
+struct clone_info;
+template <class T> class function_summary;
+typedef function_summary <clone_info *> clone_summary;
+
+class GTY((tag ("SYMTAB"))) symbol_table
+{
+public:
+ friend struct symtab_node;
+ friend struct cgraph_node;
+ friend struct cgraph_edge;
+
+ symbol_table ():
+ cgraph_count (0), cgraph_max_uid (1), cgraph_max_summary_id (0),
+ edges_count (0), edges_max_uid (1), edges_max_summary_id (0),
+ cgraph_released_summary_ids (), edge_released_summary_ids (),
+ nodes (NULL), asmnodes (NULL), asm_last_node (NULL),
+ order (0), max_unit (0), global_info_ready (false), state (PARSING),
+ function_flags_ready (false), cpp_implicit_aliases_done (false),
+ section_hash (NULL), assembler_name_hash (NULL), init_priority_hash (NULL),
+ dump_file (NULL), ipa_clones_dump_file (NULL), cloned_nodes (),
+ m_thunks (NULL), m_clones (NULL),
+ m_first_edge_removal_hook (NULL), m_first_cgraph_removal_hook (NULL),
+ m_first_edge_duplicated_hook (NULL), m_first_cgraph_duplicated_hook (NULL),
+ m_first_cgraph_insertion_hook (NULL), m_first_varpool_insertion_hook (NULL),
+ m_first_varpool_removal_hook (NULL)
+ {
+ }
+
+ /* Initialize callgraph dump file. */
+ void initialize (void);
+
+ /* Register a top-level asm statement ASM_STR. */
+ inline asm_node *finalize_toplevel_asm (tree asm_str);
+
+ /* Analyze the whole compilation unit once it is parsed completely. */
+ void finalize_compilation_unit (void);
+
+  /* The C++ frontend produces same-body aliases all over the place, even
+     before PCH gets streamed out.  It relies on us linking the aliases with
+     their functions in order to do the fixups, but ipa-ref is not PCH safe.
+     Consequently we first produce aliases without links, and once the C++ FE
+     is sure it won't stream PCH we build the links via this function.  */
+ void process_same_body_aliases (void);
+
+ /* Perform simple optimizations based on callgraph. */
+ void compile (void);
+
+  /* Process CGRAPH_NEW_FUNCTIONS and perform actions necessary to add these
+     functions to the callgraph so that they look like ordinary reachable
+     functions inserted into the callgraph already at construction time.  */
+ void process_new_functions (void);
+
+ /* Register a symbol NODE. */
+ inline void register_symbol (symtab_node *node);
+
+ inline void
+ clear_asm_symbols (void)
+ {
+ asmnodes = NULL;
+ asm_last_node = NULL;
+ }
+
+ /* Perform reachability analysis and reclaim all unreachable nodes. */
+ bool remove_unreachable_nodes (FILE *file);
+
+  /* Optimization of function bodies might've rendered some variables
+     unnecessary, so we want to avoid compiling them.  Re-do reachability
+     starting from variables that are either externally visible or were
+     referred to from the asm output routines.  */
+ void remove_unreferenced_decls (void);
+
+ /* Unregister a symbol NODE. */
+ inline void unregister (symtab_node *node);
+
+ /* Allocate new callgraph node and insert it into basic data structures. */
+ cgraph_node *create_empty (void);
+
+ /* Release a callgraph NODE. */
+ void release_symbol (cgraph_node *node);
+
+ /* Output all variables enqueued to be assembled. */
+ bool output_variables (void);
+
+ /* Weakrefs may be associated to external decls and thus not output
+ at expansion time. Emit all necessary aliases. */
+ void output_weakrefs (void);
+
+  /* Return first symbol in the symbol table.  */
+ inline symtab_node *first_symbol (void);
+
+ /* Return first assembler symbol. */
+ inline asm_node *
+ first_asm_symbol (void)
+ {
+ return asmnodes;
+ }
+
+ /* Return first static symbol with definition. */
+ inline symtab_node *first_defined_symbol (void);
+
+ /* Return first variable. */
+ inline varpool_node *first_variable (void);
+
+ /* Return next variable after NODE. */
+ inline varpool_node *next_variable (varpool_node *node);
+
+ /* Return first static variable with initializer. */
+ inline varpool_node *first_static_initializer (void);
+
+ /* Return next static variable with initializer after NODE. */
+ inline varpool_node *next_static_initializer (varpool_node *node);
+
+ /* Return first static variable with definition. */
+ inline varpool_node *first_defined_variable (void);
+
+ /* Return next static variable with definition after NODE. */
+ inline varpool_node *next_defined_variable (varpool_node *node);
+
+ /* Return first function with body defined. */
+ inline cgraph_node *first_defined_function (void);
+
+ /* Return next function with body defined after NODE. */
+ inline cgraph_node *next_defined_function (cgraph_node *node);
+
+ /* Return first function. */
+ inline cgraph_node *first_function (void);
+
+ /* Return next function. */
+ inline cgraph_node *next_function (cgraph_node *node);
+
+  /* Return first function with gimple body.  */
+ cgraph_node *first_function_with_gimple_body (void);
+
+  /* Return next function with gimple body after NODE.  */
+ inline cgraph_node *next_function_with_gimple_body (cgraph_node *node);
+
+ /* Register HOOK to be called with DATA on each removed edge. */
+ cgraph_edge_hook_list *add_edge_removal_hook (cgraph_edge_hook hook,
+ void *data);
+
+ /* Remove ENTRY from the list of hooks called on removing edges. */
+ void remove_edge_removal_hook (cgraph_edge_hook_list *entry);
+
+ /* Register HOOK to be called with DATA on each removed node. */
+ cgraph_node_hook_list *add_cgraph_removal_hook (cgraph_node_hook hook,
+ void *data);
+
+ /* Remove ENTRY from the list of hooks called on removing nodes. */
+ void remove_cgraph_removal_hook (cgraph_node_hook_list *entry);
+
+ /* Register HOOK to be called with DATA on each removed node. */
+ varpool_node_hook_list *add_varpool_removal_hook (varpool_node_hook hook,
+ void *data);
+
+ /* Remove ENTRY from the list of hooks called on removing nodes. */
+ void remove_varpool_removal_hook (varpool_node_hook_list *entry);
+
+ /* Register HOOK to be called with DATA on each inserted node. */
+ cgraph_node_hook_list *add_cgraph_insertion_hook (cgraph_node_hook hook,
+ void *data);
+
+ /* Remove ENTRY from the list of hooks called on inserted nodes. */
+ void remove_cgraph_insertion_hook (cgraph_node_hook_list *entry);
+
+ /* Register HOOK to be called with DATA on each inserted node. */
+ varpool_node_hook_list *add_varpool_insertion_hook (varpool_node_hook hook,
+ void *data);
+
+ /* Remove ENTRY from the list of hooks called on inserted nodes. */
+ void remove_varpool_insertion_hook (varpool_node_hook_list *entry);
+
+ /* Register HOOK to be called with DATA on each duplicated edge. */
+ cgraph_2edge_hook_list *add_edge_duplication_hook (cgraph_2edge_hook hook,
+ void *data);
+ /* Remove ENTRY from the list of hooks called on duplicating edges. */
+ void remove_edge_duplication_hook (cgraph_2edge_hook_list *entry);
+
+ /* Register HOOK to be called with DATA on each duplicated node. */
+ cgraph_2node_hook_list *add_cgraph_duplication_hook (cgraph_2node_hook hook,
+ void *data);
+
+ /* Remove ENTRY from the list of hooks called on duplicating nodes. */
+ void remove_cgraph_duplication_hook (cgraph_2node_hook_list *entry);
+
+ /* Call all edge removal hooks. */
+ void call_edge_removal_hooks (cgraph_edge *e);
+
+ /* Call all node insertion hooks. */
+ void call_cgraph_insertion_hooks (cgraph_node *node);
+
+ /* Call all node removal hooks. */
+ void call_cgraph_removal_hooks (cgraph_node *node);
+
+ /* Call all node duplication hooks. */
+ void call_cgraph_duplication_hooks (cgraph_node *node, cgraph_node *node2);
+
+ /* Call all edge duplication hooks. */
+ void call_edge_duplication_hooks (cgraph_edge *cs1, cgraph_edge *cs2);
+
+ /* Call all node removal hooks. */
+ void call_varpool_removal_hooks (varpool_node *node);
+
+ /* Call all node insertion hooks. */
+ void call_varpool_insertion_hooks (varpool_node *node);
+
+ /* Arrange node to be first in its entry of assembler_name_hash. */
+ void symtab_prevail_in_asm_name_hash (symtab_node *node);
+
+  /* Initialize the asm name hash unless it is already initialized.  */
+ void symtab_initialize_asm_name_hash (void);
+
+ /* Set the DECL_ASSEMBLER_NAME and update symtab hashtables. */
+ void change_decl_assembler_name (tree decl, tree name);
+
+ /* Dump symbol table to F. */
+ void dump (FILE *f);
+
+ /* Dump symbol table to F in graphviz format. */
+ void dump_graphviz (FILE *f);
+
+ /* Dump symbol table to stderr. */
+ void DEBUG_FUNCTION debug (void);
+
+ /* Assign a new summary ID for the callgraph NODE. */
+ inline int assign_summary_id (cgraph_node *node)
+ {
+ if (!cgraph_released_summary_ids.is_empty ())
+ node->m_summary_id = cgraph_released_summary_ids.pop ();
+ else
+ node->m_summary_id = cgraph_max_summary_id++;
+
+ return node->m_summary_id;
+ }
+
+ /* Assign a new summary ID for the callgraph EDGE. */
+ inline int assign_summary_id (cgraph_edge *edge)
+ {
+ if (!edge_released_summary_ids.is_empty ())
+ edge->m_summary_id = edge_released_summary_ids.pop ();
+ else
+ edge->m_summary_id = edges_max_summary_id++;
+
+ return edge->m_summary_id;
+ }
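+
+  /* Both overloads above recycle released summary IDs before minting new
+     ones, keeping the ID space dense so that summary vectors indexed by
+     these IDs stay compact.  */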
+
+  /* Return true if assembler names NAME1 and NAME2 lead to the same symbol
+     name.  */
+ static bool assembler_names_equal_p (const char *name1, const char *name2);
+
+ int cgraph_count;
+ int cgraph_max_uid;
+ int cgraph_max_summary_id;
+
+ int edges_count;
+ int edges_max_uid;
+ int edges_max_summary_id;
+
+  /* Vector of released summary IDs for cgraph nodes.  */
+ vec<int> GTY ((skip)) cgraph_released_summary_ids;
+
+  /* Vector of released summary IDs for cgraph edges.  */
+ vec<int> GTY ((skip)) edge_released_summary_ids;
+
+  /* Return the character used to separate a symbol name from suffix.  */
+ static char symbol_suffix_separator ();
+
+ symtab_node* GTY(()) nodes;
+ asm_node* GTY(()) asmnodes;
+ asm_node* GTY(()) asm_last_node;
+
+ /* The order index of the next symtab node to be created. This is
+ used so that we can sort the cgraph nodes in order by when we saw
+ them, to support -fno-toplevel-reorder. */
+ int order;
+
+ /* Maximal unit ID used. */
+ int max_unit;
+
+ /* Set when whole unit has been analyzed so we can access global info. */
+ bool global_info_ready;
+ /* What state callgraph is in right now. */
+ enum symtab_state state;
+  /* Set when the cgraph is fully built and the basic flags are computed.  */
+ bool function_flags_ready;
+
+ bool cpp_implicit_aliases_done;
+
+ /* Hash table used to hold sections. */
+ hash_table<section_name_hasher> *GTY(()) section_hash;
+
+ /* Hash table used to convert assembler names into nodes. */
+ hash_table<asmname_hasher> *assembler_name_hash;
+
+ /* Hash table used to hold init priorities. */
+ hash_map<symtab_node *, symbol_priority_map> *init_priority_hash;
+
+ FILE* GTY ((skip)) dump_file;
+
+ FILE* GTY ((skip)) ipa_clones_dump_file;
+
+ hash_set <const cgraph_node *> GTY ((skip)) cloned_nodes;
+
+ /* Thunk annotations. */
+ thunk_summary *m_thunks;
+
+ /* Virtual clone annotations. */
+ clone_summary *m_clones;
+
+private:
+ /* Allocate a cgraph_edge structure and fill it with data according to the
+ parameters of which only CALLEE can be NULL (when creating an indirect
+ call edge). CLONING_P should be set if properties that are copied from an
+ original edge should not be calculated. */
+ cgraph_edge *create_edge (cgraph_node *caller, cgraph_node *callee,
+ gcall *call_stmt, profile_count count,
+ bool indir_unknown_callee, bool cloning_p);
+
+ /* Put the edge onto the free list. */
+ void free_edge (cgraph_edge *e);
+
+ /* Insert NODE to assembler name hash. */
+ void insert_to_assembler_name_hash (symtab_node *node, bool with_clones);
+
+ /* Remove NODE from assembler name hash. */
+ void unlink_from_assembler_name_hash (symtab_node *node, bool with_clones);
+
+ /* Hash asmnames ignoring the user specified marks. */
+ static hashval_t decl_assembler_name_hash (const_tree asmname);
+
+ /* Compare ASMNAME with the DECL_ASSEMBLER_NAME of DECL. */
+ static bool decl_assembler_name_equal (tree decl, const_tree asmname);
+
+ friend struct asmname_hasher;
+
+ /* List of hooks triggered when an edge is removed. */
+ cgraph_edge_hook_list * GTY((skip)) m_first_edge_removal_hook;
+  /* List of hooks triggered when a cgraph node is removed.  */
+ cgraph_node_hook_list * GTY((skip)) m_first_cgraph_removal_hook;
+ /* List of hooks triggered when an edge is duplicated. */
+ cgraph_2edge_hook_list * GTY((skip)) m_first_edge_duplicated_hook;
+ /* List of hooks triggered when a node is duplicated. */
+ cgraph_2node_hook_list * GTY((skip)) m_first_cgraph_duplicated_hook;
+  /* List of hooks triggered when a function is inserted.  */
+ cgraph_node_hook_list * GTY((skip)) m_first_cgraph_insertion_hook;
+  /* List of hooks triggered when a variable is inserted.  */
+ varpool_node_hook_list * GTY((skip)) m_first_varpool_insertion_hook;
+ /* List of hooks triggered when a node is removed. */
+ varpool_node_hook_list * GTY((skip)) m_first_varpool_removal_hook;
+};
+
+extern GTY(()) symbol_table *symtab;
+
+extern vec<cgraph_node *> cgraph_new_nodes;
+
+inline hashval_t
+asmname_hasher::hash (symtab_node *n)
+{
+ return symbol_table::decl_assembler_name_hash
+ (DECL_ASSEMBLER_NAME (n->decl));
+}
+
+inline bool
+asmname_hasher::equal (symtab_node *n, const_tree t)
+{
+ return symbol_table::decl_assembler_name_equal (n->decl, t);
+}
+
+/* In cgraph.cc */
+void cgraph_cc_finalize (void);
+void release_function_body (tree);
+cgraph_indirect_call_info *cgraph_allocate_init_indirect_info (void);
+
+void cgraph_update_edges_for_call_stmt (gimple *, tree, gimple *);
+bool cgraph_function_possibly_inlined_p (tree);
+
+const char* cgraph_inline_failed_string (cgraph_inline_failed_t);
+cgraph_inline_failed_type_t cgraph_inline_failed_type (cgraph_inline_failed_t);
+
+/* In cgraphunit.cc */
+void cgraphunit_cc_finalize (void);
+int tp_first_run_node_cmp (const void *pa, const void *pb);
+
+/* In symtab-thunks.cc */
+void symtab_thunks_cc_finalize (void);
+
+/* Initialize data structures so DECL is a function in lowered gimple form.
+   IN_SSA is true if the gimple is in SSA.  */
+basic_block init_lowered_empty_function (tree, bool, profile_count);
+
+tree thunk_adjust (gimple_stmt_iterator *, tree, bool, HOST_WIDE_INT, tree,
+ HOST_WIDE_INT);
+/* In cgraphclones.cc */
+
+tree clone_function_name_numbered (const char *name, const char *suffix);
+tree clone_function_name_numbered (tree decl, const char *suffix);
+tree clone_function_name (const char *name, const char *suffix,
+ unsigned long number);
+tree clone_function_name (tree decl, const char *suffix,
+ unsigned long number);
+tree clone_function_name (tree decl, const char *suffix);
+
+void tree_function_versioning (tree, tree, vec<ipa_replace_map *, va_gc> *,
+ ipa_param_adjustments *,
+ bool, bitmap, basic_block);
+
+void dump_callgraph_transformation (const cgraph_node *original,
+ const cgraph_node *clone,
+ const char *suffix);
+/* In cgraphbuild.cc */
+int compute_call_stmt_bb_frequency (tree, basic_block bb);
+void record_references_in_initializer (tree, bool);
+
+/* In ipa.cc */
+void cgraph_build_static_cdtor (char which, tree body, int priority);
+bool ipa_discover_variable_flags (void);
+
+/* In varpool.cc */
+tree ctor_for_folding (tree);
+
+/* In ipa-inline-analysis.cc */
+void initialize_inline_failed (struct cgraph_edge *);
+bool speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining);
+
+/* Return true when the symbol is a real symbol, i.e. it is not an inline
+   clone or an abstract function kept for debug info purposes only.  */
+inline bool
+symtab_node::real_symbol_p (void)
+{
+ cgraph_node *cnode;
+
+ if (DECL_ABSTRACT_P (decl))
+ return false;
+ if (transparent_alias && definition)
+ return false;
+ if (!is_a <cgraph_node *> (this))
+ return true;
+ cnode = dyn_cast <cgraph_node *> (this);
+ if (cnode->inlined_to)
+ return false;
+ return true;
+}
+
+/* Return true if DECL should have entry in symbol table if used.
+ Those are functions and static & external variables. */
+
+inline bool
+decl_in_symtab_p (const_tree decl)
+{
+ return (TREE_CODE (decl) == FUNCTION_DECL
+ || (TREE_CODE (decl) == VAR_DECL
+ && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))));
+}
+
+inline bool
+symtab_node::in_same_comdat_group_p (symtab_node *target)
+{
+ symtab_node *source = this;
+
+  if (cgraph_node *cn = dyn_cast <cgraph_node *> (source))
+ {
+ if (cn->inlined_to)
+ source = cn->inlined_to;
+ }
+ if (cgraph_node *cn = dyn_cast <cgraph_node *> (target))
+ {
+ if (cn->inlined_to)
+ target = cn->inlined_to;
+ }
+
+ return source->get_comdat_group () == target->get_comdat_group ();
+}
+
+/* Return node that alias is aliasing. */
+
+inline symtab_node *
+symtab_node::get_alias_target (void)
+{
+ ipa_ref *ref = NULL;
+ iterate_reference (0, ref);
+ gcc_checking_assert (ref->use == IPA_REF_ALIAS);
+ return ref->referred;
+}
+
+/* Return the DECL (or identifier) that alias is aliasing. Unlike the above,
+ this works whether or not the alias has been analyzed already. */
+
+inline tree
+symtab_node::get_alias_target_tree ()
+{
+ if (alias_target)
+ return alias_target;
+ return get_alias_target ()->decl;
+}
+
+/* Return next symbol with definition after this node.  */
+
+inline symtab_node *
+symtab_node::next_defined_symbol (void)
+{
+ symtab_node *node1 = next;
+
+ for (; node1; node1 = node1->next)
+ if (node1->definition)
+ return node1;
+
+ return NULL;
+}
+
+/* Fetch the I-th reference in the list into REF and return it,
+   or NULL when past the end of the list.  */
+
+inline ipa_ref *
+symtab_node::iterate_reference (unsigned i, ipa_ref *&ref)
+{
+ ref_list.references.iterate (i, &ref);
+
+ return ref;
+}
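+
+/* The usual iteration idiom over the reference list is a sketch like
+   the following, where NODE is any symtab node; the loop stops once
+   iterate_reference returns NULL:
+
+     ipa_ref *ref;
+     for (unsigned i = 0; node->iterate_reference (i, ref); i++)
+       ...;  */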
+
+/* Fetch the I-th referring item in the list into REF and return it,
+   or NULL when past the end of the list.  */
+
+inline ipa_ref *
+symtab_node::iterate_referring (unsigned i, ipa_ref *&ref)
+{
+ ref_list.referring.iterate (i, &ref);
+
+ return ref;
+}
+
+/* Fetch the I-th referring alias in the list into REF and return it,
+   or NULL when past the end of the aliases.  */
+
+inline ipa_ref *
+symtab_node::iterate_direct_aliases (unsigned i, ipa_ref *&ref)
+{
+ ref_list.referring.iterate (i, &ref);
+
+ if (ref && ref->use != IPA_REF_ALIAS)
+ return NULL;
+
+ return ref;
+}
+
+/* Return true if list contains an alias. */
+
+inline bool
+symtab_node::has_aliases_p (void)
+{
+ ipa_ref *ref = NULL;
+
+ return (iterate_direct_aliases (0, ref) != NULL);
+}
+
+/* Return true when RESOLUTION indicates that the linker will use
+   the symbol from non-LTO object files.  */
+
+inline bool
+resolution_used_from_other_file_p (enum ld_plugin_symbol_resolution resolution)
+{
+ return (resolution == LDPR_PREVAILING_DEF
+ || resolution == LDPR_PREEMPTED_REG
+ || resolution == LDPR_RESOLVED_EXEC
+ || resolution == LDPR_RESOLVED_DYN);
+}
+
+/* Return true when symtab_node is known to be used from other (non-LTO)
+ object file. Known only when doing LTO via linker plugin. */
+
+inline bool
+symtab_node::used_from_object_file_p (void)
+{
+ if (!TREE_PUBLIC (decl) || DECL_EXTERNAL (decl))
+ return false;
+ if (resolution_used_from_other_file_p (resolution))
+ return true;
+ return false;
+}
+
+/* Return varpool node for given symbol and check it is a variable.  */
+
+inline varpool_node *
+varpool_node::get (const_tree decl)
+{
+ gcc_checking_assert (TREE_CODE (decl) == VAR_DECL);
+ return dyn_cast<varpool_node *> (symtab_node::get (decl));
+}
+
+/* Register a symbol NODE. */
+
+inline void
+symbol_table::register_symbol (symtab_node *node)
+{
+ node->next = nodes;
+ node->previous = NULL;
+
+ if (nodes)
+ nodes->previous = node;
+ nodes = node;
+
+ node->order = order++;
+}
+
+/* Register a top-level asm statement ASM_STR. */
+
+asm_node *
+symbol_table::finalize_toplevel_asm (tree asm_str)
+{
+ asm_node *node;
+
+ node = ggc_cleared_alloc<asm_node> ();
+ node->asm_str = asm_str;
+ node->order = order++;
+ node->next = NULL;
+
+ if (asmnodes == NULL)
+ asmnodes = node;
+ else
+ asm_last_node->next = node;
+
+ asm_last_node = node;
+ return node;
+}
+
+/* Unregister a symbol NODE. */
+inline void
+symbol_table::unregister (symtab_node *node)
+{
+ if (node->previous)
+ node->previous->next = node->next;
+ else
+ nodes = node->next;
+
+ if (node->next)
+ node->next->previous = node->previous;
+
+ node->next = NULL;
+ node->previous = NULL;
+}
+
+/* Release a callgraph NODE: return its summary ID to the free list
+   and free the node.  */
+
+inline void
+symbol_table::release_symbol (cgraph_node *node)
+{
+ cgraph_count--;
+ if (node->m_summary_id != -1)
+ cgraph_released_summary_ids.safe_push (node->m_summary_id);
+ ggc_free (node);
+}
+
+/* Return first symbol in the symbol table.  */
+inline symtab_node *
+symbol_table::first_symbol (void)
+{
+ return nodes;
+}
+
+/* Walk all symbols. */
+#define FOR_EACH_SYMBOL(node) \
+ for ((node) = symtab->first_symbol (); (node); (node) = (node)->next)
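+
+/* Usage sketch, assuming an initialized symtab:
+
+     symtab_node *node;
+     FOR_EACH_SYMBOL (node)
+       if (node->definition)
+         ...;  */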
+
+/* Return first static symbol with definition. */
+inline symtab_node *
+symbol_table::first_defined_symbol (void)
+{
+ symtab_node *node;
+
+ for (node = nodes; node; node = node->next)
+ if (node->definition)
+ return node;
+
+ return NULL;
+}
+
+/* Walk all symbols with definitions in current unit. */
+#define FOR_EACH_DEFINED_SYMBOL(node) \
+ for ((node) = symtab->first_defined_symbol (); (node); \
+ (node) = node->next_defined_symbol ())
+
+/* Return first variable. */
+inline varpool_node *
+symbol_table::first_variable (void)
+{
+ symtab_node *node;
+ for (node = nodes; node; node = node->next)
+ if (varpool_node *vnode = dyn_cast <varpool_node *> (node))
+ return vnode;
+ return NULL;
+}
+
+/* Return next variable after NODE. */
+inline varpool_node *
+symbol_table::next_variable (varpool_node *node)
+{
+ symtab_node *node1 = node->next;
+ for (; node1; node1 = node1->next)
+ if (varpool_node *vnode1 = dyn_cast <varpool_node *> (node1))
+ return vnode1;
+ return NULL;
+}
+/* Walk all variables. */
+#define FOR_EACH_VARIABLE(node) \
+ for ((node) = symtab->first_variable (); \
+ (node); \
+ (node) = symtab->next_variable ((node)))
+
+/* Return first static variable with initializer. */
+inline varpool_node *
+symbol_table::first_static_initializer (void)
+{
+ symtab_node *node;
+ for (node = nodes; node; node = node->next)
+ {
+ varpool_node *vnode = dyn_cast <varpool_node *> (node);
+ if (vnode && DECL_INITIAL (node->decl))
+ return vnode;
+ }
+ return NULL;
+}
+
+/* Return next static variable with initializer after NODE. */
+inline varpool_node *
+symbol_table::next_static_initializer (varpool_node *node)
+{
+ symtab_node *node1 = node->next;
+ for (; node1; node1 = node1->next)
+ {
+ varpool_node *vnode1 = dyn_cast <varpool_node *> (node1);
+ if (vnode1 && DECL_INITIAL (node1->decl))
+ return vnode1;
+ }
+ return NULL;
+}
+
+/* Walk all static variables with initializer set. */
+#define FOR_EACH_STATIC_INITIALIZER(node) \
+ for ((node) = symtab->first_static_initializer (); (node); \
+ (node) = symtab->next_static_initializer (node))
+
+/* Return first static variable with definition. */
+inline varpool_node *
+symbol_table::first_defined_variable (void)
+{
+ symtab_node *node;
+ for (node = nodes; node; node = node->next)
+ {
+ varpool_node *vnode = dyn_cast <varpool_node *> (node);
+ if (vnode && vnode->definition)
+ return vnode;
+ }
+ return NULL;
+}
+
+/* Return next static variable with definition after NODE. */
+inline varpool_node *
+symbol_table::next_defined_variable (varpool_node *node)
+{
+ symtab_node *node1 = node->next;
+ for (; node1; node1 = node1->next)
+ {
+ varpool_node *vnode1 = dyn_cast <varpool_node *> (node1);
+ if (vnode1 && vnode1->definition)
+ return vnode1;
+ }
+ return NULL;
+}
+/* Walk all variables with definitions in current unit. */
+#define FOR_EACH_DEFINED_VARIABLE(node) \
+ for ((node) = symtab->first_defined_variable (); (node); \
+ (node) = symtab->next_defined_variable (node))
+
+/* Return first function with body defined. */
+inline cgraph_node *
+symbol_table::first_defined_function (void)
+{
+ symtab_node *node;
+ for (node = nodes; node; node = node->next)
+ {
+ cgraph_node *cn = dyn_cast <cgraph_node *> (node);
+ if (cn && cn->definition)
+ return cn;
+ }
+ return NULL;
+}
+
+/* Return next function with body defined after NODE. */
+inline cgraph_node *
+symbol_table::next_defined_function (cgraph_node *node)
+{
+ symtab_node *node1 = node->next;
+ for (; node1; node1 = node1->next)
+ {
+ cgraph_node *cn1 = dyn_cast <cgraph_node *> (node1);
+ if (cn1 && cn1->definition)
+ return cn1;
+ }
+ return NULL;
+}
+
+/* Walk all functions with body defined. */
+#define FOR_EACH_DEFINED_FUNCTION(node) \
+ for ((node) = symtab->first_defined_function (); (node); \
+ (node) = symtab->next_defined_function ((node)))
+
+/* Return first function. */
+inline cgraph_node *
+symbol_table::first_function (void)
+{
+ symtab_node *node;
+ for (node = nodes; node; node = node->next)
+ if (cgraph_node *cn = dyn_cast <cgraph_node *> (node))
+ return cn;
+ return NULL;
+}
+
+/* Return next function. */
+inline cgraph_node *
+symbol_table::next_function (cgraph_node *node)
+{
+ symtab_node *node1 = node->next;
+ for (; node1; node1 = node1->next)
+ if (cgraph_node *cn1 = dyn_cast <cgraph_node *> (node1))
+ return cn1;
+ return NULL;
+}
+
+/* Return first function with gimple body.  */
+inline cgraph_node *
+symbol_table::first_function_with_gimple_body (void)
+{
+ symtab_node *node;
+ for (node = nodes; node; node = node->next)
+ {
+ cgraph_node *cn = dyn_cast <cgraph_node *> (node);
+ if (cn && cn->has_gimple_body_p ())
+ return cn;
+ }
+ return NULL;
+}
+
+/* Return next function with gimple body after NODE.  */
+inline cgraph_node *
+symbol_table::next_function_with_gimple_body (cgraph_node *node)
+{
+ symtab_node *node1 = node->next;
+ for (; node1; node1 = node1->next)
+ {
+ cgraph_node *cn1 = dyn_cast <cgraph_node *> (node1);
+ if (cn1 && cn1->has_gimple_body_p ())
+ return cn1;
+ }
+ return NULL;
+}
+
+/* Walk all functions. */
+#define FOR_EACH_FUNCTION(node) \
+ for ((node) = symtab->first_function (); (node); \
+ (node) = symtab->next_function ((node)))
+
+/* Return true when callgraph node is a function with Gimple body defined
+   in current unit.  Functions can also be defined externally or they
+ can be thunks with no Gimple representation.
+
+ Note that at WPA stage, the function body may not be present in memory. */
+
+inline bool
+cgraph_node::has_gimple_body_p (void)
+{
+ return definition && !thunk && !alias;
+}
+
+/* Walk all functions with body defined. */
+#define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \
+ for ((node) = symtab->first_function_with_gimple_body (); (node); \
+ (node) = symtab->next_function_with_gimple_body (node))
+
+/* Uniquize all constants that appear in memory.
+ Each constant in memory thus far output is recorded
+ in `const_desc_table'. */
+
+struct GTY((for_user)) constant_descriptor_tree {
+ /* A MEM for the constant. */
+ rtx rtl;
+
+ /* The value of the constant. */
+ tree value;
+
+ /* Hash of value. Computing the hash from value each time
+ hashfn is called can't work properly, as that means recursive
+ use of the hash table during hash table expansion. */
+ hashval_t hash;
+};
+
+/* Return true when function is only called directly or via its aliases,
+   i.e. it is not externally visible, its address was not taken and
+   it is not used in any other non-standard way.  */
+
+inline bool
+cgraph_node::only_called_directly_or_aliased_p (void)
+{
+ gcc_assert (!inlined_to);
+ return (!force_output && !address_taken
+ && !ifunc_resolver
+ && !used_from_other_partition
+ && !DECL_VIRTUAL_P (decl)
+ && !DECL_STATIC_CONSTRUCTOR (decl)
+ && !DECL_STATIC_DESTRUCTOR (decl)
+ && !used_from_object_file_p ()
+ && !externally_visible);
+}
+
+/* Return true when function can be removed from callgraph
+ if all direct calls are eliminated. */
+
+inline bool
+cgraph_node::can_remove_if_no_direct_calls_and_refs_p (void)
+{
+ gcc_checking_assert (!inlined_to);
+ /* Extern inlines can always go, we will use the external definition. */
+ if (DECL_EXTERNAL (decl))
+ return true;
+ /* When function is needed, we cannot remove it. */
+ if (force_output || used_from_other_partition)
+ return false;
+ if (DECL_STATIC_CONSTRUCTOR (decl)
+ || DECL_STATIC_DESTRUCTOR (decl))
+ return false;
+ /* Only COMDAT functions can be removed if externally visible. */
+ if (externally_visible
+ && ((!DECL_COMDAT (decl) || ifunc_resolver)
+ || forced_by_abi
+ || used_from_object_file_p ()))
+ return false;
+ return true;
+}
+
+/* Verify cgraph, if consistency checking is enabled. */
+
+inline void
+cgraph_node::checking_verify_cgraph_nodes (void)
+{
+ if (flag_checking)
+ cgraph_node::verify_cgraph_nodes ();
+}
+
+/* Return true when variable can be removed from variable pool
+   if all references are eliminated.  */
+
+inline bool
+varpool_node::can_remove_if_no_refs_p (void)
+{
+ if (DECL_EXTERNAL (decl))
+ return true;
+ return (!force_output && !used_from_other_partition
+ && ((DECL_COMDAT (decl)
+ && !forced_by_abi
+ && !used_from_object_file_p ())
+ || !externally_visible
+ || DECL_HAS_VALUE_EXPR_P (decl)));
+}
+
+/* Return true when all references to variable must be visible in ipa_ref_list,
+   i.e. when the variable is not externally visible nor used in some magic
+   way (asm statement or such).
+   The magic uses are all summarized in the force_output flag.  */
+
+inline bool
+varpool_node::all_refs_explicit_p ()
+{
+ return (definition
+ && !externally_visible
+ && !used_from_other_partition
+ && !force_output);
+}
+
+struct tree_descriptor_hasher : ggc_ptr_hash<constant_descriptor_tree>
+{
+ static hashval_t hash (constant_descriptor_tree *);
+ static bool equal (constant_descriptor_tree *, constant_descriptor_tree *);
+};
+
+/* Constant pool accessor function. */
+hash_table<tree_descriptor_hasher> *constant_pool_htab (void);
+
+/* Return node that alias is aliasing. */
+
+inline cgraph_node *
+cgraph_node::get_alias_target (void)
+{
+ return dyn_cast <cgraph_node *> (symtab_node::get_alias_target ());
+}
+
+/* Return node that alias is aliasing. */
+
+inline varpool_node *
+varpool_node::get_alias_target (void)
+{
+ return dyn_cast <varpool_node *> (symtab_node::get_alias_target ());
+}
+
+/* Walk the alias chain to return the symbol NODE is alias of.
+ If NODE is not an alias, return NODE.
+ When AVAILABILITY is non-NULL, get minimal availability in the chain.
+ When REF is non-NULL, assume that reference happens in symbol REF
+ when determining the availability. */
+
+inline symtab_node *
+symtab_node::ultimate_alias_target (enum availability *availability,
+ symtab_node *ref)
+{
+ if (!alias)
+ {
+ if (availability)
+ *availability = get_availability (ref);
+ return this;
+ }
+
+ return ultimate_alias_target_1 (availability, ref);
+}
+
+/* Given a function symbol, walk the alias chain to return the function
+   the node is an alias of.  Do not walk through thunks.
+ When AVAILABILITY is non-NULL, get minimal availability in the chain.
+ When REF is non-NULL, assume that reference happens in symbol REF
+ when determining the availability. */
+
+inline cgraph_node *
+cgraph_node::ultimate_alias_target (enum availability *availability,
+ symtab_node *ref)
+{
+ cgraph_node *n = dyn_cast <cgraph_node *>
+ (symtab_node::ultimate_alias_target (availability, ref));
+ if (!n && availability)
+ *availability = AVAIL_NOT_AVAILABLE;
+ return n;
+}
+
+/* For the given variable pool node, walk the alias chain to return the
+   variable it is an alias of.  Do not walk through thunks.
+ When AVAILABILITY is non-NULL, get minimal availability in the chain.
+ When REF is non-NULL, assume that reference happens in symbol REF
+ when determining the availability. */
+
+inline varpool_node *
+varpool_node::ultimate_alias_target (availability *availability,
+ symtab_node *ref)
+{
+ varpool_node *n = dyn_cast <varpool_node *>
+ (symtab_node::ultimate_alias_target (availability, ref));
+
+ if (!n && availability)
+ *availability = AVAIL_NOT_AVAILABLE;
+ return n;
+}
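+
+/* A common calling pattern for the three overloads above (sketch):
+
+     enum availability avail;
+     cgraph_node *target = node->ultimate_alias_target (&avail);
+     if (target && avail >= AVAIL_AVAILABLE)
+       ...;  */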
+
+/* Set callee N of call graph edge and add it to the corresponding set of
+ callers. */
+
+inline void
+cgraph_edge::set_callee (cgraph_node *n)
+{
+ prev_caller = NULL;
+ if (n->callers)
+ n->callers->prev_caller = this;
+ next_caller = n->callers;
+ n->callers = this;
+ callee = n;
+}
+
+/* Return true when the edge represents a direct recursion. */
+
+inline bool
+cgraph_edge::recursive_p (void)
+{
+ cgraph_node *c = callee->ultimate_alias_target ();
+ if (caller->inlined_to)
+ return caller->inlined_to->decl == c->decl;
+ else
+ return caller->decl == c->decl;
+}
+
+/* Remove the edge from the list of the callers of the callee. */
+
+inline void
+cgraph_edge::remove_callee (void)
+{
+ gcc_assert (!indirect_unknown_callee);
+ if (prev_caller)
+ prev_caller->next_caller = next_caller;
+ if (next_caller)
+ next_caller->prev_caller = prev_caller;
+ if (!prev_caller)
+ callee->callers = next_caller;
+}
+
+/* Return true if call must bind to current definition. */
+
+inline bool
+cgraph_edge::binds_to_current_def_p ()
+{
+ if (callee)
+ return callee->binds_to_current_def_p (caller);
+ else
+ return false;
+}
+
+/* Expected frequency of executions within the function.
+ When set to CGRAPH_FREQ_BASE, the edge is expected to be called once
+ per function call. The range is 0 to CGRAPH_FREQ_MAX. */
+
+inline int
+cgraph_edge::frequency ()
+{
+ return count.to_cgraph_frequency (caller->inlined_to
+ ? caller->inlined_to->count
+ : caller->count);
+}
+
+
+/* Return true if the TM_CLONE bit is set for a given FNDECL. */
+inline bool
+decl_is_tm_clone (const_tree fndecl)
+{
+ cgraph_node *n = cgraph_node::get (fndecl);
+ if (n)
+ return n->tm_clone;
+ return false;
+}
+
+/* Indicate that a node is needed, i.e. reachable via some
+   external means.  */
+
+inline void
+cgraph_node::mark_force_output (void)
+{
+ force_output = 1;
+ gcc_checking_assert (!inlined_to);
+}
+
+/* Return the level at which function should be optimized for size.  */
+
+inline enum optimize_size_level
+cgraph_node::optimize_for_size_p (void)
+{
+ if (opt_for_fn (decl, optimize_size))
+ return OPTIMIZE_SIZE_MAX;
+ if (count == profile_count::zero ())
+ return OPTIMIZE_SIZE_MAX;
+ if (frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
+ return OPTIMIZE_SIZE_BALANCED;
+ else
+ return OPTIMIZE_SIZE_NO;
+}
+
+/* Return symtab_node for NODE or create one if it is not present
+ in symtab. */
+
+inline symtab_node *
+symtab_node::get_create (tree node)
+{
+ if (TREE_CODE (node) == VAR_DECL)
+ return varpool_node::get_create (node);
+ else
+ return cgraph_node::get_create (node);
+}
+
+/* Return availability of NODE when referenced from REF. */
+
+inline enum availability
+symtab_node::get_availability (symtab_node *ref)
+{
+ if (is_a <cgraph_node *> (this))
+ return dyn_cast <cgraph_node *> (this)->get_availability (ref);
+ else
+ return dyn_cast <varpool_node *> (this)->get_availability (ref);
+}
+
+/* Call callback on symtab node and aliases associated to this node.
+ When INCLUDE_OVERWRITABLE is false, overwritable symbols are skipped. */
+
+inline bool
+symtab_node::call_for_symbol_and_aliases (bool (*callback) (symtab_node *,
+ void *),
+ void *data,
+ bool include_overwritable)
+{
+ if (include_overwritable
+ || get_availability () > AVAIL_INTERPOSABLE)
+ {
+ if (callback (this, data))
+ return true;
+ }
+ if (has_aliases_p ())
+ return call_for_symbol_and_aliases_1 (callback, data, include_overwritable);
+ return false;
+}
+
+/* Call callback on function and aliases associated to the function.
+ When INCLUDE_OVERWRITABLE is false, overwritable symbols are
+ skipped. */
+
+inline bool
+cgraph_node::call_for_symbol_and_aliases (bool (*callback) (cgraph_node *,
+ void *),
+ void *data,
+ bool include_overwritable)
+{
+ if (include_overwritable
+ || get_availability () > AVAIL_INTERPOSABLE)
+ {
+ if (callback (this, data))
+ return true;
+ }
+ if (has_aliases_p ())
+ return call_for_symbol_and_aliases_1 (callback, data, include_overwritable);
+ return false;
+}
+
+/* Call callback on varpool symbol and aliases associated to varpool symbol.
+ When INCLUDE_OVERWRITABLE is false, overwritable symbols are
+ skipped. */
+
+inline bool
+varpool_node::call_for_symbol_and_aliases (bool (*callback) (varpool_node *,
+ void *),
+ void *data,
+ bool include_overwritable)
+{
+ if (include_overwritable
+ || get_availability () > AVAIL_INTERPOSABLE)
+ {
+ if (callback (this, data))
+ return true;
+ }
+ if (has_aliases_p ())
+ return call_for_symbol_and_aliases_1 (callback, data, include_overwritable);
+ return false;
+}
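+
+/* All three walkers above take a callback of the corresponding node type.
+   A sketch of counting a function together with its aliases:
+
+     static bool
+     count_one (cgraph_node *, void *data)
+     {
+       ++*(int *) data;
+       return false;   // false means keep walking
+     }
+
+     int count = 0;
+     node->call_for_symbol_and_aliases (count_one, &count, true);  */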
+
+/* Return true if reference may be used in address compare. */
+
+inline bool
+ipa_ref::address_matters_p ()
+{
+ if (use != IPA_REF_ADDR)
+ return false;
+ /* Addresses taken from virtual tables are never compared. */
+ if (is_a <varpool_node *> (referring)
+ && DECL_VIRTUAL_P (referring->decl))
+ return false;
+ return referred->address_can_be_compared_p ();
+}
+
+/* Build polymorphic call context for indirect call E. */
+
+inline
+ipa_polymorphic_call_context::ipa_polymorphic_call_context (cgraph_edge *e)
+{
+ gcc_checking_assert (e->indirect_info->polymorphic);
+ *this = e->indirect_info->context;
+}
+
+/* Build empty "I know nothing" context. */
+
+inline
+ipa_polymorphic_call_context::ipa_polymorphic_call_context ()
+{
+ clear_speculation ();
+ clear_outer_type ();
+ invalid = false;
+}
+
+/* Make context non-speculative. */
+
+inline void
+ipa_polymorphic_call_context::clear_speculation ()
+{
+ speculative_outer_type = NULL;
+ speculative_offset = 0;
+ speculative_maybe_derived_type = false;
+}
+
+/* Produce a context specifying all derived types of OTR_TYPE.  If OTR_TYPE is
+   NULL, the context is set to the dummy "I know nothing" setting.  */
+
+inline void
+ipa_polymorphic_call_context::clear_outer_type (tree otr_type)
+{
+ outer_type = otr_type ? TYPE_MAIN_VARIANT (otr_type) : NULL;
+ offset = 0;
+ maybe_derived_type = true;
+ maybe_in_construction = true;
+ dynamic = true;
+}
+
+/* Adjust all offsets in contexts by OFF bits. */
+
+inline void
+ipa_polymorphic_call_context::offset_by (HOST_WIDE_INT off)
+{
+ if (outer_type)
+ offset += off;
+ if (speculative_outer_type)
+ speculative_offset += off;
+}
+
+/* Return TRUE if context is fully useless. */
+
+inline bool
+ipa_polymorphic_call_context::useless_p () const
+{
+ return (!outer_type && !speculative_outer_type);
+}
+
+/* When using fprintf (or similar), problems can arise with
+ transient generated strings. Many string-generation APIs
+ only support one result being alive at once (e.g. by
+ returning a pointer to a statically-allocated buffer).
+
+ If there is more than one generated string within one
+   fprintf call, the first string gets evicted or overwritten
+   by the second before fprintf is fully evaluated.
+ See e.g. PR/53136.
+
+   This function works around the problem by providing a simple
+   way to create copies of these transient strings, without the
+   need for explicit cleanup:
+
+ fprintf (dumpfile, "string 1: %s string 2:%s\n",
+ xstrdup_for_dump (EXPR_1),
+ xstrdup_for_dump (EXPR_2));
+
+ This is actually a simple wrapper around ggc_strdup, but
+ the name documents the intent. We require that no GC can occur
+ within the fprintf call. */
+
+inline const char *
+xstrdup_for_dump (const char *transient_str)
+{
+ return ggc_strdup (transient_str);
+}
+
+/* During LTO stream-in this predicate can be used to check whether the node
+   in question prevails in the linking, to save some memory usage.  */
+inline bool
+symtab_node::prevailing_p (void)
+{
+ return definition && ((!TREE_PUBLIC (decl) && !DECL_EXTERNAL (decl))
+ || previous_sharing_asm_name == NULL);
+}
+
+extern GTY(()) symbol_table *saved_symtab;
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* An RAII-style class for use in selftests for temporarily using a different
+ symbol_table, so that such tests can be isolated from each other. */
+
+class symbol_table_test
+{
+ public:
+ /* Constructor. Override "symtab". */
+ symbol_table_test ();
+
+ /* Destructor. Restore the saved_symtab. */
+ ~symbol_table_test ();
+};
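+
+/* Usage sketch: declare a local symbol_table_test at the top of a
+   selftest; the constructor installs a fresh symtab for the scope and
+   the destructor restores the saved one:
+
+     void test_something ()
+     {
+       symbol_table_test stt;
+       ... exercise code that touches symtab ...
+     }  */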
+
+} // namespace selftest
+
+#endif /* CHECKING_P */
+
+#endif /* GCC_CGRAPH_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cif-code.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cif-code.def
new file mode 100644
index 0000000..af88d83
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cif-code.def
@@ -0,0 +1,144 @@
+/* This file contains the definitions of the cgraph_inline_failed_t
+ enums used in GCC.
+
+ Copyright (C) 2008-2023 Free Software Foundation, Inc.
+ Contributed by Doug Kwan <dougkwan@google.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>. */
+
+/* The format of this file is
+   DEFCIFCODE(code, type, string).
+
+   Where CODE is the enumeration name without the ``CIF_'' prefix,
+   TYPE classifies the failure (a normal inlining decision or a hard
+   error), and STRING explains the failure.  Except for OK, whose
+   string is a NULL pointer.  */
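+
+/* Consumers define DEFCIFCODE before including this file.  For instance,
+   a sketch of how the enumeration itself can be built:
+
+     #define DEFCIFCODE(code, type, string) CIF_ ## code,
+     enum cgraph_inline_failed_t {
+     #include "cif-code.def"
+       CIF_N_REASONS
+     };
+     #undef DEFCIFCODE
+*/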
+
+/* Inlining successful. This must be the first code. */
+DEFCIFCODE(OK, CIF_FINAL_NORMAL, NULL)
+
+/* Inlining failed for an unspecified reason. */
+DEFCIFCODE(UNSPECIFIED, CIF_FINAL_ERROR, "")
+
+/* Function has not been considered for inlining.  This is the code for
+ functions that have not been rejected for inlining yet. */
+DEFCIFCODE(FUNCTION_NOT_CONSIDERED, CIF_FINAL_NORMAL,
+ N_("function not considered for inlining"))
+
+/* Caller is compiled with optimizations disabled. */
+DEFCIFCODE(FUNCTION_NOT_OPTIMIZED, CIF_FINAL_ERROR,
+ N_("caller is not optimized"))
+
+/* Inlining failed owing to unavailable function body. */
+DEFCIFCODE(BODY_NOT_AVAILABLE, CIF_FINAL_ERROR,
+ N_("function body not available"))
+
+/* Extern inline function that has been redefined. */
+DEFCIFCODE(REDEFINED_EXTERN_INLINE, CIF_FINAL_ERROR,
+ N_("redefined extern inline functions are not considered for "
+ "inlining"))
+
+/* Function is not inlinable. */
+DEFCIFCODE(FUNCTION_NOT_INLINABLE, CIF_FINAL_ERROR,
+ N_("function not inlinable"))
+
+/* Function is overwritable. */
+DEFCIFCODE(OVERWRITABLE, CIF_FINAL_ERROR,
+ N_("function body can be overwritten at link time"))
+
+/* Function is not an inlining candidate. */
+DEFCIFCODE(FUNCTION_NOT_INLINE_CANDIDATE, CIF_FINAL_NORMAL,
+ N_("function not inline candidate"))
+
+/* Inlining failed because of various limit parameters. */
+DEFCIFCODE(LARGE_FUNCTION_GROWTH_LIMIT, CIF_FINAL_NORMAL,
+ N_("--param large-function-growth limit reached"))
+DEFCIFCODE(LARGE_STACK_FRAME_GROWTH_LIMIT, CIF_FINAL_NORMAL,
+ N_("--param large-stack-frame-growth limit reached"))
+DEFCIFCODE(MAX_INLINE_INSNS_SINGLE_LIMIT, CIF_FINAL_NORMAL,
+ N_("--param max-inline-insns-single limit reached"))
+DEFCIFCODE(MAX_INLINE_INSNS_AUTO_LIMIT, CIF_FINAL_NORMAL,
+ N_("--param max-inline-insns-auto limit reached"))
+DEFCIFCODE(INLINE_UNIT_GROWTH_LIMIT, CIF_FINAL_NORMAL,
+ N_("--param inline-unit-growth limit reached"))
+
+/* Recursive inlining. */
+DEFCIFCODE(RECURSIVE_INLINING, CIF_FINAL_NORMAL,
+ N_("recursive inlining"))
+
+/* Call is unlikely. */
+DEFCIFCODE(UNLIKELY_CALL, CIF_FINAL_NORMAL,
+ N_("call is unlikely and code size would grow"))
+
+/* Call is considered never executed. */
+DEFCIFCODE(NEVER_CALL, CIF_FINAL_NORMAL,
+ N_("call is considered never executed and code size would grow"))
+
+/* Function is not declared as inline. */
+DEFCIFCODE(NOT_DECLARED_INLINED, CIF_FINAL_NORMAL,
+ N_("function not declared inline and code size would grow"))
+
+/* Caller and callee disagree on the arguments. */
+DEFCIFCODE(LTO_MISMATCHED_DECLARATIONS, CIF_FINAL_ERROR,
+ N_("mismatched declarations during linktime optimization"))
+
+/* Caller is variadic thunk. */
+DEFCIFCODE(VARIADIC_THUNK, CIF_FINAL_ERROR,
+ N_("variadic thunk call"))
+
+/* Call was originally indirect. */
+DEFCIFCODE(ORIGINALLY_INDIRECT_CALL, CIF_FINAL_NORMAL,
+ N_("originally indirect function call not considered for inlining"))
+
+/* This edge represents an indirect call with a yet-undetermined callee.  */
+DEFCIFCODE(INDIRECT_UNKNOWN_CALL, CIF_FINAL_NORMAL,
+ N_("indirect function call with a yet undetermined callee"))
+
+/* We can't inline different EH personalities together. */
+DEFCIFCODE(EH_PERSONALITY, CIF_FINAL_ERROR,
+ N_("exception handling personality mismatch"))
+
+/* We can't inline if the callee can throw non-call exceptions but the
+ caller cannot. */
+DEFCIFCODE(NON_CALL_EXCEPTIONS, CIF_FINAL_ERROR,
+ N_("non-call exception handling mismatch"))
+
+/* We can't inline because of mismatched target specific options. */
+DEFCIFCODE(TARGET_OPTION_MISMATCH, CIF_FINAL_ERROR,
+ N_("target specific option mismatch"))
+
+/* We can't inline because of mismatched optimization levels. */
+DEFCIFCODE(OPTIMIZATION_MISMATCH, CIF_FINAL_ERROR,
+ N_("optimization level attribute mismatch"))
+
+/* We can't inline because the callee refers to comdat-local symbols. */
+DEFCIFCODE(USES_COMDAT_LOCAL, CIF_FINAL_NORMAL,
+ N_("callee refers to comdat-local symbols"))
+
+/* We can't inline because of mismatched caller/callee
+ sanitizer attributes. */
+DEFCIFCODE(SANITIZE_ATTRIBUTE_MISMATCH, CIF_FINAL_ERROR,
+ N_("sanitizer function attribute mismatch"))
+
+/* We can't inline because the user requests only static functions
+ but the function has external linkage for live patching purpose. */
+DEFCIFCODE(EXTERN_LIVE_ONLY_STATIC, CIF_FINAL_ERROR,
+ N_("function has external linkage when the user requests only"
+ " inlining static for live patching"))
+
+/* We proved that the call is unreachable. */
+DEFCIFCODE(UNREACHABLE, CIF_FINAL_ERROR,
+ N_("unreachable"))
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/collect-utils.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/collect-utils.h
new file mode 100644
index 0000000..b872eab
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/collect-utils.h
@@ -0,0 +1,51 @@
+/* Utility functions used by tools like collect2 and lto-wrapper.
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_COLLECT_UTILS_H
+#define GCC_COLLECT_UTILS_H
+
+/* Provided in collect-utils.cc. */
+extern void notice (const char *, ...)
+ __attribute__ ((format (printf, 1, 2)));
+extern void fatal_signal (int);
+extern void setup_signals (void);
+
+extern struct pex_obj *collect_execute (const char *, char **,
+ const char *, const char *,
+ int, bool, const char *);
+extern int collect_wait (const char *, struct pex_obj *);
+extern void do_wait (const char *, struct pex_obj *);
+extern void fork_execute (const char *, char **, bool, const char *);
+extern void utils_cleanup (bool);
+
+
+extern bool debug;
+extern bool verbose;
+extern bool save_temps;
+extern const char *dumppfx;
+
+/* Provided by the tool itself. */
+
+/* The name of the tool, printed in error messages. */
+extern const char tool_name[];
+/* Called by utils_cleanup. */
+extern void tool_cleanup (bool);
+extern void maybe_unlink (const char *);
+
+#endif /* GCC_COLLECT_UTILS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/collect2-aix.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/collect2-aix.h
new file mode 100644
index 0000000..6fdafff
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/collect2-aix.h
@@ -0,0 +1,306 @@
+/* AIX cross support for collect2.
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_COLLECT2_AIX_H
+#define GCC_COLLECT2_AIX_H
+/* collect2-aix.cc requires mmap support. It should otherwise be
+ fairly portable. */
+#if defined(CROSS_DIRECTORY_STRUCTURE) \
+ && defined(TARGET_AIX_VERSION) \
+ && HAVE_MMAP
+
+#define CROSS_AIX_SUPPORT 1
+
+/* -------------------------------------------------------------------------
+ Definitions adapted from bfd. (Fairly heavily adapted in some cases.)
+ ------------------------------------------------------------------------- */
+
+/* Compatibility types for bfd. */
+typedef unsigned HOST_WIDE_INT bfd_vma;
+
+/* The size of an archive's fl_magic field. */
+#define FL_MAGIC_SIZE 8
+
+/* The expected contents of fl_magic for big archives. */
+#define FL_MAGIC_BIG_AR "<bigaf>\012"
+
+/* The size of each offset string in the header of a big archive. */
+#define AR_BIG_OFFSET_SIZE 20
+
+/* The format of the file header in a "big" XCOFF archive. */
+struct external_big_ar_filehdr
+{
+ /* Magic string. */
+ char fl_magic[FL_MAGIC_SIZE];
+
+ /* Offset of the member table (decimal ASCII string). */
+ char fl_memoff[AR_BIG_OFFSET_SIZE];
+
+ /* Offset of the global symbol table for 32-bit objects (decimal ASCII
+ string). */
+ char fl_symoff[AR_BIG_OFFSET_SIZE];
+
+ /* Offset of the global symbol table for 64-bit objects (decimal ASCII
+ string). */
+ char fl_symoff64[AR_BIG_OFFSET_SIZE];
+
+ /* Offset of the first member in the archive (decimal ASCII string). */
+ char fl_firstmemoff[AR_BIG_OFFSET_SIZE];
+
+ /* Offset of the last member in the archive (decimal ASCII string). */
+ char fl_lastmemoff[AR_BIG_OFFSET_SIZE];
+
+ /* Offset of the first member on the free list (decimal ASCII
+ string). */
+ char fl_freeoff[AR_BIG_OFFSET_SIZE];
+};
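+
+/* All the offset fields above hold decimal ASCII, not binary values.
+   A reader converts one like this (sketch; error handling omitted):
+
+     char buf[AR_BIG_OFFSET_SIZE + 1];
+     memcpy (buf, hdr.fl_memoff, AR_BIG_OFFSET_SIZE);
+     buf[AR_BIG_OFFSET_SIZE] = '\0';
+     unsigned long memoff = strtoul (buf, NULL, 10);  */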
+
+/* Each archive name is followed by this many bytes of magic string. */
+#define SXCOFFARFMAG 2
+
+/* The format of a member header in a "big" XCOFF archive. */
+struct external_big_ar_member
+{
+ /* File size not including the header (decimal ASCII string). */
+ char ar_size[AR_BIG_OFFSET_SIZE];
+
+ /* File offset of next archive member (decimal ASCII string). */
+ char ar_nextoff[AR_BIG_OFFSET_SIZE];
+
+ /* File offset of previous archive member (decimal ASCII string). */
+ char ar_prevoff[AR_BIG_OFFSET_SIZE];
+
+ /* File mtime (decimal ASCII string). */
+ char ar_date[12];
+
+ /* File UID (decimal ASCII string). */
+ char ar_uid[12];
+
+ /* File GID (decimal ASCII string). */
+ char ar_gid[12];
+
+ /* File mode (octal ASCII string). */
+ char ar_mode[12];
+
+ /* Length of file name (decimal ASCII string). */
+ char ar_namlen[4];
+
+ /* This structure is followed by the file name. The length of the
+ name is given in the namlen field. If the length of the name is
+ odd, the name is followed by a null byte. The name and optional
+ null byte are followed by XCOFFARFMAG, which is not included in
+ namlen. The contents of the archive member follow; the number of
+ bytes is given in the size field. */
+};
+
+/* The known values of f_magic in an XCOFF file header. */
+#define U802WRMAGIC 0730 /* Writeable text segments. */
+#define U802ROMAGIC 0735 /* Readonly sharable text segments. */
+#define U802TOCMAGIC 0737 /* Readonly text segments and TOC. */
+#define U803XTOCMAGIC 0757 /* Aix 4.3 64-bit XCOFF. */
+#define U64_TOCMAGIC 0767 /* AIX 5+ 64-bit XCOFF. */
+
+/* The number of bytes in an XCOFF file's f_magic field. */
+#define F_MAGIC_SIZE 2
+
+/* The format of a 32-bit XCOFF file header. */
+struct external_filehdr_32
+{
+ /* The magic number. */
+ char f_magic[F_MAGIC_SIZE];
+
+ /* The number of sections. */
+ char f_nscns[2];
+
+ /* Time & date stamp. */
+ char f_timdat[4];
+
+ /* The offset of the symbol table from the start of the file. */
+ char f_symptr[4];
+
+ /* The number of entries in the symbol table. */
+ char f_nsyms[4];
+
+ /* The size of the auxiliary header. */
+ char f_opthdr[2];
+
+ /* Flags. */
+ char f_flags[2];
+};
+
+/* The format of a 64-bit XCOFF file header. */
+struct external_filehdr_64
+{
+ /* The magic number. */
+ char f_magic[F_MAGIC_SIZE];
+
+ /* The number of sections. */
+ char f_nscns[2];
+
+ /* Time & date stamp. */
+ char f_timdat[4];
+
+ /* The offset of the symbol table from the start of the file. */
+ char f_symptr[8];
+
+ /* The size of the auxiliary header. */
+ char f_opthdr[2];
+
+ /* Flags. */
+ char f_flags[2];
+
+ /* The number of entries in the symbol table. */
+ char f_nsyms[4];
+};
+
+/* An internal representation of the XCOFF file header. */
+struct internal_filehdr
+{
+ unsigned short f_magic;
+ unsigned short f_nscns;
+ long f_timdat;
+ bfd_vma f_symptr;
+ long f_nsyms;
+ unsigned short f_opthdr;
+ unsigned short f_flags;
+};
+
+/* Symbol classes have their names in the debug section if this flag
+ is set. */
+#define DBXMASK 0x80
+
+/* The format of an XCOFF symbol-table entry. */
+struct external_syment
+{
+ union {
+ struct {
+ union {
+ /* The name of the symbol. There is an implicit null character
+ after the end of the array. */
+ char n_name[8];
+ struct {
+	      /* If n_zeroes is zero, n_offset is the offset of the name
+		 from the start of the string table.  */
+ char n_zeroes[4];
+ char n_offset[4];
+ } u;
+ } u;
+
+ /* The symbol's value. */
+ char n_value[4];
+ } xcoff32;
+ struct {
+ /* The symbol's value. */
+ char n_value[8];
+
+ /* The offset of the symbol from the start of the string table. */
+ char n_offset[4];
+ } xcoff64;
+ } u;
+
+ /* The number of the section to which this symbol belongs. */
+ char n_scnum[2];
+
+ /* The type of symbol. (It can be interpreted as an n_lang
+ and an n_cpu byte, but we don't care about that here.) */
+ char n_type[2];
+
+ /* The class of symbol (a C_* value). */
+ char n_sclass[1];
+
+ /* The number of auxiliary symbols attached to this entry. */
+ char n_numaux[1];
+};
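+
+/* Sketch of name extraction for the 32-bit case, assuming E points at an
+   entry and STRTAB at the mapped string table (READ_BE32 stands in for
+   whatever big-endian decode the reader uses):
+
+     char buf[9];
+     const char *name;
+     if (memcmp (e->u.xcoff32.u.u.n_zeroes, "\0\0\0\0", 4) == 0)
+       name = strtab + READ_BE32 (e->u.xcoff32.u.u.n_offset);
+     else
+       {
+         memcpy (buf, e->u.xcoff32.u.n_name, 8);
+         buf[8] = '\0';
+         name = buf;
+       }  */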
+
+/* Definitions required by collect2. */
+#define C_EXT 2
+
+#define F_SHROBJ 0x2000
+#define F_LOADONLY 0x4000
+
+#define N_UNDEF ((short) 0)
+#define N_TMASK 060
+#define N_BTSHFT 4
+
+#define DT_NON 0
+#define DT_FCN 2
+
+/* -------------------------------------------------------------------------
+ Local code.
+ ------------------------------------------------------------------------- */
+
+/* An internal representation of an XCOFF symbol-table entry,
+ which is associated with the API-defined SYMENT type. */
+struct internal_syment
+{
+ char n_name[9];
+ unsigned int n_zeroes;
+ bfd_vma n_offset;
+ bfd_vma n_value;
+ short n_scnum;
+ unsigned short n_flags;
+ unsigned short n_type;
+ unsigned char n_sclass;
+ unsigned char n_numaux;
+};
+typedef struct internal_syment SYMENT;
+
+/* The internal representation of the API-defined LDFILE type. */
+struct internal_ldfile
+{
+ /* The file handle for the associated file, or -1 if it hasn't been
+ opened yet. */
+ int fd;
+
+ /* The start of the current XCOFF object, if one has been mapped
+ into memory. Null otherwise. */
+ char *object;
+
+ /* The offset of OBJECT from the start of the containing page. */
+ size_t page_offset;
+
+ /* The size of the file pointed to by OBJECT. Valid iff OBJECT
+ is nonnull. */
+ size_t object_size;
+
+ /* The offset of the next member in an archive after OBJECT,
+ or -1 if this isn't an archive. Valid iff OBJECT is nonnull. */
+ off_t next_member;
+
+ /* The parsed version of the XCOFF file header. */
+ struct internal_filehdr filehdr;
+};
+typedef struct internal_ldfile LDFILE;
+
+/* The API allows the file header to be directly accessed via this macro. */
+#define HEADER(FILE) ((FILE)->filehdr)
+
+/* API-defined return codes. SUCCESS must be > 0 and FAILURE must be <= 0. */
+#define SUCCESS 1
+#define FAILURE 0
+
+/* API-defined functions. */
+extern LDFILE *ldopen (char *, LDFILE *);
+extern char *ldgetname (LDFILE *, SYMENT *);
+extern int ldtbread (LDFILE *, long, SYMENT *);
+extern int ldclose (LDFILE *);
+
+#endif
+
+#endif /* GCC_COLLECT2_AIX_H */
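For orientation, here is a minimal sketch of how the ldopen/ldtbread/ldgetname/ldclose API above could be driven to list the external symbols of one XCOFF object. The aux-entry skipping and the printing are illustrative only; collect2's real scan loop does more filtering.

    #include <stdio.h>
    #include "collect2-aix.h"

    /* Hedged sketch: list the C_EXT symbols of one XCOFF object.  */
    static void
    dump_external_symbols (char *path)
    {
      LDFILE *file = ldopen (path, NULL);
      if (file == NULL)
        return;
      for (long i = 0; i < HEADER (file).f_nsyms; i++)
        {
          SYMENT sym;
          if (ldtbread (file, i, &sym) != SUCCESS)
            break;
          if (sym.n_sclass == C_EXT)
            printf ("%s\n", ldgetname (file, &sym));
          i += sym.n_numaux;  /* Auxiliary entries follow the main one.  */
        }
      ldclose (file);
    }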
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/collect2.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/collect2.h
new file mode 100644
index 0000000..de12ef0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/collect2.h
@@ -0,0 +1,39 @@
+/* Header file for collect routines.
+ Copyright (C) 1998-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_COLLECT2_H
+#define GCC_COLLECT2_H
+
+extern struct pex_obj *collect_execute (const char *, char **, const char *,
+ const char *, int flags);
+
+extern int collect_wait (const char *, struct pex_obj *);
+
+extern int file_exists (const char *);
+
+extern const char *c_file_name;
+extern struct obstack temporary_obstack;
+extern char *temporary_firstobj;
+extern bool may_unlink_output_file;
+
+extern void notice_translated (const char *, ...) ATTRIBUTE_PRINTF_1;
+extern void notice (const char *, ...) ATTRIBUTE_PRINTF_1;
+
+extern bool at_file_supplied;
+#endif /* ! GCC_COLLECT2_H */
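A hedged sketch of the execute/wait pair declared above, as collect2 uses it to drive subprograms such as the linker. The NULL output/error file names and the zero flags value are placeholders, not collect2's actual arguments.

    #include "collect2.h"

    /* Hedged sketch: spawn PROG with ARGV and wait for it to finish.  */
    static int
    run_tool (const char *prog, char **argv)
    {
      struct pex_obj *pex = collect_execute (prog, argv, NULL, NULL, 0);
      return collect_wait (prog, pex);
    }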
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/color-macros.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/color-macros.h
new file mode 100644
index 0000000..fcd79d0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/color-macros.h
@@ -0,0 +1,108 @@
+/* Terminal color manipulation macros.
+ Copyright (C) 2005-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_COLOR_MACROS_H
+#define GCC_COLOR_MACROS_H
+
+/* Select Graphic Rendition (SGR, "\33[...m") strings. */
+/* Also Erase in Line (EL) to Right ("\33[K") by default. */
+/* Why have EL to Right after SGR?
+ -- The behavior of line-wrapping when at the bottom of the
+ terminal screen and at the end of the current line is often
+ such that a new line is introduced, entirely cleared with
+ the current background color which may be different from the
+ default one (see the boolean back_color_erase terminfo(5)
+ capability), thus scrolling the display by one line.
+ The end of this new line will stay in this background color
+ even after reverting to the default background color with
+ "\33[m', unless it is explicitly cleared again with "\33[K"
+ (which is the behavior the user would instinctively expect
+ from the whole thing). There may be some unavoidable
+ background-color flicker at the end of this new line because
+ of this (when timing with the monitor's redraw is just right).
+ -- The behavior of HT (tab, "\t") is usually the same as that of
+ Cursor Forward Tabulation (CHT) with a default parameter
+ of 1 ("\33[I"), i.e., it performs pure movement to the next
+ tab stop, without any clearing of either content or screen
+ attributes (including background color); try
+ printf 'asdfqwerzxcv\rASDF\tZXCV\n'
+ in a bash(1) shell to demonstrate this. This is not what the
+ user would instinctively expect of HT (but is ok for CHT).
+ The instinctive behavior would include clearing the terminal
+ cells that are skipped over by HT with blank cells in the
+ current screen attributes, including background color;
+ the boolean dest_tabs_magic_smso terminfo(5) capability
+ indicates this saner behavior for HT, but only some rare
+ terminals have it (although it also indicates a special
+ glitch with standout mode in the Teleray terminal for which
+ it was initially introduced). The remedy is to add "\33[K"
+ after each SGR sequence, be it START (to fix the behavior
+ of any HT after that before another SGR) or END (to fix the
+ behavior of an HT in default background color that would
+ follow a line-wrapping at the bottom of the screen in another
+ background color, and to complement doing it after START).
+ Piping GCC's output through a pager such as less(1) avoids
+ any HT problems since the pager performs tab expansion.
+
+ Generic disadvantages of this remedy are:
+ -- Some very rare terminals might support SGR but not EL (nobody
+ will use "gcc -fdiagnostics-color" on a terminal that does not
+ support SGR in the first place).
+ -- Having these extra control sequences might somewhat complicate
+ the task of any program trying to parse "gcc -fdiagnostics-color"
+ output in order to extract structuring information from it.
+ A specific disadvantage to doing it after SGR START is:
+ -- Even more possible background color flicker (when timing
+ with the monitor's redraw is just right), even when not at the
+ bottom of the screen.
+ There are no additional disadvantages specific to doing it after
+ SGR END.
+
+ It would be impractical for GCC to become a full-fledged
+ terminal program linked against ncurses or the like, so it will
+ not detect terminfo(5) capabilities. */
+
+#define COLOR_SEPARATOR ";"
+#define COLOR_NONE "00"
+#define COLOR_BOLD "01"
+#define COLOR_UNDERSCORE "04"
+#define COLOR_BLINK "05"
+#define COLOR_REVERSE "07"
+#define COLOR_FG_BLACK "30"
+#define COLOR_FG_RED "31"
+#define COLOR_FG_GREEN "32"
+#define COLOR_FG_YELLOW "33"
+#define COLOR_FG_BLUE "34"
+#define COLOR_FG_MAGENTA "35"
+#define COLOR_FG_CYAN "36"
+#define COLOR_FG_WHITE "37"
+#define COLOR_BG_BLACK "40"
+#define COLOR_BG_RED "41"
+#define COLOR_BG_GREEN "42"
+#define COLOR_BG_YELLOW "43"
+#define COLOR_BG_BLUE "44"
+#define COLOR_BG_MAGENTA "45"
+#define COLOR_BG_CYAN "46"
+#define COLOR_BG_WHITE "47"
+#define SGR_START "\33["
+#define SGR_END "m\33[K"
+#define SGR_SEQ(str) SGR_START str SGR_END
+#define SGR_RESET SGR_SEQ("")
+
+#endif /* GCC_COLOR_MACROS_H */
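The macros above compose into one escape sequence by compile-time string pasting; a minimal sketch follows (the bold-red choice for errors is illustrative, not GCC's shipped default color table).

    #include <stdio.h>
    #include "color-macros.h"

    /* Expands to "\33[01;31m\33[K"; SGR_RESET is "\33[m\33[K".  */
    #define SGR_ERROR SGR_SEQ (COLOR_BOLD COLOR_SEPARATOR COLOR_FG_RED)

    int
    main (void)
    {
      printf (SGR_ERROR "error:" SGR_RESET " demo diagnostic\n");
      return 0;
    }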
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/conditions.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/conditions.h
new file mode 100644
index 0000000..92a60e2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/conditions.h
@@ -0,0 +1,69 @@
+/* Definitions for condition code handling in final.cc and output routines.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CONDITIONS_H
+#define GCC_CONDITIONS_H
+
+/* These are the machine-independent flags: */
+
+/* Set if the sign of the cc value is inverted:
+ output a following jump-if-less as a jump-if-greater, etc. */
+#define CC_REVERSED 1
+
+/* This bit means that the current setting of the N bit is bogus
+ and conditional jumps should use the Z bit in its place.
+ This state obtains when an extraction of a signed single-bit field
+ or an arithmetic shift right of a byte by 7 bits
+ is turned into a btst, because btst does not set the N bit. */
+#define CC_NOT_POSITIVE 2
+
+/* This bit means that the current setting of the N bit is bogus
+ and conditional jumps should pretend that the N bit is clear.
+ Used after extraction of an unsigned bit
+ or logical shift right of a byte by 7 bits is turned into a btst.
+ The btst does not alter the N bit, but the result of that shift
+ or extract is never negative. */
+#define CC_NOT_NEGATIVE 4
+
+/* This bit means that the current setting of the overflow flag
+ is bogus and conditional jumps should pretend there is no overflow. */
+/* ??? Note that for most targets this macro is misnamed as it applies
+ to the carry flag, not the overflow flag. */
+#define CC_NO_OVERFLOW 010
+
+/* This bit means that what ought to be in the Z bit
+ should be tested as the complement of the N bit. */
+#define CC_Z_IN_NOT_N 020
+
+/* This bit means that what ought to be in the Z bit
+ should be tested as the N bit. */
+#define CC_Z_IN_N 040
+
+/* Nonzero if we must invert the sense of the following branch, i.e.
+ change EQ to NE. This is not safe for IEEE floating point operations!
+ It is intended for use only when a combination of arithmetic
+ or logical insns can leave the condition codes set in a fortuitous
+ (though inverted) state. */
+#define CC_INVERTED 0100
+
+/* Nonzero if we must convert signed condition operators to unsigned.
+ This is only used by machine description files. */
+#define CC_NOT_SIGNED 0200
+
+#endif /* GCC_CONDITIONS_H */
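A minimal sketch of how an output routine might consume two of these bits. The enum of comparison codes stands in for GCC's rtx codes, and the plain int flag word stands in for what final.cc tracks; both are simplifications for illustration.

    #include "conditions.h"

    enum cond { EQ, NE, LT, GT };

    /* Hedged sketch: adjust a comparison for the machine-independent
       flag bits.  */
    static enum cond
    adjust_cond (enum cond code, int flags)
    {
      /* CC_INVERTED: the branch sense must be inverted (EQ <-> NE).  */
      if (flags & CC_INVERTED)
        code = code == EQ ? NE : code == NE ? EQ : code;
      /* CC_REVERSED: the sign of the cc value is inverted, so a
         jump-if-less becomes a jump-if-greater (LT <-> GT).  */
      if (flags & CC_REVERSED)
        code = code == LT ? GT : code == GT ? LT : code;
      return code;
    }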
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config.h
new file mode 100644
index 0000000..aa6dd6b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config.h
@@ -0,0 +1,10 @@
+#ifndef GCC_CONFIG_H
+#define GCC_CONFIG_H
+#ifdef GENERATOR_FILE
+#error config.h is for the host, not build, machine.
+#endif
+#include "auto-host.h"
+#ifdef IN_GCC
+# include "ansidecl.h"
+#endif
+#endif /* GCC_CONFIG_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/aarch-common-protos.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/aarch-common-protos.h
new file mode 100644
index 0000000..f8cb656
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/aarch-common-protos.h
@@ -0,0 +1,168 @@
+/* Functions and structures shared between arm and aarch64.
+
+ Copyright (C) 1991-2023 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_AARCH_COMMON_PROTOS_H
+#define GCC_AARCH_COMMON_PROTOS_H
+
+#include "hard-reg-set.h"
+
+extern int aarch_accumulator_forwarding (rtx_insn *, rtx_insn *);
+extern bool aarch_rev16_p (rtx);
+extern bool aarch_rev16_shleft_mask_imm_p (rtx, machine_mode);
+extern bool aarch_rev16_shright_mask_imm_p (rtx, machine_mode);
+extern bool aarch_mm_needs_acquire (rtx);
+extern bool aarch_mm_needs_release (rtx);
+extern int arm_early_load_addr_dep (rtx, rtx);
+extern int arm_early_load_addr_dep_ptr (rtx, rtx);
+extern int arm_early_store_addr_dep (rtx, rtx);
+extern int arm_early_store_addr_dep_ptr (rtx, rtx);
+extern int arm_mac_accumulator_is_mul_result (rtx, rtx);
+extern int arm_mac_accumulator_is_result (rtx, rtx);
+extern int arm_no_early_alu_shift_dep (rtx, rtx);
+extern int arm_no_early_alu_shift_value_dep (rtx, rtx);
+extern int arm_no_early_mul_dep (rtx, rtx);
+extern int arm_no_early_store_addr_dep (rtx, rtx);
+extern bool arm_rtx_shift_left_p (rtx);
+extern void aarch_bti_arch_check (void);
+extern bool aarch_bti_enabled (void);
+extern bool aarch_bti_j_insn_p (rtx_insn *);
+extern bool aarch_pac_insn_p (rtx);
+extern rtx aarch_gen_bti_c (void);
+extern rtx aarch_gen_bti_j (void);
+
+/* RTX cost table definitions. These are used when tuning for speed rather
+ than for size and should reflect the _additional_ cost over the cost
+ of the fastest instruction in the machine, which is COSTS_N_INSNS (1).
+ Therefore it's okay for some costs to be 0.
+ Costs may not have a negative value. */
+struct alu_cost_table
+{
+ const int arith; /* ADD/SUB. */
+ const int logical; /* AND/ORR/EOR/BIC, etc. */
+ const int shift; /* Simple shift. */
+ const int shift_reg; /* Simple shift by reg. */
+ const int arith_shift; /* Additional when arith also shifts... */
+ const int arith_shift_reg; /* ... and when the shift is by a reg. */
+ const int log_shift; /* Additional when logic also shifts... */
+ const int log_shift_reg; /* ... and when the shift is by a reg. */
+ const int extend; /* Zero/sign extension. */
+ const int extend_arith; /* Extend and arith. */
+ const int bfi; /* Bit-field insert. */
+ const int bfx; /* Bit-field extraction. */
+ const int clz; /* Count Leading Zeros. */
+ const int rev; /* Reverse bits/bytes. */
+ const int non_exec; /* Extra cost when not executing insn. */
+ const bool non_exec_costs_exec; /* True if non-execution must add the exec
+ cost. */
+};
+
+struct mult_cost_table
+{
+ const int simple;
+ const int flag_setting; /* Additional cost if multiply sets flags. */
+ const int extend;
+ const int add;
+ const int extend_add;
+ const int idiv;
+};
+
+/* Calculations of LDM costs are complex. We assume an initial cost
+ (ldm_1st) which will load the number of registers mentioned in
+ ldm_regs_per_insn_1st registers; then each additional
+ ldm_regs_per_insn_subsequent registers cost one more insn.
+ Similarly for STM operations.
+ Therefore the ldm_regs_per_insn_1st/stm_regs_per_insn_1st and
+ ldm_regs_per_insn_subsequent/stm_regs_per_insn_subsequent fields indicate
+ the number of registers loaded/stored and are expressed by a simple integer
+ and not by a COSTS_N_INSNS (N) expression.
+ */
+struct mem_cost_table
+{
+ const int load;
+ const int load_sign_extend; /* Additional to load cost. */
+ const int ldrd; /* Cost of LDRD. */
+ const int ldm_1st;
+ const int ldm_regs_per_insn_1st;
+ const int ldm_regs_per_insn_subsequent;
+ const int loadf; /* SFmode. */
+ const int loadd; /* DFmode. */
+ const int load_unaligned; /* Extra for unaligned loads. */
+ const int store;
+ const int strd;
+ const int stm_1st;
+ const int stm_regs_per_insn_1st;
+ const int stm_regs_per_insn_subsequent;
+ const int storef; /* SFmode. */
+ const int stored; /* DFmode. */
+ const int store_unaligned; /* Extra for unaligned stores. */
+ const int loadv; /* Vector load. */
+ const int storev; /* Vector store. */
+};
+
+struct fp_cost_table
+{
+ const int div;
+ const int mult;
+ const int mult_addsub; /* Non-fused. */
+ const int fma; /* Fused. */
+ const int addsub;
+ const int fpconst; /* Immediate. */
+ const int neg; /* NEG and ABS. */
+ const int compare;
+ const int widen; /* Widen to this size. */
+ const int narrow; /* Narrow from this size. */
+ const int toint;
+ const int fromint;
+ const int roundint; /* V8 round to integral, remains FP format. */
+};
+
+struct vector_cost_table
+{
+ const int alu;
+ const int mult;
+ const int movi;
+ const int dup;
+ const int extract;
+};
+
+struct cpu_cost_table
+{
+ const struct alu_cost_table alu;
+ const struct mult_cost_table mult[2]; /* SImode and DImode. */
+ const struct mem_cost_table ldst;
+ const struct fp_cost_table fp[2]; /* SFmode and DFmode. */
+ const struct vector_cost_table vect;
+};
+
+rtx_insn *arm_md_asm_adjust (vec<rtx> &outputs, vec<rtx> & /*inputs*/,
+ vec<machine_mode> & /*input_modes*/,
+ vec<const char *> &constraints,
+ vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs,
+ location_t loc);
+
+/* Parsing routine for branch-protection common to AArch64 and Arm. */
+enum aarch_parse_opt_result aarch_parse_branch_protection (const char*, char**);
+
+/* Validation routine for branch-protection common to AArch64 and Arm. */
+bool aarch_validate_mbranch_protection (const char *);
+
+#endif /* GCC_AARCH_COMMON_PROTOS_H */
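To make the cost-table shape concrete, a hedged sketch of a vector_cost_table for an imaginary core. Every number is invented; per the comment above, each value is the additional cost over COSTS_N_INSNS (1), GCC's baseline macro, so 0 means "as cheap as the fastest instruction".

    /* Hedged sketch: invented vector costs for an imaginary core.  */
    static const struct vector_cost_table examplecore_vector_cost =
    {
      COSTS_N_INSNS (0),  /* alu.  */
      COSTS_N_INSNS (3),  /* mult.  */
      COSTS_N_INSNS (0),  /* movi.  */
      COSTS_N_INSNS (1),  /* dup.  */
      COSTS_N_INSNS (1)   /* extract.  */
    };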
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/aarch-common.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/aarch-common.h
new file mode 100644
index 0000000..c6a67f0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/aarch-common.h
@@ -0,0 +1,73 @@
+/* Types shared between arm and aarch64.
+
+ Copyright (C) 2009-2021 Free Software Foundation, Inc.
+ Contributed by Arm Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_AARCH_COMMON_H
+#define GCC_AARCH_COMMON_H
+
+/* Enum describing the various ways that the
+ aarch*_parse_{arch,tune,cpu,extension} functions can fail.
+ This way their callers can choose what kind of error to give. */
+
+enum aarch_parse_opt_result
+{
+ AARCH_PARSE_OK, /* Parsing was successful. */
+ AARCH_PARSE_MISSING_ARG, /* Missing argument. */
+ AARCH_PARSE_INVALID_FEATURE, /* Invalid feature modifier. */
+ AARCH_PARSE_INVALID_ARG /* Invalid arch, tune, cpu arg. */
+};
+
+/* Function types -msign-return-address should sign. */
+enum aarch_function_type {
+ /* Don't sign any function. */
+ AARCH_FUNCTION_NONE,
+ /* Non-leaf functions. */
+ AARCH_FUNCTION_NON_LEAF,
+ /* All functions. */
+ AARCH_FUNCTION_ALL
+};
+
+/* The key type that -msign-return-address should use. */
+enum aarch_key_type {
+ AARCH_KEY_A,
+ AARCH_KEY_B
+};
+
+struct aarch_branch_protect_type
+{
+ /* The type's name that the user passes to the branch-protection option
+ string. */
+ const char* name;
+ /* Function to handle the protection type and set global variables.
+ First argument is the string token corresponding with this type and the
+ second argument is the next token in the option string.
+ Return values:
+ * AARCH_PARSE_OK: Handling was successful.
+ * AARCH_PARSE_INVALID_ARG: The type is invalid in this context and the
+ caller should print an error.
+ * AARCH_PARSE_INVALID_FEATURE: The type is invalid and the handler prints
+ its own error. */
+ enum aarch_parse_opt_result (*handler)(char*, char*);
+ /* A list of types that can follow this type in the option string. */
+ const struct aarch_branch_protect_type* subtypes;
+ unsigned int num_subtypes;
+};
+
+#endif /* GCC_AARCH_COMMON_H */
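A hedged sketch of one aarch_branch_protect_type entry. The handler body and the table itself are invented to show how the fields relate; real handlers set target globals and may recurse into the subtypes list.

    /* Hedged sketch: a parsing-table entry for "none".  */
    static enum aarch_parse_opt_result
    handle_none (char *token ATTRIBUTE_UNUSED, char *next ATTRIBUTE_UNUSED)
    {
      return AARCH_PARSE_OK;
    }

    static const struct aarch_branch_protect_type example_types[] =
    {
      { "none", handle_none, NULL /* subtypes */, 0 /* num_subtypes */ },
    };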
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/aout.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/aout.h
new file mode 100644
index 0000000..57c3b9b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/aout.h
@@ -0,0 +1,302 @@
+/* Definitions of target machine for GNU compiler, for ARM with a.out
+ Copyright (C) 1995-2023 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rearnsha@armltd.co.uk).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef ASM_APP_ON
+#define ASM_APP_ON ""
+#endif
+#ifndef ASM_APP_OFF
+#define ASM_APP_OFF ""
+#endif
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP "\t.text"
+#define DATA_SECTION_ASM_OP "\t.data"
+#define BSS_SECTION_ASM_OP "\t.bss"
+
+/* Note: If USER_LABEL_PREFIX or LOCAL_LABEL_PREFIX are changed,
+ make sure that this change is reflected in the function
+ coff_arm_is_local_label_name() in bfd/coff-arm.c. */
+#ifndef REGISTER_PREFIX
+#define REGISTER_PREFIX ""
+#endif
+
+#ifndef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+#endif
+
+#ifndef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX ""
+#endif
+
+/* The assembler's names for the registers. Note that the ?xx registers are
+ there so that VFPv3/NEON registers D16-D31 have the same spacing as D0-D15
+ (each of which is overlaid on two S registers), although there are no
+ actual single-precision registers which correspond to D16-D31. New register
+ p0 is added which is used for MVE predicated cases. */
+
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc", \
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", \
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", \
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", \
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", \
+ "d16", "?16", "d17", "?17", "d18", "?18", "d19", "?19", \
+ "d20", "?20", "d21", "?21", "d22", "?22", "d23", "?23", \
+ "d24", "?24", "d25", "?25", "d26", "?26", "d27", "?27", \
+ "d28", "?28", "d29", "?29", "d30", "?30", "d31", "?31", \
+ "wr0", "wr1", "wr2", "wr3", \
+ "wr4", "wr5", "wr6", "wr7", \
+ "wr8", "wr9", "wr10", "wr11", \
+ "wr12", "wr13", "wr14", "wr15", \
+ "wcgr0", "wcgr1", "wcgr2", "wcgr3", \
+ "cc", "vfpcc", "sfp", "afp", "apsrq", "apsrge", "p0", \
+ "ra_auth_code" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"rfp", 9}, /* Historical. */ \
+ {"sb", 9}, /* Historical. */ \
+ {"v7", 10}, \
+ {"sl", 10}, /* Historical. */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+#ifndef OVERLAPPING_REGISTER_NAMES
+#define OVERLAPPING_REGISTER_NAMES \
+{ \
+ {"d0", FIRST_VFP_REGNUM + 0, 2}, \
+ {"d1", FIRST_VFP_REGNUM + 2, 2}, \
+ {"d2", FIRST_VFP_REGNUM + 4, 2}, \
+ {"d3", FIRST_VFP_REGNUM + 6, 2}, \
+ {"d4", FIRST_VFP_REGNUM + 8, 2}, \
+ {"d5", FIRST_VFP_REGNUM + 10, 2}, \
+ {"d6", FIRST_VFP_REGNUM + 12, 2}, \
+ {"d7", FIRST_VFP_REGNUM + 14, 2}, \
+ {"d8", FIRST_VFP_REGNUM + 16, 2}, \
+ {"d9", FIRST_VFP_REGNUM + 18, 2}, \
+ {"d10", FIRST_VFP_REGNUM + 20, 2}, \
+ {"d11", FIRST_VFP_REGNUM + 22, 2}, \
+ {"d12", FIRST_VFP_REGNUM + 24, 2}, \
+ {"d13", FIRST_VFP_REGNUM + 26, 2}, \
+ {"d14", FIRST_VFP_REGNUM + 28, 2}, \
+ {"d15", FIRST_VFP_REGNUM + 30, 2}, \
+ {"q0", FIRST_VFP_REGNUM + 0, 4}, \
+ {"q1", FIRST_VFP_REGNUM + 4, 4}, \
+ {"q2", FIRST_VFP_REGNUM + 8, 4}, \
+ {"q3", FIRST_VFP_REGNUM + 12, 4}, \
+ {"q4", FIRST_VFP_REGNUM + 16, 4}, \
+ {"q5", FIRST_VFP_REGNUM + 20, 4}, \
+ {"q6", FIRST_VFP_REGNUM + 24, 4}, \
+ {"q7", FIRST_VFP_REGNUM + 28, 4}, \
+ {"q8", FIRST_VFP_REGNUM + 32, 4}, \
+ {"q9", FIRST_VFP_REGNUM + 36, 4}, \
+ {"q10", FIRST_VFP_REGNUM + 40, 4}, \
+ {"q11", FIRST_VFP_REGNUM + 44, 4}, \
+ {"q12", FIRST_VFP_REGNUM + 48, 4}, \
+ {"q13", FIRST_VFP_REGNUM + 52, 4}, \
+ {"q14", FIRST_VFP_REGNUM + 56, 4}, \
+ {"q15", FIRST_VFP_REGNUM + 60, 4} \
+}
+#endif
+
+#ifndef NO_DOLLAR_IN_LABEL
+#define NO_DOLLAR_IN_LABEL 1
+#endif
+
+/* Output a function label definition. */
+#ifndef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) \
+ do \
+ { \
+ ARM_DECLARE_FUNCTION_NAME (STREAM, NAME, DECL); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ } \
+ while (0)
+#endif
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP "\t.global\t"
+
+/* Make an internal label into a string. */
+#ifndef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(STRING, PREFIX, NUM) \
+ sprintf (STRING, "*%s%s%u", LOCAL_LABEL_PREFIX, PREFIX, (unsigned int)(NUM))
+#endif
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
+ do \
+ { \
+ gcc_assert (!TARGET_THUMB2); \
+ asm_fprintf (STREAM, "\t.word\t%LL%d\n", VALUE); \
+ } \
+ while (0)
+
+
+/* Thumb-2 always uses addr_diff_elt so that the Table Branch instructions
+ can be used. For non-pic code where the offsets are not suitable for
+ TBB/TBH, the elements are output as absolute labels. */
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
+ do \
+ { \
+ if (TARGET_ARM) \
+ asm_fprintf (STREAM, "\tb\t%LL%d\n", VALUE); \
+ else if (TARGET_THUMB1) \
+ { \
+ if (flag_pic || optimize_size) \
+ { \
+ switch (GET_MODE(body)) \
+ { \
+ case E_QImode: \
+ asm_fprintf (STREAM, "\t.byte\t(%LL%d-%LL%d)/2\n", \
+ VALUE, REL); \
+ break; \
+ case E_HImode: /* TBH */ \
+ asm_fprintf (STREAM, "\t.2byte\t(%LL%d-%LL%d)/2\n", \
+ VALUE, REL); \
+ break; \
+ case E_SImode: \
+ asm_fprintf (STREAM, "\t.word\t%LL%d-%LL%d\n", \
+ VALUE, REL); \
+ break; \
+ default: \
+ gcc_unreachable(); \
+ } \
+ } \
+ else \
+ asm_fprintf (STREAM, "\t.word\t%LL%d+1\n", VALUE); \
+ } \
+ else /* Thumb-2 */ \
+ { \
+ switch (GET_MODE(body)) \
+ { \
+ case E_QImode: /* TBB */ \
+ asm_fprintf (STREAM, "\t.byte\t(%LL%d-%LL%d)/2\n", \
+ VALUE, REL); \
+ break; \
+ case E_HImode: /* TBH */ \
+ asm_fprintf (STREAM, "\t.2byte\t(%LL%d-%LL%d)/2\n", \
+ VALUE, REL); \
+ break; \
+ case E_SImode: \
+ if (flag_pic) \
+ asm_fprintf (STREAM, "\t.word\t%LL%d+1-%LL%d\n", VALUE, REL); \
+ else \
+ asm_fprintf (STREAM, "\t.word\t%LL%d+1\n", VALUE); \
+ break; \
+ default: \
+ gcc_unreachable(); \
+ } \
+ } \
+ } \
+ while (0)
+
+
+#undef ASM_OUTPUT_ASCII
+#define ASM_OUTPUT_ASCII(STREAM, PTR, LEN) \
+ output_ascii_pseudo_op (STREAM, (const unsigned char *) (PTR), LEN)
+
+/* Output a gap. In fact we fill it with nulls. */
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf (STREAM, "\t.space\t%d\n", (int) (NBYTES))
+
+/* Align output to a power of two. Horrible /bin/as. */
+#ifndef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
+ do \
+ { \
+ int amount = 1 << (POWER); \
+ \
+ if (amount == 2) \
+ fprintf (STREAM, "\t.even\n"); \
+ else if (amount != 1) \
+ fprintf (STREAM, "\t.align\t%d\n", amount - 4); \
+ } \
+ while (0)
+#endif
+
+/* Output a common block. */
+#ifndef ASM_OUTPUT_COMMON
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ do \
+ { \
+ fprintf (STREAM, "\t.comm\t"); \
+ assemble_name (STREAM, NAME); \
+ asm_fprintf (STREAM, ", %d\t%@ %d\n", \
+ (int)(ROUNDED), (int)(SIZE)); \
+ } \
+ while (0)
+#endif
+
+/* Output a local common block. /bin/as can't do this, so hack a
+ `.space' into the bss segment. Note that this is *bad* practice,
+ which is guaranteed NOT to work since it doesn't define STATIC
+ COMMON space but merely STATIC BSS space. */
+#ifndef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(STREAM, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ switch_to_section (bss_section); \
+ ASM_OUTPUT_ALIGN (STREAM, floor_log2 (ALIGN / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ fprintf (STREAM, "\t.space\t%d\n", (int)(SIZE)); \
+ } \
+ while (0)
+#endif
+
+/* Output a zero-initialized block. */
+#ifndef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(STREAM, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (STREAM, DECL, NAME, SIZE, ALIGN)
+#endif
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* This works for GAS and some other assemblers. */
+#define SET_ASM_OP "\t.set\t"
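As a worked example of the label machinery above: with the empty LOCAL_LABEL_PREFIX defined earlier in this header, ASM_GENERATE_INTERNAL_LABEL produces names like the following (the buffer size and the number 42 are illustrative; the leading '*' conventionally marks the string as an already-assembled name).

    char buf[32];
    ASM_GENERATE_INTERNAL_LABEL (buf, "L", 42);  /* buf now holds "*L42".  */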
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-flags.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-flags.h
new file mode 100644
index 0000000..b68eb79
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-flags.h
@@ -0,0 +1,35 @@
+/* Flags used to identify the presence of processor capabilities.
+
+ Copyright (C) 2016-2023 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ARM_FLAGS_H
+#define GCC_ARM_FLAGS_H
+
+/* Flags used to identify a few tuning properties. These are for legacy
+ purposes only. Do not add any more of these: use the main tuning tables. */
+#define TF_LDSCHED (1U << 0)
+#define TF_WBUF (1U << 1)
+#define TF_CO_PROC (1U << 2)
+#define TF_SMALLMUL (1U << 3)
+#define TF_STRONG (1U << 4)
+#define TF_XSCALE (1U << 5)
+#define TF_NO_MODE32 (1U << 6)
+
+#endif /* GCC_ARM_FLAGS_H */
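These are plain bits in a mask; a hedged sketch of a query follows (tune_flags itself is declared extern in arm-protos.h, imported later in this change).

    #include "arm-flags.h"

    extern unsigned int tune_flags;

    /* Hedged sketch: test one legacy tuning property.  */
    static int
    tuning_for_xscale_p (void)
    {
      return (tune_flags & TF_XSCALE) != 0;
    }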
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-mlib.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-mlib.h
new file mode 100644
index 0000000..02cfba0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-mlib.h
@@ -0,0 +1,22 @@
+/* Arm multilib default option include file.
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ Contributed by Arm.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#define MULTILIB_DEFAULTS { "mbranch-protection=none" }
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-opts.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-opts.h
new file mode 100644
index 0000000..9964fd2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-opts.h
@@ -0,0 +1,78 @@
+/* Definitions for option handling for ARM.
+ Copyright (C) 1991-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef ARM_OPTS_H
+#define ARM_OPTS_H
+
+#include "arm-flags.h"
+#include "arm-isa.h"
+#include "arm-cpu.h"
+
+/* Which __fp16 format to use.
+ The enumeration values correspond to the numbering for the
+ Tag_ABI_FP_16bit_format attribute.
+ */
+enum arm_fp16_format_type
+{
+ ARM_FP16_FORMAT_NONE = 0,
+ ARM_FP16_FORMAT_IEEE = 1,
+ ARM_FP16_FORMAT_ALTERNATIVE = 2
+};
+
+/* Which ABI to use. */
+enum arm_abi_type
+{
+ ARM_ABI_APCS,
+ ARM_ABI_ATPCS,
+ ARM_ABI_AAPCS,
+ ARM_ABI_IWMMXT,
+ ARM_ABI_AAPCS_LINUX
+};
+
+enum float_abi_type
+{
+ ARM_FLOAT_ABI_SOFT,
+ ARM_FLOAT_ABI_SOFTFP,
+ ARM_FLOAT_ABI_HARD
+};
+
+/* Which thread pointer access sequence to use. */
+enum arm_tp_type {
+ TP_AUTO,
+ TP_SOFT,
+ TP_CP15
+};
+
+/* Which TLS scheme to use. */
+enum arm_tls_type {
+ TLS_GNU,
+ TLS_GNU2
+};
+
+/* Where to get the canary for the stack protector. */
+enum stack_protector_guard {
+ SSP_TLSREG, /* per-thread canary in TLS register */
+ SSP_GLOBAL /* global canary */
+};
+#endif
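Because the arm_fp16_format_type enumerators are defined to equal the Tag_ABI_FP_16bit_format encoding, emitting the build attribute reduces to a cast. A hedged sketch: the tag number 38 is assumed here from the ARM EABI addenda, and arm_emit_eabi_attribute is the helper declared in arm-protos.h below.

    /* Hedged sketch: emit the half-precision format build attribute.  */
    static void
    emit_fp16_attribute (enum arm_fp16_format_type fmt)
    {
      if (fmt != ARM_FP16_FORMAT_NONE)
        arm_emit_eabi_attribute ("Tag_ABI_FP_16bit_format", 38, (int) fmt);
    }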
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-protos.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-protos.h
new file mode 100644
index 0000000..c8ae5e1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm-protos.h
@@ -0,0 +1,601 @@
+/* Prototypes for exported functions defined in arm.cc and pe.c
+ Copyright (C) 1999-2023 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rearnsha@arm.com)
+ Minor hacks by Nick Clifton (nickc@cygnus.com)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ARM_PROTOS_H
+#define GCC_ARM_PROTOS_H
+
+#include "sbitmap.h"
+
+rtl_opt_pass *make_pass_insert_bti (gcc::context *ctxt);
+
+extern enum unwind_info_type arm_except_unwind_info (struct gcc_options *);
+extern int use_return_insn (int, rtx);
+extern bool use_simple_return_p (void);
+extern enum reg_class arm_regno_class (int);
+extern bool arm_check_builtin_call (location_t, vec<location_t>, tree,
+ tree, unsigned int, tree *);
+extern void arm_load_pic_register (unsigned long, rtx);
+extern int arm_volatile_func (void);
+extern void arm_expand_prologue (void);
+extern void arm_expand_epilogue (bool);
+extern void arm_declare_function_name (FILE *, const char *, tree);
+extern void arm_asm_declare_function_name (FILE *, const char *, tree);
+extern void thumb2_expand_return (bool);
+extern const char *arm_strip_name_encoding (const char *);
+extern void arm_asm_output_labelref (FILE *, const char *);
+extern void thumb2_asm_output_opcode (FILE *);
+extern unsigned long arm_current_func_type (void);
+extern HOST_WIDE_INT arm_compute_initial_elimination_offset (unsigned int,
+ unsigned int);
+extern HOST_WIDE_INT thumb_compute_initial_elimination_offset (unsigned int,
+ unsigned int);
+extern unsigned int arm_debugger_regno (unsigned int);
+extern void arm_output_fn_unwind (FILE *, bool);
+
+extern rtx arm_expand_builtin (tree exp, rtx target, rtx subtarget
+ ATTRIBUTE_UNUSED, machine_mode mode
+ ATTRIBUTE_UNUSED, int ignore ATTRIBUTE_UNUSED);
+extern tree arm_builtin_decl (unsigned code, bool initialize_p
+ ATTRIBUTE_UNUSED);
+extern void arm_init_builtins (void);
+extern void arm_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update);
+extern rtx arm_simd_vect_par_cnst_half (machine_mode mode, bool high);
+extern bool arm_simd_check_vect_par_cnst_half_p (rtx op, machine_mode mode,
+ bool high);
+extern void arm_emit_speculation_barrier_function (void);
+extern void arm_decompose_di_binop (rtx, rtx, rtx *, rtx *, rtx *, rtx *);
+extern bool arm_q_bit_access (void);
+extern bool arm_ge_bits_access (void);
+extern bool arm_target_insn_ok_for_lob (rtx);
+
+#ifdef RTX_CODE
+enum reg_class
+arm_mode_base_reg_class (machine_mode);
+extern void arm_gen_unlikely_cbranch (enum rtx_code, machine_mode cc_mode,
+ rtx label_ref);
+extern bool arm_vector_mode_supported_p (machine_mode);
+extern bool arm_small_register_classes_for_mode_p (machine_mode);
+extern int const_ok_for_arm (HOST_WIDE_INT);
+extern int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
+extern int const_ok_for_dimode_op (HOST_WIDE_INT, enum rtx_code);
+extern void thumb1_gen_const_int_rtl (rtx, HOST_WIDE_INT);
+extern void thumb1_gen_const_int_print (rtx, HOST_WIDE_INT);
+extern int arm_split_constant (RTX_CODE, machine_mode, rtx,
+ HOST_WIDE_INT, rtx, rtx, int);
+extern int legitimate_pic_operand_p (rtx);
+extern rtx legitimize_pic_address (rtx, machine_mode, rtx, rtx, bool);
+extern rtx legitimize_tls_address (rtx, rtx);
+extern bool arm_legitimate_address_p (machine_mode, rtx, bool);
+extern int arm_legitimate_address_outer_p (machine_mode, rtx, RTX_CODE, int);
+extern int thumb_legitimate_offset_p (machine_mode, HOST_WIDE_INT);
+extern int thumb1_legitimate_address_p (machine_mode, rtx, int);
+extern bool ldm_stm_operation_p (rtx, bool, machine_mode mode,
+ bool, bool);
+extern bool clear_operation_p (rtx, bool);
+extern int arm_const_double_rtx (rtx);
+extern int vfp3_const_double_rtx (rtx);
+extern int simd_immediate_valid_for_move (rtx, machine_mode, rtx *, int *);
+extern int neon_immediate_valid_for_logic (rtx, machine_mode, int, rtx *,
+ int *);
+extern int neon_immediate_valid_for_shift (rtx, machine_mode, rtx *,
+ int *, bool);
+extern char *neon_output_logic_immediate (const char *, rtx *,
+ machine_mode, int, int);
+extern char *neon_output_shift_immediate (const char *, char, rtx *,
+ machine_mode, int, bool);
+extern void neon_pairwise_reduce (rtx, rtx, machine_mode,
+ rtx (*) (rtx, rtx, rtx));
+extern rtx mve_bool_vec_to_const (rtx const_vec);
+extern rtx neon_make_constant (rtx, bool generate = true);
+extern void neon_expand_vector_init (rtx, rtx);
+extern void neon_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT, const_tree);
+extern void arm_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+extern HOST_WIDE_INT neon_element_bits (machine_mode);
+extern void neon_emit_pair_result_insn (machine_mode,
+ rtx (*) (rtx, rtx, rtx, rtx),
+ rtx, rtx, rtx);
+extern void neon_disambiguate_copy (rtx *, rtx *, rtx *, unsigned int);
+extern void neon_split_vcombine (rtx op[3]);
+extern enum reg_class coproc_secondary_reload_class (machine_mode, rtx,
+ bool);
+extern bool arm_tls_referenced_p (rtx);
+
+extern int arm_coproc_mem_operand (rtx, bool);
+extern int arm_coproc_mem_operand_no_writeback (rtx);
+extern int arm_coproc_mem_operand_wb (rtx, int);
+extern int neon_vector_mem_operand (rtx, int, bool);
+extern int mve_vector_mem_operand (machine_mode, rtx, bool);
+extern int neon_struct_mem_operand (rtx);
+extern int mve_struct_mem_operand (rtx);
+
+extern rtx *neon_vcmla_lane_prepare_operands (rtx *);
+
+extern int tls_mentioned_p (rtx);
+extern int symbol_mentioned_p (rtx);
+extern int label_mentioned_p (rtx);
+extern RTX_CODE minmax_code (rtx);
+extern bool arm_sat_operator_match (rtx, rtx, int *, bool *);
+extern int adjacent_mem_locations (rtx, rtx);
+extern bool gen_ldm_seq (rtx *, int, bool);
+extern bool gen_stm_seq (rtx *, int);
+extern bool gen_const_stm_seq (rtx *, int);
+extern rtx arm_gen_load_multiple (int *, int, rtx, int, rtx, HOST_WIDE_INT *);
+extern rtx arm_gen_store_multiple (int *, int, rtx, int, rtx, HOST_WIDE_INT *);
+extern bool offset_ok_for_ldrd_strd (HOST_WIDE_INT);
+extern bool operands_ok_ldrd_strd (rtx, rtx, rtx, HOST_WIDE_INT, bool, bool);
+extern bool gen_operands_ldrd_strd (rtx *, bool, bool, bool);
+extern bool valid_operands_ldrd_strd (rtx *, bool);
+extern int arm_gen_cpymemqi (rtx *);
+extern bool gen_cpymem_ldrd_strd (rtx *);
+extern machine_mode arm_select_cc_mode (RTX_CODE, rtx, rtx);
+extern machine_mode arm_select_dominance_cc_mode (rtx, rtx,
+ HOST_WIDE_INT);
+extern rtx arm_gen_compare_reg (RTX_CODE, rtx, rtx, rtx);
+extern rtx arm_gen_return_addr_mask (void);
+extern void arm_reload_in_hi (rtx *);
+extern void arm_reload_out_hi (rtx *);
+extern int arm_max_const_double_inline_cost (void);
+extern int arm_const_double_inline_cost (rtx);
+extern bool arm_const_double_by_parts (rtx);
+extern bool arm_const_double_by_immediates (rtx);
+extern rtx arm_load_function_descriptor (rtx funcdesc);
+extern void arm_emit_call_insn (rtx, rtx, bool);
+bool detect_cmse_nonsecure_call (tree);
+extern const char *output_call (rtx *);
+void arm_emit_movpair (rtx, rtx);
+extern const char *output_mov_long_double_arm_from_arm (rtx *);
+extern const char *output_move_double (rtx *, bool, int *count);
+extern const char *output_move_quad (rtx *);
+extern int arm_count_output_move_double_insns (rtx *);
+extern int arm_count_ldrdstrd_insns (rtx *, bool);
+extern const char *output_move_vfp (rtx *operands);
+extern const char *output_move_neon (rtx *operands);
+extern int arm_attr_length_move_neon (rtx_insn *);
+extern int arm_address_offset_is_imm (rtx_insn *);
+extern const char *output_add_immediate (rtx *);
+extern const char *arithmetic_instr (rtx, int);
+extern void output_ascii_pseudo_op (FILE *, const unsigned char *, int);
+extern const char *output_return_instruction (rtx, bool, bool, bool);
+extern const char *output_probe_stack_range (rtx, rtx);
+extern void arm_poke_function_name (FILE *, const char *);
+extern void arm_final_prescan_insn (rtx_insn *);
+extern int arm_debugger_arg_offset (int, rtx);
+extern bool arm_is_long_call_p (tree);
+extern int arm_emit_vector_const (FILE *, rtx);
+extern void arm_emit_fp16_const (rtx c);
+extern const char * arm_output_load_gr (rtx *);
+extern const char *vfp_output_vstmd (rtx *);
+extern void arm_output_multireg_pop (rtx *, bool, rtx, bool, bool);
+extern void arm_set_return_address (rtx, rtx);
+extern int arm_eliminable_register (rtx);
+extern const char *arm_output_shift(rtx *, int);
+extern const char *arm_output_iwmmxt_shift_immediate (const char *, rtx *, bool);
+extern const char *arm_output_iwmmxt_tinsr (rtx *);
+extern unsigned int arm_sync_loop_insns (rtx, rtx *);
+extern int arm_attr_length_push_multi(rtx, rtx);
+extern int arm_attr_length_pop_multi(rtx *, bool, bool);
+extern void arm_expand_compare_and_swap (rtx op[]);
+extern void arm_split_compare_and_swap (rtx op[]);
+extern void arm_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx arm_load_tp (rtx);
+extern bool arm_coproc_builtin_available (enum unspecv);
+extern bool arm_coproc_ldc_stc_legitimate_address (rtx);
+extern rtx arm_stack_protect_tls_canary_mem (bool);
+
+
+#if defined TREE_CODE
+extern void arm_init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree);
+extern bool arm_pad_reg_upward (machine_mode, tree, int);
+#endif
+extern int arm_apply_result_size (void);
+extern opt_machine_mode arm_get_mask_mode (machine_mode mode);
+
+#endif /* RTX_CODE */
+
+/* MVE functions. */
+namespace arm_mve {
+ void handle_arm_mve_types_h ();
+}
+
+/* Thumb functions. */
+extern void arm_init_expanders (void);
+extern const char *thumb1_unexpanded_epilogue (void);
+extern void thumb1_expand_prologue (void);
+extern void thumb1_expand_epilogue (void);
+extern const char *thumb1_output_interwork (void);
+extern int thumb_shiftable_const (unsigned HOST_WIDE_INT);
+#ifdef RTX_CODE
+extern enum arm_cond_code maybe_get_arm_condition_code (rtx);
+extern void thumb1_final_prescan_insn (rtx_insn *);
+extern void thumb2_final_prescan_insn (rtx_insn *);
+extern const char *thumb_load_double_from_address (rtx *);
+extern const char *thumb_output_move_mem_multiple (int, rtx *);
+extern const char *thumb_call_via_reg (rtx);
+extern void thumb_expand_cpymemqi (rtx *);
+extern rtx arm_return_addr (int, rtx);
+extern void thumb_reload_out_hi (rtx *);
+extern void thumb_set_return_address (rtx, rtx);
+extern const char *thumb1_output_casesi (rtx *);
+extern const char *thumb2_output_casesi (rtx *);
+#endif
+
+/* Defined in pe.c. */
+extern int arm_dllexport_name_p (const char *);
+extern int arm_dllimport_name_p (const char *);
+
+#ifdef TREE_CODE
+extern void arm_pe_unique_section (tree, int);
+extern void arm_pe_encode_section_info (tree, rtx, int);
+extern int arm_dllexport_p (tree);
+extern int arm_dllimport_p (tree);
+extern void arm_mark_dllexport (tree);
+extern void arm_mark_dllimport (tree);
+extern bool arm_change_mode_p (tree);
+#endif
+
+extern tree arm_valid_target_attribute_tree (tree, struct gcc_options *,
+ struct gcc_options *);
+extern void arm_configure_build_target (struct arm_build_target *,
+ struct cl_target_option *, bool);
+extern void arm_option_reconfigure_globals (void);
+extern void arm_options_perform_arch_sanity_checks (void);
+extern void arm_pr_long_calls (struct cpp_reader *);
+extern void arm_pr_no_long_calls (struct cpp_reader *);
+extern void arm_pr_long_calls_off (struct cpp_reader *);
+
+extern const char *arm_mangle_type (const_tree);
+extern const char *arm_mangle_builtin_type (const_tree);
+
+extern void arm_order_regs_for_local_alloc (void);
+
+extern int arm_max_conditional_execute ();
+
+/* Vectorizer cost model implementation. */
+struct cpu_vec_costs {
+ const int scalar_stmt_cost; /* Cost of any scalar operation, excluding
+ load and store. */
+ const int scalar_load_cost; /* Cost of scalar load. */
+ const int scalar_store_cost; /* Cost of scalar store. */
+ const int vec_stmt_cost; /* Cost of any vector operation, excluding
+ load, store, vector-to-scalar and
+ scalar-to-vector operation. */
+ const int vec_to_scalar_cost; /* Cost of vector-to-scalar operation. */
+ const int scalar_to_vec_cost; /* Cost of scalar-to-vector operation. */
+ const int vec_align_load_cost; /* Cost of aligned vector load. */
+ const int vec_unalign_load_cost; /* Cost of unaligned vector load. */
+ const int vec_unalign_store_cost; /* Cost of unaligned vector store. */
+ const int vec_store_cost; /* Cost of vector store. */
+ const int cond_taken_branch_cost; /* Cost of taken branch for vectorizer
+ cost model. */
+ const int cond_not_taken_branch_cost;/* Cost of not taken branch for
+ vectorizer cost model. */
+};
+
+#ifdef RTX_CODE
+/* This needs to be here because we need RTX_CODE and similar. */
+
+struct cpu_cost_table;
+
+/* Addressing mode operations. Used to index tables in struct
+ addr_mode_cost_table. */
+enum arm_addr_mode_op
+{
+ AMO_DEFAULT,
+ AMO_NO_WB, /* Offset with no writeback. */
+ AMO_WB, /* Offset with writeback. */
+ AMO_MAX /* For array size. */
+};
+
+/* Table of additional costs in units of COSTS_N_INSNS() when using
+ addressing modes for each access type. */
+struct addr_mode_cost_table
+{
+ const int integer[AMO_MAX];
+ const int fp[AMO_MAX];
+ const int vector[AMO_MAX];
+};
+
+/* Dump function ARM_PRINT_TUNE_INFO should be updated whenever this
+ structure is modified. */
+
+struct tune_params
+{
+ const struct cpu_cost_table *insn_extra_cost;
+ const struct addr_mode_cost_table *addr_mode_costs;
+ bool (*sched_adjust_cost) (rtx_insn *, int, rtx_insn *, int *);
+ int (*branch_cost) (bool, bool);
+ /* Vectorizer costs. */
+ const struct cpu_vec_costs* vec_costs;
+ int constant_limit;
+ /* Maximum number of instructions to conditionalise. */
+ int max_insns_skipped;
+ /* Maximum number of instructions to inline calls to memset. */
+ int max_insns_inline_memset;
+ /* Issue rate of the processor. */
+ unsigned int issue_rate;
+ /* Explicit prefetch data. */
+ struct
+ {
+ int num_slots;
+ int l1_cache_size;
+ int l1_cache_line_size;
+ } prefetch;
+ enum {PREF_CONST_POOL_FALSE, PREF_CONST_POOL_TRUE}
+ prefer_constant_pool: 1;
+ /* Prefer STRD/LDRD instructions over PUSH/POP/LDM/STM. */
+ enum {PREF_LDRD_FALSE, PREF_LDRD_TRUE} prefer_ldrd_strd: 1;
+ /* The preference for non-short-circuit operation when optimizing for
+ performance. The first element covers Thumb state and the second one
+ is for ARM state. */
+ enum log_op_non_short_circuit {LOG_OP_NON_SHORT_CIRCUIT_FALSE,
+ LOG_OP_NON_SHORT_CIRCUIT_TRUE};
+ log_op_non_short_circuit logical_op_non_short_circuit_thumb: 1;
+ log_op_non_short_circuit logical_op_non_short_circuit_arm: 1;
+ /* Prefer 32-bit encoding instead of flag-setting 16-bit encoding. */
+ enum {DISPARAGE_FLAGS_NEITHER, DISPARAGE_FLAGS_PARTIAL, DISPARAGE_FLAGS_ALL}
+ disparage_flag_setting_t16_encodings: 2;
+ /* Prefer to inline string operations like memset by using Neon. */
+ enum {PREF_NEON_STRINGOPS_FALSE, PREF_NEON_STRINGOPS_TRUE}
+ string_ops_prefer_neon: 1;
+ /* Bitfield encoding the fusible pairs of instructions. Use FUSE_OPS
+ in an initializer if multiple fusion operations are supported on a
+ target. */
+ enum fuse_ops
+ {
+ FUSE_NOTHING = 0,
+ FUSE_MOVW_MOVT = 1 << 0,
+ FUSE_AES_AESMC = 1 << 1
+ } fusible_ops: 2;
+ /* Depth of scheduling queue to check for L2 autoprefetcher. */
+ enum {SCHED_AUTOPREF_OFF, SCHED_AUTOPREF_RANK, SCHED_AUTOPREF_FULL}
+ sched_autopref: 2;
+};
+
+/* Smash multiple fusion operations into a type that can be used for an
+ initializer. */
+#define FUSE_OPS(x) ((tune_params::fuse_ops) (x))
+
+extern const struct tune_params *current_tune;
+extern int vfp3_const_double_for_fract_bits (rtx);
+/* Return the power of two represented by the operand, otherwise 0. */
+extern int vfp3_const_double_for_bits (rtx);
+
+extern void arm_emit_coreregs_64bit_shift (enum rtx_code, rtx, rtx, rtx, rtx,
+ rtx);
+extern bool arm_fusion_enabled_p (tune_params::fuse_ops);
+extern bool arm_current_function_pac_enabled_p (void);
+extern bool arm_valid_symbolic_address_p (rtx);
+extern bool arm_validize_comparison (rtx *, rtx *, rtx *);
+extern bool arm_expand_vector_compare (rtx, rtx_code, rtx, rtx, bool);
+#endif /* RTX_CODE */
+
+extern bool arm_gen_setmem (rtx *);
+extern void arm_expand_vcond (rtx *, machine_mode);
+extern void arm_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel);
+
+extern bool arm_autoinc_modes_ok_p (machine_mode, enum arm_auto_incmodes);
+
+extern void arm_emit_eabi_attribute (const char *, int, int);
+
+extern void arm_reset_previous_fndecl (void);
+extern void save_restore_target_globals (tree);
+
+/* Defined in gcc/common/config/arm-common.cc. */
+extern const char *arm_rewrite_selected_cpu (const char *name);
+
+/* Defined in gcc/common/config/arm-c.cc. */
+extern void arm_lang_object_attributes_init (void);
+extern void arm_register_target_pragmas (void);
+extern void arm_cpu_cpp_builtins (struct cpp_reader *);
+
+extern bool arm_is_constant_pool_ref (rtx);
+
+/* The bits in this mask specify which instruction scheduling options should
+ be used. */
+extern unsigned int tune_flags;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions. */
+extern int arm_arch4;
+
+/* Nonzero if this chip supports the ARM Architecture 4t extensions. */
+extern int arm_arch4t;
+
+/* Nonzero if this chip supports the ARM Architecture 5t extensions. */
+extern int arm_arch5t;
+
+/* Nonzero if this chip supports the ARM Architecture 5te extensions. */
+extern int arm_arch5te;
+
+/* Nonzero if this chip supports the ARM Architecture 6 extensions. */
+extern int arm_arch6;
+
+/* Nonzero if this chip supports the ARM 6K extensions. */
+extern int arm_arch6k;
+
+/* Nonzero if this chip supports the ARM 6KZ extensions. */
+extern int arm_arch6kz;
+
+/* Nonzero if instructions present in ARMv6-M can be used. */
+extern int arm_arch6m;
+
+/* Nonzero if this chip supports the ARM 7 extensions. */
+extern int arm_arch7;
+
+/* Nonzero if this chip supports the Large Physical Address Extension. */
+extern int arm_arch_lpae;
+
+/* Nonzero if instructions not present in the 'M' profile can be used. */
+extern int arm_arch_notm;
+
+/* Nonzero if instructions present in ARMv7E-M can be used. */
+extern int arm_arch7em;
+
+/* Nonzero if instructions present in ARMv8 can be used. */
+extern int arm_arch8;
+
+/* Nonzero if this chip can benefit from load scheduling. */
+extern int arm_ld_sched;
+
+/* Nonzero if this chip is a StrongARM. */
+extern int arm_tune_strongarm;
+
+/* Nonzero if this chip supports Intel Wireless MMX technology. */
+extern int arm_arch_iwmmxt;
+
+/* Nonzero if this chip supports Intel Wireless MMX2 technology. */
+extern int arm_arch_iwmmxt2;
+
+/* Nonzero if this chip is an XScale. */
+extern int arm_arch_xscale;
+
+/* Nonzero if tuning for XScale. */
+extern int arm_tune_xscale;
+
+/* Nonzero if we want to tune for stores that access the write-buffer.
+ This typically means an ARM6 or ARM7 with MMU or MPU. */
+extern int arm_tune_wbuf;
+
+/* Nonzero if tuning for Cortex-A9. */
+extern int arm_tune_cortex_a9;
+
+/* Nonzero if we should define __THUMB_INTERWORK__ in the
+ preprocessor.
+ XXX This is a bit of a hack; it's intended to help work around
+ problems in GLD, which doesn't understand that armv5t code is
+ interworking clean. */
+extern int arm_cpp_interwork;
+
+/* Nonzero if chip supports Thumb 1. */
+extern int arm_arch_thumb1;
+
+/* Nonzero if chip supports Thumb 2. */
+extern int arm_arch_thumb2;
+
+/* Nonzero if chip supports integer division instruction. */
+extern int arm_arch_arm_hwdiv;
+extern int arm_arch_thumb_hwdiv;
+
+/* Nonzero if chip disallows volatile memory access in IT block. */
+extern int arm_arch_no_volatile_ce;
+
+/* Structure defining the current overall architectural target and tuning. */
+struct arm_build_target
+{
+ /* Name of the target CPU, if known, or NULL if the target CPU was not
+ specified by the user (and inferred from the -march option). */
+ const char *core_name;
+ /* Name of the target ARCH. NULL if there is a selected CPU. */
+ const char *arch_name;
+ /* Preprocessor substring (never NULL). */
+ const char *arch_pp_name;
+ /* The base architecture value. */
+ enum base_architecture base_arch;
+ /* The profile letter for the architecture, upper case by convention. */
+ char profile;
+ /* Bitmap encapsulating the isa_bits for the target environment. */
+ sbitmap isa;
+ /* Flags used for tuning. Long term, these move into tune_params. */
+ unsigned int tune_flags;
+ /* Tables with more detailed tuning information. */
+ const struct tune_params *tune;
+ /* CPU identifier for the tuning target. */
+ enum processor_type tune_core;
+};
+
+extern struct arm_build_target arm_active_target;
+
+/* Table entry for a CPU alias. */
+struct cpu_alias
+{
+ /* The alias name. */
+ const char *const name;
+ /* True if the name should be displayed in help text listing cpu names. */
+ bool visible;
+};
+
+/* Table entry for an architectural feature extension. */
+struct cpu_arch_extension
+{
+ /* Feature name. */
+ const char *const name;
+ /* True if the option is negative (removes extensions). */
+ bool remove;
+ /* True if the option is an alias for another option with identical effect;
+ the option will be ignored for canonicalization. */
+ bool alias;
+ /* The modifier bits. */
+ const enum isa_feature isa_bits[isa_num_bits];
+};
+
+/* Common elements of both CPU and architectural options. */
+struct cpu_arch_option
+{
+ /* Name for this option. */
+ const char *name;
+ /* List of feature extensions permitted. */
+ const struct cpu_arch_extension *extensions;
+ /* Standard feature bits. */
+ enum isa_feature isa_bits[isa_num_bits];
+};
+
+/* Table entry for an architecture entry. */
+struct arch_option
+{
+ /* Common option fields. */
+ cpu_arch_option common;
+ /* Short string for this architecture. */
+ const char *arch;
+ /* Base architecture, from which this specific architecture is derived. */
+ enum base_architecture base_arch;
+ /* The profile letter for the architecture, upper case by convention. */
+ const char profile;
+ /* Default tune target (in the absence of any more specific data). */
+ enum processor_type tune_id;
+};
+
+/* Table entry for a CPU entry. */
+struct cpu_option
+{
+ /* Common option fields. */
+ cpu_arch_option common;
+ /* List of aliases for this CPU. */
+ const struct cpu_alias *aliases;
+ /* Architecture upon which this CPU is based. */
+ enum arch_type arch;
+};
+
+extern const arch_option all_architectures[];
+extern const cpu_option all_cores[];
+
+
+const cpu_option *arm_parse_cpu_option_name (const cpu_option *, const char *,
+ const char *, bool = true);
+const arch_option *arm_parse_arch_option_name (const arch_option *,
+ const char *, const char *, bool = true);
+void arm_parse_option_features (sbitmap, const cpu_arch_option *,
+ const char *);
+
+void arm_initialize_isa (sbitmap, const enum isa_feature *);
+
+const char *arm_gen_far_branch (rtx *, int, const char *, const char *);
+
+bool arm_mve_immediate_check (rtx, machine_mode, bool);
+#endif /* ! GCC_ARM_PROTOS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm.h
new file mode 100644
index 0000000..7d40b8b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/arm.h
@@ -0,0 +1,2544 @@
+/* Definitions of target machine for GNU compiler, for ARM.
+ Copyright (C) 1991-2023 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rearnsha@arm.com)
+ Minor hacks by Nick Clifton (nickc@cygnus.com)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ARM_H
+#define GCC_ARM_H
+
+/* We can't use machine_mode inside a generator file because it
+ hasn't been created yet; we shouldn't be using any code that
+ needs the real definition though, so this ought to be safe. */
+#ifdef GENERATOR_FILE
+#define MACHMODE int
+#else
+#include "insn-modes.h"
+#define MACHMODE machine_mode
+#endif
+
+#include "config/vxworks-dummy.h"
+
+/* The architecture define. */
+extern char arm_arch_name[];
+
+/* Target CPU builtins. */
+#define TARGET_CPU_CPP_BUILTINS() arm_cpu_cpp_builtins (pfile)
+
+#include "config/arm/arm-opts.h"
+
+/* The processor for which instructions should be scheduled. */
+extern enum processor_type arm_tune;
+
+typedef enum arm_cond_code
+{
+ ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
+ ARM_HI, ARM_LS, ARM_GE, ARM_LT, ARM_GT, ARM_LE, ARM_AL, ARM_NV
+}
+arm_cc;
+
+extern arm_cc arm_current_cc;
+
+#define ARM_INVERSE_CONDITION_CODE(X) ((arm_cc) (((int)X) ^ 1))
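+
+/* Illustrative note, not part of the upstream header: the XOR-with-1
+ inversion works because the arm_cc encoding above places each condition
+ next to its logical inverse. */
+#if 0 /* Example only; never compiled. */
+STATIC_ASSERT (ARM_INVERSE_CONDITION_CODE (ARM_EQ) == ARM_NE); /* 0 ^ 1 */
+STATIC_ASSERT (ARM_INVERSE_CONDITION_CODE (ARM_GE) == ARM_LT); /* 10 ^ 1 */
+STATIC_ASSERT (ARM_INVERSE_CONDITION_CODE (ARM_HI) == ARM_LS); /* 8 ^ 1 */
+#endif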
+
+/* The maximum number of instructions that is beneficial to
+ conditionally execute. */
+#undef MAX_CONDITIONAL_EXECUTE
+#define MAX_CONDITIONAL_EXECUTE arm_max_conditional_execute ()
+
+extern int arm_target_label;
+extern int arm_ccfsm_state;
+extern GTY(()) rtx arm_target_insn;
+/* Callback to output language specific object attributes. */
+extern void (*arm_lang_output_object_attributes_hook)(void);
+
+/* This type is the user-visible __fp16. We need it in a few places in
+ the backend. Defined in arm-builtins.cc. */
+extern tree arm_fp16_type_node;
+
+/* This type is the user-visible __bf16. We need it in a few places in
+ the backend. Defined in arm-builtins.cc. */
+extern tree arm_bf16_type_node;
+extern tree arm_bf16_ptr_type_node;
+
+
+#undef CPP_SPEC
+#define CPP_SPEC "%(subtarget_cpp_spec)"
+
+#ifndef CC1_SPEC
+#define CC1_SPEC ""
+#endif
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+ specification name, and a string constant that is used by the GCC driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+#define EXTRA_SPECS \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
+ { "asm_cpu_spec", ASM_CPU_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS
+#endif
+
+#ifndef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC ""
+#endif
+
+/* Tree Target Specification. */
+#define TARGET_ARM_P(flags) (!TARGET_THUMB_P (flags))
+#define TARGET_THUMB1_P(flags) (TARGET_THUMB_P (flags) && !arm_arch_thumb2)
+#define TARGET_THUMB2_P(flags) (TARGET_THUMB_P (flags) && arm_arch_thumb2)
+#define TARGET_32BIT_P(flags) (TARGET_ARM_P (flags) || TARGET_THUMB2_P (flags))
+
+/* Run-time Target Specification. */
+/* Use hardware floating point instructions. -mgeneral-regs-only prevents
+ the use of floating point instructions and registers but does not prevent
+ emission of floating point pcs attributes. */
+#define TARGET_HARD_FLOAT_SUB (arm_float_abi != ARM_FLOAT_ABI_SOFT \
+ && bitmap_bit_p (arm_active_target.isa, \
+ isa_bit_vfpv2) \
+ && TARGET_32BIT)
+
+#define TARGET_HARD_FLOAT (TARGET_HARD_FLOAT_SUB \
+ && !TARGET_GENERAL_REGS_ONLY)
+
+#define TARGET_SOFT_FLOAT (!TARGET_HARD_FLOAT_SUB)
+/* User has permitted use of FP instructions, if they exist for this
+ target. */
+#define TARGET_MAYBE_HARD_FLOAT (arm_float_abi != ARM_FLOAT_ABI_SOFT)
+/* Use hardware floating point calling convention. */
+#define TARGET_HARD_FLOAT_ABI (arm_float_abi == ARM_FLOAT_ABI_HARD)
+#define TARGET_IWMMXT (arm_arch_iwmmxt)
+#define TARGET_IWMMXT2 (arm_arch_iwmmxt2)
+#define TARGET_REALLY_IWMMXT (TARGET_IWMMXT && TARGET_32BIT \
+ && !TARGET_GENERAL_REGS_ONLY)
+#define TARGET_REALLY_IWMMXT2 (TARGET_IWMMXT2 && TARGET_32BIT \
+ && !TARGET_GENERAL_REGS_ONLY)
+#define TARGET_IWMMXT_ABI (TARGET_32BIT && arm_abi == ARM_ABI_IWMMXT)
+#define TARGET_ARM (! TARGET_THUMB)
+#define TARGET_EITHER 1 /* (TARGET_ARM | TARGET_THUMB) */
+#define TARGET_BACKTRACE (crtl->is_leaf \
+ ? TARGET_TPCS_LEAF_FRAME \
+ : TARGET_TPCS_FRAME)
+#define TARGET_AAPCS_BASED \
+ (arm_abi != ARM_ABI_APCS && arm_abi != ARM_ABI_ATPCS)
+
+#define TARGET_HARD_TP (target_thread_pointer == TP_CP15)
+#define TARGET_SOFT_TP (target_thread_pointer == TP_SOFT)
+#define TARGET_GNU2_TLS (target_tls_dialect == TLS_GNU2)
+
+/* Only 16-bit thumb code. */
+#define TARGET_THUMB1 (TARGET_THUMB && !arm_arch_thumb2)
+/* Arm or Thumb-2 32-bit code. */
+#define TARGET_32BIT (TARGET_ARM || arm_arch_thumb2)
+/* 32-bit Thumb-2 code. */
+#define TARGET_THUMB2 (TARGET_THUMB && arm_arch_thumb2)
+/* Thumb-1 only. */
+#define TARGET_THUMB1_ONLY (TARGET_THUMB1 && !arm_arch_notm)
+
+#define TARGET_LDRD (arm_arch5te && ARM_DOUBLEWORD_ALIGN \
+ && !TARGET_THUMB1)
+
+#define TARGET_CRC32 (arm_arch_crc)
+
+/* Thumb-2 but also has some conditional arithmetic instructions like csinc,
+ csinv, etc. */
+#define TARGET_COND_ARITH (arm_arch8_1m_main)
+
+/* The following two macros concern the ability to execute coprocessor
+ instructions for VFPv3 or NEON. TARGET_VFP3/TARGET_VFPD32 are currently
+ only ever tested when we know we are generating for VFP hardware; we need
+ to be more careful with TARGET_NEON as noted below. */
+
+/* FPU has the full VFPv3/NEON register file of 32 D registers. */
+#define TARGET_VFPD32 (bitmap_bit_p (arm_active_target.isa, isa_bit_fp_d32))
+
+/* FPU supports VFPv3 instructions. */
+#define TARGET_VFP3 (bitmap_bit_p (arm_active_target.isa, isa_bit_vfpv3))
+
+/* FPU supports FPv5 instructions. */
+#define TARGET_VFP5 (bitmap_bit_p (arm_active_target.isa, isa_bit_fpv5))
+
+/* FPU only supports VFP single-precision instructions. */
+#define TARGET_VFP_SINGLE (!TARGET_VFP_DOUBLE)
+
+/* FPU supports VFP double-precision instructions. */
+#define TARGET_VFP_DOUBLE (bitmap_bit_p (arm_active_target.isa, isa_bit_fp_dbl))
+
+/* FPU supports half-precision floating-point with NEON element load/store. */
+#define TARGET_NEON_FP16 \
+ (bitmap_bit_p (arm_active_target.isa, isa_bit_neon) \
+ && bitmap_bit_p (arm_active_target.isa, isa_bit_fp16conv))
+
+/* FPU supports VFP half-precision floating-point conversions. */
+#define TARGET_FP16 (bitmap_bit_p (arm_active_target.isa, isa_bit_fp16conv))
+
+/* FPU supports converting between HFmode and DFmode in a single hardware
+ step. */
+#define TARGET_FP16_TO_DOUBLE \
+ (TARGET_HARD_FLOAT && TARGET_FP16 && TARGET_VFP5 && TARGET_VFP_DOUBLE)
+
+/* FPU supports fused-multiply-add operations. */
+#define TARGET_FMA (bitmap_bit_p (arm_active_target.isa, isa_bit_vfpv4))
+
+/* FPU supports Crypto extensions. */
+#define TARGET_CRYPTO (bitmap_bit_p (arm_active_target.isa, isa_bit_crypto))
+
+/* FPU supports Neon instructions. The setting of this macro is
+ revealed via __ARM_NEON__, so we add extra guards on TARGET_32BIT
+ and TARGET_HARD_FLOAT to ensure that NEON instructions are actually
+ available. */
+#define TARGET_NEON \
+ (TARGET_32BIT && TARGET_HARD_FLOAT \
+ && bitmap_bit_p (arm_active_target.isa, isa_bit_neon))
+
+/* FPU supports ARMv8.1 Adv.SIMD extensions. */
+#define TARGET_NEON_RDMA (TARGET_NEON && arm_arch8_1)
+
+/* Supports the Dot Product AdvSIMD extensions. */
+#define TARGET_DOTPROD (TARGET_NEON && TARGET_VFP5 \
+ && bitmap_bit_p (arm_active_target.isa, \
+ isa_bit_dotprod) \
+ && arm_arch8_2)
+
+/* Supports the Armv8.3-a Complex number AdvSIMD extensions. */
+#define TARGET_COMPLEX (TARGET_NEON && arm_arch8_3)
+
+/* FPU supports the floating point FP16 instructions for ARMv8.2-A
+ and later. */
+#define TARGET_VFP_FP16INST \
+ (TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP5 && arm_fp16_inst)
+
+/* Target supports the floating point FP16 instructions from ARMv8.2-A
+ and later. */
+#define TARGET_FP16FML (TARGET_NEON \
+ && bitmap_bit_p (arm_active_target.isa, \
+ isa_bit_fp16fml) \
+ && arm_arch8_2)
+
+/* FPU supports the AdvSIMD FP16 instructions for ARMv8.2 and later. */
+#define TARGET_NEON_FP16INST (TARGET_VFP_FP16INST && TARGET_NEON_RDMA)
+
+/* FPU supports 8-bit Integer Matrix Multiply (I8MM) AdvSIMD extensions. */
+#define TARGET_I8MM (TARGET_NEON && arm_arch8_2 && arm_arch_i8mm)
+
+/* FPU supports Brain half-precision floating-point (BFloat16) extension. */
+#define TARGET_BF16_FP (TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP5 \
+ && arm_arch8_2 && arm_arch_bf16)
+#define TARGET_BF16_SIMD (TARGET_NEON && TARGET_VFP5 \
+ && arm_arch8_2 && arm_arch_bf16)
+
+/* Q-bit is present. */
+#define TARGET_ARM_QBIT \
+ (TARGET_32BIT && arm_arch5te && (arm_arch_notm || arm_arch7))
+/* Saturation operation, e.g. SSAT. */
+#define TARGET_ARM_SAT \
+ (TARGET_32BIT && arm_arch6 && (arm_arch_notm || arm_arch7))
+/* "DSP" multiply instructions, eg. SMULxy. */
+#define TARGET_DSP_MULTIPLY \
+ (TARGET_32BIT && arm_arch5te && (arm_arch_notm || arm_arch7em))
+/* Integer SIMD instructions, and extend-accumulate instructions. */
+#define TARGET_INT_SIMD \
+ (TARGET_32BIT && arm_arch6 && (arm_arch_notm || arm_arch7em))
+
+/* Should MOVW/MOVT be used in preference to a constant pool. */
+#define TARGET_USE_MOVT \
+ (TARGET_HAVE_MOVT \
+ && (arm_disable_literal_pool \
+ || (!optimize_size && !current_tune->prefer_constant_pool)))
+
+/* Nonzero if this chip provides the DMB instruction. */
+#define TARGET_HAVE_DMB (arm_arch6m || arm_arch7)
+
+/* Nonzero if this chip implements a memory barrier via CP15. */
+#define TARGET_HAVE_DMB_MCR (arm_arch6 && ! TARGET_HAVE_DMB \
+ && ! TARGET_THUMB1)
+
+/* Nonzero if this chip implements a memory barrier instruction. */
+#define TARGET_HAVE_MEMORY_BARRIER (TARGET_HAVE_DMB || TARGET_HAVE_DMB_MCR)
+
+/* Nonzero if this chip supports ldrex and strex. */
+#define TARGET_HAVE_LDREX ((arm_arch6 && TARGET_ARM) \
+ || arm_arch7 \
+ || (arm_arch8 && !arm_arch_notm))
+
+/* Nonzero if this chip supports LPAE. */
+#define TARGET_HAVE_LPAE (arm_arch_lpae)
+
+/* Nonzero if this chip supports ldrex{bh} and strex{bh}. */
+#define TARGET_HAVE_LDREXBH ((arm_arch6k && TARGET_ARM) \
+ || arm_arch7 \
+ || (arm_arch8 && !arm_arch_notm))
+
+/* Nonzero if this chip supports ldrexd and strexd. */
+#define TARGET_HAVE_LDREXD (((arm_arch6k && TARGET_ARM) \
+ || arm_arch7) && arm_arch_notm)
+
+/* Nonzero if this chip supports load-acquire and store-release. */
+#define TARGET_HAVE_LDACQ (TARGET_ARM_ARCH >= 8)
+
+/* Nonzero if this chip supports LDAEXD and STLEXD. */
+#define TARGET_HAVE_LDACQEXD (TARGET_ARM_ARCH >= 8 \
+ && TARGET_32BIT \
+ && arm_arch_notm)
+
+/* Nonzero if this chip provides the MOVW and MOVT instructions. */
+#define TARGET_HAVE_MOVT (arm_arch_thumb2 || arm_arch8)
+
+/* Nonzero if this chip provides the CBZ and CBNZ instructions. */
+#define TARGET_HAVE_CBZ (arm_arch_thumb2 || arm_arch8)
+
+/* Nonzero if this chip provides Armv8.1-M Mainline Security extensions
+ instructions (most are floating-point related). */
+#define TARGET_HAVE_FPCXT_CMSE (arm_arch8_1m_main)
+
+#define TARGET_HAVE_MVE (arm_float_abi != ARM_FLOAT_ABI_SOFT \
+ && bitmap_bit_p (arm_active_target.isa, \
+ isa_bit_mve) \
+ && !TARGET_GENERAL_REGS_ONLY)
+
+#define TARGET_HAVE_MVE_FLOAT (arm_float_abi != ARM_FLOAT_ABI_SOFT \
+ && bitmap_bit_p (arm_active_target.isa, \
+ isa_bit_mve_float) \
+ && !TARGET_GENERAL_REGS_ONLY)
+
+/* Non-zero if this target supports Armv8.1-M Mainline pointer-signing
+ extension. */
+#define TARGET_HAVE_PACBTI (arm_arch8_1m_main \
+ && bitmap_bit_p (arm_active_target.isa, \
+ isa_bit_pacbti))
+
+/* MVE shares a few instructions with VFP, such as VLDM (alias VPOP), VLDR,
+ VSTM (alias VPUSH), VSTR, VMOV, VMSR and VMRS. In the same manner it
+ updates a few registers such as FPCAR, FPCCR, FPDSCR, FPSCR, MVFR0, MVFR1
+ and MVFR2. All the VFP instructions, RTL patterns and registers are
+ guarded by TARGET_HARD_FLOAT, but the instructions, RTL patterns and
+ registers common to MVE and VFP are guarded by the macro TARGET_VFP_BASE
+ defined hereafter. */
+
+#define TARGET_VFP_BASE (arm_float_abi != ARM_FLOAT_ABI_SOFT \
+ && bitmap_bit_p (arm_active_target.isa, \
+ isa_bit_vfp_base) \
+ && !TARGET_GENERAL_REGS_ONLY)
+
+/* Nonzero if integer division instructions supported. */
+#define TARGET_IDIV ((TARGET_ARM && arm_arch_arm_hwdiv) \
+ || (TARGET_THUMB && arm_arch_thumb_hwdiv))
+
+/* Nonzero if the chip disallows volatile memory access in an IT block. */
+#define TARGET_NO_VOLATILE_CE (arm_arch_no_volatile_ce)
+
+/* Nonzero if chip supports the Custom Datapath Extension. */
+#define TARGET_CDE (arm_arch_cde && arm_arch8 && !arm_arch_notm)
+
+/* Should constant I be split for OP. */
+#define DONT_EARLY_SPLIT_CONSTANT(i, op) \
+ ((optimize >= 2) \
+ && can_create_pseudo_p () \
+ && !const_ok_for_op (i, op))
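+
+/* Illustrative example, not part of the upstream header: at -O2 a
+ constant such as 0x12345678, which cannot be encoded as an 8-bit
+ rotated ARM immediate, makes const_ok_for_op return false, so the
+ early splitter leaves it intact for later passes to handle. */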
+
+/* True iff the full BPABI is being used. If TARGET_BPABI is true,
+ then TARGET_AAPCS_BASED must be true -- but the converse does not
+ hold. TARGET_BPABI implies the use of the BPABI runtime library,
+ etc., in addition to just the AAPCS calling conventions. */
+#ifndef TARGET_BPABI
+#define TARGET_BPABI false
+#endif
+
+/* Transform lane numbers on big endian targets. This is used to allow for the
+ endianness difference between NEON architectural lane numbers and those
+ used in RTL. */
+#define NEON_ENDIAN_LANE_N(mode, n) \
+ (BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (mode) - 1 - n : n)
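+
+/* Worked example, illustrative only: for a 4-lane mode such as V4SImode,
+ GET_MODE_NUNITS is 4, so on big-endian targets architectural lane 1
+ maps to RTL lane 4 - 1 - 1 == 2, while on little-endian targets the
+ mapping is the identity. */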
+
+/* Support for a compile-time default CPU, et cetera. The rules are:
+ --with-arch is ignored if -march or -mcpu are specified.
+ --with-cpu is ignored if -march or -mcpu are specified, and is overridden
+ by --with-arch.
+ --with-tune is ignored if -mtune or -mcpu are specified (but not affected
+ by -march).
+ --with-float is ignored if -mfloat-abi is specified.
+ --with-fpu is ignored if -mfpu is specified.
+ --with-abi is ignored if -mabi is specified.
+ --with-tls is ignored if -mtls-dialect is specified.
+ Note: --with-mode is not handled here; that has a special rule
+ TARGET_MODE_CHECK that also takes into account the selected CPU and
+ architecture. */
+#define OPTION_DEFAULT_SPECS \
+ {"arch", "%{!march=*:%{!mcpu=*:-march=%(VALUE)}}" }, \
+ {"cpu", "%{!march=*:%{!mcpu=*:-mcpu=%(VALUE)}}" }, \
+ {"tune", "%{!mcpu=*:%{!mtune=*:-mtune=%(VALUE)}}" }, \
+ {"float", "%{!mfloat-abi=*:-mfloat-abi=%(VALUE)}" }, \
+ {"fpu", "%{!mfpu=*:-mfpu=%(VALUE)}"}, \
+ {"abi", "%{!mabi=*:-mabi=%(VALUE)}"}, \
+ {"tls", "%{!mtls-dialect=*:-mtls-dialect=%(VALUE)}"},
+
+extern const struct arm_fpu_desc
+{
+ const char *name;
+ enum isa_feature isa_bits[isa_num_bits];
+} all_fpus[];
+
+/* Which floating point hardware to schedule for. */
+extern int arm_fpu_attr;
+
+#ifndef TARGET_DEFAULT_FLOAT_ABI
+#define TARGET_DEFAULT_FLOAT_ABI ARM_FLOAT_ABI_SOFT
+#endif
+
+#ifndef ARM_DEFAULT_ABI
+#define ARM_DEFAULT_ABI ARM_ABI_APCS
+#endif
+
+/* AAPCS based ABIs use short enums by default. */
+#ifndef ARM_DEFAULT_SHORT_ENUMS
+#define ARM_DEFAULT_SHORT_ENUMS \
+ (TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX)
+#endif
+
+/* Map each of the micro-architecture variants to their corresponding
+ major architecture revision. */
+
+enum base_architecture
+{
+ BASE_ARCH_0 = 0,
+ BASE_ARCH_2 = 2,
+ BASE_ARCH_3 = 3,
+ BASE_ARCH_3M = 3,
+ BASE_ARCH_4 = 4,
+ BASE_ARCH_4T = 4,
+ BASE_ARCH_5T = 5,
+ BASE_ARCH_5TE = 5,
+ BASE_ARCH_5TEJ = 5,
+ BASE_ARCH_6 = 6,
+ BASE_ARCH_6J = 6,
+ BASE_ARCH_6KZ = 6,
+ BASE_ARCH_6K = 6,
+ BASE_ARCH_6T2 = 6,
+ BASE_ARCH_6M = 6,
+ BASE_ARCH_6Z = 6,
+ BASE_ARCH_7 = 7,
+ BASE_ARCH_7A = 7,
+ BASE_ARCH_7R = 7,
+ BASE_ARCH_7M = 7,
+ BASE_ARCH_7EM = 7,
+ BASE_ARCH_8A = 8,
+ BASE_ARCH_8M_BASE = 8,
+ BASE_ARCH_8M_MAIN = 8,
+ BASE_ARCH_8R = 8,
+ BASE_ARCH_9A = 9
+};
+
+/* The major revision number of the ARM Architecture implemented by the target. */
+extern enum base_architecture arm_base_arch;
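+
+/* Illustrative use, not part of the upstream header: because every
+ variant of a major revision maps to the same numeric value, a test
+ such as arm_base_arch >= BASE_ARCH_7 holds for ARMv7-A, -R, -M and
+ 7E-M alike. */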
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions. */
+extern int arm_arch4;
+
+/* Nonzero if this chip supports the ARM Architecture 4T extensions. */
+extern int arm_arch4t;
+
+/* Nonzero if this chip supports the ARM Architecture 5T extensions. */
+extern int arm_arch5t;
+
+/* Nonzero if this chip supports the ARM Architecture 5TE extensions. */
+extern int arm_arch5te;
+
+/* Nonzero if this chip supports the ARM Architecture 6 extensions. */
+extern int arm_arch6;
+
+/* Nonzero if this chip supports the ARM Architecture 6k extensions. */
+extern int arm_arch6k;
+
+/* Nonzero if instructions present in ARMv6-M can be used. */
+extern int arm_arch6m;
+
+/* Nonzero if this chip supports the ARM Architecture 7 extensions. */
+extern int arm_arch7;
+
+/* Nonzero if instructions not present in the 'M' profile can be used. */
+extern int arm_arch_notm;
+
+/* Nonzero if instructions present in ARMv7E-M can be used. */
+extern int arm_arch7em;
+
+/* Nonzero if this chip supports the ARM Architecture 8 extensions. */
+extern int arm_arch8;
+
+/* Nonzero if this chip supports the ARM Architecture 8.1 extensions. */
+extern int arm_arch8_1;
+
+/* Nonzero if this chip supports the ARM Architecture 8.2 extensions. */
+extern int arm_arch8_2;
+
+/* Nonzero if this chip supports the ARM Architecture 8.3 extensions. */
+extern int arm_arch8_3;
+
+/* Nonzero if this chip supports the ARM Architecture 8.4 extensions. */
+extern int arm_arch8_4;
+
+/* Nonzero if this chip supports the ARM Architecture 8-M Mainline
+ extensions. */
+extern int arm_arch8m_main;
+
+/* Nonzero if this chip supports the ARM Architecture 8.1-M Mainline
+ extensions. */
+extern int arm_arch8_1m_main;
+
+/* Nonzero if this chip supports the FP16 instructions extension of ARM
+ Architecture 8.2. */
+extern int arm_fp16_inst;
+
+/* Nonzero if this chip can benefit from load scheduling. */
+extern int arm_ld_sched;
+
+/* Nonzero if this chip is a StrongARM. */
+extern int arm_tune_strongarm;
+
+/* Nonzero if this chip supports Intel XScale with Wireless MMX technology. */
+extern int arm_arch_iwmmxt;
+
+/* Nonzero if this chip supports Intel Wireless MMX2 technology. */
+extern int arm_arch_iwmmxt2;
+
+/* Nonzero if this chip is an XScale. */
+extern int arm_arch_xscale;
+
+/* Nonzero if tuning for XScale. */
+extern int arm_tune_xscale;
+
+/* Nonzero if tuning for stores via the write buffer. */
+extern int arm_tune_wbuf;
+
+/* Nonzero if tuning for Cortex-A9. */
+extern int arm_tune_cortex_a9;
+
+/* Nonzero if we should define __THUMB_INTERWORK__ in the
+ preprocessor.
+ XXX This is a bit of a hack; it's intended to help work around
+ problems in GLD, which doesn't understand that armv5t code is
+ interworking clean. */
+extern int arm_cpp_interwork;
+
+/* Nonzero if chip supports Thumb 1. */
+extern int arm_arch_thumb1;
+
+/* Nonzero if chip supports Thumb 2. */
+extern int arm_arch_thumb2;
+
+/* Nonzero if chip supports integer division instruction in ARM mode. */
+extern int arm_arch_arm_hwdiv;
+
+/* Nonzero if chip supports integer division instruction in Thumb mode. */
+extern int arm_arch_thumb_hwdiv;
+
+/* Nonzero if chip disallows volatile memory access in IT block. */
+extern int arm_arch_no_volatile_ce;
+
+/* Nonzero if we shouldn't use literal pools. */
+#ifndef USED_FOR_TARGET
+extern bool arm_disable_literal_pool;
+#endif
+
+/* Nonzero if chip supports the ARMv8 CRC instructions. */
+extern int arm_arch_crc;
+
+/* Nonzero if chip supports the ARMv8-M Security Extensions. */
+extern int arm_arch_cmse;
+
+/* Nonzero if chip supports the I8MM instructions. */
+extern int arm_arch_i8mm;
+
+/* Nonzero if chip supports the BFloat16 instructions. */
+extern int arm_arch_bf16;
+
+/* Nonzero if chip supports the Custom Datapath Extension. */
+extern int arm_arch_cde;
+extern int arm_arch_cde_coproc;
+extern const int arm_arch_cde_coproc_bits[];
+#define ARM_CDE_CONST_COPROC 7
+#define ARM_CCDE_CONST_1 ((1 << 13) - 1)
+#define ARM_CCDE_CONST_2 ((1 << 9) - 1)
+#define ARM_CCDE_CONST_3 ((1 << 6) - 1)
+#define ARM_VCDE_CONST_1 ((1 << 11) - 1)
+#define ARM_VCDE_CONST_2 ((1 << 6) - 1)
+#define ARM_VCDE_CONST_3 ((1 << 3) - 1)
+#define ARM_MVE_CDE_CONST_1 ((1 << 12) - 1)
+#define ARM_MVE_CDE_CONST_2 ((1 << 7) - 1)
+#define ARM_MVE_CDE_CONST_3 ((1 << 4) - 1)
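+
+/* Worked values, illustrative only: each constant is an all-ones mask
+ giving the largest immediate accepted by the corresponding CDE
+ intrinsic operand, e.g. ARM_CCDE_CONST_1 == (1 << 13) - 1 == 0x1fff
+ and ARM_MVE_CDE_CONST_3 == (1 << 4) - 1 == 0xf. */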
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_APCS_FRAME)
+#endif
+
+/* Nonzero if PIC code requires explicit qualifiers to generate
+ PLT and GOT relocs rather than the assembler doing so implicitly.
+ Subtargets can override these if required. */
+#ifndef NEED_GOT_RELOC
+#define NEED_GOT_RELOC 0
+#endif
+#ifndef NEED_PLT_RELOC
+#define NEED_PLT_RELOC 0
+#endif
+
+#ifndef TARGET_DEFAULT_PIC_DATA_IS_TEXT_RELATIVE
+#define TARGET_DEFAULT_PIC_DATA_IS_TEXT_RELATIVE 1
+#endif
+
+/* Nonzero if we need to refer to the GOT with a PC-relative
+ offset. In other words, generate
+
+ .word _GLOBAL_OFFSET_TABLE_ - [. - (.Lxx + 8)]
+
+ rather than
+
+ .word _GLOBAL_OFFSET_TABLE_ - (.Lxx + 8)
+
+ The default is true, which matches NetBSD. Subtargets can
+ override this if required. */
+#ifndef GOT_PCREL
+#define GOT_PCREL 1
+#endif
+
+/* Target machine storage layout. */
+
+/* Nonzero if this chip provides Armv8.1-M Mainline
+ LOB (low overhead branch features) extension instructions. */
+#define TARGET_HAVE_LOB (arm_arch8_1m_main)
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ (MODE) = SImode; \
+ }
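+
+/* E.g. (illustrative, not upstream) a QImode or HImode scalar (1 or 2
+ bytes) is widened to SImode when held in a register; SImode and wider,
+ and all non-integer modes, are left unchanged because only MODE_INT
+ narrower than 4 bytes matches the condition above. */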
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ Most ARM processors are run in little endian mode, so that is the default.
+ If you want to have it run-time selectable, change the definition in a
+ cover file to be TARGET_BIG_ENDIAN. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered. */
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+#define UNITS_PER_WORD 4
+
+/* True if natural alignment is used for doubleword types. */
+#define ARM_DOUBLEWORD_ALIGN TARGET_AAPCS_BASED
+
+#define DOUBLEWORD_ALIGNMENT 64
+
+#define PARM_BOUNDARY 32
+
+#define STACK_BOUNDARY (ARM_DOUBLEWORD_ALIGN ? DOUBLEWORD_ALIGNMENT : 32)
+
+#define PREFERRED_STACK_BOUNDARY \
+ (arm_abi == ARM_ABI_ATPCS ? 64 : STACK_BOUNDARY)
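+
+/* Worked example, illustrative only: on an AAPCS-based target
+ ARM_DOUBLEWORD_ALIGN is true, so STACK_BOUNDARY is 64 bits; under the
+ old APCS ABI it stays at 32. ATPCS additionally forces the preferred
+ boundary up to 64. */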
+
+#define FUNCTION_BOUNDARY_P(flags) (TARGET_THUMB_P (flags) ? 16 : 32)
+#define FUNCTION_BOUNDARY (FUNCTION_BOUNDARY_P (target_flags))
+
+/* The lowest bit is used to indicate Thumb-mode functions, so the
+ vbit must go into the delta field of pointers to member
+ functions. */
+#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT (ARM_DOUBLEWORD_ALIGN ? DOUBLEWORD_ALIGNMENT : 32)
+
+#define MALLOC_ABI_ALIGNMENT BIGGEST_ALIGNMENT
+
+/* XXX Blah -- this macro is used directly by libobjc. Since it
+ supports no vector modes, cut out the complexity and fall back
+ on BIGGEST_FIELD_ALIGNMENT. */
+#ifdef IN_TARGET_LIBS
+#define BIGGEST_FIELD_ALIGNMENT 64
+#endif
+
+/* Align definitions of arrays, unions and structures so that
+ initializations and copies can be made more efficient. This is not
+ ABI-changing, so it only affects places where we can see the
+ definition. Increasing the alignment tends to introduce padding,
+ so don't do this when optimizing for size/conserving stack space. */
+#define ARM_EXPAND_ALIGNMENT(COND, EXP, ALIGN) \
+ (((COND) && ((ALIGN) < BITS_PER_WORD) \
+ && (TREE_CODE (EXP) == ARRAY_TYPE \
+ || TREE_CODE (EXP) == UNION_TYPE \
+ || TREE_CODE (EXP) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
+
+/* Align global data. */
+#define DATA_ALIGNMENT(EXP, ALIGN) \
+ ARM_EXPAND_ALIGNMENT(!optimize_size, EXP, ALIGN)
+
+/* Similarly, make sure that objects on the stack are sensibly aligned. */
+#define LOCAL_ALIGNMENT(EXP, ALIGN) \
+ ARM_EXPAND_ALIGNMENT(!flag_conserve_stack, EXP, ALIGN)
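+
+/* Illustrative effect, not part of the upstream header: a global
+ char[10], nominally 8-bit aligned, is raised to BITS_PER_WORD (32-bit)
+ alignment by DATA_ALIGNMENT when not optimizing for size, so it can be
+ initialized and copied a word at a time. */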
+
+/* Setting STRUCTURE_SIZE_BOUNDARY to 32 produces more efficient code, but the
+ value set in previous versions of this toolchain was 8, which produces more
+ compact structures. The command line option -mstructure_size_boundary=<n>
+ can be used to change this value. For compatibility with the ARM SDK
+ however the value should be left at 32. ARM SDT Reference Manual (ARM DUI
+ 0020D) page 2-20 says "Structures are aligned on word boundaries".
+ The AAPCS specifies a value of 8. */
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+/* This is the value used to initialize arm_structure_size_boundary. If a
+ particular arm target wants to change the default value it should change
+ the definition of this macro, not STRUCTURE_SIZE_BOUNDARY. See netbsd.h
+ for an example of this. */
+#ifndef DEFAULT_STRUCTURE_SIZE_BOUNDARY
+#define DEFAULT_STRUCTURE_SIZE_BOUNDARY 32
+#endif
+
+/* Nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+/* wchar_t is unsigned under the AAPCS. */
+#ifndef WCHAR_TYPE
+#define WCHAR_TYPE (TARGET_AAPCS_BASED ? "unsigned int" : "int")
+
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+#endif
+
+/* Sizes for fixed-point types. */
+
+#define SHORT_FRACT_TYPE_SIZE 8
+#define FRACT_TYPE_SIZE 16
+#define LONG_FRACT_TYPE_SIZE 32
+#define LONG_LONG_FRACT_TYPE_SIZE 64
+
+#define SHORT_ACCUM_TYPE_SIZE 16
+#define ACCUM_TYPE_SIZE 32
+#define LONG_ACCUM_TYPE_SIZE 64
+#define LONG_LONG_ACCUM_TYPE_SIZE 64
+
+#define MAX_FIXED_MODE_SIZE 64
+
+#ifndef SIZE_TYPE
+#define SIZE_TYPE (TARGET_AAPCS_BASED ? "unsigned int" : "long unsigned int")
+#endif
+
+#ifndef PTRDIFF_TYPE
+#define PTRDIFF_TYPE (TARGET_AAPCS_BASED ? "int" : "long int")
+#endif
+
+/* AAPCS requires that structure alignment is affected by bitfields. */
+#ifndef PCC_BITFIELD_TYPE_MATTERS
+#define PCC_BITFIELD_TYPE_MATTERS TARGET_AAPCS_BASED
+#endif
+
+/* The maximum size of the sync library functions supported. */
+#ifndef MAX_SYNC_LIBFUNC_SIZE
+#define MAX_SYNC_LIBFUNC_SIZE (2 * UNITS_PER_WORD)
+#endif
+
+
+/* Standard register usage. */
+
+/* Register allocation in ARM Procedure Call Standard
+ (S - saved over call, F - Frame-related).
+
+ r0 * argument word/integer result
+ r1-r3 argument word
+
+ r4-r8 S register variable
+ r9 S (rfp) register variable (real frame pointer)
+
+ r10 F S (sl) stack limit (used by -mapcs-stack-check)
+ r11 F S (fp) argument pointer
+ r12 (ip) temp workspace
+ r13 F S (sp) lower end of current stack frame
+ r14 (lr) link address/workspace
+ r15 F (pc) program counter
+
+ cc This is NOT a real register, but is used internally
+ to represent things that use or set the condition
+ codes.
+ sfp This isn't either. It is used during rtl generation
+ since the offset between the frame pointer and the
+ auto's isn't known until after register allocation.
+ afp Nor this, we only need this because of non-local
+ goto. Without it fp appears to be used and the
+ elimination code won't get rid of sfp. It tracks
+ fp exactly at all times.
+ apsrq Nor this, it is used to track operations on the Q bit
+ of APSR by ACLE saturating intrinsics.
+ apsrge Nor this, it is used to track operations on the GE bits
+ of APSR by ACLE SIMD32 intrinsics.
+
+ *: See TARGET_CONDITIONAL_REGISTER_USAGE */
+
+/* s0-s15 VFP scratch (aka d0-d7).
+ s16-s31 S VFP variable (aka d8-d15).
+ vfpcc Not a real register. Represents the VFP condition
+ code flags.
+ vpr Used to represent MVE VPR predication.
+ ra_auth_code Pseudo register to save PAC. */
+
+/* The stack backtrace structure is as follows:
+ fp points to here: | save code pointer | [fp]
+ | return link value | [fp, #-4]
+ | return sp value | [fp, #-8]
+ | return fp value | [fp, #-12]
+ [| saved r10 value |]
+ [| saved r9 value |]
+ [| saved r8 value |]
+ [| saved r7 value |]
+ [| saved r6 value |]
+ [| saved r5 value |]
+ [| saved r4 value |]
+ [| saved r3 value |]
+ [| saved r2 value |]
+ [| saved r1 value |]
+ [| saved r0 value |]
+ r0-r3 are not normally saved in a C function. */
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+{ \
+ /* Core regs. */ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,1,0,1, \
+ /* VFP regs. */ \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ /* IWMMXT regs. */ \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1, \
+ /* Specials. */ \
+ 1,1,1,1,1,1,1,1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like.
+ The CC is not preserved over function calls on the ARM 6, so it is
+ easier to assume this for all. SFP is preserved, since FP is. */
+#define CALL_USED_REGISTERS \
+{ \
+ /* Core regs. */ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,0,0,1,1,1,1, \
+ /* VFP Regs. */ \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ /* IWMMXT regs. */ \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1,1,1,1,1, \
+ 1,1,1,1, \
+ /* Specials. */ \
+ 1,1,1,1,1,1,1,1 \
+}
+
+#ifndef SUBTARGET_CONDITIONAL_REGISTER_USAGE
+#define SUBTARGET_CONDITIONAL_REGISTER_USAGE
+#endif
+
+/* These are a couple of extensions to the formats accepted
+ by asm_fprintf:
+ %@ prints out ASM_COMMENT_START
+ %r prints out REGISTER_PREFIX reg_names[arg] */
+#define ASM_FPRINTF_EXTENSIONS(FILE, ARGS, P) \
+ case '@': \
+ fputs (ASM_COMMENT_START, FILE); \
+ break; \
+ \
+ case 'r': \
+ fputs (REGISTER_PREFIX, FILE); \
+ fputs (reg_names [va_arg (ARGS, int)], FILE); \
+ break;
+
+/* Round X up to the nearest word. */
+#define ROUND_UP_WORD(X) (((X) + 3) & ~3)
+
+/* Convert from bytes to ints. */
+#define ARM_NUM_INTS(X) (((X) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
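+
+/* Illustrative sketch, not part of the upstream header. */
+#if 0 /* Example only; never compiled. */
+STATIC_ASSERT (ROUND_UP_WORD (5) == 8); /* (5 + 3) & ~3. */
+STATIC_ASSERT (ARM_NUM_INTS (5) == 2); /* (5 + 4 - 1) / 4. */
+#endif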
+
+/* The number of (integer) registers required to hold a quantity of type MODE.
+ Also used for VFP registers. */
+#define ARM_NUM_REGS(MODE) \
+ ARM_NUM_INTS (GET_MODE_SIZE (MODE))
+
+/* The number of (integer) registers required to hold a quantity of type
+ TYPE in mode MODE. */
+#define ARM_NUM_REGS2(MODE, TYPE) \
+ ARM_NUM_INTS ((MODE) == BLKmode ? \
+ int_size_in_bytes (TYPE) : GET_MODE_SIZE (MODE))
+
+/* The number of (integer) argument registers available. */
+#define NUM_ARG_REGS 4
+
+/* And similarly for the VFP. */
+#define NUM_VFP_ARG_REGS 16
+
+/* Return the register number of the N'th (integer) argument. */
+#define ARG_REGISTER(N) (N - 1)
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* The number of the last argument register. */
+#define LAST_ARG_REGNUM ARG_REGISTER (NUM_ARG_REGS)
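+
+/* Worked values, illustrative only: ARG_REGISTER (1) == 0 (r0), so with
+ NUM_ARG_REGS == 4, LAST_ARG_REGNUM == ARG_REGISTER (4) == 3 (r3). */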
+
+/* The numbers of the Thumb register ranges. */
+#define FIRST_LO_REGNUM 0
+#define LAST_LO_REGNUM 7
+#define FIRST_HI_REGNUM 8
+#define LAST_HI_REGNUM 11
+
+/* Overridden by config/arm/bpabi.h. */
+#ifndef ARM_UNWIND_INFO
+#define ARM_UNWIND_INFO 0
+#endif
+
+/* Use r0 and r1 to pass exception handling information. */
+#define EH_RETURN_DATA_REGNO(N) (((N) < 2) ? N : INVALID_REGNUM)
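+
+/* Illustrative, not upstream: EH_RETURN_DATA_REGNO (0) == 0 (r0) and
+ EH_RETURN_DATA_REGNO (1) == 1 (r1); any higher N yields
+ INVALID_REGNUM. */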
+
+/* The register that holds the return address in exception handlers. */
+#define ARM_EH_STACKADJ_REGNUM 2
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (SImode, ARM_EH_STACKADJ_REGNUM)
+
+#ifndef ARM_TARGET2_DWARF_FORMAT
+#define ARM_TARGET2_DWARF_FORMAT DW_EH_PE_pcrel
+#endif
+
+/* ttype entries (the only interesting data references used)
+ use TARGET2 relocations. */
+#define ASM_PREFERRED_EH_DATA_FORMAT(code, data) \
+ (((code) == 0 && (data) == 1 && ARM_UNWIND_INFO) ? ARM_TARGET2_DWARF_FORMAT \
+ : DW_EH_PE_absptr)
+
+/* The native (Norcroft) Pascal compiler for the ARM passes the static chain
+ as an invisible last argument (possible since varargs don't exist in
+ Pascal), so the following is not true. */
+#define STATIC_CHAIN_REGNUM 12
+
+/* r9 is the FDPIC register (base register for GOT and FUNCDESC accesses). */
+#define FDPIC_REGNUM 9
+
+/* Define this to be where the real frame pointer is if it is not possible to
+ work out the offset between the frame pointer and the automatic variables
+ until after register allocation has taken place. FRAME_POINTER_REGNUM
+ should point to a special register that we will make sure is eliminated.
+
+ For the Thumb we have another problem. The TPCS defines the frame pointer
+ as r11, and GCC believes that it is always possible to use the frame pointer
+ as base register for addressing purposes. (See comments in
+ find_reloads_address()). But - the Thumb does not allow high registers,
+ including r11, to be used as base address registers. Hence our problem.
+
+ The solution used here, and in the old thumb port is to use r7 instead of
+ r11 as the hard frame pointer and to have special code to generate
+ backtrace structures on the stack (if required to do so via a command line
+ option) using r11. This is the only 'user visible' use of r11 as a frame
+ pointer. */
+#define ARM_HARD_FRAME_POINTER_REGNUM 11
+#define THUMB_HARD_FRAME_POINTER_REGNUM 7
+
+#define HARD_FRAME_POINTER_REGNUM \
+ (TARGET_ARM \
+ ? ARM_HARD_FRAME_POINTER_REGNUM \
+ : THUMB_HARD_FRAME_POINTER_REGNUM)
+
+#define HARD_FRAME_POINTER_IS_FRAME_POINTER 0
+#define HARD_FRAME_POINTER_IS_ARG_POINTER 0
+
+#define FP_REGNUM HARD_FRAME_POINTER_REGNUM
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM SP_REGNUM
+
+#define FIRST_IWMMXT_REGNUM (LAST_HI_VFP_REGNUM + 1)
+#define LAST_IWMMXT_REGNUM (FIRST_IWMMXT_REGNUM + 15)
+
+/* Need to sync with WCGR in iwmmxt.md. */
+#define FIRST_IWMMXT_GR_REGNUM (LAST_IWMMXT_REGNUM + 1)
+#define LAST_IWMMXT_GR_REGNUM (FIRST_IWMMXT_GR_REGNUM + 3)
+
+#define IS_IWMMXT_REGNUM(REGNUM) \
+ (((REGNUM) >= FIRST_IWMMXT_REGNUM) && ((REGNUM) <= LAST_IWMMXT_REGNUM))
+#define IS_IWMMXT_GR_REGNUM(REGNUM) \
+ (((REGNUM) >= FIRST_IWMMXT_GR_REGNUM) && ((REGNUM) <= LAST_IWMMXT_GR_REGNUM))
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 102
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 103
+
+#define FIRST_VFP_REGNUM 16
+#define D7_VFP_REGNUM (FIRST_VFP_REGNUM + 15)
+#define LAST_VFP_REGNUM \
+ (TARGET_VFPD32 ? LAST_HI_VFP_REGNUM : LAST_LO_VFP_REGNUM)
+
+#define IS_VFP_REGNUM(REGNUM) \
+ (((REGNUM) >= FIRST_VFP_REGNUM) && ((REGNUM) <= LAST_VFP_REGNUM))
+
+/* VFP registers are split into two types: those defined by VFP versions < 3
+ have D registers overlaid on consecutive pairs of S registers. VFP version 3
+ defines 16 new D registers (d16-d31) which, for simplicity and correctness
+ in various parts of the backend, we implement as "fake" single-precision
+ registers (which would be S32-S63, but cannot be used in that way). The
+ following macros define these ranges of registers. */
+#define LAST_LO_VFP_REGNUM (FIRST_VFP_REGNUM + 31)
+#define FIRST_HI_VFP_REGNUM (LAST_LO_VFP_REGNUM + 1)
+#define LAST_HI_VFP_REGNUM (FIRST_HI_VFP_REGNUM + 31)
+
+#define VFP_REGNO_OK_FOR_SINGLE(REGNUM) \
+ ((REGNUM) <= LAST_LO_VFP_REGNUM)
+
+/* DFmode values are only valid in even register pairs. */
+#define VFP_REGNO_OK_FOR_DOUBLE(REGNUM) \
+ ((((REGNUM) - FIRST_VFP_REGNUM) & 1) == 0)
+
+/* Neon Quad values must start at a multiple of four registers. */
+#define NEON_REGNO_OK_FOR_QUAD(REGNUM) \
+ ((((REGNUM) - FIRST_VFP_REGNUM) & 3) == 0)
+
+/* Neon structures of vectors must be in even register pairs and there
+ must be enough registers available. Because of various patterns
+ requiring quad registers, we require them to start at a multiple of
+ four. */
+#define NEON_REGNO_OK_FOR_NREGS(REGNUM, N) \
+ ((((REGNUM) - FIRST_VFP_REGNUM) & 3) == 0 \
+ && (LAST_VFP_REGNUM - (REGNUM) >= 2 * (N) - 1))
+
+/* The number of hard registers is 16 ARM + 1 CC + 1 SFP + 1 AFP
+ + 1 APSRQ + 1 APSRGE + 1 VPR + 1 Pseudo register to save PAC. */
+/* Intel Wireless MMX Technology registers add 16 + 4 more. */
+/* VFP (VFP3) adds 32 (64) + 1 VFPCC. */
+#define FIRST_PSEUDO_REGISTER 108
+
+#define DWARF_PAC_REGNUM 143
+
+#define DEBUGGER_REGNO(REGNO) arm_debugger_regno (REGNO)
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may be accessed
+ via the stack pointer) in functions that seem suitable.
+ If we have to have a frame pointer we might as well make use of it.
+ APCS says that the frame pointer does not need to be pushed in leaf
+ functions, or simple tail call functions. */
+
+#ifndef SUBTARGET_FRAME_POINTER_REQUIRED
+#define SUBTARGET_FRAME_POINTER_REQUIRED 0
+#endif
+
+#define VALID_IWMMXT_REG_MODE(MODE) \
+ (arm_vector_mode_supported_p (MODE) || (MODE) == DImode)
+
+/* Modes valid for Neon D registers. */
+#define VALID_NEON_DREG_MODE(MODE) \
+ ((MODE) == V2SImode || (MODE) == V4HImode || (MODE) == V8QImode \
+ || (MODE) == V4HFmode || (MODE) == V2SFmode || (MODE) == DImode \
+ || (MODE) == V4BFmode)
+
+/* Modes valid for Neon Q registers. */
+#define VALID_NEON_QREG_MODE(MODE) \
+ ((MODE) == V4SImode || (MODE) == V8HImode || (MODE) == V16QImode \
+ || (MODE) == V8HFmode || (MODE) == V4SFmode || (MODE) == V2DImode \
+ || (MODE) == V8BFmode)
+
+#define VALID_MVE_MODE(MODE) \
+ ((MODE) == V2DImode || (MODE) == V4SImode || (MODE) == V8HImode \
+ || (MODE) == V16QImode || (MODE) == V8HFmode || (MODE) == V4SFmode \
+ || (MODE) == V2DFmode)
+
+#define VALID_MVE_PRED_MODE(MODE) \
+ ((MODE) == HImode \
+ || (MODE) == V16BImode || (MODE) == V8BImode || (MODE) == V4BImode \
+ || (MODE) == V2QImode)
+
+#define VALID_MVE_SI_MODE(MODE) \
+ ((MODE) == V2DImode || (MODE) == V4SImode || (MODE) == V8HImode \
+ || (MODE) == V16QImode)
+
+/* Modes used in MVE's narrowing stores or widening loads. */
+#define MVE_STN_LDW_MODE(MODE) \
+ ((MODE) == V4QImode || (MODE) == V8QImode || (MODE) == V4HImode)
+
+#define VALID_MVE_SF_MODE(MODE) \
+ ((MODE) == V8HFmode || (MODE) == V4SFmode || (MODE) == V2DFmode)
+
+/* Structure modes valid for Neon registers. */
+#define VALID_NEON_STRUCT_MODE(MODE) \
+ ((MODE) == TImode || (MODE) == EImode || (MODE) == OImode \
+ || (MODE) == CImode || (MODE) == XImode)
+
+#define VALID_MVE_STRUCT_MODE(MODE) \
+ ((MODE) == TImode || (MODE) == OImode || (MODE) == XImode)
+
+/* The conditions under which vector modes are supported for general
+ arithmetic using Neon. */
+
+#define ARM_HAVE_NEON_V8QI_ARITH TARGET_NEON
+#define ARM_HAVE_NEON_V4HI_ARITH TARGET_NEON
+#define ARM_HAVE_NEON_V2SI_ARITH TARGET_NEON
+
+#define ARM_HAVE_NEON_V16QI_ARITH TARGET_NEON
+#define ARM_HAVE_NEON_V8HI_ARITH TARGET_NEON
+#define ARM_HAVE_NEON_V4SI_ARITH TARGET_NEON
+#define ARM_HAVE_NEON_V2DI_ARITH TARGET_NEON
+
+/* HF operations have their own flush-to-zero control (FPSCR.FZ16). */
+#define ARM_HAVE_NEON_V4HF_ARITH TARGET_NEON_FP16INST
+#define ARM_HAVE_NEON_V8HF_ARITH TARGET_NEON_FP16INST
+
+/* SF operations always flush to zero, regardless of FPSCR.FZ, so we can
+ only use them for general arithmetic when -funsafe-math-optimizations
+ is in effect. */
+#define ARM_HAVE_NEON_V2SF_ARITH \
+ (TARGET_NEON && flag_unsafe_math_optimizations)
+#define ARM_HAVE_NEON_V4SF_ARITH ARM_HAVE_NEON_V2SF_ARITH
+
+/* The conditions under which vector modes are supported for general
+ arithmetic by any vector extension. */
+
+#define ARM_HAVE_V8QI_ARITH (ARM_HAVE_NEON_V8QI_ARITH || TARGET_REALLY_IWMMXT)
+#define ARM_HAVE_V4HI_ARITH (ARM_HAVE_NEON_V4HI_ARITH || TARGET_REALLY_IWMMXT)
+#define ARM_HAVE_V2SI_ARITH (ARM_HAVE_NEON_V2SI_ARITH || TARGET_REALLY_IWMMXT)
+
+#define ARM_HAVE_V16QI_ARITH (ARM_HAVE_NEON_V16QI_ARITH || TARGET_HAVE_MVE)
+#define ARM_HAVE_V8HI_ARITH (ARM_HAVE_NEON_V8HI_ARITH || TARGET_HAVE_MVE)
+#define ARM_HAVE_V4SI_ARITH (ARM_HAVE_NEON_V4SI_ARITH || TARGET_HAVE_MVE)
+#define ARM_HAVE_V2DI_ARITH ARM_HAVE_NEON_V2DI_ARITH
+
+#define ARM_HAVE_V4HF_ARITH ARM_HAVE_NEON_V4HF_ARITH
+#define ARM_HAVE_V2SF_ARITH ARM_HAVE_NEON_V2SF_ARITH
+
+#define ARM_HAVE_V8HF_ARITH (ARM_HAVE_NEON_V8HF_ARITH || TARGET_HAVE_MVE_FLOAT)
+#define ARM_HAVE_V4SF_ARITH (ARM_HAVE_NEON_V4SF_ARITH || TARGET_HAVE_MVE_FLOAT)
+
+/* The conditions under which vector modes are supported by load/store
+ instructions using Neon. */
+
+#define ARM_HAVE_NEON_V8QI_LDST TARGET_NEON
+#define ARM_HAVE_NEON_V16QI_LDST TARGET_NEON
+#define ARM_HAVE_NEON_V4HI_LDST TARGET_NEON
+#define ARM_HAVE_NEON_V8HI_LDST TARGET_NEON
+#define ARM_HAVE_NEON_V2SI_LDST TARGET_NEON
+#define ARM_HAVE_NEON_V4SI_LDST TARGET_NEON
+#define ARM_HAVE_NEON_V4HF_LDST TARGET_NEON_FP16INST
+#define ARM_HAVE_NEON_V8HF_LDST TARGET_NEON_FP16INST
+#define ARM_HAVE_NEON_V4BF_LDST TARGET_BF16_SIMD
+#define ARM_HAVE_NEON_V8BF_LDST TARGET_BF16_SIMD
+#define ARM_HAVE_NEON_V2SF_LDST TARGET_NEON
+#define ARM_HAVE_NEON_V4SF_LDST TARGET_NEON
+#define ARM_HAVE_NEON_DI_LDST TARGET_NEON
+#define ARM_HAVE_NEON_V2DI_LDST TARGET_NEON
+
+/* The conditions under which vector modes are supported by load/store
+ instructions by any vector extension. */
+
+#define ARM_HAVE_V8QI_LDST (ARM_HAVE_NEON_V8QI_LDST || TARGET_REALLY_IWMMXT)
+#define ARM_HAVE_V4HI_LDST (ARM_HAVE_NEON_V4HI_LDST || TARGET_REALLY_IWMMXT)
+#define ARM_HAVE_V2SI_LDST (ARM_HAVE_NEON_V2SI_LDST || TARGET_REALLY_IWMMXT)
+
+#define ARM_HAVE_V16QI_LDST (ARM_HAVE_NEON_V16QI_LDST || TARGET_HAVE_MVE)
+#define ARM_HAVE_V8HI_LDST (ARM_HAVE_NEON_V8HI_LDST || TARGET_HAVE_MVE)
+#define ARM_HAVE_V4SI_LDST (ARM_HAVE_NEON_V4SI_LDST || TARGET_HAVE_MVE)
+#define ARM_HAVE_DI_LDST ARM_HAVE_NEON_DI_LDST
+#define ARM_HAVE_V2DI_LDST ARM_HAVE_NEON_V2DI_LDST
+
+#define ARM_HAVE_V4HF_LDST ARM_HAVE_NEON_V4HF_LDST
+#define ARM_HAVE_V2SF_LDST ARM_HAVE_NEON_V2SF_LDST
+
+#define ARM_HAVE_V4BF_LDST ARM_HAVE_NEON_V4BF_LDST
+#define ARM_HAVE_V8BF_LDST ARM_HAVE_NEON_V8BF_LDST
+
+#define ARM_HAVE_V8HF_LDST (ARM_HAVE_NEON_V8HF_LDST || TARGET_HAVE_MVE_FLOAT)
+#define ARM_HAVE_V4SF_LDST (ARM_HAVE_NEON_V4SF_LDST || TARGET_HAVE_MVE_FLOAT)
+
+/* The register numbers in sequence, for passing to arm_gen_load_multiple. */
+extern int arm_regs_in_sequence[];
+
+/* The order in which registers should be allocated. It is good to use ip
+ since no saving is required (though calls clobber it) and it never contains
+ function parameters. It is quite good to use lr since other calls may
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ least likely to contain a function parameter; in addition results are
+ returned in r0.
+ For VFP/VFPv3, allocate D16-D31 first, then caller-saved registers (D0-D7),
+ then D8-D15. The reason for doing this is to attempt to reduce register
+ pressure when both single- and double-precision registers are used in a
+ function. */
+
+#define VREG(X) (FIRST_VFP_REGNUM + (X))
+#define WREG(X) (FIRST_IWMMXT_REGNUM + (X))
+#define WGREG(X) (FIRST_IWMMXT_GR_REGNUM + (X))
+
+#define REG_ALLOC_ORDER \
+{ \
+ /* General registers. */ \
+ 3, 2, 1, 0, 12, 14, 4, 5, \
+ 6, 7, 8, 9, 10, 11, \
+ /* High VFP registers. */ \
+ VREG(32), VREG(33), VREG(34), VREG(35), \
+ VREG(36), VREG(37), VREG(38), VREG(39), \
+ VREG(40), VREG(41), VREG(42), VREG(43), \
+ VREG(44), VREG(45), VREG(46), VREG(47), \
+ VREG(48), VREG(49), VREG(50), VREG(51), \
+ VREG(52), VREG(53), VREG(54), VREG(55), \
+ VREG(56), VREG(57), VREG(58), VREG(59), \
+ VREG(60), VREG(61), VREG(62), VREG(63), \
+ /* VFP argument registers. */ \
+ VREG(15), VREG(14), VREG(13), VREG(12), \
+ VREG(11), VREG(10), VREG(9), VREG(8), \
+ VREG(7), VREG(6), VREG(5), VREG(4), \
+ VREG(3), VREG(2), VREG(1), VREG(0), \
+ /* VFP call-saved registers. */ \
+ VREG(16), VREG(17), VREG(18), VREG(19), \
+ VREG(20), VREG(21), VREG(22), VREG(23), \
+ VREG(24), VREG(25), VREG(26), VREG(27), \
+ VREG(28), VREG(29), VREG(30), VREG(31), \
+ /* IWMMX registers. */ \
+ WREG(0), WREG(1), WREG(2), WREG(3), \
+ WREG(4), WREG(5), WREG(6), WREG(7), \
+ WREG(8), WREG(9), WREG(10), WREG(11), \
+ WREG(12), WREG(13), WREG(14), WREG(15), \
+ WGREG(0), WGREG(1), WGREG(2), WGREG(3), \
+ /* Registers not for general use. */ \
+ CC_REGNUM, VFPCC_REGNUM, \
+ FRAME_POINTER_REGNUM, ARG_POINTER_REGNUM, \
+ SP_REGNUM, PC_REGNUM, APSRQ_REGNUM, \
+ APSRGE_REGNUM, VPR_REGNUM, RA_AUTH_CODE \
+}
+
+#define IS_VPR_REGNUM(REGNUM) \
+ ((REGNUM) == VPR_REGNUM)
+
+#define IS_PAC_REGNUM(REGNUM) \
+ ((REGNUM) == RA_AUTH_CODE)
+
+/* Use different register alloc ordering for Thumb. */
+#define ADJUST_REG_ALLOC_ORDER arm_order_regs_for_local_alloc ()
+
+/* Tell IRA to use the order we define when optimizing for size. */
+#define HONOR_REG_ALLOC_ORDER optimize_function_for_size_p (cfun)
+
+/* Interrupt functions can only use registers that have already been
+ saved by the prologue, even if they would normally be
+ call-clobbered. */
+#define HARD_REGNO_RENAME_OK(SRC, DST) \
+ (! IS_INTERRUPT (cfun->machine->func_type) || \
+ df_regs_ever_live_p (DST))
+
+/* Register and constant classes. */
+
+/* Register classes. */
+enum reg_class
+{
+ NO_REGS,
+ LO_REGS,
+ STACK_REG,
+ BASE_REGS,
+ HI_REGS,
+ CALLER_SAVE_REGS,
+ EVEN_REG,
+ GENERAL_REGS,
+ CORE_REGS,
+ VFP_D0_D7_REGS,
+ VFP_LO_REGS,
+ VFP_HI_REGS,
+ VFP_REGS,
+ IWMMXT_REGS,
+ IWMMXT_GR_REGS,
+ CC_REG,
+ VFPCC_REG,
+ SFP_REG,
+ AFP_REG,
+ VPR_REG,
+ PAC_REG,
+ GENERAL_AND_VPR_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "LO_REGS", \
+ "STACK_REG", \
+ "BASE_REGS", \
+ "HI_REGS", \
+ "CALLER_SAVE_REGS", \
+ "EVEN_REG", \
+ "GENERAL_REGS", \
+ "CORE_REGS", \
+ "VFP_D0_D7_REGS", \
+ "VFP_LO_REGS", \
+ "VFP_HI_REGS", \
+ "VFP_REGS", \
+ "IWMMXT_REGS", \
+ "IWMMXT_GR_REGS", \
+ "CC_REG", \
+ "VFPCC_REG", \
+ "SFP_REG", \
+ "AFP_REG", \
+ "VPR_REG", \
+ "PAC_REG", \
+ "GENERAL_AND_VPR_REGS", \
+ "ALL_REGS" \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
+ { 0x000000FF, 0x00000000, 0x00000000, 0x00000000 }, /* LO_REGS */ \
+ { 0x00002000, 0x00000000, 0x00000000, 0x00000000 }, /* STACK_REG */ \
+ { 0x000020FF, 0x00000000, 0x00000000, 0x00000000 }, /* BASE_REGS */ \
+ { 0x00005F00, 0x00000000, 0x00000000, 0x00000000 }, /* HI_REGS */ \
+ { 0x0000100F, 0x00000000, 0x00000000, 0x00000000 }, /* CALLER_SAVE_REGS */ \
+ { 0x00005555, 0x00000000, 0x00000000, 0x00000000 }, /* EVEN_REG. */ \
+ { 0x00005FFF, 0x00000000, 0x00000000, 0x00000000 }, /* GENERAL_REGS */ \
+ { 0x00007FFF, 0x00000000, 0x00000000, 0x00000000 }, /* CORE_REGS */ \
+ { 0xFFFF0000, 0x00000000, 0x00000000, 0x00000000 }, /* VFP_D0_D7_REGS */ \
+ { 0xFFFF0000, 0x0000FFFF, 0x00000000, 0x00000000 }, /* VFP_LO_REGS */ \
+ { 0x00000000, 0xFFFF0000, 0x0000FFFF, 0x00000000 }, /* VFP_HI_REGS */ \
+ { 0xFFFF0000, 0xFFFFFFFF, 0x0000FFFF, 0x00000000 }, /* VFP_REGS */ \
+ { 0x00000000, 0x00000000, 0xFFFF0000, 0x00000000 }, /* IWMMXT_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x0000000F }, /* IWMMXT_GR_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000010 }, /* CC_REG */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000020 }, /* VFPCC_REG */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000040 }, /* SFP_REG */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000080 }, /* AFP_REG */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000400 }, /* VPR_REG. */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000800 }, /* PAC_REG. */ \
+ { 0x00005FFF, 0x00000000, 0x00000000, 0x00000400 }, /* GENERAL_AND_VPR_REGS. */ \
+ { 0xFFFF7FFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0000040F } /* ALL_REGS. */ \
+}
+
+#define FP_SYSREGS \
+ DEF_FP_SYSREG (FPSCR) \
+ DEF_FP_SYSREG (FPSCR_nzcvqc) \
+ DEF_FP_SYSREG (VPR) \
+ DEF_FP_SYSREG (P0) \
+ DEF_FP_SYSREG (FPCXTNS) \
+ DEF_FP_SYSREG (FPCXTS)
+
+#define DEF_FP_SYSREG(reg) reg ## _ENUM,
+enum vfp_sysregs_encoding {
+ FP_SYSREGS
+ NB_FP_SYSREGS
+};
+#undef DEF_FP_SYSREG
+extern const char *fp_sysreg_names[NB_FP_SYSREGS];
+
+/* Any of the VFP register classes. */
+#define IS_VFP_CLASS(X) \
+ ((X) == VFP_D0_D7_REGS || (X) == VFP_LO_REGS \
+ || (X) == VFP_HI_REGS || (X) == VFP_REGS)
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+#define REGNO_REG_CLASS(REGNO) arm_regno_class (REGNO)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS (TARGET_THUMB1 ? LO_REGS : GENERAL_REGS)
+#define BASE_REG_CLASS (TARGET_THUMB1 ? LO_REGS : CORE_REGS)
+
+/* For the Thumb the high registers cannot be used as base registers
+ when addressing quantities in QI or HI mode; if we don't know the
+ mode, then we must be conservative. For MVE, loads from memory must
+ use a low register as the base, i.e. [Rn] with Rn in LO_REGS. */
+#define MODE_BASE_REG_CLASS(MODE) \
+ (TARGET_HAVE_MVE ? arm_mode_base_reg_class (MODE) \
+ : (TARGET_32BIT ? CORE_REGS \
+ : GET_MODE_SIZE (MODE) >= 4 ? BASE_REGS \
+ : LO_REGS))
+
+/* For Thumb we cannot support SP+reg addressing, so we return LO_REGS
+ instead of BASE_REGS. */
+#define MODE_BASE_REG_REG_CLASS(MODE) BASE_REG_CLASS
+
+/* When this hook returns true for MODE, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
+ arm_small_register_classes_for_mode_p
+
+/* Must leave BASE_REGS reloads alone. */
+#define THUMB_SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ (lra_in_progress ? NO_REGS \
+ : ((CLASS) != LO_REGS && (CLASS) != BASE_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + hard_regno_nregs (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS))
+
+#define THUMB_SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ (lra_in_progress ? NO_REGS \
+ : (CLASS) != LO_REGS && (CLASS) != BASE_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + hard_regno_nregs (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ /* Restrict which direct reloads are allowed for VFP/iWMMXt regs. */ \
+ ((TARGET_HARD_FLOAT && IS_VFP_CLASS (CLASS)) \
+ ? coproc_secondary_reload_class (MODE, X, FALSE) \
+ : (TARGET_IWMMXT && (CLASS) == IWMMXT_REGS) \
+ ? coproc_secondary_reload_class (MODE, X, TRUE) \
+ : TARGET_32BIT \
+ ? (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
+ ? GENERAL_REGS : NO_REGS) \
+ : THUMB_SECONDARY_OUTPUT_RELOAD_CLASS (CLASS, MODE, X))
+
+/* If we need to load shorts byte-at-a-time, then we need a scratch. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ /* Restrict which direct reloads are allowed for VFP/iWMMXt regs. */ \
+ ((TARGET_HARD_FLOAT && IS_VFP_CLASS (CLASS)) \
+ ? coproc_secondary_reload_class (MODE, X, FALSE) : \
+ (TARGET_IWMMXT && (CLASS) == IWMMXT_REGS) ? \
+ coproc_secondary_reload_class (MODE, X, TRUE) : \
+ (TARGET_32BIT ? \
+ (((CLASS) == IWMMXT_REGS || (CLASS) == IWMMXT_GR_REGS) \
+ && CONSTANT_P (X)) \
+ ? GENERAL_REGS : \
+ (((MODE) == HImode && ! arm_arch4 \
+ && (MEM_P (X) \
+ || ((REG_P (X) || GET_CODE (X) == SUBREG) \
+ && true_regnum (X) == -1))) \
+ ? GENERAL_REGS : NO_REGS) \
+ : THUMB_SECONDARY_INPUT_RELOAD_CLASS (CLASS, MODE, X)))
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+ ARM core regs are UNITS_PER_WORD bytes wide.
+ FIXME: Is this true for iWMMX? */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ (((CLASS) == VPR_REG) \
+ ? CEIL (GET_MODE_SIZE (MODE), 2) \
+ : ARM_NUM_REGS (MODE))
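+
+/* Worked values, illustrative only: for the 16-bit MVE predicate mode
+ V16BImode, CLASS_MAX_NREGS (VPR_REG, V16BImode) == CEIL (2, 2) == 1,
+ while CLASS_MAX_NREGS (GENERAL_REGS, DImode) == 8 / 4 == 2. */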
+
+/* If defined, gives a class of registers that cannot be used as the
+ operand of a SUBREG that changes the mode of the object illegally. */
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD 1
+
+/* Define this to nonzero if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* The amount of scratch space needed by _interwork_{r7,r11}_call_via_rN().
+ When present, it is one word in size, and sits at the top of the frame,
+ between the soft frame pointer and either r7 or r11.
+
+ We only need _interwork_rM_call_via_rN() for -mcaller-super-interworking,
+ and only then if some outgoing arguments are passed on the stack. It would
+ be tempting to also check whether the stack arguments are passed by indirect
+ calls, but there seems to be no reason in principle why a post-reload pass
+ couldn't convert a direct call into an indirect one. */
+#define CALLER_INTERWORKING_SLOT_SIZE \
+ (TARGET_CALLER_INTERWORKING \
+ && maybe_ne (crtl->outgoing_args_size, 0) \
+ ? UNITS_PER_WORD : 0)
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by. */
+/* The push insns do not do this rounding implicitly.
+ So don't define this. */
+/* #define PUSH_ROUNDING(NPUSHED) ROUND_UP_WORD (NPUSHED) */
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable crtl->outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) (TARGET_ARM ? 4 : 0)
+
+/* Amount of memory needed for an untyped call to save all possible return
+ registers. */
+#define APPLY_RESULT_SIZE arm_apply_result_size()
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* These bits describe the different types of function supported
+ by the ARM backend. They are mutually exclusive, i.e. a function cannot be both a
+ normal function and an interworked function, for example. Knowing the
+ type of a function is important for determining its prologue and
+ epilogue sequences.
+ Note value 7 is currently unassigned. Also note that the interrupt
+ function types all have bit 2 set, so that they can be tested for easily.
+ Note that 0 is deliberately chosen for ARM_FT_UNKNOWN so that when the
+ machine_function structure is initialized (to zero) func_type will
+ default to unknown. This will force the first use of arm_current_func_type
+ to call arm_compute_func_type. */
+#define ARM_FT_UNKNOWN 0 /* Type has not yet been determined. */
+#define ARM_FT_NORMAL 1 /* Your normal, straightforward function. */
+#define ARM_FT_INTERWORKED 2 /* A function that supports interworking. */
+#define ARM_FT_ISR 4 /* An interrupt service routine. */
+#define ARM_FT_FIQ 5 /* A fast interrupt service routine. */
+#define ARM_FT_EXCEPTION 6 /* An ARM exception handler (subcase of ISR). */
+
+#define ARM_FT_TYPE_MASK ((1 << 3) - 1)
+
+/* In addition functions can have several type modifiers,
+ outlined by these bit masks: */
+#define ARM_FT_INTERRUPT (1 << 2) /* Note overlap with FT_ISR and above. */
+#define ARM_FT_NAKED (1 << 3) /* No prologue or epilogue. */
+#define ARM_FT_VOLATILE (1 << 4) /* Does not return. */
+#define ARM_FT_NESTED (1 << 5) /* Embedded inside another func. */
+#define ARM_FT_STACKALIGN (1 << 6) /* Called with misaligned stack. */
+#define ARM_FT_CMSE_ENTRY (1 << 7) /* ARMv8-M non-secure entry function. */
+
+/* Some macros to test these flags. */
+#define ARM_FUNC_TYPE(t) (t & ARM_FT_TYPE_MASK)
+#define IS_INTERRUPT(t) (t & ARM_FT_INTERRUPT)
+#define IS_VOLATILE(t) (t & ARM_FT_VOLATILE)
+#define IS_NAKED(t) (t & ARM_FT_NAKED)
+#define IS_NESTED(t) (t & ARM_FT_NESTED)
+#define IS_STACKALIGN(t) (t & ARM_FT_STACKALIGN)
+#define IS_CMSE_ENTRY(t) (t & ARM_FT_CMSE_ENTRY)
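+
+/* Worked example (illustrative only): a naked FIQ handler would carry
+ func_type ARM_FT_FIQ | ARM_FT_NAKED (13); ARM_FUNC_TYPE then yields
+ ARM_FT_FIQ, while IS_INTERRUPT and IS_NAKED are both nonzero, since
+ bit 2 is set within ARM_FT_FIQ and bit 3 by ARM_FT_NAKED. */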
+
+
+/* Structure used to hold the function stack frame layout. Offsets are
+ relative to the stack pointer on function entry. Positive offsets are
+ in the direction of stack growth.
+ Only soft_frame is used in Thumb mode. */
+
+typedef struct GTY(()) arm_stack_offsets
+{
+ int saved_args; /* ARG_POINTER_REGNUM. */
+ int frame; /* ARM_HARD_FRAME_POINTER_REGNUM. */
+ int saved_regs;
+ int soft_frame; /* FRAME_POINTER_REGNUM. */
+ int locals_base; /* THUMB_HARD_FRAME_POINTER_REGNUM. */
+ int outgoing_args; /* STACK_POINTER_REGNUM. */
+ unsigned int saved_regs_mask;
+}
+arm_stack_offsets;
+
+#if !defined(GENERATOR_FILE) && !defined (USED_FOR_TARGET)
+/* A C structure for machine-specific, per-function data.
+ This is added to the cfun structure. */
+typedef struct GTY(()) machine_function
+{
+ /* Additional stack adjustment in __builtin_eh_throw. */
+ rtx eh_epilogue_sp_ofs;
+ /* Records if LR has to be saved for far jumps. */
+ int far_jump_used;
+ /* Records if ARG_POINTER was ever live. */
+ int arg_pointer_live;
+ /* Records if the save of LR has been eliminated. */
+ int lr_save_eliminated;
+ /* The size of the stack frame. Only valid after reload. */
+ arm_stack_offsets stack_offsets;
+ /* Records the type of the current function. */
+ unsigned long func_type;
+ /* Record if the function has a variable argument list. */
+ int uses_anonymous_args;
+ /* Records if sibcalls are blocked because an argument
+ register is needed to preserve stack alignment. */
+ int sibcall_blocked;
+ /* The PIC register for this function. This might be a pseudo. */
+ rtx pic_reg;
+ /* Labels for per-function Thumb call-via stubs. One per potential calling
+ register. We can never call via LR or PC. We can call via SP if a
+ trampoline happens to be on the top of the stack. */
+ rtx call_via[14];
+ /* Set to 1 when a return insn is output, this means that the epilogue
+ is not needed. */
+ int return_used_this_function;
+ /* When outputting Thumb-1 code, record the last insn that provides
+ information about condition codes, and the comparison operands. */
+ rtx thumb1_cc_insn;
+ rtx thumb1_cc_op0;
+ rtx thumb1_cc_op1;
+ /* Also record the CC mode that is supported. */
+ machine_mode thumb1_cc_mode;
+ /* Set to 1 after arm_reorg has started. */
+ int after_arm_reorg;
+ /* The number of bytes used to store the static chain register on the
+ stack, above the stack frame. */
+ int static_chain_stack_bytes;
+ /* Set to 1 when pointer authentication operation uses value of SP other
+ than the incoming stack pointer value. */
+ int pacspval_needed;
+}
+machine_function;
+#endif
+
+#define ARM_Q_BIT_READ (arm_q_bit_access ())
+#define ARM_GE_BITS_READ (arm_ge_bits_access ())
+
+/* As in the machine_function, a global set of call-via labels, for code
+ that is in text_section. */
+extern GTY(()) rtx thumb_call_via_label[14];
+
+/* The number of potential ways of assigning to a co-processor. */
+#define ARM_NUM_COPROC_SLOTS 1
+
+/* Enumeration of procedure calling standard variants. We don't really
+ support all of these yet. */
+enum arm_pcs
+{
+ ARM_PCS_AAPCS, /* Base standard AAPCS. */
+ ARM_PCS_AAPCS_VFP, /* Use VFP registers for floating point values. */
+ ARM_PCS_AAPCS_IWMMXT, /* Use iWMMXT registers for vectors. */
+ /* This must be the last AAPCS variant. */
+ ARM_PCS_AAPCS_LOCAL, /* Private call within this compilation unit. */
+ ARM_PCS_ATPCS, /* ATPCS. */
+ ARM_PCS_APCS, /* APCS (legacy Linux etc). */
+ ARM_PCS_UNKNOWN
+};
+
+/* Default procedure calling standard of current compilation unit. */
+extern enum arm_pcs arm_pcs_default;
+
+#if !defined (USED_FOR_TARGET)
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. */
+typedef struct
+{
+ /* This is the number of registers of arguments scanned so far. */
+ int nregs;
+ /* This is the number of iWMMXt register arguments scanned so far. */
+ int iwmmxt_nregs;
+ int named_count;
+ int nargs;
+ /* Which procedure call variant to use for this call. */
+ enum arm_pcs pcs_variant;
+
+ /* AAPCS related state tracking. */
+ int aapcs_arg_processed; /* No need to lay out this argument again. */
+ int aapcs_cprc_slot; /* Index of co-processor rules to handle
+ this argument, or -1 if using core
+ registers. */
+ int aapcs_ncrn;
+ int aapcs_next_ncrn;
+ rtx aapcs_reg; /* Register assigned to this argument. */
+ int aapcs_partial; /* How many bytes are passed in regs (if
+ split between core regs and stack).
+ Zero otherwise. */
+ int aapcs_cprc_failed[ARM_NUM_COPROC_SLOTS];
+ int can_split; /* Argument can be split between core regs
+ and the stack. */
+ /* Private data for tracking VFP register allocation. */
+ unsigned aapcs_vfp_regs_free;
+ unsigned aapcs_vfp_reg_alloc;
+ int aapcs_vfp_rcount;
+ MACHMODE aapcs_vfp_rmode;
+} CUMULATIVE_ARGS;
+#endif
+
+#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
+ (arm_pad_reg_upward (MODE, TYPE, FIRST) ? PAD_UPWARD : PAD_DOWNWARD)
+
+/* For AAPCS, padding should never be below the argument. For other ABIs,
+ * mimic the default. */
+#define PAD_VARARGS_DOWN \
+ ((TARGET_AAPCS_BASED) ? 0 : BYTES_BIG_ENDIAN)
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
+ arm_init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL))
+
+/* 1 if N is a possible register number for function argument passing.
+ On the ARM, r0-r3 are used to pass args. */
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ (IN_RANGE ((REGNO), 0, 3) \
+ || (TARGET_AAPCS_BASED && TARGET_HARD_FLOAT \
+ && IN_RANGE ((REGNO), FIRST_VFP_REGNUM, FIRST_VFP_REGNUM + 15)) \
+ || (TARGET_IWMMXT_ABI \
+ && IN_RANGE ((REGNO), FIRST_IWMMXT_REGNUM, FIRST_IWMMXT_REGNUM + 9)))
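+
+/* Worked example (illustrative only): r0-r3 always satisfy
+ FUNCTION_ARG_REGNO_P; under an AAPCS hard-float configuration so do
+ the first sixteen VFP registers (s0-s15), and under the iWMMXt ABI
+ the first ten iWMMXt registers. */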
+
+
+/* If your target environment doesn't prefix user functions with an
+ underscore, you may wish to re-define this to prevent any conflicts. */
+#ifndef ARM_MCOUNT_NAME
+#define ARM_MCOUNT_NAME "*mcount"
+#endif
+
+/* Call the function profiler with a given profile label. The Acorn
+ compiler puts this BEFORE the prolog but gcc puts it afterwards.
+ On the ARM the full profile code will look like:
+ .data
+ LP1
+ .word 0
+ .text
+ mov ip, lr
+ bl mcount
+ .word LP1
+
+ profile_function() in final.cc outputs the .data section, FUNCTION_PROFILER
+ will output the .text section.
+
+ The ``mov ip, lr'' seems like a good idea, to stick with the cc
+ convention; ``prof'' doesn't seem to mind.
+
+ Note - this version of the code is designed to work in both ARM and
+ Thumb modes. */
+#ifndef ARM_FUNCTION_PROFILER
+#define ARM_FUNCTION_PROFILER(STREAM, LABELNO) \
+{ \
+ char temp[20]; \
+ rtx sym; \
+ \
+ asm_fprintf (STREAM, "\tmov\t%r, %r\n\tbl\t", \
+ IP_REGNUM, LR_REGNUM); \
+ assemble_name (STREAM, ARM_MCOUNT_NAME); \
+ fputc ('\n', STREAM); \
+ ASM_GENERATE_INTERNAL_LABEL (temp, "LP", LABELNO); \
+ sym = gen_rtx_SYMBOL_REF (Pmode, temp); \
+ assemble_aligned_integer (UNITS_PER_WORD, sym); \
+}
+#endif
+
+#ifdef THUMB_FUNCTION_PROFILER
+#define FUNCTION_PROFILER(STREAM, LABELNO) \
+ if (TARGET_ARM) \
+ ARM_FUNCTION_PROFILER (STREAM, LABELNO) \
+ else \
+ THUMB_FUNCTION_PROFILER (STREAM, LABELNO)
+#else
+#define FUNCTION_PROFILER(STREAM, LABELNO) \
+ ARM_FUNCTION_PROFILER (STREAM, LABELNO)
+#endif
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero.
+
+ On the ARM, the function epilogue recovers the stack pointer from the
+ frame. */
+#define EXIT_IGNORE_STACK 1
+
+#define EPILOGUE_USES(REGNO) (epilogue_completed && (REGNO) == LR_REGNUM)
+
+/* Determine if the epilogue should be output as RTL.
+ You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
+#define USE_RETURN_INSN(ISCOND) \
+ (TARGET_32BIT ? use_return_insn (ISCOND, NULL) : 0)
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ We have two registers that can be eliminated on the ARM. First, the
+ arg pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the pseudo frame pointer register can always
+ be eliminated; it is replaced with either the stack or the real frame
+ pointer. Note we have to use {ARM|THUMB}_HARD_FRAME_POINTER_REGNUM
+ because the definition of HARD_FRAME_POINTER_REGNUM is not a constant. */
+
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM },\
+ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM },\
+ { ARG_POINTER_REGNUM, ARM_HARD_FRAME_POINTER_REGNUM },\
+ { ARG_POINTER_REGNUM, THUMB_HARD_FRAME_POINTER_REGNUM },\
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM },\
+ { FRAME_POINTER_REGNUM, ARM_HARD_FRAME_POINTER_REGNUM },\
+ { FRAME_POINTER_REGNUM, THUMB_HARD_FRAME_POINTER_REGNUM }}
+
+/* Define the offset between two registers, one to be eliminated, and the
+ other its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ if (TARGET_ARM) \
+ (OFFSET) = arm_compute_initial_elimination_offset (FROM, TO); \
+ else \
+ (OFFSET) = thumb_compute_initial_elimination_offset (FROM, TO)
+
+/* Special case handling of the location of arguments passed on the stack. */
+#define DEBUGGER_ARG_OFFSET(value, addr) value ? value : arm_debugger_arg_offset (value, addr)
+
+/* Initialize data used by insn expanders. This is called from insn_emit,
+ once for every function before code is generated. */
+#define INIT_EXPANDERS arm_init_expanders ()
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE (TARGET_FDPIC ? 32 : (TARGET_32BIT ? 16 : 20))
+
+/* Alignment required for a trampoline in bits. */
+#define TRAMPOLINE_ALIGNMENT 32
+
+/* Addressing modes, and classification of registers for them. */
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_INCREMENT TARGET_32BIT
+#define HAVE_POST_DECREMENT TARGET_32BIT
+#define HAVE_PRE_DECREMENT TARGET_32BIT
+#define HAVE_PRE_MODIFY_DISP TARGET_32BIT
+#define HAVE_POST_MODIFY_DISP TARGET_32BIT
+#define HAVE_PRE_MODIFY_REG TARGET_32BIT
+#define HAVE_POST_MODIFY_REG TARGET_32BIT
+
+enum arm_auto_incmodes
+ {
+ ARM_POST_INC,
+ ARM_PRE_INC,
+ ARM_POST_DEC,
+ ARM_PRE_DEC
+ };
+
+#define ARM_AUTOINC_VALID_FOR_MODE_P(mode, code) \
+ (TARGET_32BIT && arm_autoinc_modes_ok_p (mode, code))
+#define USE_LOAD_POST_INCREMENT(mode) \
+ ARM_AUTOINC_VALID_FOR_MODE_P(mode, ARM_POST_INC)
+#define USE_LOAD_PRE_INCREMENT(mode) \
+ ARM_AUTOINC_VALID_FOR_MODE_P(mode, ARM_PRE_INC)
+#define USE_LOAD_POST_DECREMENT(mode) \
+ ARM_AUTOINC_VALID_FOR_MODE_P(mode, ARM_POST_DEC)
+#define USE_LOAD_PRE_DECREMENT(mode) \
+ ARM_AUTOINC_VALID_FOR_MODE_P(mode, ARM_PRE_DEC)
+
+#define USE_STORE_PRE_DECREMENT(mode) USE_LOAD_PRE_DECREMENT(mode)
+#define USE_STORE_PRE_INCREMENT(mode) USE_LOAD_PRE_INCREMENT(mode)
+#define USE_STORE_POST_DECREMENT(mode) USE_LOAD_POST_DECREMENT(mode)
+#define USE_STORE_POST_INCREMENT(mode) USE_LOAD_POST_INCREMENT(mode)
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg. */
+#define TEST_REGNO(R, TEST, VALUE) \
+ ((R TEST VALUE) \
+ || (reg_renumber && ((unsigned) reg_renumber[R] TEST VALUE)))
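+
+/* Worked example (illustrative only): TEST_REGNO (R, ==,
+ STACK_POINTER_REGNUM) holds either when R itself is the stack pointer
+ or when R is a pseudo that reg_renumber has mapped onto it. */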
+
+/* Don't allow the pc to be used. */
+#define ARM_REGNO_OK_FOR_BASE_P(REGNO) \
+ (TEST_REGNO (REGNO, <, PC_REGNUM) \
+ || TEST_REGNO (REGNO, ==, FRAME_POINTER_REGNUM) \
+ || TEST_REGNO (REGNO, ==, ARG_POINTER_REGNUM))
+
+#define THUMB1_REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ (TEST_REGNO (REGNO, <=, LAST_LO_REGNUM) \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && TEST_REGNO (REGNO, ==, STACK_POINTER_REGNUM)))
+
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ (TARGET_THUMB1 \
+ ? THUMB1_REGNO_MODE_OK_FOR_BASE_P (REGNO, MODE) \
+ : ARM_REGNO_OK_FOR_BASE_P (REGNO))
+
+/* Nonzero if X can be the base register in a reg+reg addressing mode.
+ For Thumb, we cannot use SP + reg, so reject SP. */
+#define REGNO_MODE_OK_FOR_REG_BASE_P(X, MODE) \
+ REGNO_MODE_OK_FOR_BASE_P (X, QImode)
+
+/* For ARM code, we don't care about the mode, but for Thumb, the index
+ must be suitable for use in a QImode load. */
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ (REGNO_MODE_OK_FOR_BASE_P (REGNO, QImode) \
+ && !TEST_REGNO (REGNO, ==, STACK_POINTER_REGNUM))
+
+/* Maximum number of registers that can appear in a valid memory address.
+ Shifts in addresses can't be by a register. */
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+/* XXX We can address any constant, eventually... */
+/* ??? Should the TARGET_ARM here also apply to thumb2? */
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && (CONSTANT_POOL_ADDRESS_P (X) \
+ || (TARGET_ARM && optimize > 0 && SYMBOL_REF_FLAG (X))))
+
+/* True if SYMBOL + OFFSET constants must refer to something within
+ SYMBOL's section. */
+#define ARM_OFFSETS_MUST_BE_WITHIN_SECTIONS_P 0
+
+/* Nonzero if the target requires all absolute relocations to be R_ARM_ABS32. */
+#ifndef TARGET_DEFAULT_WORD_RELOCATIONS
+#define TARGET_DEFAULT_WORD_RELOCATIONS 0
+#endif
+
+#ifndef SUBTARGET_NAME_ENCODING_LENGTHS
+#define SUBTARGET_NAME_ENCODING_LENGTHS
+#endif
+
+/* This is a C fragment for the inside of a switch statement.
+ Each case label should return the number of characters to
+ be stripped from the start of a function's name, if that
+ name starts with the indicated character. */
+#define ARM_NAME_ENCODING_LENGTHS \
+ case '*': return 1; \
+ SUBTARGET_NAME_ENCODING_LENGTHS
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+ arm_asm_output_labelref (FILE, NAME)
+
+/* Output IT instructions for conditionally executed Thumb-2 instructions. */
+#define ASM_OUTPUT_OPCODE(STREAM, PTR) \
+ if (TARGET_THUMB2) \
+ thumb2_asm_output_opcode (STREAM);
+
+/* The EABI specifies that constructors should go in .init_array.
+ Other targets use .ctors for compatibility. */
+#ifndef ARM_EABI_CTORS_SECTION_OP
+#define ARM_EABI_CTORS_SECTION_OP \
+ "\t.section\t.init_array,\"aw\",%init_array"
+#endif
+#ifndef ARM_EABI_DTORS_SECTION_OP
+#define ARM_EABI_DTORS_SECTION_OP \
+ "\t.section\t.fini_array,\"aw\",%fini_array"
+#endif
+#define ARM_CTORS_SECTION_OP \
+ "\t.section\t.ctors,\"aw\",%progbits"
+#define ARM_DTORS_SECTION_OP \
+ "\t.section\t.dtors,\"aw\",%progbits"
+
+/* Define CTORS_SECTION_ASM_OP. */
+#undef CTORS_SECTION_ASM_OP
+#undef DTORS_SECTION_ASM_OP
+#ifndef IN_LIBGCC2
+# define CTORS_SECTION_ASM_OP \
+ (TARGET_AAPCS_BASED ? ARM_EABI_CTORS_SECTION_OP : ARM_CTORS_SECTION_OP)
+# define DTORS_SECTION_ASM_OP \
+ (TARGET_AAPCS_BASED ? ARM_EABI_DTORS_SECTION_OP : ARM_DTORS_SECTION_OP)
+#else /* !defined (IN_LIBGCC2) */
+/* In libgcc, CTORS_SECTION_ASM_OP must be a compile-time constant,
+ so we cannot use the definition above. */
+# ifdef __ARM_EABI__
+/* The .ctors section is not part of the EABI, so we do not define
+ CTORS_SECTION_ASM_OP when in libgcc; that prevents crtstuff
+ from trying to use it. We do define it when doing normal
+ compilation, as .init_array can be used instead of .ctors. */
+/* There is no need to emit begin or end markers when using
+ init_array; the dynamic linker will compute the size of the
+ array itself based on special symbols created by the static
+ linker. However, we do need to arrange to set up
+ exception-handling here. */
+# define CTOR_LIST_BEGIN asm (ARM_EABI_CTORS_SECTION_OP)
+# define CTOR_LIST_END /* empty */
+# define DTOR_LIST_BEGIN asm (ARM_EABI_DTORS_SECTION_OP)
+# define DTOR_LIST_END /* empty */
+# else /* !defined (__ARM_EABI__) */
+# define CTORS_SECTION_ASM_OP ARM_CTORS_SECTION_OP
+# define DTORS_SECTION_ASM_OP ARM_DTORS_SECTION_OP
+# endif /* !defined (__ARM_EABI__) */
+#endif /* !defined (IN_LIBGCC2) */
+
+/* True if the operating system can merge entities with vague linkage
+ (e.g., symbols in COMDAT group) during dynamic linking. */
+#ifndef TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P
+#define TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P true
+#endif
+
+#define ARM_OUTPUT_FN_UNWIND(F, PROLOGUE) arm_output_fn_unwind (F, PROLOGUE)
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+ Thumb-2 has the same restrictions as arm. */
+#ifndef REG_OK_STRICT
+
+#define ARM_REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) <= LAST_ARM_REGNUM \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM \
+ || REGNO (X) == ARG_POINTER_REGNUM)
+
+#define ARM_REG_OK_FOR_INDEX_P(X) \
+ ((REGNO (X) <= LAST_ARM_REGNUM \
+ && REGNO (X) != STACK_POINTER_REGNUM) \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM \
+ || REGNO (X) == ARG_POINTER_REGNUM)
+
+#define THUMB1_REG_MODE_OK_FOR_BASE_P(X, MODE) \
+ (REGNO (X) <= LAST_LO_REGNUM \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && (REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == hard_frame_pointer_rtx \
+ || (X) == arg_pointer_rtx)))
+
+#define REG_STRICT_P 0
+
+#else /* REG_OK_STRICT */
+
+#define ARM_REG_OK_FOR_BASE_P(X) \
+ ARM_REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#define ARM_REG_OK_FOR_INDEX_P(X) \
+ ARM_REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define THUMB1_REG_MODE_OK_FOR_BASE_P(X, MODE) \
+ THUMB1_REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
+
+#define REG_STRICT_P 1
+
+#endif /* REG_OK_STRICT */
+
+/* Now define some helpers in terms of the above. */
+
+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
+ (TARGET_THUMB1 \
+ ? THUMB1_REG_MODE_OK_FOR_BASE_P (X, MODE) \
+ : ARM_REG_OK_FOR_BASE_P (X))
+
+/* For 16-bit Thumb, a valid index register is anything that can be used in
+ a byte load instruction. */
+#define THUMB1_REG_OK_FOR_INDEX_P(X) \
+ THUMB1_REG_MODE_OK_FOR_BASE_P (X, QImode)
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. On the Thumb, the stack pointer
+ is not suitable. */
+#define REG_OK_FOR_INDEX_P(X) \
+ (TARGET_THUMB1 \
+ ? THUMB1_REG_OK_FOR_INDEX_P (X) \
+ : ARM_REG_OK_FOR_INDEX_P (X))
+
+/* Nonzero if X can be the base register in a reg+reg addressing mode.
+ For Thumb, we cannot use SP + reg, so reject SP. */
+#define REG_MODE_OK_FOR_REG_BASE_P(X, MODE) \
+ REG_OK_FOR_INDEX_P (X)
+
+#define ARM_BASE_REGISTER_RTX_P(X) \
+ (REG_P (X) && ARM_REG_OK_FOR_BASE_P (X))
+
+#define ARM_INDEX_REGISTER_RTX_P(X) \
+ (REG_P (X) && ARM_REG_OK_FOR_INDEX_P (X))
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE Pmode
+
+#define CASE_VECTOR_PC_RELATIVE ((TARGET_THUMB2 \
+ || (TARGET_THUMB1 \
+ && (optimize_size || flag_pic))) \
+ && (!target_pure_code))
+
+
+#define CASE_VECTOR_SHORTEN_MODE(min, max, body) \
+ (TARGET_THUMB1 \
+ ? (min >= 0 && max < 512 \
+ ? (ADDR_DIFF_VEC_FLAGS (body).offset_unsigned = 1, QImode) \
+ : min >= -256 && max < 256 \
+ ? (ADDR_DIFF_VEC_FLAGS (body).offset_unsigned = 0, QImode) \
+ : min >= 0 && max < 8192 \
+ ? (ADDR_DIFF_VEC_FLAGS (body).offset_unsigned = 1, HImode) \
+ : min >= -4096 && max < 4096 \
+ ? (ADDR_DIFF_VEC_FLAGS (body).offset_unsigned = 0, HImode) \
+ : SImode) \
+ : ((min < 0 || max >= 0x20000 || !TARGET_THUMB2) ? SImode \
+ : (max >= 0x200) ? HImode \
+ : QImode))
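+
+/* Worked example (illustrative only): a Thumb-1 table with offsets in
+ [0, 400) takes the first arm (max < 512) and is emitted as QImode
+ entries with unsigned offsets; offsets in [-100, 100) select signed
+ QImode; a range outside [-4096, 4096) falls back to SImode. */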
+
+/* Signed 'char' is most compatible, but RISC OS wants it unsigned;
+ unsigned is probably best, but may break some code. */
+#ifndef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 0
+#endif
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+#undef MOVE_RATIO
+#define MOVE_RATIO(speed) (arm_tune_xscale ? 4 : 2)
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS 1
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, UNKNOWN if none. */
+#define LOAD_EXTEND_OP(MODE) \
+ (TARGET_THUMB ? ZERO_EXTEND : \
+ ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND \
+ : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : UNKNOWN)))
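+
+/* Worked example (illustrative only): Thumb sub-word loads always
+ zero-extend; in ARM state, QImode always zero-extends (LDRB) and
+ HImode zero-extends from ARMv4 on (LDRH), while on earlier cores a
+ big-endian HImode load sign-extends and a little-endian one makes no
+ guarantee. */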
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Immediate shift counts are truncated by the output routines (or was it
+ the assembler?). Shift counts in a register are truncated by ARM. Note
+ that the native compiler puts too large (> 32) immediate shift counts
+ into a register and shifts by the register, letting the ARM decide what
+ to do instead of doing that itself. */
+/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that
+ code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
+ On the arm, Y in a register is used modulo 256 for the shift. Only for
+ rotates is modulo 32 used. */
+/* #define SHIFT_COUNT_TRUNCATED 1 */
+
+/* Calling from registers is a massive pain. */
+#define NO_FUNCTION_CSE 1
+
+/* The machine modes of pointers and functions. */
+#define Pmode SImode
+#define FUNCTION_MODE Pmode
+
+#define ARM_FRAME_RTX(X) \
+ ( (X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+/* Try to generate sequences that don't involve branches, we can then use
+ conditional instructions. */
+#define BRANCH_COST(speed_p, predictable_p) \
+ ((arm_branch_cost != -1) ? arm_branch_cost : \
+ (current_tune->branch_cost (speed_p, predictable_p)))
+
+/* False if short circuit operation is preferred. */
+#define LOGICAL_OP_NON_SHORT_CIRCUIT \
+ ((optimize_size) \
+ ? (TARGET_THUMB ? false : true) \
+ : TARGET_THUMB ? static_cast<bool> (current_tune->logical_op_non_short_circuit_thumb) \
+ : static_cast<bool> (current_tune->logical_op_non_short_circuit_arm))
+
+
+/* Position Independent Code. */
+/* We decide which register to use based on the compilation options and
+ the assembler in use; this is more general than the APCS restriction of
+ using sb (r9) all the time. */
+extern unsigned arm_pic_register;
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. */
+#define PIC_OFFSET_TABLE_REGNUM arm_pic_register
+
+/* For FDPIC, the FDPIC register is call-clobbered (otherwise PLT
+ entries would need to handle saving and restoring it). */
+#define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED TARGET_FDPIC
+
+/* We can't directly access anything that contains a symbol,
+ nor can we indirect via the constant pool. One exception is
+ UNSPEC_TLS, which is always PIC. */
+#define LEGITIMATE_PIC_OPERAND_P(X) \
+ (!(symbol_mentioned_p (X) \
+ || label_mentioned_p (X) \
+ || (GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X) \
+ && (symbol_mentioned_p (get_pool_constant (X)) \
+ || label_mentioned_p (get_pool_constant (X))))) \
+ || tls_mentioned_p (X))
+
+/* We may want to save the PIC register if it is a dedicated one. */
+#define PIC_REGISTER_MAY_NEED_SAVING \
+ (flag_pic \
+ && !TARGET_SINGLE_PIC_BASE \
+ && !TARGET_FDPIC \
+ && arm_pic_register != INVALID_REGNUM)
+
+/* We need to know when we are making a constant pool; this determines
+ whether data needs to be in the GOT or can be referenced via a GOT
+ offset. */
+extern int making_const_table;
+
+/* Handle pragmas for compatibility with Intel's compilers. */
+/* Also abuse this to register additional C specific EABI attributes. */
+#define REGISTER_TARGET_PRAGMAS() do { \
+ c_register_pragma (0, "long_calls", arm_pr_long_calls); \
+ c_register_pragma (0, "no_long_calls", arm_pr_no_long_calls); \
+ c_register_pragma (0, "long_calls_off", arm_pr_long_calls_off); \
+ arm_lang_object_attributes_init(); \
+ arm_register_target_pragmas(); \
+} while (0)
+
+/* Condition code information. */
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison. */
+
+#define SELECT_CC_MODE(OP, X, Y) arm_select_cc_mode (OP, X, Y)
+
+#define REVERSIBLE_CC_MODE(MODE) 1
+
+#define REVERSE_CONDITION(CODE,MODE) \
+ (((MODE) == CCFPmode || (MODE) == CCFPEmode) \
+ ? reverse_condition_maybe_unordered (code) \
+ : reverse_condition (code))
+
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+ ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
+#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+ ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
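+
+/* The value 2 returned above indicates that the result at zero (the
+ unit bit size of the mode) is guaranteed for the RTL clz/ctz
+ expressions as well as for the machine instruction, so optimizers may
+ rely on it. */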
+
+#define CC_STATUS_INIT \
+ do { cfun->machine->thumb1_cc_insn = NULL_RTX; } while (0)
+
+#undef ASM_APP_ON
+#define ASM_APP_ON (inline_asm_unified ? "\t.syntax unified\n" : \
+ "\t.syntax divided\n")
+
+#undef ASM_APP_OFF
+#define ASM_APP_OFF (TARGET_ARM ? "\t.arm\n\t.syntax unified\n" : \
+ "\t.thumb\n\t.syntax unified\n")
+
+/* Output a push or a pop instruction (only used when profiling).
+ We can't push STATIC_CHAIN_REGNUM (r12) directly with Thumb-1. We know
+ that ASM_OUTPUT_REG_PUSH will be matched with ASM_OUTPUT_REG_POP, and
+ that r7 isn't used by the function profiler, so we can use it as a
+ scratch reg. WARNING: This isn't safe in the general case! It may be
+ sensitive to future changes in final.cc:profile_function. */
+#define ASM_OUTPUT_REG_PUSH(STREAM, REGNO) \
+ do \
+ { \
+ if (TARGET_THUMB1 \
+ && (REGNO) == STATIC_CHAIN_REGNUM) \
+ { \
+ asm_fprintf (STREAM, "\tpush\t{r7}\n"); \
+ asm_fprintf (STREAM, "\tmov\tr7, %r\n", REGNO);\
+ asm_fprintf (STREAM, "\tpush\t{r7}\n"); \
+ } \
+ else \
+ asm_fprintf (STREAM, "\tpush {%r}\n", REGNO); \
+ } while (0)
+
+
+/* See comment for ASM_OUTPUT_REG_PUSH concerning Thumb-1 issue. */
+#define ASM_OUTPUT_REG_POP(STREAM, REGNO) \
+ do \
+ { \
+ if (TARGET_THUMB1 \
+ && (REGNO) == STATIC_CHAIN_REGNUM) \
+ { \
+ asm_fprintf (STREAM, "\tpop\t{r7}\n"); \
+ asm_fprintf (STREAM, "\tmov\t%r, r7\n", REGNO);\
+ asm_fprintf (STREAM, "\tpop\t{r7}\n"); \
+ } \
+ else \
+ asm_fprintf (STREAM, "\tpop {%r}\n", REGNO); \
+ } while (0)
+
+#define ADDR_VEC_ALIGN(JUMPTABLE) \
+ ((TARGET_THUMB && GET_MODE (PATTERN (JUMPTABLE)) == SImode) ? 2 : 0)
+
+/* Alignment for case labels comes from ADDR_VEC_ALIGN; avoid the
+ default alignment from elfos.h. */
+#undef ASM_OUTPUT_BEFORE_CASE_LABEL
+#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) /* Empty. */
+
+#define LABEL_ALIGN_AFTER_BARRIER(LABEL) \
+ (GET_CODE (PATTERN (prev_active_insn (LABEL))) == ADDR_DIFF_VEC \
+ ? 1 : 0)
+
+#define ARM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) \
+ arm_declare_function_name ((STREAM), (NAME), (DECL));
+
+/* For aliases of functions we use .thumb_set instead. */
+#define ASM_OUTPUT_DEF_FROM_DECLS(FILE, DECL1, DECL2) \
+ do \
+ { \
+ const char *const LABEL1 = XSTR (XEXP (DECL_RTL (decl), 0), 0); \
+ const char *const LABEL2 = IDENTIFIER_POINTER (DECL2); \
+ \
+ if (TARGET_THUMB && TREE_CODE (DECL1) == FUNCTION_DECL) \
+ { \
+ fprintf (FILE, "\t.thumb_set "); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } \
+ else \
+ ASM_OUTPUT_DEF (FILE, LABEL1, LABEL2); \
+ } \
+ while (0)
+
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+/* To support -falign-* switches we need to use .p2align so
+ that alignment directives in code sections will be padded
+ with no-op instructions, rather than zeroes. */
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE, LOG, MAX_SKIP) \
+ if ((LOG) != 0) \
+ { \
+ if ((MAX_SKIP) == 0) \
+ fprintf ((FILE), "\t.p2align %d\n", (int) (LOG)); \
+ else \
+ fprintf ((FILE), "\t.p2align %d,,%d\n", \
+ (int) (LOG), (int) (MAX_SKIP)); \
+ }
+#endif
+
+/* Add two bytes to the length of conditionally executed Thumb-2
+ instructions for the IT instruction. */
+#define ADJUST_INSN_LENGTH(insn, length) \
+ if (TARGET_THUMB2 && GET_CODE (PATTERN (insn)) == COND_EXEC) \
+ length += 2;
+
+/* Only perform branch elimination (by making instructions conditional) if
+ we're optimizing. For Thumb-2 check if any IT instructions need
+ outputting. */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ if (TARGET_ARM && optimize) \
+ arm_final_prescan_insn (INSN); \
+ else if (TARGET_THUMB2) \
+ thumb2_final_prescan_insn (INSN); \
+ else if (TARGET_THUMB1) \
+ thumb1_final_prescan_insn (INSN)
+
+#define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \
+ (HOST_BITS_PER_WIDE_INT <= 32 ? (unsigned HOST_WIDE_INT) (x) \
+ : ((((unsigned HOST_WIDE_INT)(x)) & (unsigned HOST_WIDE_INT) 0xffffffff) |\
+ ((((unsigned HOST_WIDE_INT)(x)) & (unsigned HOST_WIDE_INT) 0x80000000) \
+ ? ((~ (unsigned HOST_WIDE_INT) 0) \
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \
+ : 0))))
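+
+/* Worked example (illustrative only): with a 64-bit HOST_WIDE_INT,
+ ARM_SIGN_EXTEND (0x80000000) yields (HOST_WIDE_INT) 0xffffffff80000000
+ while ARM_SIGN_EXTEND (0x7fffffff) yields 0x7fffffff, i.e. the 32-bit
+ target value sign-extended into the host-wide representation. */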
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ arm_return_addr (COUNT, FRAME)
+
+/* Mask of the bits in the PC that contain the real return address
+ when running in 26-bit mode. */
+#define RETURN_ADDR_MASK26 (0x03fffffc)
+
+/* Pick up the return address upon entry to a procedure. Used for
+ dwarf2 unwind information. This also enables the table driven
+ mechanism. */
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LR_REGNUM)
+
+/* Used to mask out junk bits from the return address, such as
+ processor state, interrupt status, condition codes and the like. */
+#define MASK_RETURN_ADDR \
+ /* If we are generating code for an ARM2/ARM3 machine or for an ARM6 \
+ in 26 bit mode, the condition codes must be masked out of the \
+ return address. This does not apply to ARM6 and later processors \
+ when running in 32 bit mode. */ \
+ ((arm_arch4 || TARGET_THUMB) \
+ ? (gen_int_mode ((unsigned long)0xffffffff, Pmode)) \
+ : arm_gen_return_addr_mask ())
+
+
+/* Do not emit .note.GNU-stack by default. */
+#ifndef NEED_INDICATE_EXEC_STACK
+#define NEED_INDICATE_EXEC_STACK 0
+#endif
+
+#define TARGET_ARM_ARCH \
+ (arm_base_arch) \
+
+/* The highest Thumb instruction set version supported by the chip. */
+#define TARGET_ARM_ARCH_ISA_THUMB \
+ (arm_arch_thumb2 ? 2 : (arm_arch_thumb1 ? 1 : 0))
+
+/* Expands to an upper-case char of the target's architectural
+ profile. */
+#define TARGET_ARM_ARCH_PROFILE \
+ (arm_active_target.profile)
+
+/* Bit-field indicating what size LDREX/STREX loads/stores are available.
+ Bit 0 for bytes, up to bit 3 for double-words. */
+#define TARGET_ARM_FEATURE_LDREX \
+ ((TARGET_HAVE_LDREX ? 4 : 0) \
+ | (TARGET_HAVE_LDREXBH ? 3 : 0) \
+ | (TARGET_HAVE_LDREXD ? 8 : 0))
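+
+/* Worked example (illustrative only): a core providing all of LDREX,
+ LDREXB/LDREXH and LDREXD yields 4 | 3 | 8 == 15, i.e. bits 0-3 set
+ for byte, halfword, word and doubleword exclusives. */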
+
+/* Set as a bit mask indicating the available widths of hardware floating
+ point types. Where bit 1 indicates 16-bit support, bit 2 indicates
+ 32-bit support, bit 3 indicates 64-bit support. */
+#define TARGET_ARM_FP \
+ (!TARGET_SOFT_FLOAT ? (TARGET_VFP_SINGLE ? 4 \
+ : (TARGET_VFP_DOUBLE ? (TARGET_FP16 ? 14 : 12) : 0)) \
+ : 0)
+
+
+/* Set as a bit mask indicating the available widths of floating point
+ types for hardware NEON floating point. This is the same as
+ TARGET_ARM_FP without the 64-bit bit set. */
+#define TARGET_NEON_FP \
+ (TARGET_NEON ? (TARGET_ARM_FP & (0xff ^ 0x08)) \
+ : 0)
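+
+/* Worked example (illustrative only): a double-precision FPU with FP16
+ gives TARGET_ARM_FP == 14 (bits 1-3: 16-, 32- and 64-bit); with NEON
+ enabled, TARGET_NEON_FP masks off the 64-bit bit, leaving 6. */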
+
+/* Name of the automatic fpu-selection option. */
+#define FPUTYPE_AUTO "auto"
+
+/* The maximum number of parallel loads or stores we support in an ldm/stm
+ instruction. */
+#define MAX_LDM_STM_OPS 4
+
+extern const char *arm_rewrite_mcpu (int argc, const char **argv);
+extern const char *arm_rewrite_march (int argc, const char **argv);
+extern const char *arm_asm_auto_mfpu (int argc, const char **argv);
+#define ASM_CPU_SPEC_FUNCTIONS \
+ { "rewrite_mcpu", arm_rewrite_mcpu }, \
+ { "rewrite_march", arm_rewrite_march }, \
+ { "asm_auto_mfpu", arm_asm_auto_mfpu },
+
+#define ASM_CPU_SPEC \
+ " %{mfpu=auto:%<mfpu=auto %:asm_auto_mfpu(%{march=*: arch %*})}" \
+ " %{mcpu=generic-*:-march=%:rewrite_march(%{mcpu=generic-*:%*});" \
+ " march=*:-march=%:rewrite_march(%{march=*:%*});" \
+ " mcpu=*:-mcpu=%:rewrite_mcpu(%{mcpu=*:%*})" \
+ " }"
+
+extern const char *arm_target_mode (int argc, const char **argv);
+#define TARGET_MODE_SPEC_FUNCTIONS \
+ { "target_mode_check", arm_target_mode },
+
+/* -mcpu=native handling only makes sense with compiler running on
+ an ARM chip. */
+#if defined(__arm__)
+extern const char *host_detect_local_cpu (int argc, const char **argv);
+#define HAVE_LOCAL_CPU_DETECT
+# define MCPU_MTUNE_NATIVE_FUNCTIONS \
+ { "local_cpu_detect", host_detect_local_cpu },
+# define MCPU_MTUNE_NATIVE_SPECS \
+ " %{march=native:%<march=native %:local_cpu_detect(arch)}" \
+ " %{mcpu=native:%<mcpu=native %:local_cpu_detect(cpu)}" \
+ " %{mtune=native:%<mtune=native %:local_cpu_detect(tune)}"
+#else
+# define MCPU_MTUNE_NATIVE_FUNCTIONS
+# define MCPU_MTUNE_NATIVE_SPECS ""
+#endif
+
+const char *arm_canon_arch_option (int argc, const char **argv);
+const char *arm_canon_arch_multilib_option (int argc, const char **argv);
+
+#define CANON_ARCH_SPEC_FUNCTION \
+ { "canon_arch", arm_canon_arch_option },
+
+#define CANON_ARCH_MULTILIB_SPEC_FUNCTION \
+ { "canon_arch_multilib", arm_canon_arch_multilib_option },
+
+const char *arm_be8_option (int argc, const char **argv);
+#define BE8_SPEC_FUNCTION \
+ { "be8_linkopt", arm_be8_option },
+
+# define EXTRA_SPEC_FUNCTIONS \
+ MCPU_MTUNE_NATIVE_FUNCTIONS \
+ ASM_CPU_SPEC_FUNCTIONS \
+ CANON_ARCH_SPEC_FUNCTION \
+ CANON_ARCH_MULTILIB_SPEC_FUNCTION \
+ TARGET_MODE_SPEC_FUNCTIONS \
+ BE8_SPEC_FUNCTION
+
+/* Automatically add -mthumb for Thumb-only targets if mode isn't specified
+ via the configuration option --with-mode or via the command line. The
+ function target_mode_check is called to do the check with either:
+ - an array of -march values if any is given;
+ - an array of -mcpu values if any is given;
+ - an empty array. */
+#define TARGET_MODE_SPECS \
+ " %{!marm:%{!mthumb:%:target_mode_check(%{march=*:arch %*;mcpu=*:cpu %*;:})}}"
+
+/* Generate a canonical string to represent the architecture selected. */
+#define ARCH_CANONICAL_SPECS \
+ " -march=%:canon_arch(%{mcpu=*: cpu %*} " \
+ " %{march=*: arch %*} " \
+ " %{mfpu=*: fpu %*} " \
+ " %{mfloat-abi=*: abi %*}" \
+ " %<march=*) "
+
+/* Generate a canonical string to represent the architecture selected ignoring
+ the options not required for multilib linking. */
+#define MULTILIB_ARCH_CANONICAL_SPECS \
+ "-mlibarch=%:canon_arch_multilib(%{mcpu=*: cpu %*} " \
+ " %{march=*: arch %*} " \
+ " %{mfpu=*: fpu %*} " \
+ " %{mfloat-abi=*: abi %*}" \
+ " %<mlibarch=*) "
+
+/* Complete set of specs for the driver. Commas separate the
+ individual rules so that any option suppression (%<opt...) is
+ completed before starting subsequent rules. */
+#define DRIVER_SELF_SPECS \
+ MCPU_MTUNE_NATIVE_SPECS, \
+ TARGET_MODE_SPECS, \
+ MULTILIB_ARCH_CANONICAL_SPECS, \
+ ARCH_CANONICAL_SPECS
+
+#define TARGET_SUPPORTS_WIDE_INT 1
+
+/* For switching between functions with different target attributes. */
+#define SWITCHABLE_TARGET 1
+
+/* Define SECTION_ARM_PURECODE as the ARM specific section attribute
+ representation for SHF_ARM_PURECODE in GCC. */
+#define SECTION_ARM_PURECODE SECTION_MACH_DEP
+
+#endif /* ! GCC_ARM_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/bpabi.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/bpabi.h
new file mode 100644
index 0000000..aa78c64
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/bpabi.h
@@ -0,0 +1,135 @@
+/* Configuration file for ARM BPABI targets.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Use the AAPCS ABI by default. */
+#define ARM_DEFAULT_ABI ARM_ABI_AAPCS
+
+/* Assume that AAPCS ABIs should adhere to the full BPABI. */
+#define TARGET_BPABI (TARGET_AAPCS_BASED)
+
+/* BPABI targets use EABI frame unwinding tables. */
+#undef ARM_UNWIND_INFO
+#define ARM_UNWIND_INFO 1
+
+/* TARGET_BIG_ENDIAN_DEFAULT is set in
+ config.gcc for big endian configurations. */
+#if TARGET_BIG_ENDIAN_DEFAULT
+#define TARGET_ENDIAN_DEFAULT MASK_BIG_END
+#else
+#define TARGET_ENDIAN_DEFAULT 0
+#endif
+
+/* EABI targets should enable interworking by default. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_INTERWORK | TARGET_ENDIAN_DEFAULT)
+
+/* The ARM BPABI functions return a boolean; they use no special
+ calling convention. */
+#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) TARGET_BPABI
+
+/* The BPABI integer comparison routines return { -1, 0, 1 }. */
+#define TARGET_LIB_INT_CMP_BIASED !TARGET_BPABI
+
+#define TARGET_FIX_V4BX_SPEC " %{mcpu=arm8|mcpu=arm810|mcpu=strongarm*"\
+ "|march=armv4|mcpu=fa526|mcpu=fa626:--fix-v4bx}"
+
+#define TARGET_FDPIC_ASM_SPEC ""
+
+#define BE8_LINK_SPEC \
+ "%{!r:%{!mbe32:%:be8_linkopt(%{mlittle-endian:little}" \
+ " %{mbig-endian:big}" \
+ " %{mbe8:be8}" \
+ " %{march=*:arch %*})}}"
+
+/* Tell the assembler to build BPABI binaries. */
+#undef SUBTARGET_EXTRA_ASM_SPEC
+#define SUBTARGET_EXTRA_ASM_SPEC \
+ "%{mabi=apcs-gnu|mabi=atpcs:-meabi=gnu;:-meabi=5}" TARGET_FIX_V4BX_SPEC \
+ TARGET_FDPIC_ASM_SPEC
+
+#ifndef SUBTARGET_EXTRA_LINK_SPEC
+#define SUBTARGET_EXTRA_LINK_SPEC ""
+#endif
+
+/* Split out the EABI common values so other targets can use it. */
+#define EABI_LINK_SPEC \
+ TARGET_FIX_V4BX_SPEC BE8_LINK_SPEC
+
+/* The generic link spec in elf.h does not support shared libraries. */
+#define BPABI_LINK_SPEC \
+ "%{mbig-endian:-EB} %{mlittle-endian:-EL} " \
+ "%{static:-Bstatic} %{shared:-shared} %{symbolic:-Bsymbolic} " \
+ "-X" SUBTARGET_EXTRA_LINK_SPEC EABI_LINK_SPEC
+
+#undef LINK_SPEC
+#define LINK_SPEC BPABI_LINK_SPEC
+
+/* The BPABI requires that we always use an out-of-line implementation
+ of RTTI comparison, even if the target supports weak symbols,
+ because the same object file might be used on a target that does
+ not support merging symbols across DLL boundaries. This macro is
+ broken out separately so that it can be used within
+ TARGET_OS_CPP_BUILTINS in configuration files for systems based on
+ the BPABI. */
+#define TARGET_BPABI_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__GXX_TYPEINFO_EQUALITY_INLINE=0"); \
+ } \
+ while (false)
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ TARGET_BPABI_CPP_BUILTINS()
+
+/* The BPABI specifies the use of .{init,fini}_array. Therefore, we
+ do not want GCC to put anything into the .{init,fini} sections. */
+#undef INIT_SECTION_ASM_OP
+#undef FINI_SECTION_ASM_OP
+#define INIT_ARRAY_SECTION_ASM_OP ARM_EABI_CTORS_SECTION_OP
+#define FINI_ARRAY_SECTION_ASM_OP ARM_EABI_DTORS_SECTION_OP
+
+/* The legacy _mcount implementation assumes r11 points to a
+ 4-word APCS frame. This is generally not true for EABI targets,
+ particularly not in Thumb mode. We assume the mcount
+ implementation does not require a counter variable (No Counter).
+ Note that __gnu_mcount_nc will be entered with a misaligned stack.
+ This is OK because it uses a special calling convention anyway. */
+
+#undef NO_PROFILE_COUNTERS
+#define NO_PROFILE_COUNTERS 1
+#undef ARM_FUNCTION_PROFILER
+#define ARM_FUNCTION_PROFILER(STREAM, LABELNO) \
+{ \
+ fprintf (STREAM, "\tpush\t{lr}\n"); \
+ fprintf (STREAM, "\tbl\t__gnu_mcount_nc\n"); \
+}
+
+#undef SUBTARGET_FRAME_POINTER_REQUIRED
+#define SUBTARGET_FRAME_POINTER_REQUIRED 0
+
+/* __gnu_mcount_nc restores the original LR value before returning. Ensure
+ that there is no unnecessary hook set up. */
+#undef PROFILE_HOOK
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/elf.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/elf.h
new file mode 100644
index 0000000..5766cb4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/elf.h
@@ -0,0 +1,152 @@
+/* Definitions of target machine for GNU compiler.
+ For ARM with ELF obj format.
+ Copyright (C) 1995-2023 Free Software Foundation, Inc.
+ Contributed by Philip Blundell <philb@gnu.org> and
+ Catherine Moore <clm@cygnus.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef OBJECT_FORMAT_ELF
+ #error elf.h included before elfos.h
+#endif
+
+#ifndef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+#endif
+
+#ifndef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "-D__ELF__"
+#endif
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "subtarget_extra_asm_spec", SUBTARGET_EXTRA_ASM_SPEC }, \
+ { "subtarget_asm_float_spec", SUBTARGET_ASM_FLOAT_SPEC }, \
+ SUBSUBTARGET_EXTRA_SPECS
+#endif
+
+#ifndef SUBTARGET_EXTRA_ASM_SPEC
+#define SUBTARGET_EXTRA_ASM_SPEC ""
+#endif
+
+#ifndef SUBTARGET_ASM_FLOAT_SPEC
+#define SUBTARGET_ASM_FLOAT_SPEC "\
+%{mapcs-float:-mfloat}"
+#endif
+
+#undef SUBSUBTARGET_EXTRA_SPECS
+#define SUBSUBTARGET_EXTRA_SPECS
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "\
+%{mbig-endian:-EB} \
+%{mlittle-endian:-EL} \
+%(asm_cpu_spec) \
+%{mapcs-*:-mapcs-%*} \
+%(subtarget_asm_float_spec) \
+%{mthumb-interwork:-mthumb-interwork} \
+%{mfloat-abi=*} %{!mfpu=auto: %{mfpu=*}} \
+%(subtarget_extra_asm_spec)"
+#endif
+
+/* The ARM uses @ as a comment character, so we need to redefine
+ TYPE_OPERAND_FMT. */
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT "%%%s"
+
+/* We might need an ARM-specific header for function declarations. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME arm_asm_declare_function_name
+
+/* We might need an ARM specific trailer for function declarations. */
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ ARM_OUTPUT_FN_UNWIND (FILE, FALSE); \
+ if (!flag_inhibit_size_directive) \
+ ASM_OUTPUT_MEASURED_SIZE (FILE, FNAME); \
+ } \
+ while (0)
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+/* We put ARM and Thumb-2 jump tables in the text section, because it makes
+ the code more efficient, but for Thumb-1 it's better to put them out of
+ band unless we are generating compressed tables. */
+#define JUMP_TABLES_IN_TEXT_SECTION \
+ ((TARGET_32BIT || (TARGET_THUMB && (optimize_size || flag_pic))) \
+ && !target_pure_code)
+
+#ifndef LINK_SPEC
+#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} -X"
+#endif
+
+/* Run-time Target Specification. */
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_APCS_FRAME)
+#endif
+
+
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
+
+
+/* Output an element in the static constructor array. */
+#undef TARGET_ASM_CONSTRUCTOR
+#define TARGET_ASM_CONSTRUCTOR arm_elf_asm_constructor
+
+#undef TARGET_ASM_DESTRUCTOR
+#define TARGET_ASM_DESTRUCTOR arm_elf_asm_destructor
+
+/* For PIC code we need to explicitly specify (PLT) and (GOT) relocs. */
+#define NEED_PLT_RELOC flag_pic
+#define NEED_GOT_RELOC flag_pic
+
+/* The ELF assembler handles GOT addressing differently to NetBSD. */
+#define GOT_PCREL 0
+
+/* Align output to a power of two. Note ".align 0" is redundant,
+ and also GAS will treat it as ".align 2" which we do not want. */
+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
+ do \
+ { \
+ if ((POWER) > 0) \
+ fprintf (STREAM, "\t.align\t%d\n", POWER); \
+ } \
+ while (0)
+
+/* Horrible hack: We want to prevent some libgcc routines being included
+ for some multilibs. The condition should match the one in
+ libgcc/config/arm/lib1funcs.S and libgcc/config/arm/t-elf. */
+#if __ARM_ARCH_ISA_ARM || __ARM_ARCH_ISA_THUMB != 1
+#undef L_fixdfsi
+#undef L_fixunsdfsi
+#undef L_truncdfsf2
+#undef L_fixsfsi
+#undef L_fixunssfsi
+#undef L_floatdidf
+#undef L_floatdisf
+#undef L_floatundidf
+#undef L_floatundisf
+#endif
+
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/unknown-elf.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/unknown-elf.h
new file mode 100644
index 0000000..397ac3f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/arm/unknown-elf.h
@@ -0,0 +1,96 @@
+/* Definitions for non-Linux based ARM systems using ELF
+ Copyright (C) 1998-2023 Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* elfos.h should have already been included. Now just override
+ any conflicting definitions and add any extras. */
+
+/* Run-time Target Specification. */
+
+/* Default to using software floating point. */
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT (0)
+#endif
+
+/* Now we define the strings used to build the spec file. */
+#define UNKNOWN_ELF_STARTFILE_SPEC " crti%O%s crtbegin%O%s crt0%O%s"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{Ofast|ffast-math|funsafe-math-optimizations:%{!shared:crtfastmath.o%s}} " \
+ UNKNOWN_ELF_STARTFILE_SPEC
+
+#define UNKNOWN_ELF_ENDFILE_SPEC "crtend%O%s crtn%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC UNKNOWN_ELF_ENDFILE_SPEC
+
+/* The __USES_INITFINI__ define is tested in newlib/libc/sys/arm/crt0.S
+ to see if it needs to invoke _init() and _fini(). */
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "-D__USES_INITFINI__"
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* Return a nonzero value if DECL has a section attribute. */
+#define IN_NAMED_SECTION_P(DECL) \
+ ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
+ && DECL_SECTION_NAME (DECL) != NULL)
+
+#undef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ if (IN_NAMED_SECTION_P (DECL)) \
+ switch_to_section (get_named_section (DECL, NULL, 0)); \
+ else \
+ switch_to_section (bss_section); \
+ \
+ ASM_OUTPUT_ALIGN (FILE, floor_log2 (ALIGN / BITS_PER_UNIT)); \
+ \
+ last_assemble_variable_decl = DECL; \
+ ASM_DECLARE_OBJECT_NAME (FILE, NAME, DECL); \
+ ASM_OUTPUT_SKIP (FILE, SIZE ? (int)(SIZE) : 1); \
+ } \
+ while (0)
+
+#undef ASM_OUTPUT_ALIGNED_DECL_LOCAL
+#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(FILE, DECL, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ if ((DECL) != NULL && IN_NAMED_SECTION_P (DECL)) \
+ switch_to_section (get_named_section (DECL, NULL, 0)); \
+ else \
+ switch_to_section (bss_section); \
+ \
+ ASM_OUTPUT_ALIGN (FILE, floor_log2 (ALIGN / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ fprintf (FILE, "\t.space\t%d\n", SIZE ? (int) SIZE : 1); \
+ fprintf (FILE, "\t.size\t%s, %d\n", \
+ NAME, SIZE ? (int) SIZE : 1); \
+ } \
+ while (0)
+
+/* The libgcc udivmod functions may throw exceptions. If newlib is
+ configured to support long longs in I/O, then printf will depend on
+ udivmoddi4, which will depend on the exception unwind routines,
+ which will depend on abort, which is defined in libc. */
+#undef LINK_GCC_C_SEQUENCE_SPEC
+#define LINK_GCC_C_SEQUENCE_SPEC "--start-group %G %{!nolibc:%L} --end-group"
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/elfos.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/elfos.h
new file mode 100644
index 0000000..f294f3b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/elfos.h
@@ -0,0 +1,484 @@
+/* elfos.h -- operating system specific defines to be used when
+ targeting GCC for some generic ELF system
+ Copyright (C) 1991-2023 Free Software Foundation, Inc.
+ Based on svr4.h contributed by Ron Guilmette (rfg@netcom.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#define TARGET_OBJFMT_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__ELF__"); \
+ } \
+ while (0)
+
+/* Define a symbol indicating that we are using elfos.h.
+ Some CPU specific configuration files use this. */
+#define USING_ELFOS_H
+
+/* The prefix to add to user-visible assembler symbols.
+
+ For ELF systems the convention is *not* to prepend a leading
+ underscore onto user-level symbol names. */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+/* The biggest alignment supported by ELF in bits. 32-bit ELF
+ supports section alignment up to (0x80000000 * 8), while
+ 64-bit ELF supports (0x8000000000000000 * 8). If this macro
+ is not defined, the default is the largest alignment supported
+ by 32-bit ELF and representable on a 32-bit host. Use this
+ macro to limit the alignment which can be specified using
+ the `__attribute__ ((aligned (N)))' construct. */
+#ifndef MAX_OFILE_ALIGNMENT
+#define MAX_OFILE_ALIGNMENT (((unsigned int) 1 << 28) * 8)
+#endif
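+
+/* Worked example (illustrative only): with the default above,
+ __attribute__ ((aligned (N))) is capped at N == (1 << 28) bytes
+ (256 MiB), since the macro counts bits: (1 << 28) * 8. */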
+
+/* Use periods rather than dollar signs in special g++ assembler names. */
+
+#define NO_DOLLAR_IN_LABEL
+
+/* Writing `int' for a bit-field forces int alignment for the structure. */
+
+#ifndef PCC_BITFIELD_TYPE_MATTERS
+#define PCC_BITFIELD_TYPE_MATTERS 1
+#endif
+
+/* All ELF targets can support DWARF-2. */
+
+#define DWARF2_DEBUGGING_INFO 1
+
+/* All ELF targets can support CTF. */
+
+#define CTF_DEBUGGING_INFO 1
+
+/* All ELF targets can support BTF. */
+
+#define BTF_DEBUGGING_INFO 1
+
+/* The GNU tools operate better with dwarf2, and it is required by some
+ psABI's. Since we don't have any native tools to be compatible with,
+ default to dwarf2. */
+
+#ifndef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+#endif
+
+/* All SVR4 targets use the ELF object file format. */
+#define OBJECT_FORMAT_ELF
+
+
+/* Output #ident as a .ident. */
+
+#undef TARGET_ASM_OUTPUT_IDENT
+#define TARGET_ASM_OUTPUT_IDENT default_asm_output_ident_directive
+
+#undef SET_ASM_OP
+#define SET_ASM_OP "\t.set\t"
+
+/* Most svr4 assemblers want a .file directive at the beginning of
+ their input file. */
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
+
+/* This is how to allocate empty space in some section. The .zero
+ pseudo-op is used for this on most svr4 assemblers. */
+
+#define SKIP_ASM_OP "\t.zero\t"
+
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(FILE, SIZE) \
+ fprintf ((FILE), "%s" HOST_WIDE_INT_PRINT_UNSIGNED "\n",\
+ SKIP_ASM_OP, (SIZE))
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'.
+
+ For most svr4 systems, the convention is that any symbol which begins
+ with a period is not put into the linker symbol table by the assembler. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \
+ do \
+ { \
+ char *__p; \
+ (LABEL)[0] = '*'; \
+ (LABEL)[1] = '.'; \
+ __p = stpcpy (&(LABEL)[2], PREFIX); \
+ sprint_ul (__p, (unsigned long) (NUM)); \
+ } \
+ while (0)
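+
+/* Editor's note (illustrative, not part of upstream): with PREFIX "L" and
+   NUM 42 the macro fills LABEL with "*.L42".  The leading '*' tells
+   assemble_name to emit the rest verbatim (no user label prefix), and the
+   '.' keeps the symbol out of the linker symbol table, per the convention
+   described above. */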
+
+/* Output the label which precedes a jumptable.  Note that for all svr4
+   systems where we actually generate jumptables (which is to say every
+   svr4 target except i386, where we use casesi instead) we put the
+   jumptables into the .rodata section, and since other stuff could have
+   been put into the .rodata section prior to any given jumptable, we
+   have to make sure that the location counter for the .rodata section
+   gets properly re-aligned prior to the actual beginning of the jump
+   table. */
+
+#undef ALIGN_ASM_OP
+#define ALIGN_ASM_OP "\t.align\t"
+
+#ifndef ASM_OUTPUT_BEFORE_CASE_LABEL
+#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) \
+ ASM_OUTPUT_ALIGN ((FILE), 2)
+#endif
+
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(FILE, PREFIX, NUM, JUMPTABLE) \
+ do \
+ { \
+ ASM_OUTPUT_BEFORE_CASE_LABEL (FILE, PREFIX, NUM, JUMPTABLE); \
+ (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); \
+ } \
+ while (0)
+
+/* The standard SVR4 assembler seems to require that certain builtin
+ library routines (e.g. .udiv) be explicitly declared as .globl
+ in each assembly file where they are referenced. */
+
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(FILE, FUN) \
+ (*targetm.asm_out.globalize_label) (FILE, XSTR (FUN, 0))
+
+/* This says how to output assembler code to declare an
+ uninitialized external linkage data object. Under SVR4,
+ the linker seems to want the alignment of data objects
+ to depend on their types. We do exactly that here. */
+
+#define COMMON_ASM_OP "\t.comm\t"
+
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ fprintf ((FILE), "%s", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n", \
+ (SIZE), (ALIGN) / BITS_PER_UNIT); \
+ } \
+ while (0)
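+
+/* Editor's note (illustrative, not part of upstream): for NAME "buf",
+   SIZE 64 and ALIGN 32 (bits) this emits
+
+       .comm	buf,64,4
+
+   i.e. the alignment operand is converted from bits to bytes. */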
+
+/* This says how to output assembler code to declare an
+ uninitialized internal linkage data object. Under SVR4,
+ the linker seems to want the alignment of data objects
+ to depend on their types. We do exactly that here. */
+
+#define LOCAL_ASM_OP "\t.local\t"
+
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ fprintf ((FILE), "%s", LOCAL_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), "\n"); \
+ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
+ } \
+ while (0)
+
+/* This is the pseudo-op used to generate a contiguous sequence of byte
+ values from a double-quoted string WITHOUT HAVING A TERMINATING NUL
+ AUTOMATICALLY APPENDED. This is the same for most svr4 assemblers. */
+
+#undef ASCII_DATA_ASM_OP
+#define ASCII_DATA_ASM_OP "\t.ascii\t"
+
+/* Support a read-only data section. */
+#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata"
+
+/* On svr4, we *do* have support for the .init and .fini sections, and we
+ can put stuff in there to be executed before and after `main'. We let
+ crtstuff.c and other files know this by defining the following symbols.
+ The definitions say how to change sections to the .init and .fini
+ sections. This is the same for all known svr4 assemblers. */
+
+#define INIT_SECTION_ASM_OP "\t.section\t.init"
+#define FINI_SECTION_ASM_OP "\t.section\t.fini"
+
+/* Output assembly directive to move to the beginning of current section. */
+#ifdef HAVE_GAS_SUBSECTION_ORDERING
+# define ASM_SECTION_START_OP "\t.subsection\t-1"
+# define ASM_OUTPUT_SECTION_START(FILE) \
+ fprintf ((FILE), "%s\n", ASM_SECTION_START_OP)
+#endif
+
+#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+
+/* Switch into a generic section. */
+#define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section
+
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION default_elf_select_rtx_section
+#undef TARGET_ASM_SELECT_SECTION
+#define TARGET_ASM_SELECT_SECTION default_elf_select_section
+#undef TARGET_HAVE_SWITCHABLE_BSS_SECTIONS
+#define TARGET_HAVE_SWITCHABLE_BSS_SECTIONS true
+
+/* Define the strings used for the special svr4 .type and .size directives.
+ These strings generally do not vary from one system running svr4 to
+   another, but if a given system (e.g. m88k running svr4) needs to use
+ different pseudo-op names for these, they may be overridden in the
+ file which includes this one. */
+
+#define TYPE_ASM_OP "\t.type\t"
+#define SIZE_ASM_OP "\t.size\t"
+
+/* This is how we tell the assembler that a symbol is weak. */
+
+#define ASM_WEAKEN_LABEL(FILE, NAME) \
+ do \
+ { \
+ fputs ("\t.weak\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fputc ('\n', (FILE)); \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_SYMVER_DIRECTIVE(FILE, NAME, NAME2) \
+ do \
+ { \
+ fputs ("\t.symver\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fputs (", ", (FILE)); \
+ assemble_name ((FILE), (NAME2)); \
+ fputc ('\n', (FILE)); \
+ } \
+ while (0)
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various different forms for this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending upon the particulars of your assembler). */
+
+#define TYPE_OPERAND_FMT "@%s"
+
+/* Write the extra assembler code needed to declare a function's result.
+ Most svr4 assemblers don't require any special declaration of the
+ result value, but there are exceptions. */
+
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers need to also have something extra said about the
+ function's return value. We allow for that here. */
+
+#ifndef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "function"); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_FUNCTION_LABEL (FILE, NAME, DECL); \
+ } \
+ while (0)
+#endif
+
+/* Write the extra assembler code needed to declare the name of a
+ cold function partition properly. Some svr4 assemblers need to also
+ have something extra said about the function's return value. We
+ allow for that here. */
+
+#ifndef ASM_DECLARE_COLD_FUNCTION_NAME
+#define ASM_DECLARE_COLD_FUNCTION_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "function"); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_FUNCTION_LABEL (FILE, NAME, DECL); \
+ } \
+ while (0)
+#endif
+
+/* Write the extra assembler code needed to declare an object properly. */
+
+#ifdef HAVE_GAS_GNU_UNIQUE_OBJECT
+#define USE_GNU_UNIQUE_OBJECT flag_gnu_unique
+#else
+#define USE_GNU_UNIQUE_OBJECT 0
+#endif
+
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ HOST_WIDE_INT size; \
+ \
+ /* For template static data member instantiations or \
+ inline fn local statics and their guard variables, use \
+ gnu_unique_object so that they will be combined even under \
+ RTLD_LOCAL. Don't use gnu_unique_object for typeinfo, \
+ vtables and other read-only artificial decls. */ \
+ if (USE_GNU_UNIQUE_OBJECT && DECL_ONE_ONLY (DECL) \
+ && (!DECL_ARTIFICIAL (DECL) || !TREE_READONLY (DECL))) \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "gnu_unique_object"); \
+ else \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \
+ \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive \
+ && (DECL) && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ size = tree_to_uhwi (DECL_SIZE_UNIT (DECL)); \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, size); \
+ } \
+ \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ } \
+ while (0)
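+
+/* Editor's note (illustrative, not part of upstream): for a plain global
+   such as "int counter;" this typically emits
+
+       .type	counter, @object
+       .size	counter, 4
+   counter:
+
+   given the default TYPE_OPERAND_FMT "@%s" below; ARM back ends override
+   the format since '@' starts a comment in ARM assembly. */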
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+
+#undef ASM_FINISH_DECLARE_OBJECT
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END)\
+ do \
+ { \
+ const char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ HOST_WIDE_INT size; \
+ \
+ if (!flag_inhibit_size_directive \
+ && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ size = tree_to_uhwi (DECL_SIZE_UNIT (DECL)); \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, name, size); \
+ } \
+ } \
+ while (0)
+
+/* This is how to declare the size of a function. */
+#ifndef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ if (!flag_inhibit_size_directive) \
+ ASM_OUTPUT_MEASURED_SIZE (FILE, FNAME); \
+ } \
+ while (0)
+#endif
+
+/* This is how to declare the size of a cold function partition. */
+#ifndef ASM_DECLARE_COLD_FUNCTION_SIZE
+#define ASM_DECLARE_COLD_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ if (!flag_inhibit_size_directive) \
+ ASM_OUTPUT_MEASURED_SIZE (FILE, FNAME); \
+ } \
+ while (0)
+#endif
+
+/* A table of byte codes used by the ASM_OUTPUT_ASCII and
+   ASM_OUTPUT_LIMITED_STRING macros.  Each byte in the table
+   corresponds to a particular byte value [0..255].  For any
+   given byte value, if the value in the corresponding table
+   position is zero, the given character can be output directly.
+   If the table value is 1, the byte must be output as a \ooo
+   octal escape.  If the table value is anything else, then the
+   byte value should be output as a \ followed by the value
+   in the table.  Note that we can use standard UN*X escape
+   sequences for many control characters, but we don't use
+   \a to represent BEL because some svr4 assemblers (e.g. on
+   the i386) don't know about that.  Also, we don't use \v
+   since some versions of gas, such as 2.2, did not accept it. */
+
+#define ELF_ASCII_ESCAPES \
+"\1\1\1\1\1\1\1\1btn\1fr\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\0\0\"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\\\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1"
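+
+/* Editor's note (illustrative, not part of upstream): the table is indexed
+   by byte value, e.g.
+
+       ELF_ASCII_ESCAPES['\n'] == 'n'   -- newline is emitted as "\n"
+       ELF_ASCII_ESCAPES['\v'] == 1     -- VT falls back to octal "\013"
+       ELF_ASCII_ESCAPES['A']  == 0     -- printable bytes pass through
+*/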
+
+/* Some svr4 assemblers have a limit on the number of characters which
+ can appear in the operand of a .string directive. If your assembler
+ has such a limitation, you should define STRING_LIMIT to reflect that
+ limit. Note that at least some svr4 assemblers have a limit on the
+ actual number of bytes in the double-quoted string, and that they
+ count each character in an escape sequence as one byte. Thus, an
+ escape sequence like \377 would count as four bytes.
+
+ If your target assembler doesn't support the .string directive, you
+ should define this to zero.
+*/
+
+#define ELF_STRING_LIMIT ((unsigned) 256)
+
+#define STRING_ASM_OP "\t.string\t"
+
+/* The routine used to output NUL terminated strings. We use a special
+ version of this for most svr4 targets because doing so makes the
+ generated assembly code more compact (and thus faster to assemble)
+ as well as more readable, especially for targets like the i386
+ (where the only alternative is to output character sequences as
+ comma separated lists of numbers). */
+
+#define ASM_OUTPUT_LIMITED_STRING(FILE, STR) \
+ default_elf_asm_output_limited_string ((FILE), (STR))
+
+/* The routine used to output sequences of byte values. We use a special
+ version of this for most svr4 targets because doing so makes the
+ generated assembly code more compact (and thus faster to assemble)
+ as well as more readable. Note that if we find subparts of the
+ character sequence which end with NUL (and which are shorter than
+ STRING_LIMIT) we output those using ASM_OUTPUT_LIMITED_STRING. */
+
+#undef ASM_OUTPUT_ASCII
+#define ASM_OUTPUT_ASCII(FILE, STR, LENGTH) \
+ default_elf_asm_output_ascii ((FILE), (STR), (LENGTH))
+
+/* Allow the use of the -frecord-gcc-switches switch via the
+ elf_record_gcc_switches function defined in varasm.cc. */
+#undef TARGET_ASM_RECORD_GCC_SWITCHES
+#define TARGET_ASM_RECORD_GCC_SWITCHES elf_record_gcc_switches
+
+/* A C statement (sans semicolon) to output to the stdio stream STREAM
+ any text necessary for declaring the name of an external symbol
+ named NAME which is referenced in this compilation but not defined.
+ It is needed to properly support non-default visibility. */
+
+#ifndef ASM_OUTPUT_EXTERNAL
+#define ASM_OUTPUT_EXTERNAL(FILE, DECL, NAME) \
+ default_elf_asm_output_external (FILE, DECL, NAME)
+#endif
+
+#undef TARGET_LIBC_HAS_FUNCTION
+#define TARGET_LIBC_HAS_FUNCTION no_c99_libc_has_function
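+
+/* Editor's note (illustrative, not part of upstream):
+   no_c99_libc_has_function reports only the C94 subset as available, so
+   optimizations that would introduce newer library calls -- e.g. fusing
+   separate sin (x) and cos (x) calls into one sincos/cexpi call -- are
+   suppressed for newlib-based targets like this one. */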
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/initfini-array.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/initfini-array.h
new file mode 100644
index 0000000..a1fc2c8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/initfini-array.h
@@ -0,0 +1,45 @@
+/* Definitions for ELF systems with .init_array/.fini_array section
+ support.
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if HAVE_INITFINI_ARRAY_SUPPORT
+
+#define USE_INITFINI_ARRAY
+
+#undef INIT_SECTION_ASM_OP
+#undef FINI_SECTION_ASM_OP
+
+#undef INIT_ARRAY_SECTION_ASM_OP
+#define INIT_ARRAY_SECTION_ASM_OP
+
+#undef FINI_ARRAY_SECTION_ASM_OP
+#define FINI_ARRAY_SECTION_ASM_OP
+
+/* Use .init_array/.fini_array section for constructors and destructors. */
+#undef TARGET_ASM_CONSTRUCTOR
+#define TARGET_ASM_CONSTRUCTOR default_elf_init_array_asm_out_constructor
+#undef TARGET_ASM_DESTRUCTOR
+#define TARGET_ASM_DESTRUCTOR default_elf_fini_array_asm_out_destructor
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/newlib-stdint.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/newlib-stdint.h
new file mode 100644
index 0000000..98bc77b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/newlib-stdint.h
@@ -0,0 +1,69 @@
+/* Definitions for <stdint.h> types on systems using newlib.
+ Copyright (C) 2008-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* newlib uses 32-bit long in certain cases for all non-SPU
+ targets. */
+#ifndef STDINT_LONG32
+#define STDINT_LONG32 (LONG_TYPE_SIZE == 32)
+#endif
+
+#define SIG_ATOMIC_TYPE "int"
+
+/* The newlib logic actually checks for sizes greater than 32 rather
+ than equal to 64 for various 64-bit types. */
+
+#define INT8_TYPE (CHAR_TYPE_SIZE == 8 ? "signed char" : 0)
+#define INT16_TYPE (SHORT_TYPE_SIZE == 16 ? "short int" : INT_TYPE_SIZE == 16 ? "int" : CHAR_TYPE_SIZE == 16 ? "signed char" : 0)
+#define INT32_TYPE (STDINT_LONG32 ? "long int" : INT_TYPE_SIZE == 32 ? "int" : SHORT_TYPE_SIZE == 32 ? "short int" : CHAR_TYPE_SIZE == 32 ? "signed char" : 0)
+#define INT64_TYPE (LONG_TYPE_SIZE == 64 ? "long int" : LONG_LONG_TYPE_SIZE == 64 ? "long long int" : INT_TYPE_SIZE == 64 ? "int" : 0)
+#define UINT8_TYPE (CHAR_TYPE_SIZE == 8 ? "unsigned char" : 0)
+#define UINT16_TYPE (SHORT_TYPE_SIZE == 16 ? "short unsigned int" : INT_TYPE_SIZE == 16 ? "unsigned int" : CHAR_TYPE_SIZE == 16 ? "unsigned char" : 0)
+#define UINT32_TYPE (STDINT_LONG32 ? "long unsigned int" : INT_TYPE_SIZE == 32 ? "unsigned int" : SHORT_TYPE_SIZE == 32 ? "short unsigned int" : CHAR_TYPE_SIZE == 32 ? "unsigned char" : 0)
+#define UINT64_TYPE (LONG_TYPE_SIZE == 64 ? "long unsigned int" : LONG_LONG_TYPE_SIZE == 64 ? "long long unsigned int" : INT_TYPE_SIZE == 64 ? "unsigned int" : 0)
+
+#define INT_LEAST8_TYPE (INT8_TYPE ? INT8_TYPE : INT16_TYPE ? INT16_TYPE : INT32_TYPE ? INT32_TYPE : INT64_TYPE ? INT64_TYPE : 0)
+#define INT_LEAST16_TYPE (INT16_TYPE ? INT16_TYPE : INT32_TYPE ? INT32_TYPE : INT64_TYPE ? INT64_TYPE : 0)
+#define INT_LEAST32_TYPE (INT32_TYPE ? INT32_TYPE : INT64_TYPE ? INT64_TYPE : 0)
+#define INT_LEAST64_TYPE INT64_TYPE
+#define UINT_LEAST8_TYPE (UINT8_TYPE ? UINT8_TYPE : UINT16_TYPE ? UINT16_TYPE : UINT32_TYPE ? UINT32_TYPE : UINT64_TYPE ? UINT64_TYPE : 0)
+#define UINT_LEAST16_TYPE (UINT16_TYPE ? UINT16_TYPE : UINT32_TYPE ? UINT32_TYPE : UINT64_TYPE ? UINT64_TYPE : 0)
+#define UINT_LEAST32_TYPE (UINT32_TYPE ? UINT32_TYPE : UINT64_TYPE ? UINT64_TYPE : 0)
+#define UINT_LEAST64_TYPE UINT64_TYPE
+
+#define INT_FAST8_TYPE (INT_TYPE_SIZE >= 8 ? "int" : INT_LEAST8_TYPE)
+#define INT_FAST16_TYPE (INT_TYPE_SIZE >= 16 ? "int" : INT_LEAST16_TYPE)
+#define INT_FAST32_TYPE (INT_TYPE_SIZE >= 32 ? "int" : INT_LEAST32_TYPE)
+#define INT_FAST64_TYPE (INT_TYPE_SIZE >= 64 ? "int" : INT_LEAST64_TYPE)
+#define UINT_FAST8_TYPE (INT_TYPE_SIZE >= 8 ? "unsigned int" : UINT_LEAST8_TYPE)
+#define UINT_FAST16_TYPE (INT_TYPE_SIZE >= 16 ? "unsigned int" : UINT_LEAST16_TYPE)
+#define UINT_FAST32_TYPE (INT_TYPE_SIZE >= 32 ? "unsigned int" : UINT_LEAST32_TYPE)
+#define UINT_FAST64_TYPE (INT_TYPE_SIZE >= 64 ? "unsigned int" : UINT_LEAST64_TYPE)
+
+/* Newlib uses the unsigned type corresponding to ptrdiff_t for
+ uintptr_t; this is the same as size_t for most newlib-using
+ targets. */
+#define INTPTR_TYPE PTRDIFF_TYPE
+#ifndef UINTPTR_TYPE
+#define UINTPTR_TYPE SIZE_TYPE
+#endif
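+
+/* Editor's note (illustrative, not part of upstream): on a 32-bit ILP32
+   target such as arm-none-eabi (char 8, short 16, int 32, long 32,
+   long long 64 bits) the defaults above resolve to
+
+       int8_t  = signed char        int32_t = long int
+       int16_t = short int          int64_t = long long int
+       int_fast8/16/32_t = int      int_fast64_t = long long int
+       intptr_t = int (PTRDIFF_TYPE), uintptr_t = unsigned int (SIZE_TYPE). */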
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/vxworks-dummy.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/vxworks-dummy.h
new file mode 100644
index 0000000..53704a5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/config/vxworks-dummy.h
@@ -0,0 +1,48 @@
+/* Dummy definitions of VxWorks-related macros
+ Copyright (C) 2007-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* True if we're targeting VxWorks, VxWorks7 and/or 64-bit. */
+#ifndef TARGET_VXWORKS
+#define TARGET_VXWORKS 0
+#endif
+
+#ifndef TARGET_VXWORKS7
+#define TARGET_VXWORKS7 0
+#endif
+
+#ifndef TARGET_VXWORKS64
+#define TARGET_VXWORKS64 0
+#endif
+
+/* True if generating code for a VxWorks RTP. */
+#ifndef TARGET_VXWORKS_RTP
+#define TARGET_VXWORKS_RTP false
+#endif
+
+/* The symbol that points to an RTP's table of GOTs. */
+#define VXWORKS_GOTT_BASE (gcc_unreachable (), "")
+
+/* The symbol that holds the index of the current module's GOT in
+ VXWORKS_GOTT_BASE. */
+#define VXWORKS_GOTT_INDEX (gcc_unreachable (), "")
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/configargs.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/configargs.h
new file mode 100644
index 0000000..dd42a62
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/configargs.h
@@ -0,0 +1,7 @@
+/* Generated automatically. */
+static const char configuration_arguments[] = "/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/configure --target=arm-none-eabi --prefix=/data/jenkins/workspace/GNU-toolchain/arm-13/build-arm-none-eabi/install --with-gmp=/data/jenkins/workspace/GNU-toolchain/arm-13/build-arm-none-eabi/host-tools --with-mpfr=/data/jenkins/workspace/GNU-toolchain/arm-13/build-arm-none-eabi/host-tools --with-mpc=/data/jenkins/workspace/GNU-toolchain/arm-13/build-arm-none-eabi/host-tools --with-isl=/data/jenkins/workspace/GNU-toolchain/arm-13/build-arm-none-eabi/host-tools --disable-shared --disable-nls --disable-threads --disable-tls --enable-checking=release --enable-languages=c,c++,fortran --with-newlib --with-gnu-as --with-headers=yes --with-gnu-ld --with-native-system-header-dir=/include --with-sysroot=/data/jenkins/workspace/GNU-toolchain/arm-13/build-arm-none-eabi/install/arm-none-eabi --with-multilib-list=aprofile,rmprofile --with-pkgversion='Arm GNU Toolchain 13.2.rel1 (Build arm-13.7)' --with-bugurl=https://bugs.linaro.org/";
+static const char thread_model[] = "single";
+
+static const struct {
+ const char *name, *value;
+} configure_default_options[] = { { "cpu", "arm7tdmi" }, { "float", "soft" } };
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/context.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/context.h
new file mode 100644
index 0000000..ad921f2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/context.h
@@ -0,0 +1,69 @@
+/* context.h - Holder for global state
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CONTEXT_H
+#define GCC_CONTEXT_H
+
+namespace gcc {
+
+class pass_manager;
+class dump_manager;
+
+/* GCC's internal state can be divided into zero or more
+ "parallel universe" of state; an instance of this class is one such
+ context of state. */
+class context
+{
+public:
+ context ();
+ ~context ();
+
+  /* True if there are symbols to be streamed for offloading. */
+ bool have_offload;
+
+ /* Pass-management. */
+
+ void set_passes (pass_manager *m)
+ {
+ gcc_assert (!m_passes);
+ m_passes = m;
+ }
+
+ pass_manager *get_passes () { gcc_assert (m_passes); return m_passes; }
+
+ /* Handling dump files. */
+
+  dump_manager *get_dumps () { gcc_assert (m_dumps); return m_dumps; }
+
+private:
+ /* Pass-management. */
+ pass_manager *m_passes;
+
+ /* Dump files. */
+ dump_manager *m_dumps;
+
+}; // class context
+
+} // namespace gcc
+
+/* The global singleton context, aka "g"
+   (the name is chosen to be easy to type in a debugger). */
+extern gcc::context *g;
+
+#endif /* ! GCC_CONTEXT_H */
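+
+/* Editor's sketch (illustrative, not part of upstream): a plugin or pass
+   typically reaches shared state through the singleton, e.g.
+
+       #include "context.h"
+       #include "pass_manager.h"
+
+       void
+       inspect_passes (void)
+       {
+         gcc::pass_manager *passes = g->get_passes ();
+         // ... query or register passes here ...
+       }
+
+   Note that get_passes () asserts that set_passes () has already run, so
+   this is only valid after toplev initialization. */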
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/convert.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/convert.h
new file mode 100644
index 0000000..5351e82
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/convert.h
@@ -0,0 +1,45 @@
+/* Definition of functions in convert.cc.
+ Copyright (C) 1993-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CONVERT_H
+#define GCC_CONVERT_H
+
+extern tree convert_to_integer (tree, tree);
+extern tree convert_to_integer_maybe_fold (tree, tree, bool);
+extern tree convert_to_pointer (tree, tree);
+extern tree convert_to_pointer_maybe_fold (tree, tree, bool);
+extern tree convert_to_real (tree, tree);
+extern tree convert_to_real_maybe_fold (tree, tree, bool);
+extern tree convert_to_fixed (tree, tree);
+extern tree convert_to_complex (tree, tree);
+extern tree convert_to_complex_maybe_fold (tree, tree, bool);
+extern tree convert_to_vector (tree, tree);
+
+extern inline tree convert_to_integer_nofold (tree t, tree x)
+{ return convert_to_integer_maybe_fold (t, x, false); }
+extern inline tree convert_to_pointer_nofold (tree t, tree x)
+{ return convert_to_pointer_maybe_fold (t, x, false); }
+extern inline tree convert_to_real_nofold (tree t, tree x)
+{ return convert_to_real_maybe_fold (t, x, false); }
+extern inline tree convert_to_complex_nofold (tree t, tree x)
+{ return convert_to_complex_maybe_fold (t, x, false); }
+
+extern tree preserve_any_location_wrapper (tree result, tree orig_expr);
+
+#endif /* GCC_CONVERT_H */
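+
+/* Editor's note (illustrative, not part of upstream): a front end coerces
+   a tree to a target type with, e.g.,
+
+       tree widened = convert_to_integer (long_integer_type_node, expr);
+
+   The *_nofold variants above perform the same conversion but suppress
+   constant folding, which preserves the shape of the original expression
+   (cf. preserve_any_location_wrapper). */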
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/coretypes.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/coretypes.h
new file mode 100644
index 0000000..ca8837c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/coretypes.h
@@ -0,0 +1,495 @@
+/* GCC core type declarations.
+ Copyright (C) 2002-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Provide forward declarations of core types which are referred to by
+ most of the compiler. This allows header files to use these types
+ (e.g. in function prototypes) without concern for whether the full
+ definitions are visible. Some other declarations that need to be
+ universally visible are here, too.
+
+ In the context of tconfig.h, most of these have special definitions
+ which prevent them from being used except in further type
+ declarations. This is a kludge; the right thing is to avoid
+ including the "tm.h" header set in the context of tconfig.h, but
+ we're not there yet. */
+
+#ifndef GCC_CORETYPES_H
+#define GCC_CORETYPES_H
+
+#ifndef GTY
+#define GTY(x) /* nothing - marker for gengtype */
+#endif
+
+#ifndef USED_FOR_TARGET
+
+typedef int64_t gcov_type;
+typedef uint64_t gcov_type_unsigned;
+
+struct bitmap_obstack;
+class bitmap_head;
+typedef class bitmap_head *bitmap;
+typedef const class bitmap_head *const_bitmap;
+struct simple_bitmap_def;
+typedef struct simple_bitmap_def *sbitmap;
+typedef const struct simple_bitmap_def *const_sbitmap;
+struct rtx_def;
+typedef struct rtx_def *rtx;
+typedef const struct rtx_def *const_rtx;
+class scalar_mode;
+class scalar_int_mode;
+class scalar_float_mode;
+class complex_mode;
+class fixed_size_mode;
+template<typename> class opt_mode;
+typedef opt_mode<scalar_mode> opt_scalar_mode;
+typedef opt_mode<scalar_int_mode> opt_scalar_int_mode;
+typedef opt_mode<scalar_float_mode> opt_scalar_float_mode;
+template<typename> struct pod_mode;
+typedef pod_mode<scalar_mode> scalar_mode_pod;
+typedef pod_mode<scalar_int_mode> scalar_int_mode_pod;
+typedef pod_mode<fixed_size_mode> fixed_size_mode_pod;
+
+/* Subclasses of rtx_def, using indentation to show the class
+ hierarchy, along with the relevant invariant.
+ Where possible, keep this list in the same order as in rtl.def. */
+struct rtx_def;
+ struct rtx_expr_list; /* GET_CODE (X) == EXPR_LIST */
+ struct rtx_insn_list; /* GET_CODE (X) == INSN_LIST */
+ struct rtx_sequence; /* GET_CODE (X) == SEQUENCE */
+ struct rtx_insn;
+ struct rtx_debug_insn; /* DEBUG_INSN_P (X) */
+ struct rtx_nonjump_insn; /* NONJUMP_INSN_P (X) */
+ struct rtx_jump_insn; /* JUMP_P (X) */
+ struct rtx_call_insn; /* CALL_P (X) */
+ struct rtx_jump_table_data; /* JUMP_TABLE_DATA_P (X) */
+ struct rtx_barrier; /* BARRIER_P (X) */
+ struct rtx_code_label; /* LABEL_P (X) */
+ struct rtx_note; /* NOTE_P (X) */
+
+struct rtvec_def;
+typedef struct rtvec_def *rtvec;
+typedef const struct rtvec_def *const_rtvec;
+struct hwivec_def;
+typedef struct hwivec_def *hwivec;
+typedef const struct hwivec_def *const_hwivec;
+union tree_node;
+typedef union tree_node *tree;
+typedef const union tree_node *const_tree;
+struct gimple;
+typedef gimple *gimple_seq;
+struct gimple_stmt_iterator;
+
+/* Forward decls for leaf gimple subclasses (for individual gimple codes).
+ Keep this in the same order as the corresponding codes in gimple.def. */
+
+struct gcond;
+struct gdebug;
+struct ggoto;
+struct glabel;
+struct gswitch;
+struct gassign;
+struct gasm;
+struct gcall;
+struct gtransaction;
+struct greturn;
+struct gbind;
+struct gcatch;
+struct geh_filter;
+struct geh_mnt;
+struct geh_else;
+struct gresx;
+struct geh_dispatch;
+struct gphi;
+struct gtry;
+struct gomp_atomic_load;
+struct gomp_atomic_store;
+struct gomp_continue;
+struct gomp_critical;
+struct gomp_ordered;
+struct gomp_for;
+struct gomp_parallel;
+struct gomp_task;
+struct gomp_sections;
+struct gomp_single;
+struct gomp_target;
+struct gomp_teams;
+
+/* Subclasses of symtab_node, using indentation to show the class
+ hierarchy. */
+
+struct symtab_node;
+ struct cgraph_node;
+ struct varpool_node;
+struct cgraph_edge;
+
+union section;
+typedef union section section;
+struct gcc_options;
+struct cl_target_option;
+struct cl_optimization;
+struct cl_option;
+struct cl_decoded_option;
+struct cl_option_handlers;
+struct diagnostic_context;
+class pretty_printer;
+class diagnostic_event_id_t;
+typedef const char * (*diagnostic_input_charset_callback)(const char *);
+
+template<typename T> struct array_traits;
+
+/* Provides a read-only bitmap view of a single integer bitmask or an
+ array of integer bitmasks, or of a wrapper around such bitmasks. */
+template<typename T, typename Traits = array_traits<T>,
+ bool has_constant_size = Traits::has_constant_size>
+class bitmap_view;
+
+/* Address space number for named address space support. */
+typedef unsigned char addr_space_t;
+
+/* The value of addr_space_t that represents the generic address space. */
+#define ADDR_SPACE_GENERIC 0
+#define ADDR_SPACE_GENERIC_P(AS) ((AS) == ADDR_SPACE_GENERIC)
+
+/* The major intermediate representations of GCC. */
+enum ir_type {
+ IR_GIMPLE,
+ IR_RTL_CFGRTL,
+ IR_RTL_CFGLAYOUT
+};
+
+/* Provide forward struct declaration so that we don't have to include
+ all of cpplib.h whenever a random prototype includes a pointer.
+ Note that the cpp_reader and cpp_token typedefs remain part of
+ cpplib.h. */
+
+struct cpp_reader;
+struct cpp_token;
+
+/* The thread-local storage model associated with a given VAR_DECL
+ or SYMBOL_REF. This isn't used much, but both trees and RTL refer
+ to it, so it's here. */
+enum tls_model {
+ TLS_MODEL_NONE,
+ TLS_MODEL_EMULATED,
+ TLS_MODEL_REAL,
+ TLS_MODEL_GLOBAL_DYNAMIC = TLS_MODEL_REAL,
+ TLS_MODEL_LOCAL_DYNAMIC,
+ TLS_MODEL_INITIAL_EXEC,
+ TLS_MODEL_LOCAL_EXEC
+};
+
+/* Types of ABI for an offload compiler. */
+enum offload_abi {
+ OFFLOAD_ABI_UNSET,
+ OFFLOAD_ABI_LP64,
+ OFFLOAD_ABI_ILP32
+};
+
+/* Types of profile update methods. */
+enum profile_update {
+ PROFILE_UPDATE_SINGLE,
+ PROFILE_UPDATE_ATOMIC,
+ PROFILE_UPDATE_PREFER_ATOMIC
+};
+
+/* Types of profile reproducibility methods. */
+enum profile_reproducibility {
+ PROFILE_REPRODUCIBILITY_SERIAL,
+ PROFILE_REPRODUCIBILITY_PARALLEL_RUNS,
+ PROFILE_REPRODUCIBILITY_MULTITHREADED
+};
+
+/* Type of -fstack-protector-*. */
+enum stack_protector {
+ SPCT_FLAG_DEFAULT = 1,
+ SPCT_FLAG_ALL = 2,
+ SPCT_FLAG_STRONG = 3,
+ SPCT_FLAG_EXPLICIT = 4
+};
+
+/* Types of unwind/exception handling info that can be generated.
+ Note that a UI_TARGET (or larger) setting is considered to be
+ incompatible with -freorder-blocks-and-partition. */
+
+enum unwind_info_type
+{
+ UI_NONE,
+ UI_SJLJ,
+ UI_DWARF2,
+ UI_SEH,
+ UI_TARGET
+};
+
+/* Callgraph node profile representation. */
+enum node_frequency {
+ /* This function most likely won't be executed at all.
+ (set only when profile feedback is available or via function attribute). */
+ NODE_FREQUENCY_UNLIKELY_EXECUTED,
+  /* For functions that are known to be executed once (i.e. constructors,
+     destructors and the main function). */
+ NODE_FREQUENCY_EXECUTED_ONCE,
+ /* The default value. */
+ NODE_FREQUENCY_NORMAL,
+ /* Optimize this function hard
+ (set only when profile feedback is available or via function attribute). */
+ NODE_FREQUENCY_HOT
+};
+
+/* Ways of optimizing code. */
+enum optimization_type {
+ /* Prioritize speed over size. */
+ OPTIMIZE_FOR_SPEED,
+
+ /* Only do things that are good for both size and speed. */
+ OPTIMIZE_FOR_BOTH,
+
+ /* Prioritize size over speed. */
+ OPTIMIZE_FOR_SIZE
+};
+
+/* Enumerates a padding direction. */
+enum pad_direction {
+ /* No padding is required. */
+ PAD_NONE,
+
+  /* Insert padding above the data, i.e. at higher memory addresses
+ when dealing with memory, and at the most significant end when
+ dealing with registers. */
+ PAD_UPWARD,
+
+  /* Insert padding below the data, i.e. at lower memory addresses
+ when dealing with memory, and at the least significant end when
+ dealing with registers. */
+ PAD_DOWNWARD
+};
+
+/* Possible initialization status of a variable. When requested
+ by the user, this information is tracked and recorded in the DWARF
+ debug information, along with the variable's location. */
+enum var_init_status
+{
+ VAR_INIT_STATUS_UNKNOWN,
+ VAR_INIT_STATUS_UNINITIALIZED,
+ VAR_INIT_STATUS_INITIALIZED
+};
+
+/* Names for the different levels of -Wstrict-overflow=N. The numeric
+ values here correspond to N. */
+enum warn_strict_overflow_code
+{
+ /* Overflow warning that should be issued with -Wall: a questionable
+ construct that is easy to avoid even when using macros. Example:
+ folding (x + CONSTANT > x) to 1. */
+ WARN_STRICT_OVERFLOW_ALL = 1,
+ /* Overflow warning about folding a comparison to a constant because
+ of undefined signed overflow, other than cases covered by
+ WARN_STRICT_OVERFLOW_ALL. Example: folding (abs (x) >= 0) to 1
+ (this is false when x == INT_MIN). */
+ WARN_STRICT_OVERFLOW_CONDITIONAL = 2,
+ /* Overflow warning about changes to comparisons other than folding
+ them to a constant. Example: folding (x + 1 > 1) to (x > 0). */
+ WARN_STRICT_OVERFLOW_COMPARISON = 3,
+ /* Overflow warnings not covered by the above cases. Example:
+ folding ((x * 10) / 5) to (x * 2). */
+ WARN_STRICT_OVERFLOW_MISC = 4,
+ /* Overflow warnings about reducing magnitude of constants in
+ comparison. Example: folding (x + 2 > y) to (x + 1 >= y). */
+ WARN_STRICT_OVERFLOW_MAGNITUDE = 5
+};
+
+/* The type of an alias set.  Code currently assumes that variables of
+   this type can take the values 0 (the alias set which aliases
+   everything), -1 (sometimes indicating that the alias set is
+   unknown, sometimes indicating a memory barrier) and -2 (indicating
+   that the alias set should be set to a unique value but has not been
+   set yet). */
+typedef int alias_set_type;
+
+class edge_def;
+typedef class edge_def *edge;
+typedef const class edge_def *const_edge;
+struct basic_block_def;
+typedef struct basic_block_def *basic_block;
+typedef const struct basic_block_def *const_basic_block;
+
+#if !defined (GENERATOR_FILE)
+# define OBSTACK_CHUNK_SIZE memory_block_pool::block_size
+# define obstack_chunk_alloc mempool_obstack_chunk_alloc
+# define obstack_chunk_free mempool_obstack_chunk_free
+#else
+# define OBSTACK_CHUNK_SIZE 0
+# define obstack_chunk_alloc xmalloc
+# define obstack_chunk_free free
+#endif
+
+#define gcc_obstack_init(OBSTACK) \
+ obstack_specify_allocation ((OBSTACK), OBSTACK_CHUNK_SIZE, 0, \
+ obstack_chunk_alloc, \
+ obstack_chunk_free)
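+
+/* Editor's note (illustrative, not part of upstream): the macro pairs with
+   the standard obstack API, e.g.
+
+       static struct obstack temp_obstack;
+       gcc_obstack_init (&temp_obstack);
+       char *buf = (char *) obstack_alloc (&temp_obstack, 128);
+
+   Outside generator files the chunks come from GCC's memory-block pool
+   rather than raw xmalloc/free. */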
+
+/* enum reg_class is target specific, so it should not appear in
+ target-independent code or interfaces, like the target hook declarations
+ in target.h. */
+typedef int reg_class_t;
+
+class rtl_opt_pass;
+
+namespace gcc {
+ class context;
+}
+
+typedef std::pair <tree, tree> tree_pair;
+typedef std::pair <const char *, int> string_int_pair;
+
+/* Define a name->value mapping. */
+template <typename ValueType>
+struct kv_pair
+{
+ const char *const name; /* the name of the value */
+ const ValueType value; /* the value of the name */
+};
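+
+/* Editor's note (illustrative, not part of upstream): typical use is a
+   static lookup table, e.g.
+
+       static const kv_pair<int> level_names[] =
+         { { "none", 0 }, { "all", 1 } };
+*/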
+
+#else
+
+struct _dont_use_rtx_here_;
+struct _dont_use_rtvec_here_;
+struct _dont_use_rtx_insn_here_;
+union _dont_use_tree_here_;
+#define rtx struct _dont_use_rtx_here_ *
+#define const_rtx struct _dont_use_rtx_here_ *
+#define rtvec struct _dont_use_rtvec_here_ *
+#define const_rtvec struct _dont_use_rtvec_here_ *
+#define rtx_insn struct _dont_use_rtx_insn_here_
+#define tree union _dont_use_tree_here_ *
+#define const_tree union _dont_use_tree_here_ *
+
+typedef struct scalar_mode scalar_mode;
+typedef struct scalar_int_mode scalar_int_mode;
+typedef struct scalar_float_mode scalar_float_mode;
+typedef struct complex_mode complex_mode;
+
+#endif
+
+/* Classes of functions that the compiler needs to check
+   for presence at runtime. */
+enum function_class {
+ function_c94,
+ function_c99_misc,
+ function_c99_math_complex,
+ function_sincos,
+ function_c11_misc,
+ function_c2x_misc
+};
+
+/* Enumerate visibility settings. This is deliberately ordered from most
+ to least visibility. */
+enum symbol_visibility
+{
+ VISIBILITY_DEFAULT,
+ VISIBILITY_PROTECTED,
+ VISIBILITY_HIDDEN,
+ VISIBILITY_INTERNAL
+};
+
+/* enums used by the targetm.excess_precision hook. */
+
+enum flt_eval_method
+{
+ FLT_EVAL_METHOD_UNPREDICTABLE = -1,
+ FLT_EVAL_METHOD_PROMOTE_TO_FLOAT = 0,
+ FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE = 1,
+ FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE = 2,
+ FLT_EVAL_METHOD_PROMOTE_TO_FLOAT16 = 16
+};
+
+enum excess_precision_type
+{
+ EXCESS_PRECISION_TYPE_IMPLICIT,
+ EXCESS_PRECISION_TYPE_STANDARD,
+ EXCESS_PRECISION_TYPE_FAST,
+ EXCESS_PRECISION_TYPE_FLOAT16
+};
+
+/* Level of size optimization. */
+
+enum optimize_size_level
+{
+ /* Do not optimize for size. */
+ OPTIMIZE_SIZE_NO,
+ /* Optimize for size but not at extreme performance costs. */
+ OPTIMIZE_SIZE_BALANCED,
+ /* Optimize for size as much as possible. */
+ OPTIMIZE_SIZE_MAX
+};
+
+/* Support for user-provided GGC and PCH markers. The first parameter
+ is a pointer to a pointer, the second either NULL if the pointer to
+   pointer points into a GC object or the actual pointer address if
+   the first argument points to a temporary, and the third a cookie. */
+typedef void (*gt_pointer_operator) (void *, void *, void *);
+
+#if !defined (HAVE_UCHAR)
+typedef unsigned char uchar;
+#endif
+
+/* Most source files will require the following headers. */
+#if !defined (USED_FOR_TARGET)
+#include "insn-modes.h"
+#include "signop.h"
+#include "wide-int.h"
+#include "wide-int-print.h"
+
+/* On targets that don't need polynomial offsets, target-specific code
+ should be able to treat poly_int like a normal constant, with a
+ conversion operator going from the former to the latter. We also
+ allow this for gencondmd.cc for all targets, so that we can treat
+ machine_modes as enums without causing build failures. */
+#if (defined (IN_TARGET_CODE) \
+ && (defined (USE_ENUM_MODES) || NUM_POLY_INT_COEFFS == 1))
+#define POLY_INT_CONVERSION 1
+#else
+#define POLY_INT_CONVERSION 0
+#endif
+
+#include "poly-int.h"
+#include "poly-int-types.h"
+#include "insn-modes-inline.h"
+#include "machmode.h"
+#include "double-int.h"
+#include "align.h"
+/* Most host source files will require the following headers. */
+#if !defined (GENERATOR_FILE)
+#include "iterator-utils.h"
+#include "real.h"
+#include "fixed-value.h"
+#include "hash-table.h"
+#include "hash-set.h"
+#include "input.h"
+#include "is-a.h"
+#include "memory-block.h"
+#include "dumpfile.h"
+#endif /* GENERATOR_FILE */
+#endif /* !USED_FOR_TARGET */
+
+#endif /* coretypes.h */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/coroutine-builtins.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/coroutine-builtins.def
new file mode 100644
index 0000000..6ba10b1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/coroutine-builtins.def
@@ -0,0 +1,53 @@
+/* This file contains the definitions and documentation for the
+ coroutines builtins used in GCC.
+
+ Copyright (C) 2018-2023 Free Software Foundation, Inc.
+
+ Contributed by Iain Sandoe <iain@sandoe.co.uk> under contract to Facebook.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Before including this file, you should define the macros:
+
+   DEF_BUILTIN_STUB(ENUM, NAME)
+   DEF_COROUTINE_BUILTIN (ENUM, NAME, TYPE, ATTRS)
+
+   See builtins.def for details.
+   The builtins created here are used by library implementations of C++
+   coroutines. */
+
+/* This has to come before all the coroutine builtins. */
+DEF_BUILTIN_STUB (BEGIN_COROUTINE_BUILTINS, (const char *) 0)
+
+/* These are the builtins that are externally-visible and used by the
+ standard library implementation of the coroutine header. */
+
+DEF_COROUTINE_BUILTIN (BUILT_IN_CORO_PROMISE, "promise",
+ BT_FN_PTR_PTR_CONST_SIZE_BOOL,
+ ATTR_CONST_NOTHROW_LEAF_LIST)
+
+DEF_COROUTINE_BUILTIN (BUILT_IN_CORO_RESUME, "resume", BT_FN_VOID_PTR,
+ ATTR_NULL)
+
+DEF_COROUTINE_BUILTIN (BUILT_IN_CORO_DESTROY, "destroy", BT_FN_VOID_PTR,
+ ATTR_NULL)
+
+DEF_COROUTINE_BUILTIN (BUILT_IN_CORO_DONE, "done", BT_FN_BOOL_PTR,
+ ATTR_NOTHROW_LEAF_LIST)
+
+/* This has to come after all the coroutine builtins. */
+DEF_BUILTIN_STUB (END_COROUTINE_BUILTINS, (const char *) 0)
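+
+/* Editor's note (illustrative, not part of upstream): the NAMEs above are
+   exposed to user code with a "__builtin_coro_" prefix; libstdc++'s
+   <coroutine> header, for instance, implements
+   coroutine_handle<>::resume () via __builtin_coro_resume (void *) and
+   coroutine_handle<>::done () via __builtin_coro_done (void *). */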
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/coverage.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/coverage.h
new file mode 100644
index 0000000..504a235
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/coverage.h
@@ -0,0 +1,61 @@
+/* coverage.h - Defines data exported from coverage.cc
+ Copyright (C) 1998-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_COVERAGE_H
+#define GCC_COVERAGE_H
+
+#include "gcov-io.h"
+
+extern void coverage_init (const char *);
+extern void coverage_finish (void);
+extern void coverage_remove_note_file (void);
+
+/* Start outputting coverage information for the current
+ function. */
+extern int coverage_begin_function (unsigned, unsigned);
+
+/* Complete the coverage information for the current function. */
+extern void coverage_end_function (unsigned, unsigned);
+
+/* Compute the control flow checksum for the function FN given as argument. */
+extern unsigned coverage_compute_cfg_checksum (struct function *fn);
+
+/* Compute the profile id of function N. */
+extern unsigned coverage_compute_profile_id (struct cgraph_node *n);
+
+/* Compute the line number checksum for the current function. */
+extern unsigned coverage_compute_lineno_checksum (void);
+
+/* Allocate some counters. Repeatable per function. */
+extern int coverage_counter_alloc (unsigned /*counter*/, unsigned/*num*/);
+/* Use a counter from the most recent allocation. */
+extern tree tree_coverage_counter_ref (unsigned /*counter*/, unsigned/*num*/);
+/* Use a counter address from the most recent allocation. */
+extern tree tree_coverage_counter_addr (unsigned /*counter*/, unsigned/*num*/);
+
+/* Get all the counters for the current function. */
+extern gcov_type *get_coverage_counts (unsigned /*counter*/,
+ unsigned /*cfg_checksum*/,
+ unsigned /*lineno_checksum*/,
+ unsigned /*n_counts*/);
+
+extern tree get_gcov_type (void);
+extern bool coverage_node_map_initialized_p (void);
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/contracts.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/contracts.h
new file mode 100644
index 0000000..a5bbd8b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/contracts.h
@@ -0,0 +1,305 @@
+/* Definitions for C++ contract levels. Implements functionality described in
+ the N4820 working draft version of contracts, P1290, P1332, and P1429.
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+ Contributed by Jeff Chapman II (jchapman@lock3software.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CP_CONTRACT_H
+#define GCC_CP_CONTRACT_H
+
+/* Contract levels approximate the complexity of the expression. */
+
+enum contract_level
+{
+ CONTRACT_INVALID,
+ CONTRACT_DEFAULT,
+ CONTRACT_AUDIT,
+ CONTRACT_AXIOM
+};
+
+/* The concrete semantics determine the behavior of a contract. */
+
+enum contract_semantic
+{
+ CCS_INVALID,
+ CCS_IGNORE,
+ CCS_ASSUME,
+ CCS_NEVER,
+ CCS_MAYBE
+};
+
+/* True if the contract is unchecked. */
+
+inline bool
+unchecked_contract_p (contract_semantic cs)
+{
+ return cs == CCS_IGNORE || cs == CCS_ASSUME;
+}
+
+/* True if the contract is checked. */
+
+inline bool
+checked_contract_p (contract_semantic cs)
+{
+ return cs >= CCS_NEVER;
+}
+
+/* Must match std::contract_violation_continuation_mode in <contract>. */
+enum contract_continuation
+{
+ NEVER_CONTINUE,
+ MAYBE_CONTINUE
+};
+
+/* Assertion role info. */
+struct contract_role
+{
+ const char *name;
+ contract_semantic default_semantic;
+ contract_semantic audit_semantic;
+ contract_semantic axiom_semantic;
+};
+
+/* Information for configured contract semantics. */
+
+struct contract_configuration
+{
+ contract_level level;
+ contract_role* role;
+};
+
+/* A contract mode contains information used to derive the checking
+ and assumption semantics of a contract. This is either a dynamic
+ configuration, meaning it derives from the build mode, or it is
+ explicitly specified. */
+
+struct contract_mode
+{
+ contract_mode () : kind(cm_invalid) {}
+ contract_mode (contract_level level, contract_role *role = NULL)
+ : kind(cm_dynamic)
+ {
+ contract_configuration cc;
+ cc.level = level;
+ cc.role = role;
+ u.config = cc;
+ }
+ contract_mode (contract_semantic semantic) : kind(cm_explicit)
+ {
+ u.semantic = semantic;
+ }
+
+ contract_level get_level () const
+ {
+ gcc_assert (kind == cm_dynamic);
+ return u.config.level;
+ }
+
+ contract_role *get_role () const
+ {
+ gcc_assert (kind == cm_dynamic);
+ return u.config.role;
+ }
+
+ contract_semantic get_semantic () const
+ {
+ gcc_assert (kind == cm_explicit);
+ return u.semantic;
+ }
+
+ enum { cm_invalid, cm_dynamic, cm_explicit } kind;
+
+ union
+ {
+ contract_configuration config;
+ contract_semantic semantic;
+ } u;
+};
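+
+/* Editor's note (illustrative, not part of upstream): the two non-trivial
+   constructors select the two kinds above, e.g.
+
+       contract_mode m1 (CONTRACT_AUDIT);   // cm_dynamic: semantic derived
+                                            // from build level and role
+       contract_mode m2 (CCS_IGNORE);       // cm_explicit: semantic fixed
+
+   get_level ()/get_role () are only valid on the first form and
+   get_semantic () only on the second, as enforced by the asserts above. */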
+
+extern contract_role *get_contract_role (const char *);
+extern contract_role *add_contract_role (const char *,
+ contract_semantic,
+ contract_semantic,
+ contract_semantic,
+ bool = true);
+extern void validate_contract_role (contract_role *);
+extern void setup_default_contract_role (bool = true);
+extern contract_semantic lookup_concrete_semantic (const char *);
+
+/* Map a source level semantic or level name to its value, or invalid. */
+extern contract_semantic map_contract_semantic (const char *);
+extern contract_level map_contract_level (const char *);
+
+/* Check if an attribute is a cxx contract attribute. */
+extern bool cxx_contract_attribute_p (const_tree);
+extern bool cp_contract_assertion_p (const_tree);
+
+/* Returns the default role. */
+
+inline contract_role *
+get_default_contract_role ()
+{
+ return get_contract_role ("default");
+}
+
+/* Handle various command line arguments related to semantic mapping. */
+extern void handle_OPT_fcontract_build_level_ (const char *);
+extern void handle_OPT_fcontract_assumption_mode_ (const char *);
+extern void handle_OPT_fcontract_continuation_mode_ (const char *);
+extern void handle_OPT_fcontract_role_ (const char *);
+extern void handle_OPT_fcontract_semantic_ (const char *);
+
+enum contract_matching_context
+{
+ cmc_declaration,
+ cmc_override
+};
+
+/* True if NODE is any kind of contract. */
+#define CONTRACT_P(NODE) \
+ (TREE_CODE (NODE) == ASSERTION_STMT \
+ || TREE_CODE (NODE) == PRECONDITION_STMT \
+ || TREE_CODE (NODE) == POSTCONDITION_STMT)
+
+/* True if NODE is a contract condition. */
+#define CONTRACT_CONDITION_P(NODE) \
+ (TREE_CODE (NODE) == PRECONDITION_STMT \
+ || TREE_CODE (NODE) == POSTCONDITION_STMT)
+
+/* True if NODE is a precondition. */
+#define PRECONDITION_P(NODE) \
+ (TREE_CODE (NODE) == PRECONDITION_STMT)
+
+/* True if NODE is a postcondition. */
+#define POSTCONDITION_P(NODE) \
+ (TREE_CODE (NODE) == POSTCONDITION_STMT)
+
+#define CONTRACT_CHECK(NODE) \
+ (TREE_CHECK3 (NODE, ASSERTION_STMT, PRECONDITION_STMT, POSTCONDITION_STMT))
+
+/* True iff the FUNCTION_DECL NODE currently has any contracts. */
+#define DECL_HAS_CONTRACTS_P(NODE) \
+ (DECL_CONTRACTS (NODE) != NULL_TREE)
+
+/* For a FUNCTION_DECL of a guarded function, this points to a list of the pre
+ and post contracts of the first decl of NODE in original order. */
+#define DECL_CONTRACTS(NODE) \
+ (find_contract (DECL_ATTRIBUTES (NODE)))
+
+/* The next contract (if any) after this one in an attribute list. */
+#define CONTRACT_CHAIN(NODE) \
+ (find_contract (TREE_CHAIN (NODE)))
+
+/* The wrapper of the original source location of a list of contracts. */
+#define CONTRACT_SOURCE_LOCATION_WRAPPER(NODE) \
+ (TREE_PURPOSE (TREE_VALUE (NODE)))
+
+/* The original source location of a list of contracts. */
+#define CONTRACT_SOURCE_LOCATION(NODE) \
+ (EXPR_LOCATION (CONTRACT_SOURCE_LOCATION_WRAPPER (NODE)))
+
+/* The actual code _STMT for a contract attribute. */
+#define CONTRACT_STATEMENT(NODE) \
+ (TREE_VALUE (TREE_VALUE (NODE)))
+
+/* True if the contract semantic was specified literally. If true, the
+ contract mode is an identifier containing the semantic. Otherwise,
+ it is a TREE_LIST whose TREE_VALUE is the level and whose TREE_PURPOSE
+ is the role. */
+#define CONTRACT_LITERAL_MODE_P(NODE) \
+ (CONTRACT_MODE (NODE) != NULL_TREE \
+ && TREE_CODE (CONTRACT_MODE (NODE)) == IDENTIFIER_NODE)
+
+/* The identifier denoting the literal semantic of the contract. */
+#define CONTRACT_LITERAL_SEMANTIC(NODE) \
+ (TREE_OPERAND (NODE, 0))
+
+/* The written "mode" of the contract. Either an IDENTIFIER with the
+ literal semantic or a TREE_LIST containing the level and role. */
+#define CONTRACT_MODE(NODE) \
+ (TREE_OPERAND (CONTRACT_CHECK (NODE), 0))
+
+/* The identifier denoting the build level of the contract. */
+#define CONTRACT_LEVEL(NODE) \
+ (TREE_VALUE (CONTRACT_MODE (NODE)))
+
+/* The identifier denoting the role of the contract. */
+#define CONTRACT_ROLE(NODE) \
+ (TREE_PURPOSE (CONTRACT_MODE (NODE)))
+
+/* The parsed condition of the contract. */
+#define CONTRACT_CONDITION(NODE) \
+ (TREE_OPERAND (CONTRACT_CHECK (NODE), 1))
+
+/* True iff the condition of the contract NODE is not yet parsed. */
+#define CONTRACT_CONDITION_DEFERRED_P(NODE) \
+ (TREE_CODE (CONTRACT_CONDITION (NODE)) == DEFERRED_PARSE)
+
+/* The raw comment of the contract. */
+#define CONTRACT_COMMENT(NODE) \
+ (TREE_OPERAND (CONTRACT_CHECK (NODE), 2))
+
+/* The VAR_DECL of a postcondition result. For deferred contracts, this
+ is an IDENTIFIER. */
+#define POSTCONDITION_IDENTIFIER(NODE) \
+ (TREE_OPERAND (POSTCONDITION_STMT_CHECK (NODE), 3))
+
+/* For a FUNCTION_DECL of a guarded function, this holds the function decl
+ where pre contract checks are emitted. */
+#define DECL_PRE_FN(NODE) \
+ (get_precondition_function ((NODE)))
+
+/* For a FUNCTION_DECL of a guarded function, this holds the function decl
+ where post contract checks are emitted. */
+#define DECL_POST_FN(NODE) \
+ (get_postcondition_function ((NODE)))
+
+/* True iff the FUNCTION_DECL is the pre function for a guarded function. */
+#define DECL_IS_PRE_FN_P(NODE) \
+ (DECL_ABSTRACT_ORIGIN (NODE) && DECL_PRE_FN (DECL_ABSTRACT_ORIGIN (NODE)) == NODE)
+
+/* True iff the FUNCTION_DECL is the post function for a guarded function. */
+#define DECL_IS_POST_FN_P(NODE) \
+ (DECL_ABSTRACT_ORIGIN (NODE) && DECL_POST_FN (DECL_ABSTRACT_ORIGIN (NODE)) == NODE)
+
+extern void remove_contract_attributes (tree);
+extern void copy_contract_attributes (tree, tree);
+extern void remap_contracts (tree, tree, tree, bool);
+extern void maybe_update_postconditions (tree);
+extern void rebuild_postconditions (tree);
+extern bool check_postcondition_result (tree, tree, location_t);
+extern tree get_precondition_function (tree);
+extern tree get_postcondition_function (tree);
+extern void duplicate_contracts (tree, tree);
+extern void match_deferred_contracts (tree);
+extern void defer_guarded_contract_match (tree, tree, tree);
+extern bool diagnose_misapplied_contracts (tree);
+extern tree finish_contract_attribute (tree, tree);
+extern tree invalidate_contract (tree);
+extern void update_late_contract (tree, tree, tree);
+extern tree splice_out_contracts (tree);
+extern bool all_attributes_are_contracts_p (tree);
+extern void inherit_base_contracts (tree, tree);
+extern tree apply_postcondition_to_return (tree);
+extern void start_function_contracts (tree);
+extern void finish_function_contracts (tree);
+extern void set_contract_functions (tree, tree, tree);
+extern tree build_contract_check (tree);
+extern void emit_assertion (tree);
+
+#endif /* ! GCC_CP_CONTRACT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cp-trait.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cp-trait.def
new file mode 100644
index 0000000..bac593c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cp-trait.def
@@ -0,0 +1,108 @@
+/* This file contains the definitions for C++-specific built-in traits.
+
+ Copyright The GNU Toolchain Authors.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Add a DEFTRAIT_EXPR (CODE, NAME, N) line to this file to define an
+ expression-yielding built-in trait that has internal code name CODE, is
+ spelled as NAME and takes N type arguments (where N is either 1, 2, or
+ the special value -1 which denotes that it takes at least one argument).
+ Such traits are represented as a TRAIT_EXPR tree whose TRAIT_EXPR_KIND is
+ CPTK_CODE. Define the behavior of the trait in finish_trait_expr. */
+
+/* Add a DEFTRAIT_TYPE (CODE, NAME, N) line to this file to define a
+ type-yielding built-in trait as described above. Such traits are
+ generally represented as a TRAIT_TYPE tree whose TRAIT_TYPE_KIND is
+ CPTK_CODE (exceptions are BASES and DIRECT_BASES below). Define the
+ behavior of the trait in finish_trait_type. */
+
+#ifdef DEFTRAIT
+#define DEFTRAIT_EXPR(CODE, NAME, ARITY) DEFTRAIT(tcc_expression, CODE, NAME, ARITY)
+#define DEFTRAIT_TYPE(CODE, NAME, ARITY) DEFTRAIT(tcc_type, CODE, NAME, ARITY)
+#define DEFTRAIT_EXPR_DEFAULTED
+#define DEFTRAIT_TYPE_DEFAULTED
+#endif
+
+#ifndef DEFTRAIT_EXPR
+#define DEFTRAIT_EXPR(CODE, NAME, ARITY)
+#define DEFTRAIT_EXPR_DEFAULTED
+#endif
+
+#ifndef DEFTRAIT_TYPE
+#define DEFTRAIT_TYPE(CODE, NAME, ARITY)
+#define DEFTRAIT_TYPE_DEFAULTED
+#endif
+
+DEFTRAIT_EXPR (HAS_NOTHROW_ASSIGN, "__has_nothrow_assign", 1)
+DEFTRAIT_EXPR (HAS_NOTHROW_CONSTRUCTOR, "__has_nothrow_constructor", 1)
+DEFTRAIT_EXPR (HAS_NOTHROW_COPY, "__has_nothrow_copy", 1)
+DEFTRAIT_EXPR (HAS_TRIVIAL_ASSIGN, "__has_trivial_assign", 1)
+DEFTRAIT_EXPR (HAS_TRIVIAL_CONSTRUCTOR, "__has_trivial_constructor", 1)
+DEFTRAIT_EXPR (HAS_TRIVIAL_COPY, "__has_trivial_copy", 1)
+DEFTRAIT_EXPR (HAS_TRIVIAL_DESTRUCTOR, "__has_trivial_destructor", 1)
+DEFTRAIT_EXPR (HAS_UNIQUE_OBJ_REPRESENTATIONS, "__has_unique_object_representations", 1)
+DEFTRAIT_EXPR (HAS_VIRTUAL_DESTRUCTOR, "__has_virtual_destructor", 1)
+DEFTRAIT_EXPR (IS_ABSTRACT, "__is_abstract", 1)
+DEFTRAIT_EXPR (IS_AGGREGATE, "__is_aggregate", 1)
+DEFTRAIT_EXPR (IS_ASSIGNABLE, "__is_assignable", 2)
+DEFTRAIT_EXPR (IS_BASE_OF, "__is_base_of", 2)
+DEFTRAIT_EXPR (IS_CLASS, "__is_class", 1)
+DEFTRAIT_EXPR (IS_CONSTRUCTIBLE, "__is_constructible", -1)
+DEFTRAIT_EXPR (IS_CONVERTIBLE, "__is_convertible", 2)
+DEFTRAIT_EXPR (IS_EMPTY, "__is_empty", 1)
+DEFTRAIT_EXPR (IS_ENUM, "__is_enum", 1)
+DEFTRAIT_EXPR (IS_FINAL, "__is_final", 1)
+DEFTRAIT_EXPR (IS_LAYOUT_COMPATIBLE, "__is_layout_compatible", 2)
+DEFTRAIT_EXPR (IS_LITERAL_TYPE, "__is_literal_type", 1)
+DEFTRAIT_EXPR (IS_NOTHROW_ASSIGNABLE, "__is_nothrow_assignable", 2)
+DEFTRAIT_EXPR (IS_NOTHROW_CONSTRUCTIBLE, "__is_nothrow_constructible", -1)
+DEFTRAIT_EXPR (IS_NOTHROW_CONVERTIBLE, "__is_nothrow_convertible", 2)
+DEFTRAIT_EXPR (IS_POINTER_INTERCONVERTIBLE_BASE_OF, "__is_pointer_interconvertible_base_of", 2)
+DEFTRAIT_EXPR (IS_POD, "__is_pod", 1)
+DEFTRAIT_EXPR (IS_POLYMORPHIC, "__is_polymorphic", 1)
+DEFTRAIT_EXPR (IS_SAME, "__is_same", 2)
+DEFTRAIT_EXPR (IS_STD_LAYOUT, "__is_standard_layout", 1)
+DEFTRAIT_EXPR (IS_TRIVIAL, "__is_trivial", 1)
+DEFTRAIT_EXPR (IS_TRIVIALLY_ASSIGNABLE, "__is_trivially_assignable", 2)
+DEFTRAIT_EXPR (IS_TRIVIALLY_CONSTRUCTIBLE, "__is_trivially_constructible", -1)
+DEFTRAIT_EXPR (IS_TRIVIALLY_COPYABLE, "__is_trivially_copyable", 1)
+DEFTRAIT_EXPR (IS_UNION, "__is_union", 1)
+DEFTRAIT_EXPR (REF_CONSTRUCTS_FROM_TEMPORARY, "__reference_constructs_from_temporary", 2)
+DEFTRAIT_EXPR (REF_CONVERTS_FROM_TEMPORARY, "__reference_converts_from_temporary", 2)
+/* FIXME Added space to avoid direct usage in GCC 13. */
+DEFTRAIT_EXPR (IS_DEDUCIBLE, "__is_deducible ", 2)
+
+DEFTRAIT_TYPE (REMOVE_CV, "__remove_cv", 1)
+DEFTRAIT_TYPE (REMOVE_REFERENCE, "__remove_reference", 1)
+DEFTRAIT_TYPE (REMOVE_CVREF, "__remove_cvref", 1)
+DEFTRAIT_TYPE (UNDERLYING_TYPE, "__underlying_type", 1)
+
+/* These traits yield a type pack, not a type, and are represented by
+ cp_parser_trait as a special BASES tree instead of a TRAIT_TYPE tree. */
+DEFTRAIT_TYPE (BASES, "__bases", 1)
+DEFTRAIT_TYPE (DIRECT_BASES, "__direct_bases", 1)
+
+#ifdef DEFTRAIT_EXPR_DEFAULTED
+#undef DEFTRAIT_EXPR
+#undef DEFTRAIT_EXPR_DEFAULTED
+#endif
+
+#ifdef DEFTRAIT_TYPE_DEFAULTED
+#undef DEFTRAIT_TYPE
+#undef DEFTRAIT_TYPE_DEFAULTED
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cp-tree.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cp-tree.def
new file mode 100644
index 0000000..0e66ca7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cp-tree.def
@@ -0,0 +1,600 @@
+/* This file contains the definitions and documentation for the
+ additional tree codes used in the GNU C++ compiler (see tree.def
+ for the standard codes).
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+ Hacked by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* An OFFSET_REF is used in two situations:
+
+ 1. An expression of the form `A::m' where `A' is a class and `m' is
+ a non-static member. In this case, operand 0 will be a TYPE
+ (corresponding to `A') and operand 1 will be a FIELD_DECL,
+ BASELINK, or TEMPLATE_ID_EXPR (corresponding to `m').
+
+ The expression is a pointer-to-member if its address is taken,
+ but simply denotes a member of the object if its address is not
+ taken.
+
+ This form is only used during the parsing phase; once semantic
+ analysis has taken place it is eliminated.
+
+ 2. An expression of the form `x.*p'. In this case, operand 0 will
+ be an expression corresponding to `x' and operand 1 will be an
+ expression with pointer-to-member type. */
+DEFTREECODE (OFFSET_REF, "offset_ref", tcc_reference, 2)
+
+/* A pointer-to-member constant. For a pointer-to-member constant
+ `X::Y', the PTRMEM_CST_CLASS is the RECORD_TYPE for `X' and the
+ PTRMEM_CST_MEMBER is the _DECL for `Y'. */
+DEFTREECODE (PTRMEM_CST, "ptrmem_cst", tcc_constant, 0)
+
+/* For NEW_EXPR, operand 0 is the placement list.
+ Operand 1 is the new-declarator.
+ Operand 2 is the number of elements in the array.
+ Operand 3 is the initializer. */
+DEFTREECODE (NEW_EXPR, "nw_expr", tcc_expression, 4)
+DEFTREECODE (VEC_NEW_EXPR, "vec_nw_expr", tcc_expression, 3)
+
+/* For DELETE_EXPR, operand 0 is the store to be destroyed.
+ Operand 1 is the value to pass to the destroying function
+ saying whether the store should be deallocated as well. */
+DEFTREECODE (DELETE_EXPR, "dl_expr", tcc_expression, 2)
+DEFTREECODE (VEC_DELETE_EXPR, "vec_dl_expr", tcc_expression, 2)
+
+/* Value is reference to particular overloaded class method.
+ Operand 0 is the class, operand 1 is the field
+ The COMPLEXITY field holds the class level (usually 0). */
+DEFTREECODE (SCOPE_REF, "scope_ref", tcc_reference, 2)
+
+/* When composing an object with a member, this is the result.
+ Operand 0 is the object. Operand 1 is the member (usually
+ a dereferenced pointer to member). */
+DEFTREECODE (MEMBER_REF, "member_ref", tcc_reference, 2)
+
+/* Type conversion operator in C++. TREE_TYPE is type that this
+ operator converts to. Operand is expression to be converted. */
+DEFTREECODE (TYPE_EXPR, "type_expr", tcc_expression, 1)
+
+/* AGGR_INIT_EXPRs have a variably-sized representation similar to
+ that of CALL_EXPRs. Operand 0 is an INTEGER_CST node containing the
+ operand count, operand 1 is the function which performs initialization,
+ operand 2 is the slot which was allocated for this expression, and
+ the remaining operands are the arguments to the initialization function. */
+DEFTREECODE (AGGR_INIT_EXPR, "aggr_init_expr", tcc_vl_exp, 3)
+
+/* Initialization of an array from another array, expressed at a high level
+ so that it works with TARGET_EXPR. Operand 0 is the target, operand 1
+ is the initializer. */
+DEFTREECODE (VEC_INIT_EXPR, "vec_init_expr", tcc_expression, 2)
+
+/* A throw expression. operand 0 is the expression, if there was one,
+ else it is NULL_TREE. */
+DEFTREECODE (THROW_EXPR, "throw_expr", tcc_expression, 1)
+
+/* An empty class object. The TREE_TYPE gives the class type. We use
+ these to avoid actually creating instances of the empty classes. */
+DEFTREECODE (EMPTY_CLASS_EXPR, "empty_class_expr", tcc_expression, 0)
+
+/* A reference to a member function or member functions from a base
+ class. BASELINK_FUNCTIONS gives the FUNCTION_DECL,
+ TEMPLATE_DECL, OVERLOAD, or TEMPLATE_ID_EXPR corresponding to the
+ functions. BASELINK_BINFO gives the base from which the functions
+ come, i.e., the base to which the `this' pointer must be converted
+ before the functions are called. BASELINK_ACCESS_BINFO gives the
+ base used to name the functions.
+
+ A BASELINK is an expression; the TREE_TYPE of the BASELINK gives
+ the type of the expression. This type is either a FUNCTION_TYPE,
+ METHOD_TYPE, or `unknown_type_node' indicating that the function is
+ overloaded. */
+DEFTREECODE (BASELINK, "baselink", tcc_exceptional, 0)
+
+/* Template definition. The following fields have the specified uses,
+ although there are other macros in cp-tree.h that should be used for
+ accessing this data.
+ DECL_ARGUMENTS template parm vector
+ DECL_TEMPLATE_INFO template text &c
+ DECL_VINDEX list of instantiations already produced;
+ only done for functions so far
+ For class template:
+ DECL_INITIAL associated templates (methods &c)
+ DECL_TEMPLATE_RESULT null
+ For non-class templates:
+ TREE_TYPE type of object to be constructed
+ DECL_TEMPLATE_RESULT decl for object to be created
+ (e.g., FUNCTION_DECL with tmpl parms used)
+ */
+DEFTREECODE (TEMPLATE_DECL, "template_decl", tcc_declaration, 0)
+
+/* Index into a template parameter list. The TEMPLATE_PARM_IDX gives
+ the index (from 0) of the parameter, while the TEMPLATE_PARM_LEVEL
+ gives the level (from 1) of the parameter.
+
+ Here's an example:
+
+ template <class T> // Index 0, Level 1.
+ struct S
+ {
+ template <class U, // Index 0, Level 2.
+ class V> // Index 1, Level 2.
+ void f();
+ };
+
+ The DESCENDANTS will be a chain of TEMPLATE_PARM_INDEXs descended
+ from this one. The first descendant will have the same IDX, but
+ its LEVEL will be one less. The TREE_CHAIN field is used to chain
+ together the descendants. The TEMPLATE_PARM_DECL is the
+ declaration of this parameter, either a TYPE_DECL or CONST_DECL.
+ The TEMPLATE_PARM_ORIG_LEVEL is the LEVEL of the most distant
+ parent, i.e., the LEVEL that the parameter originally had when it
+ was declared. For example, if we instantiate S<int>, we will have:
+
+ struct S<int>
+ {
+ template <class U, // Index 0, Level 1, Orig Level 2
+ class V> // Index 1, Level 1, Orig Level 2
+ void f();
+ };
+
+ The LEVEL is the level of the parameter when we are worrying about
+ the types of things; the ORIG_LEVEL is the level when we are
+ worrying about instantiating things. */
+DEFTREECODE (TEMPLATE_PARM_INDEX, "template_parm_index", tcc_exceptional, 0)
+
+/* Index into a template parameter list for template template parameters.
+ This parameter must be a type. The TYPE_FIELDS value will be a
+ TEMPLATE_PARM_INDEX.
+
+ It is used without template arguments like TT in C<TT>,
+ TYPE_NAME is a TEMPLATE_DECL. */
+DEFTREECODE (TEMPLATE_TEMPLATE_PARM, "template_template_parm", tcc_type, 0)
+
+/* The ordering of the following codes is optimized for the checking
+ macros in tree.h. Changing the order will degrade the speed of the
+ compiler. TEMPLATE_TYPE_PARM, TYPENAME_TYPE, TYPEOF_TYPE,
+ BOUND_TEMPLATE_TEMPLATE_PARM. */
+
+/* Index into a template parameter list. This parameter must be a type.
+ The type.values field will be a TEMPLATE_PARM_INDEX. */
+DEFTREECODE (TEMPLATE_TYPE_PARM, "template_type_parm", tcc_type, 0)
+
+/* A type designated by `typename T::t'. TYPE_CONTEXT is `T',
+ TYPE_NAME is an IDENTIFIER_NODE for `t'. If the type was named via
+ template-id, TYPENAME_TYPE_FULLNAME will hold the TEMPLATE_ID_EXPR.
+ TREE_TYPE is always NULL. */
+DEFTREECODE (TYPENAME_TYPE, "typename_type", tcc_type, 0)
+
+/* A type designated by `__typeof (expr)'. TYPEOF_TYPE_EXPR is the
+ expression in question. */
+DEFTREECODE (TYPEOF_TYPE, "typeof_type", tcc_type, 0)
+
+/* Like TEMPLATE_TEMPLATE_PARM it is used with bound template arguments
+ like TT<int>.
+ In this case, TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO contains the
+ template name and its bound arguments. TYPE_NAME is a TYPE_DECL. */
+DEFTREECODE (BOUND_TEMPLATE_TEMPLATE_PARM, "bound_template_template_parm",
+ tcc_type, 0)
+
+/* For template template argument of the form `T::template C'.
+ TYPE_CONTEXT is `T', the template parameter dependent object.
+ TYPE_NAME is a TEMPLATE_DECL, whose DECL_TEMPLATE_PARMS are any
+ template parms of the instantiation. That decl's DECL_NAME is the
+ IDENTIFIER_NODE for `C', the member class template. */
+DEFTREECODE (UNBOUND_CLASS_TEMPLATE, "unbound_class_template", tcc_type, 0)
+
+/* A using declaration. USING_DECL_SCOPE contains the specified
+ scope. In a variadic using-declaration, this is a TYPE_PACK_EXPANSION.
+ In a member using decl, unless DECL_DEPENDENT_P is true,
+ USING_DECL_DECLS contains the _DECL or OVERLOAD so named. This is
+ not an alias, but is later expanded into multiple aliases. */
+DEFTREECODE (USING_DECL, "using_decl", tcc_declaration, 0)
+
+/* A using directive. The operand is USING_STMT_NAMESPACE. */
+DEFTREECODE (USING_STMT, "using_stmt", tcc_statement, 1)
+
+/* An un-parsed operand. Holds a vector of input tokens and
+ a vector of places where the argument was instantiated before
+ parsing had occurred. This is used for default arguments, delayed
+ NSDMIs, and noexcept-specifier parsing. */
+DEFTREECODE (DEFERRED_PARSE, "deferred_parse", tcc_exceptional, 0)
+
+/* An uninstantiated/unevaluated noexcept-specification. For the
+ uninstantiated case, DEFERRED_NOEXCEPT_PATTERN is the pattern from the
+ template, and DEFERRED_NOEXCEPT_ARGS are the template arguments to
+ substitute into the pattern when needed. For the unevaluated case,
+ those slots are NULL_TREE and we use get_defaulted_eh_spec to find
+ the exception-specification. */
+DEFTREECODE (DEFERRED_NOEXCEPT, "deferred_noexcept", tcc_exceptional, 0)
+
+/* A template-id, like foo<int>. The first operand is the template.
+ The second is NULL if there are no explicit arguments, or a
+ TREE_VEC of arguments. The template will be a FUNCTION_DECL,
+ TEMPLATE_DECL, or an OVERLOAD. If the template-id refers to a
+ member template, the template may be an IDENTIFIER_NODE. */
+DEFTREECODE (TEMPLATE_ID_EXPR, "template_id_expr", tcc_expression, 2)
+
+/* One of a set of overloaded functions. */
+DEFTREECODE (OVERLOAD, "overload", tcc_exceptional, 0)
+
+/* A vector of binding slots. */
+DEFTREECODE (BINDING_VECTOR, "binding_vector", tcc_exceptional, 0)
+
+/* A pseudo-destructor, of the form "OBJECT.~DESTRUCTOR" or
+ "OBJECT.SCOPE::~DESTRUCTOR. The first operand is the OBJECT. The
+ second operand (if non-NULL) is the SCOPE. The third operand is
+ the TYPE node corresponding to the DESTRUCTOR. The type of the
+ first operand will always be a scalar type.
+
+ The type of a PSEUDO_DTOR_EXPR is always "void", even though it can
+ be used as if it were a zero-argument function. We handle the
+ function-call case specially, and giving it "void" type prevents it
+ being used in expressions in ways that are not permitted. */
+DEFTREECODE (PSEUDO_DTOR_EXPR, "pseudo_dtor_expr", tcc_expression, 3)
+
+/* A whole bunch of tree codes for the initial, superficial parsing of
+ templates. */
+DEFTREECODE (MODOP_EXPR, "modop_expr", tcc_expression, 3)
+DEFTREECODE (CAST_EXPR, "cast_expr", tcc_unary, 1)
+DEFTREECODE (REINTERPRET_CAST_EXPR, "reinterpret_cast_expr", tcc_unary, 1)
+DEFTREECODE (CONST_CAST_EXPR, "const_cast_expr", tcc_unary, 1)
+DEFTREECODE (STATIC_CAST_EXPR, "static_cast_expr", tcc_unary, 1)
+DEFTREECODE (DYNAMIC_CAST_EXPR, "dynamic_cast_expr", tcc_unary, 1)
+DEFTREECODE (IMPLICIT_CONV_EXPR, "implicit_conv_expr", tcc_unary, 1)
+DEFTREECODE (DOTSTAR_EXPR, "dotstar_expr", tcc_expression, 2)
+DEFTREECODE (TYPEID_EXPR, "typeid_expr", tcc_expression, 1)
+DEFTREECODE (NOEXCEPT_EXPR, "noexcept_expr", tcc_unary, 1)
+DEFTREECODE (SPACESHIP_EXPR, "spaceship_expr", tcc_expression, 2)
+
+/* A placeholder for an expression that is not type-dependent, but
+ does occur in a template. When an expression that is not
+ type-dependent appears in a larger expression, we must compute the
+ type of that larger expression. That computation would normally
+ modify the original expression, which would change the mangling of
+ that expression if it appeared in a template argument list. In
+ that situation, we create a NON_DEPENDENT_EXPR to take the place of
+ the original expression. The expression is the only operand -- it
+ is only needed for diagnostics. */
+DEFTREECODE (NON_DEPENDENT_EXPR, "non_dependent_expr", tcc_expression, 1)
+
+/* CTOR_INITIALIZER is a placeholder in template code for a call to
+ setup_vtbl_pointer (and appears in all functions, not just ctors). */
+DEFTREECODE (CTOR_INITIALIZER, "ctor_initializer", tcc_expression, 1)
+
+DEFTREECODE (TRY_BLOCK, "try_block", tcc_statement, 2)
+
+DEFTREECODE (EH_SPEC_BLOCK, "eh_spec_block", tcc_statement, 2)
+
+/* A HANDLER wraps a catch handler for the HANDLER_TYPE. If this is
+ CATCH_ALL_TYPE, then the handler catches all types. The declaration of
+ the catch variable is in HANDLER_PARMS, and the body block in
+ HANDLER_BODY. */
+DEFTREECODE (HANDLER, "handler", tcc_statement, 2)
+
+/* A MUST_NOT_THROW_EXPR wraps an expression that may not
+ throw, and must call terminate if it does. The second argument
+ is a condition, used in templates to express noexcept (condition). */
+DEFTREECODE (MUST_NOT_THROW_EXPR, "must_not_throw_expr", tcc_expression, 2)
+
+/* A CLEANUP_STMT marks the point at which a declaration is fully
+ constructed. The CLEANUP_EXPR is run on behalf of CLEANUP_DECL
+ when CLEANUP_BODY completes. */
+DEFTREECODE (CLEANUP_STMT, "cleanup_stmt", tcc_statement, 3)
+
+/* Represents an 'if' statement. The operands are IF_COND,
+ THEN_CLAUSE, ELSE_CLAUSE, and the current scope, respectively. */
+/* ??? It is currently still necessary to distinguish between IF_STMT
+ and COND_EXPR for the benefit of templates. */
+DEFTREECODE (IF_STMT, "if_stmt", tcc_statement, 4)
+
+/* Used to represent a range-based `for' statement. The operands are
+ RANGE_FOR_DECL, RANGE_FOR_EXPR, RANGE_FOR_BODY, RANGE_FOR_SCOPE,
+ RANGE_FOR_UNROLL, and RANGE_FOR_INIT_STMT, respectively. Only used in
+ templates. */
+DEFTREECODE (RANGE_FOR_STMT, "range_for_stmt", tcc_statement, 6)
+
+/* Used to represent an expression statement. Use `EXPR_STMT_EXPR' to
+ obtain the expression. */
+DEFTREECODE (EXPR_STMT, "expr_stmt", tcc_expression, 1)
+
+DEFTREECODE (TAG_DEFN, "tag_defn", tcc_expression, 0)
+
+/* Represents an 'offsetof' expression during template expansion. */
+DEFTREECODE (OFFSETOF_EXPR, "offsetof_expr", tcc_expression, 2)
+
+/* Represents an '__builtin_addressof' expression during template
+ expansion. This is similar to ADDR_EXPR, but it doesn't invoke
+ overloaded & operators. */
+DEFTREECODE (ADDRESSOF_EXPR, "addressof_expr", tcc_expression, 1)
+
+/* Represents the -> operator during template expansion. */
+DEFTREECODE (ARROW_EXPR, "arrow_expr", tcc_expression, 1)
+
+/* Represents an '__alignof__' expression during template
+ expansion. */
+DEFTREECODE (ALIGNOF_EXPR, "alignof_expr", tcc_expression, 1)
+
+/* Represents an Objective-C++ '@encode' expression during template
+ expansion. */
+DEFTREECODE (AT_ENCODE_EXPR, "at_encode_expr", tcc_expression, 1)
+
+/* A STMT_EXPR represents a statement-expression during template
+ expansion. This is the GCC extension ({ ... }). The
+ STMT_EXPR_STMT is the statement given by the expression. */
+DEFTREECODE (STMT_EXPR, "stmt_expr", tcc_expression, 1)
+
+/* Unary plus. Operand 0 is the expression to which the unary plus
+ is applied. */
+DEFTREECODE (UNARY_PLUS_EXPR, "unary_plus_expr", tcc_unary, 1)
+
+/** C++11 extensions. */
+
+/* A static assertion. This is a C++11 extension.
+ STATIC_ASSERT_CONDITION contains the condition that is being
+ checked. STATIC_ASSERT_MESSAGE contains the message (a string
+ literal) to be displayed if the condition fails to hold. */
+DEFTREECODE (STATIC_ASSERT, "static_assert", tcc_exceptional, 0)
+
+/* Represents an argument pack of types (or templates). An argument
+ pack stores zero or more arguments that will be used to instantiate
+ a parameter pack.
+
+ ARGUMENT_PACK_ARGS retrieves the arguments stored in the argument
+ pack.
+
+ Example:
+ template<typename... Values>
+ class tuple { ... };
+
+ tuple<int, float, double> t;
+
+ Values is a (template) parameter pack. When tuple<int, float,
+ double> is instantiated, the Values parameter pack is instantiated
+ with the argument pack <int, float, double>. ARGUMENT_PACK_ARGS will
+ be a TREE_VEC containing int, float, and double. */
+DEFTREECODE (TYPE_ARGUMENT_PACK, "type_argument_pack", tcc_type, 0)
+
+/* Represents an argument pack of values, which can be used either for
+ non-type template arguments or function call arguments.
+
+ NONTYPE_ARGUMENT_PACK plays precisely the same role as
+ TYPE_ARGUMENT_PACK, but will be used for packing non-type template
+ arguments (e.g., "int... Dimensions") or function arguments ("const
+ Args&... args"). */
+DEFTREECODE (NONTYPE_ARGUMENT_PACK, "nontype_argument_pack", tcc_expression, 1)
+
+/* Represents a type expression that will be expanded into a list of
+ types when instantiated with one or more argument packs.
+
+ PACK_EXPANSION_PATTERN retrieves the expansion pattern. This is
+ the type or expression that we will substitute into with each
+ argument in an argument pack.
+
+ PACK_EXPANSION_PARAMETER_PACKS contains a TREE_LIST of the parameter
+ packs that are used in this pack expansion.
+
+ Example:
+ template<typename... Values>
+ struct tied : tuple<Values&...> {
+ // ...
+ };
+
+ The derivation from tuple contains a TYPE_PACK_EXPANSION for the
+ template arguments. Its PACK_EXPANSION_PATTERN is "Values&" and its
+ PACK_EXPANSION_PARAMETER_PACKS will contain "Values". */
+DEFTREECODE (TYPE_PACK_EXPANSION, "type_pack_expansion", tcc_type, 0)
+
+/* Represents an expression that will be expanded into a list of
+ expressions when instantiated with one or more argument packs.
+
+ EXPR_PACK_EXPANSION plays precisely the same role as TYPE_PACK_EXPANSION,
+ but will be used for expressions. */
+DEFTREECODE (EXPR_PACK_EXPANSION, "expr_pack_expansion", tcc_expression, 3)
+
+/* Selects the Ith parameter out of an argument pack. This node will
+ be used when instantiating pack expansions; see
+ tsubst_pack_expansion.
+
+ ARGUMENT_PACK_SELECT_FROM_PACK contains the *_ARGUMENT_PACK node
+ from which the argument will be selected.
+
+ ARGUMENT_PACK_SELECT_INDEX contains the index into the argument
+ pack that will be returned by this ARGUMENT_PACK_SELECT node. The
+ index is a machine integer. */
+DEFTREECODE (ARGUMENT_PACK_SELECT, "argument_pack_select", tcc_exceptional, 0)
+
+/* Fold expressions allow the expansion of a template argument pack
+ over a binary operator.
+
+ FOLD_EXPR_MOD_P is true when the fold operation is a compound assignment
+ operator.
+
+ FOLD_EXPR_OP is an INTEGER_CST storing the tree code for the folded
+ expression. Note that when FOLD_EXPR_MOD_P is true, the operator is
+ a compound assignment operator for that kind of expression.
+
+ FOLD_EXPR_PACK is an expression containing an unexpanded parameter pack;
+ when expanded, each term becomes an argument of the folded expression.
+
+ In a BINARY_FOLD_EXPRESSION, FOLD_EXPR_INIT is the non-pack argument. */
+DEFTREECODE (UNARY_LEFT_FOLD_EXPR, "unary_left_fold_expr", tcc_expression, 2)
+DEFTREECODE (UNARY_RIGHT_FOLD_EXPR, "unary_right_fold_expr", tcc_expression, 2)
+DEFTREECODE (BINARY_LEFT_FOLD_EXPR, "binary_left_fold_expr", tcc_expression, 3)
+DEFTREECODE (BINARY_RIGHT_FOLD_EXPR, "binary_right_fold_expr", tcc_expression, 3)
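+
+/* Illustrative mapping, not part of this file: for a parameter pack
+   `args' the four source forms correspond to
+     (... + args)        -> UNARY_LEFT_FOLD_EXPR
+     (args + ...)        -> UNARY_RIGHT_FOLD_EXPR
+     (init + ... + args) -> BINARY_LEFT_FOLD_EXPR, FOLD_EXPR_INIT is `init'
+     (args + ... + init) -> BINARY_RIGHT_FOLD_EXPR, FOLD_EXPR_INIT is `init'. */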
+
+/* Represents the __builtin_bit_cast (type, expr) expression.
+ The type is in TREE_TYPE, expression in TREE_OPERAND (bitcast, 0). */
+DEFTREECODE (BIT_CAST_EXPR, "bit_cast_expr", tcc_expression, 1)
+
+/** C++ extensions. */
+
+/* Represents a templated trait that yields an expression. */
+DEFTREECODE (TRAIT_EXPR, "trait_expr", tcc_exceptional, 0)
+
+/* Represents a templated trait that yields a type. */
+DEFTREECODE (TRAIT_TYPE, "trait_type", tcc_type, 0)
+
+/* A lambda expression. This is a C++0x extension.
+ LAMBDA_EXPR_DEFAULT_CAPTURE_MODE is an enum for the default, which may be
+ none.
+ LAMBDA_EXPR_CAPTURE_LIST holds the capture-list, including `this'.
+ LAMBDA_EXPR_THIS_CAPTURE goes straight to the capture of `this', if it exists.
+ LAMBDA_EXPR_PENDING_PROXIES is a vector of capture proxies which need to
+ be pushed once scope returns to the lambda.
+ LAMBDA_EXPR_MUTABLE_P signals whether this lambda was declared mutable. */
+DEFTREECODE (LAMBDA_EXPR, "lambda_expr", tcc_exceptional, 0)
+
+/* The declared type of an expression. This is a C++0x extension.
+ DECLTYPE_TYPE_EXPR is the expression whose type we are computing.
+ DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P states whether the
+ expression was parsed as an id-expression or a member access
+ expression. When false, it was parsed as a full expression.
+ DECLTYPE_FOR_LAMBDA_CAPTURE is set if we want lambda capture semantics.
+ DECLTYPE_FOR_LAMBDA_RETURN is set if we want lambda return deduction. */
+DEFTREECODE (DECLTYPE_TYPE, "decltype_type", tcc_type, 0)
+
+/* A type designated by one of the bases type traits.
+ BASES_TYPE is the type in question. */
+DEFTREECODE (BASES, "bases", tcc_type, 0)
+
+/* Dependent operator expressions are given this type rather than a NULL_TREE
+ type so that we have somewhere to stash the result of phase 1 name lookup
+ (namely into DEPENDENT_OPERATOR_TYPE_SAVED_LOOKUPS). */
+DEFTREECODE (DEPENDENT_OPERATOR_TYPE, "dependent_operator_type", tcc_type, 0)
+
+/* Used to represent the template information stored by template
+ specializations.
+ The accessors are:
+ TI_TEMPLATE the template declaration associated to the specialization
+ TI_ARGS the arguments of the template specialization
+ TI_TYPEDEFS_NEEDING_ACCESS_CHECKING the vector of typedefs used in
+ the pattern of the template for which access check is needed at template
+ instantiation time. */
+DEFTREECODE (TEMPLATE_INFO, "template_info", tcc_exceptional, 0)
+
+/* OpenMP - #pragma omp depobj
+ Operand 0: OMP_DEPOBJ_DEPOBJ: Depobj expression
+ Operand 1: OMP_DEPOBJ_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_DEPOBJ, "omp_depobj", tcc_statement, 2)
+
+/* Extensions for Concepts. */
+
+/* Used to represent information associated with constrained declarations. */
+DEFTREECODE (CONSTRAINT_INFO, "constraint_info", tcc_exceptional, 0)
+
+/* A wildcard declaration is a placeholder for a template parameter
+ used to resolve constrained-type-names in concepts. During
+ resolution, the matching argument is saved as the TREE_TYPE
+ of the wildcard. */
+DEFTREECODE (WILDCARD_DECL, "wildcard_decl", tcc_declaration, 0)
+
+/* A requires-expr has three operands. The first operand is
+ its parameter list (possibly NULL). The second is a list of
+ requirements, which are denoted by the _REQ* tree codes
+ below. The third is a TREE_VEC of template arguments to
+ be applied when substituting into the parameter list and
+ requirements, set by tsubst_requires_expr for partial instantiations. */
+DEFTREECODE (REQUIRES_EXPR, "requires_expr", tcc_expression, 3)
+
+/* A requirement for an expression. */
+DEFTREECODE (SIMPLE_REQ, "simple_req", tcc_expression, 1)
+
+/* A requirement for a type. */
+DEFTREECODE (TYPE_REQ, "type_req", tcc_expression, 1)
+
+/* A requirement for an expression and its properties. The
+ first operand is the expression, and the 2nd is its type.
+ The accessor COMPOUND_REQ_NOEXCEPT_P determines whether
+ the noexcept keyword was present. */
+DEFTREECODE (COMPOUND_REQ, "compound_req", tcc_expression, 2)
+
+/* A requires clause within a requires expression. */
+DEFTREECODE (NESTED_REQ, "nested_req", tcc_expression, 1)
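+
+/* Illustrative mapping, not part of this file: in
+
+     template<typename T>
+     concept C = requires (T a)
+     {
+       a + a;                          // SIMPLE_REQ
+       typename T::value_type;         // TYPE_REQ
+       { a + a } noexcept -> Addable;  // COMPOUND_REQ (noexcept flag set)
+       requires sizeof (T) > 1;        // NESTED_REQ
+     };
+
+   each requirement in the body maps to the tree code noted beside it;
+   `Addable' is a hypothetical concept. */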
+
+/* Constraints are modeled as kinds of expressions.
+ The operands of a constraint can be either types or expressions.
+ Unlike expressions, constraints do not have a type. */
+
+/* An atomic constraint evaluates an expression E. The operand of the
+ constraint is its parameter mapping. The actual expression is stored
+ in the context.
+
+ ATOMIC_CONSTR_INFO provides source info to support diagnostics.
+ ATOMIC_CONSTR_EXPR has the expression to be evaluated.
+ ATOMIC_CONSTR_MAP is the parameter mapping for the atomic constraint
+ and is stored in the type field. */
+DEFTREECODE (ATOMIC_CONSTR, "atomic_constr", tcc_expression, 1)
+
+/* The conjunction and disjunction of two constraints, respectively.
+ Operands are accessed using TREE_OPERAND. The third operand provides
+ source info for diagnostics.
+
+ CONJ_CONSTR_INFO and DISJ_CONSTR_INFO provide access to the source
+ information of constraints, which is stored in the TREE_TYPE. */
+DEFTREECODE (CONJ_CONSTR, "conj_constr", tcc_expression, 2)
+DEFTREECODE (DISJ_CONSTR, "disj_constr", tcc_expression, 2)
+
+/* A check constraint represents the checking of a concept
+ C. It has two operands: the template defining the concept
+ and a sequence of template arguments.
+
+ CHECK_CONSTR_CONCEPT has the concept definition
+ CHECK_CONSTR_ARGUMENTS are the template arguments */
+DEFTREECODE (CHECK_CONSTR, "check_constr", tcc_expression, 2)
+
+/* The co_await expression is used to support coroutines.
+
+ Op0 is the cast expression (potentially modified by the
+ promise "await_transform()" method).
+ Op1 is a proxy for the temp / coro frame slot 'e' value.
+ Op2 is the initialiser for Op1 (Op0, potentially modified by any
+ applicable 'co_await' operator).
+ Op3 is a vector of the [0] e.ready, [1] e.suspend and [2] e.resume calls.
+ Op4 is a mode : 0 (await) 1 (yield) 2 (initial) 3 (final) */
+DEFTREECODE (CO_AWAIT_EXPR, "co_await", tcc_expression, 5)
+
+/* The co_yield expression is used to support coroutines.
+
+ Op0 is the original expr (for use in diagnostics)
+ Op1 is the co_await derived from this. */
+DEFTREECODE (CO_YIELD_EXPR, "co_yield", tcc_expression, 2)
+
+/* The co_return expression is used to support coroutines.
+
+ Op0 is the original expr, can be void (for use in diagnostics)
+ Op1 is the promise return_xxxx call for the expression given. */
+
+DEFTREECODE (CO_RETURN_EXPR, "co_return", tcc_statement, 2)
+
+/* Different flavors of contracts.
+
+ Assertions and preconditions have two operands: a node containing
+ their mode, and their condition. Postconditions have an additional
+ operand to store the optional name for the result value.
+
+ CONTRACT_SEMANTIC has the computed behavior of the contract. */
+DEFTREECODE (ASSERTION_STMT, "assertion_stmt", tcc_statement, 3)
+DEFTREECODE (PRECONDITION_STMT, "precondition_stmt", tcc_statement, 3)
+DEFTREECODE (POSTCONDITION_STMT, "postcondition_stmt", tcc_statement, 4)
+
+/*
+Local variables:
+mode:c
+End:
+*/
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cp-tree.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cp-tree.h
new file mode 100644
index 0000000..8fee475
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cp-tree.h
@@ -0,0 +1,8844 @@
+/* Definitions for -*- C++ -*- parsing and type checking.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CP_TREE_H
+#define GCC_CP_TREE_H
+
+#include "tm.h"
+#include "hard-reg-set.h"
+#include "function.h"
+#include "tristate.h"
+#include "contracts.h"
+
+/* In order for the format checking to accept the C++ front end
+ diagnostic framework extensions, you must include this file before
+ diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE
+ in c-common.h. */
+#undef GCC_DIAG_STYLE
+#define GCC_DIAG_STYLE __gcc_cxxdiag__
+#if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H)
+#error \
+In order for the format checking to accept the C++ front end diagnostic \
+framework extensions, you must include this file before diagnostic-core.h and \
+c-common.h, not after.
+#endif
+#include "c-family/c-common.h"
+#include "diagnostic.h"
+
+/* A tree node, together with a location, so that we can track locations
+ (and ranges) during parsing.
+
+ The location is redundant for node kinds that have locations,
+ but not all node kinds do (e.g. constants, and references to
+ params, locals, etc), so we stash a copy here. */
+
+extern location_t cp_expr_location (const_tree);
+
+class cp_expr
+{
+public:
+ cp_expr () :
+ m_value (NULL), m_loc (UNKNOWN_LOCATION),
+ m_decimal (false)
+ {}
+
+ cp_expr (tree value) :
+ m_value (value), m_loc (cp_expr_location (m_value)),
+ m_decimal (false)
+ {}
+
+ cp_expr (tree value, location_t loc):
+ m_value (value), m_loc (loc), m_decimal (false)
+ {
+ protected_set_expr_location (value, loc);
+ }
+
+ cp_expr (tree value, location_t loc, bool decimal):
+ m_value (value), m_loc (loc), m_decimal (decimal)
+ {
+ protected_set_expr_location (value, loc);
+ }
+
+ /* Implicit conversions to tree. */
+ operator tree () const { return m_value; }
+ tree & operator* () { return m_value; }
+ tree operator* () const { return m_value; }
+ tree & operator-> () { return m_value; }
+ tree operator-> () const { return m_value; }
+
+ tree get_value () const { return m_value; }
+ location_t get_location () const { return m_loc; }
+ location_t get_start () const
+ {
+ source_range src_range = get_range_from_loc (line_table, m_loc);
+ return src_range.m_start;
+ }
+ location_t get_finish () const
+ {
+ source_range src_range = get_range_from_loc (line_table, m_loc);
+ return src_range.m_finish;
+ }
+
+ void set_location (location_t loc)
+ {
+ protected_set_expr_location (m_value, loc);
+ m_loc = loc;
+ }
+
+ void set_range (location_t start, location_t finish)
+ {
+ set_location (make_location (m_loc, start, finish));
+ }
+
+ cp_expr& maybe_add_location_wrapper ()
+ {
+ m_value = maybe_wrap_with_location (m_value, m_loc);
+ return *this;
+ }
+
+ bool decimal_p () const { return m_decimal; }
+
+ private:
+ tree m_value;
+ location_t m_loc;
+ bool m_decimal : 1;
+};
+
+inline bool
+operator == (const cp_expr &lhs, tree rhs)
+{
+ return lhs.get_value () == rhs;
+}
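+
+/* Illustrative sketch, not part of the header: how a parsing routine might
+   use cp_expr to carry a source range with a freshly built expression. The
+   function name finish_parenthesized_expr is hypothetical. */
+#if 0
+static cp_expr
+finish_parenthesized_expr (tree expr, location_t open_loc, location_t close_loc)
+{
+  cp_expr result (expr);
+  /* Record the full extent, from the opening to the closing paren. */
+  result.set_range (open_loc, close_loc);
+  return result.maybe_add_location_wrapper ();
+}
+#endif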
+
+
+enum cp_tree_index
+{
+ CPTI_WCHAR_DECL,
+ CPTI_VTABLE_ENTRY_TYPE,
+ CPTI_DELTA_TYPE,
+ CPTI_VTABLE_INDEX_TYPE,
+ CPTI_CLEANUP_TYPE,
+ CPTI_VTT_PARM_TYPE,
+
+ CPTI_CLASS_TYPE,
+ CPTI_UNKNOWN_TYPE,
+ CPTI_INIT_LIST_TYPE,
+ CPTI_EXPLICIT_VOID_LIST,
+ CPTI_VTBL_TYPE,
+ CPTI_VTBL_PTR_TYPE,
+ CPTI_GLOBAL,
+ CPTI_ABORT_FNDECL,
+ CPTI_AGGR_TAG,
+ CPTI_CONV_OP_MARKER,
+
+ CPTI_CTOR_IDENTIFIER,
+ CPTI_COMPLETE_CTOR_IDENTIFIER,
+ CPTI_BASE_CTOR_IDENTIFIER,
+ CPTI_DTOR_IDENTIFIER,
+ CPTI_COMPLETE_DTOR_IDENTIFIER,
+ CPTI_BASE_DTOR_IDENTIFIER,
+ CPTI_DELETING_DTOR_IDENTIFIER,
+ CPTI_CONV_OP_IDENTIFIER,
+ CPTI_DELTA_IDENTIFIER,
+ CPTI_IN_CHARGE_IDENTIFIER,
+ CPTI_VTT_PARM_IDENTIFIER,
+ CPTI_AS_BASE_IDENTIFIER,
+ CPTI_THIS_IDENTIFIER,
+ CPTI_PFN_IDENTIFIER,
+ CPTI_VPTR_IDENTIFIER,
+ CPTI_GLOBAL_IDENTIFIER,
+ CPTI_ANON_IDENTIFIER,
+ CPTI_AUTO_IDENTIFIER,
+ CPTI_DECLTYPE_AUTO_IDENTIFIER,
+ CPTI_INIT_LIST_IDENTIFIER,
+ CPTI_FOR_RANGE__IDENTIFIER,
+ CPTI_FOR_BEGIN__IDENTIFIER,
+ CPTI_FOR_END__IDENTIFIER,
+ CPTI_FOR_RANGE_IDENTIFIER,
+ CPTI_FOR_BEGIN_IDENTIFIER,
+ CPTI_FOR_END_IDENTIFIER,
+ CPTI_ABI_TAG_IDENTIFIER,
+ CPTI_ALIGNED_IDENTIFIER,
+ CPTI_BEGIN_IDENTIFIER,
+ CPTI_END_IDENTIFIER,
+ CPTI_GET_IDENTIFIER,
+ CPTI_GNU_IDENTIFIER,
+ CPTI_TUPLE_ELEMENT_IDENTIFIER,
+ CPTI_TUPLE_SIZE_IDENTIFIER,
+ CPTI_TYPE_IDENTIFIER,
+ CPTI_VALUE_IDENTIFIER,
+ CPTI_FUN_IDENTIFIER,
+ CPTI_CLOSURE_IDENTIFIER,
+ CPTI_HEAP_UNINIT_IDENTIFIER,
+ CPTI_HEAP_IDENTIFIER,
+ CPTI_HEAP_DELETED_IDENTIFIER,
+ CPTI_HEAP_VEC_UNINIT_IDENTIFIER,
+ CPTI_HEAP_VEC_IDENTIFIER,
+ CPTI_OMP_IDENTIFIER,
+
+ CPTI_LANG_NAME_C,
+ CPTI_LANG_NAME_CPLUSPLUS,
+
+ CPTI_EMPTY_EXCEPT_SPEC,
+ CPTI_NOEXCEPT_TRUE_SPEC,
+ CPTI_NOEXCEPT_FALSE_SPEC,
+ CPTI_NOEXCEPT_DEFERRED_SPEC,
+
+ CPTI_ANY_TARG,
+
+ CPTI_MODULE_HWM,
+ /* Nodes after here change during compilation, or should not be in
+ the module's global tree table. Such nodes must be locatable
+ via name lookup or type-construction, as those are the only
+ cross-TU matching capabilities remaining. */
+
+ /* We must find these via the global namespace. */
+ CPTI_STD,
+ CPTI_ABI,
+
+ /* These are created at init time, but the library/headers provide
+ definitions. */
+ CPTI_ALIGN_TYPE,
+ CPTI_TERMINATE_FN,
+ CPTI_CALL_UNEXPECTED_FN,
+
+ /* These are lazily inited. */
+ CPTI_CONST_TYPE_INFO_TYPE,
+ CPTI_GET_EXCEPTION_PTR_FN,
+ CPTI_BEGIN_CATCH_FN,
+ CPTI_END_CATCH_FN,
+ CPTI_ALLOCATE_EXCEPTION_FN,
+ CPTI_FREE_EXCEPTION_FN,
+ CPTI_THROW_FN,
+ CPTI_RETHROW_FN,
+ CPTI_ATEXIT_FN_PTR_TYPE,
+ CPTI_ATEXIT,
+ CPTI_DSO_HANDLE,
+ CPTI_DCAST,
+
+ CPTI_PSEUDO_CONTRACT_VIOLATION,
+
+ CPTI_MAX
+};
+
+extern GTY(()) tree cp_global_trees[CPTI_MAX];
+
+#define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL]
+#define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE]
+/* The type used to represent an offset by which to adjust the `this'
+ pointer in pointer-to-member types. */
+#define delta_type_node cp_global_trees[CPTI_DELTA_TYPE]
+/* The type used to represent an index into the vtable. */
+#define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE]
+
+#define class_type_node cp_global_trees[CPTI_CLASS_TYPE]
+#define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE]
+#define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE]
+#define explicit_void_list_node cp_global_trees[CPTI_EXPLICIT_VOID_LIST]
+#define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE]
+#define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE]
+#define std_node cp_global_trees[CPTI_STD]
+#define abi_node cp_global_trees[CPTI_ABI]
+#define global_namespace cp_global_trees[CPTI_GLOBAL]
+#define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE]
+#define conv_op_marker cp_global_trees[CPTI_CONV_OP_MARKER]
+#define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL]
+#define current_aggr cp_global_trees[CPTI_AGGR_TAG]
+/* std::align_val_t */
+#define align_type_node cp_global_trees[CPTI_ALIGN_TYPE]
+#define pseudo_contract_violation_type cp_global_trees[CPTI_PSEUDO_CONTRACT_VIOLATION]
+
+/* We cache these tree nodes so as to call get_identifier less frequently.
+ For identifiers for functions, including special member functions such
+ as ctors and assignment operators, the nodes can be used (among other
+ things) to iterate over their overloads defined by/for a type. For
+ example:
+
+ tree ovlid = assign_op_identifier;
+ tree overloads = get_class_binding (type, ovlid);
+ for (ovl_iterator it (overloads); it; ++it) { ... }
+
+ iterates over the set of implicitly and explicitly defined overloads
+ of the assignment operator for type (including the copy and move
+ assignment operators, whether deleted or not). */
+
+/* The name of a constructor that takes an in-charge parameter to
+ decide whether or not to construct virtual base classes. */
+#define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER]
+/* The name of a constructor that constructs virtual base classes. */
+#define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER]
+/* The name of a constructor that does not construct virtual base classes. */
+#define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER]
+/* The name of a destructor that takes an in-charge parameter to
+ decide whether or not to destroy virtual base classes and whether
+ or not to delete the object. */
+#define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER]
+/* The name of a destructor that destroys virtual base classes. */
+#define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER]
+/* The name of a destructor that does not destroy virtual base
+ classes. */
+#define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER]
+/* The name of a destructor that destroys virtual base classes, and
+ then deletes the entire object. */
+#define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER]
+
+/* The name used for conversion operators -- but note that actual
+ conversion functions use special identifiers outside the identifier
+ table. */
+#define conv_op_identifier cp_global_trees[CPTI_CONV_OP_IDENTIFIER]
+
+#define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER]
+#define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER]
+/* The name of the parameter that contains a pointer to the VTT to use
+ for this subobject constructor or destructor. */
+#define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER]
+#define as_base_identifier cp_global_trees[CPTI_AS_BASE_IDENTIFIER]
+#define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER]
+#define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER]
+#define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER]
+/* The name of the ::, std & anon namespaces. */
+#define global_identifier cp_global_trees[CPTI_GLOBAL_IDENTIFIER]
+#define anon_identifier cp_global_trees[CPTI_ANON_IDENTIFIER]
+/* auto and decltype(auto) identifiers. */
+#define auto_identifier cp_global_trees[CPTI_AUTO_IDENTIFIER]
+#define decltype_auto_identifier cp_global_trees[CPTI_DECLTYPE_AUTO_IDENTIFIER]
+#define init_list_identifier cp_global_trees[CPTI_INIT_LIST_IDENTIFIER]
+#define for_range__identifier cp_global_trees[CPTI_FOR_RANGE__IDENTIFIER]
+#define for_begin__identifier cp_global_trees[CPTI_FOR_BEGIN__IDENTIFIER]
+#define for_end__identifier cp_global_trees[CPTI_FOR_END__IDENTIFIER]
+#define for_range_identifier cp_global_trees[CPTI_FOR_RANGE_IDENTIFIER]
+#define for_begin_identifier cp_global_trees[CPTI_FOR_BEGIN_IDENTIFIER]
+#define for_end_identifier cp_global_trees[CPTI_FOR_END_IDENTIFIER]
+#define abi_tag_identifier cp_global_trees[CPTI_ABI_TAG_IDENTIFIER]
+#define aligned_identifier cp_global_trees[CPTI_ALIGNED_IDENTIFIER]
+#define begin_identifier cp_global_trees[CPTI_BEGIN_IDENTIFIER]
+#define end_identifier cp_global_trees[CPTI_END_IDENTIFIER]
+#define get__identifier cp_global_trees[CPTI_GET_IDENTIFIER]
+#define gnu_identifier cp_global_trees[CPTI_GNU_IDENTIFIER]
+#define tuple_element_identifier cp_global_trees[CPTI_TUPLE_ELEMENT_IDENTIFIER]
+#define tuple_size_identifier cp_global_trees[CPTI_TUPLE_SIZE_IDENTIFIER]
+#define type_identifier cp_global_trees[CPTI_TYPE_IDENTIFIER]
+#define value_identifier cp_global_trees[CPTI_VALUE_IDENTIFIER]
+#define fun_identifier cp_global_trees[CPTI_FUN_IDENTIFIER]
+#define closure_identifier cp_global_trees[CPTI_CLOSURE_IDENTIFIER]
+#define heap_uninit_identifier cp_global_trees[CPTI_HEAP_UNINIT_IDENTIFIER]
+#define heap_identifier cp_global_trees[CPTI_HEAP_IDENTIFIER]
+#define heap_deleted_identifier cp_global_trees[CPTI_HEAP_DELETED_IDENTIFIER]
+#define heap_vec_uninit_identifier cp_global_trees[CPTI_HEAP_VEC_UNINIT_IDENTIFIER]
+#define heap_vec_identifier cp_global_trees[CPTI_HEAP_VEC_IDENTIFIER]
+#define omp_identifier cp_global_trees[CPTI_OMP_IDENTIFIER]
+#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C]
+#define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]
+
+/* Exception specifiers used for throw(), noexcept(true),
+ noexcept(false) and deferred noexcept. We rely on these being
+ uncloned. */
+#define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC]
+#define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC]
+#define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC]
+#define noexcept_deferred_spec cp_global_trees[CPTI_NOEXCEPT_DEFERRED_SPEC]
+
+/* Exception handling function declarations. */
+#define terminate_fn cp_global_trees[CPTI_TERMINATE_FN]
+#define call_unexpected_fn cp_global_trees[CPTI_CALL_UNEXPECTED_FN]
+#define get_exception_ptr_fn cp_global_trees[CPTI_GET_EXCEPTION_PTR_FN]
+#define begin_catch_fn cp_global_trees[CPTI_BEGIN_CATCH_FN]
+#define end_catch_fn cp_global_trees[CPTI_END_CATCH_FN]
+#define allocate_exception_fn cp_global_trees[CPTI_ALLOCATE_EXCEPTION_FN]
+#define free_exception_fn cp_global_trees[CPTI_FREE_EXCEPTION_FN]
+#define throw_fn cp_global_trees[CPTI_THROW_FN]
+#define rethrow_fn cp_global_trees[CPTI_RETHROW_FN]
+
+/* The type of the function-pointer argument to "__cxa_atexit" (or
+ "std::atexit", if "__cxa_atexit" is not being used). */
+#define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE]
+
+/* A pointer to `std::atexit'. */
+#define atexit_node cp_global_trees[CPTI_ATEXIT]
+
+/* A pointer to `__dso_handle'. */
+#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE]
+
+/* The declaration of the dynamic_cast runtime. */
+#define dynamic_cast_node cp_global_trees[CPTI_DCAST]
+
+/* The type of a destructor. */
+#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE]
+
+/* The type of the vtt parameter passed to subobject constructors and
+ destructors. */
+#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE]
+
+/* A node which matches any template argument. */
+#define any_targ_node cp_global_trees[CPTI_ANY_TARG]
+
+/* Node to indicate default access. This must be distinct from the
+ access nodes in tree.h. */
+
+#define access_default_node null_node
+
+#include "name-lookup.h"
+
+/* Usage of TREE_LANG_FLAG_?:
+ 0: IDENTIFIER_KIND_BIT_0 (in IDENTIFIER_NODE)
+ NEW_EXPR_USE_GLOBAL (in NEW_EXPR).
+ COND_EXPR_IS_VEC_DELETE (in COND_EXPR).
+ DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR).
+ CLEANUP_P (in TRY_BLOCK)
+ AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR)
+ PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF)
+ PAREN_STRING_LITERAL_P (in STRING_CST)
+ CP_DECL_THREAD_LOCAL_P (in VAR_DECL)
+ KOENIG_LOOKUP_P (in CALL_EXPR)
+ STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST).
+ EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT)
+ STMT_EXPR_NO_SCOPE (in STMT_EXPR)
+ BIND_EXPR_TRY_BLOCK (in BIND_EXPR)
+ TYPENAME_IS_ENUM_P (in TYPENAME_TYPE)
+ OMP_FOR_GIMPLIFYING_P (in OMP_FOR, OMP_SIMD, OMP_DISTRIBUTE,
+ and OMP_TASKLOOP)
+ BASELINK_QUALIFIED_P (in BASELINK)
+ TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR)
+ TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX)
+ ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute)
+ ABI_TAG_IMPLICIT (in the TREE_LIST for the argument of abi_tag)
+ LAMBDA_CAPTURE_EXPLICIT_P (in a TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST)
+ PARENTHESIZED_LIST_P (in the TREE_LIST for a parameter-declaration-list)
+ CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR)
+ LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR)
+ DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE)
+ VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR)
+ DECL_OVERRIDE_P (in FUNCTION_DECL)
+ IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR)
+ TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR)
+ CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR)
+ PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION)
+ TINFO_HAS_ACCESS_ERRORS (in TEMPLATE_INFO)
+ SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR)
+ COMPOUND_REQ_NOEXCEPT_P (in COMPOUND_REQ)
+ WILDCARD_PACK_P (in WILDCARD_DECL)
+ BLOCK_OUTER_CURLY_BRACE_P (in BLOCK)
+ FOLD_EXPR_MODOP_P (*_FOLD_EXPR)
+ IF_STMT_CONSTEXPR_P (IF_STMT)
+ DECL_NAMESPACE_INLINE_P (in NAMESPACE_DECL)
+ SWITCH_STMT_ALL_CASES_P (in SWITCH_STMT)
+ REINTERPRET_CAST_P (in NOP_EXPR)
+ ALIGNOF_EXPR_STD_P (in ALIGNOF_EXPR)
+ OVL_DEDUP_P (in OVERLOAD)
+ ATOMIC_CONSTR_MAP_INSTANTIATED_P (in ATOMIC_CONSTR)
+ contract_semantic (in ASSERTION_, PRECONDITION_, POSTCONDITION_STMT)
+ 1: IDENTIFIER_KIND_BIT_1 (in IDENTIFIER_NODE)
+ TI_PENDING_TEMPLATE_FLAG.
+ TEMPLATE_PARMS_FOR_INLINE.
+ DELETE_EXPR_USE_VEC (in DELETE_EXPR).
+ ICS_ELLIPSIS_FLAG (in _CONV)
+ DECL_INITIALIZED_P (in VAR_DECL)
+ TYPENAME_IS_CLASS_P (in TYPENAME_TYPE)
+ STMT_IS_FULL_EXPR_P (in _STMT)
+ TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR)
+ LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR)
+ DECL_FINAL_P (in FUNCTION_DECL)
+ QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF)
+ CONSTRUCTOR_IS_DEPENDENT (in CONSTRUCTOR)
+ TINFO_USED_TEMPLATE_ID (in TEMPLATE_INFO)
+ PACK_EXPANSION_SIZEOF_P (in *_PACK_EXPANSION)
+ OVL_USING_P (in OVERLOAD)
+ IMPLICIT_CONV_EXPR_NONTYPE_ARG (in IMPLICIT_CONV_EXPR)
+ BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P (in BASELINK)
+ BIND_EXPR_VEC_DTOR (in BIND_EXPR)
+ ATOMIC_CONSTR_EXPR_FROM_CONCEPT_P (in ATOMIC_CONSTR)
+ 2: IDENTIFIER_KIND_BIT_2 (in IDENTIFIER_NODE)
+ ICS_THIS_FLAG (in _CONV)
+ DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL)
+ STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST)
+ TYPENAME_IS_RESOLVING_P (in TYPENAME_TYPE)
+ TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR)
+ FNDECL_USED_AUTO (in FUNCTION_DECL)
+ DECLTYPE_FOR_LAMBDA_PROXY (in DECLTYPE_TYPE)
+ REF_PARENTHESIZED_P (in COMPONENT_REF, INDIRECT_REF, SCOPE_REF,
+ VIEW_CONVERT_EXPR, PAREN_EXPR)
+ AGGR_INIT_ZERO_FIRST (in AGGR_INIT_EXPR)
+ CONSTRUCTOR_MUTABLE_POISON (in CONSTRUCTOR)
+ OVL_HIDDEN_P (in OVERLOAD)
+ IF_STMT_CONSTEVAL_P (in IF_STMT)
+ SWITCH_STMT_NO_BREAK_P (in SWITCH_STMT)
+ LAMBDA_EXPR_CAPTURE_OPTIMIZED (in LAMBDA_EXPR)
+ IMPLICIT_CONV_EXPR_BRACED_INIT (in IMPLICIT_CONV_EXPR)
+ PACK_EXPANSION_AUTO_P (in *_PACK_EXPANSION)
+ contract_semantic (in ASSERTION_, PRECONDITION_, POSTCONDITION_STMT)
+ 3: IMPLICIT_RVALUE_P (in NON_LVALUE_EXPR or STATIC_CAST_EXPR)
+ ICS_BAD_FLAG (in _CONV)
+ FN_TRY_BLOCK_P (in TRY_BLOCK)
+ BIND_EXPR_BODY_BLOCK (in BIND_EXPR)
+ CALL_EXPR_ORDERED_ARGS (in CALL_EXPR, AGGR_INIT_EXPR)
+ DECLTYPE_FOR_REF_CAPTURE (in DECLTYPE_TYPE)
+ CONSTRUCTOR_C99_COMPOUND_LITERAL (in CONSTRUCTOR)
+ OVL_NESTED_P (in OVERLOAD)
+ DECL_MODULE_EXPORT_P (in _DECL)
+ PACK_EXPANSION_FORCE_EXTRA_ARGS_P (in *_PACK_EXPANSION)
+ LAMBDA_EXPR_STATIC_P (in LAMBDA_EXPR)
+ TARGET_EXPR_ELIDING_P (in TARGET_EXPR)
+ contract_semantic (in ASSERTION_, PRECONDITION_, POSTCONDITION_STMT)
+ 4: IDENTIFIER_MARKED (IDENTIFIER_NODEs)
+ TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR,
+ CALL_EXPR, or FIELD_DECL).
+ DECL_TINFO_P (in VAR_DECL, TYPE_DECL)
+ FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
+ OVL_LOOKUP_P (in OVERLOAD)
+ LOOKUP_FOUND_P (in RECORD_TYPE, UNION_TYPE, ENUMERAL_TYPE, NAMESPACE_DECL)
+ FNDECL_MANIFESTLY_CONST_EVALUATED (in FUNCTION_DECL)
+ 5: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE)
+ FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
+ CALL_EXPR_REVERSE_ARGS (in CALL_EXPR, AGGR_INIT_EXPR)
+ CONSTRUCTOR_PLACEHOLDER_BOUNDARY (in CONSTRUCTOR)
+ OVL_EXPORT_P (in OVERLOAD)
+ DECL_NTTP_OBJECT_P (in VAR_DECL)
+ 6: TYPE_MARKED_P (in _TYPE)
+ DECL_NONTRIVIALLY_INITIALIZED_P (in VAR_DECL)
+ RANGE_FOR_IVDEP (in RANGE_FOR_STMT)
+ CALL_EXPR_OPERATOR_SYNTAX (in CALL_EXPR, AGGR_INIT_EXPR)
+ CONSTRUCTOR_IS_DESIGNATED_INIT (in CONSTRUCTOR)
+
+ Usage of TYPE_LANG_FLAG_?:
+ 0: TYPE_DEPENDENT_P
+ 1: TYPE_HAS_USER_CONSTRUCTOR.
+ 2: TYPE_HAS_LATE_RETURN_TYPE (in FUNCTION_TYPE, METHOD_TYPE)
+ TYPE_PTRMEMFUNC_FLAG (in RECORD_TYPE)
+ 4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR
+ 5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE)
+ ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE)
+ AUTO_IS_DECLTYPE (in TEMPLATE_TYPE_PARM)
+ 6: TYPE_DEPENDENT_P_VALID
+
+ Usage of DECL_LANG_FLAG_?:
+ 0: DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL)
+ DECL_LOCAL_DECL_P (in FUNCTION_DECL, VAR_DECL)
+ DECL_MUTABLE_P (in FIELD_DECL)
+ DECL_DEPENDENT_P (in USING_DECL)
+ LABEL_DECL_BREAK (in LABEL_DECL)
+ 1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL).
+ DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL)
+ DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL)
+ USING_DECL_TYPENAME_P (in USING_DECL)
+ DECL_VLA_CAPTURE_P (in FIELD_DECL)
+ DECL_ARRAY_PARAMETER_P (in PARM_DECL)
+ LABEL_DECL_CONTINUE (in LABEL_DECL)
+ 2: DECL_THIS_EXTERN (in VAR_DECL, FUNCTION_DECL or PARM_DECL)
+ DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL)
+ DECL_CONSTRAINT_VAR_P (in a PARM_DECL)
+ TEMPLATE_DECL_COMPLEX_ALIAS_P (in TEMPLATE_DECL)
+ DECL_INSTANTIATING_NSDMI_P (in a FIELD_DECL)
+ USING_DECL_UNRELATED_P (in USING_DECL)
+ 3: DECL_IN_AGGR_P.
+ 4: DECL_C_BIT_FIELD (in a FIELD_DECL)
+ DECL_ANON_UNION_VAR_P (in a VAR_DECL)
+ DECL_SELF_REFERENCE_P (in a TYPE_DECL)
+ DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL)
+ DECL_UNINSTANTIATED_TEMPLATE_FRIEND_P (in TEMPLATE_DECL)
+ 5: DECL_INTERFACE_KNOWN.
+ 6: DECL_THIS_STATIC (in VAR_DECL, FUNCTION_DECL or PARM_DECL)
+ DECL_FIELD_IS_BASE (in FIELD_DECL)
+ TYPE_DECL_ALIAS_P (in TYPE_DECL)
+ 7: DECL_THUNK_P (in a member FUNCTION_DECL)
+ DECL_NORMAL_CAPTURE_P (in FIELD_DECL)
+ DECL_DECLARED_CONSTINIT_P (in VAR_DECL)
+ 8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL)
+
+ Usage of language-independent fields in a language-dependent manner:
+
+ TYPE_ALIAS_SET
+ This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so
+ forth as a substitute for the mark bits provided in `lang_type'.
+ At present, only the six low-order bits are used.
+
+ TYPE_LANG_SLOT_1
+ For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS.
+ For a POINTER_TYPE (to a METHOD_TYPE), this is TYPE_PTRMEMFUNC_TYPE.
+ For an ENUMERAL_TYPE, BOUND_TEMPLATE_TEMPLATE_PARM_TYPE,
+ RECORD_TYPE or UNION_TYPE this is TYPE_TEMPLATE_INFO.
+
+ BINFO_VIRTUALS
+ For a binfo, this is a TREE_LIST. There is an entry for each
+ virtual function declared either in BINFO or its direct and
+ indirect primary bases.
+
+ The BV_DELTA of each node gives the amount by which to adjust the
+ `this' pointer when calling the function. If the method is an
+ overridden version of a base class method, then it is assumed
+ that, prior to adjustment, the this pointer points to an object
+ of the base class.
+
+ The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable
+ index of the vcall offset for this entry.
+
+ The BV_FN is the declaration for the virtual function itself.
+
+ If BV_LOST_PRIMARY is set, it means that this entry is for a lost
+ primary virtual base and can be left null in the vtable.
+
+ BINFO_VTABLE
+ This is an expression with POINTER_TYPE that gives the value
+ to which the vptr should be initialized. Use get_vtbl_decl_for_binfo
+ to extract the VAR_DECL for the complete vtable.
+
+ DECL_VINDEX
+ This field is NULL for a non-virtual function. For a virtual
+ function, it is eventually set to an INTEGER_CST indicating the
+ index in the vtable at which this function can be found. When
+ a virtual function is declared, but before it is known what
+ function is overridden, this field is the error_mark_node.
+
+ Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is
+ the virtual function this one overrides, and whose TREE_CHAIN is
+ the old DECL_VINDEX. */
+
+/* Language-specific tree checkers. */
+
+#define VAR_OR_FUNCTION_DECL_CHECK(NODE) \
+ TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL)
+
+#define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \
+ TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL)
+
+#define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \
+ (TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \
+ || TREE_CODE (NODE) == FUNCTION_DECL)
+
+#define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \
+ TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL)
+
+#define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \
+ TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
+
+#define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \
+ TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
+
+#define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \
+ TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM)
+
+#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
+
+/* Returns T iff the node can have a TEMPLATE_INFO field.  */
+
+inline tree
+template_info_decl_check (const_tree t, const char* f, int l, const char* fn)
+{
+ switch (TREE_CODE (t))
+ {
+ case VAR_DECL:
+ case FUNCTION_DECL:
+ case FIELD_DECL:
+ case TYPE_DECL:
+ case CONCEPT_DECL:
+ case TEMPLATE_DECL:
+ return const_cast<tree>(t);
+ default:
+ break;
+ }
+ tree_check_failed (t, f, l, fn,
+ VAR_DECL, FUNCTION_DECL, FIELD_DECL, TYPE_DECL,
+ CONCEPT_DECL, TEMPLATE_DECL, 0);
+ gcc_unreachable ();
+}
+
+#define TEMPLATE_INFO_DECL_CHECK(NODE) \
+ template_info_decl_check ((NODE), __FILE__, __LINE__, __FUNCTION__)
+
+#define THUNK_FUNCTION_CHECK(NODE) __extension__ \
+({ __typeof (NODE) const __t = (NODE); \
+ if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \
+ || !__t->decl_common.lang_specific->u.fn.thunk_p) \
+ tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \
+ __t; })
+
+#else /* ENABLE_TREE_CHECKING */
+
+#define TEMPLATE_INFO_DECL_CHECK(NODE) (NODE)
+#define THUNK_FUNCTION_CHECK(NODE) (NODE)
+
+#endif /* ENABLE_TREE_CHECKING */
+
+/* Language-dependent contents of an identifier. */
+
+struct GTY(()) lang_identifier {
+ struct c_common_identifier c_common;
+ cxx_binding *bindings;
+};
+
+/* Return a typed pointer version of T if it designates a
+ C++ front-end identifier. */
+inline lang_identifier*
+identifier_p (tree t)
+{
+ if (TREE_CODE (t) == IDENTIFIER_NODE)
+ return (lang_identifier*) t;
+ return NULL;
+}
+
+#define LANG_IDENTIFIER_CAST(NODE) \
+ ((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE))
+
+struct GTY(()) template_parm_index {
+ struct tree_common common;
+ int index;
+ int level;
+ int orig_level;
+ tree decl;
+};
+
+struct GTY(()) ptrmem_cst {
+ struct tree_common common;
+ tree member;
+ location_t locus;
+};
+typedef struct ptrmem_cst * ptrmem_cst_t;
+
+#define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE))
+
+#define BIND_EXPR_TRY_BLOCK(NODE) \
+ TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE))
+
+/* This BIND_EXPR is from build_vec_delete_1. */
+#define BIND_EXPR_VEC_DTOR(NODE) \
+ TREE_LANG_FLAG_1 (BIND_EXPR_CHECK (NODE))
+
+/* Used to mark the block around the member initializers and cleanups. */
+#define BIND_EXPR_BODY_BLOCK(NODE) \
+ TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE))
+#define FUNCTION_NEEDS_BODY_BLOCK(NODE) \
+ (DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \
+ || LAMBDA_FUNCTION_P (NODE))
+
+#define STATEMENT_LIST_NO_SCOPE(NODE) \
+ TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE))
+#define STATEMENT_LIST_TRY_BLOCK(NODE) \
+ TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE))
+
+/* Mark the outer curly brace BLOCK. */
+#define BLOCK_OUTER_CURLY_BRACE_P(NODE) TREE_LANG_FLAG_0 (BLOCK_CHECK (NODE))
+
+/* Nonzero if this statement should be considered a full-expression,
+ i.e., if temporaries created during this statement should have
+ their destructors run at the end of this statement. */
+#define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE))
+
+/* Marks the result of a statement expression. */
+#define EXPR_STMT_STMT_EXPR_RESULT(NODE) \
+ TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE))
+
+/* Nonzero if this statement-expression does not have an associated scope. */
+#define STMT_EXPR_NO_SCOPE(NODE) \
+ TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE))
+
+#define COND_EXPR_IS_VEC_DELETE(NODE) \
+ TREE_LANG_FLAG_0 (COND_EXPR_CHECK (NODE))
+
+/* Nonzero if this NOP_EXPR is a reinterpret_cast. Such conversions
+ are not constexprs. Other NOP_EXPRs are. */
+#define REINTERPRET_CAST_P(NODE) \
+ TREE_LANG_FLAG_0 (NOP_EXPR_CHECK (NODE))
+
+/* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual
+ sense of `same'. */
+#define same_type_p(TYPE1, TYPE2) \
+ comptypes ((TYPE1), (TYPE2), COMPARE_STRICT)
+
+/* Returns nonzero iff NODE is a declaration for the global function
+ `main'. */
+#define DECL_MAIN_ANY_P(NODE) \
+ (DECL_EXTERN_C_FUNCTION_P (NODE) \
+ && DECL_NAME (NODE) != NULL_TREE \
+ && MAIN_NAME_P (DECL_NAME (NODE)))
+
+/* Nonzero iff NODE is a declaration for `int main', or we are hosted. */
+#define DECL_MAIN_FREESTANDING_P(NODE) \
+ (DECL_MAIN_ANY_P(NODE) \
+ && (flag_hosted \
+ || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (NODE))) \
+ == integer_type_node))
+
+/* Nonzero iff NODE is a declaration for `main', and we are hosted. */
+#define DECL_MAIN_P(NODE) (DECL_MAIN_ANY_P(NODE) && flag_hosted)
+
+/* Lookup walker marking. */
+#define LOOKUP_SEEN_P(NODE) TREE_VISITED (NODE)
+#define LOOKUP_FOUND_P(NODE) \
+ TREE_LANG_FLAG_4 (TREE_CHECK4 (NODE,RECORD_TYPE,UNION_TYPE,ENUMERAL_TYPE,\
+ NAMESPACE_DECL))
+
+/* These two accessors should only be used by OVL manipulators.
+ Other users should use iterators and convenience functions. */
+#define OVL_FUNCTION(NODE) \
+ (((struct tree_overload*)OVERLOAD_CHECK (NODE))->function)
+#define OVL_CHAIN(NODE) \
+ (((struct tree_overload*)OVERLOAD_CHECK (NODE))->common.chain)
+
+/* If set, this or a subsequent overload contains decls that need deduping. */
+#define OVL_DEDUP_P(NODE) TREE_LANG_FLAG_0 (OVERLOAD_CHECK (NODE))
+/* If set, this was imported in a using declaration. */
+#define OVL_USING_P(NODE) TREE_LANG_FLAG_1 (OVERLOAD_CHECK (NODE))
+/* If set, this overload is a hidden decl. */
+#define OVL_HIDDEN_P(NODE) TREE_LANG_FLAG_2 (OVERLOAD_CHECK (NODE))
+/* If set, this overload contains a nested overload. */
+#define OVL_NESTED_P(NODE) TREE_LANG_FLAG_3 (OVERLOAD_CHECK (NODE))
+/* If set, this overload was constructed during lookup. */
+#define OVL_LOOKUP_P(NODE) TREE_LANG_FLAG_4 (OVERLOAD_CHECK (NODE))
+/* If set, this OVL_USING_P overload is exported. */
+#define OVL_EXPORT_P(NODE) TREE_LANG_FLAG_5 (OVERLOAD_CHECK (NODE))
+
+/* The first decl of an overload. */
+#define OVL_FIRST(NODE) ovl_first (NODE)
+/* The name of the overload set. */
+#define OVL_NAME(NODE) DECL_NAME (OVL_FIRST (NODE))
+
+/* Whether this is a set of overloaded functions. TEMPLATE_DECLS are
+ always wrapped in an OVERLOAD, so we don't need to check them
+ here. */
+#define OVL_P(NODE) \
+ (TREE_CODE (NODE) == FUNCTION_DECL || TREE_CODE (NODE) == OVERLOAD)
+/* Whether this is a single member overload. */
+#define OVL_SINGLE_P(NODE) \
+ (TREE_CODE (NODE) != OVERLOAD || !OVL_CHAIN (NODE))
+
+/* OVL_HIDDEN_P nodes come before other nodes. */
+
+struct GTY(()) tree_overload {
+ struct tree_common common;
+ tree function;
+};
+
+/* Iterator for a one-dimensional overload.  Permits iterating over the
+   outer level of a two-dimensional overload when explicitly enabled.  */
+
+class ovl_iterator {
+ tree ovl;
+ const bool allow_inner; /* Only used when checking. */
+
+ public:
+ explicit ovl_iterator (tree o, bool allow = false)
+ : ovl (o), allow_inner (allow)
+ {
+ }
+
+ public:
+ operator bool () const
+ {
+ return ovl;
+ }
+ ovl_iterator &operator++ ()
+ {
+ ovl = TREE_CODE (ovl) != OVERLOAD ? NULL_TREE : OVL_CHAIN (ovl);
+ return *this;
+ }
+ tree operator* () const
+ {
+ tree fn = TREE_CODE (ovl) != OVERLOAD ? ovl : OVL_FUNCTION (ovl);
+
+ /* Check this is not an unexpected 2-dimensional overload. */
+ gcc_checking_assert (allow_inner || TREE_CODE (fn) != OVERLOAD);
+
+ return fn;
+ }
+ bool operator== (const ovl_iterator &o) const
+ {
+ return ovl == o.ovl;
+ }
+ tree get_using () const
+ {
+ gcc_checking_assert (using_p ());
+ return ovl;
+ }
+
+ public:
+ /* Whether this overload was introduced by a using decl. */
+ bool using_p () const
+ {
+ return (TREE_CODE (ovl) == USING_DECL
+ || (TREE_CODE (ovl) == OVERLOAD && OVL_USING_P (ovl)));
+ }
+ /* Whether this using is being exported. */
+ bool exporting_p () const
+ {
+ return OVL_EXPORT_P (get_using ());
+ }
+
+ bool hidden_p () const
+ {
+ return TREE_CODE (ovl) == OVERLOAD && OVL_HIDDEN_P (ovl);
+ }
+
+ public:
+ tree remove_node (tree head)
+ {
+ return remove_node (head, ovl);
+ }
+ tree reveal_node (tree head)
+ {
+ return reveal_node (head, ovl);
+ }
+
+ protected:
+ /* If we have a nested overload, point at the inner overload and
+ return the next link on the outer one. */
+ tree maybe_push ()
+ {
+ tree r = NULL_TREE;
+
+ if (ovl && TREE_CODE (ovl) == OVERLOAD && OVL_NESTED_P (ovl))
+ {
+ r = OVL_CHAIN (ovl);
+ ovl = OVL_FUNCTION (ovl);
+ }
+ return r;
+ }
+ /* Restore an outer nested overload. */
+ void pop (tree outer)
+ {
+ gcc_checking_assert (!ovl);
+ ovl = outer;
+ }
+
+ private:
+ /* We make these static functions to avoid the address of the
+ iterator escaping the local context. */
+ static tree remove_node (tree head, tree node);
+ static tree reveal_node (tree ovl, tree node);
+};
+
+/* Treat a tree as a range of ovl_iterator, e.g.
+ for (tree f : ovl_range (fns)) { ... } */
+
+class ovl_range
+{
+ tree t;
+ bool allow;
+public:
+ explicit ovl_range (tree t, bool allow = false): t(t), allow(allow) { }
+ ovl_iterator begin() { return ovl_iterator (t, allow); }
+ ovl_iterator end() { return ovl_iterator (NULL_TREE, allow); }
+};
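+
+/* For illustration, a hedged sketch of the idiom (the helper and its
+   argument FNS are hypothetical): count the FUNCTION_DECLs in an
+   overload set, where FNS is anything satisfying OVL_P.
+
+     static unsigned
+     count_functions (tree fns)
+     {
+       unsigned n = 0;
+       for (tree f : ovl_range (fns))
+         if (TREE_CODE (f) == FUNCTION_DECL)
+           n++;
+       return n;
+     }
+*/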
+
+/* Iterator over a (potentially) two-dimensional overload, which is
+   produced by name lookup.  */
+
+class lkp_iterator : public ovl_iterator {
+ typedef ovl_iterator parent;
+
+ tree outer;
+
+ public:
+ explicit lkp_iterator (tree o)
+ : parent (o, true), outer (maybe_push ())
+ {
+ }
+
+ public:
+ lkp_iterator &operator++ ()
+ {
+ bool repush = !outer;
+
+ if (!parent::operator++ () && !repush)
+ {
+ pop (outer);
+ repush = true;
+ }
+
+ if (repush)
+ outer = maybe_push ();
+
+ return *this;
+ }
+};
+
+/* Treat a tree as a range of lkp_iterator, e.g.
+ for (tree f : lkp_range (fns)) { ... } */
+
+class lkp_range
+{
+ tree t;
+public:
+ lkp_range (tree t): t(t) { }
+ lkp_iterator begin() { return lkp_iterator (t); }
+ lkp_iterator end() { return lkp_iterator (NULL_TREE); }
+};
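+
+/* For illustration, a hedged sketch (FNS is hypothetical): lkp_iterator
+   flattens the nested (OVL_NESTED_P) links a lookup may produce, so a
+   single loop visits every candidate decl.
+
+     for (tree f : lkp_range (fns))
+       gcc_checking_assert (DECL_P (f));
+*/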
+
+/* hash traits for declarations. Hashes potential overload sets via
+ DECL_NAME. */
+
+struct named_decl_hash : ggc_remove <tree> {
+ typedef tree value_type; /* A DECL or OVERLOAD */
+ typedef tree compare_type; /* An identifier. */
+
+ inline static hashval_t hash (const value_type decl);
+ inline static bool equal (const value_type existing, compare_type candidate);
+
+ static const bool empty_zero_p = true;
+ static inline void mark_empty (value_type &p) {p = NULL_TREE;}
+ static inline bool is_empty (value_type p) {return !p;}
+
+ /* Nothing is deletable. Everything is insertable. */
+ static bool is_deleted (value_type) { return false; }
+ static void mark_deleted (value_type) { gcc_unreachable (); }
+};
+
+/* Simplified unique_ptr clone to release a tree vec on exit. */
+
+class releasing_vec
+{
+public:
+ typedef vec<tree, va_gc> vec_t;
+
+ releasing_vec (vec_t *v): v(v) { }
+ releasing_vec (): v(make_tree_vector ()) { }
+
+  /* Copy ops are deliberately declared but not defined; copies must
+     always be elided.  */
+ releasing_vec (const releasing_vec &);
+ releasing_vec &operator= (const releasing_vec &);
+
+ vec_t &operator* () const { return *v; }
+ vec_t *operator-> () const { return v; }
+ vec_t *get() const { return v; }
+ operator vec_t *() const { return v; }
+ vec_t ** operator& () { return &v; }
+
+ /* Breaks pointer/value consistency for convenience. This takes ptrdiff_t
+ rather than unsigned to avoid ambiguity with the built-in operator[]
+ (bootstrap/91828). */
+ tree& operator[] (ptrdiff_t i) const { return (*v)[i]; }
+
+ tree *begin() { return ::begin (v); }
+ tree *end() { return ::end (v); }
+
+ void release () { release_tree_vector (v); v = NULL; }
+
+ ~releasing_vec () { release_tree_vector (v); }
+private:
+ vec_t *v;
+};
+/* Forwarding functions for vec_safe_* that might reallocate. */
+inline tree* vec_safe_push (releasing_vec& r, const tree &t CXX_MEM_STAT_INFO)
+{ return vec_safe_push (*&r, t PASS_MEM_STAT); }
+inline bool vec_safe_reserve (releasing_vec& r, unsigned n, bool e = false CXX_MEM_STAT_INFO)
+{ return vec_safe_reserve (*&r, n, e PASS_MEM_STAT); }
+inline unsigned vec_safe_length (releasing_vec &r)
+{ return r->length(); }
+inline void vec_safe_splice (releasing_vec &r, vec<tree, va_gc> *p CXX_MEM_STAT_INFO)
+{ vec_safe_splice (*&r, p PASS_MEM_STAT); }
+void release_tree_vector (releasing_vec &); // cause link error
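+
+/* For illustration, a hedged sketch of typical releasing_vec usage
+   (the surrounding context is hypothetical): the vector goes back to
+   the pool when ARGS goes out of scope, even on an early return.
+
+     releasing_vec args;
+     vec_safe_push (args, integer_zero_node);
+     if (vec_safe_length (args) > 16)
+       return;  // no leak: ~releasing_vec calls release_tree_vector
+*/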
+
+struct GTY(()) tree_template_decl {
+ struct tree_decl_common common;
+ tree arguments;
+ tree result;
+};
+
+/* Returns true iff NODE is a BASELINK. */
+#define BASELINK_P(NODE) \
+ (TREE_CODE (NODE) == BASELINK)
+/* The BINFO indicating the base in which lookup found the
+ BASELINK_FUNCTIONS. */
+#define BASELINK_BINFO(NODE) \
+ (((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo)
+/* The functions referred to by the BASELINK; either a FUNCTION_DECL,
+ a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. */
+#define BASELINK_FUNCTIONS(NODE) \
+ (((struct tree_baselink*) BASELINK_CHECK (NODE))->functions)
+/* If T is a BASELINK, grab the functions, otherwise just T, which is
+ expected to already be a (list of) functions. */
+#define MAYBE_BASELINK_FUNCTIONS(T) \
+ (BASELINK_P (T) ? BASELINK_FUNCTIONS (T) : T)
+/* The BINFO in which the search for the functions indicated by this baselink
+ began. This base is used to determine the accessibility of functions
+ selected by overload resolution. */
+#define BASELINK_ACCESS_BINFO(NODE) \
+ (((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo)
+/* For a type-conversion operator, the BASELINK_OPTYPE indicates the type
+ to which the conversion should occur. This value is important if
+ the BASELINK_FUNCTIONS include a template conversion operator --
+ the BASELINK_OPTYPE can be used to determine what type the user
+ requested. */
+#define BASELINK_OPTYPE(NODE) \
+ (TREE_CHAIN (BASELINK_CHECK (NODE)))
+/* Nonzero if this baselink was from a qualified lookup. */
+#define BASELINK_QUALIFIED_P(NODE) \
+ TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE))
+/* Nonzero if the overload set for this baselink might be incomplete due
+ to the lookup being performed from an incomplete-class context. */
+#define BASELINK_FUNCTIONS_MAYBE_INCOMPLETE_P(NODE) \
+ TREE_LANG_FLAG_1 (BASELINK_CHECK (NODE))
+
+struct GTY(()) tree_baselink {
+ struct tree_common common;
+ tree binfo;
+ tree functions;
+ tree access_binfo;
+};
+
+/* The different kinds of ids that we encounter. */
+
+enum cp_id_kind
+{
+ /* Not an id at all. */
+ CP_ID_KIND_NONE,
+ /* An unqualified-id that is not a template-id. */
+ CP_ID_KIND_UNQUALIFIED,
+ /* An unqualified-id that is a dependent name. */
+ CP_ID_KIND_UNQUALIFIED_DEPENDENT,
+ /* An unqualified template-id. */
+ CP_ID_KIND_TEMPLATE_ID,
+ /* A qualified-id. */
+ CP_ID_KIND_QUALIFIED
+};
+
+
+/* The various kinds of C++0x warnings we encounter. */
+
+enum cpp0x_warn_str
+{
+ /* extended initializer lists */
+ CPP0X_INITIALIZER_LISTS,
+ /* explicit conversion operators */
+ CPP0X_EXPLICIT_CONVERSION,
+ /* variadic templates */
+ CPP0X_VARIADIC_TEMPLATES,
+ /* lambda expressions */
+ CPP0X_LAMBDA_EXPR,
+ /* C++0x auto */
+ CPP0X_AUTO,
+ /* scoped enums */
+ CPP0X_SCOPED_ENUMS,
+ /* defaulted and deleted functions */
+ CPP0X_DEFAULTED_DELETED,
+ /* inline namespaces */
+ CPP0X_INLINE_NAMESPACES,
+ /* override controls, override/final */
+ CPP0X_OVERRIDE_CONTROLS,
+ /* non-static data member initializers */
+ CPP0X_NSDMI,
+ /* user defined literals */
+ CPP0X_USER_DEFINED_LITERALS,
+ /* delegating constructors */
+ CPP0X_DELEGATING_CTORS,
+ /* inheriting constructors */
+ CPP0X_INHERITING_CTORS,
+ /* C++11 attributes */
+ CPP0X_ATTRIBUTES,
+ /* ref-qualified member functions */
+ CPP0X_REF_QUALIFIER
+};
+
+/* The various kinds of operation used by composite_pointer_type. */
+
+enum composite_pointer_operation
+{
+ /* comparison */
+ CPO_COMPARISON,
+ /* conversion */
+ CPO_CONVERSION,
+ /* conditional expression */
+ CPO_CONDITIONAL_EXPR
+};
+
+/* Possible cases of expression list used by build_x_compound_expr_from_list. */
+enum expr_list_kind {
+ ELK_INIT, /* initializer */
+ ELK_MEM_INIT, /* member initializer */
+ ELK_FUNC_CAST /* functional cast */
+};
+
+/* Possible cases of implicit bad rhs conversions. */
+enum impl_conv_rhs {
+ ICR_DEFAULT_ARGUMENT, /* default argument */
+ ICR_CONVERTING, /* converting */
+ ICR_INIT, /* initialization */
+ ICR_ARGPASS, /* argument passing */
+ ICR_RETURN, /* return */
+ ICR_ASSIGN /* assignment */
+};
+
+/* Possible cases of implicit or explicit bad conversions to void. */
+enum impl_conv_void {
+ ICV_CAST, /* (explicit) conversion to void */
+ ICV_SECOND_OF_COND, /* second operand of conditional expression */
+ ICV_THIRD_OF_COND, /* third operand of conditional expression */
+ ICV_RIGHT_OF_COMMA, /* right operand of comma operator */
+ ICV_LEFT_OF_COMMA, /* left operand of comma operator */
+ ICV_STATEMENT, /* statement */
+ ICV_THIRD_IN_FOR /* for increment expression */
+};
+
+/* Possible invalid uses of an abstract class that might not have a
+ specific associated declaration. */
+enum GTY(()) abstract_class_use {
+ ACU_UNKNOWN, /* unknown or decl provided */
+ ACU_CAST, /* cast to abstract class */
+ ACU_NEW, /* new-expression of abstract class */
+ ACU_THROW, /* throw-expression of abstract class */
+ ACU_CATCH, /* catch-parameter of abstract class */
+ ACU_ARRAY, /* array of abstract class */
+ ACU_RETURN, /* return type of abstract class */
+ ACU_PARM /* parameter type of abstract class */
+};
+
+/* Macros for access to language-specific slots in an identifier. */
+
+/* Identifiers map directly to block or class-scope bindings.
+   Namespace-scope bindings are held in hash tables on the respective
+   namespaces.  An identifier's binding is the innermost active
+   binding, from which you can get the decl and/or implicit-typedef
+   of an elaborated type.  When not bound to a local entity the
+   values are NULL.  */
+#define IDENTIFIER_BINDING(NODE) \
+ (LANG_IDENTIFIER_CAST (NODE)->bindings)
+#define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE)
+#define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE))
+
+/* Kinds of identifiers. Values are carefully chosen. */
+enum cp_identifier_kind {
+ cik_normal = 0, /* Not a special identifier. */
+ cik_keyword = 1, /* A keyword. */
+ cik_ctor = 2, /* Constructor (in-chg, complete or base). */
+ cik_dtor = 3, /* Destructor (in-chg, deleting, complete or
+ base). */
+ cik_simple_op = 4, /* Non-assignment operator name. */
+ cik_assign_op = 5, /* An assignment operator name. */
+ cik_conv_op = 6, /* Conversion operator name. */
+ cik_reserved_for_udlit = 7, /* Not yet in use */
+ cik_max
+};
+
+/* Kind bits. */
+#define IDENTIFIER_KIND_BIT_0(NODE) \
+ TREE_LANG_FLAG_0 (IDENTIFIER_NODE_CHECK (NODE))
+#define IDENTIFIER_KIND_BIT_1(NODE) \
+ TREE_LANG_FLAG_1 (IDENTIFIER_NODE_CHECK (NODE))
+#define IDENTIFIER_KIND_BIT_2(NODE) \
+ TREE_LANG_FLAG_2 (IDENTIFIER_NODE_CHECK (NODE))
+
+/* Used by various search routines. */
+#define IDENTIFIER_MARKED(NODE) \
+ TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (NODE))
+
+/* Nonzero if this identifier is used as a virtual function name somewhere
+ (optimizes searches). */
+#define IDENTIFIER_VIRTUAL_P(NODE) \
+ TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (NODE))
+
+/* True if this identifier is a reserved word. C_RID_CODE (node) is
+ then the RID_* value of the keyword. Value 1. */
+#define IDENTIFIER_KEYWORD_P(NODE) \
+ ((!IDENTIFIER_KIND_BIT_2 (NODE)) \
+ & (!IDENTIFIER_KIND_BIT_1 (NODE)) \
+ & IDENTIFIER_KIND_BIT_0 (NODE))
+
+/* True if this identifier is the name of a constructor or
+ destructor. Value 2 or 3. */
+#define IDENTIFIER_CDTOR_P(NODE) \
+ ((!IDENTIFIER_KIND_BIT_2 (NODE)) \
+ & IDENTIFIER_KIND_BIT_1 (NODE))
+
+/* True if this identifier is the name of a constructor. Value 2. */
+#define IDENTIFIER_CTOR_P(NODE) \
+ (IDENTIFIER_CDTOR_P(NODE) \
+ & (!IDENTIFIER_KIND_BIT_0 (NODE)))
+
+/* True if this identifier is the name of a destructor. Value 3. */
+#define IDENTIFIER_DTOR_P(NODE) \
+ (IDENTIFIER_CDTOR_P(NODE) \
+ & IDENTIFIER_KIND_BIT_0 (NODE))
+
+/* True if this identifier is for any operator name (including
+ conversions). Value 4, 5, 6 or 7. */
+#define IDENTIFIER_ANY_OP_P(NODE) \
+ (IDENTIFIER_KIND_BIT_2 (NODE))
+
+/* True if this identifier is for an overloaded operator. Values 4, 5. */
+#define IDENTIFIER_OVL_OP_P(NODE) \
+ (IDENTIFIER_ANY_OP_P (NODE) \
+ & (!IDENTIFIER_KIND_BIT_1 (NODE)))
+
+/* True if this identifier is for an assignment operator.  Value 5.  */
+#define IDENTIFIER_ASSIGN_OP_P(NODE) \
+ (IDENTIFIER_OVL_OP_P (NODE) \
+ & IDENTIFIER_KIND_BIT_0 (NODE))
+
+/* True if this identifier is the name of a type-conversion
+   operator.  Value 6.  */
+#define IDENTIFIER_CONV_OP_P(NODE) \
+ (IDENTIFIER_ANY_OP_P (NODE) \
+ & IDENTIFIER_KIND_BIT_1 (NODE) \
+ & (!IDENTIFIER_KIND_BIT_0 (NODE)))
+
+/* True if this identifier is a new or delete operator. */
+#define IDENTIFIER_NEWDEL_OP_P(NODE) \
+ (IDENTIFIER_OVL_OP_P (NODE) \
+ && IDENTIFIER_OVL_OP_FLAGS (NODE) & OVL_OP_FLAG_ALLOC)
+
+/* True if this identifier is a new operator. */
+#define IDENTIFIER_NEW_OP_P(NODE) \
+ (IDENTIFIER_OVL_OP_P (NODE) \
+ && (IDENTIFIER_OVL_OP_FLAGS (NODE) \
+ & (OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE)) == OVL_OP_FLAG_ALLOC)
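+
+/* For illustration, a hedged sketch (helper name hypothetical) showing
+   how the three kind bits above reassemble into a cp_identifier_kind:
+
+     static cp_identifier_kind
+     identifier_kind (tree id)
+     {
+       unsigned k = ((IDENTIFIER_KIND_BIT_2 (id) << 2)
+                     | (IDENTIFIER_KIND_BIT_1 (id) << 1)
+                     | IDENTIFIER_KIND_BIT_0 (id));
+       return (cp_identifier_kind) k;
+     }
+*/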
+
+/* Access a C++-specific index for identifier NODE.
+ Used to optimize operator mappings etc. */
+#define IDENTIFIER_CP_INDEX(NODE) \
+ (IDENTIFIER_NODE_CHECK(NODE)->base.u.bits.address_space)
+
+/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
+#define C_TYPE_FIELDS_READONLY(TYPE) \
+ (LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly)
+
+/* The tokens stored in the unparsed operand. */
+
+#define DEFPARSE_TOKENS(NODE) \
+ (((struct tree_deferred_parse *)DEFERRED_PARSE_CHECK (NODE))->tokens)
+#define DEFPARSE_INSTANTIATIONS(NODE) \
+ (((struct tree_deferred_parse *)DEFERRED_PARSE_CHECK (NODE))->instantiations)
+
+struct GTY (()) tree_deferred_parse {
+ struct tree_base base;
+ struct cp_token_cache *tokens;
+ vec<tree, va_gc> *instantiations;
+};
+
+
+#define DEFERRED_NOEXCEPT_PATTERN(NODE) \
+ (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern)
+#define DEFERRED_NOEXCEPT_ARGS(NODE) \
+ (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args)
+#define DEFERRED_NOEXCEPT_SPEC_P(NODE) \
+ ((NODE) && (TREE_PURPOSE (NODE)) \
+ && (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT))
+#define UNEVALUATED_NOEXCEPT_SPEC_P(NODE) \
+ (DEFERRED_NOEXCEPT_SPEC_P (NODE) \
+ && DEFERRED_NOEXCEPT_PATTERN (TREE_PURPOSE (NODE)) == NULL_TREE)
+#define UNPARSED_NOEXCEPT_SPEC_P(NODE) \
+ ((NODE) && (TREE_PURPOSE (NODE)) \
+ && (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_PARSE))
+
+struct GTY (()) tree_deferred_noexcept {
+ struct tree_base base;
+ tree pattern;
+ tree args;
+};
+
+
+/* The condition associated with the static assertion. This must be
+ an integral constant expression. */
+#define STATIC_ASSERT_CONDITION(NODE) \
+ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition)
+
+/* The message associated with the static assertion. This must be a
+ string constant, which will be emitted as an error message when the
+ static assert condition is false. */
+#define STATIC_ASSERT_MESSAGE(NODE) \
+ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message)
+
+/* Source location information for a static assertion. */
+#define STATIC_ASSERT_SOURCE_LOCATION(NODE) \
+ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location)
+
+struct GTY (()) tree_static_assert {
+ struct tree_common common;
+ tree condition;
+ tree message;
+ location_t location;
+};
+
+struct GTY (()) tree_argument_pack_select {
+ struct tree_common common;
+ tree argument_pack;
+ int index;
+};
+
+/* The different kinds of traits that we encounter. */
+
+enum cp_trait_kind
+{
+#define DEFTRAIT(TCC, CODE, NAME, ARITY) \
+ CPTK_##CODE,
+#include "cp-trait.def"
+#undef DEFTRAIT
+};
+
+/* The types that we are processing. */
+#define TRAIT_EXPR_TYPE1(NODE) \
+ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1)
+
+#define TRAIT_EXPR_TYPE2(NODE) \
+ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2)
+
+/* The specific trait that we are processing. */
+#define TRAIT_EXPR_KIND(NODE) \
+ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind)
+
+#define TRAIT_EXPR_LOCATION(NODE) \
+ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->locus)
+
+struct GTY (()) tree_trait_expr {
+ struct tree_common common;
+ tree type1;
+ tree type2;
+ location_t locus;
+ enum cp_trait_kind kind;
+};
+
+/* An INTEGER_CST containing the kind of the trait type NODE. */
+#define TRAIT_TYPE_KIND_RAW(NODE) \
+ TYPE_VALUES_RAW (TRAIT_TYPE_CHECK (NODE))
+
+/* The kind of the trait type NODE. */
+#define TRAIT_TYPE_KIND(NODE) \
+ ((enum cp_trait_kind) TREE_INT_CST_LOW (TRAIT_TYPE_KIND_RAW (NODE)))
+
+/* The first argument of the trait type NODE. */
+#define TRAIT_TYPE_TYPE1(NODE) \
+ TYPE_MIN_VALUE_RAW (TRAIT_TYPE_CHECK (NODE))
+
+/* The rest of the arguments of the trait type NODE. */
+#define TRAIT_TYPE_TYPE2(NODE) \
+ TYPE_MAX_VALUE_RAW (TRAIT_TYPE_CHECK (NODE))
+
+/* Identifiers used for lambda types are almost anonymous. Use this
+ spare flag to distinguish them (they also have the anonymous flag). */
+#define IDENTIFIER_LAMBDA_P(NODE) \
+ (IDENTIFIER_NODE_CHECK(NODE)->base.protected_flag)
+
+/* Based on TYPE_UNNAMED_P.  */
+#define LAMBDA_TYPE_P(NODE) \
+ (TREE_CODE (NODE) == RECORD_TYPE \
+ && TYPE_LINKAGE_IDENTIFIER (NODE) \
+ && IDENTIFIER_LAMBDA_P (TYPE_LINKAGE_IDENTIFIER (NODE)))
+
+/* Test if FUNCTION_DECL is a lambda function. */
+#define LAMBDA_FUNCTION_P(FNDECL) \
+ (DECL_DECLARES_FUNCTION_P (FNDECL) \
+ && DECL_OVERLOADED_OPERATOR_P (FNDECL) \
+ && DECL_OVERLOADED_OPERATOR_IS (FNDECL, CALL_EXPR) \
+ && LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL)))
+
+enum cp_lambda_default_capture_mode_type {
+ CPLD_NONE,
+ CPLD_COPY,
+ CPLD_REFERENCE
+};
+
+/* The method of default capture, if any. */
+#define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \
+ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode)
+
+/* The capture-list, including `this'.  Each capture is stored as a FIELD_DECL
+   so that the name, type, and field are all together, whether or not it has
+   been added to the lambda's class type.
+   TREE_LIST:
+     TREE_PURPOSE: The FIELD_DECL for this capture.
+     TREE_VALUE: The initializer.  This is part of a GNU extension.  */
+#define LAMBDA_EXPR_CAPTURE_LIST(NODE) \
+ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list)
+
+/* During parsing of the lambda-introducer, the node in the capture-list
+ that holds the 'this' capture. During parsing of the body, the
+ capture proxy for that node. */
+#define LAMBDA_EXPR_THIS_CAPTURE(NODE) \
+ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture)
+
+/* Predicate tracking whether `this' is in the effective capture set. */
+#define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \
+ LAMBDA_EXPR_THIS_CAPTURE(NODE)
+
+/* Predicate tracking whether the lambda was declared 'mutable'. */
+#define LAMBDA_EXPR_MUTABLE_P(NODE) \
+ TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE))
+
+/* True iff uses of a const variable capture were optimized away. */
+#define LAMBDA_EXPR_CAPTURE_OPTIMIZED(NODE) \
+ TREE_LANG_FLAG_2 (LAMBDA_EXPR_CHECK (NODE))
+
+/* Predicate tracking whether the lambda was declared 'static'. */
+#define LAMBDA_EXPR_STATIC_P(NODE) \
+ TREE_LANG_FLAG_3 (LAMBDA_EXPR_CHECK (NODE))
+
+/* True if this TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST is for an explicit
+ capture. */
+#define LAMBDA_CAPTURE_EXPLICIT_P(NODE) \
+ TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
+
+/* The source location of the lambda. */
+#define LAMBDA_EXPR_LOCATION(NODE) \
+ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus)
+
+/* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL,
+ FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */
+#define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \
+ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope)
+
+/* Lambdas in the same extra scope might need a discriminating count.
+   For ABI 17, we have a single per-scope count; for ABI 18, we have
+ per-scope, per-signature numbering. */
+#define LAMBDA_EXPR_SCOPE_ONLY_DISCRIMINATOR(NODE) \
+ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator_scope)
+#define LAMBDA_EXPR_SCOPE_SIG_DISCRIMINATOR(NODE) \
+ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator_sig)
+
+/* During parsing of the lambda, a vector of capture proxies which need
+ to be pushed once we're done processing a nested lambda. */
+#define LAMBDA_EXPR_PENDING_PROXIES(NODE) \
+ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies)
+
+/* If NODE was regenerated via tsubst_lambda_expr, this is a TEMPLATE_INFO
+ whose TI_TEMPLATE is the immediate LAMBDA_EXPR from which NODE was
+ regenerated, and TI_ARGS is the full set of template arguments used
+ to regenerate NODE from the most general lambda. */
+#define LAMBDA_EXPR_REGEN_INFO(NODE) \
+ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->regen_info)
+
+/* The closure type of the lambda, which is also the type of the
+ LAMBDA_EXPR. */
+#define LAMBDA_EXPR_CLOSURE(NODE) \
+ (TREE_TYPE (LAMBDA_EXPR_CHECK (NODE)))
+
+struct GTY (()) tree_lambda_expr
+{
+ struct tree_typed typed;
+ tree capture_list;
+ tree this_capture;
+ tree extra_scope;
+ tree regen_info;
+ vec<tree, va_gc> *pending_proxies;
+ location_t locus;
+ enum cp_lambda_default_capture_mode_type default_capture_mode : 2;
+ unsigned discriminator_scope : 15; // Per-scope discriminator
+ unsigned discriminator_sig : 15; // Per-scope, per-signature discriminator
+};
+
+/* Non-zero if this template specialization has access violations that
+ should be rechecked when the function is instantiated outside argument
+ deduction. */
+#define TINFO_HAS_ACCESS_ERRORS(NODE) \
+ (TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE)))
+#define FNDECL_HAS_ACCESS_ERRORS(NODE) \
+ (TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE)))
+
+/* Non-zero if this variable template specialization was specified using a
+ template-id, so it's a partial or full specialization and not a definition
+ of the member template of a particular class specialization. */
+#define TINFO_USED_TEMPLATE_ID(NODE) \
+ (TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE)))
+
+/* The representation of a deferred access check. */
+
+struct GTY(()) deferred_access_check {
+ /* The base class in which the declaration is referenced. */
+ tree binfo;
+ /* The declaration whose access must be checked. */
+ tree decl;
+ /* The declaration that should be used in the error message. */
+ tree diag_decl;
+ /* The location of this access. */
+ location_t loc;
+};
+
+struct GTY(()) tree_template_info {
+ struct tree_base base;
+ tree tmpl;
+ tree args;
+ vec<deferred_access_check, va_gc> *deferred_access_checks;
+};
+
+// Constraint information for a C++ declaration. Constraint information is
+// comprised of:
+//
+// - a constraint expression introduced by the template header
+// - a constraint expression introduced by a function declarator
+// - the associated constraints, which are the conjunction of those,
+// and used for declaration matching
+//
+// The template and declarator requirements are kept to support pretty
+// printing constrained declarations.
+struct GTY(()) tree_constraint_info {
+ struct tree_base base;
+ tree template_reqs;
+ tree declarator_reqs;
+ tree associated_constr;
+};
+
+// Require that pointer P is non-null before returning.
+template<typename T>
+inline T*
+check_nonnull (T* p)
+{
+ gcc_assert (p);
+ return p;
+}
+
+/* Returns T as a tree_constraint_info pointer iff T is non-null and
+   represents constraint info; otherwise returns NULL.  */
+inline tree_constraint_info *
+check_constraint_info (tree t)
+{
+ if (t && TREE_CODE (t) == CONSTRAINT_INFO)
+ return (tree_constraint_info *)t;
+ return NULL;
+}
+
+/* Access the expression describing the template constraints.  This may be
+   null if no constraints were introduced in the template parameter list,
+   in a requires-clause after the template parameter list, or through a
+   constrained-type-specifier.  */
+#define CI_TEMPLATE_REQS(NODE) \
+ check_constraint_info (check_nonnull (NODE))->template_reqs
+
+/* Access the expression describing the trailing constraints. This is non-null
+ for any implicit instantiation of a constrained declaration. For a
+ templated declaration it is non-null only when a trailing requires-clause
+ was specified. */
+#define CI_DECLARATOR_REQS(NODE) \
+ check_constraint_info (check_nonnull (NODE))->declarator_reqs
+
+/* The computed associated constraint expression for a declaration. */
+#define CI_ASSOCIATED_CONSTRAINTS(NODE) \
+ check_constraint_info (check_nonnull (NODE))->associated_constr
+
+/* Access the constraint-expression introduced by the requires-clause
+   associated with the template parameter list NODE.  */
+#define TEMPLATE_PARMS_CONSTRAINTS(NODE) \
+ TREE_TYPE (TREE_LIST_CHECK (NODE))
+
+/* Access the logical constraints on the template parameter declaration
+ indicated by NODE. */
+#define TEMPLATE_PARM_CONSTRAINTS(NODE) \
+ TREE_TYPE (TREE_LIST_CHECK (NODE))
+
+/* Non-zero if the noexcept is present in a compound requirement. */
+#define COMPOUND_REQ_NOEXCEPT_P(NODE) \
+ TREE_LANG_FLAG_0 (TREE_CHECK (NODE, COMPOUND_REQ))
+
+/* A TREE_LIST whose TREE_VALUE is the constraints on the 'auto' placeholder
+ type NODE, used in an argument deduction constraint. The TREE_PURPOSE
+ holds the set of template parameters that were in-scope when this 'auto'
+ was formed. */
+#define PLACEHOLDER_TYPE_CONSTRAINTS_INFO(NODE) \
+ DECL_SIZE_UNIT (TYPE_NAME (NODE))
+
+/* The constraints on the 'auto' placeholder type NODE. */
+#define PLACEHOLDER_TYPE_CONSTRAINTS(NODE) \
+ (PLACEHOLDER_TYPE_CONSTRAINTS_INFO (NODE) \
+ ? TREE_VALUE (PLACEHOLDER_TYPE_CONSTRAINTS_INFO (NODE)) \
+ : NULL_TREE)
+
+/* True if NODE is a constraint. */
+#define CONSTR_P(NODE) \
+ (TREE_CODE (NODE) == ATOMIC_CONSTR \
+ || TREE_CODE (NODE) == CONJ_CONSTR \
+ || TREE_CODE (NODE) == DISJ_CONSTR)
+
+/* Valid for any normalized constraint. */
+#define CONSTR_CHECK(NODE) \
+ TREE_CHECK3 (NODE, ATOMIC_CONSTR, CONJ_CONSTR, DISJ_CONSTR)
+
+/* The CONSTR_INFO stores normalization data for a constraint. It refers to
+ the original expression and the expression or declaration
+ from which the constraint was normalized.
+
+   This is a TREE_LIST whose TREE_PURPOSE is the original expression and whose
+ TREE_VALUE is a list of contexts. */
+#define CONSTR_INFO(NODE) \
+ TREE_TYPE (CONSTR_CHECK (NODE))
+
+/* The expression evaluated by the constraint. */
+#define CONSTR_EXPR(NODE) \
+ TREE_PURPOSE (CONSTR_INFO (NODE))
+
+/* The expression or declaration from which this constraint was normalized.
+ This is a TREE_LIST whose TREE_VALUE is either a template-id expression
+ denoting a concept check or the declaration introducing the constraint.
+ These are chained to other context objects. */
+#define CONSTR_CONTEXT(NODE) \
+ TREE_VALUE (CONSTR_INFO (NODE))
+
+/* The parameter mapping for an atomic constraint. */
+#define ATOMIC_CONSTR_MAP(NODE) \
+ TREE_OPERAND (TREE_CHECK (NODE, ATOMIC_CONSTR), 0)
+
+/* Whether the parameter mapping of this atomic constraint
+ is already instantiated with concrete template arguments.
+ Used only in satisfy_atom and in the satisfaction cache. */
+#define ATOMIC_CONSTR_MAP_INSTANTIATED_P(NODE) \
+ TREE_LANG_FLAG_0 (ATOMIC_CONSTR_CHECK (NODE))
+
+/* Whether the expression for this atomic constraint belongs to a
+ concept definition. */
+#define ATOMIC_CONSTR_EXPR_FROM_CONCEPT_P(NODE) \
+ TREE_LANG_FLAG_1 (ATOMIC_CONSTR_CHECK (NODE))
+
+/* The expression of an atomic constraint. */
+#define ATOMIC_CONSTR_EXPR(NODE) \
+ CONSTR_EXPR (ATOMIC_CONSTR_CHECK (NODE))
+
+/* The concept of a concept check. */
+#define CHECK_CONSTR_CONCEPT(NODE) \
+ TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 0)
+
+/* The template arguments of a concept check. */
+#define CHECK_CONSTR_ARGS(NODE) \
+ TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 1)
+
+/* Whether a PARM_DECL represents a local parameter in a
+ requires-expression. */
+#define CONSTRAINT_VAR_P(NODE) \
+ DECL_LANG_FLAG_2 (TREE_CHECK (NODE, PARM_DECL))
+
+/* The concept constraining this constrained template-parameter. */
+#define CONSTRAINED_PARM_CONCEPT(NODE) \
+ DECL_SIZE_UNIT (TYPE_DECL_CHECK (NODE))
+/* Any extra template arguments specified for a constrained
+ template-parameter. */
+#define CONSTRAINED_PARM_EXTRA_ARGS(NODE) \
+ DECL_SIZE (TYPE_DECL_CHECK (NODE))
+/* The first template parameter of CONSTRAINED_PARM_CONCEPT to be used as a
+ prototype for the constrained parameter in finish_shorthand_constraint,
+ attached for convenience. */
+#define CONSTRAINED_PARM_PROTOTYPE(NODE) \
+ DECL_INITIAL (TYPE_DECL_CHECK (NODE))
+
+/* Module flags on FUNCTION, VAR, TYPE, CONCEPT or NAMESPACE decls.
+   A TEMPLATE_DECL holds them on the DECL_TEMPLATE_RESULT object --
+   it's just not practical to keep them consistent.  */
+#define DECL_MODULE_CHECK(NODE) \
+ TREE_NOT_CHECK (NODE, TEMPLATE_DECL)
+
+/* In the purview of a named module (or in the purview of the
+ header-unit being compiled). */
+#define DECL_MODULE_PURVIEW_P(N) \
+ (DECL_LANG_SPECIFIC (DECL_MODULE_CHECK (N))->u.base.module_purview_p)
+
+/* Attached to the named module it is in the purview of. Decls
+ attached to the global module will have this false. */
+#define DECL_MODULE_ATTACH_P(N) \
+ (DECL_LANG_SPECIFIC (DECL_MODULE_CHECK (N))->u.base.module_attach_p)
+
+/* True if the live version of the decl was imported. */
+#define DECL_MODULE_IMPORT_P(NODE) \
+ (DECL_LANG_SPECIFIC (DECL_MODULE_CHECK (NODE))->u.base.module_import_p)
+
+/* True if this decl is in the entity hash & array. This means that
+ some variant was imported, even if DECL_MODULE_IMPORT_P is false. */
+#define DECL_MODULE_ENTITY_P(NODE) \
+ (DECL_LANG_SPECIFIC (DECL_MODULE_CHECK (NODE))->u.base.module_entity_p)
+
+/* DECL that has attached decls for ODR-relatedness. */
+#define DECL_MODULE_KEYED_DECLS_P(NODE) \
+ (DECL_LANG_SPECIFIC (TREE_CHECK2(NODE,FUNCTION_DECL,VAR_DECL))\
+ ->u.base.module_keyed_decls_p)
+
+/* Whether this is an exported DECL.  Held on any decl that can appear
+   at namespace scope (function, var, type, template, const or
+   namespace).  Templates copy it from their DECL_TEMPLATE_RESULT;
+   consts have it for unscoped enums.  */
+#define DECL_MODULE_EXPORT_P(NODE) TREE_LANG_FLAG_3 (NODE)
+
+
+/* The list of local parameters introduced by this requires-expression,
+ in the form of a chain of PARM_DECLs. */
+#define REQUIRES_EXPR_PARMS(NODE) \
+ TREE_OPERAND (TREE_CHECK (NODE, REQUIRES_EXPR), 0)
+
+/* A TREE_LIST of the requirements for this requires-expression.
+ The requirements are stored in lexical order within the TREE_VALUE
+ of each TREE_LIST node. The TREE_PURPOSE of each node is unused. */
+#define REQUIRES_EXPR_REQS(NODE) \
+ TREE_OPERAND (TREE_CHECK (NODE, REQUIRES_EXPR), 1)
+
+/* Like PACK_EXPANSION_EXTRA_ARGS, for requires-expressions. */
+#define REQUIRES_EXPR_EXTRA_ARGS(NODE) \
+ TREE_OPERAND (TREE_CHECK (NODE, REQUIRES_EXPR), 2)
+
+enum cp_tree_node_structure_enum {
+ TS_CP_GENERIC,
+ TS_CP_IDENTIFIER,
+ TS_CP_TPI,
+ TS_CP_PTRMEM,
+ TS_CP_OVERLOAD,
+ TS_CP_BINDING_VECTOR,
+ TS_CP_BASELINK,
+ TS_CP_TEMPLATE_DECL,
+ TS_CP_DEFERRED_PARSE,
+ TS_CP_DEFERRED_NOEXCEPT,
+ TS_CP_STATIC_ASSERT,
+ TS_CP_ARGUMENT_PACK_SELECT,
+ TS_CP_TRAIT_EXPR,
+ TS_CP_LAMBDA_EXPR,
+ TS_CP_TEMPLATE_INFO,
+ TS_CP_CONSTRAINT_INFO,
+ TS_CP_USERDEF_LITERAL
+};
+
+/* The resulting tree type. */
+union GTY((desc ("cp_tree_node_structure (&%h)"),
+ chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node {
+ union tree_node GTY ((tag ("TS_CP_GENERIC"),
+ desc ("tree_node_structure (&%h)"))) generic;
+ struct template_parm_index GTY ((tag ("TS_CP_TPI"))) tpi;
+ struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem;
+ struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload;
+ struct tree_binding_vec GTY ((tag ("TS_CP_BINDING_VECTOR"))) binding_vec;
+ struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink;
+ struct tree_template_decl GTY ((tag ("TS_CP_TEMPLATE_DECL"))) template_decl;
+ struct tree_deferred_parse GTY ((tag ("TS_CP_DEFERRED_PARSE"))) deferred_parse;
+ struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept;
+ struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier;
+ struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT")))
+ static_assertion;
+ struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT")))
+ argument_pack_select;
+ struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR")))
+ trait_expression;
+ struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR")))
+ lambda_expression;
+ struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO")))
+ template_info;
+ struct tree_constraint_info GTY ((tag ("TS_CP_CONSTRAINT_INFO")))
+ constraint_info;
+ struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL")))
+ userdef_literal;
+};
+
+
+struct GTY(()) cp_omp_declare_target_attr {
+ bool attr_syntax;
+ int device_type;
+};
+
+struct GTY(()) cp_omp_begin_assumes_data {
+ bool attr_syntax;
+};
+
+/* Global state. */
+
+struct GTY(()) saved_scope {
+ vec<cxx_saved_binding, va_gc> *old_bindings;
+ tree old_namespace;
+ vec<tree, va_gc> *decl_ns_list;
+ tree class_name;
+ tree class_type;
+ tree access_specifier;
+ tree function_decl;
+ vec<tree, va_gc> *lang_base;
+ tree lang_name;
+ tree template_parms;
+ cp_binding_level *x_previous_class_level;
+ tree x_saved_tree;
+
+  /* Only used for uses of `this' in a trailing return type.  */
+ tree x_current_class_ptr;
+ tree x_current_class_ref;
+
+ int x_processing_template_decl;
+ int x_processing_specialization;
+ int x_processing_constraint;
+ int x_processing_contract_condition;
+ int suppress_location_wrappers;
+ BOOL_BITFIELD x_processing_explicit_instantiation : 1;
+ BOOL_BITFIELD need_pop_function_context : 1;
+
+ /* Nonzero if we are parsing the discarded statement of a constexpr
+ if-statement. */
+ BOOL_BITFIELD discarded_stmt : 1;
+ /* Nonzero if we are parsing or instantiating the compound-statement
+     of a consteval if statement.  Also set while processing an immediate
+ invocation. */
+ BOOL_BITFIELD consteval_if_p : 1;
+
+ int unevaluated_operand;
+ int inhibit_evaluation_warnings;
+ int noexcept_operand;
+ int ref_temp_count;
+
+ struct stmt_tree_s x_stmt_tree;
+
+ cp_binding_level *class_bindings;
+ cp_binding_level *bindings;
+
+ hash_map<tree, tree> *GTY((skip)) x_local_specializations;
+ vec<cp_omp_declare_target_attr, va_gc> *omp_declare_target_attribute;
+ vec<cp_omp_begin_assumes_data, va_gc> *omp_begin_assumes;
+
+ struct saved_scope *prev;
+};
+
+extern GTY(()) struct saved_scope *scope_chain;
+
+/* The current open namespace. */
+
+#define current_namespace scope_chain->old_namespace
+
+/* The stack for namespaces of current declarations. */
+
+#define decl_namespace_list scope_chain->decl_ns_list
+
+/* IDENTIFIER_NODE: name of current class */
+
+#define current_class_name scope_chain->class_name
+
+/* _TYPE: the type of the current class */
+
+#define current_class_type scope_chain->class_type
+
+/* When parsing a class definition, the access specifier most recently
+ given by the user, or, if no access specifier was given, the
+ default value appropriate for the kind of class (i.e., struct,
+ class, or union). */
+
+#define current_access_specifier scope_chain->access_specifier
+
+/* Pointer to the top of the language name stack. */
+
+#define current_lang_base scope_chain->lang_base
+#define current_lang_name scope_chain->lang_name
+
+/* When parsing a template declaration, a TREE_LIST represents the
+ active template parameters. Each node in the list represents one
+ level of template parameters. The innermost level is first in the
+ list. The depth of each level is stored as an INTEGER_CST in the
+ TREE_PURPOSE of each node. The parameters for that level are
+ stored in the TREE_VALUE. */
+
+#define current_template_parms scope_chain->template_parms
+#define current_template_depth \
+ (current_template_parms ? TMPL_PARMS_DEPTH (current_template_parms) : 0)
+
+#define processing_template_decl scope_chain->x_processing_template_decl
+#define processing_specialization scope_chain->x_processing_specialization
+#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation
+
+/* Nonzero if we are parsing the conditional expression of a contract
+ condition. These expressions appear outside the paramter list (like a
+ trailing return type), but are potentially evaluated. */
+
+#define processing_contract_condition scope_chain->x_processing_contract_condition
+
+#define in_discarded_stmt scope_chain->discarded_stmt
+#define in_consteval_if_p scope_chain->consteval_if_p
+
+#define current_ref_temp_count scope_chain->ref_temp_count
+
+/* RAII sentinel to handle clearing processing_template_decl and restoring
+ it when done. */
+
+class processing_template_decl_sentinel
+{
+public:
+ int saved;
+ processing_template_decl_sentinel (bool reset = true)
+ : saved (processing_template_decl)
+ {
+ if (reset)
+ processing_template_decl = 0;
+ }
+ ~processing_template_decl_sentinel()
+ {
+ processing_template_decl = saved;
+ }
+};
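+
+/* For illustration, a hedged sketch (the function is hypothetical):
+   evaluate something as non-template code; the saved value of
+   processing_template_decl is restored on every exit path.
+
+     static tree
+     fold_as_concrete (tree t)
+     {
+       processing_template_decl_sentinel ptds;
+       return fold (t);
+     }
+*/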
+
+/* RAII sentinel to disable certain warnings during template substitution
+ and elsewhere. */
+
+class warning_sentinel
+{
+public:
+ int &flag;
+ int val;
+ warning_sentinel(int& flag, bool suppress=true)
+ : flag(flag), val(flag) { if (suppress) flag = 0; }
+ ~warning_sentinel() { flag = val; }
+};
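+
+/* For illustration, a hedged sketch: silence -Wparentheses for the rest
+   of the enclosing scope; the flag is restored when WS is destroyed.
+
+     warning_sentinel ws (warn_parentheses);
+*/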
+
+/* RAII sentinel to temporarily override input_location. This will not set
+ input_location to UNKNOWN_LOCATION or BUILTINS_LOCATION. */
+
+class iloc_sentinel
+{
+ location_t saved_loc;
+public:
+ iloc_sentinel (location_t loc): saved_loc (input_location)
+ {
+ if (loc >= RESERVED_LOCATION_COUNT)
+ input_location = loc;
+ }
+ ~iloc_sentinel ()
+ {
+ input_location = saved_loc;
+ }
+};
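+
+/* For illustration, a hedged sketch (LOC is hypothetical): diagnostics
+   in the scope below point at LOC, and input_location is restored when
+   the sentinel dies.
+
+     {
+       iloc_sentinel ils (loc);
+       warning (0, "suspicious construct");
+     }
+*/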
+
+/* RAII sentinel that saves the value of a variable, optionally
+ overrides it right away, and restores its value when the sentinel
+   is destroyed.  */
+
+template <typename T>
+class temp_override
+{
+ T& overridden_variable;
+ T saved_value;
+public:
+ temp_override(T& var) : overridden_variable (var), saved_value (var) {}
+ temp_override(T& var, T overrider)
+ : overridden_variable (var), saved_value (var)
+ {
+ overridden_variable = overrider;
+ }
+ ~temp_override() { overridden_variable = saved_value; }
+};
+
+/* Wrapping a template parameter in type_identity_t hides it from template
+ argument deduction. */
+#if __cpp_lib_type_identity
+using std::type_identity_t;
+#else
+template <typename T>
+struct type_identity { typedef T type; };
+template <typename T>
+using type_identity_t = typename type_identity<T>::type;
+#endif
+
+/* Object generator function for temp_override, so you don't need to write the
+ type of the object as a template argument.
+
+ Use as auto x = make_temp_override (flag); */
+
+template <typename T>
+inline temp_override<T>
+make_temp_override (T& var)
+{
+ return { var };
+}
+
+/* Likewise, but use as auto x = make_temp_override (flag, value); */
+
+template <typename T>
+inline temp_override<T>
+make_temp_override (T& var, type_identity_t<T> overrider)
+{
+ return { var, overrider };
+}
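+
+/* For illustration (SOME_BOOL_VAR is hypothetical): because the
+   overrider parameter is wrapped in type_identity_t, T is deduced from
+   the variable alone, so a mixed-type literal converts rather than
+   causing a deduction conflict.
+
+     bool some_bool_var = true;
+     auto ov = make_temp_override (some_bool_var, 0);  // T = bool
+*/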
+
+/* temp_override for in_consteval_if_p, which can't use make_temp_override
+ because it is a bitfield. */
+
+struct in_consteval_if_p_temp_override {
+ bool save_in_consteval_if_p;
+ in_consteval_if_p_temp_override ()
+ : save_in_consteval_if_p (in_consteval_if_p) {}
+ void reset () { in_consteval_if_p = save_in_consteval_if_p; }
+ ~in_consteval_if_p_temp_override ()
+ { reset (); }
+};
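+
+/* For illustration, a hedged sketch of the intended pattern: toggle the
+   bitfield directly after constructing the override, and let the
+   destructor put it back.
+
+     in_consteval_if_p_temp_override icip;
+     in_consteval_if_p = true;
+*/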
+
+/* The cached class binding level, from the most recently exited
+ class, or NULL if none. */
+
+#define previous_class_level scope_chain->x_previous_class_level
+
+/* A map from local variable declarations in the body of the template
+ presently being instantiated to the corresponding instantiated
+ local variables. */
+
+#define local_specializations scope_chain->x_local_specializations
+
+/* Nonzero if we are parsing the operand of a noexcept operator. */
+
+#define cp_noexcept_operand scope_chain->noexcept_operand
+
+struct named_label_entry; /* Defined in decl.cc. */
+
+struct named_label_hash : ggc_remove <named_label_entry *>
+{
+ typedef named_label_entry *value_type;
+ typedef tree compare_type; /* An identifier. */
+
+ inline static hashval_t hash (value_type);
+ inline static bool equal (const value_type, compare_type);
+
+ static const bool empty_zero_p = true;
+ inline static void mark_empty (value_type &p) {p = NULL;}
+ inline static bool is_empty (value_type p) {return !p;}
+
+ /* Nothing is deletable. Everything is insertable. */
+ inline static bool is_deleted (value_type) { return false; }
+ inline static void mark_deleted (value_type) { gcc_unreachable (); }
+};
+
+/* Global state pertinent to the current function. */
+
+struct GTY(()) language_function {
+ struct c_language_function base;
+
+ tree x_current_class_ptr;
+ tree x_current_class_ref;
+ tree x_eh_spec_block;
+ tree x_in_charge_parm;
+ tree x_vtt_parm;
+ tree x_return_value;
+
+ BOOL_BITFIELD returns_value : 1;
+ BOOL_BITFIELD returns_null : 1;
+ BOOL_BITFIELD returns_abnormally : 1;
+ BOOL_BITFIELD infinite_loop: 1;
+ BOOL_BITFIELD x_in_function_try_handler : 1;
+ BOOL_BITFIELD x_in_base_initializer : 1;
+
+ /* True if this function can throw an exception. */
+ BOOL_BITFIELD can_throw : 1;
+
+ BOOL_BITFIELD invalid_constexpr : 1;
+ BOOL_BITFIELD throwing_cleanup : 1;
+ BOOL_BITFIELD backward_goto : 1;
+
+ hash_table<named_label_hash> *x_named_labels;
+
+ cp_binding_level *bindings;
+
+ /* Tracking possibly infinite loops. This is a vec<tree> only because
+     vec<bool> doesn't work with gengtype.  */
+ vec<tree, va_gc> *infinite_loops;
+};
+
+/* The current C++-specific per-function global variables. */
+
+#define cp_function_chain (cfun->language)
+
+/* When we're processing a member function, current_class_ptr is the
+ PARM_DECL for the `this' pointer. The current_class_ref is an
+ expression for `*this'. */
+
+#define current_class_ptr \
+ (*(cfun && cp_function_chain \
+ ? &cp_function_chain->x_current_class_ptr \
+ : &scope_chain->x_current_class_ptr))
+#define current_class_ref \
+ (*(cfun && cp_function_chain \
+ ? &cp_function_chain->x_current_class_ref \
+ : &scope_chain->x_current_class_ref))
+
+/* The EH_SPEC_BLOCK for the exception-specifiers for the current
+ function, if any. */
+
+#define current_eh_spec_block cp_function_chain->x_eh_spec_block
+
+/* The `__in_chrg' parameter for the current function. Only used for
+ constructors and destructors. */
+
+#define current_in_charge_parm cp_function_chain->x_in_charge_parm
+
+/* The `__vtt_parm' parameter for the current function. Only used for
+ constructors and destructors. */
+
+#define current_vtt_parm cp_function_chain->x_vtt_parm
+
+/* A boolean flag to control whether we need to clean up the return value if a
+   local destructor throws.  Only used in functions that return by value a
+   class with a destructor; constructors and destructors never do, so we can
+   reuse the same field as current_vtt_parm.  */
+
+#define current_retval_sentinel current_vtt_parm
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement that specifies a return value is seen. */
+
+#define current_function_returns_value cp_function_chain->returns_value
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement with no argument is seen. */
+
+#define current_function_returns_null cp_function_chain->returns_null
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a call to a noreturn function is seen. */
+
+#define current_function_returns_abnormally \
+ cp_function_chain->returns_abnormally
+
+/* Set to 0 at beginning of a function definition, set to 1 if we see an
+ obvious infinite loop. This can have false positives and false
+ negatives, so it should only be used as a heuristic. */
+
+#define current_function_infinite_loop cp_function_chain->infinite_loop
+
+/* Nonzero if we are processing a base initializer. Zero elsewhere. */
+#define in_base_initializer cp_function_chain->x_in_base_initializer
+
+#define in_function_try_handler cp_function_chain->x_in_function_try_handler
+
+/* The expression always returned from the current function, or
+   error_mark_node if there is none; used by the automatic named return
+   value optimization.  */
+
+#define current_function_return_value \
+ (cp_function_chain->x_return_value)
+
+/* In parser.cc. */
+extern tree cp_literal_operator_id (const char *);
+
+#define NON_ERROR(NODE) ((NODE) == error_mark_node ? NULL_TREE : (NODE))
+
+/* TRUE if a tree code represents a statement. */
+extern bool statement_code_p[MAX_TREE_CODES];
+
+#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]
+
+enum languages { lang_c, lang_cplusplus };
+
+/* Macros to make error reporting functions' lives easier. */
+#define TYPE_LINKAGE_IDENTIFIER(NODE) \
+ (TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
+#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
+#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))
+
+/* Any kind of anonymous type. */
+#define TYPE_ANON_P(NODE) \
+ (TYPE_LINKAGE_IDENTIFIER (NODE) \
+ && IDENTIFIER_ANON_P (TYPE_LINKAGE_IDENTIFIER (NODE)))
+
+/* Nonzero if NODE, a TYPE, has no name for linkage purposes. */
+#define TYPE_UNNAMED_P(NODE) \
+ (TYPE_ANON_P (NODE) \
+ && !IDENTIFIER_LAMBDA_P (TYPE_LINKAGE_IDENTIFIER (NODE)))
+
+/* The _DECL for this _TYPE. */
+#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
+
+/* Nonzero if T is a type that could resolve to any kind of concrete type
+ at instantiation time. */
+#define WILDCARD_TYPE_P(T) \
+ (TREE_CODE (T) == TEMPLATE_TYPE_PARM \
+ || TREE_CODE (T) == TYPENAME_TYPE \
+ || TREE_CODE (T) == TYPEOF_TYPE \
+ || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
+ || TREE_CODE (T) == DECLTYPE_TYPE \
+ || TREE_CODE (T) == TRAIT_TYPE \
+ || TREE_CODE (T) == DEPENDENT_OPERATOR_TYPE)
+
+/* Nonzero if T is a class (or struct or union) type. Also nonzero
+ for template type parameters, typename types, and instantiated
+ template template parameters. Keep these checks in ascending code
+ order. */
+#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))
+
+/* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or
+ union type. */
+#define SET_CLASS_TYPE_P(T, VAL) \
+ (TYPE_LANG_FLAG_5 (RECORD_OR_UNION_CHECK (T)) = (VAL))
+
+/* Nonzero if T is a class type. Zero for template type parameters,
+ typename types, and so forth. */
+#define CLASS_TYPE_P(T) \
+ (RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))
+
+/* Nonzero if T is a class type but not a union. */
+#define NON_UNION_CLASS_TYPE_P(T) \
+ (TREE_CODE (T) == RECORD_TYPE && TYPE_LANG_FLAG_5 (T))
+
+/* Keep these checks in ascending code order. */
+#define RECORD_OR_UNION_CODE_P(T) \
+ ((T) == RECORD_TYPE || (T) == UNION_TYPE)
+#define OVERLOAD_TYPE_P(T) \
+ (CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)
+
+/* True if this type is dependent. This predicate is only valid if
+ TYPE_DEPENDENT_P_VALID is true. */
+#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)
+
+/* True if dependent_type_p has been called for this type, with the
+ result that TYPE_DEPENDENT_P is valid. */
+#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6 (NODE)
+
+/* Nonzero if this type is const-qualified. */
+#define CP_TYPE_CONST_P(NODE) \
+ ((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)
+
+/* Nonzero if this type is volatile-qualified. */
+#define CP_TYPE_VOLATILE_P(NODE) \
+ ((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)
+
+/* Nonzero if this type is restrict-qualified. */
+#define CP_TYPE_RESTRICT_P(NODE) \
+ ((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)
+
+/* Nonzero if this type is const-qualified, but not
+ volatile-qualified. Other qualifiers are ignored. This macro is
+ used to test whether or not it is OK to bind an rvalue to a
+ reference. */
+#define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \
+ ((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \
+ == TYPE_QUAL_CONST)
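+
+/* A worked example of the mask above: for `const int' the quals are
+ exactly TYPE_QUAL_CONST and the macro yields nonzero; for plain `int'
+ or `const volatile int' the masked value differs from TYPE_QUAL_CONST
+ and the macro yields zero. */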
+
+#define FUNCTION_ARG_CHAIN(NODE) \
+ TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))
+
+/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES
+ which refers to a user-written parameter. */
+#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
+ skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))
+
+/* Similarly, but for DECL_ARGUMENTS. */
+#define FUNCTION_FIRST_USER_PARM(NODE) \
+ skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))
+
+/* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and
+ ambiguity issues. */
+#define DERIVED_FROM_P(PARENT, TYPE) \
+ (lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_none) != NULL_TREE)
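+
+/* Illustrative use, with hypothetical type nodes b_type and d_type:
+ given `struct B {}; struct D : private B {};', DERIVED_FROM_P (b_type,
+ d_type) would be expected to be nonzero even though the base is
+ inaccessible, since ba_any with tf_none ignores access and ambiguity. */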
+
+/* Gives the visibility specification for a class type. */
+#define CLASSTYPE_VISIBILITY(TYPE) \
+ DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE))
+#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \
+ DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE))
+
+struct GTY (()) tree_pair_s {
+ tree purpose;
+ tree value;
+};
+typedef tree_pair_s *tree_pair_p;
+
+/* This structure provides additional information above and beyond
+ what is provided in the ordinary tree_type. In the past, we used it
+ for class types, template parameter types, typename
+ types, and so forth. However, there can be many (tens to hundreds
+ of thousands) of template parameter types in a compilation, and
+ there's no need for this additional information in that case.
+ Therefore, we now use this data structure only for class types.
+
+ In the past, it was thought that there would be relatively few
+ class types. However, in the presence of heavy use of templates,
+ many (i.e., thousands) of classes can easily be generated.
+ Therefore, we should endeavor to keep the size of this structure to
+ a minimum. */
+struct GTY(()) lang_type {
+ unsigned char align;
+
+ unsigned has_type_conversion : 1;
+ unsigned has_copy_ctor : 1;
+ unsigned has_default_ctor : 1;
+ unsigned const_needs_init : 1;
+ unsigned ref_needs_init : 1;
+ unsigned has_const_copy_assign : 1;
+ unsigned use_template : 2;
+
+ unsigned has_mutable : 1;
+ unsigned com_interface : 1;
+ unsigned non_pod_class : 1;
+ unsigned nearly_empty_p : 1;
+ unsigned user_align : 1;
+ unsigned has_copy_assign : 1;
+ unsigned has_new : 1;
+ unsigned has_array_new : 1;
+
+ unsigned gets_delete : 2;
+ unsigned interface_only : 1;
+ unsigned interface_unknown : 1;
+ unsigned contains_empty_class_p : 1;
+ unsigned anon_aggr : 1;
+ unsigned non_zero_init : 1;
+ unsigned empty_p : 1;
+ /* 32 bits allocated. */
+
+ unsigned vec_new_uses_cookie : 1;
+ unsigned declared_class : 1;
+ unsigned diamond_shaped : 1;
+ unsigned repeated_base : 1;
+ unsigned being_defined : 1;
+ unsigned debug_requested : 1;
+ unsigned fields_readonly : 1;
+ unsigned ptrmemfunc_flag : 1;
+
+ unsigned lazy_default_ctor : 1;
+ unsigned lazy_copy_ctor : 1;
+ unsigned lazy_copy_assign : 1;
+ unsigned lazy_destructor : 1;
+ unsigned has_const_copy_ctor : 1;
+ unsigned has_complex_copy_ctor : 1;
+ unsigned has_complex_copy_assign : 1;
+ unsigned non_aggregate : 1;
+
+ unsigned has_complex_dflt : 1;
+ unsigned has_list_ctor : 1;
+ unsigned non_std_layout : 1;
+ unsigned is_literal : 1;
+ unsigned lazy_move_ctor : 1;
+ unsigned lazy_move_assign : 1;
+ unsigned has_complex_move_ctor : 1;
+ unsigned has_complex_move_assign : 1;
+
+ unsigned has_constexpr_ctor : 1;
+ unsigned unique_obj_representations : 1;
+ unsigned unique_obj_representations_set : 1;
+ bool erroneous : 1;
+ bool non_pod_aggregate : 1;
+
+ /* When adding a flag here, consider whether or not it ought to
+ apply to a template instance if it applies to the template. If
+ so, make sure to copy it in instantiate_class_template! */
+
+ /* There are some bits left to fill out a 32-bit word. Keep track
+ of this by updating the size of this bitfield whenever you add or
+ remove a flag. */
+ unsigned dummy : 3;
+
+ tree primary_base;
+ vec<tree_pair_s, va_gc> *vcall_indices;
+ tree vtables;
+ tree typeinfo_var;
+ vec<tree, va_gc> *vbases;
+ tree as_base;
+ vec<tree, va_gc> *pure_virtuals;
+ tree friend_classes;
+ vec<tree, va_gc> * GTY((reorder ("resort_type_member_vec"))) members;
+ tree key_method;
+ tree decl_list;
+ tree befriending_classes;
+ /* In a RECORD_TYPE, information specific to Objective-C++, such
+ as a list of adopted protocols or a pointer to a corresponding
+ @interface. See objc/objc-act.h for details. */
+ tree objc_info;
+ /* FIXME reuse another field? */
+ tree lambda_expr;
+};
+
+/* We used to have a variant type for lang_type. Keep the name of the
+ checking accessor for the sole survivor. */
+#define LANG_TYPE_CLASS_CHECK(NODE) (TYPE_LANG_SPECIFIC (NODE))
+
+/* Nonzero for _CLASSTYPE means that operator delete is defined. */
+#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
+#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)
+#define TYPE_GETS_VEC_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 2)
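+
+/* The two bits of gets_delete cover the scalar and array forms
+ separately. For instance, a class declaring only `void operator
+ delete (void *);' should have TYPE_GETS_REG_DELETE set but not
+ TYPE_GETS_VEC_DELETE; also declaring `void operator delete[]
+ (void *);' would set both. */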
+
+/* Nonzero if `new NODE[x]' should cause the allocation of extra
+ storage to indicate how many array elements are in use. */
+#define TYPE_VEC_NEW_USES_COOKIE(NODE) \
+ (CLASS_TYPE_P (NODE) \
+ && LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)
+
+/* Nonzero means that this _CLASSTYPE node defines ways of converting
+ itself to other types. */
+#define TYPE_HAS_CONVERSION(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->has_type_conversion)
+
+/* Nonzero means that NODE (a class type) has a default constructor --
+ but that it has not yet been declared. */
+#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)
+
+/* Nonzero means that NODE (a class type) has a copy constructor --
+ but that it has not yet been declared. */
+#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)
+
+/* Nonzero means that NODE (a class type) has a move constructor --
+ but that it has not yet been declared. */
+#define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor)
+
+/* Nonzero means that NODE (a class type) has an assignment operator
+ -- but that it has not yet been declared. */
+#define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign)
+
+/* Nonzero means that NODE (a class type) has an assignment operator
+ -- but that it has not yet been declared. */
+#define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign)
+
+/* Nonzero means that NODE (a class type) has a destructor -- but that
+ it has not yet been declared. */
+#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)
+
+/* Nonzero means that NODE (a class type) is final. */
+#define CLASSTYPE_FINAL(NODE) \
+ TYPE_FINAL_P (NODE)
+
+
+/* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */
+#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign)
+
+/* True iff the class type NODE has an "operator =" that takes a
+ parameter of type "const X&". */
+#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_assign)
+
+/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */
+#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_ctor)
+#define TYPE_HAS_CONST_COPY_CTOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor)
+
+/* Nonzero if this class has an X(initializer_list<T>) constructor. */
+#define TYPE_HAS_LIST_CTOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor)
+
+/* Nonzero if this class has a constexpr constructor other than a copy/move
+ constructor. Note that a class can have constexpr constructors for
+ static initialization even if it isn't a literal class. */
+#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)
+
+/* Nonzero if this class defines an overloaded operator new. (An
+ operator new [] doesn't count.) */
+#define TYPE_HAS_NEW_OPERATOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->has_new)
+
+/* Nonzero if this class defines an overloaded operator new[]. */
+#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)
+
+/* Nonzero means that this type is being defined. I.e., the left brace
+ starting the definition of this type has been seen. */
+#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)
+
+/* Nonzero means that this type is either complete or being defined, so we
+ can do lookup in it. */
+#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
+ (COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))
+
+/* Mark bits for repeated base checks. */
+#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))
+
+/* Nonzero if the class NODE has multiple paths to the same (virtual)
+ base object. */
+#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->diamond_shaped)
+
+/* Nonzero if the class NODE has multiple instances of the same base
+ type. */
+#define CLASSTYPE_REPEATED_BASE_P(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->repeated_base)
+
+/* The member function with which the vtable will be emitted:
+ the first noninline non-pure-virtual member function. NULL_TREE
+ if there is no key function or if this is a class template. */
+#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)
+
+/* Vector of members. During definition, it is unordered and only
+ member functions are present. After completion it is sorted and
+ contains both member functions and non-functions. STAT_HACK is
+ involved to preserve the one-slot-per-name invariant. */
+#define CLASSTYPE_MEMBER_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->members)
+
+/* For class templates, this is a TREE_LIST of all member data,
+ functions, types, and friends in the order of declaration.
+ The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
+ and the RECORD_TYPE for the class template otherwise. */
+#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)
+
+/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These
+ are the constructors that take an in-charge parameter. */
+#define CLASSTYPE_CONSTRUCTORS(NODE) \
+ (get_class_binding_direct (NODE, ctor_identifier))
+
+/* A FUNCTION_DECL for the destructor for NODE. This is the
+ destructor that takes an in-charge parameter. If
+ CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
+ until the destructor is created with lazily_declare_fn. */
+#define CLASSTYPE_DESTRUCTOR(NODE) \
+ (get_class_binding_direct (NODE, dtor_identifier))
+
+/* Nonzero if NODE has a primary base class, i.e., a base class with
+ which it shares the virtual function table pointer. */
+#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
+ (CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)
+
+/* If non-NULL, this is the binfo for the primary base class, i.e.,
+ the base class which contains the virtual function table pointer
+ for this class. */
+#define CLASSTYPE_PRIMARY_BINFO(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->primary_base)
+
+/* A vector of BINFOs for the direct and indirect virtual base classes
+ that this type uses in a post-order depth-first left-to-right
+ order. (In other words, these bases appear in the order that they
+ should be initialized.) */
+#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)
+
+/* The type corresponding to NODE when NODE is used as a base class,
+ i.e., NODE without virtual base classes or tail padding. */
+#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)
+
+/* True iff NODE is the CLASSTYPE_AS_BASE version of some type. */
+#define IS_FAKE_BASE_TYPE(NODE) \
+ (TREE_CODE (NODE) == RECORD_TYPE \
+ && TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \
+ && CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))
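+
+/* For intuition: with `struct A { virtual ~A (); int i; char c; };'
+ the complete object may end in tail padding after `c', padding a
+ derived class is allowed to reuse; CLASSTYPE_AS_BASE (A) can
+ therefore be smaller than A itself. (Exact sizes are ABI-dependent;
+ this is only a sketch.) */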
+
+/* These are the size and alignment of the type without its virtual
+ base classes, for when we use this type as a base itself. */
+#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
+#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
+#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
+#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))
+
+/* The alignment of NODE, without its virtual bases, in bytes. */
+#define CLASSTYPE_ALIGN_UNIT(NODE) \
+ (CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)
+
+/* A vec<tree> of virtual functions which cannot be inherited by
+ derived classes. When deriving from this type, the derived
+ class must provide its own definition for each of these functions. */
+#define CLASSTYPE_PURE_VIRTUALS(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)
+
+/* Nonzero means that this type is an abstract class type. */
+#define ABSTRACT_CLASS_TYPE_P(NODE) \
+ (CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS (NODE))
+
+/* Nonzero means that this type has an X() constructor. */
+#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->has_default_ctor)
+
+/* Nonzero means that this type contains a mutable member. */
+#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
+#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))
+
+/* Nonzero means that this class type is not POD for the purpose of layout
+ (as defined in the ABI). This is different from the language's POD. */
+#define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)
+
+/* Nonzero means that this class type is a non-standard-layout class. */
+#define CLASSTYPE_NON_STD_LAYOUT(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout)
+
+/* Nonzero means that this class type does have unique object
+ representations. */
+#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations)
+
+/* Nonzero means that this class type has
+ CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS computed. */
+#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS_SET(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations_set)
+
+/* Nonzero means that this class contains POD types whose default
+ initialization is not a zero initialization (namely, pointers to
+ data members). */
+#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init)
+
+/* Nonzero if this class is "empty" in the sense of the C++ ABI. */
+#define CLASSTYPE_EMPTY_P(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->empty_p)
+
+/* Nonzero if this class is "nearly empty", i.e., contains only a
+ virtual function table pointer. */
+#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p)
+
+/* Nonzero if this class contains an empty subobject. */
+#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p)
+
+/* A list of class types of which this type is a friend. The
+ TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the
+ case of a template friend. */
+#define CLASSTYPE_FRIEND_CLASSES(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->friend_classes)
+
+/* A list of the classes which grant friendship to this class. */
+#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes)
+
+/* The associated LAMBDA_EXPR that made this class. */
+#define CLASSTYPE_LAMBDA_EXPR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr)
+/* The extra mangling scope for this closure type. */
+#define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \
+ (LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE)))
+
+/* Say whether this node was declared as a "class" or a "struct". */
+#define CLASSTYPE_DECLARED_CLASS(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->declared_class)
+
+/* Nonzero if this class has const members
+ which have no specified initialization. */
+#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \
+ (TYPE_LANG_SPECIFIC (NODE) \
+ ? LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init : 0)
+#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init = (VALUE))
+
+/* Nonzero if this class has ref members
+ which have no specified initialization. */
+#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \
+ (TYPE_LANG_SPECIFIC (NODE) \
+ ? LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init : 0)
+#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init = (VALUE))
+
+/* Nonzero if this class is included from a header file which employs
+ `#pragma interface', and it is not included in its implementation file. */
+#define CLASSTYPE_INTERFACE_ONLY(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->interface_only)
+
+/* True if we have already determined whether or not vtables, VTTs,
+ typeinfo, and other similar per-class data should be emitted in
+ this translation unit. This flag does not indicate whether or not
+ these items should be emitted; it only indicates that we know one
+ way or the other. */
+#define CLASSTYPE_INTERFACE_KNOWN(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0)
+/* The opposite of CLASSTYPE_INTERFACE_KNOWN. */
+#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown)
+
+#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
+#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1)
+#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0)
+
+/* Nonzero if a _DECL node requires us to output debug info for this class. */
+#define CLASSTYPE_DEBUG_REQUESTED(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->debug_requested)
+
+/* True if we saw errors while instantiating this class. */
+#define CLASSTYPE_ERRONEOUS(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->erroneous)
+
+/* True if this class is non-layout-POD only because it was not an aggregate
+ before C++14. If we run out of bits in lang_type, this could be replaced
+ with a hash_set only filled in when abi_version_crosses (17). */
+#define CLASSTYPE_NON_POD_AGGREGATE(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->non_pod_aggregate)
+
+/* Additional macros for inheritance information. */
+
+/* Nonzero means that this class is on a path leading to a new vtable. */
+#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE)
+
+/* Nonzero means B (a BINFO) has its own vtable. Any copies will not
+ have this flag set. */
+#define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B))
+
+/* Compare a BINFO_TYPE with another type for equality. For a binfo,
+ this is functionally equivalent to using same_type_p, but
+ measurably faster. At least one of the arguments must be a
+ BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If
+ BINFO_TYPE(T) ever stops being the main variant of the class the
+ binfo is for, this macro must change. */
+#define SAME_BINFO_TYPE_P(A, B) ((A) == (B))
+
+/* Any subobject that needs a new vtable must have a vptr and must not
+ be a non-virtual primary base (since it would then use the vtable from a
+ derived class and never become non-primary.) */
+#define SET_BINFO_NEW_VTABLE_MARKED(B) \
+ (BINFO_NEW_VTABLE_MARKED (B) = 1, \
+ gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \
+ gcc_assert (TYPE_VFIELD (BINFO_TYPE (B))))
+
+/* Nonzero if this binfo is for a dependent base - one that should not
+ be searched. */
+#define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE)
+
+/* Nonzero if this binfo has lost its primary base binfo (because that
+ is a nearly-empty virtual base that has been taken by some other
+ base in the complete hierarchy). */
+#define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE)
+
+/* Nonzero if this BINFO is a primary base class. */
+#define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5 (NODE)
+
+/* A vec<tree_pair_s> of the vcall indices associated with the class
+ NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual
+ function. The VALUE is the index into the virtual table where the
+ vcall offset for that function is stored, when NODE is a virtual
+ base. */
+#define CLASSTYPE_VCALL_INDICES(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices)
+
+/* The various vtables for the class NODE. The primary vtable will be
+ first, followed by the construction vtables and VTT, if any. */
+#define CLASSTYPE_VTABLES(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->vtables)
+
+/* The std::type_info variable representing this class, or NULL if no
+ such variable has been created. This field is only set for the
+ TYPE_MAIN_VARIANT of the class. */
+#define CLASSTYPE_TYPEINFO_VAR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)
+
+/* Accessor macros for the BINFO_VIRTUALS list. */
+
+/* The number of bytes by which to adjust the `this' pointer when
+ calling this virtual function. Subtract this value from the this
+ pointer. Always non-NULL, though it may be constant zero. */
+#define BV_DELTA(NODE) (TREE_PURPOSE (NODE))
+
+/* If non-NULL, the vtable index at which to find the vcall offset
+ when calling this virtual function. Add the value at that vtable
+ index to the this pointer. */
+#define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE))
+
+/* The function to call. */
+#define BV_FN(NODE) (TREE_VALUE (NODE))
+
+/* Whether or not this entry is for a lost primary virtual base. */
+#define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE))
+
+/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
+ this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE
+ will be NULL_TREE to indicate a throw specification of `()', or
+ no exceptions allowed. For a noexcept specification, TREE_VALUE
+ is NULL_TREE and TREE_PURPOSE is the constant-expression. For
+ a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT
+ (for templates) or an OVERLOAD list of functions (for implicitly
+ declared functions). */
+#define TYPE_RAISES_EXCEPTIONS(NODE) \
+ TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE))
+
+/* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()'
+ or noexcept(true). */
+#define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE))
+
+/* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. This is the
+ case for things declared noexcept(true) and, with -fnothrow-opt, for
+ throw() functions. */
+#define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE)
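+
+/* To illustrate the distinction: `void f () noexcept;' satisfies both
+ TYPE_NOTHROW_P and TYPE_NOEXCEPT_P, while `void g () throw ();'
+ satisfies TYPE_NOTHROW_P but counts as noexcept only when
+ -fnothrow-opt is in effect, as described above. */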
+
+/* The binding level associated with the namespace. */
+#define NAMESPACE_LEVEL(NODE) \
+ (LANG_DECL_NS_CHECK (NODE)->level)
+
+/* Discriminator values for lang_decl. */
+
+enum lang_decl_selector
+{
+ lds_min,
+ lds_fn,
+ lds_ns,
+ lds_parm,
+ lds_decomp
+};
+
+/* Flags shared by all forms of DECL_LANG_SPECIFIC.
+
+ Some of the flags live here only to make lang_decl_min/fn smaller. Do
+ not make this struct larger than 32 bits. */
+
+struct GTY(()) lang_decl_base {
+ ENUM_BITFIELD(lang_decl_selector) selector : 3;
+ ENUM_BITFIELD(languages) language : 1;
+ unsigned use_template : 2;
+ unsigned not_really_extern : 1; /* var or fn */
+ unsigned initialized_in_class : 1; /* var or fn */
+
+ unsigned threadprivate_or_deleted_p : 1; /* var or fn */
+ /* anticipated_p is no longer used for anticipated_decls (fn, type
+ or template). It is used as DECL_OMP_PRIVATIZED_MEMBER in
+ var. */
+ unsigned anticipated_p : 1;
+ unsigned friend_or_tls : 1; /* var, fn, type or template */
+ unsigned unknown_bound_p : 1; /* var */
+ unsigned odr_used : 1; /* var or fn */
+ unsigned concept_p : 1; /* applies to vars and functions */
+ unsigned var_declared_inline_p : 1; /* var */
+ unsigned dependent_init_p : 1; /* var */
+
+ /* The following apply to VAR, FUNCTION, TYPE, CONCEPT, & NAMESPACE
+ decls. */
+ unsigned module_purview_p : 1; // in named-module purview
+ unsigned module_attach_p : 1; // attached to named module
+ unsigned module_import_p : 1; /* from an import */
+ unsigned module_entity_p : 1; /* is in the entity array &
+ hash. */
+ /* VAR_DECL or FUNCTION_DECL has keyed decls. */
+ unsigned module_keyed_decls_p : 1;
+
+ /* 12 spare bits. */
+};
+
+/* True for DECL codes which have template info and access. */
+#define LANG_DECL_HAS_MIN(NODE) \
+ (VAR_OR_FUNCTION_DECL_P (NODE) \
+ || TREE_CODE (NODE) == FIELD_DECL \
+ || TREE_CODE (NODE) == CONST_DECL \
+ || TREE_CODE (NODE) == TYPE_DECL \
+ || TREE_CODE (NODE) == TEMPLATE_DECL \
+ || TREE_CODE (NODE) == USING_DECL \
+ || TREE_CODE (NODE) == CONCEPT_DECL)
+
+/* DECL_LANG_SPECIFIC for the above codes. */
+
+struct GTY(()) lang_decl_min {
+ struct lang_decl_base base; /* 32-bits. */
+
+ /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
+ THUNK_ALIAS.
+ In a FUNCTION_DECL for which DECL_THUNK_P does not hold,
+ VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
+ DECL_TEMPLATE_INFO. */
+ tree template_info;
+
+ /* In a DECL_THUNK_P FUNCTION_DECL, this is THUNK_VIRTUAL_OFFSET.
+ In a lambda-capture proxy VAR_DECL, this is DECL_CAPTURED_VARIABLE.
+ In a function-scope TREE_STATIC VAR_DECL or IMPLICIT_TYPEDEF_P TYPE_DECL,
+ this is DECL_DISCRIMINATOR.
+ In a DECL_LOCAL_DECL_P decl, this is the namespace decl it aliases.
+ Otherwise, in a class-scope DECL, this is DECL_ACCESS. */
+ tree access;
+};
+
+/* Additional DECL_LANG_SPECIFIC information for functions. */
+
+struct GTY(()) lang_decl_fn {
+ struct lang_decl_min min;
+
+ /* In an overloaded operator, this is the compressed operator code. */
+ unsigned ovl_op_code : 6;
+ unsigned global_ctor_p : 1;
+ unsigned global_dtor_p : 1;
+
+ unsigned static_function : 1;
+ unsigned pure_virtual : 1;
+ unsigned defaulted_p : 1;
+ unsigned has_in_charge_parm_p : 1;
+ unsigned has_vtt_parm_p : 1;
+ unsigned pending_inline_p : 1;
+ unsigned nonconverting : 1;
+ unsigned thunk_p : 1;
+
+ unsigned this_thunk_p : 1;
+ unsigned omp_declare_reduction_p : 1;
+ unsigned has_dependent_explicit_spec_p : 1;
+ unsigned immediate_fn_p : 1;
+ unsigned maybe_deleted : 1;
+ unsigned coroutine_p : 1;
+ unsigned implicit_constexpr : 1;
+
+ unsigned spare : 9;
+
+ /* 32 bits of padding on a 64-bit host. */
+
+ /* For a non-thunk function decl, this is a tree list of
+ friendly classes. For a thunk function decl, it is the
+ thunked to function decl. */
+ tree befriending_classes;
+
+ /* For a virtual FUNCTION_DECL for which
+ DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both
+ this pointer and result pointer adjusting thunks are
+ chained here. This pointer thunks to return pointer thunks
+ will be chained on the return pointer thunk.
+ For a DECL_CONSTRUCTOR_P FUNCTION_DECL, this is the base from
+ whence we inherit. Otherwise, it is the class in which a
+ (namespace-scope) friend is defined (if any). */
+ tree context;
+
+ union lang_decl_u5
+ {
+ /* In a non-thunk FUNCTION_DECL, this is DECL_CLONED_FUNCTION. */
+ tree GTY ((tag ("0"))) cloned_function;
+
+ /* In a FUNCTION_DECL for which THUNK_P holds this is the
+ THUNK_FIXED_OFFSET. */
+ HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
+ } GTY ((desc ("%1.thunk_p"))) u5;
+
+ union lang_decl_u3
+ {
+ struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info;
+ tree GTY ((tag ("0"))) saved_auto_return_type;
+ } GTY ((desc ("%1.pending_inline_p"))) u;
+
+};
+
+/* DECL_LANG_SPECIFIC for namespaces. */
+
+struct GTY(()) lang_decl_ns {
+ struct lang_decl_base base; /* 32 bits. */
+ cp_binding_level *level;
+
+ /* Inline children. Needs to be va_gc, because of PCH. */
+ vec<tree, va_gc> *inlinees;
+
+ /* Hash table of bound decls. It'd be nice to have this inline, but
+ as the hash_table has a dtor, we can't then put this struct into a
+ union (until moving to C++11). */
+ hash_table<named_decl_hash> *bindings;
+};
+
+/* DECL_LANG_SPECIFIC for parameters. */
+
+struct GTY(()) lang_decl_parm {
+ struct lang_decl_base base; /* 32 bits. */
+ int level;
+ int index;
+};
+
+/* Additional DECL_LANG_SPECIFIC information for structured bindings. */
+
+struct GTY(()) lang_decl_decomp {
+ struct lang_decl_min min;
+ /* The artificial underlying "e" variable of the structured binding
+ variable. */
+ tree base;
+};
+
+/* DECL_LANG_SPECIFIC for all types. It would be nice to just make this a
+ union rather than a struct containing a union as its only field, but
+ tree.h declares it as a struct. */
+
+struct GTY(()) lang_decl {
+ union GTY((desc ("%h.base.selector"))) lang_decl_u {
+ /* No lang_decl node consists of only the base type. */
+ struct lang_decl_base GTY ((default)) base;
+ struct lang_decl_min GTY((tag ("lds_min"))) min;
+ struct lang_decl_fn GTY ((tag ("lds_fn"))) fn;
+ struct lang_decl_ns GTY((tag ("lds_ns"))) ns;
+ struct lang_decl_parm GTY((tag ("lds_parm"))) parm;
+ struct lang_decl_decomp GTY((tag ("lds_decomp"))) decomp;
+ } u;
+};
+
+/* Looks through a template (if present) to find what it declares. */
+#define STRIP_TEMPLATE(NODE) \
+ (TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE)
+
+#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
+
+#define LANG_DECL_MIN_CHECK(NODE) __extension__ \
+({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
+ if (!LANG_DECL_HAS_MIN (NODE)) \
+ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
+ &lt->u.min; })
+
+/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function
+ template, not just on a FUNCTION_DECL. So when looking for things in
+ lang_decl_fn, look down through a TEMPLATE_DECL into its result. */
+#define LANG_DECL_FN_CHECK(NODE) __extension__ \
+({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE)); \
+ if (!DECL_DECLARES_FUNCTION_P (NODE) \
+ || lt->u.base.selector != lds_fn) \
+ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
+ &lt->u.fn; })
+
+#define LANG_DECL_NS_CHECK(NODE) __extension__ \
+({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
+ if (TREE_CODE (NODE) != NAMESPACE_DECL \
+ || lt->u.base.selector != lds_ns) \
+ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
+ &lt->u.ns; })
+
+#define LANG_DECL_PARM_CHECK(NODE) __extension__ \
+({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
+ if (TREE_CODE (NODE) != PARM_DECL \
+ || lt->u.base.selector != lds_parm) \
+ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
+ &lt->u.parm; })
+
+#define LANG_DECL_DECOMP_CHECK(NODE) __extension__ \
+({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
+ if (!VAR_P (NODE) \
+ || lt->u.base.selector != lds_decomp) \
+ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
+ &lt->u.decomp; })
+
+#else
+
+#define LANG_DECL_MIN_CHECK(NODE) \
+ (&DECL_LANG_SPECIFIC (NODE)->u.min)
+
+#define LANG_DECL_FN_CHECK(NODE) \
+ (&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn)
+
+#define LANG_DECL_NS_CHECK(NODE) \
+ (&DECL_LANG_SPECIFIC (NODE)->u.ns)
+
+#define LANG_DECL_PARM_CHECK(NODE) \
+ (&DECL_LANG_SPECIFIC (NODE)->u.parm)
+
+#define LANG_DECL_DECOMP_CHECK(NODE) \
+ (&DECL_LANG_SPECIFIC (NODE)->u.decomp)
+
+#endif /* ENABLE_TREE_CHECKING */
+
+/* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the
+ declaration. Some entities (like a member function in a local
+ class, or a local variable) do not have linkage at all, and this
+ macro should not be used in those cases.
+
+ Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was
+ created by language-independent code, and has C linkage. Most
+ VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but
+ we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */
+#define DECL_LANGUAGE(NODE) \
+ (DECL_LANG_SPECIFIC (NODE) \
+ ? DECL_LANG_SPECIFIC (NODE)->u.base.language \
+ : (TREE_CODE (NODE) == FUNCTION_DECL \
+ ? lang_c : lang_cplusplus))
+
+/* Set the language linkage for NODE to LANGUAGE. */
+#define SET_DECL_LANGUAGE(NODE, LANGUAGE) \
+ (DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE))
+
+/* For FUNCTION_DECLs and TEMPLATE_DECLs: nonzero means that this function
+ is a constructor. */
+#define DECL_CONSTRUCTOR_P(NODE) \
+ DECL_CXX_CONSTRUCTOR_P (STRIP_TEMPLATE (NODE))
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
+ object. */
+#define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == complete_ctor_identifier)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
+ object. */
+#define DECL_BASE_CONSTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == base_ctor_identifier)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
+ specialized in-charge constructor or the specialized not-in-charge
+ constructor. */
+#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == ctor_identifier)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */
+#define DECL_COPY_CONSTRUCTOR_P(NODE) \
+ (DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */
+#define DECL_MOVE_CONSTRUCTOR_P(NODE) \
+ (DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE))
+
+/* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL)
+ is a destructor. */
+#define DECL_DESTRUCTOR_P(NODE) \
+ DECL_CXX_DESTRUCTOR_P (STRIP_TEMPLATE (NODE))
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
+ specialized in-charge destructor, in-charge deleting destructor,
+ or the base destructor. */
+#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == dtor_identifier)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
+ object. */
+#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == complete_dtor_identifier)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
+ object. */
+#define DECL_BASE_DESTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == base_dtor_identifier)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
+ object that deletes the object after it has been destroyed. */
+#define DECL_DELETING_DESTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == deleting_dtor_identifier)
+
+/* Nonzero if either DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P or
+ DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P is true of NODE. */
+#define DECL_MAYBE_IN_CHARGE_CDTOR_P(NODE) \
+ (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (NODE) \
+ || DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (NODE))
+
+/* Nonzero if NODE (a _DECL) is a cloned constructor or
+ destructor. */
+#define DECL_CLONED_FUNCTION_P(NODE) \
+ (DECL_NAME (NODE) \
+ && IDENTIFIER_CDTOR_P (DECL_NAME (NODE)) \
+ && !DECL_MAYBE_IN_CHARGE_CDTOR_P (NODE))
+
+/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
+ cloned. */
+#define DECL_CLONED_FUNCTION(NODE) \
+ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (NODE))->u.fn.u5.cloned_function)
+
+/* Perform an action for each clone of FN, if FN is a function with
+ clones. This macro should be used like:
+
+ FOR_EACH_CLONE (clone, fn)
+ { ... }
+
+ */
+#define FOR_EACH_CLONE(CLONE, FN) \
+ if (!(TREE_CODE (FN) == FUNCTION_DECL \
+ && DECL_MAYBE_IN_CHARGE_CDTOR_P (FN))) \
+ ; \
+ else \
+ for (CLONE = DECL_CHAIN (FN); \
+ CLONE && DECL_CLONED_FUNCTION_P (CLONE); \
+ CLONE = DECL_CHAIN (CLONE))
+
+/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */
+#define DECL_DISCRIMINATOR_P(NODE) \
+ (((TREE_CODE (NODE) == VAR_DECL && TREE_STATIC (NODE)) \
+ || DECL_IMPLICIT_TYPEDEF_P (NODE)) \
+ && DECL_FUNCTION_SCOPE_P (NODE))
+
+/* Discriminator for name mangling. */
+#define DECL_DISCRIMINATOR(NODE) (LANG_DECL_MIN_CHECK (NODE)->access)
+
+/* The index of a user-declared parameter in its function, starting at 1.
+ All artificial parameters will have index 0. */
+#define DECL_PARM_INDEX(NODE) \
+ (LANG_DECL_PARM_CHECK (NODE)->index)
+
+/* The level of a user-declared parameter in its function, starting at 1.
+ A parameter of the function will have level 1; a parameter of the first
+ nested function declarator (i.e. t in void f (void (*p)(T t))) will have
+ level 2. */
+#define DECL_PARM_LEVEL(NODE) \
+ (LANG_DECL_PARM_CHECK (NODE)->level)
+
+/* Nonzero if the VTT parm has been added to NODE. */
+#define DECL_HAS_VTT_PARM_P(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p)
+
+/* Nonzero if NODE is a user-defined conversion operator. */
+#define DECL_CONV_FN_P(NODE) IDENTIFIER_CONV_OP_P (DECL_NAME (NODE))
+
+/* The type to which the conversion operator FN converts. */
+#define DECL_CONV_FN_TYPE(FN) \
+ TREE_TYPE ((gcc_checking_assert (DECL_CONV_FN_P (FN)), DECL_NAME (FN)))
+
+/* Nonzero if NODE, a static data member, was declared in its class as an
+ array of unknown bound. */
+#define VAR_HAD_UNKNOWN_BOUND(NODE) \
+ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
+ ? DECL_LANG_SPECIFIC (NODE)->u.base.unknown_bound_p \
+ : false)
+#define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \
+ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.unknown_bound_p = true)
+
+/* True iff decl NODE is for an overloaded operator. */
+#define DECL_OVERLOADED_OPERATOR_P(NODE) \
+ IDENTIFIER_ANY_OP_P (DECL_NAME (NODE))
+
+/* Nonzero if NODE is an assignment operator (including += and such). */
+#define DECL_ASSIGNMENT_OPERATOR_P(NODE) \
+ IDENTIFIER_ASSIGN_OP_P (DECL_NAME (NODE))
+
+/* NODE is a FUNCTION_DECL for an overloaded operator. Return its
+ compressed (raw) operator code. Note that this is not a TREE_CODE. */
+#define DECL_OVERLOADED_OPERATOR_CODE_RAW(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->ovl_op_code)
+
+/* DECL is an overloaded operator. Test whether it is for tree code
+ CODE (given as a literal constant). */
+#define DECL_OVERLOADED_OPERATOR_IS(DECL, CODE) \
+ (DECL_OVERLOADED_OPERATOR_CODE_RAW (DECL) == OVL_OP_##CODE)
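+
+/* Typical usage sketch, for a hypothetical fndecl: to test for an
+ `operator ()' one would write
+
+ DECL_OVERLOADED_OPERATOR_P (fndecl)
+ && DECL_OVERLOADED_OPERATOR_IS (fndecl, CALL_EXPR)
+
+ where the CODE argument pastes into the OVL_OP_CALL_EXPR code. */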
+
+/* For FUNCTION_DECLs: nonzero means that this function is a
+ constructor or a destructor with an extra in-charge parameter to
+ control whether or not virtual bases are constructed. */
+#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p)
+
+/* Nonzero if DECL is a declaration of __builtin_constant_p. */
+#define DECL_IS_BUILTIN_CONSTANT_P(NODE) \
+ (TREE_CODE (NODE) == FUNCTION_DECL \
+ && DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \
+ && DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P)
+
+/* Nonzero for _DECL means that this decl appears (or will appear)
+ as a member in a RECORD_TYPE or UNION_TYPE node. It is also for
+ detecting circularity in case members are multiply defined. In the
+ case of a VAR_DECL, it means that no definition has been seen, even
+ if an initializer has been. */
+#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE))
+
+/* Nonzero for a VAR_DECL means that the variable's initialization (if
+ any) has been processed. (In general, DECL_INITIALIZED_P is
+ !DECL_EXTERNAL, but static data members may be initialized even if
+ not defined.) */
+#define DECL_INITIALIZED_P(NODE) \
+ (TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE)))
+
+/* Nonzero for a VAR_DECL iff an explicit initializer was provided
+ or a non-trivial constructor is called. */
+#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \
+ (TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE)))
+
+/* Nonzero for a VAR_DECL that was initialized with a
+ constant-expression. */
+#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \
+ (TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE)))
+
+/* Nonzero if the DECL was initialized in the class definition itself,
+ rather than outside the class. This is used for both static member
+ VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */
+#define DECL_INITIALIZED_IN_CLASS_P(DECL) \
+ (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
+ ->u.base.initialized_in_class)
+
+/* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr].
+ Only available for decls with DECL_LANG_SPECIFIC. */
+#define DECL_ODR_USED(DECL) \
+ (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
+ ->u.base.odr_used)
+
+/* Nonzero for FUNCTION_DECL means that this is a friend that is
+ either not pushed into a namespace/looked up in a class (because it
+ is a dependent type, in an uninstantiated template), or it has
+ /only/ been subject to hidden friend injection from one or more
+ befriending classes. Once another decl matches, the flag is
+ cleared. There are requirements on its default parms. */
+#define DECL_UNIQUE_FRIEND_P(NODE) \
+ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (NODE)) \
+ ->u.base.friend_or_tls)
+
+/* True of a TEMPLATE_DECL that is a template class friend. Such
+ decls are not pushed until instantiated (as they may depend on
+ parameters of the befriending class). DECL_CHAIN is the
+ befriending class. */
+#define DECL_UNINSTANTIATED_TEMPLATE_FRIEND_P(NODE) \
+ (DECL_LANG_FLAG_4 (TEMPLATE_DECL_CHECK (NODE)))
+
+/* Nonzero if the thread-local variable was declared with __thread as
+ opposed to thread_local. */
+#define DECL_GNU_TLS_P(NODE) \
+ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
+ && DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls)
+#define SET_DECL_GNU_TLS_P(NODE) \
+ (retrofit_lang_decl (VAR_DECL_CHECK (NODE)), \
+ DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls = true)
+
+/* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */
+#define DECL_BEFRIENDING_CLASSES(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->befriending_classes)
+
+/* Nonzero for FUNCTION_DECL means that this decl is a static
+ member function. */
+#define DECL_STATIC_FUNCTION_P(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->static_function)
+
+/* Nonzero for FUNCTION_DECL means that this decl is a non-static
+ member function. */
+#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \
+ (TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)
+
+/* Nonzero for FUNCTION_DECL means that this decl is a member function
+ (static or non-static). */
+#define DECL_FUNCTION_MEMBER_P(NODE) \
+ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE))
+
+/* Nonzero for FUNCTION_DECL means that this member function
+ has `this' as const X *const. */
+#define DECL_CONST_MEMFUNC_P(NODE) \
+ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
+ && CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \
+ (TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
+
+/* Nonzero for FUNCTION_DECL means that this member function
+ has `this' as volatile X *const. */
+#define DECL_VOLATILE_MEMFUNC_P(NODE) \
+ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
+ && CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \
+ (TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
+
+/* Nonzero for a DECL means that this member is a non-static member. */
+#define DECL_NONSTATIC_MEMBER_P(NODE) \
+ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
+ || TREE_CODE (NODE) == FIELD_DECL)
+
+/* Nonzero for a FIELD_DECL means that this member object type
+ is mutable. */
+#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (FIELD_DECL_CHECK (NODE)))
+
+/* Nonzero for _DECL means that this constructor or conversion function is
+ non-converting. */
+#define DECL_NONCONVERTING_P(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->nonconverting)
+
+/* Nonzero for FUNCTION_DECL means that this member function is a pure
+ virtual function. */
+#define DECL_PURE_VIRTUAL_P(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->pure_virtual)
+
+/* Nonzero for FUNCTION_DECL means that this member function (either
+ a constructor or a conversion function) has an explicit specifier
+ with a value-dependent expression. */
+#define DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->has_dependent_explicit_spec_p)
+
+/* Nonzero for a defaulted FUNCTION_DECL for which we haven't decided yet if
+ it's deleted; we will decide in synthesize_method. */
+#define DECL_MAYBE_DELETED(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->maybe_deleted)
+
+/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an
+ invalid overrider for a function from a base class. Once we have
+ complained about an invalid overrider we avoid complaining about it
+ again. */
+#define DECL_INVALID_OVERRIDER_P(NODE) \
+ (DECL_LANG_FLAG_4 (NODE))
+
+/* True (in a FUNCTION_DECL) if NODE is a function declared with
+ an override virt-specifier */
+#define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE))
+
+/* The thunks associated with NODE, a FUNCTION_DECL. */
+#define DECL_THUNKS(NODE) \
+ (DECL_VIRTUAL_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
+
+/* Set DECL_THUNKS. */
+#define SET_DECL_THUNKS(NODE,THUNKS) \
+ (LANG_DECL_FN_CHECK (NODE)->context = (THUNKS))
+
+/* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this
+ is the constructor it inherits from. */
+#define DECL_INHERITED_CTOR(NODE) \
+ (DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \
+ ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
+
+/* And this is the base that constructor comes from. */
+#define DECL_INHERITED_CTOR_BASE(NODE) \
+ (DECL_INHERITED_CTOR (NODE) \
+ ? DECL_CONTEXT (flag_new_inheriting_ctors \
+ ? strip_inheriting_ctors (NODE) \
+ : DECL_INHERITED_CTOR (NODE)) \
+ : NULL_TREE)
+
+/* Set the inherited base. */
+#define SET_DECL_INHERITED_CTOR(NODE,INH) \
+ (LANG_DECL_FN_CHECK (NODE)->context = (INH))
+
+/* Nonzero if NODE is a thunk, rather than an ordinary function. */
+#define DECL_THUNK_P(NODE) \
+ (TREE_CODE (NODE) == FUNCTION_DECL \
+ && DECL_LANG_SPECIFIC (NODE) \
+ && LANG_DECL_FN_CHECK (NODE)->thunk_p)
+
+/* Set DECL_THUNK_P for node. */
+#define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \
+ (LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \
+ LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING))
+
+/* Nonzero if NODE is a this pointer adjusting thunk. */
+#define DECL_THIS_THUNK_P(NODE) \
+ (DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
+
+/* Nonzero if NODE is a result pointer adjusting thunk. */
+#define DECL_RESULT_THUNK_P(NODE) \
+ (DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
+
+/* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */
+#define DECL_NON_THUNK_FUNCTION_P(NODE) \
+ (TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE))
+
+/* Nonzero if NODE is `extern "C"'. */
+#define DECL_EXTERN_C_P(NODE) \
+ (DECL_LANGUAGE (NODE) == lang_c)
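+
+/* For example, after `extern "C" void f ();' the declaration of f
+ should have DECL_LANGUAGE equal to lang_c, and hence satisfy
+ DECL_EXTERN_C_P; an ordinary C++ function declaration does not. */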
+
+/* Nonzero if NODE is an `extern "C"' function. */
+#define DECL_EXTERN_C_FUNCTION_P(NODE) \
+ (DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE))
+
+/* Nonzero if this variable was declared with the `constinit' specifier. */
+#define DECL_DECLARED_CONSTINIT_P(NODE) \
+ (DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE)))
+
+/* True if DECL is declared 'constexpr'. */
+#define DECL_DECLARED_CONSTEXPR_P(DECL) \
+ DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL)))
+
+/* True if FNDECL is an immediate function. */
+#define DECL_IMMEDIATE_FUNCTION_P(NODE) \
+ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (STRIP_TEMPLATE (NODE))) \
+ ? LANG_DECL_FN_CHECK (NODE)->immediate_fn_p \
+ : false)
+#define SET_DECL_IMMEDIATE_FUNCTION_P(NODE) \
+ (retrofit_lang_decl (FUNCTION_DECL_CHECK (NODE)), \
+ LANG_DECL_FN_CHECK (NODE)->immediate_fn_p = true)
+
+// True if NODE was declared as 'concept'. The flag implies that the
+// declaration is constexpr, that the declaration cannot be specialized or
+// refined, and that the result type must be convertible to bool.
+#define DECL_DECLARED_CONCEPT_P(NODE) \
+ (DECL_LANG_SPECIFIC (NODE)->u.base.concept_p)
+
+/* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a
+ template function. */
+#define DECL_PRETTY_FUNCTION_P(NODE) \
+ (DECL_NAME (NODE) \
+ && id_equal (DECL_NAME (NODE), "__PRETTY_FUNCTION__"))
+
+/* For a DECL, true if it is __func__ or similar. */
+#define DECL_FNAME_P(NODE) \
+ (VAR_P (NODE) && DECL_NAME (NODE) && DECL_ARTIFICIAL (NODE) \
+ && DECL_HAS_VALUE_EXPR_P (NODE) \
+ && (id_equal (DECL_NAME (NODE), "__PRETTY_FUNCTION__") \
+ || id_equal (DECL_NAME (NODE), "__FUNCTION__") \
+ || id_equal (DECL_NAME (NODE), "__func__")))
+
+/* Nonzero if the variable was declared to be thread-local.
+ We need a special C++ version of this test because the middle-end
+ DECL_THREAD_LOCAL_P uses the symtab, so we can't use it for
+ templates. */
+#define CP_DECL_THREAD_LOCAL_P(NODE) \
+ (TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)))
+
+/* The _TYPE context in which this _DECL appears. This field holds the
+ class where a virtual function instance is actually defined. */
+#define DECL_CLASS_CONTEXT(NODE) \
+ (DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE)
+
+/* For a non-member friend function, the class (if any) in which this
+ friend was defined. For example, given:
+
+ struct S { friend void f () { ... } };
+
+ the DECL_FRIEND_CONTEXT for `f' will be `S'. */
+#define DECL_FRIEND_CONTEXT(NODE) \
+ ((DECL_DECLARES_FUNCTION_P (NODE) && !DECL_VIRTUAL_P (NODE) \
+ && !DECL_CONSTRUCTOR_P (NODE)) \
+ ? LANG_DECL_FN_CHECK (NODE)->context \
+ : NULL_TREE)
+
+/* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */
+#define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \
+ (LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT))
+
+#define CP_DECL_CONTEXT(NODE) \
+ (!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace)
+#define CP_TYPE_CONTEXT(NODE) \
+ (!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace)
+#define FROB_CONTEXT(NODE) \
+ ((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE))
+
+/* 1 iff NODE has namespace scope, including the global namespace. */
+#define DECL_NAMESPACE_SCOPE_P(NODE) \
+ (!DECL_TEMPLATE_PARM_P (NODE) \
+ && TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL)
+
+#define TYPE_NAMESPACE_SCOPE_P(NODE) \
+ (TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL)
+
+#define NAMESPACE_SCOPE_P(NODE) \
+ ((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \
+ || (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE)))
+
+/* 1 iff NODE is a class member. */
+#define DECL_CLASS_SCOPE_P(NODE) \
+ (DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE)))
+
+#define TYPE_CLASS_SCOPE_P(NODE) \
+ (TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE)))
+
+/* 1 iff NODE is function-local. */
+#define DECL_FUNCTION_SCOPE_P(NODE) \
+ (DECL_CONTEXT (NODE) \
+ && TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL)
+
+#define TYPE_FUNCTION_SCOPE_P(NODE) \
+ (TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL)
+
+/* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for
+ both the primary typeinfo object and the associated NTBS name. */
+#define DECL_TINFO_P(NODE) \
+ TREE_LANG_FLAG_4 (TREE_CHECK2 (NODE,VAR_DECL,TYPE_DECL))
+
+/* true iff VAR_DECL node NODE is a NTTP object decl. */
+#define DECL_NTTP_OBJECT_P(NODE) \
+ TREE_LANG_FLAG_5 (TREE_CHECK (NODE,VAR_DECL))
+
+/* 1 iff VAR_DECL node NODE is virtual table or VTT. We forward to
+ DECL_VIRTUAL_P from the common code, as that has the semantics we
+ need. But we want a more descriptive name. */
+#define DECL_VTABLE_OR_VTT_P(NODE) DECL_VIRTUAL_P (VAR_DECL_CHECK (NODE))
+
+/* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */
+#define FUNCTION_REF_QUALIFIED(NODE) \
+ TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE))
+
+/* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. */
+#define FUNCTION_RVALUE_QUALIFIED(NODE) \
+ TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE))
+
+/* 1 iff NODE is function-local, but for types. */
+#define LOCAL_CLASS_P(NODE) \
+ (decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE)
+
+/* The nesting depth of a namespace, class, or function. Makes
+ is_ancestor much simpler. Only 8 bits are available. */
+#define SCOPE_DEPTH(NODE) \
+ (NAMESPACE_DECL_CHECK (NODE)->base.u.bits.address_space)
+
+/* Whether the namespace is an inline namespace. */
+#define DECL_NAMESPACE_INLINE_P(NODE) \
+ TREE_LANG_FLAG_0 (NAMESPACE_DECL_CHECK (NODE))
+
+/* In a NAMESPACE_DECL, a vector of inline namespaces. */
+#define DECL_NAMESPACE_INLINEES(NODE) \
+ (LANG_DECL_NS_CHECK (NODE)->inlinees)
+
+/* Pointer to the hash table mapping IDENTIFIERS to DECLS. */
+#define DECL_NAMESPACE_BINDINGS(NODE) \
+ (LANG_DECL_NS_CHECK (NODE)->bindings)
+
+/* In a NAMESPACE_DECL, points to the original namespace if this is
+ a namespace alias. */
+#define DECL_NAMESPACE_ALIAS(NODE) \
+ DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE))
+#define ORIGINAL_NAMESPACE(NODE) \
+ (DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE))
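+
+/* As an example: after `namespace M = N;' the NAMESPACE_DECL for M
+ has DECL_NAMESPACE_ALIAS pointing at N, so ORIGINAL_NAMESPACE (M)
+ yields N, while for a non-alias namespace the macro simply returns
+ its argument. */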
+
+/* Nonzero if NODE is the std namespace. */
+#define DECL_NAMESPACE_STD_P(NODE) \
+ ((NODE) == std_node)
+
+/* In a TREE_LIST in an attribute list, indicates that the attribute
+ must be applied at instantiation time. */
+#define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
+
+/* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag
+ was inherited from a template parameter, not explicitly indicated. */
+#define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
+
+/* In a TREE_LIST for a parameter-declaration-list, indicates that all the
+ parameters in the list have declarators enclosed in (). */
+#define PARENTHESIZED_LIST_P(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
+
+/* Nonzero if this is a using decl for a dependent scope. */
+#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))
+
+/* The scope named in a using decl. */
+#define USING_DECL_SCOPE(NODE) DECL_RESULT_FLD (USING_DECL_CHECK (NODE))
+
+/* The decls named by a using decl. */
+#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))
+
+/* Nonzero if the using decl refers to a dependent type. */
+#define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE))
+
+/* True if member using decl NODE refers to a non-inherited NODE. */
+#define USING_DECL_UNRELATED_P(NODE) DECL_LANG_FLAG_2 (USING_DECL_CHECK (NODE))
+
+/* True iff the CONST_DECL is a class-scope clone from C++20 using enum,
+ created by handle_using_decl. */
+#define CONST_DECL_USING_P(NODE) \
+ (TREE_CODE (NODE) == CONST_DECL \
+ && TREE_TYPE (NODE) \
+ && TREE_CODE (TREE_TYPE (NODE)) == ENUMERAL_TYPE \
+ && DECL_CONTEXT (NODE) != TREE_TYPE (NODE))
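+
+/* Sketch of the case above: given `enum class E { e }; struct S {
+ using enum E; };', the member S::e is a CONST_DECL whose TREE_TYPE is
+ the ENUMERAL_TYPE E but whose DECL_CONTEXT is S, so CONST_DECL_USING_P
+ would be expected to hold for it. */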
+
+/* In a FUNCTION_DECL, this is nonzero if this function was defined in
+ the class definition. We have saved away the text of the function,
+ but have not yet processed it. */
+#define DECL_PENDING_INLINE_P(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->pending_inline_p)
+
+/* If DECL_PENDING_INLINE_P holds, this is the saved text of the
+ function. */
+#define DECL_PENDING_INLINE_INFO(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info)
+
+/* Nonzero for TYPE_DECL means that it was written 'using name = type'. */
+#define TYPE_DECL_ALIAS_P(NODE) \
+ DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE))
+
+/* Nonzero for TEMPLATE_DECL means that it is a 'complex' alias template. */
+#define TEMPLATE_DECL_COMPLEX_ALIAS_P(NODE) \
+ DECL_LANG_FLAG_2 (TEMPLATE_DECL_CHECK (NODE))
+
+/* Nonzero for a type which is an alias for another type; i.e., a type
+ whose declaration was written 'using name-of-type = another-type'. */
+#define TYPE_ALIAS_P(NODE) \
+ (TYPE_P (NODE) \
+ && TYPE_NAME (NODE) \
+ && TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \
+ && TYPE_DECL_ALIAS_P (TYPE_NAME (NODE)))
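+
+/* For instance, after `using handle = void *;' the TYPE_DECL for
+ `handle' has TYPE_DECL_ALIAS_P set, and the type it declares would be
+ expected to satisfy TYPE_ALIAS_P via the chain of checks above. */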
+
+/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL, TEMPLATE_DECL,
+ or CONCEPT_DECL, the entity is either a template specialization (if
+ DECL_USE_TEMPLATE is nonzero) or the abstract instance of the
+ template itself.
+
+ In either case, DECL_TEMPLATE_INFO is a TEMPLATE_INFO, whose
+ TI_TEMPLATE is the TEMPLATE_DECL of which this entity is a
+ specialization or abstract instance. The TI_ARGS is the
+ template arguments used to specialize the template.
+
+ Consider:
+
+ template <typename T> struct S { friend void f(T) {} };
+
+ In this case, S<int>::f is, from the point of view of the compiler,
+ an instantiation of a template -- but, from the point of view of
+ the language, each instantiation of S results in a wholly unrelated
+ global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f
+ will be non-NULL, but DECL_USE_TEMPLATE will be zero.
+
+ In a friend declaration, TI_TEMPLATE can be an overload set or
+ an identifier. */
+#define DECL_TEMPLATE_INFO(NODE) \
+ (DECL_LANG_SPECIFIC (TEMPLATE_INFO_DECL_CHECK (NODE)) \
+ ->u.min.template_info)
+
+/* For a lambda capture proxy, its captured variable. */
+#define DECL_CAPTURED_VARIABLE(NODE) \
+ (LANG_DECL_MIN_CHECK (NODE)->access)
+
+/* For a VAR_DECL, indicates that the variable is actually a
+ non-static data member of anonymous union that has been promoted to
+ variable status. */
+#define DECL_ANON_UNION_VAR_P(NODE) \
+ (DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)))
+
+/* Template information for a RECORD_TYPE or UNION_TYPE. */
+#define CLASSTYPE_TEMPLATE_INFO(NODE) \
+ (TYPE_LANG_SLOT_1 (RECORD_OR_UNION_CHECK (NODE)))
+
+/* Template information for a template template parameter. */
+#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \
+ (TYPE_LANG_SLOT_1 (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)))
+
+/* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or
+ BOUND_TEMPLATE_TEMPLATE_PARM type. This ignores any alias
+ templateness of NODE. It'd be nice if this could unconditionally
+ access the slot, rather than return NULL if given a
+ non-templatable type. */
+#define TYPE_TEMPLATE_INFO(NODE) \
+ (TREE_CODE (NODE) == ENUMERAL_TYPE \
+ || TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM \
+ || RECORD_OR_UNION_TYPE_P (NODE) \
+ ? TYPE_LANG_SLOT_1 (NODE) : NULL_TREE)
+
+/* Template information (if any) for an alias type. */
+#define TYPE_ALIAS_TEMPLATE_INFO(NODE) \
+ (DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \
+ ? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \
+ : NULL_TREE)
+
+/* If NODE is a type alias, this accessor returns the template info
+ for the alias template (if any). Otherwise behave as
+ TYPE_TEMPLATE_INFO. */
+#define TYPE_TEMPLATE_INFO_MAYBE_ALIAS(NODE) \
+ (typedef_variant_p (NODE) \
+ ? TYPE_ALIAS_TEMPLATE_INFO (NODE) \
+ : TYPE_TEMPLATE_INFO (NODE))
+
+/* Set the template information for a non-alias ENUMERAL_, RECORD_,
+   or UNION_TYPE to VAL. Aliases are dealt with separately. */
+#define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \
+ (TREE_CODE (NODE) == ENUMERAL_TYPE \
+ || (CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \
+ ? (TYPE_LANG_SLOT_1 (NODE) = (VAL)) \
+ : (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL))) \
+
+#define TI_TEMPLATE(NODE) \
+ ((struct tree_template_info*)TEMPLATE_INFO_CHECK (NODE))->tmpl
+#define TI_ARGS(NODE) \
+ ((struct tree_template_info*)TEMPLATE_INFO_CHECK (NODE))->args
+#define TI_PENDING_TEMPLATE_FLAG(NODE) \
+ TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE))
+/* For a given TREE_VEC containing a template argument list,
+ this property contains the number of arguments that are not
+ defaulted. */
+#define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
+ TREE_CHAIN (TREE_VEC_CHECK (NODE))
+
+/* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT
+ property. */
+#define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \
+ NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE)
+#if CHECKING_P
+#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
+ int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE))
+#else
+#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
+ NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \
+ ? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \
+ : TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE))
+#endif
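+
+/* An editorial sketch of the accessors above, assuming ARGS is a
+   single-level TREE_VEC of template arguments built by the template
+   machinery (not from the original header):
+
+     SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (args, 2);
+     int n = GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (args);
+
+   after which n is 2.  */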
+
+/* The list of access checks that were deferred during parsing
+ which need to be performed at template instantiation time.
+
+ FIXME this should be associated with the TEMPLATE_DECL, not the
+ TEMPLATE_INFO. */
+#define TI_DEFERRED_ACCESS_CHECKS(NODE) \
+ ((struct tree_template_info*)TEMPLATE_INFO_CHECK \
+ (NODE))->deferred_access_checks
+
+/* We use TREE_VECs to hold template arguments. If there is only one
+ level of template arguments, then the TREE_VEC contains the
+ arguments directly. If there is more than one level of template
+ arguments, then each entry in the TREE_VEC is itself a TREE_VEC,
+ containing the template arguments for a single level. The first
+ entry in the outer TREE_VEC is the outermost level of template
+ parameters; the last is the innermost.
+
+ It is incorrect to ever form a template argument vector containing
+ only one level of arguments, but which is a TREE_VEC containing as
+ its only entry the TREE_VEC for that level.
+
+ For each TREE_VEC containing the template arguments for a single
+   level, it's possible to get or set the number of non-defaulted
+ template arguments by using the accessor macros
+ GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or
+ SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT. */
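+
+/* An editorial illustration of the shape described above: for
+
+     template <class T> struct A { template <class U> void f (U); };
+
+   the arguments for A<int>::f<double> form the two-level vector
+   {{int}, {double}}, while the arguments for A<int> itself are the
+   single, directly stored level {int}.  */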
+
+/* Nonzero if the template argument vector is actually a vector of vectors,
+ rather than just a vector. */
+#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \
+ (NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \
+ && TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC)
+
+/* The depth of a template argument vector. When called directly by
+ the parser, we use a TREE_LIST rather than a TREE_VEC to represent
+ template arguments. In that case, there is only one level of template
+ arguments. We may even see NULL_TREE if there are 0 levels of
+ template arguments, as in cp_parser_requires_expression. */
+#define TMPL_ARGS_DEPTH(NODE) \
+ ((NODE) == NULL_TREE ? 0 \
+ : TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) \
+ : 1)
+
+/* The LEVELth level of the template ARGS. The outermost level of
+ args is level 1, not level 0. */
+#define TMPL_ARGS_LEVEL(ARGS, LEVEL) \
+ (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \
+ ? TREE_VEC_ELT (ARGS, (LEVEL) - 1) \
+ : (gcc_checking_assert ((LEVEL) == 1), (ARGS)))
+
+/* Set the LEVELth level of the template ARGS to VAL. This macro does
+ not work with single-level argument vectors. */
+#define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \
+ (TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL))
+
+/* Accesses the IDXth parameter in the LEVELth level of the ARGS. */
+#define TMPL_ARG(ARGS, LEVEL, IDX) \
+ (TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX))
+
+/* Given a single level of template arguments in NODE, return the
+ number of arguments. */
+#define NUM_TMPL_ARGS(NODE) \
+ (TREE_VEC_LENGTH (NODE))
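+
+/* An editorial sketch of walking an argument vector with the accessors
+   above; `process' is a hypothetical helper, not a GCC function:
+
+     for (int lvl = 1; lvl <= TMPL_ARGS_DEPTH (args); ++lvl)
+       {
+         tree level = TMPL_ARGS_LEVEL (args, lvl);
+         for (int i = 0; i < NUM_TMPL_ARGS (level); ++i)
+           process (TMPL_ARG (args, lvl, i));
+       }  */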
+
+/* Returns the innermost level of template arguments in ARGS. */
+#define INNERMOST_TEMPLATE_ARGS(NODE) \
+ (get_innermost_template_args ((NODE), 1))
+
+/* The number of levels of template parameters given by NODE. */
+#define TMPL_PARMS_DEPTH(NODE) \
+ ((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE)))
+
+/* The TEMPLATE_DECL instantiated or specialized by NODE. This
+ TEMPLATE_DECL will be the immediate parent, not the most general
+ template. For example, in:
+
+ template <class T> struct S { template <class U> void f(U); }
+
+ the FUNCTION_DECL for S<int>::f<double> will have, as its
+ DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'.
+
+ As a special case, for a member friend template of a template
+ class, this value will not be a TEMPLATE_DECL, but rather an
+ IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
+ any explicit template arguments provided. For example, in:
+
+ template <class T> struct S { friend void f<int>(int, double); }
+
+ the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
+ DECL_TI_ARGS will be {int}.
+
+ For a FIELD_DECL with a non-static data member initializer, this value
+ is the FIELD_DECL it was instantiated from. */
+#define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))
+
+/* The template arguments used to obtain this decl from the most
+ general form of DECL_TI_TEMPLATE. For the example given for
+ DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}. These
+ are always the full set of arguments required to instantiate this
+ declaration from the most general template specialized here. */
+#define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE))
+
+/* The TEMPLATE_DECL associated with NODE, a class type. Even if NODE
+ will be generated from a partial specialization, the TEMPLATE_DECL
+ referred to here will be the original template. For example,
+ given:
+
+ template <typename T> struct S {};
+ template <typename T> struct S<T*> {};
+
+   the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not S<T*>.
+
+ For a member class template, CLASSTYPE_TI_TEMPLATE always refers to the
+ partial instantiation rather than the primary template. CLASSTYPE_TI_ARGS
+ are for the primary template if the partial instantiation isn't
+ specialized, or for the explicit specialization if it is, e.g.
+
+ template <class T> class C { template <class U> class D; }
+ template <> template <class U> class C<int>::D; */
+#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
+#define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))
+
+/* For a template instantiation TYPE, returns the TYPE corresponding
+ to the primary template. Otherwise returns TYPE itself. */
+#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \
+ ((CLASSTYPE_USE_TEMPLATE ((TYPE)) \
+ && !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \
+ ? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \
+ (CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
+ : (TYPE))
+
+/* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs. */
+#define TYPE_TI_TEMPLATE(NODE) \
+ (TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))
+
+/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */
+#define TYPE_TI_ARGS(NODE) \
+ (TI_ARGS (TYPE_TEMPLATE_INFO (NODE)))
+
+#define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE)
+
+/* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the
+ sense of [temp.mem]. */
+#define DECL_MEMBER_TEMPLATE_P(NODE) \
+ (DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE)))
+
+/* Nonzero if the NODE corresponds to the template parameters for a
+ member template, whose inline definition is being processed after
+ the class definition is complete. */
+#define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE)
+
+/* Determine if a declaration (PARM_DECL or FIELD_DECL) is a pack. */
+#define DECL_PACK_P(NODE) \
+ (DECL_P (NODE) && PACK_EXPANSION_P (TREE_TYPE (NODE)))
+
+/* Determines if NODE is an expansion of one or more parameter packs,
+ e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */
+#define PACK_EXPANSION_P(NODE) \
+ (TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
+ || TREE_CODE (NODE) == EXPR_PACK_EXPANSION)
+
+#define PACK_EXPANSION_CHECK(NODE) \
+ TREE_CHECK2 (NODE, TYPE_PACK_EXPANSION, EXPR_PACK_EXPANSION)
+
+/* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or
+ EXPR_PACK_EXPANSION. */
+#define PACK_EXPANSION_PATTERN(NODE) \
+ (TREE_CODE (PACK_EXPANSION_CHECK (NODE)) == TYPE_PACK_EXPANSION \
+ ? TREE_TYPE (NODE) : TREE_OPERAND (NODE, 0))
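+
+/* An editorial illustration, assuming suitable declarations of g and h:
+
+     template <class... Ts> void f (Ts... xs) { g (h (xs)...); }
+
+   `h (xs)...' is an EXPR_PACK_EXPANSION whose PACK_EXPANSION_PATTERN
+   is `h (xs)' and whose parameter pack is `xs'.  */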
+
+/* The list of parameter packs used in the PACK_EXPANSION_* node. The
+ TREE_VALUE of each TREE_LIST contains the parameter packs. */
+#define PACK_EXPANSION_PARAMETER_PACKS(NODE) \
+ *(TREE_CODE (PACK_EXPANSION_CHECK (NODE)) == EXPR_PACK_EXPANSION \
+ ? &TREE_OPERAND (NODE, 1) \
+ : &TYPE_MIN_VALUE_RAW (TYPE_PACK_EXPANSION_CHECK (NODE)))
+
+/* Any additional template args to be applied when substituting into
+ the pattern, set by tsubst_pack_expansion for partial instantiations.
+ If this is a TREE_LIST, the TREE_VALUE of the first element is the
+ usual template argument TREE_VEC, and the TREE_PURPOSE of later elements
+ are enclosing functions that provided function parameter packs we'll need
+ to map appropriately. */
+#define PACK_EXPANSION_EXTRA_ARGS(NODE) \
+ *(TREE_CODE (PACK_EXPANSION_CHECK (NODE)) == TYPE_PACK_EXPANSION \
+ ? &TYPE_MAX_VALUE_RAW (NODE) \
+ : &TREE_OPERAND ((NODE), 2))
+
+/* True iff this pack expansion is within a function context. */
+#define PACK_EXPANSION_LOCAL_P(NODE) \
+ TREE_LANG_FLAG_0 (PACK_EXPANSION_CHECK (NODE))
+
+/* True iff this pack expansion is for sizeof.... */
+#define PACK_EXPANSION_SIZEOF_P(NODE) \
+ TREE_LANG_FLAG_1 (PACK_EXPANSION_CHECK (NODE))
+
+/* True iff this pack expansion is for auto... in lambda init-capture. */
+#define PACK_EXPANSION_AUTO_P(NODE) \
+ TREE_LANG_FLAG_2 (PACK_EXPANSION_CHECK (NODE))
+
+/* True if we must use PACK_EXPANSION_EXTRA_ARGS and avoid partial
+ instantiation of this pack expansion. */
+#define PACK_EXPANSION_FORCE_EXTRA_ARGS_P(NODE) \
+ TREE_LANG_FLAG_3 (PACK_EXPANSION_CHECK (NODE))
+
+/* True iff the wildcard can match a template parameter pack. */
+#define WILDCARD_PACK_P(NODE) TREE_LANG_FLAG_0 (NODE)
+
+/* Determine if this is an argument pack. */
+#define ARGUMENT_PACK_P(NODE) \
+ (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \
+ || TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK)
+
+#define ARGUMENT_PACK_CHECK(NODE) \
+ TREE_CHECK2 (NODE, TYPE_ARGUMENT_PACK, NONTYPE_ARGUMENT_PACK)
+
+/* The arguments stored in an argument pack. Arguments are stored in a
+ TREE_VEC, which may have length zero. */
+#define ARGUMENT_PACK_ARGS(NODE) \
+ (TREE_CODE (ARGUMENT_PACK_CHECK (NODE)) == TYPE_ARGUMENT_PACK \
+ ? TREE_TYPE (NODE) : TREE_OPERAND (NODE, 0))
+
+/* Whether the argument pack is "incomplete", meaning that more
+ arguments can still be deduced. Incomplete argument packs are only
+ used when the user has provided an explicit template argument list
+ for a variadic function template. Some of the explicit template
+ arguments will be placed into the beginning of the argument pack,
+ but additional arguments might still be deduced. */
+#define ARGUMENT_PACK_INCOMPLETE_P(NODE) \
+ TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE))
+
+/* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template
+ arguments used to fill this pack. */
+#define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \
+ TREE_TYPE (ARGUMENT_PACK_ARGS (NODE))
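+
+/* An editorial illustration of an incomplete argument pack:
+
+     template <class... Ts> void f (Ts...);
+     f<int, long> (1, 2L, 3.0);
+
+   The pack for Ts starts as the incomplete {int, long}; deduction then
+   appends double.  */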
+
+/* In an ARGUMENT_PACK_SELECT, the argument pack from which an
+ argument will be selected. */
+#define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \
+ (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack)
+
+/* In an ARGUMENT_PACK_SELECT, the index of the argument we want to
+ select. */
+#define ARGUMENT_PACK_SELECT_INDEX(NODE) \
+ (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index)
+
+#define FOLD_EXPR_CHECK(NODE) \
+ TREE_CHECK4 (NODE, UNARY_LEFT_FOLD_EXPR, UNARY_RIGHT_FOLD_EXPR, \
+ BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)
+
+#define BINARY_FOLD_EXPR_CHECK(NODE) \
+ TREE_CHECK2 (NODE, BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)
+
+/* True if NODE is a UNARY_FOLD_EXPR or a BINARY_FOLD_EXPR. */
+#define FOLD_EXPR_P(NODE) \
+ (TREE_CODE (NODE) == UNARY_LEFT_FOLD_EXPR \
+ || TREE_CODE (NODE) == UNARY_RIGHT_FOLD_EXPR \
+ || TREE_CODE (NODE) == BINARY_LEFT_FOLD_EXPR \
+ || TREE_CODE (NODE) == BINARY_RIGHT_FOLD_EXPR)
+
+/* True when NODE is a fold over a compound assignment operator. */
+#define FOLD_EXPR_MODIFY_P(NODE) \
+ TREE_LANG_FLAG_0 (FOLD_EXPR_CHECK (NODE))
+
+/* An INTEGER_CST containing the tree code of the folded operator. */
+#define FOLD_EXPR_OP_RAW(NODE) \
+ TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 0)
+
+/* The tree code of the folded operator. */
+#define FOLD_EXPR_OP(NODE) \
+ ((enum tree_code) TREE_INT_CST_LOW (FOLD_EXPR_OP_RAW (NODE)))
+
+/* The expression containing an unexpanded parameter pack. */
+#define FOLD_EXPR_PACK(NODE) \
+ TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 1)
+
+/* In a binary fold expression, the argument with no unexpanded
+ parameter packs. */
+#define FOLD_EXPR_INIT(NODE) \
+ TREE_OPERAND (BINARY_FOLD_EXPR_CHECK (NODE), 2)
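+
+/* An editorial illustration:
+
+     template <class... Ts> auto sum (Ts... xs) { return (xs + ... + 0); }
+
+   The return operand is a BINARY_RIGHT_FOLD_EXPR whose FOLD_EXPR_OP is
+   PLUS_EXPR, whose FOLD_EXPR_PACK is `xs', and whose FOLD_EXPR_INIT is
+   `0'.  */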
+
+/* In a FUNCTION_DECL, the saved auto-return pattern. */
+#define DECL_SAVED_AUTO_RETURN_TYPE(NODE) \
+ (LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \
+ ->u.saved_auto_return_type)
+
+/* True if NODE is an implicit INDIRECT_REF from convert_from_reference. */
+#define REFERENCE_REF_P(NODE) \
+ (INDIRECT_REF_P (NODE) \
+ && TREE_TYPE (TREE_OPERAND (NODE, 0)) \
+ && TYPE_REF_P (TREE_TYPE (TREE_OPERAND ((NODE), 0))))
+
+/* True iff this represents an lvalue being treated as an rvalue during return
+ or throw as per [class.copy.elision]. */
+#define IMPLICIT_RVALUE_P(NODE) \
+ TREE_LANG_FLAG_3 (TREE_CHECK2 ((NODE), NON_LVALUE_EXPR, STATIC_CAST_EXPR))
+
+#define NEW_EXPR_USE_GLOBAL(NODE) \
+ TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE))
+#define DELETE_EXPR_USE_GLOBAL(NODE) \
+ TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE))
+#define DELETE_EXPR_USE_VEC(NODE) \
+ TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE))
+
+#define CALL_OR_AGGR_INIT_CHECK(NODE) \
+ TREE_CHECK2 ((NODE), CALL_EXPR, AGGR_INIT_EXPR)
+
+/* In a CALL_EXPR appearing in a template, true if Koenig lookup
+ should be performed at instantiation time. */
+#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE))
+
+/* True if the arguments to NODE should be evaluated in left-to-right
+ order regardless of PUSH_ARGS_REVERSED. */
+#define CALL_EXPR_ORDERED_ARGS(NODE) \
+ TREE_LANG_FLAG_3 (CALL_OR_AGGR_INIT_CHECK (NODE))
+
+/* True if the arguments to NODE should be evaluated in right-to-left
+ order regardless of PUSH_ARGS_REVERSED. */
+#define CALL_EXPR_REVERSE_ARGS(NODE) \
+ TREE_LANG_FLAG_5 (CALL_OR_AGGR_INIT_CHECK (NODE))
+
+/* True if CALL_EXPR was written as an operator expression, not a function
+ call. */
+#define CALL_EXPR_OPERATOR_SYNTAX(NODE) \
+ TREE_LANG_FLAG_6 (CALL_OR_AGGR_INIT_CHECK (NODE))
+
+/* A TREE_LIST containing the result of phase 1 name lookup of the operator
+ overloads that are pertinent to the dependent operator expression whose
+ type is NODE. Each TREE_PURPOSE is an IDENTIFIER_NODE and TREE_VALUE is
+ the corresponding (possibly empty) lookup result. The TREE_TYPE of the
+ first TREE_LIST node points back to NODE. */
+#define DEPENDENT_OPERATOR_TYPE_SAVED_LOOKUPS(NODE) \
+ TYPE_VALUES_RAW (DEPENDENT_OPERATOR_TYPE_CHECK (NODE))
+
+/* Guarded helper for the above accessor macro that takes a (templated)
+ operator expression instead of the type thereof. */
+inline tree
+templated_operator_saved_lookups (tree t)
+{
+ tree type = TREE_TYPE (EXPR_CHECK (t));
+ if (type && TREE_CODE (type) == DEPENDENT_OPERATOR_TYPE)
+ return DEPENDENT_OPERATOR_TYPE_SAVED_LOOKUPS (type);
+ else
+ return NULL_TREE;
+}
+
+/* Indicates whether a string literal has been parenthesized. Such
+ usages are disallowed in certain circumstances. */
+
+#define PAREN_STRING_LITERAL_P(NODE) \
+ TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE))
+
+/* Indicates whether a COMPONENT_REF or a SCOPE_REF has been parenthesized, an
+ INDIRECT_REF comes from parenthesizing a _DECL, or a PAREN_EXPR identifies a
+ parenthesized initializer relevant for decltype(auto). Currently only set
+ some of the time in C++14 mode. */
+
+#define REF_PARENTHESIZED_P(NODE) \
+ TREE_LANG_FLAG_2 (TREE_CHECK5 ((NODE), COMPONENT_REF, INDIRECT_REF, SCOPE_REF, VIEW_CONVERT_EXPR, PAREN_EXPR))
+
+/* Nonzero if this AGGR_INIT_EXPR provides for initialization via a
+ constructor call, rather than an ordinary function call. */
+#define AGGR_INIT_VIA_CTOR_P(NODE) \
+ TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE))
+
+/* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize
+ the object. */
+#define AGGR_INIT_ZERO_FIRST(NODE) \
+ TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE))
+
+/* Nonzero means that the call is the jump from a thunk to the
+ thunked-to function. */
+#define AGGR_INIT_FROM_THUNK_P(NODE) \
+ (AGGR_INIT_EXPR_CHECK (NODE)->base.protected_flag)
+
+/* AGGR_INIT_EXPR accessors. These are equivalent to the CALL_EXPR
+ accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of
+ CALL_EXPR_STATIC_CHAIN). */
+
+#define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1)
+#define AGGR_INIT_EXPR_SLOT(NODE) \
+ TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2)
+#define AGGR_INIT_EXPR_ARG(NODE, I) \
+ TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3)
+#define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3)
+
+/* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE.
+ We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if
+ the argument count is zero when checking is enabled. Instead, do
+   the pointer arithmetic to advance past the 3 fixed operands in an
+ AGGR_INIT_EXPR. That produces a valid pointer to just past the end of
+ the operand array, even if it's not valid to dereference it. */
+#define AGGR_INIT_EXPR_ARGP(NODE) \
+ (&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3)
+
+/* Abstract iterators for AGGR_INIT_EXPRs. */
+
+/* Structure containing iterator state. */
+struct aggr_init_expr_arg_iterator {
+ tree t; /* the aggr_init_expr */
+ int n; /* argument count */
+ int i; /* next argument index */
+};
+
+/* Initialize the abstract argument list iterator object ITER with the
+ arguments from AGGR_INIT_EXPR node EXP. */
+inline void
+init_aggr_init_expr_arg_iterator (tree exp,
+ aggr_init_expr_arg_iterator *iter)
+{
+ iter->t = exp;
+ iter->n = aggr_init_expr_nargs (exp);
+ iter->i = 0;
+}
+
+/* Return the next argument from abstract argument list iterator object ITER,
+ and advance its state. Return NULL_TREE if there are no more arguments. */
+inline tree
+next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter)
+{
+ tree result;
+ if (iter->i >= iter->n)
+ return NULL_TREE;
+ result = AGGR_INIT_EXPR_ARG (iter->t, iter->i);
+ iter->i++;
+ return result;
+}
+
+/* Initialize the abstract argument list iterator object ITER, then advance
+   past and return the first argument. Useful in for loops, e.g.
+ for (arg = first_aggr_init_expr_arg (exp, &iter); arg;
+ arg = next_aggr_init_expr_arg (&iter)) */
+inline tree
+first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter)
+{
+ init_aggr_init_expr_arg_iterator (exp, iter);
+ return next_aggr_init_expr_arg (iter);
+}
+
+/* Test whether there are more arguments in abstract argument list iterator
+ ITER, without changing its state. */
+inline bool
+more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
+{
+ return (iter->i < iter->n);
+}
+
+/* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable
+ ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */
+#define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \
+ for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \
+ (arg) = next_aggr_init_expr_arg (&(iter)))
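+
+/* An editorial sketch of the iterator in use; `process' is a
+   hypothetical helper and CALL is assumed to be an AGGR_INIT_EXPR:
+
+     tree arg;
+     aggr_init_expr_arg_iterator iter;
+     FOR_EACH_AGGR_INIT_EXPR_ARG (arg, iter, call)
+       process (arg);  */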
+
+/* We have an expression tree T that represents a call, either CALL_EXPR
+ or AGGR_INIT_EXPR. Return a reference to the Nth argument. */
+
+inline tree&
+get_nth_callarg (tree t, int n)
+{
+ switch (TREE_CODE (t))
+ {
+ case CALL_EXPR:
+ return CALL_EXPR_ARG (t, n);
+
+ case AGGR_INIT_EXPR:
+ return AGGR_INIT_EXPR_ARG (t, n);
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* VEC_INIT_EXPR accessors. */
+#define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0)
+#define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1)
+
+/* Indicates that a VEC_INIT_EXPR is a potential constant expression.
+ Only set when the current function is constexpr. */
+#define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \
+ TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE))
+
+/* Indicates that a VEC_INIT_EXPR is expressing value-initialization. */
+#define VEC_INIT_EXPR_VALUE_INIT(NODE) \
+ TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE))
+
+/* If T is a VEC_INIT_EXPR, return it, possibly stripping a TARGET_EXPR
+ wrapper. Otherwise, return null. */
+inline tree
+get_vec_init_expr (tree t)
+{
+ if (t && TREE_CODE (t) == TARGET_EXPR)
+ t = TARGET_EXPR_INITIAL (t);
+ if (t && TREE_CODE (t) == VEC_INIT_EXPR)
+ return t;
+ return NULL_TREE;
+}
+
+/* The condition under which this MUST_NOT_THROW_EXPR actually blocks
+ exceptions. NULL_TREE means 'true'. */
+#define MUST_NOT_THROW_COND(NODE) \
+ TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1)
+
+/* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a
+ TEMPLATE_DECL. This macro determines whether or not a given class
+ type is really a template type, as opposed to an instantiation or
+ specialization of one. */
+#define CLASSTYPE_IS_TEMPLATE(NODE) \
+ (CLASSTYPE_TEMPLATE_INFO (NODE) \
+ && !CLASSTYPE_USE_TEMPLATE (NODE) \
+ && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
+
+/* The name used by the user to name the typename type. Typically,
+ this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the
+ corresponding TYPE_DECL. However, this may also be a
+ TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */
+#define TYPENAME_TYPE_FULLNAME(NODE) \
+ (TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE)))
+
+/* True if a TYPENAME_TYPE was declared as an "enum". */
+#define TYPENAME_IS_ENUM_P(NODE) \
+ (TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE)))
+
+/* True if a TYPENAME_TYPE was declared as a "class", "struct", or
+ "union". */
+#define TYPENAME_IS_CLASS_P(NODE) \
+ (TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE)))
+
+/* True if a TYPENAME_TYPE is in the process of being resolved. */
+#define TYPENAME_IS_RESOLVING_P(NODE) \
+ (TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE)))
+
+/* [class.virtual]
+
+ A class that declares or inherits a virtual function is called a
+ polymorphic class. */
+#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE))
+
+/* Nonzero if this class has a virtual function table pointer. */
+#define TYPE_CONTAINS_VPTR_P(NODE) \
+ (TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE))
+
+/* Nonzero if NODE is a FUNCTION_DECL or VARIABLE_DECL (for a decl
+ with namespace scope) declared in a local scope. */
+#define DECL_LOCAL_DECL_P(NODE) \
+ DECL_LANG_FLAG_0 (VAR_OR_FUNCTION_DECL_CHECK (NODE))
+
+/* The namespace-scope decl a DECL_LOCAL_DECL_P aliases. */
+#define DECL_LOCAL_DECL_ALIAS(NODE) \
+ DECL_ACCESS ((gcc_checking_assert (DECL_LOCAL_DECL_P (NODE)), NODE))
+
+/* True if NODE was declared with auto in its return type, but it has
+ started compilation and so the return type might have been changed by
+ return type deduction; its declared return type should be found in
+ DECL_SAVED_AUTO_RETURN_TYPE (NODE). */
+#define FNDECL_USED_AUTO(NODE) \
+ TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE))
+
+/* True if NODE is needed for a manifestly constant-evaluated expression.
+ This doesn't especially need to be a flag, since currently it's only
+ used for error recovery; if we run out of function flags it could move
+ to an attribute. */
+#define FNDECL_MANIFESTLY_CONST_EVALUATED(NODE) \
+ TREE_LANG_FLAG_4 (FUNCTION_DECL_CHECK (NODE))
+
+/* True for artificial decls added for OpenMP privatized non-static
+ data members. */
+#define DECL_OMP_PRIVATIZED_MEMBER(NODE) \
+ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.anticipated_p)
+
+/* Nonzero if NODE is an artificial FUNCTION_DECL for
+ #pragma omp declare reduction. */
+#define DECL_OMP_DECLARE_REDUCTION_P(NODE) \
+ (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->omp_declare_reduction_p)
+
+/* Nonzero if DECL has been declared threadprivate by
+ #pragma omp threadprivate. */
+#define CP_DECL_THREADPRIVATE_P(DECL) \
+ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p)
+
+/* Nonzero if NODE is a VAR_DECL which has been declared inline. */
+#define DECL_VAR_DECLARED_INLINE_P(NODE) \
+ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
+ ? DECL_LANG_SPECIFIC (NODE)->u.base.var_declared_inline_p \
+ : false)
+#define SET_DECL_VAR_DECLARED_INLINE_P(NODE) \
+ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.var_declared_inline_p \
+ = true)
+
+/* True if NODE is a constant variable with a value-dependent initializer. */
+#define DECL_DEPENDENT_INIT_P(NODE) \
+ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
+ && DECL_LANG_SPECIFIC (NODE)->u.base.dependent_init_p)
+#define SET_DECL_DEPENDENT_INIT_P(NODE, X) \
+ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.dependent_init_p = (X))
+
+/* Nonzero if NODE is an artificial VAR_DECL for a C++17 structured binding
+ declaration or one of VAR_DECLs for the user identifiers in it. */
+#define DECL_DECOMPOSITION_P(NODE) \
+ (VAR_P (NODE) && DECL_LANG_SPECIFIC (NODE) \
+ ? DECL_LANG_SPECIFIC (NODE)->u.base.selector == lds_decomp \
+ : false)
+
+/* The underlying artificial VAR_DECL for structured binding. */
+#define DECL_DECOMP_BASE(NODE) \
+ (LANG_DECL_DECOMP_CHECK (NODE)->base)
+
+/* Nonzero if NODE is an inline VAR_DECL. In C++17, static data members
+ declared with constexpr specifier are implicitly inline variables. */
+#define DECL_INLINE_VAR_P(NODE) \
+ (DECL_VAR_DECLARED_INLINE_P (NODE) \
+ || (cxx_dialect >= cxx17 \
+ && DECL_DECLARED_CONSTEXPR_P (NODE) \
+ && DECL_CLASS_SCOPE_P (NODE)))
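+
+/* An editorial illustration:
+
+     inline int m = 4;
+     struct S { static constexpr int n = 3; };
+
+   m is declared inline; in C++17, S::n is implicitly an inline
+   variable as well.  */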
+
+/* Nonzero if DECL was declared with '= delete'. */
+#define DECL_DELETED_FN(DECL) \
+ (LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p)
+
+/* Nonzero if DECL was declared with '= default' (maybe implicitly). */
+#define DECL_DEFAULTED_FN(DECL) \
+ (LANG_DECL_FN_CHECK (DECL)->defaulted_p)
+
+/* Nonzero if DECL is explicitly defaulted in the class body. */
+#define DECL_DEFAULTED_IN_CLASS_P(DECL) \
+ (DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL))
+/* Nonzero if DECL was defaulted outside the class body. */
+#define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \
+ (DECL_DEFAULTED_FN (DECL) \
+ && !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL)))
+
+/* Record whether a typedef for type `int' was actually `signed int'. */
+#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
+
+/* Returns nonzero if DECL has external linkage, as specified by the
+ language standard. (This predicate may hold even when the
+ corresponding entity is not actually given external linkage in the
+ object file; see decl_linkage for details.) */
+#define DECL_EXTERNAL_LINKAGE_P(DECL) \
+ (decl_linkage (DECL) == lk_external)
+
+/* Keep these codes in ascending code order. */
+
+#define INTEGRAL_CODE_P(CODE) \
+ ((CODE) == ENUMERAL_TYPE \
+ || (CODE) == BOOLEAN_TYPE \
+ || (CODE) == INTEGER_TYPE)
+
+/* [basic.fundamental]
+
+ Types bool, char, wchar_t, and the signed and unsigned integer types
+ are collectively called integral types.
+
+ Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration
+ types as well, which is incorrect in C++. Keep these checks in
+ ascending code order. */
+#define CP_INTEGRAL_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == BOOLEAN_TYPE \
+ || TREE_CODE (TYPE) == INTEGER_TYPE)
+
+/* Returns true if TYPE is an integral or enumeration type. Keep
+ these checks in ascending code order. */
+#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE))
+
+/* Returns true if TYPE is an integral or unscoped enumeration type. */
+#define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \
+ (UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE))
+
+/* True if the class type TYPE is a literal type. */
+#define CLASSTYPE_LITERAL_P(TYPE) \
+ (LANG_TYPE_CLASS_CHECK (TYPE)->is_literal)
+
+/* [basic.fundamental]
+
+ Integral and floating types are collectively called arithmetic
+ types.
+
+ As a GNU extension, we also accept complex types.
+
+ Keep these checks in ascending code order. */
+#define ARITHMETIC_TYPE_P(TYPE) \
+ (CP_INTEGRAL_TYPE_P (TYPE) \
+ || TREE_CODE (TYPE) == REAL_TYPE \
+ || TREE_CODE (TYPE) == COMPLEX_TYPE)
+
+/* [basic.types]
+
+ Arithmetic types, enumeration types, pointer types,
+ pointer-to-member types, and std::nullptr_t are collectively called
+ scalar types.
+
+ Keep these checks in ascending code order. */
+#define SCALAR_TYPE_P(TYPE) \
+ (TYPE_PTRDATAMEM_P (TYPE) \
+ || TREE_CODE (TYPE) == ENUMERAL_TYPE \
+ || ARITHMETIC_TYPE_P (TYPE) \
+ || TYPE_PTR_P (TYPE) \
+ || TYPE_PTRMEMFUNC_P (TYPE) \
+ || NULLPTR_TYPE_P (TYPE))
+
+/* Determines whether this type is a C++11 scoped enumeration
+   type. Scoped enumeration types are introduced via "enum class" or
+ "enum struct", e.g.,
+
+ enum class Color {
+ Red, Green, Blue
+ };
+
+ Scoped enumeration types are different from normal (unscoped)
+ enumeration types in several ways:
+
+ - The enumerators of a scoped enumeration type are only available
+ within the scope of the enumeration type and not in the
+ enclosing scope. For example, the Red color can be referred to
+ with "Color::Red" but not "Red".
+
+ - Scoped enumerators and enumerations do not implicitly convert
+ to integers or 'bool'.
+
+ - The underlying type of the enum is well-defined. */
+#define SCOPED_ENUM_P(TYPE) \
+ (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE))
+
+/* Determine whether this is an unscoped enumeration type. */
+#define UNSCOPED_ENUM_P(TYPE) \
+ (TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE))
+
+/* Set the flag indicating whether an ENUMERAL_TYPE is a C++11 scoped
+ enumeration type (1) or a normal (unscoped) enumeration type
+ (0). */
+#define SET_SCOPED_ENUM_P(TYPE, VAL) \
+ (ENUM_IS_SCOPED (TYPE) = (VAL))
+
+#define SET_OPAQUE_ENUM_P(TYPE, VAL) \
+ (ENUM_IS_OPAQUE (TYPE) = (VAL))
+
+#define OPAQUE_ENUM_P(TYPE) \
+ (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE))
+
+/* [dcl.init.aggr]
+
+ An aggregate is an array or a class with no user-provided
+ constructors, no brace-or-equal-initializers for non-static data
+ members, no private or protected non-static data members, no
+ base classes, and no virtual functions.
+
+ As an extension, we also treat vectors as aggregates. Keep these
+ checks in ascending code order. */
+#define CP_AGGREGATE_TYPE_P(TYPE) \
+ (gnu_vector_type_p (TYPE) \
+ || TREE_CODE (TYPE) == ARRAY_TYPE \
+ || (CLASS_TYPE_P (TYPE) && COMPLETE_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE)))
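+
+/* An editorial illustration:
+
+     struct A { int x, y; };
+     struct B { B (int); int x; };
+
+   A is an aggregate; B is not, because of its user-provided
+   constructor.  */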
+
+/* Nonzero for a class type means that the class type has a
+ user-declared constructor. */
+#define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE))
+
+/* Nonzero means that the FUNCTION_TYPE or METHOD_TYPE has a
+ late-specified return type. */
+#define TYPE_HAS_LATE_RETURN_TYPE(NODE) \
+ (TYPE_LANG_FLAG_2 (FUNC_OR_METHOD_CHECK (NODE)))
+
+/* When appearing in an INDIRECT_REF, it means that the tree structure
+ underneath is actually a call to a constructor. This is needed
+ when the constructor must initialize local storage (which can
+ be automatically destroyed), rather than allowing it to allocate
+ space from the heap.
+
+ When appearing in a SAVE_EXPR, it means that underneath
+ is a call to a constructor.
+
+ When appearing in a CONSTRUCTOR, the expression is an unconverted
+ compound literal.
+
+ When appearing in a CALL_EXPR, it means that it is a call to
+ a constructor.
+
+ When appearing in a FIELD_DECL, it means that this field
+ has been duly initialized in its constructor. */
+#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE))
+
+/* True if NODE is a brace-enclosed initializer. */
+#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \
+ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node)
+
+/* True if NODE is a compound-literal, i.e., a brace-enclosed
+ initializer cast to a particular type. This is mostly only set during
+ template parsing; once the initializer has been digested into an actual
+ value of the type, the expression is represented by a TARGET_EXPR. */
+#define COMPOUND_LITERAL_P(NODE) \
+ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))
+
+#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
+ && vec_safe_is_empty(CONSTRUCTOR_ELTS(NODE))\
+ && !TREE_HAS_CONSTRUCTOR (NODE))
+
+/* True if NODE is an init-list used as a direct-initializer, i.e.
+ B b{1,2}, not B b({1,2}) or B b = {1,2}. */
+#define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE)))
+
+/* True if this CONSTRUCTOR is instantiation-dependent and needs to be
+ substituted. */
+#define CONSTRUCTOR_IS_DEPENDENT(NODE) \
+ (TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (NODE)))
+
+/* True if this CONSTRUCTOR should not be used as a variable initializer
+ because it was loaded from a constexpr variable with mutable fields. */
+#define CONSTRUCTOR_MUTABLE_POISON(NODE) \
+ (TREE_LANG_FLAG_2 (CONSTRUCTOR_CHECK (NODE)))
+
+/* True if this typed CONSTRUCTOR represents C99 compound-literal syntax rather
+ than C++11 functional cast syntax. */
+#define CONSTRUCTOR_C99_COMPOUND_LITERAL(NODE) \
+ (TREE_LANG_FLAG_3 (CONSTRUCTOR_CHECK (NODE)))
+
+/* True if this CONSTRUCTOR contains PLACEHOLDER_EXPRs referencing the
+ CONSTRUCTOR's type not nested inside another CONSTRUCTOR marked with
+ CONSTRUCTOR_PLACEHOLDER_BOUNDARY. */
+#define CONSTRUCTOR_PLACEHOLDER_BOUNDARY(NODE) \
+ (TREE_LANG_FLAG_5 (CONSTRUCTOR_CHECK (NODE)))
+
+#define DIRECT_LIST_INIT_P(NODE) \
+ (BRACE_ENCLOSED_INITIALIZER_P (NODE) && CONSTRUCTOR_IS_DIRECT_INIT (NODE))
+
+/* True if this is a designated initializer (when initializer-clauses
+   are allowed to be mixed with designated-initializer-clauses, this is
+   set whenever there is at least one designated-initializer-clause),
+   or a C99 designator. */
+#define CONSTRUCTOR_IS_DESIGNATED_INIT(NODE) \
+ (TREE_LANG_FLAG_6 (CONSTRUCTOR_CHECK (NODE)))
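+
+/* An editorial illustration (C++20 designated initializers):
+
+     struct P { int x, y; };
+     P p = { .x = 1, .y = 2 };  */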
+
+/* True if this CONSTRUCTOR comes from a parenthesized list of values, e.g.
+ A(1, 2, 3). */
+#define CONSTRUCTOR_IS_PAREN_INIT(NODE) \
+ (CONSTRUCTOR_CHECK(NODE)->base.private_flag)
+
+/* True if reshape_init built this sub-CONSTRUCTOR to undo the brace elision
+ of the original CONSTRUCTOR. This flag is used during C++20 aggregate
+ CTAD. */
+#define CONSTRUCTOR_BRACES_ELIDED_P(NODE) \
+ (CONSTRUCTOR_CHECK (NODE)->base.protected_flag)
+
+/* True if NODE represents a conversion for direct-initialization in a
+ template. Set by perform_implicit_conversion_flags. */
+#define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \
+ (TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
+
+/* True if NODE represents a dependent conversion of a non-type template
+ argument. Set by maybe_convert_nontype_argument. */
+#define IMPLICIT_CONV_EXPR_NONTYPE_ARG(NODE) \
+ (TREE_LANG_FLAG_1 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
+
+/* True if NODE represents a conversion for braced-init-list in a
+ template. Set by perform_implicit_conversion_flags. */
+#define IMPLICIT_CONV_EXPR_BRACED_INIT(NODE) \
+ (TREE_LANG_FLAG_2 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
+
+/* Nonzero means that an object of this type cannot be initialized using
+ an initializer list. */
+#define CLASSTYPE_NON_AGGREGATE(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate)
+#define TYPE_NON_AGGREGATE_CLASS(NODE) \
+ (CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE))
+
+/* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */
+#define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign)
+
+/* Nonzero if there is a non-trivial X::X(cv X&) for this class. */
+#define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor)
+
+/* Nonzero if there is a non-trivial X::op=(X&&) for this class. */
+#define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign)
+
+/* Nonzero if there is a non-trivial X::X(X&&) for this class. */
+#define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor)
+
+/* Nonzero if there is no trivial default constructor for this class. */
+#define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt)
+
+/* Nonzero if TYPE has a trivial destructor. From [class.dtor]:
+
+ A destructor is trivial if it is an implicitly declared
+ destructor and if:
+
+ - all of the direct base classes of its class have trivial
+ destructors,
+
+ - for all of the non-static data members of its class that are
+ of class type (or array thereof), each such class has a
+ trivial destructor. */
+#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
+ (!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))
+
+/* Nonzero for _TYPE node means that this type does not have a trivial
+ destructor. Therefore, destroying an object of this type will
+ involve a call to a destructor. This can apply to objects of
+ ARRAY_TYPE if the type of the elements needs a destructor. */
+#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \
+ (TYPE_LANG_FLAG_4 (NODE))
+
+/* Nonzero for class type means that the default constructor is trivial. */
+#define TYPE_HAS_TRIVIAL_DFLT(NODE) \
+ (TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE))
+
+/* Nonzero for class type means that copy initialization of this type can use
+ a bitwise copy. */
+#define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \
+ (TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE))
+
+/* Nonzero for class type means that assignment of this type can use
+ a bitwise copy. */
+#define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \
+ (TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE))
+
+/* Returns true if NODE is a pointer-to-data-member. */
+#define TYPE_PTRDATAMEM_P(NODE) \
+ (TREE_CODE (NODE) == OFFSET_TYPE)
+
+/* Returns true if NODE is a pointer. */
+#define TYPE_PTR_P(NODE) \
+ (TREE_CODE (NODE) == POINTER_TYPE)
+
+/* Returns true if NODE is a reference. */
+#define TYPE_REF_P(NODE) \
+ (TREE_CODE (NODE) == REFERENCE_TYPE)
+
+/* Returns true if NODE is a pointer or a reference. */
+#define INDIRECT_TYPE_P(NODE) \
+ (TYPE_PTR_P (NODE) || TYPE_REF_P (NODE))
+
+/* Returns true if NODE is an object type:
+
+ [basic.types]
+
+ An object type is a (possibly cv-qualified) type that is not a
+ function type, not a reference type, and not a void type.
+
+ Keep these checks in ascending order, for speed. */
+#define TYPE_OBJ_P(NODE) \
+ (!TYPE_REF_P (NODE) \
+ && !VOID_TYPE_P (NODE) \
+ && !FUNC_OR_METHOD_TYPE_P (NODE))
+
+/* Returns true if NODE is a pointer to an object. Keep these checks
+ in ascending tree code order. */
+#define TYPE_PTROB_P(NODE) \
+ (TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE)))
+
+/* Returns true if NODE is a reference to an object. Keep these checks
+ in ascending tree code order. */
+#define TYPE_REF_OBJ_P(NODE) \
+ (TYPE_REF_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE)))
+
+/* Returns true if NODE is a pointer to an object, or a pointer to
+ void. Keep these checks in ascending tree code order. */
+#define TYPE_PTROBV_P(NODE) \
+ (TYPE_PTR_P (NODE) \
+ && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (NODE)))
+
+/* Returns true if NODE is a pointer to function type. */
+#define TYPE_PTRFN_P(NODE) \
+ (TYPE_PTR_P (NODE) \
+ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
+
+/* Returns true if NODE is a reference to function type. */
+#define TYPE_REFFN_P(NODE) \
+ (TYPE_REF_P (NODE) \
+ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
+
+/* Returns true if NODE is a pointer to member function type. */
+#define TYPE_PTRMEMFUNC_P(NODE) \
+ (TREE_CODE (NODE) == RECORD_TYPE \
+ && TYPE_PTRMEMFUNC_FLAG (NODE))
+
+#define TYPE_PTRMEMFUNC_FLAG(NODE) \
+ (TYPE_LANG_FLAG_2 (RECORD_TYPE_CHECK (NODE)))
+
+/* Returns true if NODE is a pointer-to-member. */
+#define TYPE_PTRMEM_P(NODE) \
+ (TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE))
+
+/* Returns true if NODE is a pointer or a pointer-to-member. */
+#define TYPE_PTR_OR_PTRMEM_P(NODE) \
+ (TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE))
+
+/* Indicates when overload resolution may resolve to a pointer to
+ member function. [expr.unary.op]/3 */
+#define PTRMEM_OK_P(NODE) \
+ TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF))
+
+/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
+   pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true
+   before using this macro. */
+#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \
+ (cp_build_qualified_type (TREE_TYPE (TYPE_FIELDS (NODE)),\
+ cp_type_quals (NODE)))
+
+/* As above, but can be used in places that want an lvalue at the expense
+ of not necessarily having the correct cv-qualifiers. */
+#define TYPE_PTRMEMFUNC_FN_TYPE_RAW(NODE) \
+ (TREE_TYPE (TYPE_FIELDS (NODE)))
+
+/* Returns `A' for a type like `int (A::*)(double)'. */
+#define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \
+ TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
+
+/* The canonical internal RECORD_TYPE from the POINTER_TYPE to
+ METHOD_TYPE. */
+#define TYPE_PTRMEMFUNC_TYPE(NODE) \
+ TYPE_LANG_SLOT_1 (NODE)
+
+/* For a pointer-to-member type of the form `T X::*', this is `X'.
+ For a type like `void (X::*)() const', this type is `X', not `const
+ X'. To get at the `const X' you have to look at the
+ TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have
+ type `const X*'. */
+#define TYPE_PTRMEM_CLASS_TYPE(NODE) \
+ (TYPE_PTRDATAMEM_P (NODE) \
+ ? TYPE_OFFSET_BASETYPE (NODE) \
+ : TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE))
+
+/* For a pointer-to-member type of the form `T X::*', this is `T'. */
+#define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \
+ (TYPE_PTRDATAMEM_P (NODE) \
+ ? TREE_TYPE (NODE) \
+ : TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
+
+/* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for
+ `X'. */
+#define PTRMEM_CST_CLASS(NODE) \
+ TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE)))
+
+/* For a pointer-to-member constant `X::Y' this is the _DECL for
+ `Y'. */
+#define PTRMEM_CST_MEMBER(NODE) \
+ (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member)
+
+/* For a pointer-to-member constant `X::Y' this is a location where
+ the address of the member has been taken. */
+#define PTRMEM_CST_LOCATION(NODE) \
+ (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->locus)
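+
+/* An editorial illustration:
+
+     struct X { int y; void z (); };
+     int X::*pd = &X::y;
+     void (X::*pf) () = &X::z;
+
+   pd has the OFFSET_TYPE checked by TYPE_PTRDATAMEM_P; pf has the
+   RECORD_TYPE described above; `&X::y' is a PTRMEM_CST whose
+   PTRMEM_CST_CLASS is X and whose PTRMEM_CST_MEMBER is the FIELD_DECL
+   for y.  */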
+
+/* The expression in question for a TYPEOF_TYPE. */
+#define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE)))
+
+/* The type in question for an UNDERLYING_TYPE. */
+#define UNDERLYING_TYPE_TYPE(NODE) \
+ (TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE)))
+
+/* The type in question for BASES. */
+#define BASES_TYPE(NODE) \
+ (TYPE_VALUES_RAW (BASES_CHECK (NODE)))
+
+#define BASES_DIRECT(NODE) \
+ TREE_LANG_FLAG_0 (BASES_CHECK (NODE))
+
+/* The expression in question for a DECLTYPE_TYPE. */
+#define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE)))
+
+/* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an
+ id-expression or a member-access expression. When false, it was
+ parsed as a full expression. */
+#define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \
+ (DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag
+
+/* These flags indicate that we want different semantics from normal
+ decltype: lambda capture just drops references,
+ lambda proxies look through implicit dereference. */
+#define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \
+ TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE))
+#define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \
+ TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE))
+#define DECLTYPE_FOR_REF_CAPTURE(NODE) \
+ TREE_LANG_FLAG_3 (DECLTYPE_TYPE_CHECK (NODE))
+
+/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was
+ specified in its declaration. This can also be set for an
+ erroneously declared PARM_DECL. */
+#define DECL_THIS_EXTERN(NODE) \
+ DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
+
+/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was
+ specified in its declaration. This can also be set for an
+ erroneously declared PARM_DECL. */
+#define DECL_THIS_STATIC(NODE) \
+ DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
+
+/* Nonzero for FIELD_DECL node means that this field is a lambda capture
+ field for an array of runtime bound. */
+#define DECL_VLA_CAPTURE_P(NODE) \
+ DECL_LANG_FLAG_1 (FIELD_DECL_CHECK (NODE))
+
+/* Nonzero for PARM_DECL node means that this is an array function
+   parameter, i.e., a[] rather than *a. */
+#define DECL_ARRAY_PARAMETER_P(NODE) \
+ DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE))
+
+/* Nonzero for a FIELD_DECL whose NSDMI (non-static data member
+   initializer) is currently being instantiated. */
+#define DECL_INSTANTIATING_NSDMI_P(NODE) \
+ DECL_LANG_FLAG_2 (FIELD_DECL_CHECK (NODE))
+
+/* Nonzero for FIELD_DECL node means that this field is a base class
+ of the parent object, as opposed to a member field. */
+#define DECL_FIELD_IS_BASE(NODE) \
+ DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE))
+
+/* Nonzero for FIELD_DECL node means that this field is a simple (no
+ explicit initializer) lambda capture field, making it invisible to
+ name lookup in unevaluated contexts. */
+#define DECL_NORMAL_CAPTURE_P(NODE) \
+ DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE))
+
+/* Nonzero if TYPE is an anonymous union or struct type. We have to use a
+ flag for this because "A union for which objects or pointers are
+ declared is not an anonymous union" [class.union]. */
+#define ANON_AGGR_TYPE_P(NODE) \
+ (CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr)
+#define SET_ANON_AGGR_TYPE_P(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1)
+
+/* Nonzero if TYPE is an anonymous union type. */
+#define ANON_UNION_TYPE_P(NODE) \
+ (TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE))
+
+/* For an ANON_AGGR_TYPE_P the single FIELD_DECL it is used with. */
+#define ANON_AGGR_TYPE_FIELD(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)
+
+/* Define fields and accessors for nodes representing declared names. */
+
+/* True if TYPE is an unnamed structured type with a typedef for
+ linkage purposes. In that case TYPE_NAME and TYPE_STUB_DECL of the
+ MAIN-VARIANT are different. */
+#define TYPE_WAS_UNNAMED(NODE) \
+ (TYPE_NAME (TYPE_MAIN_VARIANT (NODE)) \
+ != TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
+
+/* C++: all of these are overloaded! These apply only to TYPE_DECLs. */
+
+/* The format of each node in the DECL_FRIENDLIST is as follows:
+
+ The TREE_PURPOSE will be the name of a function, i.e., an
+ IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose
+ TREE_VALUEs are friends with the given name. */
+#define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE))
+#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST))
+#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST))
+
+/* The DECL_ACCESS, if non-NULL, is a TREE_LIST. The TREE_PURPOSE of
+ each node is a type; the TREE_VALUE is the access granted for this
+ DECL in that type. The DECL_ACCESS is set by access declarations.
+ For example, if a member that would normally be public in a
+ derived class is made protected, then the derived class and the
+ protected_access_node will appear in the DECL_ACCESS for the node. */
+#define DECL_ACCESS(NODE) (LANG_DECL_MIN_CHECK (NODE)->access)
+
+/* Nonzero if the FUNCTION_DECL is a global constructor. */
+#define DECL_GLOBAL_CTOR_P(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->global_ctor_p)
+
+/* Nonzero if the FUNCTION_DECL is a global destructor. */
+#define DECL_GLOBAL_DTOR_P(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->global_dtor_p)
+
+/* Accessor macros for C++ template decl nodes. */
+
+/* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node
+   is an INTEGER_CST whose TREE_INT_CST_LOW indicates the level of the
+ template parameters, with 1 being the outermost set of template
+ parameters. The TREE_VALUE is a vector, whose elements are the
+ template parameters at each level. Each element in the vector is a
+ TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a
+ non-type parameter), or a TYPE_DECL (if the parameter is a type
+ parameter) or a TEMPLATE_DECL (if the parameter is a template
+ parameter). The TREE_PURPOSE is the default value, if any. The
+ TEMPLATE_PARM_INDEX for the parameter is available as the
+ DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a
+ TYPE_DECL).
+
+ FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
+   tree is converted to a C++ class hierarchy. */
+#define DECL_TEMPLATE_PARMS(NODE) \
+ ((struct tree_template_decl *)CONST_CAST_TREE (TEMPLATE_DECL_CHECK (NODE)))->arguments
+#define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \
+ INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE))
+#define DECL_NTPARMS(NODE) \
+ TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE))
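+
+/* An editorial illustration of the structure described above: for the
+   member template
+
+     template <class T> struct A { template <class U> void f (U); };
+
+   DECL_TEMPLATE_PARMS for f is a two-node list; the level-1 node holds
+   the vector for {T} and the level-2 node the vector for {U}.
+   DECL_INNERMOST_TEMPLATE_PARMS yields the {U} vector, and
+   DECL_NTPARMS is 1.  */
+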
+/* For function, method, and class-data templates.
+
+ FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
+   tree is converted to a C++ class hierarchy. */
+#define DECL_TEMPLATE_RESULT(NODE) \
+ ((struct tree_template_decl *)CONST_CAST_TREE(TEMPLATE_DECL_CHECK (NODE)))->result
+/* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS
+ lists all instantiations and specializations of the function so that
+ tsubst_friend_function can reassign them to another template if we find
+ that the namespace-scope template is really a partial instantiation of a
+ friend template.
+
+ For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds
+ all instantiations and specializations of the class type, including
+ partial instantiations and partial specializations, so that if we
+ explicitly specialize a partial instantiation we can walk the list
+ in maybe_process_partial_specialization and reassign them or complain
+ as appropriate.
+
+ In both cases, the TREE_PURPOSE of each node contains the arguments
+ used; the TREE_VALUE contains the generated variable. The template
+ arguments are always complete. For example, given:
+
+ template <class T> struct S1 {
+ template <class U> struct S2 {};
+ template <class U> struct S2<U*> {};
+ };
+
+ the record for the partial specialization will contain, as its
+ argument list, { {T}, {U*} }, and will be on the
+ DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template
+ <class U> struct S1<T>::S2'.
+
+ This list is not used for other templates. */
+#define DECL_TEMPLATE_INSTANTIATIONS(NODE) \
+ DECL_SIZE_UNIT (TEMPLATE_DECL_CHECK (NODE))
+
+/* For a class template, this list contains the partial
+ specializations of this template. (Full specializations are not
+ recorded on this list.) The TREE_PURPOSE holds the arguments used
+ in the partial specialization (e.g., for `template <class T> struct
+ S<T*, int>' this will be `T*, int'.) The arguments will also include
+ any outer template arguments. The TREE_VALUE holds the TEMPLATE_DECL
+ for the partial specialization. The TREE_TYPE is the _TYPE node for
+ the partial specialization.
+
+ This list is not used for other templates. */
+#define DECL_TEMPLATE_SPECIALIZATIONS(NODE) \
+ DECL_SIZE (TEMPLATE_DECL_CHECK (NODE))
+
+/* Nonzero for a DECL which is actually a template parameter. Keep
+ these checks in ascending tree code order. */
+#define DECL_TEMPLATE_PARM_P(NODE) \
+ (DECL_LANG_FLAG_0 (NODE) \
+ && (TREE_CODE (NODE) == CONST_DECL \
+ || TREE_CODE (NODE) == PARM_DECL \
+ || TREE_CODE (NODE) == TYPE_DECL \
+ || TREE_CODE (NODE) == TEMPLATE_DECL))
+
+/* Nonzero for a raw template parameter node. */
+#define TEMPLATE_PARM_P(NODE) \
+ (TREE_CODE (NODE) == TEMPLATE_TYPE_PARM \
+ || TREE_CODE (NODE) == TEMPLATE_TEMPLATE_PARM \
+ || TREE_CODE (NODE) == TEMPLATE_PARM_INDEX)
+
+/* Mark NODE as a template parameter. */
+#define SET_DECL_TEMPLATE_PARM_P(NODE) \
+ (DECL_LANG_FLAG_0 (NODE) = 1)
+
+/* Nonzero if NODE is a template template parameter. */
+#define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \
+ (TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE))
+
+/* Nonzero for a DECL that represents a function template. */
+#define DECL_FUNCTION_TEMPLATE_P(NODE) \
+ (TREE_CODE (NODE) == TEMPLATE_DECL \
+ && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
+ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL)
+
+/* Nonzero for a DECL that represents a class template or alias
+ template. */
+#define DECL_TYPE_TEMPLATE_P(NODE) \
+ (TREE_CODE (NODE) == TEMPLATE_DECL \
+ && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
+ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL)
+
+/* Nonzero for a DECL that represents a class template. */
+#define DECL_CLASS_TEMPLATE_P(NODE) \
+ (DECL_TYPE_TEMPLATE_P (NODE) \
+ && DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE)))
+
+/* Nonzero for a TEMPLATE_DECL that represents an alias template. */
+#define DECL_ALIAS_TEMPLATE_P(NODE) \
+ (DECL_TYPE_TEMPLATE_P (NODE) \
+ && !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE)))
+
+/* Nonzero for a NODE which declares a type. */
+#define DECL_DECLARES_TYPE_P(NODE) \
+ (TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE))
+
+/* Nonzero if NODE declares a function. */
+#define DECL_DECLARES_FUNCTION_P(NODE) \
+ (TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE))
+
+/* Nonzero if NODE is the typedef implicitly generated for a type when
+ the type is declared. In C++, `struct S {};' is roughly
+ equivalent to `struct S {}; typedef struct S S;' in C.
+ DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this
+ example. In C++, there is a second implicit typedef for each
+ class, called the injected-class-name, in the scope of `S' itself, so that
+ you can say `S::S'. DECL_SELF_REFERENCE_P will hold for that typedef. */
+#define DECL_IMPLICIT_TYPEDEF_P(NODE) \
+ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE))
+#define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \
+ (DECL_LANG_FLAG_2 (NODE) = 1)
+#define DECL_SELF_REFERENCE_P(NODE) \
+ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE))
+#define SET_DECL_SELF_REFERENCE_P(NODE) \
+ (DECL_LANG_FLAG_4 (NODE) = 1)
+
+/* A `primary' template is one that has its own template header and is not
+ a partial specialization. A member function of a class template is a
+ template, but not primary. A member template is primary. Friend
+ templates are primary, too. */
+
+/* Returns the primary template corresponding to these parameters. */
+#define TPARMS_PRIMARY_TEMPLATE(NODE) (TREE_TYPE (NODE))
+
+#define DECL_PRIMARY_TEMPLATE(NODE) \
+ (TPARMS_PRIMARY_TEMPLATE (DECL_INNERMOST_TEMPLATE_PARMS (NODE)))
+
+/* Returns nonzero if NODE is a primary template. */
+#define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE))
+
+/* Nonzero iff NODE is a specialization of a template. The value
+ indicates the type of specializations:
+
+ 1=implicit instantiation
+
+ 2=partial or explicit specialization, e.g.:
+
+ template <> int min<int> (int, int),
+
+ 3=explicit instantiation, e.g.:
+
+ template int min<int> (int, int);
+
+ Note that NODE will be marked as a specialization even if the
+ template it is instantiating is not a primary template. For
+ example, given:
+
+ template <typename T> struct O {
+ void f();
+ struct I {};
+ };
+
+ both O<int>::f and O<int>::I will be marked as instantiations.
+
+ If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also
+ be non-NULL. */
+#define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template)
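+
+/* For example (illustrative, not part of the original comment), given
+
+ template <typename T> T min (T, T);
+ int i = min (1, 2); // implicitly instantiates min<int>
+
+ DECL_USE_TEMPLATE on the FUNCTION_DECL for min<int> would be 1. */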
+
+/* Like DECL_USE_TEMPLATE, but for class types. */
+#define CLASSTYPE_USE_TEMPLATE(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->use_template)
+
+/* True if NODE is a specialization of a primary template. */
+#define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \
+ (CLASS_TYPE_P (NODE) \
+ && CLASSTYPE_USE_TEMPLATE (NODE) \
+ && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
+
+#define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1)
+#define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE (NODE) & 1)
+
+#define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2)
+#define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2)
+
+/* Returns true for an explicit or partial specialization of a class
+ template. */
+#define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE (NODE) == 2)
+#define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE (NODE) = 2)
+
+#define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1)
+#define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1)
+#define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE (NODE) == 1)
+#define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE (NODE) = 1)
+
+#define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3)
+#define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3)
+#define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE (NODE) == 3)
+#define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
+ (CLASSTYPE_USE_TEMPLATE (NODE) = 3)
+
+/* Nonzero if DECL is a friend function which is an instantiation
+ from the point of view of the compiler, but not from the point of
+ view of the language. For example given:
+ template <class T> struct S { friend void f(T) {}; };
+ the declaration of `void f(int)' generated when S<int> is
+ instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be
+ a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. */
+#define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \
+ (DECL_LANG_SPECIFIC (DECL) && DECL_TEMPLATE_INFO (DECL) \
+ && !DECL_USE_TEMPLATE (DECL))
+
+/* Nonzero if DECL is a function generated from a function 'temploid',
+ i.e. template, member of class template, or dependent friend. */
+#define DECL_TEMPLOID_INSTANTIATION(DECL) \
+ (DECL_TEMPLATE_INSTANTIATION (DECL) \
+ || DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL))
+
+/* Nonzero if DECL is either defined implicitly by the compiler or
+ generated from a temploid. */
+#define DECL_GENERATED_P(DECL) \
+ (DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL))
+
+/* Nonzero iff we are currently processing a declaration for an
+ entity with its own template parameter list, and which is not a
+ full specialization. */
+#define PROCESSING_REAL_TEMPLATE_DECL_P() \
+ (!processing_template_parmlist \
+ && current_template_depth > template_class_depth (current_scope ()))
+
+/* Nonzero if this VAR_DECL or FUNCTION_DECL has already been
+ instantiated, i.e. its definition has been generated from the
+ pattern given in the template. */
+#define DECL_TEMPLATE_INSTANTIATED(NODE) \
+ DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE))
+
+/* We know what we're doing with this decl now. */
+#define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE)
+
+/* DECL_EXTERNAL must be set on a decl until the decl is actually emitted,
+ so that assemble_external will work properly. This flag therefore
+ tells us whether the decl is really not external.
+
+ This flag does not indicate whether or not the decl is defined in the
+ current translation unit; it indicates whether or not we should emit the
+ decl at the end of compilation if it is defined and needed. */
+#define DECL_NOT_REALLY_EXTERN(NODE) \
+ (DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern)
+
+#define DECL_REALLY_EXTERN(NODE) \
+ (DECL_EXTERNAL (NODE) \
+ && (!DECL_LANG_SPECIFIC (NODE) || !DECL_NOT_REALLY_EXTERN (NODE)))
+
+/* A thunk is a stub function.
+
+ A thunk is an alternate entry point for an ordinary FUNCTION_DECL.
+ The address of the ordinary FUNCTION_DECL is given by the
+ DECL_INITIAL, which is always an ADDR_EXPR whose operand is a
+ FUNCTION_DECL. The job of the thunk is to either adjust the this
+ pointer before transferring control to the FUNCTION_DECL, or call
+ FUNCTION_DECL and then adjust the result value. Note that a result
+ pointer adjusting thunk must perform a call to the thunked function
+ (or be implemented by passing some invisible parameter to the
+ thunked function, which is modified to perform the adjustment just
+ before returning).
+
+ A thunk may perform either, or both, of the following operations:
+
+ o Adjust the this or result pointer by a constant offset.
+ o Adjust the this or result pointer by looking up a vcall or vbase offset
+ in the vtable.
+
+ A this pointer adjusting thunk converts from a base to a derived
+ class, and hence adds the offsets. A result pointer adjusting thunk
+ converts from a derived class to a base, and hence subtracts the
+ offsets. If both operations are performed, then the constant
+ adjustment is performed first for this pointer adjustment and last
+ for the result pointer adjustment.
+
+ The constant adjustment is given by THUNK_FIXED_OFFSET. If the
+ vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is
+ used. For this pointer adjusting thunks, it is the vcall offset
+ into the vtable. For result pointer adjusting thunks it is the
+ binfo of the virtual base to convert to. Use that binfo's vbase
+ offset.
+
+ It is possible to have equivalent covariant thunks. These are
+ distinct virtual covariant thunks whose vbase offsets happen to
+ have the same value. THUNK_ALIAS is used to pick one as the
+ canonical thunk, which will get all the this pointer adjusting
+ thunks attached to it. */
+
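+/* Illustrative C++ example (ours, not part of the original comment):
+
+ struct B { virtual B *clone (); };
+ struct D : virtual B { D *clone () override; };
+
+ D::clone has a covariant return type, so a call dispatched through a
+ B* may go via a result adjusting thunk: it calls D::clone and then
+ converts the returned D* to B*, using B's vbase offset because B is
+ a virtual base. */
+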
+/* An integer indicating how many bytes should be subtracted from the
+ this or result pointer when this function is called. */
+#define THUNK_FIXED_OFFSET(DECL) \
+ (DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset)
+
+/* A tree indicating how to perform the virtual adjustment. For a this
+ adjusting thunk it is the number of bytes to be added to the vtable
+ to find the vcall offset. For a result adjusting thunk, it is the
+ binfo of the relevant virtual base. If NULL, then there is no
+ virtual adjust. (The vptr is always located at offset zero from
+ the this or result pointer.) (If the covariant type is within the
+ class hierarchy being laid out, the vbase index is not yet known
+ at the point we need to create the thunks, hence the need to use
+ binfos.) */
+
+#define THUNK_VIRTUAL_OFFSET(DECL) \
+ (LANG_DECL_MIN_CHECK (FUNCTION_DECL_CHECK (DECL))->access)
+
+/* A thunk which is equivalent to another thunk. */
+#define THUNK_ALIAS(DECL) \
+ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info)
+
+/* For thunk NODE, this is the FUNCTION_DECL thunked to. It is
+ possible for the target to be a thunk too. */
+#define THUNK_TARGET(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->befriending_classes)
+
+/* True for a SCOPE_REF iff the "template" keyword was used to
+ indicate that the qualified name denotes a template. */
+#define QUALIFIED_NAME_IS_TEMPLATE(NODE) \
+ (TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE)))
+
+/* [coroutines] */
+
+/* True if NODE is a co-routine FUNCTION_DECL. */
+#define DECL_COROUTINE_P(NODE) \
+ (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->coroutine_p)
+
+/* For a FUNCTION_DECL of a coroutine, this holds the ACTOR helper function
+ decl. */
+#define DECL_ACTOR_FN(NODE) \
+ (coro_get_actor_function ((NODE)))
+
+/* For a FUNCTION_DECL of a coroutine, this holds the DESTROY helper function
+ decl. */
+#define DECL_DESTROY_FN(NODE) \
+ (coro_get_destroy_function ((NODE)))
+
+/* For a FUNCTION_DECL of a coroutine helper (ACTOR or DESTROY), this points
+ back to the original (ramp) function. */
+#define DECL_RAMP_FN(NODE) \
+ (coro_get_ramp_function (NODE))
+
+/* True for an OMP_ATOMIC that has dependent parameters. These are stored
+ as an expr in operand 1, and integer_zero_node or clauses in operand 0. */
+#define OMP_ATOMIC_DEPENDENT_P(NODE) \
+ (TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST \
+ || TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == OMP_CLAUSE)
+
+/* Used while gimplifying continue statements bound to OMP_FOR nodes. */
+#define OMP_FOR_GIMPLIFYING_P(NODE) \
+ (TREE_LANG_FLAG_0 (OMP_LOOPING_CHECK (NODE)))
+
+/* A language-specific token attached to the OpenMP data clauses to
+ hold code (or code fragments) related to ctors, dtors, and op=.
+ See semantics.cc for details. */
+#define CP_OMP_CLAUSE_INFO(NODE) \
+ TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
+ OMP_CLAUSE__CONDTEMP_))
+
+/* Nonzero if this transaction expression's body contains statements. */
+#define TRANSACTION_EXPR_IS_STMT(NODE) \
+ TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE))
+
+/* These macros provide convenient access to the various _STMT nodes
+ created when parsing template declarations. */
+#define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0)
+#define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1)
+
+#define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0)
+#define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1)
+
+#define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0)
+
+/* Nonzero if this try block is a function try block. */
+#define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE))
+#define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0)
+#define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1)
+#define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE))
+
+/* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run
+ and the VAR_DECL for which this cleanup exists. */
+#define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0)
+#define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1)
+#define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2)
+
+/* IF_STMT accessors. These give access to the condition of the if
+ statement, the then block of the if statement, and the else block
+ of the if statement if it exists. */
+#define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0)
+#define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1)
+#define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2)
+#define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3)
+#define IF_STMT_CONSTEXPR_P(NODE) TREE_LANG_FLAG_0 (IF_STMT_CHECK (NODE))
+#define IF_STMT_CONSTEVAL_P(NODE) TREE_LANG_FLAG_2 (IF_STMT_CHECK (NODE))
+
+/* Like PACK_EXPANSION_EXTRA_ARGS, for constexpr if. IF_SCOPE is used while
+ building an IF_STMT; IF_STMT_EXTRA_ARGS is used after it is complete. */
+#define IF_STMT_EXTRA_ARGS(NODE) IF_SCOPE (NODE)
+
+/* RANGE_FOR_STMT accessors. These give access to the declarator,
+ expression, body, scope, unroll count, and init statement of the
+ range-based for statement, respectively. */
+#define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0)
+#define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1)
+#define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2)
+#define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3)
+#define RANGE_FOR_UNROLL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 4)
+#define RANGE_FOR_INIT_STMT(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 5)
+#define RANGE_FOR_IVDEP(NODE) TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE))
+
+/* STMT_EXPR accessor. */
+#define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0)
+
+/* EXPR_STMT accessor. This gives the expression associated with an
+ expression statement. */
+#define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0)
+
+/* True if this TARGET_EXPR was created by build_cplus_new, and so we can
+ discard it if it isn't useful. */
+#define TARGET_EXPR_IMPLICIT_P(NODE) \
+ TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE))
+
+/* True if this TARGET_EXPR is the result of list-initialization of a
+ temporary. */
+#define TARGET_EXPR_LIST_INIT_P(NODE) \
+ TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE))
+
+/* True if this TARGET_EXPR expresses direct-initialization of an object
+ to be named later. */
+#define TARGET_EXPR_DIRECT_INIT_P(NODE) \
+ TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE))
+
+/* True if we expect this TARGET_EXPR to be used as an initializer, not to
+ materialize as a temporary. */
+#define TARGET_EXPR_ELIDING_P(NODE) \
+ TREE_LANG_FLAG_3 (TARGET_EXPR_CHECK (NODE))
+
+/* True if NODE is a TARGET_EXPR that just expresses a copy of its INITIAL; if
+ the initializer has void type, it's doing something more complicated. */
+#define SIMPLE_TARGET_EXPR_P(NODE) \
+ (TREE_CODE (NODE) == TARGET_EXPR \
+ && TARGET_EXPR_INITIAL (NODE) \
+ && !VOID_TYPE_P (TREE_TYPE (TARGET_EXPR_INITIAL (NODE))))
+
+/* True if T is a TARGET_EXPR for which we'll need to replace_decl to use it as
+ an initializer. */
+inline bool
+target_expr_needs_replace (tree t)
+{
+ if (!t || TREE_CODE (t) != TARGET_EXPR)
+ return false;
+ tree init = TARGET_EXPR_INITIAL (t);
+ if (!init || !VOID_TYPE_P (TREE_TYPE (init)))
+ return false;
+ while (TREE_CODE (init) == COMPOUND_EXPR)
+ init = TREE_OPERAND (init, 1);
+ return (TREE_CODE (init) != AGGR_INIT_EXPR
+ && TREE_CODE (init) != VEC_INIT_EXPR);
+}
+
+/* True if EXPR expresses direct-initialization of a TYPE. */
+#define DIRECT_INIT_EXPR_P(TYPE,EXPR) \
+ (TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \
+ && same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR)))
+
+/* True if this CONVERT_EXPR is for a conversion to virtual base in
+ an NSDMI, and should be re-evaluated when used in a constructor. */
+#define CONVERT_EXPR_VBASE_PATH(NODE) \
+ TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE))
+
+/* True if the SIZEOF_EXPR argument is a type. */
+#define SIZEOF_EXPR_TYPE_P(NODE) \
+ TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE))
+
+/* True if the ALIGNOF_EXPR was spelled "alignof". */
+#define ALIGNOF_EXPR_STD_P(NODE) \
+ TREE_LANG_FLAG_0 (ALIGNOF_EXPR_CHECK (NODE))
+
+/* OMP_DEPOBJ accessors. These give access to the depobj expression of the
+ #pragma omp depobj directive and the clauses, respectively. If
+ OMP_DEPOBJ_CLAUSES is INTEGER_CST, it is instead the update clause kind
+ or OMP_CLAUSE_DEPEND_LAST for destroy clause. */
+#define OMP_DEPOBJ_DEPOBJ(NODE) TREE_OPERAND (OMP_DEPOBJ_CHECK (NODE), 0)
+#define OMP_DEPOBJ_CLAUSES(NODE) TREE_OPERAND (OMP_DEPOBJ_CHECK (NODE), 1)
+
+/* An enumeration of the kind of tags that C++ accepts. */
+enum tag_types {
+ none_type = 0, /* Not a tag type. */
+ record_type, /* "struct" types. */
+ class_type, /* "class" types. */
+ union_type, /* "union" types. */
+ enum_type, /* "enum" types. */
+ typename_type, /* "typename" types. */
+ scope_type /* namespace or tagged type name followed by :: */
+};
+
+/* The various kinds of lvalues we distinguish. */
+enum cp_lvalue_kind_flags {
+ clk_none = 0, /* Things that are not an lvalue. */
+ clk_ordinary = 1, /* An ordinary lvalue. */
+ clk_rvalueref = 2, /* An xvalue (rvalue formed using an rvalue reference). */
+ clk_class = 4, /* A prvalue of class or array type. */
+ clk_bitfield = 8, /* An lvalue for a bit-field. */
+ clk_packed = 16, /* An lvalue for a packed field. */
+ clk_implicit_rval = 1<<5 /* An lvalue being treated as an xvalue. */
+};
+
+/* This type is used for parameters and variables which hold
+ combinations of the flags in enum cp_lvalue_kind_flags. */
+typedef int cp_lvalue_kind;
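+
+/* For example (illustrative), an lvalue naming a bit-field might be
+ classified as (clk_ordinary | clk_bitfield). */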
+
+/* Various kinds of template specialization, instantiation, etc. */
+enum tmpl_spec_kind {
+ tsk_none, /* Not a template at all. */
+ tsk_invalid_member_spec, /* An explicit member template
+ specialization, but the enclosing
+ classes have not all been explicitly
+ specialized. */
+ tsk_invalid_expl_inst, /* An explicit instantiation containing
+ template parameter lists. */
+ tsk_excessive_parms, /* A template declaration with too many
+ template parameter lists. */
+ tsk_insufficient_parms, /* A template declaration with too few
+ parameter lists. */
+ tsk_template, /* A template declaration. */
+ tsk_expl_spec, /* An explicit specialization. */
+ tsk_expl_inst /* An explicit instantiation. */
+};
+
+/* The various kinds of access. BINFO_ACCESS depends on these being
+ two bit quantities. The numerical values are important; they are
+ used to initialize RTTI data structures, so changing them changes
+ the ABI. */
+enum access_kind {
+ ak_none = 0, /* Inaccessible. */
+ ak_public = 1, /* Accessible, as a `public' thing. */
+ ak_protected = 2, /* Accessible, as a `protected' thing. */
+ ak_private = 3 /* Accessible, as a `private' thing. */
+};
+
+/* The various kinds of special functions. If you add to this list,
+ you should update special_function_p as well. */
+enum special_function_kind {
+ sfk_none = 0, /* Not a special function. This enumeral
+ must have value zero; see
+ special_function_p. */
+ /* The following are ordered, for use by member synthesis fns. */
+ sfk_destructor, /* A destructor. */
+ sfk_constructor, /* A constructor. */
+ sfk_inheriting_constructor, /* An inheriting constructor */
+ sfk_copy_constructor, /* A copy constructor. */
+ sfk_move_constructor, /* A move constructor. */
+ sfk_copy_assignment, /* A copy assignment operator. */
+ sfk_move_assignment, /* A move assignment operator. */
+ /* The following are unordered. */
+ sfk_complete_destructor, /* A destructor for complete objects. */
+ sfk_base_destructor, /* A destructor for base subobjects. */
+ sfk_deleting_destructor, /* A destructor for complete objects that
+ deletes the object after it has been
+ destroyed. */
+ sfk_conversion, /* A conversion operator. */
+ sfk_deduction_guide, /* A class template deduction guide. */
+ sfk_comparison, /* A comparison operator (e.g. ==, <, <=>). */
+ sfk_virtual_destructor /* Used by member synthesis fns. */
+};
+
+/* The various kinds of linkage. From [basic.link],
+
+ A name is said to have linkage when it might denote the same
+ object, reference, function, type, template, namespace or value
+ as a name introduced in another scope:
+
+ -- When a name has external linkage, the entity it denotes can
+ be referred to from scopes of other translation units or from
+ other scopes of the same translation unit.
+
+ -- When a name has internal linkage, the entity it denotes can
+ be referred to by names from other scopes in the same
+ translation unit.
+
+ -- When a name has no linkage, the entity it denotes cannot be
+ referred to by names from other scopes. */
+
+enum linkage_kind {
+ lk_none, /* No linkage. */
+ lk_internal, /* Internal linkage. */
+ lk_external /* External linkage. */
+};
+
+enum duration_kind {
+ dk_static,
+ dk_thread,
+ dk_auto,
+ dk_dynamic
+};
+
+/* Bitmask flags to control type substitution. */
+enum tsubst_flags {
+ tf_none = 0, /* nothing special */
+ tf_error = 1 << 0, /* give error messages */
+ tf_warning = 1 << 1, /* give warnings too */
+ tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */
+ tf_keep_type_decl = 1 << 3, /* retain typedef type decls
+ (make_typename_type use) */
+ tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal
+ instantiate_type use) */
+ tf_user = 1 << 5, /* found template must be a user template
+ (lookup_template_class use) */
+ tf_conv = 1 << 6, /* We are determining what kind of
+ conversion might be permissible,
+ not actually performing the
+ conversion. */
+ tf_decltype = 1 << 7, /* We are the operand of decltype.
+ Used to implement the special rules
+ for calls in decltype (5.2.2/11). */
+ tf_partial = 1 << 8, /* Doing initial explicit argument
+ substitution in fn_type_unification. */
+ tf_fndecl_type = 1 << 9, /* Substituting the type of a function
+ declaration. */
+ tf_no_cleanup = 1 << 10, /* Do not build a cleanup
+ (build_target_expr and friends) */
+ tf_norm = 1 << 11, /* Build diagnostic information during
+ constraint normalization. */
+ tf_tst_ok = 1 << 12, /* Allow a typename-specifier to name
+ a template (C++17 or later). */
+ tf_dguide = 1 << 13, /* Building a deduction guide from a ctor. */
+ tf_qualifying_scope = 1 << 14, /* Substituting the LHS of the :: operator.
+ Affects TYPENAME_TYPE resolution from
+ make_typename_type. */
+ /* Convenient substitution flags combinations. */
+ tf_warning_or_error = tf_warning | tf_error
+};
+
+/* This type is used for parameters and variables which hold
+ combinations of the flags in enum tsubst_flags. */
+typedef int tsubst_flags_t;
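+
+/* Usage sketch (hypothetical call site): substitution entry points take
+ a tsubst_flags_t "complain" argument, e.g.
+
+ type = tsubst (type, args, tf_warning_or_error, in_decl);
+
+ SFINAE contexts pass tf_none instead so that failures stay silent. */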
+
+/* The kind of checking we can do looking in a class hierarchy. */
+enum base_access_flags {
+ ba_any = 0, /* Do not check access, allow an ambiguous base,
+ prefer a non-virtual base */
+ ba_unique = 1 << 0, /* Must be a unique base. */
+ ba_check_bit = 1 << 1, /* Check access. */
+ ba_check = ba_unique | ba_check_bit,
+ ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope. */
+};
+
+/* This type is used for parameters and variables which hold
+ combinations of the flags in enum base_access_flags. */
+typedef int base_access;
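+
+/* Usage sketch (hypothetical call): something like
+
+ base_kind kind;
+ binfo = lookup_base (t, base, ba_check, &kind, complain);
+
+ checks access and requires an unambiguous base; KIND receives one of
+ the base_kind values defined below. */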
+
+/* The various kinds of access check during parsing. */
+enum deferring_kind {
+ dk_no_deferred = 0, /* Check access immediately */
+ dk_deferred = 1, /* Deferred check */
+ dk_no_check = 2 /* No access check */
+};
+
+/* The kind of base we can find, looking in a class hierarchy.
+ Values <0 indicate we failed. */
+enum base_kind {
+ bk_inaccessible = -3, /* The base is inaccessible */
+ bk_ambig = -2, /* The base is ambiguous */
+ bk_not_base = -1, /* It is not a base */
+ bk_same_type = 0, /* It is the same type */
+ bk_proper_base = 1, /* It is a proper base */
+ bk_via_virtual = 2 /* It is a proper base, but via a virtual
+ path. This might not be the canonical
+ binfo. */
+};
+
+/* Node for "pointer to (virtual) function".
+ This may be distinct from ptr_type_node so gdb can distinguish them. */
+#define vfunc_ptr_type_node vtable_entry_type
+
+
+/* For building calls to `delete'. */
+extern GTY(()) tree integer_two_node;
+
+/* The number of function bodies which we are currently processing.
+ (Zero if we are at namespace scope, one inside the body of a
+ function, two inside the body of a function in a local class, etc.) */
+extern int function_depth;
+
+/* Nonzero if we are inside spec_hasher::equal, which affects
+ comparison of PARM_DECLs in cp_tree_equal. */
+extern int comparing_specializations;
+
+/* Nonzero if we want different dependent aliases to compare as unequal.
+ FIXME we should always do this except during deduction/ordering. */
+extern int comparing_dependent_aliases;
+
+/* Nonzero if we want to consider different member expressions to compare
+ equal if they designate the same entity. This is set when comparing
+ contract conditions of overrides. */
+extern bool comparing_override_contracts;
+
+/* In parser.cc. */
+
+/* Nonzero if we are parsing an unevaluated operand: an operand to
+ sizeof, typeof, or alignof. This is a count since operands to
+ sizeof can be nested. */
+
+extern int cp_unevaluated_operand;
+
+/* RAII class used to inhibit the evaluation of operands during parsing
+ and template instantiation. Evaluation warnings are also inhibited. */
+
+class cp_unevaluated
+{
+public:
+ cp_unevaluated ();
+ ~cp_unevaluated ();
+};
+
+/* The reverse: an RAII class used for nested contexts that are evaluated even
+ if the enclosing context is not. */
+
+class cp_evaluated
+{
+public:
+ int uneval;
+ int inhibit;
+ cp_evaluated (bool reset = true)
+ : uneval(cp_unevaluated_operand), inhibit(c_inhibit_evaluation_warnings)
+ { if (reset)
+ cp_unevaluated_operand = c_inhibit_evaluation_warnings = 0; }
+ ~cp_evaluated ()
+ { cp_unevaluated_operand = uneval;
+ c_inhibit_evaluation_warnings = inhibit; }
+};
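+
+/* Usage sketch (ours): a region that must be treated as evaluated even
+ inside an unevaluated operand can be written as
+
+ {
+ cp_evaluated ev; // zeroes the state, saving the old values
+ // ... process the evaluated subexpression ...
+ } // the destructor restores the saved state
+
+ Construction clears cp_unevaluated_operand and
+ c_inhibit_evaluation_warnings; destruction restores both. */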
+
+/* in pt.cc */
+
+/* These values are used for the `STRICT' parameter to type_unification and
+ fn_type_unification. Their meanings are described with the
+ documentation for fn_type_unification. */
+
+enum unification_kind_t {
+ DEDUCE_CALL,
+ DEDUCE_CONV,
+ DEDUCE_EXACT
+};
+
+// An RAII class used to create a new pointer map for local
+// specializations. When the stack goes out of scope, the
+// previous pointer map is restored.
+enum lss_policy { lss_blank, lss_copy, lss_nop };
+class local_specialization_stack
+{
+public:
+ local_specialization_stack (lss_policy = lss_blank);
+ ~local_specialization_stack ();
+
+ hash_map<tree, tree> *saved;
+};
+
+/* Entry in the specialization hash table. */
+struct GTY((for_user)) spec_entry
+{
+ tree tmpl; /* The general template this is a specialization of. */
+ tree args; /* The args for this (maybe-partial) specialization. */
+ tree spec; /* The specialization itself. */
+};
+
+/* in class.cc */
+
+extern int current_class_depth;
+
+/* in decl.cc */
+
+/* An array of static vars & fns. */
+extern GTY(()) vec<tree, va_gc> *static_decls;
+
+/* An array of vtable-needing types that have no key function, or have
+ an emitted key function. */
+extern GTY(()) vec<tree, va_gc> *keyed_classes;
+
+/* Here's where we control how name mangling takes place. */
+
+/* Cannot use '$' up front, because this confuses gdb
+ (names beginning with '$' are gdb-local identifiers).
+
+ Note that all forms in which the '$' is significant are long enough
+ for direct indexing (meaning that if we know there is a '$'
+ at a particular location, we can index into the string at
+ any other location that provides distinguishing characters). */
+
+/* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler
+ doesn't allow '.' in symbol names. */
+#ifndef NO_DOT_IN_LABEL
+
+#define JOINER '.'
+#define JOIN_STR "."
+
+#define AUTO_TEMP_NAME "_.tmp_"
+#define VFIELD_BASE ".vf"
+#define VFIELD_NAME "_vptr."
+#define VFIELD_NAME_FORMAT "_vptr.%s"
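+
+/* For example (illustrative), with '.' as the joiner the vtable pointer
+ field of a class S would be named "_vptr.S", via VFIELD_NAME_FORMAT. */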
+
+#else /* NO_DOT_IN_LABEL */
+
+#ifndef NO_DOLLAR_IN_LABEL
+
+#define JOINER '$'
+#define JOIN_STR "$"
+
+#define AUTO_TEMP_NAME "_$tmp_"
+#define VFIELD_BASE "$vf"
+#define VFIELD_NAME "_vptr$"
+#define VFIELD_NAME_FORMAT "_vptr$%s"
+
+#else /* NO_DOLLAR_IN_LABEL */
+
+#define JOIN_STR "_"
+
+#define VTABLE_NAME "__vt_"
+#define VTABLE_NAME_P(ID_NODE) \
+ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
+ sizeof (VTABLE_NAME) - 1))
+#define VFIELD_BASE "__vfb"
+#define VFIELD_NAME "__vptr_"
+#define VFIELD_NAME_P(ID_NODE) \
+ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
+ sizeof (VFIELD_NAME) - 1))
+#define VFIELD_NAME_FORMAT "__vptr_%s"
+
+#endif /* NO_DOLLAR_IN_LABEL */
+#endif /* NO_DOT_IN_LABEL */
+
+#define UDLIT_OP_ANSI_PREFIX "operator\"\""
+#define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s"
+#define UDLIT_OP_MANGLED_PREFIX "li"
+#define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s"
+#define UDLIT_OPER_P(ID_NODE) \
+ (!strncmp (IDENTIFIER_POINTER (ID_NODE), \
+ UDLIT_OP_ANSI_PREFIX, \
+ sizeof (UDLIT_OP_ANSI_PREFIX) - 1))
+#define UDLIT_OP_SUFFIX(ID_NODE) \
+ (IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1)
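+
+/* For example (illustrative), for the literal operator
+
+ unsigned long long operator""_kb (unsigned long long);
+
+ the ANSI form of the name is operator""_kb, UDLIT_OP_SUFFIX yields
+ "_kb", and the mangled operator name uses the "li" prefix (li3_kb
+ under the Itanium ABI). */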
+
+#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)
+
+#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
+ && IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
+ && IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)
+
+#define VFIELD_NAME_P(ID_NODE) \
+ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))
+
+#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */
+
+
+/* Nonzero if we're done parsing and into end-of-file activities.
+ Two if we're done with front-end processing. */
+
+extern int at_eof;
+
+/* True if note_mangling_alias should enqueue mangling aliases for
+ later generation, rather than emitting them right away. */
+
+extern bool defer_mangling_aliases;
+
+/* True if noexcept is part of the type (i.e. in C++17). */
+
+extern bool flag_noexcept_type;
+
+/* A list of namespace-scope objects which have constructors or
+ destructors which reside in the global scope. The decl is stored
+ in the TREE_VALUE slot and the initializer is stored in the
+ TREE_PURPOSE slot. */
+extern GTY(()) tree static_aggregates;
+/* Likewise, for thread local storage. */
+extern GTY(()) tree tls_aggregates;
+
+/* A hash-map mapping from variable decls to the dynamic initializer for
+ the decl. This is currently only used by OpenMP. */
+extern GTY(()) decl_tree_map *dynamic_initializers;
+
+enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG };
+
+/* These are used as bits in flags passed to various functions to
+ control their behavior. Despite the LOOKUP_ prefix, many of these
+ do not control name lookup. ??? Functions using these flags should
+ probably be modified to accept explicit boolean flags for the
+ behaviors relevant to them. */
+/* Check for access violations. */
+#define LOOKUP_PROTECT (1 << 0)
+#define LOOKUP_NORMAL (LOOKUP_PROTECT)
+/* Even if the function found by lookup is a virtual function, it
+ should be called directly. */
+#define LOOKUP_NONVIRTUAL (1 << 1)
+/* Non-converting (i.e., "explicit") constructors are not tried. This flag
+ indicates that we are not performing direct-initialization. */
+#define LOOKUP_ONLYCONVERTING (1 << 2)
+#define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING)
+/* If a temporary is created, it should be created so that it lives
+ as long as the current variable bindings; otherwise it only lives
+ until the end of the complete-expression. It also forces
+ direct-initialization in cases where other parts of the compiler
+ have already generated a temporary, such as reference
+ initialization and the catch parameter. */
+#define DIRECT_BIND (1 << 3)
+/* We're performing a user-defined conversion, so more user-defined
+ conversions are not permitted (only built-in conversions). */
+#define LOOKUP_NO_CONVERSION (1 << 4)
+/* The user has explicitly called a destructor. (Therefore, we do
+ not need to check that the object is non-NULL before calling the
+ destructor.) */
+#define LOOKUP_DESTRUCTOR (1 << 5)
+/* Do not permit references to bind to temporaries. */
+#define LOOKUP_NO_TEMP_BIND (1 << 6)
+/* We're inside an init-list, so narrowing conversions are ill-formed. */
+#define LOOKUP_NO_NARROWING (LOOKUP_NO_TEMP_BIND << 1)
+/* We're looking up a constructor for list-initialization. */
+#define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1)
+/* This is the first parameter of a copy constructor. */
+#define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1)
+/* We only want to consider list constructors. */
+#define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1)
+/* Return after determining which function to call and checking access.
+ Used by synthesized_method_walk to determine which functions will
+ be called to initialize subobjects, in order to determine exception
+ specification and possible implicit delete.
+ This is kind of a hack, but exiting early avoids problems with trying
+ to perform argument conversions when the class isn't complete yet. */
+#define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1)
+/* Used by calls from defaulted functions to limit the overload set to avoid
+ cycles trying to declare them (core issue 1092). */
+#define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1)
+/* Used in calls to store_init_value to suppress its usual call to
+ digest_init. */
+#define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1)
+/* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues. */
+#define LOOKUP_NO_RVAL_BIND (LOOKUP_ALREADY_DIGESTED << 1)
+/* Used by case_conversion to disregard non-integral conversions. */
+#define LOOKUP_NO_NON_INTEGRAL (LOOKUP_NO_RVAL_BIND << 1)
+/* Used for delegating constructors in order to diagnose self-delegation. */
+#define LOOKUP_DELEGATING_CONS (LOOKUP_NO_NON_INTEGRAL << 1)
+/* Allow initialization of flexible array members. */
+#define LOOKUP_ALLOW_FLEXARRAY_INIT (LOOKUP_DELEGATING_CONS << 1)
+/* We're looking for either a rewritten comparison operator candidate or the
+ operator to use on the former's result. We distinguish between the two by
+ knowing that comparisons other than == and <=> must be the latter, as must
+ a <=> expression trying to rewrite to <=> without reversing. */
+#define LOOKUP_REWRITTEN (LOOKUP_ALLOW_FLEXARRAY_INIT << 1)
+/* Reverse the order of the two arguments for comparison rewriting. First we
+ swap the arguments in add_operator_candidates, then we swap the conversions
+ in add_candidate (so that they correspond to the original order of the
+ args), then we swap the conversions back in build_new_op_1 (so they
+ correspond to the order of the args in the candidate). */
+#define LOOKUP_REVERSED (LOOKUP_REWRITTEN << 1)
+/* We're initializing an aggregate from a parenthesized list of values. */
+#define LOOKUP_AGGREGATE_PAREN_INIT (LOOKUP_REVERSED << 1)
+/* We're computing conversions as part of a first pass of overload resolution
+ wherein we don't try to distinguish an unviable candidate from a
+ non-strictly viable candidate and thus can avoid computing unnecessary
+ bad conversions. */
+#define LOOKUP_SHORTCUT_BAD_CONVS (LOOKUP_AGGREGATE_PAREN_INIT << 1)
+
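+/* Illustrative combination (hypothetical): copy-initialization that
+ must not bind a reference to a temporary could pass
+ LOOKUP_IMPLICIT | LOOKUP_NO_TEMP_BIND, i.e. protected lookup, only
+ converting constructors, and no binding to temporaries. */
+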
+/* These flags are used by the conversion code.
+ CONV_IMPLICIT : Perform implicit conversions (standard and user-defined).
+ CONV_STATIC : Perform the explicit conversions for static_cast.
+ CONV_CONST : Perform the explicit conversions for const_cast.
+ CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast.
+ CONV_PRIVATE : Perform upcasts to private bases.
+ CONV_FORCE_TEMP : Require a new temporary when converting to the same
+ aggregate type. */
+
+#define CONV_IMPLICIT 1
+#define CONV_STATIC 2
+#define CONV_CONST 4
+#define CONV_REINTERPRET 8
+#define CONV_PRIVATE 16
+#define CONV_FORCE_TEMP 32
+#define CONV_FOLD 64
+#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
+ | CONV_REINTERPRET)
+#define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
+ | CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP)
+#define CONV_BACKEND_CONVERT (CONV_OLD_CONVERT | CONV_FOLD)
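+
+/* For example (illustrative), a C-style cast may convert to an
+ inaccessible base class, which static_cast alone rejects; hence
+ CONV_C_CAST includes CONV_PRIVATE (and CONV_FORCE_TEMP) while
+ CONV_OLD_CONVERT does not. */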
+
+/* Used by build_expr_type_conversion to indicate which types are
+ acceptable as arguments to the expression under consideration. */
+
+#define WANT_INT 1 /* integer types, including bool */
+#define WANT_FLOAT 2 /* floating point types */
+#define WANT_ENUM 4 /* enumerated types */
+#define WANT_POINTER 8 /* pointer types */
+#define WANT_NULL 16 /* null pointer constant */
+#define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */
+#define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX)
+
+/* Used with comptypes, and related functions, to guide type
+ comparison. */
+
+#define COMPARE_STRICT 0 /* Just check if the types are the
+ same. */
+#define COMPARE_BASE 1 /* Check to see if the second type is
+ derived from the first. */
+#define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in
+ reverse. */
+#define COMPARE_REDECLARATION 4 /* The comparison is being done when
+ another declaration of an existing
+ entity is seen. */
+#define COMPARE_STRUCTURAL 8 /* The comparison is intended to be
+ structural. The actual comparison
+ will be identical to
+ COMPARE_STRICT. */
+
+/* Used with start function. */
+#define SF_DEFAULT 0 /* No flags. */
+#define SF_PRE_PARSED 1 /* The function declaration has
+ already been parsed. */
+#define SF_INCLASS_INLINE 2 /* The function is an inline, defined
+ in the class body. */
+
+/* Used with start_decl's initialized parameter. */
+#define SD_UNINITIALIZED 0
+#define SD_INITIALIZED 1
+/* Like SD_INITIALIZED, but also mark the new decl as DECL_DECOMPOSITION_P. */
+#define SD_DECOMPOSITION 2
+#define SD_DEFAULTED 3
+#define SD_DELETED 4
+
+/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2
+ is derived from TYPE1, or if TYPE2 is a pointer (reference) to a
+ class derived from the type pointed to (referred to) by TYPE1. */
+#define same_or_base_type_p(TYPE1, TYPE2) \
+ comptypes ((TYPE1), (TYPE2), COMPARE_BASE)
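+
+/* For example (illustrative), given "struct A {}; struct B : A {};",
+ same_or_base_type_p holds for (A, B) because B is derived from A,
+ and trivially for (A, A). */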
+
+/* These macros are used to access a TEMPLATE_PARM_INDEX. */
+#define TEMPLATE_PARM_INDEX_CAST(NODE) \
+ ((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
+#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
+#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
+#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
+#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
+#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)
+#define TEMPLATE_PARM_PARAMETER_PACK(NODE) \
+ (TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE)))
+
+/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
+ TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes. */
+#define TEMPLATE_TYPE_PARM_INDEX(NODE) \
+ (TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, \
+ TEMPLATE_TEMPLATE_PARM, \
+ BOUND_TEMPLATE_TEMPLATE_PARM)))
+#define TEMPLATE_TYPE_IDX(NODE) \
+ (TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
+#define TEMPLATE_TYPE_LEVEL(NODE) \
+ (TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
+#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
+ (TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
+#define TEMPLATE_TYPE_DECL(NODE) \
+ (TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
+#define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \
+ (TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE)))
+
+/* For a C++17 class deduction placeholder, the template it represents. */
+#define CLASS_PLACEHOLDER_TEMPLATE(NODE) \
+ (DECL_INITIAL (TYPE_NAME (TEMPLATE_TYPE_PARM_CHECK (NODE))))
+
+/* Contexts in which auto deduction occurs. These flags are
+ used to control diagnostics in do_auto_deduction. */
+
+enum auto_deduction_context
+{
+ adc_unspecified, /* Not given */
+ adc_variable_type, /* Variable initializer deduction */
+ adc_return_type, /* Return type deduction */
+ adc_unify, /* Template argument deduction */
+ adc_requirement, /* Argument deduction constraint */
+ adc_decomp_type /* Decomposition declaration initializer deduction */
+};
+
+/* True iff this TEMPLATE_TYPE_PARM represents decltype(auto). */
+#define AUTO_IS_DECLTYPE(NODE) \
+ (TYPE_LANG_FLAG_5 (TEMPLATE_TYPE_PARM_CHECK (NODE)))
+
+/* These constants can be used as bit flags in the process of tree formatting.
+
+ TFF_PLAIN_IDENTIFIER: unqualified part of a name.
+ TFF_SCOPE: include the class and namespace scope of the name.
+ TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name.
+ TFF_DECL_SPECIFIERS: print decl-specifiers.
+ TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with
+ a class-key (resp. `enum').
+ TFF_RETURN_TYPE: include function return type.
+ TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values.
+ TFF_EXCEPTION_SPECIFICATION: show function exception specification.
+ TFF_TEMPLATE_HEADER: show the template<...> header in a
+ template-declaration.
+ TFF_TEMPLATE_NAME: show only template-name.
+ TFF_EXPR_IN_PARENS: parenthesize expressions.
+ TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments.
+ TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the
+ top-level entity.
+ TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments
+ identical to their defaults.
+ TFF_NO_TEMPLATE_BINDINGS: do not print information about the template
+ arguments for a function template specialization.
+ TFF_POINTER: we are printing a pointer type. */
+
+#define TFF_PLAIN_IDENTIFIER (0)
+#define TFF_SCOPE (1)
+#define TFF_CHASE_TYPEDEF (1 << 1)
+#define TFF_DECL_SPECIFIERS (1 << 2)
+#define TFF_CLASS_KEY_OR_ENUM (1 << 3)
+#define TFF_RETURN_TYPE (1 << 4)
+#define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5)
+#define TFF_EXCEPTION_SPECIFICATION (1 << 6)
+#define TFF_TEMPLATE_HEADER (1 << 7)
+#define TFF_TEMPLATE_NAME (1 << 8)
+#define TFF_EXPR_IN_PARENS (1 << 9)
+#define TFF_NO_FUNCTION_ARGUMENTS (1 << 10)
+#define TFF_UNQUALIFIED_NAME (1 << 11)
+#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS (1 << 12)
+#define TFF_NO_TEMPLATE_BINDINGS (1 << 13)
+#define TFF_POINTER (1 << 14)
+
+/* These constants can be used as bit flags to control strip_typedefs.
+
+ STF_USER_VISIBLE: use heuristics to try to avoid stripping user-facing
+ aliases of internal details. This is intended for diagnostics,
+ where it should (for example) give more useful "aka" types.
+
+ STF_STRIP_DEPENDENT: allow the stripping of aliases with dependent
+ template parameters, relying on code elsewhere to report any
+ appropriate diagnostics. */
+const unsigned int STF_USER_VISIBLE = 1U;
+const unsigned int STF_STRIP_DEPENDENT = 1U << 1;
+
+/* Returns the TEMPLATE_DECL associated with a TEMPLATE_TEMPLATE_PARM
+ node. */
+#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \
+ ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
+ ? TYPE_TI_TEMPLATE (NODE) \
+ : TYPE_NAME (NODE))
+
+/* in lex.cc */
+
+extern void init_reswords (void);
+
+/* Various flags for the overloaded operator information. */
+enum ovl_op_flags {
+ OVL_OP_FLAG_NONE = 0, /* Don't care. */
+ OVL_OP_FLAG_UNARY = 1, /* Is unary. */
+ OVL_OP_FLAG_BINARY = 2, /* Is binary. */
+ OVL_OP_FLAG_AMBIARY = 3, /* May be unary or binary. */
+ OVL_OP_FLAG_ALLOC = 4, /* operator new or delete. */
+ OVL_OP_FLAG_DELETE = 1, /* operator delete. */
+ OVL_OP_FLAG_VEC = 2 /* vector new or delete. */
+};
+
+/* Compressed operator codes. Order is determined by operators.def
+ and does not match that of tree_codes. */
+enum ovl_op_code {
+ OVL_OP_ERROR_MARK,
+ OVL_OP_NOP_EXPR,
+#define DEF_OPERATOR(NAME, CODE, MANGLING, FLAGS) OVL_OP_##CODE,
+#define DEF_ASSN_OPERATOR(NAME, CODE, MANGLING) /* NOTHING */
+#include "operators.def"
+ OVL_OP_MAX
+};
+
+/* Make sure it fits in lang_decl_fn::ovl_op_code. */
+STATIC_ASSERT (OVL_OP_MAX < (1 << 6));
+
+struct GTY(()) ovl_op_info_t {
+ /* The IDENTIFIER_NODE for the operator. */
+ tree identifier;
+ /* The name of the operator. */
+ const char *name;
+ /* The mangled name of the operator. */
+ const char *mangled_name;
+ /* The (regular) tree code. */
+ enum tree_code tree_code : 16;
+ /* The (compressed) operator code. */
+ enum ovl_op_code ovl_op_code : 8;
+ /* The ovl_op_flags of the operator. */
+ unsigned flags : 8;
+};
+
+/* Overloaded operator info indexed by ass_op_p & ovl_op_code. */
+extern GTY(()) ovl_op_info_t ovl_op_info[2][OVL_OP_MAX];
+/* Mapping from tree_codes to ovl_op_codes. */
+extern GTY(()) unsigned char ovl_op_mapping[MAX_TREE_CODES];
+/* Mapping for ambi-ary operators from the binary to the unary. */
+extern GTY(()) unsigned char ovl_op_alternate[OVL_OP_MAX];
+
+/* Given an ass_op_p boolean and a tree code, return a pointer to its
+ overloaded operator info. Tree codes for non-overloaded operators
+ map to the error-operator. */
+#define OVL_OP_INFO(IS_ASS_P, TREE_CODE) \
+ (&ovl_op_info[(IS_ASS_P) != 0][ovl_op_mapping[(TREE_CODE)]])
+/* Overloaded operator info for an identifier for which
+ IDENTIFIER_OVL_OP_P is true. */
+#define IDENTIFIER_OVL_OP_INFO(NODE) \
+ (&ovl_op_info[IDENTIFIER_KIND_BIT_0 (NODE)][IDENTIFIER_CP_INDEX (NODE)])
+#define IDENTIFIER_OVL_OP_FLAGS(NODE) \
+ (IDENTIFIER_OVL_OP_INFO (NODE)->flags)
+
+inline tree ovl_op_identifier (bool isass, tree_code code)
+{ return OVL_OP_INFO(isass, code)->identifier; }
+inline tree ovl_op_identifier (tree_code code) { return ovl_op_identifier (false, code); }
+#define assign_op_identifier (ovl_op_info[true][OVL_OP_NOP_EXPR].identifier)
+#define call_op_identifier (ovl_op_info[false][OVL_OP_CALL_EXPR].identifier)
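+
+/* For example (illustrative), ovl_op_identifier (PLUS_EXPR) yields the
+ identifier for "operator+", while ovl_op_identifier (true, PLUS_EXPR)
+ yields the identifier for "operator+=". */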
+
+/* A type-qualifier, or bitmask thereof, using the TYPE_QUAL
+ constants. */
+
+typedef int cp_cv_quals;
+
+/* Non-static member functions have an optional virt-specifier-seq.
+ There is a VIRT_SPEC value for each virt-specifier.
+ They can be combined by bitwise-or to form the complete set of
+ virt-specifiers for a member function. */
+enum virt_specifier
+ {
+ VIRT_SPEC_UNSPECIFIED = 0x0,
+ VIRT_SPEC_FINAL = 0x1,
+ VIRT_SPEC_OVERRIDE = 0x2
+ };
+
+/* A virt-specifier, or bitmask thereof, using the VIRT_SPEC
+ constants. */
+
+typedef int cp_virt_specifiers;
+
+/* Wherever there is a function-cv-qual, there could also be a ref-qualifier:
+
+ [dcl.fct]
+ The return type, the parameter-type-list, the ref-qualifier, and
+ the cv-qualifier-seq, but not the default arguments or the exception
+ specification, are part of the function type.
+
+ REF_QUAL_NONE Ordinary member function with no ref-qualifier
+ REF_QUAL_LVALUE Member function with the &-ref-qualifier
+ REF_QUAL_RVALUE Member function with the &&-ref-qualifier */
+
+enum cp_ref_qualifier {
+ REF_QUAL_NONE = 0,
+ REF_QUAL_LVALUE = 1,
+ REF_QUAL_RVALUE = 2
+};
+
+/* A storage class. */
+
+enum cp_storage_class {
+ /* sc_none must be zero so that zeroing a cp_decl_specifier_seq
+ sets the storage_class field to sc_none. */
+ sc_none = 0,
+ sc_auto,
+ sc_register,
+ sc_static,
+ sc_extern,
+ sc_mutable
+};
+
+/* An individual decl-specifier. This is used to index the array of
+ locations for the declspecs in struct cp_decl_specifier_seq
+ below. */
+
+enum cp_decl_spec {
+ ds_first,
+ ds_signed = ds_first,
+ ds_unsigned,
+ ds_short,
+ ds_long,
+ ds_const,
+ ds_volatile,
+ ds_restrict,
+ ds_inline,
+ ds_virtual,
+ ds_explicit,
+ ds_friend,
+ ds_typedef,
+ ds_alias,
+ ds_constexpr,
+ ds_complex,
+ ds_constinit,
+ ds_consteval,
+ ds_thread,
+ ds_type_spec,
+ ds_redefined_builtin_type_spec,
+ ds_attribute,
+ ds_std_attribute,
+ ds_storage_class,
+ ds_long_long,
+ ds_concept,
+ ds_last /* This enumerator must always be the last one. */
+};
+
+/* A decl-specifier-seq. */
+
+struct cp_decl_specifier_seq {
+ /* An array of locations for the declaration specifiers, indexed by
+ enum cp_decl_spec_word. */
+ location_t locations[ds_last];
+ /* The primary type, if any, given by the decl-specifier-seq.
+ Modifiers, like "short", "const", and "unsigned" are not
+ reflected here. This field will be a TYPE, unless a typedef-name
+ was used, in which case it will be a TYPE_DECL. */
+ tree type;
+ /* The attributes, if any, provided with the specifier sequence. */
+ tree attributes;
+ /* The C++11 attributes that follow the type specifier. */
+ tree std_attributes;
+ /* If non-NULL, a built-in type that the user attempted to redefine
+ to some other type. */
+ tree redefined_builtin_type;
+ /* The explicit-specifier, if any. */
+ tree explicit_specifier;
+ /* The storage class specified -- or sc_none if no storage class was
+ explicitly specified. */
+ cp_storage_class storage_class;
+ /* For the __intN declspec, this stores the index into the int_n_* arrays. */
+ int int_n_idx;
+ /* True iff TYPE_SPEC defines a class or enum. */
+ BOOL_BITFIELD type_definition_p : 1;
+ /* True iff multiple types were (erroneously) specified for this
+ decl-specifier-seq. */
+ BOOL_BITFIELD multiple_types_p : 1;
+ /* True iff multiple storage classes were (erroneously) specified
+ for this decl-specifier-seq or a combination of a storage class
+ with a typedef specifier. */
+ BOOL_BITFIELD conflicting_specifiers_p : 1;
+ /* True iff at least one decl-specifier was found. */
+ BOOL_BITFIELD any_specifiers_p : 1;
+ /* True iff at least one type-specifier was found. */
+ BOOL_BITFIELD any_type_specifiers_p : 1;
+ /* True iff "int" was explicitly provided. */
+ BOOL_BITFIELD explicit_int_p : 1;
+ /* True iff "__intN" was explicitly provided. */
+ BOOL_BITFIELD explicit_intN_p : 1;
+ /* True iff "char" was explicitly provided. */
+ BOOL_BITFIELD explicit_char_p : 1;
+ /* True iff ds_thread is set for __thread, not thread_local. */
+ BOOL_BITFIELD gnu_thread_keyword_p : 1;
+ /* True iff the type is a decltype. */
+ BOOL_BITFIELD decltype_p : 1;
+ /* True iff the alternate "__intN__" form of the __intN type has been
+ used. */
+ BOOL_BITFIELD int_n_alt: 1;
+};
+
+/* The various kinds of declarators. */
+
+enum cp_declarator_kind {
+ cdk_id,
+ cdk_function,
+ cdk_array,
+ cdk_pointer,
+ cdk_reference,
+ cdk_ptrmem,
+ cdk_decomp,
+ cdk_error
+};
+
+/* A declarator. */
+
+typedef struct cp_declarator cp_declarator;
+
+typedef struct cp_parameter_declarator cp_parameter_declarator;
+
+/* A parameter, before it has been semantically analyzed. */
+struct cp_parameter_declarator {
+ /* The next parameter, or NULL_TREE if none. */
+ cp_parameter_declarator *next;
+ /* The decl-specifiers-seq for the parameter. */
+ cp_decl_specifier_seq decl_specifiers;
+ /* The declarator for the parameter. */
+ cp_declarator *declarator;
+ /* The default-argument expression, or NULL_TREE, if none. */
+ tree default_argument;
+ /* True iff this is a template parameter pack. */
+ bool template_parameter_pack_p;
+ /* Location within source. */
+ location_t loc;
+};
+
+/* A declarator. */
+struct cp_declarator {
+ /* The kind of declarator. */
+ ENUM_BITFIELD (cp_declarator_kind) kind : 4;
+ /* Whether we parsed an ellipsis (`...') just before the declarator,
+ to indicate this is a parameter pack. */
+ BOOL_BITFIELD parameter_pack_p : 1;
+ /* If this declarator is parenthesized, this is the location of the
+ open-paren. It is UNKNOWN_LOCATION when not parenthesized. */
+ location_t parenthesized;
+ /* Currently only set for cdk_id, cdk_decomp and cdk_function. */
+ location_t id_loc;
+ /* If this declarator is part of an init-declarator, the location of the
+ initializer. */
+ location_t init_loc;
+ /* GNU Attributes that apply to this declarator. If the declarator
+ is a pointer or a reference, these attributes apply to the type
+ pointed to. */
+ tree attributes;
+ /* Standard C++11 attributes that apply to this declarator. If the
+ declarator is a pointer or a reference, these attributes apply
+ to the pointer, rather than to the type pointed to. */
+ tree std_attributes;
+ /* For all but cdk_id, cdk_decomp and cdk_error, the contained declarator.
+ For cdk_id, cdk_decomp and cdk_error, guaranteed to be NULL. */
+ cp_declarator *declarator;
+ union {
+ /* For identifiers. */
+ struct {
+ /* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
+ *_TYPE) for this identifier. */
+ tree qualifying_scope;
+ /* The unqualified name of the entity -- an IDENTIFIER_NODE,
+ BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */
+ tree unqualified_name;
+ /* If this is the name of a function, what kind of special
+ function (if any). */
+ special_function_kind sfk;
+ } id;
+ /* For functions. */
+ struct {
+ /* The parameters to the function as a TREE_LIST of decl/default. */
+ tree parameters;
+ /* The cv-qualifiers for the function. */
+ cp_cv_quals qualifiers;
+ /* The virt-specifiers for the function. */
+ cp_virt_specifiers virt_specifiers;
+ /* The ref-qualifier for the function. */
+ cp_ref_qualifier ref_qualifier;
+ /* The transaction-safety qualifier for the function. */
+ tree tx_qualifier;
+ /* The exception-specification for the function. */
+ tree exception_specification;
+ /* The late-specified return type, if any. */
+ tree late_return_type;
+ /* The trailing requires-clause, if any. */
+ tree requires_clause;
+ location_t parens_loc;
+ } function;
+ /* For arrays. */
+ struct {
+ /* The bounds to the array. */
+ tree bounds;
+ } array;
+ /* For cdk_pointer and cdk_ptrmem. */
+ struct {
+ /* The cv-qualifiers for the pointer. */
+ cp_cv_quals qualifiers;
+ /* For cdk_ptrmem, the class type containing the member. */
+ tree class_type;
+ } pointer;
+ /* For cdk_reference */
+ struct {
+ /* The cv-qualifiers for the reference. These qualifiers are
+ only used to diagnose ill-formed code. */
+ cp_cv_quals qualifiers;
+ /* Whether this is an rvalue reference */
+ bool rvalue_ref;
+ } reference;
+ } u;
+};
+
+/* A level of template instantiation. */
+struct GTY((chain_next ("%h.next"))) tinst_level {
+ /* The immediately deeper level in the chain. */
+ struct tinst_level *next;
+
+ /* The original node. TLDCL can be a DECL (for a function or static
+ data member), a TYPE (for a class), depending on what we were
+ asked to instantiate, or a TREE_LIST with the template as PURPOSE
+ and the template args as VALUE, if we are substituting for
+ overload resolution. In all these cases, TARGS is NULL.
+ However, to avoid creating TREE_LIST objects for substitutions if
+ we can help it, we store PURPOSE and VALUE in TLDCL and TARGS,
+ respectively. So TLDCL stands for TREE_LIST or DECL (the
+ template is a DECL too), whereas TARGS stands for the template
+ arguments. */
+ tree tldcl, targs;
+
+ /* For modules we need to know (a) the modules on the path of
+ instantiation and (b) the transitive imports along that path.
+ Note that these two bitmaps may be inherited from NEXT, if this
+ decl is in the same module as NEXT (or has no new information). */
+ bitmap path;
+ bitmap visible;
+
+ private:
+ /* Return TRUE iff the original node is a split list. */
+ bool split_list_p () const { return targs; }
+
+ /* Return TRUE iff the original node is a TREE_LIST object. */
+ bool tree_list_p () const
+ {
+ return !split_list_p () && TREE_CODE (tldcl) == TREE_LIST;
+ }
+
+ /* Return TRUE iff the original node is not a list, split or not. */
+ bool not_list_p () const
+ {
+ return !split_list_p () && !tree_list_p ();
+ }
+
+ /* Convert (in place) the original node from a split list to a
+ TREE_LIST. */
+ tree to_list ();
+
+ public:
+ /* Release storage for OBJ and node, if it's a TREE_LIST. */
+ static void free (tinst_level *obj);
+
+ /* Return TRUE iff the original node is a list, split or not. */
+ bool list_p () const { return !not_list_p (); }
+
+ /* Return the original node; if it's a split list, make it a
+ TREE_LIST first, so that it can be returned as a single tree
+ object. */
+ tree get_node () {
+ if (!split_list_p ()) return tldcl;
+ else return to_list ();
+ }
+
+ /* Return the original node if it's a DECL or a TREE_LIST, but do
+ NOT convert a split list to a TREE_LIST: return NULL instead. */
+ tree maybe_get_node () const {
+ if (!split_list_p ()) return tldcl;
+ else return NULL_TREE;
+ }
+
+ /* The location where the template is instantiated. */
+ location_t locus;
+
+ /* errorcount + sorrycount when we pushed this level. */
+ unsigned short errors;
+
+ /* Count of references to this object. If the refcount reaches the
+ refcount_infinity value, we no longer increment or decrement it,
+ as it is no longer accurate. The object can still be garbage
+ collected if it is unreferenced from everywhere, but the saturated
+ count might keep referenced objects alive longer than otherwise
+ necessary. Hitting infinity is rare, though. */
+ unsigned short refcount;
+
+ /* Infinity value for the above refcount. */
+ static const unsigned short refcount_infinity = (unsigned short) ~0;
+};
+
+/* BUILT_IN_FRONTEND function codes. */
+enum cp_built_in_function {
+ CP_BUILT_IN_IS_CONSTANT_EVALUATED,
+ CP_BUILT_IN_INTEGER_PACK,
+ CP_BUILT_IN_IS_CORRESPONDING_MEMBER,
+ CP_BUILT_IN_IS_POINTER_INTERCONVERTIBLE_WITH_CLASS,
+ CP_BUILT_IN_SOURCE_LOCATION,
+ CP_BUILT_IN_LAST
+};
+
+bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec);
+
+/* Return the type of the `this' parameter of FNTYPE. */
+
+inline tree
+type_of_this_parm (const_tree fntype)
+{
+ function_args_iterator iter;
+ gcc_assert (TREE_CODE (fntype) == METHOD_TYPE);
+ function_args_iter_init (&iter, fntype);
+ return function_args_iter_cond (&iter);
+}
+
+/* Return the class of the `this' parameter of FNTYPE. */
+
+inline tree
+class_of_this_parm (const_tree fntype)
+{
+ return TREE_TYPE (type_of_this_parm (fntype));
+}
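+
+/* For example (illustrative), for the METHOD_TYPE of
+ "void S::f () const", type_of_this_parm yields "const S *" and
+ class_of_this_parm yields "const S". */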
+
+/* A parameter list for a function that takes no parameters,
+ e.g. "int f(void)". */
+extern cp_parameter_declarator *no_parameters;
+
+/* Various dump ids. */
+extern int class_dump_id;
+extern int module_dump_id;
+extern int raw_dump_id;
+
+/* in call.cc */
+extern bool check_dtor_name (tree, tree);
+int magic_varargs_p (tree);
+
+extern tree build_conditional_expr (const op_location_t &,
+ tree, tree, tree,
+ tsubst_flags_t);
+extern tree build_addr_func (tree, tsubst_flags_t);
+extern void set_flags_from_callee (tree);
+extern tree build_call_a (tree, int, tree*);
+extern tree build_call_n (tree, int, ...);
+extern bool null_ptr_cst_p (tree);
+extern bool null_member_pointer_value_p (tree);
+extern bool sufficient_parms_p (const_tree);
+extern tree type_decays_to (tree);
+extern tree extract_call_expr (tree);
+extern tree build_trivial_dtor_call (tree, bool = false);
+extern tristate ref_conv_binds_to_temporary (tree, tree, bool = false);
+extern tree build_user_type_conversion (tree, tree, int,
+ tsubst_flags_t);
+extern tree build_new_function_call (tree, vec<tree, va_gc> **,
+ tsubst_flags_t);
+extern tree build_operator_new_call (tree, vec<tree, va_gc> **,
+ tree *, tree *, tree, tree,
+ tree *, tsubst_flags_t);
+extern tree build_new_method_call (tree, tree,
+ vec<tree, va_gc> **, tree,
+ int, tree *, tsubst_flags_t);
+extern tree build_special_member_call (tree, tree,
+ vec<tree, va_gc> **,
+ tree, int, tsubst_flags_t);
+extern tree build_new_op (const op_location_t &,
+ enum tree_code,
+ int, tree, tree, tree, tree,
+ tree *, tsubst_flags_t);
+/* Wrapper that leaves out the usually-null op3 and overload parms. */
+inline tree build_new_op (const op_location_t &loc, enum tree_code code,
+ int flags, tree arg1, tree arg2,
+ tsubst_flags_t complain)
+{
+ return build_new_op (loc, code, flags, arg1, arg2, NULL_TREE, NULL_TREE,
+ NULL, complain);
+}
+extern tree keep_unused_object_arg (tree, tree, tree);
+extern tree build_op_call (tree, vec<tree, va_gc> **,
+ tsubst_flags_t);
+extern tree build_op_subscript (const op_location_t &, tree,
+ vec<tree, va_gc> **, tree *,
+ tsubst_flags_t);
+extern bool aligned_allocation_fn_p (tree);
+extern tree destroying_delete_p (tree);
+extern bool usual_deallocation_fn_p (tree);
+extern tree build_op_delete_call (enum tree_code, tree, tree,
+ bool, tree, tree,
+ tsubst_flags_t);
+extern bool can_convert (tree, tree, tsubst_flags_t);
+extern bool can_convert_standard (tree, tree, tsubst_flags_t);
+extern bool can_convert_arg (tree, tree, tree, int,
+ tsubst_flags_t);
+extern bool can_convert_arg_bad (tree, tree, tree, int,
+ tsubst_flags_t);
+extern int conv_flags (int, int, tree, tree, int);
+extern struct conversion * good_conversion (tree, tree, tree, int, tsubst_flags_t);
+extern location_t get_fndecl_argument_location (tree, int);
+extern void complain_about_bad_argument (location_t arg_loc,
+ tree from_type, tree to_type,
+ tree fndecl, int parmnum);
+extern void maybe_inform_about_fndecl_for_bogus_argument_init (tree, int);
+extern tree perform_dguide_overload_resolution (tree, const vec<tree, va_gc> *,
+ tsubst_flags_t);
+
+
+/* A class for recording information about access failures (e.g. private
+ fields), so that we can potentially supply a fix-it hint about
+ an accessor (from a context in which the constness of the object
+ is known). */
+
+class access_failure_info
+{
+ public:
+ access_failure_info () : m_was_inaccessible (false),
+ m_basetype_path (NULL_TREE),
+ m_decl (NULL_TREE), m_diag_decl (NULL_TREE) {}
+
+ void record_access_failure (tree basetype_path, tree decl, tree diag_decl);
+
+ bool was_inaccessible_p () const { return m_was_inaccessible; }
+ tree get_decl () const { return m_decl; }
+ tree get_diag_decl () const { return m_diag_decl; }
+ tree get_any_accessor (bool const_p) const;
+ void maybe_suggest_accessor (bool const_p) const;
+ static void add_fixit_hint (rich_location *richloc, tree accessor);
+
+ private:
+ bool m_was_inaccessible;
+ tree m_basetype_path;
+ tree m_decl;
+ tree m_diag_decl;
+};
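+
+/* Editor's sketch of the intended flow, using only the members declared
+ above; the wrapper name is hypothetical. Record the failure when the
+ member lookup is rejected, then suggest an accessor once the constness
+ of the object is known. */
+static void
+sketch_report_access_failure (tree basetype_path, tree decl, bool const_p)
+{
+ access_failure_info afi;
+ afi.record_access_failure (basetype_path, decl, decl);
+ if (afi.was_inaccessible_p ())
+ afi.maybe_suggest_accessor (const_p);
+}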
+
+extern void complain_about_access (tree, tree, tree, bool,
+ access_kind);
+extern void push_defarg_context (tree);
+extern void pop_defarg_context (void);
+extern tree convert_default_arg (tree, tree, tree, int,
+ tsubst_flags_t);
+extern tree convert_arg_to_ellipsis (tree, tsubst_flags_t);
+extern tree build_x_va_arg (location_t, tree, tree);
+extern tree cxx_type_promotes_to (tree);
+extern tree type_passed_as (tree);
+extern tree convert_for_arg_passing (tree, tree, tsubst_flags_t);
+extern bool is_properly_derived_from (tree, tree);
+extern tree initialize_reference (tree, tree, int,
+ tsubst_flags_t);
+extern tree extend_ref_init_temps (tree, tree,
+ vec<tree, va_gc>**,
+ tree * = NULL);
+extern tree make_temporary_var_for_ref_to_temp (tree, tree);
+extern bool type_has_extended_temps (tree);
+extern tree strip_top_quals (tree);
+extern bool reference_related_p (tree, tree);
+extern bool reference_compatible_p (tree, tree);
+extern int remaining_arguments (tree);
+extern tree build_implicit_conv_flags (tree, tree, int);
+extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t);
+extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int);
+extern tree build_converted_constant_expr (tree, tree, tsubst_flags_t);
+extern tree build_converted_constant_bool_expr (tree, tsubst_flags_t);
+extern tree perform_direct_initialization_if_possible (tree, tree, bool,
+ tsubst_flags_t);
+extern vec<tree,va_gc> *resolve_args (vec<tree,va_gc>*, tsubst_flags_t);
+extern tree in_charge_arg_for_name (tree);
+extern bool in_immediate_context ();
+extern tree build_cxx_call (tree, int, tree *,
+ tsubst_flags_t,
+ tree = NULL_TREE);
+extern bool is_std_init_list (tree);
+extern bool is_list_ctor (tree);
+extern void validate_conversion_obstack (void);
+extern void mark_versions_used (tree);
+extern int unsafe_return_slot_p (tree);
+extern bool unsafe_copy_elision_p (tree, tree);
+extern bool make_safe_copy_elision (tree, tree);
+extern bool cp_handle_deprecated_or_unavailable (tree, tsubst_flags_t = tf_warning_or_error);
+extern void cp_warn_deprecated_use_scopes (tree);
+extern tree get_function_version_dispatcher (tree);
+extern bool any_template_arguments_need_structural_equality_p (tree);
+
+/* in class.cc */
+extern tree build_vfield_ref (tree, tree);
+extern tree build_if_in_charge (tree true_stmt, tree false_stmt = void_node);
+extern tree build_base_path (enum tree_code, tree,
+ tree, int, tsubst_flags_t);
+extern tree convert_to_base (tree, tree, bool, bool,
+ tsubst_flags_t);
+extern tree convert_to_base_statically (tree, tree);
+extern bool is_empty_base_ref (tree);
+extern tree build_vtbl_ref (tree, tree);
+extern tree build_vfn_ref (tree, tree);
+extern tree get_vtable_decl (tree, int);
+extern bool add_method (tree, tree, bool);
+extern tree declared_access (tree);
+extern bool maybe_push_used_methods (tree);
+extern tree currently_open_class (tree);
+extern tree currently_open_derived_class (tree);
+extern tree outermost_open_class (void);
+extern tree current_nonlambda_class_type (void);
+extern tree finish_struct (tree, tree);
+extern void finish_struct_1 (tree);
+extern int resolves_to_fixed_type_p (tree, int * = NULL);
+extern void init_class_processing (void);
+extern int is_empty_class (tree);
+extern bool is_really_empty_class (tree, bool);
+extern void pushclass (tree);
+extern void popclass (void);
+extern void push_nested_class (tree);
+extern void pop_nested_class (void);
+extern int current_lang_depth (void);
+extern void push_lang_context (tree);
+extern void pop_lang_context (void);
+extern tree instantiate_type (tree, tree, tsubst_flags_t);
+extern void build_self_reference (void);
+extern int same_signature_p (const_tree, const_tree);
+extern tree lookup_vfn_in_binfo (tree, tree);
+extern void maybe_add_class_template_decl_list (tree, tree, int);
+extern void unreverse_member_declarations (tree);
+extern bool is_empty_field (tree);
+extern void invalidate_class_lookup_cache (void);
+extern void maybe_note_name_used_in_class (tree, tree);
+extern void note_name_declared_in_class (tree, tree);
+extern tree get_vtbl_decl_for_binfo (tree);
+extern bool vptr_via_virtual_p (tree);
+extern void debug_class (tree);
+extern void debug_thunks (tree);
+extern void set_linkage_according_to_type (tree, tree);
+extern void determine_key_method (tree);
+extern void check_for_override (tree, tree);
+extern void push_class_stack (void);
+extern void pop_class_stack (void);
+extern bool default_ctor_p (const_tree);
+extern bool type_has_user_nondefault_constructor (tree);
+extern tree in_class_defaulted_default_constructor (tree);
+extern bool user_provided_p (tree);
+extern bool type_has_user_provided_constructor (tree);
+extern bool type_has_non_user_provided_default_constructor (tree);
+extern bool vbase_has_user_provided_move_assign (tree);
+extern tree default_init_uninitialized_part (tree);
+extern bool trivial_default_constructor_is_constexpr (tree);
+extern bool type_has_constexpr_default_constructor (tree);
+extern bool type_has_constexpr_destructor (tree);
+extern bool type_has_virtual_destructor (tree);
+extern bool classtype_has_move_assign_or_move_ctor_p (tree, bool user_declared);
+extern bool classtype_has_non_deleted_move_ctor (tree);
+extern tree classtype_has_depr_implicit_copy (tree);
+extern bool classtype_has_op (tree, tree_code);
+extern tree classtype_has_defaulted_op (tree, tree_code);
+extern bool type_build_ctor_call (tree);
+extern bool type_build_dtor_call (tree);
+extern void explain_non_literal_class (tree);
+extern void inherit_targ_abi_tags (tree);
+extern void defaulted_late_check (tree);
+extern bool defaultable_fn_check (tree);
+extern void check_abi_tags (tree);
+extern tree missing_abi_tags (tree);
+extern void fixup_type_variants (tree);
+extern void fixup_attribute_variants (tree);
+extern void build_cdtor_clones (tree, bool, bool, bool);
+extern void clone_cdtor (tree, bool);
+extern tree copy_operator_fn (tree, tree_code code);
+extern void adjust_clone_args (tree);
+extern void deduce_noexcept_on_destructor (tree);
+extern bool uniquely_derived_from_p (tree, tree);
+extern bool publicly_uniquely_derived_p (tree, tree);
+extern tree common_enclosing_class (tree, tree);
+
+/* in cvt.cc */
+extern tree convert_to_reference (tree, tree, int, int, tree,
+ tsubst_flags_t);
+extern tree convert_from_reference (tree);
+extern tree force_rvalue (tree, tsubst_flags_t);
+extern tree ocp_convert (tree, tree, int, int,
+ tsubst_flags_t);
+extern tree cp_convert (tree, tree, tsubst_flags_t);
+extern tree cp_convert_and_check (tree, tree, tsubst_flags_t);
+extern tree cp_fold_convert (tree, tree);
+extern tree cp_get_callee (tree);
+extern tree cp_get_callee_fndecl (tree);
+extern tree cp_get_callee_fndecl_nofold (tree);
+extern tree cp_get_fndecl_from_callee (tree, bool fold = true);
+extern tree convert_to_void (tree, impl_conv_void,
+ tsubst_flags_t);
+extern tree convert_force (tree, tree, int,
+ tsubst_flags_t);
+extern tree build_expr_type_conversion (int, tree, bool);
+extern tree type_promotes_to (tree);
+extern bool can_convert_qual (tree, tree);
+extern tree perform_qualification_conversions (tree, tree);
+extern bool tx_safe_fn_type_p (tree);
+extern tree tx_unsafe_fn_variant (tree);
+extern bool fnptr_conv_p (tree, tree);
+extern tree strip_fnptr_conv (tree);
+
+/* in name-lookup.cc */
+extern void maybe_push_cleanup_level (tree);
+extern tree maybe_push_decl (tree);
+extern tree current_decl_namespace (void);
+
+/* decl.cc */
+extern tree poplevel (int, int, int);
+extern void cxx_init_decl_processing (void);
+enum cp_tree_node_structure_enum cp_tree_node_structure
+ (union lang_tree_node *);
+extern void finish_scope (void);
+extern void push_switch (tree);
+extern void pop_switch (void);
+extern void note_break_stmt (void);
+extern bool note_iteration_stmt_body_start (void);
+extern void note_iteration_stmt_body_end (bool);
+extern void determine_local_discriminator (tree);
+extern bool fns_correspond (tree, tree);
+extern int decls_match (tree, tree, bool = true);
+extern bool maybe_version_functions (tree, tree, bool);
+extern bool merge_default_template_args (tree, tree, bool);
+extern tree duplicate_decls (tree, tree,
+ bool hiding = false,
+ bool was_hidden = false);
+extern tree declare_local_label (tree);
+extern tree define_label (location_t, tree);
+extern void check_goto (tree);
+extern bool check_omp_return (void);
+extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t);
+extern tree build_typename_type (tree, tree, tree, tag_types);
+extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t);
+extern tree make_unbound_class_template_raw (tree, tree, tree);
+extern unsigned push_abi_namespace (tree node = abi_node);
+extern void pop_abi_namespace (unsigned flags,
+ tree node = abi_node);
+extern tree build_library_fn_ptr (const char *, tree, int);
+extern tree build_cp_library_fn_ptr (const char *, tree, int);
+extern tree push_library_fn (tree, tree, tree, int);
+extern tree push_throw_library_fn (tree, tree);
+extern void warn_misplaced_attr_for_class_type (location_t location,
+ tree class_type);
+extern tree check_tag_decl (cp_decl_specifier_seq *, bool);
+extern tree shadow_tag (cp_decl_specifier_seq *);
+extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool);
+extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *);
+extern void start_decl_1 (tree, bool);
+extern bool check_array_initializer (tree, tree, tree);
+extern void omp_declare_variant_finalize (tree, tree);
+extern void cp_finish_decl (tree, tree, bool, tree, int);
+extern tree lookup_decomp_type (tree);
+extern void cp_maybe_mangle_decomp (tree, tree, unsigned int);
+extern void cp_finish_decomp (tree, tree, unsigned int);
+extern int cp_complete_array_type (tree *, tree, bool);
+extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t);
+extern tree build_ptrmemfunc_type (tree);
+extern tree build_ptrmem_type (tree, tree);
+/* the grokdeclarator prototype is in decl.h */
+extern tree build_this_parm (tree, tree, cp_cv_quals);
+extern tree grokparms (tree, tree *);
+extern int copy_fn_p (const_tree);
+extern bool move_fn_p (const_tree);
+extern bool move_signature_fn_p (const_tree);
+extern tree get_scope_of_declarator (const cp_declarator *);
+extern void grok_special_member_properties (tree);
+extern bool grok_ctor_properties (const_tree, const_tree);
+extern bool grok_op_properties (tree, bool);
+extern tree xref_tag (tag_types, tree,
+ TAG_how = TAG_how::CURRENT_ONLY,
+ bool tpl_header_p = false);
+extern void xref_basetypes (tree, tree);
+extern tree start_enum (tree, tree, tree, tree, bool, bool *);
+extern void finish_enum_value_list (tree);
+extern void finish_enum (tree);
+extern tree build_enumerator (tree, tree, tree, tree, location_t);
+extern tree lookup_enumerator (tree, tree);
+extern bool start_preparsed_function (tree, tree, int);
+extern bool start_function (cp_decl_specifier_seq *,
+ const cp_declarator *, tree);
+extern void maybe_return_this (void);
+extern tree begin_function_body (void);
+extern void finish_function_body (tree);
+extern tree outer_curly_brace_block (tree);
+extern tree finish_function (bool);
+extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree);
+extern void maybe_register_incomplete_var (tree);
+extern void maybe_commonize_var (tree);
+extern void complete_vars (tree);
+extern tree static_fn_type (tree);
+extern void revert_static_member_fn (tree);
+extern void fixup_anonymous_aggr (tree);
+extern tree compute_array_index_type (tree, tree, tsubst_flags_t);
+extern tree check_default_argument (tree, tree, tsubst_flags_t);
+extern int wrapup_namespace_globals ();
+extern tree create_implicit_typedef (tree, tree);
+extern int local_variable_p (const_tree);
+extern tree register_dtor_fn (tree);
+extern tmpl_spec_kind current_tmpl_spec_kind (int);
+extern tree cxx_builtin_function (tree decl);
+extern tree cxx_builtin_function_ext_scope (tree decl);
+extern tree cxx_simulate_builtin_function_decl (tree);
+extern tree check_elaborated_type_specifier (enum tag_types, tree, bool);
+extern void warn_extern_redeclared_static (tree, tree);
+extern tree cxx_comdat_group (tree);
+extern bool cp_missing_noreturn_ok_p (tree);
+extern bool is_direct_enum_init (tree, tree);
+extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *);
+extern tree check_var_type (tree, tree, location_t);
+extern tree reshape_init (tree, tree, tsubst_flags_t);
+extern tree next_aggregate_field (tree);
+extern tree next_subobject_field (tree);
+extern tree first_field (const_tree);
+extern tree fndecl_declared_return_type (tree);
+extern bool undeduced_auto_decl (tree);
+extern bool require_deduced_type (tree, tsubst_flags_t = tf_warning_or_error);
+
+extern tree finish_case_label (location_t, tree, tree);
+extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t);
+extern bool check_array_designated_initializer (constructor_elt *,
+ unsigned HOST_WIDE_INT);
+extern bool check_for_uninitialized_const_var (tree, bool, tsubst_flags_t);
+extern tree build_explicit_specifier (tree, tsubst_flags_t);
+extern void do_push_parm_decls (tree, tree, tree *);
+extern tree do_aggregate_paren_init (tree, tree);
+
+/* in decl2.cc */
+extern void record_mangling (tree, bool);
+extern void overwrite_mangling (tree, tree);
+extern void note_mangling_alias (tree, tree);
+extern void generate_mangling_aliases (void);
+extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier);
+extern tree build_pointer_ptrmemfn_type (tree);
+extern tree change_return_type (tree, tree);
+extern void maybe_retrofit_in_chrg (tree);
+extern void maybe_make_one_only (tree);
+extern bool vague_linkage_p (tree);
+extern void grokclassfn (tree, tree,
+ enum overload_flags);
+extern tree grok_array_decl (location_t, tree, tree,
+ vec<tree, va_gc> **, tsubst_flags_t);
+extern tree delete_sanity (location_t, tree, tree, bool,
+ int, tsubst_flags_t);
+extern tree check_classfn (tree, tree, tree);
+extern void check_member_template (tree);
+extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *,
+ tree, bool, tree, tree);
+extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *,
+ tree, tree, tree);
+extern tree splice_template_attributes (tree *, tree);
+extern bool any_dependent_type_attributes_p (tree);
+extern tree cp_reconstruct_complex_type (tree, tree);
+extern bool attributes_naming_typedef_ok (tree);
+extern void cplus_decl_attributes (tree *, tree, int);
+extern void finish_anon_union (tree);
+extern void cxx_post_compilation_parsing_cleanups (void);
+extern tree coerce_new_type (tree, location_t);
+extern void coerce_delete_type (tree, location_t);
+extern void comdat_linkage (tree);
+extern void determine_visibility (tree);
+extern void constrain_class_visibility (tree);
+extern void reset_type_linkage (tree);
+extern void tentative_decl_linkage (tree);
+extern void import_export_decl (tree);
+extern tree build_cleanup (tree);
+extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **,
+ tsubst_flags_t);
+extern bool decl_defined_p (tree);
+extern bool decl_constant_var_p (tree);
+extern bool decl_maybe_constant_var_p (tree);
+extern void no_linkage_error (tree);
+extern void check_default_args (tree);
+extern bool mark_used (tree,
+ tsubst_flags_t = tf_warning_or_error);
+extern bool mark_single_function (tree, tsubst_flags_t);
+extern void finish_static_data_member_decl (tree, tree, bool, tree, int);
+extern tree cp_build_parm_decl (tree, tree, tree);
+extern void copy_linkage (tree, tree);
+extern tree get_guard (tree);
+extern tree get_guard_cond (tree, bool);
+extern tree set_guard (tree);
+extern bool var_needs_tls_wrapper (tree);
+extern tree maybe_get_tls_wrapper_call (tree);
+extern void mark_needed (tree);
+extern bool decl_needed_p (tree);
+extern void note_vague_linkage_fn (tree);
+extern void note_variable_template_instantiation (tree);
+extern tree build_artificial_parm (tree, tree, tree);
+extern bool possibly_inlined_p (tree);
+extern int parm_index (tree);
+extern tree vtv_start_verification_constructor_init_function (void);
+extern tree vtv_finish_verification_constructor_init_function (tree);
+extern void cp_check_const_attributes (tree);
+
+/* in error.cc */
+extern const char *type_as_string (tree, int);
+extern const char *type_as_string_translate (tree, int);
+extern const char *decl_as_string (tree, int);
+extern const char *decl_as_string_translate (tree, int);
+extern const char *decl_as_dwarf_string (tree, int);
+extern const char *expr_as_string (tree, int);
+extern const char *expr_to_string (tree);
+extern const char *lang_decl_name (tree, int, bool);
+extern const char *lang_decl_dwarf_name (tree, int, bool);
+extern const char *language_to_string (enum languages);
+extern const char *class_key_or_enum_as_string (tree);
+extern void maybe_warn_variadic_templates (void);
+extern void maybe_warn_cpp0x (cpp0x_warn_str str,
+ location_t = input_location);
+extern bool pedwarn_cxx98 (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
+extern location_t location_of (tree);
+extern void qualified_name_lookup_error (tree, tree, tree,
+ location_t);
+
+/* in except.cc */
+extern void init_exception_processing (void);
+extern tree expand_start_catch_block (tree);
+extern void expand_end_catch_block (void);
+extern tree build_exc_ptr (void);
+extern tree build_throw (location_t, tree);
+extern int nothrow_libfn_p (const_tree);
+extern void check_handlers (tree);
+extern tree finish_noexcept_expr (tree, tsubst_flags_t);
+extern bool expr_noexcept_p (tree, tsubst_flags_t);
+extern void perform_deferred_noexcept_checks (void);
+extern bool nothrow_spec_p (const_tree);
+extern bool type_noexcept_p (const_tree);
+extern bool type_throw_all_p (const_tree);
+extern tree build_noexcept_spec (tree, tsubst_flags_t);
+extern void choose_personality_routine (enum languages);
+extern tree build_must_not_throw_expr (tree, tree);
+extern tree eh_type_info (tree);
+extern tree begin_eh_spec_block (void);
+extern void finish_eh_spec_block (tree, tree);
+extern tree build_eh_type_type (tree);
+extern tree cp_protect_cleanup_actions (void);
+extern void maybe_splice_retval_cleanup (tree);
+extern tree maybe_set_retval_sentinel (void);
+
+extern tree template_parms_to_args (tree);
+extern tree template_parms_level_to_args (tree);
+extern tree generic_targs_for (tree);
+extern tree outer_template_args (tree);
+
+/* in expr.cc */
+extern tree cplus_expand_constant (tree);
+extern tree mark_use (tree expr, bool rvalue_p, bool read_p,
+ location_t = UNKNOWN_LOCATION,
+ bool reject_builtin = true);
+extern tree mark_rvalue_use (tree,
+ location_t = UNKNOWN_LOCATION,
+ bool reject_builtin = true);
+extern tree mark_lvalue_use (tree);
+extern tree mark_lvalue_use_nonread (tree);
+extern tree mark_type_use (tree);
+extern tree mark_discarded_use (tree);
+extern void mark_exp_read (tree);
+
+/* friend.cc */
+extern int is_friend (tree, tree);
+extern void make_friend_class (tree, tree, bool);
+extern void add_friend (tree, tree, bool);
+extern tree do_friend (tree, tree, tree,
+ enum overload_flags, bool);
+
+extern void set_global_friend (tree);
+extern bool is_global_friend (tree);
+
+/* in init.cc */
+extern tree find_temps_r (tree *, int *, void *);
+extern tree expand_member_init (tree);
+extern void emit_mem_initializers (tree);
+extern tree build_aggr_init (tree, tree, int,
+ tsubst_flags_t);
+extern int is_class_type (tree, int);
+extern bool is_copy_initialization (tree);
+extern tree build_zero_init (tree, tree, bool);
+extern tree build_value_init (tree, tsubst_flags_t);
+extern tree build_value_init_noctor (tree, tsubst_flags_t);
+extern tree maybe_instantiate_nsdmi_init (tree, tsubst_flags_t);
+extern tree get_nsdmi (tree, bool, tsubst_flags_t);
+extern tree build_offset_ref (tree, tree, bool,
+ tsubst_flags_t);
+extern tree throw_bad_array_new_length (void);
+extern bool type_has_new_extended_alignment (tree);
+extern unsigned malloc_alignment (void);
+extern tree build_new_constexpr_heap_type (tree, tree, tree);
+extern tree build_new (location_t,
+ vec<tree, va_gc> **, tree,
+ tree, vec<tree, va_gc> **,
+ int, tsubst_flags_t);
+extern tree get_temp_regvar (tree, tree);
+extern tree build_vec_init (tree, tree, tree, bool, int,
+ tsubst_flags_t,
+ vec<tree, va_gc> ** = nullptr);
+extern tree build_delete (location_t, tree, tree,
+ special_function_kind,
+ int, int, tsubst_flags_t);
+extern void push_base_cleanups (void);
+extern tree build_vec_delete (location_t, tree, tree,
+ special_function_kind, int,
+ tsubst_flags_t);
+extern tree create_temporary_var (tree);
+extern void initialize_vtbl_ptrs (tree);
+extern tree scalar_constant_value (tree);
+extern tree decl_constant_value (tree, bool);
+extern tree decl_really_constant_value (tree, bool = true);
+extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool);
+extern tree build_vtbl_address (tree);
+extern bool maybe_reject_flexarray_init (tree, tree);
+
+/* in lex.cc */
+extern void cxx_dup_lang_specific_decl (tree);
+extern tree unqualified_name_lookup_error (tree,
+ location_t = UNKNOWN_LOCATION);
+extern tree unqualified_fn_lookup_error (cp_expr);
+extern tree make_conv_op_name (tree);
+extern tree build_lang_decl (enum tree_code, tree, tree);
+extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree);
+extern bool maybe_add_lang_decl_raw (tree, bool decomp_p);
+extern bool maybe_add_lang_type_raw (tree);
+extern void retrofit_lang_decl (tree);
+extern void fit_decomposition_lang_decl (tree, tree);
+extern tree copy_decl (tree CXX_MEM_STAT_INFO);
+extern tree copy_type (tree CXX_MEM_STAT_INFO);
+extern tree cxx_make_type (enum tree_code CXX_MEM_STAT_INFO);
+extern tree make_class_type (enum tree_code CXX_MEM_STAT_INFO);
+extern const char *get_identifier_kind_name (tree);
+extern void set_identifier_kind (tree, cp_identifier_kind);
+extern bool cxx_init (void);
+extern void cxx_finish (void);
+extern bool in_main_input_context (void);
+extern uintptr_t module_token_pre (cpp_reader *, const cpp_token *, uintptr_t);
+extern uintptr_t module_token_cdtor (cpp_reader *, uintptr_t);
+extern uintptr_t module_token_lang (int type, int keyword, tree value,
+ location_t, uintptr_t);
+
+/* in method.cc */
+extern void init_method (void);
+extern tree make_thunk (tree, bool, tree, tree);
+extern void finish_thunk (tree);
+extern void use_thunk (tree, bool);
+extern bool trivial_fn_p (tree);
+extern tree forward_parm (tree);
+extern bool is_trivially_xible (enum tree_code, tree, tree);
+extern bool is_nothrow_xible (enum tree_code, tree, tree);
+extern bool is_xible (enum tree_code, tree, tree);
+extern bool is_convertible (tree, tree);
+extern bool is_nothrow_convertible (tree, tree);
+extern bool ref_xes_from_temporary (tree, tree, bool);
+extern tree get_defaulted_eh_spec (tree, tsubst_flags_t = tf_warning_or_error);
+extern bool maybe_explain_implicit_delete (tree);
+extern void explain_implicit_non_constexpr (tree);
+extern bool deduce_inheriting_ctor (tree);
+extern bool decl_remember_implicit_trigger_p (tree);
+extern void synthesize_method (tree);
+extern void maybe_synthesize_method (tree);
+extern tree lazily_declare_fn (special_function_kind,
+ tree);
+extern tree skip_artificial_parms_for (const_tree, tree);
+extern int num_artificial_parms_for (const_tree);
+extern tree make_alias_for (tree, tree);
+extern tree get_copy_ctor (tree, tsubst_flags_t);
+extern tree get_copy_assign (tree);
+extern tree get_default_ctor (tree);
+extern tree get_dtor (tree, tsubst_flags_t);
+extern tree build_stub_object (tree);
+extern tree strip_inheriting_ctors (tree);
+extern tree inherited_ctor_binfo (tree);
+extern bool base_ctor_omit_inherited_parms (tree);
+extern bool ctor_omit_inherited_parms (tree);
+extern tree locate_ctor (tree);
+extern tree implicitly_declare_fn (special_function_kind, tree,
+ bool, tree, tree);
+/* In module.cc */
+class module_state; /* Forward declare. */
+inline bool modules_p () { return flag_modules != 0; }
+
+/* The kind of module or part thereof that we're in. */
+enum module_kind_bits
+{
+ MK_NAMED = 1 << 0, // TU is a named module
+ MK_HEADER = 1 << 1, // TU is a header unit
+ MK_INTERFACE = 1 << 2, // TU is an interface
+ MK_PARTITION = 1 << 3, // TU is a partition
+
+ MK_PURVIEW = 1 << 4, // In purview of current module
+ MK_ATTACH = 1 << 5, // Attaching to named module
+
+ MK_EXPORTING = 1 << 6, /* We are in an export region. */
+};
+
+/* We do lots of bit-manipulation, so an unsigned is easier. */
+extern unsigned module_kind;
+
+inline bool module_p ()
+{ return module_kind & (MK_NAMED | MK_HEADER); }
+inline bool named_module_p ()
+{ return module_kind & MK_NAMED; }
+inline bool header_module_p ()
+{ return module_kind & MK_HEADER; }
+inline bool module_interface_p ()
+{ return module_kind & MK_INTERFACE; }
+inline bool module_partition_p ()
+{ return module_kind & MK_PARTITION; }
+inline bool module_has_cmi_p ()
+{ return module_kind & (MK_INTERFACE | MK_PARTITION); }
+
+inline bool module_purview_p ()
+{ return module_kind & MK_PURVIEW; }
+inline bool module_attach_p ()
+{ return module_kind & MK_ATTACH; }
+
+inline bool named_module_purview_p ()
+{ return named_module_p () && module_purview_p (); }
+
+/* We're currently exporting declarations. */
+inline bool module_exporting_p ()
+{ return module_kind & MK_EXPORTING; }
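+
+/* Editor's sketch: the predicates above are plain mask tests on
+ module_kind, so they compose directly; the combined query below is
+ hypothetical. */
+inline bool
+sketch_exporting_named_purview_p ()
+{ return named_module_purview_p () && module_exporting_p (); }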
+
+extern module_state *get_module (tree name, module_state *parent = NULL,
+ bool partition = false);
+extern bool module_may_redeclare (tree decl);
+
+extern bool module_global_init_needed ();
+extern bool module_determine_import_inits ();
+extern void module_add_import_initializers ();
+
+/* Where the namespace-scope decl was originally declared. */
+extern void set_originating_module (tree, bool friend_p = false);
+extern tree get_originating_module_decl (tree) ATTRIBUTE_PURE;
+extern int get_originating_module (tree, bool for_mangle = false) ATTRIBUTE_PURE;
+extern unsigned get_importing_module (tree, bool = false) ATTRIBUTE_PURE;
+
+/* Where the current instance of the decl got declared/defined/instantiated. */
+extern void set_instantiating_module (tree);
+extern void set_defining_module (tree);
+extern void maybe_key_decl (tree ctx, tree decl);
+
+extern void mangle_module (int m, bool include_partition);
+extern void mangle_module_fini ();
+extern void lazy_load_binding (unsigned mod, tree ns, tree id,
+ binding_slot *bslot);
+extern void lazy_load_pendings (tree decl);
+extern module_state *preprocess_module (module_state *, location_t,
+ bool in_purview,
+ bool is_import, bool export_p,
+ cpp_reader *reader);
+extern void preprocessed_module (cpp_reader *reader);
+extern void import_module (module_state *, location_t, bool export_p,
+ tree attr, cpp_reader *);
+extern void declare_module (module_state *, location_t, bool export_p,
+ tree attr, cpp_reader *);
+extern void init_modules (cpp_reader *);
+extern void fini_modules (cpp_reader *, void *cookie, bool);
+extern void maybe_check_all_macros (cpp_reader *);
+extern void *finish_module_processing (cpp_reader *);
+extern char const *module_name (unsigned, bool header_ok);
+extern bitmap get_import_bitmap ();
+extern bitmap visible_instantiation_path (bitmap *);
+extern void module_begin_main_file (cpp_reader *, line_maps *,
+ const line_map_ordinary *);
+extern void module_preprocess_options (cpp_reader *);
+extern bool handle_module_option (unsigned opt, const char *arg, int value);
+
+/* In optimize.cc */
+extern bool maybe_clone_body (tree);
+
+/* In parser.cc */
+extern tree cp_convert_range_for (tree, tree, tree, tree, unsigned int, bool,
+ unsigned short);
+extern void cp_convert_omp_range_for (tree &, vec<tree, va_gc> *, tree &,
+ tree &, tree &, tree &, tree &, tree &);
+extern void cp_finish_omp_range_for (tree, tree);
+extern bool parsing_nsdmi (void);
+extern bool parsing_function_declarator ();
+extern bool parsing_default_capturing_generic_lambda_in_template (void);
+extern void inject_this_parameter (tree, cp_cv_quals);
+extern location_t defparse_location (tree);
+extern void maybe_show_extern_c_location (void);
+extern bool literal_integer_zerop (const_tree);
+extern tree attr_chainon (tree, tree);
+
+/* in pt.cc */
+extern tree canonical_type_parameter (tree);
+extern void push_access_scope (tree);
+extern void pop_access_scope (tree);
+extern bool check_template_shadow (tree);
+extern bool check_auto_in_tmpl_args (tree, tree);
+extern tree get_innermost_template_args (tree, int);
+extern void maybe_begin_member_template_processing (tree);
+extern void maybe_end_member_template_processing (void);
+extern tree finish_member_template_decl (tree);
+extern void begin_template_parm_list (void);
+extern bool begin_specialization (void);
+extern void reset_specialization (void);
+extern void end_specialization (void);
+extern void begin_explicit_instantiation (void);
+extern void end_explicit_instantiation (void);
+extern void check_unqualified_spec_or_inst (tree, location_t);
+extern tree check_explicit_specialization (tree, tree, int, int,
+ tree = NULL_TREE);
+extern int num_template_headers_for_class (tree);
+extern void check_template_variable (tree);
+extern tree make_auto (void);
+extern tree make_decltype_auto (void);
+extern tree make_constrained_auto (tree, tree);
+extern tree make_constrained_decltype_auto (tree, tree);
+extern tree make_template_placeholder (tree);
+extern bool template_placeholder_p (tree);
+extern bool ctad_template_p (tree);
+extern bool unparenthesized_id_or_class_member_access_p (tree);
+extern tree do_auto_deduction (tree, tree, tree,
+ tsubst_flags_t
+ = tf_warning_or_error,
+ auto_deduction_context
+ = adc_unspecified,
+ tree = NULL_TREE,
+ int = LOOKUP_NORMAL,
+ tree = NULL_TREE);
+extern tree type_uses_auto (tree);
+extern tree type_uses_auto_or_concept (tree);
+extern void append_type_to_template_for_access_check (tree, tree, tree,
+ location_t);
+extern tree convert_generic_types_to_packs (tree, int, int);
+extern tree splice_late_return_type (tree, tree);
+extern bool is_auto (const_tree);
+extern tree process_template_parm (tree, location_t, tree,
+ bool, bool);
+extern tree end_template_parm_list (tree);
+extern void end_template_parm_list (void);
+extern void end_template_decl (void);
+extern tree maybe_update_decl_type (tree, tree);
+extern bool check_default_tmpl_args (tree, tree, bool, bool, int);
+extern tree push_template_decl (tree, bool is_friend = false);
+extern tree add_inherited_template_parms (tree, tree);
+extern void template_parm_level_and_index (tree, int*, int*);
+extern bool redeclare_class_template (tree, tree, tree);
+extern tree lookup_template_class (tree, tree, tree, tree,
+ int, tsubst_flags_t);
+extern tree lookup_template_function (tree, tree);
+extern tree lookup_template_variable (tree, tree);
+extern bool uses_template_parms (tree);
+extern bool uses_template_parms_level (tree, int);
+extern bool uses_outer_template_parms_in_constraints (tree);
+extern bool in_template_function (void);
+extern bool need_generic_capture (void);
+extern tree instantiate_class_template (tree);
+extern tree instantiate_template (tree, tree, tsubst_flags_t);
+extern tree fn_type_unification (tree, tree, tree,
+ const tree *, unsigned int,
+ tree, unification_kind_t, int,
+ struct conversion **,
+ bool, bool);
+extern void mark_decl_instantiated (tree, int);
+extern int more_specialized_fn (tree, tree, int);
+extern bool type_targs_deducible_from (tree, tree);
+extern void do_decl_instantiation (tree, tree);
+extern void do_type_instantiation (tree, tree, tsubst_flags_t);
+extern bool always_instantiate_p (tree);
+extern bool maybe_instantiate_noexcept (tree, tsubst_flags_t = tf_warning_or_error);
+extern tree instantiate_decl (tree, bool, bool);
+extern void maybe_instantiate_decl (tree);
+extern int comp_template_parms (const_tree, const_tree);
+extern bool template_heads_equivalent_p (const_tree, const_tree);
+extern bool builtin_pack_fn_p (tree);
+extern tree uses_parameter_packs (tree);
+extern bool template_parameter_pack_p (const_tree);
+extern bool function_parameter_pack_p (const_tree);
+extern bool function_parameter_expanded_from_pack_p (tree, tree);
+extern tree make_pack_expansion (tree, tsubst_flags_t = tf_warning_or_error);
+extern bool check_for_bare_parameter_packs (tree, location_t = UNKNOWN_LOCATION);
+extern tree build_template_info (tree, tree);
+extern tree get_template_info (const_tree);
+extern int template_class_depth (tree);
+extern int is_specialization_of (tree, tree);
+extern bool is_specialization_of_friend (tree, tree);
+extern bool comp_template_args (tree, tree, tree * = NULL,
+ tree * = NULL, bool = false);
+extern int template_args_equal (tree, tree, bool = false);
+extern tree maybe_process_partial_specialization (tree);
+extern tree most_specialized_instantiation (tree);
+extern tree most_specialized_partial_spec (tree, tsubst_flags_t);
+extern void print_candidates (tree);
+extern void instantiate_pending_templates (int);
+extern tree tsubst_default_argument (tree, int, tree, tree,
+ tsubst_flags_t);
+extern tree tsubst (tree, tree, tsubst_flags_t, tree);
+extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t, tree);
+extern tree tsubst_expr (tree, tree, tsubst_flags_t, tree);
+extern tree tsubst_pack_expansion (tree, tree, tsubst_flags_t, tree);
+extern tree tsubst_argument_pack (tree, tree, tsubst_flags_t, tree);
+extern tree tsubst_template_args (tree, tree, tsubst_flags_t, tree);
+extern tree tsubst_template_arg (tree, tree, tsubst_flags_t, tree);
+extern tree tsubst_function_parms (tree, tree, tsubst_flags_t, tree);
+extern tree most_general_template (tree);
+extern tree get_mostly_instantiated_function_type (tree);
+extern bool problematic_instantiation_changed (void);
+extern void record_last_problematic_instantiation (void);
+extern struct tinst_level *current_instantiation (void);
+extern bool instantiating_current_function_p (void);
+extern tree maybe_get_template_decl_from_type_decl (tree);
+extern int processing_template_parmlist;
+extern bool dependent_type_p (tree);
+extern bool dependent_scope_p (tree);
+extern bool dependentish_scope_p (tree);
+extern bool any_dependent_template_arguments_p (const_tree);
+extern bool any_erroneous_template_args_p (const_tree);
+extern bool dependent_template_p (tree);
+extern bool dependent_template_id_p (tree, tree);
+extern bool type_dependent_expression_p (tree);
+extern bool type_dependent_object_expression_p (tree);
+extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *);
+extern bool any_type_dependent_elements_p (const_tree);
+extern bool type_dependent_expression_p_push (tree);
+extern bool value_dependent_expression_p (tree);
+extern bool instantiation_dependent_uneval_expression_p (tree);
+extern bool any_value_dependent_elements_p (const_tree);
+extern bool dependent_omp_for_p (tree, tree, tree, tree);
+extern tree resolve_typename_type (tree, bool);
+extern tree template_for_substitution (tree);
+extern tree build_non_dependent_expr (tree);
+extern void make_args_non_dependent (vec<tree, va_gc> *);
+extern bool reregister_specialization (tree, tree, tree);
+extern tree instantiate_non_dependent_expr (tree, tsubst_flags_t = tf_error);
+extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t);
+extern tree instantiate_non_dependent_or_null (tree);
+extern bool variable_template_specialization_p (tree);
+extern bool alias_type_or_template_p (tree);
+enum { nt_opaque = false, nt_transparent = true };
+extern tree alias_template_specialization_p (const_tree, bool);
+extern tree dependent_alias_template_spec_p (const_tree, bool);
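+/* Editor's note: nt_opaque/nt_transparent above are believed to name the
+ bool "transparency" argument of the two queries just declared. */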
+extern tree get_template_parm_object (tree expr, tree mangle);
+extern tree tparm_object_argument (tree);
+extern bool explicit_class_specialization_p (tree);
+extern bool push_tinst_level (tree);
+extern bool push_tinst_level (tree, tree);
+extern bool push_tinst_level_loc (tree, location_t);
+extern bool push_tinst_level_loc (tree, tree, location_t);
+extern void pop_tinst_level (void);
+extern struct tinst_level *outermost_tinst_level (void);
+extern bool non_templated_friend_p (tree);
+extern void init_template_processing (void);
+extern void print_template_statistics (void);
+bool template_template_parameter_p (const_tree);
+bool template_type_parameter_p (const_tree);
+extern bool primary_template_specialization_p (const_tree);
+extern tree get_primary_template_innermost_parameters (const_tree);
+extern tree get_template_innermost_arguments (const_tree);
+extern tree get_template_argument_pack_elems (const_tree);
+extern tree get_function_template_decl (const_tree);
+extern tree resolve_nondeduced_context (tree, tsubst_flags_t);
+extern tree resolve_nondeduced_context_or_error (tree, tsubst_flags_t);
+extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val);
+extern tree coerce_template_parms (tree, tree, tree, tsubst_flags_t,
+ bool = true);
+extern tree canonicalize_type_argument (tree, tsubst_flags_t);
+extern void register_local_identity (tree);
+extern void register_local_specialization (tree, tree);
+extern tree retrieve_local_specialization (tree);
+extern void register_parameter_specializations (tree, tree);
+extern tree extract_fnparm_pack (tree, tree *);
+extern tree template_parm_to_arg (tree);
+extern tree dguide_name (tree);
+extern bool dguide_name_p (tree);
+extern bool deduction_guide_p (const_tree);
+extern bool copy_guide_p (const_tree);
+extern bool template_guide_p (const_tree);
+extern bool builtin_guide_p (const_tree);
+extern void store_explicit_specifier (tree, tree);
+extern tree lookup_explicit_specifier (tree);
+extern void walk_specializations (bool,
+ void (*)(bool, spec_entry *,
+ void *),
+ void *);
+extern tree match_mergeable_specialization (bool is_decl, spec_entry *);
+extern unsigned get_mergeable_specialization_flags (tree tmpl, tree spec);
+extern void add_mergeable_specialization (bool is_decl, bool is_alias,
+ spec_entry *,
+ tree outer, unsigned);
+extern tree add_to_template_args (tree, tree);
+extern tree add_outermost_template_args (tree, tree);
+extern tree add_extra_args (tree, tree, tsubst_flags_t, tree);
+extern tree build_extra_args (tree, tree, tsubst_flags_t);
+
+/* in rtti.cc */
+/* A vector of all tinfo decls that haven't been emitted yet. */
+extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls;
+
+extern void init_rtti_processing (void);
+extern tree build_typeid (tree, tsubst_flags_t);
+extern tree get_tinfo_decl_direct (tree, tree, int);
+extern tree get_tinfo_decl (tree);
+extern tree get_typeid (tree, tsubst_flags_t);
+extern tree build_headof (tree);
+extern tree build_dynamic_cast (location_t, tree, tree,
+ tsubst_flags_t);
+extern void emit_support_tinfos (void);
+extern bool emit_tinfo_decl (tree);
+extern unsigned get_pseudo_tinfo_index (tree);
+extern tree get_pseudo_tinfo_type (unsigned);
+extern tree build_if_nonnull (tree, tree, tsubst_flags_t);
+
+/* in search.cc */
+extern tree get_parent_with_private_access (tree decl, tree binfo);
+extern bool accessible_base_p (tree, tree, bool);
+extern tree lookup_base (tree, tree, base_access,
+ base_kind *, tsubst_flags_t);
+extern tree dcast_base_hint (tree, tree);
+extern int accessible_p (tree, tree, bool);
+extern int accessible_in_template_p (tree, tree);
+extern tree lookup_field (tree, tree, int, bool);
+extern tree lookup_fnfields (tree, tree, int, tsubst_flags_t);
+extern tree lookup_member (tree, tree, int, bool,
+ tsubst_flags_t,
+ access_failure_info *afi = NULL);
+extern tree lookup_member_fuzzy (tree, tree, bool);
+extern tree locate_field_accessor (tree, tree, bool);
+extern int look_for_overrides (tree, tree);
+extern void get_pure_virtuals (tree);
+extern void maybe_suppress_debug_info (tree);
+extern void note_debug_info_needed (tree);
+extern tree current_scope (void);
+extern int at_function_scope_p (void);
+extern bool at_class_scope_p (void);
+extern bool at_namespace_scope_p (void);
+extern tree context_for_name_lookup (tree);
+extern tree type_context_for_name_lookup (tree);
+extern tree lookup_conversions (tree);
+extern tree binfo_from_vbase (tree);
+extern tree binfo_for_vbase (tree, tree);
+extern tree look_for_overrides_here (tree, tree);
+#define dfs_skip_bases ((tree)1)
+extern tree dfs_walk_all (tree, tree (*) (tree, void *),
+ tree (*) (tree, void *), void *);
+extern tree dfs_walk_once (tree, tree (*) (tree, void *),
+ tree (*) (tree, void *), void *);
+extern tree binfo_via_virtual (tree, tree);
+extern bool binfo_direct_p (tree);
+extern tree build_baselink (tree, tree, tree, tree);
+extern tree adjust_result_of_qualified_name_lookup
+ (tree, tree, tree);
+extern tree copied_binfo (tree, tree);
+extern tree original_binfo (tree, tree);
+extern bool shared_member_p (tree);
+extern bool any_dependent_bases_p (tree = current_nonlambda_class_type ());
+extern bool maybe_check_overriding_exception_spec (tree, tree);
+
+/* in semantics.cc */
+extern void push_deferring_access_checks (deferring_kind);
+extern void resume_deferring_access_checks (void);
+extern void stop_deferring_access_checks (void);
+extern void pop_deferring_access_checks (void);
+extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void);
+extern void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> *);
+extern void pop_to_parent_deferring_access_checks (void);
+extern bool perform_access_checks (vec<deferred_access_check, va_gc> *,
+ tsubst_flags_t);
+extern bool perform_deferred_access_checks (tsubst_flags_t);
+extern bool perform_or_defer_access_check (tree, tree, tree,
+ tsubst_flags_t,
+ access_failure_info *afi = NULL);
+
+/* RAII sentinel to ensure that deferred access checks are popped before
+ a function returns. */
+
+class deferring_access_check_sentinel
+{
+public:
+ deferring_access_check_sentinel (enum deferring_kind kind = dk_deferred)
+ {
+ push_deferring_access_checks (kind);
+ }
+ ~deferring_access_check_sentinel ()
+ {
+ pop_deferring_access_checks ();
+ }
+};
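+
+/* Editor's sketch of typical use, with a hypothetical wrapper: the
+ sentinel pairs the push with a pop on every exit path, including early
+ returns. */
+static tree
+sketch_with_deferred_checks (tree t)
+{
+ deferring_access_check_sentinel sentinel (dk_deferred);
+ if (t == error_mark_node)
+ return t; /* Destructor pops the deferred-checks level. */
+ perform_deferred_access_checks (tf_warning_or_error);
+ return t;
+}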
+
+extern int stmts_are_full_exprs_p (void);
+extern void init_cp_semantics (void);
+extern tree do_poplevel (tree);
+extern void break_maybe_infinite_loop (void);
+extern void add_decl_expr (tree);
+extern tree maybe_cleanup_point_expr_void (tree);
+extern tree finish_expr_stmt (tree);
+extern tree begin_if_stmt (void);
+extern tree finish_if_stmt_cond (tree, tree);
+extern tree finish_then_clause (tree);
+extern void begin_else_clause (tree);
+extern void finish_else_clause (tree);
+extern void finish_if_stmt (tree);
+extern tree begin_while_stmt (void);
+extern void finish_while_stmt_cond (tree, tree, bool, unsigned short);
+extern void finish_while_stmt (tree);
+extern tree begin_do_stmt (void);
+extern void finish_do_body (tree);
+extern void finish_do_stmt (tree, tree, bool, unsigned short);
+extern tree finish_return_stmt (tree);
+extern tree begin_for_scope (tree *);
+extern tree begin_for_stmt (tree, tree);
+extern void finish_init_stmt (tree);
+extern void finish_for_cond (tree, tree, bool, unsigned short);
+extern void finish_for_expr (tree, tree);
+extern void finish_for_stmt (tree);
+extern tree begin_range_for_stmt (tree, tree);
+extern void finish_range_for_decl (tree, tree, tree);
+extern void finish_range_for_stmt (tree);
+extern tree finish_break_stmt (void);
+extern tree finish_continue_stmt (void);
+extern tree begin_switch_stmt (void);
+extern void finish_switch_cond (tree, tree);
+extern void finish_switch_stmt (tree);
+extern tree finish_goto_stmt (tree);
+extern tree begin_try_block (void);
+extern void finish_try_block (tree);
+extern void finish_handler_sequence (tree);
+extern tree begin_function_try_block (tree *);
+extern void finish_function_try_block (tree);
+extern void finish_function_handler_sequence (tree, tree);
+extern void finish_cleanup_try_block (tree);
+extern tree begin_handler (void);
+extern void finish_handler_parms (tree, tree);
+extern void finish_handler (tree);
+extern void finish_cleanup (tree, tree);
+extern bool is_this_parameter (tree);
+
+enum {
+ BCS_NORMAL = 0,
+ BCS_NO_SCOPE = 1,
+ BCS_TRY_BLOCK = 2,
+ BCS_FN_BODY = 4,
+ BCS_TRANSACTION = 8,
+ BCS_STMT_EXPR = 16
+};
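+/* Editor's note: these are or-able bit flags forming the argument of
+ begin_compound_stmt below, e.g. begin_compound_stmt (BCS_NORMAL) for a
+ plain block. */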
+extern tree begin_compound_stmt (unsigned int);
+
+extern void finish_compound_stmt (tree);
+extern tree finish_asm_stmt (location_t, int, tree, tree,
+ tree, tree, tree, bool);
+extern tree finish_label_stmt (tree);
+extern void finish_label_decl (tree);
+extern cp_expr finish_parenthesized_expr (cp_expr);
+extern tree force_paren_expr (tree, bool = false);
+inline tree force_paren_expr_uneval (tree t)
+{ return force_paren_expr (t, true); }
+extern tree maybe_undo_parenthesized_ref (tree);
+extern tree finish_non_static_data_member (tree, tree, tree,
+ tsubst_flags_t = tf_warning_or_error);
+extern tree begin_stmt_expr (void);
+extern tree finish_stmt_expr_expr (tree, tree);
+extern tree finish_stmt_expr (tree, bool);
+extern tree stmt_expr_value_expr (tree);
+bool empty_expr_stmt_p (tree);
+extern cp_expr perform_koenig_lookup (cp_expr, vec<tree, va_gc> *,
+ tsubst_flags_t);
+extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool,
+ bool, tsubst_flags_t);
+extern tree lookup_and_finish_template_variable (tree, tree, tsubst_flags_t = tf_warning_or_error);
+extern tree finish_template_variable (tree, tsubst_flags_t = tf_warning_or_error);
+extern cp_expr finish_increment_expr (cp_expr, enum tree_code);
+extern tree finish_this_expr (void);
+extern tree finish_pseudo_destructor_expr (tree, tree, tree, location_t);
+extern cp_expr finish_unary_op_expr (location_t, enum tree_code, cp_expr,
+ tsubst_flags_t);
+/* Whether this call to finish_compound_literal represents a C++11 functional
+ cast or a C99 compound literal. */
+enum fcl_t { fcl_functional, fcl_c99 };
+extern tree finish_compound_literal (tree, tree, tsubst_flags_t, fcl_t = fcl_functional);
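+/* Editor's note: a C++11 functional cast is spelled "T{1, 2}"; the
+ C99-style compound literal extension is spelled "(T){1, 2}". */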
+extern tree finish_fname (tree);
+extern void finish_translation_unit (void);
+extern tree finish_template_type_parm (tree, tree);
+extern tree finish_template_template_parm (tree, tree);
+extern tree begin_class_definition (tree);
+extern void finish_template_decl (tree);
+extern tree finish_template_type (tree, tree, int);
+extern tree finish_base_specifier (tree, tree, bool);
+extern void finish_member_declaration (tree);
+extern bool outer_automatic_var_p (tree);
+extern tree process_outer_var_ref (tree, tsubst_flags_t, bool force_use = false);
+extern cp_expr finish_id_expression (tree, tree, tree,
+ cp_id_kind *,
+ bool, bool, bool *,
+ bool, bool, bool, bool,
+ const char **,
+ location_t);
+extern tree finish_typeof (tree);
+extern tree finish_underlying_type (tree);
+extern tree calculate_bases (tree, tsubst_flags_t);
+extern tree finish_bases (tree, bool);
+extern tree calculate_direct_bases (tree, tsubst_flags_t);
+extern tree finish_offsetof (tree, tree, location_t);
+extern void finish_decl_cleanup (tree, tree);
+extern void finish_eh_cleanup (tree);
+extern void emit_associated_thunks (tree);
+extern void finish_mem_initializers (tree);
+extern tree check_template_template_default_arg (tree);
+extern bool expand_or_defer_fn_1 (tree);
+extern void expand_or_defer_fn (tree);
+extern bool check_accessibility_of_qualified_id (tree, tree, tree, tsubst_flags_t);
+extern tree finish_qualified_id_expr (tree, tree, bool, bool,
+ bool, bool, tsubst_flags_t);
+extern void simplify_aggr_init_expr (tree *);
+extern void finalize_nrv (tree *, tree, tree);
+extern tree omp_reduction_id (enum tree_code, tree, tree);
+extern tree cp_remove_omp_priv_cleanup_stmt (tree *, int *, void *);
+extern bool cp_check_omp_declare_reduction (tree);
+extern void finish_omp_declare_simd_methods (tree);
+extern tree finish_omp_clauses (tree, enum c_omp_region_type);
+extern tree push_omp_privatization_clauses (bool);
+extern void pop_omp_privatization_clauses (tree);
+extern void save_omp_privatization_clauses (vec<tree> &);
+extern void restore_omp_privatization_clauses (vec<tree> &);
+extern void finish_omp_threadprivate (tree);
+extern tree begin_omp_structured_block (void);
+extern tree finish_omp_structured_block (tree);
+extern tree finish_oacc_data (tree, tree);
+extern tree finish_oacc_host_data (tree, tree);
+extern tree finish_omp_construct (enum tree_code, tree, tree);
+extern tree begin_omp_parallel (void);
+extern tree finish_omp_parallel (tree, tree);
+extern tree begin_omp_task (void);
+extern tree finish_omp_task (tree, tree);
+extern tree finish_omp_for (location_t, enum tree_code,
+ tree, tree, tree, tree, tree,
+ tree, tree, vec<tree> *, tree);
+extern tree finish_omp_for_block (tree, tree);
+extern void finish_omp_atomic (location_t, enum tree_code,
+ enum tree_code, tree, tree,
+ tree, tree, tree, tree, tree,
+ enum omp_memory_order, bool);
+extern void finish_omp_barrier (void);
+extern void finish_omp_depobj (location_t, tree,
+ enum omp_clause_depend_kind,
+ tree);
+extern void finish_omp_flush (int);
+extern void finish_omp_taskwait (void);
+extern void finish_omp_taskyield (void);
+extern void finish_omp_cancel (tree);
+extern void finish_omp_cancellation_point (tree);
+extern tree omp_privatize_field (tree, bool);
+extern tree begin_transaction_stmt (location_t, tree *, int);
+extern void finish_transaction_stmt (tree, tree, int, tree);
+extern tree build_transaction_expr (location_t, tree, int, tree);
+extern bool cxx_omp_create_clause_info (tree, tree, bool, bool,
+ bool, bool);
+extern tree baselink_for_fns (tree);
+extern void finish_static_assert (tree, tree, location_t,
+ bool, bool);
+extern tree finish_decltype_type (tree, bool, tsubst_flags_t);
+extern tree fold_builtin_is_corresponding_member (location_t, int, tree *);
+extern tree fold_builtin_is_pointer_inverconvertible_with_class (location_t, int, tree *);
+extern tree finish_trait_expr (location_t, enum cp_trait_kind, tree, tree);
+extern tree finish_trait_type (enum cp_trait_kind, tree, tree);
+extern tree build_lambda_expr (void);
+extern tree build_lambda_object (tree);
+extern tree begin_lambda_type (tree);
+extern tree lambda_capture_field_type (tree, bool, bool);
+extern tree lambda_proxy_type (tree);
+extern tree lambda_function (tree);
+extern void apply_deduced_return_type (tree, tree);
+extern tree add_capture (tree, tree, tree, bool, bool);
+extern tree add_default_capture (tree, tree, tree);
+extern void insert_capture_proxy (tree);
+extern void insert_pending_capture_proxies (void);
+extern bool is_capture_proxy (tree);
+extern bool is_normal_capture_proxy (tree);
+extern bool is_constant_capture_proxy (tree);
+extern void register_capture_members (tree);
+extern tree lambda_expr_this_capture (tree, int);
+extern void maybe_generic_this_capture (tree, tree);
+extern tree maybe_resolve_dummy (tree, bool);
+extern tree current_nonlambda_function (void);
+extern tree nonlambda_method_basetype (void);
+extern tree current_nonlambda_scope (void);
+extern tree current_lambda_expr (void);
+extern bool generic_lambda_fn_p (tree);
+extern tree do_dependent_capture (tree, bool = false);
+extern bool lambda_fn_in_template_p (tree);
+extern void maybe_add_lambda_conv_op (tree);
+extern bool is_lambda_ignored_entity (tree);
+extern bool lambda_static_thunk_p (tree);
+extern bool call_from_lambda_thunk_p (tree);
+extern tree finish_builtin_launder (location_t, tree,
+ tsubst_flags_t);
+extern tree cp_build_vec_convert (tree, location_t, tree,
+ tsubst_flags_t);
+extern tree cp_build_bit_cast (location_t, tree, tree,
+ tsubst_flags_t);
+extern void start_lambda_scope (tree decl);
+extern void finish_lambda_scope (void);
+extern void record_lambda_scope (tree lambda);
+extern void record_lambda_scope_discriminator (tree lambda);
+extern void record_lambda_scope_sig_discriminator (tree lambda, tree fn);
+extern tree start_lambda_function (tree fn, tree lambda_expr);
+extern void finish_lambda_function (tree body);
+extern bool regenerated_lambda_fn_p (tree);
+extern tree lambda_regenerating_args (tree);
+extern tree most_general_lambda (tree);
+extern tree finish_omp_target (location_t, tree, tree, bool);
+extern void finish_omp_target_clauses (location_t, tree, tree *);
+
+/* in tree.cc */
+extern int cp_tree_operand_length (const_tree);
+extern int cp_tree_code_length (enum tree_code);
+extern void cp_free_lang_data (tree t);
+extern tree force_target_expr (tree, tree, tsubst_flags_t);
+extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t);
+extern void lang_check_failed (const char *, int,
+ const char *) ATTRIBUTE_NORETURN
+ ATTRIBUTE_COLD;
+extern tree stabilize_expr (tree, tree *);
+extern void stabilize_call (tree, tree *);
+extern bool stabilize_init (tree, tree *);
+extern tree add_stmt_to_compound (tree, tree);
+extern void init_tree (void);
+extern bool pod_type_p (const_tree);
+extern bool layout_pod_type_p (const_tree);
+extern bool std_layout_type_p (const_tree);
+extern bool trivial_type_p (const_tree);
+extern bool trivially_copyable_p (const_tree);
+extern bool type_has_unique_obj_representations (const_tree);
+extern bool scalarish_type_p (const_tree);
+extern bool structural_type_p (tree, bool = false);
+extern bool type_has_nontrivial_default_init (const_tree);
+extern bool type_has_nontrivial_copy_init (const_tree);
+extern void maybe_warn_parm_abi (tree, location_t);
+extern bool class_tmpl_impl_spec_p (const_tree);
+extern int zero_init_p (const_tree);
+extern bool zero_init_expr_p (tree);
+extern bool check_abi_tag_redeclaration (const_tree, const_tree,
+ const_tree);
+extern bool check_abi_tag_args (tree, tree);
+extern tree strip_typedefs (tree, bool * = NULL,
+ unsigned int = 0);
+extern tree strip_typedefs_expr (tree, bool * = NULL,
+ unsigned int = 0);
+extern tree copy_binfo (tree, tree, tree,
+ tree *, int);
+extern int member_p (const_tree);
+extern cp_lvalue_kind real_lvalue_p (const_tree);
+extern cp_lvalue_kind lvalue_kind (const_tree);
+extern bool glvalue_p (const_tree);
+extern bool obvalue_p (const_tree);
+extern bool xvalue_p (const_tree);
+extern bool bitfield_p (const_tree);
+extern tree cp_stabilize_reference (tree);
+extern bool builtin_valid_in_constant_expr_p (const_tree);
+extern tree build_min (enum tree_code, tree, ...);
+extern tree build_min_nt_loc (location_t, enum tree_code,
+ ...);
+extern tree build_min_non_dep (enum tree_code, tree, ...);
+extern tree build_min_non_dep_op_overload (enum tree_code, tree, tree, ...);
+extern tree build_min_non_dep_op_overload (tree, tree, tree,
+ vec<tree, va_gc> *);
+extern tree build_min_nt_call_vec (tree, vec<tree, va_gc> *);
+extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *);
+extern vec<tree, va_gc>* vec_copy_and_insert (vec<tree, va_gc>*, tree, unsigned);
+extern tree build_cplus_new (tree, tree, tsubst_flags_t);
+extern tree build_local_temp (tree);
+extern bool is_local_temp (tree);
+extern tree build_aggr_init_expr (tree, tree);
+extern tree get_target_expr (tree,
+ tsubst_flags_t = tf_warning_or_error);
+extern tree build_cplus_array_type (tree, tree, int is_dep = -1);
+extern tree build_array_of_n_type (tree, int);
+extern bool array_of_runtime_bound_p (tree);
+extern bool vla_type_p (tree);
+extern tree build_array_copy (tree);
+extern tree build_vec_init_expr (tree, tree, tsubst_flags_t);
+extern tree expand_vec_init_expr (tree, tree, tsubst_flags_t,
+ vec<tree,va_gc>** = nullptr);
+extern void diagnose_non_constexpr_vec_init (tree);
+extern tree hash_tree_cons (tree, tree, tree);
+extern tree hash_tree_chain (tree, tree);
+extern tree build_qualified_name (tree, tree, tree, bool);
+extern tree build_ref_qualified_type (tree, cp_ref_qualifier);
+extern tree make_binding_vec (tree, unsigned clusters CXX_MEM_STAT_INFO);
+inline tree ovl_first (tree) ATTRIBUTE_PURE;
+extern tree ovl_make (tree fn,
+ tree next = NULL_TREE);
+extern tree ovl_insert (tree fn, tree maybe_ovl,
+ int using_or_hidden = 0);
+extern tree ovl_skip_hidden (tree) ATTRIBUTE_PURE;
+extern void lookup_mark (tree lookup, bool val);
+extern tree lookup_add (tree fns, tree lookup);
+extern tree lookup_maybe_add (tree fns, tree lookup,
+ bool deduping);
+extern int is_overloaded_fn (tree) ATTRIBUTE_PURE;
+extern bool really_overloaded_fn (tree) ATTRIBUTE_PURE;
+extern tree dependent_name (tree);
+extern tree call_expr_dependent_name (tree);
+extern tree maybe_get_fns (tree) ATTRIBUTE_PURE;
+extern tree get_fns (tree) ATTRIBUTE_PURE;
+extern tree get_first_fn (tree) ATTRIBUTE_PURE;
+extern tree ovl_scope (tree);
+extern const char *cxx_printable_name (tree, int);
+extern const char *cxx_printable_name_translate (tree, int);
+extern tree canonical_eh_spec (tree);
+extern tree build_cp_fntype_variant (tree, cp_ref_qualifier, tree, bool);
+extern tree build_exception_variant (tree, tree);
+extern void fixup_deferred_exception_variants (tree, tree);
+extern tree bind_template_template_parm (tree, tree);
+extern tree array_type_nelts_total (tree);
+extern tree array_type_nelts_top (tree);
+extern bool array_of_unknown_bound_p (const_tree);
+extern tree break_out_target_exprs (tree, bool = false);
+extern tree build_ctor_subob_ref (tree, tree, tree);
+extern tree replace_placeholders (tree, tree, bool * = NULL);
+extern bool find_placeholders (tree);
+extern tree get_type_decl (tree);
+extern tree decl_namespace_context (tree);
+extern bool decl_anon_ns_mem_p (tree);
+extern bool decl_internal_context_p (const_tree);
+extern tree lvalue_type (tree);
+extern tree error_type (tree);
+extern int varargs_function_p (const_tree);
+extern bool cp_tree_equal (tree, tree);
+extern tree no_linkage_check (tree, bool);
+extern void debug_binfo (tree);
+extern tree build_dummy_object (tree);
+extern tree maybe_dummy_object (tree, tree *);
+extern bool is_dummy_object (const_tree);
+extern bool is_byte_access_type (tree);
+extern bool is_byte_access_type_not_plain_char (tree);
+extern const struct attribute_spec cxx_attribute_table[];
+extern tree make_ptrmem_cst (tree, tree);
+extern tree cp_build_type_attribute_variant (tree, tree);
+extern tree cp_build_reference_type (tree, bool);
+extern tree move (tree);
+extern tree cp_build_qualified_type (tree, int,
+ tsubst_flags_t = tf_warning_or_error);
+extern bool cv_qualified_p (const_tree);
+extern tree cv_unqualified (tree);
+extern special_function_kind special_function_p (const_tree);
+extern special_function_kind special_memfn_p (const_tree);
+extern int count_trees (tree);
+extern int char_type_p (tree);
+extern void verify_stmt_tree (tree);
+extern linkage_kind decl_linkage (tree);
+extern duration_kind decl_storage_duration (tree);
+extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn,
+ void*, hash_set<tree> *);
+#define cp_walk_tree(tp,func,data,pset) \
+ walk_tree_1 (tp, func, data, pset, cp_walk_subtrees)
+#define cp_walk_tree_without_duplicates(tp,func,data) \
+ walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees)
+extern tree rvalue (tree);
+extern tree convert_bitfield_to_declared_type (tree);
+extern tree cp_save_expr (tree);
+extern bool cast_valid_in_integral_constant_expression_p (tree);
+extern bool cxx_type_hash_eq (const_tree, const_tree);
+extern tree cxx_copy_lang_qualifiers (const_tree, const_tree);
+
+extern void cxx_print_statistics (void);
+extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t);
+
+/* in ptree.cc */
+extern void cxx_print_xnode (FILE *, tree, int);
+extern void cxx_print_decl (FILE *, tree, int);
+extern void cxx_print_type (FILE *, tree, int);
+extern void cxx_print_identifier (FILE *, tree, int);
+extern void cxx_print_error_function (diagnostic_context *,
+ const char *,
+ struct diagnostic_info *);
+
+/* in typeck.cc */
+/* Says how we should behave when comparing two arrays one of which
+ has unknown bounds. */
+enum compare_bounds_t { bounds_none, bounds_either, bounds_first };
+
+extern bool cxx_mark_addressable (tree, bool = false);
+extern int string_conv_p (const_tree, const_tree, int);
+extern tree cp_truthvalue_conversion (tree, tsubst_flags_t);
+extern tree contextual_conv_bool (tree, tsubst_flags_t);
+extern tree condition_conversion (tree);
+extern tree require_complete_type (tree,
+ tsubst_flags_t = tf_warning_or_error);
+extern tree complete_type (tree);
+extern tree complete_type_or_else (tree, tree);
+extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t);
+extern int cp_compare_floating_point_conversion_ranks (tree, tree);
+inline bool type_unknown_p (const_tree);
+enum { ce_derived, ce_type, ce_normal, ce_exact };
+extern bool comp_except_specs (const_tree, const_tree, int);
+extern bool comptypes (tree, tree, int);
+extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree);
+extern bool similar_type_p (tree, tree);
+extern bool next_common_initial_sequence (tree &, tree &);
+extern bool layout_compatible_type_p (tree, tree);
+extern bool compparms (const_tree, const_tree);
+extern int comp_cv_qualification (const_tree, const_tree);
+extern int comp_cv_qualification (int, int);
+extern int comp_cv_qual_signature (tree, tree);
+extern tree cxx_sizeof_or_alignof_expr (location_t, tree,
+ enum tree_code, bool, bool);
+extern tree cxx_sizeof_or_alignof_type (location_t, tree,
+ enum tree_code, bool, bool);
+extern tree cxx_alignas_expr (tree);
+extern tree cxx_sizeof_nowarn (tree);
+extern tree is_bitfield_expr_with_lowered_type (const_tree);
+extern tree unlowered_expr_type (const_tree);
+extern tree decay_conversion (tree,
+ tsubst_flags_t,
+ bool = true);
+extern tree build_class_member_access_expr (cp_expr, tree, tree, bool,
+ tsubst_flags_t);
+extern tree finish_class_member_access_expr (cp_expr, tree, bool,
+ tsubst_flags_t);
+extern tree lookup_destructor (tree, tree, tree, tsubst_flags_t);
+extern tree build_dependent_operator_type (tree, enum tree_code, bool);
+extern tree build_x_indirect_ref (location_t, tree,
+ ref_operator, tree,
+ tsubst_flags_t);
+extern tree cp_build_indirect_ref (location_t, tree,
+ ref_operator,
+ tsubst_flags_t);
+extern tree cp_build_fold_indirect_ref (tree);
+extern tree build_array_ref (location_t, tree, tree);
+extern tree cp_build_array_ref (location_t, tree, tree,
+ tsubst_flags_t);
+extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t);
+extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...)
+ ATTRIBUTE_SENTINEL;
+extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **,
+ tsubst_flags_t,
+ tree = NULL_TREE);
+extern tree build_x_binary_op (const op_location_t &,
+ enum tree_code, tree,
+ enum tree_code, tree,
+ enum tree_code, tree,
+ tree *, tsubst_flags_t);
+inline tree build_x_binary_op (const op_location_t &loc,
+ enum tree_code code, tree arg1, tree arg2,
+ tsubst_flags_t complain)
+{
+ return build_x_binary_op (loc, code, arg1, TREE_CODE (arg1), arg2,
+ TREE_CODE (arg2), NULL_TREE, NULL, complain);
+}
+extern tree build_x_array_ref (location_t, tree, tree,
+ tsubst_flags_t);
+extern tree build_x_unary_op (location_t,
+ enum tree_code, cp_expr,
+ tree, tsubst_flags_t);
+extern tree cp_build_addressof (location_t, tree,
+ tsubst_flags_t);
+extern tree cp_build_addr_expr (tree, tsubst_flags_t);
+extern tree cp_build_unary_op (enum tree_code, tree, bool,
+ tsubst_flags_t);
+extern tree genericize_compound_lvalue (tree);
+extern tree unary_complex_lvalue (enum tree_code, tree);
+extern tree build_x_conditional_expr (location_t, tree, tree, tree,
+ tsubst_flags_t);
+extern tree build_x_compound_expr_from_list (tree, expr_list_kind,
+ tsubst_flags_t);
+extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *,
+ const char *, tsubst_flags_t);
+extern tree build_x_compound_expr (location_t, tree, tree,
+ tree, tsubst_flags_t);
+extern tree build_compound_expr (location_t, tree, tree);
+extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t);
+extern tree build_static_cast (location_t, tree, tree,
+ tsubst_flags_t);
+extern tree build_reinterpret_cast (location_t, tree, tree,
+ tsubst_flags_t);
+extern tree build_const_cast (location_t, tree, tree,
+ tsubst_flags_t);
+extern tree build_c_cast (location_t, tree, tree);
+extern cp_expr build_c_cast (location_t loc, tree type,
+ cp_expr expr);
+extern tree cp_build_c_cast (location_t, tree, tree,
+ tsubst_flags_t);
+extern cp_expr build_x_modify_expr (location_t, tree,
+ enum tree_code, tree,
+ tree, tsubst_flags_t);
+extern tree cp_build_modify_expr (location_t, tree,
+ enum tree_code, tree,
+ tsubst_flags_t);
+extern tree convert_for_initialization (tree, tree, tree, int,
+ impl_conv_rhs, tree, int,
+ tsubst_flags_t);
+extern int comp_ptr_ttypes (tree, tree);
+extern bool comp_ptr_ttypes_const (tree, tree, compare_bounds_t);
+extern bool error_type_p (const_tree);
+extern bool ptr_reasonably_similar (const_tree, const_tree);
+extern tree build_ptrmemfunc (tree, tree, int, bool,
+ tsubst_flags_t);
+extern int cp_type_quals (const_tree);
+extern int type_memfn_quals (const_tree);
+extern cp_ref_qualifier type_memfn_rqual (const_tree);
+extern tree apply_memfn_quals (tree, cp_cv_quals,
+ cp_ref_qualifier = REF_QUAL_NONE);
+extern bool cp_has_mutable_p (const_tree);
+extern bool at_least_as_qualified_p (const_tree, const_tree);
+extern void cp_apply_type_quals_to_decl (int, tree);
+extern tree build_ptrmemfunc1 (tree, tree, tree);
+extern void expand_ptrmemfunc_cst (tree, tree *, tree *);
+extern tree type_after_usual_arithmetic_conversions (tree, tree);
+extern tree common_pointer_type (tree, tree);
+extern tree composite_pointer_type (const op_location_t &,
+ tree, tree, tree, tree,
+ composite_pointer_operation,
+ tsubst_flags_t);
+extern tree merge_types (tree, tree);
+extern tree strip_array_domain (tree);
+extern tree check_return_expr (tree, bool *);
+extern tree spaceship_type (tree, tsubst_flags_t = tf_warning_or_error);
+extern tree genericize_spaceship (location_t, tree, tree, tree);
+extern tree cp_build_binary_op (const op_location_t &,
+ enum tree_code, tree, tree,
+ tsubst_flags_t);
+extern tree build_x_vec_perm_expr (location_t,
+ tree, tree, tree,
+ tsubst_flags_t);
+extern tree build_x_shufflevector (location_t,
+ vec<tree, va_gc> *,
+ tsubst_flags_t);
+#define cxx_sizeof(T) cxx_sizeof_or_alignof_type (input_location, T, SIZEOF_EXPR, false, true)
+extern tree build_simple_component_ref (tree, tree);
+extern tree build_ptrmemfunc_access_expr (tree, tree);
+extern tree build_address (tree);
+extern tree build_nop (tree, tree);
+extern tree non_reference (tree);
+extern tree lookup_anon_field (tree, tree);
+extern bool invalid_nonstatic_memfn_p (location_t, tree,
+ tsubst_flags_t);
+extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t);
+extern tree convert_ptrmem (tree, tree, bool, bool,
+ tsubst_flags_t);
+extern int lvalue_or_else (tree, enum lvalue_use,
+ tsubst_flags_t);
+extern void check_template_keyword (tree);
+extern bool check_raw_literal_operator (const_tree decl);
+extern bool check_literal_operator_args (const_tree, bool *, bool *);
+extern void maybe_warn_about_useless_cast (location_t, tree, tree,
+ tsubst_flags_t);
+extern tree cp_perform_integral_promotions (tree, tsubst_flags_t);
+
+extern tree finish_left_unary_fold_expr (tree, int);
+extern tree finish_right_unary_fold_expr (tree, int);
+extern tree finish_binary_fold_expr (tree, tree, int);
+extern tree treat_lvalue_as_rvalue_p (tree, bool);
+extern bool decl_in_std_namespace_p (tree);
+extern void maybe_warn_pessimizing_move (tree, tree, bool);
+
+/* in typeck2.cc */
+extern void require_complete_eh_spec_types (tree, tree);
+extern bool cxx_incomplete_type_diagnostic (location_t, const_tree,
+ const_tree, diagnostic_t);
+inline location_t
+loc_or_input_loc (location_t loc)
+{
+ return loc == UNKNOWN_LOCATION ? input_location : loc;
+}
+
+inline location_t
+cp_expr_loc_or_loc (const_tree t, location_t or_loc)
+{
+ location_t loc = cp_expr_location (t);
+ if (loc == UNKNOWN_LOCATION)
+ loc = or_loc;
+ return loc;
+}
+
+inline location_t
+cp_expr_loc_or_input_loc (const_tree t)
+{
+ return cp_expr_loc_or_loc (t, input_location);
+}
+
+inline bool
+cxx_incomplete_type_diagnostic (const_tree value, const_tree type,
+ diagnostic_t diag_kind)
+{
+ return cxx_incomplete_type_diagnostic (cp_expr_loc_or_input_loc (value),
+ value, type, diag_kind);
+}
+
+extern void cxx_incomplete_type_error (location_t, const_tree,
+ const_tree);
+inline void
+cxx_incomplete_type_error (const_tree value, const_tree type)
+{
+ cxx_incomplete_type_diagnostic (value, type, DK_ERROR);
+}
+
+extern void cxx_incomplete_type_inform (const_tree);
+extern tree error_not_base_type (tree, tree);
+extern tree binfo_or_else (tree, tree);
+extern void cxx_readonly_error (location_t, tree,
+ enum lvalue_use);
+extern void complete_type_check_abstract (tree);
+extern int abstract_virtuals_error (tree, tree,
+ tsubst_flags_t = tf_warning_or_error);
+extern int abstract_virtuals_error (abstract_class_use, tree,
+ tsubst_flags_t = tf_warning_or_error);
+
+extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int);
+extern tree split_nonconstant_init (tree, tree);
+extern bool check_narrowing (tree, tree, tsubst_flags_t,
+ bool = false);
+extern bool ordinary_char_type_p (tree);
+extern bool array_string_literal_compatible_p (tree, tree);
+extern tree digest_init (tree, tree, tsubst_flags_t);
+extern tree digest_init_flags (tree, tree, int, tsubst_flags_t);
+extern tree digest_nsdmi_init (tree, tree, tsubst_flags_t);
+extern tree build_scoped_ref (tree, tree, tree *);
+extern tree build_x_arrow (location_t, tree,
+ tsubst_flags_t);
+extern tree build_m_component_ref (tree, tree, tsubst_flags_t);
+extern tree build_functional_cast (location_t, tree, tree,
+ tsubst_flags_t);
+extern tree add_exception_specifier (tree, tree, tsubst_flags_t);
+extern tree merge_exception_specifiers (tree, tree);
+extern void set_target_expr_eliding (tree);
+extern tree cp_build_init_expr (location_t, tree, tree);
+inline tree cp_build_init_expr (tree t, tree i)
+{ return cp_build_init_expr (input_location, t, i); }
+
+/* in mangle.cc */
+extern void init_mangle (void);
+extern void mangle_decl (tree);
+extern const char *mangle_type_string (tree);
+extern tree mangle_typeinfo_for_type (tree);
+extern tree mangle_typeinfo_string_for_type (tree);
+extern tree mangle_vtbl_for_type (tree);
+extern tree mangle_vtt_for_type (tree);
+extern tree mangle_ctor_vtbl_for_type (tree, tree);
+extern tree mangle_thunk (tree, int, tree, tree, tree);
+extern tree mangle_guard_variable (tree);
+extern tree mangle_tls_init_fn (tree);
+extern tree mangle_tls_wrapper_fn (tree);
+extern bool decl_tls_wrapper_p (tree);
+extern tree mangle_ref_init_variable (tree);
+extern tree mangle_template_parm_object (tree);
+extern char *get_mangled_vtable_map_var_name (tree);
+extern bool mangle_return_type_p (tree);
+extern tree mangle_decomp (tree, vec<tree> &);
+extern void mangle_module_substitution (int);
+extern int mangle_module_component (tree id, bool partition);
+extern tree mangle_module_global_init (int);
+
+/* in dump.cc */
+extern bool cp_dump_tree (void *, tree);
+
+/* In cp/cp-objcp-common.cc. */
+
+extern alias_set_type cxx_get_alias_set (tree);
+extern bool cxx_warn_unused_global_decl (const_tree);
+extern size_t cp_tree_size (enum tree_code);
+extern bool cp_var_mod_type_p (tree, tree);
+extern void cxx_initialize_diagnostics (diagnostic_context *);
+extern int cxx_types_compatible_p (tree, tree);
+extern bool cxx_block_may_fallthru (const_tree);
+
+/* in cp-gimplify.cc */
+extern int cp_gimplify_expr (tree *, gimple_seq *,
+ gimple_seq *);
+extern void cp_genericize (tree);
+extern bool cxx_omp_const_qual_no_mutable (tree);
+extern enum omp_clause_default_kind cxx_omp_predetermined_sharing_1 (tree);
+extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree);
+extern enum omp_clause_defaultmap_kind cxx_omp_predetermined_mapping (tree);
+extern tree cxx_omp_clause_default_ctor (tree, tree, tree);
+extern tree cxx_omp_clause_copy_ctor (tree, tree, tree);
+extern tree cxx_omp_clause_assign_op (tree, tree, tree);
+extern tree cxx_omp_clause_dtor (tree, tree);
+extern void cxx_omp_finish_clause (tree, gimple_seq *, bool);
+extern bool cxx_omp_privatize_by_reference (const_tree);
+extern bool cxx_omp_disregard_value_expr (tree, bool);
+extern void cp_fold_function (tree);
+extern tree cp_fold_maybe_rvalue (tree, bool);
+extern tree cp_fold_rvalue (tree);
+extern tree cp_fully_fold (tree);
+extern tree cp_fully_fold_init (tree);
+extern tree predeclare_vla (tree);
+extern void clear_fold_cache (void);
+extern tree lookup_hotness_attribute (tree);
+extern tree process_stmt_hotness_attribute (tree, location_t);
+extern tree build_assume_call (location_t, tree);
+extern tree process_stmt_assume_attribute (tree, tree, location_t);
+extern bool simple_empty_class_p (tree, tree, tree_code);
+extern tree fold_builtin_source_location (const_tree);
+extern tree get_source_location_impl_type ();
+
+/* in name-lookup.cc */
+extern tree strip_using_decl (tree);
+extern void diagnose_name_conflict (tree, tree);
+extern bool dependent_local_decl_p (tree);
+
+/* Tell the binding oracle what kind of binding we are looking for. */
+
+enum cp_oracle_request
+{
+ CP_ORACLE_IDENTIFIER
+};
+
+/* If this is non-NULL, then it is a "binding oracle" which can lazily
+   create bindings when needed by the C++ compiler.  The oracle is told
+   the name and type of the binding to create.  It can call pushdecl
+   or the like to ensure the binding is visible; or do nothing,
+   leaving the binding untouched.  The name-lookup machinery takes note
+   of when the oracle has been called and will not call it again if it
+   fails to create a given binding.  */
+
+typedef void cp_binding_oracle_function (enum cp_oracle_request, tree identifier);
+
+extern cp_binding_oracle_function *cp_binding_oracle;
+
+/* Set during diagnostics to record the failed constraint.  This is a
+   TREE_LIST whose VALUE is the constraint and whose PURPOSE holds the
+   instantiation arguments.  Defined in pt.cc.  */
+
+extern tree current_failed_constraint;
+
+/* An RAII class to manage the failed constraint. */
+
+struct diagnosing_failed_constraint
+{
+ diagnosing_failed_constraint (tree, tree, bool);
+ ~diagnosing_failed_constraint ();
+ static bool replay_errors_p ();
+
+ bool diagnosing_error;
+};
+
+/* in constraint.cc */
+
+extern cp_expr finish_constraint_or_expr (location_t, cp_expr, cp_expr);
+extern cp_expr finish_constraint_and_expr (location_t, cp_expr, cp_expr);
+extern cp_expr finish_constraint_primary_expr (cp_expr);
+extern tree finish_concept_definition (cp_expr, tree, tree);
+extern tree combine_constraint_expressions (tree, tree);
+extern tree append_constraint (tree, tree);
+extern tree get_constraints (const_tree);
+extern void set_constraints (tree, tree);
+extern void remove_constraints (tree);
+extern tree current_template_constraints (void);
+extern tree associate_classtype_constraints (tree);
+extern tree build_constraints (tree, tree);
+extern tree maybe_substitute_reqs_for (tree, const_tree);
+extern tree get_trailing_function_requirements (tree);
+extern tree get_shorthand_constraints (tree);
+
+extern tree build_concept_id (tree);
+extern tree build_type_constraint (tree, tree, tsubst_flags_t);
+extern tree build_concept_check (tree, tree, tsubst_flags_t);
+extern tree build_concept_check (tree, tree, tree, tsubst_flags_t);
+
+extern tree_pair finish_type_constraints (tree, tree, tsubst_flags_t);
+extern tree build_constrained_parameter (tree, tree, tree = NULL_TREE);
+extern void placeholder_extract_concept_and_args (tree, tree&, tree&);
+extern bool equivalent_placeholder_constraints (tree, tree);
+extern hashval_t hash_placeholder_constraint (tree);
+extern bool deduce_constrained_parameter (tree, tree&, tree&);
+extern tree resolve_constraint_check (tree);
+extern tree check_function_concept (tree);
+extern tree finish_template_introduction (tree, tree, location_t loc);
+extern bool valid_requirements_p (tree);
+extern tree finish_concept_name (tree);
+extern tree finish_shorthand_constraint (tree, tree);
+extern tree finish_requires_expr (location_t, tree, tree);
+extern tree finish_simple_requirement (location_t, tree);
+extern tree finish_type_requirement (location_t, tree);
+extern tree finish_compound_requirement (location_t, tree, tree, bool);
+extern tree finish_nested_requirement (location_t, tree);
+extern tree tsubst_requires_expr (tree, tree, tsubst_flags_t, tree);
+extern tree evaluate_requires_expr (tree);
+extern tree tsubst_constraint (tree, tree, tsubst_flags_t, tree);
+extern tree tsubst_constraint_info (tree, tree, tsubst_flags_t, tree);
+extern tree tsubst_parameter_mapping (tree, tree, tsubst_flags_t, tree);
+
+struct processing_constraint_expression_sentinel
+{
+ processing_constraint_expression_sentinel ();
+ ~processing_constraint_expression_sentinel ();
+};
+
+extern bool processing_constraint_expression_p ();
+
+extern tree unpack_concept_check (tree);
+extern tree evaluate_concept_check (tree);
+extern bool constraints_satisfied_p (tree, tree = NULL_TREE);
+extern bool* lookup_subsumption_result (tree, tree);
+extern bool save_subsumption_result (tree, tree, bool);
+extern tree find_template_parameters (tree, tree);
+extern bool equivalent_constraints (tree, tree);
+extern bool equivalently_constrained (tree, tree);
+extern bool strictly_subsumes (tree, tree);
+extern bool weakly_subsumes (tree, tree);
+extern int more_constrained (tree, tree);
+extern bool at_least_as_constrained (tree, tree);
+extern bool constraints_equivalent_p (tree, tree);
+extern bool atomic_constraints_identical_p (tree, tree);
+extern hashval_t iterative_hash_constraint (tree, hashval_t);
+extern hashval_t hash_atomic_constraint (tree);
+extern void diagnose_constraints (location_t, tree, tree);
+
+extern void note_failed_type_completion_for_satisfaction (tree);
+
+/* A structural hasher for ATOMIC_CONSTRs. */
+
+struct atom_hasher : default_hash_traits<tree>
+{
+ static hashval_t hash (tree t)
+ {
+ ++comparing_specializations;
+ hashval_t val = hash_atomic_constraint (t);
+ --comparing_specializations;
+ return val;
+ }
+
+ static bool equal (tree t1, tree t2)
+ {
+ ++comparing_specializations;
+ bool eq = atomic_constraints_identical_p (t1, t2);
+ --comparing_specializations;
+ return eq;
+ }
+};
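+
+/* A usage sketch, not a declaration from this header: atom_hasher is
+   meant to key a hash_table by the structural identity of
+   ATOMIC_CONSTRs, e.g. to memoize satisfaction results.  The cache
+   name here is hypothetical.
+
+     static hash_table<atom_hasher> *atom_cache;
+     ...
+     tree *slot = atom_cache->find_slot (atom, INSERT);
+
+   Bumping comparing_specializations around hash/equal makes the
+   comparison treat specializations structurally.  */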
+
+/* in logic.cc */
+extern bool subsumes (tree, tree);
+
+/* In class.cc */
+extern void set_current_access_from_decl (tree);
+extern void cp_finish_injected_record_type (tree);
+
+/* in vtable-class-hierarchy.cc */
+extern void vtv_compute_class_hierarchy_transitive_closure (void);
+extern void vtv_generate_init_routine (void);
+extern void vtv_save_class_info (tree);
+extern void vtv_recover_class_info (void);
+extern void vtv_build_vtable_verify_fndecl (void);
+
+/* In constexpr.cc */
+/* Representation of entries in the constexpr function definition table. */
+
+struct GTY((for_user)) constexpr_fundef {
+ tree decl;
+ tree body;
+ tree parms;
+ tree result;
+};
+
+/* Whether the current context is manifestly constant-evaluated.
+ Used by the constexpr machinery to control folding of
+ __builtin_is_constant_evaluated. */
+
+enum class mce_value
+{
+ /* Unknown, so treat __builtin_is_constant_evaluated as non-constant. */
+ mce_unknown = 0,
+ /* Fold it to true. */
+ mce_true = 1,
+ /* Fold it to false. Primarily used during cp_fold_function and
+ cp_fully_fold_init. */
+ mce_false = -1,
+};
+constexpr mce_value mce_unknown = mce_value::mce_unknown;
+constexpr mce_value mce_true = mce_value::mce_true;
+constexpr mce_value mce_false = mce_value::mce_false;
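+
+/* Illustrative call, assuming a caller with an expression EXPR:
+   mce_value selects how __builtin_is_constant_evaluated folds inside
+   maybe_constant_value (declared below), e.g.
+
+     tree folded = maybe_constant_value (expr, NULL_TREE, mce_true);
+
+   folds the builtin to true, mce_false folds it to false, and
+   mce_unknown leaves it unfolded.  */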
+
+extern void fini_constexpr (void);
+extern bool literal_type_p (tree);
+extern void maybe_save_constexpr_fundef (tree);
+extern void register_constexpr_fundef (const constexpr_fundef &);
+extern constexpr_fundef *retrieve_constexpr_fundef (tree);
+extern bool is_valid_constexpr_fn (tree, bool);
+extern bool check_constexpr_ctor_body (tree, tree, bool);
+extern tree constexpr_fn_retval (tree);
+extern tree ensure_literal_type_for_constexpr_object (tree);
+extern bool potential_constant_expression (tree);
+extern bool is_constant_expression (tree);
+extern bool is_rvalue_constant_expression (tree);
+extern bool is_nondependent_constant_expression (tree);
+extern bool is_nondependent_static_init_expression (tree);
+extern bool is_static_init_expression (tree);
+extern bool is_std_allocator (tree);
+extern bool potential_rvalue_constant_expression (tree);
+extern bool require_potential_constant_expression (tree);
+extern bool require_constant_expression (tree);
+extern bool require_rvalue_constant_expression (tree);
+extern bool require_potential_rvalue_constant_expression (tree);
+extern bool require_potential_rvalue_constant_expression_fncheck (tree);
+extern tree cxx_constant_value (tree, tree = NULL_TREE,
+ tsubst_flags_t = tf_error);
+inline tree cxx_constant_value (tree t, tsubst_flags_t complain)
+{ return cxx_constant_value (t, NULL_TREE, complain); }
+extern void cxx_constant_dtor (tree, tree);
+extern tree cxx_constant_init (tree, tree = NULL_TREE);
+extern tree maybe_constant_value (tree, tree = NULL_TREE, mce_value = mce_unknown);
+extern tree maybe_constant_init (tree, tree = NULL_TREE, bool = false);
+extern tree fold_non_dependent_expr (tree,
+ tsubst_flags_t = tf_warning_or_error,
+ bool = false, tree = NULL_TREE);
+extern tree maybe_fold_non_dependent_expr (tree,
+ tsubst_flags_t = tf_warning_or_error);
+extern tree fold_non_dependent_init (tree,
+ tsubst_flags_t = tf_warning_or_error,
+ bool = false, tree = NULL_TREE);
+extern tree fold_simple (tree);
+extern tree fold_to_constant (tree);
+extern bool reduced_constant_expression_p (tree);
+extern bool is_instantiation_of_constexpr (tree);
+extern bool var_in_constexpr_fn (tree);
+extern bool var_in_maybe_constexpr_fn (tree);
+extern bool maybe_constexpr_fn (tree);
+extern void explain_invalid_constexpr_fn (tree);
+extern vec<tree> cx_error_context (void);
+extern tree fold_sizeof_expr (tree);
+extern void clear_cv_and_fold_caches (void);
+extern tree unshare_constructor (tree CXX_MEM_STAT_INFO);
+extern bool decl_implicit_constexpr_p (tree);
+struct constexpr_ctx;
+extern tree find_failing_clause (const constexpr_ctx *ctx, tree);
+extern void diagnose_failing_condition (tree, location_t, bool,
+ const constexpr_ctx * = nullptr);
+extern bool replace_decl (tree *, tree, tree);
+
+/* An RAII sentinel used to restrict constexpr evaluation so that it
+ doesn't do anything that causes extra DECL_UID generation. */
+
+struct uid_sensitive_constexpr_evaluation_sentinel
+{
+ temp_override<bool> ovr;
+ uid_sensitive_constexpr_evaluation_sentinel ();
+};
+
+/* Used to determine whether uid_sensitive_constexpr_evaluation_p was
+ called and returned true, indicating that we've restricted constexpr
+ evaluation in order to avoid UID generation. We use this to control
+ updates to the fold_cache and cv_cache. */
+
+struct uid_sensitive_constexpr_evaluation_checker
+{
+ const unsigned saved_counter;
+ uid_sensitive_constexpr_evaluation_checker ();
+ bool evaluation_restricted_p () const;
+};
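+
+/* A combined usage sketch; the folding step and cache are hypothetical:
+
+     uid_sensitive_constexpr_evaluation_sentinel sentinel;
+     uid_sensitive_constexpr_evaluation_checker checker;
+     tree r = fold_expr (t);                    // hypothetical
+     if (!checker.evaluation_restricted_p ())
+       cache_result (t, r);                     // hypothetical
+
+   i.e. only cache results whose computation was not cut short by the
+   UID-generation restriction.  */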
+
+void cp_tree_c_finish_parsing ();
+
+/* In cp-ubsan.cc */
+extern void cp_ubsan_maybe_instrument_member_call (tree);
+extern void cp_ubsan_instrument_member_accesses (tree *);
+extern tree cp_ubsan_maybe_instrument_downcast (location_t, tree, tree, tree);
+extern tree cp_ubsan_maybe_instrument_cast_to_vbase (location_t, tree, tree);
+extern void cp_ubsan_maybe_initialize_vtbl_ptrs (tree);
+
+/* In coroutines.cc */
+extern tree finish_co_return_stmt (location_t, tree);
+extern tree finish_co_await_expr (location_t, tree);
+extern tree finish_co_yield_expr (location_t, tree);
+extern tree coro_validate_builtin_call (tree,
+ tsubst_flags_t = tf_warning_or_error);
+extern bool morph_fn_to_coro (tree, tree *, tree *);
+extern tree coro_get_actor_function (tree);
+extern tree coro_get_destroy_function (tree);
+extern tree coro_get_ramp_function (tree);
+
+/* contracts.cc */
+extern tree make_postcondition_variable (cp_expr);
+extern tree make_postcondition_variable (cp_expr, tree);
+extern tree grok_contract (tree, tree, tree, cp_expr, location_t);
+extern tree finish_contract_condition (cp_expr);
+
+/* Return the first contract in ATTRS, or NULL_TREE if there are none. */
+
+inline tree
+find_contract (tree attrs)
+{
+ while (attrs && !cxx_contract_attribute_p (attrs))
+ attrs = TREE_CHAIN (attrs);
+ return attrs;
+}
+
+inline void
+set_decl_contracts (tree decl, tree contract_attrs)
+{
+ remove_contract_attributes (decl);
+ DECL_ATTRIBUTES (decl) = chainon (DECL_ATTRIBUTES (decl), contract_attrs);
+}
+
+/* Returns the computed semantic of the node. */
+
+inline contract_semantic
+get_contract_semantic (const_tree t)
+{
+  return (contract_semantic) (TREE_LANG_FLAG_3 (CONTRACT_CHECK (t))
+			       | (TREE_LANG_FLAG_2 (t) << 1)
+			       | (TREE_LANG_FLAG_0 (t) << 2));
+}
+
+/* Sets the computed semantic of the node. */
+
+inline void
+set_contract_semantic (tree t, contract_semantic semantic)
+{
+ TREE_LANG_FLAG_3 (CONTRACT_CHECK (t)) = semantic & 0x01;
+ TREE_LANG_FLAG_2 (t) = (semantic & 0x02) >> 1;
+ TREE_LANG_FLAG_0 (t) = (semantic & 0x04) >> 2;
+}
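+
+/* For example, semantic == 5 (binary 101) is stored as
+   TREE_LANG_FLAG_3 = 1 (bit 0), TREE_LANG_FLAG_2 = 0 (bit 1) and
+   TREE_LANG_FLAG_0 = 1 (bit 2); get_contract_semantic reassembles
+   the same value from those three flag bits.  */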
+
+/* Inline bodies. */
+
+inline tree
+ovl_first (tree node)
+{
+ while (TREE_CODE (node) == OVERLOAD)
+ node = OVL_FUNCTION (node);
+ return node;
+}
+
+inline bool
+type_unknown_p (const_tree expr)
+{
+ return TREE_TYPE (expr) == unknown_type_node;
+}
+
+inline hashval_t
+named_decl_hash::hash (const value_type decl)
+{
+ tree name = (TREE_CODE (decl) == BINDING_VECTOR
+ ? BINDING_VECTOR_NAME (decl) : OVL_NAME (decl));
+ return name ? IDENTIFIER_HASH_VALUE (name) : 0;
+}
+
+inline bool
+named_decl_hash::equal (const value_type existing, compare_type candidate)
+{
+ tree name = (TREE_CODE (existing) == BINDING_VECTOR
+ ? BINDING_VECTOR_NAME (existing) : OVL_NAME (existing));
+ return candidate == name;
+}
+
+inline bool
+null_node_p (const_tree expr)
+{
+ STRIP_ANY_LOCATION_WRAPPER (expr);
+ return expr == null_node;
+}
+
+/* True iff T is a variable template declaration. */
+inline bool
+variable_template_p (tree t)
+{
+ if (TREE_CODE (t) != TEMPLATE_DECL)
+ return false;
+ if (!PRIMARY_TEMPLATE_P (t))
+ return false;
+ if (tree r = DECL_TEMPLATE_RESULT (t))
+ return VAR_P (r);
+ return false;
+}
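+
+/* E.g. given "template<class T> constexpr bool is_ptr_v = false;",
+   the TEMPLATE_DECL for is_ptr_v satisfies variable_template_p.  */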
+
+/* True iff T is a standard concept definition.  This will return
+   true for both the template and the underlying declaration.  */
+
+inline bool
+standard_concept_p (tree t)
+{
+ if (TREE_CODE (t) == TEMPLATE_DECL)
+ t = DECL_TEMPLATE_RESULT (t);
+ return TREE_CODE (t) == CONCEPT_DECL;
+}
+
+/* True iff T is a variable concept definition. This will return
+ true for both the template and the underlying declaration. */
+
+inline bool
+variable_concept_p (tree t)
+{
+ if (TREE_CODE (t) == TEMPLATE_DECL)
+ t = DECL_TEMPLATE_RESULT (t);
+ return VAR_P (t) && DECL_DECLARED_CONCEPT_P (t);
+}
+
+/* True iff T is a function concept definition or an overload set
+ containing multiple function concepts. This will return true for
+ both the template and the underlying declaration. */
+
+inline bool
+function_concept_p (tree t)
+{
+ if (TREE_CODE (t) == OVERLOAD)
+ t = OVL_FIRST (t);
+ if (TREE_CODE (t) == TEMPLATE_DECL)
+ t = DECL_TEMPLATE_RESULT (t);
+ return TREE_CODE (t) == FUNCTION_DECL && DECL_DECLARED_CONCEPT_P (t);
+}
+
+/* True iff T is a standard, variable, or function concept. */
+
+inline bool
+concept_definition_p (tree t)
+{
+ if (t == error_mark_node)
+ return false;
+
+ /* Adjust for function concept overloads. */
+ if (TREE_CODE (t) == OVERLOAD)
+ t = OVL_FIRST (t);
+
+ /* See through templates. */
+ if (TREE_CODE (t) == TEMPLATE_DECL)
+ t = DECL_TEMPLATE_RESULT (t);
+
+ /* The obvious and easy case. */
+ if (TREE_CODE (t) == CONCEPT_DECL)
+ return true;
+
+ /* Definitely not a concept. */
+ if (!VAR_OR_FUNCTION_DECL_P (t))
+ return false;
+ if (!DECL_LANG_SPECIFIC (t))
+ return false;
+
+ return DECL_DECLARED_CONCEPT_P (t);
+}
+
+/* Same as above, but for const trees. */
+
+inline bool
+concept_definition_p (const_tree t)
+{
+ return concept_definition_p (const_cast<tree> (t));
+}
+
+/* True if t is an expression that checks a concept. */
+
+inline bool
+concept_check_p (const_tree t)
+{
+ if (TREE_CODE (t) == CALL_EXPR)
+ t = CALL_EXPR_FN (t);
+ if (t && TREE_CODE (t) == TEMPLATE_ID_EXPR)
+ return concept_definition_p (TREE_OPERAND (t, 0));
+ return false;
+}
+
+/* Helpers for IMPLICIT_RVALUE_P to look through automatic dereference. */
+
+inline bool
+implicit_rvalue_p (const_tree t)
+{
+ if (REFERENCE_REF_P (t))
+ t = TREE_OPERAND (t, 0);
+ return ((TREE_CODE (t) == NON_LVALUE_EXPR
+ || TREE_CODE (t) == STATIC_CAST_EXPR)
+ && IMPLICIT_RVALUE_P (t));
+}
+inline tree
+set_implicit_rvalue_p (tree ot)
+{
+ tree t = ot;
+ if (REFERENCE_REF_P (t))
+ t = TREE_OPERAND (t, 0);
+ IMPLICIT_RVALUE_P (t) = 1;
+ return ot;
+}
+
+/* True if t is a "constrained auto" type-specifier. */
+
+inline bool
+is_constrained_auto (const_tree t)
+{
+ return is_auto (t) && PLACEHOLDER_TYPE_CONSTRAINTS_INFO (t);
+}
+
+/* True if CODE, a tree code, denotes a tree whose operand is not evaluated
+   as per [expr.context], i.e., an operand of sizeof, decltype, alignof,
+   noexcept, or a requires-expression.  */
+
+inline bool
+unevaluated_p (tree_code code)
+{
+ return (code == DECLTYPE_TYPE
+ || code == ALIGNOF_EXPR
+ || code == SIZEOF_EXPR
+ || code == NOEXCEPT_EXPR
+ || code == REQUIRES_EXPR);
+}
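+
+/* E.g. in "sizeof (f ())" or "noexcept (g ())" the call is never
+   evaluated, which is why SIZEOF_EXPR and NOEXCEPT_EXPR are in this
+   set.  */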
+
+/* RAII class to push/pop the access scope for T. */
+
+struct push_access_scope_guard
+{
+ tree decl;
+ push_access_scope_guard (tree t)
+ : decl (t)
+ {
+ if (VAR_OR_FUNCTION_DECL_P (decl)
+ || TREE_CODE (decl) == TYPE_DECL)
+ push_access_scope (decl);
+ else
+ decl = NULL_TREE;
+ }
+ ~push_access_scope_guard ()
+ {
+ if (decl)
+ pop_access_scope (decl);
+ }
+};
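+
+/* Usage sketch (illustrative caller): enter DECL's access scope for
+   the duration of a check and leave it automatically.
+
+     {
+       push_access_scope_guard ag (decl);
+       // ... perform access checks as if inside DECL's scope ...
+     }  // pop_access_scope runs here if a scope was pushed
+
+   Decls other than variables, functions and types are ignored, so the
+   guard is safe to use unconditionally.  */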
+
+/* True if TYPE is an extended floating-point type. */
+
+inline bool
+extended_float_type_p (tree type)
+{
+ type = TYPE_MAIN_VARIANT (type);
+ for (int i = 0; i < NUM_FLOATN_NX_TYPES; ++i)
+ if (type == FLOATN_TYPE_NODE (i))
+ return true;
+ if (type == bfloat16_type_node)
+ return true;
+ return false;
+}
+
+#if CHECKING_P
+namespace selftest {
+ extern void run_cp_tests (void);
+
+ /* Declarations for specific families of tests within cp,
+ by source file, in alphabetical order. */
+ extern void cp_pt_cc_tests ();
+ extern void cp_tree_cc_tests (void);
+} // namespace selftest
+#endif /* #if CHECKING_P */
+
+/* -- end of C++ */
+
+#endif /* ! GCC_CP_TREE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cxx-pretty-print.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cxx-pretty-print.h
new file mode 100644
index 0000000..2e4655e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/cxx-pretty-print.h
@@ -0,0 +1,117 @@
+/* Interface for the GNU C++ pretty-printer.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Gabriel Dos Reis <gdr@integrable-solutions.net>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CXX_PRETTY_PRINT_H
+#define GCC_CXX_PRETTY_PRINT_H
+
+#include "c-family/c-pretty-print.h"
+
+enum cxx_pretty_printer_flags
+{
+ /* Ask for a qualified-id. */
+ pp_cxx_flag_default_argument = 1 << pp_c_flag_last_bit
+};
+
+class cxx_pretty_printer : public c_pretty_printer
+{
+public:
+ cxx_pretty_printer ();
+
+ pretty_printer *clone () const override;
+
+ void constant (tree) final override;
+ void id_expression (tree) final override;
+ void primary_expression (tree) final override;
+ void postfix_expression (tree) final override;
+ void unary_expression (tree) final override;
+ void multiplicative_expression (tree) final override;
+ void conditional_expression (tree) final override;
+ void assignment_expression (tree) final override;
+ void expression (tree) final override;
+ void type_id (tree) final override;
+ void statement (tree) final override;
+ void declaration (tree) final override;
+ void declaration_specifiers (tree) final override;
+ void simple_type_specifier (tree) final override;
+ void function_specifier (tree) final override;
+ void declarator (tree) final override;
+ void direct_declarator (tree) final override;
+ void abstract_declarator (tree) final override;
+ void direct_abstract_declarator (tree) final override;
+
+ /* This is the enclosing scope of the entity being pretty-printed. */
+ tree enclosing_scope;
+};
+
+#define pp_cxx_cv_qualifier_seq(PP, T) \
+ pp_c_type_qualifier_list (PP, T)
+#define pp_cxx_cv_qualifiers(PP, CV, FT) \
+ pp_c_cv_qualifiers (PP, CV, FT)
+
+#define pp_cxx_whitespace(PP) pp_c_whitespace (PP)
+#define pp_cxx_left_paren(PP) pp_c_left_paren (PP)
+#define pp_cxx_right_paren(PP) pp_c_right_paren (PP)
+#define pp_cxx_left_brace(PP) pp_c_left_brace (PP)
+#define pp_cxx_right_brace(PP) pp_c_right_brace (PP)
+#define pp_cxx_left_bracket(PP) pp_c_left_bracket (PP)
+#define pp_cxx_right_bracket(PP) pp_c_right_bracket (PP)
+#define pp_cxx_dot(PP) pp_c_dot (PP)
+#define pp_cxx_ampersand(PP) pp_c_ampersand (PP)
+#define pp_cxx_star(PP) pp_c_star (PP)
+#define pp_cxx_arrow(PP) pp_c_arrow (PP)
+#define pp_cxx_semicolon(PP) pp_c_semicolon (PP)
+#define pp_cxx_complement(PP) pp_c_complement (PP)
+
+#define pp_cxx_ws_string(PP, I) pp_c_ws_string (PP, I)
+#define pp_cxx_identifier(PP, I) pp_c_identifier (PP, I)
+#define pp_cxx_tree_identifier(PP, T) \
+ pp_c_tree_identifier (PP, T)
+
+void pp_cxx_begin_template_argument_list (cxx_pretty_printer *);
+void pp_cxx_end_template_argument_list (cxx_pretty_printer *);
+void pp_cxx_colon_colon (cxx_pretty_printer *);
+void pp_cxx_separate_with (cxx_pretty_printer *, int);
+
+void pp_cxx_canonical_template_parameter (cxx_pretty_printer *, tree);
+void pp_cxx_trait (cxx_pretty_printer *, tree);
+void pp_cxx_va_arg_expression (cxx_pretty_printer *, tree);
+void pp_cxx_offsetof_expression (cxx_pretty_printer *, tree);
+void pp_cxx_addressof_expression (cxx_pretty_printer *, tree);
+void pp_cxx_userdef_literal (cxx_pretty_printer *, tree);
+void pp_cxx_requires_clause (cxx_pretty_printer *, tree);
+void pp_cxx_requires_expr (cxx_pretty_printer *, tree);
+void pp_cxx_simple_requirement (cxx_pretty_printer *, tree);
+void pp_cxx_type_requirement (cxx_pretty_printer *, tree);
+void pp_cxx_compound_requirement (cxx_pretty_printer *, tree);
+void pp_cxx_nested_requirement (cxx_pretty_printer *, tree);
+void pp_cxx_predicate_constraint (cxx_pretty_printer *, tree);
+void pp_cxx_expression_constraint (cxx_pretty_printer *, tree);
+void pp_cxx_type_constraint (cxx_pretty_printer *, tree);
+void pp_cxx_implicit_conversion_constraint (cxx_pretty_printer *, tree);
+void pp_cxx_argument_deduction_constraint (cxx_pretty_printer *, tree);
+void pp_cxx_exception_constraint (cxx_pretty_printer *, tree);
+void pp_cxx_parameterized_constraint (cxx_pretty_printer *, tree);
+void pp_cxx_conjunction (cxx_pretty_printer *, tree);
+void pp_cxx_disjunction (cxx_pretty_printer *, tree);
+void pp_cxx_constraint (cxx_pretty_printer *, tree);
+void pp_cxx_constrained_type_spec (cxx_pretty_printer *, tree);
+void pp_cxx_parameter_mapping (cxx_pretty_printer *, tree);
+
+#endif /* GCC_CXX_PRETTY_PRINT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/name-lookup.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/name-lookup.h
new file mode 100644
index 0000000..b3e7085
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/name-lookup.h
@@ -0,0 +1,502 @@
+/* Declarations for -*- C++ -*- name lookup routines.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Gabriel Dos Reis <gdr@integrable-solutions.net>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CP_NAME_LOOKUP_H
+#define GCC_CP_NAME_LOOKUP_H
+
+#include "c-family/c-common.h"
+
+
+/* The datatype used to implement C++ scope. */
+struct cp_binding_level;
+
+/* Nonzero if this binding is for a local scope, as opposed to a class
+ or namespace scope. */
+#define LOCAL_BINDING_P(NODE) ((NODE)->is_local)
+
+/* True if NODE->value is from a base class of the class which is
+ currently being defined. */
+#define INHERITED_VALUE_BINDING_P(NODE) ((NODE)->value_is_inherited)
+
+/* The IMPLICIT_TYPEDEF is hidden from ordinary name lookup (it was
+   injected via a local class's friend decl).  The typedef may be in the
+ VALUE or the TYPE slot. We do not get the situation where the
+ value and type slots are both filled and both hidden. */
+#define HIDDEN_TYPE_BINDING_P(NODE) ((NODE)->type_is_hidden)
+
+/* Datatype that represents binding established by a declaration between
+ a name and a C++ entity. */
+struct GTY(()) cxx_binding {
+ /* Link to chain together various bindings for this name. */
+ cxx_binding *previous;
+ /* The non-type entity this name is bound to. */
+ tree value;
+ /* The type entity this name is bound to. */
+ tree type;
+ /* The scope at which this binding was made. */
+ cp_binding_level *scope;
+
+ bool value_is_inherited : 1;
+ bool is_local : 1;
+ bool type_is_hidden : 1;
+};
+
+/* Datatype used to temporarily save C++ bindings (for implicit
+   instantiation purposes and the like).  Implemented in decl.cc.  */
+struct GTY(()) cxx_saved_binding {
+ /* The name of the current binding. */
+ tree identifier;
+ /* The binding we're saving. */
+ cxx_binding *binding;
+ tree real_type_value;
+};
+
+/* To support lazy module loading, we squirrel away a section number
+ (and a couple of flags) in the binding slot of unloaded bindings.
+   We rely on pointers being aligned, and set the bottom bit to
+   mark a lazy value.  GTY doesn't like an array of unions, so we have
+   a containing struct.  */
+
+struct GTY(()) binding_slot {
+ union GTY((desc ("%1.is_lazy ()"))) binding_slot_lazy {
+ tree GTY((tag ("false"))) binding;
+ } u;
+
+ operator tree & ()
+ {
+ gcc_checking_assert (!is_lazy ());
+ return u.binding;
+ }
+ binding_slot &operator= (tree t)
+ {
+ u.binding = t;
+ return *this;
+ }
+ bool is_lazy () const
+ {
+ return bool (uintptr_t (u.binding) & 1);
+ }
+ void set_lazy (unsigned snum)
+ {
+ gcc_checking_assert (!u.binding);
+ u.binding = tree (uintptr_t ((snum << 1) | 1));
+ }
+ void or_lazy (unsigned snum)
+ {
+ gcc_checking_assert (is_lazy ());
+ u.binding = tree (uintptr_t (u.binding) | (snum << 1));
+ }
+ unsigned get_lazy () const
+ {
+ gcc_checking_assert (is_lazy ());
+ return unsigned (uintptr_t (u.binding) >> 1);
+ }
+};
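+
+/* For example, set_lazy (5) stores the bit pattern (5 << 1) | 1 == 11
+   in the slot: the low tag bit marks it lazy and the remaining bits
+   carry the section number, so get_lazy () returns 5.  A real binding
+   is pointer-aligned, so its low bit is 0 and is_lazy () is false.  */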
+
+/* Bindings for modules are held in a sparse array. There is always a
+ current TU slot, others are allocated as needed. By construction
+ of the importing mechanism we only ever need to append to the
+ array. Rather than have straight index/slot tuples, we bunch them
+ up for greater packing.
+
+ The cluster representation packs well on a 64-bit system. */
+
+#define BINDING_VECTOR_SLOTS_PER_CLUSTER 2
+struct binding_index {
+ unsigned short base;
+ unsigned short span;
+};
+
+struct GTY(()) binding_cluster
+{
+ binding_index GTY((skip)) indices[BINDING_VECTOR_SLOTS_PER_CLUSTER];
+ binding_slot slots[BINDING_VECTOR_SLOTS_PER_CLUSTER];
+};
+
+/* These two fields overlay the lang flags, so don't use those.  */
+#define BINDING_VECTOR_ALLOC_CLUSTERS(NODE) \
+ (BINDING_VECTOR_CHECK (NODE)->base.u.dependence_info.clique)
+#define BINDING_VECTOR_NUM_CLUSTERS(NODE) \
+ (BINDING_VECTOR_CHECK (NODE)->base.u.dependence_info.base)
+#define BINDING_VECTOR_CLUSTER_BASE(NODE) \
+ (((tree_binding_vec *)BINDING_VECTOR_CHECK (NODE))->vec)
+#define BINDING_VECTOR_CLUSTER_LAST(NODE) \
+ (&BINDING_VECTOR_CLUSTER (NODE, BINDING_VECTOR_NUM_CLUSTERS (NODE) - 1))
+#define BINDING_VECTOR_CLUSTER(NODE,IX) \
+ (((tree_binding_vec *)BINDING_VECTOR_CHECK (NODE))->vec[IX])
+
+struct GTY(()) tree_binding_vec {
+ struct tree_base base;
+ tree name;
+ binding_cluster GTY((length ("%h.base.u.dependence_info.base"))) vec[1];
+};
+
+/* The name of a module vector. */
+#define BINDING_VECTOR_NAME(NODE) \
+ (((tree_binding_vec *)BINDING_VECTOR_CHECK (NODE))->name)
+
+/* tree_binding_vec uses the base.u.dependence_info.base field for its
+   length.  It does not have lang_flag etc. available!  */
+
+/* These two flags note if a module-vector contains deduplicated
+ bindings (i.e. multiple declarations in different imports). */
+/* This binding contains duplicate references to a global module
+ entity. */
+#define BINDING_VECTOR_GLOBAL_DUPS_P(NODE) \
+ (BINDING_VECTOR_CHECK (NODE)->base.static_flag)
+/* This binding contains duplicate references to a partitioned module
+ entity. */
+#define BINDING_VECTOR_PARTITION_DUPS_P(NODE) \
+ (BINDING_VECTOR_CHECK (NODE)->base.volatile_flag)
+
+/* These two flags indicate the provenance of the bindings on this
+ particular vector slot. We can of course determine this from slot
+ number, but that's a relatively expensive lookup. This avoids
+ that when iterating. */
+/* This slot is part of the global module (a header unit). */
+#define MODULE_BINDING_GLOBAL_P(NODE) \
+ (OVERLOAD_CHECK (NODE)->base.static_flag)
+/* This slot is part of the current module (a partition or primary). */
+#define MODULE_BINDING_PARTITION_P(NODE) \
+ (OVERLOAD_CHECK (NODE)->base.volatile_flag)
+
+extern void set_identifier_type_value (tree, tree);
+extern void push_binding (tree, tree, cp_binding_level*);
+extern void pop_local_binding (tree, tree);
+extern void pop_bindings_and_leave_scope (void);
+extern tree constructor_name (tree);
+extern bool constructor_name_p (tree, tree);
+
+/* The kinds of scopes we recognize. */
+enum scope_kind {
+ sk_block = 0, /* An ordinary block scope. This enumerator must
+ have the value zero because "cp_binding_level"
+ is initialized by using "memset" to set the
+ contents to zero, and the default scope kind
+ is "sk_block". */
+  sk_cleanup,    /* A (pseudo-)scope for cleanup.  It is
+                    pseudo in that it is transparent to name lookup
+                    activities.  */
+ sk_try, /* A try-block. */
+ sk_catch, /* A catch-block. */
+  sk_for,        /* The scope of the variable declared in an
+                    init-statement.  */
+ sk_cond, /* The scope of the variable declared in the condition
+ of an if or switch statement. */
+ sk_stmt_expr, /* GNU statement expression block. */
+ sk_function_parms, /* The scope containing function parameters. */
+ sk_class, /* The scope containing the members of a class. */
+ sk_scoped_enum, /* The scope containing the enumerators of a C++11
+ scoped enumeration. */
+ sk_namespace, /* The scope containing the members of a
+ namespace, including the global scope. */
+ sk_template_parms, /* A scope for template parameters. */
+ sk_template_spec, /* Like sk_template_parms, but for an explicit
+ specialization. Since, by definition, an
+ explicit specialization is introduced by
+ "template <>", this scope is always empty. */
+ sk_transaction, /* A synchronized or atomic statement. */
+ sk_omp /* An OpenMP structured block. */
+};
+
+struct GTY(()) cp_class_binding {
+ cxx_binding *base;
+ /* The bound name. */
+ tree identifier;
+};
+
+/* For each binding contour we allocate a binding_level structure
+ which records the names defined in that contour.
+ Contours include:
+ 0) the global one
+ 1) one for each function definition,
+ where internal declarations of the parameters appear.
+ 2) one for each compound statement,
+ to record its declarations.
+
+ The current meaning of a name can be found by searching the levels
+ from the current one out to the global one.
+
+   Off to the side there may be the class_binding_level.  This exists
+   only to catch class-local declarations.  It is otherwise nonexistent.
+
+ Also there may be binding levels that catch cleanups that must be
+ run when exceptions occur. Thus, to see whether a name is bound in
+ the current scope, it is not enough to look in the
+ CURRENT_BINDING_LEVEL. You should use lookup_name_current_level
+ instead. */
+
+struct GTY(()) cp_binding_level {
+ /* A chain of _DECL nodes for all variables, constants, functions,
+ and typedef types. These are in the reverse of the order
+ supplied. There may be OVERLOADs on this list, too, but they
+ are wrapped in TREE_LISTs; the TREE_VALUE is the OVERLOAD. */
+ tree names;
+
+ /* Using directives. */
+ vec<tree, va_gc> *using_directives;
+
+ /* For the binding level corresponding to a class, the entities
+ declared in the class or its base classes. */
+ vec<cp_class_binding, va_gc> *class_shadowed;
+
+ /* Similar to class_shadowed, but for IDENTIFIER_TYPE_VALUE, and
+ is used for all binding levels. The TREE_PURPOSE is the name of
+ the entity, the TREE_TYPE is the associated type. In addition
+ the TREE_VALUE is the IDENTIFIER_TYPE_VALUE before we entered
+ the class. */
+ tree type_shadowed;
+
+  /* For each level (except the global one),
+ a chain of BLOCK nodes for all the levels
+ that were entered and exited one level down. */
+ tree blocks;
+
+ /* The entity (namespace, class, function) the scope of which this
+ binding contour corresponds to. Otherwise NULL. */
+ tree this_entity;
+
+ /* The binding level which this one is contained in (inherits from). */
+ cp_binding_level *level_chain;
+
+ /* STATEMENT_LIST for statements in this binding contour.
+ Only used at present for SK_CLEANUP temporary bindings. */
+ tree statement_list;
+
+ /* Binding depth at which this level began. */
+ int binding_depth;
+
+ /* The kind of scope that this object represents. However, a
+ SK_TEMPLATE_SPEC scope is represented with KIND set to
+ SK_TEMPLATE_PARMS and EXPLICIT_SPEC_P set to true. */
+ ENUM_BITFIELD (scope_kind) kind : 4;
+
+ /* True if this scope is an SK_TEMPLATE_SPEC scope. This field is
+ only valid if KIND == SK_TEMPLATE_PARMS. */
+ BOOL_BITFIELD explicit_spec_p : 1;
+
+ /* true means make a BLOCK for this level regardless of all else. */
+ unsigned keep : 1;
+
+ /* Nonzero if this level can safely have additional
+ cleanup-needing variables added to it. */
+ unsigned more_cleanups_ok : 1;
+ unsigned have_cleanups : 1;
+
+ /* Transient state set if this scope is of sk_class kind
+ and is in the process of defining 'this_entity'. Reset
+ on leaving the class definition to allow for the scope
+ to be subsequently re-used as a non-defining scope for
+ 'this_entity'. */
+ unsigned defining_class_p : 1;
+
+ /* True for SK_FUNCTION_PARMS of a requires-expression. */
+ unsigned requires_expression: 1;
+
+ /* 22 bits left to fill a 32-bit word. */
+};
+
+/* The binding level currently in effect. */
+
+#define current_binding_level \
+ (*(cfun && cp_function_chain && cp_function_chain->bindings \
+ ? &cp_function_chain->bindings \
+ : &scope_chain->bindings))
+
+/* The binding level of the current class, if any. */
+
+#define class_binding_level scope_chain->class_bindings
+
+/* True if SCOPE designates the global scope binding contour. */
+#define global_scope_p(SCOPE) \
+ ((SCOPE) == NAMESPACE_LEVEL (global_namespace))
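+
+/* An illustrative walk, with a hypothetical visitor: enclosing scopes
+   are visited by chasing level_chain outward until the global scope.
+
+     for (cp_binding_level *b = current_binding_level;
+          !global_scope_p (b); b = b->level_chain)
+       visit (b->names);                        // hypothetical
+  */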
+
+extern cp_binding_level *leave_scope (void);
+extern bool kept_level_p (void);
+extern bool global_bindings_p (void);
+extern bool toplevel_bindings_p (void);
+extern bool namespace_bindings_p (void);
+extern bool local_bindings_p (void);
+extern bool template_parm_scope_p (void);
+extern scope_kind innermost_scope_kind (void);
+extern cp_binding_level *begin_scope (scope_kind, tree);
+extern void print_binding_stack (void);
+extern void pop_everything (void);
+extern void keep_next_level (bool);
+extern bool is_ancestor (tree ancestor, tree descendant);
+extern bool is_nested_namespace (tree parent, tree descendant,
+ bool inline_only = false);
+extern tree push_scope (tree);
+extern void pop_scope (tree);
+extern tree push_inner_scope (tree);
+extern void pop_inner_scope (tree, tree);
+extern void push_binding_level (cp_binding_level *);
+
+extern bool handle_namespace_attrs (tree, tree);
+extern void pushlevel_class (void);
+extern void poplevel_class (void);
+
+/* What kind of scopes name lookup looks in. An enum class so we
+ don't accidentally mix integers. */
+enum class LOOK_where
+{
+ BLOCK = 1 << 0, /* Consider block scopes. */
+ CLASS = 1 << 1, /* Consider class scopes. */
+ NAMESPACE = 1 << 2, /* Consider namespace scopes. */
+
+ ALL = BLOCK | CLASS | NAMESPACE,
+ BLOCK_NAMESPACE = BLOCK | NAMESPACE,
+ CLASS_NAMESPACE = CLASS | NAMESPACE,
+};
+constexpr LOOK_where operator| (LOOK_where a, LOOK_where b)
+{
+ return LOOK_where (unsigned (a) | unsigned (b));
+}
+constexpr LOOK_where operator& (LOOK_where a, LOOK_where b)
+{
+ return LOOK_where (unsigned (a) & unsigned (b));
+}
+
+enum class LOOK_want
+{
+ NORMAL = 0, /* Normal lookup -- non-types can hide implicit types. */
+ TYPE = 1 << 1, /* We only want TYPE_DECLS. */
+ NAMESPACE = 1 << 2, /* We only want NAMESPACE_DECLS. */
+
+ HIDDEN_FRIEND = 1 << 3, /* See hidden friends. */
+ HIDDEN_LAMBDA = 1 << 4, /* See lambda-ignored entities. */
+
+ TYPE_NAMESPACE = TYPE | NAMESPACE, /* Either NAMESPACE or TYPE. */
+};
+constexpr LOOK_want operator| (LOOK_want a, LOOK_want b)
+{
+ return LOOK_want (unsigned (a) | unsigned (b));
+}
+constexpr LOOK_want operator& (LOOK_want a, LOOK_want b)
+{
+ return LOOK_want (unsigned (a) & unsigned (b));
+}
+
+extern tree lookup_name (tree, LOOK_where, LOOK_want = LOOK_want::NORMAL);
+/* Also declared in c-family/c-common.h. */
+extern tree lookup_name (tree name);
+inline tree lookup_name (tree name, LOOK_want want)
+{
+ return lookup_name (name, LOOK_where::ALL, want);
+}
+
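+/* Editorial sketch: how the overloads above combine.  `id' stands for
+   a hypothetical IDENTIFIER_NODE.
+
+     tree type_only = lookup_name (id, LOOK_where::ALL, LOOK_want::TYPE);
+     tree anything  = lookup_name (id, LOOK_want::NORMAL);
+
+   The two-argument inline above forwards to LOOK_where::ALL, so both
+   calls search block, class and namespace scopes.  */
+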
+enum class TAG_how
+{
+ CURRENT_ONLY = 0, // Look and insert only in current scope
+
+ GLOBAL = 1, // Unqualified lookup, innermost-non-class insertion
+
+ INNERMOST_NON_CLASS = 2, // Look and insert only into
+ // innermost-non-class
+
+ HIDDEN_FRIEND = 3, // As INNERMOST_NON_CLASS, but hide it
+};
+
+extern tree lookup_elaborated_type (tree, TAG_how);
+extern tree get_namespace_binding (tree ns, tree id);
+extern void set_global_binding (tree decl);
+inline tree get_global_binding (tree id)
+{
+ return get_namespace_binding (NULL_TREE, id);
+}
+extern tree lookup_qualified_name (tree scope, tree name,
+ LOOK_want = LOOK_want::NORMAL,
+ bool = true);
+extern tree lookup_qualified_name (tree scope, const char *name,
+ LOOK_want = LOOK_want::NORMAL,
+ bool = true);
+extern bool pushdecl_class_level (tree);
+extern tree pushdecl_namespace_level (tree, bool hiding = false);
+extern bool push_class_level_binding (tree, tree);
+extern tree get_local_decls ();
+extern int function_parm_depth (void);
+extern tree cp_namespace_decls (tree);
+extern void set_decl_namespace (tree, tree, bool);
+extern void push_decl_namespace (tree);
+extern void pop_decl_namespace (void);
+extern void do_namespace_alias (tree, tree);
+extern tree do_class_using_decl (tree, tree);
+extern tree lookup_arg_dependent (tree, tree, vec<tree, va_gc> *);
+extern tree search_anon_aggr (tree, tree, bool = false);
+extern tree get_class_binding_direct (tree, tree, bool want_type = false);
+extern tree get_class_binding (tree, tree, bool want_type = false);
+extern tree *find_member_slot (tree klass, tree name);
+extern tree *add_member_slot (tree klass, tree name);
+extern void resort_type_member_vec (void *, void *,
+ gt_pointer_operator, void *);
+extern vec<tree, va_gc> *set_class_bindings (tree, int extra = 0);
+extern void insert_late_enum_def_bindings (tree, tree);
+extern tree innermost_non_namespace_value (tree);
+extern cxx_binding *outer_binding (tree, cxx_binding *, bool);
+extern void cp_emit_debug_info_for_using (tree, tree);
+
+extern void finish_nonmember_using_decl (tree scope, tree name);
+extern void finish_using_directive (tree target, tree attribs);
+void push_local_extern_decl_alias (tree decl);
+extern tree pushdecl (tree, bool hiding = false);
+extern tree pushdecl_outermost_localscope (tree);
+extern tree pushdecl_top_level (tree);
+extern tree pushdecl_top_level_and_finish (tree, tree);
+extern tree pushtag (tree, tree, TAG_how = TAG_how::CURRENT_ONLY);
+extern int push_namespace (tree, bool make_inline = false);
+extern void pop_namespace (void);
+extern void push_nested_namespace (tree);
+extern void pop_nested_namespace (tree);
+extern void push_to_top_level (void);
+extern void pop_from_top_level (void);
+extern bool maybe_push_to_top_level (tree);
+extern void maybe_pop_from_top_level (bool);
+extern void push_using_decl_bindings (tree, tree);
+
+/* Lower level interface for modules. */
+extern tree *mergeable_namespace_slots (tree ns, tree name, bool is_attached,
+ tree *mvec);
+extern void add_mergeable_namespace_entity (tree *slot, tree decl);
+extern tree lookup_class_binding (tree ctx, tree name);
+extern bool import_module_binding (tree ctx, tree name, unsigned mod,
+ unsigned snum);
+extern bool set_module_binding (tree ctx, tree name, unsigned mod,
+ int mod_glob_flag,
+ tree value, tree type, tree visible);
+extern void add_module_namespace_decl (tree ns, tree decl);
+
+enum WMB_Flags
+{
+ WMB_None = 0,
+ WMB_Dups = 1 << 0,
+ WMB_Export = 1 << 1,
+ WMB_Using = 1 << 2,
+ WMB_Hidden = 1 << 3,
+};
+
+extern unsigned walk_module_binding (tree binding, bitmap partitions,
+ bool (*)(tree decl, WMB_Flags, void *data),
+ void *data);
+extern tree add_imported_namespace (tree ctx, tree name, location_t,
+ unsigned module,
+ bool inline_p, bool visible_p);
+extern const char *get_cxx_dialect_name (enum cxx_dialect dialect);
+
+#endif /* GCC_CP_NAME_LOOKUP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/operators.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/operators.def
new file mode 100644
index 0000000..e948175
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/operators.def
@@ -0,0 +1,163 @@
+/* -*-C-*-
+
+ This file contains definitions of the various C++ operators,
+ including both overloadable operators (like `+') and
+ non-overloadable operators (like the `?:' ternary operator).
+ Written by Mark Mitchell <mark@codesourcery.com>
+
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* The DEF_OPERATOR macro takes the following arguments:
+
+ NAME
+
+ The name of the operator, as a C string, but without the
+ preceding `operator'. This is the name that would be given in
+ the source program. For `operator +', for example, this would be
+ `+'.
+
+ CODE
+
+ The tree_code for this operator. For `operator +', for example,
+ this would be PLUS_EXPR. Because there are no tree codes for
+ assignment operators, the same tree-codes are reused; i.e.,
+     `operator +=' will also have PLUS_EXPR as its CODE.
+
+ MANGLING
+
+ The mangling prefix for the operator, as a C string, and as
+ mangled under the new ABI. For `operator +', for example, this
+ would be "pl".
+
+ FLAGS
+
+ ovl_op_flags bits. Postincrement and postdecrement operators are
+ marked as binary.
+
+ Before including this file, you should define DEF_OPERATOR
+ to take these arguments.
+
+ There is code (such as in grok_op_properties) that depends on the
+   order the operators are presented in this file.  Unary ops must
+   precede a matching binary op (e.g. '+').  Assignment operators must
+ be last, after OPERATOR_TRANSITION. */
+
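+/* Editorial sketch: this is the usual X-macro protocol.  A consumer
+   defines DEF_OPERATOR, includes this file, and receives one expansion
+   per operator; the file #undefs both macros at the end.  Hypothetical
+   client code building a table of mangling prefixes:
+
+     #define DEF_OPERATOR(NAME, CODE, MANGLING, FLAGS) MANGLING,
+     static const char *const op_manglings[] = {
+     #include "operators.def"
+     };
+
+   Because DEF_ASSN_OPERATOR (just below) defaults to DEF_OPERATOR, the
+   assignment operators land in the same table unless the client
+   defines it separately.  */
+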
+/* Use DEF_ASSN_OPERATOR to define an assignment operator. Its
+ arguments are as for DEF_OPERATOR, but there is no need to provide
+ FLAGS (OVL_OP_FLAG_BINARY). */
+
+#ifndef DEF_ASSN_OPERATOR
+#define DEF_ASSN_OPERATOR(NAME, CODE, MANGLING) \
+ DEF_OPERATOR(NAME, CODE, MANGLING, OVL_OP_FLAG_BINARY)
+#endif
+
+/* Memory allocation operators. ARITY has special meaning. */
+DEF_OPERATOR ("new", NEW_EXPR, "nw", OVL_OP_FLAG_ALLOC)
+DEF_OPERATOR ("new []", VEC_NEW_EXPR, "na",
+ OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_VEC)
+DEF_OPERATOR ("delete", DELETE_EXPR, "dl",
+ OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE)
+DEF_OPERATOR ("delete []", VEC_DELETE_EXPR, "da",
+ OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE | OVL_OP_FLAG_VEC)
+
+/* Unary operators. */
+DEF_OPERATOR ("+", UNARY_PLUS_EXPR, "ps", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR ("-", NEGATE_EXPR, "ng", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR ("&", ADDR_EXPR, "ad", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR ("*", INDIRECT_REF, "de", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR ("~", BIT_NOT_EXPR, "co", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR ("!", TRUTH_NOT_EXPR, "nt", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR ("++", PREINCREMENT_EXPR, "pp", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR ("--", PREDECREMENT_EXPR, "mm", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR ("->", COMPONENT_REF, "pt", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR ("sizeof", SIZEOF_EXPR, "sz", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR ("co_await", CO_AWAIT_EXPR, "aw", OVL_OP_FLAG_UNARY)
+
+/* These are extensions. */
+DEF_OPERATOR ("alignof", ALIGNOF_EXPR, "az", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR ("__imag__", IMAGPART_EXPR, "v18__imag__", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR ("__real__", REALPART_EXPR, "v18__real__", OVL_OP_FLAG_UNARY)
+
+/* Binary operators. */
+DEF_OPERATOR ("+", PLUS_EXPR, "pl", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("-", MINUS_EXPR, "mi", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("*", MULT_EXPR, "ml", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("/", TRUNC_DIV_EXPR, "dv", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("%", TRUNC_MOD_EXPR, "rm", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("&", BIT_AND_EXPR, "an", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("|", BIT_IOR_EXPR, "or", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("^", BIT_XOR_EXPR, "eo", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("<<", LSHIFT_EXPR, "ls", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR (">>", RSHIFT_EXPR, "rs", OVL_OP_FLAG_BINARY)
+
+/* defaultable_fn_check relies on the ordering of the comparison operators. */
+DEF_OPERATOR ("==", EQ_EXPR, "eq", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("!=", NE_EXPR, "ne", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("<", LT_EXPR, "lt", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR (">", GT_EXPR, "gt", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("<=", LE_EXPR, "le", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR (">=", GE_EXPR, "ge", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("<=>", SPACESHIP_EXPR, "ss", OVL_OP_FLAG_BINARY)
+
+DEF_OPERATOR ("&&", TRUTH_ANDIF_EXPR, "aa", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("||", TRUTH_ORIF_EXPR, "oo", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR (",", COMPOUND_EXPR, "cm", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("->*", MEMBER_REF, "pm", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR (".*", DOTSTAR_EXPR, "ds", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("[]", ARRAY_REF, "ix", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("++", POSTINCREMENT_EXPR, "pp", OVL_OP_FLAG_BINARY)
+DEF_OPERATOR ("--", POSTDECREMENT_EXPR, "mm", OVL_OP_FLAG_BINARY)
+
+/* Miscellaneous. */
+DEF_OPERATOR ("?:", COND_EXPR, "qu", OVL_OP_FLAG_NONE)
+DEF_OPERATOR ("()", CALL_EXPR, "cl", OVL_OP_FLAG_NONE)
+
+/* Operators needed for mangling. */
+DEF_OPERATOR (NULL, CAST_EXPR, "cv", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR (NULL, DYNAMIC_CAST_EXPR, "dc", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR (NULL, REINTERPRET_CAST_EXPR, "rc", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR (NULL, CONST_CAST_EXPR, "cc", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR (NULL, STATIC_CAST_EXPR, "sc", OVL_OP_FLAG_UNARY)
+DEF_OPERATOR (NULL, SCOPE_REF, "sr", OVL_OP_FLAG_NONE)
+DEF_OPERATOR (NULL, EXPR_PACK_EXPANSION, "sp", OVL_OP_FLAG_NONE)
+DEF_OPERATOR (NULL, UNARY_LEFT_FOLD_EXPR, "fl", OVL_OP_FLAG_NONE)
+DEF_OPERATOR (NULL, UNARY_RIGHT_FOLD_EXPR, "fr", OVL_OP_FLAG_NONE)
+DEF_OPERATOR (NULL, BINARY_LEFT_FOLD_EXPR, "fL", OVL_OP_FLAG_NONE)
+DEF_OPERATOR (NULL, BINARY_RIGHT_FOLD_EXPR, "fR", OVL_OP_FLAG_NONE)
+
+#ifdef OPERATOR_TRANSITION
+OPERATOR_TRANSITION
+#undef OPERATOR_TRANSITION
+#endif
+
+/* Assignment operators. */
+DEF_ASSN_OPERATOR ("=", NOP_EXPR, "aS")
+DEF_ASSN_OPERATOR ("+=", PLUS_EXPR, "pL")
+DEF_ASSN_OPERATOR ("-=", MINUS_EXPR, "mI")
+DEF_ASSN_OPERATOR ("*=", MULT_EXPR, "mL")
+DEF_ASSN_OPERATOR ("/=", TRUNC_DIV_EXPR, "dV")
+DEF_ASSN_OPERATOR ("%=", TRUNC_MOD_EXPR, "rM")
+DEF_ASSN_OPERATOR ("&=", BIT_AND_EXPR, "aN")
+DEF_ASSN_OPERATOR ("|=", BIT_IOR_EXPR, "oR")
+DEF_ASSN_OPERATOR ("^=", BIT_XOR_EXPR, "eO")
+DEF_ASSN_OPERATOR ("<<=", LSHIFT_EXPR, "lS")
+DEF_ASSN_OPERATOR (">>=", RSHIFT_EXPR, "rS")
+
+#undef DEF_ASSN_OPERATOR
+#undef DEF_OPERATOR
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/type-utils.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/type-utils.h
new file mode 100644
index 0000000..5f67ba2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cp/type-utils.h
@@ -0,0 +1,54 @@
+/* Utilities for querying and manipulating type trees.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CP_TYPE_UTILS_H
+#define GCC_CP_TYPE_UTILS_H
+
+/* Returns the first tree within T that is directly matched by PRED. T may be a
+ type or PARM_DECL and is incrementally decomposed toward its type-specifier
+ until a match is found. NULL is returned if PRED does not match any
+ part of T.
+
+ This is primarily intended for detecting whether T uses `auto' or a concept
+ identifier. Since either of these can only appear as a type-specifier for
+ the declaration in question, only top-level qualifications are traversed;
+ find_type_usage does not look through the whole type. */
+
+inline tree
+find_type_usage (tree t, bool (*pred) (const_tree))
+{
+ if (pred (t))
+ return t;
+
+ enum tree_code code = TREE_CODE (t);
+
+ if (code == POINTER_TYPE || code == REFERENCE_TYPE
+ || code == PARM_DECL || code == OFFSET_TYPE
+ || code == FUNCTION_TYPE || code == METHOD_TYPE
+ || code == ARRAY_TYPE)
+ return find_type_usage (TREE_TYPE (t), pred);
+
+ if (TYPE_PTRMEMFUNC_P (t))
+ return find_type_usage
+ (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (t)), pred);
+
+ return NULL_TREE;
+}
+
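+/* Editorial sketch: the C++ front end's `is_auto' predicate (declared
+   in cp-tree.h) has the matching signature, so detecting an `auto'
+   type-specifier on a hypothetical PARM_DECL `parm' is just:
+
+     tree hit = find_type_usage (parm, is_auto);
+     bool uses_auto = hit != NULL_TREE;  */
+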
+#endif // GCC_CP_TYPE_UTILS_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cppbuiltin.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cppbuiltin.h
new file mode 100644
index 0000000..84daf5a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cppbuiltin.h
@@ -0,0 +1,33 @@
+/* Define built-in macros for all front ends that perform preprocessing.
+ Copyright (C) 2010-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CPPBUILTIN_H
+#define GCC_CPPBUILTIN_H
+
+/* Parse a BASEVER version string of the format "major.minor.patchlevel"
+ or "major.minor" to extract its components. */
+extern void parse_basever (int *, int *, int *);
+
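+/* Editorial sketch of the typical call; if BASEVER has no patchlevel
+   component, the third output is presumably zero (the contract is
+   defined in cppbuiltin.cc):
+
+     int major, minor, patchlevel;
+     parse_basever (&major, &minor, &patchlevel);  */
+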
+/* Define builtin macros common to all languages performing CPP
+   preprocessing.  */
+extern void define_language_independent_builtin_macros (cpp_reader *);
+
+
+#endif /* ! GCC_CPPBUILTIN_H */
+
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cppdefault.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cppdefault.h
new file mode 100644
index 0000000..e26b424
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cppdefault.h
@@ -0,0 +1,76 @@
+/* CPP Library.
+ Copyright (C) 1986-2023 Free Software Foundation, Inc.
+ Contributed by Per Bothner, 1994-95.
+ Based on CCCP program by Paul Rubin, June 1986
+ Adapted to ANSI C, Richard Stallman, Jan 1987
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CPPDEFAULT_H
+#define GCC_CPPDEFAULT_H
+
+/* This is the default list of directories to search for include files.
+ It may be overridden by the various -I and -ixxx options.
+
+ #include "file" looks in the same directory as the current file,
+ then this list.
+ #include <file> just looks in this list.
+
+ All these directories are treated as `system' include directories
+ (they are not subject to pedantic warnings in some cases). */
+
+struct default_include
+{
+ const char *const fname; /* The name of the directory. */
+ const char *const component; /* The component containing the directory
+ (see update_path in prefix.cc) */
+ const char cplusplus; /* When this is non-zero, we should only
+ consider this if we're compiling C++.
+ When the -stdlib option is configured, this
+ may take values greater than 1 to indicate
+ which C++ standard library should be
+ used. */
+ const char cxx_aware; /* Includes in this directory don't need to
+ be wrapped in extern "C" when compiling
+ C++. */
+ const char add_sysroot; /* FNAME should be prefixed by
+ cpp_SYSROOT. */
+ const char multilib; /* FNAME should have appended
+ - the multilib path specified with -imultilib
+ when set to 1,
+ - the multiarch path specified with
+ -imultiarch, when set to 2. */
+};
+
+extern const struct default_include cpp_include_defaults[];
+extern const char cpp_GCC_INCLUDE_DIR[];
+extern const size_t cpp_GCC_INCLUDE_DIR_len;
+
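+/* Editorial sketch: the defaults table ends with an entry whose fname
+   is NULL, so clients walk it like this (register_dir is a
+   hypothetical per-entry action):
+
+     for (const struct default_include *p = cpp_include_defaults;
+          p->fname != NULL; p++)
+       register_dir (p->fname, p->cplusplus, p->add_sysroot);  */
+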
+/* The configure-time prefix, i.e., the value supplied as the argument
+ to --prefix=. */
+extern const char cpp_PREFIX[];
+/* The length of the configure-time prefix. */
+extern const size_t cpp_PREFIX_len;
+/* The configure-time execution prefix. This is typically the lib/gcc
+ subdirectory of cpp_PREFIX. */
+extern const char cpp_EXEC_PREFIX[];
+/* The run-time execution prefix. This is typically the lib/gcc
+ subdirectory of the actual installation. */
+extern const char *gcc_exec_prefix;
+
+/* Return true if the toolchain is relocated. */
+bool cpp_relocated (void);
+
+#endif /* ! GCC_CPPDEFAULT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cpplib.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cpplib.h
new file mode 100644
index 0000000..a6f0abd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cpplib.h
@@ -0,0 +1,1605 @@
+/* Definitions for CPP library.
+ Copyright (C) 1995-2023 Free Software Foundation, Inc.
+ Written by Per Bothner, 1994-95.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding! */
+#ifndef LIBCPP_CPPLIB_H
+#define LIBCPP_CPPLIB_H
+
+#include <sys/types.h>
+#include "symtab.h"
+#include "line-map.h"
+
+typedef struct cpp_reader cpp_reader;
+typedef struct cpp_buffer cpp_buffer;
+typedef struct cpp_options cpp_options;
+typedef struct cpp_token cpp_token;
+typedef struct cpp_string cpp_string;
+typedef struct cpp_hashnode cpp_hashnode;
+typedef struct cpp_macro cpp_macro;
+typedef struct cpp_callbacks cpp_callbacks;
+typedef struct cpp_dir cpp_dir;
+
+struct _cpp_file;
+
+/* The first three groups, apart from '=', can appear in preprocessor
+ expressions (+= and -= are used to indicate unary + and - resp.).
+ This allows a lookup table to be implemented in _cpp_parse_expr.
+
+ The first group, to CPP_LAST_EQ, can be immediately followed by an
+ '='. The lexer needs operators ending in '=', like ">>=", to be in
+ the same order as their counterparts without the '=', like ">>".
+
+ See the cpp_operator table optab in expr.cc if you change the order or
+ add or remove anything in the first group. */
+
+#define TTYPE_TABLE \
+ OP(EQ, "=") \
+ OP(NOT, "!") \
+ OP(GREATER, ">") /* compare */ \
+ OP(LESS, "<") \
+ OP(PLUS, "+") /* math */ \
+ OP(MINUS, "-") \
+ OP(MULT, "*") \
+ OP(DIV, "/") \
+ OP(MOD, "%") \
+ OP(AND, "&") /* bit ops */ \
+ OP(OR, "|") \
+ OP(XOR, "^") \
+ OP(RSHIFT, ">>") \
+ OP(LSHIFT, "<<") \
+ \
+ OP(COMPL, "~") \
+ OP(AND_AND, "&&") /* logical */ \
+ OP(OR_OR, "||") \
+ OP(QUERY, "?") \
+ OP(COLON, ":") \
+ OP(COMMA, ",") /* grouping */ \
+ OP(OPEN_PAREN, "(") \
+ OP(CLOSE_PAREN, ")") \
+ TK(EOF, NONE) \
+ OP(EQ_EQ, "==") /* compare */ \
+ OP(NOT_EQ, "!=") \
+ OP(GREATER_EQ, ">=") \
+ OP(LESS_EQ, "<=") \
+ OP(SPACESHIP, "<=>") \
+ \
+ /* These two are unary + / - in preprocessor expressions. */ \
+ OP(PLUS_EQ, "+=") /* math */ \
+ OP(MINUS_EQ, "-=") \
+ \
+ OP(MULT_EQ, "*=") \
+ OP(DIV_EQ, "/=") \
+ OP(MOD_EQ, "%=") \
+ OP(AND_EQ, "&=") /* bit ops */ \
+ OP(OR_EQ, "|=") \
+ OP(XOR_EQ, "^=") \
+ OP(RSHIFT_EQ, ">>=") \
+ OP(LSHIFT_EQ, "<<=") \
+ /* Digraphs together, beginning with CPP_FIRST_DIGRAPH. */ \
+ OP(HASH, "#") /* digraphs */ \
+ OP(PASTE, "##") \
+ OP(OPEN_SQUARE, "[") \
+ OP(CLOSE_SQUARE, "]") \
+ OP(OPEN_BRACE, "{") \
+ OP(CLOSE_BRACE, "}") \
+ /* The remainder of the punctuation. Order is not significant. */ \
+ OP(SEMICOLON, ";") /* structure */ \
+ OP(ELLIPSIS, "...") \
+ OP(PLUS_PLUS, "++") /* increment */ \
+ OP(MINUS_MINUS, "--") \
+ OP(DEREF, "->") /* accessors */ \
+ OP(DOT, ".") \
+ OP(SCOPE, "::") \
+ OP(DEREF_STAR, "->*") \
+ OP(DOT_STAR, ".*") \
+ OP(ATSIGN, "@") /* used in Objective-C */ \
+ \
+ TK(NAME, IDENT) /* word */ \
+ TK(AT_NAME, IDENT) /* @word - Objective-C */ \
+ TK(NUMBER, LITERAL) /* 34_be+ta */ \
+ \
+ TK(CHAR, LITERAL) /* 'char' */ \
+ TK(WCHAR, LITERAL) /* L'char' */ \
+ TK(CHAR16, LITERAL) /* u'char' */ \
+ TK(CHAR32, LITERAL) /* U'char' */ \
+ TK(UTF8CHAR, LITERAL) /* u8'char' */ \
+ TK(OTHER, LITERAL) /* stray punctuation */ \
+ \
+ TK(STRING, LITERAL) /* "string" */ \
+ TK(WSTRING, LITERAL) /* L"string" */ \
+ TK(STRING16, LITERAL) /* u"string" */ \
+ TK(STRING32, LITERAL) /* U"string" */ \
+ TK(UTF8STRING, LITERAL) /* u8"string" */ \
+ TK(OBJC_STRING, LITERAL) /* @"string" - Objective-C */ \
+ TK(HEADER_NAME, LITERAL) /* <stdio.h> in #include */ \
+ \
+ TK(CHAR_USERDEF, LITERAL) /* 'char'_suffix - C++-0x */ \
+ TK(WCHAR_USERDEF, LITERAL) /* L'char'_suffix - C++-0x */ \
+ TK(CHAR16_USERDEF, LITERAL) /* u'char'_suffix - C++-0x */ \
+ TK(CHAR32_USERDEF, LITERAL) /* U'char'_suffix - C++-0x */ \
+ TK(UTF8CHAR_USERDEF, LITERAL) /* u8'char'_suffix - C++-0x */ \
+ TK(STRING_USERDEF, LITERAL) /* "string"_suffix - C++-0x */ \
+ TK(WSTRING_USERDEF, LITERAL) /* L"string"_suffix - C++-0x */ \
+ TK(STRING16_USERDEF, LITERAL) /* u"string"_suffix - C++-0x */ \
+ TK(STRING32_USERDEF, LITERAL) /* U"string"_suffix - C++-0x */ \
+ TK(UTF8STRING_USERDEF,LITERAL) /* u8"string"_suffix - C++-0x */ \
+ \
+ TK(COMMENT, LITERAL) /* Only if output comments. */ \
+ /* SPELL_LITERAL happens to DTRT. */ \
+ TK(MACRO_ARG, NONE) /* Macro argument. */ \
+ TK(PRAGMA, NONE) /* Only for deferred pragmas. */ \
+ TK(PRAGMA_EOL, NONE) /* End-of-line for deferred pragmas. */ \
+ TK(PADDING, NONE) /* Whitespace for -E. */
+
+#define OP(e, s) CPP_ ## e,
+#define TK(e, s) CPP_ ## e,
+enum cpp_ttype
+{
+ TTYPE_TABLE
+ N_TTYPES,
+
+ /* A token type for keywords, as opposed to ordinary identifiers. */
+ CPP_KEYWORD,
+
+ /* Positions in the table. */
+ CPP_LAST_EQ = CPP_LSHIFT,
+ CPP_FIRST_DIGRAPH = CPP_HASH,
+  CPP_LAST_PUNCTUATOR = CPP_ATSIGN,
+ CPP_LAST_CPP_OP = CPP_LESS_EQ
+};
+#undef OP
+#undef TK
+
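+/* Editorial sketch: redefining OP and TK yields further tables that
+   stay parallel to the enum.  For example, a hypothetical spelling
+   table indexed by cpp_ttype:
+
+     #define OP(e, s) s,
+     #define TK(e, s) #e,
+     static const char *const ttype_text[N_TTYPES] = { TTYPE_TABLE };
+     #undef OP
+     #undef TK
+
+   Operators expand to their spelling, other tokens to their name.  */
+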
+/* C language kind, used when calling cpp_create_reader. */
+enum c_lang {CLK_GNUC89 = 0, CLK_GNUC99, CLK_GNUC11, CLK_GNUC17, CLK_GNUC2X,
+ CLK_STDC89, CLK_STDC94, CLK_STDC99, CLK_STDC11, CLK_STDC17,
+ CLK_STDC2X,
+ CLK_GNUCXX, CLK_CXX98, CLK_GNUCXX11, CLK_CXX11,
+ CLK_GNUCXX14, CLK_CXX14, CLK_GNUCXX17, CLK_CXX17,
+ CLK_GNUCXX20, CLK_CXX20, CLK_GNUCXX23, CLK_CXX23,
+ CLK_ASM};
+
+/* Payload of a NUMBER, STRING, CHAR or COMMENT token. */
+struct GTY(()) cpp_string {
+ unsigned int len;
+
+ /* TEXT is always null terminated (terminator not included in len); but this
+ GTY markup arranges that PCH streaming works properly even if there is a
+ null byte in the middle of the string. */
+ const unsigned char * GTY((string_length ("1 + %h.len"))) text;
+};
+
+/* Flags for the cpp_token structure. */
+#define PREV_WHITE (1 << 0) /* If whitespace before this token. */
+#define DIGRAPH (1 << 1) /* If it was a digraph. */
+#define STRINGIFY_ARG (1 << 2) /* If macro argument to be stringified. */
+#define PASTE_LEFT (1 << 3) /* If on LHS of a ## operator. */
+#define NAMED_OP (1 << 4) /* C++ named operators. */
+#define PREV_FALLTHROUGH (1 << 5) /* On a token preceded by FALLTHROUGH
+ comment. */
+#define DECIMAL_INT (1 << 6) /* Decimal integer, set in c-lex.cc. */
+#define PURE_ZERO (1 << 7) /* Single 0 digit, used by the C++ frontend,
+ set in c-lex.cc. */
+#define SP_DIGRAPH (1 << 8) /* # or ## token was a digraph. */
+#define SP_PREV_WHITE (1 << 9) /* If whitespace before a ##
+ operator, or before this token
+ after a # operator. */
+#define NO_EXPAND (1 << 10) /* Do not macro-expand this token. */
+#define PRAGMA_OP (1 << 11) /* _Pragma token. */
+#define BOL (1 << 12) /* Token at beginning of line. */
+
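+/* Editorial sketch: these flags form a bitmask in cpp_token::flags
+   (defined below), tested the usual way; `tok' is a hypothetical
+   const cpp_token *.
+
+     bool had_space   = (tok->flags & PREV_WHITE) != 0;
+     bool was_digraph = (tok->flags & DIGRAPH) != 0;  */
+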
+/* Specify which field, if any, of the cpp_token union is used. */
+
+enum cpp_token_fld_kind {
+ CPP_TOKEN_FLD_NODE,
+ CPP_TOKEN_FLD_SOURCE,
+ CPP_TOKEN_FLD_STR,
+ CPP_TOKEN_FLD_ARG_NO,
+ CPP_TOKEN_FLD_TOKEN_NO,
+ CPP_TOKEN_FLD_PRAGMA,
+ CPP_TOKEN_FLD_NONE
+};
+
+/* A macro argument in the cpp_token union. */
+struct GTY(()) cpp_macro_arg {
+ /* Argument number. */
+ unsigned int arg_no;
+ /* The original spelling of the macro argument token. */
+ cpp_hashnode *
+ GTY ((nested_ptr (union tree_node,
+ "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL",
+ "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL")))
+ spelling;
+};
+
+/* An identifier in the cpp_token union. */
+struct GTY(()) cpp_identifier {
+ /* The canonical (UTF-8) spelling of the identifier. */
+ cpp_hashnode *
+ GTY ((nested_ptr (union tree_node,
+ "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL",
+ "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL")))
+ node;
+ /* The original spelling of the identifier. */
+ cpp_hashnode *
+ GTY ((nested_ptr (union tree_node,
+ "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL",
+ "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL")))
+ spelling;
+};
+
+/* A preprocessing token. This has been carefully packed and should
+ occupy 16 bytes on 32-bit hosts and 24 bytes on 64-bit hosts. */
+struct GTY(()) cpp_token {
+
+ /* Location of first char of token, together with range of full token. */
+ location_t src_loc;
+
+ ENUM_BITFIELD(cpp_ttype) type : CHAR_BIT; /* token type */
+ unsigned short flags; /* flags - see above */
+
+ union cpp_token_u
+ {
+ /* An identifier. */
+ struct cpp_identifier GTY ((tag ("CPP_TOKEN_FLD_NODE"))) node;
+
+ /* Inherit padding from this token. */
+ cpp_token * GTY ((tag ("CPP_TOKEN_FLD_SOURCE"))) source;
+
+ /* A string, or number. */
+ struct cpp_string GTY ((tag ("CPP_TOKEN_FLD_STR"))) str;
+
+ /* Argument no. (and original spelling) for a CPP_MACRO_ARG. */
+ struct cpp_macro_arg GTY ((tag ("CPP_TOKEN_FLD_ARG_NO"))) macro_arg;
+
+ /* Original token no. for a CPP_PASTE (from a sequence of
+ consecutive paste tokens in a macro expansion). */
+ unsigned int GTY ((tag ("CPP_TOKEN_FLD_TOKEN_NO"))) token_no;
+
+ /* Caller-supplied identifier for a CPP_PRAGMA. */
+ unsigned int GTY ((tag ("CPP_TOKEN_FLD_PRAGMA"))) pragma;
+ } GTY ((desc ("cpp_token_val_index (&%1)"))) val;
+};
+
+/* Say which field is in use. */
+extern enum cpp_token_fld_kind cpp_token_val_index (const cpp_token *tok);
+
+/* A type wide enough to hold any multibyte source character.
+ cpplib's character constant interpreter requires an unsigned type.
+ Also, a typedef for the signed equivalent.
+ The width of this type is capped at 32 bits; there do exist targets
+ where wchar_t is 64 bits, but only in a non-default mode, and there
+ would be no meaningful interpretation for a wchar_t value greater
+ than 2^32 anyway -- the widest wide-character encoding around is
+ ISO 10646, which stops at 2^31. */
+#if CHAR_BIT * SIZEOF_INT >= 32
+# define CPPCHAR_SIGNED_T int
+#elif CHAR_BIT * SIZEOF_LONG >= 32
+# define CPPCHAR_SIGNED_T long
+#else
+# error "Cannot find a least-32-bit signed integer type"
+#endif
+typedef unsigned CPPCHAR_SIGNED_T cppchar_t;
+typedef CPPCHAR_SIGNED_T cppchar_signed_t;
+
+/* Style of header dependencies to generate. */
+enum cpp_deps_style { DEPS_NONE = 0, DEPS_USER, DEPS_SYSTEM };
+
+/* The possible normalization levels, from most restrictive to least. */
+enum cpp_normalize_level {
+ /* In NFKC. */
+ normalized_KC = 0,
+ /* In NFC. */
+ normalized_C,
+ /* In NFC, except for subsequences where being in NFC would make
+ the identifier invalid. */
+ normalized_identifier_C,
+ /* Not normalized at all. */
+ normalized_none
+};
+
+enum cpp_main_search
+{
+  CMS_header,     /* Is a directly-specified header file (e.g. PCH or
+                     header-unit).  */
+ header-unit). */
+ CMS_user, /* Search the user INCLUDE path. */
+ CMS_system, /* Search the system INCLUDE path. */
+};
+
+/* The possible bidirectional control characters checking levels. */
+enum cpp_bidirectional_level {
+ /* No checking. */
+ bidirectional_none = 0,
+ /* Only detect unpaired uses of bidirectional control characters. */
+ bidirectional_unpaired = 1,
+ /* Detect any use of bidirectional control characters. */
+ bidirectional_any = 2,
+ /* Also warn about UCNs. */
+ bidirectional_ucn = 4
+};
+
+/* This structure is nested inside struct cpp_reader, and
+ carries all the options visible to the command line. */
+struct cpp_options
+{
+ /* The language we're preprocessing. */
+ enum c_lang lang;
+
+ /* Nonzero means use extra default include directories for C++. */
+ unsigned char cplusplus;
+
+ /* Nonzero means handle cplusplus style comments. */
+ unsigned char cplusplus_comments;
+
+ /* Nonzero means define __OBJC__, treat @ as a special token, use
+ the OBJC[PLUS]_INCLUDE_PATH environment variable, and allow
+ "#import". */
+ unsigned char objc;
+
+ /* Nonzero means don't copy comments into the output file. */
+ unsigned char discard_comments;
+
+ /* Nonzero means don't copy comments into the output file during
+ macro expansion. */
+ unsigned char discard_comments_in_macro_exp;
+
+ /* Nonzero means process the ISO trigraph sequences. */
+ unsigned char trigraphs;
+
+ /* Nonzero means process the ISO digraph sequences. */
+ unsigned char digraphs;
+
+ /* Nonzero means to allow hexadecimal floats and LL suffixes. */
+ unsigned char extended_numbers;
+
+ /* Nonzero means process u/U prefix literals (UTF-16/32). */
+ unsigned char uliterals;
+
+ /* Nonzero means process u8 prefixed character literals (UTF-8). */
+ unsigned char utf8_char_literals;
+
+ /* Nonzero means process r/R raw strings. If this is set, uliterals
+ must be set as well. */
+ unsigned char rliterals;
+
+ /* Nonzero means print names of header files (-H). */
+ unsigned char print_include_names;
+
+ /* Nonzero means complain about deprecated features. */
+ unsigned char cpp_warn_deprecated;
+
+ /* Nonzero means warn if slash-star appears in a comment. */
+ unsigned char warn_comments;
+
+  /* Nonzero means warn about __DATE__, __TIME__ and __TIMESTAMP__ usage.  */
+ unsigned char warn_date_time;
+
+ /* Nonzero means warn if a user-supplied include directory does not
+ exist. */
+ unsigned char warn_missing_include_dirs;
+
+ /* Nonzero means warn if there are any trigraphs. */
+ unsigned char warn_trigraphs;
+
+ /* Nonzero means warn about multicharacter charconsts. */
+ unsigned char warn_multichar;
+
+ /* Nonzero means warn about various incompatibilities with
+ traditional C. */
+ unsigned char cpp_warn_traditional;
+
+ /* Nonzero means warn about long long numeric constants. */
+ unsigned char cpp_warn_long_long;
+
+ /* Nonzero means warn about text after an #endif (or #else). */
+ unsigned char warn_endif_labels;
+
+ /* Nonzero means warn about implicit sign changes owing to integer
+ promotions. */
+ unsigned char warn_num_sign_change;
+
+ /* Zero means don't warn about __VA_ARGS__ usage in c89 pedantic mode.
+ Presumably the usage is protected by the appropriate #ifdef. */
+ unsigned char warn_variadic_macros;
+
+ /* Nonzero means warn about builtin macros that are redefined or
+ explicitly undefined. */
+ unsigned char warn_builtin_macro_redefined;
+
+ /* Different -Wimplicit-fallthrough= levels. */
+ unsigned char cpp_warn_implicit_fallthrough;
+
+ /* Nonzero means we should look for header.gcc files that remap file
+ names. */
+ unsigned char remap;
+
+ /* Zero means dollar signs are punctuation. */
+ unsigned char dollars_in_ident;
+
+ /* Nonzero means UCNs are accepted in identifiers. */
+ unsigned char extended_identifiers;
+
+ /* True if we should warn about dollars in identifiers or numbers
+ for this translation unit. */
+ unsigned char warn_dollars;
+
+ /* Nonzero means warn if undefined identifiers are evaluated in an #if. */
+ unsigned char warn_undef;
+
+ /* Nonzero means warn if "defined" is encountered in a place other than
+ an #if. */
+ unsigned char warn_expansion_to_defined;
+
+ /* Nonzero means warn of unused macros from the main file. */
+ unsigned char warn_unused_macros;
+
+ /* Nonzero for the 1999 C Standard, including corrigenda and amendments. */
+ unsigned char c99;
+
+ /* Nonzero if we are conforming to a specific C or C++ standard. */
+ unsigned char std;
+
+ /* Nonzero means give all the error messages the ANSI standard requires. */
+ unsigned char cpp_pedantic;
+
+ /* Nonzero means we're looking at already preprocessed code, so don't
+ bother trying to do macro expansion and whatnot. */
+ unsigned char preprocessed;
+
+ /* Nonzero means we are going to emit debugging logs during
+ preprocessing. */
+ unsigned char debug;
+
+ /* Nonzero means we are tracking locations of tokens involved in
+     macro expansion.  1 means we track the location in degraded mode
+     where we do not track locations of tokens resulting from the
+     expansion of arguments of function-like macros.  2 means we do
+     track all macro expansions.  This last option consumes the most
+     memory.  */
+ unsigned char track_macro_expansion;
+
+ /* Nonzero means handle C++ alternate operator names. */
+ unsigned char operator_names;
+
+ /* Nonzero means warn about use of C++ alternate operator names. */
+ unsigned char warn_cxx_operator_names;
+
+ /* True for traditional preprocessing. */
+ unsigned char traditional;
+
+ /* Nonzero for C++ 2011 Standard user-defined literals. */
+ unsigned char user_literals;
+
+ /* Nonzero means warn when a string or character literal is followed by a
+     ud-suffix which does not begin with an underscore.  */
+ unsigned char warn_literal_suffix;
+
+ /* Nonzero means interpret imaginary, fixed-point, or other gnu extension
+ literal number suffixes as user-defined literal number suffixes. */
+ unsigned char ext_numeric_literals;
+
+ /* Nonzero means extended identifiers allow the characters specified
+ in C11. */
+ unsigned char c11_identifiers;
+
+ /* Nonzero means extended identifiers allow the characters specified
+ by Unicode XID_Start and XID_Continue properties. */
+ unsigned char xid_identifiers;
+
+ /* Nonzero for C++ 2014 Standard binary constants. */
+ unsigned char binary_constants;
+
+ /* Nonzero for C++ 2014 Standard digit separators. */
+ unsigned char digit_separators;
+
+ /* Nonzero for C2X decimal floating-point constants. */
+ unsigned char dfp_constants;
+
+ /* Nonzero for C++20 __VA_OPT__ feature. */
+ unsigned char va_opt;
+
+ /* Nonzero for the '::' token. */
+ unsigned char scope;
+
+ /* Nonzero for the '#elifdef' and '#elifndef' directives. */
+ unsigned char elifdef;
+
+ /* Nonzero for the '#warning' directive. */
+ unsigned char warning_directive;
+
+ /* Nonzero means tokenize C++20 module directives. */
+ unsigned char module_directives;
+
+ /* Nonzero for C++23 size_t literals. */
+ unsigned char size_t_literals;
+
+ /* Nonzero for C++23 delimited escape sequences. */
+ unsigned char delimited_escape_seqs;
+
+ /* Nonzero for 'true' and 'false' in #if expressions. */
+ unsigned char true_false;
+
+ /* Holds the name of the target (execution) character set. */
+ const char *narrow_charset;
+
+ /* Holds the name of the target wide character set. */
+ const char *wide_charset;
+
+ /* Holds the name of the input character set. */
+ const char *input_charset;
+
+ /* The minimum permitted level of normalization before a warning
+ is generated. See enum cpp_normalize_level. */
+ int warn_normalize;
+
+ /* True to warn about precompiled header files we couldn't use. */
+ bool warn_invalid_pch;
+
+ /* True if dependencies should be restored from a precompiled header. */
+ bool restore_pch_deps;
+
+  /* True to warn about differences between C90 and C99.  */
+  signed char cpp_warn_c90_c99_compat;
+
+  /* True to warn about differences between C11 and C2X.  */
+  signed char cpp_warn_c11_c2x_compat;
+
+  /* True to warn about differences between C++98 and C++11.  */
+  bool cpp_warn_cxx11_compat;
+
+  /* True to warn about differences between C++17 and C++20.  */
+  bool cpp_warn_cxx20_compat;
+
+ /* Nonzero if bidirectional control characters checking is on. See enum
+ cpp_bidirectional_level. */
+ unsigned char cpp_warn_bidirectional;
+
+ /* True if libcpp should warn about invalid UTF-8 characters in comments.
+ 2 if it should be a pedwarn. */
+ unsigned char cpp_warn_invalid_utf8;
+
+ /* True if libcpp should warn about invalid forms of delimited or named
+ escape sequences. */
+ bool cpp_warn_unicode;
+
+ /* True if -finput-charset= option has been used explicitly. */
+ bool cpp_input_charset_explicit;
+
+ /* Dependency generation. */
+ struct
+ {
+ /* Style of header dependencies to generate. */
+ enum cpp_deps_style style;
+
+ /* Assume missing files are generated files. */
+ bool missing_files;
+
+ /* Generate phony targets for each dependency apart from the first
+ one. */
+ bool phony_targets;
+
+ /* Generate dependency info for modules. */
+ bool modules;
+
+ /* If true, no dependency is generated on the main file. */
+ bool ignore_main_file;
+
+ /* If true, intend to use the preprocessor output (e.g., for compilation)
+ in addition to the dependency info. */
+ bool need_preprocessor_output;
+ } deps;
+
+ /* Target-specific features set by the front end or client. */
+
+ /* Precision for target CPP arithmetic, target characters, target
+ ints and target wide characters, respectively. */
+ size_t precision, char_precision, int_precision, wchar_precision;
+
+ /* True means chars (wide chars, UTF-8 chars) are unsigned. */
+ bool unsigned_char, unsigned_wchar, unsigned_utf8char;
+
+ /* True if the most significant byte in a word has the lowest
+ address in memory. */
+ bool bytes_big_endian;
+
+ /* Nonzero means __STDC__ should have the value 0 in system headers. */
+ unsigned char stdc_0_in_system_headers;
+
+ /* True disables tokenization outside of preprocessing directives. */
+ bool directives_only;
+
+ /* True enables canonicalization of system header file paths. */
+ bool canonical_system_headers;
+
+ /* The maximum depth of the nested #include. */
+ unsigned int max_include_depth;
+
+ cpp_main_search main_search : 8;
+};
+
+/* Diagnostic levels. To get a diagnostic without associating a
+ position in the translation unit with it, use cpp_error_with_line
+ with a line number of zero. */
+
+enum cpp_diagnostic_level {
+ /* Warning, an error with -Werror. */
+ CPP_DL_WARNING = 0,
+ /* Same as CPP_DL_WARNING, except it is not suppressed in system headers. */
+ CPP_DL_WARNING_SYSHDR,
+ /* Warning, an error with -pedantic-errors or -Werror. */
+ CPP_DL_PEDWARN,
+ /* An error. */
+ CPP_DL_ERROR,
+ /* An internal consistency check failed. Prints "internal error: ",
+ otherwise the same as CPP_DL_ERROR. */
+ CPP_DL_ICE,
+ /* An informative note following a warning. */
+ CPP_DL_NOTE,
+ /* A fatal error. */
+ CPP_DL_FATAL
+};
+
+/* Warning reason codes. Use a reason code of CPP_W_NONE for unclassified
+ warnings and diagnostics that are not warnings. */
+
+enum cpp_warning_reason {
+ CPP_W_NONE = 0,
+ CPP_W_DEPRECATED,
+ CPP_W_COMMENTS,
+ CPP_W_MISSING_INCLUDE_DIRS,
+ CPP_W_TRIGRAPHS,
+ CPP_W_MULTICHAR,
+ CPP_W_TRADITIONAL,
+ CPP_W_LONG_LONG,
+ CPP_W_ENDIF_LABELS,
+ CPP_W_NUM_SIGN_CHANGE,
+ CPP_W_VARIADIC_MACROS,
+ CPP_W_BUILTIN_MACRO_REDEFINED,
+ CPP_W_DOLLARS,
+ CPP_W_UNDEF,
+ CPP_W_UNUSED_MACROS,
+ CPP_W_CXX_OPERATOR_NAMES,
+ CPP_W_NORMALIZE,
+ CPP_W_INVALID_PCH,
+ CPP_W_WARNING_DIRECTIVE,
+ CPP_W_LITERAL_SUFFIX,
+ CPP_W_SIZE_T_LITERALS,
+ CPP_W_DATE_TIME,
+ CPP_W_PEDANTIC,
+ CPP_W_C90_C99_COMPAT,
+ CPP_W_C11_C2X_COMPAT,
+ CPP_W_CXX11_COMPAT,
+ CPP_W_CXX20_COMPAT,
+ CPP_W_EXPANSION_TO_DEFINED,
+ CPP_W_BIDIRECTIONAL,
+ CPP_W_INVALID_UTF8,
+ CPP_W_UNICODE
+};
+
+/* Callback for header lookup for HEADER, which is the name of a
+ source file. It is used as a method of last resort to find headers
+ that are not otherwise found during the normal include processing.
+ The return value is the malloced name of a header to try and open,
+ if any, or NULL otherwise. This callback is called only if the
+ header is otherwise unfound. */
+typedef const char *(*missing_header_cb)(cpp_reader *, const char *header, cpp_dir **);
+
+/* Call backs to cpplib client. */
+struct cpp_callbacks
+{
+ /* Called when a new line of preprocessed output is started. */
+ void (*line_change) (cpp_reader *, const cpp_token *, int);
+
+ /* Called when switching to/from a new file.
+ The line_map is for the new file. It is NULL if there is no new file.
+ (In C this happens when done with <built-in>+<command line> and also
+ when done with a main file.) This can be used for resource cleanup. */
+ void (*file_change) (cpp_reader *, const line_map_ordinary *);
+
+ void (*dir_change) (cpp_reader *, const char *);
+ void (*include) (cpp_reader *, location_t, const unsigned char *,
+ const char *, int, const cpp_token **);
+ void (*define) (cpp_reader *, location_t, cpp_hashnode *);
+ void (*undef) (cpp_reader *, location_t, cpp_hashnode *);
+ void (*ident) (cpp_reader *, location_t, const cpp_string *);
+ void (*def_pragma) (cpp_reader *, location_t);
+ int (*valid_pch) (cpp_reader *, const char *, int);
+ void (*read_pch) (cpp_reader *, const char *, int, const char *);
+ missing_header_cb missing_header;
+
+ /* Context-sensitive macro support. Returns macro (if any) that should
+ be expanded. */
+ cpp_hashnode * (*macro_to_expand) (cpp_reader *, const cpp_token *);
+
+ /* Called to emit a diagnostic. This callback receives the
+ translated message. */
+ bool (*diagnostic) (cpp_reader *,
+ enum cpp_diagnostic_level,
+ enum cpp_warning_reason,
+ rich_location *,
+ const char *, va_list *)
+ ATTRIBUTE_FPTR_PRINTF(5,0);
+
+ /* Callbacks for when a macro is expanded, or tested (whether
+ defined or not at the time) in #ifdef, #ifndef or "defined". */
+ void (*used_define) (cpp_reader *, location_t, cpp_hashnode *);
+ void (*used_undef) (cpp_reader *, location_t, cpp_hashnode *);
+ /* Called before #define and #undef or other macro definition
+ changes are processed. */
+ void (*before_define) (cpp_reader *);
+ /* Called whenever a macro is expanded or tested.
+ Second argument is the location of the start of the current expansion. */
+ void (*used) (cpp_reader *, location_t, cpp_hashnode *);
+
+ /* Callback to identify whether an attribute exists. */
+ int (*has_attribute) (cpp_reader *, bool);
+
+ /* Callback to determine whether a built-in function is recognized. */
+ int (*has_builtin) (cpp_reader *);
+
+ /* Callback that can change a user lazy into normal macro. */
+ void (*user_lazy_macro) (cpp_reader *, cpp_macro *, unsigned);
+
+ /* Callback to handle deferred cpp_macros. */
+ cpp_macro *(*user_deferred_macro) (cpp_reader *, location_t, cpp_hashnode *);
+
+ /* Callback to parse SOURCE_DATE_EPOCH from environment. */
+ time_t (*get_source_date_epoch) (cpp_reader *);
+
+ /* Callback for providing suggestions for misspelled directives. */
+ const char *(*get_suggestion) (cpp_reader *, const char *, const char *const *);
+
+ /* Callback for when a comment is encountered, giving the location
+ of the opening slash, a pointer to the content (which is not
+ necessarily 0-terminated), and the length of the content.
+ The content contains the opening slash-star (or slash-slash),
+ and for C-style comments contains the closing star-slash. For
+ C++-style comments it does not include the terminating newline. */
+ void (*comment) (cpp_reader *, location_t, const unsigned char *,
+ size_t);
+
+ /* Callback for filename remapping in __FILE__ and __BASE_FILE__ macro
+ expansions. */
+ const char *(*remap_filename) (const char*);
+
+ /* Maybe translate a #include into something else. Return a
+ cpp_buffer containing the translation if translating. */
+ char *(*translate_include) (cpp_reader *, line_maps *, location_t,
+ const char *path);
+};
+
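+/* Editorial sketch: clients usually obtain this structure via
+   cpp_get_callbacks (declared further down in this header) and fill in
+   only the hooks they need; `pfile' and my_file_change are
+   hypothetical.
+
+     cpp_callbacks *cb = cpp_get_callbacks (pfile);
+     cb->file_change = my_file_change;  */
+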
+#ifdef VMS
+#define INO_T_CPP ino_t ino[3]
+#elif defined (_AIX) && SIZEOF_INO_T == 4
+#define INO_T_CPP ino64_t ino
+#else
+#define INO_T_CPP ino_t ino
+#endif
+
+#if defined (_AIX) && SIZEOF_DEV_T == 4
+#define DEV_T_CPP dev64_t dev
+#else
+#define DEV_T_CPP dev_t dev
+#endif
+
+/* Chain of directories to look for include files in. */
+struct cpp_dir
+{
+ /* NULL-terminated singly-linked list. */
+ struct cpp_dir *next;
+
+ /* NAME of the directory, NUL-terminated. */
+ char *name;
+ unsigned int len;
+
+ /* One if a system header, two if a system header that has extern
+ "C" guards for C++. */
+ unsigned char sysp;
+
+ /* Is this a user-supplied directory? */
+ bool user_supplied_p;
+
+ /* The canonicalized NAME as determined by lrealpath. This field
+ is only used by hosts that lack reliable inode numbers. */
+ char *canonical_name;
+
+ /* Mapping of file names for this directory for MS-DOS and related
+ platforms. A NULL-terminated array of (from, to) pairs. */
+ const char **name_map;
+
+ /* Routine to construct pathname, given the search path name and the
+ HEADER we are trying to find, return a constructed pathname to
+ try and open. If this is NULL, the constructed pathname is as
+ constructed by append_file_to_dir. */
+ char *(*construct) (const char *header, cpp_dir *dir);
+
+ /* The C front end uses these to recognize duplicated
+ directories in the search path. */
+ INO_T_CPP;
+ DEV_T_CPP;
+};
+
+/* The kind of the cpp_macro. */
+enum cpp_macro_kind {
+ cmk_macro, /* An ISO macro (token expansion). */
+ cmk_assert, /* An assertion. */
+ cmk_traditional /* A traditional macro (text expansion). */
+};
+
+/* Each macro definition is recorded in a cpp_macro structure.
+ Variadic macros cannot occur with traditional cpp. */
+struct GTY(()) cpp_macro {
+ union cpp_parm_u
+ {
+ /* Parameters, if any. If parameter names use extended identifiers,
+ the original spelling of those identifiers, not the canonical
+ UTF-8 spelling, goes here. */
+ cpp_hashnode ** GTY ((tag ("false"),
+ nested_ptr (union tree_node,
+ "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL",
+ "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL"),
+ length ("%1.paramc"))) params;
+
+ /* If this is an assertion, the next one in the chain. */
+ cpp_macro *GTY ((tag ("true"))) next;
+ } GTY ((desc ("%1.kind == cmk_assert"))) parm;
+
+ /* Definition line number. */
+ location_t line;
+
+ /* Number of tokens in body, or bytes for traditional macros. */
+ /* Do we really need 2^32-1 range here? */
+ unsigned int count;
+
+ /* Number of parameters. */
+ unsigned short paramc;
+
+ /* Non-zero if this is a user-lazy macro, value provided by user. */
+ unsigned char lazy;
+
+  /* The kind of this macro (ISO, trad or assert).  */
+ unsigned kind : 2;
+
+ /* If a function-like macro. */
+ unsigned int fun_like : 1;
+
+ /* If a variadic macro. */
+ unsigned int variadic : 1;
+
+ /* If macro defined in system header. */
+ unsigned int syshdr : 1;
+
+ /* Nonzero if it has been expanded or had its existence tested. */
+ unsigned int used : 1;
+
+ /* Indicate whether the tokens include extra CPP_PASTE tokens at the
+ end to track invalid redefinitions with consecutive CPP_PASTE
+ tokens. */
+ unsigned int extra_tokens : 1;
+
+ /* Imported C++20 macro (from a header unit). */
+ unsigned int imported_p : 1;
+
+ /* 0 bits spare (32-bit). 32 on 64-bit target. */
+
+ union cpp_exp_u
+ {
+ /* Trailing array of replacement tokens (ISO), or assertion body value. */
+ cpp_token GTY ((tag ("false"), length ("%1.count"))) tokens[1];
+
+ /* Pointer to replacement text (traditional). See comment at top
+ of cpptrad.c for how traditional function-like macros are
+ encoded. */
+ const unsigned char *GTY ((tag ("true"))) text;
+ } GTY ((desc ("%1.kind == cmk_traditional"))) exp;
+};
+
+/* Poisoned identifiers are flagged NODE_POISONED. NODE_OPERATOR (C++
+ only) indicates an identifier that behaves like an operator such as
+ "xor". NODE_DIAGNOSTIC is for speed in lex_token: it indicates a
+ diagnostic may be required for this node. Currently this only
+ applies to __VA_ARGS__, poisoned identifiers, and -Wc++-compat
+ warnings about NODE_OPERATOR. */
+
+/* Hash node flags. */
+#define NODE_OPERATOR (1 << 0) /* C++ named operator. */
+#define NODE_POISONED (1 << 1) /* Poisoned identifier. */
+#define NODE_DIAGNOSTIC (1 << 2) /* Possible diagnostic when lexed. */
+#define NODE_WARN (1 << 3) /* Warn if redefined or undefined. */
+#define NODE_DISABLED (1 << 4) /* A disabled macro. */
+#define NODE_USED (1 << 5) /* Dumped with -dU. */
+#define NODE_CONDITIONAL (1 << 6) /* Conditional macro */
+#define NODE_WARN_OPERATOR (1 << 7) /* Warn about C++ named operator. */
+#define NODE_MODULE (1 << 8) /* C++-20 module-related name. */
+
+/* Different flavors of hash node. */
+enum node_type
+{
+ NT_VOID = 0, /* Maybe an assert? */
+ NT_MACRO_ARG, /* A macro arg. */
+ NT_USER_MACRO, /* A user macro. */
+ NT_BUILTIN_MACRO, /* A builtin macro. */
+ NT_MACRO_MASK = NT_USER_MACRO /* Mask for either macro kind. */
+};
+
+/* Different flavors of builtin macro. _Pragma is an operator, but we
+ handle it with the builtin code for efficiency reasons. */
+enum cpp_builtin_type
+{
+ BT_SPECLINE = 0, /* `__LINE__' */
+ BT_DATE, /* `__DATE__' */
+ BT_FILE, /* `__FILE__' */
+ BT_FILE_NAME, /* `__FILE_NAME__' */
+ BT_BASE_FILE, /* `__BASE_FILE__' */
+ BT_INCLUDE_LEVEL, /* `__INCLUDE_LEVEL__' */
+ BT_TIME, /* `__TIME__' */
+ BT_STDC, /* `__STDC__' */
+ BT_PRAGMA, /* `_Pragma' operator */
+ BT_TIMESTAMP, /* `__TIMESTAMP__' */
+ BT_COUNTER, /* `__COUNTER__' */
+ BT_HAS_ATTRIBUTE, /* `__has_attribute(x)' */
+ BT_HAS_STD_ATTRIBUTE, /* `__has_c_attribute(x)' */
+ BT_HAS_BUILTIN, /* `__has_builtin(x)' */
+ BT_HAS_INCLUDE, /* `__has_include(x)' */
+ BT_HAS_INCLUDE_NEXT /* `__has_include_next(x)' */
+};
+
+#define CPP_HASHNODE(HNODE) ((cpp_hashnode *) (HNODE))
+#define HT_NODE(NODE) (&(NODE)->ident)
+#define NODE_LEN(NODE) HT_LEN (HT_NODE (NODE))
+#define NODE_NAME(NODE) HT_STR (HT_NODE (NODE))
+
+/* The common part of an identifier node shared amongst all 3 C front
+ ends. Also used to store CPP identifiers, which are a superset of
+ identifiers in the grammatical sense. */
+
+union GTY(()) _cpp_hashnode_value {
+ /* Assert (maybe NULL) */
+ cpp_macro * GTY((tag ("NT_VOID"))) answers;
+ /* Macro (maybe NULL) */
+ cpp_macro * GTY((tag ("NT_USER_MACRO"))) macro;
+ /* Code for a builtin macro. */
+ enum cpp_builtin_type GTY ((tag ("NT_BUILTIN_MACRO"))) builtin;
+ /* Macro argument index. */
+ unsigned short GTY ((tag ("NT_MACRO_ARG"))) arg_index;
+};
+
+struct GTY(()) cpp_hashnode {
+ struct ht_identifier ident;
+ unsigned int is_directive : 1;
+ unsigned int directive_index : 7; /* If is_directive,
+ then index into directive table.
+ Otherwise, a NODE_OPERATOR. */
+ unsigned int rid_code : 8; /* Rid code - for front ends. */
+ unsigned int flags : 9; /* CPP flags. */
+ ENUM_BITFIELD(node_type) type : 2; /* CPP node type. */
+
+ /* 5 bits spare. */
+
+ /* The deferred cookie is applicable to NT_USER_MACRO or NT_VOID.
+     The latter is used when a macro had a prevailing undef.
+ On a 64-bit system there would be 32-bits of padding to the value
+ field. So placing the deferred index here is not costly. */
+ unsigned deferred; /* Deferred cookie */
+
+ union _cpp_hashnode_value GTY ((desc ("%1.type"))) value;
+};
+
+/* A class for iterating through the source locations within a
+ string token (before escapes are interpreted, and before
+ concatenation). */
+
+class cpp_string_location_reader {
+ public:
+ cpp_string_location_reader (location_t src_loc,
+ line_maps *line_table);
+
+ source_range get_next ();
+
+ private:
+ location_t m_loc;
+ int m_offset_per_column;
+};
+
+/* A class for storing the source ranges of all of the characters within
+ a string literal, after escapes are interpreted, and after
+ concatenation.
+
+ This is not GTY-marked, as instances are intended to be temporary. */
+
+class cpp_substring_ranges
+{
+ public:
+ cpp_substring_ranges ();
+ ~cpp_substring_ranges ();
+
+ int get_num_ranges () const { return m_num_ranges; }
+ source_range get_range (int idx) const
+ {
+ linemap_assert (idx < m_num_ranges);
+ return m_ranges[idx];
+ }
+
+ void add_range (source_range range);
+ void add_n_ranges (int num, cpp_string_location_reader &loc_reader);
+
+ private:
+ source_range *m_ranges;
+ int m_num_ranges;
+ int m_alloc_ranges;
+};
+
+/* Call this first to get a handle to pass to other functions.
+
+ If you want cpplib to manage its own hashtable, pass in a NULL
+ pointer. Otherwise you should pass in an initialized hash table
+ that cpplib will share; this technique is used by the C front
+ ends. */
+extern cpp_reader *cpp_create_reader (enum c_lang, struct ht *,
+ class line_maps *);
+
+/* Reset the cpp_reader's line_map. This is only used after reading a
+ PCH file. */
+extern void cpp_set_line_map (cpp_reader *, class line_maps *);
+
+/* Call this to change the selected language standard (e.g. because of
+ command line options). */
+extern void cpp_set_lang (cpp_reader *, enum c_lang);
+
+/* Set the include paths. */
+extern void cpp_set_include_chains (cpp_reader *, cpp_dir *, cpp_dir *, int);
+
+/* Call these to get pointers to the options, callback, and deps
+ structures for a given reader. These pointers are good until you
+ call cpp_finish on that reader. You can either edit the callbacks
+ through the pointer returned from cpp_get_callbacks, or set them
+ with cpp_set_callbacks. */
+extern cpp_options *cpp_get_options (cpp_reader *) ATTRIBUTE_PURE;
+extern cpp_callbacks *cpp_get_callbacks (cpp_reader *) ATTRIBUTE_PURE;
+extern void cpp_set_callbacks (cpp_reader *, cpp_callbacks *);
+extern class mkdeps *cpp_get_deps (cpp_reader *) ATTRIBUTE_PURE;
+
+extern const char *cpp_probe_header_unit (cpp_reader *, const char *file,
+ bool angle_p, location_t);
+
+/* Call these to get name data about the various compile-time
+ charsets. */
+extern const char *cpp_get_narrow_charset_name (cpp_reader *) ATTRIBUTE_PURE;
+extern const char *cpp_get_wide_charset_name (cpp_reader *) ATTRIBUTE_PURE;
+
+/* This function reads the file, but does not start preprocessing. It
+ returns the name of the original file; this is the same as the
+ input file, except for preprocessed input. This will generate at
+ least one file change callback, and possibly a line change callback
+ too. If there was an error opening the file, it returns NULL. */
+extern const char *cpp_read_main_file (cpp_reader *, const char *,
+ bool injecting = false);
+extern location_t cpp_main_loc (const cpp_reader *);
+
+/* Adjust for the main file to be an include. */
+extern void cpp_retrofit_as_include (cpp_reader *);
+
+/* Set up built-ins with special behavior. Use cpp_init_builtins()
+   instead unless you know what you are doing.  */
+extern void cpp_init_special_builtins (cpp_reader *);
+
+/* Set up built-ins like __FILE__. */
+extern void cpp_init_builtins (cpp_reader *, int);
+
+/* This is called after options have been parsed, and partially
+ processed. */
+extern void cpp_post_options (cpp_reader *);
+
+/* Set up translation to the target character set. */
+extern void cpp_init_iconv (cpp_reader *);
+
+/* Call this to finish preprocessing. If you requested dependency
+ generation, pass an open stream to write the information to,
+ otherwise NULL. It is your responsibility to close the stream. */
+extern void cpp_finish (cpp_reader *, FILE *deps_stream);
+
+/* Call this to release the handle at the end of preprocessing. Any
+ use of the handle after this function returns is invalid. */
+extern void cpp_destroy (cpp_reader *);
+
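+/* Editorial sketch: a minimal preprocessing loop using only entry
+   points declared in this header.  Option handling, line_maps setup
+   and error handling are elided; `line_table' is hypothetical, and the
+   second argument to cpp_init_builtins is assumed to mean "hosted".
+
+     cpp_reader *pfile = cpp_create_reader (CLK_GNUC11, NULL, line_table);
+     cpp_init_builtins (pfile, 1);
+     if (cpp_read_main_file (pfile, "test.c"))
+       {
+         const cpp_token *tok;
+         do
+           tok = cpp_get_token (pfile);
+         while (tok->type != CPP_EOF);
+       }
+     cpp_finish (pfile, NULL);
+     cpp_destroy (pfile);  */
+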
+extern unsigned int cpp_token_len (const cpp_token *);
+extern unsigned char *cpp_token_as_text (cpp_reader *, const cpp_token *);
+extern unsigned char *cpp_spell_token (cpp_reader *, const cpp_token *,
+ unsigned char *, bool);
+extern void cpp_register_pragma (cpp_reader *, const char *, const char *,
+ void (*) (cpp_reader *), bool);
+extern void cpp_register_deferred_pragma (cpp_reader *, const char *,
+ const char *, unsigned, bool, bool);
+extern int cpp_avoid_paste (cpp_reader *, const cpp_token *,
+ const cpp_token *);
+extern const cpp_token *cpp_get_token (cpp_reader *);
+extern const cpp_token *cpp_get_token_with_location (cpp_reader *,
+ location_t *);
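+
+/* Sketch of the core tokenizing loop (hypothetical driver code using
+   the declarations above):
+
+     for (;;)
+       {
+         const cpp_token *tok = cpp_get_token (pfile);
+         if (tok->type == CPP_EOF)
+           break;
+         fputs ((const char *) cpp_token_as_text (pfile, tok), stdout);
+       }  */
+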
+inline bool cpp_user_macro_p (const cpp_hashnode *node)
+{
+ return node->type == NT_USER_MACRO;
+}
+inline bool cpp_builtin_macro_p (const cpp_hashnode *node)
+{
+ return node->type == NT_BUILTIN_MACRO;
+}
+inline bool cpp_macro_p (const cpp_hashnode *node)
+{
+ return node->type & NT_MACRO_MASK;
+}
+inline cpp_macro *cpp_set_deferred_macro (cpp_hashnode *node,
+ cpp_macro *forced = NULL)
+{
+ cpp_macro *old = node->value.macro;
+
+ node->value.macro = forced;
+ node->type = NT_USER_MACRO;
+ node->flags &= ~NODE_USED;
+
+ return old;
+}
+cpp_macro *cpp_get_deferred_macro (cpp_reader *, cpp_hashnode *, location_t);
+
+/* Returns true if NODE is a function-like user macro. */
+inline bool cpp_fun_like_macro_p (cpp_hashnode *node)
+{
+ return cpp_user_macro_p (node) && node->value.macro->fun_like;
+}
+
+extern const unsigned char *cpp_macro_definition (cpp_reader *, cpp_hashnode *);
+extern const unsigned char *cpp_macro_definition (cpp_reader *, cpp_hashnode *,
+ const cpp_macro *);
+inline location_t cpp_macro_definition_location (cpp_hashnode *node)
+{
+ const cpp_macro *macro = node->value.macro;
+ return macro ? macro->line : 0;
+}
+/* Return an idempotent time stamp (possibly from SOURCE_DATE_EPOCH). */
+enum class CPP_time_kind
+{
+ FIXED = -1, /* Fixed time via source epoch. */
+ DYNAMIC = -2, /* Dynamic via time(2). */
+ UNKNOWN = -3 /* Wibbly wobbly, timey wimey. */
+};
+extern CPP_time_kind cpp_get_date (cpp_reader *, time_t *);
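+
+/* Sketch: check whether __DATE__/__TIME__ are pinned by SOURCE_DATE_EPOCH
+   (illustrative use of the declaration above):
+
+     time_t stamp;
+     bool reproducible
+       = cpp_get_date (pfile, &stamp) == CPP_time_kind::FIXED;  */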
+
+extern void _cpp_backup_tokens (cpp_reader *, unsigned int);
+extern const cpp_token *cpp_peek_token (cpp_reader *, int);
+
+/* Evaluate a CPP_*CHAR* token. */
+extern cppchar_t cpp_interpret_charconst (cpp_reader *, const cpp_token *,
+ unsigned int *, int *);
+/* Evaluate a vector of CPP_*STRING* tokens. */
+extern bool cpp_interpret_string (cpp_reader *,
+ const cpp_string *, size_t,
+ cpp_string *, enum cpp_ttype);
+extern const char *cpp_interpret_string_ranges (cpp_reader *pfile,
+ const cpp_string *from,
+ cpp_string_location_reader *,
+ size_t count,
+ cpp_substring_ranges *out,
+ enum cpp_ttype type);
+extern bool cpp_interpret_string_notranslate (cpp_reader *,
+ const cpp_string *, size_t,
+ cpp_string *, enum cpp_ttype);
+
+/* Convert a host character constant to the execution character set. */
+extern cppchar_t cpp_host_to_exec_charset (cpp_reader *, cppchar_t);
+
+/* Used to register macros and assertions, perhaps from the command line.
+ The text is the same as the command line argument. */
+extern void cpp_define (cpp_reader *, const char *);
+extern void cpp_define_unused (cpp_reader *, const char *);
+extern void cpp_define_formatted (cpp_reader *pfile,
+ const char *fmt, ...) ATTRIBUTE_PRINTF_2;
+extern void cpp_define_formatted_unused (cpp_reader *pfile,
+ const char *fmt,
+ ...) ATTRIBUTE_PRINTF_2;
+extern void cpp_assert (cpp_reader *, const char *);
+extern void cpp_undef (cpp_reader *, const char *);
+extern void cpp_unassert (cpp_reader *, const char *);
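+
+/* Sketch mirroring the driver options -DDEBUG=1 and -UNDEBUG, plus an
+   assertion (the "predicate=answer" text for cpp_assert is assumed to
+   follow the -A command-line syntax; illustrative only):
+
+     cpp_define (pfile, "DEBUG=1");
+     cpp_undef (pfile, "NDEBUG");
+     cpp_assert (pfile, "machine=arm");  */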
+
+/* Mark a node as a lazily defined macro. */
+extern void cpp_define_lazily (cpp_reader *, cpp_hashnode *node, unsigned N);
+
+/* Undefine all macros and assertions. */
+extern void cpp_undef_all (cpp_reader *);
+
+extern cpp_buffer *cpp_push_buffer (cpp_reader *, const unsigned char *,
+ size_t, int);
+extern int cpp_defined (cpp_reader *, const unsigned char *, int);
+
+/* A preprocessing number. Code assumes that any unused high bits of
+ the double integer are set to zero. */
+
+/* This type has to be equal to unsigned HOST_WIDE_INT, see
+ gcc/c-family/c-lex.cc. */
+typedef uint64_t cpp_num_part;
+typedef struct cpp_num cpp_num;
+struct cpp_num
+{
+ cpp_num_part high;
+ cpp_num_part low;
+ bool unsignedp; /* True if value should be treated as unsigned. */
+ bool overflow; /* True if the most recent calculation overflowed. */
+};
+
+/* cpplib provides two interfaces for interpretation of preprocessing
+ numbers.
+
+ cpp_classify_number categorizes numeric constants according to
+ their field (integer, floating point, or invalid), radix (decimal,
+ octal, hexadecimal), and type suffixes. */
+
+#define CPP_N_CATEGORY 0x000F
+#define CPP_N_INVALID 0x0000
+#define CPP_N_INTEGER 0x0001
+#define CPP_N_FLOATING 0x0002
+
+#define CPP_N_WIDTH 0x00F0
+#define CPP_N_SMALL 0x0010 /* int, float, short _Fract/_Accum. */
+#define CPP_N_MEDIUM 0x0020 /* long, double, long _Fract/_Accum. */
+#define CPP_N_LARGE 0x0040 /* long long, long double,
+ long long _Fract/_Accum. */
+
+#define CPP_N_WIDTH_MD 0xF0000 /* machine defined. */
+#define CPP_N_MD_W 0x10000
+#define CPP_N_MD_Q 0x20000
+
+#define CPP_N_RADIX 0x0F00
+#define CPP_N_DECIMAL 0x0100
+#define CPP_N_HEX 0x0200
+#define CPP_N_OCTAL 0x0400
+#define CPP_N_BINARY 0x0800
+
+#define CPP_N_UNSIGNED 0x1000 /* Properties. */
+#define CPP_N_IMAGINARY 0x2000
+#define CPP_N_DFLOAT 0x4000
+#define CPP_N_DEFAULT 0x8000
+
+#define CPP_N_FRACT 0x100000 /* Fract types. */
+#define CPP_N_ACCUM 0x200000 /* Accum types. */
+#define CPP_N_FLOATN 0x400000 /* _FloatN types. */
+#define CPP_N_FLOATNX 0x800000 /* _FloatNx types. */
+
+#define CPP_N_USERDEF 0x1000000 /* C++11 user-defined literal. */
+
+#define CPP_N_SIZE_T 0x2000000 /* C++23 size_t literal. */
+#define CPP_N_BFLOAT16 0x4000000 /* std::bfloat16_t type. */
+
+#define CPP_N_WIDTH_FLOATN_NX 0xF0000000 /* _FloatN / _FloatNx value
+ of N, divided by 16. */
+#define CPP_FLOATN_SHIFT 24
+#define CPP_FLOATN_MAX 0xF0
+
+/* Classify a CPP_NUMBER token. The return value is a combination of
+ the flags from the above sets. */
+extern unsigned cpp_classify_number (cpp_reader *, const cpp_token *,
+ const char **, location_t);
+
+/* Return the classification flags for a float suffix. */
+extern unsigned int cpp_interpret_float_suffix (cpp_reader *, const char *,
+ size_t);
+
+/* Return the classification flags for an int suffix. */
+extern unsigned int cpp_interpret_int_suffix (cpp_reader *, const char *,
+ size_t);
+
+/* Evaluate a token classified as category CPP_N_INTEGER. */
+extern cpp_num cpp_interpret_integer (cpp_reader *, const cpp_token *,
+ unsigned int);
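+
+/* Sketch: evaluate an integer literal token using the two declarations
+   above (illustrative only; TOK and LOC come from the caller):
+
+     unsigned int cl = cpp_classify_number (pfile, tok, NULL, loc);
+     if ((cl & CPP_N_CATEGORY) == CPP_N_INTEGER)
+       {
+         cpp_num v = cpp_interpret_integer (pfile, tok, cl);
+         ... use v.low/v.high, checking v.unsignedp and v.overflow ...
+       }  */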
+
+/* Sign extend a number, with PRECISION significant bits and all
+ others assumed clear, to fill out a cpp_num structure. */
+cpp_num cpp_num_sign_extend (cpp_num, size_t);
+
+/* Output a diagnostic of some kind. */
+extern bool cpp_error (cpp_reader *, enum cpp_diagnostic_level,
+ const char *msgid, ...)
+ ATTRIBUTE_PRINTF_3;
+extern bool cpp_warning (cpp_reader *, enum cpp_warning_reason,
+ const char *msgid, ...)
+ ATTRIBUTE_PRINTF_3;
+extern bool cpp_pedwarning (cpp_reader *, enum cpp_warning_reason,
+ const char *msgid, ...)
+ ATTRIBUTE_PRINTF_3;
+extern bool cpp_warning_syshdr (cpp_reader *, enum cpp_warning_reason reason,
+ const char *msgid, ...)
+ ATTRIBUTE_PRINTF_3;
+
+/* As their counterparts above, but use RICHLOC. */
+extern bool cpp_warning_at (cpp_reader *, enum cpp_warning_reason,
+ rich_location *richloc, const char *msgid, ...)
+ ATTRIBUTE_PRINTF_4;
+extern bool cpp_pedwarning_at (cpp_reader *, enum cpp_warning_reason,
+ rich_location *richloc, const char *msgid, ...)
+ ATTRIBUTE_PRINTF_4;
+
+/* Output a diagnostic with "MSGID: " preceding the
+ error string of errno. No location is printed. */
+extern bool cpp_errno (cpp_reader *, enum cpp_diagnostic_level,
+ const char *msgid);
+/* Similarly, but with "FILENAME: " instead of "MSGID: ", where
+ the filename is not localized. */
+extern bool cpp_errno_filename (cpp_reader *, enum cpp_diagnostic_level,
+ const char *filename, location_t loc);
+
+/* Same as cpp_error, except additionally specifies a position as a
+ (translation unit) physical line and physical column. If the line is
+ zero, then no location is printed. */
+extern bool cpp_error_with_line (cpp_reader *, enum cpp_diagnostic_level,
+ location_t, unsigned,
+ const char *msgid, ...)
+ ATTRIBUTE_PRINTF_5;
+extern bool cpp_warning_with_line (cpp_reader *, enum cpp_warning_reason,
+ location_t, unsigned,
+ const char *msgid, ...)
+ ATTRIBUTE_PRINTF_5;
+extern bool cpp_pedwarning_with_line (cpp_reader *, enum cpp_warning_reason,
+ location_t, unsigned,
+ const char *msgid, ...)
+ ATTRIBUTE_PRINTF_5;
+extern bool cpp_warning_with_line_syshdr (cpp_reader *, enum cpp_warning_reason,
+ location_t, unsigned,
+ const char *msgid, ...)
+ ATTRIBUTE_PRINTF_5;
+
+extern bool cpp_error_at (cpp_reader * pfile, enum cpp_diagnostic_level,
+ location_t src_loc, const char *msgid, ...)
+ ATTRIBUTE_PRINTF_4;
+
+extern bool cpp_error_at (cpp_reader * pfile, enum cpp_diagnostic_level,
+ rich_location *richloc, const char *msgid, ...)
+ ATTRIBUTE_PRINTF_4;
+
+/* In lex.cc */
+extern int cpp_ideq (const cpp_token *, const char *);
+extern void cpp_output_line (cpp_reader *, FILE *);
+extern unsigned char *cpp_output_line_to_string (cpp_reader *,
+ const unsigned char *);
+extern const unsigned char *cpp_alloc_token_string
+ (cpp_reader *, const unsigned char *, unsigned);
+extern void cpp_output_token (const cpp_token *, FILE *);
+extern const char *cpp_type2name (enum cpp_ttype, unsigned char flags);
+/* Returns the value of an escape sequence, truncated to the correct
+ target precision. PSTR points to the input pointer, which is just
+ after the backslash. LIMIT is how much text we have. WIDE is true
+ if the escape sequence is part of a wide character constant or
+ string literal. Handles all relevant diagnostics. */
+extern cppchar_t cpp_parse_escape (cpp_reader *, const unsigned char ** pstr,
+ const unsigned char *limit, int wide);
+
+/* Structure used to hold a comment block at a given location in the
+ source code. */
+
+typedef struct
+{
+ /* Text of the comment including the terminators. */
+ char *comment;
+
+ /* Source location of the comment. */
+ location_t sloc;
+} cpp_comment;
+
+/* Structure holding all comments for a given cpp_reader. */
+
+typedef struct
+{
+ /* Table of comment entries. */
+ cpp_comment *entries;
+
+ /* Number of entries actually used in the table. */
+ int count;
+
+ /* Number of entries currently allocated. */
+ int allocated;
+} cpp_comment_table;
+
+/* Returns the table of comments encountered by the preprocessor. This
+ table is only populated when pfile->state.save_comments is true. */
+extern cpp_comment_table *cpp_get_comments (cpp_reader *);
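+
+/* Sketch: dump every saved comment (only meaningful when comment saving
+   is enabled; illustrative only):
+
+     cpp_comment_table *tab = cpp_get_comments (pfile);
+     for (int i = 0; i < tab->count; i++)
+       fprintf (stderr, "%s at %u\n", tab->entries[i].comment,
+                tab->entries[i].sloc);  */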
+
+/* In hash.c */
+
+/* Lookup an identifier in the hashtable. Puts the identifier in the
+ table if it is not already there. */
+extern cpp_hashnode *cpp_lookup (cpp_reader *, const unsigned char *,
+ unsigned int);
+
+typedef int (*cpp_cb) (cpp_reader *, cpp_hashnode *, void *);
+extern void cpp_forall_identifiers (cpp_reader *, cpp_cb, void *);
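+
+/* Sketch: count macro definitions by walking the identifier table
+   (hypothetical callback; assumes the walk stops when the callback
+   returns 0, so return nonzero to continue):
+
+     static int
+     count_macros (cpp_reader *, cpp_hashnode *node, void *data)
+     {
+       if (cpp_macro_p (node))
+         ++*(int *) data;
+       return 1;
+     }
+
+     int n = 0;
+     cpp_forall_identifiers (pfile, count_macros, &n);  */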
+
+/* In macro.cc */
+extern void cpp_scan_nooutput (cpp_reader *);
+extern int cpp_sys_macro_p (cpp_reader *);
+extern unsigned char *cpp_quote_string (unsigned char *, const unsigned char *,
+ unsigned int);
+extern bool cpp_compare_macros (const cpp_macro *macro1,
+ const cpp_macro *macro2);
+
+/* In files.cc */
+extern bool cpp_included (cpp_reader *, const char *);
+extern bool cpp_included_before (cpp_reader *, const char *, location_t);
+extern void cpp_make_system_header (cpp_reader *, int, int);
+extern bool cpp_push_include (cpp_reader *, const char *);
+extern bool cpp_push_default_include (cpp_reader *, const char *);
+extern void cpp_change_file (cpp_reader *, enum lc_reason, const char *);
+extern const char *cpp_get_path (struct _cpp_file *);
+extern cpp_dir *cpp_get_dir (struct _cpp_file *);
+extern cpp_buffer *cpp_get_buffer (cpp_reader *);
+extern struct _cpp_file *cpp_get_file (cpp_buffer *);
+extern cpp_buffer *cpp_get_prev (cpp_buffer *);
+extern void cpp_clear_file_cache (cpp_reader *);
+
+/* cpp_get_converted_source returns the contents of the given file, as it exists
+ after cpplib has read it and converted it from the input charset to the
+ source charset. The returned struct will be zero-filled if the data could
+ not be read for any reason. The data starts at the DATA pointer, but the
+ TO_FREE pointer is what should be passed to free(), as there may be an
+ offset. */
+struct cpp_converted_source
+{
+ char *to_free;
+ char *data;
+ size_t len;
+};
+cpp_converted_source cpp_get_converted_source (const char *fname,
+ const char *input_charset);
+
+/* In pch.cc */
+struct save_macro_data;
+extern int cpp_save_state (cpp_reader *, FILE *);
+extern int cpp_write_pch_deps (cpp_reader *, FILE *);
+extern int cpp_write_pch_state (cpp_reader *, FILE *);
+extern int cpp_valid_state (cpp_reader *, const char *, int);
+extern void cpp_prepare_state (cpp_reader *, struct save_macro_data **);
+extern int cpp_read_state (cpp_reader *, const char *, FILE *,
+ struct save_macro_data *);
+
+/* In lex.cc */
+extern void cpp_force_token_locations (cpp_reader *, location_t);
+extern void cpp_stop_forcing_token_locations (cpp_reader *);
+enum CPP_DO_task
+{
+ CPP_DO_print,
+ CPP_DO_location,
+ CPP_DO_token
+};
+
+extern void cpp_directive_only_process (cpp_reader *pfile,
+ void *data,
+ void (*cb) (cpp_reader *,
+ CPP_DO_task,
+ void *data, ...));
+
+/* In expr.cc */
+extern enum cpp_ttype cpp_userdef_string_remove_type
+ (enum cpp_ttype type);
+extern enum cpp_ttype cpp_userdef_string_add_type
+ (enum cpp_ttype type);
+extern enum cpp_ttype cpp_userdef_char_remove_type
+ (enum cpp_ttype type);
+extern enum cpp_ttype cpp_userdef_char_add_type
+ (enum cpp_ttype type);
+extern bool cpp_userdef_string_p
+ (enum cpp_ttype type);
+extern bool cpp_userdef_char_p
+ (enum cpp_ttype type);
+extern const char * cpp_get_userdef_suffix
+ (const cpp_token *);
+
+/* In charset.cc */
+
+/* The result of attempting to decode a run of UTF-8 bytes. */
+
+struct cpp_decoded_char
+{
+ const char *m_start_byte;
+ const char *m_next_byte;
+
+ bool m_valid_ch;
+ cppchar_t m_ch;
+};
+
+/* Information for mapping between code points and display columns.
+
+ This is a tabstop value, along with a callback for getting the
+ widths of characters. Normally this callback is cpp_wcwidth, but we
+ support other schemes for escaping non-ASCII unicode as a series of
+ ASCII chars when printing the user's source code in diagnostic-show-locus.cc.
+
+ For example, consider:
+ - the Unicode character U+03C0 "GREEK SMALL LETTER PI" (UTF-8: 0xCF 0x80)
+ - the Unicode character U+1F642 "SLIGHTLY SMILING FACE"
+ (UTF-8: 0xF0 0x9F 0x99 0x82)
+ - the byte 0xBF (a stray trailing byte of a UTF-8 character)
+ Normally U+03C0 would occupy one display column, U+1F642
+ would occupy two display columns, and the stray byte would be
+ printed verbatim as one display column.
+
+ However when escaping them as unicode code points as "<U+03C0>"
+ and "<U+1F642>" they occupy 8 and 9 display columns respectively,
+ and when escaping them as bytes as "<CF><80>" and "<F0><9F><99><82>"
+ they occupy 8 and 16 display columns respectively. In both cases
+ the stray byte is escaped to <BF> as 4 display columns. */
+
+struct cpp_char_column_policy
+{
+ cpp_char_column_policy (int tabstop,
+ int (*width_cb) (cppchar_t c))
+ : m_tabstop (tabstop),
+ m_undecoded_byte_width (1),
+ m_width_cb (width_cb)
+ {}
+
+ int m_tabstop;
+ /* Width in display columns of a stray byte that isn't decodable
+ as UTF-8. */
+ int m_undecoded_byte_width;
+ int (*m_width_cb) (cppchar_t c);
+};
+
+/* A class to manage the state while converting a UTF-8 sequence to cppchar_t
+ and computing the display width one character at a time. */
+class cpp_display_width_computation {
+ public:
+ cpp_display_width_computation (const char *data, int data_length,
+ const cpp_char_column_policy &policy);
+ const char *next_byte () const { return m_next; }
+ int bytes_processed () const { return m_next - m_begin; }
+ int bytes_left () const { return m_bytes_left; }
+ bool done () const { return !bytes_left (); }
+ int display_cols_processed () const { return m_display_cols; }
+
+ int process_next_codepoint (cpp_decoded_char *out);
+ int advance_display_cols (int n);
+
+ private:
+ const char *const m_begin;
+ const char *m_next;
+ size_t m_bytes_left;
+ const cpp_char_column_policy &m_policy;
+ int m_display_cols;
+};
+
+/* Convenience functions that are simple use cases for class
+ cpp_display_width_computation. Tab characters will be expanded to spaces
+ as determined by POLICY.m_tabstop, and non-printable-ASCII characters
+ will be escaped as per POLICY. */
+
+int cpp_byte_column_to_display_column (const char *data, int data_length,
+ int column,
+ const cpp_char_column_policy &policy);
+inline int cpp_display_width (const char *data, int data_length,
+ const cpp_char_column_policy &policy)
+{
+ return cpp_byte_column_to_display_column (data, data_length, data_length,
+ policy);
+}
+int cpp_display_column_to_byte_column (const char *data, int data_length,
+ int display_col,
+ const cpp_char_column_policy &policy);
+int cpp_wcwidth (cppchar_t c);
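+
+/* Sketch: measure a UTF-8 source line in display columns, with 8-column
+   tabs and the default width callback (illustrative only):
+
+     cpp_char_column_policy policy (8, cpp_wcwidth);
+     int cols = cpp_display_width (line, strlen (line), policy);  */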
+
+bool cpp_input_conversion_is_trivial (const char *input_charset);
+int cpp_check_utf8_bom (const char *data, size_t data_length);
+bool cpp_valid_utf8_p (const char *data, size_t num_bytes);
+
+#endif /* ! LIBCPP_CPPLIB_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cselib.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cselib.h
new file mode 100644
index 0000000..e2fa8e8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/cselib.h
@@ -0,0 +1,143 @@
+/* Common subexpression elimination for GNU compiler.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_CSELIB_H
+#define GCC_CSELIB_H
+
+/* Describe a value. */
+struct cselib_val
+{
+ /* The hash value. */
+ unsigned int hash;
+
+ /* A unique id assigned to values. */
+ int uid;
+
+ /* A VALUE rtx that points back to this structure. */
+ rtx val_rtx;
+
+ /* All rtl expressions that hold this value at the current time during a
+ scan. */
+ struct elt_loc_list *locs;
+
+ /* If this value is used as an address, points to a list of values that
+ use it as an address in a MEM. */
+ struct elt_list *addr_list;
+
+ struct cselib_val *next_containing_mem;
+};
+
+/* A list of rtl expressions that hold the same value. */
+struct elt_loc_list {
+ /* Next element in the list. */
+ struct elt_loc_list *next;
+ /* An rtl expression that holds the value. */
+ rtx loc;
+ /* The insn that made the equivalence. */
+ rtx_insn *setting_insn;
+};
+
+/* Describe a single set that is part of an insn. */
+struct cselib_set
+{
+ rtx src;
+ rtx dest;
+ cselib_val *src_elt;
+ cselib_val *dest_addr_elt;
+};
+
+enum cselib_record_what
+{
+ CSELIB_RECORD_MEMORY = 1,
+ CSELIB_PRESERVE_CONSTANTS = 2
+};
+
+extern void (*cselib_discard_hook) (cselib_val *);
+extern void (*cselib_record_sets_hook) (rtx_insn *insn, struct cselib_set *sets,
+ int n_sets);
+
+extern cselib_val *cselib_lookup (rtx, machine_mode,
+ int, machine_mode);
+extern cselib_val *cselib_lookup_from_insn (rtx, machine_mode,
+ int, machine_mode, rtx_insn *);
+extern void cselib_init (int);
+extern void cselib_clear_table (void);
+extern void cselib_finish (void);
+extern void cselib_process_insn (rtx_insn *);
+extern bool fp_setter_insn (rtx_insn *);
+extern machine_mode cselib_reg_set_mode (const_rtx);
+extern int rtx_equal_for_cselib_1 (rtx, rtx, machine_mode, int);
+extern bool cselib_redundant_set_p (rtx);
+extern int references_value_p (const_rtx, int);
+extern rtx cselib_expand_value_rtx (rtx, bitmap, int);
+typedef rtx (*cselib_expand_callback)(rtx, bitmap, int, void *);
+extern rtx cselib_expand_value_rtx_cb (rtx, bitmap, int,
+ cselib_expand_callback, void *);
+extern bool cselib_dummy_expand_value_rtx_cb (rtx, bitmap, int,
+ cselib_expand_callback, void *);
+extern rtx cselib_subst_to_values (rtx, machine_mode);
+extern rtx cselib_subst_to_values_from_insn (rtx, machine_mode, rtx_insn *);
+extern void cselib_invalidate_rtx (rtx);
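+
+/* Typical driver shape (sketch; assumes the usual CFG/RTL iterators and
+   that a pass is operating on cfun):
+
+     cselib_init (CSELIB_RECORD_MEMORY);
+     basic_block bb;
+     FOR_EACH_BB_FN (bb, cfun)
+       {
+         rtx_insn *insn;
+         FOR_BB_INSNS (bb, insn)
+           if (INSN_P (insn))
+             cselib_process_insn (insn);
+       }
+     cselib_finish ();  */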
+
+extern void cselib_reset_table (unsigned int);
+extern unsigned int cselib_get_next_uid (void);
+extern void cselib_preserve_value (cselib_val *);
+extern bool cselib_preserved_value_p (cselib_val *);
+extern void cselib_preserve_only_values (void);
+extern void cselib_preserve_cfa_base_value (cselib_val *, unsigned int);
+extern void cselib_add_permanent_equiv (cselib_val *, rtx, rtx_insn *);
+extern bool cselib_have_permanent_equivalences (void);
+extern void cselib_set_value_sp_based (cselib_val *);
+extern bool cselib_sp_based_value_p (cselib_val *);
+extern void cselib_record_sp_cfa_base_equiv (HOST_WIDE_INT, rtx_insn *);
+extern bool cselib_sp_derived_value_p (cselib_val *);
+
+extern void dump_cselib_table (FILE *);
+
+/* Return the canonical value for VAL, following the equivalence chain
+ towards the earliest (== lowest uid) equivalent value. */
+
+inline cselib_val *
+canonical_cselib_val (cselib_val *val)
+{
+ cselib_val *canon;
+
+ if (!val->locs || val->locs->next
+ || !val->locs->loc || GET_CODE (val->locs->loc) != VALUE
+ || val->uid < CSELIB_VAL_PTR (val->locs->loc)->uid)
+ return val;
+
+ canon = CSELIB_VAL_PTR (val->locs->loc);
+ gcc_checking_assert (canonical_cselib_val (canon) == canon);
+ return canon;
+}
+
+/* Return nonzero if we can prove that X and Y contain the same value, taking
+ our gathered information into account. */
+
+inline int
+rtx_equal_for_cselib_p (rtx x, rtx y)
+{
+ if (x == y)
+ return 1;
+
+ return rtx_equal_for_cselib_1 (x, y, VOIDmode, 0);
+}
+
+#endif /* GCC_CSELIB_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ctfc.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ctfc.h
new file mode 100644
index 0000000..bf1841a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ctfc.h
@@ -0,0 +1,450 @@
+/* ctfc.h - Declarations and definitions related to the CTF container.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This file defines the data structures and functions used by the compiler
+ to generate the CTF debug info. The definitions below are compiler internal
+ representations and closely reflect the CTF format requirements in <ctf.h>.
+
+ The contents of the CTF container are used eventually for emission of both
+ CTF (ctfout.cc) and BTF debug info (btfout.cc), as the two type debug formats
+ are close cousins. */
+
+#ifndef GCC_CTFC_H
+#define GCC_CTFC_H 1
+
+#include "config.h"
+#include "system.h"
+#include "tree.h"
+#include "fold-const.h"
+#include "dwarf2ctf.h"
+#include "ctf.h"
+#include "btf.h"
+
+/* Invalid CTF type ID definition. */
+
+#define CTF_NULL_TYPEID 0
+
+/* Value to start generating the CTF type ID from. */
+
+#define CTF_INIT_TYPEID 1
+
+/* CTF type ID. */
+
+typedef uint64_t ctf_id_t;
+
+/* CTF string table element (list node). */
+
+typedef struct GTY ((chain_next ("%h.cts_next"))) ctf_string
+{
+ const char * cts_str; /* CTF string. */
+ struct ctf_string * cts_next; /* A list node. */
+} ctf_string_t;
+
+/* Internal representation of CTF string table. */
+
+typedef struct GTY (()) ctf_strtable
+{
+ ctf_string_t * ctstab_head; /* Head str ptr. */
+ ctf_string_t * ctstab_tail; /* Tail; new strings are appended here. */
+ int ctstab_num; /* Number of strings in the table. */
+ size_t ctstab_len; /* Size of string table in bytes. */
+ const char * ctstab_estr; /* Empty string "". */
+} ctf_strtable_t;
+
+/* Encoding information for integers, floating-point values etc. The flags
+ field will contain values appropriate for the type defined in <ctf.h>. */
+
+typedef struct GTY (()) ctf_encoding
+{
+ unsigned int cte_format; /* Data format (CTF_INT_* or CTF_FP_* flags). */
+ unsigned int cte_offset; /* Offset of value in bits. */
+ unsigned int cte_bits; /* Size of storage in bits. */
+} ctf_encoding_t;
+
+/* Array information for CTF generation. */
+
+typedef struct GTY (()) ctf_arinfo
+{
+ ctf_id_t ctr_contents; /* Type of array contents. */
+ ctf_id_t ctr_index; /* Type of array index. */
+ unsigned int ctr_nelems; /* Number of elements. */
+} ctf_arinfo_t;
+
+/* Function information for CTF generation. */
+
+typedef struct GTY (()) ctf_funcinfo
+{
+ ctf_id_t ctc_return; /* Function return type. */
+ unsigned int ctc_argc; /* Number of typed arguments to function. */
+ unsigned int ctc_flags; /* Function attributes (see below). */
+} ctf_funcinfo_t;
+
+typedef struct GTY (()) ctf_sliceinfo
+{
+ unsigned int cts_type; /* Reference CTF type. */
+ unsigned short cts_offset; /* Offset in bits of the first bit. */
+ unsigned short cts_bits; /* Size in bits. */
+} ctf_sliceinfo_t;
+
+/* CTF type representation internal to the compiler. It closely reflects the
+ ctf_type_t type node in <ctf.h> except the GTY (()) tags. */
+
+typedef struct GTY (()) ctf_itype
+{
+ uint32_t ctti_name; /* Reference to name in string table. */
+ uint32_t ctti_info; /* Encoded kind, variant length (see below). */
+ union GTY ((desc ("0")))
+ {
+ uint32_t GTY ((tag ("0"))) _size;/* Size of entire type in bytes. */
+ uint32_t GTY ((tag ("1"))) _type;/* Reference to another type. */
+ } _u;
+ uint32_t ctti_lsizehi; /* High 32 bits of type size in bytes. */
+ uint32_t ctti_lsizelo; /* Low 32 bits of type size in bytes. */
+} ctf_itype_t;
+
+#define ctti_size _u._size
+#define ctti_type _u._type
+
+/* Function arguments end with varargs. */
+
+#define CTF_FUNC_VARARG 0x1
+
+/* Struct/union/enum member definition for CTF generation. */
+
+typedef struct GTY ((chain_next ("%h.dmd_next"))) ctf_dmdef
+{
+ const char * dmd_name; /* Name of this member. */
+ ctf_id_t dmd_type; /* Type of this member (for sou). */
+ uint32_t dmd_name_offset; /* Offset of the name in str table. */
+ uint64_t dmd_offset; /* Offset of this member in bits (for sou). */
+ HOST_WIDE_INT dmd_value; /* Value of this member (for enum). */
+ struct ctf_dmdef * dmd_next; /* A list node. */
+} ctf_dmdef_t;
+
+#define ctf_dmd_list_next(elem) ((ctf_dmdef_t *)((elem)->dmd_next))
+
+/* Function Argument. */
+
+typedef struct GTY (()) ctf_func_arg
+{
+ ctf_id_t farg_type; /* Type identifier of the argument. */
+ const char * farg_name; /* Name of the argument. */
+ uint32_t farg_name_offset; /* Offset of the name in str table. */
+ struct ctf_func_arg * farg_next;/* A list node. */
+} ctf_func_arg_t;
+
+#define ctf_farg_list_next(elem) ((ctf_func_arg_t *)((elem)->farg_next))
+
+/* Type definition for CTF generation. */
+
+struct GTY ((for_user)) ctf_dtdef
+{
+ dw_die_ref dtd_key; /* Type key for hashing. */
+ const char * dtd_name; /* Name associated with definition (if any). */
+ ctf_id_t dtd_type; /* Type identifier for this definition. */
+ ctf_itype_t dtd_data; /* Type node. */
+ bool from_global_func; /* Whether this type was added from a global
+ function. */
+ uint32_t linkage; /* Used in function types. 0=local, 1=global. */
+ bool dtd_enum_unsigned; /* Enum signedness. */
+ union GTY ((desc ("ctf_dtu_d_union_selector (&%1)")))
+ {
+ /* struct, union, or enum. */
+ ctf_dmdef_t * GTY ((tag ("CTF_DTU_D_MEMBERS"))) dtu_members;
+ /* array. */
+ ctf_arinfo_t GTY ((tag ("CTF_DTU_D_ARRAY"))) dtu_arr;
+ /* integer or float. */
+ ctf_encoding_t GTY ((tag ("CTF_DTU_D_ENCODING"))) dtu_enc;
+ /* function. */
+ ctf_func_arg_t * GTY ((tag ("CTF_DTU_D_ARGUMENTS"))) dtu_argv;
+ /* slice. */
+ ctf_sliceinfo_t GTY ((tag ("CTF_DTU_D_SLICE"))) dtu_slice;
+ } dtd_u;
+};
+
+typedef struct ctf_dtdef ctf_dtdef_t;
+
+/* Variable definition for CTF generation. */
+
+struct GTY ((for_user)) ctf_dvdef
+{
+ dw_die_ref dvd_key; /* DWARF DIE corresponding to the variable. */
+ const char * dvd_name; /* Name associated with variable. */
+ uint32_t dvd_name_offset; /* Offset of the name in str table. */
+ unsigned int dvd_visibility; /* External visibility. 0=static, 1=global. */
+ ctf_id_t dvd_type; /* Type of variable. */
+};
+
+typedef struct ctf_dvdef ctf_dvdef_t;
+
+typedef ctf_dvdef_t * ctf_dvdef_ref;
+typedef ctf_dtdef_t * ctf_dtdef_ref;
+
+/* Location information for CTF Types and CTF Variables. */
+
+typedef struct GTY (()) ctf_srcloc
+{
+ const char * ctsloc_file;
+ unsigned int ctsloc_line;
+ unsigned int ctsloc_col;
+} ctf_srcloc_t;
+
+typedef ctf_srcloc_t * ctf_srcloc_ref;
+
+/* Helper enum and API for the GTY machinery to work on union dtd_u. */
+
+enum ctf_dtu_d_union_enum {
+ CTF_DTU_D_MEMBERS,
+ CTF_DTU_D_ARRAY,
+ CTF_DTU_D_ENCODING,
+ CTF_DTU_D_ARGUMENTS,
+ CTF_DTU_D_SLICE
+};
+
+enum ctf_dtu_d_union_enum
+ctf_dtu_d_union_selector (ctf_dtdef_ref);
+
+struct ctfc_dtd_hasher : ggc_ptr_hash <ctf_dtdef_t>
+{
+ typedef ctf_dtdef_ref compare_type;
+
+ static hashval_t hash (ctf_dtdef_ref);
+ static bool equal (ctf_dtdef_ref, ctf_dtdef_ref);
+};
+
+inline hashval_t
+ctfc_dtd_hasher::hash (ctf_dtdef_ref dtd)
+{
+ return htab_hash_pointer (dtd->dtd_key);
+}
+
+inline bool
+ctfc_dtd_hasher::equal (ctf_dtdef_ref dtd, ctf_dtdef_ref dtd2)
+{
+ return (dtd->dtd_key == dtd2->dtd_key);
+}
+
+struct ctfc_dvd_hasher : ggc_ptr_hash <ctf_dvdef_t>
+{
+ typedef ctf_dvdef_ref compare_type;
+
+ static hashval_t hash (ctf_dvdef_ref);
+ static bool equal (ctf_dvdef_ref, ctf_dvdef_ref);
+};
+
+inline hashval_t
+ctfc_dvd_hasher::hash (ctf_dvdef_ref dvd)
+{
+ return htab_hash_pointer (dvd->dvd_key);
+}
+
+inline bool
+ctfc_dvd_hasher::equal (ctf_dvdef_ref dvd, ctf_dvdef_ref dvd2)
+{
+ return (dvd->dvd_key == dvd2->dvd_key);
+}
+
+/* CTF container structure.
+ It is the context passed around when generating ctf debug info. There is
+ one container per translation unit. */
+
+typedef struct GTY (()) ctf_container
+{
+ /* CTF Preamble. */
+ unsigned short ctfc_magic;
+ unsigned char ctfc_version;
+ unsigned char ctfc_flags;
+ uint32_t ctfc_cuname_offset;
+
+ /* CTF types. */
+ hash_table <ctfc_dtd_hasher> * GTY (()) ctfc_types;
+ /* CTF variables. */
+ hash_table <ctfc_dvd_hasher> * GTY (()) ctfc_vars;
+ /* CTF variables to be ignored. */
+ hash_table <ctfc_dvd_hasher> * GTY (()) ctfc_ignore_vars;
+
+ /* CTF string table. */
+ ctf_strtable_t ctfc_strtable;
+ /* Auxiliary string table. At this time, used for keeping func arg names
+ for BTF. */
+ ctf_strtable_t ctfc_aux_strtable;
+
+ uint64_t ctfc_num_types;
+ uint64_t ctfc_num_stypes;
+ uint64_t ctfc_num_global_funcs;
+ uint64_t ctfc_num_global_objts;
+
+ /* Number of vlen bytes - the variable length portion after ctf_type_t and
+ ctf_stype_t in the CTF section. This is used to calculate the offsets in
+ the CTF header. */
+ uint64_t ctfc_num_vlen_bytes;
+
+ /* Next CTF type id to assign. */
+ ctf_id_t ctfc_nextid;
+
+ /* Specify an explicit length of 0 so that the GC marking routines steer
+ clear of marking the CTF vars and CTF types twice. The lists below do
+ not own the pointed-to objects; they simply hold references to them. */
+
+ /* List of pre-processed CTF Variables. CTF requires that the variables
+ appear in the sorted order of their names. */
+ ctf_dvdef_t ** GTY ((length ("0"))) ctfc_vars_list;
+ /* Count of pre-processed CTF Variables in the list. */
+ uint64_t ctfc_vars_list_count;
+ /* List of pre-processed CTF types. CTF requires that a shared type
+ appear before the type that uses it. For the compiler, this means types
+ are emitted in the sorted order of their type IDs. */
+ ctf_dtdef_t ** GTY ((length ("0"))) ctfc_types_list;
+ /* List of CTF function types for global functions. The order of global
+ function entries in the CTF funcinfo section is undefined by the
+ compiler. */
+ ctf_dtdef_t ** GTY ((length ("0"))) ctfc_gfuncs_list;
+ /* List of CTF variables at global scope. The order of global object entries
+ in the CTF objinfo section is undefined by the compiler. */
+ ctf_dvdef_t ** GTY ((length ("0"))) ctfc_gobjts_list;
+
+ /* Following members are for debugging only. They do not add functional
+ value to the task of CTF creation. These can be cleaned up once CTF
+ generation stabilizes. */
+
+ /* Keep a count of the number of bytes dumped in asm for debugging
+ purposes. */
+ uint64_t ctfc_numbytes_asm;
+ /* Total length of all strings in CTF. */
+ size_t ctfc_strlen;
+ /* Total length of all strings in aux string table. */
+ size_t ctfc_aux_strlen;
+
+} ctf_container_t;
+
+/* Markers for which string table from the CTF container to use. */
+
+#define CTF_STRTAB 0 /* CTF string table. */
+#define CTF_AUX_STRTAB 1 /* CTF auxiliary string table. */
+
+typedef ctf_container_t * ctf_container_ref;
+
+extern GTY (()) ctf_container_ref tu_ctfc;
+
+extern void ctfc_delete_container (ctf_container_ref);
+
+/* If the next CTF type id is still set to the init value, there are no CTF
+ records to report. */
+extern bool ctfc_is_empty_container (ctf_container_ref);
+
+/* Get the total number of CTF types in the container. */
+
+extern unsigned int ctfc_get_num_ctf_types (ctf_container_ref);
+
+/* Get the total number of CTF variables in the container. */
+
+extern unsigned int ctfc_get_num_ctf_vars (ctf_container_ref);
+
+/* Get a reference to the CTF string table or the CTF auxiliary
+ string table. */
+
+extern ctf_strtable_t * ctfc_get_strtab (ctf_container_ref, int);
+
+/* Get the length of the specified string table in the CTF container. */
+
+extern size_t ctfc_get_strtab_len (ctf_container_ref, int);
+
+/* Get the number of bytes to represent the variable length portion of all CTF
+ types in the CTF container. */
+
+extern size_t ctfc_get_num_vlen_bytes (ctf_container_ref);
+
+/* The compiler records whether a type is visible at top-level scope or not.
+ The only types not visible at top-level scope so far are slices;
+ CTF_ADD_NONROOT is used to indicate them. */
+#define CTF_ADD_NONROOT 0 /* CTF type only visible in nested scope. */
+#define CTF_ADD_ROOT 1 /* CTF type visible at top-level scope. */
+
+/* These APIs initialize and finalize the CTF machinery and
+ add types to the CTF container associated with the current
+ translation unit. Used in dwarf2ctf.cc. */
+
+extern void ctf_init (void);
+extern void ctf_output (const char * filename);
+extern void ctf_finalize (void);
+
+extern void btf_output (const char * filename);
+extern void btf_init_postprocess (void);
+extern void btf_finalize (void);
+
+extern ctf_container_ref ctf_get_tu_ctfc (void);
+
+extern bool ctf_type_exists (ctf_container_ref, dw_die_ref, ctf_id_t *);
+
+extern void ctf_add_cuname (ctf_container_ref, const char *);
+
+extern ctf_dtdef_ref ctf_dtd_lookup (const ctf_container_ref ctfc,
+ dw_die_ref die);
+extern ctf_dvdef_ref ctf_dvd_lookup (const ctf_container_ref ctfc,
+ dw_die_ref die);
+extern bool ctf_dvd_ignore_lookup (const ctf_container_ref ctfc,
+ dw_die_ref die);
+
+extern const char * ctf_add_string (ctf_container_ref, const char *,
+ uint32_t *, int);
+
+extern ctf_id_t ctf_add_reftype (ctf_container_ref, uint32_t, ctf_id_t,
+ uint32_t, dw_die_ref);
+extern ctf_id_t ctf_add_enum (ctf_container_ref, uint32_t, const char *,
+ HOST_WIDE_INT, bool, dw_die_ref);
+extern ctf_id_t ctf_add_slice (ctf_container_ref, uint32_t, ctf_id_t,
+ uint32_t, uint32_t, dw_die_ref);
+extern ctf_id_t ctf_add_float (ctf_container_ref, uint32_t, const char *,
+ const ctf_encoding_t *, dw_die_ref);
+extern ctf_id_t ctf_add_integer (ctf_container_ref, uint32_t, const char *,
+ const ctf_encoding_t *, dw_die_ref);
+extern ctf_id_t ctf_add_unknown (ctf_container_ref, uint32_t, const char *,
+ const ctf_encoding_t *, dw_die_ref);
+extern ctf_id_t ctf_add_pointer (ctf_container_ref, uint32_t, ctf_id_t,
+ dw_die_ref);
+extern ctf_id_t ctf_add_array (ctf_container_ref, uint32_t,
+ const ctf_arinfo_t *, dw_die_ref);
+extern ctf_id_t ctf_add_forward (ctf_container_ref, uint32_t, const char *,
+ uint32_t, dw_die_ref);
+extern ctf_id_t ctf_add_typedef (ctf_container_ref, uint32_t, const char *,
+ ctf_id_t, dw_die_ref);
+extern ctf_id_t ctf_add_function (ctf_container_ref, uint32_t, const char *,
+ const ctf_funcinfo_t *, dw_die_ref, bool, int);
+extern ctf_id_t ctf_add_sou (ctf_container_ref, uint32_t, const char *,
+ uint32_t, size_t, dw_die_ref);
+
+extern int ctf_add_enumerator (ctf_container_ref, ctf_id_t, const char *,
+ HOST_WIDE_INT, dw_die_ref);
+extern int ctf_add_member_offset (ctf_container_ref, dw_die_ref, const char *,
+ ctf_id_t, uint64_t);
+extern int ctf_add_function_arg (ctf_container_ref, dw_die_ref,
+ const char *, ctf_id_t);
+extern int ctf_add_variable (ctf_container_ref, const char *, ctf_id_t,
+ dw_die_ref, unsigned int, dw_die_ref);
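+
+/* Sketch: record a 32-bit signed integer type at top-level scope
+   (illustrative only; CTF_INT_SIGNED comes from <ctf.h> and DIE is the
+   corresponding DWARF DIE supplied by the caller):
+
+     ctf_encoding_t enc = { CTF_INT_SIGNED, 0, 32 };
+     ctf_id_t id = ctf_add_integer (tu_ctfc, CTF_ADD_ROOT, "int",
+                                    &enc, die);  */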
+
+extern ctf_id_t ctf_lookup_tree_type (ctf_container_ref, const tree);
+extern ctf_id_t get_btf_id (ctf_id_t);
+
+/* CTF section does not emit location information; at this time, location
+ information is needed for BTF CO-RE use-cases. */
+
+extern int ctfc_get_dtd_srcloc (ctf_dtdef_ref, ctf_srcloc_ref);
+extern int ctfc_get_dvd_srcloc (ctf_dvdef_ref, ctf_srcloc_ref);
+
+#endif /* GCC_CTFC_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/d/d-tree.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/d/d-tree.def
new file mode 100644
index 0000000..32e58df
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/d/d-tree.def
@@ -0,0 +1,29 @@
+/* d-tree.def -- Definitions and documentation for additional tree codes used
+ in the D compiler (see tree.def for standard codes).
+ Copyright (C) 2006-2023 Free Software Foundation, Inc.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Logical shift done on an unsigned type. If the first operand is
+ signed, it will be converted to the unsigned equivalent. The second
+ operand is the number of bits to shift by; it need not be the same
+ type as the first operand and result. */
+DEFTREECODE (UNSIGNED_RSHIFT_EXPR, "unsigned_rshift_expr", tcc_binary, 2)
+
+/* Floating point modulus that expands to a call to fmod. */
+DEFTREECODE (FLOAT_MOD_EXPR, "float_mod_expr", tcc_binary, 2)
+
+/* Used to represent information associated with a function closure. */
+DEFTREECODE (FUNCFRAME_INFO, "funcframe_info", tcc_exceptional, 0)
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/data-streamer.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/data-streamer.h
new file mode 100644
index 0000000..d8c7e21
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/data-streamer.h
@@ -0,0 +1,349 @@
+/* Generic streaming support for various data types.
+
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+ Contributed by Diego Novillo <dnovillo@google.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DATA_STREAMER_H
+#define GCC_DATA_STREAMER_H
+
+#include "lto-streamer.h"
+
+/* Data structures used to pack values and bitflags into a vector of
+ words. Used to stream values of a fixed number of bits in a
+ space-efficient way. */
+static unsigned const BITS_PER_BITPACK_WORD = HOST_BITS_PER_WIDE_INT;
+
+typedef unsigned HOST_WIDE_INT bitpack_word_t;
+
+struct bitpack_d
+{
+ /* The position of the first unused or unconsumed bit in the word. */
+ unsigned pos;
+
+ /* The current word we are (un)packing. */
+ bitpack_word_t word;
+
+ /* The lto_output_stream or the lto_input_block we are streaming to/from. */
+ void *stream;
+};
+
+/* In data-streamer.cc */
+void bp_pack_var_len_unsigned (struct bitpack_d *, unsigned HOST_WIDE_INT);
+void bp_pack_var_len_int (struct bitpack_d *, HOST_WIDE_INT);
+unsigned HOST_WIDE_INT bp_unpack_var_len_unsigned (struct bitpack_d *);
+HOST_WIDE_INT bp_unpack_var_len_int (struct bitpack_d *);
+
+/* In data-streamer-out.cc */
+void streamer_write_zero (struct output_block *);
+void streamer_write_uhwi (struct output_block *, unsigned HOST_WIDE_INT);
+void streamer_write_hwi (struct output_block *, HOST_WIDE_INT);
+void streamer_write_poly_uint64 (struct output_block *, poly_uint64);
+void streamer_write_poly_int64 (struct output_block *, poly_int64);
+void streamer_write_gcov_count (struct output_block *, gcov_type);
+void streamer_write_string (struct output_block *, struct lto_output_stream *,
+ const char *, bool);
+void streamer_write_string_with_length (struct output_block *,
+ struct lto_output_stream *,
+ const char *, unsigned int, bool);
+void bp_pack_string_with_length (struct output_block *, struct bitpack_d *,
+ const char *, unsigned int, bool);
+void bp_pack_string (struct output_block *, struct bitpack_d *,
+ const char *, bool);
+void streamer_write_uhwi_stream (struct lto_output_stream *,
+ unsigned HOST_WIDE_INT);
+void streamer_write_hwi_stream (struct lto_output_stream *, HOST_WIDE_INT);
+void streamer_write_gcov_count_stream (struct lto_output_stream *, gcov_type);
+void streamer_write_data_stream (struct lto_output_stream *, const void *,
+ size_t);
+void streamer_write_wide_int (struct output_block *, const wide_int &);
+void streamer_write_widest_int (struct output_block *, const widest_int &);
+
+/* In data-streamer-in.cc */
+const char *streamer_read_string (class data_in *, class lto_input_block *);
+const char *streamer_read_indexed_string (class data_in *,
+ class lto_input_block *,
+ unsigned int *);
+const char *bp_unpack_indexed_string (class data_in *, struct bitpack_d *,
+ unsigned int *);
+const char *bp_unpack_string (class data_in *, struct bitpack_d *);
+unsigned HOST_WIDE_INT streamer_read_uhwi (class lto_input_block *);
+HOST_WIDE_INT streamer_read_hwi (class lto_input_block *);
+poly_uint64 streamer_read_poly_uint64 (class lto_input_block *);
+poly_int64 streamer_read_poly_int64 (class lto_input_block *);
+gcov_type streamer_read_gcov_count (class lto_input_block *);
+wide_int streamer_read_wide_int (class lto_input_block *);
+widest_int streamer_read_widest_int (class lto_input_block *);
+
+/* Returns a new bit-packing context for bit-packing into S. */
+inline struct bitpack_d
+bitpack_create (struct lto_output_stream *s)
+{
+ struct bitpack_d bp;
+ bp.pos = 0;
+ bp.word = 0;
+ bp.stream = (void *)s;
+ return bp;
+}
+
+/* Pack the NBITS bit sized value VAL into the bit-packing context BP. */
+inline void
+bp_pack_value (struct bitpack_d *bp, bitpack_word_t val, unsigned nbits)
+{
+ bitpack_word_t word = bp->word;
+ int pos = bp->pos;
+
+ /* Verify that VAL fits in NBITS bits. */
+ gcc_checking_assert (nbits == BITS_PER_BITPACK_WORD
+ || !(val & ~(((bitpack_word_t)1<<nbits)-1)));
+
+ /* If VAL does not fit into the current bitpack word, switch to the
+ next one. */
+ if (pos + nbits > BITS_PER_BITPACK_WORD)
+ {
+ streamer_write_uhwi_stream ((struct lto_output_stream *) bp->stream,
+ word);
+ word = val;
+ pos = nbits;
+ }
+ else
+ {
+ word |= val << pos;
+ pos += nbits;
+ }
+ bp->word = word;
+ bp->pos = pos;
+}
+
+/* Pack VAL into the bit-packing context BP, using NBITS for each
+ coefficient. */
+inline void
+bp_pack_poly_value (struct bitpack_d *bp,
+ const poly_int<NUM_POLY_INT_COEFFS, bitpack_word_t> &val,
+ unsigned nbits)
+{
+ for (int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ bp_pack_value (bp, val.coeffs[i], nbits);
+}
+
+/* Finishes bit-packing of BP. */
+inline void
+streamer_write_bitpack (struct bitpack_d *bp)
+{
+ streamer_write_uhwi_stream ((struct lto_output_stream *) bp->stream,
+ bp->word);
+ bp->word = 0;
+ bp->pos = 0;
+}
+
+/* Returns a new bit-packing context for bit-unpacking from IB. */
+inline struct bitpack_d
+streamer_read_bitpack (class lto_input_block *ib)
+{
+ struct bitpack_d bp;
+ bp.word = streamer_read_uhwi (ib);
+ bp.pos = 0;
+ bp.stream = (void *)ib;
+ return bp;
+}
+
+/* Unpacks NBITS bits from the bit-packing context BP and returns them. */
+inline bitpack_word_t
+bp_unpack_value (struct bitpack_d *bp, unsigned nbits)
+{
+ bitpack_word_t mask, val;
+ int pos = bp->pos;
+
+ mask = (nbits == BITS_PER_BITPACK_WORD
+ ? (bitpack_word_t) -1
+ : ((bitpack_word_t) 1 << nbits) - 1);
+
+ /* If there are not NBITS contiguous bits left in the current bitpack
+ word, switch to the next one. */
+ if (pos + nbits > BITS_PER_BITPACK_WORD)
+ {
+ bp->word = val
+ = streamer_read_uhwi ((class lto_input_block *)bp->stream);
+ bp->pos = nbits;
+ return val & mask;
+ }
+ val = bp->word;
+ val >>= pos;
+ bp->pos = pos + nbits;
+
+ return val & mask;
+}
+
+/* Unpacks a polynomial value from the bit-packing context BP in which each
+ coefficient has NBITS bits. */
+inline poly_int<NUM_POLY_INT_COEFFS, bitpack_word_t>
+bp_unpack_poly_value (struct bitpack_d *bp, unsigned nbits)
+{
+ poly_int_pod<NUM_POLY_INT_COEFFS, bitpack_word_t> x;
+ for (int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ x.coeffs[i] = bp_unpack_value (bp, nbits);
+ return x;
+}
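+
+/* Round-trip sketch: pack a 1-bit flag and a 5-bit kind on the writer
+   side, then unpack them in the same order and with the same widths on
+   the reader side (illustrative only):
+
+     struct bitpack_d obp = bitpack_create (obs);
+     bp_pack_value (&obp, flag, 1);
+     bp_pack_value (&obp, kind, 5);
+     streamer_write_bitpack (&obp);
+
+     struct bitpack_d ibp = streamer_read_bitpack (ib);
+     bool flag = bp_unpack_value (&ibp, 1);
+     unsigned kind = bp_unpack_value (&ibp, 5);  */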
+
+
+/* Write a character to the output block. */
+
+inline void
+streamer_write_char_stream (struct lto_output_stream *obs, char c)
+{
+ /* No space left. */
+ if (obs->left_in_block == 0)
+ lto_append_block (obs);
+
+ /* Write the actual character. */
+ char *current_pointer = obs->current_pointer;
+ *(current_pointer++) = c;
+ obs->current_pointer = current_pointer;
+ obs->total_size++;
+ obs->left_in_block--;
+}
+
+
+/* Read byte from the input block. */
+
+inline unsigned char
+streamer_read_uchar (class lto_input_block *ib)
+{
+ if (ib->p >= ib->len)
+ lto_section_overrun (ib);
+ return (ib->data[ib->p++]);
+}
+
+/* Output VAL into OBS and verify it is in the range MIN...MAX, which is
+ supposed to be a compile-time constant.
+ To stay host independent, the range is limited to 31 bits. */
+
+inline void
+streamer_write_hwi_in_range (struct lto_output_stream *obs,
+ HOST_WIDE_INT min,
+ HOST_WIDE_INT max,
+ HOST_WIDE_INT val)
+{
+ HOST_WIDE_INT range = max - min;
+
+ gcc_checking_assert (val >= min && val <= max && range > 0
+ && range < 0x7fffffff);
+
+ val -= min;
+ streamer_write_uhwi_stream (obs, (unsigned HOST_WIDE_INT) val);
+}
+
+/* Read VAL from IB and verify it is in the range MIN...MAX, which is
+ supposed to be a compile-time constant. PURPOSE is used for error
+ reporting. */
+
+inline HOST_WIDE_INT
+streamer_read_hwi_in_range (class lto_input_block *ib,
+ const char *purpose,
+ HOST_WIDE_INT min,
+ HOST_WIDE_INT max)
+{
+ HOST_WIDE_INT range = max - min;
+ unsigned HOST_WIDE_INT uval = streamer_read_uhwi (ib);
+
+ gcc_checking_assert (range > 0 && range < 0x7fffffff);
+
+ HOST_WIDE_INT val = (HOST_WIDE_INT) (uval + (unsigned HOST_WIDE_INT) min);
+ if (val < min || val > max)
+ lto_value_range_error (purpose, val, min, max);
+ return val;
+}
+
+/* Pack VAL into BP and verify it is in the range MIN...MAX, which is
+ supposed to be a compile-time constant.
+ To stay host independent, the range is limited to 31 bits. */
+
+inline void
+bp_pack_int_in_range (struct bitpack_d *bp,
+ HOST_WIDE_INT min,
+ HOST_WIDE_INT max,
+ HOST_WIDE_INT val)
+{
+ HOST_WIDE_INT range = max - min;
+ int nbits = floor_log2 (range) + 1;
+
+ gcc_checking_assert (val >= min && val <= max && range > 0
+ && range < 0x7fffffff);
+
+ val -= min;
+ bp_pack_value (bp, val, nbits);
+}
+
+/* Unpack VAL from BP and verify it is in the range MIN...MAX, which is
+ supposed to be a compile-time constant. PURPOSE is used for error
+ reporting. */
+
+inline HOST_WIDE_INT
+bp_unpack_int_in_range (struct bitpack_d *bp,
+ const char *purpose,
+ HOST_WIDE_INT min,
+ HOST_WIDE_INT max)
+{
+ HOST_WIDE_INT range = max - min;
+ int nbits = floor_log2 (range) + 1;
+ HOST_WIDE_INT val = bp_unpack_value (bp, nbits);
+
+ gcc_checking_assert (range > 0 && range < 0x7fffffff);
+
+ if (val < min || val > max)
+ lto_value_range_error (purpose, val, min, max);
+ return val;
+}
+
+/* Output VAL of type "enum enum_name" into OBS.
+ Assume range 0...ENUM_LAST - 1. */
+#define streamer_write_enum(obs,enum_name,enum_last,val) \
+ streamer_write_hwi_in_range ((obs), 0, (int)(enum_last) - 1, (int)(val))
+
+/* Input enum of type "enum enum_name" from IB.
+ Assume range 0...ENUM_LAST - 1. */
+#define streamer_read_enum(ib,enum_name,enum_last) \
+ (enum enum_name)streamer_read_hwi_in_range ((ib), #enum_name, 0, \
+ (int)(enum_last) - 1)
+
+/* Output VAL of type "enum enum_name" into BP.
+ Assume range 0...ENUM_LAST - 1. */
+#define bp_pack_enum(bp,enum_name,enum_last,val) \
+ bp_pack_int_in_range ((bp), 0, (int)(enum_last) - 1, (int)(val))
+
+/* Input enum of type "enum enum_name" from BP.
+ Assume range 0...ENUM_LAST - 1. */
+#define bp_unpack_enum(bp,enum_name,enum_last) \
+ (enum enum_name)bp_unpack_int_in_range ((bp), #enum_name, 0, \
+ (int)(enum_last) - 1)
+
+/* Output the start of a record with TAG to output block OB. */
+
+inline void
+streamer_write_record_start (struct output_block *ob, enum LTO_tags tag)
+{
+ streamer_write_enum (ob->main_stream, LTO_tags, LTO_NUM_TAGS, tag);
+}
+
+/* Return the next tag in the input block IB. */
+
+inline enum LTO_tags
+streamer_read_record_start (class lto_input_block *ib)
+{
+ return streamer_read_enum (ib, LTO_tags, LTO_NUM_TAGS);
+}
+
+#endif /* GCC_DATA_STREAMER_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dbgcnt.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dbgcnt.def
new file mode 100644
index 0000000..9e2f1d8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dbgcnt.def
@@ -0,0 +1,217 @@
+/* This file contains the list of debug counters for GCC.
+ Copyright (C) 2006-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* A debug counter provides a way to count an event
+ and return false after the counter has exceeded the threshold
+ specified by the option.
+
+ What is it used for?
+
+ This is primarily used to speed up the search for a bad transformation
+ made by an optimization pass. By doing a binary search on N,
+ you can quickly narrow down to the one transformation
+ that is bad, or that triggers the bad behavior downstream
+ (usually in the form of badly generated code).
+
+ How does it work?
+
+ Every time dbg_cnt (named-counter) is called,
+ the named counter is incremented, and the incremented value
+ is compared against the threshold (limit)
+ specified by the option.
+ dbg_cnt () returns true if it is at or below the threshold, and false if above.
+
+ How to add a new one?
+
+ To add a new counter, simply add an entry below with some descriptive name,
+ and add call(s) to dbg_cnt(your-counter-name) in appropriate places.
+ Usually, you want to control, at the finest granularity,
+ whether any particular transformation can happen:
+ e.g. for each instruction in a dead code elimination pass,
+ for each copy instruction in register coalescing,
+ for each constant propagation on an insn,
+ for each block straightening, etc.
+ See dce.cc for an example. With the dbg_cnt () call in dce.cc,
+ now a developer can use -fdbg-cnt=dce:N
+ to stop doing the dead code elimination after N times.
+
+ How to use it?
+
+ By default, all limits are UINT_MAX.
+ Since the debug count is an unsigned int, <= UINT_MAX always holds,
+ i.e. dbg_cnt () always returns true regardless of the counter value
+ (although it still counts the event).
+ Use -fdbg-cnt=counter1:N,counter2:M,...
+ which sets the limit for counter1 to N, and the limit for counter2 to M, etc.
+ e.g. setting a limit to zero will make dbg_cnt () return false *always*.
+
+ The following shell script can then be used to binary search for the
+ exact transformation that causes the bug. A second shell script
+ should be written, say "tryTest", which exits with 1 if the
+ compiled program fails and exits with 0 if the program succeeds.
+ This shell script should take 1 parameter, the value to be passed
+ to set the counter of the compilation command in tryTest. Then,
+ assuming that the following script is called binarySearch,
+ the command:
+
+ binarySearch tryTest
+
+ will automatically find the highest value of the counter for which
+ the program fails. If tryTest never fails, binarySearch will
+ produce unpredictable results as it will try to find an upper bound
+ that does not exist.
+
+ When dbg_cnt does hit the limit, it writes a comment in the current
+ dump_file of the form:
+
+ ***dbgcnt: limit reached for %s.***
+
+ Assuming that the dump file is logging the analysis/transformations
+ it is making, this pinpoints the exact position in the log file
+ where the problem transformation is being logged.
+
+=====================================
+#!/bin/bash
+
+while getopts "l:u:i:" opt
+do
+ case $opt in
+ l) lb="$OPTARG";;
+ u) ub="$OPTARG";;
+ i) init="$OPTARG";;
+ ?) usage; exit 3;;
+ esac
+done
+
+shift $(($OPTIND - 1))
+echo $@
+cmd=${1+"${@}"}
+
+lb=${lb:=0}
+init=${init:=100}
+
+$cmd $lb
+lb_val=$?
+if [ -z "$ub" ]; then
+ # find the upper bound
+ ub=$(($init + $lb))
+ true
+ while [ $? -eq $lb_val ]; do
+ ub=$(($ub * 10))
+ #ub=`expr $ub \* 10`
+ $cmd $ub
+ done
+fi
+
+echo command: $cmd
+
+true
+while [ `expr $ub - $lb` -gt 1 ]; do
+ try=$(($lb + ( $ub - $lb ) / 2))
+ $cmd $try
+ if [ $? -eq $lb_val ]; then
+ lb=$try
+ else
+ ub=$try
+ fi
+done
+
+echo lbound: $lb
+echo ubound: $ub
+
+=====================================
+
+*/
+
+/* Debug counter definitions.
+ Please keep the list sorted in alphabetic order. */
+DEBUG_COUNTER (asan_use_after_scope)
+DEBUG_COUNTER (auto_inc_dec)
+DEBUG_COUNTER (back_thread1)
+DEBUG_COUNTER (back_thread2)
+DEBUG_COUNTER (back_threadfull1)
+DEBUG_COUNTER (back_threadfull2)
+DEBUG_COUNTER (ccp)
+DEBUG_COUNTER (cfg_cleanup)
+DEBUG_COUNTER (cprop)
+DEBUG_COUNTER (cse2_move2add)
+DEBUG_COUNTER (dce)
+DEBUG_COUNTER (dce_fast)
+DEBUG_COUNTER (dce_ud)
+DEBUG_COUNTER (delete_trivial_dead)
+DEBUG_COUNTER (devirt)
+DEBUG_COUNTER (df_byte_scan)
+DEBUG_COUNTER (dom_unreachable_edges)
+DEBUG_COUNTER (dse)
+DEBUG_COUNTER (dse1)
+DEBUG_COUNTER (dse2)
+DEBUG_COUNTER (gcse2_delete)
+DEBUG_COUNTER (gimple_unroll)
+DEBUG_COUNTER (global_alloc_at_func)
+DEBUG_COUNTER (global_alloc_at_reg)
+DEBUG_COUNTER (graphite_scop)
+DEBUG_COUNTER (hoist)
+DEBUG_COUNTER (hoist_insn)
+DEBUG_COUNTER (ia64_sched2)
+DEBUG_COUNTER (if_after_combine)
+DEBUG_COUNTER (if_after_reload)
+DEBUG_COUNTER (if_conversion)
+DEBUG_COUNTER (if_conversion_tree)
+DEBUG_COUNTER (if_to_switch)
+DEBUG_COUNTER (ipa_attr)
+DEBUG_COUNTER (ipa_cp_bits)
+DEBUG_COUNTER (ipa_cp_values)
+DEBUG_COUNTER (ipa_cp_vr)
+DEBUG_COUNTER (ipa_mod_ref)
+DEBUG_COUNTER (ipa_mod_ref_pta)
+DEBUG_COUNTER (ipa_sra_params)
+DEBUG_COUNTER (ipa_sra_retvalues)
+DEBUG_COUNTER (ira_move)
+DEBUG_COUNTER (ivopts_loop)
+DEBUG_COUNTER (lim)
+DEBUG_COUNTER (local_alloc_for_sched)
+DEBUG_COUNTER (loop_unswitch)
+DEBUG_COUNTER (match)
+DEBUG_COUNTER (merged_ipa_icf)
+DEBUG_COUNTER (phiopt_edge_range)
+DEBUG_COUNTER (postreload_cse)
+DEBUG_COUNTER (pre)
+DEBUG_COUNTER (pre_insn)
+DEBUG_COUNTER (prefetch)
+DEBUG_COUNTER (registered_jump_thread)
+DEBUG_COUNTER (sched2_func)
+DEBUG_COUNTER (sched_block)
+DEBUG_COUNTER (sched_breakdep)
+DEBUG_COUNTER (sched_func)
+DEBUG_COUNTER (sched_insn)
+DEBUG_COUNTER (sched_region)
+DEBUG_COUNTER (sel_sched_cnt)
+DEBUG_COUNTER (sel_sched_insn_cnt)
+DEBUG_COUNTER (sel_sched_region_cnt)
+DEBUG_COUNTER (sms_sched_loop)
+DEBUG_COUNTER (split_for_sched2)
+DEBUG_COUNTER (store_merging)
+DEBUG_COUNTER (store_motion)
+DEBUG_COUNTER (stv_conversion)
+DEBUG_COUNTER (tail_call)
+DEBUG_COUNTER (tree_sra)
+DEBUG_COUNTER (treepre_insert)
+DEBUG_COUNTER (vect_loop)
+DEBUG_COUNTER (vect_slp)
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dbgcnt.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dbgcnt.h
new file mode 100644
index 0000000..3ff43fc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dbgcnt.h
@@ -0,0 +1,40 @@
+/* Debug counter for debugging support
+ Copyright (C) 2006-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>.
+
+See dbgcnt.def for usage information. */
+
+#ifndef GCC_DBGCNT_H
+#define GCC_DBGCNT_H
+
+#define DEBUG_COUNTER(a) a,
+
+enum debug_counter {
+#include "dbgcnt.def"
+ debug_counter_number_of_counters
+};
+
+#undef DEBUG_COUNTER
+
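+/* The #include above is an X-macro expansion: every DEBUG_COUNTER (name)
+   entry in dbgcnt.def becomes one enumerator of this enum, e.g.
+   DEBUG_COUNTER (dce) expands to "dce,", which is how passes can write
+   dbg_cnt (dce); debug_counter_number_of_counters then counts the
+   entries.  */
+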
+extern bool dbg_cnt_is_enabled (enum debug_counter index);
+extern bool dbg_cnt (enum debug_counter index);
+extern unsigned dbg_cnt_counter (enum debug_counter index);
+extern void dbg_cnt_process_opt (const char *arg);
+extern void dbg_cnt_list_all_counters (void);
+
+#endif /* GCC_DBGCNT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dce.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dce.h
new file mode 100644
index 0000000..2f1602a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dce.h
@@ -0,0 +1,27 @@
+/* RTL dead code elimination.
+ Copyright (C) 2005-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DCE_H
+#define GCC_DCE_H
+
+extern void run_word_dce (void);
+extern void run_fast_dce (void);
+extern void run_fast_df_dce (void);
+
+#endif /* GCC_DCE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ddg.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ddg.h
new file mode 100644
index 0000000..0583fa1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ddg.h
@@ -0,0 +1,182 @@
+/* DDG - Data Dependence Graph - interface.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+ Contributed by Ayal Zaks and Mustafa Hagog <zaks,mustafa@il.ibm.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DDG_H
+#define GCC_DDG_H
+
+/* For sbitmap. */
+
+typedef struct ddg_node *ddg_node_ptr;
+typedef struct ddg_edge *ddg_edge_ptr;
+typedef struct ddg *ddg_ptr;
+typedef struct ddg_scc *ddg_scc_ptr;
+typedef struct ddg_all_sccs *ddg_all_sccs_ptr;
+
+enum dep_type {TRUE_DEP, OUTPUT_DEP, ANTI_DEP};
+enum dep_data_type {REG_OR_MEM_DEP, REG_DEP, MEM_DEP, REG_AND_MEM_DEP};
+
+/* The following two macros enable direct access to the successors and
+ predecessors bitmaps held in each ddg_node. Do not make changes to
+ these bitmaps, unless you want to change the DDG. */
+#define NODE_SUCCESSORS(x) ((x)->successors)
+#define NODE_PREDECESSORS(x) ((x)->predecessors)
+
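+/* An illustrative sketch (assuming GCC's usual sbitmap iteration
+   macros) of walking the successors of node I in graph G:
+
+     unsigned int j;
+     sbitmap_iterator sbi;
+     EXECUTE_IF_SET_IN_BITMAP (NODE_SUCCESSORS (&G->nodes[I]), 0, j, sbi)
+       {
+         ddg_node_ptr succ = &G->nodes[j];
+         ...
+       }
+*/
+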
+/* A structure that represents a node in the DDG. */
+struct ddg_node
+{
+ /* Each node has a unique CUID index. These indices increase monotonically
+ (according to the order of the corresponding INSN in the BB), starting
+ from 0 with no gaps. */
+ int cuid;
+
+ /* The insn represented by the node. */
+ rtx_insn *insn;
+
+ /* A note preceding INSN (or INSN itself), such that all insns linked
+ from FIRST_NOTE until INSN (inclusive of both) are moved together
+ when reordering the insns. This takes care of notes that should
+ continue to precede INSN. */
+ rtx_insn *first_note;
+
+ /* Incoming and outgoing dependency edges. */
+ ddg_edge_ptr in;
+ ddg_edge_ptr out;
+
+ /* Each bit corresponds to a ddg_node according to its cuid, and is
+ set iff the node is a successor/predecessor of "this" node. */
+ sbitmap successors;
+ sbitmap predecessors;
+
+ /* Temporary array used for Floyd-Warshall algorithm to find
+ scc recurrence length. */
+ int *max_dist;
+
+ /* For general use by algorithms manipulating the ddg. */
+ union {
+ int count;
+ void *info;
+ } aux;
+};
+
+/* A structure that represents an edge in the DDG. */
+struct ddg_edge
+{
+ /* The source and destination nodes of the dependency edge. */
+ ddg_node_ptr src;
+ ddg_node_ptr dest;
+
+ /* TRUE, OUTPUT or ANTI dependency. */
+ dep_type type;
+
+ /* REG or MEM dependency. */
+ dep_data_type data_type;
+
+ /* Latency of the dependency. */
+ int latency;
+
+ /* The distance: number of loop iterations the dependency crosses. */
+ int distance;
+
+ /* The following two fields are used to form a linked list of the in/out
+ going edges to/from each node. */
+ ddg_edge_ptr next_in;
+ ddg_edge_ptr next_out;
+
+ /* True when the edge is already in an SCC. */
+ bool in_scc;
+};
+
+/* This structure holds the Data Dependence Graph for a basic block. */
+struct ddg
+{
+ /* The basic block for which this DDG is built. */
+ basic_block bb;
+
+ /* Number of instructions in the basic block. */
+ int num_nodes;
+
+ /* Number of load/store instructions in the BB - statistics. */
+ int num_loads;
+ int num_stores;
+
+ /* This array holds the nodes in the graph; it is indexed by the node
+ cuid, which follows the order of the instructions in the BB. */
+ ddg_node_ptr nodes;
+
+ /* The branch closing the loop. */
+ ddg_node_ptr closing_branch;
+
+ /* Build dependence edges for closing_branch, when set. In certain cases,
+ the closing branch can be dealt with separately from the insns of the
+ loop, and then no such deps are needed. */
+ int closing_branch_deps;
+
+ /* Array and number of backarcs (edges with distance > 0) in the DDG. */
+ int num_backarcs;
+ ddg_edge_ptr *backarcs;
+};
+
+
+/* Holds information on an SCC (Strongly Connected Component) of the DDG. */
+struct ddg_scc
+{
+ /* A bitmap that represents the nodes of the DDG that are in the SCC. */
+ sbitmap nodes;
+
+ /* Array and number of backarcs (edges with distance > 0) in the SCC. */
+ ddg_edge_ptr *backarcs;
+ int num_backarcs;
+
+ /* The maximum of (total_latency/total_distance) over all cycles in SCC. */
+ int recurrence_length;
+};
+
+/* This structure holds the SCCs of the DDG. */
+struct ddg_all_sccs
+{
+ /* Array that holds the SCCs in the DDG, and their number. */
+ ddg_scc_ptr *sccs;
+ int num_sccs;
+
+ ddg_ptr ddg;
+};
+
+
+ddg_ptr create_ddg (basic_block, int closing_branch_deps);
+void free_ddg (ddg_ptr);
+
+void print_ddg (FILE *, ddg_ptr);
+void vcg_print_ddg (FILE *, ddg_ptr);
+void print_ddg_edge (FILE *, ddg_edge_ptr);
+void print_sccs (FILE *, ddg_all_sccs_ptr, ddg_ptr);
+
+ddg_node_ptr get_node_of_insn (ddg_ptr, rtx_insn *);
+
+void find_successors (sbitmap result, ddg_ptr, sbitmap);
+void find_predecessors (sbitmap result, ddg_ptr, sbitmap);
+
+ddg_all_sccs_ptr create_ddg_all_sccs (ddg_ptr);
+void free_ddg_all_sccs (ddg_all_sccs_ptr);
+
+int find_nodes_on_paths (sbitmap result, ddg_ptr, sbitmap from, sbitmap to);
+
+bool autoinc_var_is_used_p (rtx_insn *, rtx_insn *);
+
+#endif /* GCC_DDG_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/debug.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/debug.h
new file mode 100644
index 0000000..05512bc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/debug.h
@@ -0,0 +1,281 @@
+/* Debug hooks for GCC.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DEBUG_H
+#define GCC_DEBUG_H
+
+/* This structure contains hooks for the debug information output
+ functions, accessed through the global instance debug_hooks set in
+ toplev.cc according to command line options. */
+/* WARNING: Do not add new debug hook targets - DWARF will be the only
+ way to speak debug to the middle-end once we are able to get rid of
+ the remaining targets. If you need alternate output formats, generate
+ them off the DWARF representation instead. */
+struct gcc_debug_hooks
+{
+ /* Initialize debug output. MAIN_FILENAME is the name of the main
+ input file. */
+ void (* init) (const char *main_filename);
+
+ /* Output debug symbols. */
+ void (* finish) (const char *main_filename);
+
+ /* Run cleanups necessary after early debug generation. */
+ void (* early_finish) (const char *main_filename);
+
+ /* Called from cgraph_optimize before starting to assemble
+ functions/variables/toplevel asms. */
+ void (* assembly_start) (void);
+
+ /* Macro defined on line LINE with name and expansion TEXT. */
+ void (* define) (unsigned int line, const char *text);
+
+ /* MACRO undefined on line LINE. */
+ void (* undef) (unsigned int line, const char *macro);
+
+ /* Record the beginning of a new source file FILE from LINE number
+ in the previous one. */
+ void (* start_source_file) (unsigned int line, const char *file);
+
+ /* Record the resumption of a source file. LINE is the line number
+ in the source file we are returning to. */
+ void (* end_source_file) (unsigned int line);
+
+ /* Record the beginning of block N, counting from 1 and not
+ including the function-scope block, at LINE. */
+ void (* begin_block) (unsigned int line, unsigned int n);
+
+ /* Record the end of a block. Arguments as for begin_block. */
+ void (* end_block) (unsigned int line, unsigned int n);
+
+ /* Returns nonzero if it is appropriate not to emit any debugging
+ information for BLOCK, because it doesn't contain any
+ instructions. This may not be the case for blocks containing
+ nested functions, since we may actually call such a function even
+ though the BLOCK information is messed up. Defaults to true. */
+ bool (* ignore_block) (const_tree);
+
+ /* Record a source file location at (FILE, LINE, COLUMN, DISCRIMINATOR). */
+ void (* source_line) (unsigned int line, unsigned int column,
+ const char *file, int discriminator, bool is_stmt);
+
+ /* Record a source file location for a DECL_IGNORED_P function. */
+ void (* set_ignored_loc) (unsigned int line, unsigned int column,
+ const char *file);
+
+ /* Called at start of prologue code. LINE is the first line in the
+ function. */
+ void (* begin_prologue) (unsigned int line, unsigned int column,
+ const char *file);
+
+ /* Called at end of prologue code. LINE is the first line in the
+ function. */
+ void (* end_prologue) (unsigned int line, const char *file);
+
+ /* Called at beginning of epilogue code. */
+ void (* begin_epilogue) (unsigned int line, const char *file);
+
+ /* Record end of epilogue code. */
+ void (* end_epilogue) (unsigned int line, const char *file);
+
+ /* Called at start of function DECL, before it is declared. */
+ void (* begin_function) (tree decl);
+
+ /* Record end of function. LINE is highest line number in function. */
+ void (* end_function) (unsigned int line);
+
+ /* Register UNIT as the main translation unit. Called from front-ends when
+ they create their main translation unit. */
+ void (* register_main_translation_unit) (tree);
+
+ /* Debug information for a function DECL. This might include the
+ function name (a symbol), its parameters, and the block that
+ makes up the function's body, and the local variables of the
+ function.
+
+ This is only called for FUNCTION_DECLs. It is part of the late
+ debug pass and is called from rest_of_handle_final.
+
+ Location information is available at this point.
+
+ See the documentation for early_global_decl and late_global_decl
+ for other entry points into the debugging back-ends for DECLs. */
+ void (* function_decl) (tree decl);
+
+ /* Debug information for a global DECL. Called from the parser
+ after the parsing process has finished.
+
+ This gets called for both variables and functions.
+
+ Location information is not available at this point, but it is a
+ good probe point to get access to symbols before they get
+ optimized away.
+
+ This hook may be called on VAR_DECLs or FUNCTION_DECLs. It is up
+ to the hook to use what it needs. */
+ void (* early_global_decl) (tree decl);
+
+ /* Augment debug information generated by early_global_decl with
+ more complete debug info (if applicable). Called from toplev.cc
+ after the compilation proper has finished and cgraph information
+ is available.
+
+ This gets called for both variables and functions.
+
+ Location information is usually available at this point, unless
+ the hook is being called for a decl that has been optimized away.
+
+ This hook may be called on VAR_DECLs or FUNCTION_DECLs. It is up
+ to the hook to use what it needs. */
+ void (* late_global_decl) (tree decl);
+
+ /* Debug information for a type DECL. Called from toplev.cc after
+ compilation proper, also from various language front ends to
+ record built-in types. The second argument is properly a
+ boolean, which indicates whether or not the type is a "local"
+ type as determined by the language. (It's not a boolean for
+ legacy reasons.) */
+ void (* type_decl) (tree decl, int local);
+
+ /* Debug information for imported modules and declarations. */
+ void (* imported_module_or_decl) (tree decl, tree name,
+ tree context, bool child,
+ bool implicit);
+
+ /* Return true if a DIE for the tree is available and return a symbol
+ and offset that can be used to refer to it externally. */
+ bool (* die_ref_for_decl) (tree, const char **, unsigned HOST_WIDE_INT *);
+
+ /* Early debug information for the tree is available at symbol plus
+ offset externally. */
+ void (* register_external_die) (tree, const char *, unsigned HOST_WIDE_INT);
+
+ /* DECL is an inline function, whose body is present, but which is
+ not being output at this point. */
+ void (* deferred_inline_function) (tree decl);
+
+ /* DECL is an inline function which is about to be emitted out of
+ line. The hook is useful to, e.g., emit abstract debug info for
+ the inline before it gets mangled by optimization. */
+ void (* outlining_inline_function) (tree decl);
+
+ /* Called from final_scan_insn for any CODE_LABEL insn whose
+ LABEL_NAME is non-null. */
+ void (* label) (rtx_code_label *);
+
+ /* Called after the start and before the end of writing a PCH file.
+ The parameter is 0 if after the start, 1 if before the end. */
+ void (* handle_pch) (unsigned int);
+
+ /* Called from final_scan_insn for any NOTE_INSN_VAR_LOCATION note. */
+ void (* var_location) (rtx_insn *);
+
+ /* Called from final_scan_insn for any NOTE_INSN_INLINE_ENTRY note. */
+ void (* inline_entry) (tree block);
+
+ /* Called from finalize_size_functions for size functions so that their body
+ can be encoded in the debug info to describe the layout of variable-length
+ structures. */
+ void (* size_function) (tree decl);
+
+ /* Called from final_scan_insn if there is a switch between hot and cold
+ text sections. */
+ void (* switch_text_section) (void);
+
+ /* Called from grokdeclarator. Replaces the anonymous name with the
+ type name. */
+ void (* set_name) (tree, tree);
+
+ /* This is 1 if the debug writer wants to see start and end commands for the
+ main source files, and 0 otherwise. */
+ int start_end_main_source_file;
+
+ /* The type of symtab field used by these debug hooks. This is one
+ of the TYPE_SYMTAB_IS_xxx values defined in tree.h. */
+ int tree_type_symtab_field;
+};
+
+extern const struct gcc_debug_hooks *debug_hooks;
+
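+/* An illustrative sketch (not a quote from toplev.cc): the driver picks
+   an implementation and calls through the global instance, e.g.
+
+     debug_hooks = &dwarf2_debug_hooks;
+     debug_hooks->init (main_filename);
+     ...
+     debug_hooks->finish (main_filename);
+*/
+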
+/* The do-nothing hooks. */
+extern void debug_nothing_void (void);
+extern void debug_nothing_charstar (const char *);
+extern void debug_nothing_int_int_charstar (unsigned int, unsigned int,
+ const char *);
+extern void debug_nothing_int_charstar (unsigned int, const char *);
+extern void debug_nothing_int_int_charstar_int_bool (unsigned int,
+ unsigned int,
+ const char *,
+ int, bool);
+extern void debug_nothing_int (unsigned int);
+extern void debug_nothing_int_int (unsigned int, unsigned int);
+extern void debug_nothing_tree (tree);
+extern void debug_nothing_tree_tree (tree, tree);
+extern void debug_nothing_tree_int (tree, int);
+extern void debug_nothing_tree_tree_tree_bool_bool (tree, tree, tree,
+ bool, bool);
+extern bool debug_true_const_tree (const_tree);
+extern void debug_nothing_rtx_insn (rtx_insn *);
+extern void debug_nothing_rtx_code_label (rtx_code_label *);
+extern bool debug_false_tree_charstarstar_uhwistar (tree, const char **,
+ unsigned HOST_WIDE_INT *);
+extern void debug_nothing_tree_charstar_uhwi (tree, const char *,
+ unsigned HOST_WIDE_INT);
+
+/* Hooks for various debug formats. */
+extern const struct gcc_debug_hooks do_nothing_debug_hooks;
+extern const struct gcc_debug_hooks xcoff_debug_hooks;
+extern const struct gcc_debug_hooks dwarf2_debug_hooks;
+extern const struct gcc_debug_hooks dwarf2_lineno_debug_hooks;
+extern const struct gcc_debug_hooks vmsdbg_debug_hooks;
+
+/* Dwarf2 frame information. */
+
+extern void dwarf2out_begin_prologue (unsigned int, unsigned int,
+ const char *);
+extern void dwarf2out_vms_end_prologue (unsigned int, const char *);
+extern void dwarf2out_vms_begin_epilogue (unsigned int, const char *);
+extern void dwarf2out_end_epilogue (unsigned int, const char *);
+extern void dwarf2out_frame_finish (void);
+extern bool dwarf2out_do_eh_frame (void);
+extern bool dwarf2out_do_frame (void);
+extern bool dwarf2out_do_cfi_asm (void);
+extern void dwarf2out_switch_text_section (void);
+extern bool dwarf2out_default_as_loc_support (void);
+extern bool dwarf2out_default_as_locview_support (void);
+
+/* For -fdump-go-spec. */
+
+extern const struct gcc_debug_hooks *
+dump_go_spec_init (const char *, const struct gcc_debug_hooks *);
+
+/* Instance discriminator mapping table. See final.cc. */
+typedef hash_map<const_tree, int> decl_to_instance_map_t;
+extern decl_to_instance_map_t *decl_to_instance_map;
+
+/* Allocate decl_to_instance_map with COUNT slots to begin with, if it
+   hasn't been allocated yet. */
+
+inline decl_to_instance_map_t *
+maybe_create_decl_to_instance_map (int count = 13)
+{
+ if (!decl_to_instance_map)
+ decl_to_instance_map = new decl_to_instance_map_t (count);
+ return decl_to_instance_map;
+}
+
+#endif /* !GCC_DEBUG_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/defaults.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/defaults.h
new file mode 100644
index 0000000..dc6f09c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/defaults.h
@@ -0,0 +1,1464 @@
+/* Definitions of various defaults for tm.h macros.
+ Copyright (C) 1992-2023 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@monkeys.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DEFAULTS_H
+#define GCC_DEFAULTS_H
+
+/* How to start an assembler comment. */
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START ";#"
+#endif
+
+/* Store in OUTPUT a string (made with alloca) containing an
+ assembler-name for a local static variable or function named NAME.
+ LABELNO is an integer which is different for each call. */
+
+#ifndef ASM_PN_FORMAT
+# ifndef NO_DOT_IN_LABEL
+# define ASM_PN_FORMAT "%s.%lu"
+# else
+# ifndef NO_DOLLAR_IN_LABEL
+# define ASM_PN_FORMAT "%s$%lu"
+# else
+# define ASM_PN_FORMAT "__%s_%lu"
+# endif
+# endif
+#endif /* ! ASM_PN_FORMAT */
+
+#ifndef ASM_FORMAT_PRIVATE_NAME
+# define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+ do { const char *const name_ = (NAME); \
+ char *const output_ = (OUTPUT) = \
+ (char *) alloca (strlen (name_) + 32); \
+ sprintf (output_, ASM_PN_FORMAT, name_, (unsigned long)(LABELNO)); \
+ } while (0)
+#endif
+
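+/* For instance, with the default "%s.%lu" format above,
+   ASM_FORMAT_PRIVATE_NAME (buf, "foo", 42) leaves the string "foo.42"
+   in buf (an illustrative sketch; the separator depends on
+   NO_DOT_IN_LABEL and NO_DOLLAR_IN_LABEL).  */
+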
+/* Choose a reasonable default for ASM_OUTPUT_ASCII. */
+
+#ifndef ASM_OUTPUT_ASCII
+#define ASM_OUTPUT_ASCII(MYFILE, MYSTRING, MYLENGTH) \
+ do { \
+ FILE *_my_file = (MYFILE); \
+ const unsigned char *_hide_p = (const unsigned char *) (MYSTRING); \
+ int _hide_thissize = (MYLENGTH); \
+ { \
+ const unsigned char *p = _hide_p; \
+ int thissize = _hide_thissize; \
+ int i; \
+ fprintf (_my_file, "\t.ascii \""); \
+ \
+ for (i = 0; i < thissize; i++) \
+ { \
+ int c = p[i]; \
+ if (c == '\"' || c == '\\') \
+ putc ('\\', _my_file); \
+ if (ISPRINT (c)) \
+ putc (c, _my_file); \
+ else \
+ { \
+ fprintf (_my_file, "\\%o", c); \
+ /* After an octal-escape, if a digit follows, \
+ terminate one string constant and start another. \
+ The VAX assembler fails to stop reading the escape \
+ after three digits, so this is the only way we \
+ can get it to parse the data properly. */ \
+ if (i < thissize - 1 && ISDIGIT (p[i + 1])) \
+ fprintf (_my_file, "\"\n\t.ascii \""); \
+ } \
+ } \
+ fprintf (_my_file, "\"\n"); \
+ } \
+ } \
+ while (0)
+#endif
+
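+/* As a worked illustration of the default above: for the three bytes
+   '1', '\n', '2' it emits
+
+	.ascii "1\012"
+	.ascii "2"
+
+   starting a new string constant because a digit follows the octal
+   escape.  */
+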
+/* This is how we tell the assembler to equate two values. */
+#ifdef SET_ASM_OP
+#ifndef ASM_OUTPUT_DEF
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "%s", SET_ASM_OP); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+#endif
+#endif
+
+#ifndef IFUNC_ASM_TYPE
+#define IFUNC_ASM_TYPE "gnu_indirect_function"
+#endif
+
+#ifndef TLS_COMMON_ASM_OP
+#define TLS_COMMON_ASM_OP ".tls_common"
+#endif
+
+#if defined (HAVE_AS_TLS) && !defined (ASM_OUTPUT_TLS_COMMON)
+#define ASM_OUTPUT_TLS_COMMON(FILE, DECL, NAME, SIZE) \
+ do \
+ { \
+ fprintf ((FILE), "\t%s\t", TLS_COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), "," HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", \
+ (SIZE), DECL_ALIGN (DECL) / BITS_PER_UNIT); \
+ } \
+ while (0)
+#endif
+
+/* Decide whether to defer emitting the assembler output for an equate
+ of two values. The default is to not defer output. */
+#ifndef TARGET_DEFERRED_OUTPUT_DEFS
+#define TARGET_DEFERRED_OUTPUT_DEFS(DECL,TARGET) false
+#endif
+
+/* This is how to output the definition of a user-level label named
+ NAME, such as the label on variable NAME. */
+
+#ifndef ASM_OUTPUT_LABEL
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ do { \
+ assemble_name ((FILE), (NAME)); \
+ fputs (":\n", (FILE)); \
+ } while (0)
+#endif
+
+/* This is how to output the definition of a user-level label named
+ NAME, such as the label on a function. */
+
+#ifndef ASM_OUTPUT_FUNCTION_LABEL
+#define ASM_OUTPUT_FUNCTION_LABEL(FILE, NAME, DECL) \
+ ASM_OUTPUT_LABEL ((FILE), (NAME))
+#endif
+
+/* Output the definition of a compiler-generated label named NAME. */
+#ifndef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,NAME) \
+ do { \
+ assemble_name_raw ((FILE), (NAME)); \
+ fputs (":\n", (FILE)); \
+ } while (0)
+#endif
+
+/* This is how to output a reference to a user-level label named NAME. */
+
+#ifndef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+ do { \
+ fputs (user_label_prefix, (FILE)); \
+ fputs ((NAME), (FILE)); \
+ } while (0)
+#endif
+
+/* Allow target to print debug info labels specially. This is useful for
+ VLIW targets, since debug info labels should go into the middle of
+ instruction bundles instead of breaking them. */
+
+#ifndef ASM_OUTPUT_DEBUG_LABEL
+#define ASM_OUTPUT_DEBUG_LABEL(FILE, PREFIX, NUM) \
+ (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM)
+#endif
+
+/* This is how we tell the assembler that a symbol is weak. */
+#ifndef ASM_OUTPUT_WEAK_ALIAS
+#if defined (ASM_WEAKEN_LABEL) && defined (ASM_OUTPUT_DEF)
+#define ASM_OUTPUT_WEAK_ALIAS(STREAM, NAME, VALUE) \
+ do \
+ { \
+ ASM_WEAKEN_LABEL (STREAM, NAME); \
+ if (VALUE) \
+ ASM_OUTPUT_DEF (STREAM, NAME, VALUE); \
+ } \
+ while (0)
+#endif
+#endif
+
+/* This is how we tell the assembler that a symbol is a weak alias to
+ another symbol that doesn't require the other symbol to be defined.
+ Uses of the former will turn into weak uses of the latter, i.e.,
+ uses that, in case the latter is undefined, will not cause errors,
+ and will add it to the symbol table as weak undefined. However, if
+ the latter is referenced directly, a strong reference prevails. */
+#ifndef ASM_OUTPUT_WEAKREF
+#if defined HAVE_GAS_WEAKREF
+#define ASM_OUTPUT_WEAKREF(FILE, DECL, NAME, VALUE) \
+ do \
+ { \
+ fprintf ((FILE), "\t.weakref\t"); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ","); \
+ assemble_name ((FILE), (VALUE)); \
+ fprintf ((FILE), "\n"); \
+ } \
+ while (0)
+#endif
+#endif
+
+/* How to emit a .type directive. */
+#ifndef ASM_OUTPUT_TYPE_DIRECTIVE
+#if defined TYPE_ASM_OP && defined TYPE_OPERAND_FMT
+#define ASM_OUTPUT_TYPE_DIRECTIVE(STREAM, NAME, TYPE) \
+ do \
+ { \
+ fputs (TYPE_ASM_OP, STREAM); \
+ assemble_name (STREAM, NAME); \
+ fputs (", ", STREAM); \
+ fprintf (STREAM, TYPE_OPERAND_FMT, TYPE); \
+ putc ('\n', STREAM); \
+ } \
+ while (0)
+#endif
+#endif
+
+/* How to emit a .size directive. */
+#ifndef ASM_OUTPUT_SIZE_DIRECTIVE
+#ifdef SIZE_ASM_OP
+#define ASM_OUTPUT_SIZE_DIRECTIVE(STREAM, NAME, SIZE) \
+ do \
+ { \
+ HOST_WIDE_INT size_ = (SIZE); \
+ fputs (SIZE_ASM_OP, STREAM); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, ", " HOST_WIDE_INT_PRINT_DEC "\n", size_); \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_MEASURED_SIZE(STREAM, NAME) \
+ do \
+ { \
+ fputs (SIZE_ASM_OP, STREAM); \
+ assemble_name (STREAM, NAME); \
+ fputs (", .-", STREAM); \
+ assemble_name (STREAM, NAME); \
+ putc ('\n', STREAM); \
+ } \
+ while (0)
+
+#endif
+#endif
+
+/* This determines whether or not we support weak symbols. SUPPORTS_WEAK
+ must be a preprocessor constant. */
+#ifndef SUPPORTS_WEAK
+#if defined (ASM_WEAKEN_LABEL) || defined (ASM_WEAKEN_DECL)
+#define SUPPORTS_WEAK 1
+#else
+#define SUPPORTS_WEAK 0
+#endif
+#endif
+
+/* This determines whether or not we support weak symbols during target
+ code generation. TARGET_SUPPORTS_WEAK can be any valid C expression. */
+#ifndef TARGET_SUPPORTS_WEAK
+#define TARGET_SUPPORTS_WEAK (SUPPORTS_WEAK)
+#endif
+
+/* This determines whether or not we support the discriminator
+ attribute in the .loc directive. */
+#ifndef SUPPORTS_DISCRIMINATOR
+#ifdef HAVE_GAS_DISCRIMINATOR
+#define SUPPORTS_DISCRIMINATOR 1
+#else
+#define SUPPORTS_DISCRIMINATOR 0
+#endif
+#endif
+
+/* This determines whether or not we support marking sections with
+ SHF_GNU_RETAIN flag. Also require .init_array/.fini_array section
+ for constructors and destructors. */
+#ifndef SUPPORTS_SHF_GNU_RETAIN
+#if HAVE_GAS_SHF_GNU_RETAIN && HAVE_INITFINI_ARRAY_SUPPORT
+#define SUPPORTS_SHF_GNU_RETAIN 1
+#else
+#define SUPPORTS_SHF_GNU_RETAIN 0
+#endif
+#endif
+
+/* This determines whether or not we support link-once semantics. */
+#ifndef SUPPORTS_ONE_ONLY
+#ifdef MAKE_DECL_ONE_ONLY
+#define SUPPORTS_ONE_ONLY 1
+#else
+#define SUPPORTS_ONE_ONLY 0
+#endif
+#endif
+
+/* This determines whether weak symbols must be left out of a static
+ archive's table of contents. Defining this macro to be nonzero has
+ the consequence that certain symbols will not be made weak that
+ otherwise would be. The C++ ABI requires this macro to be zero;
+ see the documentation. */
+#ifndef TARGET_WEAK_NOT_IN_ARCHIVE_TOC
+#define TARGET_WEAK_NOT_IN_ARCHIVE_TOC 0
+#endif
+
+/* This determines whether or not we need linkonce unwind information. */
+#ifndef TARGET_USES_WEAK_UNWIND_INFO
+#define TARGET_USES_WEAK_UNWIND_INFO 0
+#endif
+
+/* By default, there is no prefix on user-defined symbols. */
+#ifndef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+#endif
+
+/* If the target supports weak symbols, define TARGET_ATTRIBUTE_WEAK to
+ provide a weak attribute. Else define it to nothing.
+
+ This would normally belong in ansidecl.h, but SUPPORTS_WEAK is
+ not available at that time.
+
+ Note, this is only for use by target files which we know are to be
+ compiled by GCC. */
+#ifndef TARGET_ATTRIBUTE_WEAK
+# if SUPPORTS_WEAK
+# define TARGET_ATTRIBUTE_WEAK __attribute__ ((weak))
+# else
+# define TARGET_ATTRIBUTE_WEAK
+# endif
+#endif
+
+/* By default we can assume that all global symbols are in one namespace,
+ across all shared libraries. */
+#ifndef MULTIPLE_SYMBOL_SPACES
+# define MULTIPLE_SYMBOL_SPACES 0
+#endif
+
+/* If the target supports init_priority C++ attribute, give
+ SUPPORTS_INIT_PRIORITY a nonzero value. */
+#ifndef SUPPORTS_INIT_PRIORITY
+#define SUPPORTS_INIT_PRIORITY 1
+#endif /* SUPPORTS_INIT_PRIORITY */
+
+/* If we have a definition of INCOMING_RETURN_ADDR_RTX, assume that
+ the rest of the DWARF 2 frame unwind support is also provided. */
+#if !defined (DWARF2_UNWIND_INFO) && defined (INCOMING_RETURN_ADDR_RTX)
+#define DWARF2_UNWIND_INFO 1
+#endif
+
+/* If we have named sections, and we're using crtstuff to run ctors,
+ use them for registering eh frame information. */
+#if defined (TARGET_ASM_NAMED_SECTION) && DWARF2_UNWIND_INFO \
+ && !defined (EH_FRAME_THROUGH_COLLECT2)
+#ifndef EH_FRAME_SECTION_NAME
+#define EH_FRAME_SECTION_NAME ".eh_frame"
+#endif
+#endif
+
+/* On many systems, different EH table encodings are used under
+ different circumstances. Some will require runtime relocations;
+ some will not. For those that do not require runtime relocations,
+ we would like to make the table read-only. However, since the
+ read-only tables may need to be combined with read-write tables
+ that do require runtime relocation, it is not safe to make the
+ tables read-only unless the linker will merge read-only and
+ read-write sections into a single read-write section. If your
+ linker does not have this ability, but your system is such that no
+ encoding used with non-PIC code will ever require a runtime
+ relocation, then you can define EH_TABLES_CAN_BE_READ_ONLY to 1 in
+ your target configuration file. */
+#ifndef EH_TABLES_CAN_BE_READ_ONLY
+#ifdef HAVE_LD_RO_RW_SECTION_MIXING
+#define EH_TABLES_CAN_BE_READ_ONLY 1
+#else
+#define EH_TABLES_CAN_BE_READ_ONLY 0
+#endif
+#endif
+
+/* Provide defaults for stuff that may not be defined when using
+ sjlj exceptions. */
+#ifndef EH_RETURN_DATA_REGNO
+#define EH_RETURN_DATA_REGNO(N) INVALID_REGNUM
+#endif
+
+/* Offset between the eh handler address and entry in eh tables. */
+#ifndef RETURN_ADDR_OFFSET
+#define RETURN_ADDR_OFFSET 0
+#endif
+
+#ifndef MASK_RETURN_ADDR
+#define MASK_RETURN_ADDR NULL_RTX
+#endif
+
+/* Number of hardware registers that go into the DWARF-2 unwind info.
+ If not defined, equals FIRST_PSEUDO_REGISTER. */
+
+#ifndef DWARF_FRAME_REGISTERS
+#define DWARF_FRAME_REGISTERS FIRST_PSEUDO_REGISTER
+#endif
+
+/* Offsets recorded in opcodes are a multiple of this alignment factor. */
+#ifndef DWARF_CIE_DATA_ALIGNMENT
+#ifdef STACK_GROWS_DOWNWARD
+#define DWARF_CIE_DATA_ALIGNMENT (-((int) UNITS_PER_WORD))
+#else
+#define DWARF_CIE_DATA_ALIGNMENT ((int) UNITS_PER_WORD)
+#endif
+#endif
+
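+/* E.g. with 4-byte words and a downward-growing stack the factor is -4,
+   so a CFA-relative offset of -16 is encoded in the CFI opcodes as 4
+   (the encoded value times the data alignment factor gives the real
+   offset).  */
+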
+/* The DWARF 2 CFA column which tracks the return address. Normally this
+ is the column for PC, or the first column after all of the hard
+ registers. */
+#ifndef DWARF_FRAME_RETURN_COLUMN
+#ifdef PC_REGNUM
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (PC_REGNUM)
+#else
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGISTERS
+#endif
+#endif
+
+/* How to renumber registers for gdb. If not defined, assume
+ no renumbering is necessary. */
+
+#ifndef DEBUGGER_REGNO
+#define DEBUGGER_REGNO(REGNO) (REGNO)
+#endif
+
+/* The mapping from gcc register number to DWARF 2 CFA column number.
+ By default, we just provide columns for all registers. */
+#ifndef DWARF_FRAME_REGNUM
+#define DWARF_FRAME_REGNUM(REG) DEBUGGER_REGNO (REG)
+#endif
+
+/* The mapping from dwarf CFA reg number to internal dwarf reg numbers. */
+#ifndef DWARF_REG_TO_UNWIND_COLUMN
+#define DWARF_REG_TO_UNWIND_COLUMN(REGNO) (REGNO)
+#endif
+
+/* Map register numbers held in the call frame info that gcc has
+ collected using DWARF_FRAME_REGNUM to those that should be output in
+ .debug_frame and .eh_frame. */
+#ifndef DWARF2_FRAME_REG_OUT
+#define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) (REGNO)
+#endif
+
+/* The size of addresses as they appear in the Dwarf 2 data.
+ Some architectures use word addresses to refer to code locations,
+ but Dwarf 2 info always uses byte addresses. On such machines,
+ Dwarf 2 addresses need to be larger than the architecture's
+ pointers. */
+#ifndef DWARF2_ADDR_SIZE
+#define DWARF2_ADDR_SIZE ((POINTER_SIZE + BITS_PER_UNIT - 1) / BITS_PER_UNIT)
+#endif
+
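+/* For example, with 32-bit pointers and 8-bit units the default is
+   (32 + 8 - 1) / 8 = 4 bytes.  */
+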
+/* The size in bytes of a DWARF field indicating an offset or length
+ relative to a debug info section, specified to be 4 bytes in the
+ DWARF-2 specification. The SGI/MIPS ABI defines it to be the same
+ as PTR_SIZE. */
+#ifndef DWARF_OFFSET_SIZE
+#define DWARF_OFFSET_SIZE 4
+#endif
+
+/* The size in bytes of a DWARF 4 type signature. */
+#ifndef DWARF_TYPE_SIGNATURE_SIZE
+#define DWARF_TYPE_SIGNATURE_SIZE 8
+#endif
+
+/* Default sizes for base C types. If the sizes are different for
+ your target, you should override these values by defining the
+ appropriate symbols in your tm.h file. */
+
+#ifndef BITS_PER_WORD
+#define BITS_PER_WORD (BITS_PER_UNIT * UNITS_PER_WORD)
+#endif
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef BOOL_TYPE_SIZE
+/* `bool' has size and alignment `1', on almost all platforms. */
+#define BOOL_TYPE_SIZE CHAR_TYPE_SIZE
+#endif
+
+#ifndef SHORT_TYPE_SIZE
+#define SHORT_TYPE_SIZE (BITS_PER_UNIT * MIN ((UNITS_PER_WORD + 1) / 2, 2))
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_LONG_TYPE_SIZE
+#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE INT_TYPE_SIZE
+#endif
+
+#ifndef FLOAT_TYPE_SIZE
+#define FLOAT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef DOUBLE_TYPE_SIZE
+#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef DECIMAL32_TYPE_SIZE
+#define DECIMAL32_TYPE_SIZE 32
+#endif
+
+#ifndef DECIMAL64_TYPE_SIZE
+#define DECIMAL64_TYPE_SIZE 64
+#endif
+
+#ifndef DECIMAL128_TYPE_SIZE
+#define DECIMAL128_TYPE_SIZE 128
+#endif
+
+#ifndef SHORT_FRACT_TYPE_SIZE
+#define SHORT_FRACT_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef FRACT_TYPE_SIZE
+#define FRACT_TYPE_SIZE (BITS_PER_UNIT * 2)
+#endif
+
+#ifndef LONG_FRACT_TYPE_SIZE
+#define LONG_FRACT_TYPE_SIZE (BITS_PER_UNIT * 4)
+#endif
+
+#ifndef LONG_LONG_FRACT_TYPE_SIZE
+#define LONG_LONG_FRACT_TYPE_SIZE (BITS_PER_UNIT * 8)
+#endif
+
+#ifndef SHORT_ACCUM_TYPE_SIZE
+#define SHORT_ACCUM_TYPE_SIZE (SHORT_FRACT_TYPE_SIZE * 2)
+#endif
+
+#ifndef ACCUM_TYPE_SIZE
+#define ACCUM_TYPE_SIZE (FRACT_TYPE_SIZE * 2)
+#endif
+
+#ifndef LONG_ACCUM_TYPE_SIZE
+#define LONG_ACCUM_TYPE_SIZE (LONG_FRACT_TYPE_SIZE * 2)
+#endif
+
+#ifndef LONG_LONG_ACCUM_TYPE_SIZE
+#define LONG_LONG_ACCUM_TYPE_SIZE (LONG_LONG_FRACT_TYPE_SIZE * 2)
+#endif
+
+/* We let tm.h override the types used here, to handle trivial differences
+ such as the choice of unsigned int or long unsigned int for size_t.
+ When machines start needing nontrivial differences in the size type,
+ it would be best to do something here to figure out automatically
+ from other information what type to use. */
+
+#ifndef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+#endif
+
+#ifndef SIZETYPE
+#define SIZETYPE SIZE_TYPE
+#endif
+
+#ifndef PID_TYPE
+#define PID_TYPE "int"
+#endif
+
+/* If GCC knows the exact uint_least16_t and uint_least32_t types from
+ <stdint.h>, use them for char16_t and char32_t. Otherwise, use
+ these guesses; getting the wrong type of a given width will not
+ affect C++ name mangling because in C++ these are distinct types
+ not typedefs. */
+
+#ifndef CHAR8_TYPE
+#define CHAR8_TYPE "unsigned char"
+#endif
+
+#ifdef UINT_LEAST16_TYPE
+#define CHAR16_TYPE UINT_LEAST16_TYPE
+#else
+#define CHAR16_TYPE "short unsigned int"
+#endif
+
+#ifdef UINT_LEAST32_TYPE
+#define CHAR32_TYPE UINT_LEAST32_TYPE
+#else
+#define CHAR32_TYPE "unsigned int"
+#endif
+
+#ifndef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+#endif
+
+/* WCHAR_TYPE gets overridden by -fshort-wchar. */
+#define MODIFIED_WCHAR_TYPE \
+ (flag_short_wchar ? "short unsigned int" : WCHAR_TYPE)
+
+#ifndef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+#endif
+
+#ifndef WINT_TYPE
+#define WINT_TYPE "unsigned int"
+#endif
+
+#ifndef INTMAX_TYPE
+#define INTMAX_TYPE ((INT_TYPE_SIZE == LONG_LONG_TYPE_SIZE) \
+ ? "int" \
+ : ((LONG_TYPE_SIZE == LONG_LONG_TYPE_SIZE) \
+ ? "long int" \
+ : "long long int"))
+#endif
+
+#ifndef UINTMAX_TYPE
+#define UINTMAX_TYPE ((INT_TYPE_SIZE == LONG_LONG_TYPE_SIZE) \
+ ? "unsigned int" \
+ : ((LONG_TYPE_SIZE == LONG_LONG_TYPE_SIZE) \
+ ? "long unsigned int" \
+ : "long long unsigned int"))
+#endif
+
+
+/* There are no default definitions of these <stdint.h> types. */
+
+#ifndef SIG_ATOMIC_TYPE
+#define SIG_ATOMIC_TYPE ((const char *) NULL)
+#endif
+
+#ifndef INT8_TYPE
+#define INT8_TYPE ((const char *) NULL)
+#endif
+
+#ifndef INT16_TYPE
+#define INT16_TYPE ((const char *) NULL)
+#endif
+
+#ifndef INT32_TYPE
+#define INT32_TYPE ((const char *) NULL)
+#endif
+
+#ifndef INT64_TYPE
+#define INT64_TYPE ((const char *) NULL)
+#endif
+
+#ifndef UINT8_TYPE
+#define UINT8_TYPE ((const char *) NULL)
+#endif
+
+#ifndef UINT16_TYPE
+#define UINT16_TYPE ((const char *) NULL)
+#endif
+
+#ifndef UINT32_TYPE
+#define UINT32_TYPE ((const char *) NULL)
+#endif
+
+#ifndef UINT64_TYPE
+#define UINT64_TYPE ((const char *) NULL)
+#endif
+
+#ifndef INT_LEAST8_TYPE
+#define INT_LEAST8_TYPE ((const char *) NULL)
+#endif
+
+#ifndef INT_LEAST16_TYPE
+#define INT_LEAST16_TYPE ((const char *) NULL)
+#endif
+
+#ifndef INT_LEAST32_TYPE
+#define INT_LEAST32_TYPE ((const char *) NULL)
+#endif
+
+#ifndef INT_LEAST64_TYPE
+#define INT_LEAST64_TYPE ((const char *) NULL)
+#endif
+
+#ifndef UINT_LEAST8_TYPE
+#define UINT_LEAST8_TYPE ((const char *) NULL)
+#endif
+
+#ifndef UINT_LEAST16_TYPE
+#define UINT_LEAST16_TYPE ((const char *) NULL)
+#endif
+
+#ifndef UINT_LEAST32_TYPE
+#define UINT_LEAST32_TYPE ((const char *) NULL)
+#endif
+
+#ifndef UINT_LEAST64_TYPE
+#define UINT_LEAST64_TYPE ((const char *) NULL)
+#endif
+
+#ifndef INT_FAST8_TYPE
+#define INT_FAST8_TYPE ((const char *) NULL)
+#endif
+
+#ifndef INT_FAST16_TYPE
+#define INT_FAST16_TYPE ((const char *) NULL)
+#endif
+
+#ifndef INT_FAST32_TYPE
+#define INT_FAST32_TYPE ((const char *) NULL)
+#endif
+
+#ifndef INT_FAST64_TYPE
+#define INT_FAST64_TYPE ((const char *) NULL)
+#endif
+
+#ifndef UINT_FAST8_TYPE
+#define UINT_FAST8_TYPE ((const char *) NULL)
+#endif
+
+#ifndef UINT_FAST16_TYPE
+#define UINT_FAST16_TYPE ((const char *) NULL)
+#endif
+
+#ifndef UINT_FAST32_TYPE
+#define UINT_FAST32_TYPE ((const char *) NULL)
+#endif
+
+#ifndef UINT_FAST64_TYPE
+#define UINT_FAST64_TYPE ((const char *) NULL)
+#endif
+
+#ifndef INTPTR_TYPE
+#define INTPTR_TYPE ((const char *) NULL)
+#endif
+
+#ifndef UINTPTR_TYPE
+#define UINTPTR_TYPE ((const char *) NULL)
+#endif
+
+/* Width in bits of a pointer. Mind the value of the macro `Pmode'. */
+#ifndef POINTER_SIZE
+#define POINTER_SIZE BITS_PER_WORD
+#endif
+#ifndef POINTER_SIZE_UNITS
+#define POINTER_SIZE_UNITS ((POINTER_SIZE + BITS_PER_UNIT - 1) / BITS_PER_UNIT)
+#endif
+
+
+#ifndef PIC_OFFSET_TABLE_REGNUM
+#define PIC_OFFSET_TABLE_REGNUM INVALID_REGNUM
+#endif
+
+#ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
+#define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED 0
+#endif
+
+#ifndef TARGET_DLLIMPORT_DECL_ATTRIBUTES
+#define TARGET_DLLIMPORT_DECL_ATTRIBUTES 0
+#endif
+
+#ifndef TARGET_DECLSPEC
+#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
+/* If the target supports the "dllimport" attribute, users are
+ probably used to the "__declspec" syntax. */
+#define TARGET_DECLSPEC 1
+#else
+#define TARGET_DECLSPEC 0
+#endif
+#endif
+
+/* By default, the preprocessor should be invoked the same way in C++
+ as in C. */
+#ifndef CPLUSPLUS_CPP_SPEC
+#ifdef CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC CPP_SPEC
+#endif
+#endif
+
+#ifndef ACCUMULATE_OUTGOING_ARGS
+#define ACCUMULATE_OUTGOING_ARGS 0
+#endif
+
+/* By default, use the GNU runtime for Objective C. */
+#ifndef NEXT_OBJC_RUNTIME
+#define NEXT_OBJC_RUNTIME 0
+#endif
+
+/* Decide whether a function's arguments should be processed
+ from first to last or from last to first.
+
+ They should if the stack and args grow in opposite directions, but
+ only if we have push insns. */
+
+#ifdef PUSH_ROUNDING
+
+#ifndef PUSH_ARGS_REVERSED
+#if defined (STACK_GROWS_DOWNWARD) != defined (ARGS_GROW_DOWNWARD)
+#define PUSH_ARGS_REVERSED targetm.calls.push_argument (0)
+#endif
+#endif
+
+#endif
+
+#ifndef PUSH_ARGS_REVERSED
+#define PUSH_ARGS_REVERSED 0
+#endif
+
+/* Default value for the alignment (in bits) a C-conformant malloc has to
+ provide. This default is intended to be safe and always correct. */
+#ifndef MALLOC_ABI_ALIGNMENT
+#define MALLOC_ABI_ALIGNMENT BITS_PER_WORD
+#endif
+
+/* If PREFERRED_STACK_BOUNDARY is not defined, set it to STACK_BOUNDARY.
+ STACK_BOUNDARY is required. */
+#ifndef PREFERRED_STACK_BOUNDARY
+#define PREFERRED_STACK_BOUNDARY STACK_BOUNDARY
+#endif
+
+/* Set INCOMING_STACK_BOUNDARY to PREFERRED_STACK_BOUNDARY if it is not
+ defined. */
+#ifndef INCOMING_STACK_BOUNDARY
+#define INCOMING_STACK_BOUNDARY PREFERRED_STACK_BOUNDARY
+#endif
+
+#ifndef TARGET_DEFAULT_PACK_STRUCT
+#define TARGET_DEFAULT_PACK_STRUCT 0
+#endif
+
+/* By default, the vtable entries are void pointers, so the alignment
+ is the same as pointer alignment. The value of this macro specifies
+ the alignment of the vtable entry in bits. It should be defined only
+ when special alignment is necessary. */
+#ifndef TARGET_VTABLE_ENTRY_ALIGN
+#define TARGET_VTABLE_ENTRY_ALIGN POINTER_SIZE
+#endif
+
+/* There are a few non-descriptor entries in the vtable at offsets below
+ zero. If these entries must be padded (say, to preserve the alignment
+ specified by TARGET_VTABLE_ENTRY_ALIGN), set this to the number of
+ words in each data entry. */
+#ifndef TARGET_VTABLE_DATA_ENTRY_DISTANCE
+#define TARGET_VTABLE_DATA_ENTRY_DISTANCE 1
+#endif
+
+/* Decide whether it is safe to use a local alias for a virtual function
+ when constructing thunks. */
+#ifndef TARGET_USE_LOCAL_THUNK_ALIAS_P
+#ifdef ASM_OUTPUT_DEF
+#define TARGET_USE_LOCAL_THUNK_ALIAS_P(DECL) 1
+#else
+#define TARGET_USE_LOCAL_THUNK_ALIAS_P(DECL) 0
+#endif
+#endif
+
+/* Decide whether target supports aliases. */
+#ifndef TARGET_SUPPORTS_ALIASES
+#ifdef ASM_OUTPUT_DEF
+#define TARGET_SUPPORTS_ALIASES 1
+#else
+#define TARGET_SUPPORTS_ALIASES 0
+#endif
+#endif
+
+/* Select a format to encode pointers in exception handling data. We
+ prefer those that result in fewer dynamic relocations. Assume no
+ special support here and encode direct references. */
+#ifndef ASM_PREFERRED_EH_DATA_FORMAT
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) DW_EH_PE_absptr
+#endif
+
+/* By default, the C++ compiler will use the lowest bit of the pointer
+ to function to indicate a pointer-to-member-function points to a
+ virtual member function. However, if FUNCTION_BOUNDARY indicates
+ function addresses aren't always even, the lowest bit of the delta
+ field will be used. */
+#ifndef TARGET_PTRMEMFUNC_VBIT_LOCATION
+#define TARGET_PTRMEMFUNC_VBIT_LOCATION \
+ (FUNCTION_BOUNDARY >= 2 * BITS_PER_UNIT \
+ ? ptrmemfunc_vbit_in_pfn : ptrmemfunc_vbit_in_delta)
+#endif
+
+#ifndef DEFAULT_GDB_EXTENSIONS
+#define DEFAULT_GDB_EXTENSIONS 1
+#endif
+
+/* Default to DWARF2_DEBUGGING_INFO. Legacy targets can choose different
+ by defining PREFERRED_DEBUGGING_TYPE. */
+#ifndef PREFERRED_DEBUGGING_TYPE
+#if defined DWARF2_DEBUGGING_INFO || defined DWARF2_LINENO_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+#else
+#error You must define PREFERRED_DEBUGGING_TYPE if DWARF is not supported
+#endif
+#endif
+
+#ifndef FLOAT_LIB_COMPARE_RETURNS_BOOL
+#define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) false
+#endif
+
+/* True if the target's integer-comparison functions return { 0, 1, 2
+ } to indicate { <, ==, > }. False if { -1, 0, 1 } is used
+ instead. The libgcc routines are biased. */
+#ifndef TARGET_LIB_INT_CMP_BIASED
+#define TARGET_LIB_INT_CMP_BIASED (true)
+#endif
+
+/* If FLOAT_WORDS_BIG_ENDIAN is not defined in the header files,
+ then the word-endianness is the same as for integers. */
+#ifndef FLOAT_WORDS_BIG_ENDIAN
+#define FLOAT_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
+#endif
+
+#ifndef REG_WORDS_BIG_ENDIAN
+#define REG_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
+#endif
+
+
+#ifndef TARGET_DEC_EVAL_METHOD
+#define TARGET_DEC_EVAL_METHOD 2
+#endif
+
+#ifndef HAS_LONG_COND_BRANCH
+#define HAS_LONG_COND_BRANCH 0
+#endif
+
+#ifndef HAS_LONG_UNCOND_BRANCH
+#define HAS_LONG_UNCOND_BRANCH 0
+#endif
+
+/* Determine whether __cxa_atexit, rather than atexit, is used to
+ register C++ destructors for local statics and global objects. */
+#ifndef DEFAULT_USE_CXA_ATEXIT
+#define DEFAULT_USE_CXA_ATEXIT 0
+#endif
+
+#if GCC_VERSION >= 3000 && defined IN_GCC
+/* These old constraint macros shouldn't appear anywhere in a
+ configuration using MD constraint definitions. */
+#endif
+
+/* Determine whether the target runtime library is Bionic. */
+#ifndef TARGET_HAS_BIONIC
+#define TARGET_HAS_BIONIC 0
+#endif
+
+/* Indicate that CLZ and CTZ are undefined at zero. */
+#ifndef CLZ_DEFINED_VALUE_AT_ZERO
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) 0
+#endif
+#ifndef CTZ_DEFINED_VALUE_AT_ZERO
+#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) 0
+#endif
+
+/* Provide a default value for STORE_FLAG_VALUE. */
+#ifndef STORE_FLAG_VALUE
+#define STORE_FLAG_VALUE 1
+#endif
+
+/* This macro is used to determine the largest unit size that
+ move_by_pieces can use. */
+
+/* MOVE_MAX_PIECES is the number of bytes at a time which we can
+ move efficiently, as opposed to MOVE_MAX which is the maximum
+ number of bytes we can move with a single instruction. */
+
+#ifndef MOVE_MAX_PIECES
+#define MOVE_MAX_PIECES MOVE_MAX
+#endif
+
+/* STORE_MAX_PIECES is the number of bytes at a time that we can
+ store efficiently. Due to internal GCC limitations, this is
+ MOVE_MAX_PIECES limited by the number of bytes GCC can represent
+ for an immediate constant. */
+
+#ifndef STORE_MAX_PIECES
+#define STORE_MAX_PIECES MIN (MOVE_MAX_PIECES, 2 * sizeof (HOST_WIDE_INT))
+#endif
+
+/* Likewise for block comparisons. */
+#ifndef COMPARE_MAX_PIECES
+#define COMPARE_MAX_PIECES MOVE_MAX_PIECES
+#endif
+
+#ifndef MAX_MOVE_MAX
+#define MAX_MOVE_MAX MOVE_MAX
+#endif
+
+#ifndef MIN_UNITS_PER_WORD
+#define MIN_UNITS_PER_WORD UNITS_PER_WORD
+#endif
+
+#ifndef MAX_BITS_PER_WORD
+#define MAX_BITS_PER_WORD BITS_PER_WORD
+#endif
+
+#ifndef STACK_POINTER_OFFSET
+#define STACK_POINTER_OFFSET 0
+#endif
+
+#ifndef LOCAL_REGNO
+#define LOCAL_REGNO(REGNO) 0
+#endif
+
+#ifndef HONOR_REG_ALLOC_ORDER
+#define HONOR_REG_ALLOC_ORDER 0
+#endif
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers. */
+#ifndef EXIT_IGNORE_STACK
+#define EXIT_IGNORE_STACK 0
+#endif
+
+/* Assume that case vectors are not pc-relative. */
+#ifndef CASE_VECTOR_PC_RELATIVE
+#define CASE_VECTOR_PC_RELATIVE 0
+#endif
+
+/* Force minimum alignment to be able to use the least significant bits
+ for distinguishing descriptor addresses from code addresses. */
+#define FUNCTION_ALIGNMENT(ALIGN) \
+ (lang_hooks.custom_function_descriptors \
+ && targetm.calls.custom_function_descriptors > 0 \
+ ? MAX ((ALIGN), \
+ 2 * targetm.calls.custom_function_descriptors * BITS_PER_UNIT)\
+ : (ALIGN))
+
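+/* Illustrative arithmetic, assuming 8-bit units: a target that sets
+   custom_function_descriptors to 1 forces at least
+   2 * 1 * BITS_PER_UNIT = 16-bit (2-byte) code alignment, leaving the
+   least significant address bit free to tag descriptor addresses.  */
+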
+/* Assume that trampolines need function alignment. */
+#ifndef TRAMPOLINE_ALIGNMENT
+#define TRAMPOLINE_ALIGNMENT FUNCTION_ALIGNMENT (FUNCTION_BOUNDARY)
+#endif
+
+/* Register mappings for target machines without register windows. */
+#ifndef INCOMING_REGNO
+#define INCOMING_REGNO(N) (N)
+#endif
+
+#ifndef OUTGOING_REGNO
+#define OUTGOING_REGNO(N) (N)
+#endif
+
+#ifndef SHIFT_COUNT_TRUNCATED
+#define SHIFT_COUNT_TRUNCATED 0
+#endif
+
+#ifndef LEGITIMATE_PIC_OPERAND_P
+#define LEGITIMATE_PIC_OPERAND_P(X) 1
+#endif
+
+#ifndef TARGET_MEM_CONSTRAINT
+#define TARGET_MEM_CONSTRAINT 'm'
+#endif
+
+#ifndef REVERSIBLE_CC_MODE
+#define REVERSIBLE_CC_MODE(MODE) 0
+#endif
+
+/* Biggest alignment supported by the object file format of this machine. */
+#ifndef MAX_OFILE_ALIGNMENT
+#define MAX_OFILE_ALIGNMENT BIGGEST_ALIGNMENT
+#endif
+
+#ifndef FRAME_GROWS_DOWNWARD
+#define FRAME_GROWS_DOWNWARD 0
+#endif
+
+#ifndef RETURN_ADDR_IN_PREVIOUS_FRAME
+#define RETURN_ADDR_IN_PREVIOUS_FRAME 0
+#endif
+
+/* On most machines, the CFA coincides with the first incoming parm. */
+#ifndef ARG_POINTER_CFA_OFFSET
+#define ARG_POINTER_CFA_OFFSET(FNDECL) \
+ (FIRST_PARM_OFFSET (FNDECL) + crtl->args.pretend_args_size)
+#endif
+
+/* On most machines, we use the CFA as DW_AT_frame_base. */
+#ifndef CFA_FRAME_BASE_OFFSET
+#define CFA_FRAME_BASE_OFFSET(FNDECL) 0
+#endif
+
+/* The offset from the incoming value of %sp to the top of the stack frame
+ for the current function. */
+#ifndef INCOMING_FRAME_SP_OFFSET
+#define INCOMING_FRAME_SP_OFFSET 0
+#endif
+
+#ifndef HARD_REGNO_NREGS_HAS_PADDING
+#define HARD_REGNO_NREGS_HAS_PADDING(REGNO, MODE) 0
+#define HARD_REGNO_NREGS_WITH_PADDING(REGNO, MODE) -1
+#endif
+
+#ifndef OUTGOING_REG_PARM_STACK_SPACE
+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 0
+#endif
+
+/* MAX_STACK_ALIGNMENT is the maximum stack alignment guaranteed by
+ the backend. MAX_SUPPORTED_STACK_ALIGNMENT is the maximum best
+ effort stack alignment supported by the backend. If the backend
+ supports stack alignment, MAX_SUPPORTED_STACK_ALIGNMENT and
+ MAX_STACK_ALIGNMENT are the same. Otherwise, the incoming stack
+ boundary will limit the maximum guaranteed stack alignment. */
+#ifdef MAX_STACK_ALIGNMENT
+#define MAX_SUPPORTED_STACK_ALIGNMENT MAX_STACK_ALIGNMENT
+#else
+#define MAX_STACK_ALIGNMENT STACK_BOUNDARY
+#define MAX_SUPPORTED_STACK_ALIGNMENT PREFERRED_STACK_BOUNDARY
+#endif
+
+#define SUPPORTS_STACK_ALIGNMENT (MAX_STACK_ALIGNMENT > STACK_BOUNDARY)
+
+#ifndef LOCAL_ALIGNMENT
+#define LOCAL_ALIGNMENT(TYPE, ALIGNMENT) ALIGNMENT
+#endif
+
+#ifndef STACK_SLOT_ALIGNMENT
+#define STACK_SLOT_ALIGNMENT(TYPE,MODE,ALIGN) \
+ ((TYPE) ? LOCAL_ALIGNMENT ((TYPE), (ALIGN)) : (ALIGN))
+#endif
+
+#ifndef LOCAL_DECL_ALIGNMENT
+#define LOCAL_DECL_ALIGNMENT(DECL) \
+ LOCAL_ALIGNMENT (TREE_TYPE (DECL), DECL_ALIGN (DECL))
+#endif
+
+#ifndef MINIMUM_ALIGNMENT
+#define MINIMUM_ALIGNMENT(EXP,MODE,ALIGN) (ALIGN)
+#endif
+
+/* Alignment value for attribute ((aligned)). */
+#ifndef ATTRIBUTE_ALIGNED_VALUE
+#define ATTRIBUTE_ALIGNED_VALUE BIGGEST_ALIGNMENT
+#endif
+
+/* For most ports anything that evaluates to a constant symbolic
+ or integer value is acceptable as a constant address. */
+#ifndef CONSTANT_ADDRESS_P
+#define CONSTANT_ADDRESS_P(X) (CONSTANT_P (X) && GET_CODE (X) != CONST_DOUBLE)
+#endif
+
+#ifndef MAX_FIXED_MODE_SIZE
+#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
+#endif
+
+/* Nonzero if structures and unions should be returned in memory.
+
+ This should only be defined if compatibility with another compiler or
+ with an ABI is needed, because it results in slower code. */
+
+#ifndef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 1
+#endif
+
+#ifndef PCC_BITFIELD_TYPE_MATTERS
+#define PCC_BITFIELD_TYPE_MATTERS false
+#endif
+
+#ifndef INSN_SETS_ARE_DELAYED
+#define INSN_SETS_ARE_DELAYED(INSN) false
+#endif
+
+#ifndef INSN_REFERENCES_ARE_DELAYED
+#define INSN_REFERENCES_ARE_DELAYED(INSN) false
+#endif
+
+#ifndef NO_FUNCTION_CSE
+#define NO_FUNCTION_CSE false
+#endif
+
+#ifndef HARD_REGNO_RENAME_OK
+#define HARD_REGNO_RENAME_OK(FROM, TO) true
+#endif
+
+#ifndef EPILOGUE_USES
+#define EPILOGUE_USES(REG) false
+#endif
+
+#ifndef ARGS_GROW_DOWNWARD
+#define ARGS_GROW_DOWNWARD 0
+#endif
+
+#ifndef STACK_GROWS_DOWNWARD
+#define STACK_GROWS_DOWNWARD 0
+#endif
+
+#ifndef STACK_PUSH_CODE
+#if STACK_GROWS_DOWNWARD
+#define STACK_PUSH_CODE PRE_DEC
+#else
+#define STACK_PUSH_CODE PRE_INC
+#endif
+#endif
+
+/* Default value for flag_pie when flag_pie is initialized to -1:
+ --enable-default-pie: Default flag_pie to -fPIE.
+ --disable-default-pie: Default flag_pie to 0.
+ */
+#ifdef ENABLE_DEFAULT_PIE
+# ifndef DEFAULT_FLAG_PIE
+# define DEFAULT_FLAG_PIE 2
+# endif
+#else
+# define DEFAULT_FLAG_PIE 0
+#endif
+
+#ifndef SWITCHABLE_TARGET
+#define SWITCHABLE_TARGET 0
+#endif
+
+/* If the target supports integers that are wider than two
+ HOST_WIDE_INTs on the host compiler, then the target should define
+ TARGET_SUPPORTS_WIDE_INT and make the appropriate fixups.
+ Otherwise the compiler really is not robust. */
+#ifndef TARGET_SUPPORTS_WIDE_INT
+#define TARGET_SUPPORTS_WIDE_INT 0
+#endif
+
+#ifndef SHORT_IMMEDIATES_SIGN_EXTEND
+#define SHORT_IMMEDIATES_SIGN_EXTEND 0
+#endif
+
+#ifndef WORD_REGISTER_OPERATIONS
+#define WORD_REGISTER_OPERATIONS 0
+#endif
+
+#ifndef LOAD_EXTEND_OP
+#define LOAD_EXTEND_OP(M) UNKNOWN
+#endif
+
+#ifndef INITIAL_FRAME_ADDRESS_RTX
+#define INITIAL_FRAME_ADDRESS_RTX NULL
+#endif
+
+#ifndef SETUP_FRAME_ADDRESSES
+#define SETUP_FRAME_ADDRESSES() do { } while (0)
+#endif
+
+#ifndef DYNAMIC_CHAIN_ADDRESS
+#define DYNAMIC_CHAIN_ADDRESS(x) (x)
+#endif
+
+#ifndef FRAME_ADDR_RTX
+#define FRAME_ADDR_RTX(x) (x)
+#endif
+
+#ifndef REVERSE_CONDITION
+#define REVERSE_CONDITION(code, mode) reverse_condition (code)
+#endif
+
+#ifndef TARGET_PECOFF
+#define TARGET_PECOFF 0
+#endif
+
+#ifndef TARGET_COFF
+#define TARGET_COFF 0
+#endif
+
+#ifndef EH_RETURN_HANDLER_RTX
+#define EH_RETURN_HANDLER_RTX NULL
+#endif
+
+#ifdef GCC_INSN_FLAGS_H
+/* Dependent default target macro definitions
+
+ This section of defaults.h defines target macros that depend on generated
+ headers. This is a bit awkward: We want to put all default definitions
+ for target macros in defaults.h, but some of the defaults depend on the
+ HAVE_* flags defines of insn-flags.h. But insn-flags.h is not always
+ included by files that do include defaults.h.
+
+ Fortunately, the default macro definitions that depend on the HAVE_*
+ macros are also the ones that will only be used inside GCC itself, i.e.
+ not in the gen* programs or in target objects like libgcc.
+
+ Obviously, it would be best to keep this section of defaults.h as small
+ as possible, by converting the macros defined below to target hooks or
+ functions.
+*/
+
+/* The default branch cost is 1. */
+#ifndef BRANCH_COST
+#define BRANCH_COST(speed_p, predictable_p) 1
+#endif
+
+/* If a memory-to-memory move would take MOVE_RATIO or more simple
+ move-instruction sequences, we will do a cpymem or libcall instead. */
+
+#ifndef MOVE_RATIO
+#if defined (HAVE_cpymemqi) || defined (HAVE_cpymemhi) || defined (HAVE_cpymemsi) || defined (HAVE_cpymemdi) || defined (HAVE_cpymemti)
+#define MOVE_RATIO(speed) 2
+#else
+/* If we are optimizing for space (-Os), cut down the default move ratio. */
+#define MOVE_RATIO(speed) ((speed) ? 15 : 3)
+#endif
+#endif
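+
+/* Editorial sketch, not part of the imported header: with the generic
+   default above, copying a 128-byte struct on a 64-bit target would
+   need 128 / UNITS_PER_WORD = 16 word moves, which exceeds both 15
+   (the speed default) and 3 (the size default), so the copy is
+   emitted as a cpymem pattern or a memcpy libcall instead of inline
+   moves.  */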
+
+/* If a clear memory operation would take CLEAR_RATIO or more simple
+ move-instruction sequences, we will do a setmem or libcall instead. */
+
+#ifndef CLEAR_RATIO
+#if defined (HAVE_setmemqi) || defined (HAVE_setmemhi) || defined (HAVE_setmemsi) || defined (HAVE_setmemdi) || defined (HAVE_setmemti)
+#define CLEAR_RATIO(speed) 2
+#else
+/* If we are optimizing for space, cut down the default clear ratio. */
+#define CLEAR_RATIO(speed) ((speed) ? 15 : 3)
+#endif
+#endif
+
+/* If a memory set (to value other than zero) operation would take
+ SET_RATIO or more simple move-instruction sequences, we will do a setmem
+ or libcall instead. */
+#ifndef SET_RATIO
+#define SET_RATIO(speed) MOVE_RATIO (speed)
+#endif
+
+/* Supply a default definition of STACK_SAVEAREA_MODE for emit_stack_save.
+ Normally move_insn, so Pmode stack pointer. */
+
+#ifndef STACK_SAVEAREA_MODE
+#define STACK_SAVEAREA_MODE(LEVEL) Pmode
+#endif
+
+/* Supply a default definition of STACK_SIZE_MODE for
+ allocate_dynamic_stack_space. Normally PLUS/MINUS, so word_mode. */
+
+#ifndef STACK_SIZE_MODE
+#define STACK_SIZE_MODE word_mode
+#endif
+
+/* Default value for flag_stack_protect when flag_stack_protect is initialized to -1:
+ --enable-default-ssp: Default flag_stack_protect to -fstack-protector-strong.
+ --disable-default-ssp: Default flag_stack_protect to 0.
+ */
+#ifdef ENABLE_DEFAULT_SSP
+# ifndef DEFAULT_FLAG_SSP
+# define DEFAULT_FLAG_SSP 3
+# endif
+#else
+# define DEFAULT_FLAG_SSP 0
+#endif
+
+/* Provide default values for the macros controlling stack checking. */
+
+/* The default is neither full builtin stack checking... */
+#ifndef STACK_CHECK_BUILTIN
+#define STACK_CHECK_BUILTIN 0
+#endif
+
+/* ...nor static builtin stack checking. */
+#ifndef STACK_CHECK_STATIC_BUILTIN
+#define STACK_CHECK_STATIC_BUILTIN 0
+#endif
+
+/* The default interval is one page (4096 bytes). */
+#ifndef STACK_CHECK_PROBE_INTERVAL_EXP
+#define STACK_CHECK_PROBE_INTERVAL_EXP 12
+#endif
+
+/* The default is not to move the stack pointer. */
+#ifndef STACK_CHECK_MOVING_SP
+#define STACK_CHECK_MOVING_SP 0
+#endif
+
+/* This is a kludge to try to capture the discrepancy between the old
+ mechanism (generic stack checking) and the new mechanism (static
+ builtin stack checking). STACK_CHECK_PROTECT needs to be bumped
+ for the latter because part of the protection area is effectively
+ included in STACK_CHECK_MAX_FRAME_SIZE for the former. */
+#ifdef STACK_CHECK_PROTECT
+#define STACK_OLD_CHECK_PROTECT STACK_CHECK_PROTECT
+#else
+#define STACK_OLD_CHECK_PROTECT \
+ (!global_options.x_flag_exceptions \
+ ? 75 * UNITS_PER_WORD \
+ : targetm_common.except_unwind_info (&global_options) == UI_SJLJ \
+ ? 4 * 1024 \
+ : 8 * 1024)
+#endif
+
+/* Minimum amount of stack required to recover from an anticipated stack
+ overflow detection. The default value conveys an estimate of the amount
+ of stack required to propagate an exception. */
+#ifndef STACK_CHECK_PROTECT
+#define STACK_CHECK_PROTECT \
+ (!global_options.x_flag_exceptions \
+ ? 4 * 1024 \
+ : targetm_common.except_unwind_info (&global_options) == UI_SJLJ \
+ ? 8 * 1024 \
+ : 12 * 1024)
+#endif
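+
+/* Editorial sketch, not part of the imported header: on a 64-bit
+   target (UNITS_PER_WORD == 8) without exceptions the old mechanism
+   protects 75 * 8 = 600 bytes while the new one reserves 4 * 1024;
+   in the non-SJLJ exception case the figures are 8 KB and 12 KB
+   respectively.  */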
+
+/* Make the maximum frame size be the largest we can and still only need
+ one probe per function. */
+#ifndef STACK_CHECK_MAX_FRAME_SIZE
+#define STACK_CHECK_MAX_FRAME_SIZE \
+ ((1 << STACK_CHECK_PROBE_INTERVAL_EXP) - UNITS_PER_WORD)
+#endif
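+
+/* Editorial sketch, not part of the imported header: with the default
+   probe interval of 2**12 bytes and 8-byte words this evaluates to
+   (1 << 12) - 8 = 4088, so any frame up to that size needs only a
+   single probe.  */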
+
+/* This is arbitrary, but should be large enough everywhere. */
+#ifndef STACK_CHECK_FIXED_FRAME_SIZE
+#define STACK_CHECK_FIXED_FRAME_SIZE (4 * UNITS_PER_WORD)
+#endif
+
+/* Provide a reasonable default for the maximum size of an object to
+ allocate in the fixed frame. We may need to be able to make this
+ controllable by the user at some point. */
+#ifndef STACK_CHECK_MAX_VAR_SIZE
+#define STACK_CHECK_MAX_VAR_SIZE (STACK_CHECK_MAX_FRAME_SIZE / 100)
+#endif
+
+/* By default, the C++ compiler will use function addresses in the
+ vtable entries. Setting this nonzero tells the compiler to use
+ function descriptors instead. The value of this macro says how
+ many words wide the descriptor is (normally 2). It is assumed
+ that the address of a function descriptor may be treated as a
+ pointer to a function. */
+#ifndef TARGET_VTABLE_USES_DESCRIPTORS
+#define TARGET_VTABLE_USES_DESCRIPTORS 0
+#endif
+
+#endif /* GCC_INSN_FLAGS_H */
+
+#ifndef DWARF_GNAT_ENCODINGS_DEFAULT
+#define DWARF_GNAT_ENCODINGS_DEFAULT DWARF_GNAT_ENCODINGS_GDB
+#endif
+
+/* When generating dwarf info, the default standard version we'll honor
+   and advertise in the absence of -gdwarf-<N> on the command line. */
+#ifndef DWARF_VERSION_DEFAULT
+#define DWARF_VERSION_DEFAULT 5
+#endif
+
+#ifndef USED_FOR_TARGET
+/* Done this way to keep gengtype happy. */
+#if BITS_PER_UNIT == 8
+#define TARGET_UNIT uint8_t
+#elif BITS_PER_UNIT == 16
+#define TARGET_UNIT uint16_t
+#elif BITS_PER_UNIT == 32
+#define TARGET_UNIT uint32_t
+#else
+#error Unknown BITS_PER_UNIT
+#endif
+typedef TARGET_UNIT target_unit;
+#endif
+
+#endif /* ! GCC_DEFAULTS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/df.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/df.h
new file mode 100644
index 0000000..aec2223
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/df.h
@@ -0,0 +1,1253 @@
+/* Form lists of pseudo register references for autoinc optimization
+ for GNU compiler. This is part of flow optimization.
+ Copyright (C) 1999-2023 Free Software Foundation, Inc.
+ Originally contributed by Michael P. Hayes
+ (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
+ Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
+ and Kenneth Zadeck (zadeck@naturalbridge.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DF_H
+#define GCC_DF_H
+
+#include "regset.h"
+#include "alloc-pool.h"
+#include "timevar.h"
+
+struct dataflow;
+class df_d;
+struct df_problem;
+struct df_link;
+struct df_insn_info;
+union df_ref_d;
+
+/* Data flow problems. All problems must have a unique id here. */
+
+/* Scanning is not really a dataflow problem, but it is useful to have
+ the basic block functions in the vector so that things get done in
+   a uniform manner.  The last four problems can be added or deleted
+   at any time; the others are always defined (though LIVE is only
+   guaranteed to be there at -O2 or higher). */
+enum df_problem_id
+ {
+ DF_SCAN,
+ DF_LR, /* Live Registers backward. */
+ DF_LIVE, /* Live Registers & Uninitialized Registers */
+ DF_RD, /* Reaching Defs. */
+ DF_CHAIN, /* Def-Use and/or Use-Def Chains. */
+ DF_WORD_LR, /* Subreg tracking lr. */
+ DF_NOTE, /* REG_DEAD and REG_UNUSED notes. */
+ DF_MD, /* Multiple Definitions. */
+ DF_MIR, /* Must-initialized Registers. */
+
+ DF_LAST_PROBLEM_PLUS1
+ };
+
+/* Dataflow direction. */
+enum df_flow_dir
+ {
+ DF_NONE,
+ DF_FORWARD,
+ DF_BACKWARD
+ };
+
+/* Discriminator for the various df_ref types. */
+enum df_ref_class {DF_REF_BASE, DF_REF_ARTIFICIAL, DF_REF_REGULAR};
+
+/* The first of these is a set of a register.  The remaining three
+   are all uses of a register (the mem_load and mem_store relate to
+   how the register is used as an addressing operand). */
+enum df_ref_type {DF_REF_REG_DEF, DF_REF_REG_USE,
+ DF_REF_REG_MEM_LOAD, DF_REF_REG_MEM_STORE};
+
+enum df_ref_flags
+ {
+ /* This flag is set if this ref occurs inside of a conditional
+ execution instruction. */
+ DF_REF_CONDITIONAL = 1 << 0,
+
+ /* If this flag is set for an artificial use or def, that ref
+ logically happens at the top of the block. If it is not set
+ for an artificial use or def, that ref logically happens at the
+ bottom of the block. This is never set for regular refs. */
+ DF_REF_AT_TOP = 1 << 1,
+
+ /* This flag is set if the use is inside a REG_EQUAL or REG_EQUIV
+ note. */
+ DF_REF_IN_NOTE = 1 << 2,
+
+ /* This bit is true if this ref can make regs_ever_live true for
+ this regno. */
+ DF_HARD_REG_LIVE = 1 << 3,
+
+
+ /* This flag is set if this ref is a partial use or def of the
+ associated register. */
+ DF_REF_PARTIAL = 1 << 4,
+
+ /* Read-modify-write refs generate both a use and a def and
+ these are marked with this flag to show that they are not
+ independent. */
+ DF_REF_READ_WRITE = 1 << 5,
+
+ /* This flag is set if this ref, generally a def, may clobber the
+ referenced register. This is generally only set for hard
+ registers that cross a call site. With better information
+ about calls, some of these could be changed in the future to
+ DF_REF_MUST_CLOBBER. */
+ DF_REF_MAY_CLOBBER = 1 << 6,
+
+ /* This flag is set if this ref, generally a def, is a real
+ clobber. This is not currently set for registers live across a
+ call because that clobbering may or may not happen.
+
+ Most of the uses of this are with sets that have a
+ GET_CODE(..)==CLOBBER. Note that this is set even if the
+ clobber is to a subreg. So in order to tell if the clobber
+ wipes out the entire register, it is necessary to also check
+ the DF_REF_PARTIAL flag. */
+ DF_REF_MUST_CLOBBER = 1 << 7,
+
+
+ /* If the ref has one of the following two flags set, then the
+ struct df_ref can be cast to struct df_ref_extract to access
+ the width and offset fields. */
+
+ /* This flag is set if the ref contains a SIGN_EXTRACT. */
+ DF_REF_SIGN_EXTRACT = 1 << 8,
+
+ /* This flag is set if the ref contains a ZERO_EXTRACT. */
+ DF_REF_ZERO_EXTRACT = 1 << 9,
+
+ /* This flag is set if the ref contains a STRICT_LOW_PART. */
+ DF_REF_STRICT_LOW_PART = 1 << 10,
+
+ /* This flag is set if the ref contains a SUBREG. */
+ DF_REF_SUBREG = 1 << 11,
+
+
+ /* This bit is true if this ref is part of a multiword hardreg. */
+ DF_REF_MW_HARDREG = 1 << 12,
+
+ /* This flag is set if this ref is a usage of the stack pointer by
+ a function call. */
+ DF_REF_CALL_STACK_USAGE = 1 << 13,
+
+ /* This flag is used for verification of existing refs. */
+ DF_REF_REG_MARKER = 1 << 14,
+
+ /* This flag is set if this ref is inside a pre/post modify. */
+ DF_REF_PRE_POST_MODIFY = 1 << 15
+
+ };
+
+/* The possible ordering of refs within the df_ref_info. */
+enum df_ref_order
+ {
+    /* There is no table. */
+ DF_REF_ORDER_NO_TABLE,
+
+ /* There is a table of refs but it is not (or no longer) organized
+ by one of the following methods. */
+ DF_REF_ORDER_UNORDERED,
+ DF_REF_ORDER_UNORDERED_WITH_NOTES,
+
+ /* Organize the table by reg order, all of the refs with regno 0
+       followed by all of the refs with regno 1 ... .  Within the
+       refs for a particular regno, the refs are unordered. */
+ DF_REF_ORDER_BY_REG,
+
+ /* For uses, the refs within eq notes may be added for
+ DF_REF_ORDER_BY_REG. */
+ DF_REF_ORDER_BY_REG_WITH_NOTES,
+
+ /* Organize the refs in insn order. The insns are ordered within a
+ block, and the blocks are ordered by FOR_ALL_BB_FN. */
+ DF_REF_ORDER_BY_INSN,
+
+ /* For uses, the refs within eq notes may be added for
+ DF_REF_ORDER_BY_INSN. */
+ DF_REF_ORDER_BY_INSN_WITH_NOTES
+ };
+
+/* Function prototypes added to df_problem instance. */
+
+/* Allocate the problem specific data. */
+typedef void (*df_alloc_function) (bitmap);
+
+/* This function is called if the problem has global data that needs
+   to be cleared whenever the set of blocks changes.  The bitmap
+ contains the set of blocks that may require special attention.
+ This call is only made if some of the blocks are going to change.
+ If everything is to be deleted, the wholesale deletion mechanisms
+ apply. */
+typedef void (*df_reset_function) (bitmap);
+
+/* Free the basic block info. Called from the block reordering code
+ to get rid of the blocks that have been squished down. */
+typedef void (*df_free_bb_function) (basic_block, void *);
+
+/* Local compute function. */
+typedef void (*df_local_compute_function) (bitmap);
+
+/* Init the solution specific data. */
+typedef void (*df_init_function) (bitmap);
+
+/* Iterative dataflow function. */
+typedef void (*df_dataflow_function) (struct dataflow *, bitmap, int *, int);
+
+/* Confluence operator for blocks with 0 out (or in) edges. */
+typedef void (*df_confluence_function_0) (basic_block);
+
+/* Confluence operator for blocks with 1 or more out (or in) edges.
+ Return true if BB input data has changed. */
+typedef bool (*df_confluence_function_n) (edge);
+
+/* Transfer function for blocks.
+ Return true if BB output data has changed. */
+typedef bool (*df_transfer_function) (int);
+
+/* Function to massage the information after the problem solving. */
+typedef void (*df_finalizer_function) (bitmap);
+
+/* Function to free all of the problem specific datastructures. */
+typedef void (*df_free_function) (void);
+
+/* Function to remove this problem from the stack of dataflow problems
+   without affecting the other problems in the stack except for those
+ that depend on this problem. */
+typedef void (*df_remove_problem_function) (void);
+
+/* Function to dump basic block independent results to FILE. */
+typedef void (*df_dump_problem_function) (FILE *);
+
+/* Function to dump top or bottom of basic block results to FILE. */
+typedef void (*df_dump_bb_problem_function) (basic_block, FILE *);
+
+/* Function to dump before or after an insn to FILE. */
+typedef void (*df_dump_insn_problem_function) (const rtx_insn *, FILE *);
+
+/* Function called before verifying the dataflow solution. */
+typedef void (*df_verify_solution_start) (void);
+
+/* Function called after verifying the dataflow solution. */
+typedef void (*df_verify_solution_end) (void);
+
+/* The static description of a dataflow problem to solve.  See the
+   typedefs above for documentation of the function fields. */
+
+struct df_problem {
+  /* The unique id of the problem.  This is used to index into
+ df->defined_problems to make accessing the problem data easy. */
+ enum df_problem_id id;
+ enum df_flow_dir dir; /* Dataflow direction. */
+ df_alloc_function alloc_fun;
+ df_reset_function reset_fun;
+ df_free_bb_function free_bb_fun;
+ df_local_compute_function local_compute_fun;
+ df_init_function init_fun;
+ df_dataflow_function dataflow_fun;
+ df_confluence_function_0 con_fun_0;
+ df_confluence_function_n con_fun_n;
+ df_transfer_function trans_fun;
+ df_finalizer_function finalize_fun;
+ df_free_function free_fun;
+ df_remove_problem_function remove_problem_fun;
+ df_dump_problem_function dump_start_fun;
+ df_dump_bb_problem_function dump_top_fun;
+ df_dump_bb_problem_function dump_bottom_fun;
+ df_dump_insn_problem_function dump_insn_top_fun;
+ df_dump_insn_problem_function dump_insn_bottom_fun;
+ df_verify_solution_start verify_start_fun;
+ df_verify_solution_end verify_end_fun;
+ const struct df_problem *dependent_problem;
+ unsigned int block_info_elt_size;
+
+ /* The timevar id associated with this pass. */
+ timevar_id_t tv_id;
+
+  /* True if df_set_blocks should null out the basic block info if
+     the block drops out of df->blocks_to_analyze. */
+ bool free_blocks_on_set_blocks;
+};
+
+
+/* The specific instance of the problem to solve. */
+struct dataflow
+{
+ const struct df_problem *problem; /* The problem to be solved. */
+
+ /* Array indexed by bb->index, that contains basic block problem and
+ solution specific information. */
+ void *block_info;
+ unsigned int block_info_size;
+
+ /* The pool to allocate the block_info from. */
+ object_allocator<df_link> *block_pool;
+
+ /* The lr and live problems have their transfer functions recomputed
+     only if necessary.  This is possible for them because the
+     problems are kept active for the entire backend and their
+     transfer functions are indexed by REGNO.  These are not
+ defined for any other problem. */
+ bitmap out_of_date_transfer_functions;
+
+ /* Other problem specific data that is not on a per basic block
+ basis. The structure is generally defined privately for the
+ problem. The exception being the scanning problem where it is
+ fully public. */
+ void *problem_data;
+
+ /* Local flags for some of the problems. */
+ unsigned int local_flags;
+
+  /* True if this instance of the problem has been initialized.  This
+     is used by the dumpers to keep garbage out of the dumps if, for
+     debugging, a dump is produced before the first call to
+ df_analyze after a new problem is added. */
+ bool computed;
+
+  /* True if something has changed which invalidates the dataflow
+ solutions. Note that this bit is always true for all problems except
+ lr and live. */
+ bool solutions_dirty;
+
+ /* If true, this pass is deleted by df_finish_pass. This is never
+ true for DF_SCAN and DF_LR. It is true for DF_LIVE if optimize >
+ 1. It is always true for the other problems. */
+ bool optional_p;
+};
+
+
+/* The set of multiword hardregs used as operands to this
+ instruction. These are factored into individual uses and defs but
+ the aggregate is still needed to service the REG_DEAD and
+ REG_UNUSED notes. */
+struct df_mw_hardreg
+{
+ df_mw_hardreg *next; /* Next entry for this instruction. */
+ rtx mw_reg; /* The multiword hardreg. */
+ /* These two bitfields are intentionally oversized, in the hope that
+ accesses to 16-bit fields will usually be quicker. */
+ ENUM_BITFIELD(df_ref_type) type : 16;
+ /* Used to see if the ref is read or write. */
+ int flags : 16; /* Various df_ref_flags. */
+ unsigned int start_regno; /* First word of the multi word subreg. */
+ unsigned int end_regno; /* Last word of the multi word subreg. */
+ unsigned int mw_order; /* Same as df_ref.ref_order. */
+};
+
+
+/* Define a register reference structure. One of these is allocated
+ for every register reference (use or def). Note some register
+ references (e.g., post_inc, subreg) generate both a def and a use. */
+struct df_base_ref
+{
+ /* These three bitfields are intentionally oversized, in the hope that
+ accesses to 8 and 16-bit fields will usually be quicker. */
+ ENUM_BITFIELD(df_ref_class) cl : 8;
+
+ ENUM_BITFIELD(df_ref_type) type : 8;
+ /* Type of ref. */
+ int flags : 16; /* Various df_ref_flags. */
+ unsigned int regno; /* The register number referenced. */
+ rtx reg; /* The register referenced. */
+ union df_ref_d *next_loc; /* Next ref for same insn or bb. */
+ struct df_link *chain; /* Head of def-use, use-def. */
+ /* Pointer to the insn info of the containing instruction. FIXME!
+ Currently this is NULL for artificial refs but this will be used
+ when FUDs are added. */
+ struct df_insn_info *insn_info;
+ /* For each regno, there are three chains of refs, one for the uses,
+ the eq_uses and the defs. These chains go through the refs
+ themselves rather than using an external structure. */
+ union df_ref_d *next_reg; /* Next ref with same regno and type. */
+ union df_ref_d *prev_reg; /* Prev ref with same regno and type. */
+ /* Location in the ref table. This is only valid after a call to
+ df_maybe_reorganize_[use,def]_refs which is an expensive operation. */
+ int id;
+ /* The index at which the operand was scanned in the insn. This is
+ used to totally order the refs in an insn. */
+ unsigned int ref_order;
+};
+
+
+/* The three types of df_refs. Note that the df_ref_extract is an
+ extension of the df_regular_ref, not the df_base_ref. */
+struct df_artificial_ref
+{
+ struct df_base_ref base;
+
+ /* Artificial refs do not have an insn, so to get the basic block,
+ it must be explicitly here. */
+ basic_block bb;
+};
+
+
+struct df_regular_ref
+{
+ struct df_base_ref base;
+ /* The loc is the address in the insn of the reg. This is not
+ defined for special registers, such as clobbers and stack
+ pointers that are also associated with call insns and so those
+ just use the base. */
+ rtx *loc;
+};
+
+/* Union of the different kinds of defs/uses placeholders. */
+union df_ref_d
+{
+ struct df_base_ref base;
+ struct df_regular_ref regular_ref;
+ struct df_artificial_ref artificial_ref;
+};
+typedef union df_ref_d *df_ref;
+
+
+/* One of these structures is allocated for every insn. */
+struct df_insn_info
+{
+ rtx_insn *insn; /* The insn this info comes from. */
+ df_ref defs; /* Head of insn-def chain. */
+ df_ref uses; /* Head of insn-use chain. */
+ /* Head of insn-use chain for uses in REG_EQUAL/EQUIV notes. */
+ df_ref eq_uses;
+ struct df_mw_hardreg *mw_hardregs;
+ /* The logical uid of the insn in the basic block. This is valid
+ after any call to df_analyze but may rot after insns are added,
+ deleted or moved. */
+ int luid;
+};
+
+/* These links are used for ref-ref chains. Currently only DEF-USE and
+ USE-DEF chains can be built by DF. */
+struct df_link
+{
+ df_ref ref;
+ struct df_link *next;
+};
+
+
+enum df_chain_flags
+{
+ /* Flags that control the building of chains. */
+ DF_DU_CHAIN = 1, /* Build DU chains. */
+ DF_UD_CHAIN = 2 /* Build UD chains. */
+};
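+
+/* Editorial sketch, not part of the imported header: a pass that wants
+   def-use chains typically requests the CHAIN problem, runs the
+   analysis, and walks the df_link list hanging off each def
+   (note_use is a hypothetical callback):
+
+     df_chain_add_problem (DF_DU_CHAIN);
+     df_analyze ();
+     df_ref def;
+     FOR_EACH_INSN_DEF (def, insn)
+       for (struct df_link *link = DF_REF_CHAIN (def); link; link = link->next)
+         note_use (link->ref);
+
+   df_finish_pass (false) releases the optional problems afterwards.  */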
+
+enum df_scan_flags
+{
+ /* Flags for the SCAN problem. */
+ DF_SCAN_EMPTY_ENTRY_EXIT = 1 /* Don't define any registers in the entry
+ block; don't use any in the exit block. */
+};
+
+enum df_changeable_flags
+{
+ /* Scanning flags. */
+ /* Flag to control the running of dce as a side effect of building LR. */
+ DF_LR_RUN_DCE = 1 << 0, /* Run DCE. */
+ DF_NO_HARD_REGS = 1 << 1, /* Skip hard registers in RD and CHAIN Building. */
+
+ DF_EQ_NOTES = 1 << 2, /* Build chains with uses present in EQUIV/EQUAL notes. */
+ DF_NO_REGS_EVER_LIVE = 1 << 3, /* Do not compute the regs_ever_live. */
+
+  /* Cause df_insn_rescan, df_notes_rescan and df_insn_delete to
+     return immediately.  This is used by passes that know how to update
+     the scanning themselves. */
+ DF_NO_INSN_RESCAN = 1 << 4,
+
+  /* Cause df_insn_rescan, df_notes_rescan and df_insn_delete to
+ return after marking the insn for later processing. This allows all
+ rescans to be batched. */
+ DF_DEFER_INSN_RESCAN = 1 << 5,
+
+ /* Compute the reaching defs problem as "live and reaching defs" (LR&RD).
+ A DEF is reaching and live at insn I if DEF reaches I and REGNO(DEF)
+ is in LR_IN of the basic block containing I. */
+ DF_RD_PRUNE_DEAD_DEFS = 1 << 6,
+
+ DF_VERIFY_SCHEDULED = 1 << 7
+};
+
+/* Two of these structures are inline in df, one for the uses and one
+   for the defs.  This structure only contains the refs within the
+   boundary set by df_set_blocks, if that has been defined. */
+struct df_ref_info
+{
+ df_ref *refs; /* Ref table, indexed by id. */
+ unsigned int *begin; /* First ref_index for this pseudo. */
+ unsigned int *count; /* Count of refs for this pseudo. */
+ unsigned int refs_size; /* Size of currently allocated refs table. */
+
+ /* Table_size is the number of elements in the refs table. This
+ will also be the width of the bitvectors in the rd and ru
+ problems. Total_size is the number of refs. These will be the
+ same if the focus has not been reduced by df_set_blocks. If the
+ focus has been reduced, table_size will be smaller since it only
+ contains the refs in the set blocks. */
+ unsigned int table_size;
+ unsigned int total_size;
+
+ enum df_ref_order ref_order;
+};
+
+/* Three of these structures are allocated for every pseudo reg. One
+ for the uses, one for the eq_uses and one for the defs. */
+struct df_reg_info
+{
+ /* Head of chain for refs of that type and regno. */
+ df_ref reg_chain;
+ /* Number of refs in the chain. */
+ unsigned int n_refs;
+};
+
+
+/*----------------------------------------------------------------------------
+ Problem data for the scanning dataflow problem. Unlike the other
+ dataflow problems, the problem data for scanning is fully exposed and
+ used by owners of the problem.
+----------------------------------------------------------------------------*/
+
+class df_d
+{
+public:
+
+ /* The set of problems to be solved is stored in two arrays. In
+ PROBLEMS_IN_ORDER, the problems are stored in the order that they
+ are solved. This is an internally dense array that may have
+ nulls at the end of it. In PROBLEMS_BY_INDEX, the problem is
+ stored by the value in df_problem.id. These are used to access
+ the problem local data without having to search the first
+ array. */
+
+ struct dataflow *problems_in_order[DF_LAST_PROBLEM_PLUS1];
+ struct dataflow *problems_by_index[DF_LAST_PROBLEM_PLUS1];
+
+  /* If not NULL, the subset of blocks of the program to be
+     considered for analysis.  At certain times, this will contain all
+     the blocks in the function, so it cannot be used as an indicator
+     of whether we are analyzing a subset.  See analyze_subset. */
+ bitmap blocks_to_analyze;
+
+ /* The following information is really the problem data for the
+ scanning instance but it is used too often by the other problems
+ to keep getting it from there. */
+ struct df_ref_info def_info; /* Def info. */
+ struct df_ref_info use_info; /* Use info. */
+
+ /* The following three arrays are allocated in parallel. They contain
+ the sets of refs of each type for each reg. */
+ struct df_reg_info **def_regs; /* Def reg info. */
+  struct df_reg_info **use_regs; /* Use reg info. */
+  struct df_reg_info **eq_use_regs; /* Eq_use reg info. */
+ unsigned int regs_size; /* Size of currently allocated regs table. */
+ unsigned int regs_inited; /* Number of regs with reg_infos allocated. */
+
+
+ struct df_insn_info **insns; /* Insn table, indexed by insn UID. */
+ unsigned int insns_size; /* Size of insn table. */
+
+ int num_problems_defined;
+
+ bitmap_head hardware_regs_used; /* The set of hardware registers used. */
+ /* The set of hard regs that are in the artificial uses at the end
+ of a regular basic block. */
+ bitmap_head regular_block_artificial_uses;
+ /* The set of hard regs that are in the artificial uses at the end
+ of a basic block that has an EH pred. */
+ bitmap_head eh_block_artificial_uses;
+ /* The set of hardware registers live on entry to the function. */
+ bitmap entry_block_defs;
+ bitmap exit_block_uses; /* The set of hardware registers used in exit block. */
+
+ /* Insns to delete, rescan or reprocess the notes at next
+ df_rescan_all or df_process_deferred_rescans. */
+ bitmap_head insns_to_delete;
+ bitmap_head insns_to_rescan;
+ bitmap_head insns_to_notes_rescan;
+ int *postorder; /* The current set of basic blocks
+ in reverse postorder. */
+ vec<int> postorder_inverted; /* The current set of basic blocks
+ in reverse postorder of inverted CFG. */
+ int n_blocks; /* The number of blocks in reverse postorder. */
+
+ /* An array [FIRST_PSEUDO_REGISTER], indexed by regno, of the number
+ of refs that qualify as being real hard regs uses. Artificial
+ uses and defs as well as refs in eq notes are ignored. If the
+ ref is a def, it cannot be a MAY_CLOBBER def. If the ref is a
+     use, it cannot be in the eliminable register set or be the frame
+     or arg pointer register.  Uses in debug insns are ignored.
+
+ IT IS NOT ACCEPTABLE TO MANUALLY CHANGE THIS ARRAY. This array
+ always reflects the actual number of refs in the insn stream that
+ satisfy the above criteria. */
+ unsigned int *hard_regs_live_count;
+
+ /* This counter provides a way to totally order refs without using
+ addresses. It is incremented whenever a ref is created. */
+ unsigned int ref_order;
+
+ /* Problem specific control information. This is a combination of
+ enum df_changeable_flags values. */
+ int changeable_flags : 8;
+
+ /* If this is true, then only a subset of the blocks of the program
+ is considered to compute the solutions of dataflow problems. */
+ bool analyze_subset;
+
+ /* True if someone added or deleted something from regs_ever_live so
+     that the entry and exit blocks need to be reprocessed. */
+ bool redo_entry_and_exit;
+};
+
+#define DF_SCAN_BB_INFO(BB) (df_scan_get_bb_info ((BB)->index))
+#define DF_RD_BB_INFO(BB) (df_rd_get_bb_info ((BB)->index))
+#define DF_LR_BB_INFO(BB) (df_lr_get_bb_info ((BB)->index))
+#define DF_LIVE_BB_INFO(BB) (df_live_get_bb_info ((BB)->index))
+#define DF_WORD_LR_BB_INFO(BB) (df_word_lr_get_bb_info ((BB)->index))
+#define DF_MD_BB_INFO(BB) (df_md_get_bb_info ((BB)->index))
+#define DF_MIR_BB_INFO(BB) (df_mir_get_bb_info ((BB)->index))
+
+/* Most transformations that wish to use live register analysis will
+   use these macros.  This info is the AND of the lr and live sets. */
+#define DF_LIVE_IN(BB) (&DF_LIVE_BB_INFO (BB)->in)
+#define DF_LIVE_OUT(BB) (&DF_LIVE_BB_INFO (BB)->out)
+
+#define DF_MIR_IN(BB) (&DF_MIR_BB_INFO (BB)->in)
+#define DF_MIR_OUT(BB) (&DF_MIR_BB_INFO (BB)->out)
+
+/* These macros are used by passes that are not tolerant of
+ uninitialized variables. This intolerance should eventually
+ be fixed. */
+#define DF_LR_IN(BB) (&DF_LR_BB_INFO (BB)->in)
+#define DF_LR_OUT(BB) (&DF_LR_BB_INFO (BB)->out)
+
+/* These macros are used by passes that are not tolerant of
+ uninitialized variables. This intolerance should eventually
+ be fixed. */
+#define DF_WORD_LR_IN(BB) (&DF_WORD_LR_BB_INFO (BB)->in)
+#define DF_WORD_LR_OUT(BB) (&DF_WORD_LR_BB_INFO (BB)->out)
+
+/* Macros to access the elements within the ref structure. */
+
+
+#define DF_REF_REAL_REG(REF) (GET_CODE ((REF)->base.reg) == SUBREG \
+ ? SUBREG_REG ((REF)->base.reg) : ((REF)->base.reg))
+#define DF_REF_REGNO(REF) ((REF)->base.regno)
+#define DF_REF_REAL_LOC(REF) (GET_CODE (*((REF)->regular_ref.loc)) == SUBREG \
+ ? &SUBREG_REG (*((REF)->regular_ref.loc)) : ((REF)->regular_ref.loc))
+#define DF_REF_REG(REF) ((REF)->base.reg)
+#define DF_REF_LOC(REF) (DF_REF_CLASS (REF) == DF_REF_REGULAR ? \
+ (REF)->regular_ref.loc : NULL)
+#define DF_REF_BB(REF) (DF_REF_IS_ARTIFICIAL (REF) \
+ ? (REF)->artificial_ref.bb \
+ : BLOCK_FOR_INSN (DF_REF_INSN (REF)))
+#define DF_REF_BBNO(REF) (DF_REF_BB (REF)->index)
+#define DF_REF_INSN_INFO(REF) ((REF)->base.insn_info)
+#define DF_REF_INSN(REF) ((REF)->base.insn_info->insn)
+#define DF_REF_INSN_UID(REF) (INSN_UID (DF_REF_INSN(REF)))
+#define DF_REF_CLASS(REF) ((REF)->base.cl)
+#define DF_REF_TYPE(REF) ((REF)->base.type)
+#define DF_REF_CHAIN(REF) ((REF)->base.chain)
+#define DF_REF_ID(REF) ((REF)->base.id)
+#define DF_REF_FLAGS(REF) ((REF)->base.flags)
+#define DF_REF_FLAGS_IS_SET(REF, v) ((DF_REF_FLAGS (REF) & (v)) != 0)
+#define DF_REF_FLAGS_SET(REF, v) (DF_REF_FLAGS (REF) |= (v))
+#define DF_REF_FLAGS_CLEAR(REF, v) (DF_REF_FLAGS (REF) &= ~(v))
+#define DF_REF_ORDER(REF) ((REF)->base.ref_order)
+/* If DF_REF_IS_ARTIFICIAL () is true, this is not a real
+ definition/use, but an artificial one created to model always live
+ registers, eh uses, etc. */
+#define DF_REF_IS_ARTIFICIAL(REF) (DF_REF_CLASS (REF) == DF_REF_ARTIFICIAL)
+#define DF_REF_REG_MARK(REF) (DF_REF_FLAGS_SET ((REF),DF_REF_REG_MARKER))
+#define DF_REF_REG_UNMARK(REF) (DF_REF_FLAGS_CLEAR ((REF),DF_REF_REG_MARKER))
+#define DF_REF_IS_REG_MARKED(REF) (DF_REF_FLAGS_IS_SET ((REF),DF_REF_REG_MARKER))
+#define DF_REF_NEXT_LOC(REF) ((REF)->base.next_loc)
+#define DF_REF_NEXT_REG(REF) ((REF)->base.next_reg)
+#define DF_REF_PREV_REG(REF) ((REF)->base.prev_reg)
+/* The following two macros may only be applied if one of
+ DF_REF_SIGN_EXTRACT | DF_REF_ZERO_EXTRACT is true. */
+#define DF_REF_EXTRACT_WIDTH(REF) ((REF)->extract_ref.width)
+#define DF_REF_EXTRACT_OFFSET(REF) ((REF)->extract_ref.offset)
+#define DF_REF_EXTRACT_MODE(REF) ((REF)->extract_ref.mode)
+
+/* Macros to determine the reference type. */
+#define DF_REF_REG_DEF_P(REF) (DF_REF_TYPE (REF) == DF_REF_REG_DEF)
+#define DF_REF_REG_USE_P(REF) (!DF_REF_REG_DEF_P (REF))
+#define DF_REF_REG_MEM_STORE_P(REF) (DF_REF_TYPE (REF) == DF_REF_REG_MEM_STORE)
+#define DF_REF_REG_MEM_LOAD_P(REF) (DF_REF_TYPE (REF) == DF_REF_REG_MEM_LOAD)
+#define DF_REF_REG_MEM_P(REF) (DF_REF_REG_MEM_STORE_P (REF) \
+ || DF_REF_REG_MEM_LOAD_P (REF))
+
+#define DF_MWS_REG_DEF_P(MREF) (DF_MWS_TYPE (MREF) == DF_REF_REG_DEF)
+#define DF_MWS_REG_USE_P(MREF) (!DF_MWS_REG_DEF_P (MREF))
+#define DF_MWS_NEXT(MREF) ((MREF)->next)
+#define DF_MWS_TYPE(MREF) ((MREF)->type)
+
+/* Macros to get the refs out of def_info or use_info refs table. If
+ the focus of the dataflow has been set to some subset of blocks
+ with df_set_blocks, these macros will only find the uses and defs
+ in that subset of blocks.
+
+ These macros should be used with care. The def macros are only
+ usable after a call to df_maybe_reorganize_def_refs and the use
+ macros are only usable after a call to
+ df_maybe_reorganize_use_refs. HOWEVER, BUILDING AND USING THESE
+ ARRAYS ARE A CACHE LOCALITY KILLER. */
+
+#define DF_DEFS_TABLE_SIZE() (df->def_info.table_size)
+#define DF_DEFS_GET(ID) (df->def_info.refs[(ID)])
+#define DF_DEFS_SET(ID,VAL) (df->def_info.refs[(ID)]=(VAL))
+#define DF_DEFS_COUNT(ID) (df->def_info.count[(ID)])
+#define DF_DEFS_BEGIN(ID) (df->def_info.begin[(ID)])
+#define DF_USES_TABLE_SIZE() (df->use_info.table_size)
+#define DF_USES_GET(ID) (df->use_info.refs[(ID)])
+#define DF_USES_SET(ID,VAL) (df->use_info.refs[(ID)]=(VAL))
+#define DF_USES_COUNT(ID) (df->use_info.count[(ID)])
+#define DF_USES_BEGIN(ID) (df->use_info.begin[(ID)])
+
+/* Macros to access the register information from scan dataflow record. */
+
+#define DF_REG_SIZE(DF) (df->regs_inited)
+#define DF_REG_DEF_GET(REG) (df->def_regs[(REG)])
+#define DF_REG_DEF_CHAIN(REG) (df->def_regs[(REG)]->reg_chain)
+#define DF_REG_DEF_COUNT(REG) (df->def_regs[(REG)]->n_refs)
+#define DF_REG_USE_GET(REG) (df->use_regs[(REG)])
+#define DF_REG_USE_CHAIN(REG) (df->use_regs[(REG)]->reg_chain)
+#define DF_REG_USE_COUNT(REG) (df->use_regs[(REG)]->n_refs)
+#define DF_REG_EQ_USE_GET(REG) (df->eq_use_regs[(REG)])
+#define DF_REG_EQ_USE_CHAIN(REG) (df->eq_use_regs[(REG)]->reg_chain)
+#define DF_REG_EQ_USE_COUNT(REG) (df->eq_use_regs[(REG)]->n_refs)
+
+/* Macros to access the elements within the reg_info structure table. */
+
+#define DF_REGNO_FIRST_DEF(REGNUM) \
+(DF_REG_DEF_GET(REGNUM) ? DF_REG_DEF_GET (REGNUM) : 0)
+#define DF_REGNO_LAST_USE(REGNUM) \
+(DF_REG_USE_GET(REGNUM) ? DF_REG_USE_GET (REGNUM) : 0)
+
+/* Macros to access the elements within the insn_info structure table. */
+
+#define DF_INSN_SIZE() ((df)->insns_size)
+#define DF_INSN_INFO_GET(INSN) (df->insns[(INSN_UID (INSN))])
+#define DF_INSN_INFO_SET(INSN,VAL) (df->insns[(INSN_UID (INSN))]=(VAL))
+#define DF_INSN_INFO_LUID(II) ((II)->luid)
+#define DF_INSN_INFO_DEFS(II) ((II)->defs)
+#define DF_INSN_INFO_USES(II) ((II)->uses)
+#define DF_INSN_INFO_EQ_USES(II) ((II)->eq_uses)
+#define DF_INSN_INFO_MWS(II) ((II)->mw_hardregs)
+
+#define DF_INSN_LUID(INSN) (DF_INSN_INFO_LUID (DF_INSN_INFO_GET (INSN)))
+#define DF_INSN_DEFS(INSN) (DF_INSN_INFO_DEFS (DF_INSN_INFO_GET (INSN)))
+#define DF_INSN_USES(INSN) (DF_INSN_INFO_USES (DF_INSN_INFO_GET (INSN)))
+#define DF_INSN_EQ_USES(INSN) (DF_INSN_INFO_EQ_USES (DF_INSN_INFO_GET (INSN)))
+
+#define DF_INSN_UID_GET(UID) (df->insns[(UID)])
+#define DF_INSN_UID_SET(UID,VAL) (df->insns[(UID)]=(VAL))
+#define DF_INSN_UID_SAFE_GET(UID) (((unsigned)(UID) < DF_INSN_SIZE ()) \
+ ? DF_INSN_UID_GET (UID) \
+ : NULL)
+#define DF_INSN_UID_LUID(INSN) (DF_INSN_UID_GET (INSN)->luid)
+#define DF_INSN_UID_DEFS(INSN) (DF_INSN_UID_GET (INSN)->defs)
+#define DF_INSN_UID_USES(INSN) (DF_INSN_UID_GET (INSN)->uses)
+#define DF_INSN_UID_EQ_USES(INSN) (DF_INSN_UID_GET (INSN)->eq_uses)
+#define DF_INSN_UID_MWS(INSN) (DF_INSN_UID_GET (INSN)->mw_hardregs)
+
+#define FOR_EACH_INSN_INFO_DEF(ITER, INSN) \
+ for (ITER = DF_INSN_INFO_DEFS (INSN); ITER; ITER = DF_REF_NEXT_LOC (ITER))
+
+#define FOR_EACH_INSN_INFO_USE(ITER, INSN) \
+ for (ITER = DF_INSN_INFO_USES (INSN); ITER; ITER = DF_REF_NEXT_LOC (ITER))
+
+#define FOR_EACH_INSN_INFO_EQ_USE(ITER, INSN) \
+ for (ITER = DF_INSN_INFO_EQ_USES (INSN); ITER; ITER = DF_REF_NEXT_LOC (ITER))
+
+#define FOR_EACH_INSN_INFO_MW(ITER, INSN) \
+ for (ITER = DF_INSN_INFO_MWS (INSN); ITER; ITER = DF_MWS_NEXT (ITER))
+
+#define FOR_EACH_INSN_DEF(ITER, INSN) \
+ FOR_EACH_INSN_INFO_DEF(ITER, DF_INSN_INFO_GET (INSN))
+
+#define FOR_EACH_INSN_USE(ITER, INSN) \
+ FOR_EACH_INSN_INFO_USE(ITER, DF_INSN_INFO_GET (INSN))
+
+#define FOR_EACH_INSN_EQ_USE(ITER, INSN) \
+ FOR_EACH_INSN_INFO_EQ_USE(ITER, DF_INSN_INFO_GET (INSN))
+
+#define FOR_EACH_ARTIFICIAL_USE(ITER, BB_INDEX) \
+ for (ITER = df_get_artificial_uses (BB_INDEX); ITER; \
+ ITER = DF_REF_NEXT_LOC (ITER))
+
+#define FOR_EACH_ARTIFICIAL_DEF(ITER, BB_INDEX) \
+ for (ITER = df_get_artificial_defs (BB_INDEX); ITER; \
+ ITER = DF_REF_NEXT_LOC (ITER))
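+
+/* Editorial sketch, not part of the imported header: artificial refs
+   are walked per basic block rather than per insn, e.g.
+   (mark_live_at_top is a hypothetical callback):
+
+     df_ref use;
+     FOR_EACH_ARTIFICIAL_USE (use, bb->index)
+       if (DF_REF_FLAGS_IS_SET (use, DF_REF_AT_TOP))
+         mark_live_at_top (DF_REF_REGNO (use));  */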
+
+/* An obstack for bitmaps not related to specific dataflow problems.
+   This obstack should e.g. be used for bitmaps with a short lifetime
+ such as temporary bitmaps. This obstack is declared in df-core.cc. */
+
+extern bitmap_obstack df_bitmap_obstack;
+
+
+/* One of these structures is allocated for every basic block. */
+struct df_scan_bb_info
+{
+ /* The entry block has many artificial defs and these are at the
+ bottom of the block.
+
+ Blocks that are targets of exception edges may have some
+ artificial defs. These are logically located at the top of the
+ block.
+
+   Blocks that are the targets of non-local gotos have the hard
+ frame pointer defined at the top of the block. */
+ df_ref artificial_defs;
+
+ /* Blocks that are targets of exception edges may have some
+ artificial uses. These are logically at the top of the block.
+
+ Most blocks have artificial uses at the bottom of the block. */
+ df_ref artificial_uses;
+};
+
+
+/* Reaching definitions. All bitmaps are indexed by the id field of
+ the ref except sparse_kill which is indexed by regno. For the
+ LR&RD problem, the kill set is not complete: It does not contain
+ DEFs killed because the set register has died in the LR set. */
+class df_rd_bb_info
+{
+public:
+ /* Local sets to describe the basic blocks. */
+ bitmap_head kill;
+ bitmap_head sparse_kill;
+ bitmap_head gen; /* The set of defs generated in this block. */
+
+ /* The results of the dataflow problem. */
+ bitmap_head in; /* At the top of the block. */
+ bitmap_head out; /* At the bottom of the block. */
+};
+
+
+/* Multiple reaching definitions. All bitmaps are referenced by the
+ register number. */
+
+class df_md_bb_info
+{
+public:
+ /* Local sets to describe the basic blocks. */
+ bitmap_head gen; /* Partial/conditional definitions live at BB out. */
+ bitmap_head kill; /* Other definitions that are live at BB out. */
+ bitmap_head init; /* Definitions coming from dominance frontier edges. */
+
+ /* The results of the dataflow problem. */
+ bitmap_head in; /* Just before the block itself. */
+ bitmap_head out; /* At the bottom of the block. */
+};
+
+
+/* Live registers, a backwards dataflow problem. All bitmaps are
+ referenced by the register number. */
+
+class df_lr_bb_info
+{
+public:
+ /* Local sets to describe the basic blocks. */
+ bitmap_head def; /* The set of registers set in this block
+ - except artificial defs at the top. */
+ bitmap_head use; /* The set of registers used in this block. */
+
+ /* The results of the dataflow problem. */
+ bitmap_head in; /* Just before the block itself. */
+ bitmap_head out; /* At the bottom of the block. */
+};
+
+
+/* Uninitialized registers. All bitmaps are referenced by the
+   register number.  These are the ANDed results of the forward and
+   backward live info.  Note that the forward live information is not
+   available separately. */
+class df_live_bb_info
+{
+public:
+ /* Local sets to describe the basic blocks. */
+ bitmap_head kill; /* The set of registers unset in this block. Calls,
+ for instance, unset registers. */
+ bitmap_head gen; /* The set of registers set in this block. */
+
+ /* The results of the dataflow problem. */
+ bitmap_head in; /* At the top of the block. */
+ bitmap_head out; /* At the bottom of the block. */
+};
+
+
+/* Live registers, a backwards dataflow problem. These bitmaps are
+ indexed by 2 * regno for each pseudo and have two entries for each
+ pseudo. Only pseudos that have a size of 2 * UNITS_PER_WORD are
+ meaningfully tracked. */
+
+class df_word_lr_bb_info
+{
+public:
+ /* Local sets to describe the basic blocks. */
+ bitmap_head def; /* The set of registers set in this block
+ - except artificial defs at the top. */
+ bitmap_head use; /* The set of registers used in this block. */
+
+ /* The results of the dataflow problem. */
+ bitmap_head in; /* Just before the block itself. */
+ bitmap_head out; /* At the bottom of the block. */
+};
+
+/* Must-initialized registers. All bitmaps are referenced by the
+ register number. */
+class df_mir_bb_info
+{
+public:
+ /* Local sets to describe the basic blocks. */
+ bitmap_head kill; /* The set of registers unset in this block. Calls,
+ for instance, unset registers. */
+ bitmap_head gen; /* The set of registers set in this block, excluding the
+ ones killed later on in this block. */
+
+ /* The results of the dataflow problem. */
+ bitmap_head in; /* At the top of the block. */
+ bitmap_head out; /* At the bottom of the block. */
+ bool con_visited; /* Visited by con_fun_{0,n}. */
+};
+
+
+/* This is used for debugging and for the dumpers to find the latest
+ instance so that the df info can be added to the dumps. This
+ should not be used by regular code. */
+extern class df_d *df;
+#define df_scan (df->problems_by_index[DF_SCAN])
+#define df_rd (df->problems_by_index[DF_RD])
+#define df_lr (df->problems_by_index[DF_LR])
+#define df_live (df->problems_by_index[DF_LIVE])
+#define df_chain (df->problems_by_index[DF_CHAIN])
+#define df_word_lr (df->problems_by_index[DF_WORD_LR])
+#define df_note (df->problems_by_index[DF_NOTE])
+#define df_md (df->problems_by_index[DF_MD])
+#define df_mir (df->problems_by_index[DF_MIR])
+
+/* This symbol turns on checking that each modification of the cfg has
+ been identified to the appropriate df routines. It is not part of
+ verification per se because the check that the final solution has
+ not changed covers this. However, if the solution is not being
+ properly recomputed because the cfg is being modified, adding in
+ calls to df_check_cfg_clean can be used to find the source of that
+ kind of problem. */
+#if 0
+#define DF_DEBUG_CFG
+#endif
+
+
+/* Functions defined in df-core.cc. */
+
+extern void df_add_problem (const struct df_problem *);
+extern int df_set_flags (int);
+extern int df_clear_flags (int);
+extern void df_set_blocks (bitmap);
+extern void df_remove_problem (struct dataflow *);
+extern void df_finish_pass (bool);
+extern void df_analyze_problem (struct dataflow *, bitmap, int *, int);
+extern void df_analyze ();
+extern void df_analyze_loop (class loop *);
+extern int df_get_n_blocks (enum df_flow_dir);
+extern int *df_get_postorder (enum df_flow_dir);
+extern void df_simple_dataflow (enum df_flow_dir, df_init_function,
+ df_confluence_function_0, df_confluence_function_n,
+ df_transfer_function, bitmap, int *, int);
+extern void df_mark_solutions_dirty (void);
+extern bool df_get_bb_dirty (basic_block);
+extern void df_set_bb_dirty (basic_block);
+extern void df_compact_blocks (void);
+extern void df_bb_replace (int, basic_block);
+extern void df_bb_delete (int);
+extern void df_verify (void);
+#ifdef DF_DEBUG_CFG
+extern void df_check_cfg_clean (void);
+#endif
+extern df_ref df_bb_regno_first_def_find (basic_block, unsigned int);
+extern df_ref df_bb_regno_last_def_find (basic_block, unsigned int);
+extern df_ref df_find_def (rtx_insn *, rtx);
+extern bool df_reg_defined (rtx_insn *, rtx);
+extern df_ref df_find_use (rtx_insn *, rtx);
+extern bool df_reg_used (rtx_insn *, rtx);
+extern rtx df_find_single_def_src (rtx);
+extern void df_worklist_dataflow (struct dataflow *,bitmap, int *, int);
+extern void df_print_regset (FILE *file, const_bitmap r);
+extern void df_print_word_regset (FILE *file, const_bitmap r);
+extern void df_dump (FILE *);
+extern void df_dump_region (FILE *);
+extern void df_dump_start (FILE *);
+extern void df_dump_top (basic_block, FILE *);
+extern void df_dump_bottom (basic_block, FILE *);
+extern void df_dump_insn_top (const rtx_insn *, FILE *);
+extern void df_dump_insn_bottom (const rtx_insn *, FILE *);
+extern void df_refs_chain_dump (df_ref, bool, FILE *);
+extern void df_regs_chain_dump (df_ref, FILE *);
+extern void df_insn_debug (rtx_insn *, bool, FILE *);
+extern void df_insn_debug_regno (rtx_insn *, FILE *);
+extern void df_regno_debug (unsigned int, FILE *);
+extern void df_ref_debug (df_ref, FILE *);
+extern void debug_df_insn (rtx_insn *);
+extern void debug_df_regno (unsigned int);
+extern void debug_df_reg (rtx);
+extern void debug_df_defno (unsigned int);
+extern void debug_df_useno (unsigned int);
+extern void debug_df_ref (df_ref);
+extern void debug_df_chain (struct df_link *);
+
+/* Functions defined in df-problems.cc. */
+
+extern struct df_link *df_chain_create (df_ref, df_ref);
+extern void df_chain_unlink (df_ref);
+extern void df_chain_copy (df_ref, struct df_link *);
+extern void df_grow_bb_info (struct dataflow *);
+extern void df_chain_dump (struct df_link *, FILE *);
+extern void df_print_bb_index (basic_block bb, FILE *file);
+extern void df_rd_add_problem (void);
+extern void df_rd_simulate_artificial_defs_at_top (basic_block, bitmap);
+extern void df_rd_simulate_one_insn (basic_block, rtx_insn *, bitmap);
+extern void df_lr_add_problem (void);
+extern void df_lr_verify_transfer_functions (void);
+extern void df_live_verify_transfer_functions (void);
+extern void df_live_add_problem (void);
+extern void df_live_set_all_dirty (void);
+extern void df_chain_add_problem (unsigned int);
+extern void df_word_lr_add_problem (void);
+extern bool df_word_lr_mark_ref (df_ref, bool, bitmap);
+extern bool df_word_lr_simulate_defs (rtx_insn *, bitmap);
+extern void df_word_lr_simulate_uses (rtx_insn *, bitmap);
+extern void df_word_lr_simulate_artificial_refs_at_top (basic_block, bitmap);
+extern void df_word_lr_simulate_artificial_refs_at_end (basic_block, bitmap);
+extern void df_note_add_problem (void);
+extern void df_md_add_problem (void);
+extern void df_md_simulate_artificial_defs_at_top (basic_block, bitmap);
+extern void df_md_simulate_one_insn (basic_block, rtx_insn *, bitmap);
+extern void df_mir_add_problem (void);
+extern void df_mir_simulate_one_insn (basic_block, rtx_insn *, bitmap, bitmap);
+extern void df_simulate_find_noclobber_defs (rtx_insn *, bitmap);
+extern void df_simulate_find_defs (rtx_insn *, bitmap);
+extern void df_simulate_defs (rtx_insn *, bitmap);
+extern void df_simulate_uses (rtx_insn *, bitmap);
+extern void df_simulate_initialize_backwards (basic_block, bitmap);
+extern void df_simulate_one_insn_backwards (basic_block, rtx_insn *, bitmap);
+extern void df_simulate_finalize_backwards (basic_block, bitmap);
+extern void df_simulate_initialize_forwards (basic_block, bitmap);
+extern void df_simulate_one_insn_forwards (basic_block, rtx_insn *, bitmap);
+extern void simulate_backwards_to_point (basic_block, regset, rtx);
+extern bool can_move_insns_across (rtx_insn *, rtx_insn *,
+ rtx_insn *, rtx_insn *,
+ basic_block, regset,
+ regset, rtx_insn **);
+/* Functions defined in df-scan.cc. */
+
+extern void df_scan_alloc (bitmap);
+extern void df_scan_add_problem (void);
+extern void df_grow_reg_info (void);
+extern void df_grow_insn_info (void);
+extern void df_scan_blocks (void);
+extern void df_uses_create (rtx *, rtx_insn *, int);
+extern struct df_insn_info * df_insn_create_insn_record (rtx_insn *);
+extern void df_insn_delete (rtx_insn *);
+extern void df_bb_refs_record (int, bool);
+extern bool df_insn_rescan (rtx_insn *);
+extern bool df_insn_rescan_debug_internal (rtx_insn *);
+extern void df_insn_rescan_all (void);
+extern void df_process_deferred_rescans (void);
+extern void df_recompute_luids (basic_block);
+extern void df_insn_change_bb (rtx_insn *, basic_block);
+extern void df_maybe_reorganize_use_refs (enum df_ref_order);
+extern void df_maybe_reorganize_def_refs (enum df_ref_order);
+extern void df_ref_change_reg_with_loc (rtx, unsigned int);
+extern void df_notes_rescan (rtx_insn *);
+extern void df_hard_reg_init (void);
+extern void df_update_entry_block_defs (void);
+extern void df_update_exit_block_uses (void);
+extern void df_update_entry_exit_and_calls (void);
+extern bool df_hard_reg_used_p (unsigned int);
+extern unsigned int df_hard_reg_used_count (unsigned int);
+extern bool df_regs_ever_live_p (unsigned int);
+extern bool df_epilogue_uses_p (unsigned int);
+extern void df_set_regs_ever_live (unsigned int, bool);
+extern void df_compute_regs_ever_live (bool);
+extern void df_scan_verify (void);
+
+
+/*----------------------------------------------------------------------------
+   Public access functions for the dataflow problems.
+----------------------------------------------------------------------------*/
+
+inline struct df_scan_bb_info *
+df_scan_get_bb_info (unsigned int index)
+{
+ if (index < df_scan->block_info_size)
+ return &((struct df_scan_bb_info *) df_scan->block_info)[index];
+ else
+ return NULL;
+}
+
+inline class df_rd_bb_info *
+df_rd_get_bb_info (unsigned int index)
+{
+ if (index < df_rd->block_info_size)
+ return &((class df_rd_bb_info *) df_rd->block_info)[index];
+ else
+ return NULL;
+}
+
+inline class df_lr_bb_info *
+df_lr_get_bb_info (unsigned int index)
+{
+ if (index < df_lr->block_info_size)
+ return &((class df_lr_bb_info *) df_lr->block_info)[index];
+ else
+ return NULL;
+}
+
+inline class df_md_bb_info *
+df_md_get_bb_info (unsigned int index)
+{
+ if (index < df_md->block_info_size)
+ return &((class df_md_bb_info *) df_md->block_info)[index];
+ else
+ return NULL;
+}
+
+inline class df_live_bb_info *
+df_live_get_bb_info (unsigned int index)
+{
+ if (index < df_live->block_info_size)
+ return &((class df_live_bb_info *) df_live->block_info)[index];
+ else
+ return NULL;
+}
+
+inline class df_word_lr_bb_info *
+df_word_lr_get_bb_info (unsigned int index)
+{
+ if (index < df_word_lr->block_info_size)
+ return &((class df_word_lr_bb_info *) df_word_lr->block_info)[index];
+ else
+ return NULL;
+}
+
+inline class df_mir_bb_info *
+df_mir_get_bb_info (unsigned int index)
+{
+ if (index < df_mir->block_info_size)
+ return &((class df_mir_bb_info *) df_mir->block_info)[index];
+ else
+ return NULL;
+}
+
+/* Get the live at out set for BB no matter what problem happens to be
+ defined. This function is used by the register allocators who
+ choose different dataflow problems depending on the optimization
+ level. */
+
+inline bitmap
+df_get_live_out (basic_block bb)
+{
+ gcc_checking_assert (df_lr);
+
+ if (df_live)
+ return DF_LIVE_OUT (bb);
+ else
+ return DF_LR_OUT (bb);
+}
+
+/* Get the live at in set for BB no matter what problem happens to be
+ defined. This function is used by the register allocators who
+ choose different dataflow problems depending on the optimization
+ level. */
+
+inline bitmap
+df_get_live_in (basic_block bb)
+{
+ gcc_checking_assert (df_lr);
+
+ if (df_live)
+ return DF_LIVE_IN (bb);
+ else
+ return DF_LR_IN (bb);
+}
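+
+/* Editorial sketch, not part of the imported header: callers can stay
+   agnostic about which live problem is active, e.g.
+   (mark_conflicts is a hypothetical callback):
+
+     if (bitmap_bit_p (df_get_live_out (bb), regno))
+       mark_conflicts (bb, regno);  */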
+
+/* Get basic block info. */
+/* Get the artificial defs for a basic block. */
+
+inline df_ref
+df_get_artificial_defs (unsigned int bb_index)
+{
+ return df_scan_get_bb_info (bb_index)->artificial_defs;
+}
+
+
+/* Get the artificial uses for a basic block. */
+
+inline df_ref
+df_get_artificial_uses (unsigned int bb_index)
+{
+ return df_scan_get_bb_info (bb_index)->artificial_uses;
+}
+
+/* If INSN defines exactly one register, return the associated reference,
+ otherwise return null. */
+
+inline df_ref
+df_single_def (const df_insn_info *info)
+{
+ df_ref defs = DF_INSN_INFO_DEFS (info);
+ return defs && !DF_REF_NEXT_LOC (defs) ? defs : NULL;
+}
+
+/* If INSN uses exactly one register, return the associated reference,
+ otherwise return null. */
+
+inline df_ref
+df_single_use (const df_insn_info *info)
+{
+ df_ref uses = DF_INSN_INFO_USES (info);
+ return uses && !DF_REF_NEXT_LOC (uses) ? uses : NULL;
+}
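+
+/* Editorial sketch, not part of the imported header: combining the
+   accessors above gives a cheap single-def test for an insn
+   (propagate is a hypothetical callback):
+
+     df_insn_info *info = DF_INSN_INFO_GET (insn);
+     df_ref def = df_single_def (info);
+     if (def && !DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL))
+       propagate (def);  */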
+
+/* web */
+
+struct web_entry_base
+{
+ private:
+ /* Reference to the parent in the union/find tree. */
+ web_entry_base *pred_pvt;
+
+ public:
+ /* Accessors. */
+ web_entry_base *pred () { return pred_pvt; }
+ void set_pred (web_entry_base *p) { pred_pvt = p; }
+
+ /* Find representative in union-find tree. */
+ web_entry_base *unionfind_root ();
+
+ /* Union with another set, returning TRUE if they are already unioned. */
+ friend bool unionfind_union (web_entry_base *first, web_entry_base *second);
+};
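+
+/* Editorial sketch, not part of the imported header: typical union-find
+   usage over an array of entries derived from web_entry_base:
+
+     if (!unionfind_union (&entries[a], &entries[b]))
+       merged_webs++;
+
+   where the increment runs when the two webs were distinct and have
+   just been merged, and unionfind_root () walks to (and returns) the
+   representative entry of a set.  */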
+
+#endif /* GCC_DF_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dfp.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dfp.h
new file mode 100644
index 0000000..fc20340
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dfp.h
@@ -0,0 +1,50 @@
+/* Decimal floating point support functions for GNU compiler.
+ Copyright (C) 2005-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DFP_H
+#define GCC_DFP_H
+
+/* Encode REAL_VALUE_TYPEs into 32/64/128-bit IEEE 754 encoded values. */
+void encode_decimal32 (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *);
+void encode_decimal64 (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *);
+void encode_decimal128 (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *);
+
+/* Decode 32/64/128-bit IEEE 754 encoded values into REAL_VALUE_TYPEs. */
+void decode_decimal32 (const struct real_format *, REAL_VALUE_TYPE *, const long *);
+void decode_decimal64 (const struct real_format *, REAL_VALUE_TYPE *, const long *);
+void decode_decimal128 (const struct real_format *, REAL_VALUE_TYPE *, const long *);
+
+/* Arithmetic and conversion functions. */
+int decimal_do_compare (const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, int);
+void decimal_real_from_string (REAL_VALUE_TYPE *, const char *);
+void decimal_round_for_format (const struct real_format *, REAL_VALUE_TYPE *);
+void decimal_real_convert (REAL_VALUE_TYPE *, const real_format *,
+ const REAL_VALUE_TYPE *);
+void decimal_real_to_decimal (char *, const REAL_VALUE_TYPE *, size_t, size_t, int);
+void decimal_do_fix_trunc (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *);
+void decimal_real_maxval (REAL_VALUE_TYPE *, int, machine_mode);
+wide_int decimal_real_to_integer (const REAL_VALUE_TYPE *, bool *, int);
+HOST_WIDE_INT decimal_real_to_integer (const REAL_VALUE_TYPE *);
+
+#ifdef TREE_CODE
+bool decimal_real_arithmetic (REAL_VALUE_TYPE *, enum tree_code, const REAL_VALUE_TYPE *,
+ const REAL_VALUE_TYPE *);
+#endif
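+
+/* Editorial sketch, not part of the imported header: round-tripping a
+   decimal literal through the helpers above (the trailing arguments
+   to decimal_real_to_decimal are illustrative):
+
+     REAL_VALUE_TYPE r;
+     char buf[64];
+     decimal_real_from_string (&r, "1.5E2");
+     decimal_real_to_decimal (buf, &r, sizeof buf, 0, 1);  */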
+
+#endif /* GCC_DFP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-client-data-hooks.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-client-data-hooks.h
new file mode 100644
index 0000000..5f8b9a2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-client-data-hooks.h
@@ -0,0 +1,105 @@
+/* Additional metadata about a client for a diagnostic context.
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DIAGNOSTIC_CLIENT_DATA_HOOKS_H
+#define GCC_DIAGNOSTIC_CLIENT_DATA_HOOKS_H
+
+class client_version_info;
+
+/* A bundle of additional metadata, owned by the diagnostic_context,
+ for querying things about the client, like version data. */
+
+class diagnostic_client_data_hooks
+{
+ public:
+ virtual ~diagnostic_client_data_hooks () {}
+
+ /* Get version info for this client, or NULL. */
+ virtual const client_version_info *get_any_version_info () const = 0;
+
+ /* Get the current logical_location for this client, or NULL. */
+ virtual const logical_location *get_current_logical_location () const = 0;
+
+ /* Get a sourceLanguage value for FILENAME, or return NULL.
+ See SARIF v2.1.0 Appendix J for suggested values. */
+ virtual const char *
+ maybe_get_sarif_source_language (const char *filename) const = 0;
+};
+
+/* Factory function for making an instance of diagnostic_client_data_hooks
+ for use in the compiler (i.e. with knowledge of "tree", access to
+ langhooks, etc). */
+
+extern diagnostic_client_data_hooks *make_compiler_data_hooks ();
+
+class diagnostic_client_plugin_info;
+
+/* Abstract base class for a diagnostic_context to get at
+ version information about the client. */
+
+class client_version_info
+{
+public:
+ class plugin_visitor
+ {
+ public:
+ virtual void on_plugin (const diagnostic_client_plugin_info &) = 0;
+ };
+
+ virtual ~client_version_info () {}
+
+ /* Get a string suitable for use as the value of the "name" property
+ (SARIF v2.1.0 section 3.19.8). */
+ virtual const char *get_tool_name () const = 0;
+
+ /* Create a string suitable for use as the value of the "fullName" property
+ (SARIF v2.1.0 section 3.19.9). */
+ virtual char *maybe_make_full_name () const = 0;
+
+ /* Get a string suitable for use as the value of the "version" property
+ (SARIF v2.1.0 section 3.19.13). */
+ virtual const char *get_version_string () const = 0;
+
+ /* Create a string suitable for use as the value of the "informationUri"
+ property (SARIF v2.1.0 section 3.19.17). */
+ virtual char *maybe_make_version_url () const = 0;
+
+ virtual void for_each_plugin (plugin_visitor &v) const = 0;
+};
+
+/* Abstract base class for a diagnostic_context to get at
+ information about a specific plugin within a client. */
+
+class diagnostic_client_plugin_info
+{
+public:
+ /* For use e.g. by SARIF "name" property (SARIF v2.1.0 section 3.19.8). */
+ virtual const char *get_short_name () const = 0;
+
+ /* For use e.g. by SARIF "fullName" property
+ (SARIF v2.1.0 section 3.19.9). */
+ virtual const char *get_full_name () const = 0;
+
+ /* For use e.g. by SARIF "version" property
+ (SARIF v2.1.0 section 3.19.13). */
+ virtual const char *get_version () const = 0;
+};
+
+#endif /* ! GCC_DIAGNOSTIC_CLIENT_DATA_HOOKS_H */
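
To illustrate the contract, a hypothetical minimal implementation of client_version_info (every name below is invented for the sketch; xstrdup is libiberty's allocating strdup, matching the "Create a string" accessors):

class toy_version_info : public client_version_info
{
public:
  const char *get_tool_name () const final override { return "toycc"; }
  char *maybe_make_full_name () const final override
  {
    return xstrdup ("toycc 1.0 (example build)");
  }
  const char *get_version_string () const final override { return "1.0"; }
  char *maybe_make_version_url () const final override
  {
    return NULL;  /* No "informationUri" for this toy client.  */
  }
  void for_each_plugin (plugin_visitor &) const final override
  {
    /* This toy client has no plugins to visit.  */
  }
};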
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-color.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-color.h
new file mode 100644
index 0000000..213fd5a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-color.h
@@ -0,0 +1,65 @@
+/* Copyright (C) 2013-2023 Free Software Foundation, Inc.
+ Contributed by Manuel Lopez-Ibanez <manu@gcc.gnu.org>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Based on code from: */
+/* grep.c - main driver file for grep.
+ Copyright (C) 1992-2023 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+ Written July 1992 by Mike Haertel. */
+
+#ifndef GCC_DIAGNOSTIC_COLOR_H
+#define GCC_DIAGNOSTIC_COLOR_H
+
+/* Whether to add color to diagnostics:
+ o DIAGNOSTICS_COLOR_NO: never
+ o DIAGNOSTICS_COLOR_YES: always
+ o DIAGNOSTICS_COLOR_AUTO: depending on the output stream. */
+typedef enum
+{
+ DIAGNOSTICS_COLOR_NO = 0,
+ DIAGNOSTICS_COLOR_YES = 1,
+ DIAGNOSTICS_COLOR_AUTO = 2
+} diagnostic_color_rule_t;
+
+const char *colorize_start (bool, const char *, size_t);
+const char *colorize_stop (bool);
+bool colorize_init (diagnostic_color_rule_t);
+
+inline const char *
+colorize_start (bool show_color, const char *name)
+{
+ return colorize_start (show_color, name, strlen (name));
+}
+
+#endif /* ! GCC_DIAGNOSTIC_COLOR_H */
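
A small usage sketch: colorize_start yields the escape-sequence prefix registered for a capability name such as "error" (the empty string when coloring is off), and colorize_stop yields the matching reset, so both splice directly into formatted output:

/* Print a highlighted "error:" marker when SHOW_COLOR is true.  */
void
print_colorized (FILE *out, bool show_color)
{
  fprintf (out, "%serror:%s something went wrong\n",
           colorize_start (show_color, "error"),
           colorize_stop (show_color));
}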
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-core.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-core.h
new file mode 100644
index 0000000..7334c79
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-core.h
@@ -0,0 +1,128 @@
+/* Declarations of core diagnostic functionality for code that does
+ not need to deal with diagnostic contexts or diagnostic info
+ structures.
+ Copyright (C) 1998-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DIAGNOSTIC_CORE_H
+#define GCC_DIAGNOSTIC_CORE_H
+
+#include "bversion.h"
+
+/* Constants used to discriminate diagnostics. */
+typedef enum
+{
+#define DEFINE_DIAGNOSTIC_KIND(K, msgid, C) K,
+#include "diagnostic.def"
+#undef DEFINE_DIAGNOSTIC_KIND
+ DK_LAST_DIAGNOSTIC_KIND,
+ /* This is used for tagging pragma pops in the diagnostic
+ classification history chain. */
+ DK_POP
+} diagnostic_t;
+
+/* RAII-style class for grouping related diagnostics. */
+
+class auto_diagnostic_group
+{
+ public:
+ auto_diagnostic_group ();
+ ~auto_diagnostic_group ();
+};
+
+/* Forward decl. */
+class diagnostic_metadata; /* See diagnostic-metadata.h. */
+
+extern const char *progname;
+
+extern const char *trim_filename (const char *);
+
+/* If we haven't already defined a front-end-specific diagnostics
+ style, use the generic one. */
+#ifndef GCC_DIAG_STYLE
+#define GCC_DIAG_STYLE __gcc_tdiag__
+#endif
+/* None of these functions are suitable for ATTRIBUTE_PRINTF, because
+ each language front end can extend them with its own set of format
+ specifiers. We must use custom format checks. */
+#if (CHECKING_P && GCC_VERSION >= 4001) || GCC_VERSION == BUILDING_GCC_VERSION
+#define ATTRIBUTE_GCC_DIAG(m, n) __attribute__ ((__format__ (GCC_DIAG_STYLE, m, n))) ATTRIBUTE_NONNULL(m)
+#else
+#define ATTRIBUTE_GCC_DIAG(m, n) ATTRIBUTE_NONNULL(m)
+#endif
+extern void internal_error (const char *, ...) ATTRIBUTE_GCC_DIAG(1,2)
+ ATTRIBUTE_NORETURN;
+extern void internal_error_no_backtrace (const char *, ...)
+ ATTRIBUTE_GCC_DIAG(1,2) ATTRIBUTE_NORETURN;
+/* Pass one of the OPT_W* from options.h as the first parameter. */
+extern bool warning (int, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3);
+extern bool warning_n (location_t, int, unsigned HOST_WIDE_INT,
+ const char *, const char *, ...)
+ ATTRIBUTE_GCC_DIAG(4,6) ATTRIBUTE_GCC_DIAG(5,6);
+extern bool warning_n (rich_location *, int, unsigned HOST_WIDE_INT,
+ const char *, const char *, ...)
+ ATTRIBUTE_GCC_DIAG(4, 6) ATTRIBUTE_GCC_DIAG(5, 6);
+extern bool warning_at (location_t, int, const char *, ...)
+ ATTRIBUTE_GCC_DIAG(3,4);
+extern bool warning_at (rich_location *, int, const char *, ...)
+ ATTRIBUTE_GCC_DIAG(3,4);
+extern bool warning_meta (rich_location *,
+ const diagnostic_metadata &, int,
+ const char *, ...)
+ ATTRIBUTE_GCC_DIAG(4,5);
+extern void error (const char *, ...) ATTRIBUTE_GCC_DIAG(1,2);
+extern void error_n (location_t, unsigned HOST_WIDE_INT, const char *,
+ const char *, ...)
+ ATTRIBUTE_GCC_DIAG(3,5) ATTRIBUTE_GCC_DIAG(4,5);
+extern void error_at (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3);
+extern void error_at (rich_location *, const char *, ...)
+ ATTRIBUTE_GCC_DIAG(2,3);
+extern void fatal_error (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3)
+ ATTRIBUTE_NORETURN;
+/* Pass one of the OPT_W* from options.h as the second parameter. */
+extern bool pedwarn (location_t, int, const char *, ...)
+ ATTRIBUTE_GCC_DIAG(3,4);
+extern bool pedwarn (rich_location *, int, const char *, ...)
+ ATTRIBUTE_GCC_DIAG(3,4);
+extern bool permerror (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3);
+extern bool permerror (rich_location *, const char *,
+ ...) ATTRIBUTE_GCC_DIAG(2,3);
+extern void sorry (const char *, ...) ATTRIBUTE_GCC_DIAG(1,2);
+extern void sorry_at (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3);
+extern void inform (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3);
+extern void inform (rich_location *, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3);
+extern void inform_n (location_t, unsigned HOST_WIDE_INT, const char *,
+ const char *, ...)
+ ATTRIBUTE_GCC_DIAG(3,5) ATTRIBUTE_GCC_DIAG(4,5);
+extern void verbatim (const char *, ...) ATTRIBUTE_GCC_DIAG(1,2);
+extern bool emit_diagnostic (diagnostic_t, location_t, int,
+ const char *, ...) ATTRIBUTE_GCC_DIAG(4,5);
+extern bool emit_diagnostic (diagnostic_t, rich_location *, int,
+ const char *, ...) ATTRIBUTE_GCC_DIAG(4,5);
+extern bool emit_diagnostic_valist (diagnostic_t, location_t, int, const char *,
+ va_list *) ATTRIBUTE_GCC_DIAG (4,0);
+extern bool seen_error (void);
+
+#ifdef BUFSIZ
+ /* N.B. Unlike all the others, fnotice is just gettext+fprintf, and
+ therefore it can have ATTRIBUTE_PRINTF. */
+extern void fnotice (FILE *, const char *, ...)
+ ATTRIBUTE_PRINTF_2;
+#endif
+
+#endif /* ! GCC_DIAGNOSTIC_CORE_H */
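
Typical front-end usage pairs a warning with a follow-up note, grouped so the output formats treat them as one logical diagnostic. A sketch (assumes tree.h for DECL_SOURCE_LOCATION; OPT_Wunused_variable comes from the generated options.h):

/* Warn about DECL at LOC, with a note when the warning fired.  */
void
report_unused (location_t loc, tree decl)
{
  auto_diagnostic_group d;
  if (warning_at (loc, OPT_Wunused_variable, "unused variable %qD", decl))
    inform (DECL_SOURCE_LOCATION (decl), "%qD declared here", decl);
}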
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-event-id.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-event-id.h
new file mode 100644
index 0000000..84f4b65
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-event-id.h
@@ -0,0 +1,61 @@
+/* A class for referring to events within a diagnostic_path.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DIAGNOSTIC_EVENT_ID_H
+#define GCC_DIAGNOSTIC_EVENT_ID_H
+
+/* A class for referring to events within a diagnostic_path.
+
+ They are stored as 0-based offsets into the events, but
+ printed (e.g. via %@) as 1-based numbers.
+
+ For example, a 3-event path has event offsets 0, 1, and 2,
+ which would be shown to the user as "(1)", "(2)" and "(3)".
+
+ This has its own header so that pretty-print.cc can use this
+ to implement "%@" without bringing in all of diagnostic_path
+ (which e.g. refers to "tree"). */
+
+class diagnostic_event_id_t
+{
+ public:
+ diagnostic_event_id_t () : m_index (UNKNOWN_EVENT_IDX) {}
+ diagnostic_event_id_t (int zero_based_idx) : m_index (zero_based_idx) {}
+
+ bool known_p () const { return m_index != UNKNOWN_EVENT_IDX; }
+
+ int one_based () const
+ {
+ gcc_assert (known_p ());
+ return m_index + 1;
+ }
+
+ private:
+ static const int UNKNOWN_EVENT_IDX = -1;
+ int m_index; // zero-based
+};
+
+/* A pointer to a diagnostic_event_id_t, for use with the "%@" format
+ code, which will print a 1-based representation for it, with suitable
+ colorization, e.g. "(1)".
+ The %@ format code requires that known_p be true for the event ID. */
+typedef diagnostic_event_id_t *diagnostic_event_id_ptr;
+
+#endif /* ! GCC_DIAGNOSTIC_EVENT_ID_H */
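
In practice the 0-based/1-based split looks like this (a sketch):

diagnostic_event_id_t first (0);  /* stored zero-based.  */
diagnostic_event_id_t unknown;    /* defaults to UNKNOWN_EVENT_IDX.  */

/* first.known_p ()    => true
   first.one_based ()  => 1, printed by the %@ format code as "(1)"
   unknown.known_p ()  => false; one_based () would trip gcc_assert.  */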
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-metadata.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-metadata.h
new file mode 100644
index 0000000..8e06c89
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-metadata.h
@@ -0,0 +1,85 @@
+/* Additional metadata for a diagnostic.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DIAGNOSTIC_METADATA_H
+#define GCC_DIAGNOSTIC_METADATA_H
+
+/* A bundle of additional metadata that can be associated with a
+ diagnostic.
+
+ This supports an optional CWE identifier, and zero or more
+ "rules". */
+
+class diagnostic_metadata
+{
+ public:
+ /* Abstract base class for referencing a rule that has been violated,
+ such as within a coding standard, or within a specification. */
+ class rule
+ {
+ public:
+ virtual char *make_description () const = 0;
+ virtual char *make_url () const = 0;
+ };
+
+ /* Concrete subclass. */
+ class precanned_rule : public rule
+ {
+ public:
+ precanned_rule (const char *desc, const char *url)
+ : m_desc (desc), m_url (url)
+ {}
+
+ char *make_description () const final override
+ {
+ return m_desc ? xstrdup (m_desc) : NULL;
+ }
+
+ char *make_url () const final override
+ {
+ return m_url ? xstrdup (m_url) : NULL;
+ }
+
+ private:
+ const char *m_desc;
+ const char *m_url;
+ };
+
+ diagnostic_metadata () : m_cwe (0) {}
+
+ void add_cwe (int cwe) { m_cwe = cwe; }
+ int get_cwe () const { return m_cwe; }
+
+ /* Associate R with the diagnostic. R must outlive
+ the metadata. */
+ void add_rule (const rule &r)
+ {
+ m_rules.safe_push (&r);
+ }
+
+ unsigned get_num_rules () const { return m_rules.length (); }
+ const rule &get_rule (unsigned idx) const { return *(m_rules[idx]); }
+
+ private:
+ int m_cwe;
+ auto_vec<const rule *> m_rules;
+};
+
+#endif /* ! GCC_DIAGNOSTIC_METADATA_H */
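
A sketch of wiring this into a diagnostic via warning_meta from diagnostic-core.h; the rule name and URL are hypothetical placeholders, and the static lifetime satisfies the "R must outlive the metadata" requirement:

void
warn_null_deref (rich_location *richloc)
{
  static const diagnostic_metadata::precanned_rule
    rule ("EXP34-C", "https://example.com/rules/EXP34-C");  /* hypothetical */

  diagnostic_metadata m;
  m.add_cwe (476);  /* CWE-476: NULL pointer dereference.  */
  m.add_rule (rule);
  warning_meta (richloc, m, OPT_Wanalyzer_null_dereference,
                "dereference of NULL pointer");
}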
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-path.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-path.h
new file mode 100644
index 0000000..9d9d629
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-path.h
@@ -0,0 +1,234 @@
+/* Paths through the code associated with a diagnostic.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DIAGNOSTIC_PATH_H
+#define GCC_DIAGNOSTIC_PATH_H
+
+#include "diagnostic.h" /* for ATTRIBUTE_GCC_DIAG. */
+#include "diagnostic-event-id.h"
+
+/* A diagnostic_path is an optional additional piece of metadata associated
+ with a diagnostic (via its rich_location).
+
+ It describes a sequence of events predicted by the compiler that
+ lead to the problem occurring, with their locations in the user's source,
+ and text descriptions.
+
+ For example, the following error has a 3-event path:
+
+ test.c: In function 'demo':
+ test.c:29:5: error: passing NULL as argument 1 to 'PyList_Append' which
+ requires a non-NULL parameter
+ 29 | PyList_Append(list, item);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~
+ 'demo': events 1-3
+ |
+ | 25 | list = PyList_New(0);
+ | | ^~~~~~~~~~~~~
+ | | |
+ | | (1) when 'PyList_New' fails, returning NULL
+ | 26 |
+ | 27 | for (i = 0; i < count; i++) {
+ | | ~~~
+ | | |
+ | | (2) when 'i < count'
+ | 28 | item = PyLong_FromLong(random());
+ | 29 | PyList_Append(list, item);
+ | | ~~~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (3) when calling 'PyList_Append', passing NULL from (1) as argument 1
+ |
+
+ The diagnostic-printing code has consolidated the path into a single
+ run of events, since all the events are near each other and within the same
+ function; more complicated examples (such as interprocedural paths)
+ might be printed as multiple runs of events. */
+
+/* Abstract base classes, describing events within a path, and the paths
+ themselves. */
+
+/* One event within a diagnostic_path. */
+
+class diagnostic_event
+{
+ public:
+ /* Enums for giving a sense of what this event means.
+ Roughly corresponds to SARIF v2.1.0 section 3.38.8. */
+ enum verb
+ {
+ VERB_unknown,
+
+ VERB_acquire,
+ VERB_release,
+ VERB_enter,
+ VERB_exit,
+ VERB_call,
+ VERB_return,
+ VERB_branch,
+
+ VERB_danger
+ };
+ enum noun
+ {
+ NOUN_unknown,
+
+ NOUN_taint,
+ NOUN_sensitive, // this one isn't in SARIF v2.1.0; filed as https://github.com/oasis-tcs/sarif-spec/issues/530
+ NOUN_function,
+ NOUN_lock,
+ NOUN_memory,
+ NOUN_resource
+ };
+ enum property
+ {
+ PROPERTY_unknown,
+
+ PROPERTY_true,
+ PROPERTY_false
+ };
+ /* A bundle of such enums, allowing for descriptions of the meaning of
+ an event, such as
+ - "acquire memory": meaning (VERB_acquire, NOUN_memory)
+ - "take true branch"": meaning (VERB_branch, PROPERTY_true)
+ - "return from function": meaning (VERB_return, NOUN_function)
+ etc, as per SARIF's threadFlowLocation "kinds" property
+ (SARIF v2.1.0 section 3.38.8). */
+ struct meaning
+ {
+ meaning ()
+ : m_verb (VERB_unknown),
+ m_noun (NOUN_unknown),
+ m_property (PROPERTY_unknown)
+ {
+ }
+ meaning (enum verb verb, enum noun noun)
+ : m_verb (verb), m_noun (noun), m_property (PROPERTY_unknown)
+ {
+ }
+ meaning (enum verb verb, enum property property)
+ : m_verb (verb), m_noun (NOUN_unknown), m_property (property)
+ {
+ }
+
+ void dump_to_pp (pretty_printer *pp) const;
+
+ static const char *maybe_get_verb_str (enum verb);
+ static const char *maybe_get_noun_str (enum noun);
+ static const char *maybe_get_property_str (enum property);
+
+ enum verb m_verb;
+ enum noun m_noun;
+ enum property m_property;
+ };
+
+ virtual ~diagnostic_event () {}
+
+ virtual location_t get_location () const = 0;
+
+ virtual tree get_fndecl () const = 0;
+
+ /* Stack depth, so that consumers can visualize the interprocedural
+ calls, returns, and frame nesting. */
+ virtual int get_stack_depth () const = 0;
+
+ /* Get a localized (and possibly colorized) description of this event. */
+ virtual label_text get_desc (bool can_colorize) const = 0;
+
+ /* Get a logical_location for this event, or NULL. */
+ virtual const logical_location *get_logical_location () const = 0;
+
+ virtual meaning get_meaning () const = 0;
+};
+
+/* Abstract base class for getting at a sequence of events. */
+
+class diagnostic_path
+{
+ public:
+ virtual ~diagnostic_path () {}
+ virtual unsigned num_events () const = 0;
+ virtual const diagnostic_event & get_event (int idx) const = 0;
+
+ bool interprocedural_p () const;
+
+private:
+ bool get_first_event_in_a_function (unsigned *out_idx) const;
+};
+
+/* Concrete subclasses. */
+
+/* A simple implementation of diagnostic_event. */
+
+class simple_diagnostic_event : public diagnostic_event
+{
+ public:
+ simple_diagnostic_event (location_t loc, tree fndecl, int depth,
+ const char *desc);
+ ~simple_diagnostic_event ();
+
+ location_t get_location () const final override { return m_loc; }
+ tree get_fndecl () const final override { return m_fndecl; }
+ int get_stack_depth () const final override { return m_depth; }
+ label_text get_desc (bool) const final override
+ {
+ return label_text::borrow (m_desc);
+ }
+ const logical_location *get_logical_location () const final override
+ {
+ return NULL;
+ }
+ meaning get_meaning () const final override
+ {
+ return meaning ();
+ }
+
+ private:
+ location_t m_loc;
+ tree m_fndecl;
+ int m_depth;
+ char *m_desc; // has been i18n-ed and formatted
+};
+
+/* A simple implementation of diagnostic_path, as a vector of
+ simple_diagnostic_event instances. */
+
+class simple_diagnostic_path : public diagnostic_path
+{
+ public:
+ simple_diagnostic_path (pretty_printer *event_pp)
+ : m_event_pp (event_pp) {}
+
+ unsigned num_events () const final override;
+ const diagnostic_event & get_event (int idx) const final override;
+
+ diagnostic_event_id_t add_event (location_t loc, tree fndecl, int depth,
+ const char *fmt, ...)
+ ATTRIBUTE_GCC_DIAG(5,6);
+
+ private:
+ auto_delete_vec<simple_diagnostic_event> m_events;
+
+ /* (for use by add_event). */
+ pretty_printer *m_event_pp;
+};
+
+extern void debug (diagnostic_path *path);
+
+#endif /* ! GCC_DIAGNOSTIC_PATH_H */
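
A sketch of building the three-event path from the header comment's example; it assumes rich_location::set_path (from libcpp's line-map.h) to attach the path, and the diagnostic using RICHLOC must be emitted before the path goes out of scope:

void
build_example_path (rich_location *richloc, pretty_printer *event_pp,
                    location_t loc1, location_t loc2, location_t loc3,
                    tree fndecl)
{
  /* EVENT_PP would normally be the diagnostic context's printer.  */
  simple_diagnostic_path path (event_pp);
  path.add_event (loc1, fndecl, 0, "when %qs fails, returning NULL",
                  "PyList_New");
  path.add_event (loc2, fndecl, 0, "when %<i < count%>");
  path.add_event (loc3, fndecl, 0,
                  "when calling %qs, passing NULL from (1) as argument 1",
                  "PyList_Append");
  richloc->set_path (&path);
}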
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-spec.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-spec.h
new file mode 100644
index 0000000..5632689
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-spec.h
@@ -0,0 +1,142 @@
+/* Language-independent APIs to enable/disable per-location warnings.
+
+ Copyright (C) 2021-2023 Free Software Foundation, Inc.
+ Contributed by Martin Sebor <msebor@redhat.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef DIAGNOSTIC_SPEC_H_INCLUDED
+#define DIAGNOSTIC_SPEC_H_INCLUDED
+
+#include "hash-map.h"
+
+/* A "bitset" of warning groups. */
+
+class nowarn_spec_t
+{
+public:
+ enum
+ {
+ /* Middle end warnings about invalid accesses. */
+ NW_ACCESS = 1 << 0,
+ /* Front end/lexical warnings. */
+ NW_LEXICAL = 1 << 1,
+ /* Warnings about null pointers. */
+ NW_NONNULL = 1 << 2,
+ /* Warnings about uninitialized reads. */
+ NW_UNINIT = 1 << 3,
+ /* Warnings about arithmetic overflow. */
+ NW_VFLOW = 1 << 4,
+ /* Warnings about dangling pointers. */
+ NW_DANGLING = 1 << 5,
+ /* All other unclassified warnings. */
+ NW_OTHER = 1 << 6,
+ /* Warnings about redundant calls. */
+ NW_REDUNDANT = 1 << 7,
+ /* All groups of warnings. */
+ NW_ALL = (NW_ACCESS | NW_LEXICAL | NW_NONNULL
+ | NW_UNINIT | NW_VFLOW | NW_DANGLING | NW_REDUNDANT | NW_OTHER)
+ };
+
+ nowarn_spec_t (): m_bits () { }
+
+ nowarn_spec_t (opt_code);
+
+ /* Return the raw bitset. */
+ operator unsigned() const
+ {
+ return m_bits;
+ }
+
+ /* Return true if the bitset is clear. */
+ bool operator!() const
+ {
+ return !m_bits;
+ }
+
+ /* Return the inverse of the bitset. */
+ nowarn_spec_t operator~() const
+ {
+ nowarn_spec_t res (*this);
+ res.m_bits &= ~NW_ALL;
+ return res;
+ }
+
+ /* Set *THIS to the bitwise OR of *THIS and RHS. */
+ nowarn_spec_t& operator|= (const nowarn_spec_t &rhs)
+ {
+ m_bits |= rhs.m_bits;
+ return *this;
+ }
+
+ /* Set *THIS to the bitwise AND of *THIS and RHS. */
+ nowarn_spec_t& operator&= (const nowarn_spec_t &rhs)
+ {
+ m_bits &= rhs.m_bits;
+ return *this;
+ }
+
+ /* Set *THIS to the bitwise exclusive OR of *THIS and RHS. */
+ nowarn_spec_t& operator^= (const nowarn_spec_t &rhs)
+ {
+ m_bits ^= rhs.m_bits;
+ return *this;
+ }
+
+private:
+ /* Bitset of warning groups. */
+ unsigned m_bits;
+};
+
+/* Return the bitwise OR of LHS and RHS. */
+
+inline nowarn_spec_t
+operator| (const nowarn_spec_t &lhs, const nowarn_spec_t &rhs)
+{
+ return nowarn_spec_t (lhs) |= rhs;
+}
+
+/* Return the bitwise AND of LHS and RHS. */
+
+inline nowarn_spec_t
+operator& (const nowarn_spec_t &lhs, const nowarn_spec_t &rhs)
+{
+ return nowarn_spec_t (lhs) &= rhs;
+}
+
+/* Return true if LHS is equal to RHS. */
+
+inline bool
+operator== (const nowarn_spec_t &lhs, const nowarn_spec_t &rhs)
+{
+ return static_cast<unsigned>(lhs) == static_cast<unsigned>(rhs);
+}
+
+/* Return true if LHS is not equal to RHS. */
+
+inline bool
+operator!= (const nowarn_spec_t &lhs, const nowarn_spec_t &rhs)
+{
+ return !(lhs == rhs);
+}
+
+typedef hash_map<location_hash, nowarn_spec_t> nowarn_map_t;
+
+/* A mapping from a 'location_t' to the warning spec set for it. */
+extern GTY(()) nowarn_map_t *nowarn_map;
+
+#endif // DIAGNOSTIC_SPEC_H_INCLUDED
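
A sketch of combining groups with the operators above; the opt_code constructor maps a warning option such as OPT_Wuninitialized to its NW_* group:

void
nowarn_example ()
{
  nowarn_spec_t spec = nowarn_spec_t (OPT_Wuninitialized)
                       | nowarn_spec_t (OPT_Wnonnull);

  /* Nonzero iff the NW_UNINIT bit survived the AND.  */
  if (spec & nowarn_spec_t (OPT_Wuninitialized))
    {
      /* Uninitialized-read warnings are suppressed here.  */
    }
}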
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-url.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-url.h
new file mode 100644
index 0000000..93f0a69
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic-url.h
@@ -0,0 +1,52 @@
+/* Copyright (C) 2019-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DIAGNOSTIC_URL_H
+#define GCC_DIAGNOSTIC_URL_H
+
+/* Whether to add URLs to diagnostics:
+ - DIAGNOSTICS_URL_NO: never
+ - DIAGNOSTICS_URL_YES: always
+ - DIAGNOSTICS_URL_AUTO: depending on the output stream. */
+typedef enum
+{
+ DIAGNOSTICS_URL_NO = 0,
+ DIAGNOSTICS_URL_YES = 1,
+ DIAGNOSTICS_URL_AUTO = 2
+} diagnostic_url_rule_t;
+
+/* Tells whether URLs should be emitted, and, if so, how to
+ terminate strings within the escape sequence. */
+enum diagnostic_url_format
+{
+ /* No URLs shall be emitted. */
+ URL_FORMAT_NONE,
+
+ /* Use ST string termination. */
+ URL_FORMAT_ST,
+
+ /* Use BEL string termination. */
+ URL_FORMAT_BEL
+};
+
+const diagnostic_url_format URL_FORMAT_DEFAULT = URL_FORMAT_BEL;
+
+extern diagnostic_url_format determine_url_format (diagnostic_url_rule_t);
+
+#endif /* ! GCC_DIAGNOSTIC_URL_H */
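
The two terminators correspond to the OSC 8 terminal-hyperlink convention. A hand-rolled sketch of the URL_FORMAT_BEL case (the URL_FORMAT_ST variant ends each escape with "\33\\" instead of "\a"):

/* Emit TEXT as a clickable link to URL on a capable terminal.  */
void
print_hyperlink (FILE *out, const char *url, const char *text)
{
  fprintf (out, "\33]8;;%s\a%s\33]8;;\a", url, text);
}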
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic.def
new file mode 100644
index 0000000..813b8da
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic.def
@@ -0,0 +1,55 @@
+/* Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* DK_UNSPECIFIED must be first so it has a value of zero. We never
+ assign this kind to an actual diagnostic, we only use this in
+ variables that can hold a kind, to mean they have yet to have a
+ kind specified. I.e. they're uninitialized. Within the diagnostic
+ machinery, this kind also means "don't change the existing kind",
+ meaning "no change is specified". */
+DEFINE_DIAGNOSTIC_KIND (DK_UNSPECIFIED, "", NULL)
+
+/* If a diagnostic is set to DK_IGNORED, it won't get reported at all.
+ This is used by the diagnostic machinery when it wants to disable a
+ diagnostic without disabling the option which causes it. */
+DEFINE_DIAGNOSTIC_KIND (DK_IGNORED, "", NULL)
+
+/* The remainder are real diagnostic types. */
+DEFINE_DIAGNOSTIC_KIND (DK_FATAL, "fatal error: ", "error")
+DEFINE_DIAGNOSTIC_KIND (DK_ICE, "internal compiler error: ", "error")
+DEFINE_DIAGNOSTIC_KIND (DK_ERROR, "error: ", "error")
+DEFINE_DIAGNOSTIC_KIND (DK_SORRY, "sorry, unimplemented: ", "error")
+DEFINE_DIAGNOSTIC_KIND (DK_WARNING, "warning: ", "warning")
+DEFINE_DIAGNOSTIC_KIND (DK_ANACHRONISM, "anachronism: ", "warning")
+DEFINE_DIAGNOSTIC_KIND (DK_NOTE, "note: ", "note")
+DEFINE_DIAGNOSTIC_KIND (DK_DEBUG, "debug: ", "note")
+
+/* For use when using the diagnostic_show_locus machinery to show
+ a range of events within a path. */
+DEFINE_DIAGNOSTIC_KIND (DK_DIAGNOSTIC_PATH, "path: ", "path")
+
+/* These two would be re-classified as DK_WARNING or DK_ERROR, so the
+prefix does not matter. */
+DEFINE_DIAGNOSTIC_KIND (DK_PEDWARN, "pedwarn: ", NULL)
+DEFINE_DIAGNOSTIC_KIND (DK_PERMERROR, "permerror: ", NULL)
+/* This one is just for counting DK_WARNING promoted to DK_ERROR
+ due to -Werror and -Werror=warning. */
+DEFINE_DIAGNOSTIC_KIND (DK_WERROR, "error: ", NULL)
+/* This is like DK_ICE, but backtrace is not printed. Used in the driver
+ when reporting fatal signal in the compiler. */
+DEFINE_DIAGNOSTIC_KIND (DK_ICE_NOBT, "internal compiler error: ", "error")
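
For reference, the X-macro include in diagnostic-core.h expands the definitions above into the diagnostic_t enum, roughly as follows (a sketch eliding the middle kinds; the enumerator order follows this file):

typedef enum
{
  DK_UNSPECIFIED,  /* from DEFINE_DIAGNOSTIC_KIND (DK_UNSPECIFIED, ...) */
  DK_IGNORED,
  DK_FATAL,
  DK_ICE,
  /* ... one enumerator per DEFINE_DIAGNOSTIC_KIND line ... */
  DK_ICE_NOBT,
  DK_LAST_DIAGNOSTIC_KIND,
  DK_POP
} diagnostic_t;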
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic.h
new file mode 100644
index 0000000..9a51097
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/diagnostic.h
@@ -0,0 +1,622 @@
+/* Various declarations for language-independent diagnostics subroutines.
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+ Contributed by Gabriel Dos Reis <gdr@codesourcery.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DIAGNOSTIC_H
+#define GCC_DIAGNOSTIC_H
+
+#include "pretty-print.h"
+#include "diagnostic-core.h"
+
+/* An enum for controlling what units to use for the column number
+ when diagnostics are output, used by the -fdiagnostics-column-unit option.
+ Tabs will be expanded or not according to the value of -ftabstop. The origin
+ (default 1) is controlled by -fdiagnostics-column-origin. */
+
+enum diagnostics_column_unit
+{
+ /* The default from GCC 11 onwards: display columns. */
+ DIAGNOSTICS_COLUMN_UNIT_DISPLAY,
+
+ /* The behavior in GCC 10 and earlier: simple bytes. */
+ DIAGNOSTICS_COLUMN_UNIT_BYTE
+};
+
+/* An enum for controlling how to print non-ASCII characters/bytes when
+ a diagnostic suggests escaping the source code on output. */
+
+enum diagnostics_escape_format
+{
+ /* Escape non-ASCII Unicode characters in the form <U+XXXX> and
+ non-UTF-8 bytes in the form <XX>. */
+ DIAGNOSTICS_ESCAPE_FORMAT_UNICODE,
+
+ /* Escape non-ASCII bytes in the form <XX> (thus showing the underlying
+ encoding of non-ASCII Unicode characters). */
+ DIAGNOSTICS_ESCAPE_FORMAT_BYTES
+};
+
+/* Enum for overriding the standard output format. */
+
+enum diagnostics_output_format
+{
+ /* The default: textual output. */
+ DIAGNOSTICS_OUTPUT_FORMAT_TEXT,
+
+ /* JSON-based output, to stderr. */
+ DIAGNOSTICS_OUTPUT_FORMAT_JSON_STDERR,
+
+ /* JSON-based output, to a file. */
+ DIAGNOSTICS_OUTPUT_FORMAT_JSON_FILE,
+
+ /* SARIF-based output, to stderr. */
+ DIAGNOSTICS_OUTPUT_FORMAT_SARIF_STDERR,
+
+ /* SARIF-based output, to a file. */
+ DIAGNOSTICS_OUTPUT_FORMAT_SARIF_FILE
+};
+
+/* An enum for controlling how diagnostic_paths should be printed. */
+enum diagnostic_path_format
+{
+ /* Don't print diagnostic_paths. */
+ DPF_NONE,
+
+ /* Print diagnostic_paths by emitting a separate "note" for every event
+ in the path. */
+ DPF_SEPARATE_EVENTS,
+
+ /* Print diagnostic_paths by consolidating events together where they
+ are close enough, and printing such runs of events with multiple
+ calls to diagnostic_show_locus, showing the individual events in
+ each run via labels in the source. */
+ DPF_INLINE_EVENTS
+};
+
+/* An enum for capturing values of GCC_EXTRA_DIAGNOSTIC_OUTPUT,
+ and for -fdiagnostics-parseable-fixits. */
+
+enum diagnostics_extra_output_kind
+{
+ /* No extra output, or an unrecognized value. */
+ EXTRA_DIAGNOSTIC_OUTPUT_none,
+
+ /* Emit fix-it hints using the "fixits-v1" format, equivalent to
+ -fdiagnostics-parseable-fixits. */
+ EXTRA_DIAGNOSTIC_OUTPUT_fixits_v1,
+
+ /* Emit fix-it hints using the "fixits-v2" format. */
+ EXTRA_DIAGNOSTIC_OUTPUT_fixits_v2
+};
+
+/* A diagnostic is described by the MESSAGE to send, the FILE and LINE of
+ its context and its KIND (ice, error, warning, note, ...) See complete
+ list in diagnostic.def. */
+struct diagnostic_info
+{
+ diagnostic_info ()
+ : message (), richloc (), metadata (), x_data (), kind (), option_index (),
+ m_iinfo ()
+ { }
+
+ /* Text to be formatted. */
+ text_info message;
+
+ /* The location at which the diagnostic is to be reported. */
+ rich_location *richloc;
+
+ /* An optional bundle of metadata associated with the diagnostic
+ (or NULL). */
+ const diagnostic_metadata *metadata;
+
+ /* Auxiliary data for client. */
+ void *x_data;
+ /* The kind of diagnostic it is about. */
+ diagnostic_t kind;
+ /* Which OPT_* directly controls this diagnostic. */
+ int option_index;
+
+ /* Inlining context containing locations for each call site along
+ the inlining stack. */
+ struct inlining_info
+ {
+ /* Locations along the inlining stack. */
+ auto_vec<location_t, 8> m_ilocs;
+ /* The abstract origin of the location. */
+ void *m_ao;
+ /* Set if every M_ILOCS element is in a system header. */
+ bool m_allsyslocs;
+ } m_iinfo;
+};
+
+/* Each time a diagnostic's classification is changed with a pragma,
+ we record the change and the location of the change in an array of
+ these structs. */
+struct diagnostic_classification_change_t
+{
+ location_t location;
+ int option;
+ diagnostic_t kind;
+};
+
+/* Forward declarations. */
+typedef void (*diagnostic_starter_fn) (diagnostic_context *,
+ diagnostic_info *);
+
+typedef void (*diagnostic_start_span_fn) (diagnostic_context *,
+ expanded_location);
+
+typedef void (*diagnostic_finalizer_fn) (diagnostic_context *,
+ diagnostic_info *,
+ diagnostic_t);
+
+class edit_context;
+namespace json { class value; }
+class diagnostic_client_data_hooks;
+class logical_location;
+
+/* This data structure bundles altogether any information relevant to
+ the context of a diagnostic message. */
+struct diagnostic_context
+{
+ /* Where most of the diagnostic formatting work is done. */
+ pretty_printer *printer;
+
+ /* Cache of source code. */
+ file_cache *m_file_cache;
+
+ /* The number of times we have issued diagnostics. */
+ int diagnostic_count[DK_LAST_DIAGNOSTIC_KIND];
+
+ /* True if it has been requested that warnings be treated as errors. */
+ bool warning_as_error_requested;
+
+ /* The number of option indexes that can be passed to warning() et
+ al. */
+ int n_opts;
+
+ /* For each option index that can be passed to warning() et al
+ (OPT_* from options.h when using this code with the core GCC
+ options), this array may contain a new kind that the diagnostic
+ should be changed to before reporting, or DK_UNSPECIFIED to leave
+ it as the reported kind, or DK_IGNORED to not report it at
+ all. */
+ diagnostic_t *classify_diagnostic;
+
+ /* History of all changes to the classifications above. This list
+ is stored in location-order, so we can search it, either
+ binary-wise or end-to-front, to find the most recent
+ classification for a given diagnostic, given the location of the
+ diagnostic. */
+ diagnostic_classification_change_t *classification_history;
+
+ /* The size of the above array. */
+ int n_classification_history;
+
+ /* For pragma push/pop. */
+ int *push_list;
+ int n_push;
+
+ /* True if we should print the source line with a caret indicating
+ the location. */
+ bool show_caret;
+
+ /* Maximum width of the source line printed. */
+ int caret_max_width;
+
+ /* Character used for caret diagnostics. */
+ char caret_chars[rich_location::STATICALLY_ALLOCATED_RANGES];
+
+ /* True if we should print any CWE identifiers associated with
+ diagnostics. */
+ bool show_cwe;
+
+ /* True if we should print any rules associated with diagnostics. */
+ bool show_rules;
+
+ /* How diagnostic_path objects should be printed. */
+ enum diagnostic_path_format path_format;
+
+ /* True if we should print stack depths when printing diagnostic paths. */
+ bool show_path_depths;
+
+ /* True if we should print the command line option which controls
+ each diagnostic, if known. */
+ bool show_option_requested;
+
+ /* True if we should raise a SIGABRT on errors. */
+ bool abort_on_error;
+
+ /* True if we should show the column number on diagnostics. */
+ bool show_column;
+
+ /* True if pedwarns are errors. */
+ bool pedantic_errors;
+
+ /* True if permerrors are warnings. */
+ bool permissive;
+
+ /* The index of the option to associate with turning permerrors into
+ warnings. */
+ int opt_permissive;
+
+ /* True if errors are fatal. */
+ bool fatal_errors;
+
+ /* True if all warnings should be disabled. */
+ bool dc_inhibit_warnings;
+
+ /* True if warnings should be given in system headers. */
+ bool dc_warn_system_headers;
+
+ /* Maximum number of errors to report. */
+ int max_errors;
+
+ /* This function is called before any message is printed out. It is
+ responsible for preparing message prefix and such. For example, it
+ might say:
+ In file included from "/usr/local/include/curses.h:5:
+ from "/home/gdr/src/nifty_printer.h:56:
+ ...
+ */
+ diagnostic_starter_fn begin_diagnostic;
+
+ /* This function is called by diagnostic_show_locus in between
+ disjoint spans of source code, so that the context can print
+ something to indicate that a new span of source code has begun. */
+ diagnostic_start_span_fn start_span;
+
+ /* This function is called after the diagnostic message is printed. */
+ diagnostic_finalizer_fn end_diagnostic;
+
+ /* Client hook to report an internal error. */
+ void (*internal_error) (diagnostic_context *, const char *, va_list *);
+
+ /* Client hook to say whether the option controlling a diagnostic is
+ enabled. Returns nonzero if enabled, zero if disabled. */
+ int (*option_enabled) (int, unsigned, void *);
+
+ /* Client information to pass as second argument to
+ option_enabled. */
+ void *option_state;
+
+ /* Client hook to return the name of an option that controls a
+ diagnostic. Returns malloced memory. The first diagnostic_t
+ argument is the kind of diagnostic before any reclassification
+ (of warnings as errors, etc.); the second is the kind after any
+ reclassification. May return NULL if no name is to be printed.
+ May be passed 0 as well as the index of a particular option. */
+ char *(*option_name) (diagnostic_context *, int, diagnostic_t, diagnostic_t);
+
+ /* Client hook to return a URL describing the option that controls
+ a diagnostic. Returns malloced memory. May return NULL if no URL
+ is available. May be passed 0 as well as the index of a
+ particular option. */
+ char *(*get_option_url) (diagnostic_context *, int);
+
+ void (*print_path) (diagnostic_context *, const diagnostic_path *);
+ json::value *(*make_json_for_path) (diagnostic_context *, const diagnostic_path *);
+
+ /* Auxiliary data for client. */
+ void *x_data;
+
+ /* Used to detect that the last caret was printed at the same location. */
+ location_t last_location;
+
+ /* Used to detect when the input file stack has changed since last
+ described. */
+ const line_map_ordinary *last_module;
+
+ int lock;
+
+ /* A copy of lang_hooks.option_lang_mask (). */
+ unsigned lang_mask;
+
+ bool inhibit_notes_p;
+
+ /* When printing source code, should the characters at carets and ranges
+ be colorized? (assuming colorization is on at all).
+ This should be true for frontends that generate range information
+ (so that the ranges of code are colorized),
+ and false for frontends that merely specify points within the
+ source code (to avoid e.g. colorizing just the first character in
+ a token, which would look strange). */
+ bool colorize_source_p;
+
+ /* When printing source code, should labelled ranges be printed? */
+ bool show_labels_p;
+
+ /* When printing source code, should there be a left-hand margin
+ showing line numbers? */
+ bool show_line_numbers_p;
+
+ /* If printing source code, what should the minimum width of the margin
+ be? Line numbers will be right-aligned, and padded to this width. */
+ int min_margin_width;
+
+ /* Usable by plugins; if true, print a debugging ruler above the
+ source output. */
+ bool show_ruler_p;
+
+ /* True if -freport-bug option is used. */
+ bool report_bug;
+
+ /* Used to specify additional diagnostic output to be emitted after the
+ rest of the diagnostic. This is for implementing
+ -fdiagnostics-parseable-fixits and GCC_EXTRA_DIAGNOSTIC_OUTPUT. */
+ enum diagnostics_extra_output_kind extra_output_kind;
+
+ /* What units to use when outputting the column number. */
+ enum diagnostics_column_unit column_unit;
+
+ /* The origin for the column number (1-based or 0-based typically). */
+ int column_origin;
+
+ /* The size of the tabstop for tab expansion. */
+ int tabstop;
+
+ /* How non-ASCII/non-printable bytes should be escaped when
+ a diagnostic suggests escaping the source code on output. */
+ enum diagnostics_escape_format escape_format;
+
+ /* If non-NULL, an edit_context to which fix-it hints should be
+ applied, for generating patches. */
+ edit_context *edit_context_ptr;
+
+ /* How many diagnostic_group instances are currently alive. */
+ int diagnostic_group_nesting_depth;
+
+ /* How many diagnostics have been emitted since the bottommost
+ diagnostic_group was pushed. */
+ int diagnostic_group_emission_count;
+
+ /* Optional callbacks for handling diagnostic groups. */
+
+ /* If non-NULL, this will be called immediately before the first
+ time a diagnostic is emitted within a stack of groups. */
+ void (*begin_group_cb) (diagnostic_context * context);
+
+ /* If non-NULL, this will be called when a stack of groups is
+ popped if any diagnostics were emitted within that group. */
+ void (*end_group_cb) (diagnostic_context * context);
+
+ /* Callback for final cleanup. */
+ void (*final_cb) (diagnostic_context *context);
+
+ /* Callback to set the locations of call sites along the inlining
+ stack corresponding to a diagnostic location. Needed to traverse
+ the BLOCK_SUPERCONTEXT() chain hanging off the LOCATION_BLOCK()
+ of a diagnostic's location. */
+ void (*set_locations_cb)(diagnostic_context *, diagnostic_info *);
+
+ /* Optional callback for attempting to handle ICEs gracefully. */
+ void (*ice_handler_cb) (diagnostic_context *context);
+
+ /* Include files that diagnostic_report_current_module has already listed the
+ include path for. */
+ hash_set<location_t, false, location_hash> *includes_seen;
+
+ /* A bundle of hooks for providing data to the context about its client
+ e.g. version information, plugins, etc.
+ Used by SARIF output to give metadata about the client that's
+ producing diagnostics. */
+ diagnostic_client_data_hooks *m_client_data_hooks;
+};
+
+inline void
+diagnostic_inhibit_notes (diagnostic_context * context)
+{
+ context->inhibit_notes_p = true;
+}
+
+
+/* Client supplied function to announce a diagnostic. */
+#define diagnostic_starter(DC) (DC)->begin_diagnostic
+
+/* Client supplied function called after a diagnostic message is
+ displayed. */
+#define diagnostic_finalizer(DC) (DC)->end_diagnostic
+
+/* Extension hooks for client. */
+#define diagnostic_context_auxiliary_data(DC) (DC)->x_data
+#define diagnostic_info_auxiliary_data(DI) (DI)->x_data
+
+/* Same as pp_format_decoder. Works on 'diagnostic_context *'. */
+#define diagnostic_format_decoder(DC) ((DC)->printer->format_decoder)
+
+/* Same as output_prefixing_rule. Works on 'diagnostic_context *'. */
+#define diagnostic_prefixing_rule(DC) ((DC)->printer->wrapping.rule)
+
+/* Raise SIGABRT on any diagnostic of severity DK_ERROR or higher. */
+#define diagnostic_abort_on_error(DC) \
+ (DC)->abort_on_error = true
+
+/* This diagnostic_context is used by front-ends that directly output
+ diagnostic messages without going through `error', `warning',
+ and similar functions. */
+extern diagnostic_context *global_dc;
+
+/* Returns whether the diagnostic framework has been initialized already and is
+ ready for use. */
+#define diagnostic_ready_p() (global_dc->printer != NULL)
+
+/* The total count of a KIND of diagnostics emitted so far. */
+#define diagnostic_kind_count(DC, DK) (DC)->diagnostic_count[(int) (DK)]
+
+/* The number of errors that have been issued so far. Ideally, these
+ would take a diagnostic_context as an argument. */
+#define errorcount diagnostic_kind_count (global_dc, DK_ERROR)
+/* Similarly, but for warnings. */
+#define warningcount diagnostic_kind_count (global_dc, DK_WARNING)
+/* Similarly, but for warnings promoted to errors. */
+#define werrorcount diagnostic_kind_count (global_dc, DK_WERROR)
+/* Similarly, but for sorrys. */
+#define sorrycount diagnostic_kind_count (global_dc, DK_SORRY)
+
+/* Returns nonzero if warnings should be emitted. */
+#define diagnostic_report_warnings_p(DC, LOC) \
+ (!(DC)->dc_inhibit_warnings \
+ && !(in_system_header_at (LOC) && !(DC)->dc_warn_system_headers))
+
+/* Override the option index to be used for reporting a
+ diagnostic. */
+
+inline void
+diagnostic_override_option_index (diagnostic_info *info, int optidx)
+{
+ info->option_index = optidx;
+}
+
+/* Diagnostic related functions. */
+extern void diagnostic_initialize (diagnostic_context *, int);
+extern void diagnostic_color_init (diagnostic_context *, int value = -1);
+extern void diagnostic_urls_init (diagnostic_context *, int value = -1);
+extern void diagnostic_finish (diagnostic_context *);
+extern void diagnostic_report_current_module (diagnostic_context *, location_t);
+extern void diagnostic_show_locus (diagnostic_context *,
+ rich_location *richloc,
+ diagnostic_t diagnostic_kind);
+extern void diagnostic_show_any_path (diagnostic_context *, diagnostic_info *);
+
+/* Because we read source files a second time after the frontend did it the
+ first time, we need to know how the frontend handled things like character
+ set conversion and UTF-8 BOM stripping, in order to make everything
+ consistent. This function needs to be called by each frontend that requires
+ non-default behavior, to inform the diagnostics infrastructure how input is
+ to be processed. The default behavior is to do no conversion and not to
+ strip a UTF-8 BOM.
+
+ The callback should return the input charset to be used to convert the given
+ file's contents to UTF-8, or it should return NULL if no conversion is needed
+ for this file. SHOULD_SKIP_BOM only applies in case no conversion was
+ performed, and if true, it will cause a UTF-8 BOM to be skipped at the
+ beginning of the file. (In case a conversion was performed, the BOM is
+ rather skipped as part of the conversion process.) */
+
+void diagnostic_initialize_input_context (diagnostic_context *context,
+ diagnostic_input_charset_callback ccb,
+ bool should_skip_bom);
+
+/* Force diagnostics controlled by OPTIDX to be kind KIND. */
+extern diagnostic_t diagnostic_classify_diagnostic (diagnostic_context *,
+ int /* optidx */,
+ diagnostic_t /* kind */,
+ location_t);
+extern void diagnostic_push_diagnostics (diagnostic_context *, location_t);
+extern void diagnostic_pop_diagnostics (diagnostic_context *, location_t);
+extern bool diagnostic_report_diagnostic (diagnostic_context *,
+ diagnostic_info *);
+#ifdef ATTRIBUTE_GCC_DIAG
+extern void diagnostic_set_info (diagnostic_info *, const char *, va_list *,
+ rich_location *, diagnostic_t) ATTRIBUTE_GCC_DIAG(2,0);
+extern void diagnostic_set_info_translated (diagnostic_info *, const char *,
+ va_list *, rich_location *,
+ diagnostic_t)
+ ATTRIBUTE_GCC_DIAG(2,0);
+extern void diagnostic_append_note (diagnostic_context *, location_t,
+ const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
+#endif
+extern char *diagnostic_build_prefix (diagnostic_context *, const diagnostic_info *);
+void default_diagnostic_starter (diagnostic_context *, diagnostic_info *);
+void default_diagnostic_start_span_fn (diagnostic_context *,
+ expanded_location);
+void default_diagnostic_finalizer (diagnostic_context *, diagnostic_info *,
+ diagnostic_t);
+void diagnostic_set_caret_max_width (diagnostic_context *context, int value);
+void diagnostic_action_after_output (diagnostic_context *, diagnostic_t);
+void diagnostic_check_max_errors (diagnostic_context *, bool flush = false);
+
+void diagnostic_file_cache_fini (void);
+
+int get_terminal_width (void);
+
+/* Return the location associated with this diagnostic. Parameter WHICH
+ specifies which location. By default, expand the first one. */
+
+inline location_t
+diagnostic_location (const diagnostic_info * diagnostic, int which = 0)
+{
+ return diagnostic->message.get_location (which);
+}
+
+/* Return the number of locations to be printed in DIAGNOSTIC. */
+
+inline unsigned int
+diagnostic_num_locations (const diagnostic_info * diagnostic)
+{
+ return diagnostic->message.m_richloc->get_num_locations ();
+}
+
+/* Expand the location of this diagnostic. Use this function for
+ consistency. Parameter WHICH specifies which location. By default,
+ expand the first one. */
+
+inline expanded_location
+diagnostic_expand_location (const diagnostic_info * diagnostic, int which = 0)
+{
+ return diagnostic->richloc->get_expanded_location (which);
+}
+
+/* This is, in effect, the right-side margin of a caret line: we print
+ at least this many characters after the position pointed at by the
+ caret. */
+const int CARET_LINE_MARGIN = 10;
+
+/* Return true if the two locations can be represented within the same
+ caret line. This is used to build a prefix and also to determine
+ whether to print one or two caret lines. */
+
+inline bool
+diagnostic_same_line (const diagnostic_context *context,
+ expanded_location s1, expanded_location s2)
+{
+ return s2.column && s1.line == s2.line
+ && context->caret_max_width - CARET_LINE_MARGIN > abs (s1.column - s2.column);
+}
+
+extern const char *diagnostic_get_color_for_kind (diagnostic_t kind);
+extern int diagnostic_converted_column (diagnostic_context *context,
+ expanded_location s);
+
+/* Pure text formatting support functions. */
+extern char *file_name_as_prefix (diagnostic_context *, const char *);
+
+extern char *build_message_string (const char *, ...) ATTRIBUTE_PRINTF_1;
+
+extern void diagnostic_output_format_init (diagnostic_context *,
+ const char *base_file_name,
+ enum diagnostics_output_format);
+extern void diagnostic_output_format_init_json_stderr (diagnostic_context *context);
+extern void diagnostic_output_format_init_json_file (diagnostic_context *context,
+ const char *base_file_name);
+extern void diagnostic_output_format_init_sarif_stderr (diagnostic_context *context);
+extern void diagnostic_output_format_init_sarif_file (diagnostic_context *context,
+ const char *base_file_name);
+
+/* Compute the number of digits in the decimal representation of an integer. */
+extern int num_digits (int);
+
+extern json::value *json_from_expanded_location (diagnostic_context *context,
+ location_t loc);
+
+extern bool warning_enabled_at (location_t, int);
+
+extern char *get_cwe_url (int cwe);
+
+#endif /* ! GCC_DIAGNOSTIC_H */
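
A sketch of driving the classification machinery above, as the #pragma GCC diagnostic push/pop implementation does:

void
treat_unused_as_error_between (location_t start, location_t end)
{
  diagnostic_push_diagnostics (global_dc, start);
  diagnostic_classify_diagnostic (global_dc, OPT_Wunused_variable,
                                  DK_ERROR, start);
  /* Diagnostics reported between START and END now treat
     -Wunused-variable as an error.  */
  diagnostic_pop_diagnostics (global_dc, end);
}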
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/digraph.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/digraph.h
new file mode 100644
index 0000000..c880739
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/digraph.h
@@ -0,0 +1,246 @@
+/* Template classes for directed graphs.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DIGRAPH_H
+#define GCC_DIGRAPH_H
+
+#include "diagnostic.h"
+#include "tree-diagnostic.h" /* for default_tree_printer. */
+#include "graphviz.h"
+
+/* Templates for a family of classes: digraph, node, edge, and cluster.
+ This assumes a traits type with the following typedefs:
+ node_t: the node class
+ edge_t: the edge class
+ dump_args_t: additional args for dot-dumps
+ cluster_t: the cluster class (for use when generating .dot files).
+
+ Using a template allows for typesafe nodes and edges: a node's
+ predecessor and successor edges can be of a node-specific edge
+ subclass, without needing casting. */
+
+/* Abstract base class for a node in a directed graph. */
+
+template <typename GraphTraits>
+class dnode
+{
+ public:
+ typedef typename GraphTraits::edge_t edge_t;
+ typedef typename GraphTraits::dump_args_t dump_args_t;
+
+ virtual ~dnode () {}
+ virtual void dump_dot (graphviz_out *gv, const dump_args_t &args) const = 0;
+
+ auto_vec<edge_t *> m_preds;
+ auto_vec<edge_t *> m_succs;
+};
+
+/* Abstract base class for an edge in a directed graph. */
+
+template <typename GraphTraits>
+class dedge
+{
+ public:
+ typedef typename GraphTraits::node_t node_t;
+ typedef typename GraphTraits::dump_args_t dump_args_t;
+
+ dedge (node_t *src, node_t *dest)
+ : m_src (src), m_dest (dest) {}
+
+ virtual ~dedge () {}
+
+ virtual void dump_dot (graphviz_out *gv, const dump_args_t &args) const = 0;
+
+ node_t *const m_src;
+ node_t *const m_dest;
+};
+
+/* Abstract base class for a directed graph.
+ This class maintains the vectors of nodes and edges,
+ and owns the nodes and edges. */
+
+template <typename GraphTraits>
+class digraph
+{
+ public:
+ typedef typename GraphTraits::node_t node_t;
+ typedef typename GraphTraits::edge_t edge_t;
+ typedef typename GraphTraits::dump_args_t dump_args_t;
+ typedef typename GraphTraits::cluster_t cluster_t;
+
+ digraph () {}
+ virtual ~digraph () {}
+
+ void dump_dot_to_pp (pretty_printer *pp,
+ cluster_t *root_cluster,
+ const dump_args_t &args) const;
+ void dump_dot_to_file (FILE *fp,
+ cluster_t *root_cluster,
+ const dump_args_t &args) const;
+ void dump_dot (const char *path,
+ cluster_t *root_cluster,
+ const dump_args_t &args) const;
+
+ void add_node (node_t *node);
+ void add_edge (edge_t *edge);
+
+ auto_delete_vec<node_t> m_nodes;
+ auto_delete_vec<edge_t> m_edges;
+};
+
+/* Abstract base class for splitting dnodes into hierarchical clusters
+ in the generated .dot file.
+
+ See "Subgraphs and Clusters" within
+ https://www.graphviz.org/doc/info/lang.html
+ and e.g.
+ https://graphviz.gitlab.io/_pages/Gallery/directed/cluster.html
+
+ If a root_cluster is passed to dump_dot*, then all nodes will be
+ added to it at the start of dumping, via calls to add_node.
+
+ The root cluster can organize the nodes into a hierarchy of
+ child clusters.
+
+ After all nodes are added to the root cluster, dump_dot will then
+ be called on it (and not on the nodes themselves). */
+
+template <typename GraphTraits>
+class cluster
+{
+ public:
+ typedef typename GraphTraits::node_t node_t;
+ typedef typename GraphTraits::dump_args_t dump_args_t;
+
+ virtual ~cluster () {}
+
+ virtual void add_node (node_t *node) = 0;
+
+ /* Recursively dump the cluster, all nodes, and child clusters. */
+ virtual void dump_dot (graphviz_out *gv, const dump_args_t &) const = 0;
+};
+
+/* Write .dot information for this graph to PP, passing ARGS to the nodes
+ and edges.
+ If ROOT_CLUSTER is non-NULL, use it to organize the nodes into clusters. */
+
+template <typename GraphTraits>
+inline void
+digraph<GraphTraits>::dump_dot_to_pp (pretty_printer *pp,
+ cluster_t *root_cluster,
+ const dump_args_t &args) const
+{
+ graphviz_out gv (pp);
+
+ pp_string (pp, "digraph \"");
+ pp_string (pp, "base");
+ pp_string (pp, "\" {\n");
+
+ gv.indent ();
+
+ pp_string (pp, "overlap=false;\n");
+ pp_string (pp, "compound=true;\n");
+
+ /* If using clustering, emit all nodes via clusters. */
+ if (root_cluster)
+ {
+ int i;
+ node_t *n;
+ FOR_EACH_VEC_ELT (m_nodes, i, n)
+ root_cluster->add_node (n);
+ root_cluster->dump_dot (&gv, args);
+ }
+ else
+ {
+ /* Otherwise, display all nodes at top level. */
+ int i;
+ node_t *n;
+ FOR_EACH_VEC_ELT (m_nodes, i, n)
+ n->dump_dot (&gv, args);
+ }
+
+ /* Edges. */
+ int i;
+ edge_t *e;
+ FOR_EACH_VEC_ELT (m_edges, i, e)
+ e->dump_dot (&gv, args);
+
+ /* Terminate "digraph" */
+ gv.outdent ();
+ pp_string (pp, "}");
+ pp_newline (pp);
+}
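+
+/* The output produced by dump_dot_to_pp has roughly this shape
+   (illustrative only; the node and edge statements come from the
+   dump_dot implementations of the graph being dumped):
+
+     digraph "base" {
+       overlap=false;
+       compound=true;
+       ... node statements, possibly grouped into clusters ...
+       ... edge statements ...
+     }  */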
+
+/* Write .dot information for this graph to FP, passing ARGS to the nodes
+ and edges.
+ If ROOT_CLUSTER is non-NULL, use it to organize the nodes into clusters. */
+
+template <typename GraphTraits>
+inline void
+digraph<GraphTraits>::dump_dot_to_file (FILE *fp,
+ cluster_t *root_cluster,
+ const dump_args_t &args) const
+{
+ pretty_printer pp;
+  /* Use the default tree printer as format decoder (see tree-diagnostic.h).  */
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp.buffer->stream = fp;
+ dump_dot_to_pp (&pp, root_cluster, args);
+ pp_flush (&pp);
+}
+
+/* Write .dot information for this graph to a file at PATH, passing ARGS
+ to the nodes and edges.
+ If ROOT_CLUSTER is non-NULL, use it to organize the nodes into clusters. */
+
+template <typename GraphTraits>
+inline void
+digraph<GraphTraits>::dump_dot (const char *path,
+ cluster_t *root_cluster,
+ const dump_args_t &args) const
+{
+ FILE *fp = fopen (path, "w");
+ dump_dot_to_file (fp, root_cluster, args);
+ fclose (fp);
+}
+
+/* Add NODE to this DIGRAPH, taking ownership. */
+
+template <typename GraphTraits>
+inline void
+digraph<GraphTraits>::add_node (node_t *node)
+{
+ m_nodes.safe_push (node);
+}
+
+/* Add EDGE to this digraph, and to the preds/succs of its endpoints.
+ Take ownership of EDGE. */
+
+template <typename GraphTraits>
+inline void
+digraph<GraphTraits>::add_edge (edge_t *edge)
+{
+ m_edges.safe_push (edge);
+ edge->m_dest->m_preds.safe_push (edge);
+ edge->m_src->m_succs.safe_push (edge);
+}
+
+#endif /* GCC_DIGRAPH_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dojump.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dojump.h
new file mode 100644
index 0000000..5217a9a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dojump.h
@@ -0,0 +1,82 @@
+/* Export function prototypes from dojump.cc.
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DOJUMP_H
+#define GCC_DOJUMP_H
+
+/* At the start of a function, record that we have no previously-pushed
+ arguments waiting to be popped. */
+extern void init_pending_stack_adjust (void);
+
+/* Discard any pending stack adjustment. */
+extern void discard_pending_stack_adjust (void);
+
+/* When exiting from function, if safe, clear out any pending stack adjust
+ so the adjustment won't get done. */
+extern void clear_pending_stack_adjust (void);
+
+/* Pop any previously-pushed arguments that have not been popped yet. */
+extern void do_pending_stack_adjust (void);
+
+/* Struct for saving/restoring of pending_stack_adjust/stack_pointer_delta
+ values. */
+
+class saved_pending_stack_adjust
+{
+public:
+ /* Saved value of pending_stack_adjust. */
+ poly_int64 x_pending_stack_adjust;
+
+ /* Saved value of stack_pointer_delta. */
+ poly_int64 x_stack_pointer_delta;
+};
+
+/* Remember pending_stack_adjust/stack_pointer_delta.
+ To be used around code that may call do_pending_stack_adjust (),
+ but the generated code could be discarded e.g. using delete_insns_since. */
+
+extern void save_pending_stack_adjust (saved_pending_stack_adjust *);
+
+/* Restore the saved pending_stack_adjust/stack_pointer_delta. */
+
+extern void restore_pending_stack_adjust (saved_pending_stack_adjust *);
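+
+/* A typical pattern looks like this (a hypothetical sketch; LAST and
+   MUST_DISCARD stand in for caller-specific state):
+
+     saved_pending_stack_adjust save;
+     save_pending_stack_adjust (&save);
+     ... emit tentative code that may call do_pending_stack_adjust () ...
+     if (must_discard)
+       {
+         delete_insns_since (last);
+         restore_pending_stack_adjust (&save);
+       }  */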
+
+extern bool split_comparison (enum rtx_code, machine_mode,
+ enum rtx_code *, enum rtx_code *);
+
+/* Generate code to evaluate EXP and jump to LABEL if the value is nonzero. */
+extern void jumpif (tree exp, rtx_code_label *label, profile_probability prob);
+extern void jumpif_1 (enum tree_code, tree, tree, rtx_code_label *,
+ profile_probability);
+
+/* Generate code to evaluate EXP and jump to LABEL if the value is zero. */
+extern void jumpifnot (tree exp, rtx_code_label *label,
+ profile_probability prob);
+extern void jumpifnot_1 (enum tree_code, tree, tree, rtx_code_label *,
+ profile_probability);
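+
+/* For example, expansion code might emit a guarded region like this (a
+   hypothetical sketch; COND is a tree expression built by the caller):
+
+     rtx_code_label *over = gen_label_rtx ();
+     jumpifnot (cond, over, profile_probability::even ());
+     ... emit code executed only when COND is nonzero ...
+     emit_label (over);  */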
+
+extern void do_compare_rtx_and_jump (rtx, rtx, enum rtx_code, int, tree,
+ machine_mode, rtx, rtx_code_label *,
+ rtx_code_label *, profile_probability);
+
+extern void do_compare_rtx_and_jump (rtx, rtx, enum rtx_code, int,
+ machine_mode, rtx, rtx_code_label *,
+ rtx_code_label *, profile_probability);
+
+#endif /* GCC_DOJUMP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dominance.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dominance.h
new file mode 100644
index 0000000..ddac0f5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dominance.h
@@ -0,0 +1,94 @@
+/* Calculate (post)dominators header file.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DOMINANCE_H
+#define GCC_DOMINANCE_H
+
+enum cdi_direction
+{
+ CDI_DOMINATORS = 1,
+ CDI_POST_DOMINATORS = 2
+};
+
+/* State of dominance information. */
+
+enum dom_state
+{
+ DOM_NONE, /* Not computed at all. */
+ DOM_NO_FAST_QUERY, /* The data is OK, but the fast query data are not usable. */
+ DOM_OK /* Everything is ok. */
+};
+
+extern void calculate_dominance_info (enum cdi_direction, bool = true);
+extern void calculate_dominance_info_for_region (enum cdi_direction,
+ vec<basic_block>);
+extern void free_dominance_info (function *, enum cdi_direction);
+extern void free_dominance_info (enum cdi_direction);
+extern void free_dominance_info_for_region (function *,
+ enum cdi_direction,
+ vec<basic_block>);
+extern basic_block get_immediate_dominator (enum cdi_direction, basic_block);
+extern void set_immediate_dominator (enum cdi_direction, basic_block,
+ basic_block);
+extern auto_vec<basic_block> get_dominated_by (enum cdi_direction, basic_block);
+extern auto_vec<basic_block> get_dominated_by_region (enum cdi_direction,
+ basic_block *,
+ unsigned);
+extern auto_vec<basic_block> get_dominated_to_depth (enum cdi_direction,
+ basic_block, int);
+extern auto_vec<basic_block> get_all_dominated_blocks (enum cdi_direction,
+ basic_block);
+extern void redirect_immediate_dominators (enum cdi_direction, basic_block,
+ basic_block);
+extern basic_block nearest_common_dominator (enum cdi_direction,
+ basic_block, basic_block);
+extern basic_block nearest_common_dominator_for_set (enum cdi_direction,
+ bitmap);
+extern bool dominated_by_p (enum cdi_direction, const_basic_block,
+ const_basic_block);
+unsigned bb_dom_dfs_in (enum cdi_direction, basic_block);
+unsigned bb_dom_dfs_out (enum cdi_direction, basic_block);
+extern void verify_dominators (enum cdi_direction);
+
+/* Verify invariants of computed dominance information, if internal consistency
+ checks are enabled. */
+
+inline void
+checking_verify_dominators (cdi_direction dir)
+{
+ if (flag_checking)
+ verify_dominators (dir);
+}
+
+basic_block recompute_dominator (enum cdi_direction, basic_block);
+extern void iterate_fix_dominators (enum cdi_direction,
+ vec<basic_block> &, bool);
+extern void add_to_dominance_info (enum cdi_direction, basic_block);
+extern void delete_from_dominance_info (enum cdi_direction, basic_block);
+extern basic_block first_dom_son (enum cdi_direction, basic_block);
+extern basic_block next_dom_son (enum cdi_direction, basic_block);
+extern enum dom_state dom_info_state (function *, enum cdi_direction);
+extern enum dom_state dom_info_state (enum cdi_direction);
+extern void set_dom_info_availability (enum cdi_direction, enum dom_state);
+extern bool dom_info_available_p (function *, enum cdi_direction);
+extern bool dom_info_available_p (enum cdi_direction);
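+
+/* Typical usage in a pass (a hypothetical sketch; BB, USE_BB and DEF_BB
+   are basic blocks owned by the caller):
+
+     calculate_dominance_info (CDI_DOMINATORS);
+     basic_block idom = get_immediate_dominator (CDI_DOMINATORS, bb);
+     if (dominated_by_p (CDI_DOMINATORS, use_bb, def_bb))
+       ...;
+     free_dominance_info (CDI_DOMINATORS);  */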
+
+#endif /* GCC_DOMINANCE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/domwalk.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/domwalk.h
new file mode 100644
index 0000000..d518d08
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/domwalk.h
@@ -0,0 +1,115 @@
+/* Generic dominator tree walker
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Diego Novillo <dnovillo@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DOM_WALK_H
+#define GCC_DOM_WALK_H
+
+/**
+ * This is the main class for the dominator walker. It is expected that
+ * consumers will have a custom class inheriting from it, which will override
+ * at least one of before_dom_children and after_dom_children to implement the
+ * custom behavior.
+ */
+class dom_walker
+{
+public:
+ static const edge STOP;
+
+ /* An enum for determining whether the dom walk should be constrained to
+ blocks reachable by executable edges. */
+
+ enum reachability
+ {
+ /* Walk all blocks within the CFG. */
+ ALL_BLOCKS,
+
+ /* Use REACHABLE_BLOCKS when your subclass can discover that some edges
+ are not executable.
+
+ If a subclass can discover that a COND, SWITCH or GOTO has a static
+ target in the before_dom_children callback, the taken edge should
+ be returned. The generic walker will clear EDGE_EXECUTABLE on all
+ edges it can determine are not executable.
+
+ With REACHABLE_BLOCKS, EDGE_EXECUTABLE will be set on every edge in
+ the dom_walker ctor; the flag will then be cleared on edges that are
+ determined to be not executable. */
+ REACHABLE_BLOCKS,
+
+ /* Identical to REACHABLE_BLOCKS, but the initial state of EDGE_EXECUTABLE
+ will instead be preserved in the ctor, allowing for information about
+ non-executable edges to be merged in from an earlier analysis (and
+ potentially for additional edges to be marked as non-executable). */
+ REACHABLE_BLOCKS_PRESERVING_FLAGS
+ };
+
+ /* You can provide a mapping of basic-block index to RPO if you
+ have that readily available or you do multiple walks. If you
+ specify NULL as BB_INDEX_TO_RPO this mapping will be computed
+ lazily at walk time. If you specify -1 dominator children will
+ not be walked in RPO order. */
+ dom_walker (cdi_direction direction, enum reachability = ALL_BLOCKS,
+ int *bb_index_to_rpo = NULL);
+
+ ~dom_walker ();
+
+ /* Walk the dominator tree. */
+ void walk (basic_block);
+
+ /* Function to call before the recursive walk of the dominator children.
+
+ Return value is the always taken edge if the block has multiple outgoing
+ edges, NULL otherwise. When skipping unreachable blocks, the walker
+ uses the taken edge information to clear EDGE_EXECUTABLE on the other
+ edges, exposing unreachable blocks. A NULL return value means all
+ outgoing edges should still be considered executable. A return value
+ of STOP means to stop the domwalk from processing dominated blocks from
+ here. This can be used to process a SEME region only (note domwalk
+ will still do work linear in function size). */
+ virtual edge before_dom_children (basic_block) { return NULL; }
+
+ /* Function to call after the recursive walk of the dominator children. */
+ virtual void after_dom_children (basic_block) {}
+
+private:
+ /* This is the direction of the dominator tree we want to walk. i.e.,
+ if it is set to CDI_DOMINATORS, then we walk the dominator tree,
+ if it is set to CDI_POST_DOMINATORS, then we walk the post
+ dominator tree. */
+ const ENUM_BITFIELD (cdi_direction) m_dom_direction : 2;
+ const ENUM_BITFIELD (reachability) m_reachability : 2;
+ bool m_user_bb_to_rpo;
+ basic_block m_unreachable_dom;
+ int *m_bb_to_rpo;
+
+ /* Query whether or not the given block is reachable or not. */
+ bool bb_reachable (struct function *, basic_block);
+
+ /* Given an unreachable block, propagate that property to outgoing
+ and possibly incoming edges for the block. Typically called after
+ determining a block is unreachable in the before_dom_children
+ callback. */
+ void propagate_unreachable_to_edges (basic_block, FILE *, dump_flags_t);
+};
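+
+/* A minimal walker might look like this (a hypothetical sketch; the
+   class name and per-block work are illustrative):
+
+     class my_dom_walker : public dom_walker
+     {
+     public:
+       my_dom_walker () : dom_walker (CDI_DOMINATORS) {}
+
+       edge before_dom_children (basic_block bb) final override
+       {
+         ... per-block work, done before visiting dominated blocks ...
+         return NULL;
+       }
+     };
+
+     my_dom_walker walker;
+     walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));  */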
+
+extern void set_all_edges_as_executable (function *fn);
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/double-int.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/double-int.h
new file mode 100644
index 0000000..41abe84
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/double-int.h
@@ -0,0 +1,470 @@
+/* Operations with long integers.
+ Copyright (C) 2006-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef DOUBLE_INT_H
+#define DOUBLE_INT_H
+
+/* A large integer is currently represented as a pair of HOST_WIDE_INTs.
+ It therefore represents a number with precision of
+ 2 * HOST_BITS_PER_WIDE_INT bits (it is however possible that the
+ internal representation will change, if numbers with greater precision
+ are needed, so the users should not rely on it). The representation does
+ not contain any information about signedness of the represented value, so
+ it can be used to represent both signed and unsigned numbers. For
+ operations where the results depend on signedness (division, comparisons),
+ it must be specified separately. For each such operation, there are three
+ versions of the function -- double_int_op, that takes an extra UNS argument
+ giving the signedness of the values, and double_int_sop and double_int_uop
+ that stand for its specializations for signed and unsigned values.
+
+   You may also represent numbers with smaller precision using double_int.
+   You however need to use double_int_ext (which fills in the bits of the
+   number over the prescribed precision with zeros or with the sign bit) before
+   operations that do not perform arithmetic modulo 2^precision (comparisons,
+   division), and possibly before storing the results, if you want to keep
+   them in some canonical form.  In general, the signedness of double_int_ext
+   should match the signedness of the operation.
+
+ ??? The components of double_int differ in signedness mostly for
+ historical reasons (they replace an older structure used to represent
+ numbers with precision higher than HOST_WIDE_INT). It might be less
+ confusing to have them both signed or both unsigned. */
+
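+/* For example (a hypothetical sketch): before dividing two double_ints
+   that hold 32-bit signed quantities, extend them, since division does
+   not operate modulo 2^precision:
+
+     double_int a = x.sext (32), b = y.sext (32);
+     double_int quot = a.sdiv (b, TRUNC_DIV_EXPR);  */
+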
+struct double_int
+{
+ /* Normally, we would define constructors to create instances.
+ Two things prevent us from doing so.
+ First, defining a constructor makes the class non-POD in C++03,
+ and we certainly want double_int to be a POD.
+     Second, the GCC coding conventions prefer explicit conversion,
+ and explicit conversion operators are not available until C++11. */
+
+ static double_int from_uhwi (unsigned HOST_WIDE_INT cst);
+ static double_int from_shwi (HOST_WIDE_INT cst);
+ static double_int from_pair (HOST_WIDE_INT high, unsigned HOST_WIDE_INT low);
+
+  /* Construct from a buffer of length LEN.  BUFFER will be read according
+     to byte endianness and word endianness.  */
+ static double_int from_buffer (const unsigned char *buffer, int len);
+
+ /* No copy assignment operator or destructor to keep the type a POD. */
+
+ /* There are some special value-creation static member functions. */
+
+ static double_int mask (unsigned prec);
+ static double_int max_value (unsigned int prec, bool uns);
+ static double_int min_value (unsigned int prec, bool uns);
+
+ /* The following functions are mutating operations. */
+
+ double_int &operator ++ (); // prefix
+ double_int &operator -- (); // prefix
+ double_int &operator *= (double_int);
+ double_int &operator += (double_int);
+ double_int &operator -= (double_int);
+ double_int &operator &= (double_int);
+ double_int &operator ^= (double_int);
+ double_int &operator |= (double_int);
+
+ /* The following functions are non-mutating operations. */
+
+ /* Conversion functions. */
+
+ HOST_WIDE_INT to_shwi () const;
+ unsigned HOST_WIDE_INT to_uhwi () const;
+
+ /* Conversion query functions. */
+
+ bool fits_uhwi () const;
+ bool fits_shwi () const;
+ bool fits_hwi (bool uns) const;
+
+ /* Attribute query functions. */
+
+ int trailing_zeros () const;
+ int popcount () const;
+
+ /* Arithmetic query operations. */
+
+ bool multiple_of (double_int, bool, double_int *) const;
+
+ /* Arithmetic operation functions. */
+
+ /* The following operations perform arithmetics modulo 2^precision, so you
+ do not need to call .ext between them, even if you are representing
+ numbers with precision less than HOST_BITS_PER_DOUBLE_INT bits. */
+
+ double_int set_bit (unsigned) const;
+ double_int mul_with_sign (double_int, bool unsigned_p, bool *overflow) const;
+ double_int wide_mul_with_sign (double_int, bool unsigned_p,
+ double_int *higher, bool *overflow) const;
+ double_int add_with_sign (double_int, bool unsigned_p, bool *overflow) const;
+ double_int sub_with_overflow (double_int, bool *overflow) const;
+ double_int neg_with_overflow (bool *overflow) const;
+
+ double_int operator * (double_int) const;
+ double_int operator + (double_int) const;
+ double_int operator - (double_int) const;
+ double_int operator - () const;
+ double_int operator ~ () const;
+ double_int operator & (double_int) const;
+ double_int operator | (double_int) const;
+ double_int operator ^ (double_int) const;
+ double_int and_not (double_int) const;
+
+ double_int lshift (HOST_WIDE_INT count) const;
+ double_int lshift (HOST_WIDE_INT count, unsigned int prec, bool arith) const;
+ double_int rshift (HOST_WIDE_INT count) const;
+ double_int rshift (HOST_WIDE_INT count, unsigned int prec, bool arith) const;
+ double_int alshift (HOST_WIDE_INT count, unsigned int prec) const;
+ double_int arshift (HOST_WIDE_INT count, unsigned int prec) const;
+ double_int llshift (HOST_WIDE_INT count, unsigned int prec) const;
+ double_int lrshift (HOST_WIDE_INT count, unsigned int prec) const;
+ double_int lrotate (HOST_WIDE_INT count, unsigned int prec) const;
+ double_int rrotate (HOST_WIDE_INT count, unsigned int prec) const;
+
+ /* You must ensure that double_int::ext is called on the operands
+ of the following operations, if the precision of the numbers
+ is less than HOST_BITS_PER_DOUBLE_INT bits. */
+
+ double_int div (double_int, bool, unsigned) const;
+ double_int sdiv (double_int, unsigned) const;
+ double_int udiv (double_int, unsigned) const;
+ double_int mod (double_int, bool, unsigned) const;
+ double_int smod (double_int, unsigned) const;
+ double_int umod (double_int, unsigned) const;
+ double_int divmod_with_overflow (double_int, bool, unsigned,
+ double_int *, bool *) const;
+ double_int divmod (double_int, bool, unsigned, double_int *) const;
+ double_int sdivmod (double_int, unsigned, double_int *) const;
+ double_int udivmod (double_int, unsigned, double_int *) const;
+
+ /* Precision control functions. */
+
+ double_int ext (unsigned prec, bool uns) const;
+ double_int zext (unsigned prec) const;
+ double_int sext (unsigned prec) const;
+
+ /* Comparative functions. */
+
+ bool is_zero () const;
+ bool is_one () const;
+ bool is_minus_one () const;
+ bool is_negative () const;
+
+ int cmp (double_int b, bool uns) const;
+ int ucmp (double_int b) const;
+ int scmp (double_int b) const;
+
+ bool ult (double_int b) const;
+ bool ule (double_int b) const;
+ bool ugt (double_int b) const;
+ bool slt (double_int b) const;
+ bool sle (double_int b) const;
+ bool sgt (double_int b) const;
+
+ double_int max (double_int b, bool uns);
+ double_int smax (double_int b);
+ double_int umax (double_int b);
+
+ double_int min (double_int b, bool uns);
+ double_int smin (double_int b);
+ double_int umin (double_int b);
+
+ bool operator == (double_int cst2) const;
+ bool operator != (double_int cst2) const;
+
+ /* Please migrate away from using these member variables publicly. */
+
+ unsigned HOST_WIDE_INT low;
+ HOST_WIDE_INT high;
+};
+
+#define HOST_BITS_PER_DOUBLE_INT (2 * HOST_BITS_PER_WIDE_INT)
+
+/* Constructors and conversions. */
+
+/* Constructs double_int from integer CST. The bits over the precision of
+ HOST_WIDE_INT are filled with the sign bit. */
+
+inline double_int
+double_int::from_shwi (HOST_WIDE_INT cst)
+{
+ double_int r;
+ r.low = (unsigned HOST_WIDE_INT) cst;
+ r.high = cst < 0 ? -1 : 0;
+ return r;
+}
+
+/* Some useful constants. */
+/* FIXME(crowl): Maybe remove after converting callers?
+ The problem is that a named constant would not be as optimizable,
+ while the functional syntax is more verbose. */
+
+#define double_int_minus_one (double_int::from_shwi (-1))
+#define double_int_zero (double_int::from_shwi (0))
+#define double_int_one (double_int::from_shwi (1))
+#define double_int_two (double_int::from_shwi (2))
+#define double_int_ten (double_int::from_shwi (10))
+
+/* Constructs double_int from unsigned integer CST. The bits over the
+ precision of HOST_WIDE_INT are filled with zeros. */
+
+inline double_int
+double_int::from_uhwi (unsigned HOST_WIDE_INT cst)
+{
+ double_int r;
+ r.low = cst;
+ r.high = 0;
+ return r;
+}
+
+inline double_int
+double_int::from_pair (HOST_WIDE_INT high, unsigned HOST_WIDE_INT low)
+{
+ double_int r;
+ r.low = low;
+ r.high = high;
+ return r;
+}
+
+inline double_int &
+double_int::operator ++ ()
+{
+ *this += double_int_one;
+ return *this;
+}
+
+inline double_int &
+double_int::operator -- ()
+{
+ *this -= double_int_one;
+ return *this;
+}
+
+inline double_int &
+double_int::operator &= (double_int b)
+{
+ *this = *this & b;
+ return *this;
+}
+
+inline double_int &
+double_int::operator ^= (double_int b)
+{
+ *this = *this ^ b;
+ return *this;
+}
+
+inline double_int &
+double_int::operator |= (double_int b)
+{
+ *this = *this | b;
+ return *this;
+}
+
+/* Returns value of CST as a signed number.  CST must satisfy
+   double_int::fits_shwi.  */
+
+inline HOST_WIDE_INT
+double_int::to_shwi () const
+{
+ return (HOST_WIDE_INT) low;
+}
+
+/* Returns value of CST as an unsigned number.  CST must satisfy
+   double_int::fits_uhwi.  */
+
+inline unsigned HOST_WIDE_INT
+double_int::to_uhwi () const
+{
+ return low;
+}
+
+/* Returns true if CST fits in unsigned HOST_WIDE_INT. */
+
+inline bool
+double_int::fits_uhwi () const
+{
+ return high == 0;
+}
+
+/* Logical operations. */
+
+/* Returns ~A. */
+
+inline double_int
+double_int::operator ~ () const
+{
+ double_int result;
+ result.low = ~low;
+ result.high = ~high;
+ return result;
+}
+
+/* Returns A | B. */
+
+inline double_int
+double_int::operator | (double_int b) const
+{
+ double_int result;
+ result.low = low | b.low;
+ result.high = high | b.high;
+ return result;
+}
+
+/* Returns A & B. */
+
+inline double_int
+double_int::operator & (double_int b) const
+{
+ double_int result;
+ result.low = low & b.low;
+ result.high = high & b.high;
+ return result;
+}
+
+/* Returns A & ~B. */
+
+inline double_int
+double_int::and_not (double_int b) const
+{
+ double_int result;
+ result.low = low & ~b.low;
+ result.high = high & ~b.high;
+ return result;
+}
+
+/* Returns A ^ B. */
+
+inline double_int
+double_int::operator ^ (double_int b) const
+{
+ double_int result;
+ result.low = low ^ b.low;
+ result.high = high ^ b.high;
+ return result;
+}
+
+void dump_double_int (FILE *, double_int, bool);
+
+#define ALL_ONES HOST_WIDE_INT_M1U
+
+/* The operands of the following comparison functions must be processed
+ with double_int_ext, if their precision is less than
+ HOST_BITS_PER_DOUBLE_INT bits. */
+
+/* Returns true if CST is zero. */
+
+inline bool
+double_int::is_zero () const
+{
+ return low == 0 && high == 0;
+}
+
+/* Returns true if CST is one. */
+
+inline bool
+double_int::is_one () const
+{
+ return low == 1 && high == 0;
+}
+
+/* Returns true if CST is minus one. */
+
+inline bool
+double_int::is_minus_one () const
+{
+ return low == ALL_ONES && high == -1;
+}
+
+/* Returns true if CST is negative. */
+
+inline bool
+double_int::is_negative () const
+{
+ return high < 0;
+}
+
+/* Returns true if CST1 == CST2. */
+
+inline bool
+double_int::operator == (double_int cst2) const
+{
+ return low == cst2.low && high == cst2.high;
+}
+
+/* Returns true if CST1 != CST2. */
+
+inline bool
+double_int::operator != (double_int cst2) const
+{
+ return low != cst2.low || high != cst2.high;
+}
+
+/* Return number of set bits of CST. */
+
+inline int
+double_int::popcount () const
+{
+ return popcount_hwi (high) + popcount_hwi (low);
+}
+
+#ifndef GENERATOR_FILE
+/* Conversion to and from GMP integer representations. */
+
+void mpz_set_double_int (mpz_t, double_int, bool);
+double_int mpz_get_double_int (const_tree, mpz_t, bool);
+#endif
+
+namespace wi
+{
+ template <>
+ struct int_traits <double_int>
+ {
+ static const enum precision_type precision_type = CONST_PRECISION;
+ static const bool host_dependent_precision = true;
+ static const unsigned int precision = HOST_BITS_PER_DOUBLE_INT;
+ static unsigned int get_precision (const double_int &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const double_int &);
+ };
+}
+
+inline unsigned int
+wi::int_traits <double_int>::get_precision (const double_int &)
+{
+ return precision;
+}
+
+inline wi::storage_ref
+wi::int_traits <double_int>::decompose (HOST_WIDE_INT *scratch, unsigned int p,
+ const double_int &x)
+{
+ gcc_checking_assert (precision == p);
+ scratch[0] = x.low;
+ if ((x.high == 0 && scratch[0] >= 0) || (x.high == -1 && scratch[0] < 0))
+ return wi::storage_ref (scratch, 1, precision);
+ scratch[1] = x.high;
+ return wi::storage_ref (scratch, 2, precision);
+}
+
+#endif /* DOUBLE_INT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dump-context.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dump-context.h
new file mode 100644
index 0000000..c9aeac1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dump-context.h
@@ -0,0 +1,305 @@
+/* Support code for handling the various dump_* calls in dumpfile.h
+ Copyright (C) 2018-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_DUMP_CONTEXT_H
+#define GCC_DUMP_CONTEXT_H 1
+
+#include "dumpfile.h"
+#include "pretty-print.h"
+#include "selftest.h"
+#include "optinfo.h"
+
+class optrecord_json_writer;
+namespace selftest { class temp_dump_context; }
+class debug_dump_context;
+
+/* A class for handling the various dump_* calls.
+
+ In particular, this class has responsibility for consolidating
+ the "dump_*" calls into optinfo instances (delimited by "dump_*_loc"
+ calls), and emitting them.
+
+ Putting this in a class (rather than as global state) allows
+ for selftesting of this code. */
+
+class dump_context
+{
+ friend class selftest::temp_dump_context;
+ friend class debug_dump_context;
+
+ public:
+ static dump_context &get () { return *s_current; }
+
+ ~dump_context ();
+
+ void refresh_dumps_are_enabled ();
+
+ void dump_loc (const dump_metadata_t &metadata,
+ const dump_user_location_t &loc);
+ void dump_loc_immediate (dump_flags_t dump_kind,
+ const dump_user_location_t &loc);
+
+ void dump_gimple_stmt (const dump_metadata_t &metadata,
+ dump_flags_t extra_dump_flags,
+ gimple *gs, int spc);
+
+ void dump_gimple_stmt_loc (const dump_metadata_t &metadata,
+ const dump_user_location_t &loc,
+ dump_flags_t extra_dump_flags,
+ gimple *gs, int spc);
+
+ void dump_gimple_expr (const dump_metadata_t &metadata,
+ dump_flags_t extra_dump_flags,
+ gimple *gs, int spc);
+
+ void dump_gimple_expr_loc (const dump_metadata_t &metadata,
+ const dump_user_location_t &loc,
+ dump_flags_t extra_dump_flags,
+ gimple *gs,
+ int spc);
+
+ void dump_generic_expr (const dump_metadata_t &metadata,
+ dump_flags_t extra_dump_flags,
+ tree t);
+
+ void dump_generic_expr_loc (const dump_metadata_t &metadata,
+ const dump_user_location_t &loc,
+ dump_flags_t extra_dump_flags,
+ tree t);
+
+ void dump_printf_va (const dump_metadata_t &metadata, const char *format,
+ va_list *ap) ATTRIBUTE_GCC_DUMP_PRINTF (3, 0);
+
+ void dump_printf_loc_va (const dump_metadata_t &metadata,
+ const dump_user_location_t &loc,
+ const char *format, va_list *ap)
+ ATTRIBUTE_GCC_DUMP_PRINTF (4, 0);
+
+ template<unsigned int N, typename C>
+ void dump_dec (const dump_metadata_t &metadata, const poly_int<N, C> &value);
+
+ void dump_symtab_node (const dump_metadata_t &metadata, symtab_node *node);
+
+ /* Managing nested scopes. */
+ unsigned int get_scope_depth () const;
+ void begin_scope (const char *name,
+ const dump_user_location_t &user_location,
+ const dump_impl_location_t &impl_location);
+ void end_scope ();
+
+ /* Should optinfo instances be created?
+ All creation of optinfos should be guarded by this predicate.
+ Return true if any optinfo destinations are active. */
+ bool optinfo_enabled_p () const;
+
+ bool optimization_records_enabled_p () const
+ {
+ return m_json_writer != NULL;
+ }
+ void set_json_writer (optrecord_json_writer *writer);
+ void finish_any_json_writer ();
+
+ void end_any_optinfo ();
+
+ void emit_optinfo (const optinfo *info);
+ void emit_item (optinfo_item *item, dump_flags_t dump_kind);
+
+ bool apply_dump_filter_p (dump_flags_t dump_kind, dump_flags_t filter) const;
+
+ private:
+ optinfo &ensure_pending_optinfo (const dump_metadata_t &metadata);
+ optinfo &begin_next_optinfo (const dump_metadata_t &metadata,
+ const dump_user_location_t &loc);
+
+  /* The current nesting depth of dump scopes, for showing nesting
+     via indentation.  */
+ unsigned int m_scope_depth;
+
+ /* The optinfo currently being accumulated since the last dump_*_loc call,
+ if any. */
+ optinfo *m_pending;
+
+ /* If -fsave-optimization-record is enabled, the heap-allocated JSON writer
+ instance, otherwise NULL. */
+ optrecord_json_writer *m_json_writer;
+
+ /* For use in selftests: if non-NULL, then items are to be printed
+ to this, using the given flags. */
+ pretty_printer *m_test_pp;
+ dump_flags_t m_test_pp_flags;
+
+ /* The currently active dump_context, for use by the dump_* API calls. */
+ static dump_context *s_current;
+
+ /* The default active context. */
+ static dump_context s_default;
+};
+
+/* A subclass of pretty_printer for implementing dump_context::dump_printf_va.
+ In particular, the formatted chunks are captured as optinfo_item instances,
+ thus retaining metadata about the entities being dumped (e.g. source
+ locations), rather than just as plain text. */
+
+class dump_pretty_printer : public pretty_printer
+{
+public:
+ dump_pretty_printer (dump_context *context, dump_flags_t dump_kind);
+
+ void emit_items (optinfo *dest);
+
+private:
+ /* Information on an optinfo_item that was generated during phase 2 of
+ formatting. */
+ class stashed_item
+ {
+ public:
+ stashed_item (const char **buffer_ptr_, optinfo_item *item_)
+ : buffer_ptr (buffer_ptr_), item (item_) {}
+ const char **buffer_ptr;
+ optinfo_item *item;
+ };
+
+ static bool format_decoder_cb (pretty_printer *pp, text_info *text,
+ const char *spec, int /*precision*/,
+ bool /*wide*/, bool /*set_locus*/,
+ bool /*verbose*/, bool */*quoted*/,
+ const char **buffer_ptr);
+
+ bool decode_format (text_info *text, const char *spec,
+ const char **buffer_ptr);
+
+ void stash_item (const char **buffer_ptr, optinfo_item *item);
+
+ void emit_any_pending_textual_chunks (optinfo *dest);
+
+ void emit_item (optinfo_item *item, optinfo *dest);
+
+ dump_context *m_context;
+ dump_flags_t m_dump_kind;
+ auto_vec<stashed_item> m_stashed_items;
+};
+
+/* An RAII-style class for use in debug dumpers for temporarily using a
+ different dump_context. It enables full details and outputs to
+ stderr instead of the currently active dump_file. */
+
+class debug_dump_context
+{
+ public:
+ debug_dump_context (FILE *f = stderr);
+ ~debug_dump_context ();
+
+ private:
+ dump_context m_context;
+ dump_context *m_saved;
+ dump_flags_t m_saved_flags;
+ dump_flags_t m_saved_pflags;
+ FILE *m_saved_file;
+};
+
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* An RAII-style class for use in selftests for temporarily using a different
+ dump_context. */
+
+class temp_dump_context
+{
+ public:
+ temp_dump_context (bool forcibly_enable_optinfo,
+ bool forcibly_enable_dumping,
+ dump_flags_t test_pp_flags);
+ ~temp_dump_context ();
+
+ /* Support for selftests. */
+ optinfo *get_pending_optinfo () const { return m_context.m_pending; }
+ const char *get_dumped_text ();
+
+ private:
+ pretty_printer m_pp;
+ dump_context m_context;
+ dump_context *m_saved;
+};
+
+/* Implementation detail of ASSERT_DUMPED_TEXT_EQ. */
+
+extern void verify_dumped_text (const location &loc,
+ temp_dump_context *context,
+ const char *expected_text);
+
+/* Verify that the text dumped so far in CONTEXT equals
+ EXPECTED_TEXT.
+ As a side-effect, the internal buffer is 0-terminated. */
+
+#define ASSERT_DUMPED_TEXT_EQ(CONTEXT, EXPECTED_TEXT) \
+ SELFTEST_BEGIN_STMT \
+ verify_dumped_text (SELFTEST_LOCATION, &(CONTEXT), (EXPECTED_TEXT)); \
+ SELFTEST_END_STMT
+
+
+/* Verify that ITEM has the expected values. */
+
+void
+verify_item (const location &loc,
+ const optinfo_item *item,
+ enum optinfo_item_kind expected_kind,
+ location_t expected_location,
+ const char *expected_text);
+
+/* Verify that ITEM is a text item, with EXPECTED_TEXT. */
+
+#define ASSERT_IS_TEXT(ITEM, EXPECTED_TEXT) \
+ SELFTEST_BEGIN_STMT \
+ verify_item (SELFTEST_LOCATION, (ITEM), OPTINFO_ITEM_KIND_TEXT, \
+ UNKNOWN_LOCATION, (EXPECTED_TEXT)); \
+ SELFTEST_END_STMT
+
+/* Verify that ITEM is a tree item, with the expected values. */
+
+#define ASSERT_IS_TREE(ITEM, EXPECTED_LOCATION, EXPECTED_TEXT) \
+ SELFTEST_BEGIN_STMT \
+ verify_item (SELFTEST_LOCATION, (ITEM), OPTINFO_ITEM_KIND_TREE, \
+ (EXPECTED_LOCATION), (EXPECTED_TEXT)); \
+ SELFTEST_END_STMT
+
+/* Verify that ITEM is a gimple item, with the expected values. */
+
+#define ASSERT_IS_GIMPLE(ITEM, EXPECTED_LOCATION, EXPECTED_TEXT) \
+ SELFTEST_BEGIN_STMT \
+ verify_item (SELFTEST_LOCATION, (ITEM), OPTINFO_ITEM_KIND_GIMPLE, \
+ (EXPECTED_LOCATION), (EXPECTED_TEXT)); \
+ SELFTEST_END_STMT
+
+/* Verify that ITEM is a symtab node, with the expected values. */
+
+#define ASSERT_IS_SYMTAB_NODE(ITEM, EXPECTED_LOCATION, EXPECTED_TEXT) \
+ SELFTEST_BEGIN_STMT \
+ verify_item (SELFTEST_LOCATION, (ITEM), OPTINFO_ITEM_KIND_SYMTAB_NODE, \
+ (EXPECTED_LOCATION), (EXPECTED_TEXT)); \
+ SELFTEST_END_STMT
+
+} // namespace selftest
+
+#endif /* CHECKING_P */
+
+#endif /* GCC_DUMP_CONTEXT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dumpfile.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dumpfile.h
new file mode 100644
index 0000000..7d5eca8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dumpfile.h
@@ -0,0 +1,774 @@
+/* Definitions for the shared dumpfile.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_DUMPFILE_H
+#define GCC_DUMPFILE_H 1
+
+#include "profile-count.h"
+
+/* An attribute for annotating formatting printing functions that use
+ the dumpfile/optinfo formatting codes. These are the pretty_printer
+ format codes (see pretty-print.cc), with additional codes for middle-end
+ specific entities (see dumpfile.cc). */
+
+#if GCC_VERSION >= 9000
+#define ATTRIBUTE_GCC_DUMP_PRINTF(m, n) \
+  __attribute__ ((__format__ (__gcc_dump_printf__, m, n))) \
+ ATTRIBUTE_NONNULL(m)
+#else
+#define ATTRIBUTE_GCC_DUMP_PRINTF(m, n) ATTRIBUTE_NONNULL(m)
+#endif
+
+/* Different tree dump places. When you add new tree dump places,
+ extend the DUMP_FILES array in dumpfile.cc. */
+enum tree_dump_index
+{
+ TDI_none, /* No dump */
+ TDI_cgraph, /* dump function call graph. */
+ TDI_inheritance, /* dump type inheritance graph. */
+ TDI_clones, /* dump IPA cloning decisions. */
+ TDI_original, /* dump each function before optimizing it */
+ TDI_gimple, /* dump each function after gimplifying it */
+ TDI_nested, /* dump each function after unnesting it */
+ TDI_lto_stream_out, /* dump information about lto streaming */
+ TDI_profile_report, /* dump information about profile quality */
+
+ TDI_lang_all, /* enable all the language dumps. */
+ TDI_tree_all, /* enable all the GENERIC/GIMPLE dumps. */
+ TDI_rtl_all, /* enable all the RTL dumps. */
+ TDI_ipa_all, /* enable all the IPA dumps. */
+
+ TDI_end
+};
+
+/* Enum used to distinguish dump files to types. */
+
+enum dump_kind
+{
+ DK_none,
+ DK_lang,
+ DK_tree,
+ DK_rtl,
+ DK_ipa
+};
+
+/* Bit masks to control dumping. Not all values are applicable to all
+ dumps. Add new ones at the end. When you define new values, extend
+ the DUMP_OPTIONS array in dumpfile.cc. The TDF_* flags coexist with
+ MSG_* flags (for -fopt-info) and the bit values must be chosen to
+ allow that. */
+enum dump_flag : uint32_t
+{
+ /* Value of TDF_NONE is used just for bits filtered by TDF_KIND_MASK. */
+ TDF_NONE = 0,
+
+ /* Dump node addresses. */
+ TDF_ADDRESS = (1 << 0),
+
+ /* Don't go wild following links. */
+ TDF_SLIM = (1 << 1),
+
+ /* Don't unparse the function. */
+ TDF_RAW = (1 << 2),
+
+ /* Show more detailed info about each pass. */
+ TDF_DETAILS = (1 << 3),
+
+ /* Dump various statistics about each pass. */
+ TDF_STATS = (1 << 4),
+
+ /* Display basic block boundaries. */
+ TDF_BLOCKS = (1 << 5),
+
+ /* Display virtual operands. */
+ TDF_VOPS = (1 << 6),
+
+ /* Display statement line numbers. */
+ TDF_LINENO = (1 << 7),
+
+ /* Display decl UIDs. */
+ TDF_UID = (1 << 8),
+
+ /* Address of stmt. */
+ TDF_STMTADDR = (1 << 9),
+
+ /* A graph dump is being emitted. */
+ TDF_GRAPH = (1 << 10),
+
+ /* Display memory symbols in expr.
+ Implies TDF_VOPS. */
+ TDF_MEMSYMS = (1 << 11),
+
+ /* A flag to only print the RHS of a gimple stmt. */
+ TDF_RHS_ONLY = (1 << 12),
+
+ /* Display asm names of decls. */
+ TDF_ASMNAME = (1 << 13),
+
+ /* Display EH region number holding this gimple statement. */
+ TDF_EH = (1 << 14),
+
+ /* Omit UIDs from dumps. */
+ TDF_NOUID = (1 << 15),
+
+ /* Display alias information. */
+ TDF_ALIAS = (1 << 16),
+
+ /* Enumerate locals by uid. */
+ TDF_ENUMERATE_LOCALS = (1 << 17),
+
+ /* Dump cselib details. */
+ TDF_CSELIB = (1 << 18),
+
+ /* Dump SCEV details. */
+ TDF_SCEV = (1 << 19),
+
+ /* Dump in GIMPLE FE syntax. */
+ TDF_GIMPLE = (1 << 20),
+
+ /* Dump folding details. */
+ TDF_FOLDING = (1 << 21),
+
+ /* MSG_* flags for expressing the kinds of message to
+ be emitted by -fopt-info. */
+
+ /* -fopt-info optimized sources. */
+ MSG_OPTIMIZED_LOCATIONS = (1 << 22),
+
+ /* Missed opportunities. */
+ MSG_MISSED_OPTIMIZATION = (1 << 23),
+
+ /* General optimization info. */
+ MSG_NOTE = (1 << 24),
+
+ /* Mask for selecting MSG_-kind flags. */
+ MSG_ALL_KINDS = (MSG_OPTIMIZED_LOCATIONS
+ | MSG_MISSED_OPTIMIZATION
+ | MSG_NOTE),
+
+ /* MSG_PRIORITY_* flags for expressing the priority levels of message
+ to be emitted by -fopt-info, and filtering on them.
+ By default, messages at the top-level dump scope are "user-facing",
+ whereas those that are in nested scopes are implicitly "internals".
+ This behavior can be overridden for a given dump message by explicitly
+ specifying one of the MSG_PRIORITY_* flags.
+
+ By default, dump files show both kinds of message, whereas -fopt-info
+ only shows "user-facing" messages, and requires the "-internals"
+ sub-option of -fopt-info to show the internal messages. */
+
+ /* Implicitly supplied for messages at the top-level dump scope. */
+ MSG_PRIORITY_USER_FACING = (1 << 25),
+
+ /* Implicitly supplied for messages within nested dump scopes. */
+ MSG_PRIORITY_INTERNALS = (1 << 26),
+
+ /* Supplied when an opt_problem generated in a nested scope is re-emitted
+ at the top-level. We want to default to showing these in -fopt-info
+ output, but to *not* show them in dump files, as the message would be
+ shown twice, messing up "scan-tree-dump-times" in DejaGnu tests. */
+ MSG_PRIORITY_REEMITTED = (1 << 27),
+
+ /* Mask for selecting MSG_PRIORITY_* flags. */
+ MSG_ALL_PRIORITIES = (MSG_PRIORITY_USER_FACING
+ | MSG_PRIORITY_INTERNALS
+ | MSG_PRIORITY_REEMITTED),
+
+ /* All -fdump- flags. */
+ TDF_ALL_VALUES = (1 << 28) - 1,
+
+ /* Dumping for -fcompare-debug. */
+ TDF_COMPARE_DEBUG = (1 << 28),
+
+ /* Dump a GIMPLE value which means wrapping certain things with _Literal. */
+ TDF_GIMPLE_VAL = (1 << 29),
+
+ /* For error. */
+ TDF_ERROR = ((uint32_t)1 << 30),
+};
+
+/* Dump flags type. */
+
+typedef enum dump_flag dump_flags_t;
+
+inline dump_flags_t
+operator| (dump_flags_t lhs, dump_flags_t rhs)
+{
+ return (dump_flags_t)((std::underlying_type<dump_flags_t>::type)lhs
+ | (std::underlying_type<dump_flags_t>::type)rhs);
+}
+
+inline dump_flags_t
+operator& (dump_flags_t lhs, dump_flags_t rhs)
+{
+ return (dump_flags_t)((std::underlying_type<dump_flags_t>::type)lhs
+ & (std::underlying_type<dump_flags_t>::type)rhs);
+}
+
+inline dump_flags_t
+operator~ (dump_flags_t flags)
+{
+ return (dump_flags_t)~((std::underlying_type<dump_flags_t>::type)flags);
+}
+
+inline dump_flags_t &
+operator|= (dump_flags_t &lhs, dump_flags_t rhs)
+{
+ lhs = (dump_flags_t)((std::underlying_type<dump_flags_t>::type)lhs
+ | (std::underlying_type<dump_flags_t>::type)rhs);
+ return lhs;
+}
+
+inline dump_flags_t &
+operator&= (dump_flags_t &lhs, dump_flags_t rhs)
+{
+ lhs = (dump_flags_t)((std::underlying_type<dump_flags_t>::type)lhs
+ & (std::underlying_type<dump_flags_t>::type)rhs);
+ return lhs;
+}
+
+/* Flags to control high-level -fopt-info dumps. Usually these flags
+ define a group of passes. An optimization pass can be part of
+ multiple groups. */
+
+enum optgroup_flag
+{
+ OPTGROUP_NONE = 0,
+
+ /* IPA optimization passes */
+ OPTGROUP_IPA = (1 << 1),
+
+ /* Loop optimization passes */
+ OPTGROUP_LOOP = (1 << 2),
+
+ /* Inlining passes */
+ OPTGROUP_INLINE = (1 << 3),
+
+ /* OMP (Offloading and Multi Processing) transformations */
+ OPTGROUP_OMP = (1 << 4),
+
+ /* Vectorization passes */
+ OPTGROUP_VEC = (1 << 5),
+
+ /* All other passes */
+ OPTGROUP_OTHER = (1 << 6),
+
+ OPTGROUP_ALL = (OPTGROUP_IPA | OPTGROUP_LOOP | OPTGROUP_INLINE
+ | OPTGROUP_OMP | OPTGROUP_VEC | OPTGROUP_OTHER)
+};
+
+typedef enum optgroup_flag optgroup_flags_t;
+
+inline optgroup_flags_t
+operator| (optgroup_flags_t lhs, optgroup_flags_t rhs)
+{
+ return (optgroup_flags_t)((std::underlying_type<dump_flags_t>::type)lhs
+ | (std::underlying_type<dump_flags_t>::type)rhs);
+}
+
+inline optgroup_flags_t &
+operator|= (optgroup_flags_t &lhs, optgroup_flags_t rhs)
+{
+ lhs = (optgroup_flags_t)((std::underlying_type<dump_flags_t>::type)lhs
+ | (std::underlying_type<dump_flags_t>::type)rhs);
+ return lhs;
+}
+
+/* Define a tree dump switch. */
+struct dump_file_info
+{
+ /* Suffix to give output file. */
+ const char *suffix;
+ /* Command line dump switch. */
+ const char *swtch;
+ /* Command line glob. */
+ const char *glob;
+ /* Filename for the pass-specific stream. */
+ const char *pfilename;
+ /* Filename for the -fopt-info stream. */
+ const char *alt_filename;
+ /* Pass-specific dump stream. */
+ FILE *pstream;
+ /* -fopt-info stream. */
+ FILE *alt_stream;
+ /* Dump kind. */
+ dump_kind dkind;
+ /* Dump flags. */
+ dump_flags_t pflags;
+  /* Pass flags for the -fopt-info stream.  */
+ dump_flags_t alt_flags;
+ /* Flags for -fopt-info given by a user. */
+ optgroup_flags_t optgroup_flags;
+ /* State of pass-specific stream. */
+ int pstate;
+ /* State of the -fopt-info stream. */
+ int alt_state;
+ /* Dump file number. */
+ int num;
+ /* Fields "suffix", "swtch", "glob" can be const strings,
+ or can be dynamically allocated, needing free. */
+ bool owns_strings;
+ /* When a given dump file is being initialized, this flag is set to true
+ if the corresponding TDF_graph dump file has also been initialized. */
+ bool graph_dump_initialized;
+};
+
+/* A class for describing where in the user's source that a dump message
+ relates to, with various constructors for convenience.
+ In particular, this lets us associate dump messages
+ with hotness information (e.g. from PGO), allowing them to
+ be prioritized by code hotness. */
+
+class dump_user_location_t
+{
+ public:
+ /* Default constructor, analogous to UNKNOWN_LOCATION. */
+ dump_user_location_t () : m_count (), m_loc (UNKNOWN_LOCATION) {}
+
+ /* Construct from a gimple statement (using its location and hotness). */
+ dump_user_location_t (const gimple *stmt);
+
+ /* Construct from an RTL instruction (using its location and hotness). */
+ dump_user_location_t (const rtx_insn *insn);
+
+ /* Construct from a location_t. This one is deprecated (since it doesn't
+ capture hotness information); it thus needs to be spelled out. */
+ static dump_user_location_t
+ from_location_t (location_t loc)
+ {
+ return dump_user_location_t (profile_count (), loc);
+ }
+
+ /* Construct from a function declaration. This one requires spelling out
+ to avoid accidentally constructing from other kinds of tree. */
+ static dump_user_location_t
+ from_function_decl (tree fndecl);
+
+ profile_count get_count () const { return m_count; }
+ location_t get_location_t () const { return m_loc; }
+
+ private:
+ /* Private ctor from count and location, for use by from_location_t. */
+ dump_user_location_t (profile_count count, location_t loc)
+ : m_count (count), m_loc (loc)
+ {}
+
+ profile_count m_count;
+ location_t m_loc;
+};
+
+/* A class for identifying where in the compiler's own source
+ (or a plugin) that a dump message is being emitted from. */
+
+class dump_impl_location_t
+{
+public:
+ dump_impl_location_t (
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ const char *file = __builtin_FILE (),
+ int line = __builtin_LINE (),
+ const char *function = __builtin_FUNCTION ()
+#else
+ const char *file = __FILE__,
+ int line = __LINE__,
+ const char *function = NULL
+#endif
+ )
+ : m_file (file), m_line (line), m_function (function)
+ {}
+
+ const char *m_file;
+ int m_line;
+ const char *m_function;
+};
+
+/* A bundle of metadata for describing a dump message:
+ (a) the dump_flags
+ (b) the source location within the compiler/plugin.
+
+   The constructors use default parameters so that (b) gets set up
+ automatically.
+
+ Hence you can pass in e.g. MSG_NOTE, and the dump call
+ will automatically record where in GCC's source code the
+ dump was emitted from. */
+
+class dump_metadata_t
+{
+ public:
+ dump_metadata_t (dump_flags_t dump_flags,
+ const dump_impl_location_t &impl_location
+ = dump_impl_location_t ())
+ : m_dump_flags (dump_flags),
+ m_impl_location (impl_location)
+ {
+ }
+
+ dump_flags_t get_dump_flags () const { return m_dump_flags; }
+
+ const dump_impl_location_t &
+ get_impl_location () const { return m_impl_location; }
+
+ private:
+ dump_flags_t m_dump_flags;
+ dump_impl_location_t m_impl_location;
+};
+
+/* A bundle of information for describing the location of a dump message:
+ (a) the source location and hotness within the user's code, together with
+ (b) the source location within the compiler/plugin.
+
+   The constructors use default parameters so that (b) gets set up
+ automatically.
+
+ The upshot is that you can pass in e.g. a gimple * to dump_printf_loc,
+ and the dump call will automatically record where in GCC's source
+ code the dump was emitted from. */
+
+class dump_location_t
+{
+ public:
+ /* Default constructor, analogous to UNKNOWN_LOCATION. */
+ dump_location_t (const dump_impl_location_t &impl_location
+ = dump_impl_location_t ())
+ : m_user_location (dump_user_location_t ()),
+ m_impl_location (impl_location)
+ {
+ }
+
+ /* Construct from a gimple statement (using its location and hotness). */
+ dump_location_t (const gimple *stmt,
+ const dump_impl_location_t &impl_location
+ = dump_impl_location_t ())
+ : m_user_location (dump_user_location_t (stmt)),
+ m_impl_location (impl_location)
+ {
+ }
+
+ /* Construct from an RTL instruction (using its location and hotness). */
+ dump_location_t (const rtx_insn *insn,
+ const dump_impl_location_t &impl_location
+ = dump_impl_location_t ())
+ : m_user_location (dump_user_location_t (insn)),
+ m_impl_location (impl_location)
+ {
+ }
+
+ /* Construct from a dump_user_location_t. */
+ dump_location_t (const dump_user_location_t &user_location,
+ const dump_impl_location_t &impl_location
+ = dump_impl_location_t ())
+ : m_user_location (user_location),
+ m_impl_location (impl_location)
+ {
+ }
+
+ /* Construct from a location_t. This one is deprecated (since it doesn't
+ capture hotness information), and thus requires spelling out. */
+ static dump_location_t
+ from_location_t (location_t loc,
+ const dump_impl_location_t &impl_location
+ = dump_impl_location_t ())
+ {
+ return dump_location_t (dump_user_location_t::from_location_t (loc),
+ impl_location);
+ }
+
+ const dump_user_location_t &
+ get_user_location () const { return m_user_location; }
+
+ const dump_impl_location_t &
+ get_impl_location () const { return m_impl_location; }
+
+ location_t get_location_t () const
+ {
+ return m_user_location.get_location_t ();
+ }
+
+ profile_count get_count () const { return m_user_location.get_count (); }
+
+ private:
+ dump_user_location_t m_user_location;
+ dump_impl_location_t m_impl_location;
+};
+
+/* In dumpfile.cc */
+extern FILE *dump_begin (int, dump_flags_t *, int part=-1);
+extern void dump_end (int, FILE *);
+extern int opt_info_switch_p (const char *);
+extern const char *dump_flag_name (int);
+extern const kv_pair<optgroup_flags_t> optgroup_options[];
+extern dump_flags_t
+parse_dump_option (const char *, const char **);
+
+/* Global variables used to communicate with passes. */
+extern FILE *dump_file;
+extern dump_flags_t dump_flags;
+extern const char *dump_file_name;
+
+extern bool dumps_are_enabled;
+
+extern void set_dump_file (FILE *new_dump_file);
+
+/* Return true if any of the dumps is enabled, false otherwise. */
+inline bool
+dump_enabled_p (void)
+{
+ return dumps_are_enabled;
+}
+
+/* The following API calls (which *don't* take a "FILE *")
+ write the output to zero or more locations.
+
+ Some destinations are written to immediately as dump_* calls
+ are made; for others, the output is consolidated into an "optinfo"
+ instance (with its own metadata), and only emitted once the optinfo
+ is complete.
+
+ The destinations are:
+
+ (a) the "immediate" destinations:
+ (a.1) the active dump_file, if any
+ (a.2) the -fopt-info destination, if any
+ (b) the "optinfo" destinations, if any:
+ (b.1) as optimization records
+
+ dump_* (MSG_*) --> dumpfile.cc --> items --> (a.1) dump_file
+ | `-> (a.2) alt_dump_file
+ |
+ `--> (b) optinfo
+ `---> optinfo destinations
+ (b.1) optimization records
+
+ For optinfos, the dump_*_loc mark the beginning of an optinfo
+ instance: all subsequent dump_* calls are consolidated into
+ that optinfo, until the next dump_*_loc call (or a change in
+ dump scope, or a call to dumpfile_ensure_any_optinfo_are_flushed).
+
+ A group of dump_* calls should be guarded by:
+
+ if (dump_enabled_p ())
+
+ to minimize the work done for the common case where dumps
+ are disabled. */
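+
+/* For example, a pass might emit a guarded, located message like this
+   (an illustrative sketch; STMT stands for whatever gimple statement
+   the pass is examining):
+
+     if (dump_enabled_p ())
+       dump_printf_loc (MSG_NOTE, stmt, "considering stmt for folding\n");
+
+   so that no formatting work happens when every destination is off.  */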
+
+extern void dump_printf (const dump_metadata_t &, const char *, ...)
+ ATTRIBUTE_GCC_DUMP_PRINTF (2, 3);
+
+extern void dump_printf_loc (const dump_metadata_t &, const dump_user_location_t &,
+ const char *, ...)
+ ATTRIBUTE_GCC_DUMP_PRINTF (3, 4);
+extern void dump_function (int phase, tree fn);
+extern void dump_basic_block (dump_flags_t, basic_block, int);
+extern void dump_generic_expr_loc (const dump_metadata_t &,
+ const dump_user_location_t &,
+ dump_flags_t, tree);
+extern void dump_generic_expr (const dump_metadata_t &, dump_flags_t, tree);
+extern void dump_gimple_stmt_loc (const dump_metadata_t &,
+ const dump_user_location_t &,
+ dump_flags_t, gimple *, int);
+extern void dump_gimple_stmt (const dump_metadata_t &, dump_flags_t, gimple *, int);
+extern void dump_gimple_expr_loc (const dump_metadata_t &,
+ const dump_user_location_t &,
+ dump_flags_t, gimple *, int);
+extern void dump_gimple_expr (const dump_metadata_t &, dump_flags_t, gimple *, int);
+extern void dump_symtab_node (const dump_metadata_t &, symtab_node *);
+
+template<unsigned int N, typename C>
+void dump_dec (const dump_metadata_t &, const poly_int<N, C> &);
+extern void dump_dec (dump_flags_t, const poly_wide_int &, signop);
+extern void dump_hex (dump_flags_t, const poly_wide_int &);
+
+extern void dumpfile_ensure_any_optinfo_are_flushed ();
+
+/* Managing nested scopes, so that dumps can express the call chain
+ leading to a dump message. */
+
+extern unsigned int get_dump_scope_depth ();
+extern void dump_begin_scope (const char *name,
+ const dump_user_location_t &user_location,
+ const dump_impl_location_t &impl_location);
+extern void dump_end_scope ();
+
+/* Implementation detail of the AUTO_DUMP_SCOPE macro below.
+
+ A RAII-style class intended to make it easy to emit dump
+ information about entering and exiting a collection of nested
+ function calls. */
+
+class auto_dump_scope
+{
+ public:
+ auto_dump_scope (const char *name,
+ const dump_user_location_t &user_location,
+ const dump_impl_location_t &impl_location
+ = dump_impl_location_t ())
+ {
+ if (dump_enabled_p ())
+ dump_begin_scope (name, user_location, impl_location);
+ }
+ ~auto_dump_scope ()
+ {
+ if (dump_enabled_p ())
+ dump_end_scope ();
+ }
+};
+
+/* A macro for calling:
+ dump_begin_scope (NAME, USER_LOC);
+ via an RAII object, thus printing "=== NAME ===\n" to the dumpfile etc.,
+ and then calling
+ dump_end_scope ();
+ once the object goes out of scope, thus capturing the nesting of
+ the scopes.
+
+ These scopes affect dump messages within them: dump messages at the
+ top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those
+ in a nested scope implicitly default to MSG_PRIORITY_INTERNALS. */
+
+#define AUTO_DUMP_SCOPE(NAME, USER_LOC) \
+ auto_dump_scope scope (NAME, USER_LOC)
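+
+/* For instance (an illustrative sketch; "frobnicate" and STMT are
+   hypothetical names):
+
+     AUTO_DUMP_SCOPE ("frobnicate", dump_user_location_t (stmt));
+
+   prints "=== frobnicate ===" on entry and closes the scope when the
+   enclosing block is left, demoting nested messages as described
+   above.  */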
+
+extern void dump_function (int phase, tree fn);
+extern void print_combine_total_stats (void);
+extern bool enable_rtl_dump_file (void);
+
+/* In tree-dump.cc */
+extern void dump_node (const_tree, dump_flags_t, FILE *);
+
+/* In combine.cc */
+extern void dump_combine_total_stats (FILE *);
+/* In cfghooks.cc */
+extern void dump_bb (FILE *, basic_block, int, dump_flags_t);
+
+class opt_pass;
+
+namespace gcc {
+
+/* A class for managing all of the various dump files used by the
+ optimization passes. */
+
+class dump_manager
+{
+public:
+
+ dump_manager ();
+ ~dump_manager ();
+
+ /* Register a dumpfile.
+
+ TAKE_OWNERSHIP determines whether callee takes ownership of strings
+ SUFFIX, SWTCH, and GLOB. */
+ unsigned int
+ dump_register (const char *suffix, const char *swtch, const char *glob,
+ dump_kind dkind, optgroup_flags_t optgroup_flags,
+ bool take_ownership);
+
+ /* Allow languages and middle-end to register their dumps before the
+ optimization passes. */
+ void
+ register_dumps ();
+
+ /* Return the dump_file_info for the given phase. */
+ struct dump_file_info *
+ get_dump_file_info (int phase) const;
+
+ struct dump_file_info *
+ get_dump_file_info_by_switch (const char *swtch) const;
+
+ /* Return the name of the dump file for the given phase.
+ If the dump is not enabled, returns NULL. */
+ char *
+ get_dump_file_name (int phase, int part = -1) const;
+
+ char *
+ get_dump_file_name (struct dump_file_info *dfi, int part = -1) const;
+
+ void
+ dump_switch_p (const char *arg);
+
+ /* Start a dump for PHASE. Store user-supplied dump flags in
+ *FLAG_PTR. Return the number of streams opened. Set the globals
+ DUMP_FILE and ALT_DUMP_FILE to point to the opened streams, and
+ set dump_flags appropriately for both the pass dump stream and the
+ -fopt-info stream. */
+ int
+ dump_start (int phase, dump_flags_t *flag_ptr);
+
+ /* Finish a tree dump for PHASE and close associated dump streams. Also
+ reset the globals DUMP_FILE, ALT_DUMP_FILE, and DUMP_FLAGS. */
+ void
+ dump_finish (int phase);
+
+ FILE *
+ dump_begin (int phase, dump_flags_t *flag_ptr, int part);
+
+ /* Returns nonzero if tree dump PHASE has been initialized. */
+ int
+ dump_initialized_p (int phase) const;
+
+ /* Returns the switch name of PHASE. */
+ const char *
+ dump_flag_name (int phase) const;
+
+ void register_pass (opt_pass *pass);
+
+private:
+
+ int
+ dump_phase_enabled_p (int phase) const;
+
+ int
+ dump_switch_p_1 (const char *arg, struct dump_file_info *dfi, bool doglob);
+
+ int
+ dump_enable_all (dump_kind dkind, dump_flags_t flags, const char *filename);
+
+ int
+ opt_info_enable_passes (optgroup_flags_t optgroup_flags, dump_flags_t flags,
+ const char *filename);
+
+ bool update_dfi_for_opt_info (dump_file_info *dfi) const;
+
+private:
+
+ /* Dynamically registered dump files and switches. */
+ int m_next_dump;
+ struct dump_file_info *m_extra_dump_files;
+ size_t m_extra_dump_files_in_use;
+ size_t m_extra_dump_files_alloced;
+
+ /* Stored values from -fopt-info, for handling passes created after
+ option-parsing (by backends and by plugins). */
+ optgroup_flags_t m_optgroup_flags;
+ dump_flags_t m_optinfo_flags;
+ char *m_optinfo_filename;
+
+ /* Grant access to dump_enable_all. */
+ friend bool ::enable_rtl_dump_file (void);
+
+ /* Grant access to opt_info_enable_passes. */
+ friend int ::opt_info_switch_p (const char *arg);
+
+}; // class dump_manager
+
+} // namespace gcc
+
+#endif /* GCC_DUMPFILE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dwarf2asm.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dwarf2asm.h
new file mode 100644
index 0000000..37a0a05
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dwarf2asm.h
@@ -0,0 +1,100 @@
+/* Dwarf2 assembler output helper routines.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DWARF2ASM_H
+#define GCC_DWARF2ASM_H
+
+extern void dw2_assemble_integer (int, rtx);
+
+extern void dw2_asm_output_data_raw (int, unsigned HOST_WIDE_INT);
+
+extern void dw2_asm_output_data (int, unsigned HOST_WIDE_INT,
+ const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_3;
+
+extern void dw2_asm_output_delta (int, const char *, const char *,
+ const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_4;
+
+extern void dw2_asm_output_vms_delta (int, const char *, const char *,
+ const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_4;
+
+extern void dw2_asm_output_offset (int, const char *, section *,
+ const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_4;
+
+extern void dw2_asm_output_offset (int, const char *, HOST_WIDE_INT,
+ section *, const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_5;
+
+extern void dw2_asm_output_addr (int, const char *, const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_3;
+
+extern void dw2_asm_output_addr_rtx (int, rtx, const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_3;
+
+extern void dw2_asm_output_encoded_addr_rtx (int, rtx, bool,
+ const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_4;
+
+extern void dw2_asm_output_nstring (const char *, size_t,
+ const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_3;
+
+extern void dw2_asm_output_data_uleb128_raw (unsigned HOST_WIDE_INT);
+
+extern void dw2_asm_output_data_uleb128 (unsigned HOST_WIDE_INT,
+ const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_2;
+
+extern void dw2_asm_output_data_sleb128_raw (HOST_WIDE_INT);
+
+extern void dw2_asm_output_data_sleb128 (HOST_WIDE_INT,
+ const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_2;
+
+extern void dw2_asm_output_symname_uleb128 (const char *,
+ const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_2;
+
+extern void dw2_asm_output_delta_uleb128 (const char *, const char *,
+ const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_3;
+
+extern int size_of_uleb128 (unsigned HOST_WIDE_INT);
+extern int size_of_sleb128 (HOST_WIDE_INT);
+extern int size_of_encoded_value (int);
+extern const char *eh_data_format_name (int);
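+
+/* As a worked example of the LEB128 sizes: the value 624485 encodes
+   into the three ULEB128 bytes 0xe5 0x8e 0x26 (seven payload bits per
+   byte), so size_of_uleb128 (624485) == 3.  */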
+
+extern rtx dw2_force_const_mem (rtx, bool);
+extern void dw2_output_indirect_constants (void);
+
+/* These are currently unused. */
+
+#if 0
+extern void dw2_asm_output_pcrel (int, const char *, const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_3;
+
+extern void dw2_asm_output_delta_sleb128 (const char *, const char *,
+ const char *, ...)
+ ATTRIBUTE_NULL_PRINTF_3;
+#endif
+
+#endif /* GCC_DWARF2ASM_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dwarf2ctf.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dwarf2ctf.h
new file mode 100644
index 0000000..a5f645a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dwarf2ctf.h
@@ -0,0 +1,55 @@
+/* dwarf2ctf.h - DWARF interface for CTF/BTF generation.
+ Copyright (C) 2021-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This file contains declarations and prototypes to define an interface
+ between DWARF and CTF/BTF generation. */
+
+#ifndef GCC_DWARF2CTF_H
+#define GCC_DWARF2CTF_H 1
+
+#include "dwarf2out.h"
+#include "flags.h"
+
+/* Debug Format Interface. Used in dwarf2out.cc. */
+
+extern void ctf_debug_init (void);
+extern void ctf_debug_init_postprocess (bool);
+extern bool ctf_do_die (dw_die_ref);
+extern void ctf_debug_early_finish (const char *);
+extern void ctf_debug_finish (const char *);
+
+/* Wrappers for CTF/BTF to fetch information from GCC DWARF DIE. Used in
+ ctfc.cc.
+
+ A CTF container does not store all debug information internally. Some of
+ the info is fetched indirectly via the DIE reference available in each CTF
+ container entry.
+
+ These functions are used by the CTF container to give its consumers
+ (CTF/BTF) access to the various debug information available in a DWARF
+ DIE. Direct access by the consumers of CTF/BTF information to the debug
+ information in GCC's DWARF structures is not ideal. */
+
+/* Source location information. */
+
+extern const char * ctf_get_die_loc_file (dw_die_ref);
+extern unsigned int ctf_get_die_loc_line (dw_die_ref);
+extern unsigned int ctf_get_die_loc_col (dw_die_ref);
+
+#endif /* GCC_DWARF2CTF_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dwarf2out.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dwarf2out.h
new file mode 100644
index 0000000..870b56a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/dwarf2out.h
@@ -0,0 +1,470 @@
+/* dwarf2out.h - Various declarations for functions found in dwarf2out.cc
+ Copyright (C) 1998-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DWARF2OUT_H
+#define GCC_DWARF2OUT_H 1
+
+#include "dwarf2.h" /* ??? Remove this once only used by dwarf2foo.c. */
+
+typedef struct die_struct *dw_die_ref;
+typedef const struct die_struct *const_dw_die_ref;
+
+typedef struct dw_val_node *dw_val_ref;
+typedef struct dw_cfi_node *dw_cfi_ref;
+typedef struct dw_loc_descr_node *dw_loc_descr_ref;
+typedef struct dw_loc_list_struct *dw_loc_list_ref;
+typedef struct dw_discr_list_node *dw_discr_list_ref;
+typedef wide_int *wide_int_ptr;
+
+
+/* Call frames are described using a sequence of Call Frame
+ Information instructions. The register number, offset
+ and address fields are provided as possible operands;
+ their use is selected by the opcode field. */
+
+enum dw_cfi_oprnd_type {
+ dw_cfi_oprnd_unused,
+ dw_cfi_oprnd_reg_num,
+ dw_cfi_oprnd_offset,
+ dw_cfi_oprnd_addr,
+ dw_cfi_oprnd_loc,
+ dw_cfi_oprnd_cfa_loc
+};
+
+typedef union GTY(()) {
+ unsigned int GTY ((tag ("dw_cfi_oprnd_reg_num"))) dw_cfi_reg_num;
+ HOST_WIDE_INT GTY ((tag ("dw_cfi_oprnd_offset"))) dw_cfi_offset;
+ const char * GTY ((tag ("dw_cfi_oprnd_addr"))) dw_cfi_addr;
+ struct dw_loc_descr_node * GTY ((tag ("dw_cfi_oprnd_loc"))) dw_cfi_loc;
+ struct dw_cfa_location * GTY ((tag ("dw_cfi_oprnd_cfa_loc")))
+ dw_cfi_cfa_loc;
+} dw_cfi_oprnd;
+
+struct GTY(()) dw_cfi_node {
+ enum dwarf_call_frame_info dw_cfi_opc;
+ dw_cfi_oprnd GTY ((desc ("dw_cfi_oprnd1_desc (%1.dw_cfi_opc)")))
+ dw_cfi_oprnd1;
+ dw_cfi_oprnd GTY ((desc ("dw_cfi_oprnd2_desc (%1.dw_cfi_opc)")))
+ dw_cfi_oprnd2;
+};
+
+
+typedef vec<dw_cfi_ref, va_gc> *cfi_vec;
+
+typedef struct dw_fde_node *dw_fde_ref;
+
+/* All call frame descriptions (FDE's) in the GCC generated DWARF
+ refer to a single Common Information Entry (CIE), defined at
+ the beginning of the .debug_frame section. This use of a single
+ CIE obviates the need to keep track of multiple CIE's
+ in the DWARF generation routines below. */
+
+struct GTY(()) dw_fde_node {
+ tree decl;
+ const char *dw_fde_begin;
+ const char *dw_fde_current_label;
+ const char *dw_fde_end;
+ const char *dw_fde_vms_end_prologue;
+ const char *dw_fde_vms_begin_epilogue;
+ const char *dw_fde_second_begin;
+ const char *dw_fde_second_end;
+ cfi_vec dw_fde_cfi;
+ int dw_fde_switch_cfi_index; /* Last CFI before switching sections. */
+ HOST_WIDE_INT stack_realignment;
+
+ unsigned funcdef_number;
+ unsigned fde_index;
+
+ /* Dynamic realign argument pointer register. */
+ unsigned int drap_reg;
+ /* Virtual dynamic realign argument pointer register. */
+ unsigned int vdrap_reg;
+ /* These 3 flags are copied from rtl_data in function.h. */
+ unsigned all_throwers_are_sibcalls : 1;
+ unsigned uses_eh_lsda : 1;
+ unsigned nothrow : 1;
+ /* Whether we did stack realign in this call frame. */
+ unsigned stack_realign : 1;
+ /* Whether dynamic realign argument pointer register has been saved. */
+ unsigned drap_reg_saved: 1;
+ /* True iff dw_fde_begin label is in text_section or cold_text_section. */
+ unsigned in_std_section : 1;
+ /* True iff dw_fde_second_begin label is in text_section or
+ cold_text_section. */
+ unsigned second_in_std_section : 1;
+ /* True if Rule 18 described in dwarf2cfi.cc is in action, i.e. for dynamic
+ stack realignment in between pushing of hard frame pointer to stack
+ and setting hard frame pointer to stack pointer. The register save for
+ hard frame pointer register should be emitted only on the latter
+ instruction. */
+ unsigned rule18 : 1;
+ /* True if this function is to be ignored by debugger. */
+ unsigned ignored_debug : 1;
+};
+
+
+/* This represents a register, in DWARF_FRAME_REGNUM space, for use in CFA
+ definitions and expressions.
+ Most architectures only need a single register number, but some (amdgcn)
+ have pointers that span multiple registers. DWARF permits arbitrary
+ register sets but existing use-cases only require contiguous register
+ sets, as represented here. */
+struct GTY(()) cfa_reg {
+ unsigned int reg;
+ unsigned short span;
+ unsigned short span_width; /* A.K.A. register mode size. */
+
+ cfa_reg& set_by_dwreg (unsigned int r)
+ {
+ reg = r;
+ span = 1;
+ span_width = 0; /* Unknown size (permitted when span == 1). */
+ return *this;
+ }
+
+ bool operator== (const cfa_reg &other) const
+ {
+ return (reg == other.reg && span == other.span
+ && (span_width == other.span_width
+ || (span == 1
+ && (span_width == 0 || other.span_width == 0))));
+ }
+
+ bool operator!= (const cfa_reg &other) const
+ {
+ return !(*this == other);
+ }
+};
+
+/* This is how we define the location of the CFA. We used to handle it
+ as REG + OFFSET all the time, but now it can be more complex.
+ It can now be either REG + CFA_OFFSET or *(REG + BASE_OFFSET) + CFA_OFFSET.
+ Instead of passing around REG and OFFSET, we pass a copy
+ of this structure. */
+struct GTY(()) dw_cfa_location {
+ poly_int64_pod offset;
+ poly_int64_pod base_offset;
+ /* REG is in DWARF_FRAME_REGNUM space, *not* normal REGNO space. */
+ struct cfa_reg reg;
+ BOOL_BITFIELD indirect : 1; /* 1 if CFA is accessed via a dereference. */
+ BOOL_BITFIELD in_use : 1; /* 1 if a saved cfa is stored here. */
+};
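+
+/* As an illustration (the concrete numbers are hypothetical): on
+   x86_64, after a "push %rbp; mov %rsp, %rbp" prologue the CFA is
+   describable as REG + CFA_OFFSET with reg.reg == 6 (DWARF rbp),
+   offset == 16 and indirect == 0.  */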
+
+
+/* Each DIE may have a series of attribute/value pairs. Values
+ can take on several forms. The forms that are used in this
+ implementation are listed below. */
+
+enum dw_val_class
+{
+ dw_val_class_none,
+ dw_val_class_addr,
+ dw_val_class_offset,
+ dw_val_class_loc,
+ dw_val_class_loc_list,
+ dw_val_class_range_list,
+ dw_val_class_const,
+ dw_val_class_unsigned_const,
+ dw_val_class_const_double,
+ dw_val_class_wide_int,
+ dw_val_class_vec,
+ dw_val_class_flag,
+ dw_val_class_die_ref,
+ dw_val_class_fde_ref,
+ dw_val_class_lbl_id,
+ dw_val_class_lineptr,
+ dw_val_class_str,
+ dw_val_class_macptr,
+ dw_val_class_loclistsptr,
+ dw_val_class_file,
+ dw_val_class_data8,
+ dw_val_class_decl_ref,
+ dw_val_class_vms_delta,
+ dw_val_class_high_pc,
+ dw_val_class_discr_value,
+ dw_val_class_discr_list,
+ dw_val_class_const_implicit,
+ dw_val_class_unsigned_const_implicit,
+ dw_val_class_file_implicit,
+ dw_val_class_view_list,
+ dw_val_class_symview
+};
+
+/* Describe a floating point constant value, or a vector constant value. */
+
+struct GTY(()) dw_vec_const {
+ void * GTY((atomic)) array;
+ unsigned length;
+ unsigned elt_size;
+};
+
+/* Describe a single value that a discriminant can match.
+
+ Discriminants (in the "record variant part" meaning) are scalars.
+ dw_discr_list_ref and dw_discr_value are a means to describe a set of
+ discriminant values that are matched by a particular variant.
+
+ Discriminants can be signed or unsigned scalars, and so can discriminant
+ values. Both have to be consistent, though. */
+
+struct GTY(()) dw_discr_value {
+ int pos; /* Whether the discriminant value is positive (unsigned). */
+ union
+ {
+ HOST_WIDE_INT GTY ((tag ("0"))) sval;
+ unsigned HOST_WIDE_INT GTY ((tag ("1"))) uval;
+ }
+ GTY ((desc ("%1.pos"))) v;
+};
+
+struct addr_table_entry;
+
+/* The dw_val_node describes an attribute's value, as it is
+ represented internally. */
+
+struct GTY(()) dw_val_node {
+ enum dw_val_class val_class;
+ struct addr_table_entry * GTY(()) val_entry;
+ union dw_val_struct_union
+ {
+ rtx GTY ((tag ("dw_val_class_addr"))) val_addr;
+ unsigned HOST_WIDE_INT GTY ((tag ("dw_val_class_offset"))) val_offset;
+ dw_loc_list_ref GTY ((tag ("dw_val_class_loc_list"))) val_loc_list;
+ dw_die_ref GTY ((tag ("dw_val_class_view_list"))) val_view_list;
+ dw_loc_descr_ref GTY ((tag ("dw_val_class_loc"))) val_loc;
+ HOST_WIDE_INT GTY ((default)) val_int;
+ unsigned HOST_WIDE_INT
+ GTY ((tag ("dw_val_class_unsigned_const"))) val_unsigned;
+ double_int GTY ((tag ("dw_val_class_const_double"))) val_double;
+ wide_int_ptr GTY ((tag ("dw_val_class_wide_int"))) val_wide;
+ dw_vec_const GTY ((tag ("dw_val_class_vec"))) val_vec;
+ struct dw_val_die_union
+ {
+ dw_die_ref die;
+ int external;
+ } GTY ((tag ("dw_val_class_die_ref"))) val_die_ref;
+ unsigned GTY ((tag ("dw_val_class_fde_ref"))) val_fde_index;
+ struct indirect_string_node * GTY ((tag ("dw_val_class_str"))) val_str;
+ char * GTY ((tag ("dw_val_class_lbl_id"))) val_lbl_id;
+ unsigned char GTY ((tag ("dw_val_class_flag"))) val_flag;
+ struct dwarf_file_data * GTY ((tag ("dw_val_class_file"))) val_file;
+ struct dwarf_file_data *
+ GTY ((tag ("dw_val_class_file_implicit"))) val_file_implicit;
+ unsigned char GTY ((tag ("dw_val_class_data8"))) val_data8[8];
+ tree GTY ((tag ("dw_val_class_decl_ref"))) val_decl_ref;
+ struct dw_val_vms_delta_union
+ {
+ char * lbl1;
+ char * lbl2;
+ } GTY ((tag ("dw_val_class_vms_delta"))) val_vms_delta;
+ dw_discr_value GTY ((tag ("dw_val_class_discr_value"))) val_discr_value;
+ dw_discr_list_ref GTY ((tag ("dw_val_class_discr_list"))) val_discr_list;
+ char * GTY ((tag ("dw_val_class_symview"))) val_symbolic_view;
+ }
+ GTY ((desc ("%1.val_class"))) v;
+};
+
+/* Locations in memory are described using a sequence of stack machine
+ operations. */
+
+struct GTY((chain_next ("%h.dw_loc_next"))) dw_loc_descr_node {
+ dw_loc_descr_ref dw_loc_next;
+ ENUM_BITFIELD (dwarf_location_atom) dw_loc_opc : 8;
+ /* Used to distinguish DW_OP_addr with a direct symbol relocation
+ from DW_OP_addr with a dtp-relative symbol relocation. */
+ unsigned int dtprel : 1;
+ /* For DW_OP_pick, DW_OP_dup and DW_OP_over operations: true iff
+ it targets a DWARF procedure argument. In this case, it needs to be
+ relocated according to the current frame offset. */
+ unsigned int frame_offset_rel : 1;
+ int dw_loc_addr;
+ dw_val_node dw_loc_oprnd1;
+ dw_val_node dw_loc_oprnd2;
+};
+
+/* A variant (inside a record variant part) is selected when the corresponding
+ discriminant matches its set of values (see the comment for dw_discr_value).
+ The following datastructure holds such matching information. */
+
+struct GTY(()) dw_discr_list_node {
+ dw_discr_list_ref dw_discr_next;
+
+ dw_discr_value dw_discr_lower_bound;
+ dw_discr_value dw_discr_upper_bound;
+ /* This node represents only the value in dw_discr_lower_bound when it's
+ zero. It represents the range between the two fields (bounds included)
+ otherwise. */
+ int dw_discr_range;
+};
+
+/* Interface from dwarf2out.cc to dwarf2cfi.cc. */
+extern struct dw_loc_descr_node *build_cfa_loc
+ (dw_cfa_location *, poly_int64);
+extern struct dw_loc_descr_node *build_cfa_aligned_loc
+ (dw_cfa_location *, poly_int64, HOST_WIDE_INT);
+extern struct dw_loc_descr_node *build_span_loc (struct cfa_reg);
+extern struct dw_loc_descr_node *mem_loc_descriptor
+ (rtx, machine_mode mode, machine_mode mem_mode,
+ enum var_init_status);
+extern bool loc_descr_equal_p (dw_loc_descr_ref, dw_loc_descr_ref);
+extern dw_fde_ref dwarf2out_alloc_current_fde (void);
+
+extern unsigned long size_of_locs (dw_loc_descr_ref);
+extern void output_loc_sequence (dw_loc_descr_ref, int);
+extern void output_loc_sequence_raw (dw_loc_descr_ref);
+
+/* Interface from dwarf2cfi.cc to dwarf2out.cc. */
+extern void lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc,
+ dw_cfa_location *remember);
+extern bool cfa_equal_p (const dw_cfa_location *, const dw_cfa_location *);
+
+extern void output_cfi (dw_cfi_ref, dw_fde_ref, int);
+
+extern GTY(()) cfi_vec cie_cfi_vec;
+
+/* Interface from dwarf2*.c to the rest of the compiler. */
+extern enum dw_cfi_oprnd_type dw_cfi_oprnd1_desc
+ (enum dwarf_call_frame_info cfi);
+extern enum dw_cfi_oprnd_type dw_cfi_oprnd2_desc
+ (enum dwarf_call_frame_info cfi);
+
+extern void output_cfi_directive (FILE *f, struct dw_cfi_node *cfi);
+
+extern void dwarf2out_emit_cfi (dw_cfi_ref cfi);
+
+extern void debug_dwarf (void);
+struct die_struct;
+extern void debug_dwarf_die (struct die_struct *);
+extern void debug_dwarf_loc_descr (dw_loc_descr_ref);
+extern void debug (die_struct &ref);
+extern void debug (die_struct *ptr);
+extern void dwarf2out_set_demangle_name_func (const char *(*) (const char *));
+#ifdef VMS_DEBUGGING_INFO
+extern void dwarf2out_vms_debug_main_pointer (void);
+#endif
+
+enum array_descr_ordering
+{
+ array_descr_ordering_default,
+ array_descr_ordering_row_major,
+ array_descr_ordering_column_major
+};
+
+#define DWARF2OUT_ARRAY_DESCR_INFO_MAX_DIMEN 16
+
+struct array_descr_info
+{
+ int ndimensions;
+ enum array_descr_ordering ordering;
+ tree element_type;
+ tree base_decl;
+ tree data_location;
+ tree allocated;
+ tree associated;
+ tree stride;
+ tree rank;
+ bool stride_in_bits;
+ struct array_descr_dimen
+ {
+ /* GCC uses sizetype for array indices, so lower_bound and upper_bound
+ will likely be "sizetype" values. However, bounds may have another
+ type in the original source code. */
+ tree bounds_type;
+ tree lower_bound;
+ tree upper_bound;
+
+ /* Only Fortran uses more than one dimension for array types. For other
+ languages, the stride can instead be specified for the whole array. */
+ tree stride;
+ } dimen[DWARF2OUT_ARRAY_DESCR_INFO_MAX_DIMEN];
+};
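+
+/* E.g. for the Fortran array "integer :: a(2:5)" (a made-up example),
+   ndimensions == 1 and dimen[0].lower_bound and upper_bound describe
+   the bounds 2 and 5 as sizetype values.  */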
+
+enum fixed_point_scale_factor
+{
+ fixed_point_scale_factor_binary,
+ fixed_point_scale_factor_decimal,
+ fixed_point_scale_factor_arbitrary
+};
+
+struct fixed_point_type_info
+{
+ /* The scale factor is the value one has to multiply the actual data with
+ to get the fixed point value. We support three ways to encode it. */
+ enum fixed_point_scale_factor scale_factor_kind;
+ union
+ {
+ /* For a binary scale factor, the scale factor is 2 ** binary. */
+ int binary;
+ /* For a decimal scale factor, the scale factor is 10 ** decimal. */
+ int decimal;
+ /* For an arbitrary scale factor, the scale factor is the ratio
+ numerator / denominator. */
+ struct { tree numerator; tree denominator; } arbitrary;
+ } scale_factor;
+};
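+
+/* For example, a Q16.16 fixed-point type (a made-up example) would use
+   scale_factor_kind == fixed_point_scale_factor_binary with
+   scale_factor.binary == -16, i.e. value = data * 2**-16.  */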
+
+void dwarf2out_cc_finalize (void);
+
+/* Some DWARF internals are exposed for the needs of DWARF-based debug
+ formats. */
+
+/* Each DIE attribute has a field specifying the attribute kind,
+ a link to the next attribute in the chain, and an attribute value.
+ Attributes are typically linked below the DIE they modify. */
+
+typedef struct GTY(()) dw_attr_struct {
+ enum dwarf_attribute dw_attr;
+ dw_val_node dw_attr_val;
+}
+dw_attr_node;
+
+extern dw_attr_node *get_AT (dw_die_ref, enum dwarf_attribute);
+extern HOST_WIDE_INT AT_int (dw_attr_node *);
+extern unsigned HOST_WIDE_INT AT_unsigned (dw_attr_node *a);
+extern dw_loc_descr_ref AT_loc (dw_attr_node *);
+extern dw_die_ref get_AT_ref (dw_die_ref, enum dwarf_attribute);
+extern const char *get_AT_string (dw_die_ref, enum dwarf_attribute);
+extern enum dw_val_class AT_class (dw_attr_node *);
+extern unsigned HOST_WIDE_INT AT_unsigned (dw_attr_node *);
+extern unsigned get_AT_unsigned (dw_die_ref, enum dwarf_attribute);
+extern int get_AT_flag (dw_die_ref, enum dwarf_attribute);
+
+extern void add_name_attribute (dw_die_ref, const char *);
+
+extern dw_die_ref new_die_raw (enum dwarf_tag);
+extern dw_die_ref base_type_die (tree, bool);
+
+extern dw_die_ref lookup_decl_die (tree);
+extern dw_die_ref lookup_type_die (tree);
+
+extern dw_die_ref dw_get_die_child (dw_die_ref);
+extern dw_die_ref dw_get_die_sib (dw_die_ref);
+extern enum dwarf_tag dw_get_die_tag (dw_die_ref);
+
+/* Data about a single source file. */
+struct GTY((for_user)) dwarf_file_data {
+ const char * key;
+ const char * filename;
+ int emitted_number;
+};
+
+extern struct dwarf_file_data *get_AT_file (dw_die_ref,
+ enum dwarf_attribute);
+
+#endif /* GCC_DWARF2OUT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/edit-context.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/edit-context.h
new file mode 100644
index 0000000..f2e69a6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/edit-context.h
@@ -0,0 +1,67 @@
+/* Determining the results of applying fix-it hints.
+ Copyright (C) 2016-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_EDIT_CONTEXT_H
+#define GCC_EDIT_CONTEXT_H
+
+#include "typed-splay-tree.h"
+
+class edit_context;
+class edited_file;
+
+/* A set of changes to the source code.
+
+ The changes are "atomic" - if any changes can't be applied,
+ none of them can be (tracked by the m_valid flag).
+ Similarly, attempts to add the changes from a rich_location flagged
+ as containing invalid changes mean that the whole of the edit_context
+ is flagged as invalid.
+
+ A complication here is that fix-its are expressed relative to coordinates
+ in the files when they were parsed, before any changes have been made, and
+ so if there's more than one fix-it to be applied, we have to adjust
+ later fix-its to allow for the changes made by earlier ones. This
+ is done by the various "get_effective_column" methods. */
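+
+/* For instance, if an earlier fix-it inserts the three characters "foo"
+   at line 5, column 10, then a later fix-it aimed at line 5, column 12
+   has an effective column of 15 (a made-up example of the
+   adjustment).  */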
+
+class edit_context
+{
+ public:
+ edit_context ();
+
+ bool valid_p () const { return m_valid; }
+
+ void add_fixits (rich_location *richloc);
+
+ char *get_content (const char *filename);
+
+ int get_effective_column (const char *filename, int line, int column);
+
+ char *generate_diff (bool show_filenames);
+ void print_diff (pretty_printer *pp, bool show_filenames);
+
+ private:
+ bool apply_fixit (const fixit_hint *hint);
+ edited_file *get_file (const char *filename);
+ edited_file &get_or_insert_file (const char *filename);
+
+ bool m_valid;
+ typed_splay_tree<const char *, edited_file *> m_files;
+};
+
+#endif /* GCC_EDIT_CONTEXT_H. */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/emit-rtl.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/emit-rtl.h
new file mode 100644
index 0000000..c472c73
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/emit-rtl.h
@@ -0,0 +1,548 @@
+/* Exported functions from emit-rtl.cc
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_EMIT_RTL_H
+#define GCC_EMIT_RTL_H
+
+class temp_slot;
+typedef class temp_slot *temp_slot_p;
+class predefined_function_abi;
+namespace rtl_ssa { class function_info; }
+
+/* Information maintained about the RTL representation of incoming arguments. */
+struct GTY(()) incoming_args {
+ /* Number of bytes of args popped by function being compiled on its return.
+ Zero if no bytes are to be popped.
+ May affect compilation of return insn or of function epilogue. */
+ poly_int64_pod pops_args;
+
+ /* If function's args have a fixed size, this is that size, in bytes.
+ Otherwise, it is -1.
+ May affect compilation of return insn or of function epilogue. */
+ poly_int64_pod size;
+
+ /* # bytes the prologue should push and pretend that the caller pushed them.
+ The prologue must do this, but only if parms can be passed in
+ registers. */
+ int pretend_args_size;
+
+ /* This is the offset from the arg pointer to the place where the first
+ anonymous arg can be found, if there is one. */
+ rtx arg_offset_rtx;
+
+ /* Quantities of various kinds of registers
+ used for the current function's args. */
+ CUMULATIVE_ARGS info;
+
+ /* The arg pointer hard register, or the pseudo into which it was copied. */
+ rtx internal_arg_pointer;
+};
+
+
+/* Datastructures maintained for currently processed function in RTL form. */
+struct GTY(()) rtl_data {
+ void init_stack_alignment ();
+
+ struct expr_status expr;
+ struct emit_status emit;
+ struct varasm_status varasm;
+ struct incoming_args args;
+ struct function_subsections subsections;
+ struct rtl_eh eh;
+
+ /* The ABI of the function, i.e. the interface it presents to its callers.
+ This is the ABI that should be queried to see which registers the
+ function needs to save before it uses them.
+
+ Other functions (including those called by this function) might use
+ different ABIs. */
+ const predefined_function_abi *GTY((skip)) abi;
+
+ rtl_ssa::function_info *GTY((skip)) ssa;
+
+ /* For function.cc */
+
+ /* # of bytes of outgoing arguments. If ACCUMULATE_OUTGOING_ARGS is
+ defined, the needed space is pushed by the prologue. */
+ poly_int64_pod outgoing_args_size;
+
+ /* If nonzero, an RTL expression for the location at which the current
+ function returns its result. If the current function returns its
+ result in a register, current_function_return_rtx will always be
+ the hard register containing the result. */
+ rtx return_rtx;
+
+ /* Vector of initial-value pairs. Each pair consists of a pseudo
+ register of appropriate mode that stores the initial value of a hard
+ register REGNO, and that hard register itself. */
+ /* ??? This could be a VEC but there is currently no way to define an
+ opaque VEC type. */
+ struct initial_value_struct *hard_reg_initial_vals;
+
+ /* A variable living at the top of the frame that holds a known value.
+ Used for detecting stack clobbers. */
+ tree stack_protect_guard;
+
+ /* The __stack_chk_guard variable or expression holding the stack
+ protector canary value. */
+ tree stack_protect_guard_decl;
+
+ /* List (chain of INSN_LIST) of labels heading the current handlers for
+ nonlocal gotos. */
+ rtx_insn_list *x_nonlocal_goto_handler_labels;
+
+ /* Label that will go on function epilogue.
+ Jumping to this label serves as a "return" instruction
+ on machines which require execution of the epilogue on all returns. */
+ rtx_code_label *x_return_label;
+
+ /* Label that will go on the end of function epilogue.
+ Jumping to this label serves as a "naked return" instruction
+ on machines which require execution of the epilogue on all returns. */
+ rtx_code_label *x_naked_return_label;
+
+ /* List (chain of EXPR_LISTs) of all stack slots in this function.
+ Made for the sake of unshare_all_rtl. */
+ vec<rtx, va_gc> *x_stack_slot_list;
+
+ /* List of empty areas in the stack frame. */
+ class frame_space *frame_space_list;
+
+ /* Place after which to insert the tail_recursion_label if we need one. */
+ rtx_note *x_stack_check_probe_note;
+
+ /* Location at which to save the argument pointer if it will need to be
+ referenced. There are two cases where this is done: if nonlocal gotos
+ exist, or if vars stored at an offset from the argument pointer will be
+ needed by inner routines. */
+ rtx x_arg_pointer_save_area;
+
+ /* Dynamic Realign Argument Pointer used for realigning stack. */
+ rtx drap_reg;
+
+ /* Offset to end of allocated area of stack frame.
+ If stack grows down, this is the address of the last stack slot allocated.
+ If stack grows up, this is the address for the next slot. */
+ poly_int64_pod x_frame_offset;
+
+ /* Insn after which register parms and SAVE_EXPRs are born, if nonopt. */
+ rtx_insn *x_parm_birth_insn;
+
+ /* List of all used temporaries allocated, by level. */
+ vec<temp_slot_p, va_gc> *x_used_temp_slots;
+
+ /* List of available temp slots. */
+ class temp_slot *x_avail_temp_slots;
+
+ /* Current nesting level for temporaries. */
+ int x_temp_slot_level;
+
+ /* The largest alignment needed on the stack, including requirement
+ for outgoing stack alignment. */
+ unsigned int stack_alignment_needed;
+
+ /* Preferred alignment of the end of the stack frame, which is the
+ alignment preferred when calling other functions. */
+ unsigned int preferred_stack_boundary;
+
+ /* The minimum alignment of parameter stack. */
+ unsigned int parm_stack_boundary;
+
+ /* The largest alignment of any slot allocated on the stack. */
+ unsigned int max_used_stack_slot_alignment;
+
+ /* The stack alignment estimated before reload, with consideration of
+ following factors:
+ 1. Alignment of local stack variables (max_used_stack_slot_alignment)
+ 2. Alignment requirement to call other functions
+ (preferred_stack_boundary)
+ 3. Alignment of non-local stack variables that might be spilled to
+ the local stack. */
+ unsigned int stack_alignment_estimated;
+
+ /* How many NOP insns to place at each function entry by default. */
+ unsigned short patch_area_size;
+
+ /* How far the real asm entry point is into this area. */
+ unsigned short patch_area_entry;
+
+ /* For reorg. */
+
+ /* Nonzero if function being compiled called builtin_return_addr or
+ builtin_frame_address with nonzero count. */
+ bool accesses_prior_frames;
+
+ /* Nonzero if the function calls __builtin_eh_return. */
+ bool calls_eh_return;
+
+ /* Nonzero if function saves all registers, e.g. if it has a nonlocal
+ label that can reach the exit block via non-exceptional paths. */
+ bool saves_all_registers;
+
+ /* Nonzero if function being compiled has nonlocal gotos to parent
+ function. */
+ bool has_nonlocal_goto;
+
+ /* Nonzero if function being compiled has an asm statement. */
+ bool has_asm_statement;
+
+ /* This bit is used by the exception handling logic. It is set if all
+ calls (if any) are sibling calls. Such functions do not have to
+ have EH tables generated, as they cannot throw. A call to such a
+ function, however, should be treated as throwing if any of its callees
+ can throw. */
+ bool all_throwers_are_sibcalls;
+
+ /* Nonzero if stack limit checking should be enabled in the current
+ function. */
+ bool limit_stack;
+
+ /* Nonzero if profiling code should be generated. */
+ bool profile;
+
+ /* Nonzero if the current function uses the constant pool. */
+ bool uses_const_pool;
+
+ /* Nonzero if the current function uses pic_offset_table_rtx. */
+ bool uses_pic_offset_table;
+
+ /* Nonzero if the current function needs an lsda for exception handling. */
+ bool uses_eh_lsda;
+
+ /* Set when the tail call has been produced. */
+ bool tail_call_emit;
+
+ /* Nonzero if code to initialize arg_pointer_save_area has been emitted. */
+ bool arg_pointer_save_area_init;
+
+ /* Nonzero if current function must be given a frame pointer.
+ Set in reload1.cc or lra-eliminations.cc if anything is allocated
+ on the stack there. */
+ bool frame_pointer_needed;
+
+ /* When set, expand should optimize for speed. */
+ bool maybe_hot_insn_p;
+
+ /* Nonzero if function stack realignment is needed. This flag may be
+ set twice: before and after reload. Before reload it is set according
+ to the stack alignment estimated at that point; it is changed after
+ reload if the criteria for stack realignment differ by then.
+ The value set after reload is the accurate one and is final. */
+ bool stack_realign_needed;
+
+ /* Nonzero if function stack realignment is tried. This flag is set
+ only once before reload. It affects register elimination. This
+ is used to generate DWARF debug info for stack variables. */
+ bool stack_realign_tried;
+
+ /* Nonzero if function being compiled needs dynamic realigned
+ argument pointer (drap) if stack needs realigning. */
+ bool need_drap;
+
+ /* Nonzero if function stack realignment estimation is done, namely
+ stack_realign_needed flag has been set before reload wrt estimated
+ stack alignment info. */
+ bool stack_realign_processed;
+
+ /* Nonzero if function stack realignment has been finalized, namely
+ stack_realign_needed flag has been set and finalized after reload. */
+ bool stack_realign_finalized;
+
+ /* True if dbr_schedule has already been called for this function. */
+ bool dbr_scheduled_p;
+
+ /* True if current function cannot throw. Unlike
+ TREE_NOTHROW (current_function_decl) it is set even for overwritable
+ function where currently compiled version of it is nothrow. */
+ bool nothrow;
+
+ /* True if we performed shrink-wrapping for the current function. */
+ bool shrink_wrapped;
+
+ /* True if we performed shrink-wrapping for separate components for
+ the current function. */
+ bool shrink_wrapped_separate;
+
+ /* Nonzero if function being compiled doesn't modify the stack pointer
+ (ignoring the prologue and epilogue). This is only valid after
+ pass_stack_ptr_mod has run. */
+ bool sp_is_unchanging;
+
+ /* True if the stack pointer is clobbered by asm statement. */
+ bool sp_is_clobbered_by_asm;
+
+ /* Nonzero if function being compiled doesn't contain any calls
+ (ignoring the prologue and epilogue). This is set prior to
+ register allocation in IRA and is valid for the remaining
+ compiler passes. */
+ bool is_leaf;
+
+ /* Nonzero if the function being compiled is a leaf function which only
+ uses leaf registers. This is valid after reload (specifically after
+ sched2) and is useful only if the port defines LEAF_REGISTERS. */
+ bool uses_only_leaf_regs;
+
+ /* Nonzero if the function being compiled has undergone hot/cold partitioning
+ (under flag_reorder_blocks_and_partition) and has at least one cold
+ block. */
+ bool has_bb_partition;
+
+ /* Nonzero if the function being compiled has completed the bb reordering
+ pass. */
+ bool bb_reorder_complete;
+
+ /* Like regs_ever_live, but 1 if a reg is set or clobbered from an
+ asm. Unlike regs_ever_live, elements of this array corresponding
+ to eliminable regs (like the frame pointer) are set if an asm
+ sets them. */
+ HARD_REG_SET asm_clobbers;
+
+ /* All hard registers that need to be zeroed at the return of the routine. */
+ HARD_REG_SET must_be_zero_on_return;
+
+ /* The highest address seen during shorten_branches. */
+ int max_insn_address;
+};
+
+#define return_label (crtl->x_return_label)
+#define naked_return_label (crtl->x_naked_return_label)
+#define stack_slot_list (crtl->x_stack_slot_list)
+#define parm_birth_insn (crtl->x_parm_birth_insn)
+#define frame_offset (crtl->x_frame_offset)
+#define stack_check_probe_note (crtl->x_stack_check_probe_note)
+#define arg_pointer_save_area (crtl->x_arg_pointer_save_area)
+#define used_temp_slots (crtl->x_used_temp_slots)
+#define avail_temp_slots (crtl->x_avail_temp_slots)
+#define temp_slot_level (crtl->x_temp_slot_level)
+#define nonlocal_goto_handler_labels (crtl->x_nonlocal_goto_handler_labels)
+#define frame_pointer_needed (crtl->frame_pointer_needed)
+#define stack_realign_fp (crtl->stack_realign_needed && !crtl->need_drap)
+#define stack_realign_drap (crtl->stack_realign_needed && crtl->need_drap)
+
+extern GTY(()) struct rtl_data x_rtl;
+
+/* Accessor to RTL datastructures. We keep them statically allocated now since
+ we never keep multiple functions. For a threaded compiler we might,
+ however, want to do this differently. */
+#define crtl (&x_rtl)
+
+/* Return whether two MEM_ATTRs are equal. */
+bool mem_attrs_eq_p (const class mem_attrs *, const class mem_attrs *);
+
+/* Set the alias set of MEM to SET. */
+extern void set_mem_alias_set (rtx, alias_set_type);
+
+/* Set the alignment of MEM to ALIGN bits. */
+extern void set_mem_align (rtx, unsigned int);
+
+/* Set the address space of MEM to ADDRSPACE. */
+extern void set_mem_addr_space (rtx, addr_space_t);
+
+/* Set the expr for MEM to EXPR. */
+extern void set_mem_expr (rtx, tree);
+
+/* Set the offset for MEM to OFFSET. */
+extern void set_mem_offset (rtx, poly_int64);
+
+/* Clear the offset recorded for MEM. */
+extern void clear_mem_offset (rtx);
+
+/* Set the size for MEM to SIZE. */
+extern void set_mem_size (rtx, poly_int64);
+
+/* Clear the size recorded for MEM. */
+extern void clear_mem_size (rtx);
+
+/* Set the attributes for MEM appropriate for a spill slot. */
+extern void set_mem_attrs_for_spill (rtx);
+extern tree get_spill_slot_decl (bool);
+
+/* Return a memory reference like MEMREF, but with its address changed to
+ ADDR. The caller is asserting that the actual piece of memory pointed
+ to is the same, just the form of the address is being changed, such as
+ by putting something into a register. */
+extern rtx replace_equiv_address (rtx, rtx, bool = false);
+
+/* Likewise, but the reference is not required to be valid. */
+extern rtx replace_equiv_address_nv (rtx, rtx, bool = false);
+
+extern rtx gen_blockage (void);
+extern rtvec gen_rtvec (int, ...);
+extern rtx copy_insn_1 (rtx);
+extern rtx copy_insn (rtx);
+extern rtx_insn *copy_delay_slot_insn (rtx_insn *);
+extern rtx gen_int_mode (poly_int64, machine_mode);
+extern rtx_insn *emit_copy_of_insn_after (rtx_insn *, rtx_insn *);
+extern void set_reg_attrs_from_value (rtx, rtx);
+extern void set_reg_attrs_for_parm (rtx, rtx);
+extern void set_reg_attrs_for_decl_rtl (tree t, rtx x);
+extern void adjust_reg_mode (rtx, machine_mode);
+extern int mem_expr_equal_p (const_tree, const_tree);
+extern rtx gen_int_shift_amount (machine_mode, poly_int64);
+
+extern bool need_atomic_barrier_p (enum memmodel, bool);
+
+/* Return the current sequence. */
+
+inline struct sequence_stack *
+get_current_sequence (void)
+{
+ return &crtl->emit.seq;
+}
+
+/* Return the outermost sequence. */
+
+inline struct sequence_stack *
+get_topmost_sequence (void)
+{
+ struct sequence_stack *seq, *top;
+
+ seq = get_current_sequence ();
+ do
+ {
+ top = seq;
+ seq = seq->next;
+ } while (seq);
+ return top;
+}
+
+/* Return the first insn of the current sequence or current function. */
+
+inline rtx_insn *
+get_insns (void)
+{
+ return get_current_sequence ()->first;
+}
+
+/* Specify a new insn as the first in the chain. */
+
+inline void
+set_first_insn (rtx_insn *insn)
+{
+ gcc_checking_assert (!insn || !PREV_INSN (insn));
+ get_current_sequence ()->first = insn;
+}
+
+/* Return the last insn emitted in current sequence or current function. */
+
+inline rtx_insn *
+get_last_insn (void)
+{
+ return get_current_sequence ()->last;
+}
+
+/* Specify a new insn as the last in the chain. */
+
+inline void
+set_last_insn (rtx_insn *insn)
+{
+ gcc_checking_assert (!insn || !NEXT_INSN (insn));
+ get_current_sequence ()->last = insn;
+}
+
+/* Return a number larger than any instruction's uid in this function. */
+
+inline int
+get_max_uid (void)
+{
+ return crtl->emit.x_cur_insn_uid;
+}
+
+extern bool valid_for_const_vector_p (machine_mode, rtx);
+extern rtx gen_const_vec_duplicate (machine_mode, rtx);
+extern rtx gen_vec_duplicate (machine_mode, rtx);
+
+extern rtx gen_const_vec_series (machine_mode, rtx, rtx);
+extern rtx gen_vec_series (machine_mode, rtx, rtx);
+
+extern void set_decl_incoming_rtl (tree, rtx, bool);
+
+/* Return a memory reference like MEMREF, but with its mode changed
+ to MODE and its address changed to ADDR.
+ (VOIDmode means don't change the mode.
+ NULL for ADDR means don't change the address.) */
+extern rtx change_address (rtx, machine_mode, rtx);
+
+/* Return a memory reference like MEMREF, but with its mode changed
+ to MODE and its address offset by OFFSET bytes. */
+#define adjust_address(MEMREF, MODE, OFFSET) \
+ adjust_address_1 (MEMREF, MODE, OFFSET, 1, 1, 0, 0)
+
+/* Likewise, but the reference is not required to be valid. */
+#define adjust_address_nv(MEMREF, MODE, OFFSET) \
+ adjust_address_1 (MEMREF, MODE, OFFSET, 0, 1, 0, 0)
+
+/* Return a memory reference like MEMREF, but with its mode changed
+ to MODE and its address offset by OFFSET bytes. Assume that it's
+ for a bitfield and conservatively drop the underlying object if we
+ cannot be sure to stay within its bounds. */
+#define adjust_bitfield_address(MEMREF, MODE, OFFSET) \
+ adjust_address_1 (MEMREF, MODE, OFFSET, 1, 1, 1, 0)
+
+/* As for adjust_bitfield_address, but specify that the width of
+ BLKmode accesses is SIZE bytes. */
+#define adjust_bitfield_address_size(MEMREF, MODE, OFFSET, SIZE) \
+ adjust_address_1 (MEMREF, MODE, OFFSET, 1, 1, 1, SIZE)
+
+/* Likewise, but the reference is not required to be valid. */
+#define adjust_bitfield_address_nv(MEMREF, MODE, OFFSET) \
+ adjust_address_1 (MEMREF, MODE, OFFSET, 0, 1, 1, 0)
+
+/* Return a memory reference like MEMREF, but with its mode changed
+ to MODE and its address changed to ADDR, which is assumed to be
+ increased by OFFSET bytes from MEMREF. */
+#define adjust_automodify_address(MEMREF, MODE, ADDR, OFFSET) \
+ adjust_automodify_address_1 (MEMREF, MODE, ADDR, OFFSET, 1)
+
+/* Likewise, but the reference is not required to be valid. */
+#define adjust_automodify_address_nv(MEMREF, MODE, ADDR, OFFSET) \
+ adjust_automodify_address_1 (MEMREF, MODE, ADDR, OFFSET, 0)
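+
+/* For example (illustrative only; MEM is assumed to be some valid
+   DImode memory reference), the second 32-bit word can be accessed as
+
+     adjust_address (mem, SImode, 4);  */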
+
+extern rtx adjust_address_1 (rtx, machine_mode, poly_int64, int, int,
+ int, poly_int64);
+extern rtx adjust_automodify_address_1 (rtx, machine_mode, rtx,
+ poly_int64, int);
+
+/* Return a memory reference like MEMREF, but whose address is changed by
+ adding OFFSET, an RTX, to it. POW2 is the highest power of two factor
+ known to be in OFFSET (possibly 1). */
+extern rtx offset_address (rtx, rtx, unsigned HOST_WIDE_INT);
+
+/* Given REF, a MEM, and T, either the type of REF or the expression
+ corresponding to REF, set the memory attributes. OBJECTP is nonzero
+ if we are making a new object of this type. */
+extern void set_mem_attributes (rtx, tree, int);
+
+/* Similar, except that BITPOS has not yet been applied to REF, so if
+ we alter MEM_OFFSET according to T then we should subtract BITPOS
+ expecting that it'll be added back in later. */
+extern void set_mem_attributes_minus_bitpos (rtx, tree, int, poly_int64);
+
+/* Return OFFSET if XEXP (MEM, 0) - OFFSET is known to be ALIGN
+ bits aligned for 0 <= OFFSET < ALIGN / BITS_PER_UNIT, or
+ -1 if not known. */
+extern int get_mem_align_offset (rtx, unsigned int);
+
+/* Return a memory reference like MEMREF, but with its mode widened to
+ MODE and adjusted by OFFSET. */
+extern rtx widen_memory_access (rtx, machine_mode, poly_int64);
+
+extern void maybe_set_max_label_num (rtx_code_label *x);
+
+#endif /* GCC_EMIT_RTL_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/errors.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/errors.h
new file mode 100644
index 0000000..a621bb4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/errors.h
@@ -0,0 +1,40 @@
+/* Basic error reporting routines.
+ Copyright (C) 1999-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* warning, error, and fatal. These definitions are suitable for use
+ in the generator programs; eventually we would like to use them in
+ cc1 too, but that's a longer term project.
+
+ N.B. We cannot presently use ATTRIBUTE_PRINTF with these functions,
+ because they can be extended with additional format specifiers which
+ GCC does not know about. */
+
+#ifndef GCC_ERRORS_H
+#define GCC_ERRORS_H
+
+extern void warning (const char *, ...) ATTRIBUTE_PRINTF_1 ATTRIBUTE_COLD;
+extern void error (const char *, ...) ATTRIBUTE_PRINTF_1 ATTRIBUTE_COLD;
+extern void fatal (const char *, ...) ATTRIBUTE_NORETURN ATTRIBUTE_PRINTF_1 ATTRIBUTE_COLD;
+extern void internal_error (const char *, ...) ATTRIBUTE_NORETURN ATTRIBUTE_PRINTF_1 ATTRIBUTE_COLD;
+extern const char *trim_filename (const char *);
+
+extern int have_error;
+extern const char *progname;
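+
+/* A typical (illustrative) use in a generator program, where LOOKUP and
+   NAME are hypothetical:
+
+     if (lookup (name))
+       error ("duplicate definition of '%s'", name);
+
+   with have_error checked before any output is committed.  */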
+
+#endif /* ! GCC_ERRORS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/escaped_string.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/escaped_string.h
new file mode 100644
index 0000000..f1c0d7e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/escaped_string.h
@@ -0,0 +1,43 @@
+/* Shared escaped string class.
+ Copyright (C) 1999-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ESCAPED_STRING_H
+#define GCC_ESCAPED_STRING_H
+
+#include <cstdlib>
+
+/* A class to handle converting a string that might contain
+ control characters (e.g. newline, form-feed, etc.) into one
+ which contains escape sequences instead. */
+
+class escaped_string
+{
+ public:
+ escaped_string () { m_owned = false; m_str = NULL; };
+ ~escaped_string () { if (m_owned) free (m_str); }
+ operator const char *() const { return m_str; }
+ void escape (const char *);
+ private:
+ escaped_string(const escaped_string&) {}
+ escaped_string& operator=(const escaped_string&) { return *this; }
+ char *m_str;
+ bool m_owned;
+};
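+
+/* Sketch of typical use (NAME is an arbitrary C string here):
+
+     escaped_string es;
+     es.escape (name);
+     const char *printable = es;  // implicit conversion
+
+   The destructor frees the escaped copy if one had to be made.  */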
+
+#endif /* ! GCC_ESCAPED_STRING_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/et-forest.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/et-forest.h
new file mode 100644
index 0000000..157b6af
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/et-forest.h
@@ -0,0 +1,85 @@
+/* Et-forest data structure implementation.
+ Copyright (C) 2002-2023 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* This package implements the ET forest data structure. Each tree in
+ the structure maintains a tree structure and offers logarithmic time
+ for tree operations (insertion and removal of nodes and edges) and
+ poly-logarithmic time for nearest common ancestor.
+
+ ET tree stores its structure as a sequence of symbols obtained
+ by dfs(root)
+
+ dfs (node)
+ {
+ s = node;
+ for each child c of node do
+ s = concat (s, dfs (c), node);
+ return s;
+ }
+
+ For example, for the tree
+
+            1
+          / | \
+         2  3  4
+        / |
+       4  5
+
+ the sequence is 1 2 4 2 5 2 1 3 1 4 1.
+
+ The sequence is stored in a slightly modified splay tree.
+ In order to support various types of node values, a hashtable
+ is used to convert node values to the internal representation. */
+
+#ifndef _ET_TREE_H
+#define _ET_TREE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* The structure representing a node in an ET tree. */
+struct et_node
+{
+ void *data; /* The data represented by the node. */
+
+ int dfs_num_in, dfs_num_out; /* Number of the node in the dfs ordering. */
+
+ struct et_node *father; /* Father of the node. */
+ struct et_node *son; /* The first of the sons of the node. */
+ struct et_node *left;
+ struct et_node *right; /* The brothers of the node. */
+
+ struct et_occ *rightmost_occ; /* The rightmost occurrence. */
+ struct et_occ *parent_occ; /* The occurrence of the parent node. */
+};
+
+struct et_node *et_new_tree (void *data);
+void et_free_tree (struct et_node *);
+void et_free_tree_force (struct et_node *);
+void et_free_pools (void);
+void et_set_father (struct et_node *, struct et_node *);
+void et_split (struct et_node *);
+struct et_node *et_nca (struct et_node *, struct et_node *);
+bool et_below (struct et_node *, struct et_node *);
+struct et_node *et_root (struct et_node *);
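+
+/* A minimal usage sketch of the API above (payload objects A and B are
+ hypothetical; a real caller keeps the returned nodes for later queries):
+
+ struct et_node *root = et_new_tree (&A);
+ struct et_node *leaf = et_new_tree (&B);
+ et_set_father (leaf, root); // link LEAF under ROOT
+ struct et_node *n = et_nca (leaf, root); // nearest common ancestor: root
+ bool b = et_below (leaf, root); // true: LEAF is below ROOT
+ et_split (leaf); // detach LEAF from its father
+ et_free_tree (leaf);
+ et_free_tree (root); */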
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _ET_TREE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/except.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/except.h
new file mode 100644
index 0000000..5ecdbc0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/except.h
@@ -0,0 +1,334 @@
+/* Exception Handling interface routines.
+ Copyright (C) 1996-2023 Free Software Foundation, Inc.
+ Contributed by Mike Stump <mrs@cygnus.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Define an include-file marker so that the compiler can keep track of
+ where this file is included. This is e.g. used to avoid including this
+ file in front-end specific files. */
+#ifndef GCC_EXCEPT_H
+#define GCC_EXCEPT_H
+
+
+struct function;
+struct eh_region_d;
+
+/* The type of an exception region. */
+enum eh_region_type
+{
+ /* CLEANUP regions implement e.g. destructors run when exiting a block.
+ They can be generated from both GIMPLE_TRY_FINALLY and GIMPLE_TRY_CATCH
+ nodes. It is expected by the runtime that cleanup regions will *not*
+ resume normal program flow, but will continue propagation of the
+ exception. */
+ ERT_CLEANUP,
+
+ /* TRY regions implement catching an exception. The list of types associated
+ with the attached catch handlers is examined in order by the runtime and
+ control is transferred to the appropriate handler. Note that a NULL type
+ list is a catch-all handler, and that it will catch *all* exceptions
+ including those originating from a different language. */
+ ERT_TRY,
+
+ /* ALLOWED_EXCEPTIONS regions implement exception filtering, e.g. the
+ throw(type-list) specification that can be added to C++ functions.
+ The runtime examines the thrown exception vs the type list, and if
+ the exception does not match, transfers control to the handler. The
+ normal handler for C++ calls __cxa_call_unexpected. */
+ ERT_ALLOWED_EXCEPTIONS,
+
+ /* MUST_NOT_THROW regions prevent all exceptions from propagating. This
+ region type is used in C++ to surround destructors being run inside a
+ CLEANUP region. This differs from an ALLOWED_EXCEPTIONS region with
+ an empty type list in that the runtime is prepared to terminate the
+ program directly. We only generate code for MUST_NOT_THROW regions
+ along control paths that are already handling an exception within the
+ current function. */
+ ERT_MUST_NOT_THROW
+};
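+
+/* As a rough source-level illustration (C++; which regions are emitted
+ for a given construct is front-end and optimization dependent):
+
+ void f ()
+ {
+ some_class obj; // obj's destructor runs in an ERT_CLEANUP region
+ try { g (); } // body of an ERT_TRY region
+ catch (...) { } // catch-all handler: NULL type list
+ }
+
+ void h () throw (int) // dynamic exception specification:
+ { g (); } // ERT_ALLOWED_EXCEPTIONS with type list (int) */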
+
+
+/* A landing pad for a given exception region. Any transfer of control
+ from the EH runtime to the function happens at a landing pad. */
+
+struct GTY(()) eh_landing_pad_d
+{
+ /* The linked list of all landing pads associated with the region. */
+ struct eh_landing_pad_d *next_lp;
+
+ /* The region with which this landing pad is associated. */
+ struct eh_region_d *region;
+
+ /* At the gimple level, the location to which control will be transferred
+ for this landing pad. There can be both EH and normal edges into the
+ block containing the post-landing-pad label. */
+ tree post_landing_pad;
+
+ /* At the rtl level, the location to which the runtime will transfer
+ control. This differs from the post-landing-pad in that the target's
+ EXCEPTION_RECEIVER pattern will be expanded here, as well as other
+ bookkeeping specific to exceptions. There must not be normal edges
+ into the block containing the landing-pad label. */
+ rtx_code_label *landing_pad;
+
+ /* The index of this landing pad within fun->eh->lp_array. */
+ int index;
+};
+
+/* A catch handler associated with an ERT_TRY region. */
+
+struct GTY(()) eh_catch_d
+{
+ /* The double-linked list of all catch handlers for the region. */
+ struct eh_catch_d *next_catch;
+ struct eh_catch_d *prev_catch;
+
+ /* A TREE_LIST of runtime type objects that this catch handler
+ will catch, or NULL if all exceptions are caught. */
+ tree type_list;
+
+ /* A TREE_LIST of INTEGER_CSTs that correspond to the type_list entries,
+ having been mapped by assign_filter_values. These integers are to be
+ compared against the __builtin_eh_filter value. */
+ tree filter_list;
+
+ /* The code that should be executed if this catch handler matches the
+ thrown exception. This label is only maintained until
+ pass_lower_eh_dispatch, at which point it is cleared. */
+ tree label;
+};
+
+/* Describes one exception region. */
+
+struct GTY(()) eh_region_d
+{
+ /* The immediately surrounding region. */
+ struct eh_region_d *outer;
+
+ /* The list of immediately contained regions. */
+ struct eh_region_d *inner;
+ struct eh_region_d *next_peer;
+
+ /* The index of this region within fun->eh->region_array. */
+ int index;
+
+ /* Each region does exactly one thing. */
+ enum eh_region_type type;
+
+ /* Holds the action to perform based on the preceding type. */
+ union eh_region_u {
+ struct eh_region_u_try {
+ /* The double-linked list of all catch handlers for this region. */
+ struct eh_catch_d *first_catch;
+ struct eh_catch_d *last_catch;
+ } GTY ((tag ("ERT_TRY"))) eh_try;
+
+ struct eh_region_u_allowed {
+ /* A TREE_LIST of runtime type objects allowed to pass. */
+ tree type_list;
+ /* The code that should be executed if the thrown exception does
+ not match the type list. This label is only maintained until
+ pass_lower_eh_dispatch, at which point it is cleared. */
+ tree label;
+ /* The integer that will be passed by the runtime to signal that
+ we should execute the code at LABEL. This integer is assigned
+ by assign_filter_values and is to be compared against the
+ __builtin_eh_filter value. */
+ int filter;
+ } GTY ((tag ("ERT_ALLOWED_EXCEPTIONS"))) allowed;
+
+ struct eh_region_u_must_not_throw {
+ /* A function decl to be invoked if this region is actually reachable
+ from within the function, rather than being handled by the runtime.
+ The normal way for this to happen is for there to be a CLEANUP region
+ contained within this MUST_NOT_THROW region. Note that if the
+ runtime handles the MUST_NOT_THROW region, we have no control over
+ what termination function is called; it will be decided by the
+ personality function in effect for this CIE. */
+ tree failure_decl;
+ /* The location assigned to the call of FAILURE_DECL, if expanded. */
+ location_t failure_loc;
+ } GTY ((tag ("ERT_MUST_NOT_THROW"))) must_not_throw;
+ } GTY ((desc ("%0.type"))) u;
+
+ /* The list of landing pads associated with this region. */
+ struct eh_landing_pad_d *landing_pads;
+
+ /* EXC_PTR and FILTER values copied from the runtime for this region.
+ Each region gets its own pseudos so that if there are nested exceptions
+ we do not overwrite the values of the first exception. */
+ rtx exc_ptr_reg, filter_reg;
+
+ /* True if this region should use __cxa_end_cleanup instead
+ of _Unwind_Resume. */
+ bool use_cxa_end_cleanup;
+};
+
+typedef struct eh_landing_pad_d *eh_landing_pad;
+typedef struct eh_catch_d *eh_catch;
+typedef struct eh_region_d *eh_region;
+
+
+
+
+/* The exception status for each function. */
+
+struct GTY(()) eh_status
+{
+ /* The tree of all regions for this function. */
+ eh_region region_tree;
+
+ /* The same information as an indexable array. */
+ vec<eh_region, va_gc> *region_array;
+
+ /* The landing pads as an indexable array. */
+ vec<eh_landing_pad, va_gc> *lp_array;
+
+ /* At the gimple level, a mapping from gimple statement to landing pad
+ or must-not-throw region. See record_stmt_eh_region. */
+ hash_map<gimple *, int> *GTY(()) throw_stmt_table;
+
+ /* All of the runtime type data used by the function. These objects
+ are emitted to the lang-specific-data-area for the function. */
+ vec<tree, va_gc> *ttype_data;
+
+ /* The table of all action chains. These encode the eh_region tree in
+ a compact form for use by the runtime, and is also emitted to the
+ lang-specific-data-area. Note that the ARM EABI uses a different
+ format for the encoding than all other ports. */
+ union eh_status_u {
+ vec<tree, va_gc> *GTY((tag ("1"))) arm_eabi;
+ vec<uchar, va_gc> *GTY((tag ("0"))) other;
+ } GTY ((desc ("targetm.arm_eabi_unwinder"))) ehspec_data;
+};
+
+
+/* Invokes CALLBACK for every exception handler label. Only used by old
+ loop hackery; should not be used by new code. */
+extern void for_each_eh_label (void (*) (rtx));
+
+extern void init_eh_for_function (void);
+
+extern void remove_eh_landing_pad (eh_landing_pad);
+extern void remove_eh_handler (eh_region);
+extern void remove_unreachable_eh_regions (sbitmap);
+
+extern bool current_function_has_exception_handlers (void);
+extern void output_function_exception_table (int);
+
+extern rtx expand_builtin_eh_pointer (tree);
+extern rtx expand_builtin_eh_filter (tree);
+extern rtx expand_builtin_eh_copy_values (tree);
+extern void expand_builtin_unwind_init (void);
+extern rtx expand_builtin_eh_return_data_regno (tree);
+extern rtx expand_builtin_extract_return_addr (tree);
+extern void expand_builtin_init_dwarf_reg_sizes (tree);
+extern rtx expand_builtin_frob_return_addr (tree);
+extern rtx expand_builtin_dwarf_sp_column (void);
+extern void expand_builtin_eh_return (tree, tree);
+extern void expand_eh_return (void);
+extern rtx expand_builtin_extend_pointer (tree);
+
+typedef tree (*duplicate_eh_regions_map) (tree, void *);
+extern hash_map<void *, void *> *duplicate_eh_regions
+ (struct function *, eh_region, int, duplicate_eh_regions_map, void *);
+
+extern void sjlj_emit_function_exit_after (rtx_insn *);
+extern void update_sjlj_context (void);
+
+extern eh_region gen_eh_region_cleanup (eh_region);
+extern eh_region gen_eh_region_try (eh_region);
+extern eh_region gen_eh_region_allowed (eh_region, tree);
+extern eh_region gen_eh_region_must_not_throw (eh_region);
+
+extern eh_catch gen_eh_region_catch (eh_region, tree);
+extern eh_landing_pad gen_eh_landing_pad (eh_region);
+
+extern eh_region get_eh_region_from_number_fn (struct function *, int);
+extern eh_region get_eh_region_from_number (int);
+extern eh_landing_pad get_eh_landing_pad_from_number_fn (struct function*,int);
+extern eh_landing_pad get_eh_landing_pad_from_number (int);
+extern eh_region get_eh_region_from_lp_number_fn (struct function *, int);
+extern eh_region get_eh_region_from_lp_number (int);
+
+extern eh_region eh_region_outermost (struct function *, eh_region, eh_region);
+
+extern void make_reg_eh_region_note (rtx_insn *insn, int ecf_flags, int lp_nr);
+extern void make_reg_eh_region_note_nothrow_nononlocal (rtx_insn *);
+
+extern void verify_eh_tree (struct function *);
+extern void dump_eh_tree (FILE *, struct function *);
+void debug_eh_tree (struct function *);
+extern void add_type_for_runtime (tree);
+extern tree lookup_type_for_runtime (tree);
+extern void assign_filter_values (void);
+
+extern eh_region get_eh_region_from_rtx (const_rtx);
+extern eh_landing_pad get_eh_landing_pad_from_rtx (const_rtx);
+
+extern void finish_eh_generation (void);
+
+struct GTY(()) throw_stmt_node {
+ gimple *stmt;
+ int lp_nr;
+};
+
+extern hash_map<gimple *, int> *get_eh_throw_stmt_table (struct function *);
+extern void set_eh_throw_stmt_table (function *, hash_map<gimple *, int> *);
+
+enum eh_personality_kind {
+ eh_personality_none,
+ eh_personality_any,
+ eh_personality_lang
+};
+
+extern enum eh_personality_kind
+function_needs_eh_personality (struct function *);
+
+/* Pre-order iteration within the eh_region tree. */
+
+inline eh_region
+ehr_next (eh_region r, eh_region start)
+{
+ if (r->inner)
+ r = r->inner;
+ else if (r->next_peer && r != start)
+ r = r->next_peer;
+ else
+ {
+ do
+ {
+ r = r->outer;
+ if (r == start)
+ return NULL;
+ }
+ while (r->next_peer == NULL);
+ r = r->next_peer;
+ }
+ return r;
+}
+
+#define FOR_ALL_EH_REGION_AT(R, START) \
+ for ((R) = (START); (R) != NULL; (R) = ehr_next (R, START))
+
+#define FOR_ALL_EH_REGION_FN(R, FN) \
+ for ((R) = (FN)->eh->region_tree; (R) != NULL; (R) = ehr_next (R, NULL))
+
+#define FOR_ALL_EH_REGION(R) FOR_ALL_EH_REGION_FN (R, cfun)
+
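+/* A minimal usage sketch, e.g. from a dump routine (assuming cfun has an
+ EH tree):
+
+ eh_region r;
+ FOR_ALL_EH_REGION (r)
+ fprintf (dump_file, "region %d, type %d\n", r->index, (int) r->type);
+
+ The walk is pre-order: ehr_next descends into ->inner before following
+ ->next_peer, and returns NULL once it climbs back to the start. */
+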
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/explow.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/explow.h
new file mode 100644
index 0000000..2db4f5c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/explow.h
@@ -0,0 +1,143 @@
+/* Export function prototypes from explow.cc.
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_EXPLOW_H
+#define GCC_EXPLOW_H
+
+/* Return a memory reference like MEMREF, but which is known to have a
+ valid address. */
+extern rtx validize_mem (rtx);
+
+extern rtx use_anchored_address (rtx);
+
+/* Copy given rtx to a new temp reg and return that. */
+extern rtx copy_to_reg (rtx);
+
+/* Like copy_to_reg but always make the reg Pmode. */
+extern rtx copy_addr_to_reg (rtx);
+
+/* Like copy_to_reg but always make the reg the specified mode MODE. */
+extern rtx copy_to_mode_reg (machine_mode, rtx);
+
+/* Copy given rtx to given temp reg and return that. */
+extern rtx copy_to_suggested_reg (rtx, rtx, machine_mode);
+
+/* Copy a value to a register if it isn't already a register.
+ Args are mode (in case value is a constant) and the value. */
+extern rtx force_reg (machine_mode, rtx);
+
+/* Return given rtx, copied into a new temp reg if it was in memory. */
+extern rtx force_not_mem (rtx);
+
+/* Return mode and signedness to use when an argument or result in the
+ given mode is promoted. */
+extern machine_mode promote_function_mode (const_tree, machine_mode, int *,
+ const_tree, int);
+
+/* Return mode and signedness to use when an object in the given mode
+ is promoted. */
+extern machine_mode promote_mode (const_tree, machine_mode, int *);
+
+/* Return mode and signedness to use when object is promoted. */
+machine_mode promote_decl_mode (const_tree, int *);
+
+/* Return mode and signedness to use when object is promoted. */
+machine_mode promote_ssa_mode (const_tree, int *);
+
+/* Remove some bytes from the stack. An rtx says how many. */
+extern void adjust_stack (rtx);
+
+/* Add some bytes to the stack. An rtx says how many. */
+extern void anti_adjust_stack (rtx);
+
+/* Add some bytes to the stack while probing it. An rtx says how many. */
+extern void anti_adjust_stack_and_probe (rtx, bool);
+
+/* Add some bytes to the stack while probing it. An rtx says how
+ many. Add additional probes to prevent stack clashing attacks. */
+extern void anti_adjust_stack_and_probe_stack_clash (rtx);
+
+/* Support for building allocation/probing loops for stack-clash
+ protection of dynamically allocated stack space. */
+extern void compute_stack_clash_protection_loop_data (rtx *, rtx *, rtx *,
+ HOST_WIDE_INT *, rtx);
+extern void emit_stack_clash_protection_probe_loop_start (rtx *, rtx *,
+ rtx, bool);
+extern void emit_stack_clash_protection_probe_loop_end (rtx, rtx,
+ rtx, bool);
+
+/* This enum is used for the following two functions. */
+enum save_level {SAVE_BLOCK, SAVE_FUNCTION, SAVE_NONLOCAL};
+
+/* Save the stack pointer at the specified level. */
+extern void emit_stack_save (enum save_level, rtx *);
+
+/* Restore the stack pointer from a save area of the specified level. */
+extern void emit_stack_restore (enum save_level, rtx);
+
+/* Invoke emit_stack_save for the nonlocal_goto_save_area. */
+extern void update_nonlocal_goto_save_area (void);
+
+/* Record a new stack level. */
+extern void record_new_stack_level (void);
+
+/* Allocate some space on the stack dynamically and return its address. */
+extern rtx allocate_dynamic_stack_space (rtx, unsigned, unsigned,
+ HOST_WIDE_INT, bool);
+
+/* Calculate the necessary size of a constant dynamic stack allocation from the
+ size of the variable area. */
+extern void get_dynamic_stack_size (rtx *, unsigned, unsigned, HOST_WIDE_INT *);
+
+/* Returns the address of the dynamic stack space without allocating it. */
+extern rtx get_dynamic_stack_base (poly_int64, unsigned, rtx);
+
+/* Return an rtx doing runtime alignment to REQUIRED_ALIGN on TARGET. */
+extern rtx align_dynamic_address (rtx, unsigned);
+
+/* Emit one stack probe at ADDRESS, an address within the stack. */
+extern void emit_stack_probe (rtx);
+
+/* Probe a range of stack addresses from FIRST to FIRST+SIZE, inclusive.
+ FIRST is a constant and SIZE is a Pmode RTX. These are offsets from
+ the current stack pointer. STACK_GROWS_DOWNWARD says whether to add
+ or subtract them from the stack pointer. */
+extern void probe_stack_range (HOST_WIDE_INT, rtx);
+
+/* Return an rtx that refers to the value returned by a library call
+ in its original home. This becomes invalid if any more code is emitted. */
+extern rtx hard_libcall_value (machine_mode, rtx);
+
+/* Return an rtx that refers to the value returned by a function
+ in its original home. This becomes invalid if any more code is emitted. */
+extern rtx hard_function_value (const_tree, const_tree, const_tree, int);
+
+/* Convert arg to a valid memory address for specified machine mode that points
+ to a specific named address space, by emitting insns to perform arithmetic
+ if necessary. */
+extern rtx memory_address_addr_space (machine_mode, rtx, addr_space_t);
+
+extern rtx eliminate_constant_term (rtx, rtx *);
+
+/* Like memory_address_addr_space, except assume the memory address points to
+ the generic named address space. */
+#define memory_address(MODE,RTX) \
+ memory_address_addr_space ((MODE), (RTX), ADDR_SPACE_GENERIC)
+
+#endif /* GCC_EXPLOW_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/expmed.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/expmed.h
new file mode 100644
index 0000000..c747a0d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/expmed.h
@@ -0,0 +1,728 @@
+/* Target-dependent costs for expmed.cc.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef EXPMED_H
+#define EXPMED_H 1
+
+#include "insn-codes.h"
+
+enum alg_code {
+ alg_unknown,
+ alg_zero,
+ alg_m, alg_shift,
+ alg_add_t_m2,
+ alg_sub_t_m2,
+ alg_add_factor,
+ alg_sub_factor,
+ alg_add_t2_m,
+ alg_sub_t2_m,
+ alg_impossible
+};
+
+/* Indicates the type of fixup needed after a constant multiplication.
+ BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
+ the result should be negated, and ADD_VARIANT means that the
+ multiplicand should be added to the result. */
+enum mult_variant {basic_variant, negate_variant, add_variant};
+
+bool choose_mult_variant (machine_mode, HOST_WIDE_INT,
+ struct algorithm *, enum mult_variant *, int);
+
+/* This structure holds the "cost" of a multiply sequence. The
+ "cost" field holds the total rtx_cost of every operator in the
+ synthetic multiplication sequence, hence cost(a op b) is defined
+ as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
+ The "latency" field holds the minimum possible latency of the
+ synthetic multiply, on a hypothetical infinitely parallel CPU.
+ This is the critical path, or the maximum height, of the expression
+ tree which is the sum of rtx_costs on the most expensive path from
+ any leaf to the root. Hence latency(a op b) is defined as zero for
+ leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise. */
+
+struct mult_cost {
+ short cost; /* Total rtx_cost of the multiplication sequence. */
+ short latency; /* The latency of the multiplication sequence. */
+};
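+
+/* For example, under these definitions, if every add has rtx_cost 4, the
+ sequence ((a + b) + c) has cost 8 and latency 8 (the adds are serially
+ dependent), while ((a + b) + (c + d)) has cost 12 but latency only 8,
+ since the two inner adds could issue in parallel. */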
+
+/* This macro is used to compare a pointer to a mult_cost against a
+ single integer "rtx_cost" value. This is equivalent to the macro
+ CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}. */
+#define MULT_COST_LESS(X,Y) ((X)->cost < (Y) \
+ || ((X)->cost == (Y) && (X)->latency < (Y)))
+
+/* This macro is used to compare two pointers to mult_costs against
+ each other. The macro returns true if X is cheaper than Y.
+ Currently, the cheaper of two mult_costs is the one with the
+ lower "cost". If "cost"s are tied, the lower latency is cheaper. */
+#define CHEAPER_MULT_COST(X,Y) ((X)->cost < (Y)->cost \
+ || ((X)->cost == (Y)->cost \
+ && (X)->latency < (Y)->latency))
+
+/* This structure records a sequence of operations.
+ `ops' is the number of operations recorded.
+ `cost' is their total cost.
+ The operations are stored in `op' and the corresponding
+ logarithms of the integer coefficients in `log'.
+
+ These are the operations:
+ alg_zero total := 0;
+ alg_m total := multiplicand;
+ alg_shift total := total * coeff;
+ alg_add_t_m2 total := total + multiplicand * coeff;
+ alg_sub_t_m2 total := total - multiplicand * coeff;
+ alg_add_factor total := total * coeff + total;
+ alg_sub_factor total := total * coeff - total;
+ alg_add_t2_m total := total * coeff + multiplicand;
+ alg_sub_t2_m total := total * coeff - multiplicand;
+
+ The first operand must be either alg_zero or alg_m. */
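+
+/* As a worked example, one encoding of a multiply by 10 is:
+
+ op[0] = alg_m total := x
+ op[1] = alg_add_t2_m, log[1] = 2 total := total * 4 + x = 5 * x
+ op[2] = alg_shift, log[2] = 1 total := total * 2 = 10 * x
+
+ i.e. shift-add-shift instead of a multiply instruction. (Illustrative;
+ synth_mult picks the encoding from the target's cost tables.) */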
+
+struct algorithm
+{
+ struct mult_cost cost;
+ short ops;
+ /* The sizes of the OP and LOG fields are not directly related to the
+ word size, but the worst case arises when the multiplicand has few
+ consecutive ones or zeros, i.e., looks like 10101010101...
+ In that case we will generate shift-by-2, add, shift-by-2, add, ...,
+ wordsize operations in total. */
+ enum alg_code op[MAX_BITS_PER_WORD];
+ char log[MAX_BITS_PER_WORD];
+};
+
+/* The entry for our multiplication cache/hash table. */
+struct alg_hash_entry {
+ /* The number we are multiplying by. */
+ unsigned HOST_WIDE_INT t;
+
+ /* The mode in which we are multiplying something by T. */
+ machine_mode mode;
+
+ /* The best multiplication algorithm for t. */
+ enum alg_code alg;
+
+ /* The cost of multiplication if ALG_CODE is not alg_impossible.
+ Otherwise, the cost within which multiplication by T is
+ impossible. */
+ struct mult_cost cost;
+
+ /* Optimized for speed? */
+ bool speed;
+};
+
+/* The number of cache/hash entries. */
+#if HOST_BITS_PER_WIDE_INT == 64
+#define NUM_ALG_HASH_ENTRIES 1031
+#else
+#define NUM_ALG_HASH_ENTRIES 307
+#endif
+
+#define NUM_MODE_IP_INT (NUM_MODE_INT + NUM_MODE_PARTIAL_INT)
+#define NUM_MODE_IPV_INT (NUM_MODE_IP_INT + NUM_MODE_VECTOR_INT)
+
+struct expmed_op_cheap {
+ bool cheap[2][NUM_MODE_IPV_INT];
+};
+
+struct expmed_op_costs {
+ int cost[2][NUM_MODE_IPV_INT];
+};
+
+/* Target-dependent globals. */
+struct target_expmed {
+ /* Each entry of ALG_HASH caches alg_code for some integer. This is
+ actually a hash table. If we have a collision, the older
+ entry is kicked out. */
+ struct alg_hash_entry x_alg_hash[NUM_ALG_HASH_ENTRIES];
+
+ /* True if x_alg_hash might already have been used. */
+ bool x_alg_hash_used_p;
+
+ /* Nonzero means divides or modulus operations are relatively cheap for
+ powers of two, so don't use branches; emit the operation instead.
+ Usually, this will mean that the MD file will emit non-branch
+ sequences. */
+ struct expmed_op_cheap x_sdiv_pow2_cheap;
+ struct expmed_op_cheap x_smod_pow2_cheap;
+
+ /* Cost of various pieces of RTL. Note that some of these are indexed by
+ shift count and some by mode. */
+ int x_zero_cost[2];
+ struct expmed_op_costs x_add_cost;
+ struct expmed_op_costs x_neg_cost;
+ struct expmed_op_costs x_shift_cost[MAX_BITS_PER_WORD];
+ struct expmed_op_costs x_shiftadd_cost[MAX_BITS_PER_WORD];
+ struct expmed_op_costs x_shiftsub0_cost[MAX_BITS_PER_WORD];
+ struct expmed_op_costs x_shiftsub1_cost[MAX_BITS_PER_WORD];
+ struct expmed_op_costs x_mul_cost;
+ struct expmed_op_costs x_sdiv_cost;
+ struct expmed_op_costs x_udiv_cost;
+ int x_mul_widen_cost[2][NUM_MODE_INT];
+ int x_mul_highpart_cost[2][NUM_MODE_INT];
+
+ /* Conversion costs are only defined between two scalar integer modes
+ of different sizes. The first machine mode is the destination mode,
+ and the second is the source mode. */
+ int x_convert_cost[2][NUM_MODE_IP_INT][NUM_MODE_IP_INT];
+};
+
+extern struct target_expmed default_target_expmed;
+#if SWITCHABLE_TARGET
+extern struct target_expmed *this_target_expmed;
+#else
+#define this_target_expmed (&default_target_expmed)
+#endif
+
+/* Return a pointer to the alg_hash_entry at IDX. */
+
+inline struct alg_hash_entry *
+alg_hash_entry_ptr (int idx)
+{
+ return &this_target_expmed->x_alg_hash[idx];
+}
+
+/* Return true if the x_alg_hash field might have been used. */
+
+inline bool
+alg_hash_used_p (void)
+{
+ return this_target_expmed->x_alg_hash_used_p;
+}
+
+/* Set whether the x_alg_hash field might have been used. */
+
+inline void
+set_alg_hash_used_p (bool usedp)
+{
+ this_target_expmed->x_alg_hash_used_p = usedp;
+}
+
+/* Compute an index into the cost arrays by mode class. */
+
+inline int
+expmed_mode_index (machine_mode mode)
+{
+ switch (GET_MODE_CLASS (mode))
+ {
+ case MODE_INT:
+ return mode - MIN_MODE_INT;
+ case MODE_PARTIAL_INT:
+ /* If there are no partial integer modes, help the compiler
+ to figure out this will never happen. See PR59934. */
+ if (MIN_MODE_PARTIAL_INT != VOIDmode)
+ return mode - MIN_MODE_PARTIAL_INT + NUM_MODE_INT;
+ break;
+ case MODE_VECTOR_INT:
+ /* If there are no vector integer modes, help the compiler
+ to figure out this will never happen. See PR59934. */
+ if (MIN_MODE_VECTOR_INT != VOIDmode)
+ return mode - MIN_MODE_VECTOR_INT + NUM_MODE_IP_INT;
+ break;
+ default:
+ break;
+ }
+ gcc_unreachable ();
+}
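+
+/* E.g. for a scalar integer mode, expmed_mode_index (mode) is simply
+ mode - MIN_MODE_INT, so the cost arrays are laid out with the
+ NUM_MODE_INT scalar modes first, partial-integer modes next, and
+ vector-integer modes last, matching NUM_MODE_IPV_INT above. */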
+
+/* Return a pointer to a boolean contained in EOC indicating whether
+ a particular operation performed in MODE is cheap when optimizing
+ for SPEED. */
+
+inline bool *
+expmed_op_cheap_ptr (struct expmed_op_cheap *eoc, bool speed,
+ machine_mode mode)
+{
+ int idx = expmed_mode_index (mode);
+ return &eoc->cheap[speed][idx];
+}
+
+/* Return a pointer to a cost contained in COSTS when a particular
+ operation is performed in MODE when optimizing for SPEED. */
+
+inline int *
+expmed_op_cost_ptr (struct expmed_op_costs *costs, bool speed,
+ machine_mode mode)
+{
+ int idx = expmed_mode_index (mode);
+ return &costs->cost[speed][idx];
+}
+
+/* Subroutine of {set_,}sdiv_pow2_cheap. Not to be used otherwise. */
+
+inline bool *
+sdiv_pow2_cheap_ptr (bool speed, machine_mode mode)
+{
+ return expmed_op_cheap_ptr (&this_target_expmed->x_sdiv_pow2_cheap,
+ speed, mode);
+}
+
+/* Set whether a signed division by a power of 2 is cheap in MODE
+ when optimizing for SPEED. */
+
+inline void
+set_sdiv_pow2_cheap (bool speed, machine_mode mode, bool cheap_p)
+{
+ *sdiv_pow2_cheap_ptr (speed, mode) = cheap_p;
+}
+
+/* Return whether a signed division by a power of 2 is cheap in MODE
+ when optimizing for SPEED. */
+
+inline bool
+sdiv_pow2_cheap (bool speed, machine_mode mode)
+{
+ return *sdiv_pow2_cheap_ptr (speed, mode);
+}
+
+/* Subroutine of {set_,}smod_pow2_cheap. Not to be used otherwise. */
+
+inline bool *
+smod_pow2_cheap_ptr (bool speed, machine_mode mode)
+{
+ return expmed_op_cheap_ptr (&this_target_expmed->x_smod_pow2_cheap,
+ speed, mode);
+}
+
+/* Set whether a signed modulo by a power of 2 is CHEAP in MODE when
+ optimizing for SPEED. */
+
+inline void
+set_smod_pow2_cheap (bool speed, machine_mode mode, bool cheap)
+{
+ *smod_pow2_cheap_ptr (speed, mode) = cheap;
+}
+
+/* Return whether a signed modulo by a power of 2 is cheap in MODE
+ when optimizing for SPEED. */
+
+inline bool
+smod_pow2_cheap (bool speed, machine_mode mode)
+{
+ return *smod_pow2_cheap_ptr (speed, mode);
+}
+
+/* Subroutine of {set_,}zero_cost. Not to be used otherwise. */
+
+inline int *
+zero_cost_ptr (bool speed)
+{
+ return &this_target_expmed->x_zero_cost[speed];
+}
+
+/* Set the COST of loading zero when optimizing for SPEED. */
+
+inline void
+set_zero_cost (bool speed, int cost)
+{
+ *zero_cost_ptr (speed) = cost;
+}
+
+/* Return the COST of loading zero when optimizing for SPEED. */
+
+inline int
+zero_cost (bool speed)
+{
+ return *zero_cost_ptr (speed);
+}
+
+/* Subroutine of {set_,}add_cost. Not to be used otherwise. */
+
+inline int *
+add_cost_ptr (bool speed, machine_mode mode)
+{
+ return expmed_op_cost_ptr (&this_target_expmed->x_add_cost, speed, mode);
+}
+
+/* Set the COST of computing an add in MODE when optimizing for SPEED. */
+
+inline void
+set_add_cost (bool speed, machine_mode mode, int cost)
+{
+ *add_cost_ptr (speed, mode) = cost;
+}
+
+/* Return the cost of computing an add in MODE when optimizing for SPEED. */
+
+inline int
+add_cost (bool speed, machine_mode mode)
+{
+ return *add_cost_ptr (speed, mode);
+}
+
+/* Subroutine of {set_,}neg_cost. Not to be used otherwise. */
+
+inline int *
+neg_cost_ptr (bool speed, machine_mode mode)
+{
+ return expmed_op_cost_ptr (&this_target_expmed->x_neg_cost, speed, mode);
+}
+
+/* Set the COST of computing a negation in MODE when optimizing for SPEED. */
+
+inline void
+set_neg_cost (bool speed, machine_mode mode, int cost)
+{
+ *neg_cost_ptr (speed, mode) = cost;
+}
+
+/* Return the cost of computing a negation in MODE when optimizing for
+ SPEED. */
+
+inline int
+neg_cost (bool speed, machine_mode mode)
+{
+ return *neg_cost_ptr (speed, mode);
+}
+
+/* Subroutine of {set_,}shift_cost. Not to be used otherwise. */
+
+inline int *
+shift_cost_ptr (bool speed, machine_mode mode, int bits)
+{
+ return expmed_op_cost_ptr (&this_target_expmed->x_shift_cost[bits],
+ speed, mode);
+}
+
+/* Set the COST of doing a shift in MODE by BITS when optimizing for SPEED. */
+
+inline void
+set_shift_cost (bool speed, machine_mode mode, int bits, int cost)
+{
+ *shift_cost_ptr (speed, mode, bits) = cost;
+}
+
+/* Return the cost of doing a shift in MODE by BITS when optimizing for
+ SPEED. */
+
+inline int
+shift_cost (bool speed, machine_mode mode, int bits)
+{
+ return *shift_cost_ptr (speed, mode, bits);
+}
+
+/* Subroutine of {set_,}shiftadd_cost. Not to be used otherwise. */
+
+inline int *
+shiftadd_cost_ptr (bool speed, machine_mode mode, int bits)
+{
+ return expmed_op_cost_ptr (&this_target_expmed->x_shiftadd_cost[bits],
+ speed, mode);
+}
+
+/* Set the COST of doing a shift in MODE by BITS followed by an add when
+ optimizing for SPEED. */
+
+inline void
+set_shiftadd_cost (bool speed, machine_mode mode, int bits, int cost)
+{
+ *shiftadd_cost_ptr (speed, mode, bits) = cost;
+}
+
+/* Return the cost of doing a shift in MODE by BITS followed by an add
+ when optimizing for SPEED. */
+
+inline int
+shiftadd_cost (bool speed, machine_mode mode, int bits)
+{
+ return *shiftadd_cost_ptr (speed, mode, bits);
+}
+
+/* Subroutine of {set_,}shiftsub0_cost. Not to be used otherwise. */
+
+inline int *
+shiftsub0_cost_ptr (bool speed, machine_mode mode, int bits)
+{
+ return expmed_op_cost_ptr (&this_target_expmed->x_shiftsub0_cost[bits],
+ speed, mode);
+}
+
+/* Set the COST of doing a shift in MODE by BITS and then subtracting a
+ value when optimizing for SPEED. */
+
+inline void
+set_shiftsub0_cost (bool speed, machine_mode mode, int bits, int cost)
+{
+ *shiftsub0_cost_ptr (speed, mode, bits) = cost;
+}
+
+/* Return the cost of doing a shift in MODE by BITS and then subtracting
+ a value when optimizing for SPEED. */
+
+inline int
+shiftsub0_cost (bool speed, machine_mode mode, int bits)
+{
+ return *shiftsub0_cost_ptr (speed, mode, bits);
+}
+
+/* Subroutine of {set_,}shiftsub1_cost. Not to be used otherwise. */
+
+inline int *
+shiftsub1_cost_ptr (bool speed, machine_mode mode, int bits)
+{
+ return expmed_op_cost_ptr (&this_target_expmed->x_shiftsub1_cost[bits],
+ speed, mode);
+}
+
+/* Set the COST of subtracting a shift in MODE by BITS from a value when
+ optimizing for SPEED. */
+
+inline void
+set_shiftsub1_cost (bool speed, machine_mode mode, int bits, int cost)
+{
+ *shiftsub1_cost_ptr (speed, mode, bits) = cost;
+}
+
+/* Return the cost of subtracting a shift in MODE by BITS from a value
+ when optimizing for SPEED. */
+
+inline int
+shiftsub1_cost (bool speed, machine_mode mode, int bits)
+{
+ return *shiftsub1_cost_ptr (speed, mode, bits);
+}
+
+/* Subroutine of {set_,}mul_cost. Not to be used otherwise. */
+
+inline int *
+mul_cost_ptr (bool speed, machine_mode mode)
+{
+ return expmed_op_cost_ptr (&this_target_expmed->x_mul_cost, speed, mode);
+}
+
+/* Set the COST of doing a multiplication in MODE when optimizing for
+ SPEED. */
+
+inline void
+set_mul_cost (bool speed, machine_mode mode, int cost)
+{
+ *mul_cost_ptr (speed, mode) = cost;
+}
+
+/* Return the cost of doing a multiplication in MODE when optimizing
+ for SPEED. */
+
+inline int
+mul_cost (bool speed, machine_mode mode)
+{
+ return *mul_cost_ptr (speed, mode);
+}
+
+/* Subroutine of {set_,}sdiv_cost. Not to be used otherwise. */
+
+inline int *
+sdiv_cost_ptr (bool speed, machine_mode mode)
+{
+ return expmed_op_cost_ptr (&this_target_expmed->x_sdiv_cost, speed, mode);
+}
+
+/* Set the COST of doing a signed division in MODE when optimizing
+ for SPEED. */
+
+inline void
+set_sdiv_cost (bool speed, machine_mode mode, int cost)
+{
+ *sdiv_cost_ptr (speed, mode) = cost;
+}
+
+/* Return the cost of doing a signed division in MODE when optimizing
+ for SPEED. */
+
+inline int
+sdiv_cost (bool speed, machine_mode mode)
+{
+ return *sdiv_cost_ptr (speed, mode);
+}
+
+/* Subroutine of {set_,}udiv_cost. Not to be used otherwise. */
+
+inline int *
+udiv_cost_ptr (bool speed, machine_mode mode)
+{
+ return expmed_op_cost_ptr (&this_target_expmed->x_udiv_cost, speed, mode);
+}
+
+/* Set the COST of doing an unsigned division in MODE when optimizing
+ for SPEED. */
+
+inline void
+set_udiv_cost (bool speed, machine_mode mode, int cost)
+{
+ *udiv_cost_ptr (speed, mode) = cost;
+}
+
+/* Return the cost of doing an unsigned division in MODE when
+ optimizing for SPEED. */
+
+inline int
+udiv_cost (bool speed, machine_mode mode)
+{
+ return *udiv_cost_ptr (speed, mode);
+}
+
+/* Subroutine of {set_,}mul_widen_cost. Not to be used otherwise. */
+
+inline int *
+mul_widen_cost_ptr (bool speed, machine_mode mode)
+{
+ gcc_assert (GET_MODE_CLASS (mode) == MODE_INT);
+
+ return &this_target_expmed->x_mul_widen_cost[speed][mode - MIN_MODE_INT];
+}
+
+/* Set the COST for computing a widening multiplication in MODE when
+ optimizing for SPEED. */
+
+inline void
+set_mul_widen_cost (bool speed, machine_mode mode, int cost)
+{
+ *mul_widen_cost_ptr (speed, mode) = cost;
+}
+
+/* Return the cost for computing a widening multiplication in MODE when
+ optimizing for SPEED. */
+
+inline int
+mul_widen_cost (bool speed, machine_mode mode)
+{
+ return *mul_widen_cost_ptr (speed, mode);
+}
+
+/* Subroutine of {set_,}mul_highpart_cost. Not to be used otherwise. */
+
+inline int *
+mul_highpart_cost_ptr (bool speed, machine_mode mode)
+{
+ gcc_assert (GET_MODE_CLASS (mode) == MODE_INT);
+ int m = mode - MIN_MODE_INT;
+ gcc_assert (m < NUM_MODE_INT);
+
+ return &this_target_expmed->x_mul_highpart_cost[speed][m];
+}
+
+/* Set the COST for computing the high part of a multiplication in MODE
+ when optimizing for SPEED. */
+
+inline void
+set_mul_highpart_cost (bool speed, machine_mode mode, int cost)
+{
+ *mul_highpart_cost_ptr (speed, mode) = cost;
+}
+
+/* Return the cost for computing the high part of a multiplication in MODE
+ when optimizing for SPEED. */
+
+inline int
+mul_highpart_cost (bool speed, machine_mode mode)
+{
+ return *mul_highpart_cost_ptr (speed, mode);
+}
+
+/* Subroutine of {set_,}convert_cost. Not to be used otherwise. */
+
+inline int *
+convert_cost_ptr (machine_mode to_mode, machine_mode from_mode,
+ bool speed)
+{
+ int to_idx = expmed_mode_index (to_mode);
+ int from_idx = expmed_mode_index (from_mode);
+
+ gcc_assert (IN_RANGE (to_idx, 0, NUM_MODE_IP_INT - 1));
+ gcc_assert (IN_RANGE (from_idx, 0, NUM_MODE_IP_INT - 1));
+
+ return &this_target_expmed->x_convert_cost[speed][to_idx][from_idx];
+}
+
+/* Set the COST for converting from FROM_MODE to TO_MODE when optimizing
+ for SPEED. */
+
+inline void
+set_convert_cost (machine_mode to_mode, machine_mode from_mode,
+ bool speed, int cost)
+{
+ *convert_cost_ptr (to_mode, from_mode, speed) = cost;
+}
+
+/* Return the cost for converting from FROM_MODE to TO_MODE when optimizing
+ for SPEED. */
+
+inline int
+convert_cost (machine_mode to_mode, machine_mode from_mode,
+ bool speed)
+{
+ return *convert_cost_ptr (to_mode, from_mode, speed);
+}
+
+extern int mult_by_coeff_cost (HOST_WIDE_INT, machine_mode, bool);
+extern rtx emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
+ machine_mode mode, machine_mode compare_mode,
+ int unsignedp, rtx x, rtx y, int normalizep,
+ machine_mode target_mode);
+
+/* Arguments MODE, RTX: return an rtx for the negation of that value.
+ May emit insns. */
+extern rtx negate_rtx (machine_mode, rtx);
+
+/* Arguments MODE, RTX: return an rtx for the flipping of that value.
+ May emit insns. */
+extern rtx flip_storage_order (machine_mode, rtx);
+
+/* Expand a logical AND operation. */
+extern rtx expand_and (machine_mode, rtx, rtx, rtx);
+
+/* Emit a store-flag operation. */
+extern rtx emit_store_flag (rtx, enum rtx_code, rtx, rtx, machine_mode,
+ int, int);
+
+/* Like emit_store_flag, but always succeeds. */
+extern rtx emit_store_flag_force (rtx, enum rtx_code, rtx, rtx,
+ machine_mode, int, int);
+
+extern void canonicalize_comparison (machine_mode, enum rtx_code *, rtx *);
+
+/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
+ replace division by D, and put the least significant N bits of the result
+ in *MULTIPLIER_PTR and return the most significant bit. */
+extern unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
+ int, unsigned HOST_WIDE_INT *,
+ int *, int *);
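+
+/* E.g. for unsigned division by D = 3 with N = 32, a suitable choice is
+ the multiplier 0xAAAAAAAB = ceil (2^33 / 3), since
+ x / 3 == ((uint64_t) x * 0xAAAAAAAB) >> 33 for every 32-bit unsigned x;
+ the division is thereby replaced with a multiply-highpart and a shift.
+ (Constant shown for illustration only.) */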
+
+#ifdef TREE_CODE
+extern rtx expand_variable_shift (enum tree_code, machine_mode,
+ rtx, tree, rtx, int);
+extern rtx expand_shift (enum tree_code, machine_mode, rtx, poly_int64, rtx,
+ int);
+extern rtx maybe_expand_shift (enum tree_code, machine_mode, rtx, int, rtx,
+ int);
+#ifdef GCC_OPTABS_H
+extern rtx expand_divmod (int, enum tree_code, machine_mode, rtx, rtx,
+ rtx, int, enum optab_methods = OPTAB_LIB_WIDEN);
+#endif
+#endif
+
+extern void store_bit_field (rtx, poly_uint64, poly_uint64,
+ poly_uint64, poly_uint64,
+ machine_mode, rtx, bool, bool);
+extern rtx extract_bit_field (rtx, poly_uint64, poly_uint64, int, rtx,
+ machine_mode, machine_mode, bool, rtx *);
+extern rtx extract_low_bits (machine_mode, machine_mode, rtx);
+extern rtx expand_mult (machine_mode, rtx, rtx, rtx, int, bool = false);
+extern rtx expand_mult_highpart_adjust (scalar_int_mode, rtx, rtx, rtx,
+ rtx, int);
+
+#endif // EXPMED_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/expr.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/expr.h
new file mode 100644
index 0000000..0c059ed
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/expr.h
@@ -0,0 +1,364 @@
+/* Definitions for code generation pass of GNU compiler.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_EXPR_H
+#define GCC_EXPR_H
+
+/* This is the 4th arg to `expand_expr'.
+ EXPAND_STACK_PARM means we are possibly expanding a call param onto
+ the stack.
+ EXPAND_SUM means it is ok to return a PLUS rtx or MULT rtx.
+ EXPAND_INITIALIZER is similar but also records any labels on forced_labels.
+ EXPAND_CONST_ADDRESS means it is ok to return a MEM whose address
+ is a constant that is not a legitimate address.
+ EXPAND_WRITE means we are only going to write to the resulting rtx.
+ EXPAND_MEMORY means we are interested in a memory result, even if
+ the memory is constant and we could have propagated a constant value,
+ or the memory is unaligned on a STRICT_ALIGNMENT target. */
+enum expand_modifier {EXPAND_NORMAL = 0, EXPAND_STACK_PARM, EXPAND_SUM,
+ EXPAND_CONST_ADDRESS, EXPAND_INITIALIZER, EXPAND_WRITE,
+ EXPAND_MEMORY};
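+
+/* E.g. a plain rvalue is expanded with expand_expr (exp, target, mode,
+ EXPAND_NORMAL), while address arithmetic expanded with EXPAND_SUM may
+ come back as a bare PLUS for the caller to fold into an addressing
+ mode. */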
+
+/* Prevent the compiler from deferring stack pops. See
+ inhibit_defer_pop for more information. */
+#define NO_DEFER_POP (inhibit_defer_pop += 1)
+
+/* Allow the compiler to defer stack pops. See inhibit_defer_pop for
+ more information. */
+#define OK_DEFER_POP (inhibit_defer_pop -= 1)
+
+/* This structure is used to pass around information about exploded
+ unary, binary and trinary expressions between expand_expr_real_1 and
+ friends. */
+typedef struct separate_ops
+{
+ enum tree_code code;
+ location_t location;
+ tree type;
+ tree op0, op1, op2;
+} *sepops;
+
+/* This is run during target initialization to set up which modes can be
+ used directly in memory and to initialize the block move optab. */
+extern void init_expr_target (void);
+
+/* This is run at the start of compiling a function. */
+extern void init_expr (void);
+
+/* Emit some rtl insns to move data between rtx's, converting machine modes.
+ Both modes must be floating or both fixed. */
+extern void convert_move (rtx, rtx, int);
+
+/* Convert an rtx to specified machine mode and return the result. */
+extern rtx convert_to_mode (machine_mode, rtx, int);
+
+/* Convert an rtx to MODE from OLDMODE and return the result. */
+extern rtx convert_modes (machine_mode mode, machine_mode oldmode,
+ rtx x, int unsignedp);
+
+/* Variant of convert_modes for ABI parameter passing/return. */
+extern rtx convert_float_to_wider_int (machine_mode mode, machine_mode fmode,
+ rtx x);
+
+/* Variant of convert_modes for ABI parameter passing/return. */
+extern rtx convert_wider_int_to_float (machine_mode mode, machine_mode imode,
+ rtx x);
+
+/* Expand a call to memcpy or memmove or memcmp, and return the result. */
+extern rtx emit_block_op_via_libcall (enum built_in_function, rtx, rtx, rtx,
+ bool);
+
+inline rtx
+emit_block_copy_via_libcall (rtx dst, rtx src, rtx size, bool tailcall = false)
+{
+ return emit_block_op_via_libcall (BUILT_IN_MEMCPY, dst, src, size, tailcall);
+}
+
+inline rtx
+emit_block_move_via_libcall (rtx dst, rtx src, rtx size, bool tailcall = false)
+{
+ return emit_block_op_via_libcall (BUILT_IN_MEMMOVE, dst, src, size, tailcall);
+}
+
+inline rtx
+emit_block_comp_via_libcall (rtx dst, rtx src, rtx size, bool tailcall = false)
+{
+ return emit_block_op_via_libcall (BUILT_IN_MEMCMP, dst, src, size, tailcall);
+}
+
+/* Emit code to move a block Y to a block X. */
+enum block_op_methods
+{
+ BLOCK_OP_NORMAL,
+ BLOCK_OP_NO_LIBCALL,
+ BLOCK_OP_CALL_PARM,
+ /* Like BLOCK_OP_NORMAL, but the libcall can be tail call optimized. */
+ BLOCK_OP_TAILCALL,
+ /* Like BLOCK_OP_NO_LIBCALL, but instead of emitting a libcall return
+ pc_rtx to indicate nothing has been emitted and let the caller handle
+ it. */
+ BLOCK_OP_NO_LIBCALL_RET
+};
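+
+/* E.g. a plain aggregate copy can be emitted as
+ emit_block_move (target, source, size_rtx, BLOCK_OP_NORMAL), which may
+ fall back to a memcpy libcall; BLOCK_OP_NO_LIBCALL forbids that
+ fallback. (Operand names here are illustrative.) */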
+
+typedef rtx (*by_pieces_constfn) (void *, void *, HOST_WIDE_INT,
+ fixed_size_mode);
+
+/* The second pointer passed to by_pieces_constfn. */
+struct by_pieces_prev
+{
+ rtx data;
+ fixed_size_mode mode;
+};
+
+extern rtx emit_block_move (rtx, rtx, rtx, enum block_op_methods);
+extern rtx emit_block_move_hints (rtx, rtx, rtx, enum block_op_methods,
+ unsigned int, HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ bool bail_out_libcall = false,
+ bool *is_move_done = NULL,
+ bool might_overlap = false);
+extern rtx emit_block_cmp_hints (rtx, rtx, rtx, tree, rtx, bool,
+ by_pieces_constfn, void *);
+extern bool emit_storent_insn (rtx to, rtx from);
+
+/* Copy all or part of a value X into registers starting at REGNO.
+ The number of registers to be filled is NREGS. */
+extern void move_block_to_reg (int, rtx, int, machine_mode);
+
+/* Copy all or part of a BLKmode value X out of registers starting at REGNO.
+ The number of registers to be filled is NREGS. */
+extern void move_block_from_reg (int, rtx, int);
+
+/* Generate a non-consecutive group of registers represented by a PARALLEL. */
+extern rtx gen_group_rtx (rtx);
+
+/* Load a BLKmode value into non-consecutive registers represented by a
+ PARALLEL. */
+extern void emit_group_load (rtx, rtx, tree, poly_int64);
+
+/* Similarly, but load into new temporaries. */
+extern rtx emit_group_load_into_temps (rtx, rtx, tree, poly_int64);
+
+/* Move a non-consecutive group of registers represented by a PARALLEL into
+ a non-consecutive group of registers represented by a PARALLEL. */
+extern void emit_group_move (rtx, rtx);
+
+/* Move a group of registers represented by a PARALLEL into pseudos. */
+extern rtx emit_group_move_into_temps (rtx);
+
+/* Store a BLKmode value from non-consecutive registers represented by a
+ PARALLEL. */
+extern void emit_group_store (rtx, rtx, tree, poly_int64);
+
+extern rtx maybe_emit_group_store (rtx, tree);
+
+/* Mark REG as holding a parameter for the next CALL_INSN.
+ Mode is TYPE_MODE of the non-promoted parameter, or VOIDmode. */
+extern void use_reg_mode (rtx *, rtx, machine_mode);
+extern void clobber_reg_mode (rtx *, rtx, machine_mode);
+
+extern rtx copy_blkmode_to_reg (machine_mode, tree);
+
+/* Mark REG as holding a parameter for the next CALL_INSN. */
+inline void
+use_reg (rtx *fusage, rtx reg)
+{
+ use_reg_mode (fusage, reg, VOIDmode);
+}
+
+/* Mark REG as clobbered by the call with FUSAGE as CALL_INSN_FUNCTION_USAGE. */
+inline void
+clobber_reg (rtx *fusage, rtx reg)
+{
+ clobber_reg_mode (fusage, reg, VOIDmode);
+}
+
+/* Mark NREGS consecutive regs, starting at REGNO, as holding parameters
+ for the next CALL_INSN. */
+extern void use_regs (rtx *, int, int);
+
+/* Mark a PARALLEL as holding a parameter for the next CALL_INSN. */
+extern void use_group_regs (rtx *, rtx);
+
+#ifdef GCC_INSN_CODES_H
+extern rtx expand_cmpstrn_or_cmpmem (insn_code, rtx, rtx, rtx, tree, rtx,
+ HOST_WIDE_INT);
+#endif
+
+/* Write zeros through the storage of OBJECT.
+ If OBJECT has BLKmode, SIZE is its length in bytes. */
+extern rtx clear_storage (rtx, rtx, enum block_op_methods);
+extern rtx clear_storage_hints (rtx, rtx, enum block_op_methods,
+ unsigned int, HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ unsigned);
+/* The same, but always output a library call. */
+extern rtx set_storage_via_libcall (rtx, rtx, rtx, bool = false);
+
+/* Expand a setmem pattern; return true if successful. */
+extern bool set_storage_via_setmem (rtx, rtx, rtx, unsigned int,
+ unsigned int, HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT);
+
+/* Return nonzero if it is desirable to store LEN bytes generated by
+ CONSTFUN with several move instructions by store_by_pieces
+ function. CONSTFUNDATA is a pointer which will be passed as argument
+ in every CONSTFUN call.
+ ALIGN is maximum alignment we can assume.
+ MEMSETP is true if this is a real memset/bzero, not a copy
+ of a const string. */
+extern int can_store_by_pieces (unsigned HOST_WIDE_INT,
+ by_pieces_constfn,
+ void *, unsigned int, bool);
+
+/* Generate several move instructions to store LEN bytes generated by
+ CONSTFUN to block TO. (A MEM rtx with BLKmode). CONSTFUNDATA is a
+ pointer which will be passed as argument in every CONSTFUN call.
+ ALIGN is maximum alignment we can assume.
+ MEMSETP is true if this is a real memset/bzero, not a copy.
+ Returns TO + LEN. */
+extern rtx store_by_pieces (rtx, unsigned HOST_WIDE_INT, by_pieces_constfn,
+ void *, unsigned int, bool, memop_ret);
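+
+#if 0
+/* A minimal sketch of a by_pieces_constfn callback (hypothetical helper,
+ loosely modeled on the memset expanders in builtins.cc). It returns an
+ rtx for the constant chunk of mode MODE at each offset; a bzero-style
+ caller can ignore both opaque pointers and the offset. */
+static rtx
+zero_constfn (void *, void *, HOST_WIDE_INT, fixed_size_mode mode)
+{
+ return CONST0_RTX (mode);
+}
+/* ... can_store_by_pieces (len, zero_constfn, NULL, align, true) ... */
+#endif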
+
+/* If can_store_by_pieces passes for worst-case values near MAX_LEN, call
+ store_by_pieces within conditionals so as to handle variable LEN efficiently,
+ storing VAL, if non-NULL_RTX, or valc instead. */
+extern bool try_store_by_multiple_pieces (rtx to, rtx len,
+ unsigned int ctz_len,
+ unsigned HOST_WIDE_INT min_len,
+ unsigned HOST_WIDE_INT max_len,
+ rtx val, char valc,
+ unsigned int align);
+
+/* Emit insns to set X from Y. */
+extern rtx_insn *emit_move_insn (rtx, rtx);
+extern rtx_insn *gen_move_insn (rtx, rtx);
+
+/* Emit insns to set X from Y, with no frills. */
+extern rtx_insn *emit_move_insn_1 (rtx, rtx);
+
+extern rtx_insn *emit_move_complex_push (machine_mode, rtx, rtx);
+extern rtx_insn *emit_move_complex_parts (rtx, rtx);
+extern rtx read_complex_part (rtx, bool);
+extern void write_complex_part (rtx, rtx, bool, bool);
+extern rtx emit_move_resolve_push (machine_mode, rtx);
+
+/* Push a block of length SIZE (perhaps variable)
+ and return an rtx to address the beginning of the block. */
+extern rtx push_block (rtx, poly_int64, int);
+
+/* Generate code to push something onto the stack, given its mode and type. */
+extern bool emit_push_insn (rtx, machine_mode, tree, rtx, unsigned int,
+ int, rtx, poly_int64, rtx, rtx, int, rtx, bool);
+
+/* Extract the accessible bit-range from a COMPONENT_REF. */
+extern void get_bit_range (poly_uint64_pod *, poly_uint64_pod *, tree,
+ poly_int64_pod *, tree *);
+
+/* Expand an assignment that stores the value of FROM into TO. */
+extern void expand_assignment (tree, tree, bool);
+
+/* Generate code for computing expression EXP,
+ and storing the value into TARGET.
+ If SUGGEST_REG is nonzero, copy the value through a register
+ and return that register, if that is possible. */
+extern rtx store_expr (tree, rtx, int, bool, bool);
+
+/* Given an rtx that may include add and multiply operations,
+ generate them as insns and return a pseudo-reg containing the value.
+ Useful after calling expand_expr with 1 as sum_ok. */
+extern rtx force_operand (rtx, rtx);
+
+/* Work horses for expand_expr. */
+extern rtx expand_expr_real (tree, rtx, machine_mode,
+ enum expand_modifier, rtx *, bool);
+extern rtx expand_expr_real_1 (tree, rtx, machine_mode,
+ enum expand_modifier, rtx *, bool);
+extern rtx expand_expr_real_2 (sepops, rtx, machine_mode,
+ enum expand_modifier);
+
+/* Generate code for computing expression EXP.
+ An rtx for the computed value is returned. The value is never null.
+ In the case of a void EXP, const0_rtx is returned. */
+inline rtx
+expand_expr (tree exp, rtx target, machine_mode mode,
+ enum expand_modifier modifier)
+{
+ return expand_expr_real (exp, target, mode, modifier, NULL, false);
+}
+
+inline rtx
+expand_normal (tree exp)
+{
+ return expand_expr_real (exp, NULL_RTX, VOIDmode, EXPAND_NORMAL, NULL, false);
+}
+
+
+/* Return STRING_CST and set offset, size and decl, if the first
+ argument corresponds to a string constant. */
+extern tree string_constant (tree, tree *, tree *, tree *);
+/* Similar to string_constant, return a STRING_CST corresponding
+ to the value representation of the first argument if it's
+ a constant. */
+extern tree byte_representation (tree, tree *, tree *, tree *);
+
+extern enum tree_code maybe_optimize_mod_cmp (enum tree_code, tree *, tree *);
+extern void maybe_optimize_sub_cmp_0 (enum tree_code, tree *, tree *);
+
+/* Two different ways of generating switch statements. */
+extern int try_casesi (tree, tree, tree, tree, rtx, rtx, rtx, profile_probability);
+extern int try_tablejump (tree, tree, tree, tree, rtx, rtx, profile_probability);
+
+extern int safe_from_p (const_rtx, tree, int);
+
+/* Get the personality libfunc for a function decl. */
+rtx get_personality_function (tree);
+
+/* Determine whether the LEN bytes can be moved by using several move
+ instructions. Return nonzero if a call to move_by_pieces should
+ succeed. */
+extern bool can_move_by_pieces (unsigned HOST_WIDE_INT, unsigned int);
+
+extern unsigned HOST_WIDE_INT highest_pow2_factor (const_tree);
+
+extern bool categorize_ctor_elements (const_tree, HOST_WIDE_INT *,
+ HOST_WIDE_INT *, HOST_WIDE_INT *,
+ bool *);
+extern bool immediate_const_ctor_p (const_tree, unsigned int words = 1);
+extern void store_constructor (tree, rtx, int, poly_int64, bool);
+extern HOST_WIDE_INT int_expr_size (const_tree exp);
+
+extern void expand_operands (tree, tree, rtx, rtx*, rtx*,
+ enum expand_modifier);
+
+/* The following declarations assume rtl.h and tree.h were included. */
+/* Return an rtx for the size in bytes of the value of an expr. */
+extern rtx expr_size (tree);
+
+extern bool mem_ref_refers_to_non_mem_p (tree);
+extern bool non_mem_decl_p (tree);
+
+#endif /* GCC_EXPR_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/fibonacci_heap.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/fibonacci_heap.h
new file mode 100644
index 0000000..17c360e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/fibonacci_heap.h
@@ -0,0 +1,684 @@
+/* Fibonacci heap for GNU compiler.
+ Copyright (C) 1998-2023 Free Software Foundation, Inc.
+ Contributed by Daniel Berlin (dan@cgsoftware.com).
+ Re-implemented in C++ by Martin Liska <mliska@suse.cz>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Fibonacci heaps are somewhat complex, but there's an article in
+ DDJ that explains them pretty well:
+
+ http://www.ddj.com/articles/1997/9701/9701o/9701o.htm?topic=algoritms
+
+ Introduction to Algorithms by Cormen and Rivest also goes over them.
+
+ The original paper that introduced them is "Fibonacci heaps and their
+ uses in improved network optimization algorithms" by Fredman and
+ Tarjan (JACM 34(3), July 1987).
+
+ Amortized and real worst case time for operations:
+
+ ExtractMin: O(lg n) amortized. O(n) worst case.
+ DecreaseKey: O(1) amortized. O(lg n) worst case.
+ Insert: O(1) amortized.
+ Union: O(1) amortized. */
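+
+/* A minimal usage sketch (assumed int keys with INT_MIN as the global
+ minimum key and no payload):
+
+ fibonacci_heap<int, void> heap (INT_MIN);
+ fibonacci_node<int, void> *n = heap.insert (42, NULL);
+ heap.insert (7, NULL);
+ heap.decrease_key (n, 5); // 42 -> 5, now the minimum
+ int k = heap.min_key (); // == 5 */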
+
+#ifndef GCC_FIBONACCI_HEAP_H
+#define GCC_FIBONACCI_HEAP_H
+
+/* Forward definition. */
+
+template<class K, class V>
+class fibonacci_heap;
+
+/* Fibonacci heap node class. */
+
+template<class K, class V>
+class fibonacci_node
+{
+ typedef fibonacci_node<K,V> fibonacci_node_t;
+ friend class fibonacci_heap<K,V>;
+
+public:
+ /* Default constructor. */
+ fibonacci_node (): m_parent (NULL), m_child (NULL), m_left (this),
+ m_right (this), m_data (NULL), m_degree (0), m_mark (0)
+ {
+ }
+
+ /* Constructor for a node with given KEY. */
+ fibonacci_node (K key, V *data = NULL): m_parent (NULL), m_child (NULL),
+ m_left (this), m_right (this), m_key (key), m_data (data),
+ m_degree (0), m_mark (0)
+ {
+ }
+
+ /* Compare fibonacci node with OTHER node. */
+ int compare (fibonacci_node_t *other)
+ {
+ if (m_key < other->m_key)
+ return -1;
+ if (m_key > other->m_key)
+ return 1;
+ return 0;
+ }
+
+ /* Compare the node with a given KEY. */
+ int compare_data (K key)
+ {
+ return fibonacci_node_t (key).compare (this);
+ }
+
+ /* Remove fibonacci heap node. */
+ fibonacci_node_t *remove ();
+
+ /* Link the node with PARENT. */
+ void link (fibonacci_node_t *parent);
+
+ /* Return key associated with the node. */
+ K get_key ()
+ {
+ return m_key;
+ }
+
+ /* Return data associated with the node. */
+ V *get_data ()
+ {
+ return m_data;
+ }
+
+private:
+ /* Put node B after this node. */
+ void insert_after (fibonacci_node_t *b);
+
+ /* Insert fibonacci node B after this node. */
+ void insert_before (fibonacci_node_t *b)
+ {
+ m_left->insert_after (b);
+ }
+
+ /* Parent node. */
+ fibonacci_node *m_parent;
+ /* Child node. */
+ fibonacci_node *m_child;
+ /* Left sibling. */
+ fibonacci_node *m_left;
+  /* Right sibling. */
+ fibonacci_node *m_right;
+ /* Key associated with node. */
+ K m_key;
+ /* Data associated with node. */
+ V *m_data;
+
+#if defined (__GNUC__) && (!defined (SIZEOF_INT) || SIZEOF_INT < 4)
+ /* Degree of the node. */
+ __extension__ unsigned long int m_degree : 31;
+ /* Mark of the node. */
+ __extension__ unsigned long int m_mark : 1;
+#else
+ /* Degree of the node. */
+ unsigned int m_degree : 31;
+ /* Mark of the node. */
+ unsigned int m_mark : 1;
+#endif
+};
+
+/* Fibonacci heap class. */
+template<class K, class V>
+class fibonacci_heap
+{
+ typedef fibonacci_node<K,V> fibonacci_node_t;
+ friend class fibonacci_node<K,V>;
+
+public:
+ /* Default constructor. ALLOCATOR is optional and is primarily useful
+ when heaps are going to be merged (in that case they need to be allocated
+     in the same alloc pool). */
+ fibonacci_heap (K global_min_key, pool_allocator *allocator = NULL):
+ m_nodes (0), m_min (NULL), m_root (NULL),
+ m_global_min_key (global_min_key),
+ m_allocator (allocator), m_own_allocator (false)
+ {
+ if (!m_allocator)
+ {
+ m_allocator = new pool_allocator ("Fibonacci heap",
+ sizeof (fibonacci_node_t));
+ m_own_allocator = true;
+ }
+ }
+
+ /* Destructor. */
+ ~fibonacci_heap ()
+ {
+ /* Actual memory will be released by the destructor of m_allocator. */
+ if (need_finalization_p<fibonacci_node_t> () || !m_own_allocator)
+ while (m_min != NULL)
+ {
+ fibonacci_node_t *n = extract_minimum_node ();
+ n->~fibonacci_node_t ();
+ if (!m_own_allocator)
+ m_allocator->remove (n);
+ }
+ if (m_own_allocator)
+ delete m_allocator;
+ }
+
+ /* Insert new node given by KEY and DATA associated with the key. */
+ fibonacci_node_t *insert (K key, V *data);
+
+ /* Return true if no entry is present. */
+ bool empty () const
+ {
+ return m_nodes == 0;
+ }
+
+ /* Return the number of nodes. */
+ size_t nodes () const
+ {
+ return m_nodes;
+ }
+
+ /* Return minimal key presented in the heap. */
+ K min_key () const
+ {
+ if (m_min == NULL)
+ gcc_unreachable ();
+
+ return m_min->m_key;
+ }
+
+ /* For given NODE, set new KEY value. */
+ K replace_key (fibonacci_node_t *node, K key)
+ {
+ K okey = node->m_key;
+
+ replace_key_data (node, key, node->m_data);
+ return okey;
+ }
+
+ /* For given NODE, decrease value to new KEY. */
+ K decrease_key (fibonacci_node_t *node, K key)
+ {
+ gcc_assert (key <= node->m_key);
+ return replace_key (node, key);
+ }
+
+ /* For given NODE, set new KEY and DATA value. */
+ V *replace_key_data (fibonacci_node_t *node, K key, V *data);
+
+ /* Extract minimum node in the heap. If RELEASE is specified,
+ memory is released. */
+ V *extract_min (bool release = true);
+
+ /* Return value associated with minimum node in the heap. */
+ V *min () const
+ {
+ if (m_min == NULL)
+ return NULL;
+
+ return m_min->m_data;
+ }
+
+ /* Replace data associated with NODE and replace it with DATA. */
+ V *replace_data (fibonacci_node_t *node, V *data)
+ {
+ return replace_key_data (node, node->m_key, data);
+ }
+
+ /* Delete NODE in the heap. */
+ V *delete_node (fibonacci_node_t *node, bool release = true);
+
+ /* Union the heap with HEAPB. */
+ fibonacci_heap *union_with (fibonacci_heap *heapb);
+
+private:
+ /* Insert new NODE given by KEY and DATA associated with the key. */
+ fibonacci_node_t *insert (fibonacci_node_t *node, K key, V *data);
+
+  /* Insert new NODE whose key and value are already filled in. */
+ fibonacci_node_t *insert_node (fibonacci_node_t *node);
+
+ /* Insert it into the root list. */
+ void insert_root (fibonacci_node_t *node);
+
+ /* Remove NODE from PARENT's child list. */
+ void cut (fibonacci_node_t *node, fibonacci_node_t *parent);
+
+  /* Process cut of node Y and do it recursively. */
+ void cascading_cut (fibonacci_node_t *y);
+
+ /* Extract minimum node from the heap. */
+  fibonacci_node_t *extract_minimum_node ();
+
+ /* Remove root NODE from the heap. */
+ void remove_root (fibonacci_node_t *node);
+
+ /* Consolidate heap. */
+ void consolidate ();
+
+ /* Number of nodes. */
+ size_t m_nodes;
+ /* Minimum node of the heap. */
+ fibonacci_node_t *m_min;
+ /* Root node of the heap. */
+ fibonacci_node_t *m_root;
+ /* Global minimum given in the heap construction. */
+ K m_global_min_key;
+
+ /* Allocator used to hold nodes. */
+ pool_allocator *m_allocator;
+  /* True if the allocator is owned by the current heap only. */
+ bool m_own_allocator;
+};
+
+/* Remove fibonacci heap node. */
+
+template<class K, class V>
+fibonacci_node<K,V> *
+fibonacci_node<K,V>::remove ()
+{
+ fibonacci_node<K,V> *ret;
+
+ if (this == m_left)
+ ret = NULL;
+ else
+ ret = m_left;
+
+ if (m_parent != NULL && m_parent->m_child == this)
+ m_parent->m_child = ret;
+
+ m_right->m_left = m_left;
+ m_left->m_right = m_right;
+
+ m_parent = NULL;
+ m_left = this;
+ m_right = this;
+
+ return ret;
+}
+
+/* Link the node with PARENT. */
+
+template<class K, class V>
+void
+fibonacci_node<K,V>::link (fibonacci_node<K,V> *parent)
+{
+ if (parent->m_child == NULL)
+ parent->m_child = this;
+ else
+ parent->m_child->insert_before (this);
+ m_parent = parent;
+ parent->m_degree++;
+ m_mark = 0;
+}
+
+/* Put node B after this node. */
+
+template<class K, class V>
+void
+fibonacci_node<K,V>::insert_after (fibonacci_node<K,V> *b)
+{
+ fibonacci_node<K,V> *a = this;
+
+ if (a == a->m_right)
+ {
+ a->m_right = b;
+ a->m_left = b;
+ b->m_right = a;
+ b->m_left = a;
+ }
+ else
+ {
+ b->m_right = a->m_right;
+ a->m_right->m_left = b;
+ a->m_right = b;
+ b->m_left = a;
+ }
+}
+
+/* Insert new node given by KEY and DATA associated with the key. */
+
+template<class K, class V>
+fibonacci_node<K,V>*
+fibonacci_heap<K,V>::insert (K key, V *data)
+{
+ /* Create the new node. */
+ fibonacci_node<K,V> *node = new (m_allocator->allocate ())
+ fibonacci_node_t (key, data);
+
+ return insert_node (node);
+}
+
+/* Insert new NODE with given KEY and DATA associated with the key. */
+
+template<class K, class V>
+fibonacci_node<K,V>*
+fibonacci_heap<K,V>::insert (fibonacci_node_t *node, K key, V *data)
+{
+ /* Set the node's data. */
+ node->m_data = data;
+ node->m_key = key;
+
+ return insert_node (node);
+}
+
+/* Insert new NODE whose key and value are already filled in. */
+
+template<class K, class V>
+fibonacci_node<K,V>*
+fibonacci_heap<K,V>::insert_node (fibonacci_node_t *node)
+{
+ /* Insert it into the root list. */
+ insert_root (node);
+
+  /* If there was no minimum, or this key is less than the min,
+     it's the new min. */
+ if (m_min == NULL || node->m_key < m_min->m_key)
+ m_min = node;
+
+ m_nodes++;
+
+ return node;
+}
+
+/* For given NODE, set new KEY and DATA value. */
+
+template<class K, class V>
+V*
+fibonacci_heap<K,V>::replace_key_data (fibonacci_node<K,V> *node, K key,
+ V *data)
+{
+ K okey;
+ fibonacci_node<K,V> *y;
+ V *odata = node->m_data;
+
+  /* A real increase of the key is handled by deleting the node and
+     re-inserting it. */
+ if (node->compare_data (key) > 0)
+ {
+ delete_node (node, false);
+
+ node = new (node) fibonacci_node_t ();
+ insert (node, key, data);
+
+ return odata;
+ }
+
+ okey = node->m_key;
+ node->m_data = data;
+ node->m_key = key;
+ y = node->m_parent;
+
+ /* Short-circuit if the key is the same, as we then don't have to
+ do anything. Except if we're trying to force the new node to
+ be the new minimum for delete. */
+ if (okey == key && okey != m_global_min_key)
+ return odata;
+
+  /* These two compares are specifically <= 0 to make sure that in the case
+     of equality, a node whose data we replaced becomes the new min. This
+     is needed so that delete's call to extract_min gets the right node. */
+ if (y != NULL && node->compare (y) <= 0)
+ {
+ cut (node, y);
+ cascading_cut (y);
+ }
+
+ if (node->compare (m_min) <= 0)
+ m_min = node;
+
+ return odata;
+}
+
+/* Extract minimum node in the heap. Delete fibonacci node if RELEASE
+ is true. */
+
+template<class K, class V>
+V*
+fibonacci_heap<K,V>::extract_min (bool release)
+{
+ fibonacci_node<K,V> *z;
+ V *ret = NULL;
+
+ /* If we don't have a min set, it means we have no nodes. */
+ if (m_min != NULL)
+ {
+ /* Otherwise, extract the min node, free the node, and return the
+ node's data. */
+ z = extract_minimum_node ();
+ ret = z->m_data;
+
+ if (release)
+ {
+ z->~fibonacci_node_t ();
+ m_allocator->remove (z);
+ }
+ }
+
+ return ret;
+}
+
+/* Delete NODE in the heap, if RELEASE is specified memory is released. */
+
+template<class K, class V>
+V*
+fibonacci_heap<K,V>::delete_node (fibonacci_node<K,V> *node, bool release)
+{
+ V *ret = node->m_data;
+
+  /* To perform a delete, we just give the node the minimum key and extract it. */
+ replace_key (node, m_global_min_key);
+ if (node != m_min)
+ {
+ fprintf (stderr, "Can't force minimum on fibheap.\n");
+ abort ();
+ }
+ extract_min (release);
+
+ return ret;
+}
+
+/* Union the heap with HEAPB. One of the heaps is going to be deleted. */
+
+template<class K, class V>
+fibonacci_heap<K,V>*
+fibonacci_heap<K,V>::union_with (fibonacci_heap<K,V> *heapb)
+{
+ fibonacci_heap<K,V> *heapa = this;
+
+ fibonacci_node<K,V> *a_root, *b_root;
+
+  /* Both heaps must share the same allocator. */
+ gcc_checking_assert (m_allocator == heapb->m_allocator);
+
+ /* If one of the heaps is empty, the union is just the other heap. */
+ if ((a_root = heapa->m_root) == NULL)
+ {
+ delete (heapa);
+ return heapb;
+ }
+ if ((b_root = heapb->m_root) == NULL)
+ {
+ delete (heapb);
+ return heapa;
+ }
+
+  /* Splice the two circular root lists together. */
+ a_root->m_left->m_right = b_root;
+ b_root->m_left->m_right = a_root;
+ std::swap (a_root->m_left, b_root->m_left);
+ heapa->m_nodes += heapb->m_nodes;
+
+ /* And set the new minimum, if it's changed. */
+ if (heapb->m_min->compare (heapa->m_min) < 0)
+ heapa->m_min = heapb->m_min;
+
+  /* Set m_min to NULL so that heapb's destructor does not delete live
+     fibonacci nodes. */
+ heapb->m_min = NULL;
+ delete (heapb);
+
+ return heapa;
+}
+
+/* Insert it into the root list. */
+
+template<class K, class V>
+void
+fibonacci_heap<K,V>::insert_root (fibonacci_node_t *node)
+{
+ /* If the heap is currently empty, the new node becomes the singleton
+ circular root list. */
+ if (m_root == NULL)
+ {
+ m_root = node;
+ node->m_left = node;
+ node->m_right = node;
+ return;
+ }
+
+  /* Otherwise, insert it in the circular root list between the root
+     and its right neighbor. */
+ m_root->insert_after (node);
+}
+
+/* Remove NODE from PARENT's child list. */
+
+template<class K, class V>
+void
+fibonacci_heap<K,V>::cut (fibonacci_node<K,V> *node,
+ fibonacci_node<K,V> *parent)
+{
+ node->remove ();
+ parent->m_degree--;
+ insert_root (node);
+ node->m_parent = NULL;
+ node->m_mark = 0;
+}
+
+/* Process cut of node Y and do it recursively. */
+
+template<class K, class V>
+void
+fibonacci_heap<K,V>::cascading_cut (fibonacci_node<K,V> *y)
+{
+ fibonacci_node<K,V> *z;
+
+ while ((z = y->m_parent) != NULL)
+ {
+ if (y->m_mark == 0)
+ {
+ y->m_mark = 1;
+ return;
+ }
+ else
+ {
+ cut (y, z);
+ y = z;
+ }
+ }
+}
+
+/* Extract minimum node from the heap. */
+
+template<class K, class V>
+fibonacci_node<K,V>*
+fibonacci_heap<K,V>::extract_minimum_node ()
+{
+ fibonacci_node<K,V> *ret = m_min;
+ fibonacci_node<K,V> *x, *y, *orig;
+
+  /* Attach the child list of the minimum node to the root list of the heap.
+     If there is no child list, nothing needs to be done. */
+ for (x = ret->m_child, orig = NULL; x != orig && x != NULL; x = y)
+ {
+ if (orig == NULL)
+ orig = x;
+ y = x->m_right;
+ x->m_parent = NULL;
+ insert_root (x);
+ }
+
+ /* Remove the old root. */
+ remove_root (ret);
+ m_nodes--;
+
+ /* If we are left with no nodes, then the min is NULL. */
+ if (m_nodes == 0)
+ m_min = NULL;
+ else
+ {
+ /* Otherwise, consolidate to find new minimum, as well as do the reorg
+ work that needs to be done. */
+ m_min = ret->m_right;
+ consolidate ();
+ }
+
+ return ret;
+}
+
+/* Remove root NODE from the heap. */
+
+template<class K, class V>
+void
+fibonacci_heap<K,V>::remove_root (fibonacci_node<K,V> *node)
+{
+ if (node->m_left == node)
+ m_root = NULL;
+ else
+ m_root = node->remove ();
+}
+
+/* Consolidate heap. */
+
+template<class K, class V>
+void
+fibonacci_heap<K,V>::consolidate ()
+{
+ const int D = 1 + 8 * sizeof (long);
+ fibonacci_node<K,V> *a[D];
+ fibonacci_node<K,V> *w, *x, *y;
+ int i, d;
+
+ memset (a, 0, sizeof (a));
+
+ while ((w = m_root) != NULL)
+ {
+ x = w;
+ remove_root (w);
+ d = x->m_degree;
+ gcc_checking_assert (d < D);
+ while (a[d] != NULL)
+ {
+ y = a[d];
+ if (x->compare (y) > 0)
+ std::swap (x, y);
+ y->link (x);
+ a[d] = NULL;
+ d++;
+ }
+ a[d] = x;
+ }
+ m_min = NULL;
+ for (i = 0; i < D; i++)
+ if (a[i] != NULL)
+ {
+ insert_root (a[i]);
+ if (m_min == NULL || a[i]->compare (m_min) < 0)
+ m_min = a[i];
+ }
+}
+
+#endif // GCC_FIBONACCI_HEAP_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/file-find.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/file-find.h
new file mode 100644
index 0000000..95f2b3b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/file-find.h
@@ -0,0 +1,47 @@
+/* Prototypes and data structures used for implementing functions for
+ finding files relative to GCC binaries.
+ Copyright (C) 1992-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_FILE_FIND_H
+#define GCC_FILE_FIND_H
+
+/* Structure to hold all the directories in which to search for files to
+ execute. */
+
+struct prefix_list
+{
+ const char *prefix; /* String to prepend to the path. */
+ struct prefix_list *next; /* Next in linked list. */
+};
+
+struct path_prefix
+{
+ struct prefix_list *plist; /* List of prefixes to try */
+ int max_len; /* Max length of a prefix in PLIST */
+ const char *name; /* Name of this list (used in config stuff) */
+};
+
+extern void find_file_set_debug (bool);
+extern char *find_a_file (struct path_prefix *, const char *, int);
+extern void add_prefix (struct path_prefix *, const char *);
+extern void add_prefix_begin (struct path_prefix *, const char *);
+extern void prefix_from_env (const char *, struct path_prefix *);
+extern void prefix_from_string (const char *, struct path_prefix *);
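+
+/* Typical usage (an illustrative sketch): populate a path_prefix from
+   the PATH environment variable with prefix_from_env ("PATH", &pprefix)
+   and then locate an executable with find_a_file (&pprefix, "as", X_OK). */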
+
+#endif /* GCC_FILE_FIND_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/file-prefix-map.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/file-prefix-map.h
new file mode 100644
index 0000000..23dce0c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/file-prefix-map.h
@@ -0,0 +1,31 @@
+/* Declarations for file prefix remapping support (-f*-prefix-map options).
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_FILE_PREFIX_MAP_H
+#define GCC_FILE_PREFIX_MAP_H
+
+void add_macro_prefix_map (const char *);
+void add_debug_prefix_map (const char *);
+void add_file_prefix_map (const char *);
+void add_profile_prefix_map (const char *);
+extern bool flag_canon_prefix_map;
+
+const char *remap_macro_filename (const char *);
+const char *remap_debug_filename (const char *);
+const char *remap_profile_filename (const char *);
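+
+/* For example (illustrative): with -fdebug-prefix-map=/build/src=.,
+   remap_debug_filename ("/build/src/a.c") yields "./a.c". */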
+
+#endif /* !GCC_FILE_PREFIX_MAP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/filenames.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/filenames.h
new file mode 100644
index 0000000..444c5cc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/filenames.h
@@ -0,0 +1,100 @@
+/* Macros for taking apart, interpreting and processing file names.
+
+ These are here because some non-Posix (a.k.a. DOSish) systems have
+ drive letter brain-damage at the beginning of an absolute file name,
+ use forward- and back-slash in path names interchangeably, and
+ some of them have case-insensitive file names.
+
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+
+This file is part of BFD, the Binary File Descriptor library.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef FILENAMES_H
+#define FILENAMES_H
+
+#include "hashtab.h" /* for hashval_t */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__MSDOS__) || (defined(_WIN32) && ! defined(__CYGWIN__)) || \
+ defined(__OS2__)
+# ifndef HAVE_DOS_BASED_FILE_SYSTEM
+# define HAVE_DOS_BASED_FILE_SYSTEM 1
+# endif
+# ifndef HAVE_CASE_INSENSITIVE_FILE_SYSTEM
+# define HAVE_CASE_INSENSITIVE_FILE_SYSTEM 1
+# endif
+# define HAS_DRIVE_SPEC(f) HAS_DOS_DRIVE_SPEC (f)
+# define IS_DIR_SEPARATOR(c) IS_DOS_DIR_SEPARATOR (c)
+# define IS_ABSOLUTE_PATH(f) IS_DOS_ABSOLUTE_PATH (f)
+#else /* not DOSish */
+# if defined(__APPLE__)
+# ifndef HAVE_CASE_INSENSITIVE_FILE_SYSTEM
+# define HAVE_CASE_INSENSITIVE_FILE_SYSTEM 1
+# endif
+# endif /* __APPLE__ */
+# define HAS_DRIVE_SPEC(f) (0)
+# define IS_DIR_SEPARATOR(c) IS_UNIX_DIR_SEPARATOR (c)
+# define IS_ABSOLUTE_PATH(f) IS_UNIX_ABSOLUTE_PATH (f)
+#endif
+
+#define IS_DIR_SEPARATOR_1(dos_based, c) \
+ (((c) == '/') \
+ || (((c) == '\\') && (dos_based)))
+
+#define HAS_DRIVE_SPEC_1(dos_based, f) \
+ ((f)[0] && ((f)[1] == ':') && (dos_based))
+
+/* Remove the drive spec from F, assuming HAS_DRIVE_SPEC (f).
+ The result is a pointer to the remainder of F. */
+#define STRIP_DRIVE_SPEC(f) ((f) + 2)
+
+#define IS_DOS_DIR_SEPARATOR(c) IS_DIR_SEPARATOR_1 (1, c)
+#define IS_DOS_ABSOLUTE_PATH(f) IS_ABSOLUTE_PATH_1 (1, f)
+#define HAS_DOS_DRIVE_SPEC(f) HAS_DRIVE_SPEC_1 (1, f)
+
+#define IS_UNIX_DIR_SEPARATOR(c) IS_DIR_SEPARATOR_1 (0, c)
+#define IS_UNIX_ABSOLUTE_PATH(f) IS_ABSOLUTE_PATH_1 (0, f)
+
+/* Note that when DOS_BASED is true, IS_ABSOLUTE_PATH accepts d:foo as
+ well, although it is only semi-absolute. This is because the users
+ of IS_ABSOLUTE_PATH want to know whether to prepend the current
+ working directory to a file name, which should not be done with a
+ name like d:foo. */
+#define IS_ABSOLUTE_PATH_1(dos_based, f) \
+ (IS_DIR_SEPARATOR_1 (dos_based, (f)[0]) \
+ || HAS_DRIVE_SPEC_1 (dos_based, f))
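+
+/* For example (illustrative): on a DOS-based configuration
+   IS_ABSOLUTE_PATH ("c:/gcc") and IS_ABSOLUTE_PATH ("d:foo") are true
+   (the latter only semi-absolute, as noted above), while
+   IS_ABSOLUTE_PATH ("foo/bar") is false on any configuration. */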
+
+extern int filename_cmp (const char *s1, const char *s2);
+#define FILENAME_CMP(s1, s2) filename_cmp(s1, s2)
+
+extern int filename_ncmp (const char *s1, const char *s2,
+ size_t n);
+
+extern hashval_t filename_hash (const void *s);
+
+extern int filename_eq (const void *s1, const void *s2);
+
+extern int canonical_filename_eq (const char *a, const char *b);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FILENAMES_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/fixed-value.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/fixed-value.h
new file mode 100644
index 0000000..8d1002a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/fixed-value.h
@@ -0,0 +1,111 @@
+/* Fixed-point arithmetic support.
+ Copyright (C) 2006-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_FIXED_VALUE_H
+#define GCC_FIXED_VALUE_H
+
+struct GTY(()) fixed_value
+{
+  double_int data; /* Stores data of up to 2 wide integers. */
+ scalar_mode_pod mode; /* Use machine mode to know IBIT and FBIT. */
+};
+
+#define FIXED_VALUE_TYPE struct fixed_value
+
+#define MAX_FCONST0 18 /* For storing 18 fixed-point zeros per
+ fract, ufract, accum, and uaccum modes. */
+#define MAX_FCONST1 8 /* For storing 8 fixed-point ones per accum
+ and uaccum modes. */
+/* Constant fixed-point values 0 and 1. */
+extern FIXED_VALUE_TYPE fconst0[MAX_FCONST0];
+extern FIXED_VALUE_TYPE fconst1[MAX_FCONST1];
+
+/* Macros to access fconst0 and fconst1 via machine modes. */
+#define FCONST0(mode) fconst0[mode - QQmode]
+#define FCONST1(mode) fconst1[mode - HAmode]
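+
+/* For example, FCONST0 (SQmode) is the fixed-point zero of the signed
+   fract SQ mode (illustrative; the available fixed-point modes are
+   target-dependent). */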
+
+/* Return a CONST_FIXED with value R and mode M. */
+#define CONST_FIXED_FROM_FIXED_VALUE(r, m) \
+ const_fixed_from_fixed_value (r, m)
+extern rtx const_fixed_from_fixed_value (FIXED_VALUE_TYPE, machine_mode);
+
+/* Construct a FIXED_VALUE from a bit payload and machine mode MODE.
+ The bits in PAYLOAD are sign-extended/zero-extended according to MODE. */
+extern FIXED_VALUE_TYPE fixed_from_double_int (double_int, scalar_mode);
+
+/* Return a CONST_FIXED from a bit payload and machine mode MODE.
+ The bits in PAYLOAD are sign-extended/zero-extended according to MODE. */
+inline rtx
+const_fixed_from_double_int (double_int payload,
+ scalar_mode mode)
+{
+ return
+ const_fixed_from_fixed_value (fixed_from_double_int (payload, mode),
+ mode);
+}
+
+/* Initialize from a decimal or hexadecimal string. */
+extern void fixed_from_string (FIXED_VALUE_TYPE *, const char *,
+ scalar_mode);
+
+/* In tree.cc: wrap up a FIXED_VALUE_TYPE in a tree node. */
+extern tree build_fixed (tree, FIXED_VALUE_TYPE);
+
+/* Extend or truncate to a new mode. */
+extern bool fixed_convert (FIXED_VALUE_TYPE *, scalar_mode,
+ const FIXED_VALUE_TYPE *, bool);
+
+/* Convert to a fixed-point mode from an integer. */
+extern bool fixed_convert_from_int (FIXED_VALUE_TYPE *, scalar_mode,
+ double_int, bool, bool);
+
+/* Convert to a fixed-point mode from a real. */
+extern bool fixed_convert_from_real (FIXED_VALUE_TYPE *, scalar_mode,
+ const REAL_VALUE_TYPE *, bool);
+
+/* Convert to a real mode from a fixed-point. */
+extern void real_convert_from_fixed (REAL_VALUE_TYPE *, scalar_mode,
+ const FIXED_VALUE_TYPE *);
+
+/* Compare two fixed-point objects for bitwise identity. */
+extern bool fixed_identical (const FIXED_VALUE_TYPE *, const FIXED_VALUE_TYPE *);
+
+/* Calculate a hash value. */
+extern unsigned int fixed_hash (const FIXED_VALUE_TYPE *);
+
+#define FIXED_VALUES_IDENTICAL(x, y) fixed_identical (&(x), &(y))
+
+/* Determine whether a fixed-point value X is negative. */
+#define FIXED_VALUE_NEGATIVE(x) fixed_isneg (&(x))
+
+/* Render F as a decimal floating point constant. */
+extern void fixed_to_decimal (char *str, const FIXED_VALUE_TYPE *, size_t);
+
+/* Binary or unary arithmetic on tree_code. */
+extern bool fixed_arithmetic (FIXED_VALUE_TYPE *, int, const FIXED_VALUE_TYPE *,
+ const FIXED_VALUE_TYPE *, bool);
+
+/* Compare fixed-point values by tree_code. */
+extern bool fixed_compare (int, const FIXED_VALUE_TYPE *,
+ const FIXED_VALUE_TYPE *);
+
+/* Determine whether a fixed-point value X is negative. */
+extern bool fixed_isneg (const FIXED_VALUE_TYPE *);
+
+#endif /* GCC_FIXED_VALUE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/flag-types.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/flag-types.h
new file mode 100644
index 0000000..78dbdbe
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/flag-types.h
@@ -0,0 +1,508 @@
+/* Compilation switch flag type definitions for GCC.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_FLAG_TYPES_H
+#define GCC_FLAG_TYPES_H
+
+#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+
+enum debug_info_type
+{
+ DINFO_TYPE_NONE, /* No debug info. */
+ DINFO_TYPE_DWARF2, /* Dwarf v2 debug info. */
+ DINFO_TYPE_VMS, /* VMS debug info. */
+ DINFO_TYPE_CTF, /* CTF debug info. */
+ DINFO_TYPE_BTF, /* BTF debug info. */
+ DINFO_TYPE_BTF_WITH_CORE, /* BTF debug info with CO-RE relocations. */
+ DINFO_TYPE_MAX = DINFO_TYPE_BTF_WITH_CORE /* Marker only. */
+};
+
+#define NO_DEBUG (0U)
+/* Write DWARF2 debug info (using dwarf2out.cc). */
+#define DWARF2_DEBUG (1U << DINFO_TYPE_DWARF2)
+/* Write VMS debug info (using vmsdbgout.cc). */
+#define VMS_DEBUG (1U << DINFO_TYPE_VMS)
+/* Write CTF debug info (using ctfout.cc). */
+#define CTF_DEBUG (1U << DINFO_TYPE_CTF)
+/* Write BTF debug info (using btfout.cc). */
+#define BTF_DEBUG (1U << DINFO_TYPE_BTF)
+/* Write BTF debug info for BPF CO-RE usecase (using btfout.cc). */
+#define BTF_WITH_CORE_DEBUG (1U << DINFO_TYPE_BTF_WITH_CORE)
+
+/* Note: Adding new definitions to handle a -combination- of debug formats,
+   like VMS_AND_DWARF2_DEBUG, is not recommended. This definition remains
+   here for historical reasons. */
+/* Write VMS debug info (using vmsdbgout.cc) and DWARF v2 debug info (using
+ dwarf2out.cc). */
+#define VMS_AND_DWARF2_DEBUG ((VMS_DEBUG | DWARF2_DEBUG))
+
+enum debug_info_levels
+{
+ DINFO_LEVEL_NONE, /* Write no debugging info. */
+ DINFO_LEVEL_TERSE, /* Write minimal info to support tracebacks only. */
+ DINFO_LEVEL_NORMAL, /* Write info for all declarations (and line table). */
+ DINFO_LEVEL_VERBOSE /* Write normal info plus #define/#undef info. */
+};
+
+/* CTF debug info levels.
+   CTF debug info levels are independent of DWARF debug info levels
+   because CTF may co-exist with DWARF. */
+enum ctf_debug_info_levels
+{
+ CTFINFO_LEVEL_NONE = 0, /* Write no CTF debug info. */
+ CTFINFO_LEVEL_TERSE = 1, /* Write CTF information to support tracebacks
+ only. Not implemented. */
+ CTFINFO_LEVEL_NORMAL = 2 /* Write CTF type information for all entities
+ (functions, data objects, variables etc.)
+ at file-scope or global-scope only. */
+};
+
+/* A major contribution to object and executable size is debug
+ information size. A major contribution to debug information
+ size is struct descriptions replicated in several object files.
+ The following function determines whether or not debug information
+ should be generated for a given struct. The indirect parameter
+ indicates that the struct is being handled indirectly, via
+ a pointer. See opts.cc for the implementation. */
+
+enum debug_info_usage
+{
+ DINFO_USAGE_DFN, /* A struct definition. */
+ DINFO_USAGE_DIR_USE, /* A direct use, such as the type of a variable. */
+ DINFO_USAGE_IND_USE, /* An indirect use, such as through a pointer. */
+ DINFO_USAGE_NUM_ENUMS /* The number of enumerators. */
+};
+
+/* A major contribution to object and executable size is debug
+ information size. A major contribution to debug information size
+ is struct descriptions replicated in several object files. The
+ following flags attempt to reduce this information. The basic
+ idea is to not emit struct debugging information in the current
+ compilation unit when that information will be generated by
+ another compilation unit.
+
+ Debug information for a struct defined in the current source
+ file should be generated in the object file. Likewise the
+ debug information for a struct defined in a header should be
+ generated in the object file of the corresponding source file.
+   Both of these cases are handled when the base name of the file of
+ the struct definition matches the base name of the source file
+ of the current compilation unit. This matching emits minimal
+ struct debugging information.
+
+ The base file name matching rule above will fail to emit debug
+ information for structs defined in system headers. So a second
+ category of files includes system headers in addition to files
+ with matching bases.
+
+ The remaining types of files are library headers and application
+ headers. We cannot currently distinguish these two types. */
+
+enum debug_struct_file
+{
+ DINFO_STRUCT_FILE_NONE, /* Debug no structs. */
+ DINFO_STRUCT_FILE_BASE, /* Debug structs defined in files with the
+ same base name as the compilation unit. */
+ DINFO_STRUCT_FILE_SYS, /* Also debug structs defined in system
+ header files. */
+ DINFO_STRUCT_FILE_ANY /* Debug structs defined in all files. */
+};
+
+/* Balance between GNAT encodings and standard DWARF to emit. */
+
+enum dwarf_gnat_encodings
+{
+ DWARF_GNAT_ENCODINGS_ALL = 0, /* Emit all GNAT encodings, then emit as
+ much standard DWARF as possible so it
+ does not conflict with GNAT
+ encodings. */
+ DWARF_GNAT_ENCODINGS_GDB = 1, /* Emit as much standard DWARF as possible
+ as long as GDB handles them. Emit GNAT
+ encodings for the rest. */
+ DWARF_GNAT_ENCODINGS_MINIMAL = 2 /* Emit all the standard DWARF we can.
+ Emit GNAT encodings for the rest. */
+};
+
+/* Enumerate Objective-c instance variable visibility settings. */
+
+enum ivar_visibility
+{
+ IVAR_VISIBILITY_PRIVATE,
+ IVAR_VISIBILITY_PROTECTED,
+ IVAR_VISIBILITY_PUBLIC,
+ IVAR_VISIBILITY_PACKAGE
+};
+
+/* The stack reuse level. */
+enum stack_reuse_level
+{
+ SR_NONE,
+ SR_NAMED_VARS,
+ SR_ALL
+};
+
+/* The live patching level. */
+enum live_patching_level
+{
+ LIVE_PATCHING_NONE = 0,
+ LIVE_PATCHING_INLINE_ONLY_STATIC,
+ LIVE_PATCHING_INLINE_CLONE
+};
+
+/* The algorithm used for basic block reordering. */
+enum reorder_blocks_algorithm
+{
+ REORDER_BLOCKS_ALGORITHM_SIMPLE,
+ REORDER_BLOCKS_ALGORITHM_STC
+};
+
+/* The algorithm used for the integrated register allocator (IRA). */
+enum ira_algorithm
+{
+ IRA_ALGORITHM_CB,
+ IRA_ALGORITHM_PRIORITY
+};
+
+/* The regions used for the integrated register allocator (IRA). */
+enum ira_region
+{
+ IRA_REGION_ONE,
+ IRA_REGION_ALL,
+ IRA_REGION_MIXED,
+};
+
+/* The options for excess precision. */
+enum excess_precision
+{
+ EXCESS_PRECISION_DEFAULT,
+ EXCESS_PRECISION_FAST,
+ EXCESS_PRECISION_STANDARD,
+ EXCESS_PRECISION_FLOAT16
+};
+
+/* The options for which values of FLT_EVAL_METHOD are permissible. */
+enum permitted_flt_eval_methods
+{
+ PERMITTED_FLT_EVAL_METHODS_DEFAULT,
+ PERMITTED_FLT_EVAL_METHODS_TS_18661,
+ PERMITTED_FLT_EVAL_METHODS_C11
+};
+
+/* Type of stack check.
+
+   Stack checking is designed to detect infinite recursion and stack
+   overflows for Ada programs. Furthermore, in that scenario, stack
+   checking tries to ensure that enough stack space is left to run a
+   signal handler.
+
+ -fstack-check= does not prevent stack-clash style attacks. For that
+ you want -fstack-clash-protection. */
+enum stack_check_type
+{
+ /* Do not check the stack. */
+ NO_STACK_CHECK = 0,
+
+ /* Check the stack generically, i.e. assume no specific support
+ from the target configuration files. */
+ GENERIC_STACK_CHECK,
+
+ /* Check the stack and rely on the target configuration files to
+ check the static frame of functions, i.e. use the generic
+ mechanism only for dynamic stack allocations. */
+ STATIC_BUILTIN_STACK_CHECK,
+
+ /* Check the stack and entirely rely on the target configuration
+ files, i.e. do not use the generic mechanism at all. */
+ FULL_BUILTIN_STACK_CHECK
+};
+
+/* Type of callgraph information. */
+enum callgraph_info_type
+{
+ /* No information. */
+ NO_CALLGRAPH_INFO = 0,
+
+ /* Naked callgraph. */
+ CALLGRAPH_INFO_NAKED = 1,
+
+ /* Callgraph decorated with stack usage information. */
+ CALLGRAPH_INFO_STACK_USAGE = 2,
+
+ /* Callgraph decoration with dynamic allocation information. */
+ CALLGRAPH_INFO_DYNAMIC_ALLOC = 4
+};
+
+/* Floating-point contraction mode. */
+enum fp_contract_mode {
+ FP_CONTRACT_OFF = 0,
+ FP_CONTRACT_ON = 1,
+ FP_CONTRACT_FAST = 2
+};
+
+/* Scalar storage order kind. */
+enum scalar_storage_order_kind {
+ SSO_NATIVE = 0,
+ SSO_BIG_ENDIAN,
+ SSO_LITTLE_ENDIAN
+};
+
+/* Vectorizer cost-model. Except for DEFAULT, the values are ordered from
+ the most conservative to the least conservative. */
+enum vect_cost_model {
+ VECT_COST_MODEL_VERY_CHEAP = -3,
+ VECT_COST_MODEL_CHEAP = -2,
+ VECT_COST_MODEL_DYNAMIC = -1,
+ VECT_COST_MODEL_UNLIMITED = 0,
+ VECT_COST_MODEL_DEFAULT = 1
+};
+
+/* Automatic variable initialization type. */
+enum auto_init_type {
+ AUTO_INIT_UNINITIALIZED = 0,
+ AUTO_INIT_PATTERN = 1,
+ AUTO_INIT_ZERO = 2
+};
+
+/* Different instrumentation modes. */
+enum sanitize_code {
+ /* AddressSanitizer. */
+ SANITIZE_ADDRESS = 1UL << 0,
+ SANITIZE_USER_ADDRESS = 1UL << 1,
+ SANITIZE_KERNEL_ADDRESS = 1UL << 2,
+ /* ThreadSanitizer. */
+ SANITIZE_THREAD = 1UL << 3,
+ /* LeakSanitizer. */
+ SANITIZE_LEAK = 1UL << 4,
+ /* UndefinedBehaviorSanitizer. */
+ SANITIZE_SHIFT_BASE = 1UL << 5,
+ SANITIZE_SHIFT_EXPONENT = 1UL << 6,
+ SANITIZE_DIVIDE = 1UL << 7,
+ SANITIZE_UNREACHABLE = 1UL << 8,
+ SANITIZE_VLA = 1UL << 9,
+ SANITIZE_NULL = 1UL << 10,
+ SANITIZE_RETURN = 1UL << 11,
+ SANITIZE_SI_OVERFLOW = 1UL << 12,
+ SANITIZE_BOOL = 1UL << 13,
+ SANITIZE_ENUM = 1UL << 14,
+ SANITIZE_FLOAT_DIVIDE = 1UL << 15,
+ SANITIZE_FLOAT_CAST = 1UL << 16,
+ SANITIZE_BOUNDS = 1UL << 17,
+ SANITIZE_ALIGNMENT = 1UL << 18,
+ SANITIZE_NONNULL_ATTRIBUTE = 1UL << 19,
+ SANITIZE_RETURNS_NONNULL_ATTRIBUTE = 1UL << 20,
+ SANITIZE_OBJECT_SIZE = 1UL << 21,
+ SANITIZE_VPTR = 1UL << 22,
+ SANITIZE_BOUNDS_STRICT = 1UL << 23,
+ SANITIZE_POINTER_OVERFLOW = 1UL << 24,
+ SANITIZE_BUILTIN = 1UL << 25,
+ SANITIZE_POINTER_COMPARE = 1UL << 26,
+ SANITIZE_POINTER_SUBTRACT = 1UL << 27,
+ SANITIZE_HWADDRESS = 1UL << 28,
+ SANITIZE_USER_HWADDRESS = 1UL << 29,
+ SANITIZE_KERNEL_HWADDRESS = 1UL << 30,
+ /* Shadow Call Stack. */
+ SANITIZE_SHADOW_CALL_STACK = 1UL << 31,
+ SANITIZE_SHIFT = SANITIZE_SHIFT_BASE | SANITIZE_SHIFT_EXPONENT,
+ SANITIZE_UNDEFINED = SANITIZE_SHIFT | SANITIZE_DIVIDE | SANITIZE_UNREACHABLE
+ | SANITIZE_VLA | SANITIZE_NULL | SANITIZE_RETURN
+ | SANITIZE_SI_OVERFLOW | SANITIZE_BOOL | SANITIZE_ENUM
+ | SANITIZE_BOUNDS | SANITIZE_ALIGNMENT
+ | SANITIZE_NONNULL_ATTRIBUTE
+ | SANITIZE_RETURNS_NONNULL_ATTRIBUTE
+ | SANITIZE_OBJECT_SIZE | SANITIZE_VPTR
+ | SANITIZE_POINTER_OVERFLOW | SANITIZE_BUILTIN,
+ SANITIZE_UNDEFINED_NONDEFAULT = SANITIZE_FLOAT_DIVIDE | SANITIZE_FLOAT_CAST
+ | SANITIZE_BOUNDS_STRICT
+};
+
+/* Different settings for zeroing subset of registers. */
+namespace zero_regs_flags {
+ const unsigned int UNSET = 0;
+ const unsigned int SKIP = 1UL << 0;
+ const unsigned int ONLY_USED = 1UL << 1;
+ const unsigned int ONLY_GPR = 1UL << 2;
+ const unsigned int ONLY_ARG = 1UL << 3;
+ const unsigned int ENABLED = 1UL << 4;
+ const unsigned int USED_GPR_ARG = ENABLED | ONLY_USED | ONLY_GPR | ONLY_ARG;
+ const unsigned int USED_GPR = ENABLED | ONLY_USED | ONLY_GPR;
+ const unsigned int USED_ARG = ENABLED | ONLY_USED | ONLY_ARG;
+ const unsigned int USED = ENABLED | ONLY_USED;
+ const unsigned int ALL_GPR_ARG = ENABLED | ONLY_GPR | ONLY_ARG;
+ const unsigned int ALL_GPR = ENABLED | ONLY_GPR;
+ const unsigned int ALL_ARG = ENABLED | ONLY_ARG;
+ const unsigned int ALL = ENABLED;
+}
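+
+/* These combinations mirror the -fzero-call-used-regs= option values
+   (an illustrative mapping, e.g. USED_GPR_ARG corresponds to
+   -fzero-call-used-regs=used-gpr-arg and ALL to
+   -fzero-call-used-regs=all). */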
+
+/* Settings of flag_incremental_link. */
+enum incremental_link {
+ INCREMENTAL_LINK_NONE,
+ /* Do incremental linking and produce binary. */
+ INCREMENTAL_LINK_NOLTO,
+ /* Do incremental linking and produce IL. */
+ INCREMENTAL_LINK_LTO
+};
+
+/* Different trace modes. */
+enum sanitize_coverage_code {
+ /* Trace PC. */
+ SANITIZE_COV_TRACE_PC = 1 << 0,
+ /* Trace Comparison. */
+ SANITIZE_COV_TRACE_CMP = 1 << 1
+};
+
+/* flag_vtable_verify initialization levels. */
+enum vtv_priority {
+  VTV_NO_PRIORITY = 0,  /* I.e. do NOT do vtable verification. */
+ VTV_STANDARD_PRIORITY = 1,
+ VTV_PREINIT_PRIORITY = 2
+};
+
+/* flag_lto_partition initialization values. */
+enum lto_partition_model {
+ LTO_PARTITION_NONE = 0,
+ LTO_PARTITION_ONE = 1,
+ LTO_PARTITION_BALANCED = 2,
+ LTO_PARTITION_1TO1 = 3,
+ LTO_PARTITION_MAX = 4
+};
+
+/* flag_lto_linker_output initialization values. */
+enum lto_linker_output {
+ LTO_LINKER_OUTPUT_UNKNOWN,
+ LTO_LINKER_OUTPUT_REL,
+ LTO_LINKER_OUTPUT_NOLTOREL,
+ LTO_LINKER_OUTPUT_DYN,
+ LTO_LINKER_OUTPUT_PIE,
+ LTO_LINKER_OUTPUT_EXEC
+};
+
+/* gfortran -finit-real= values. */
+
+enum gfc_init_local_real
+{
+ GFC_INIT_REAL_OFF = 0,
+ GFC_INIT_REAL_ZERO,
+ GFC_INIT_REAL_NAN,
+ GFC_INIT_REAL_SNAN,
+ GFC_INIT_REAL_INF,
+ GFC_INIT_REAL_NEG_INF
+};
+
+/* gfortran -fcoarray= values. */
+
+enum gfc_fcoarray
+{
+ GFC_FCOARRAY_NONE = 0,
+ GFC_FCOARRAY_SINGLE,
+ GFC_FCOARRAY_LIB
+};
+
+
+/* gfortran -fconvert= values; used for unformatted I/O.
+ Keep in sync with GFC_CONVERT_* in gcc/fortran/libgfortran.h. */
+enum gfc_convert
+{
+ GFC_FLAG_CONVERT_NATIVE = 0,
+ GFC_FLAG_CONVERT_SWAP,
+ GFC_FLAG_CONVERT_BIG,
+ GFC_FLAG_CONVERT_LITTLE,
+ GFC_FLAG_CONVERT_R16_IEEE = 4,
+ GFC_FLAG_CONVERT_R16_IEEE_SWAP,
+ GFC_FLAG_CONVERT_R16_IEEE_BIG,
+ GFC_FLAG_CONVERT_R16_IEEE_LITTLE,
+ GFC_FLAG_CONVERT_R16_IBM = 8,
+ GFC_FLAG_CONVERT_R16_IBM_SWAP,
+ GFC_FLAG_CONVERT_R16_IBM_BIG,
+ GFC_FLAG_CONVERT_R16_IBM_LITTLE,
+};
+
+
+/* Control-Flow Protection values. */
+enum cf_protection_level
+{
+ CF_NONE = 0,
+ CF_BRANCH = 1 << 0,
+ CF_RETURN = 1 << 1,
+ CF_FULL = CF_BRANCH | CF_RETURN,
+ CF_SET = 1 << 2,
+ CF_CHECK = 1 << 3
+};
+
+/* Parloops schedule type. */
+enum parloops_schedule_type
+{
+ PARLOOPS_SCHEDULE_STATIC = 0,
+ PARLOOPS_SCHEDULE_DYNAMIC,
+ PARLOOPS_SCHEDULE_GUIDED,
+ PARLOOPS_SCHEDULE_AUTO,
+ PARLOOPS_SCHEDULE_RUNTIME
+};
+
+/* Ranger debug mode. */
+enum ranger_debug
+{
+ RANGER_DEBUG_NONE = 0,
+ RANGER_DEBUG_TRACE = 1,
+ RANGER_DEBUG_CACHE = 2,
+ RANGER_DEBUG_GORI = 4,
+ RANGER_DEBUG_TRACE_GORI = (RANGER_DEBUG_TRACE | RANGER_DEBUG_GORI),
+ RANGER_DEBUG_TRACE_CACHE = (RANGER_DEBUG_TRACE | RANGER_DEBUG_CACHE),
+ RANGER_DEBUG_ALL = (RANGER_DEBUG_GORI | RANGER_DEBUG_CACHE
+ | RANGER_DEBUG_TRACE)
+};
+
+/* Jump threader verbose dumps. */
+enum threader_debug
+{
+ THREADER_DEBUG_NONE = 0,
+ THREADER_DEBUG_ALL = 1
+};
+
+/* VRP modes. */
+enum vrp_mode
+{
+ VRP_MODE_VRP,
+ VRP_MODE_RANGER
+};
+
+/* Modes of OpenACC 'kernels' constructs handling. */
+enum openacc_kernels
+{
+ OPENACC_KERNELS_DECOMPOSE,
+ OPENACC_KERNELS_PARLOOPS
+};
+
+/* Modes of OpenACC privatization diagnostics. */
+enum openacc_privatization
+{
+ OPENACC_PRIVATIZATION_QUIET,
+ OPENACC_PRIVATIZATION_NOISY
+};
+
+/* Targets for -fopenmp-target-simd-clone. */
+enum omp_target_simd_clone_device_kind
+{
+ OMP_TARGET_SIMD_CLONE_NONE = 0,
+ OMP_TARGET_SIMD_CLONE_HOST = 1,
+ OMP_TARGET_SIMD_CLONE_NOHOST = 2,
+ OMP_TARGET_SIMD_CLONE_ANY = 3
+};
+
+#endif
+
+#endif /* ! GCC_FLAG_TYPES_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/flags.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/flags.h
new file mode 100644
index 0000000..e4bafa3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/flags.h
@@ -0,0 +1,117 @@
+/* Compilation switch flag definitions for GCC.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_FLAGS_H
+#define GCC_FLAGS_H
+
+#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+
+/* Names of fundamental debug info formats indexed by enum
+ debug_info_type. */
+
+extern const char *const debug_type_names[];
+
+/* Get enum debug_info_type of the specified debug format, for error messages.
+ Can be used only for individual debug format types. */
+
+extern enum debug_info_type debug_set_to_format (uint32_t debug_info_set);
+
+/* Get the number of debug formats enabled for output. */
+
+unsigned int debug_set_count (uint32_t w_symbols);
+
+/* Get the names of the debug formats enabled for output. */
+
+const char * debug_set_names (uint32_t w_symbols);
+
+#ifndef GENERATOR_FILE
+/* Return true iff BTF debug info is enabled. */
+
+extern bool btf_debuginfo_p ();
+
+/* Return true iff BTF with CO-RE debug info is enabled. */
+
+extern bool btf_with_core_debuginfo_p ();
+
+/* Return true iff CTF debug info is enabled. */
+
+extern bool ctf_debuginfo_p ();
+
+/* Return true iff DWARF2 debug info is enabled. */
+
+extern bool dwarf_debuginfo_p (struct gcc_options *opts = &global_options);
+
+/* Return true iff the debug info format is to be generated based on DWARF
+ DIEs (like CTF and BTF debug info formats). */
+
+extern bool dwarf_based_debuginfo_p ();
+#endif
+
+extern void strip_off_ending (char *, int);
+extern int base_of_path (const char *path, const char **base_out);
+
+/* Return true iff flags are set as if -ffast-math. */
+extern bool fast_math_flags_set_p (const struct gcc_options *);
+extern bool fast_math_flags_struct_set_p (struct cl_optimization *);
+
+
+/* Now the symbols that are set with `-f' switches. */
+
+/* True if printing into -fdump-final-insns= dump. */
+
+extern bool final_insns_dump_p;
+
+
+/* Other basic status info about current function. */
+
+class target_flag_state
+{
+public:
+ /* Each falign-foo can generate up to two levels of alignment:
+ -falign-foo=N:M[:N2:M2] */
+ align_flags x_align_loops;
+ align_flags x_align_jumps;
+ align_flags x_align_labels;
+ align_flags x_align_functions;
+};
+
+extern class target_flag_state default_target_flag_state;
+#if SWITCHABLE_TARGET
+extern class target_flag_state *this_target_flag_state;
+#else
+#define this_target_flag_state (&default_target_flag_state)
+#endif
+
+#define align_loops (this_target_flag_state->x_align_loops)
+#define align_jumps (this_target_flag_state->x_align_jumps)
+#define align_labels (this_target_flag_state->x_align_labels)
+#define align_functions (this_target_flag_state->x_align_functions)
+
+/* Returns TRUE if the generated code should match ABI version N or
+   greater. */
+
+#define abi_version_at_least(N) \
+ (flag_abi_version == 0 || flag_abi_version >= (N))
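+
+/* For example (illustrative): with -fabi-version=0 (the latest version)
+   abi_version_at_least (12) is true, while with -fabi-version=8 it is
+   false. */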
+
+/* Whether to emit an overflow warning whose code is C. */
+#define issue_strict_overflow_warning(c) (warn_strict_overflow >= (int) (c))
+
+#endif /* ! in target library */
+
+#endif /* ! GCC_FLAGS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/fold-const-call.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/fold-const-call.h
new file mode 100644
index 0000000..ccb8f6d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/fold-const-call.h
@@ -0,0 +1,28 @@
+/* Fold calls to built-in and internal functions with constant arguments.
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_FOLD_CONST_CALL_H
+#define GCC_FOLD_CONST_CALL_H
+
+tree fold_const_call (combined_fn, tree, tree);
+tree fold_const_call (combined_fn, tree, tree, tree);
+tree fold_const_call (combined_fn, tree, tree, tree, tree);
+tree build_cmp_result (tree type, int res);
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/fold-const.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/fold-const.h
new file mode 100644
index 0000000..56ecaa8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/fold-const.h
@@ -0,0 +1,276 @@
+/* Fold a constant sub-tree into a single node for C-compiler
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_FOLD_CONST_H
+#define GCC_FOLD_CONST_H
+
+/* Nonzero if we are folding constants inside an initializer or a C++
+ manifestly-constant-evaluated context; zero otherwise.
+ Should be used when folding in initializer enables additional
+ optimizations. */
+extern int folding_initializer;
+/* Nonzero if we are folding C++ manifestly-constant-evaluated context; zero
+ otherwise.
+ Should be used when certain constructs shouldn't be optimized
+ during folding in that context. */
+extern bool folding_cxx_constexpr;
+
+/* Convert between trees and native memory representation. */
+extern int native_encode_expr (const_tree, unsigned char *, int, int off = -1);
+extern int native_encode_initializer (tree, unsigned char *, int,
+ int off = -1, unsigned char * = nullptr);
+extern tree native_interpret_expr (tree, const unsigned char *, int);
+extern tree native_interpret_real (tree, const unsigned char *, int);
+extern bool can_native_interpret_type_p (tree);
+extern tree native_interpret_aggregate (tree, const unsigned char *, int, int);
+extern tree find_bitfield_repr_type (int, int);
+extern void shift_bytes_in_array_left (unsigned char *, unsigned int,
+ unsigned int);
+extern void shift_bytes_in_array_right (unsigned char *, unsigned int,
+ unsigned int);
+
+/* Fold constants as much as possible in an expression.
+ Returns the simplified expression.
+ Acts only on the top level of the expression;
+ if the argument itself cannot be simplified, its
+ subexpressions are not changed. */
+
+extern tree fold (tree);
+extern tree fold_init (tree);
+#define fold_unary(CODE,T1,T2)\
+ fold_unary_loc (UNKNOWN_LOCATION, CODE, T1, T2)
+extern tree fold_unary_loc (location_t, enum tree_code, tree, tree);
+#define fold_unary_ignore_overflow(CODE,T1,T2)\
+ fold_unary_ignore_overflow_loc (UNKNOWN_LOCATION, CODE, T1, T2)
+extern tree fold_unary_ignore_overflow_loc (location_t, enum tree_code, tree, tree);
+#define fold_binary(CODE,T1,T2,T3)\
+ fold_binary_loc (UNKNOWN_LOCATION, CODE, T1, T2, T3)
+extern tree fold_binary_loc (location_t, enum tree_code, tree, tree, tree);
+#define fold_ternary(CODE,T1,T2,T3,T4)\
+ fold_ternary_loc (UNKNOWN_LOCATION, CODE, T1, T2, T3, T4)
+extern tree fold_ternary_loc (location_t, enum tree_code, tree, tree, tree, tree);
+#define fold_build1(c,t1,t2)\
+ fold_build1_loc (UNKNOWN_LOCATION, c, t1, t2 MEM_STAT_INFO)
+extern tree fold_build1_loc (location_t, enum tree_code, tree,
+ tree CXX_MEM_STAT_INFO);
+#define fold_build2(c,t1,t2,t3)\
+ fold_build2_loc (UNKNOWN_LOCATION, c, t1, t2, t3 MEM_STAT_INFO)
+extern tree fold_build2_loc (location_t, enum tree_code, tree, tree,
+ tree CXX_MEM_STAT_INFO);
+#define fold_build3(c,t1,t2,t3,t4)\
+ fold_build3_loc (UNKNOWN_LOCATION, c, t1, t2, t3, t4 MEM_STAT_INFO)
+extern tree fold_build3_loc (location_t, enum tree_code, tree, tree, tree,
+ tree CXX_MEM_STAT_INFO);
+extern tree fold_build1_initializer_loc (location_t, enum tree_code, tree, tree);
+extern tree fold_build2_initializer_loc (location_t, enum tree_code, tree, tree, tree);
+#define fold_build_call_array(T1,T2,N,T4)\
+ fold_build_call_array_loc (UNKNOWN_LOCATION, T1, T2, N, T4)
+extern tree fold_build_call_array_loc (location_t, tree, tree, int, tree *);
+#define fold_build_call_array_initializer(T1,T2,N,T4)\
+ fold_build_call_array_initializer_loc (UNKNOWN_LOCATION, T1, T2, N, T4)
+extern tree fold_build_call_array_initializer_loc (location_t, tree, tree, int, tree *);
+extern tree fold_binary_initializer_loc (location_t, tree_code, tree, tree, tree);
+extern tree get_array_ctor_element_at_index (tree, offset_int,
+ unsigned * = NULL);
+extern bool fold_convertible_p (const_tree, const_tree);
+#define fold_convert(T1,T2)\
+ fold_convert_loc (UNKNOWN_LOCATION, T1, T2)
+extern tree fold_convert_loc (location_t, tree, tree);
+extern tree fold_single_bit_test (location_t, enum tree_code, tree, tree, tree);
+extern tree fold_ignored_result (tree);
+extern tree fold_abs_const (tree, tree);
+extern tree fold_indirect_ref_1 (location_t, tree, tree);
+extern void fold_defer_overflow_warnings (void);
+extern void fold_undefer_overflow_warnings (bool, const gimple *, int);
+extern void fold_undefer_and_ignore_overflow_warnings (void);
+extern bool fold_deferring_overflow_warnings_p (void);
+extern void fold_overflow_warning (const char*, enum warn_strict_overflow_code);
+extern enum tree_code fold_div_compare (enum tree_code, tree, tree,
+ tree *, tree *, bool *);
+extern bool operand_equal_p (const_tree, const_tree, unsigned int flags = 0);
+extern int multiple_of_p (tree, const_tree, const_tree, bool = true);
+#define omit_one_operand(T1,T2,T3)\
+ omit_one_operand_loc (UNKNOWN_LOCATION, T1, T2, T3)
+extern tree omit_one_operand_loc (location_t, tree, tree, tree);
+#define omit_two_operands(T1,T2,T3,T4)\
+ omit_two_operands_loc (UNKNOWN_LOCATION, T1, T2, T3, T4)
+extern tree omit_two_operands_loc (location_t, tree, tree, tree, tree);
+#define invert_truthvalue(T)\
+ invert_truthvalue_loc (UNKNOWN_LOCATION, T)
+extern tree invert_truthvalue_loc (location_t, tree);
+extern tree fold_unary_to_constant (enum tree_code, tree, tree);
+extern tree fold_binary_to_constant (enum tree_code, tree, tree, tree);
+extern tree fold_bit_and_mask (tree, tree, enum tree_code,
+ tree, enum tree_code, tree, tree,
+ tree, enum tree_code, tree, tree, tree *);
+extern tree fold_read_from_constant_string (tree);
+extern tree fold_read_from_vector (tree, poly_uint64);
+#if GCC_VEC_PERM_INDICES_H
+extern tree fold_vec_perm (tree, tree, tree, const vec_perm_indices &);
+#endif
+extern bool wide_int_binop (wide_int &res, enum tree_code,
+ const wide_int &arg1, const wide_int &arg2,
+ signop, wi::overflow_type *);
+extern tree int_const_binop (enum tree_code, const_tree, const_tree, int = 1);
+#define build_fold_addr_expr(T)\
+ build_fold_addr_expr_loc (UNKNOWN_LOCATION, (T))
+extern tree build_fold_addr_expr_loc (location_t, tree);
+#define build_fold_addr_expr_with_type(T,TYPE)\
+ build_fold_addr_expr_with_type_loc (UNKNOWN_LOCATION, (T), TYPE)
+extern tree build_fold_addr_expr_with_type_loc (location_t, tree, tree);
+extern tree fold_build_cleanup_point_expr (tree type, tree expr);
+#define build_fold_indirect_ref(T)\
+ build_fold_indirect_ref_loc (UNKNOWN_LOCATION, T)
+extern tree build_fold_indirect_ref_loc (location_t, tree);
+#define fold_indirect_ref(T)\
+ fold_indirect_ref_loc (UNKNOWN_LOCATION, T)
+extern tree fold_indirect_ref_loc (location_t, tree);
+extern tree build_simple_mem_ref_loc (location_t, tree);
+#define build_simple_mem_ref(T)\
+ build_simple_mem_ref_loc (UNKNOWN_LOCATION, T)
+extern poly_offset_int mem_ref_offset (const_tree);
+extern tree build_invariant_address (tree, tree, poly_int64);
+extern tree constant_boolean_node (bool, tree);
+extern tree div_if_zero_remainder (const_tree, const_tree);
+
+extern bool tree_swap_operands_p (const_tree, const_tree);
+extern enum tree_code swap_tree_comparison (enum tree_code);
+
+extern bool ptr_difference_const (tree, tree, poly_int64_pod *);
+extern enum tree_code invert_tree_comparison (enum tree_code, bool);
+extern bool inverse_conditions_p (const_tree, const_tree);
+
+extern bool tree_unary_nonzero_warnv_p (enum tree_code, tree, tree, bool *);
+extern bool tree_binary_nonzero_warnv_p (enum tree_code, tree, tree, tree op1,
+ bool *);
+extern bool tree_single_nonzero_warnv_p (tree, bool *);
+extern bool tree_unary_nonnegative_warnv_p (enum tree_code, tree, tree,
+ bool *, int);
+extern bool tree_binary_nonnegative_warnv_p (enum tree_code, tree, tree, tree,
+ bool *, int);
+extern bool tree_single_nonnegative_warnv_p (tree, bool *, int);
+extern bool tree_call_nonnegative_warnv_p (tree, combined_fn, tree, tree,
+ bool *, int);
+
+extern bool integer_valued_real_unary_p (tree_code, tree, int);
+extern bool integer_valued_real_binary_p (tree_code, tree, tree, int);
+extern bool integer_valued_real_call_p (combined_fn, tree, tree, int);
+extern bool integer_valued_real_single_p (tree, int);
+extern bool integer_valued_real_p (tree, int = 0);
+
+extern bool fold_real_zero_addition_p (const_tree, const_tree, const_tree,
+ int);
+extern tree combine_comparisons (location_t, enum tree_code, enum tree_code,
+ enum tree_code, tree, tree, tree);
+extern void debug_fold_checksum (const_tree);
+extern bool may_negate_without_overflow_p (const_tree);
+#define round_up(T,N) round_up_loc (UNKNOWN_LOCATION, T, N)
+extern tree round_up_loc (location_t, tree, unsigned int);
+#define round_down(T,N) round_down_loc (UNKNOWN_LOCATION, T, N)
+extern tree round_down_loc (location_t, tree, int);
+extern tree size_int_kind (poly_int64, enum size_type_kind);
+#define size_binop(CODE,T1,T2)\
+ size_binop_loc (UNKNOWN_LOCATION, CODE, T1, T2)
+extern tree size_binop_loc (location_t, enum tree_code, tree, tree);
+#define size_diffop(T1,T2)\
+ size_diffop_loc (UNKNOWN_LOCATION, T1, T2)
+extern tree size_diffop_loc (location_t, tree, tree);
+
+/* Return an expr equal to X but certainly not valid as an lvalue. */
+#define non_lvalue(T) non_lvalue_loc (UNKNOWN_LOCATION, T)
+extern tree non_lvalue_loc (location_t, tree);
+
+extern bool tree_expr_nonzero_p (tree);
+extern bool tree_expr_nonnegative_p (tree);
+extern bool tree_expr_nonnegative_warnv_p (tree, bool *, int = 0);
+extern bool tree_expr_finite_p (const_tree);
+extern bool tree_expr_infinite_p (const_tree);
+extern bool tree_expr_maybe_infinite_p (const_tree);
+extern bool tree_expr_signaling_nan_p (const_tree);
+extern bool tree_expr_maybe_signaling_nan_p (const_tree);
+extern bool tree_expr_nan_p (const_tree);
+extern bool tree_expr_maybe_nan_p (const_tree);
+extern bool tree_expr_maybe_real_minus_zero_p (const_tree);
+extern tree make_range (tree, int *, tree *, tree *, bool *);
+extern tree make_range_step (location_t, enum tree_code, tree, tree, tree,
+ tree *, tree *, int *, bool *);
+extern tree range_check_type (tree);
+extern tree build_range_check (location_t, tree, tree, int, tree, tree);
+extern bool merge_ranges (int *, tree *, tree *, int, tree, tree, int,
+ tree, tree);
+extern tree sign_bit_p (tree, const_tree);
+extern bool simple_condition_p (tree);
+extern tree exact_inverse (tree, tree);
+extern bool expr_not_equal_to (tree t, const wide_int &);
+extern tree const_unop (enum tree_code, tree, tree);
+extern tree const_binop (enum tree_code, tree, tree, tree);
+extern bool negate_mathfn_p (combined_fn);
+extern const char *getbyterep (tree, unsigned HOST_WIDE_INT *);
+extern const char *c_getstr (tree);
+extern wide_int tree_nonzero_bits (const_tree);
+extern int address_compare (tree_code, tree, tree, tree, tree &, tree &,
+ poly_int64 &, poly_int64 &, bool);
+extern tree ctor_single_nonzero_element (const_tree);
+
+/* Return OFF converted to a pointer offset type suitable as offset for
+ POINTER_PLUS_EXPR. Use location LOC for this conversion. */
+extern tree convert_to_ptrofftype_loc (location_t loc, tree off);
+
+#define convert_to_ptrofftype(t) convert_to_ptrofftype_loc (UNKNOWN_LOCATION, t)
+
+/* Build and fold a POINTER_PLUS_EXPR at LOC offsetting PTR by OFF. */
+extern tree fold_build_pointer_plus_loc (location_t loc, tree ptr, tree off);
+
+#define fold_build_pointer_plus(p,o) \
+ fold_build_pointer_plus_loc (UNKNOWN_LOCATION, p, o)
+
+/* Build and fold a POINTER_PLUS_EXPR at LOC offsetting PTR by OFF. */
+extern tree fold_build_pointer_plus_hwi_loc (location_t loc, tree ptr, HOST_WIDE_INT off);
+
+#define fold_build_pointer_plus_hwi(p,o) \
+ fold_build_pointer_plus_hwi_loc (UNKNOWN_LOCATION, p, o)
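+
+/* Editorial usage sketch, not part of the upstream header: folding
+   PTR + OFF with the helpers above, where ptr and off are hypothetical
+   GENERIC trees (off in any integer type):
+
+     off = convert_to_ptrofftype (off);
+     tree addr = fold_build_pointer_plus (ptr, off);
+
+   or, for a constant byte offset:
+
+     tree addr4 = fold_build_pointer_plus_hwi (ptr, 4);
+*/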
+
+/* In gimple-fold.cc. */
+extern void clear_type_padding_in_mask (tree, unsigned char *);
+extern bool clear_padding_type_may_have_padding_p (tree);
+extern bool arith_overflowed_p (enum tree_code, const_tree, const_tree,
+ const_tree);
+
+/* Class used to compare gimple operands. */
+
+class operand_compare
+{
+public:
+ /* Return true if two operands are equal. The flags fields can be used
+ to specify OEP flags described in tree-core.h. */
+ virtual bool operand_equal_p (const_tree, const_tree, unsigned int flags);
+
+ /* Generate a hash value for an expression. This can be used iteratively
+ by passing a previous result as the HSTATE argument. */
+ virtual void hash_operand (const_tree, inchash::hash &, unsigned flags);
+
+protected:
+ /* Verify that when arguments (ARG0 and ARG1) are equal, they have
+ an equal hash value. When the function knows the comparison result,
+ true is returned and RET is set to the corresponding comparison
+ result. */
+ bool verify_hash_value (const_tree arg0, const_tree arg1, unsigned int flags,
+ bool *ret);
+};
+
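+/* Editorial usage sketch, not part of the upstream header: comparing and
+   hashing two GENERIC operands with the default flags, where t1 and t2
+   are hypothetical trees:
+
+     operand_compare cmp;
+     bool eq = cmp.operand_equal_p (t1, t2, 0);
+     inchash::hash h;
+     cmp.hash_operand (t1, h, 0);
+     hashval_t hv = h.end ();
+*/
+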
+#endif // GCC_FOLD_CONST_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/function-abi.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/function-abi.h
new file mode 100644
index 0000000..379e235
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/function-abi.h
@@ -0,0 +1,320 @@
+/* Information about function binary interfaces.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_FUNCTION_ABI_H
+#define GCC_FUNCTION_ABI_H
+
+/* Most targets use the same ABI for all functions in a translation
+ unit, but some targets support interoperability between several ABIs.
+ Each such ABI has a unique 0-based identifier, with 0 always being
+ the default choice of ABI.
+
+ NUM_ABI_IDS is the maximum number of such ABIs that GCC can handle at once.
+ A bitfield with this number of bits can represent any combination of the
+ supported ABIs. */
+const size_t NUM_ABI_IDS = 8;
+
+/* Information about one of the target's predefined ABIs. */
+class predefined_function_abi
+{
+public:
+ /* A target-specific identifier for this ABI. The value must be in
+ the range [0, NUM_ABI_IDS - 1]. */
+ unsigned int id () const { return m_id; }
+
+ /* True if this ABI has been initialized. */
+ bool initialized_p () const { return m_initialized; }
+
+ /* Return true if a function call is allowed to alter every bit of
+ register REGNO, so that the register contains an arbitrary value
+ on return. If so, the register cannot hold any part of a value
+ that is live across a call. */
+ bool
+ clobbers_full_reg_p (unsigned int regno) const
+ {
+ return TEST_HARD_REG_BIT (m_full_reg_clobbers, regno);
+ }
+
+ /* Return true if a function call is allowed to alter some or all bits
+ of register REGNO.
+
+ This is true whenever clobbers_full_reg_p (REGNO) is true. It is
+ also true if, for example, the ABI says that a call must preserve the
+ low 32 or 64 bits of REGNO, but can clobber the upper bits of REGNO.
+ In the latter case, it is possible for REGNO to hold values that
+ are live across a call, provided that the value occupies only the
+ call-preserved part of the register. */
+ bool
+ clobbers_at_least_part_of_reg_p (unsigned int regno) const
+ {
+ return TEST_HARD_REG_BIT (m_full_and_partial_reg_clobbers, regno);
+ }
+
+ /* Return true if a function call is allowed to clobber at least part
+ of (reg:MODE REGNO). If so, it is not possible for the register
+ as a whole to be live across a call. */
+ bool
+ clobbers_reg_p (machine_mode mode, unsigned int regno) const
+ {
+ return overlaps_hard_reg_set_p (m_mode_clobbers[mode], mode, regno);
+ }
+
+ /* Return the set of registers that a function call is allowed to
+ alter completely, so that the registers contain arbitrary values
+ on return. This doesn't include registers that a call can only
+ partly clobber (as per TARGET_HARD_REGNO_CALL_PART_CLOBBERED).
+
+ These registers cannot hold any part of a value that is live across
+ a call. */
+ HARD_REG_SET full_reg_clobbers () const { return m_full_reg_clobbers; }
+
+ /* Return the set of registers that a function call is allowed to alter
+ to some degree. For example, if an ABI says that a call must preserve
+ the low 32 or 64 bits of a register R, but can clobber the upper bits
+ of R, R would be in this set but not in full_reg_clobbers ().
+
+ This set is a superset of full_reg_clobbers (). It is possible for a
+ register in full_and_partial_reg_clobbers () & ~full_reg_clobbers ()
+ to contain values that are live across a call, provided that the live
+ value only occupies the call-preserved part of the register. */
+ HARD_REG_SET
+ full_and_partial_reg_clobbers () const
+ {
+ return m_full_and_partial_reg_clobbers;
+ }
+
+ /* Return the set of registers that cannot be used to hold a value of
+ mode MODE across a function call. That is:
+
+ (reg:MODE REGNO)
+
+ might be clobbered by a call whenever:
+
+ overlaps_hard_reg_set_p (mode_clobbers (MODE), MODE, REGNO)
+
+ In allocation terms, the registers in the returned set conflict
+ with any value of mode MODE that is live across a call. */
+ HARD_REG_SET
+ mode_clobbers (machine_mode mode) const
+ {
+ return m_mode_clobbers[mode];
+ }
+
+ void initialize (unsigned int, const_hard_reg_set);
+ void add_full_reg_clobber (unsigned int);
+
+private:
+ unsigned int m_id : NUM_ABI_IDS;
+ unsigned int m_initialized : 1;
+ HARD_REG_SET m_full_reg_clobbers;
+ HARD_REG_SET m_full_and_partial_reg_clobbers;
+ HARD_REG_SET m_mode_clobbers[NUM_MACHINE_MODES];
+};
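+
+/* Editorial usage sketch, not part of the upstream header: a typical
+   allocation-time query against one of these ABIs, where abi, mode and
+   regno are hypothetical:
+
+     if (abi.clobbers_reg_p (mode, regno))
+       ;  (regno cannot hold a MODE value that is live across the call)
+
+   which is equivalent to testing regno against abi.mode_clobbers (mode)
+   with overlaps_hard_reg_set_p.  */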
+
+/* Describes either a predefined ABI or the ABI of a particular function.
+ In the latter case, the ABI might make use of extra function-specific
+ information, such as for -fipa-ra. */
+class function_abi
+{
+public:
+ /* Initialize the structure for a general function with the given ABI. */
+ function_abi (const predefined_function_abi &base_abi)
+ : m_base_abi (&base_abi),
+ m_mask (base_abi.full_and_partial_reg_clobbers ()) {}
+
+ /* Initialize the structure for a function that has the given ABI and
+ that is known not to clobber registers outside MASK. */
+ function_abi (const predefined_function_abi &base_abi,
+ const_hard_reg_set mask)
+ : m_base_abi (&base_abi), m_mask (mask) {}
+
+ /* The predefined ABI from which this ABI is derived. */
+ const predefined_function_abi &base_abi () const { return *m_base_abi; }
+
+ /* The target-specific identifier of the predefined ABI. */
+ unsigned int id () const { return m_base_abi->id (); }
+
+ /* See the corresponding predefined_function_abi functions for
+ details about the following functions. */
+
+ HARD_REG_SET
+ full_reg_clobbers () const
+ {
+ return m_mask & m_base_abi->full_reg_clobbers ();
+ }
+
+ HARD_REG_SET
+ full_and_partial_reg_clobbers () const
+ {
+ return m_mask & m_base_abi->full_and_partial_reg_clobbers ();
+ }
+
+ HARD_REG_SET
+ mode_clobbers (machine_mode mode) const
+ {
+ return m_mask & m_base_abi->mode_clobbers (mode);
+ }
+
+ bool
+ clobbers_full_reg_p (unsigned int regno) const
+ {
+ return (TEST_HARD_REG_BIT (m_mask, regno)
+ & m_base_abi->clobbers_full_reg_p (regno));
+ }
+
+ bool
+ clobbers_at_least_part_of_reg_p (unsigned int regno) const
+ {
+ return (TEST_HARD_REG_BIT (m_mask, regno)
+ & m_base_abi->clobbers_at_least_part_of_reg_p (regno));
+ }
+
+ bool
+ clobbers_reg_p (machine_mode mode, unsigned int regno) const
+ {
+ return overlaps_hard_reg_set_p (mode_clobbers (mode), mode, regno);
+ }
+
+ bool
+ operator== (const function_abi &other) const
+ {
+ return m_base_abi == other.m_base_abi && m_mask == other.m_mask;
+ }
+
+ bool
+ operator!= (const function_abi &other) const
+ {
+ return !operator== (other);
+ }
+
+protected:
+ const predefined_function_abi *m_base_abi;
+ HARD_REG_SET m_mask;
+};
+
+/* This class collects information about the ABIs of functions that are
+ called in a particular region of code. It is mostly intended to be
+ used as a local variable during an IR walk. */
+class function_abi_aggregator
+{
+public:
+ function_abi_aggregator () : m_abi_clobbers () {}
+
+ /* Record that the code region calls a function with the given ABI. */
+ void
+ note_callee_abi (const function_abi &abi)
+ {
+ m_abi_clobbers[abi.id ()] |= abi.full_and_partial_reg_clobbers ();
+ }
+
+ HARD_REG_SET caller_save_regs (const function_abi &) const;
+
+private:
+ HARD_REG_SET m_abi_clobbers[NUM_ABI_IDS];
+};
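+
+/* Editorial usage sketch, not part of the upstream header: the intended
+   IR-walk pattern, with fn_abi standing for the current function's ABI
+   and the walk itself elided:
+
+     function_abi_aggregator aggregator;
+     (for each call insn INSN in the region)
+       aggregator.note_callee_abi (insn_callee_abi (INSN));
+     HARD_REG_SET to_save = aggregator.caller_save_regs (fn_abi);
+*/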
+
+struct target_function_abi_info
+{
+ /* An array of all the target ABIs that are available in this
+ translation unit. Not all entries are used for all targets,
+ but the structures are relatively small, and using a fixed-size
+ array avoids extra indirection.
+
+ There are various ways of getting an ABI descriptor:
+
+ * fndecl_abi (FNDECL) is the ABI of function FNDECL.
+
+ * fntype_abi (FNTYPE) is the ABI of a function with type FNTYPE.
+
+ * crtl->abi is the ABI of the function that we are currently
+ compiling to rtl.
+
+ * insn_callee_abi (INSN) is the ABI used by the target of call insn INSN.
+
+ * eh_edge_abi is the "ABI" used when taking an EH edge from an
+ exception-throwing statement to an exception handler. Catching
+ exceptions from calls can be treated as an abnormal return from
+ those calls, and this ABI therefore describes the ABI of functions
+ on such an abnormal return. Statements that throw non-call
+ exceptions can be treated as being implicitly wrapped in a call
+ that has such an abnormal return.
+
+ At present, no target needs to support more than one EH ABI.
+
+ * function_abis[N] is the ABI with identifier N. This can be useful
+ when referring back to ABIs that have been collected by number in
+ a bitmask, such as after walking function calls in a particular
+ region of code.
+
+ * default_function_abi refers specifically to the target's default
+ choice of ABI, regardless of which (if any) functions actually
+ use it. This ABI and data derived from it do *not* provide
+ globally conservatively-correct information, so it is only
+ useful in very specific circumstances. */
+ predefined_function_abi x_function_abis[NUM_ABI_IDS];
+};
+
+extern target_function_abi_info default_target_function_abi_info;
+#if SWITCHABLE_TARGET
+extern target_function_abi_info *this_target_function_abi_info;
+#else
+#define this_target_function_abi_info (&default_target_function_abi_info)
+#endif
+
+/* See the comment above x_function_abis for when these macros should be used.
+ At present, eh_edge_abi is always the default ABI, but that could change
+ in future if a target needs it to. */
+#define function_abis \
+ (this_target_function_abi_info->x_function_abis)
+#define default_function_abi \
+ (this_target_function_abi_info->x_function_abis[0])
+#define eh_edge_abi default_function_abi
+
+extern HARD_REG_SET call_clobbers_in_region (unsigned int, const_hard_reg_set,
+ machine_mode mode);
+
+/* Return true if (reg:MODE REGNO) might be clobbered by one of the
+ calls in a region described by ABIS and MASK, where:
+
+ * Bit ID of ABIS is set if the region contains a call with
+ function_abi identifier ID.
+
+ * MASK contains all the registers that are fully or partially
+ clobbered by calls in the region.
+
+ This is not quite as accurate as testing each individual call,
+ but it's a close and conservatively-correct approximation.
+ It's much better for some targets than:
+
+ overlaps_hard_reg_set_p (MASK, MODE, REGNO). */
+
+inline bool
+call_clobbered_in_region_p (unsigned int abis, const_hard_reg_set mask,
+ machine_mode mode, unsigned int regno)
+{
+ HARD_REG_SET clobbers = call_clobbers_in_region (abis, mask, mode);
+ return overlaps_hard_reg_set_p (clobbers, mode, regno);
+}
+
+extern const predefined_function_abi &fntype_abi (const_tree);
+extern function_abi fndecl_abi (const_tree);
+extern function_abi insn_callee_abi (const rtx_insn *);
+extern function_abi expr_callee_abi (const_tree);
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/function.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/function.h
new file mode 100644
index 0000000..d4ce8a7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/function.h
@@ -0,0 +1,728 @@
+/* Structure for saving state for a nested function.
+ Copyright (C) 1989-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_FUNCTION_H
+#define GCC_FUNCTION_H
+
+
+/* Stack of pending (incomplete) sequences saved by `start_sequence'.
+ Each element describes one pending sequence.
+ The main insn-chain is saved in the last element of the chain,
+ unless the chain is empty. */
+
+struct GTY(()) sequence_stack {
+ /* First and last insns in the chain of the saved sequence. */
+ rtx_insn *first;
+ rtx_insn *last;
+ struct sequence_stack *next;
+};
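+
+/* Editorial usage sketch, not part of the upstream header: this stack is
+   normally driven through the emit-rtl API rather than directly, e.g.:
+
+     start_sequence ();
+     (emit insns into the nested sequence)
+     rtx_insn *seq = get_insns ();
+     end_sequence ();
+
+   after which seq holds the nested chain and emission resumes on the
+   enclosing sequence.  */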
+
+struct GTY(()) emit_status {
+ void ensure_regno_capacity ();
+
+ /* This is reset to LAST_VIRTUAL_REGISTER + 1 at the start of each function.
+ After rtl generation, it is 1 plus the largest register number used. */
+ int x_reg_rtx_no;
+
+ /* Lowest label number in current function. */
+ int x_first_label_num;
+
+ /* seq.first and seq.last are the ends of the doubly-linked chain of
+ rtl for the current function. Both are reset to null at the
+ start of rtl generation for the function.
+
+ start_sequence saves both of these on seq.next and then starts
+ a new, nested sequence of insns.
+
+ seq.next is a stack of pending (incomplete) sequences saved by
+ start_sequence. Each element describes one pending sequence.
+ The main insn-chain is the last element of the chain. */
+ struct sequence_stack seq;
+
+ /* INSN_UID for next insn emitted.
+ Reset to 1 for each function compiled. */
+ int x_cur_insn_uid;
+
+ /* INSN_UID for next debug insn emitted. Only used if
+ --param min-nondebug-insn-uid=<value> is given with nonzero value. */
+ int x_cur_debug_insn_uid;
+
+ /* The length of the regno_pointer_align, regno_decl, and x_regno_reg_rtx
+ vectors. Since these vectors are needed during the expansion phase when
+ the total number of registers in the function is not yet known, the
+ vectors are copied and made bigger when necessary. */
+ int regno_pointer_align_length;
+
+ /* Indexed by pseudo register number, if nonzero gives the known alignment
+ for that pseudo (if REG_POINTER is set in x_regno_reg_rtx).
+ Allocated in parallel with x_regno_reg_rtx. */
+ unsigned char * GTY((skip)) regno_pointer_align;
+};
+
+
+/* Indexed by register number, gives an rtx for that register (and only
+ that register). For pseudo registers, it is the unique rtx for
+ that pseudo. For hard registers, it is an rtx of the mode specified
+ by reg_raw_mode.
+
+ FIXME: We could put it into the emit_status struct, but gengtype is not
+ able to deal with a length attribute nested in top-level structures. */
+
+extern GTY ((length ("crtl->emit.x_reg_rtx_no"))) rtx * regno_reg_rtx;
+
+/* For backward compatibility... eventually these should all go away. */
+#define reg_rtx_no (crtl->emit.x_reg_rtx_no)
+
+#define REGNO_POINTER_ALIGN(REGNO) (crtl->emit.regno_pointer_align[REGNO])
+
+struct GTY(()) expr_status {
+ /* Number of units that we should eventually pop off the stack.
+ These are the arguments to function calls that have already returned. */
+ poly_int64_pod x_pending_stack_adjust;
+
+ /* Under some ABIs, it is the caller's responsibility to pop arguments
+ pushed for function calls. A naive implementation would simply pop
+ the arguments immediately after each call. However, if several
+ function calls are made in a row, it is typically cheaper to pop
+ all the arguments after all of the calls are complete since a
+ single pop instruction can be used. Therefore, GCC attempts to
+ defer popping the arguments until absolutely necessary. (For
+ example, at the end of a conditional, the arguments must be popped,
+ since code outside the conditional won't know whether or not the
+ arguments need to be popped.)
+
+ When INHIBIT_DEFER_POP is nonzero, however, the compiler does not
+ attempt to defer pops. Instead, the stack is popped immediately
+ after each call. Rather than setting this variable directly, use
+ NO_DEFER_POP and OK_DEFER_POP. */
+ int x_inhibit_defer_pop;
+
+ /* If PREFERRED_STACK_BOUNDARY and PUSH_ROUNDING are defined, the stack
+ boundary can be momentarily unaligned while pushing the arguments.
+ Record the delta since last aligned boundary here in order to get
+ stack alignment in the nested function calls working right. */
+ poly_int64_pod x_stack_pointer_delta;
+
+ /* Nonzero means __builtin_saveregs has already been done in this function.
+ The value is the pseudoreg containing the value __builtin_saveregs
+ returned. */
+ rtx x_saveregs_value;
+
+ /* Similarly for __builtin_apply_args. */
+ rtx x_apply_args_value;
+
+ /* List of labels that must never be deleted. */
+ vec<rtx_insn *, va_gc> *x_forced_labels;
+};
+
+typedef struct call_site_record_d *call_site_record;
+
+/* RTL representation of exception handling. */
+struct GTY(()) rtl_eh {
+ rtx ehr_stackadj;
+ rtx ehr_handler;
+ rtx_code_label *ehr_label;
+
+ rtx sjlj_fc;
+ rtx_insn *sjlj_exit_after;
+
+ vec<uchar, va_gc> *action_record_data;
+
+ vec<call_site_record, va_gc> *call_site_record_v[2];
+};
+
+#define pending_stack_adjust (crtl->expr.x_pending_stack_adjust)
+#define inhibit_defer_pop (crtl->expr.x_inhibit_defer_pop)
+#define saveregs_value (crtl->expr.x_saveregs_value)
+#define apply_args_value (crtl->expr.x_apply_args_value)
+#define forced_labels (crtl->expr.x_forced_labels)
+#define stack_pointer_delta (crtl->expr.x_stack_pointer_delta)
+
+struct gimple_df;
+struct call_site_record_d;
+struct dw_fde_node;
+class range_query;
+
+struct GTY(()) varasm_status {
+ /* If we're using a per-function constant pool, this is it. */
+ struct rtx_constant_pool *pool;
+
+ /* Number of tree-constants deferred during the expansion of this
+ function. */
+ unsigned int deferred_constants;
+};
+
+
+/* Data for function partitioning. */
+struct GTY(()) function_subsections {
+ /* Assembly labels for the hot and cold text sections, to
+ be used by debugger functions for determining the size of text
+ sections. */
+
+ const char *hot_section_label;
+ const char *cold_section_label;
+ const char *hot_section_end_label;
+ const char *cold_section_end_label;
+};
+
+/* Describe an empty area of space in the stack frame. These can be chained
+ into a list; this is used to keep track of space wasted for alignment
+ reasons. */
+class GTY(()) frame_space
+{
+public:
+ class frame_space *next;
+
+ poly_int64 start;
+ poly_int64 length;
+};
+
+/* Describe emitted calls for -fcallgraph-info. */
+struct GTY(()) callinfo_callee
+{
+ location_t location;
+ tree decl;
+};
+
+/* Describe dynamic allocation for -fcallgraph-info=da. */
+struct GTY(()) callinfo_dalloc
+{
+ location_t location;
+ char const *name;
+};
+
+class GTY(()) stack_usage
+{
+public:
+ /* # of bytes of static stack space allocated by the function. */
+ HOST_WIDE_INT static_stack_size;
+
+ /* # of bytes of dynamic stack space allocated by the function. This is
+ meaningful only if has_unbounded_dynamic_stack_size is zero. */
+ HOST_WIDE_INT dynamic_stack_size;
+
+ /* Upper bound on the number of bytes pushed onto the stack after the
+ prologue. If !ACCUMULATE_OUTGOING_ARGS, it contains the outgoing
+ arguments. */
+ poly_int64 pushed_stack_size;
+
+ /* Nonzero if the amount of stack space allocated dynamically cannot
+ be bounded at compile-time. */
+ unsigned int has_unbounded_dynamic_stack_size : 1;
+
+ /* Functions called within the function, if callgraph is enabled. */
+ vec<callinfo_callee, va_gc> *callees;
+
+ /* Dynamic allocations encountered within the function, if callgraph
+ da is enabled. */
+ vec<callinfo_dalloc, va_gc> *dallocs;
+};
+
+#define current_function_static_stack_size (cfun->su->static_stack_size)
+#define current_function_dynamic_stack_size (cfun->su->dynamic_stack_size)
+#define current_function_pushed_stack_size (cfun->su->pushed_stack_size)
+#define current_function_has_unbounded_dynamic_stack_size \
+ (cfun->su->has_unbounded_dynamic_stack_size)
+#define current_function_allocates_dynamic_stack_space \
+ (current_function_dynamic_stack_size != 0 \
+ || current_function_has_unbounded_dynamic_stack_size)
+
+/* This structure can save all the important global and static variables
+ describing the status of the current function. */
+
+struct GTY(()) function {
+ struct eh_status *eh;
+
+ /* The control flow graph for this function. */
+ struct control_flow_graph *cfg;
+
+ /* GIMPLE body for this function. */
+ gimple_seq gimple_body;
+
+ /* SSA and dataflow information. */
+ struct gimple_df *gimple_df;
+
+ /* The loops in this function. */
+ struct loops *x_current_loops;
+
+ /* Filled by the GIMPLE and RTL FEs; the pass to start compilation with. */
+ char *pass_startwith;
+
+ /* The stack usage of this function. */
+ class stack_usage *su;
+
+ /* Value histograms attached to particular statements. */
+ htab_t GTY((skip)) value_histograms;
+
+ /* For function.cc. */
+
+ /* Points to the FUNCTION_DECL of this function. */
+ tree decl;
+
+ /* A PARM_DECL that should contain the static chain for this function.
+ It will be initialized at the beginning of the function. */
+ tree static_chain_decl;
+
+ /* An expression that contains the non-local goto save area. The first
+ word is the saved frame pointer and the second is the saved stack
+ pointer. */
+ tree nonlocal_goto_save_area;
+
+ /* Vector of function local variables, functions, types and constants. */
+ vec<tree, va_gc> *local_decls;
+
+ /* For md files. */
+
+ /* tm.h can use this to store whatever it likes. */
+ struct machine_function * GTY ((maybe_undef)) machine;
+
+ /* Language-specific code can use this to store whatever it likes. */
+ struct language_function * language;
+
+ /* Used types hash table. */
+ hash_set<tree> *GTY (()) used_types_hash;
+
+ /* Dwarf2 Frame Description Entry, containing the Call Frame Instructions
+ used for unwinding. Only set when either dwarf2 unwinding or dwarf2
+ debugging is enabled. */
+ struct dw_fde_node *fde;
+
+ /* Range query mechanism for functions. The default is to pick up
+ global ranges. If a pass wants on-demand ranges OTOH, it must
+ call enable/disable_ranger(). The pointer is never null. It
+ should be queried by calling get_range_query(). */
+ range_query * GTY ((skip)) x_range_query;
+
+ /* Last statement uid. */
+ int last_stmt_uid;
+
+ /* Debug marker counter. Count begin stmt markers. We don't have
+ to keep it exact, it's more of a rough estimate to enable us to
+ decide whether they are too many to copy during inlining, or when
+ expanding to RTL. */
+ int debug_marker_count;
+
+ /* Function sequence number for profiling, debugging, etc. */
+ int funcdef_no;
+
+ /* Line number of the start of the function for debugging purposes. */
+ location_t function_start_locus;
+
+ /* Line number of the end of the function. */
+ location_t function_end_locus;
+
+ /* Properties used by the pass manager. */
+ unsigned int curr_properties;
+ unsigned int last_verified;
+
+ /* Different from normal TODO_flags which are handled right at the
+ beginning or the end of one pass execution, the pending_TODOs
+ are passed down in the pipeline until one of its consumers can
+ perform the requested action. Consumers should then clear the
+ flags for the actions that they have taken. */
+ unsigned int pending_TODOs;
+
+ /* Non-null if the function does something that would prevent it from
+ being copied; this applies to both versioning and inlining. Set to
+ a string describing the reason for failure. */
+ const char * GTY((skip)) cannot_be_copied_reason;
+
+ /* Last assigned dependence info clique. */
+ unsigned short last_clique;
+
+ /* Collected bit flags. */
+
+ /* Number of units of general registers that need saving in a stdarg
+ function. What a unit is depends on the backend: it can be a number
+ of bytes or a number of registers. */
+ unsigned int va_list_gpr_size : 8;
+
+ /* Number of units of floating-point registers that need saving in a
+ stdarg function. */
+ unsigned int va_list_fpr_size : 8;
+
+ /* Nonzero if function being compiled can call setjmp. */
+ unsigned int calls_setjmp : 1;
+
+ /* Nonzero if function being compiled can call alloca,
+ either as a subroutine or builtin. */
+ unsigned int calls_alloca : 1;
+
+ /* Nonzero if function being compiled can call __builtin_eh_return. */
+ unsigned int calls_eh_return : 1;
+
+ /* Nonzero if function being compiled receives nonlocal gotos
+ from nested functions. */
+ unsigned int has_nonlocal_label : 1;
+
+ /* Nonzero if function being compiled has a forced label
+ placed into static storage. */
+ unsigned int has_forced_label_in_static : 1;
+
+ /* Nonzero if we've set cannot_be_copied_reason. I.e. if
+ (cannot_be_copied_set && !cannot_be_copied_reason), the function
+ can in fact be copied. */
+ unsigned int cannot_be_copied_set : 1;
+
+ /* Nonzero if current function uses stdarg.h or equivalent. */
+ unsigned int stdarg : 1;
+
+ unsigned int after_inlining : 1;
+ unsigned int always_inline_functions_inlined : 1;
+
+ /* Nonzero if function being compiled can throw synchronous non-call
+ exceptions. */
+ unsigned int can_throw_non_call_exceptions : 1;
+
+ /* Nonzero if instructions that may throw exceptions but don't otherwise
+ contribute to the execution of the program can be deleted. */
+ unsigned int can_delete_dead_exceptions : 1;
+
+ /* Fields below this point are not set for abstract functions; see
+ allocate_struct_function. */
+
+ /* Nonzero if function being compiled needs to be given an address
+ where the value should be stored. */
+ unsigned int returns_struct : 1;
+
+ /* Nonzero if function being compiled needs to
+ return the address of where it has put a structure value. */
+ unsigned int returns_pcc_struct : 1;
+
+ /* Nonzero if this function has local DECL_HARD_REGISTER variables.
+ In this case code motion has to be done more carefully. */
+ unsigned int has_local_explicit_reg_vars : 1;
+
+ /* Nonzero if the current function is a thunk, i.e., a lightweight
+ function (implemented by the output_mi_thunk hook) that just
+ adjusts one of its arguments and forwards to another
+ function. */
+ unsigned int is_thunk : 1;
+
+ /* Nonzero if the current function contains any loops with
+ loop->force_vectorize set. */
+ unsigned int has_force_vectorize_loops : 1;
+
+ /* Nonzero if the current function contains any loops with
+ nonzero value in loop->simduid. */
+ unsigned int has_simduid_loops : 1;
+
+ /* Nonzero when the tail call has been identified. */
+ unsigned int tail_call_marked : 1;
+
+ /* Nonzero if the current function contains a #pragma GCC unroll. */
+ unsigned int has_unroll : 1;
+
+ /* Set when the function was compiled with generation of debug
+ (begin stmt, inline entry, ...) markers enabled. */
+ unsigned int debug_nonbind_markers : 1;
+
+ /* Set if this is a coroutine-related function. */
+ unsigned int coroutine_component : 1;
+
+ /* Set if there are any OMP_TARGET regions in the function. */
+ unsigned int has_omp_target : 1;
+
+ /* Set for artificial function created for [[assume (cond)]].
+ These should be GIMPLE optimized, but not expanded to RTL. */
+ unsigned int assume_function : 1;
+};
+
+/* Add the decl D to the local_decls list of FUN. */
+
+void add_local_decl (struct function *fun, tree d);
+
+#define FOR_EACH_LOCAL_DECL(FUN, I, D) \
+ FOR_EACH_VEC_SAFE_ELT_REVERSE ((FUN)->local_decls, I, D)
+
+/* Record a final call to CALLEE at LOCATION. */
+void record_final_call (tree callee, location_t location);
+
+/* Record a dynamic allocation made for DECL_OR_EXP. */
+void record_dynamic_alloc (tree decl_or_exp);
+
+/* If va_list_[gf]pr_size is set to this, it means we don't know how
+ many units need to be saved. */
+#define VA_LIST_MAX_GPR_SIZE 255
+#define VA_LIST_MAX_FPR_SIZE 255
+
+/* The function currently being compiled. */
+extern GTY(()) struct function *cfun;
+
+/* In order to ensure that cfun is not set directly, we redefine it so
+ that it is not an lvalue. Rather than assign to cfun, use
+ push_cfun or set_cfun. */
+#define cfun (cfun + 0)
+
+/* Nonzero if we've already converted virtual regs to hard regs. */
+extern int virtuals_instantiated;
+
+/* Nonzero if at least one trampoline has been created. */
+extern int trampolines_created;
+
+struct GTY((for_user)) types_used_by_vars_entry {
+ tree type;
+ tree var_decl;
+};
+
+struct used_type_hasher : ggc_ptr_hash<types_used_by_vars_entry>
+{
+ static hashval_t hash (types_used_by_vars_entry *);
+ static bool equal (types_used_by_vars_entry *, types_used_by_vars_entry *);
+};
+
+/* Hash table recording the relationship between a global variable
+ and the types it references in its initializer. The key of the
+ entry is a referenced type, and the value is the DECL of the global
+ variable. used_type_hasher above supplies the hash and equality
+ functions for this hash table. */
+extern GTY(()) hash_table<used_type_hasher> *types_used_by_vars_hash;
+
+void types_used_by_var_decl_insert (tree type, tree var_decl);
+
+/* During parsing of a global variable, this vector contains the types
+ referenced by the global variable. */
+extern GTY(()) vec<tree, va_gc> *types_used_by_cur_var_decl;
+
+
+/* Return the loop tree of FN. */
+
+inline struct loops *
+loops_for_fn (struct function *fn)
+{
+ return fn->x_current_loops;
+}
+
+/* Set the loop tree of FN to LOOPS. */
+
+inline void
+set_loops_for_fn (struct function *fn, struct loops *loops)
+{
+ gcc_checking_assert (fn->x_current_loops == NULL || loops == NULL);
+ fn->x_current_loops = loops;
+}
+
+/* For backward compatibility... eventually these should all go away. */
+#define current_function_funcdef_no (cfun->funcdef_no)
+
+#define current_loops (cfun->x_current_loops)
+#define dom_computed (cfun->cfg->x_dom_computed)
+#define n_bbs_in_dom_tree (cfun->cfg->x_n_bbs_in_dom_tree)
+#define VALUE_HISTOGRAMS(fun) (fun)->value_histograms
+
+/* A pointer to a function to create target specific, per-function
+ data structures. */
+extern struct machine_function * (*init_machine_status) (void);
+
+/* Structure to record the size of a sequence of arguments
+ as the sum of a tree-expression and a constant. This structure is
+ also used to store offsets from the stack, which might be negative,
+ so the variable part must be ssizetype, not sizetype. */
+
+struct args_size
+{
+ poly_int64_pod constant;
+ tree var;
+};
+
+/* Package up various arg related fields of struct args for
+ locate_and_pad_parm. */
+struct locate_and_pad_arg_data
+{
+ /* Size of this argument on the stack, rounded up for any padding it
+ gets. If REG_PARM_STACK_SPACE is defined, then register parms are
+ counted here, otherwise they aren't. */
+ struct args_size size;
+ /* Offset of this argument from beginning of stack-args. */
+ struct args_size offset;
+ /* Offset to the start of the stack slot. Different from OFFSET
+ if this arg pads downward. */
+ struct args_size slot_offset;
+ /* The amount that the stack pointer needs to be adjusted to
+ force alignment for the next argument. */
+ struct args_size alignment_pad;
+ /* Which way we should pad this arg. */
+ pad_direction where_pad;
+ /* slot_offset is at least this aligned. */
+ unsigned int boundary;
+};
+
+/* Add the value of the tree INC to the `struct args_size' TO. */
+
+#define ADD_PARM_SIZE(TO, INC) \
+do { \
+ tree inc = (INC); \
+ if (tree_fits_shwi_p (inc)) \
+ (TO).constant += tree_to_shwi (inc); \
+ else if ((TO).var == 0) \
+ (TO).var = fold_convert (ssizetype, inc); \
+ else \
+ (TO).var = size_binop (PLUS_EXPR, (TO).var, \
+ fold_convert (ssizetype, inc)); \
+} while (0)
+
+#define SUB_PARM_SIZE(TO, DEC) \
+do { \
+ tree dec = (DEC); \
+ if (tree_fits_shwi_p (dec)) \
+ (TO).constant -= tree_to_shwi (dec); \
+ else if ((TO).var == 0) \
+ (TO).var = size_binop (MINUS_EXPR, ssize_int (0), \
+ fold_convert (ssizetype, dec)); \
+ else \
+ (TO).var = size_binop (MINUS_EXPR, (TO).var, \
+ fold_convert (ssizetype, dec)); \
+} while (0)
+
+/* Convert the implicit sum in a `struct args_size' into a tree
+ of type ssizetype. */
+#define ARGS_SIZE_TREE(SIZE) \
+((SIZE).var == 0 ? ssize_int ((SIZE).constant) \
+ : size_binop (PLUS_EXPR, fold_convert (ssizetype, (SIZE).var), \
+ ssize_int ((SIZE).constant)))
+
+/* Convert the implicit sum in a `struct args_size' into an rtx. */
+#define ARGS_SIZE_RTX(SIZE) \
+((SIZE).var == 0 ? gen_int_mode ((SIZE).constant, Pmode) \
+ : expand_normal (ARGS_SIZE_TREE (SIZE)))
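+
+/* Editorial usage sketch, not part of the upstream header: accumulating
+   two parameter sizes and materializing the total, where size0 and size1
+   are hypothetical size trees:
+
+     struct args_size total = {};
+     ADD_PARM_SIZE (total, size0);
+     ADD_PARM_SIZE (total, size1);
+     rtx total_rtx = ARGS_SIZE_RTX (total);
+*/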
+
+#define ASLK_REDUCE_ALIGN 1
+#define ASLK_RECORD_PAD 2
+
+/* If pointers to member functions use the least significant bit to
+ indicate whether a function is virtual, ensure a pointer
+ to this function will have that bit clear. */
+#define MINIMUM_METHOD_BOUNDARY \
+ ((TARGET_PTRMEMFUNC_VBIT_LOCATION == ptrmemfunc_vbit_in_pfn) \
+ ? MAX (FUNCTION_BOUNDARY, 2 * BITS_PER_UNIT) : FUNCTION_BOUNDARY)
+
+enum stack_clash_probes {
+ NO_PROBE_NO_FRAME,
+ NO_PROBE_SMALL_FRAME,
+ PROBE_INLINE,
+ PROBE_LOOP
+};
+
+extern void dump_stack_clash_frame_info (enum stack_clash_probes, bool);
+
+
+extern void push_function_context (void);
+extern void pop_function_context (void);
+
+/* Save and restore status information for a nested function. */
+extern void free_after_parsing (struct function *);
+extern void free_after_compilation (struct function *);
+
+/* Return size needed for stack frame based on slots so far allocated.
+ This size counts from zero. It is not rounded to STACK_BOUNDARY;
+ the caller may have to do that. */
+extern poly_int64 get_frame_size (void);
+
+/* Issue an error message and return TRUE if frame OFFSET overflows in
+ the signed target pointer arithmetic for function FUNC. Otherwise
+ return FALSE. */
+extern bool frame_offset_overflow (poly_int64, tree);
+
+extern unsigned int spill_slot_alignment (machine_mode);
+
+extern rtx assign_stack_local_1 (machine_mode, poly_int64, int, int);
+extern rtx assign_stack_local (machine_mode, poly_int64, int);
+extern rtx assign_stack_temp_for_type (machine_mode, poly_int64, tree);
+extern rtx assign_stack_temp (machine_mode, poly_int64);
+extern rtx assign_temp (tree, int, int);
+extern void update_temp_slot_address (rtx, rtx);
+extern void preserve_temp_slots (rtx);
+extern void free_temp_slots (void);
+extern void push_temp_slots (void);
+extern void pop_temp_slots (void);
+extern void init_temp_slots (void);
+extern rtx get_hard_reg_initial_reg (rtx);
+extern rtx get_hard_reg_initial_val (machine_mode, unsigned int);
+extern rtx has_hard_reg_initial_val (machine_mode, unsigned int);
+
+/* Called from gimple_expand_cfg. */
+extern unsigned int emit_initial_value_sets (void);
+
+extern bool initial_value_entry (int i, rtx *, rtx *);
+extern void instantiate_decl_rtl (rtx x);
+extern int aggregate_value_p (const_tree, const_tree);
+extern bool use_register_for_decl (const_tree);
+extern gimple_seq gimplify_parameters (gimple_seq *);
+extern void locate_and_pad_parm (machine_mode, tree, int, int, int,
+ tree, struct args_size *,
+ struct locate_and_pad_arg_data *);
+extern void generate_setjmp_warnings (void);
+
+/* Identify BLOCKs referenced by more than one NOTE_INSN_BLOCK_{BEG,END},
+ and create duplicate blocks. */
+extern void reorder_blocks (void);
+extern void clear_block_marks (tree);
+extern tree blocks_nreverse (tree);
+extern tree block_chainon (tree, tree);
+
+/* Set BLOCK_NUMBER for all the blocks in FN. */
+extern void number_blocks (tree);
+
+/* cfun shouldn't be set directly; use one of these functions instead. */
+extern void set_cfun (struct function *new_cfun, bool force = false);
+extern void push_cfun (struct function *new_cfun);
+extern void pop_cfun (void);
+
+extern int get_next_funcdef_no (void);
+extern int get_last_funcdef_no (void);
+extern void allocate_struct_function (tree, bool);
+extern void push_struct_function (tree fndecl, bool = false);
+extern void push_dummy_function (bool);
+extern void pop_dummy_function (void);
+extern void init_dummy_function_start (void);
+extern void init_function_start (tree);
+extern void stack_protect_epilogue (void);
+extern void expand_function_start (tree);
+extern void expand_dummy_function_end (void);
+
+extern void thread_prologue_and_epilogue_insns (void);
+extern void diddle_return_value (void (*)(rtx, void*), void*);
+extern void clobber_return_register (void);
+extern void expand_function_end (void);
+extern rtx get_arg_pointer_save_area (void);
+extern void maybe_copy_prologue_epilogue_insn (rtx, rtx);
+extern int prologue_contains (const rtx_insn *);
+extern int epilogue_contains (const rtx_insn *);
+extern int prologue_epilogue_contains (const rtx_insn *);
+extern void record_prologue_seq (rtx_insn *);
+extern void record_epilogue_seq (rtx_insn *);
+extern void emit_return_into_block (bool simple_p, basic_block bb);
+extern void set_return_jump_label (rtx_insn *);
+extern bool active_insn_between (rtx_insn *head, rtx_insn *tail);
+extern vec<edge> convert_jumps_to_returns (basic_block last_bb, bool simple_p,
+ vec<edge> unconverted);
+extern basic_block emit_return_for_exit (edge exit_fallthru_edge,
+ bool simple_p);
+extern void reposition_prologue_and_epilogue_notes (void);
+
+/* Returns the name of the current function. */
+extern const char *fndecl_name (tree);
+extern const char *function_name (struct function *);
+extern const char *current_function_name (void);
+
+extern void used_types_insert (tree);
+
+extern bool currently_expanding_function_start;
+
+#endif /* GCC_FUNCTION_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc-plugin.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc-plugin.h
new file mode 100644
index 0000000..730aae7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc-plugin.h
@@ -0,0 +1,47 @@
+/* Public header file for plugins to include.
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_PLUGIN_H
+#define GCC_PLUGIN_H
+
+#ifndef IN_GCC
+#define IN_GCC
+#endif
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "cfghooks.h"
+#include "hard-reg-set.h"
+#include "cfgrtl.h"
+#include "cfganal.h"
+#include "lcm.h"
+#include "cfgbuild.h"
+#include "cfgcleanup.h"
+#include "plugin-api.h"
+#include "ipa-ref.h"
+#include "alias.h"
+#include "flags.h"
+#include "tree-core.h"
+#include "fold-const.h"
+#include "tree-check.h"
+#include "plugin.h"
+
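+/* Editorial sketch, not part of the upstream header: a minimal plugin
+   built against this header looks roughly like
+
+     #include "gcc-plugin.h"
+
+     int plugin_is_GPL_compatible;
+
+     int
+     plugin_init (struct plugin_name_args *info,
+                  struct plugin_gcc_version *version)
+     {
+       return 0;  (register callbacks here via register_callback)
+     }
+*/
+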
+#endif /* GCC_PLUGIN_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc-rich-location.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc-rich-location.h
new file mode 100644
index 0000000..ffba4b8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc-rich-location.h
@@ -0,0 +1,226 @@
+/* Declarations relating to class gcc_rich_location
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RICH_LOCATION_H
+#define GCC_RICH_LOCATION_H
+
+/* A gcc_rich_location is libcpp's rich_location with additional
+ helper methods for working with gcc's types. The class is not
+ copyable or assignable because rich_location isn't. */
+
+class gcc_rich_location : public rich_location
+{
+ public:
+ /* Constructors. */
+
+ /* Constructing from a location. */
+ explicit gcc_rich_location (location_t loc, const range_label *label = NULL)
+ : rich_location (line_table, loc, label)
+ {
+ }
+
+ /* Methods for adding ranges via gcc entities. */
+ void
+ add_expr (tree expr, range_label *label);
+
+ void
+ maybe_add_expr (tree t, range_label *label);
+
+ void add_fixit_misspelled_id (location_t misspelled_token_loc,
+ tree hint_id);
+
+ /* If LOC is within the spans of lines that will already be printed for
+ this gcc_rich_location, then add it as a secondary location
+ and return true.
+
+ Otherwise return false.
+
+ This allows a diagnostic to compactly print secondary locations
+ in one diagnostic when these are near enough to the primary locations
+ for diagnostic-show-locus.cc to cope with them, and to fall back to
+ printing them via a note otherwise, e.g.:
+
+     gcc_rich_location richloc (primary_loc);
+     bool added_secondary = richloc.add_location_if_nearby (secondary_loc);
+     error_at (&richloc, "main message");
+     if (!added_secondary)
+       inform (secondary_loc, "message for secondary");
+
+ Implemented in diagnostic-show-locus.cc. */
+
+ bool add_location_if_nearby (location_t loc,
+ bool restrict_to_current_line_spans = true,
+ const range_label *label = NULL);
+
+ /* Add a fix-it hint suggesting the insertion of CONTENT before
+ INSERTION_POINT.
+
+ Attempt to handle formatting: if INSERTION_POINT is the first thing on
+ its line, and INDENT is sufficiently sane, then add CONTENT on its own
+ line, using the indentation of INDENT.
+ Otherwise, add CONTENT directly before INSERTION_POINT.
+
+ For example, adding "CONTENT;" with the closing brace as the insertion
+ point and using "INDENT;" for indentation:
+
+     if ()
+       {
+         INDENT;
+       }
+
+ would lead to:
+
+     if ()
+       {
+         INDENT;
+         CONTENT;
+       }
+
+ but adding it to:
+
+ if () {INDENT;}
+
+ would lead to:
+
+ if () {INDENT;CONTENT;}
+ */
+ void add_fixit_insert_formatted (const char *content,
+ location_t insertion_point,
+ location_t indent);
+};
+
+/* Concrete subclass of libcpp's range_label.
+ Simple implementation using a string literal. */
+
+class text_range_label : public range_label
+{
+ public:
+ text_range_label (const char *text) : m_text (text) {}
+
+ label_text get_text (unsigned /*range_idx*/) const final override
+ {
+ return label_text::borrow (m_text);
+ }
+
+ private:
+ const char *m_text;
+};
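+
+/* Editorial usage sketch, not part of the upstream header: attaching a
+   literal label to a location when constructing a rich location, where
+   loc is a hypothetical location_t:
+
+     text_range_label label ("declared here");
+     gcc_rich_location richloc (loc, &label);
+*/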
+
+/* Concrete subclass of libcpp's range_label for use in
+ diagnostics involving mismatched types.
+
+ Each frontend that uses this should supply its own implementation.
+
+ Generate a label describing LABELLED_TYPE. The frontend may use
+ OTHER_TYPE where appropriate for highlighting the differences between
+ the two types (analogous to C++'s use of %H and %I with
+ template types).
+
+ Either or both of LABELLED_TYPE and OTHER_TYPE may be NULL_TREE.
+ If LABELLED_TYPE is NULL_TREE, then there is no label.
+
+ For example, this rich_location could use two instances of
+ range_label_for_type_mismatch:
+
+ printf ("arg0: %i arg1: %s arg2: %i",
+ ^~
+ |
+ const char *
+ 100, 101, 102);
+ ~~~
+ |
+ int
+
+ (a) the label for "%s" with LABELLED_TYPE for "const char*" and
+ (b) the label for "101" with LABELLED_TYPE for "int"
+ where each one uses the other's type as OTHER_TYPE. */
+
+class range_label_for_type_mismatch : public range_label
+{
+ public:
+ range_label_for_type_mismatch (tree labelled_type, tree other_type)
+ : m_labelled_type (labelled_type), m_other_type (other_type)
+ {
+ }
+
+ label_text get_text (unsigned range_idx) const override;
+
+ protected:
+ tree m_labelled_type;
+ tree m_other_type;
+};
+
+/* Subclass of range_label for labelling the type of EXPR when reporting
+ a type mismatch between EXPR and OTHER_EXPR.
+ Either or both of EXPR and OTHER_EXPR could be NULL. */
+
+class maybe_range_label_for_tree_type_mismatch : public range_label
+{
+ public:
+ maybe_range_label_for_tree_type_mismatch (tree expr, tree other_expr)
+ : m_expr (expr), m_other_expr (other_expr)
+ {
+ }
+
+ label_text get_text (unsigned range_idx) const final override;
+
+ private:
+ tree m_expr;
+ tree m_other_expr;
+};
+
+class op_location_t;
+
+/* A subclass of rich_location for showing problems with binary operations.
+
+ If enough location information is available, the ctor will make a
+ 3-location rich_location of the form:
+
+     arg_0 op arg_1
+     ~~~~~ ^~ ~~~~~
+       |        |
+       |        arg1 type
+       arg0 type
+
+ labelling the types of the arguments if SHOW_TYPES is true.
+
+ Otherwise, it will fall back to a 1-location rich_location using the
+ compound location within LOC:
+
+     arg_0 op arg_1
+     ~~~~~~^~~~~~~~
+
+ for which we can't label the types. */
+
+class binary_op_rich_location : public gcc_rich_location
+{
+ public:
+ binary_op_rich_location (const op_location_t &loc,
+ tree arg0, tree arg1,
+ bool show_types);
+
+ private:
+ static bool use_operator_loc_p (const op_location_t &loc,
+ tree arg0, tree arg1);
+
+ maybe_range_label_for_tree_type_mismatch m_label_for_arg0;
+ maybe_range_label_for_tree_type_mismatch m_label_for_arg1;
+};
+
+#endif /* GCC_RICH_LOCATION_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc-symtab.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc-symtab.h
new file mode 100644
index 0000000..3602ad6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc-symtab.h
@@ -0,0 +1,28 @@
+/* Declarations for symtab.cc.
+ FIXME - This file should be named symtab.h, but that name conflicts
+ with libcpp's symtab.h.
+
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SYMTAB_H
+#define GCC_SYMTAB_H
+
+extern void change_decl_assembler_name (tree, tree);
+
+#endif // GCC_SYMTAB_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc.h
new file mode 100644
index 0000000..19a61b3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcc.h
@@ -0,0 +1,100 @@
+/* Header file for modules that link with gcc.cc
+ Copyright (C) 1999-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GCC_H
+#define GCC_GCC_H
+
+#include "version.h"
+#include "diagnostic-core.h"
+
+/* The top-level "main" within the driver would be ~1000 lines long.
+ This class breaks it up into smaller functions and contains some
+ state shared by them. */
+
+class driver
+{
+ public:
+ driver (bool can_finalize, bool debug);
+ ~driver ();
+ int main (int argc, char **argv);
+ void finalize ();
+
+ private:
+ void set_progname (const char *argv0) const;
+ void expand_at_files (int *argc, char ***argv) const;
+ void decode_argv (int argc, const char **argv);
+ void global_initializations ();
+ void build_multilib_strings () const;
+ void set_up_specs () const;
+ void putenv_COLLECT_GCC (const char *argv0) const;
+ void maybe_putenv_COLLECT_LTO_WRAPPER () const;
+ void maybe_putenv_OFFLOAD_TARGETS () const;
+ void handle_unrecognized_options ();
+ int maybe_print_and_exit () const;
+ bool prepare_infiles ();
+ void do_spec_on_infiles () const;
+ void maybe_run_linker (const char *argv0) const;
+ void final_actions () const;
+ void detect_jobserver () const;
+ int get_exit_code () const;
+
+ private:
+ char *explicit_link_files;
+ struct cl_decoded_option *decoded_options;
+ unsigned int decoded_options_count;
+ option_proposer m_option_proposer;
+};
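+
+/* Editorial usage sketch, not part of the upstream header: the driver's
+   entry point constructs one of these and delegates to it, roughly:
+
+     int
+     main (int argc, char **argv)
+     {
+       driver d (false, false);
+       return d.main (argc, argv);
+     }
+*/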
+
+/* The mapping of a spec function name to the C function that
+ implements it. */
+struct spec_function
+{
+ const char *name;
+ const char *(*func) (int, const char **);
+};
+
+/* These are exported by gcc.cc. */
+extern int do_spec (const char *);
+extern void record_temp_file (const char *, int, int);
+extern void set_input (const char *);
+
+/* Spec files linked with gcc.cc must provide definitions for these. */
+
+/* Called before processing to change/add/remove arguments. */
+extern void lang_specific_driver (struct cl_decoded_option **,
+ unsigned int *, int *);
+
+/* Called before linking. Returns 0 on success and -1 on failure. */
+extern int lang_specific_pre_link (void);
+
+extern int n_infiles;
+
+/* Number of extra output files that lang_specific_pre_link may generate. */
+extern int lang_specific_extra_outfiles;
+
+/* A vector of corresponding output files is made up later. */
+
+extern const char **outfiles;
+
+extern void
+driver_get_configure_time_options (void (*cb)(const char *option,
+ void *user_data),
+ void *user_data);
+
+#endif /* ! GCC_GCC_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcov-counter.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcov-counter.def
new file mode 100644
index 0000000..727ef42
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcov-counter.def
@@ -0,0 +1,51 @@
+/* Definitions for the gcov counters in the GNU compiler.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Before including this file, define a macro:
+
+ DEF_GCOV_COUNTER(COUNTER, NAME, FN_TYPE)
+
+ This macro will be expanded for all supported gcov counters, giving
+ their names and the types of their handler functions. FN_TYPE is
+ expanded to a handler-function suffix; for the merge handlers, for
+ example, it is pasted as __gcov_merge ## FN_TYPE. */
+
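+/* Editorial example, not part of the upstream file: a consumer can turn
+   the list below into a table of counter names like so:
+
+     #define DEF_GCOV_COUNTER(COUNTER, NAME, FN_TYPE) NAME,
+     static const char *const ctr_names[] = {
+     #include "gcov-counter.def"
+     };
+     #undef DEF_GCOV_COUNTER
+*/
+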
+/* Arc transitions. */
+DEF_GCOV_COUNTER(GCOV_COUNTER_ARCS, "arcs", _add)
+
+/* Histogram of value inside an interval. */
+DEF_GCOV_COUNTER(GCOV_COUNTER_V_INTERVAL, "interval", _add)
+
+/* Histogram of exact power2 logarithm of a value. */
+DEF_GCOV_COUNTER(GCOV_COUNTER_V_POW2, "pow2", _add)
+
+/* The most common value of expression. */
+DEF_GCOV_COUNTER(GCOV_COUNTER_V_TOPN, "topn", _topn)
+
+/* The most common indirect address. */
+DEF_GCOV_COUNTER(GCOV_COUNTER_V_INDIR, "indirect_call", _topn)
+
+/* Compute average value passed to the counter. */
+DEF_GCOV_COUNTER(GCOV_COUNTER_AVERAGE, "average", _add)
+
+/* IOR of all the values passed to the counter. */
+DEF_GCOV_COUNTER(GCOV_COUNTER_IOR, "ior", _ior)
+
+/* Time profile collecting the first run of a function. */
+DEF_GCOV_COUNTER(GCOV_TIME_PROFILER, "time_profiler", _time_profile)
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcov-io.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcov-io.h
new file mode 100644
index 0000000..bfe4439
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcov-io.h
@@ -0,0 +1,394 @@
+/* File format for coverage information
+ Copyright (C) 1996-2023 Free Software Foundation, Inc.
+ Contributed by Bob Manson <manson@cygnus.com>.
+ Completely remangled by Nathan Sidwell <nathan@codesourcery.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+
+/* CAVEAT: Coverage information files should not be parsed directly,
+ instead use `gcov --json-format`, which provides
+ machine-readable coverage information.
+
+ Note that the following file format documentation might be outdated.
+
+ Coverage information is held in two files. A notes file, which is
+ generated by the compiler, and a data file, which is generated by
+ the program under test. Both files use a similar structure. We do
+ not attempt to make these files backwards compatible with previous
+ versions, as you only need coverage information when developing a
+ program. We do hold version information, so that mismatches can be
+ detected, and we use a format that allows tools to skip information
+ they do not understand or are not interested in.
+
+ Numbers are recorded in the 32 bit unsigned binary form of the
+ endianness of the machine generating the file. 64 bit numbers are
+ stored as two 32 bit numbers, the low part first. A string is
+ stored as its length in bytes, followed by the string itself.
+ Zero length and NULL strings are simply stored as a length
+ of zero (they have no trailing NUL).
+
+ int32: byte3 byte2 byte1 byte0 | byte0 byte1 byte2 byte3
+ int64: int32:low int32:high
+ string: int32:0 | int32:length char* char:0
+ item: int32 | int64 | string
+
+ The basic format of the notes file is
+
+ file : int32:magic int32:version int32:stamp int32:support_unexecuted_blocks record*
+
+ The basic format of the data file is
+
+ file : int32:magic int32:version int32:stamp record*
+
+ A filename header may be used to provide a filename for the data
+ in a stream, to support gcov in freestanding environments. This
+ header is used by the merge-stream subcommand of gcov-tool. The
+ format of the filename header is
+
+ filename-header : int32:magic int32:version string
+
+ The magic ident is different for the notes and the data files as
+ well as the filename header. The magic ident is used to determine
+ the endianness of the file, when reading. The version is the same
+ for both files and is derived from gcc's version number. The stamp
+ value is used to synchronize note and data files and to synchronize
+ merging within a data file. It need not be an absolute time stamp,
+ merely a ticker that increments fast enough and cycles slow enough
+ to distinguish different compile/run/compile cycles.
+
+ Although the ident and version are formally 32 bit numbers, they
+ are derived from 4 character ASCII strings. The version number
+ consists of a two character major version number (whose first
+ digit starts from the letter 'A', so as not to clash with the older
+ numbering scheme), a single character minor version number,
+ and a single character indicating the status of the release:
+ 'e' for experimental, 'p' for prerelease and 'r' for release.
+ Because, by good fortune, these are in alphabetical order, string
+ collating can be used to compare version strings. Be aware that
+ the 'e' designation will (naturally) be unstable and might be
+ incompatible with itself. For gcc 17.0 experimental, it would be
+ 'B70e' (0x42373065). As we currently do not ship more than 5 minor
+ releases, the single minor character is always sufficient. The
+ major number currently changes roughly every year, which gives us
+ space for the next 250 years (the maximum allowed number being
+ 259.9).
+
+ A record has a tag, length and variable amount of data.
+
+ record: header data
+ header: int32:tag int32:length
+ data: item*
+
+ Records are not nested, but there is a record hierarchy. Tag
+ numbers reflect this hierarchy. Tags are unique across note and
+ data files. Some record types have a varying amount of data. The
+ LENGTH is the number of bytes that follow and is usually used to
+ determine how much data there is. The tag value is split into 4 8-bit
+ fields, one for each of four possible levels. The most significant
+ is allocated first. Unused levels are zero. Active levels are
+ odd-valued, so that the LSB of the level is one. A sub-level
+ incorporates the values of its superlevels. This formatting allows
+ you to determine the tag hierarchy, without understanding the tags
+ themselves, and is similar to the standard section numbering used
+ in technical documents. Level values [1..3f] are used for common
+ tags, values [41..9f] for the notes file and [a1..ff] for the data
+ file.
+
+ The notes file contains the following records
+ note: unit function-graph*
+ unit: header int32:checksum string:source
+ function-graph: announce_function basic_blocks {arcs | lines}*
+ announce_function: header int32:ident
+ int32:lineno_checksum int32:cfg_checksum
+ string:name string:source int32:start_lineno int32:start_column int32:end_lineno
+ basic_block: header int32:flags*
+ arcs: header int32:block_no arc*
+ arc: int32:dest_block int32:flags
+ lines: header int32:block_no line*
+ int32:0 string:NULL
+ line: int32:line_no | int32:0 string:filename
+
+ The BASIC_BLOCK record holds per-bb flags. The number of blocks
+ can be inferred from its data length. There is one ARCS record per
+ basic block. The number of arcs from a bb is implicit from the
+ data length. It enumerates the destination bb and per-arc flags.
+ There is one LINES record per basic block, it enumerates the source
+ lines which belong to that basic block. Source file names are
+ introduced by a line number of 0, following lines are from the new
+ source file. The initial source file for the function is NULL, but
+ the current source file should be remembered from one LINES record
+ to the next. The end of a block is indicated by an empty filename
+ - this does not reset the current source file. Note there is no
+ ordering of the ARCS and LINES records: they may be in any order,
+ interleaved in any manner. The current filename follows the order
+ the LINES records are stored in the file, *not* the ordering of the
+ blocks they are for.
+
+ The data file contains the following records.
+ data: {unit summary:object function-data*}*
+ unit: header int32:checksum
+ function-data: announce_function present counts
+ announce_function: header int32:ident
+ int32:lineno_checksum int32:cfg_checksum
+ present: header int32:present
+ counts: header int64:count*
+ summary: int32:checksum int32:runs int32:sum_max
+
+ The ANNOUNCE_FUNCTION record is the same as that in the note file,
+ but without the source location. The COUNTS record gives the
+ counter values for instrumented features. The SUMMARY records are
+ about the whole program. The checksum is used for whole program
+ summaries, and
+ disambiguates different programs which include the same
+ instrumented object file. There may be several program summaries,
+ each with a unique checksum. The object summary's checksum is
+ zero. Note that the data file might contain information from
+ several runs concatenated, or the data might be merged.
+
+ This file is included by the compiler, the gcov tools and the
+ runtime support library libgcov. IN_LIBGCOV and IN_GCOV are used to
+ distinguish which case is which. If IN_LIBGCOV is nonzero,
+ libgcov is being built. If IN_GCOV is nonzero, the gcov tools are
+ being built. Otherwise the compiler is being built. IN_GCOV may be
+ positive or negative. If positive, we are compiling a tool that
+ requires additional functions (see the code for knowledge of what
+ those functions are). */
+
+#ifndef GCC_GCOV_IO_H
+#define GCC_GCOV_IO_H
+
+/* GCOV key-value pair linked list type. */
+
+struct gcov_kvp;
+
+struct gcov_kvp
+{
+ gcov_type value;
+ gcov_type count;
+ struct gcov_kvp *next;
+};
+
+#ifndef IN_LIBGCOV
+/* About the host */
+
+typedef unsigned gcov_unsigned_t;
+typedef unsigned gcov_position_t;
+/* gcov_type is typedef'd elsewhere for the compiler */
+#if IN_GCOV
+#define GCOV_LINKAGE static
+typedef int64_t gcov_type;
+typedef uint64_t gcov_type_unsigned;
+#if IN_GCOV > 0
+#include <sys/types.h>
+#endif
+#endif
+
+#if defined (HOST_HAS_F_SETLKW)
+#define GCOV_LOCKED 1
+#else
+#define GCOV_LOCKED 0
+#endif
+
+#if defined (HOST_HAS_LK_LOCK)
+#define GCOV_LOCKED_WITH_LOCKING 1
+#else
+#define GCOV_LOCKED_WITH_LOCKING 0
+#endif
+
+#define ATTRIBUTE_HIDDEN
+
+#endif /* !IN_LIBGCOV */
+
+#ifndef GCOV_LINKAGE
+#define GCOV_LINKAGE extern
+#endif
+
+#if IN_LIBGCOV
+#define gcov_nonruntime_assert(EXPR) ((void)(0 && (EXPR)))
+#else
+#define gcov_nonruntime_assert(EXPR) gcc_assert (EXPR)
+#define gcov_error(...) fatal_error (input_location, __VA_ARGS__)
+#endif
+
+/* File suffixes. */
+#define GCOV_DATA_SUFFIX ".gcda"
+#define GCOV_NOTE_SUFFIX ".gcno"
+
+/* File magic. Must not be palindromes. */
+#define GCOV_DATA_MAGIC ((gcov_unsigned_t)0x67636461) /* "gcda" */
+#define GCOV_NOTE_MAGIC ((gcov_unsigned_t)0x67636e6f) /* "gcno" */
+#define GCOV_FILENAME_MAGIC ((gcov_unsigned_t)0x6763666e) /* "gcfn" */
+
+#include "version.h"
+
+/* Convert a magic or version number to a 4 character string. */
+#define GCOV_UNSIGNED2STRING(ARRAY,VALUE) \
+ ((ARRAY)[0] = (char)((VALUE) >> 24), \
+ (ARRAY)[1] = (char)((VALUE) >> 16), \
+ (ARRAY)[2] = (char)((VALUE) >> 8), \
+ (ARRAY)[3] = (char)((VALUE) >> 0))
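+
+/* Editorial sketch, not part of the original header: decoding a magic
+   or version word back into text.
+
+     char buf[5] = {0};
+     GCOV_UNSIGNED2STRING (buf, GCOV_NOTE_MAGIC);
+
+   leaves "gcno" in BUF; version words such as the 'B70e' example
+   above decode the same way.  */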
+
+/* The record tags. Values [1..3f] are for tags which may be in either
+ file. Values [41..9f] for those in the note file and [a1..ff] for
+ the data file. The tag value zero is used as an explicit end of
+ file marker -- it is not required to be present.
+ All length values are in bytes. */
+
+#define GCOV_WORD_SIZE 4
+
+#define GCOV_TAG_FUNCTION ((gcov_unsigned_t)0x01000000)
+#define GCOV_TAG_FUNCTION_LENGTH (3 * GCOV_WORD_SIZE)
+#define GCOV_TAG_BLOCKS ((gcov_unsigned_t)0x01410000)
+#define GCOV_TAG_BLOCKS_LENGTH(NUM) (NUM)
+#define GCOV_TAG_ARCS ((gcov_unsigned_t)0x01430000)
+#define GCOV_TAG_ARCS_LENGTH(NUM) (1 + (NUM) * 2 * GCOV_WORD_SIZE)
+#define GCOV_TAG_ARCS_NUM(LENGTH) (((LENGTH / GCOV_WORD_SIZE) - 1) / 2)
+#define GCOV_TAG_LINES ((gcov_unsigned_t)0x01450000)
+#define GCOV_TAG_COUNTER_BASE ((gcov_unsigned_t)0x01a10000)
+#define GCOV_TAG_COUNTER_LENGTH(NUM) ((NUM) * 2 * GCOV_WORD_SIZE)
+#define GCOV_TAG_COUNTER_NUM(LENGTH) ((LENGTH / GCOV_WORD_SIZE) / 2)
+#define GCOV_TAG_OBJECT_SUMMARY ((gcov_unsigned_t)0xa1000000)
+#define GCOV_TAG_OBJECT_SUMMARY_LENGTH (2 * GCOV_WORD_SIZE)
+#define GCOV_TAG_PROGRAM_SUMMARY ((gcov_unsigned_t)0xa3000000) /* Obsolete */
+#define GCOV_TAG_AFDO_FILE_NAMES ((gcov_unsigned_t)0xaa000000)
+#define GCOV_TAG_AFDO_FUNCTION ((gcov_unsigned_t)0xac000000)
+#define GCOV_TAG_AFDO_WORKING_SET ((gcov_unsigned_t)0xaf000000)
+
+
+/* Counters that are collected. */
+
+#define DEF_GCOV_COUNTER(COUNTER, NAME, MERGE_FN) COUNTER,
+enum {
+#include "gcov-counter.def"
+GCOV_COUNTERS
+};
+#undef DEF_GCOV_COUNTER
+
+/* The first of counters used for value profiling. They must form a
+ consecutive interval and their order must match the order of
+ HIST_TYPEs in value-prof.h. */
+#define GCOV_FIRST_VALUE_COUNTER GCOV_COUNTER_V_INTERVAL
+
+/* The last of counters used for value profiling. */
+#define GCOV_LAST_VALUE_COUNTER (GCOV_COUNTERS - 1)
+
+/* Number of counters used for value profiling. */
+#define GCOV_N_VALUE_COUNTERS \
+ (GCOV_LAST_VALUE_COUNTER - GCOV_FIRST_VALUE_COUNTER + 1)
+
+/* Number of top N counters when being in memory. */
+#define GCOV_TOPN_MEM_COUNTERS 3
+
+/* Number of top N counters in disk representation. */
+#define GCOV_TOPN_DISK_COUNTERS 2
+
+/* Maximum number of tracked TOP N value profiles. */
+#define GCOV_TOPN_MAXIMUM_TRACKED_VALUES 32
+
+/* Convert a counter index to a tag. */
+#define GCOV_TAG_FOR_COUNTER(COUNT) \
+ (GCOV_TAG_COUNTER_BASE + ((gcov_unsigned_t)(COUNT) << 17))
+/* Convert a tag to a counter. */
+#define GCOV_COUNTER_FOR_TAG(TAG) \
+ ((unsigned)(((TAG) - GCOV_TAG_COUNTER_BASE) >> 17))
+/* Check whether a tag is a counter tag. */
+#define GCOV_TAG_IS_COUNTER(TAG) \
+ (!((TAG) & 0xFFFF) && GCOV_COUNTER_FOR_TAG (TAG) < GCOV_COUNTERS)
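+
+/* Worked example (editorial note): GCOV_COUNTER_ARCS is counter
+   index 0, so GCOV_TAG_FOR_COUNTER yields
+   0x01a10000 + (0 << 17) == 0x01a10000, and GCOV_COUNTER_FOR_TAG
+   maps that tag back to 0.  The shift by 17 keeps the low 16 bits
+   clear, which is exactly what GCOV_TAG_IS_COUNTER tests.  */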
+
+/* The tag level mask has 1's in the positions of the inner levels and
+ the LSB of the current level, and 0's in the remaining bits of the
+ current level and all outer levels.  */
+#define GCOV_TAG_MASK(TAG) (((TAG) - 1) ^ (TAG))
+
+/* Return nonzero if SUB is an immediate subtag of TAG. */
+#define GCOV_TAG_IS_SUBTAG(TAG,SUB) \
+ (GCOV_TAG_MASK (TAG) >> 8 == GCOV_TAG_MASK (SUB) \
+ && !(((SUB) ^ (TAG)) & ~GCOV_TAG_MASK (TAG)))
+
+/* Return nonzero if SUB is at a sublevel to TAG. */
+#define GCOV_TAG_IS_SUBLEVEL(TAG,SUB) \
+ (GCOV_TAG_MASK (TAG) > GCOV_TAG_MASK (SUB))
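+
+/* Worked example (editorial note): GCOV_TAG_MASK (GCOV_TAG_FUNCTION)
+   is (0x01000000 - 1) ^ 0x01000000 == 0x01ffffff, while
+   GCOV_TAG_MASK (GCOV_TAG_BLOCKS) is 0x0140ffff ^ 0x01410000
+   == 0x0001ffff.  As 0x01ffffff >> 8 == 0x0001ffff and the two tags
+   differ only within the function mask, GCOV_TAG_BLOCKS is an
+   immediate subtag of GCOV_TAG_FUNCTION.  */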
+
+/* Basic block flags. */
+#define GCOV_BLOCK_UNEXPECTED (1 << 1)
+
+/* Arc flags. */
+#define GCOV_ARC_ON_TREE (1 << 0)
+#define GCOV_ARC_FAKE (1 << 1)
+#define GCOV_ARC_FALLTHROUGH (1 << 2)
+
+/* Object & program summary record. */
+
+struct gcov_summary
+{
+ gcov_unsigned_t runs; /* Number of program runs. */
+ gcov_type sum_max; /* Sum of individual run max values. */
+};
+
+#if !defined(inhibit_libc)
+
+/* Functions for reading and writing gcov files. In libgcov you can
+ open the file for reading then writing. Elsewhere you can open the
+ file either for reading or for writing. When reading a file you may
+ use the gcov_read_* functions, gcov_sync, gcov_position, and
+ gcov_error. When writing a file you may use the gcov_write*
+ functions and gcov_error. When a file is to be rewritten,
+ you use the functions for reading, then gcov_rewrite, then the
+ functions for writing. Your file may become corrupted if you break
+ these invariants. */
+
+#if !IN_LIBGCOV || defined (IN_GCOV_TOOL)
+GCOV_LINKAGE int gcov_magic (gcov_unsigned_t, gcov_unsigned_t);
+#endif
+
+/* Available everywhere. */
+GCOV_LINKAGE int gcov_open (const char *, int) ATTRIBUTE_HIDDEN;
+GCOV_LINKAGE int gcov_close (void) ATTRIBUTE_HIDDEN;
+GCOV_LINKAGE gcov_unsigned_t gcov_read_unsigned (void) ATTRIBUTE_HIDDEN;
+GCOV_LINKAGE gcov_type gcov_read_counter (void) ATTRIBUTE_HIDDEN;
+GCOV_LINKAGE void gcov_read_summary (struct gcov_summary *) ATTRIBUTE_HIDDEN;
+GCOV_LINKAGE const char *gcov_read_string (void);
+GCOV_LINKAGE void gcov_sync (gcov_position_t /*base*/,
+ gcov_unsigned_t /*length */);
+char *mangle_path (char const *base);
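+
+/* Editorial sketch of the usual read prologue, per the file grammar
+   above (simplified, no error handling; cf. the gcov sources):
+
+     if (gcov_open (filename, 1))
+       {
+         gcov_unsigned_t magic = gcov_read_unsigned ();
+         gcov_unsigned_t version = gcov_read_unsigned ();
+         gcov_unsigned_t stamp = gcov_read_unsigned ();
+         ...
+       }
+*/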
+
+#if !IN_GCOV
+/* Available outside gcov */
+GCOV_LINKAGE void gcov_write (const void *, unsigned) ATTRIBUTE_HIDDEN;
+GCOV_LINKAGE void gcov_write_unsigned (gcov_unsigned_t) ATTRIBUTE_HIDDEN;
+#endif
+
+#if !IN_GCOV && !IN_LIBGCOV
+/* Available only in compiler */
+GCOV_LINKAGE void gcov_write_string (const char *);
+GCOV_LINKAGE void gcov_write_filename (const char *);
+GCOV_LINKAGE gcov_position_t gcov_write_tag (gcov_unsigned_t);
+GCOV_LINKAGE void gcov_write_length (gcov_position_t /*position*/);
+#endif
+
+#if IN_GCOV > 0
+/* Available in gcov */
+GCOV_LINKAGE time_t gcov_time (void);
+#endif
+
+#endif /* !inhibit_libc */
+
+#endif /* GCC_GCOV_IO_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcse-common.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcse-common.h
new file mode 100644
index 0000000..2101880
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcse-common.h
@@ -0,0 +1,47 @@
+/* Structures and prototypes common across the normal GCSE
+ implementation and the post-reload implementation.
+ Copyright (C) 1997-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GCSE_COMMON_H
+#define GCC_GCSE_COMMON_H
+
+typedef vec<rtx_insn *> vec_rtx_heap;
+struct modify_pair
+{
+ rtx dest; /* A MEM. */
+ rtx dest_addr; /* The canonical address of `dest'. */
+};
+
+typedef vec<modify_pair> vec_modify_pair_heap;
+
+struct gcse_note_stores_info
+{
+ rtx_insn *insn;
+ vec<modify_pair> *canon_mem_list;
+};
+
+extern void compute_transp (const_rtx, int, sbitmap *, bitmap,
+ bitmap, vec<modify_pair> *);
+extern void record_last_mem_set_info_common (rtx_insn *,
+ vec<rtx_insn *> *,
+ vec<modify_pair> *,
+ bitmap, bitmap);
+
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcse.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcse.h
new file mode 100644
index 0000000..5582b29
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gcse.h
@@ -0,0 +1,45 @@
+/* Global common subexpression elimination/Partial redundancy elimination
+ and global constant/copy propagation for GNU compiler.
+ Copyright (C) 1997-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GCSE_H
+#define GCC_GCSE_H
+
+/* Target-dependent globals. */
+struct target_gcse {
+ /* Nonzero for each mode that supports (set (reg) (reg)).
+ This is trivially true for integer and floating point values.
+ It may or may not be true for condition codes. */
+ char x_can_copy[(int) NUM_MACHINE_MODES];
+
+ /* True if the previous field has been initialized. */
+ bool x_can_copy_init_p;
+};
+
+extern struct target_gcse default_target_gcse;
+#if SWITCHABLE_TARGET
+extern struct target_gcse *this_target_gcse;
+#else
+#define this_target_gcse (&default_target_gcse)
+#endif
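+
+/* Editorial sketch: passes consult the table through the switchable
+   pointer, e.g.
+
+     if (this_target_gcse->x_can_copy[(int) mode])
+       ... a (set (reg) (reg)) in MODE is known to work ...
+
+   (hypothetical use; the real accessors live in gcse.cc).  */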
+
+void gcse_cc_finalize (void);
+extern bool gcse_or_cprop_is_too_expensive (const char *);
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/generic-match.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/generic-match.h
new file mode 100644
index 0000000..8b1f925
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/generic-match.h
@@ -0,0 +1,33 @@
+/* Generic simplify definitions.
+
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+ Contributed by Richard Guenther <rguenther@suse.de>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GENERIC_MATCH_H
+#define GCC_GENERIC_MATCH_H
+
+/* Note the following functions are supposed to be only used from
+ fold_unary_loc, fold_binary_loc and fold_ternary_loc respectively.
+ They are not considered a public API. */
+
+tree generic_simplify (location_t, enum tree_code, tree, tree);
+tree generic_simplify (location_t, enum tree_code, tree, tree, tree);
+tree generic_simplify (location_t, enum tree_code, tree, tree, tree, tree);
+
+#endif /* GCC_GENERIC_MATCH_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gengtype.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gengtype.h
new file mode 100644
index 0000000..4e5df54
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gengtype.h
@@ -0,0 +1,521 @@
+/* Process source files and output type information.
+ Copyright (C) 2002-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GENGTYPE_H
+#define GCC_GENGTYPE_H
+
+#define obstack_chunk_alloc xmalloc
+#define obstack_chunk_free free
+#define OBSTACK_CHUNK_SIZE 0
+
+/* Sets of accepted source languages like C, C++, Ada... are
+ represented by a bitmap. */
+typedef unsigned lang_bitmap;
+
+/* Variable length structure representing an input file. A hash table
+ ensures uniqueness for a given input file name. The only function
+ allocating input_file-s is input_file_by_name. */
+struct input_file_st
+{
+ struct outf* inpoutf; /* Cached corresponding output file, computed
+ in get_output_file_with_visibility. */
+ lang_bitmap inpbitmap; /* The set of languages using this file. */
+ bool inpisplugin; /* Flag set for plugin input files. */
+ char inpname[1]; /* A variable-length array, ended by a null
+ char. */
+};
+typedef struct input_file_st input_file;
+
+/* A file position, mostly for error messages.
+ The FILE element may be compared using pointer equality. */
+struct fileloc
+{
+ const input_file *file;
+ int line;
+};
+
+
+/* Table of all input files and its size. */
+extern const input_file** gt_files;
+extern size_t num_gt_files;
+
+/* Table of headers to be included in gtype-desc.cc that are generated
+ during the build. These are identified as "./<filename>.h". */
+extern const char **build_headers;
+extern size_t num_build_headers;
+
+/* A number of places use the name of this "gengtype.cc" file for a
+ location for things that we can't rely on the source to define. We
+ also need to refer to the "system.h" file specifically. These two
+ pointers are initialized early in main. */
+extern input_file* this_file;
+extern input_file* system_h_file;
+
+/* Retrieve or create the input_file for a given name, which is a file
+ path. This is the only function allocating input_file-s and it is
+ hash-consing them. */
+input_file* input_file_by_name (const char* name);
+
+/* For F an input_file, return the relative path to F from $(srcdir)
+ if the latter is a prefix in F, NULL otherwise. */
+const char *get_file_srcdir_relative_path (const input_file *inpf);
+
+/* Get the name of an input file. */
+inline const char*
+get_input_file_name (const input_file *inpf)
+{
+ if (inpf)
+ return inpf->inpname;
+ return NULL;
+}
+
+/* Return a bitmap which has bit `1 << BASE_FILE_<lang>' set iff
+ INPUT_FILE is used by <lang>.
+
+ This function should be written to assume that a file _is_ used
+ if the situation is unclear. If it wrongly assumes a file _is_ used,
+ a linker error will result. If it wrongly assumes a file _is not_ used,
+ some GC roots may be missed, which is a much harder-to-debug problem.
+ */
+
+inline lang_bitmap
+get_lang_bitmap (const input_file* inpf)
+{
+ if (inpf == NULL)
+ return 0;
+ return inpf->inpbitmap;
+}
+
+/* Set the bitmap returned by get_lang_bitmap. The only legitimate
+ callers of this function are read_input_list & read_state_*. */
+inline void
+set_lang_bitmap (input_file* inpf, lang_bitmap n)
+{
+ gcc_assert (inpf);
+ inpf->inpbitmap = n;
+}
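+
+/* Editorial sketch: a membership test, assuming the generated
+   BASE_FILE_<lang> constants mentioned above:
+
+     if (get_lang_bitmap (inpf) & (1 << BASE_FILE_C))
+       ... INPF is used by the C front end ...
+*/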
+
+/* Vector of per-language directories. */
+extern const char **lang_dir_names;
+extern size_t num_lang_dirs;
+
+/* Data types handed around within, but opaque to, the lexer and parser. */
+typedef struct pair *pair_p;
+typedef struct type *type_p;
+typedef const struct type *const_type_p;
+typedef struct options *options_p;
+
+/* Variables used to communicate between the lexer and the parser. */
+extern int lexer_toplevel_done;
+extern struct fileloc lexer_line;
+
+/* Various things, organized as linked lists, needed both in
+ gengtype.cc & in gengtype-state.cc files. */
+extern pair_p typedefs;
+extern type_p structures;
+extern pair_p variables;
+
+/* An enum for distinguishing GGC vs PCH. */
+
+enum write_types_kinds
+{
+ WTK_GGC,
+ WTK_PCH,
+
+ NUM_WTK
+};
+
+/* Discriminating kind of types we can understand.  */
+
+enum typekind {
+ TYPE_NONE=0, /* Never used, so zeroed memory is invalid. */
+ TYPE_UNDEFINED, /* We have not yet seen a definition for this type.
+ If a type is still undefined when generating code,
+ an error will be generated. */
+ TYPE_SCALAR, /* Scalar types like char. */
+ TYPE_STRING, /* The string type. */
+ TYPE_STRUCT, /* Type for GTY-ed structs. */
+ TYPE_UNION, /* Type for GTY-ed discriminated unions. */
+ TYPE_POINTER, /* Pointer type to GTY-ed type. */
+ TYPE_ARRAY, /* Array of GTY-ed types. */
+ TYPE_CALLBACK, /* A function pointer that needs relocation if
+ the executable has been loaded at a different
+ address. */
+ TYPE_LANG_STRUCT, /* GCC front-end language specific structs.
+ Various languages may have homonymous but
+ different structs. */
+ TYPE_USER_STRUCT /* User defined type. Walkers and markers for
+ this type are assumed to be provided by the
+ user. */
+};
+
+/* Discriminating kind for options. */
+enum option_kind {
+ OPTION_NONE=0, /* Never used, so zeroed memory is invalid. */
+ OPTION_STRING, /* A string-valued option. Most options are
+ strings. */
+ OPTION_TYPE, /* A type-valued option. */
+ OPTION_NESTED /* Option data for 'nested_ptr'. */
+};
+
+
+/* A way to pass data through to the output end. */
+struct options {
+ struct options *next; /* next option of the same pair. */
+ const char *name; /* GTY option name. */
+ enum option_kind kind; /* discriminating option kind. */
+ union {
+ const char* string; /* When OPTION_STRING. */
+ type_p type; /* When OPTION_TYPE. */
+ struct nested_ptr_data* nested; /* when OPTION_NESTED. */
+ } info;
+};
+
+
+/* Option data for the 'nested_ptr' option. */
+struct nested_ptr_data {
+ type_p type;
+ const char *convert_to;
+ const char *convert_from;
+};
+
+/* Some functions to create various options structures with name NAME
+ and info INFO. NEXT is the next option in the chain. */
+
+/* Create a string option. */
+options_p create_string_option (options_p next, const char* name,
+ const char* info);
+
+/* Create a type option. */
+options_p create_type_option (options_p next, const char* name,
+ type_p info);
+
+/* Create a nested option. */
+options_p create_nested_option (options_p next, const char* name,
+ struct nested_ptr_data* info);
+
+/* Create a nested pointer option. */
+options_p create_nested_ptr_option (options_p next, type_p t,
+ const char *to, const char *from);
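+
+/* Editorial sketch: options chain through NEXT, so an annotation such
+   as GTY ((length ("len"), tag ("0"))) would be built up roughly as
+
+     options_p o = create_string_option (NULL, "tag", "0");
+     o = create_string_option (o, "length", "len");
+
+   (hypothetical values; the parser performs the equivalent while
+   reading declarations).  */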
+
+/* A name and a type. */
+struct pair {
+ pair_p next; /* The next pair in the linked list. */
+ const char *name; /* The defined name. */
+ type_p type; /* Its GTY-ed type. */
+ struct fileloc line; /* The file location. */
+ options_p opt; /* GTY options, as a linked list. */
+};
+
+/* Usage information for GTY-ed types. Gengtype only has to care
+ about used GTY-ed types. Types are initially unused, and their
+ usage is computed by the set_gc_used_type and set_gc_used
+ functions. */
+
+enum gc_used_enum {
+
+ /* We need zeroed memory to mean initially unused. */
+ GC_UNUSED=0,
+
+ /* The GTY-ed type is used, e.g by a GTY-ed variable or a field
+ inside a GTY-ed used type. */
+ GC_USED,
+
+ /* For GTY-ed structures whose definitions we haven't seen so far
+ when we encounter a pointer to it that is annotated with
+ ``maybe_undef''. If after reading in everything we don't have
+ source file information for it, we assume that it never has been
+ defined. */
+ GC_MAYBE_POINTED_TO,
+
+ /* For known GTY-ed structures which are pointed to by GTY-ed
+ variables or fields. */
+ GC_POINTED_TO
+};
+
+/* Our type structure describes all types handled by gengtype. */
+struct type {
+ /* Discriminating kind, cannot be TYPE_NONE. */
+ enum typekind kind;
+
+ /* For top-level structs or unions, the 'next' field links the
+ global list 'structures'; for lang_structs, their homonymous structs are
+ linked using this 'next' field. The homonymous list starts at the
+ s.lang_struct field of the lang_struct. See the new_structure function
+ for details. This is tricky! */
+ type_p next;
+
+ /* State number used when writing & reading the persistent state. A
+ type with a positive number has already been written. For ease
+ of debugging, newly allocated types have a unique negative
+ number. */
+ int state_number;
+
+ /* Each GTY-ed type which is pointed to by some GTY-ed type knows
+ the GTY pointer type pointing to it. See create_pointer
+ function. */
+ type_p pointer_to;
+
+ /* Type usage information, computed by set_gc_used_type and
+ set_gc_used functions. */
+ enum gc_used_enum gc_used;
+
+ /* The following union is discriminated by the 'kind' field above. */
+ union {
+ /* TYPE_NONE is impossible. */
+
+ /* when TYPE_POINTER: */
+ type_p p;
+
+ /* when TYPE_STRUCT or TYPE_UNION or TYPE_LANG_STRUCT, we have an
+ aggregate type containing fields: */
+ struct {
+ const char *tag; /* the aggregate tag, if any. */
+ struct fileloc line; /* the source location. */
+ pair_p fields; /* the linked list of fields. */
+ options_p opt; /* the GTY options if any. */
+ lang_bitmap bitmap; /* the set of front-end languages
+ using that GTY-ed aggregate. */
+ /* For TYPE_LANG_STRUCT, the lang_struct field gives the first
+ element of a linked list of homonymous struct or union types.
+ Within this list, each homonymous type has as its lang_struct
+ field the original TYPE_LANG_STRUCT type. This is a dirty
+ trick, see the new_structure function for details. */
+ type_p lang_struct;
+
+ type_p base_class; /* the parent class, if any. */
+
+ /* The following two fields are not serialized in state files, and
+ are instead reconstructed on load. */
+
+ /* The head of a singly-linked list of immediate descendants in
+ the inheritance hierarchy. */
+ type_p first_subclass;
+ /* The next in that list. */
+ type_p next_sibling_class;
+
+ /* Have we already written ggc/pch user func for ptr to this?
+ (in write_user_func_for_structure_ptr). */
+ bool wrote_user_func_for_ptr[NUM_WTK];
+ } s;
+
+ /* when TYPE_SCALAR: */
+ bool scalar_is_char;
+
+ /* when TYPE_ARRAY: */
+ struct {
+ type_p p; /* The array component type. */
+ const char *len; /* The string if any giving its length. */
+ } a;
+
+ } u;
+};
+
+/* The one and only TYPE_STRING. */
+extern struct type string_type;
+
+/* The two and only TYPE_SCALARs. Their u.scalar_is_char flags are
+ set early in main. */
+extern struct type scalar_nonchar;
+extern struct type scalar_char;
+
+/* The one and only TYPE_CALLBACK. */
+extern struct type callback_type;
+
+/* Test if a type is a union, either a plain one or a language
+ specific one. */
+#define UNION_P(x) \
+ ((x)->kind == TYPE_UNION \
+ || ((x)->kind == TYPE_LANG_STRUCT \
+ && (x)->u.s.lang_struct->kind == TYPE_UNION))
+
+/* Test if a type is a union or a structure, perhaps a language
+ specific one. */
+inline bool
+union_or_struct_p (enum typekind kind)
+{
+ return (kind == TYPE_UNION
+ || kind == TYPE_STRUCT
+ || kind == TYPE_LANG_STRUCT
+ || kind == TYPE_USER_STRUCT);
+}
+
+inline bool
+union_or_struct_p (const_type_p x)
+{
+ return union_or_struct_p (x->kind);
+}
+
+/* Give the file location of a type, if any. */
+inline struct fileloc*
+type_fileloc (type_p t)
+{
+ if (!t)
+ return NULL;
+ if (union_or_struct_p (t))
+ return &t->u.s.line;
+ return NULL;
+}
+
+/* Structure representing an output file. */
+struct outf
+{
+ struct outf *next;
+ const char *name;
+ size_t buflength;
+ size_t bufused;
+ char *buf;
+};
+typedef struct outf *outf_p;
+
+/* The list of output files. */
+extern outf_p output_files;
+
+/* The output header file that is included into pretty much every
+ source file. */
+extern outf_p header_file;
+
+/* Print, like fprintf, to O. No-op if O is NULL. */
+void
+oprintf (outf_p o, const char *S, ...)
+ ATTRIBUTE_PRINTF_2;
+
+/* An output file, suitable for definitions, that can see declarations
+ made in INPF and is linked into every language that uses INPF. May
+ return NULL in plugin mode. The INPF argument is almost const, but
+ since the result is cached in its inpoutf field it cannot be
+ declared const. */
+outf_p get_output_file_with_visibility (input_file* inpf);
+
+/* The name of an output file, suitable for definitions, that can see
+ declarations made in INPF and is linked into every language that
+ uses INPF. May return NULL. */
+const char *get_output_file_name (input_file *inpf);
+
+
+/* Source directory. */
+extern const char *srcdir; /* (-S) program argument. */
+
+/* Length of srcdir name. */
+extern size_t srcdir_len;
+
+/* Variable used for reading and writing the state. */
+extern const char *read_state_filename; /* (-r) program argument. */
+extern const char *write_state_filename; /* (-w) program argument. */
+
+/* Functions reading and writing the entire gengtype state, called from
+ main, and implemented in file gengtype-state.cc. */
+void read_state (const char* path);
+/* Write the state, and update the state_number field in types. */
+void write_state (const char* path);
+
+
+/* Print an error message. */
+extern void error_at_line
+(const struct fileloc *pos, const char *msg, ...) ATTRIBUTE_PRINTF_2;
+
+/* Constructor routines for types. */
+extern void do_typedef (const char *s, type_p t, struct fileloc *pos);
+extern void do_scalar_typedef (const char *s, struct fileloc *pos);
+extern type_p resolve_typedef (const char *s, struct fileloc *pos);
+extern void add_subclass (type_p base, type_p subclass);
+extern type_p new_structure (const char *name, enum typekind kind,
+ struct fileloc *pos, pair_p fields,
+ options_p o, type_p base);
+type_p create_user_defined_type (const char *, struct fileloc *);
+extern type_p find_structure (const char *s, enum typekind kind);
+extern type_p create_scalar_type (const char *name);
+extern type_p create_pointer (type_p t);
+extern type_p create_array (type_p t, const char *len);
+extern pair_p create_field_at (pair_p next, type_p type,
+ const char *name, options_p opt,
+ struct fileloc *pos);
+extern pair_p nreverse_pairs (pair_p list);
+extern type_p adjust_field_type (type_p, options_p);
+extern void note_variable (const char *s, type_p t, options_p o,
+ struct fileloc *pos);
+
+/* Lexer and parser routines. */
+extern int yylex (const char **yylval);
+extern void yybegin (const char *fname);
+extern void yyend (void);
+extern void parse_file (const char *name);
+extern bool hit_error;
+
+/* Token codes. */
+enum gty_token
+{
+ EOF_TOKEN = 0,
+
+ /* Per standard convention, codes in the range (0, UCHAR_MAX]
+ represent single characters with those character codes. */
+ CHAR_TOKEN_OFFSET = UCHAR_MAX + 1,
+ GTY_TOKEN = CHAR_TOKEN_OFFSET,
+ TYPEDEF,
+ EXTERN,
+ STATIC,
+ UNION,
+ STRUCT,
+ ENUM,
+ ELLIPSIS,
+ PTR_ALIAS,
+ NESTED_PTR,
+ USER_GTY,
+ NUM,
+ SCALAR,
+ ID,
+ STRING,
+ CHAR,
+ ARRAY,
+ IGNORABLE_CXX_KEYWORD,
+
+ /* print_token assumes that any token >= FIRST_TOKEN_WITH_VALUE may have
+ a meaningful value to be printed. */
+ FIRST_TOKEN_WITH_VALUE = USER_GTY
+};
+
+
+/* Level for verbose messages, e.g. output file generation... */
+extern int verbosity_level; /* (-v) program argument. */
+
+/* For debugging purposes we provide two flags. */
+
+/* Dump everything to understand gengtype's state. Might be useful to
+ gengtype users. */
+extern int do_dump; /* (-d) program argument. */
+
+/* Trace the execution with many DBGPRINTF calls (each giving its
+ position inside the gengtype source code). Only useful to debug
+ gengtype itself. */
+extern int do_debug; /* (-D) program argument. */
+
+#define DBGPRINTF(Fmt,...) do {if (do_debug) \
+ fprintf (stderr, "%s:%d: " Fmt "\n", \
+ lbasename (__FILE__),__LINE__, ##__VA_ARGS__);} while (0)
+void dbgprint_count_type_at (const char *, int, const char *, type_p);
+#define DBGPRINT_COUNT_TYPE(Msg,Ty) do {if (do_debug) \
+ dbgprint_count_type_at (__FILE__, __LINE__, Msg, Ty);}while (0)
+
+#define FOR_ALL_INHERITED_FIELDS(TYPE, FIELD_VAR) \
+ for (type_p sub = (TYPE); sub; sub = sub->u.s.base_class) \
+ for (FIELD_VAR = sub->u.s.fields; FIELD_VAR; FIELD_VAR = FIELD_VAR->next)
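+
+/* Editorial sketch: walking every field of a type, including those of
+   all its base classes (hypothetical names):
+
+     pair_p fld;
+     FOR_ALL_INHERITED_FIELDS (some_type, fld)
+       note_field (fld);
+*/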
+
+extern bool
+opts_have (options_p opts, const char *str);
+
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/genrtl.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/genrtl.h
new file mode 100644
index 0000000..a425e24
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/genrtl.h
@@ -0,0 +1,1678 @@
+/* Generated automatically by gengenrtl from rtl.def. */
+
+#ifndef GCC_GENRTL_H
+#define GCC_GENRTL_H
+
+#include "statistics.h"
+
+static inline rtx
+init_rtx_fmt_0 (rtx rt, machine_mode mode)
+{
+ PUT_MODE_RAW (rt, mode);
+ X0EXP (rt, 0) = NULL_RTX;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_0_stat (RTX_CODE code, machine_mode mode MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_0 (rt, mode);
+}
+
+#define gen_rtx_fmt_0(c, m) \
+ gen_rtx_fmt_0_stat ((c), (m) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_0(c, m) \
+ init_rtx_fmt_0 (rtx_alloca ((c)), (m))
+
+static inline rtx
+init_rtx_fmt_ee (rtx rt, machine_mode mode,
+ rtx arg0,
+ rtx arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ XEXP (rt, 1) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_ee_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0,
+ rtx arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_ee (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_ee(c, m, arg0, arg1) \
+ gen_rtx_fmt_ee_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_ee(c, m, arg0, arg1) \
+ init_rtx_fmt_ee (rtx_alloca ((c)), (m), (arg0), (arg1))
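+
+/* Editorial note: rtl.h wraps these per-format helpers in per-code
+   macros; gen_rtx_PLUS (MODE, A, B), for instance, comes down to
+   gen_rtx_fmt_ee (PLUS, MODE, A, B).  The remaining formats below
+   follow the same init/gen/alloca pattern.  */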
+
+static inline rtx
+init_rtx_fmt_ue (rtx rt, machine_mode mode,
+ rtx arg0,
+ rtx arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ XEXP (rt, 1) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_ue_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0,
+ rtx arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_ue (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_ue(c, m, arg0, arg1) \
+ gen_rtx_fmt_ue_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_ue(c, m, arg0, arg1) \
+ init_rtx_fmt_ue (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_ie (rtx rt, machine_mode mode,
+ int arg0,
+ rtx arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XINT (rt, 0) = arg0;
+ XEXP (rt, 1) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_ie_stat (RTX_CODE code, machine_mode mode,
+ int arg0,
+ rtx arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_ie (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_ie(c, m, arg0, arg1) \
+ gen_rtx_fmt_ie_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_ie(c, m, arg0, arg1) \
+ init_rtx_fmt_ie (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_E (rtx rt, machine_mode mode,
+ rtvec arg0)
+{
+ PUT_MODE_RAW (rt, mode);
+ XVEC (rt, 0) = arg0;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_E_stat (RTX_CODE code, machine_mode mode,
+ rtvec arg0 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_E (rt, mode, arg0);
+}
+
+#define gen_rtx_fmt_E(c, m, arg0) \
+ gen_rtx_fmt_E_stat ((c), (m), (arg0) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_E(c, m, arg0) \
+ init_rtx_fmt_E (rtx_alloca ((c)), (m), (arg0))
+
+static inline rtx
+init_rtx_fmt_i (rtx rt, machine_mode mode,
+ int arg0)
+{
+ PUT_MODE_RAW (rt, mode);
+ XINT (rt, 0) = arg0;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_i_stat (RTX_CODE code, machine_mode mode,
+ int arg0 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_i (rt, mode, arg0);
+}
+
+#define gen_rtx_fmt_i(c, m, arg0) \
+ gen_rtx_fmt_i_stat ((c), (m), (arg0) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_i(c, m, arg0) \
+ init_rtx_fmt_i (rtx_alloca ((c)), (m), (arg0))
+
+static inline rtx
+init_rtx_fmt_uuBeiie (rtx rt, machine_mode mode,
+ rtx arg0,
+ rtx arg1,
+ basic_block arg2,
+ rtx arg3,
+ int arg4,
+ int arg5,
+ rtx arg6)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ XEXP (rt, 1) = arg1;
+ XBBDEF (rt, 2) = arg2;
+ XEXP (rt, 3) = arg3;
+ XINT (rt, 4) = arg4;
+ XINT (rt, 5) = arg5;
+ XEXP (rt, 6) = arg6;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_uuBeiie_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0,
+ rtx arg1,
+ basic_block arg2,
+ rtx arg3,
+ int arg4,
+ int arg5,
+ rtx arg6 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_uuBeiie (rt, mode, arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+}
+
+#define gen_rtx_fmt_uuBeiie(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6) \
+ gen_rtx_fmt_uuBeiie_stat ((c), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_uuBeiie(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6) \
+ init_rtx_fmt_uuBeiie (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6))
+
+static inline rtx
+init_rtx_fmt_uuBeiie0 (rtx rt, machine_mode mode,
+ rtx arg0,
+ rtx arg1,
+ basic_block arg2,
+ rtx arg3,
+ int arg4,
+ int arg5,
+ rtx arg6)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ XEXP (rt, 1) = arg1;
+ XBBDEF (rt, 2) = arg2;
+ XEXP (rt, 3) = arg3;
+ XINT (rt, 4) = arg4;
+ XINT (rt, 5) = arg5;
+ XEXP (rt, 6) = arg6;
+ X0EXP (rt, 7) = NULL_RTX;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_uuBeiie0_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0,
+ rtx arg1,
+ basic_block arg2,
+ rtx arg3,
+ int arg4,
+ int arg5,
+ rtx arg6 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_uuBeiie0 (rt, mode, arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+}
+
+#define gen_rtx_fmt_uuBeiie0(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6) \
+ gen_rtx_fmt_uuBeiie0_stat ((c), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_uuBeiie0(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6) \
+ init_rtx_fmt_uuBeiie0 (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6))
+
+static inline rtx
+init_rtx_fmt_uuBeiiee (rtx rt, machine_mode mode,
+ rtx arg0,
+ rtx arg1,
+ basic_block arg2,
+ rtx arg3,
+ int arg4,
+ int arg5,
+ rtx arg6,
+ rtx arg7)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ XEXP (rt, 1) = arg1;
+ XBBDEF (rt, 2) = arg2;
+ XEXP (rt, 3) = arg3;
+ XINT (rt, 4) = arg4;
+ XINT (rt, 5) = arg5;
+ XEXP (rt, 6) = arg6;
+ XEXP (rt, 7) = arg7;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_uuBeiiee_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0,
+ rtx arg1,
+ basic_block arg2,
+ rtx arg3,
+ int arg4,
+ int arg5,
+ rtx arg6,
+ rtx arg7 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_uuBeiiee (rt, mode, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+}
+
+#define gen_rtx_fmt_uuBeiiee(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
+ gen_rtx_fmt_uuBeiiee_stat ((c), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6), (arg7) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_uuBeiiee(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
+ init_rtx_fmt_uuBeiiee (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6), (arg7))
+
+static inline rtx
+init_rtx_fmt_uuBe0000 (rtx rt, machine_mode mode,
+ rtx arg0,
+ rtx arg1,
+ basic_block arg2,
+ rtx arg3)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ XEXP (rt, 1) = arg1;
+ XBBDEF (rt, 2) = arg2;
+ XEXP (rt, 3) = arg3;
+ X0EXP (rt, 4) = NULL_RTX;
+ X0EXP (rt, 5) = NULL_RTX;
+ X0EXP (rt, 6) = NULL_RTX;
+ X0EXP (rt, 7) = NULL_RTX;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_uuBe0000_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0,
+ rtx arg1,
+ basic_block arg2,
+ rtx arg3 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_uuBe0000 (rt, mode, arg0, arg1, arg2, arg3);
+}
+
+#define gen_rtx_fmt_uuBe0000(c, m, arg0, arg1, arg2, arg3) \
+ gen_rtx_fmt_uuBe0000_stat ((c), (m), (arg0), (arg1), (arg2), (arg3) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_uuBe0000(c, m, arg0, arg1, arg2, arg3) \
+ init_rtx_fmt_uuBe0000 (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3))
+
+static inline rtx
+init_rtx_fmt_uu00000 (rtx rt, machine_mode mode,
+ rtx arg0,
+ rtx arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ XEXP (rt, 1) = arg1;
+ X0EXP (rt, 2) = NULL_RTX;
+ X0EXP (rt, 3) = NULL_RTX;
+ X0EXP (rt, 4) = NULL_RTX;
+ X0EXP (rt, 5) = NULL_RTX;
+ X0EXP (rt, 6) = NULL_RTX;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_uu00000_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0,
+ rtx arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_uu00000 (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_uu00000(c, m, arg0, arg1) \
+ gen_rtx_fmt_uu00000_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_uu00000(c, m, arg0, arg1) \
+ init_rtx_fmt_uu00000 (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_uuB00is (rtx rt, machine_mode mode,
+ rtx arg0,
+ rtx arg1,
+ basic_block arg2,
+ int arg3,
+ const char *arg4)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ XEXP (rt, 1) = arg1;
+ XBBDEF (rt, 2) = arg2;
+ X0EXP (rt, 3) = NULL_RTX;
+ X0EXP (rt, 4) = NULL_RTX;
+ XINT (rt, 5) = arg3;
+ XSTR (rt, 6) = arg4;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_uuB00is_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0,
+ rtx arg1,
+ basic_block arg2,
+ int arg3,
+ const char *arg4 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_uuB00is (rt, mode, arg0, arg1, arg2, arg3, arg4);
+}
+
+#define gen_rtx_fmt_uuB00is(c, m, arg0, arg1, arg2, arg3, arg4) \
+ gen_rtx_fmt_uuB00is_stat ((c), (m), (arg0), (arg1), (arg2), (arg3), (arg4) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_uuB00is(c, m, arg0, arg1, arg2, arg3, arg4) \
+ init_rtx_fmt_uuB00is (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3), (arg4))
+
+static inline rtx
+init_rtx_fmt_si (rtx rt, machine_mode mode,
+ const char *arg0,
+ int arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XSTR (rt, 0) = arg0;
+ XINT (rt, 1) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_si_stat (RTX_CODE code, machine_mode mode,
+ const char *arg0,
+ int arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_si (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_si(c, m, arg0, arg1) \
+ gen_rtx_fmt_si_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_si(c, m, arg0, arg1) \
+ init_rtx_fmt_si (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_ssiEEEi (rtx rt, machine_mode mode,
+ const char *arg0,
+ const char *arg1,
+ int arg2,
+ rtvec arg3,
+ rtvec arg4,
+ rtvec arg5,
+ int arg6)
+{
+ PUT_MODE_RAW (rt, mode);
+ XSTR (rt, 0) = arg0;
+ XSTR (rt, 1) = arg1;
+ XINT (rt, 2) = arg2;
+ XVEC (rt, 3) = arg3;
+ XVEC (rt, 4) = arg4;
+ XVEC (rt, 5) = arg5;
+ XINT (rt, 6) = arg6;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_ssiEEEi_stat (RTX_CODE code, machine_mode mode,
+ const char *arg0,
+ const char *arg1,
+ int arg2,
+ rtvec arg3,
+ rtvec arg4,
+ rtvec arg5,
+ int arg6 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_ssiEEEi (rt, mode, arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+}
+
+#define gen_rtx_fmt_ssiEEEi(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6) \
+ gen_rtx_fmt_ssiEEEi_stat ((c), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_ssiEEEi(c, m, arg0, arg1, arg2, arg3, arg4, arg5, arg6) \
+ init_rtx_fmt_ssiEEEi (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3), (arg4), (arg5), (arg6))
+
+static inline rtx
+init_rtx_fmt_Ei (rtx rt, machine_mode mode,
+ rtvec arg0,
+ int arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XVEC (rt, 0) = arg0;
+ XINT (rt, 1) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_Ei_stat (RTX_CODE code, machine_mode mode,
+ rtvec arg0,
+ int arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_Ei (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_Ei(c, m, arg0, arg1) \
+ gen_rtx_fmt_Ei_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_Ei(c, m, arg0, arg1) \
+ init_rtx_fmt_Ei (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_eEee0 (rtx rt, machine_mode mode,
+ rtx arg0,
+ rtvec arg1,
+ rtx arg2,
+ rtx arg3)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ XVEC (rt, 1) = arg1;
+ XEXP (rt, 2) = arg2;
+ XEXP (rt, 3) = arg3;
+ X0EXP (rt, 4) = NULL_RTX;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_eEee0_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0,
+ rtvec arg1,
+ rtx arg2,
+ rtx arg3 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_eEee0 (rt, mode, arg0, arg1, arg2, arg3);
+}
+
+#define gen_rtx_fmt_eEee0(c, m, arg0, arg1, arg2, arg3) \
+ gen_rtx_fmt_eEee0_stat ((c), (m), (arg0), (arg1), (arg2), (arg3) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_eEee0(c, m, arg0, arg1, arg2, arg3) \
+ init_rtx_fmt_eEee0 (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3))
+
+static inline rtx
+init_rtx_fmt_eee (rtx rt, machine_mode mode,
+ rtx arg0,
+ rtx arg1,
+ rtx arg2)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ XEXP (rt, 1) = arg1;
+ XEXP (rt, 2) = arg2;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_eee_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0,
+ rtx arg1,
+ rtx arg2 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_eee (rt, mode, arg0, arg1, arg2);
+}
+
+#define gen_rtx_fmt_eee(c, m, arg0, arg1, arg2) \
+ gen_rtx_fmt_eee_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_eee(c, m, arg0, arg1, arg2) \
+ init_rtx_fmt_eee (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2))
+
+static inline rtx
+init_rtx_fmt_e (rtx rt, machine_mode mode,
+ rtx arg0)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_e_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_e (rt, mode, arg0);
+}
+
+#define gen_rtx_fmt_e(c, m, arg0) \
+ gen_rtx_fmt_e_stat ((c), (m), (arg0) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_e(c, m, arg0) \
+ init_rtx_fmt_e (rtx_alloca ((c)), (m), (arg0))
+
+static inline rtx
+init_rtx_fmt_ (rtx rt, machine_mode mode)
+{
+ PUT_MODE_RAW (rt, mode);
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt__stat (RTX_CODE code, machine_mode mode MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_ (rt, mode);
+}
+
+#define gen_rtx_fmt_(c, m) \
+ gen_rtx_fmt__stat ((c), (m) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_(c, m) \
+ init_rtx_fmt_ (rtx_alloca ((c)), (m))
+
+static inline rtx
+init_rtx_fmt_w (rtx rt, machine_mode mode,
+ HOST_WIDE_INT arg0)
+{
+ PUT_MODE_RAW (rt, mode);
+ XWINT (rt, 0) = arg0;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_w_stat (RTX_CODE code, machine_mode mode,
+ HOST_WIDE_INT arg0 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_w (rt, mode, arg0);
+}
+
+#define gen_rtx_fmt_w(c, m, arg0) \
+ gen_rtx_fmt_w_stat ((c), (m), (arg0) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_w(c, m, arg0) \
+ init_rtx_fmt_w (rtx_alloca ((c)), (m), (arg0))
+
+static inline rtx
+init_rtx_fmt_www (rtx rt, machine_mode mode,
+ HOST_WIDE_INT arg0,
+ HOST_WIDE_INT arg1,
+ HOST_WIDE_INT arg2)
+{
+ PUT_MODE_RAW (rt, mode);
+ XWINT (rt, 0) = arg0;
+ XWINT (rt, 1) = arg1;
+ XWINT (rt, 2) = arg2;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_www_stat (RTX_CODE code, machine_mode mode,
+ HOST_WIDE_INT arg0,
+ HOST_WIDE_INT arg1,
+ HOST_WIDE_INT arg2 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_www (rt, mode, arg0, arg1, arg2);
+}
+
+#define gen_rtx_fmt_www(c, m, arg0, arg1, arg2) \
+ gen_rtx_fmt_www_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_www(c, m, arg0, arg1, arg2) \
+ init_rtx_fmt_www (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2))
+
+static inline rtx
+init_rtx_fmt_s (rtx rt, machine_mode mode,
+ const char *arg0)
+{
+ PUT_MODE_RAW (rt, mode);
+ XSTR (rt, 0) = arg0;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_s_stat (RTX_CODE code, machine_mode mode,
+ const char *arg0 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_s (rt, mode, arg0);
+}
+
+#define gen_rtx_fmt_s(c, m, arg0) \
+ gen_rtx_fmt_s_stat ((c), (m), (arg0) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_s(c, m, arg0) \
+ init_rtx_fmt_s (rtx_alloca ((c)), (m), (arg0))
+
+static inline rtx
+init_rtx_fmt_ep (rtx rt, machine_mode mode,
+ rtx arg0,
+ poly_uint16 arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ SUBREG_BYTE (rt) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_ep_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0,
+ poly_uint16 arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_ep (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_ep(c, m, arg0, arg1) \
+ gen_rtx_fmt_ep_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_ep(c, m, arg0, arg1) \
+ init_rtx_fmt_ep (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_e0 (rtx rt, machine_mode mode,
+ rtx arg0)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ X0EXP (rt, 1) = NULL_RTX;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_e0_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_e0 (rt, mode, arg0);
+}
+
+#define gen_rtx_fmt_e0(c, m, arg0) \
+ gen_rtx_fmt_e0_stat ((c), (m), (arg0) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_e0(c, m, arg0) \
+ init_rtx_fmt_e0 (rtx_alloca ((c)), (m), (arg0))
+
+static inline rtx
+init_rtx_fmt_u (rtx rt, machine_mode mode,
+ rtx arg0)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_u_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_u (rt, mode, arg0);
+}
+
+#define gen_rtx_fmt_u(c, m, arg0) \
+ gen_rtx_fmt_u_stat ((c), (m), (arg0) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_u(c, m, arg0) \
+ init_rtx_fmt_u (rtx_alloca ((c)), (m), (arg0))
+
+static inline rtx
+init_rtx_fmt_s0 (rtx rt, machine_mode mode,
+ const char *arg0)
+{
+ PUT_MODE_RAW (rt, mode);
+ XSTR (rt, 0) = arg0;
+ X0EXP (rt, 1) = NULL_RTX;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_s0_stat (RTX_CODE code, machine_mode mode,
+ const char *arg0 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_s0 (rt, mode, arg0);
+}
+
+#define gen_rtx_fmt_s0(c, m, arg0) \
+ gen_rtx_fmt_s0_stat ((c), (m), (arg0) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_s0(c, m, arg0) \
+ init_rtx_fmt_s0 (rtx_alloca ((c)), (m), (arg0))
+
+static inline rtx
+init_rtx_fmt_te (rtx rt, machine_mode mode,
+ tree arg0,
+ rtx arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XTREE (rt, 0) = arg0;
+ XEXP (rt, 1) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_te_stat (RTX_CODE code, machine_mode mode,
+ tree arg0,
+ rtx arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_te (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_te(c, m, arg0, arg1) \
+ gen_rtx_fmt_te_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_te(c, m, arg0, arg1) \
+ init_rtx_fmt_te (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_t (rtx rt, machine_mode mode,
+ tree arg0)
+{
+ PUT_MODE_RAW (rt, mode);
+ XTREE (rt, 0) = arg0;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_t_stat (RTX_CODE code, machine_mode mode,
+ tree arg0 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_t (rt, mode, arg0);
+}
+
+#define gen_rtx_fmt_t(c, m, arg0) \
+ gen_rtx_fmt_t_stat ((c), (m), (arg0) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_t(c, m, arg0) \
+ init_rtx_fmt_t (rtx_alloca ((c)), (m), (arg0))
+
+static inline rtx
+init_rtx_fmt_iss (rtx rt, machine_mode mode,
+ int arg0,
+ const char *arg1,
+ const char *arg2)
+{
+ PUT_MODE_RAW (rt, mode);
+ XINT (rt, 0) = arg0;
+ XSTR (rt, 1) = arg1;
+ XSTR (rt, 2) = arg2;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_iss_stat (RTX_CODE code, machine_mode mode,
+ int arg0,
+ const char *arg1,
+ const char *arg2 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_iss (rt, mode, arg0, arg1, arg2);
+}
+
+#define gen_rtx_fmt_iss(c, m, arg0, arg1, arg2) \
+ gen_rtx_fmt_iss_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_iss(c, m, arg0, arg1, arg2) \
+ init_rtx_fmt_iss (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2))
+
+static inline rtx
+init_rtx_fmt_is (rtx rt, machine_mode mode,
+ int arg0,
+ const char *arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XINT (rt, 0) = arg0;
+ XSTR (rt, 1) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_is_stat (RTX_CODE code, machine_mode mode,
+ int arg0,
+ const char *arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_is (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_is(c, m, arg0, arg1) \
+ gen_rtx_fmt_is_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_is(c, m, arg0, arg1) \
+ init_rtx_fmt_is (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_isE (rtx rt, machine_mode mode,
+ int arg0,
+ const char *arg1,
+ rtvec arg2)
+{
+ PUT_MODE_RAW (rt, mode);
+ XINT (rt, 0) = arg0;
+ XSTR (rt, 1) = arg1;
+ XVEC (rt, 2) = arg2;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_isE_stat (RTX_CODE code, machine_mode mode,
+ int arg0,
+ const char *arg1,
+ rtvec arg2 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_isE (rt, mode, arg0, arg1, arg2);
+}
+
+#define gen_rtx_fmt_isE(c, m, arg0, arg1, arg2) \
+ gen_rtx_fmt_isE_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_isE(c, m, arg0, arg1, arg2) \
+ init_rtx_fmt_isE (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2))
+
+static inline rtx
+init_rtx_fmt_iE (rtx rt, machine_mode mode,
+ int arg0,
+ rtvec arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XINT (rt, 0) = arg0;
+ XVEC (rt, 1) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_iE_stat (RTX_CODE code, machine_mode mode,
+ int arg0,
+ rtvec arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_iE (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_iE(c, m, arg0, arg1) \
+ gen_rtx_fmt_iE_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_iE(c, m, arg0, arg1) \
+ init_rtx_fmt_iE (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_ss (rtx rt, machine_mode mode,
+ const char *arg0,
+ const char *arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XSTR (rt, 0) = arg0;
+ XSTR (rt, 1) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_ss_stat (RTX_CODE code, machine_mode mode,
+ const char *arg0,
+ const char *arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_ss (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_ss(c, m, arg0, arg1) \
+ gen_rtx_fmt_ss_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_ss(c, m, arg0, arg1) \
+ init_rtx_fmt_ss (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_eE (rtx rt, machine_mode mode,
+ rtx arg0,
+ rtvec arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XEXP (rt, 0) = arg0;
+ XVEC (rt, 1) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_eE_stat (RTX_CODE code, machine_mode mode,
+ rtx arg0,
+ rtvec arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_eE (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_eE(c, m, arg0, arg1) \
+ gen_rtx_fmt_eE_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_eE(c, m, arg0, arg1) \
+ init_rtx_fmt_eE (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_ses (rtx rt, machine_mode mode,
+ const char *arg0,
+ rtx arg1,
+ const char *arg2)
+{
+ PUT_MODE_RAW (rt, mode);
+ XSTR (rt, 0) = arg0;
+ XEXP (rt, 1) = arg1;
+ XSTR (rt, 2) = arg2;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_ses_stat (RTX_CODE code, machine_mode mode,
+ const char *arg0,
+ rtx arg1,
+ const char *arg2 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_ses (rt, mode, arg0, arg1, arg2);
+}
+
+#define gen_rtx_fmt_ses(c, m, arg0, arg1, arg2) \
+ gen_rtx_fmt_ses_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_ses(c, m, arg0, arg1, arg2) \
+ init_rtx_fmt_ses (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2))
+
+static inline rtx
+init_rtx_fmt_sss (rtx rt, machine_mode mode,
+ const char *arg0,
+ const char *arg1,
+ const char *arg2)
+{
+ PUT_MODE_RAW (rt, mode);
+ XSTR (rt, 0) = arg0;
+ XSTR (rt, 1) = arg1;
+ XSTR (rt, 2) = arg2;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_sss_stat (RTX_CODE code, machine_mode mode,
+ const char *arg0,
+ const char *arg1,
+ const char *arg2 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_sss (rt, mode, arg0, arg1, arg2);
+}
+
+#define gen_rtx_fmt_sss(c, m, arg0, arg1, arg2) \
+ gen_rtx_fmt_sss_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_sss(c, m, arg0, arg1, arg2) \
+ init_rtx_fmt_sss (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2))
+
+static inline rtx
+init_rtx_fmt_sse (rtx rt, machine_mode mode,
+ const char *arg0,
+ const char *arg1,
+ rtx arg2)
+{
+ PUT_MODE_RAW (rt, mode);
+ XSTR (rt, 0) = arg0;
+ XSTR (rt, 1) = arg1;
+ XEXP (rt, 2) = arg2;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_sse_stat (RTX_CODE code, machine_mode mode,
+ const char *arg0,
+ const char *arg1,
+ rtx arg2 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_sse (rt, mode, arg0, arg1, arg2);
+}
+
+#define gen_rtx_fmt_sse(c, m, arg0, arg1, arg2) \
+ gen_rtx_fmt_sse_stat ((c), (m), (arg0), (arg1), (arg2) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_sse(c, m, arg0, arg1, arg2) \
+ init_rtx_fmt_sse (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2))
+
+static inline rtx
+init_rtx_fmt_sies (rtx rt, machine_mode mode,
+ const char *arg0,
+ int arg1,
+ rtx arg2,
+ const char *arg3)
+{
+ PUT_MODE_RAW (rt, mode);
+ XSTR (rt, 0) = arg0;
+ XINT (rt, 1) = arg1;
+ XEXP (rt, 2) = arg2;
+ XSTR (rt, 3) = arg3;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_sies_stat (RTX_CODE code, machine_mode mode,
+ const char *arg0,
+ int arg1,
+ rtx arg2,
+ const char *arg3 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_sies (rt, mode, arg0, arg1, arg2, arg3);
+}
+
+#define gen_rtx_fmt_sies(c, m, arg0, arg1, arg2, arg3) \
+ gen_rtx_fmt_sies_stat ((c), (m), (arg0), (arg1), (arg2), (arg3) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_sies(c, m, arg0, arg1, arg2, arg3) \
+ init_rtx_fmt_sies (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3))
+
+static inline rtx
+init_rtx_fmt_sE (rtx rt, machine_mode mode,
+ const char *arg0,
+ rtvec arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XSTR (rt, 0) = arg0;
+ XVEC (rt, 1) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_sE_stat (RTX_CODE code, machine_mode mode,
+ const char *arg0,
+ rtvec arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_sE (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_sE(c, m, arg0, arg1) \
+ gen_rtx_fmt_sE_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_sE(c, m, arg0, arg1) \
+ init_rtx_fmt_sE (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_ww (rtx rt, machine_mode mode,
+ HOST_WIDE_INT arg0,
+ HOST_WIDE_INT arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XWINT (rt, 0) = arg0;
+ XWINT (rt, 1) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_ww_stat (RTX_CODE code, machine_mode mode,
+ HOST_WIDE_INT arg0,
+ HOST_WIDE_INT arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_ww (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_ww(c, m, arg0, arg1) \
+ gen_rtx_fmt_ww_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_ww(c, m, arg0, arg1) \
+ init_rtx_fmt_ww (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_Ee (rtx rt, machine_mode mode,
+ rtvec arg0,
+ rtx arg1)
+{
+ PUT_MODE_RAW (rt, mode);
+ XVEC (rt, 0) = arg0;
+ XEXP (rt, 1) = arg1;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_Ee_stat (RTX_CODE code, machine_mode mode,
+ rtvec arg0,
+ rtx arg1 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_Ee (rt, mode, arg0, arg1);
+}
+
+#define gen_rtx_fmt_Ee(c, m, arg0, arg1) \
+ gen_rtx_fmt_Ee_stat ((c), (m), (arg0), (arg1) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_Ee(c, m, arg0, arg1) \
+ init_rtx_fmt_Ee (rtx_alloca ((c)), (m), (arg0), (arg1))
+
+static inline rtx
+init_rtx_fmt_sEsE (rtx rt, machine_mode mode,
+ const char *arg0,
+ rtvec arg1,
+ const char *arg2,
+ rtvec arg3)
+{
+ PUT_MODE_RAW (rt, mode);
+ XSTR (rt, 0) = arg0;
+ XVEC (rt, 1) = arg1;
+ XSTR (rt, 2) = arg2;
+ XVEC (rt, 3) = arg3;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_sEsE_stat (RTX_CODE code, machine_mode mode,
+ const char *arg0,
+ rtvec arg1,
+ const char *arg2,
+ rtvec arg3 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_sEsE (rt, mode, arg0, arg1, arg2, arg3);
+}
+
+#define gen_rtx_fmt_sEsE(c, m, arg0, arg1, arg2, arg3) \
+ gen_rtx_fmt_sEsE_stat ((c), (m), (arg0), (arg1), (arg2), (arg3) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_sEsE(c, m, arg0, arg1, arg2, arg3) \
+ init_rtx_fmt_sEsE (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3))
+
+static inline rtx
+init_rtx_fmt_ssss (rtx rt, machine_mode mode,
+ const char *arg0,
+ const char *arg1,
+ const char *arg2,
+ const char *arg3)
+{
+ PUT_MODE_RAW (rt, mode);
+ XSTR (rt, 0) = arg0;
+ XSTR (rt, 1) = arg1;
+ XSTR (rt, 2) = arg2;
+ XSTR (rt, 3) = arg3;
+ return rt;
+}
+
+static inline rtx
+gen_rtx_fmt_ssss_stat (RTX_CODE code, machine_mode mode,
+ const char *arg0,
+ const char *arg1,
+ const char *arg2,
+ const char *arg3 MEM_STAT_DECL)
+{
+ rtx rt;
+
+ rt = rtx_alloc (code PASS_MEM_STAT);
+ return init_rtx_fmt_ssss (rt, mode, arg0, arg1, arg2, arg3);
+}
+
+#define gen_rtx_fmt_ssss(c, m, arg0, arg1, arg2, arg3) \
+ gen_rtx_fmt_ssss_stat ((c), (m), (arg0), (arg1), (arg2), (arg3) MEM_STAT_INFO)
+
+#define alloca_rtx_fmt_ssss(c, m, arg0, arg1, arg2, arg3) \
+ init_rtx_fmt_ssss (rtx_alloca ((c)), (m), (arg0), (arg1), (arg2), (arg3))
+
+
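+/* Usage sketch: each macro below pairs one rtx code with the format
+ constructor matching its operand layout, so for example
+
+ rtx sum = gen_rtx_PLUS (SImode, a, b);
+
+ is shorthand for gen_rtx_fmt_ee (PLUS, SImode, (a), (b)), which
+ allocates the node and fills XEXP (sum, 0) and XEXP (sum, 1); a and b
+ stand for hypothetical operand rtxes. */
+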
+#define gen_rtx_VALUE(MODE) \
+ gen_rtx_fmt_0 (VALUE, (MODE))
+#define gen_rtx_DEBUG_EXPR(MODE) \
+ gen_rtx_fmt_0 (DEBUG_EXPR, (MODE))
+#define gen_rtx_raw_EXPR_LIST(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (EXPR_LIST, (MODE), (ARG0), (ARG1))
+#define gen_rtx_raw_INSN_LIST(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ue (INSN_LIST, (MODE), (ARG0), (ARG1))
+#define gen_rtx_INT_LIST(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ie (INT_LIST, (MODE), (ARG0), (ARG1))
+#define gen_rtx_SEQUENCE(MODE, ARG0) \
+ gen_rtx_fmt_E (SEQUENCE, (MODE), (ARG0))
+#define gen_rtx_ADDRESS(MODE, ARG0) \
+ gen_rtx_fmt_i (ADDRESS, (MODE), (ARG0))
+#define gen_rtx_DEBUG_INSN(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6) \
+ gen_rtx_fmt_uuBeiie (DEBUG_INSN, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6))
+#define gen_rtx_raw_INSN(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6) \
+ gen_rtx_fmt_uuBeiie (INSN, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6))
+#define gen_rtx_JUMP_INSN(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6) \
+ gen_rtx_fmt_uuBeiie0 (JUMP_INSN, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6))
+#define gen_rtx_CALL_INSN(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7) \
+ gen_rtx_fmt_uuBeiiee (CALL_INSN, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6), (ARG7))
+#define gen_rtx_JUMP_TABLE_DATA(MODE, ARG0, ARG1, ARG2, ARG3) \
+ gen_rtx_fmt_uuBe0000 (JUMP_TABLE_DATA, (MODE), (ARG0), (ARG1), (ARG2), (ARG3))
+#define gen_rtx_BARRIER(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_uu00000 (BARRIER, (MODE), (ARG0), (ARG1))
+#define gen_rtx_CODE_LABEL(MODE, ARG0, ARG1, ARG2, ARG3, ARG4) \
+ gen_rtx_fmt_uuB00is (CODE_LABEL, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4))
+#define gen_rtx_COND_EXEC(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (COND_EXEC, (MODE), (ARG0), (ARG1))
+#define gen_rtx_PARALLEL(MODE, ARG0) \
+ gen_rtx_fmt_E (PARALLEL, (MODE), (ARG0))
+#define gen_rtx_ASM_INPUT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_si (ASM_INPUT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_ASM_OPERANDS(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6) \
+ gen_rtx_fmt_ssiEEEi (ASM_OPERANDS, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6))
+#define gen_rtx_UNSPEC(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_Ei (UNSPEC, (MODE), (ARG0), (ARG1))
+#define gen_rtx_UNSPEC_VOLATILE(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_Ei (UNSPEC_VOLATILE, (MODE), (ARG0), (ARG1))
+#define gen_rtx_ADDR_VEC(MODE, ARG0) \
+ gen_rtx_fmt_E (ADDR_VEC, (MODE), (ARG0))
+#define gen_rtx_ADDR_DIFF_VEC(MODE, ARG0, ARG1, ARG2, ARG3) \
+ gen_rtx_fmt_eEee0 (ADDR_DIFF_VEC, (MODE), (ARG0), (ARG1), (ARG2), (ARG3))
+#define gen_rtx_PREFETCH(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_eee (PREFETCH, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_SET(ARG0, ARG1) \
+ gen_rtx_fmt_ee (SET, VOIDmode, (ARG0), (ARG1))
+#define gen_rtx_USE(MODE, ARG0) \
+ gen_rtx_fmt_e (USE, (MODE), (ARG0))
+#define gen_rtx_CLOBBER(MODE, ARG0) \
+ gen_rtx_fmt_e (CLOBBER, (MODE), (ARG0))
+#define gen_rtx_CALL(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (CALL, (MODE), (ARG0), (ARG1))
+#define gen_rtx_raw_RETURN(MODE) \
+ gen_rtx_fmt_ (RETURN, (MODE))
+#define gen_rtx_raw_SIMPLE_RETURN(MODE) \
+ gen_rtx_fmt_ (SIMPLE_RETURN, (MODE))
+#define gen_rtx_EH_RETURN(MODE) \
+ gen_rtx_fmt_ (EH_RETURN, (MODE))
+#define gen_rtx_TRAP_IF(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (TRAP_IF, (MODE), (ARG0), (ARG1))
+#define gen_rtx_raw_CONST_INT(MODE, ARG0) \
+ gen_rtx_fmt_w (CONST_INT, (MODE), (ARG0))
+#define gen_rtx_raw_CONST_VECTOR(MODE, ARG0) \
+ gen_rtx_fmt_E (CONST_VECTOR, (MODE), (ARG0))
+#define gen_rtx_CONST_STRING(MODE, ARG0) \
+ gen_rtx_fmt_s (CONST_STRING, (MODE), (ARG0))
+#define gen_rtx_CONST(MODE, ARG0) \
+ gen_rtx_fmt_e (CONST, (MODE), (ARG0))
+#define gen_rtx_raw_PC(MODE) \
+ gen_rtx_fmt_ (PC, (MODE))
+#define gen_rtx_SCRATCH(MODE) \
+ gen_rtx_fmt_ (SCRATCH, (MODE))
+#define gen_rtx_raw_SUBREG(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ep (SUBREG, (MODE), (ARG0), (ARG1))
+#define gen_rtx_STRICT_LOW_PART(MODE, ARG0) \
+ gen_rtx_fmt_e (STRICT_LOW_PART, (MODE), (ARG0))
+#define gen_rtx_CONCAT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (CONCAT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_CONCATN(MODE, ARG0) \
+ gen_rtx_fmt_E (CONCATN, (MODE), (ARG0))
+#define gen_rtx_raw_MEM(MODE, ARG0) \
+ gen_rtx_fmt_e0 (MEM, (MODE), (ARG0))
+#define gen_rtx_LABEL_REF(MODE, ARG0) \
+ gen_rtx_fmt_u (LABEL_REF, (MODE), (ARG0))
+#define gen_rtx_SYMBOL_REF(MODE, ARG0) \
+ gen_rtx_fmt_s0 (SYMBOL_REF, (MODE), (ARG0))
+#define gen_rtx_IF_THEN_ELSE(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_eee (IF_THEN_ELSE, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_COMPARE(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (COMPARE, (MODE), (ARG0), (ARG1))
+#define gen_rtx_PLUS(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (PLUS, (MODE), (ARG0), (ARG1))
+#define gen_rtx_MINUS(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (MINUS, (MODE), (ARG0), (ARG1))
+#define gen_rtx_NEG(MODE, ARG0) \
+ gen_rtx_fmt_e (NEG, (MODE), (ARG0))
+#define gen_rtx_MULT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (MULT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_SS_MULT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (SS_MULT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_US_MULT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (US_MULT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_SMUL_HIGHPART(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (SMUL_HIGHPART, (MODE), (ARG0), (ARG1))
+#define gen_rtx_UMUL_HIGHPART(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (UMUL_HIGHPART, (MODE), (ARG0), (ARG1))
+#define gen_rtx_DIV(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (DIV, (MODE), (ARG0), (ARG1))
+#define gen_rtx_SS_DIV(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (SS_DIV, (MODE), (ARG0), (ARG1))
+#define gen_rtx_US_DIV(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (US_DIV, (MODE), (ARG0), (ARG1))
+#define gen_rtx_MOD(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (MOD, (MODE), (ARG0), (ARG1))
+#define gen_rtx_UDIV(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (UDIV, (MODE), (ARG0), (ARG1))
+#define gen_rtx_UMOD(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (UMOD, (MODE), (ARG0), (ARG1))
+#define gen_rtx_AND(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (AND, (MODE), (ARG0), (ARG1))
+#define gen_rtx_IOR(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (IOR, (MODE), (ARG0), (ARG1))
+#define gen_rtx_XOR(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (XOR, (MODE), (ARG0), (ARG1))
+#define gen_rtx_NOT(MODE, ARG0) \
+ gen_rtx_fmt_e (NOT, (MODE), (ARG0))
+#define gen_rtx_ASHIFT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (ASHIFT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_ROTATE(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (ROTATE, (MODE), (ARG0), (ARG1))
+#define gen_rtx_ASHIFTRT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (ASHIFTRT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_LSHIFTRT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (LSHIFTRT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_ROTATERT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (ROTATERT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_SMIN(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (SMIN, (MODE), (ARG0), (ARG1))
+#define gen_rtx_SMAX(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (SMAX, (MODE), (ARG0), (ARG1))
+#define gen_rtx_UMIN(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (UMIN, (MODE), (ARG0), (ARG1))
+#define gen_rtx_UMAX(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (UMAX, (MODE), (ARG0), (ARG1))
+#define gen_rtx_PRE_DEC(MODE, ARG0) \
+ gen_rtx_fmt_e (PRE_DEC, (MODE), (ARG0))
+#define gen_rtx_PRE_INC(MODE, ARG0) \
+ gen_rtx_fmt_e (PRE_INC, (MODE), (ARG0))
+#define gen_rtx_POST_DEC(MODE, ARG0) \
+ gen_rtx_fmt_e (POST_DEC, (MODE), (ARG0))
+#define gen_rtx_POST_INC(MODE, ARG0) \
+ gen_rtx_fmt_e (POST_INC, (MODE), (ARG0))
+#define gen_rtx_PRE_MODIFY(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (PRE_MODIFY, (MODE), (ARG0), (ARG1))
+#define gen_rtx_POST_MODIFY(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (POST_MODIFY, (MODE), (ARG0), (ARG1))
+#define gen_rtx_NE(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (NE, (MODE), (ARG0), (ARG1))
+#define gen_rtx_EQ(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (EQ, (MODE), (ARG0), (ARG1))
+#define gen_rtx_GE(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (GE, (MODE), (ARG0), (ARG1))
+#define gen_rtx_GT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (GT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_LE(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (LE, (MODE), (ARG0), (ARG1))
+#define gen_rtx_LT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (LT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_LTGT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (LTGT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_GEU(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (GEU, (MODE), (ARG0), (ARG1))
+#define gen_rtx_GTU(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (GTU, (MODE), (ARG0), (ARG1))
+#define gen_rtx_LEU(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (LEU, (MODE), (ARG0), (ARG1))
+#define gen_rtx_LTU(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (LTU, (MODE), (ARG0), (ARG1))
+#define gen_rtx_UNORDERED(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (UNORDERED, (MODE), (ARG0), (ARG1))
+#define gen_rtx_ORDERED(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (ORDERED, (MODE), (ARG0), (ARG1))
+#define gen_rtx_UNEQ(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (UNEQ, (MODE), (ARG0), (ARG1))
+#define gen_rtx_UNGE(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (UNGE, (MODE), (ARG0), (ARG1))
+#define gen_rtx_UNGT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (UNGT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_UNLE(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (UNLE, (MODE), (ARG0), (ARG1))
+#define gen_rtx_UNLT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (UNLT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_SIGN_EXTEND(MODE, ARG0) \
+ gen_rtx_fmt_e (SIGN_EXTEND, (MODE), (ARG0))
+#define gen_rtx_ZERO_EXTEND(MODE, ARG0) \
+ gen_rtx_fmt_e (ZERO_EXTEND, (MODE), (ARG0))
+#define gen_rtx_TRUNCATE(MODE, ARG0) \
+ gen_rtx_fmt_e (TRUNCATE, (MODE), (ARG0))
+#define gen_rtx_FLOAT_EXTEND(MODE, ARG0) \
+ gen_rtx_fmt_e (FLOAT_EXTEND, (MODE), (ARG0))
+#define gen_rtx_FLOAT_TRUNCATE(MODE, ARG0) \
+ gen_rtx_fmt_e (FLOAT_TRUNCATE, (MODE), (ARG0))
+#define gen_rtx_FLOAT(MODE, ARG0) \
+ gen_rtx_fmt_e (FLOAT, (MODE), (ARG0))
+#define gen_rtx_FIX(MODE, ARG0) \
+ gen_rtx_fmt_e (FIX, (MODE), (ARG0))
+#define gen_rtx_UNSIGNED_FLOAT(MODE, ARG0) \
+ gen_rtx_fmt_e (UNSIGNED_FLOAT, (MODE), (ARG0))
+#define gen_rtx_UNSIGNED_FIX(MODE, ARG0) \
+ gen_rtx_fmt_e (UNSIGNED_FIX, (MODE), (ARG0))
+#define gen_rtx_FRACT_CONVERT(MODE, ARG0) \
+ gen_rtx_fmt_e (FRACT_CONVERT, (MODE), (ARG0))
+#define gen_rtx_UNSIGNED_FRACT_CONVERT(MODE, ARG0) \
+ gen_rtx_fmt_e (UNSIGNED_FRACT_CONVERT, (MODE), (ARG0))
+#define gen_rtx_SAT_FRACT(MODE, ARG0) \
+ gen_rtx_fmt_e (SAT_FRACT, (MODE), (ARG0))
+#define gen_rtx_UNSIGNED_SAT_FRACT(MODE, ARG0) \
+ gen_rtx_fmt_e (UNSIGNED_SAT_FRACT, (MODE), (ARG0))
+#define gen_rtx_ABS(MODE, ARG0) \
+ gen_rtx_fmt_e (ABS, (MODE), (ARG0))
+#define gen_rtx_SQRT(MODE, ARG0) \
+ gen_rtx_fmt_e (SQRT, (MODE), (ARG0))
+#define gen_rtx_BSWAP(MODE, ARG0) \
+ gen_rtx_fmt_e (BSWAP, (MODE), (ARG0))
+#define gen_rtx_FFS(MODE, ARG0) \
+ gen_rtx_fmt_e (FFS, (MODE), (ARG0))
+#define gen_rtx_CLRSB(MODE, ARG0) \
+ gen_rtx_fmt_e (CLRSB, (MODE), (ARG0))
+#define gen_rtx_CLZ(MODE, ARG0) \
+ gen_rtx_fmt_e (CLZ, (MODE), (ARG0))
+#define gen_rtx_CTZ(MODE, ARG0) \
+ gen_rtx_fmt_e (CTZ, (MODE), (ARG0))
+#define gen_rtx_POPCOUNT(MODE, ARG0) \
+ gen_rtx_fmt_e (POPCOUNT, (MODE), (ARG0))
+#define gen_rtx_PARITY(MODE, ARG0) \
+ gen_rtx_fmt_e (PARITY, (MODE), (ARG0))
+#define gen_rtx_SIGN_EXTRACT(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_eee (SIGN_EXTRACT, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_ZERO_EXTRACT(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_eee (ZERO_EXTRACT, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_HIGH(MODE, ARG0) \
+ gen_rtx_fmt_e (HIGH, (MODE), (ARG0))
+#define gen_rtx_LO_SUM(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (LO_SUM, (MODE), (ARG0), (ARG1))
+#define gen_rtx_VEC_MERGE(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_eee (VEC_MERGE, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_VEC_SELECT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (VEC_SELECT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_VEC_CONCAT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (VEC_CONCAT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_VEC_DUPLICATE(MODE, ARG0) \
+ gen_rtx_fmt_e (VEC_DUPLICATE, (MODE), (ARG0))
+#define gen_rtx_VEC_SERIES(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (VEC_SERIES, (MODE), (ARG0), (ARG1))
+#define gen_rtx_SS_PLUS(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (SS_PLUS, (MODE), (ARG0), (ARG1))
+#define gen_rtx_US_PLUS(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (US_PLUS, (MODE), (ARG0), (ARG1))
+#define gen_rtx_SS_MINUS(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (SS_MINUS, (MODE), (ARG0), (ARG1))
+#define gen_rtx_SS_NEG(MODE, ARG0) \
+ gen_rtx_fmt_e (SS_NEG, (MODE), (ARG0))
+#define gen_rtx_US_NEG(MODE, ARG0) \
+ gen_rtx_fmt_e (US_NEG, (MODE), (ARG0))
+#define gen_rtx_SS_ABS(MODE, ARG0) \
+ gen_rtx_fmt_e (SS_ABS, (MODE), (ARG0))
+#define gen_rtx_SS_ASHIFT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (SS_ASHIFT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_US_ASHIFT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (US_ASHIFT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_US_MINUS(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ee (US_MINUS, (MODE), (ARG0), (ARG1))
+#define gen_rtx_SS_TRUNCATE(MODE, ARG0) \
+ gen_rtx_fmt_e (SS_TRUNCATE, (MODE), (ARG0))
+#define gen_rtx_US_TRUNCATE(MODE, ARG0) \
+ gen_rtx_fmt_e (US_TRUNCATE, (MODE), (ARG0))
+#define gen_rtx_FMA(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_eee (FMA, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_DEBUG_IMPLICIT_PTR(MODE, ARG0) \
+ gen_rtx_fmt_t (DEBUG_IMPLICIT_PTR, (MODE), (ARG0))
+#define gen_rtx_ENTRY_VALUE(MODE) \
+ gen_rtx_fmt_0 (ENTRY_VALUE, (MODE))
+#define gen_rtx_DEBUG_PARAMETER_REF(MODE, ARG0) \
+ gen_rtx_fmt_t (DEBUG_PARAMETER_REF, (MODE), (ARG0))
+#define gen_rtx_DEBUG_MARKER(MODE) \
+ gen_rtx_fmt_ (DEBUG_MARKER, (MODE))
+#define gen_rtx_MATCH_OPERAND(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_iss (MATCH_OPERAND, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_MATCH_SCRATCH(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_is (MATCH_SCRATCH, (MODE), (ARG0), (ARG1))
+#define gen_rtx_MATCH_OPERATOR(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_isE (MATCH_OPERATOR, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_MATCH_PARALLEL(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_isE (MATCH_PARALLEL, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_MATCH_DUP(MODE, ARG0) \
+ gen_rtx_fmt_i (MATCH_DUP, (MODE), (ARG0))
+#define gen_rtx_MATCH_OP_DUP(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_iE (MATCH_OP_DUP, (MODE), (ARG0), (ARG1))
+#define gen_rtx_MATCH_PAR_DUP(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_iE (MATCH_PAR_DUP, (MODE), (ARG0), (ARG1))
+#define gen_rtx_MATCH_CODE(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ss (MATCH_CODE, (MODE), (ARG0), (ARG1))
+#define gen_rtx_MATCH_TEST(MODE, ARG0) \
+ gen_rtx_fmt_s (MATCH_TEST, (MODE), (ARG0))
+#define gen_rtx_DEFINE_DELAY(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_eE (DEFINE_DELAY, (MODE), (ARG0), (ARG1))
+#define gen_rtx_DEFINE_PREDICATE(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_ses (DEFINE_PREDICATE, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_DEFINE_SPECIAL_PREDICATE(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_ses (DEFINE_SPECIAL_PREDICATE, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_DEFINE_REGISTER_CONSTRAINT(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_sss (DEFINE_REGISTER_CONSTRAINT, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_DEFINE_CONSTRAINT(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_sse (DEFINE_CONSTRAINT, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_DEFINE_MEMORY_CONSTRAINT(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_sse (DEFINE_MEMORY_CONSTRAINT, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_DEFINE_SPECIAL_MEMORY_CONSTRAINT(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_sse (DEFINE_SPECIAL_MEMORY_CONSTRAINT, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_DEFINE_RELAXED_MEMORY_CONSTRAINT(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_sse (DEFINE_RELAXED_MEMORY_CONSTRAINT, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_DEFINE_ADDRESS_CONSTRAINT(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_sse (DEFINE_ADDRESS_CONSTRAINT, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_EXCLUSION_SET(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ss (EXCLUSION_SET, (MODE), (ARG0), (ARG1))
+#define gen_rtx_PRESENCE_SET(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ss (PRESENCE_SET, (MODE), (ARG0), (ARG1))
+#define gen_rtx_FINAL_PRESENCE_SET(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ss (FINAL_PRESENCE_SET, (MODE), (ARG0), (ARG1))
+#define gen_rtx_ABSENCE_SET(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ss (ABSENCE_SET, (MODE), (ARG0), (ARG1))
+#define gen_rtx_FINAL_ABSENCE_SET(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ss (FINAL_ABSENCE_SET, (MODE), (ARG0), (ARG1))
+#define gen_rtx_DEFINE_AUTOMATON(MODE, ARG0) \
+ gen_rtx_fmt_s (DEFINE_AUTOMATON, (MODE), (ARG0))
+#define gen_rtx_AUTOMATA_OPTION(MODE, ARG0) \
+ gen_rtx_fmt_s (AUTOMATA_OPTION, (MODE), (ARG0))
+#define gen_rtx_DEFINE_RESERVATION(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ss (DEFINE_RESERVATION, (MODE), (ARG0), (ARG1))
+#define gen_rtx_DEFINE_INSN_RESERVATION(MODE, ARG0, ARG1, ARG2, ARG3) \
+ gen_rtx_fmt_sies (DEFINE_INSN_RESERVATION, (MODE), (ARG0), (ARG1), (ARG2), (ARG3))
+#define gen_rtx_DEFINE_ATTR(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_sse (DEFINE_ATTR, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_DEFINE_ENUM_ATTR(MODE, ARG0, ARG1, ARG2) \
+ gen_rtx_fmt_sse (DEFINE_ENUM_ATTR, (MODE), (ARG0), (ARG1), (ARG2))
+#define gen_rtx_ATTR(MODE, ARG0) \
+ gen_rtx_fmt_s (ATTR, (MODE), (ARG0))
+#define gen_rtx_SET_ATTR(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ss (SET_ATTR, (MODE), (ARG0), (ARG1))
+#define gen_rtx_SET_ATTR_ALTERNATIVE(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_sE (SET_ATTR_ALTERNATIVE, (MODE), (ARG0), (ARG1))
+#define gen_rtx_EQ_ATTR(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ss (EQ_ATTR, (MODE), (ARG0), (ARG1))
+#define gen_rtx_EQ_ATTR_ALT(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_ww (EQ_ATTR_ALT, (MODE), (ARG0), (ARG1))
+#define gen_rtx_ATTR_FLAG(MODE, ARG0) \
+ gen_rtx_fmt_s (ATTR_FLAG, (MODE), (ARG0))
+#define gen_rtx_COND(MODE, ARG0, ARG1) \
+ gen_rtx_fmt_Ee (COND, (MODE), (ARG0), (ARG1))
+#define gen_rtx_DEFINE_SUBST(MODE, ARG0, ARG1, ARG2, ARG3) \
+ gen_rtx_fmt_sEsE (DEFINE_SUBST, (MODE), (ARG0), (ARG1), (ARG2), (ARG3))
+#define gen_rtx_DEFINE_SUBST_ATTR(MODE, ARG0, ARG1, ARG2, ARG3) \
+ gen_rtx_fmt_ssss (DEFINE_SUBST_ATTR, (MODE), (ARG0), (ARG1), (ARG2), (ARG3))
+
+#endif /* GCC_GENRTL_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gensupport.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gensupport.h
new file mode 100644
index 0000000..a1edfbd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gensupport.h
@@ -0,0 +1,228 @@
+/* Declarations for rtx-reader support for gen* routines.
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GENSUPPORT_H
+#define GCC_GENSUPPORT_H
+
+#include "read-md.h"
+
+struct obstack;
+extern struct obstack *rtl_obstack;
+
+/* Information about an .md define_* rtx. */
+class md_rtx_info {
+public:
+ /* The rtx itself. */
+ rtx def;
+
+ /* The location of the first line of the rtx. */
+ file_location loc;
+
+ /* The unique number attached to the rtx. Currently all define_insns,
+ define_expands, define_splits, define_peepholes and define_peephole2s
+ share the same insn_code index space. */
+ int index;
+};
+
+#define OPTAB_CL(name, pat, c, b, l) name,
+#define OPTAB_CX(name, pat)
+#define OPTAB_CD(name, pat) name,
+#define OPTAB_NL(name, pat, c, b, s, l) name,
+#define OPTAB_NC(name, pat, c) name,
+#define OPTAB_NX(name, pat)
+#define OPTAB_VL(name, pat, c, b, s, l) name,
+#define OPTAB_VC(name, pat, c) name,
+#define OPTAB_VX(name, pat)
+#define OPTAB_DC(name, pat, c) name,
+#define OPTAB_D(name, pat) name,
+
+/* Enumerates all optabs. */
+typedef enum optab_tag {
+ unknown_optab,
+#include "optabs.def"
+ NUM_OPTABS
+} optab;
+
+#undef OPTAB_CL
+#undef OPTAB_CX
+#undef OPTAB_CD
+#undef OPTAB_NL
+#undef OPTAB_NC
+#undef OPTAB_NX
+#undef OPTAB_VL
+#undef OPTAB_VC
+#undef OPTAB_VX
+#undef OPTAB_DC
+#undef OPTAB_D
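+
+/* Note: the OPTAB_* definitions above implement an X-macro expansion of
+ optabs.def that keeps only each entry's name, so a hypothetical line
+ OPTAB_D (foo_optab, "foo$a3") contributes just the enumerator
+ foo_optab to the optab enum above; the remaining fields are consumed
+ elsewhere, e.g. by the optab_def table below and by genopinit. */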
+
+/* Describes one entry in optabs.def. */
+struct optab_def
+{
+ /* The name of the optab (e.g. "add_optab"). */
+ const char *name;
+
+ /* The pattern that matching define_expands and define_insns have.
+ See the comment at the head of optabs.def for details. */
+ const char *pattern;
+
+ /* The initializers (in the form of C code) for the libcall_basename,
+ libcall_suffix and libcall_gen fields of (convert_)optab_libcall_d. */
+ const char *base;
+ const char *suffix;
+ const char *libcall;
+
+ /* The optab's enum value. */
+ unsigned int op;
+
+ /* The value returned by optab_to_code (OP). */
+ enum rtx_code fcode;
+
+ /* CODE if code_to_optab (CODE) should return OP, otherwise UNKNOWN. */
+ enum rtx_code rcode;
+
+ /* 1: conversion optabs with libcall data,
+ 2: conversion optabs without libcall data,
+ 3: non-conversion optabs with libcall data ("normal" and "overflow"
+ optabs in the optabs.def comment)
+ 4: non-conversion optabs without libcall data ("direct" optabs). */
+ unsigned int kind;
+};
+
+extern optab_def optabs[];
+extern unsigned int num_optabs;
+
+/* Information about an instruction name that matches an optab pattern. */
+struct optab_pattern
+{
+ /* The name of the instruction. */
+ const char *name;
+
+ /* The matching optab. */
+ unsigned int op;
+
+ /* The optab modes. M2 is only significant for conversion optabs;
+ it is zero otherwise. */
+ unsigned int m1, m2;
+
+ /* An index that provides a lexicographical sort of (OP, M2, M1).
+ Used by genopinit.cc. */
+ unsigned int sort_num;
+};
+
+extern rtx add_implicit_parallel (rtvec);
+extern rtx_reader *init_rtx_reader_args_cb (int, const char **,
+ bool (*)(const char *));
+extern rtx_reader *init_rtx_reader_args (int, const char **);
+extern bool read_md_rtx (md_rtx_info *);
+extern unsigned int get_num_insn_codes ();
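+
+/* A typical gen* driver loop over these entry points, sketched with a
+ hypothetical handle_insn callback:
+
+ init_rtx_reader_args (argc, argv);
+ md_rtx_info info;
+ while (read_md_rtx (&info))
+ if (GET_CODE (info.def) == DEFINE_INSN)
+ handle_insn (&info); */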
+
+/* Set this to 0 to disable automatic elision of insn patterns which
+ can never be used in this configuration. See genconditions.cc.
+ Must be set before calling init_md_reader. */
+extern int insn_elision;
+
+/* Return the C test that says whether a definition rtx can be used,
+ or "" if it can be used unconditionally. */
+extern const char *get_c_test (rtx);
+
+/* If the C test passed as the argument can be evaluated at compile
+ time, return its truth value; else return -1. The test must have
+ appeared somewhere in the machine description when genconditions
+ was run. */
+extern int maybe_eval_c_test (const char *);
+
+/* Add an entry to the table of conditions. Used by genconditions and
+ by read-rtl.cc. */
+extern void add_c_test (const char *, int);
+
+/* This structure is used internally by gensupport.cc and genconditions.cc. */
+struct c_test
+{
+ const char *expr;
+ int value;
+};
+
+#ifdef __HASHTAB_H__
+extern hashval_t hash_c_test (const void *);
+extern int cmp_c_test (const void *, const void *);
+extern void traverse_c_tests (htab_trav, void *);
+#endif
+
+/* Predicate handling: helper functions and data structures. */
+
+struct pred_data
+{
+ struct pred_data *next; /* for iterating over the set of all preds */
+ const char *name; /* predicate name */
+ bool special; /* special handling of modes? */
+
+ /* data used primarily by genpreds.cc */
+ const char *c_block; /* C test block */
+ rtx exp; /* RTL test expression */
+
+ /* data used primarily by genrecog.cc */
+ enum rtx_code singleton; /* if pred takes only one code, that code */
+ int num_codes; /* number of codes accepted */
+ bool allows_non_lvalue; /* if pred allows non-lvalue expressions */
+ bool allows_non_const; /* if pred allows non-const expressions */
+ bool codes[NUM_RTX_CODE]; /* set of codes accepted */
+};
+
+extern struct pred_data *first_predicate;
+extern struct pred_data *lookup_predicate (const char *);
+extern void add_predicate_code (struct pred_data *, enum rtx_code);
+extern void add_predicate (struct pred_data *);
+
+#define FOR_ALL_PREDICATES(p) for (p = first_predicate; p; p = p->next)
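+
+/* Usage sketch: iterate over every registered predicate, e.g.
+
+ struct pred_data *p;
+ FOR_ALL_PREDICATES (p)
+ printf ("%s accepts %d codes\n", p->name, p->num_codes); */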
+
+struct pattern_stats
+{
+ /* The largest match_operand, match_operator or match_parallel
+ number found. */
+ int max_opno;
+
+ /* The largest match_dup, match_op_dup or match_par_dup number found. */
+ int max_dup_opno;
+
+ /* The smallest and largest match_scratch number found. */
+ int min_scratch_opno;
+ int max_scratch_opno;
+
+ /* The number of times match_dup, match_op_dup or match_par_dup appears
+ in the pattern. */
+ int num_dups;
+
+ /* The number of rtx arguments to the generator function. */
+ int num_generator_args;
+
+ /* The number of rtx operands in an insn. */
+ int num_insn_operands;
+
+ /* The number of operand variables that are needed. */
+ int num_operand_vars;
+};
+
+extern void get_pattern_stats (struct pattern_stats *ranges, rtvec vec);
+extern void compute_test_codes (rtx, file_location, char *);
+extern file_location get_file_location (rtx);
+extern const char *get_emit_function (rtx);
+extern bool needs_barrier_p (rtx);
+extern bool find_optab (optab_pattern *, const char *);
+
+#endif /* GCC_GENSUPPORT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ggc-internal.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ggc-internal.h
new file mode 100644
index 0000000..25e6ce6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ggc-internal.h
@@ -0,0 +1,115 @@
+/* Garbage collection for the GNU compiler. Internal definitions
+ for ggc-*.c and stringpool.cc.
+
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GGC_INTERNAL_H
+#define GCC_GGC_INTERNAL_H
+
+
+/* Call ggc_set_mark on all the roots. */
+extern void ggc_mark_roots (void);
+
+/* Stringpool. */
+
+/* Mark the entries in the string pool. */
+extern void ggc_mark_stringpool (void);
+
+/* Purge the entries in the string pool. */
+extern void ggc_purge_stringpool (void);
+
+/* Save and restore the string pool entries for PCH. */
+
+extern void gt_pch_save_stringpool (void);
+extern void gt_pch_fixup_stringpool (void);
+extern void gt_pch_restore_stringpool (void);
+
+/* PCH and GGC handling for strings, mostly trivial. */
+extern void gt_pch_p_S (void *, void *, gt_pointer_operator, void *);
+
+/* PCH. */
+
+struct ggc_pch_data;
+
+/* Return a new ggc_pch_data structure. */
+extern struct ggc_pch_data *init_ggc_pch (void);
+
+/* The second and third parameters give the address and size
+ of an object. Update the ggc_pch_data structure with as much of
+ that information as is necessary. The bool argument should be true
+ if the object is a string. */
+extern void ggc_pch_count_object (struct ggc_pch_data *, void *, size_t, bool);
+
+/* Return the total size of the data to be written to hold all
+ the objects previously passed to ggc_pch_count_object. */
+extern size_t ggc_pch_total_size (struct ggc_pch_data *);
+
+/* The objects, when read, will most likely be at the address
+ in the second parameter. */
+extern void ggc_pch_this_base (struct ggc_pch_data *, void *);
+
+/* Assuming that the objects really do end up at the address
+ passed to ggc_pch_this_base, return the address of this object.
+ The bool argument should be true if the object is a string. */
+extern char *ggc_pch_alloc_object (struct ggc_pch_data *, void *, size_t, bool);
+
+/* Write out any initial information required. */
+extern void ggc_pch_prepare_write (struct ggc_pch_data *, FILE *);
+
+/* Write out this object, including any padding. The last argument should be
+ true if the object is a string. */
+extern void ggc_pch_write_object (struct ggc_pch_data *, FILE *, void *,
+ void *, size_t, bool);
+
+/* All objects have been written, write out any final information
+ required. */
+extern void ggc_pch_finish (struct ggc_pch_data *, FILE *);
+
+/* A PCH file has just been read in at the address given by the second
+ parameter. Set up the GC implementation for the new objects. */
+extern void ggc_pch_read (FILE *, void *);
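+
+/* Sketch of the call order when writing a PCH, as implied by the
+ comments above: init_ggc_pch, then ggc_pch_count_object per object,
+ then ggc_pch_total_size and ggc_pch_this_base, then
+ ggc_pch_alloc_object per object, then ggc_pch_prepare_write,
+ ggc_pch_write_object per object, and finally ggc_pch_finish. */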
+
+
+/* Allocation and collection. */
+
+extern void ggc_record_overhead (size_t, size_t, void * FINAL_MEM_STAT_DECL);
+
+extern void ggc_free_overhead (void *);
+
+extern void ggc_prune_overhead_list (void);
+
+/* Return the number of bytes allocated at the indicated address. */
+extern size_t ggc_get_size (const void *);
+
+
+/* Statistics. */
+
+/* This structure contains the statistics common to all collectors.
+ Particular collectors can extend this structure. */
+struct ggc_statistics
+{
+ /* At present, we don't really gather any interesting statistics. */
+ int unused;
+};
+
+/* Used by the various collectors to gather and print statistics that
+ do not depend on the collector in use. */
+extern void ggc_print_common_statistics (FILE *, ggc_statistics *);
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ggc.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ggc.h
new file mode 100644
index 0000000..78eab7e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ggc.h
@@ -0,0 +1,371 @@
+/* Garbage collection for the GNU compiler.
+
+ Copyright (C) 1998-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GGC_H
+#define GCC_GGC_H
+
+/* Symbols are marked with `ggc' for `gcc gc' so as not to interfere with
+ an external gc library that might be linked in. */
+
+/* Internal functions and data structures used by the GTY
+ machinery, including the generated gt*.[hc] files. */
+
+#include "gtype-desc.h"
+
+/* One of these applies its third parameter (with the cookie in the
+ fourth parameter) to each pointer in the object pointed to by the
+ first parameter, using the second parameter. */
+typedef void (*gt_note_pointers) (void *, void *, gt_pointer_operator,
+ void *);
+
+/* One of these is called before objects are re-ordered in memory.
+ The first parameter is the original object, the second is the
+ subobject that has had its pointers reordered, the third parameter
+ can compute the new values of a pointer when given the cookie in
+ the fourth parameter. */
+typedef void (*gt_handle_reorder) (void *, void *, gt_pointer_operator,
+ void *);
+
+/* Used by the gt_pch_n_* routines. Register an object in the hash table. */
+extern int gt_pch_note_object (void *, void *, gt_note_pointers,
+ size_t length_override = (size_t)-1);
+
+/* Used by the gt_pch_p_* routines. Register address of a callback
+ pointer. */
+extern void gt_pch_note_callback (void *, void *);
+
+/* Used by the gt_pch_n_* routines. Register that an object has a reorder
+ function. */
+extern void gt_pch_note_reorder (void *, void *, gt_handle_reorder);
+
+/* Generated function to clear caches in GC memory. */
+extern void gt_clear_caches ();
+
+/* Mark the object in the first parameter and anything it points to. */
+typedef void (*gt_pointer_walker) (void *);
+
+/* Structures for the easy way to mark roots.
+ In an array, terminated by having base == NULL. */
+struct ggc_root_tab {
+ void *base;
+ size_t nelt;
+ size_t stride;
+ gt_pointer_walker cb;
+ gt_pointer_walker pchw;
+};
+#define LAST_GGC_ROOT_TAB { NULL, 0, 0, NULL, NULL }
+/* Pointers to arrays of ggc_root_tab, terminated by NULL. */
+extern const struct ggc_root_tab * const gt_ggc_rtab[];
+extern const struct ggc_root_tab * const gt_ggc_deletable_rtab[];
+extern const struct ggc_root_tab * const gt_pch_scalar_rtab[];
+
+/* If EXPR is not NULL and previously unmarked, mark it and evaluate
+ to true. Otherwise evaluate to false. */
+#define ggc_test_and_set_mark(EXPR) \
+ ((EXPR) != NULL && ((void *) (EXPR)) != (void *) 1 && ! ggc_set_mark (EXPR))
+
+#define ggc_mark(EXPR) \
+ do { \
+ const void *const a__ = (EXPR); \
+ if (a__ != NULL && a__ != (void *) 1) \
+ ggc_set_mark (a__); \
+ } while (0)
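+
+/* Typical marking idiom in generated gt_ggc_mx_* walkers (sketch with
+ a hypothetical type): mark the node itself, and recurse into its
+ children only on the first visit:
+
+ if (ggc_test_and_set_mark (x))
+ gt_ggc_mx (x->child); */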
+
+/* Actually set the mark on a particular region of memory, but don't
+ follow pointers. This function is called by ggc_mark_*. It
+ returns zero if the object was not previously marked; nonzero if
+ the object was already marked, or if, for any other reason,
+ pointers in this data structure should not be traversed. */
+extern int ggc_set_mark (const void *);
+
+/* Return 1 if P has been marked, zero otherwise.
+ P must have been allocated by the GC allocator; it mustn't point to
+ static objects, stack variables, or memory allocated with malloc. */
+extern int ggc_marked_p (const void *);
+
+/* PCH and GGC handling for strings, mostly trivial. */
+extern void gt_pch_n_S (const void *);
+extern void gt_pch_n_S2 (const void *, size_t);
+extern void gt_ggc_m_S (const void *);
+
+/* End of GTY machinery API. */
+
+/* Initialize the string pool. */
+extern void init_stringpool (void);
+
+/* Initialize the garbage collector. */
+extern void init_ggc (void);
+
+/* When true, identifier nodes are considered as GC roots. When
+ false, identifier nodes are treated like any other GC-allocated
+ object, and the identifier hash table is treated as a weak
+ hash. */
+extern bool ggc_protect_identifiers;
+
+/* Write out all GCed objects to F. */
+extern void gt_pch_save (FILE *f);
+
+
+/* Allocation. */
+
+/* The internal primitive. */
+extern void *ggc_internal_alloc (size_t, void (*)(void *), size_t,
+ size_t CXX_MEM_STAT_INFO)
+ ATTRIBUTE_MALLOC;
+
+inline void *
+ggc_internal_alloc (size_t s CXX_MEM_STAT_INFO)
+{
+ return ggc_internal_alloc (s, NULL, 0, 1 PASS_MEM_STAT);
+}
+
+extern size_t ggc_round_alloc_size (size_t requested_size);
+
+/* Allocates cleared memory. */
+extern void *ggc_internal_cleared_alloc (size_t, void (*)(void *),
+ size_t, size_t
+ CXX_MEM_STAT_INFO) ATTRIBUTE_MALLOC;
+
+inline void *
+ggc_internal_cleared_alloc (size_t s CXX_MEM_STAT_INFO)
+{
+ return ggc_internal_cleared_alloc (s, NULL, 0, 1 PASS_MEM_STAT);
+}
+
+/* Resize a block. */
+extern void *ggc_realloc (void *, size_t CXX_MEM_STAT_INFO);
+
+/* Free a block. To be used when known for certain it's not reachable. */
+extern void ggc_free (void *);
+
+extern void dump_ggc_loc_statistics ();
+
+/* Reallocator. */
+#define GGC_RESIZEVEC(T, P, N) \
+ ((T *) ggc_realloc ((P), (N) * sizeof (T) MEM_STAT_INFO))
+
+template<typename T>
+void
+finalize (void *p)
+{
+ static_cast<T *> (p)->~T ();
+}
+
+template<typename T>
+inline bool
+need_finalization_p ()
+{
+#if GCC_VERSION >= 4003
+ return !__has_trivial_destructor (T);
+#else
+ return true;
+#endif
+}
+
+template<typename T>
+inline T *
+ggc_alloc (ALONE_CXX_MEM_STAT_INFO)
+{
+ if (need_finalization_p<T> ())
+ return static_cast<T *> (ggc_internal_alloc (sizeof (T), finalize<T>, 0, 1
+ PASS_MEM_STAT));
+ else
+ return static_cast<T *> (ggc_internal_alloc (sizeof (T), NULL, 0, 1
+ PASS_MEM_STAT));
+}
+
+/* GGC allocation function that does not call the finalizer, even for
+ types whose need_finalization_p is true. The caller is responsible
+ for invoking the destructor. */
+
+template<typename T>
+inline T *
+ggc_alloc_no_dtor (ALONE_CXX_MEM_STAT_INFO)
+{
+ return static_cast<T *> (ggc_internal_alloc (sizeof (T), NULL, 0, 1
+ PASS_MEM_STAT));
+}
+
+template<typename T>
+inline T *
+ggc_cleared_alloc (ALONE_CXX_MEM_STAT_INFO)
+{
+ if (need_finalization_p<T> ())
+ return static_cast<T *> (ggc_internal_cleared_alloc (sizeof (T),
+ finalize<T>, 0, 1
+ PASS_MEM_STAT));
+ else
+ return static_cast<T *> (ggc_internal_cleared_alloc (sizeof (T), NULL, 0, 1
+ PASS_MEM_STAT));
+}
+
+template<typename T>
+inline T *
+ggc_vec_alloc (size_t c CXX_MEM_STAT_INFO)
+{
+ if (need_finalization_p<T> ())
+ return static_cast<T *> (ggc_internal_alloc (c * sizeof (T), finalize<T>,
+ sizeof (T), c PASS_MEM_STAT));
+ else
+ return static_cast<T *> (ggc_internal_alloc (c * sizeof (T), NULL, 0, 0
+ PASS_MEM_STAT));
+}
+
+template<typename T>
+inline T *
+ggc_cleared_vec_alloc (size_t c CXX_MEM_STAT_INFO)
+{
+ if (need_finalization_p<T> ())
+ return static_cast<T *> (ggc_internal_cleared_alloc (c * sizeof (T),
+ finalize<T>,
+ sizeof (T), c
+ PASS_MEM_STAT));
+ else
+ return static_cast<T *> (ggc_internal_cleared_alloc (c * sizeof (T), NULL,
+ 0, 0 PASS_MEM_STAT));
+}
+
+inline void *
+ggc_alloc_atomic (size_t s CXX_MEM_STAT_INFO)
+{
+ return ggc_internal_alloc (s PASS_MEM_STAT);
+}
+
+/* Call destructor and free the garbage collected memory. */
+
+template <typename T>
+inline void
+ggc_delete (T *ptr)
+{
+ ptr->~T ();
+ ggc_free (ptr);
+}
+
+/* Allocate a gc-able string, and fill it with LENGTH bytes from CONTENTS.
+ If LENGTH is -1, then CONTENTS is assumed to be a null-terminated
+ string and the memory is sized accordingly. */
+extern const char *ggc_alloc_string (const char *contents, int length
+ CXX_MEM_STAT_INFO);
+
+/* Make a copy of S, in GC-able memory. */
+#define ggc_strdup(S) ggc_alloc_string ((S), -1 MEM_STAT_INFO)
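+
+/* Usage sketch, with foo standing for some GTY-marked type:
+
+ foo *p = ggc_cleared_alloc<foo> (); // zero-filled single object
+ rtx *v = ggc_vec_alloc<rtx> (n); // vector of n elements
+ const char *s = ggc_strdup ("tmp"); // GC-managed string copy
+
+ None of this is freed eagerly; it is reclaimed by a later
+ ggc_collect () unless still reachable from a GC root. */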
+
+/* Invoke the collector. Garbage collection occurs only when this
+ function is called, not during allocations. */
+enum ggc_collect {
+ GGC_COLLECT_HEURISTIC,
+ GGC_COLLECT_FORCE
+};
+extern void ggc_collect (enum ggc_collect mode = GGC_COLLECT_HEURISTIC);
+
+/* Return unused memory pages to the system. */
+extern void ggc_trim (void);
+
+/* Assume that all GGC memory is reachable and grow the limits for next collection. */
+extern void ggc_grow (void);
+
+/* Register an additional root table. This can be useful for some
+ plugins. Does nothing if the passed pointer is NULL. */
+extern void ggc_register_root_tab (const struct ggc_root_tab *);
+
+/* Read objects previously saved with gt_pch_save from F. */
+extern void gt_pch_restore (FILE *f);
+
+/* Statistics. */
+
+/* Print allocation statistics. */
+extern void ggc_print_statistics (void);
+
+extern void stringpool_statistics (void);
+
+/* Heuristics. */
+extern void init_ggc_heuristics (void);
+
+/* Report current heap memory use to stderr. */
+extern void report_heap_memory_use (void);
+
+#define ggc_alloc_rtvec_sized(NELT) \
+ (rtvec_def *) ggc_internal_alloc (sizeof (struct rtvec_def) \
+ + ((NELT) - 1) * sizeof (rtx)) \
+
+/* Memory statistics passing versions of some allocators. Too few of them to
+ make gengtype produce them, so just define the needed ones here. */
+inline struct rtx_def *
+ggc_alloc_rtx_def_stat (size_t s CXX_MEM_STAT_INFO)
+{
+ return (struct rtx_def *) ggc_internal_alloc (s PASS_MEM_STAT);
+}
+
+inline union tree_node *
+ggc_alloc_tree_node_stat (size_t s CXX_MEM_STAT_INFO)
+{
+ return (union tree_node *) ggc_internal_alloc (s PASS_MEM_STAT);
+}
+
+inline union tree_node *
+ggc_alloc_cleared_tree_node_stat (size_t s CXX_MEM_STAT_INFO)
+{
+ return (union tree_node *) ggc_internal_cleared_alloc (s PASS_MEM_STAT);
+}
+
+inline gimple *
+ggc_alloc_cleared_gimple_statement_stat (size_t s CXX_MEM_STAT_INFO)
+{
+ return (gimple *) ggc_internal_cleared_alloc (s PASS_MEM_STAT);
+}
+
+inline void
+gt_ggc_mx (const char *s)
+{
+ ggc_test_and_set_mark (const_cast<char *> (s));
+}
+
+inline void
+gt_pch_nx (const char *)
+{
+}
+
+inline void gt_pch_nx (bool) { }
+inline void gt_pch_nx (char) { }
+inline void gt_pch_nx (signed char) { }
+inline void gt_pch_nx (unsigned char) { }
+inline void gt_pch_nx (short) { }
+inline void gt_pch_nx (unsigned short) { }
+inline void gt_pch_nx (int) { }
+inline void gt_pch_nx (unsigned int) { }
+inline void gt_pch_nx (long int) { }
+inline void gt_pch_nx (unsigned long int) { }
+inline void gt_pch_nx (long long int) { }
+inline void gt_pch_nx (unsigned long long int) { }
+
+inline void gt_ggc_mx (bool) { }
+inline void gt_ggc_mx (char) { }
+inline void gt_ggc_mx (signed char) { }
+inline void gt_ggc_mx (unsigned char) { }
+inline void gt_ggc_mx (short) { }
+inline void gt_ggc_mx (unsigned short) { }
+inline void gt_ggc_mx (int) { }
+inline void gt_ggc_mx (unsigned int) { }
+inline void gt_ggc_mx (long int) { }
+inline void gt_ggc_mx (unsigned long int) { }
+inline void gt_ggc_mx (long long int) { }
+inline void gt_ggc_mx (unsigned long long int) { }
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-array-bounds.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-array-bounds.h
new file mode 100644
index 0000000..13e5f29
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-array-bounds.h
@@ -0,0 +1,49 @@
+/* Array bounds checking.
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_ARRAY_BOUNDS_H
+#define GCC_GIMPLE_ARRAY_BOUNDS_H
+
+#include "pointer-query.h"
+
+class array_bounds_checker
+{
+ friend class check_array_bounds_dom_walker;
+
+public:
+ array_bounds_checker (struct function *, range_query *);
+ void check ();
+
+private:
+ static tree check_array_bounds (tree *tp, int *walk_subtree, void *data);
+ bool check_array_ref (location_t, tree, gimple *, bool ignore_off_by_one);
+ bool check_mem_ref (location_t, tree, bool ignore_off_by_one);
+ void check_addr_expr (location_t, tree, gimple *);
+ const value_range *get_value_range (const_tree op, gimple *);
+
+ /* Current function. */
+ struct function *fun;
+ /* A pointer_query object to store information about pointers and
+ their targets in. */
+ pointer_query m_ptr_qry;
+ /* Current statement. */
+ gimple *m_stmt;
+};
+
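+/* Usage sketch: instantiate the checker over a function and a range
+ query, then run it:
+
+ array_bounds_checker checker (fun, &query);
+ checker.check ();
+
+ where query is some range_query implementation. */
+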
+#endif // GCC_GIMPLE_ARRAY_BOUNDS_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-builder.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-builder.h
new file mode 100644
index 0000000..f9dd9fb
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-builder.h
@@ -0,0 +1,36 @@
+/* Header file for high level statement building routines.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_GIMPLE_BUILDER_H
+#define GCC_GIMPLE_BUILDER_H
+
+/* ??? This API is legacy and should not be used in new code. */
+
+gassign *build_assign (enum tree_code, tree, int, tree lhs = NULL_TREE);
+gassign *build_assign (enum tree_code, gimple *, int, tree lhs = NULL_TREE);
+gassign *build_assign (enum tree_code, tree, tree, tree lhs = NULL_TREE);
+gassign *build_assign (enum tree_code, gimple *, tree, tree lhs = NULL_TREE);
+gassign *build_assign (enum tree_code, tree, gimple *, tree lhs = NULL_TREE);
+gassign *build_assign (enum tree_code, gimple *, gimple *,
+ tree lhs = NULL_TREE);
+gassign *build_type_cast (tree, tree, tree lhs = NULL_TREE);
+gassign *build_type_cast (tree, gimple *, tree lhs = NULL_TREE);
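+
+/* Usage sketch: when no explicit LHS is given these routines are
+ expected to create one, so e.g.
+
+ gassign *g = build_assign (PLUS_EXPR, op0, op1);
+
+ builds a statement computing op0 + op1 into a fresh temporary; op0
+ and op1 stand for hypothetical tree operands. */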
+
+#endif /* GCC_GIMPLE_BUILDER_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-expr.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-expr.h
new file mode 100644
index 0000000..e89f7e2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-expr.h
@@ -0,0 +1,179 @@
+/* Header file for gimple decl, type and expressions.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_EXPR_H
+#define GCC_GIMPLE_EXPR_H
+
+extern bool useless_type_conversion_p (tree, tree);
+
+
+extern void gimple_set_body (tree, gimple_seq);
+extern gimple_seq gimple_body (tree);
+extern bool gimple_has_body_p (tree);
+extern const char *gimple_decl_printable_name (tree, int);
+extern tree copy_var_decl (tree, tree, tree);
+extern tree create_tmp_var_name (const char *);
+extern tree create_tmp_var_raw (tree, const char * = NULL);
+extern tree create_tmp_var (tree, const char * = NULL);
+extern tree create_tmp_reg (tree, const char * = NULL);
+extern tree create_tmp_reg_fn (struct function *, tree, const char *);
+
+
+extern void extract_ops_from_tree (tree, enum tree_code *, tree *, tree *,
+ tree *);
+extern void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *,
+ tree *);
+extern bool is_gimple_lvalue (tree);
+extern bool is_gimple_condexpr_for_cond (tree);
+extern bool is_gimple_address (const_tree);
+extern bool is_gimple_invariant_address (const_tree);
+extern bool is_gimple_ip_invariant_address (const_tree);
+extern bool is_gimple_min_invariant (const_tree);
+extern bool is_gimple_ip_invariant (const_tree);
+extern bool is_gimple_reg (tree);
+extern bool is_gimple_val (tree);
+extern bool is_gimple_asm_val (tree);
+extern bool is_gimple_min_lval (tree);
+extern bool is_gimple_call_addr (tree);
+extern bool is_gimple_mem_ref_addr (tree);
+extern void flush_mark_addressable_queue (void);
+extern void mark_addressable (tree);
+extern bool is_gimple_reg_rhs (tree);
+extern tree canonicalize_cond_expr_cond (tree);
+
+/* Return true if a conversion in either direction between TYPE1 and TYPE2
+ is not required. Otherwise return false. */
+
+inline bool
+types_compatible_p (tree type1, tree type2)
+{
+ return (type1 == type2
+ || (useless_type_conversion_p (type1, type2)
+ && useless_type_conversion_p (type2, type1)));
+}
+
+/* Return true if TYPE is a suitable type for a scalar register variable. */
+
+inline bool
+is_gimple_reg_type (tree type)
+{
+ return !AGGREGATE_TYPE_P (type);
+}
+
+/* Return true if T is a variable. */
+
+inline bool
+is_gimple_variable (tree t)
+{
+ return (TREE_CODE (t) == VAR_DECL
+ || TREE_CODE (t) == PARM_DECL
+ || TREE_CODE (t) == RESULT_DECL
+ || TREE_CODE (t) == SSA_NAME);
+}
+
+/* Return true if T is a GIMPLE identifier (something with an address). */
+
+inline bool
+is_gimple_id (tree t)
+{
+ return (is_gimple_variable (t)
+ || TREE_CODE (t) == FUNCTION_DECL
+ || TREE_CODE (t) == LABEL_DECL
+ || TREE_CODE (t) == CONST_DECL
+ /* Allow string constants, since they are addressable. */
+ || TREE_CODE (t) == STRING_CST);
+}
+
+/* Return true if OP, an SSA name or a DECL, is a virtual operand. */
+
+inline bool
+virtual_operand_p (tree op)
+{
+ if (TREE_CODE (op) == SSA_NAME)
+ return SSA_NAME_IS_VIRTUAL_OPERAND (op);
+
+ if (TREE_CODE (op) == VAR_DECL)
+ return VAR_DECL_IS_VIRTUAL_OPERAND (op);
+
+ return false;
+}
+
+/* Return true if T is something whose address can be taken. */
+
+inline bool
+is_gimple_addressable (tree t)
+{
+ return (is_gimple_id (t) || handled_component_p (t)
+ || TREE_CODE (t) == TARGET_MEM_REF
+ || TREE_CODE (t) == MEM_REF);
+}
+
+/* Return true if T is a valid gimple constant. */
+
+inline bool
+is_gimple_constant (const_tree t)
+{
+ switch (TREE_CODE (t))
+ {
+ case INTEGER_CST:
+ case POLY_INT_CST:
+ case REAL_CST:
+ case FIXED_CST:
+ case COMPLEX_CST:
+ case VECTOR_CST:
+ case STRING_CST:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/* A wrapper around extract_ops_from_tree with 3 ops, for callers that
+ expect at most two operands. */
+
+inline void
+extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0,
+ tree *op1)
+{
+ tree op2;
+ extract_ops_from_tree (expr, code, op0, op1, &op2);
+ gcc_assert (op2 == NULL_TREE);
+}
+
+/* Given a valid GIMPLE_CALL function address return the FUNCTION_DECL
+ associated with the callee if known. Otherwise return NULL_TREE. */
+
+inline tree
+gimple_call_addr_fndecl (const_tree fn)
+{
+ if (fn && TREE_CODE (fn) == ADDR_EXPR)
+ {
+ tree fndecl = TREE_OPERAND (fn, 0);
+ if (TREE_CODE (fndecl) == MEM_REF
+ && TREE_CODE (TREE_OPERAND (fndecl, 0)) == ADDR_EXPR
+ && integer_zerop (TREE_OPERAND (fndecl, 1)))
+ fndecl = TREE_OPERAND (TREE_OPERAND (fndecl, 0), 0);
+ if (TREE_CODE (fndecl) == FUNCTION_DECL)
+ return fndecl;
+ }
+ return NULL_TREE;
+}
+
+#endif /* GCC_GIMPLE_EXPR_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-fold.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-fold.h
new file mode 100644
index 0000000..2fd58db
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-fold.h
@@ -0,0 +1,281 @@
+/* Gimple folding definitions.
+
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+ Contributed by Richard Guenther <rguenther@suse.de>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_FOLD_H
+#define GCC_GIMPLE_FOLD_H
+
+extern tree create_tmp_reg_or_ssa_name (tree, gimple *stmt = NULL);
+extern tree canonicalize_constructor_val (tree, tree);
+extern tree get_symbol_constant_value (tree);
+struct c_strlen_data;
+extern bool get_range_strlen (tree, c_strlen_data *, unsigned eltsize);
+extern void gimplify_and_update_call_from_tree (gimple_stmt_iterator *, tree);
+extern bool update_gimple_call (gimple_stmt_iterator *, tree, int, ...);
+extern bool fold_stmt (gimple_stmt_iterator *);
+extern bool fold_stmt (gimple_stmt_iterator *, tree (*) (tree));
+extern bool fold_stmt_inplace (gimple_stmt_iterator *);
+extern tree maybe_fold_and_comparisons (tree, enum tree_code, tree, tree,
+ enum tree_code, tree, tree,
+ basic_block = nullptr);
+extern tree maybe_fold_or_comparisons (tree, enum tree_code, tree, tree,
+ enum tree_code, tree, tree,
+ basic_block = nullptr);
+extern bool optimize_atomic_compare_exchange_p (gimple *);
+extern void fold_builtin_atomic_compare_exchange (gimple_stmt_iterator *);
+extern tree no_follow_ssa_edges (tree);
+extern tree follow_single_use_edges (tree);
+extern tree follow_all_ssa_edges (tree);
+extern tree gimple_fold_stmt_to_constant_1 (gimple *, tree (*) (tree),
+ tree (*) (tree) = no_follow_ssa_edges);
+extern tree gimple_fold_stmt_to_constant (gimple *, tree (*) (tree));
+extern tree fold_ctor_reference (tree, tree, const poly_uint64&,
+ const poly_uint64&, tree,
+ unsigned HOST_WIDE_INT * = NULL);
+extern tree fold_const_aggregate_ref_1 (tree, tree (*) (tree));
+extern tree fold_const_aggregate_ref (tree);
+extern tree gimple_get_virt_method_for_binfo (HOST_WIDE_INT, tree,
+ bool *can_refer = NULL);
+extern tree gimple_get_virt_method_for_vtable (HOST_WIDE_INT, tree,
+ unsigned HOST_WIDE_INT,
+ bool *can_refer = NULL);
+extern tree gimple_fold_indirect_ref (tree);
+extern bool gimple_fold_builtin_sprintf (gimple_stmt_iterator *);
+extern bool gimple_fold_builtin_snprintf (gimple_stmt_iterator *);
+extern bool arith_code_with_undefined_signed_overflow (tree_code);
+extern gimple_seq rewrite_to_defined_overflow (gimple *, bool = false);
+extern void replace_call_with_value (gimple_stmt_iterator *, tree);
+extern tree tree_vec_extract (gimple_stmt_iterator *, tree, tree, tree, tree);
+extern void gsi_replace_with_seq_vops (gimple_stmt_iterator *, gimple_seq);
+
+/* gimple_build, functionally matching fold_buildN, outputs stmts
+ into the provided sequence, matching and simplifying them on-the-fly.
+ It is intended to replace force_gimple_operand (fold_buildN (...), ...). */
+extern tree gimple_build (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, enum tree_code, tree, tree);
+extern tree gimple_build (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, enum tree_code, tree, tree, tree);
+extern tree gimple_build (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, enum tree_code, tree, tree, tree, tree);
+template<class ...Args>
+inline tree
+gimple_build (gimple_seq *seq, location_t loc,
+ enum tree_code code, tree type, Args ...ops)
+{
+ static_assert (sizeof...(ops) > 0 && sizeof...(ops) <= 3,
+ "Number of operands must be from one to three");
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_build (&gsi, false, GSI_CONTINUE_LINKING,
+ loc, code, type, ops...);
+}
+template<class ...Args>
+inline tree
+gimple_build (gimple_seq *seq, enum tree_code code, tree type, Args ...ops)
+{
+ static_assert (sizeof...(ops) > 0 && sizeof...(ops) <= 3,
+ "Number of operands must be from one to three");
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_build (&gsi, false, GSI_CONTINUE_LINKING,
+ UNKNOWN_LOCATION, code, type, ops...);
+}
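+
+/* Editorial usage sketch, not upstream: emit "a + b" at the end of SEQ,
+ folding on the fly, e.g.
+
+ gimple_seq seq = NULL;
+ tree sum = gimple_build (&seq, PLUS_EXPR, integer_type_node, a, b);
+
+ SUM is a valid gimple operand and may be a constant if the expression
+ simplified away. */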
+
+extern tree gimple_build (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, combined_fn, tree);
+extern tree gimple_build (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, combined_fn, tree, tree);
+extern tree gimple_build (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, combined_fn, tree, tree, tree);
+extern tree gimple_build (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, combined_fn, tree, tree, tree, tree);
+template<class ...Args>
+inline tree
+gimple_build (gimple_seq *seq, location_t loc,
+ combined_fn fn, tree type, Args ...args)
+{
+ static_assert (sizeof...(args) < 4,
+ "Number of arguments must be less than four");
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_build (&gsi, false, GSI_CONTINUE_LINKING,
+ loc, fn, type, args...);
+}
+template<class ...Args>
+inline tree
+gimple_build (gimple_seq *seq, combined_fn fn, tree type, Args ...args)
+{
+ static_assert (sizeof...(args) < 4,
+ "Number of arguments must be less than four");
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_build (&gsi, false, GSI_CONTINUE_LINKING,
+ UNKNOWN_LOCATION, fn, type, args...);
+}
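+
+/* Editorial usage sketch, not upstream: the combined_fn overloads build
+ calls to internal or builtin functions the same way, e.g.
+
+ tree s = gimple_build (&seq, CFN_SQRT, double_type_node, x); */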
+
+extern tree gimple_build (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, code_helper, tree, tree);
+extern tree gimple_build (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, code_helper, tree, tree, tree);
+extern tree gimple_build (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, code_helper, tree, tree, tree, tree);
+
+template<class ...Args>
+inline tree
+gimple_build (gimple_seq *seq, location_t loc,
+ code_helper code, tree type, Args ...ops)
+{
+ static_assert (sizeof...(ops) < 4,
+ "Number of operands must be less than four");
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_build (&gsi, false, GSI_CONTINUE_LINKING,
+ loc, code, type, ops...);
+}
+template<class ...Args>
+inline tree
+gimple_build (gimple_seq *seq,
+ code_helper code, tree type, Args ...ops)
+{
+ static_assert (sizeof...(ops) < 4,
+ "Number of operands must be less than four");
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_build (&gsi, false, GSI_CONTINUE_LINKING,
+ UNKNOWN_LOCATION, code, type, ops...);
+}
+
+extern tree gimple_convert (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, tree, tree);
+inline tree
+gimple_convert (gimple_seq *seq, location_t loc, tree type, tree op)
+{
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_convert (&gsi, false, GSI_CONTINUE_LINKING, loc, type, op);
+}
+inline tree
+gimple_convert (gimple_seq *seq, tree type, tree op)
+{
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_convert (&gsi, false, GSI_CONTINUE_LINKING,
+ UNKNOWN_LOCATION, type, op);
+}
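+
+/* Editorial usage sketch, not upstream: gimple_convert emits a cast only
+ when one is actually needed, e.g.
+
+ tree off = gimple_convert (&seq, sizetype, op);
+
+ If OP already has type sizetype, OP itself is returned and SEQ is left
+ untouched. */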
+
+extern tree gimple_convert_to_ptrofftype (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, tree);
+inline tree
+gimple_convert_to_ptrofftype (gimple_seq *seq, location_t loc, tree op)
+{
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_convert_to_ptrofftype (&gsi, false, GSI_CONTINUE_LINKING,
+ loc, op);
+}
+inline tree
+gimple_convert_to_ptrofftype (gimple_seq *seq, tree op)
+{
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_convert_to_ptrofftype (&gsi, false, GSI_CONTINUE_LINKING,
+ UNKNOWN_LOCATION, op);
+}
+
+extern tree gimple_build_vector_from_val (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, tree, tree);
+inline tree
+gimple_build_vector_from_val (gimple_seq *seq, location_t loc,
+ tree type, tree op)
+{
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_build_vector_from_val (&gsi, false, GSI_CONTINUE_LINKING,
+ loc, type, op);
+}
+inline tree
+gimple_build_vector_from_val (gimple_seq *seq, tree type, tree op)
+{
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_build_vector_from_val (&gsi, false, GSI_CONTINUE_LINKING,
+ UNKNOWN_LOCATION, type, op);
+}
+
+class tree_vector_builder;
+extern tree gimple_build_vector (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, tree_vector_builder *);
+inline tree
+gimple_build_vector (gimple_seq *seq, location_t loc,
+ tree_vector_builder *builder)
+{
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_build_vector (&gsi, false, GSI_CONTINUE_LINKING,
+ loc, builder);
+}
+inline tree
+gimple_build_vector (gimple_seq *seq, tree_vector_builder *builder)
+{
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_build_vector (&gsi, false, GSI_CONTINUE_LINKING,
+ UNKNOWN_LOCATION, builder);
+}
+
+extern tree gimple_build_round_up (gimple_stmt_iterator *, bool,
+ enum gsi_iterator_update,
+ location_t, tree, tree,
+ unsigned HOST_WIDE_INT);
+inline tree
+gimple_build_round_up (gimple_seq *seq, location_t loc,
+ tree type, tree old_size, unsigned HOST_WIDE_INT align)
+{
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_build_round_up (&gsi, false, GSI_CONTINUE_LINKING,
+ loc, type, old_size, align);
+}
+inline tree
+gimple_build_round_up (gimple_seq *seq, tree type, tree old_size,
+ unsigned HOST_WIDE_INT align)
+{
+ gimple_stmt_iterator gsi = gsi_last (*seq);
+ return gimple_build_round_up (&gsi, false, GSI_CONTINUE_LINKING,
+ UNKNOWN_LOCATION, type, old_size, align);
+}
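+
+/* Editorial usage sketch, not upstream: round SIZE up to a multiple of a
+ power-of-two alignment, e.g.
+
+ tree rounded = gimple_build_round_up (&seq, sizetype, size, 16); */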
+
+extern bool gimple_stmt_nonnegative_warnv_p (gimple *, bool *, int = 0);
+extern bool gimple_stmt_integer_valued_real_p (gimple *, int = 0);
+
+/* In gimple-match.cc. */
+extern tree gimple_simplify (enum tree_code, tree, tree,
+ gimple_seq *, tree (*)(tree));
+extern tree gimple_simplify (enum tree_code, tree, tree, tree,
+ gimple_seq *, tree (*)(tree));
+extern tree gimple_simplify (enum tree_code, tree, tree, tree, tree,
+ gimple_seq *, tree (*)(tree));
+extern tree gimple_simplify (combined_fn, tree, tree,
+ gimple_seq *, tree (*)(tree));
+extern tree gimple_simplify (combined_fn, tree, tree, tree,
+ gimple_seq *, tree (*)(tree));
+extern tree gimple_simplify (combined_fn, tree, tree, tree, tree,
+ gimple_seq *, tree (*)(tree));
+
+#endif /* GCC_GIMPLE_FOLD_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-iterator.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-iterator.h
new file mode 100644
index 0000000..38352aa
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-iterator.h
@@ -0,0 +1,415 @@
+/* Header file for gimple iterators.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_ITERATOR_H
+#define GCC_GIMPLE_ITERATOR_H
+
+/* Iterator object for GIMPLE statement sequences. */
+
+struct gimple_stmt_iterator
+{
+ /* Sequence node holding the current statement. */
+ gimple_seq_node ptr;
+
+ /* Sequence and basic block holding the statement. These fields
+ are necessary to handle edge cases such as when statement is
+ added to an empty basic block or when the last statement of a
+ block/sequence is removed. */
+ gimple_seq *seq;
+ basic_block bb;
+};
+
+/* Iterator over GIMPLE_PHI statements. */
+struct gphi_iterator : public gimple_stmt_iterator
+{
+ gphi *phi () const
+ {
+ return as_a <gphi *> (ptr);
+ }
+};
+
+enum gsi_iterator_update
+{
+ GSI_NEW_STMT = 2, /* Move the iterator to the first statement added. */
+ GSI_LAST_NEW_STMT, /* Move the iterator to the last statement added. */
+ GSI_SAME_STMT, /* Leave the iterator at the same statement. */
+ GSI_CONTINUE_LINKING /* Move iterator to whatever position is suitable
+ for linking other statements in the same
+ direction. */
+};
+
+extern void gsi_insert_seq_before_without_update (gimple_stmt_iterator *,
+ gimple_seq,
+ enum gsi_iterator_update);
+extern void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq,
+ enum gsi_iterator_update);
+extern void gsi_insert_seq_after_without_update (gimple_stmt_iterator *,
+ gimple_seq,
+ enum gsi_iterator_update);
+extern void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq,
+ enum gsi_iterator_update);
+extern gimple_seq gsi_split_seq_after (gimple_stmt_iterator);
+extern void gsi_set_stmt (gimple_stmt_iterator *, gimple *);
+extern void gsi_split_seq_before (gimple_stmt_iterator *, gimple_seq *);
+extern bool gsi_replace (gimple_stmt_iterator *, gimple *, bool);
+extern void gsi_replace_with_seq (gimple_stmt_iterator *, gimple_seq, bool);
+extern void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple *,
+ enum gsi_iterator_update);
+extern void gsi_insert_before (gimple_stmt_iterator *, gimple *,
+ enum gsi_iterator_update);
+extern void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple *,
+ enum gsi_iterator_update);
+extern void gsi_insert_after (gimple_stmt_iterator *, gimple *,
+ enum gsi_iterator_update);
+extern bool gsi_remove (gimple_stmt_iterator *, bool);
+extern gimple_stmt_iterator gsi_for_stmt (gimple *);
+extern gimple_stmt_iterator gsi_for_stmt (gimple *, gimple_seq *);
+extern gphi_iterator gsi_for_phi (gphi *);
+extern void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *);
+extern void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *);
+extern void gsi_move_to_bb_end (gimple_stmt_iterator *, basic_block);
+extern void gsi_insert_on_edge (edge, gimple *);
+extern void gsi_insert_seq_on_edge (edge, gimple_seq);
+extern basic_block gsi_insert_on_edge_immediate (edge, gimple *);
+extern basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq);
+extern void gsi_commit_edge_inserts (void);
+extern void gsi_commit_one_edge_insert (edge, basic_block *);
+extern gphi_iterator gsi_start_phis (basic_block);
+extern void update_modified_stmts (gimple_seq);
+
+/* Return a new iterator pointing to GIMPLE_SEQ's first statement. */
+
+inline gimple_stmt_iterator
+gsi_start (gimple_seq &seq)
+{
+ gimple_stmt_iterator i;
+
+ i.ptr = gimple_seq_first (seq);
+ i.seq = &seq;
+ i.bb = i.ptr ? gimple_bb (i.ptr) : NULL;
+
+ return i;
+}
+
+inline gimple_stmt_iterator
+gsi_none (void)
+{
+ gimple_stmt_iterator i;
+ i.ptr = NULL;
+ i.seq = NULL;
+ i.bb = NULL;
+ return i;
+}
+
+/* Return a new iterator pointing to the first statement in basic block BB. */
+
+inline gimple_stmt_iterator
+gsi_start_bb (basic_block bb)
+{
+ gimple_stmt_iterator i;
+ gimple_seq *seq;
+
+ seq = bb_seq_addr (bb);
+ i.ptr = gimple_seq_first (*seq);
+ i.seq = seq;
+ i.bb = bb;
+
+ return i;
+}
+
+gimple_stmt_iterator gsi_start_edge (edge e);
+
+/* Return a new iterator initially pointing to GIMPLE_SEQ's last statement. */
+
+inline gimple_stmt_iterator
+gsi_last (gimple_seq &seq)
+{
+ gimple_stmt_iterator i;
+
+ i.ptr = gimple_seq_last (seq);
+ i.seq = &seq;
+ i.bb = i.ptr ? gimple_bb (i.ptr) : NULL;
+
+ return i;
+}
+
+/* Return a new iterator pointing to the last statement in basic block BB. */
+
+inline gimple_stmt_iterator
+gsi_last_bb (basic_block bb)
+{
+ gimple_stmt_iterator i;
+ gimple_seq *seq;
+
+ seq = bb_seq_addr (bb);
+ i.ptr = gimple_seq_last (*seq);
+ i.seq = seq;
+ i.bb = bb;
+
+ return i;
+}
+
+/* Return true if I is at the end of its sequence. */
+
+inline bool
+gsi_end_p (gimple_stmt_iterator i)
+{
+ return i.ptr == NULL;
+}
+
+/* Return true if I is one statement before the end of its sequence. */
+
+inline bool
+gsi_one_before_end_p (gimple_stmt_iterator i)
+{
+ return i.ptr != NULL && i.ptr->next == NULL;
+}
+
+/* Advance the iterator to the next gimple statement. */
+
+inline void
+gsi_next (gimple_stmt_iterator *i)
+{
+ i->ptr = i->ptr->next;
+}
+
+/* Advance the iterator to the previous gimple statement. */
+
+inline void
+gsi_prev (gimple_stmt_iterator *i)
+{
+ gimple *prev = i->ptr->prev;
+ if (prev->next)
+ i->ptr = prev;
+ else
+ i->ptr = NULL;
+}
+
+/* Return the current stmt. */
+
+inline gimple *
+gsi_stmt (gimple_stmt_iterator i)
+{
+ return i.ptr;
+}
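+
+/* Editorial usage sketch, not upstream: the canonical statement walk over
+ a basic block combines the accessors above, e.g.
+
+ for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+ !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+ ...
+ } */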
+
+/* Return a block statement iterator that points to the first
+ non-label statement in block BB. */
+
+inline gimple_stmt_iterator
+gsi_after_labels (basic_block bb)
+{
+ gimple_stmt_iterator gsi = gsi_start_bb (bb);
+
+ for (; !gsi_end_p (gsi); )
+ {
+ if (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL)
+ gsi_next (&gsi);
+ else
+ break;
+ }
+
+ return gsi;
+}
+
+/* Return a statement iterator that points to the first
+ non-label statement in sequence SEQ. */
+
+inline gimple_stmt_iterator
+gsi_after_labels (gimple_seq &seq)
+{
+ gimple_stmt_iterator gsi = gsi_start (seq);
+
+ for (; !gsi_end_p (gsi); )
+ {
+ if (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL)
+ gsi_next (&gsi);
+ else
+ break;
+ }
+
+ return gsi;
+}
+
+/* Advance the iterator to the next non-debug gimple statement. */
+
+inline void
+gsi_next_nondebug (gimple_stmt_iterator *i)
+{
+ do
+ {
+ gsi_next (i);
+ }
+ while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
+}
+
+/* Advance the iterator to the previous non-debug gimple statement. */
+
+inline void
+gsi_prev_nondebug (gimple_stmt_iterator *i)
+{
+ do
+ {
+ gsi_prev (i);
+ }
+ while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
+}
+
+/* Return a new iterator pointing to the first non-debug statement in
+ SEQ. */
+
+inline gimple_stmt_iterator
+gsi_start_nondebug (gimple_seq seq)
+{
+ gimple_stmt_iterator gsi = gsi_start (seq);
+ if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
+ gsi_next_nondebug (&gsi);
+
+ return gsi;
+}
+
+/* Return a new iterator pointing to the first non-debug statement in
+ basic block BB. */
+
+inline gimple_stmt_iterator
+gsi_start_nondebug_bb (basic_block bb)
+{
+ gimple_stmt_iterator i = gsi_start_bb (bb);
+
+ if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
+ gsi_next_nondebug (&i);
+
+ return i;
+}
+
+/* Return a new iterator pointing to the first non-debug non-label statement in
+ basic block BB. */
+
+inline gimple_stmt_iterator
+gsi_start_nondebug_after_labels_bb (basic_block bb)
+{
+ gimple_stmt_iterator i = gsi_after_labels (bb);
+
+ if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
+ gsi_next_nondebug (&i);
+
+ return i;
+}
+
+/* Return a new iterator pointing to the last non-debug statement in
+ basic block BB. */
+
+inline gimple_stmt_iterator
+gsi_last_nondebug_bb (basic_block bb)
+{
+ gimple_stmt_iterator i = gsi_last_bb (bb);
+
+ if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
+ gsi_prev_nondebug (&i);
+
+ return i;
+}
+
+/* Return true if I is followed only by debug statements in its
+ sequence. */
+
+inline bool
+gsi_one_nondebug_before_end_p (gimple_stmt_iterator i)
+{
+ if (gsi_one_before_end_p (i))
+ return true;
+ if (gsi_end_p (i))
+ return false;
+ gsi_next_nondebug (&i);
+ return gsi_end_p (i);
+}
+
+/* Advance statement iterator I to the next non-virtual GIMPLE_PHI
+ statement. */
+
+inline void
+gsi_next_nonvirtual_phi (gphi_iterator *i)
+{
+ do
+ {
+ gsi_next (i);
+ }
+ while (!gsi_end_p (*i) && virtual_operand_p (gimple_phi_result (i->phi ())));
+}
+
+/* Return a new iterator pointing to the first non-virtual phi statement in
+ basic block BB. */
+
+inline gphi_iterator
+gsi_start_nonvirtual_phis (basic_block bb)
+{
+ gphi_iterator i = gsi_start_phis (bb);
+
+ if (!gsi_end_p (i) && virtual_operand_p (gimple_phi_result (i.phi ())))
+ gsi_next_nonvirtual_phi (&i);
+
+ return i;
+}
+
+/* Return the basic block associated with this iterator. */
+
+inline basic_block
+gsi_bb (gimple_stmt_iterator i)
+{
+ return i.bb;
+}
+
+/* Return the sequence associated with this iterator. */
+
+inline gimple_seq
+gsi_seq (gimple_stmt_iterator i)
+{
+ return *i.seq;
+}
+
+/* Determine whether SEQ is a nondebug singleton. */
+
+inline bool
+gimple_seq_nondebug_singleton_p (gimple_seq seq)
+{
+ gimple_stmt_iterator gsi;
+
+ /* Find a nondebug gimple. */
+ gsi.ptr = gimple_seq_first (seq);
+ gsi.seq = &seq;
+ gsi.bb = NULL;
+ while (!gsi_end_p (gsi)
+ && is_gimple_debug (gsi_stmt (gsi)))
+ gsi_next (&gsi);
+
+ /* No nondebug gimple found, not a singleton. */
+ if (gsi_end_p (gsi))
+ return false;
+
+ /* Find a next nondebug gimple. */
+ gsi_next (&gsi);
+ while (!gsi_end_p (gsi)
+ && is_gimple_debug (gsi_stmt (gsi)))
+ gsi_next (&gsi);
+
+ /* Only a singleton if there's no next nondebug gimple. */
+ return gsi_end_p (gsi);
+}
+
+#endif /* GCC_GIMPLE_ITERATOR_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-low.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-low.h
new file mode 100644
index 0000000..8587c28
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-low.h
@@ -0,0 +1,28 @@
+/* Header file for gimple lowering pass.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_LOW_H
+#define GCC_GIMPLE_LOW_H
+
+extern bool gimple_stmt_may_fallthru (gimple *);
+extern bool gimple_seq_may_fallthru (gimple_seq);
+extern void record_vars_into (tree, tree);
+extern void record_vars (tree);
+
+#endif /* GCC_GIMPLE_LOW_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-match.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-match.h
new file mode 100644
index 0000000..b20585d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-match.h
@@ -0,0 +1,338 @@
+/* Gimple simplify definitions.
+
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+ Contributed by Richard Guenther <rguenther@suse.de>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_MATCH_H
+#define GCC_GIMPLE_MATCH_H
+
+
+/* Represents the condition under which an operation should happen,
+ and the value to use otherwise. The condition applies elementwise
+ (as for VEC_COND_EXPR) if the values are vectors. */
+class gimple_match_cond
+{
+public:
+ enum uncond { UNCOND };
+
+ /* Build an unconditional op. */
+ gimple_match_cond (uncond) : cond (NULL_TREE), else_value (NULL_TREE) {}
+ gimple_match_cond (tree, tree);
+
+ gimple_match_cond any_else () const;
+
+ /* The condition under which the operation occurs, or NULL_TREE
+ if the operation is unconditional. */
+ tree cond;
+
+ /* The value to use when the condition is false. This is NULL_TREE if
+ the operation is unconditional or if the value doesn't matter. */
+ tree else_value;
+};
+
+inline
+gimple_match_cond::gimple_match_cond (tree cond_in, tree else_value_in)
+ : cond (cond_in), else_value (else_value_in)
+{
+}
+
+/* Return a gimple_match_cond with the same condition but with an
+ arbitrary ELSE_VALUE. */
+
+inline gimple_match_cond
+gimple_match_cond::any_else () const
+{
+ return gimple_match_cond (cond, NULL_TREE);
+}
+
+/* Represents an operation to be simplified, or the result of the
+ simplification. */
+class gimple_match_op
+{
+public:
+ gimple_match_op ();
+ gimple_match_op (const gimple_match_cond &, code_helper, tree, unsigned int);
+ gimple_match_op (const gimple_match_cond &,
+ code_helper, tree, tree);
+ gimple_match_op (const gimple_match_cond &,
+ code_helper, tree, tree, tree);
+ gimple_match_op (const gimple_match_cond &,
+ code_helper, tree, tree, tree, tree);
+ gimple_match_op (const gimple_match_cond &,
+ code_helper, tree, tree, tree, tree, tree);
+ gimple_match_op (const gimple_match_cond &,
+ code_helper, tree, tree, tree, tree, tree, tree);
+
+ void set_op (code_helper, tree, unsigned int);
+ void set_op (code_helper, tree, tree);
+ void set_op (code_helper, tree, tree, tree);
+ void set_op (code_helper, tree, tree, tree, tree);
+ void set_op (code_helper, tree, tree, tree, tree, bool);
+ void set_op (code_helper, tree, tree, tree, tree, tree);
+ void set_op (code_helper, tree, tree, tree, tree, tree, tree);
+ void set_value (tree);
+
+ tree op_or_null (unsigned int) const;
+
+ bool resimplify (gimple_seq *, tree (*)(tree));
+
+ /* The maximum value of NUM_OPS. */
+ static const unsigned int MAX_NUM_OPS = 5;
+
+ /* The conditions under which the operation is performed, and the value to
+ use as a fallback. */
+ gimple_match_cond cond;
+
+ /* The operation being performed. */
+ code_helper code;
+
+ /* The type of the result. */
+ tree type;
+
+ /* For a BIT_FIELD_REF, whether the group of bits is stored in reverse order
+ from the target order. */
+ bool reverse;
+
+ /* The number of operands to CODE. */
+ unsigned int num_ops;
+
+ /* The operands to CODE. Only the first NUM_OPS entries are meaningful. */
+ tree ops[MAX_NUM_OPS];
+};
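+
+/* Editorial usage sketch, not upstream (assumes code_helper converts
+ implicitly from tree_code): describe an unconditional operation and try
+ to simplify it in place, e.g.
+
+ gimple_match_op op (gimple_match_cond::UNCOND, MULT_EXPR, type, a, b);
+ if (op.resimplify (NULL, follow_all_ssa_edges))
+ ... op now describes a simpler equivalent operation ...
+
+ follow_all_ssa_edges is declared in gimple-fold.h. */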
+
+inline
+gimple_match_op::gimple_match_op ()
+ : cond (gimple_match_cond::UNCOND), type (NULL_TREE), reverse (false),
+ num_ops (0)
+{
+}
+
+/* Constructor that takes the condition, code, type and number of
+ operands, but leaves the caller to fill in the operands. */
+
+inline
+gimple_match_op::gimple_match_op (const gimple_match_cond &cond_in,
+ code_helper code_in, tree type_in,
+ unsigned int num_ops_in)
+ : cond (cond_in), code (code_in), type (type_in), reverse (false),
+ num_ops (num_ops_in)
+{
+}
+
+/* Constructors for various numbers of operands. */
+
+inline
+gimple_match_op::gimple_match_op (const gimple_match_cond &cond_in,
+ code_helper code_in, tree type_in,
+ tree op0)
+ : cond (cond_in), code (code_in), type (type_in), reverse (false),
+ num_ops (1)
+{
+ ops[0] = op0;
+}
+
+inline
+gimple_match_op::gimple_match_op (const gimple_match_cond &cond_in,
+ code_helper code_in, tree type_in,
+ tree op0, tree op1)
+ : cond (cond_in), code (code_in), type (type_in), reverse (false),
+ num_ops (2)
+{
+ ops[0] = op0;
+ ops[1] = op1;
+}
+
+inline
+gimple_match_op::gimple_match_op (const gimple_match_cond &cond_in,
+ code_helper code_in, tree type_in,
+ tree op0, tree op1, tree op2)
+ : cond (cond_in), code (code_in), type (type_in), reverse (false),
+ num_ops (3)
+{
+ ops[0] = op0;
+ ops[1] = op1;
+ ops[2] = op2;
+}
+
+inline
+gimple_match_op::gimple_match_op (const gimple_match_cond &cond_in,
+ code_helper code_in, tree type_in,
+ tree op0, tree op1, tree op2, tree op3)
+ : cond (cond_in), code (code_in), type (type_in), reverse (false),
+ num_ops (4)
+{
+ ops[0] = op0;
+ ops[1] = op1;
+ ops[2] = op2;
+ ops[3] = op3;
+}
+
+inline
+gimple_match_op::gimple_match_op (const gimple_match_cond &cond_in,
+ code_helper code_in, tree type_in,
+ tree op0, tree op1, tree op2, tree op3,
+ tree op4)
+ : cond (cond_in), code (code_in), type (type_in), reverse (false),
+ num_ops (5)
+{
+ ops[0] = op0;
+ ops[1] = op1;
+ ops[2] = op2;
+ ops[3] = op3;
+ ops[4] = op4;
+}
+
+/* Change the operation performed to CODE_IN, the type of the result to
+ TYPE_IN, and the number of operands to NUM_OPS_IN. The caller needs
+ to set the operands itself. */
+
+inline void
+gimple_match_op::set_op (code_helper code_in, tree type_in,
+ unsigned int num_ops_in)
+{
+ code = code_in;
+ type = type_in;
+ num_ops = num_ops_in;
+}
+
+/* Functions for changing the operation performed, for various numbers
+ of operands. */
+
+inline void
+gimple_match_op::set_op (code_helper code_in, tree type_in, tree op0)
+{
+ code = code_in;
+ type = type_in;
+ num_ops = 1;
+ ops[0] = op0;
+}
+
+inline void
+gimple_match_op::set_op (code_helper code_in, tree type_in, tree op0, tree op1)
+{
+ code = code_in;
+ type = type_in;
+ num_ops = 2;
+ ops[0] = op0;
+ ops[1] = op1;
+}
+
+inline void
+gimple_match_op::set_op (code_helper code_in, tree type_in,
+ tree op0, tree op1, tree op2)
+{
+ code = code_in;
+ type = type_in;
+ num_ops = 3;
+ ops[0] = op0;
+ ops[1] = op1;
+ ops[2] = op2;
+}
+
+inline void
+gimple_match_op::set_op (code_helper code_in, tree type_in,
+ tree op0, tree op1, tree op2, bool reverse_in)
+{
+ code = code_in;
+ type = type_in;
+ reverse = reverse_in;
+ num_ops = 3;
+ ops[0] = op0;
+ ops[1] = op1;
+ ops[2] = op2;
+}
+
+inline void
+gimple_match_op::set_op (code_helper code_in, tree type_in,
+ tree op0, tree op1, tree op2, tree op3)
+{
+ code = code_in;
+ type = type_in;
+ num_ops = 4;
+ ops[0] = op0;
+ ops[1] = op1;
+ ops[2] = op2;
+ ops[3] = op3;
+}
+
+inline void
+gimple_match_op::set_op (code_helper code_in, tree type_in,
+ tree op0, tree op1, tree op2, tree op3, tree op4)
+{
+ code = code_in;
+ type = type_in;
+ num_ops = 5;
+ ops[0] = op0;
+ ops[1] = op1;
+ ops[2] = op2;
+ ops[3] = op3;
+ ops[4] = op4;
+}
+
+/* Set the "operation" to be the single value VALUE, such as a constant
+ or SSA_NAME. */
+
+inline void
+gimple_match_op::set_value (tree value)
+{
+ set_op (TREE_CODE (value), TREE_TYPE (value), value);
+}
+
+/* Return the value of operand I, or null if there aren't that many
+ operands. */
+
+inline tree
+gimple_match_op::op_or_null (unsigned int i) const
+{
+ return i < num_ops ? ops[i] : NULL_TREE;
+}
+
+/* Return whether OP is a non-expression result and a gimple value. */
+
+inline bool
+gimple_simplified_result_is_gimple_val (const gimple_match_op *op)
+{
+ return (op->code.is_tree_code ()
+ && (TREE_CODE_LENGTH ((tree_code) op->code) == 0
+ || ((tree_code) op->code) == ADDR_EXPR)
+ && is_gimple_val (op->ops[0]));
+}
+
+extern tree (*mprts_hook) (gimple_match_op *);
+
+bool gimple_extract_op (gimple *, gimple_match_op *);
+bool gimple_simplify (gimple *, gimple_match_op *, gimple_seq *,
+ tree (*)(tree), tree (*)(tree));
+tree maybe_push_res_to_seq (gimple_match_op *, gimple_seq *,
+ tree res = NULL_TREE);
+void maybe_build_generic_op (gimple_match_op *);
+
+bool commutative_binary_op_p (code_helper, tree);
+bool commutative_ternary_op_p (code_helper, tree);
+int first_commutative_argument (code_helper, tree);
+bool associative_binary_op_p (code_helper, tree);
+code_helper canonicalize_code (code_helper, tree);
+
+#ifdef GCC_OPTABS_TREE_H
+bool directly_supported_p (code_helper, tree, optab_subtype = optab_default);
+#endif
+
+internal_fn get_conditional_internal_fn (code_helper, tree);
+
+#endif /* GCC_GIMPLE_MATCH_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-predicate-analysis.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-predicate-analysis.h
new file mode 100644
index 0000000..09ceebc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-predicate-analysis.h
@@ -0,0 +1,175 @@
+/* Support for simple predicate analysis.
+
+ Copyright (C) 2021-2023 Free Software Foundation, Inc.
+ Contributed by Martin Sebor <msebor@redhat.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GIMPLE_PREDICATE_ANALYSIS_H_INCLUDED
+#define GIMPLE_PREDICATE_ANALYSIS_H_INCLUDED
+
+
+/* Represents a simple Boolean predicate. */
+struct pred_info
+{
+ tree pred_lhs;
+ tree pred_rhs;
+ enum tree_code cond_code;
+ bool invert;
+};
+
+/* The type to represent a sequence of predicates grouped
+ with .AND. operation. */
+typedef vec<pred_info, va_heap, vl_ptr> pred_chain;
+
+/* The type to represent a sequence of pred_chains grouped
+ with .OR. operation. */
+typedef vec<pred_chain, va_heap, vl_ptr> pred_chain_union;
+
+/* Represents a complex Boolean predicate expression. */
+class predicate
+{
+ public:
+ /* Construct a predicate whose initial (empty) value is EMPTY_VAL. */
+ predicate (bool empty_val) : m_preds (vNULL), m_cval (empty_val) { }
+
+ /* Copy. */
+ predicate (const predicate &rhs) : m_preds (vNULL) { *this = rhs; }
+
+ ~predicate ();
+
+ /* Assign. */
+ predicate& operator= (const predicate &);
+
+ bool is_empty () const
+ {
+ return m_preds.is_empty ();
+ }
+
+ bool is_true () const
+ {
+ return is_empty () && m_cval;
+ }
+
+ bool is_false () const
+ {
+ return is_empty () && !m_cval;
+ }
+
+ bool empty_val () const
+ {
+ return m_cval;
+ }
+
+ const pred_chain_union chain () const
+ {
+ return m_preds;
+ }
+
+ void init_from_control_deps (const vec<edge> *, unsigned, bool);
+
+ void dump (FILE *) const;
+ void dump (FILE *, gimple *, const char *) const;
+ void debug () const;
+
+ void normalize (gimple * = NULL, bool = false);
+ void simplify (gimple * = NULL, bool = false);
+
+ bool superset_of (const predicate &) const;
+
+private:
+
+ bool includes (const pred_chain &) const;
+ void push_pred (const pred_info &);
+
+ /* Normalization functions. */
+ void normalize (pred_chain *, pred_info, tree_code, pred_chain *,
+ hash_set<tree> *);
+ void normalize (const pred_info &);
+ void normalize (const pred_chain &);
+
+ /* Simplification functions. */
+ bool simplify_2 ();
+ bool simplify_3 ();
+ bool simplify_4 ();
+
+ /* Representation of the predicate expression(s). The predicate is
+ m_cval || m_preds[0] || ... */
+ pred_chain_union m_preds;
+ bool m_cval;
+};
+
+/* Analysis used to determine whether a use of a PHI operand is guarded. */
+class uninit_analysis
+{
+ public:
+ /* Base function object type used to determine whether an expression
+ is of interest. */
+ struct func_t
+ {
+ typedef unsigned phi_arg_set_t;
+
+ /* Return a bitset of PHI arguments of interest. By default returns a
+ bitset with a bit set for each argument. Should be called in
+ the overridden function first and, if nonzero, the result then
+ refined as appropriate. */
+ virtual phi_arg_set_t phi_arg_set (gphi *);
+
+ /* Maximum number of PHI arguments supported by phi_arg_set(). */
+ static constexpr unsigned max_phi_args =
+ sizeof (phi_arg_set_t) * CHAR_BIT;
+ };
+
+ /* Construct with the specified EVAL object. */
+ uninit_analysis (func_t &eval)
+ : m_phi_def_preds (false), m_eval (eval) { }
+
+ /* Copy. */
+ uninit_analysis (const uninit_analysis &rhs) = delete;
+
+ /* Assign. */
+ uninit_analysis& operator= (const uninit_analysis&) = delete;
+
+ /* Return true if the use by a statement in the basic block of
+ a PHI operand is ruled out (i.e., guarded) by *THIS. */
+ bool is_use_guarded (gimple *, basic_block, gphi *, unsigned);
+
+private:
+ bool is_use_guarded (gimple *, basic_block, gphi *, unsigned,
+ hash_set<gphi *> *);
+ bool prune_phi_opnds (gphi *, unsigned, gphi *, tree, tree_code,
+ hash_set<gphi *> *, bitmap *);
+ bool overlap (gphi *, unsigned, hash_set<gphi *> *, const predicate &);
+
+ void collect_phi_def_edges (gphi *, basic_block, vec<edge> *,
+ hash_set<gimple *> *);
+ bool init_from_phi_def (gphi *);
+ bool init_use_preds (predicate &, basic_block, basic_block);
+
+
+ /* Representation of the predicate expression(s). */
+ predicate m_phi_def_preds;
+ /* Callback to evaluate an operand. Return true if it's interesting. */
+ func_t &m_eval;
+};
+
+/* Bit mask handling macros. */
+#define MASK_SET_BIT(mask, pos) mask |= (1 << pos)
+#define MASK_TEST_BIT(mask, pos) (mask & (1 << pos))
+#define MASK_EMPTY(mask) (mask == 0)
+
+#endif // GIMPLE_PREDICATE_ANALYSIS_H_INCLUDED
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-predict.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-predict.h
new file mode 100644
index 0000000..6c085a9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-predict.h
@@ -0,0 +1,91 @@
+/* Gimple prediction routines.
+
+ Copyright (C) 2007-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_PREDICT_H
+#define GCC_GIMPLE_PREDICT_H
+
+#include "predict.h"
+
+/* Return the predictor of GIMPLE_PREDICT statement GS. */
+
+inline enum br_predictor
+gimple_predict_predictor (const gimple *gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_PREDICT);
+ return (enum br_predictor) (gs->subcode & ~GF_PREDICT_TAKEN);
+}
+
+
+/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT. */
+
+inline void
+gimple_predict_set_predictor (gimple *gs, enum br_predictor predictor)
+{
+ GIMPLE_CHECK (gs, GIMPLE_PREDICT);
+ gs->subcode = (gs->subcode & GF_PREDICT_TAKEN)
+ | (unsigned) predictor;
+}
+
+
+/* Return the outcome of GIMPLE_PREDICT statement GS. */
+
+inline enum prediction
+gimple_predict_outcome (const gimple *gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_PREDICT);
+ return (gs->subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
+}
+
+
+/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME. */
+
+inline void
+gimple_predict_set_outcome (gimple *gs, enum prediction outcome)
+{
+ GIMPLE_CHECK (gs, GIMPLE_PREDICT);
+ if (outcome == TAKEN)
+ gs->subcode |= GF_PREDICT_TAKEN;
+ else
+ gs->subcode &= ~GF_PREDICT_TAKEN;
+}
+
+/* Build a GIMPLE_PREDICT statement. PREDICT is one of the predictors from
+ predict.def, OUTCOME is NOT_TAKEN or TAKEN. */
+
+inline gimple *
+gimple_build_predict (enum br_predictor predictor, enum prediction outcome)
+{
+ gimple *p = gimple_alloc (GIMPLE_PREDICT, 0);
+ /* Ensure all the predictors fit into the lower bits of the subcode. */
+ gcc_assert ((int) END_PREDICTORS <= GF_PREDICT_TAKEN);
+ gimple_predict_set_predictor (p, predictor);
+ gimple_predict_set_outcome (p, outcome);
+ return p;
+}
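+
+/* Editorial usage sketch, not upstream: hint that the current path is
+ likely to be taken, e.g.
+
+ gimple *p = gimple_build_predict (PRED_BUILTIN_EXPECT, TAKEN);
+ gsi_insert_before (&gsi, p, GSI_SAME_STMT); */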
+
+/* Return true if GS is a GIMPLE_PREDICT statement. */
+
+inline bool
+is_gimple_predict (const gimple *gs)
+{
+ return gimple_code (gs) == GIMPLE_PREDICT;
+}
+
+#endif /* GCC_GIMPLE_PREDICT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-pretty-print.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-pretty-print.h
new file mode 100644
index 0000000..dd7c4a3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-pretty-print.h
@@ -0,0 +1,41 @@
+/* Various declarations for pretty formatting of GIMPLE statements and
+ expressions.
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_PRETTY_PRINT_H
+#define GCC_GIMPLE_PRETTY_PRINT_H
+
+#include "tree-pretty-print.h"
+
+/* In gimple-pretty-print.cc */
+extern void debug_gimple_stmt (gimple *);
+extern void debug_gimple_seq (gimple_seq);
+extern void print_gimple_seq (FILE *, gimple_seq, int, dump_flags_t);
+extern void print_gimple_stmt (FILE *, gimple *, int, dump_flags_t = TDF_NONE);
+extern void debug (gimple &ref);
+extern void debug (gimple *ptr);
+extern void print_gimple_expr (FILE *, gimple *, int, dump_flags_t = TDF_NONE);
+extern void pp_gimple_stmt_1 (pretty_printer *, const gimple *, int,
+ dump_flags_t);
+extern void gimple_dump_bb (FILE *, basic_block, int, dump_flags_t);
+extern void gimple_dump_bb_for_graph (pretty_printer *, basic_block);
+extern void dump_ssaname_info_to_file (FILE *, tree, int);
+extern void percent_G_format (text_info *);
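+
+/* Editorial usage sketch, not upstream: the common debugging entry points,
+ e.g.
+
+ print_gimple_stmt (stderr, stmt, 0, TDF_SLIM);
+ debug_gimple_seq (seq); */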
+
+#endif /* ! GCC_GIMPLE_PRETTY_PRINT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-cache.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-cache.h
new file mode 100644
index 0000000..4ff435d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-cache.h
@@ -0,0 +1,121 @@
+/* Header file for gimple ranger SSA cache.
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+ Contributed by Andrew MacLeod <amacleod@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SSA_RANGE_CACHE_H
+#define GCC_SSA_RANGE_CACHE_H
+
+#include "gimple-range-gori.h"
+#include "gimple-range-infer.h"
+
+// This class manages a vector of pointers to ssa_block ranges. It
+// provides the basis for the "range on entry" cache for all
+// SSA names.
+
+class block_range_cache
+{
+public:
+ block_range_cache ();
+ ~block_range_cache ();
+
+ bool set_bb_range (tree name, const_basic_block bb, const vrange &v);
+ bool get_bb_range (vrange &v, tree name, const_basic_block bb);
+ bool bb_range_p (tree name, const_basic_block bb);
+
+ void dump (FILE *f);
+ void dump (FILE *f, basic_block bb, bool print_varying = true);
+private:
+ vec<class ssa_block_ranges *> m_ssa_ranges;
+ ssa_block_ranges &get_block_ranges (tree name);
+ ssa_block_ranges *query_block_ranges (tree name);
+ class vrange_allocator *m_range_allocator;
+ bitmap_obstack m_bitmaps;
+};
+
+// This global cache is used with the range engine as markers for what
+// has been visited during this incarnation. Once the ranger evaluates
+// a name, it is typically not evaluated again.
+
+class ssa_global_cache
+{
+public:
+ ssa_global_cache ();
+ ~ssa_global_cache ();
+ bool get_global_range (vrange &r, tree name) const;
+ bool set_global_range (tree name, const vrange &r);
+ void clear_global_range (tree name);
+ void clear ();
+ void dump (FILE *f = stderr);
+private:
+ vec<vrange *> m_tab;
+ vrange_allocator *m_range_allocator;
+};
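+
+// Editorial usage sketch, not upstream: query the cached global range of
+// an integral SSA name, e.g.
+//
+//   int_range_max r;
+//   if (cache.get_global_range (r, name))
+//     ... r is the best known global range of NAME ...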
+
+// This class provides all the caches a global ranger may need, and makes
+// them available for gori-computes to query so outgoing edges can be
+// properly calculated.
+
+class ranger_cache : public range_query
+{
+public:
+ ranger_cache (int not_executable_flag, bool use_imm_uses);
+ ~ranger_cache ();
+
+ bool range_of_expr (vrange &r, tree name, gimple *stmt) final override;
+ bool range_on_edge (vrange &r, edge e, tree expr) final override;
+ bool block_range (vrange &r, basic_block bb, tree name, bool calc = true);
+
+ bool get_global_range (vrange &r, tree name) const;
+ bool get_global_range (vrange &r, tree name, bool &current_p);
+ void set_global_range (tree name, const vrange &r);
+
+ void propagate_updated_value (tree name, basic_block bb);
+
+ void register_inferred_value (const vrange &r, tree name, basic_block bb);
+ void apply_inferred_ranges (gimple *s);
+ gori_compute m_gori;
+ infer_range_manager m_exit;
+
+ void dump_bb (FILE *f, basic_block bb);
+ virtual void dump (FILE *f) override;
+private:
+ ssa_global_cache m_globals;
+ block_range_cache m_on_entry;
+ class temporal_cache *m_temporal;
+ void fill_block_cache (tree name, basic_block bb, basic_block def_bb);
+ void propagate_cache (tree name);
+
+ enum rfd_mode
+ {
+ RFD_NONE, // Only look at current block cache.
+ RFD_READ_ONLY, // Scan DOM tree, do not write to cache.
+ RFD_FILL // Scan DOM tree, updating important nodes.
+ };
+ bool range_from_dom (vrange &r, tree name, basic_block bb, enum rfd_mode);
+ void resolve_dom (vrange &r, tree name, basic_block bb);
+ void range_of_def (vrange &r, tree name, basic_block bb = NULL);
+ void entry_range (vrange &r, tree expr, basic_block bb, enum rfd_mode);
+ void exit_range (vrange &r, tree expr, basic_block bb, enum rfd_mode);
+ bool edge_range (vrange &r, edge e, tree name, enum rfd_mode);
+
+ vec<basic_block> m_workback;
+ class update_list *m_update;
+};
+
+#endif // GCC_SSA_RANGE_CACHE_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-edge.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-edge.h
new file mode 100644
index 0000000..bb0de1b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-edge.h
@@ -0,0 +1,58 @@
+/* Gimple range edge header file.
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+ Contributed by Andrew MacLeod <amacleod@redhat.com>
+ and Aldy Hernandez <aldyh@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GIMPLE_RANGE_EDGE_H
+#define GIMPLE_RANGE_EDGE_H
+
+// This class is used to query ranges on constant edges in GIMPLE.
+//
+// For a COND_EXPR, the TRUE edge will return [1,1] and the false edge a [0,0].
+//
+// For SWITCH_EXPR, it is awkward to calculate ranges. When a request
+// is made, the entire switch is evaluated and the results cached.
+// Any future requests to that switch will use the cached value, providing
+// a dramatic decrease in computation time.
+//
+// The API is simple, just ask for the range on the edge.
+// The return value is NULL if there is no range, or the branch statement
+// from which the edge gets the range, along with the range itself.
+
+class gimple_outgoing_range
+{
+public:
+ gimple_outgoing_range (int max_sw_edges = INT_MAX);
+ ~gimple_outgoing_range ();
+ gimple *edge_range_p (irange &r, edge e);
+private:
+ void calc_switch_ranges (gswitch *sw);
+ bool switch_edge_range (irange &r, gswitch *sw, edge e);
+
+ int m_max_edges;
+ hash_map<edge, irange *> *m_edge_table;
+ class obstack_vrange_allocator *m_range_allocator;
+};
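+
+// Editorial usage sketch, not upstream: ask for the constant range carried
+// by edge E, e.g.
+//
+//   gimple_outgoing_range gout;
+//   int_range_max r;
+//   if (gimple *s = gout.edge_range_p (r, e))
+//     ... r is the range implied by taking edge E from statement S ...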
+
+// If there is a range control statement at the end of block BB, return it.
+gimple *gimple_outgoing_range_stmt_p (basic_block bb);
+// Return the range on edge E if it is from a GCOND. Either TRUE or FALSE.
+void gcond_edge_range (irange &r, edge e);
+
+#endif // GIMPLE_RANGE_EDGE_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-fold.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-fold.h
new file mode 100644
index 0000000..68c6d77
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-fold.h
@@ -0,0 +1,173 @@
+/* Header file for the GIMPLE fold_using_range interface.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+ Contributed by Andrew MacLeod <amacleod@redhat.com>
+ and Aldy Hernandez <aldyh@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_RANGE_FOLD_H
+#define GCC_GIMPLE_RANGE_FOLD_H
+
+// This file is the main include point for gimple range folding.
+// These routines will fold stmt S into the result range R.
+// Any ssa_names on the stmt will be calculated using the range_query
+// parameter via a call to range_of_expr.
+// If no range_query is provided, current global range info will be used.
+// The second variation specifies an edge, and stmt S is recalculated as if
+// it appeared on that edge.
+
+// Fold stmt S into range R using range query Q.
+bool fold_range (vrange &r, gimple *s, range_query *q = NULL);
+// Recalculate stmt S into R using range query Q as if it were on edge ON_EDGE.
+bool fold_range (vrange &v, gimple *s, edge on_edge, range_query *q = NULL);
+
+// These routines allow the operands to be specified when manually folding.
+// Any excess queries will be drawn from the current range_query.
+bool fold_range (vrange &r, gimple *s, vrange &r1);
+bool fold_range (vrange &r, gimple *s, vrange &r1, vrange &r2);
+bool fold_range (vrange &r, gimple *s, unsigned num_elements, vrange **vector);
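+
+// Editorial usage sketch, not upstream: fold a statement using global
+// range information, e.g.
+//
+//   int_range_max r;
+//   if (fold_range (r, stmt))
+//     ... r is the computed range of the value defined by STMT ...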
+
+// Return the type of range which statement S calculates. If the type is
+// unsupported or no type can be determined, return NULL_TREE.
+
+inline tree
+gimple_range_type (const gimple *s)
+{
+ tree lhs = gimple_get_lhs (s);
+ tree type = NULL_TREE;
+ if (lhs)
+ type = TREE_TYPE (lhs);
+ else
+ {
+ enum gimple_code code = gimple_code (s);
+ if (code == GIMPLE_COND)
+ type = boolean_type_node;
+ else if (code == GIMPLE_PHI)
+ type = TREE_TYPE (gimple_phi_result (s));
+ else if (code == GIMPLE_CALL)
+ {
+ type = gimple_call_fntype (s);
+ // If it has a type, get the return type.
+ if (type)
+ type = TREE_TYPE (type);
+ }
+ }
+ if (type && Value_Range::supports_type_p (type))
+ return type;
+ return NULL_TREE;
+}
+
+// Return EXP if it is an SSA_NAME with a type supported by gimple ranges.
+
+inline tree
+gimple_range_ssa_p (tree exp)
+{
+ if (exp && TREE_CODE (exp) == SSA_NAME &&
+ !SSA_NAME_IS_VIRTUAL_OPERAND (exp) &&
+ !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (exp) &&
+ Value_Range::supports_type_p (TREE_TYPE (exp)))
+ return exp;
+ return NULL_TREE;
+}
+
+// Return true if TYPE1 and TYPE2 are compatible range types.
+
+inline bool
+range_compatible_p (tree type1, tree type2)
+{
+ // types_compatible_p requires conversion in both directions to be useless.
+ // GIMPLE only requires a cast one way in order to be compatible.
+ // Ranges really only need the sign and precision to be the same.
+ return (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
+ && TYPE_SIGN (type1) == TYPE_SIGN (type2));
+}
+
+// Source of all operands for fold_using_range and gori_compute.
+// It abstracts out the source of an operand so it can come from a stmt or
+// an edge or anywhere a derived class of fur_source wants.
+// The default simply picks up ranges from the current range_query.
+
+class fur_source
+{
+public:
+ fur_source (range_query *q = NULL);
+ inline range_query *query () { return m_query; }
+ inline class gori_compute *gori () { return m_gori; };
+ virtual bool get_operand (vrange &r, tree expr);
+ virtual bool get_phi_operand (vrange &r, tree expr, edge e);
+ virtual relation_kind query_relation (tree op1, tree op2);
+ virtual void register_relation (gimple *stmt, relation_kind k, tree op1,
+ tree op2);
+ virtual void register_relation (edge e, relation_kind k, tree op1,
+ tree op2);
+ void register_outgoing_edges (gcond *, irange &lhs_range, edge e0, edge e1);
+protected:
+ range_query *m_query;
+ gori_compute *m_gori;
+};
+
+// fur_stmt is the specification for drawing an operand from range_query Q
+// via a range_of_expr call on stmt S.
+
+class fur_stmt : public fur_source
+{
+public:
+ fur_stmt (gimple *s, range_query *q = NULL);
+ virtual bool get_operand (vrange &r, tree expr) override;
+ virtual bool get_phi_operand (vrange &r, tree expr, edge e) override;
+ virtual relation_kind query_relation (tree op1, tree op2) override;
+private:
+ gimple *m_stmt;
+};
+
+// This version of fur_source will pick a range from a stmt, and also register
+// dependencies via a gori_compute object. This is mostly an internal API.
+
+class fur_depend : public fur_stmt
+{
+public:
+ fur_depend (gimple *s, gori_compute *gori, range_query *q = NULL);
+ virtual void register_relation (gimple *stmt, relation_kind k, tree op1,
+ tree op2) override;
+ virtual void register_relation (edge e, relation_kind k, tree op1,
+ tree op2) override;
+protected:
+ relation_oracle *m_oracle;
+};
+
+// This class uses ranges to fold a gimple statement producing a range for
+// the LHS. The source of all operands is supplied via the fur_source class
+// which provides a range_query as well as a source location and any other
+// required information.
+
+class fold_using_range
+{
+public:
+ bool fold_stmt (vrange &r, gimple *s, class fur_source &src,
+ tree name = NULL_TREE);
+protected:
+ bool range_of_range_op (vrange &r, gimple_range_op_handler &handler,
+ fur_source &src);
+ bool range_of_call (vrange &r, gcall *call, fur_source &src);
+ bool range_of_cond_expr (vrange &r, gassign* cond, fur_source &src);
+ bool range_of_address (irange &r, gimple *s, fur_source &src);
+ bool range_of_phi (vrange &r, gphi *phi, fur_source &src);
+ void range_of_ssa_name_with_loop_info (vrange &, tree, class loop *, gphi *,
+ fur_source &src);
+ void relation_fold_and_or (irange& lhs_range, gimple *s, fur_source &src);
+};
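+
+// A hypothetical sketch combining the classes above (assumes S has a
+// rangeable type; names are illustrative):
+//
+//   fur_stmt src (s);
+//   fold_using_range f;
+//   Value_Range r (gimple_range_type (s));
+//   if (f.fold_stmt (r, s, src))
+//     ...   // R now holds the folded range of the LHS of S.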
+#endif // GCC_GIMPLE_RANGE_FOLD_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-gori.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-gori.h
new file mode 100644
index 0000000..3ea4b45
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-gori.h
@@ -0,0 +1,229 @@
+/* Header file for gimple range GORI structures.
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+ Contributed by Andrew MacLeod <amacleod@redhat.com>
+ and Aldy Hernandez <aldyh@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_RANGE_GORI_H
+#define GCC_GIMPLE_RANGE_GORI_H
+
+// RANGE_DEF_CHAIN is used to determine which SSA names in a block can
+// have range information calculated for them, and what the
+// dependencies on each other are.
+
+class range_def_chain
+{
+public:
+ range_def_chain ();
+ ~range_def_chain ();
+ tree depend1 (tree name) const;
+ tree depend2 (tree name) const;
+ bool in_chain_p (tree name, tree def);
+ bool chain_import_p (tree name, tree import);
+ void register_dependency (tree name, tree ssa1, basic_block bb = NULL);
+ void dump (FILE *f, basic_block bb, const char *prefix = NULL);
+protected:
+ bool has_def_chain (tree name);
+ bool def_chain_in_bitmap_p (tree name, bitmap b);
+ void add_def_chain_to_bitmap (bitmap b, tree name);
+ bitmap get_def_chain (tree name);
+ bitmap get_imports (tree name);
+ bitmap_obstack m_bitmaps;
+private:
+ struct rdc {
+ tree ssa1; // First direct dependency
+ tree ssa2; // Second direct dependency
+ bitmap bm; // All dependencies
+ bitmap m_import;
+ };
+ vec<rdc> m_def_chain; // SSA_NAME : def chain components.
+ void set_import (struct rdc &data, tree imp, bitmap b);
+ int m_logical_depth;
+};
+
+// Return the first direct dependency for NAME, if there is one.
+// Direct dependencies are those which occur on the definition statement.
+// Only the first 2 such names are cached.
+
+inline tree
+range_def_chain::depend1 (tree name) const
+{
+ unsigned v = SSA_NAME_VERSION (name);
+ if (v >= m_def_chain.length ())
+ return NULL_TREE;
+ return m_def_chain[v].ssa1;
+}
+
+// Return the second direct dependency for NAME, if there is one.
+
+inline tree
+range_def_chain::depend2 (tree name) const
+{
+ unsigned v = SSA_NAME_VERSION (name);
+ if (v >= m_def_chain.length ())
+ return NULL_TREE;
+ return m_def_chain[v].ssa2;
+}
+
+// GORI_MAP is used to accumulate what SSA names in a block can
+// generate range information, and provides tools for the block ranger
+// to enable it to efficiently calculate these ranges.
+
+class gori_map : public range_def_chain
+{
+public:
+ gori_map ();
+ ~gori_map ();
+
+ bool is_export_p (tree name, basic_block bb = NULL);
+ bool is_import_p (tree name, basic_block bb);
+ bitmap exports (basic_block bb);
+ bitmap imports (basic_block bb);
+ void set_range_invariant (tree name, bool invariant = true);
+
+ void dump (FILE *f);
+ void dump (FILE *f, basic_block bb, bool verbose = true);
+private:
+ vec<bitmap> m_outgoing; // BB: Outgoing ranges calculable on edges
+ vec<bitmap> m_incoming; // BB: Incoming ranges which can affect exports.
+ bitmap m_maybe_variant; // Names which might have outgoing ranges.
+ void maybe_add_gori (tree name, basic_block bb);
+ void calculate_gori (basic_block bb);
+};
+
+
+// This class is used to determine which SSA_NAMES can have ranges
+// calculated for them on outgoing edges from basic blocks. This represents
+// ONLY the effect of the basic block edge->src on a range.
+//
+// There are 2 primary entry points:
+//
+// has_edge_range_p (tree name, edge e)
+// returns true if the outgoing edge *may* be able to produce range
+// information for ssa_name NAME on edge E.
+// FALSE is returned if this edge does not affect the range of NAME.
+// if no edge is specified, return TRUE if name may have a value calculated
+// on *ANY* edge that has been seen. FALSE indicates that the global value
+// is applicable everywhere that has been processed.
+//
+// outgoing_edge_range_p (vrange &range, edge e, tree name)
+//     Actually does the calculation of RANGE for NAME on edge E.
+//     This represents the application of whatever static range effect
+//     edge E may have on NAME, not any cumulative effect.
+
+// There are also some internal APIs
+//
+// ssa_range_in_bb () is an internal routine which is used to start any
+// calculation chain using SSA_NAMEs which come from outside the block, e.g.
+// a_2 = b_4 - 8
+// if (a_2 < 30)
+// on the true edge, a_2 is known to be [0, 29]
+// b_4 can be calculated as [8, 37]
+// during this calculation, b_4 is considered an "import" and ssa_range_in_bb
+// is queried for a starting range which is used in the calculation.
+// A default value of VARYING provides the raw static info for the edge.
+//
+// If there is any known range for b_4 coming into this block, it can refine
+// the results. This allows for cascading results to be propagated.
+// If b_4 is [100, 200] on entry to the block, it feeds into the calculation
+// of a_2 = [92, 192], and finally on the true edge the range would be
+// an empty range [] because it is not possible for the true edge to be taken.
+//
+// expr_range_in_bb is a wrapper which calls ssa_range_in_bb for SSA_NAMEs
+// and otherwise simply calculates the range of the expression.
+//
+// The constructor takes a flag value to use on edges to check for the
+// NON_EXECUTABLE_EDGE property. The zero default means no flag is checked.
+// All value requests from NON_EXECUTABLE_EDGE edges return UNDEFINED.
+//
+// The remaining routines are internal use only.
+
+class value_relation;
+
+class gori_compute : public gori_map
+{
+public:
+ gori_compute (int not_executable_flag = 0);
+ bool outgoing_edge_range_p (vrange &r, edge e, tree name, range_query &q);
+ bool condexpr_adjust (vrange &r1, vrange &r2, gimple *s, tree cond, tree op1,
+ tree op2, fur_source &src);
+ bool has_edge_range_p (tree name, basic_block bb = NULL);
+ bool has_edge_range_p (tree name, edge e);
+ void dump (FILE *f);
+ bool compute_operand_range (vrange &r, gimple *stmt, const vrange &lhs,
+ tree name, class fur_source &src,
+ value_relation *rel = NULL);
+private:
+ bool refine_using_relation (tree op1, vrange &op1_range,
+ tree op2, vrange &op2_range,
+ fur_source &src, relation_kind k);
+ bool may_recompute_p (tree name, edge e, int depth = -1);
+ bool may_recompute_p (tree name, basic_block bb = NULL, int depth = -1);
+ bool compute_operand_range_switch (vrange &r, gswitch *s, const vrange &lhs,
+ tree name, fur_source &src);
+ bool compute_operand1_range (vrange &r, gimple_range_op_handler &handler,
+ const vrange &lhs, tree name, fur_source &src,
+ value_relation *rel = NULL);
+ bool compute_operand2_range (vrange &r, gimple_range_op_handler &handler,
+ const vrange &lhs, tree name, fur_source &src,
+ value_relation *rel = NULL);
+ bool compute_operand1_and_operand2_range (vrange &r,
+ gimple_range_op_handler &handler,
+ const vrange &lhs, tree name,
+ fur_source &src,
+ value_relation *rel = NULL);
+ void compute_logical_operands (vrange &true_range, vrange &false_range,
+ gimple_range_op_handler &handler,
+ const irange &lhs, tree name, fur_source &src,
+ tree op, bool op_in_chain);
+ bool logical_combine (vrange &r, enum tree_code code, const irange &lhs,
+ const vrange &op1_true, const vrange &op1_false,
+ const vrange &op2_true, const vrange &op2_false);
+ int_range<2> m_bool_zero; // Boolean false cached.
+ int_range<2> m_bool_one; // Boolean true cached.
+
+ gimple_outgoing_range outgoing; // Edge values for COND_EXPR & SWITCH_EXPR.
+ range_tracer tracer;
+ int m_not_executable_flag;
+};
+
+// For each name that is an import into BB's exports.
+#define FOR_EACH_GORI_IMPORT_NAME(gori, bb, name) \
+ for (gori_export_iterator iter ((gori).imports ((bb))); \
+ ((name) = iter.get_name ()); \
+ iter.next ())
+
+// For each name possibly exported from block BB.
+#define FOR_EACH_GORI_EXPORT_NAME(gori, bb, name) \
+ for (gori_export_iterator iter ((gori).exports ((bb))); \
+ ((name) = iter.get_name ()); \
+ iter.next ())
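+
+// Hypothetical iteration sketch (GORI, BB, and DUMP_FILE assumed valid):
+//
+//   tree name;
+//   FOR_EACH_GORI_EXPORT_NAME (gori, bb, name)
+//     print_generic_expr (dump_file, name, TDF_SLIM);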
+
+// Used to assist with iterating over the GORI export list in various ways.
+class gori_export_iterator {
+public:
+ gori_export_iterator (bitmap b);
+ void next ();
+ tree get_name ();
+protected:
+ bitmap bm;
+ bitmap_iterator bi;
+ unsigned y;
+};
+
+#endif // GCC_GIMPLE_RANGE_GORI_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-infer.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-infer.h
new file mode 100644
index 0000000..3c85e29
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-infer.h
@@ -0,0 +1,86 @@
+/* Header file for gimple range inference.
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+ Contributed by Andrew MacLeod <amacleod@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_RANGE_SIDE_H
+#define GCC_GIMPLE_RANGE_SIDE_H
+
+// Inferred ranges are ranges which are applied to use operands as a
+// by-product of executing an operation.
+
+// This class manages an on-demand summary of inferred ranges for a statement.
+// It can be instantiated as required and provides a list of inferred ranges.
+// New inferred ranges should be added in the constructor of this class.
+
+class gimple_infer_range
+{
+public:
+ gimple_infer_range (gimple *s);
+ inline unsigned num () const { return num_args; }
+ inline tree name (unsigned index) const
+ { gcc_checking_assert (index < num_args); return m_names[index]; }
+ inline const vrange& range (unsigned index) const
+ { gcc_checking_assert (index < num_args); return m_ranges[index]; }
+ void add_range (tree name, vrange &range);
+ void add_nonzero (tree name);
+private:
+ void check_assume_func (gcall *call);
+ unsigned num_args;
+ static const int size_limit = 10;
+ tree m_names[size_limit];
+ Value_Range m_ranges[size_limit];
+ inline void bump_index () { if (num_args < size_limit - 1) num_args++; }
+};
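+
+// A hypothetical sketch of inspecting the summary (names illustrative):
+//
+//   gimple_infer_range infer (s);
+//   for (unsigned i = 0; i < infer.num (); i++)
+//     ...   // infer.name (i) is limited to infer.range (i) after S executes.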
+
+// This class manages a list of inferred ranges for each basic block.
+// As inferences are made, they can be registered to a block and later
+// queried.  When constructed with a TRUE flag, immediate use chains are
+// followed the first time a name is referenced, and the block is populated
+// if there are any inferred ranges.
+
+class infer_range_manager
+{
+public:
+ infer_range_manager (bool do_search);
+ ~infer_range_manager ();
+ void add_range (tree name, basic_block bb, const vrange &r);
+ void add_nonzero (tree name, basic_block bb);
+ bool has_range_p (tree name, basic_block bb);
+ bool has_range_p (basic_block bb);
+ bool maybe_adjust_range (vrange &r, tree name, basic_block bb);
+private:
+ class exit_range_head
+ {
+ public:
+ bitmap m_names; // list of names with an outgoing range.
+ class exit_range *head;
+ int m_num_ranges;
+ exit_range *find_ptr (tree name);
+ };
+ void register_all_uses (tree name);
+ vec <exit_range_head> m_on_exit;
+ const vrange &get_nonzero (tree name);
+ vec <vrange *> m_nonzero;
+ bitmap m_seen;
+ bitmap_obstack m_bitmaps;
+ struct obstack m_list_obstack;
+ class obstack_vrange_allocator *m_range_allocator;
+};
+
+#endif // GCC_GIMPLE_RANGE_SIDE_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-op.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-op.h
new file mode 100644
index 0000000..1bf63c5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-op.h
@@ -0,0 +1,55 @@
+/* Header file for the GIMPLE range-op interface.
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+ Contributed by Andrew MacLeod <amacleod@redhat.com>
+ and Aldy Hernandez <aldyh@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_RANGE_OP_H
+#define GCC_GIMPLE_RANGE_OP_H
+
+#include "range-op.h"
+
+
+class gimple_range_op_handler : public range_op_handler
+{
+public:
+ static bool supported_p (gimple *s);
+ gimple_range_op_handler (gimple *s);
+ inline gimple *stmt () const { return m_stmt; }
+ inline tree lhs () const { return gimple_get_lhs (m_stmt); }
+ tree operand1 () const { gcc_checking_assert (m_valid); return m_op1; }
+ tree operand2 () const { gcc_checking_assert (m_valid); return m_op2; }
+ bool calc_op1 (vrange &r, const vrange &lhs_range);
+ bool calc_op1 (vrange &r, const vrange &lhs_range, const vrange &op2_range,
+ relation_trio = TRIO_VARYING);
+ bool calc_op2 (vrange &r, const vrange &lhs_range, const vrange &op1_range,
+ relation_trio = TRIO_VARYING);
+private:
+ void maybe_builtin_call ();
+ void maybe_non_standard ();
+ gimple *m_stmt;
+ tree m_op1, m_op2;
+};
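+
+// A hypothetical back-computation sketch: given a range for the LHS of S,
+// derive a range for operand 1 (assumes S is a supported statement):
+//
+//   gimple_range_op_handler handler (s);
+//   if (handler && handler.calc_op1 (op1_range, lhs_range))
+//     ...   // OP1_RANGE constrains operand 1 given LHS_RANGE.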
+
+// Given stmt S, fill VEC, up to VEC_SIZE elements, with relevant ssa-names
+// on the statement.  For efficiency, it is an error not to pass in enough
+// elements for the vector. Return the number of ssa-names.
+
+unsigned gimple_range_ssa_names (tree *vec, unsigned vec_size, gimple *stmt);
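+
+// For example (illustrative; the array must be large enough for STMT):
+//
+//   tree ssa[3];
+//   unsigned n = gimple_range_ssa_names (ssa, 3, stmt);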
+
+#endif // GCC_GIMPLE_RANGE_OP_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-path.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-path.h
new file mode 100644
index 0000000..e8b06b6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-path.h
@@ -0,0 +1,115 @@
+/* Header file for jump threading path solver.
+ Copyright (C) 2021-2023 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez <aldyh@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_THREADSOLVER_H
+#define GCC_TREE_SSA_THREADSOLVER_H
+
+// This class is a basic block path solver. Given a set of BBs
+// indicating a path through the CFG, range_of_expr and range_of_stmt
+// will calculate the range of an SSA or STMT as if the BBs in the
+// path would have been executed in order.
+//
+// Note that the blocks are in reverse order, thus the exit block is
+// path[0].
+
+class path_range_query : public range_query
+{
+public:
+ path_range_query (class gimple_ranger &ranger,
+ const vec<basic_block> &path,
+ const bitmap_head *dependencies = NULL,
+ bool resolve = true);
+ path_range_query (gimple_ranger &ranger, bool resolve = true);
+ virtual ~path_range_query ();
+ void reset_path (const vec<basic_block> &, const bitmap_head *dependencies);
+ bool range_of_expr (vrange &r, tree name, gimple * = NULL) override;
+ bool range_of_stmt (vrange &r, gimple *, tree name = NULL) override;
+ bool unreachable_path_p ();
+ void dump (FILE *) override;
+ void debug ();
+
+private:
+ bool internal_range_of_expr (vrange &r, tree name, gimple *);
+ void compute_ranges (const bitmap_head *dependencies);
+ void compute_exit_dependencies (bitmap_head *dependencies);
+ bool defined_outside_path (tree name);
+ void range_on_path_entry (vrange &r, tree name);
+ path_oracle *get_path_oracle () { return (path_oracle *)m_oracle; }
+
+ // Cache manipulation.
+ void set_cache (const vrange &r, tree name);
+ bool get_cache (vrange &r, tree name);
+ void clear_cache (tree name);
+
+ // Methods to compute ranges for the given path.
+ bool range_defined_in_block (vrange &, tree name, basic_block bb);
+ void compute_ranges_in_block (basic_block bb);
+ void compute_ranges_in_phis (basic_block bb);
+ void adjust_for_non_null_uses (basic_block bb);
+ void ssa_range_in_phi (vrange &r, gphi *phi);
+ void compute_outgoing_relations (basic_block bb, basic_block next);
+ void compute_phi_relations (basic_block bb, basic_block prev);
+ void maybe_register_phi_relation (gphi *, edge e);
+ bool add_to_exit_dependencies (tree name, bitmap dependencies);
+ bool exit_dependency_p (tree name);
+ bool ssa_defined_in_bb (tree name, basic_block bb);
+ bool relations_may_be_invalidated (edge);
+
+ // Path navigation.
+ basic_block entry_bb () { return m_path[m_path.length () - 1]; }
+ basic_block exit_bb () { return m_path[0]; }
+ basic_block curr_bb () { return m_path[m_pos]; }
+ basic_block prev_bb () { return m_path[m_pos + 1]; }
+ basic_block next_bb () { return m_path[m_pos - 1]; }
+ bool at_entry () { return m_pos == m_path.length () - 1; }
+ bool at_exit () { return m_pos == 0; }
+ void move_next () { --m_pos; }
+
+ // Range cache for SSA names.
+ ssa_global_cache *m_cache;
+
+ // Set for each SSA that has an active entry in the cache.
+ bitmap m_has_cache_entry;
+
+ // Path being analyzed.
+ auto_vec<basic_block> m_path;
+
+ // This is a list of SSA names that may have relevant context
+ // information for solving the final conditional along the path.
+ // Ranges for these SSA names are pre-calculated and cached during a
+ // top-down traversal of the path, and are then used to answer
+ // questions at the path exit.
+ auto_bitmap m_exit_dependencies;
+
+ // A ranger used to resolve ranges for SSA names whose values come
+ // from outside the path.
+ gimple_ranger &m_ranger;
+
+ // Current path position.
+ unsigned m_pos;
+
+ // Use ranger to resolve anything not known on entry.
+ bool m_resolve;
+
+ // Set if there were any undefined expressions while pre-calculating path.
+ bool m_undefined_path;
+};
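+
+// A hypothetical sketch: solve ranges along PATH (blocks stored in
+// reverse order, exit block first; names are illustrative):
+//
+//   path_range_query query (*ranger, path);
+//   int_range_max r;
+//   if (query.range_of_expr (r, name, exit_stmt))
+//     ...   // R is NAME's range as if PATH were executed in order.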
+
+#endif // GCC_TREE_SSA_THREADSOLVER_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-trace.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-trace.h
new file mode 100644
index 0000000..625d0be
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range-trace.h
@@ -0,0 +1,78 @@
+/* Header file for the GIMPLE range tracing/debugging facilities.
+ Copyright (C) 2021-2023 Free Software Foundation, Inc.
+ Contributed by Andrew MacLeod <amacleod@redhat.com>
+ and Aldy Hernandez <aldyh@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_RANGE_TRACE_H
+#define GCC_GIMPLE_RANGE_TRACE_H
+
+// This class manages range tracing for the ranger and gori components.
+// Tracing will provide a unique integer index whenever a new trace
+// is started. This can be used to identify where a calculation has gone wrong.
+
+class range_tracer
+{
+public:
+ range_tracer (const char *name = "");
+ unsigned header (const char *str);
+ void trailer (unsigned counter, const char *caller, bool result, tree name,
+ const vrange &r);
+ void print (unsigned counter, const char *str);
+ inline void enable_trace () { tracing = true; }
+ inline void disable_trace () { tracing = false; }
+ virtual void breakpoint (unsigned index);
+private:
+ unsigned do_header (const char *str);
+ void print_prefix (unsigned idx, bool blanks);
+ static const unsigned bump = 2;
+ unsigned indent;
+ static const unsigned name_len = 100;
+ char component[name_len];
+ bool tracing;
+};
+
+
+// If tracing is enabled, start a new trace header, returning the trace index.
+// Otherwise return 0.
+
+inline unsigned
+range_tracer::header (const char *str)
+{
+ if (tracing)
+ return do_header (str);
+ return 0;
+}
+
+// RAII class to change current dump_file and dump_flags, and restore
+// when the object goes out of scope.
+
+class push_dump_file
+{
+public:
+ push_dump_file (FILE *, dump_flags_t);
+ ~push_dump_file ();
+private:
+ FILE *old_dump_file;
+ dump_flags_t old_dump_flags;
+};
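+
+// A hypothetical RAII sketch (the stream and flags are illustrative):
+//
+//   {
+//     push_dump_file save (stderr, TDF_DETAILS);
+//     ...   // Dump output within this scope goes to stderr.
+//   }       // Previous dump_file and dump_flags restored here.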
+
+void dump_ranger (FILE *);
+void dump_ranger (FILE *, const vec<basic_block> &path);
+
+#endif // GCC_GIMPLE_RANGE_TRACE_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range.h
new file mode 100644
index 0000000..7ed4d38
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-range.h
@@ -0,0 +1,103 @@
+/* Header file for the GIMPLE range interface.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+ Contributed by Andrew MacLeod <amacleod@redhat.com>
+ and Aldy Hernandez <aldyh@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_RANGE_H
+#define GCC_GIMPLE_RANGE_H
+
+#include "range.h"
+#include "value-query.h"
+#include "gimple-range-op.h"
+#include "gimple-range-trace.h"
+#include "gimple-range-edge.h"
+#include "gimple-range-fold.h"
+#include "gimple-range-gori.h"
+#include "gimple-range-cache.h"
+
+// This is the basic range generator interface.
+//
+// This base class provides all the API entry points, but implements
+// functionality only at the statement level.  That is, it can calculate
+// ranges on statements, but does no additional lookup.
+//
+// All the range_of_* methods will return a range if the type is
+// supported by the range engine.  It may be the full range for the
+// type (AKA varying_p), or it may be a refined range.  If the range
+// type is not supported, then false is returned. Non-statement
+// related methods return whatever the current global value is.
+
+class gimple_ranger : public range_query
+{
+public:
+ gimple_ranger (bool use_imm_uses = true);
+ ~gimple_ranger ();
+ virtual bool range_of_stmt (vrange &r, gimple *, tree name = NULL) override;
+ virtual bool range_of_expr (vrange &r, tree name, gimple * = NULL) override;
+ virtual bool range_on_edge (vrange &r, edge e, tree name) override;
+ virtual void update_stmt (gimple *) override;
+ void range_on_entry (vrange &r, basic_block bb, tree name);
+ void range_on_exit (vrange &r, basic_block bb, tree name);
+ void export_global_ranges ();
+ inline gori_compute &gori () { return m_cache.m_gori; }
+ virtual void dump (FILE *f) override;
+ void debug ();
+ void dump_bb (FILE *f, basic_block bb);
+ auto_edge_flag non_executable_edge_flag;
+ bool fold_stmt (gimple_stmt_iterator *gsi, tree (*) (tree));
+ void register_inferred_ranges (gimple *s);
+ void register_transitive_inferred_ranges (basic_block bb);
+protected:
+ bool fold_range_internal (vrange &r, gimple *s, tree name);
+ void prefill_name (vrange &r, tree name);
+ void prefill_stmt_dependencies (tree ssa);
+ ranger_cache m_cache;
+ range_tracer tracer;
+ basic_block current_bb;
+ vec<tree> m_stmt_list;
+ friend class path_range_query;
+};
+
+/* Create a new ranger instance and associate it with a function.
+ Each call must be paired with a call to disable_ranger to release
+ resources. If USE_IMM_USES is true, pre-calculate side effects like
+ non-null uses as required using the immediate use chains. */
+extern gimple_ranger *enable_ranger (struct function *m,
+ bool use_imm_uses = true);
+extern void disable_ranger (struct function *);
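+
+// A hypothetical lifecycle sketch (names are illustrative):
+//
+//   gimple_ranger *ranger = enable_ranger (cfun);
+//   int_range_max r;
+//   if (ranger->range_of_expr (r, name, stmt) && !r.varying_p ())
+//     ...   // A refined range for NAME as seen at STMT.
+//   disable_ranger (cfun);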
+
+class assume_query : public range_query
+{
+public:
+ assume_query ();
+ bool assume_range_p (vrange &r, tree name);
+ virtual bool range_of_expr (vrange &r, tree expr, gimple * = NULL);
+ void dump (FILE *f);
+protected:
+ void calculate_stmt (gimple *s, vrange &lhs_range, fur_source &src);
+ void calculate_op (tree op, gimple *s, vrange &lhs, fur_source &src);
+ void calculate_phi (gphi *phi, vrange &lhs_range, fur_source &src);
+ void check_taken_edge (edge e, fur_source &src);
+
+ ssa_global_cache global;
+ gori_compute m_gori;
+};
+
+
+#endif // GCC_GIMPLE_RANGE_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-ssa-warn-access.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-ssa-warn-access.h
new file mode 100644
index 0000000..a8a2918
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-ssa-warn-access.h
@@ -0,0 +1,48 @@
+/* Pass to detect and issue warnings for invalid accesses, including
+ invalid or mismatched allocation/deallocation calls.
+
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+ Contributed by Martin Sebor <msebor@redhat.com>.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_SSA_WARN_ACCESS_H
+#define GCC_GIMPLE_SSA_WARN_ACCESS_H
+
+extern bool check_nul_terminated_array (tree, tree, tree = NULL_TREE);
+extern void warn_string_no_nul (location_t, gimple *, const char *, tree,
+ tree, tree = NULL_TREE, bool = false,
+ const wide_int[2] = NULL);
+extern void warn_string_no_nul (location_t, tree, const char *, tree,
+ tree, tree = NULL_TREE, bool = false,
+ const wide_int[2] = NULL);
+extern tree unterminated_array (tree, tree * = NULL, bool * = NULL);
+
+extern bool maybe_warn_nonstring_arg (tree, gimple *);
+extern bool maybe_warn_nonstring_arg (tree, tree);
+
+class access_data;
+extern bool maybe_warn_for_bound (opt_code, location_t, gimple *, tree,
+ tree[2], tree, const access_data * = NULL);
+extern bool maybe_warn_for_bound (opt_code, location_t, tree, tree,
+ tree[2], tree, const access_data * = NULL);
+
+class access_data;
+extern bool check_access (tree, tree, tree, tree, tree, access_mode,
+ const access_data * = NULL);
+
+#endif // GCC_GIMPLE_SSA_WARN_ACCESS_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-ssa-warn-restrict.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-ssa-warn-restrict.h
new file mode 100644
index 0000000..722a895
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-ssa-warn-restrict.h
@@ -0,0 +1,29 @@
+/* Warn on violations of the restrict qualifier.
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+ Contributed by Martin Sebor <msebor@redhat.com>.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GIMPLE_SSA_WARN_RESTRICT_H
+
+extern opt_code check_bounds_or_overlap (gimple *, tree, tree, tree, tree,
+ bool = false, bool = true);
+extern opt_code check_bounds_or_overlap (class pointer_query &, gimple *,
+ tree, tree, tree, tree,
+ bool = false, bool = true);
+
+#endif /* GIMPLE_SSA_WARN_RESTRICT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-ssa.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-ssa.h
new file mode 100644
index 0000000..604af8b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-ssa.h
@@ -0,0 +1,201 @@
+/* Header file for routines that straddle the border between GIMPLE and
+ SSA in gimple.
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_SSA_H
+#define GCC_GIMPLE_SSA_H
+
+#include "tree-ssa-operands.h"
+
+/* This structure is used to map a gimple statement to a label,
+ or list of labels to represent transaction restart. */
+
+struct GTY((for_user)) tm_restart_node {
+ gimple *stmt;
+ tree label_or_list;
+};
+
+/* Hasher for tm_restart_node. */
+
+struct tm_restart_hasher : ggc_ptr_hash<tm_restart_node>
+{
+ static hashval_t hash (tm_restart_node *n) { return htab_hash_pointer (n); }
+
+ static bool
+ equal (tm_restart_node *a, tm_restart_node *b)
+ {
+ return a == b;
+ }
+};
+
+extern void gt_ggc_mx (gimple *&);
+extern void gt_pch_nx (gimple *&);
+
+struct ssa_name_hasher : ggc_ptr_hash<tree_node>
+{
+ /* Hash a tree in a uid_decl_map. */
+
+ static hashval_t
+ hash (tree item)
+ {
+ return item->ssa_name.var->decl_minimal.uid;
+ }
+
+ /* Return true if the DECL_UID in both trees are equal. */
+
+ static bool
+ equal (tree a, tree b)
+  {
+    return (a->ssa_name.var->decl_minimal.uid
+            == b->ssa_name.var->decl_minimal.uid);
+  }
+};
+
+/* Gimple dataflow datastructure. All publicly available fields shall have
+ gimple_ accessor defined, all publicly modifiable fields should have
+ gimple_set accessor. */
+struct GTY(()) gimple_df {
+ /* Array of all SSA_NAMEs used in the function. */
+ vec<tree, va_gc> *ssa_names;
+
+ /* Artificial variable used for the virtual operand FUD chain. */
+ tree vop;
+
+ /* The PTA solution for the ESCAPED artificial variable. */
+ struct pt_solution escaped;
+
+ /* A map of decls to artificial ssa-names that point to the partition
+ of the decl. */
+ hash_map<tree, tree> * GTY((skip(""))) decls_to_pointers;
+
+ /* Free list of SSA_NAMEs. */
+ vec<tree, va_gc> *free_ssanames;
+
+ /* Queue of SSA_NAMEs to be freed at the next opportunity. */
+ vec<tree, va_gc> *free_ssanames_queue;
+
+ /* Hashtable holding definition for symbol. If this field is not NULL, it
+ means that the first reference to this variable in the function is a
+ USE or a VUSE. In those cases, the SSA renamer creates an SSA name
+ for this variable with an empty defining statement. */
+ hash_table<ssa_name_hasher> *default_defs;
+
+ /* True if there are any symbols that need to be renamed. */
+ unsigned int ssa_renaming_needed : 1;
+
+ /* True if all virtual operands need to be renamed. */
+ unsigned int rename_vops : 1;
+
+ /* True if the code is in ssa form. */
+ unsigned int in_ssa_p : 1;
+
+ /* True if IPA points-to information was computed for this function. */
+ unsigned int ipa_pta : 1;
+
+ struct ssa_operands ssa_operands;
+
+ /* Map gimple stmt to tree label (or list of labels) for transaction
+ restart and abort. */
+ hash_table<tm_restart_hasher> *tm_restart;
+};
+
+
+/* Return true when gimple SSA form was built.
+ gimple_in_ssa_p is queried by gimplifier in various early stages before SSA
+ infrastructure is initialized. Check for presence of the datastructures
+ at first place. */
+inline bool
+gimple_in_ssa_p (const struct function *fun)
+{
+ return fun && fun->gimple_df && fun->gimple_df->in_ssa_p;
+}
+
+/* Artificial variable used for the virtual operand FUD chain. */
+inline tree
+gimple_vop (const struct function *fun)
+{
+ gcc_checking_assert (fun && fun->gimple_df);
+ return fun->gimple_df->vop;
+}
+
+/* Return the set of VUSE operand for statement G. */
+
+inline use_operand_p
+gimple_vuse_op (const gimple *g)
+{
+ struct use_optype_d *ops;
+ const gimple_statement_with_memory_ops *mem_ops_stmt =
+ dyn_cast <const gimple_statement_with_memory_ops *> (g);
+ if (!mem_ops_stmt)
+ return NULL_USE_OPERAND_P;
+ ops = mem_ops_stmt->use_ops;
+ if (ops
+ && USE_OP_PTR (ops)->use == &mem_ops_stmt->vuse)
+ return USE_OP_PTR (ops);
+ return NULL_USE_OPERAND_P;
+}
+
+/* Return the set of VDEF operand for statement G. */
+
+inline def_operand_p
+gimple_vdef_op (gimple *g)
+{
+ gimple_statement_with_memory_ops *mem_ops_stmt =
+ dyn_cast <gimple_statement_with_memory_ops *> (g);
+ if (!mem_ops_stmt)
+ return NULL_DEF_OPERAND_P;
+ if (mem_ops_stmt->vdef)
+ return &mem_ops_stmt->vdef;
+ return NULL_DEF_OPERAND_P;
+}
+
+/* Mark statement S as modified, and update it. */
+
+inline void
+update_stmt (gimple *s)
+{
+ if (gimple_has_ops (s))
+ {
+ gimple_set_modified (s, true);
+ update_stmt_operands (cfun, s);
+ }
+}
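+
+/* For example, after replacing an operand in place (illustrative):
+
+     gimple_assign_set_rhs1 (stmt, new_op);
+     update_stmt (stmt);   // Re-scan STMT's operands.  */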
+
+/* Update statement S if it has been optimized. */
+
+inline void
+update_stmt_if_modified (gimple *s)
+{
+ if (gimple_modified_p (s))
+ update_stmt_operands (cfun, s);
+}
+
+/* Mark statement S as modified, and update it. */
+
+inline void
+update_stmt_fn (struct function *fn, gimple *s)
+{
+ if (gimple_has_ops (s))
+ {
+ gimple_set_modified (s, true);
+ update_stmt_operands (fn, s);
+ }
+}
+
+
+#endif /* GCC_GIMPLE_SSA_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-streamer.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-streamer.h
new file mode 100644
index 0000000..83d62ae
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-streamer.h
@@ -0,0 +1,34 @@
+/* Data structures and functions for streaming GIMPLE.
+
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+ Contributed by Diego Novillo <dnovillo@google.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_STREAMER_H
+#define GCC_GIMPLE_STREAMER_H
+
+#include "tree-streamer.h"
+
+/* In gimple-streamer-in.cc */
+void input_bb (class lto_input_block *, enum LTO_tags, class data_in *,
+ struct function *, int);
+
+/* In gimple-streamer-out.cc */
+void output_bb (struct output_block *, basic_block, struct function *);
+
+#endif /* GCC_GIMPLE_STREAMER_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-walk.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-walk.h
new file mode 100644
index 0000000..c0222e0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple-walk.h
@@ -0,0 +1,101 @@
+/* Header file for gimple statement walk support.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_WALK_H
+#define GCC_GIMPLE_WALK_H
+
+/* Convenience routines to walk all statements of a gimple function.
+ Note that this is useful exclusively before the code is converted
+ into SSA form. Once the program is in SSA form, the standard
+ operand interface should be used to analyze/modify statements. */
+struct walk_stmt_info
+{
+ /* Points to the current statement being walked. */
+ gimple_stmt_iterator gsi;
+ gimple *stmt;
+
+ /* Additional data that the callback functions may want to carry
+ through the recursion. */
+ void *info;
+
+ /* Pointer map used to mark visited tree nodes when calling
+ walk_tree on each operand. If set to NULL, duplicate tree nodes
+ will be visited more than once. */
+ hash_set<tree> *pset;
+
+ /* Operand returned by the callbacks. This is set when calling
+ walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback
+ returns non-NULL, this field will contain the tree returned by
+ the last callback. */
+ tree callback_result;
+
+ /* Indicates whether the operand being examined may be replaced
+ with something that matches is_gimple_val (if true) or something
+ slightly more complicated (if false). "Something" technically
+ means the common subset of is_gimple_lvalue and is_gimple_rhs,
+ but we never try to form anything more complicated than that, so
+ we don't bother checking.
+
+ Also note that CALLBACK should update this flag while walking the
+ sub-expressions of a statement. For instance, when walking the
+ statement 'foo (&var)', the flag VAL_ONLY will initially be set
+ to true, however, when walking &var, the operand of that
+ ADDR_EXPR does not need to be a GIMPLE value. */
+ BOOL_BITFIELD val_only : 1;
+
+ /* True if we are currently walking the LHS of an assignment. */
+ BOOL_BITFIELD is_lhs : 1;
+
+ /* Optional. Set to true by the callback functions if they made any
+ changes. */
+ BOOL_BITFIELD changed : 1;
+
+ /* True if we're interested in location information. */
+ BOOL_BITFIELD want_locations : 1;
+
+ /* True if we've removed the statement that was processed. */
+ BOOL_BITFIELD removed_stmt : 1;
+};
+
+/* Callback for walk_gimple_stmt. Called for every statement found
+ during traversal. The first argument points to the statement to
+ walk. The second argument is a flag that the callback sets to
+   'true' if the callback handled all the operands and
+ sub-statements of the statement (the default value of this flag is
+ 'false'). The third argument is an anonymous pointer to data
+ to be used by the callback. */
+typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *,
+ struct walk_stmt_info *);
+
+extern gimple *walk_gimple_seq_mod (gimple_seq *, walk_stmt_fn, walk_tree_fn,
+ struct walk_stmt_info *);
+extern gimple *walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn,
+ struct walk_stmt_info *);
+extern tree walk_gimple_op (gimple *, walk_tree_fn, struct walk_stmt_info *);
+extern tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn,
+ walk_tree_fn, struct walk_stmt_info *);
+typedef bool (*walk_stmt_load_store_addr_fn) (gimple *, tree, tree, void *);
+extern bool walk_stmt_load_store_addr_ops (gimple *, void *,
+ walk_stmt_load_store_addr_fn,
+ walk_stmt_load_store_addr_fn,
+ walk_stmt_load_store_addr_fn);
+extern bool walk_stmt_load_store_ops (gimple *, void *,
+ walk_stmt_load_store_addr_fn,
+ walk_stmt_load_store_addr_fn);
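+
+/* A hypothetical walker sketch: count the statements in SEQ (the
+   callback and its use of WI->INFO are illustrative):
+
+     static tree
+     count_stmt (gimple_stmt_iterator *, bool *handled,
+                 struct walk_stmt_info *wi)
+     {
+       ++*(unsigned *) wi->info;
+       *handled = true;    // Operands need no further walking.
+       return NULL_TREE;   // NULL continues the walk.
+     }
+
+     unsigned n = 0;
+     struct walk_stmt_info wi;
+     memset (&wi, 0, sizeof (wi));
+     wi.info = &n;
+     walk_gimple_seq (seq, count_stmt, NULL, &wi);  */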
+#endif /* GCC_GIMPLE_WALK_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple.def
new file mode 100644
index 0000000..274350d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple.def
@@ -0,0 +1,413 @@
+/* This file contains the definitions of the GIMPLE IR tuples used in GCC.
+
+ Copyright (C) 2007-2023 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez <aldyh@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* The format of this file is
+ DEFGSCODE(GIMPLE_symbol, printable name, GSS_symbol). */
+
+
+/* Error marker. This is used in similar ways as ERROR_MARK in tree.def. */
+DEFGSCODE(GIMPLE_ERROR_MARK, "gimple_error_mark", GSS_BASE)
+
+/* IMPORTANT. Do not rearrange the codes between GIMPLE_COND and
+ GIMPLE_RETURN. The ordering is exposed by gimple_has_ops calls.
+ These are all the GIMPLE statements with register operands. */
+
+/* GIMPLE_COND <COND_CODE, OP1, OP2, TRUE_LABEL, FALSE_LABEL>
+ represents the conditional jump:
+
+ if (OP1 COND_CODE OP2) goto TRUE_LABEL else goto FALSE_LABEL
+
+ COND_CODE is the tree code used as the comparison predicate. It
+ must be of class tcc_comparison.
+
+ OP1 and OP2 are the operands used in the comparison. They must be
+ accepted by is_gimple_operand.
+
+ TRUE_LABEL and FALSE_LABEL are the LABEL_DECL nodes used as the
+ jump target for the comparison. */
+DEFGSCODE(GIMPLE_COND, "gimple_cond", GSS_WITH_OPS)
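+
+/* For example, the C source 'if (a < b) ...' lowers to
+   GIMPLE_COND <LT_EXPR, a, b, <true-label>, <false-label>>.  */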
+
+/* GIMPLE_DEBUG represents a debug statement. */
+DEFGSCODE(GIMPLE_DEBUG, "gimple_debug", GSS_WITH_OPS)
+
+/* GIMPLE_GOTO <TARGET> represents unconditional jumps.
+ TARGET is a LABEL_DECL or an expression node for computed GOTOs. */
+DEFGSCODE(GIMPLE_GOTO, "gimple_goto", GSS_WITH_OPS)
+
+/* GIMPLE_LABEL <LABEL> represents label statements. LABEL is a
+ LABEL_DECL representing a jump target. */
+DEFGSCODE(GIMPLE_LABEL, "gimple_label", GSS_WITH_OPS)
+
+/* GIMPLE_SWITCH <INDEX, DEFAULT_LAB, LAB1, ..., LABN> represents the
+ multiway branch:
+
+ switch (INDEX)
+ {
+ case LAB1: ...; break;
+ ...
+ case LABN: ...; break;
+ default: ...
+ }
+
+ INDEX is the variable evaluated to decide which label to jump to.
+
+ DEFAULT_LAB, LAB1 ... LABN are the tree nodes representing case labels.
+ They must be CASE_LABEL_EXPR nodes. */
+DEFGSCODE(GIMPLE_SWITCH, "gimple_switch", GSS_WITH_OPS)
+
+/* IMPORTANT.
+
+ Do not rearrange the codes between GIMPLE_ASSIGN and GIMPLE_RETURN.
+ It's exposed by GIMPLE_RANGE_CHECK calls. These are all the GIMPLE
+ statements with memory and register operands. */
+
+/* GIMPLE_ASSIGN <SUBCODE, LHS, RHS1[, RHS2]> represents the assignment
+ statement
+
+ LHS = RHS1 SUBCODE RHS2.
+
+ SUBCODE is the tree code for the expression computed by the RHS of the
+ assignment. It must be one of the tree codes accepted by
+ get_gimple_rhs_class. If LHS is not a gimple register according to
+ is_gimple_reg, SUBCODE must be of class GIMPLE_SINGLE_RHS.
+
+ LHS is the operand on the LHS of the assignment. It must be a tree node
+ accepted by is_gimple_lvalue.
+
+ RHS1 is the first operand on the RHS of the assignment. It must always be
+ present. It must be a tree node accepted by is_gimple_val.
+
+ RHS2 is the second operand on the RHS of the assignment. It must be a tree
+ node accepted by is_gimple_val. This argument exists only if SUBCODE is
+ of class GIMPLE_BINARY_RHS. */
+DEFGSCODE(GIMPLE_ASSIGN, "gimple_assign", GSS_WITH_MEM_OPS)
+
+/* GIMPLE_ASM <STRING, I1, ..., IN, O1, ... OM, C1, ..., CP>
+ represents inline assembly statements.
+
+ STRING is the string containing the assembly statements.
+ I1 ... IN are the N input operands.
+ O1 ... OM are the M output operands.
+ C1 ... CP are the P clobber operands.
+ L1 ... LQ are the Q label operands. */
+DEFGSCODE(GIMPLE_ASM, "gimple_asm", GSS_ASM)
+
+/* GIMPLE_CALL <FN, LHS, ARG1, ..., ARGN[, CHAIN]> represents function
+ calls.
+
+ FN is the callee. It must be accepted by is_gimple_call_addr.
+
+ LHS is the operand where the return value from FN is stored. It may
+ be NULL.
+
+ ARG1 ... ARGN are the arguments. They must all be accepted by
+ is_gimple_operand.
+
+ CHAIN is the optional static chain link for nested functions. */
+DEFGSCODE(GIMPLE_CALL, "gimple_call", GSS_CALL)
+
+/* GIMPLE_TRANSACTION <BODY, LABEL> represents __transaction_atomic and
+ __transaction_relaxed blocks.
+ BODY is the sequence of statements inside the transaction.
+ LABEL is a label for the statement immediately following the
+   transaction.  This is placed before RETURN so that it has MEM_OPS,
+   allowing it to clobber global memory.  */
+DEFGSCODE(GIMPLE_TRANSACTION, "gimple_transaction", GSS_TRANSACTION)
+
+/* GIMPLE_RETURN <RETVAL> represents return statements.
+
+ RETVAL is the value to return or NULL. If a value is returned it
+ must be accepted by is_gimple_operand. */
+DEFGSCODE(GIMPLE_RETURN, "gimple_return", GSS_WITH_MEM_OPS)
+
+/* GIMPLE_BIND <VARS, BLOCK, BODY> represents a lexical scope.
+ VARS is the set of variables declared in that scope.
+ BLOCK is the symbol binding block used for debug information.
+ BODY is the sequence of statements in the scope. */
+DEFGSCODE(GIMPLE_BIND, "gimple_bind", GSS_BIND)
+
+/* GIMPLE_CATCH <TYPES, HANDLER> represents a typed exception handler.
+ TYPES is the type (or list of types) handled. HANDLER is the
+ sequence of statements that handle these types. */
+DEFGSCODE(GIMPLE_CATCH, "gimple_catch", GSS_CATCH)
+
+/* GIMPLE_EH_FILTER <TYPES, FAILURE> represents an exception
+ specification. TYPES is a list of allowed types and FAILURE is the
+ sequence of statements to execute on failure. */
+DEFGSCODE(GIMPLE_EH_FILTER, "gimple_eh_filter", GSS_EH_FILTER)
+
+/* GIMPLE_EH_MUST_NOT_THROW <DECL> represents an exception barrier.
+ DECL is a noreturn function decl taking no arguments that will
+ be invoked if an exception propagates to this point. */
+DEFGSCODE(GIMPLE_EH_MUST_NOT_THROW, "gimple_eh_must_not_throw", GSS_EH_MNT)
+
+/* GIMPLE_EH_ELSE <N_BODY, E_BODY> must be the sole contents of
+ a GIMPLE_TRY_FINALLY node. For all normal exits from the try block,
+ N_BODY is run; for all exception exits from the try block,
+ E_BODY is run. */
+DEFGSCODE(GIMPLE_EH_ELSE, "gimple_eh_else", GSS_EH_ELSE)
+
+/* GIMPLE_RESX resumes execution after an exception. */
+DEFGSCODE(GIMPLE_RESX, "gimple_resx", GSS_EH_CTRL)
+
+/* GIMPLE_EH_DISPATCH demultiplexes an exception edge based on
+ the FILTER argument. */
+DEFGSCODE(GIMPLE_EH_DISPATCH, "gimple_eh_dispatch", GSS_EH_CTRL)
+
+/* GIMPLE_PHI <RESULT, ARG1, ..., ARGN> represents the PHI node
+
+ RESULT = PHI <ARG1, ..., ARGN>
+
+ RESULT is the SSA name created by this PHI node.
+
+ ARG1 ... ARGN are the arguments to the PHI node. N must be
+ exactly the same as the number of incoming edges to the basic block
+ holding the PHI node. Every argument is either an SSA name or a
+ tree node of class tcc_constant. */
+DEFGSCODE(GIMPLE_PHI, "gimple_phi", GSS_PHI)
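+
+/* For example, when two arms assigning x_1 and x_2 join, the merged value
+   appears as x_3 = PHI <x_1(2), x_2(3)>, with one argument per incoming
+   edge (source block numbers in parentheses).  */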
+
+/* GIMPLE_TRY <TRY_KIND, EVAL, CLEANUP>
+ represents a try/catch or a try/finally statement.
+
+ TRY_KIND is either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY.
+
+ EVAL is the sequence of statements to execute on entry to GIMPLE_TRY.
+
+ CLEANUP is the sequence of statements to execute according to
+   TRY_KIND.  If TRY_KIND is GIMPLE_TRY_CATCH, CLEANUP is only executed
+ if an exception is thrown during execution of EVAL. If TRY_KIND is
+ GIMPLE_TRY_FINALLY, CLEANUP is always executed after executing EVAL
+ (regardless of whether EVAL finished normally, or jumped out or an
+ exception was thrown). */
+DEFGSCODE(GIMPLE_TRY, "gimple_try", GSS_TRY)
+
+/* GIMPLE_NOP represents the "do nothing" statement. */
+DEFGSCODE(GIMPLE_NOP, "gimple_nop", GSS_BASE)
+
+
+/* IMPORTANT.
+
+ Do not rearrange any of the GIMPLE_OMP_* codes. This ordering is
+ exposed by the range check in gimple_omp_subcode(). */
+
+
+/* Tuples used for lowering of OMP_ATOMIC. Although the form of the OMP_ATOMIC
+ expression is very simple (just in form mem op= expr), various implicit
+ conversions may cause the expression to become more complex, so that it does
+ not fit the gimple grammar very well. To overcome this problem, OMP_ATOMIC
+ is rewritten as a sequence of two codes in gimplification:
+
+ GIMPLE_OMP_LOAD (tmp, mem)
+ val = some computations involving tmp;
+ GIMPLE_OMP_STORE (val). */
+DEFGSCODE(GIMPLE_OMP_ATOMIC_LOAD, "gimple_omp_atomic_load",
+ GSS_OMP_ATOMIC_LOAD)
+DEFGSCODE(GIMPLE_OMP_ATOMIC_STORE, "gimple_omp_atomic_store",
+ GSS_OMP_ATOMIC_STORE_LAYOUT)
+
+/* GIMPLE_OMP_CONTINUE marks the location of the loop or sections
+ iteration in partially lowered OpenMP code. */
+DEFGSCODE(GIMPLE_OMP_CONTINUE, "gimple_omp_continue", GSS_OMP_CONTINUE)
+
+/* GIMPLE_OMP_CRITICAL <NAME, BODY> represents
+
+ #pragma omp critical [name]
+
+ NAME is the name given to the critical section.
+ BODY is the sequence of statements that are inside the critical section. */
+DEFGSCODE(GIMPLE_OMP_CRITICAL, "gimple_omp_critical", GSS_OMP_CRITICAL)
+
+/* GIMPLE_OMP_FOR <BODY, CLAUSES, INDEX, INITIAL, FINAL, COND, INCR, PRE_BODY>
+ represents
+
+ PRE_BODY
+ #pragma omp for [clause1 ... clauseN]
+ for (INDEX = INITIAL; INDEX COND FINAL; INDEX {+=,-=} INCR)
+ BODY
+
+ Likewise for:
+ #pragma acc loop [clause1 ... clauseN]
+
+ BODY is the loop body.
+
+ CLAUSES is the list of clauses.
+
+ INDEX must be an integer or pointer variable, which is implicitly thread
+ private. It must be accepted by is_gimple_operand.
+
+ INITIAL is the initial value given to INDEX. It must be
+ accepted by is_gimple_operand.
+
+ FINAL is the final value that INDEX should take. It must
+ be accepted by is_gimple_operand.
+
+ COND is the condition code for the controlling predicate. It must
+   be one of { <, >, <=, >= }.
+
+   INCR is the loop index increment.  It must be a tree node of type
+ tcc_constant.
+
+ PRE_BODY is a landing pad filled by the gimplifier with things from
+ INIT, COND, and INCR that are technically part of the OMP_FOR
+ structured block, but are evaluated before the loop body begins.
+
+ INITIAL, FINAL and INCR are required to be loop invariant integer
+ expressions that are evaluated without any synchronization.
+ The evaluation order, frequency of evaluation and side-effects are
+ unspecified by the standards. */
+DEFGSCODE(GIMPLE_OMP_FOR, "gimple_omp_for", GSS_OMP_FOR)
+
+/* GIMPLE_OMP_MASTER <BODY> represents #pragma omp master.
+ BODY is the sequence of statements to execute in the master section. */
+DEFGSCODE(GIMPLE_OMP_MASTER, "gimple_omp_master", GSS_OMP)
+
+/* GIMPLE_OMP_MASKED <BODY, CLAUSES> represents #pragma omp masked.
+ BODY is the sequence of statements to execute in the masked section. */
+DEFGSCODE(GIMPLE_OMP_MASKED, "gimple_omp_masked", GSS_OMP_SINGLE_LAYOUT)
+
+/* GIMPLE_OMP_TASKGROUP <BODY, CLAUSES> represents #pragma omp taskgroup.
+ BODY is the sequence of statements inside the taskgroup section.
+ CLAUSES is an OMP_CLAUSE chain holding the associated clauses. */
+DEFGSCODE(GIMPLE_OMP_TASKGROUP, "gimple_omp_taskgroup", GSS_OMP_SINGLE_LAYOUT)
+
+/* GIMPLE_OMP_PARALLEL <BODY, CLAUSES, CHILD_FN, DATA_ARG> represents
+
+ #pragma omp parallel [CLAUSES]
+ BODY
+
+   BODY is the sequence of statements to be executed by all threads.
+
+ CLAUSES is an OMP_CLAUSE chain with all the clauses.
+
+ CHILD_FN is set when outlining the body of the parallel region.
+ All the statements in BODY are moved into this newly created
+ function when converting OMP constructs into low-GIMPLE.
+
+ DATA_ARG is a local variable in the parent function containing data
+ to be shared with CHILD_FN. This is used to implement all the data
+ sharing clauses. */
+DEFGSCODE(GIMPLE_OMP_PARALLEL, "gimple_omp_parallel", GSS_OMP_PARALLEL_LAYOUT)
+
+/* GIMPLE_OMP_TASK <BODY, CLAUSES, CHILD_FN, DATA_ARG, COPY_FN,
+ ARG_SIZE, ARG_ALIGN> represents
+
+ #pragma omp task [CLAUSES]
+ BODY
+
+   BODY is the sequence of statements to be executed by all threads.
+
+ CLAUSES is an OMP_CLAUSE chain with all the clauses.
+
+ CHILD_FN is set when outlining the body of the explicit task region.
+ All the statements in BODY are moved into this newly created
+ function when converting OMP constructs into low-GIMPLE.
+
+ DATA_ARG is a local variable in the parent function containing data
+ to be shared with CHILD_FN. This is used to implement all the data
+ sharing clauses.
+
+ COPY_FN is set when outlining the firstprivate var initialization.
+ All the needed statements are emitted into the newly created
+   function; when a plain memcpy suffices, COPY_FN is NULL.
+
+ ARG_SIZE and ARG_ALIGN are the size and alignment of the incoming
+ data area allocated by GOMP_task and passed to CHILD_FN. */
+DEFGSCODE(GIMPLE_OMP_TASK, "gimple_omp_task", GSS_OMP_TASK)
+
+/* GIMPLE_OMP_RETURN marks the end of an OpenMP directive.  */
+DEFGSCODE(GIMPLE_OMP_RETURN, "gimple_omp_return", GSS_OMP_ATOMIC_STORE_LAYOUT)
+
+/* GIMPLE_OMP_SCAN <BODY, CLAUSES> represents #pragma omp scan
+   BODY is the sequence of statements inside the scan construct.
+ CLAUSES is an OMP_CLAUSE chain holding the associated clauses. */
+DEFGSCODE(GIMPLE_OMP_SCAN, "gimple_omp_scan", GSS_OMP_SINGLE_LAYOUT)
+
+/* GIMPLE_OMP_SCOPE <BODY, CLAUSES> represents #pragma omp scope
+   BODY is the sequence of statements inside the scope construct.
+ CLAUSES is an OMP_CLAUSE chain holding the associated clauses. */
+DEFGSCODE(GIMPLE_OMP_SCOPE, "gimple_omp_scope", GSS_OMP_SINGLE_LAYOUT)
+
+/* GIMPLE_OMP_SECTION <BODY> represents #pragma omp section.
+ BODY is the sequence of statements in the section body. */
+DEFGSCODE(GIMPLE_OMP_SECTION, "gimple_omp_section", GSS_OMP)
+
+/* GIMPLE_OMP_SECTIONS <BODY, CLAUSES, CONTROL> represents #pragma omp sections.
+
+ BODY is the sequence of statements in the sections body.
+ CLAUSES is an OMP_CLAUSE chain holding the list of associated clauses.
+ CONTROL is a VAR_DECL used for deciding which of the sections
+ to execute. */
+DEFGSCODE(GIMPLE_OMP_SECTIONS, "gimple_omp_sections", GSS_OMP_SECTIONS)
+
+/* GIMPLE_OMP_SECTIONS_SWITCH is a marker placed immediately after
+   GIMPLE_OMP_SECTIONS.  It represents the GIMPLE_SWITCH used to decide which
+ branch is taken. */
+DEFGSCODE(GIMPLE_OMP_SECTIONS_SWITCH, "gimple_omp_sections_switch", GSS_BASE)
+
+/* GIMPLE_OMP_SINGLE <BODY, CLAUSES> represents #pragma omp single
+ BODY is the sequence of statements inside the single section.
+ CLAUSES is an OMP_CLAUSE chain holding the associated clauses. */
+DEFGSCODE(GIMPLE_OMP_SINGLE, "gimple_omp_single", GSS_OMP_SINGLE_LAYOUT)
+
+/* GIMPLE_OMP_TARGET <BODY, CLAUSES, CHILD_FN, DATA_ARG> represents
+ #pragma acc {kernels,parallel,serial,data,enter data,exit data,update}
+ #pragma omp target {,data,update}
+ BODY is the sequence of statements inside the construct
+ (NULL for some variants).
+ CLAUSES is an OMP_CLAUSE chain holding the associated clauses.
+ CHILD_FN is set when outlining the body of the offloaded region.
+ All the statements in BODY are moved into this newly created
+ function when converting OMP constructs into low-GIMPLE.
+ DATA_ARG is a vec of 3 local variables in the parent function
+ containing data to be mapped to CHILD_FN. This is used to
+ implement the MAP clauses. */
+DEFGSCODE(GIMPLE_OMP_TARGET, "gimple_omp_target", GSS_OMP_PARALLEL_LAYOUT)
+
+/* GIMPLE_OMP_TEAMS <BODY, CLAUSES, CHILD_FN, DATA_ARG> represents
+ #pragma omp teams
+   BODY is the sequence of statements inside the teams construct.
+ CLAUSES is an OMP_CLAUSE chain holding the associated clauses.
+ CHILD_FN and DATA_ARG like for GIMPLE_OMP_PARALLEL. */
+DEFGSCODE(GIMPLE_OMP_TEAMS, "gimple_omp_teams", GSS_OMP_PARALLEL_LAYOUT)
+
+/* GIMPLE_OMP_ORDERED <BODY, CLAUSES> represents #pragma omp ordered.
+ BODY is the sequence of statements to execute in the ordered section.
+ CLAUSES is an OMP_CLAUSE chain holding the associated clauses. */
+DEFGSCODE(GIMPLE_OMP_ORDERED, "gimple_omp_ordered", GSS_OMP_SINGLE_LAYOUT)
+
+/* GIMPLE_PREDICT <PREDICT, OUTCOME> specifies a hint for branch prediction.
+
+ PREDICT is one of the predictors from predict.def.
+
+ OUTCOME is NOT_TAKEN or TAKEN. */
+DEFGSCODE(GIMPLE_PREDICT, "gimple_predict", GSS_BASE)
+
+/* This node represents a cleanup expression. It is ONLY USED INTERNALLY
+ by the gimplifier as a placeholder for cleanups, and its uses will be
+ cleaned up by the time gimplification is done.
+
+ This tuple should not exist outside of the gimplifier proper. */
+DEFGSCODE(GIMPLE_WITH_CLEANUP_EXPR, "gimple_with_cleanup_expr", GSS_WCE)
+
+/* GIMPLE_ASSUME <GUARD, BODY> represents [[assume(cond)]].
+ BODY is the GIMPLE_BIND with the condition which sets GUARD to true
+   (otherwise the behavior is undefined).  */
+DEFGSCODE(GIMPLE_ASSUME, "gimple_assume", GSS_ASSUME)
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple.h
new file mode 100644
index 0000000..081d18e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimple.h
@@ -0,0 +1,6911 @@
+/* Gimple IR definitions.
+
+ Copyright (C) 2007-2023 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez <aldyh@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLE_H
+#define GCC_GIMPLE_H
+
+#include "tree-ssa-alias.h"
+#include "gimple-expr.h"
+
+typedef gimple *gimple_seq_node;
+
+enum gimple_code {
+#define DEFGSCODE(SYM, STRING, STRUCT) SYM,
+#include "gimple.def"
+#undef DEFGSCODE
+ LAST_AND_UNUSED_GIMPLE_CODE
+};
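+
+/* Expansion sketch (assumption): an entry in gimple.def such as
+
+     DEFGSCODE(GIMPLE_COND, "gimple_cond", GSS_WITH_OPS)
+
+   contributes the enumerator GIMPLE_COND here; the name string and the
+   GSS code are consumed by other expansions of the same macro, e.g. the
+   initializers of gimple_code_name[] and gss_for_code_[].  */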
+
+extern const char *const gimple_code_name[];
+extern const unsigned char gimple_rhs_class_table[];
+
+/* Strip the outermost pointer, from tr1/type_traits. */
+template<typename T> struct remove_pointer { typedef T type; };
+template<typename T> struct remove_pointer<T *> { typedef T type; };
+
+/* Error out if a gimple tuple is addressed incorrectly. */
+#if defined ENABLE_GIMPLE_CHECKING
+#define gcc_gimple_checking_assert(EXPR) gcc_assert (EXPR)
+extern void gimple_check_failed (const gimple *, const char *, int, \
+ const char *, enum gimple_code, \
+ enum tree_code) ATTRIBUTE_NORETURN \
+ ATTRIBUTE_COLD;
+
+#define GIMPLE_CHECK(GS, CODE) \
+ do { \
+ const gimple *__gs = (GS); \
+ if (gimple_code (__gs) != (CODE)) \
+ gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \
+ (CODE), ERROR_MARK); \
+ } while (0)
+template <typename T>
+inline T
+GIMPLE_CHECK2(const gimple *gs,
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ const char *file = __builtin_FILE (),
+ int line = __builtin_LINE (),
+ const char *fun = __builtin_FUNCTION ())
+#else
+ const char *file = __FILE__,
+ int line = __LINE__,
+ const char *fun = NULL)
+#endif
+{
+ T ret = dyn_cast <T> (gs);
+ if (!ret)
+ gimple_check_failed (gs, file, line, fun,
+ remove_pointer<T>::type::code_, ERROR_MARK);
+ return ret;
+}
+template <typename T>
+inline T
+GIMPLE_CHECK2(gimple *gs,
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ const char *file = __builtin_FILE (),
+ int line = __builtin_LINE (),
+ const char *fun = __builtin_FUNCTION ())
+#else
+ const char *file = __FILE__,
+ int line = __LINE__,
+ const char *fun = NULL)
+#endif
+{
+ T ret = dyn_cast <T> (gs);
+ if (!ret)
+ gimple_check_failed (gs, file, line, fun,
+ remove_pointer<T>::type::code_, ERROR_MARK);
+ return ret;
+}
+#else /* not ENABLE_GIMPLE_CHECKING */
+#define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR)))
+#define GIMPLE_CHECK(GS, CODE) (void)0
+template <typename T>
+inline T
+GIMPLE_CHECK2(gimple *gs)
+{
+ return as_a <T> (gs);
+}
+template <typename T>
+inline T
+GIMPLE_CHECK2(const gimple *gs)
+{
+ return as_a <T> (gs);
+}
+#endif
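+
+/* Usage sketch (assumption; example_cond_lhs is hypothetical): typed
+   accessors typically funnel through these helpers, e.g.
+
+     tree
+     example_cond_lhs (gimple *gs)
+     {
+       gcond *c = GIMPLE_CHECK2<gcond *> (gs);
+       return gimple_cond_lhs (c);
+     }
+
+   In checked builds a statement of the wrong code aborts through
+   gimple_check_failed; otherwise GIMPLE_CHECK2 reduces to as_a<>.  */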
+
+/* Class of GIMPLE expressions suitable for the RHS of assignments. See
+ get_gimple_rhs_class. */
+enum gimple_rhs_class
+{
+ GIMPLE_INVALID_RHS, /* The expression cannot be used on the RHS. */
+ GIMPLE_TERNARY_RHS, /* The expression is a ternary operation. */
+ GIMPLE_BINARY_RHS, /* The expression is a binary operation. */
+ GIMPLE_UNARY_RHS, /* The expression is a unary operation. */
+ GIMPLE_SINGLE_RHS /* The expression is a single object (an SSA
+				   name, a _DECL, a _REF, etc.).  */
+};
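+
+/* Illustrative examples (assumption): for "a = b + c" the class is
+   GIMPLE_BINARY_RHS, for "a = -b" GIMPLE_UNARY_RHS, for a COND_EXPR
+   right-hand side "a = b ? c : d" GIMPLE_TERNARY_RHS, and for a plain
+   copy "a = b" GIMPLE_SINGLE_RHS.  */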
+
+/* Specific flags for individual GIMPLE statements. These flags are
+ always stored in gimple.subcode and they may only be
+ defined for statement codes that do not use subcodes.
+
+ Values for the masks can overlap as long as the overlapping values
+ are never used in the same statement class.
+
+ The maximum mask value that can be defined is 1 << 15 (i.e., each
+ statement code can hold up to 16 bitflags).
+
+ Keep this list sorted. */
+enum gf_mask {
+ GF_ASM_INPUT = 1 << 0,
+ GF_ASM_VOLATILE = 1 << 1,
+ GF_ASM_INLINE = 1 << 2,
+ GF_CALL_FROM_THUNK = 1 << 0,
+ GF_CALL_RETURN_SLOT_OPT = 1 << 1,
+ GF_CALL_TAILCALL = 1 << 2,
+ GF_CALL_VA_ARG_PACK = 1 << 3,
+ GF_CALL_NOTHROW = 1 << 4,
+ GF_CALL_ALLOCA_FOR_VAR = 1 << 5,
+ GF_CALL_INTERNAL = 1 << 6,
+ GF_CALL_CTRL_ALTERING = 1 << 7,
+ GF_CALL_MUST_TAIL_CALL = 1 << 9,
+ GF_CALL_BY_DESCRIPTOR = 1 << 10,
+ GF_CALL_NOCF_CHECK = 1 << 11,
+ GF_CALL_FROM_NEW_OR_DELETE = 1 << 12,
+ GF_OMP_PARALLEL_COMBINED = 1 << 0,
+ GF_OMP_TASK_TASKLOOP = 1 << 0,
+ GF_OMP_TASK_TASKWAIT = 1 << 1,
+ GF_OMP_FOR_KIND_MASK = (1 << 3) - 1,
+ GF_OMP_FOR_KIND_FOR = 0,
+ GF_OMP_FOR_KIND_DISTRIBUTE = 1,
+ GF_OMP_FOR_KIND_TASKLOOP = 2,
+ GF_OMP_FOR_KIND_OACC_LOOP = 4,
+ GF_OMP_FOR_KIND_SIMD = 5,
+ GF_OMP_FOR_COMBINED = 1 << 3,
+ GF_OMP_FOR_COMBINED_INTO = 1 << 4,
+ GF_OMP_TARGET_KIND_MASK = (1 << 5) - 1,
+ GF_OMP_TARGET_KIND_REGION = 0,
+ GF_OMP_TARGET_KIND_DATA = 1,
+ GF_OMP_TARGET_KIND_UPDATE = 2,
+ GF_OMP_TARGET_KIND_ENTER_DATA = 3,
+ GF_OMP_TARGET_KIND_EXIT_DATA = 4,
+ GF_OMP_TARGET_KIND_OACC_PARALLEL = 5,
+ GF_OMP_TARGET_KIND_OACC_KERNELS = 6,
+ GF_OMP_TARGET_KIND_OACC_SERIAL = 7,
+ GF_OMP_TARGET_KIND_OACC_DATA = 8,
+ GF_OMP_TARGET_KIND_OACC_UPDATE = 9,
+ GF_OMP_TARGET_KIND_OACC_ENTER_DATA = 10,
+ GF_OMP_TARGET_KIND_OACC_EXIT_DATA = 11,
+ GF_OMP_TARGET_KIND_OACC_DECLARE = 12,
+ GF_OMP_TARGET_KIND_OACC_HOST_DATA = 13,
+ /* A 'GF_OMP_TARGET_KIND_OACC_PARALLEL' representing an OpenACC 'kernels'
+ decomposed part, parallelized. */
+ GF_OMP_TARGET_KIND_OACC_PARALLEL_KERNELS_PARALLELIZED = 14,
+ /* A 'GF_OMP_TARGET_KIND_OACC_PARALLEL' representing an OpenACC 'kernels'
+ decomposed part, "gang-single". */
+ GF_OMP_TARGET_KIND_OACC_PARALLEL_KERNELS_GANG_SINGLE = 15,
+ /* A 'GF_OMP_TARGET_KIND_OACC_DATA' representing an OpenACC 'kernels'
+ decomposed parts' 'data' construct. */
+ GF_OMP_TARGET_KIND_OACC_DATA_KERNELS = 16,
+ GF_OMP_TEAMS_HOST = 1 << 0,
+
+  /* True on a GIMPLE_OMP_RETURN statement if the return does not require
+ a thread synchronization via some sort of barrier. The exact barrier
+ that would otherwise be emitted is dependent on the OMP statement with
+ which this return is associated. */
+ GF_OMP_RETURN_NOWAIT = 1 << 0,
+
+ GF_OMP_SECTION_LAST = 1 << 0,
+ GF_OMP_ORDERED_STANDALONE = 1 << 0,
+ GF_OMP_ATOMIC_MEMORY_ORDER = (1 << 6) - 1,
+ GF_OMP_ATOMIC_NEED_VALUE = 1 << 6,
+ GF_OMP_ATOMIC_WEAK = 1 << 7,
+ GF_PREDICT_TAKEN = 1 << 15
+};
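+
+/* Overlap sketch (assumption): GF_ASM_INPUT, GF_CALL_FROM_THUNK,
+   GF_OMP_PARALLEL_COMBINED and GF_OMP_RETURN_NOWAIT all use 1 << 0,
+   which is safe because each flag is only ever queried on its own
+   statement code (GIMPLE_ASM, GIMPLE_CALL, GIMPLE_OMP_PARALLEL and
+   GIMPLE_OMP_RETURN respectively).  */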
+
+/* This subcode tells apart different kinds of stmts that are not used
+ for codegen, but rather to retain debug information. */
+enum gimple_debug_subcode {
+ GIMPLE_DEBUG_BIND = 0,
+ GIMPLE_DEBUG_SOURCE_BIND = 1,
+ GIMPLE_DEBUG_BEGIN_STMT = 2,
+ GIMPLE_DEBUG_INLINE_ENTRY = 3
+};
+
+/* Masks for selecting a pass local flag (PLF) to work on. These
+ masks are used by gimple_set_plf and gimple_plf. */
+enum plf_mask {
+ GF_PLF_1 = 1 << 0,
+ GF_PLF_2 = 1 << 1
+};
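+
+/* Usage sketch (assumption): a pass may use a PLF to mark statements it
+   has already processed, e.g.
+
+     gimple_set_plf (stmt, GF_PLF_1, true);
+     ...
+     if (gimple_plf (stmt, GF_PLF_1))
+       continue;   // already handled by this pass
+
+   clearing the flag on entry, since its value is undefined at pass
+   boundaries.  */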
+
+/* Data structure definitions for GIMPLE tuples. NOTE: word markers
+ are for 64 bit hosts. */
+
+struct GTY((desc ("gimple_statement_structure (&%h)"), tag ("GSS_BASE"),
+ chain_next ("%h.next"), variable_size))
+ gimple
+{
+ /* [ WORD 1 ]
+ Main identifying code for a tuple. */
+ ENUM_BITFIELD(gimple_code) code : 8;
+
+ /* Nonzero if a warning should not be emitted on this tuple. */
+ unsigned int no_warning : 1;
+
+ /* Nonzero if this tuple has been visited. Passes are responsible
+ for clearing this bit before using it. */
+ unsigned int visited : 1;
+
+ /* Nonzero if this tuple represents a non-temporal move. */
+ unsigned int nontemporal_move : 1;
+
+ /* Pass local flags. These flags are free for any pass to use as
+ they see fit. Passes should not assume that these flags contain
+ any useful value when the pass starts. Any initial state that
+ the pass requires should be set on entry to the pass. See
+ gimple_set_plf and gimple_plf for usage. */
+ unsigned int plf : 2;
+
+ /* Nonzero if this statement has been modified and needs to have its
+ operands rescanned. */
+ unsigned modified : 1;
+
+ /* Nonzero if this statement contains volatile operands. */
+ unsigned has_volatile_ops : 1;
+
+ /* Padding to get subcode to 16 bit alignment. */
+ unsigned pad : 1;
+
+ /* The SUBCODE field can be used for tuple-specific flags for tuples
+ that do not require subcodes. Note that SUBCODE should be at
+ least as wide as tree codes, as several tuples store tree codes
+ in there. */
+ unsigned int subcode : 16;
+
+ /* UID of this statement. This is used by passes that want to
+ assign IDs to statements. It must be assigned and used by each
+ pass. By default it should be assumed to contain garbage. */
+ unsigned uid;
+
+ /* [ WORD 2 ]
+ Locus information for debug info. */
+ location_t location;
+
+ /* Number of operands in this tuple. */
+ unsigned num_ops;
+
+ /* [ WORD 3 ]
+ Basic block holding this statement. */
+ basic_block bb;
+
+ /* [ WORD 4-5 ]
+ Linked lists of gimple statements. The next pointers form
+ a NULL terminated list, the prev pointers are a cyclic list.
+ A gimple statement is hence also a double-ended list of
+ statements, with the pointer itself being the first element,
+ and the prev pointer being the last. */
+ gimple *next;
+ gimple *GTY((skip)) prev;
+};
+
+
+/* Base structure for tuples with operands. */
+
+/* This gimple subclass has no tag value. */
+struct GTY(())
+ gimple_statement_with_ops_base : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7 ]
+ SSA operand vectors. NOTE: It should be possible to
+ amalgamate these vectors with the operand vector OP. However,
+ the SSA operand vectors are organized differently and contain
+ more information (like immediate use chaining). */
+ struct use_optype_d GTY((skip (""))) *use_ops;
+};
+
+
+/* Statements that take register operands. */
+
+struct GTY((tag("GSS_WITH_OPS")))
+ gimple_statement_with_ops : public gimple_statement_with_ops_base
+{
+ /* [ WORD 1-7 ] : base class */
+
+ /* [ WORD 8 ]
+ Operand vector. NOTE! This must always be the last field
+ of this structure. In particular, this means that this
+ structure cannot be embedded inside another one. */
+ tree GTY((length ("%h.num_ops"))) op[1];
+};
+
+
+/* Base for statements that take both memory and register operands. */
+
+struct GTY((tag("GSS_WITH_MEM_OPS_BASE")))
+ gimple_statement_with_memory_ops_base : public gimple_statement_with_ops_base
+{
+ /* [ WORD 1-7 ] : base class */
+
+ /* [ WORD 8-9 ]
+ Virtual operands for this statement. The GC will pick them
+ up via the ssa_names array. */
+ tree GTY((skip (""))) vdef;
+ tree GTY((skip (""))) vuse;
+};
+
+
+/* Statements that take both memory and register operands. */
+
+struct GTY((tag("GSS_WITH_MEM_OPS")))
+ gimple_statement_with_memory_ops :
+ public gimple_statement_with_memory_ops_base
+{
+ /* [ WORD 1-9 ] : base class */
+
+ /* [ WORD 10 ]
+ Operand vector. NOTE! This must always be the last field
+ of this structure. In particular, this means that this
+ structure cannot be embedded inside another one. */
+ tree GTY((length ("%h.num_ops"))) op[1];
+};
+
+
+/* Call statements that take both memory and register operands. */
+
+struct GTY((tag("GSS_CALL")))
+ gcall : public gimple_statement_with_memory_ops_base
+{
+ /* [ WORD 1-9 ] : base class */
+
+ /* [ WORD 10-13 ] */
+ struct pt_solution call_used;
+ struct pt_solution call_clobbered;
+
+ /* [ WORD 14 ] */
+ union GTY ((desc ("%1.subcode & GF_CALL_INTERNAL"))) {
+ tree GTY ((tag ("0"))) fntype;
+ enum internal_fn GTY ((tag ("GF_CALL_INTERNAL"))) internal_fn;
+ } u;
+
+ /* [ WORD 15 ]
+ Operand vector. NOTE! This must always be the last field
+ of this structure. In particular, this means that this
+ structure cannot be embedded inside another one. */
+ tree GTY((length ("%h.num_ops"))) op[1];
+
+ static const enum gimple_code code_ = GIMPLE_CALL;
+};
+
+
+/* OMP statements. */
+
+struct GTY((tag("GSS_OMP")))
+ gimple_statement_omp : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7 ] */
+ gimple_seq body;
+};
+
+
+/* GIMPLE_BIND */
+
+struct GTY((tag("GSS_BIND")))
+ gbind : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7 ]
+ Variables declared in this scope. */
+ tree vars;
+
+ /* [ WORD 8 ]
+ This is different than the BLOCK field in gimple,
+ which is analogous to TREE_BLOCK (i.e., the lexical block holding
+ this statement). This field is the equivalent of BIND_EXPR_BLOCK
+ in tree land (i.e., the lexical scope defined by this bind). See
+ gimple-low.cc. */
+ tree block;
+
+ /* [ WORD 9 ] */
+ gimple_seq body;
+};
+
+
+/* GIMPLE_CATCH */
+
+struct GTY((tag("GSS_CATCH")))
+ gcatch : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7 ] */
+ tree types;
+
+ /* [ WORD 8 ] */
+ gimple_seq handler;
+};
+
+
+/* GIMPLE_EH_FILTER */
+
+struct GTY((tag("GSS_EH_FILTER")))
+ geh_filter : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7 ]
+ Filter types. */
+ tree types;
+
+ /* [ WORD 8 ]
+ Failure actions. */
+ gimple_seq failure;
+};
+
+/* GIMPLE_EH_ELSE */
+
+struct GTY((tag("GSS_EH_ELSE")))
+ geh_else : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7,8 ] */
+ gimple_seq n_body, e_body;
+};
+
+/* GIMPLE_EH_MUST_NOT_THROW */
+
+struct GTY((tag("GSS_EH_MNT")))
+ geh_mnt : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7 ] Abort function decl. */
+ tree fndecl;
+};
+
+/* GIMPLE_PHI */
+
+struct GTY((tag("GSS_PHI")))
+ gphi : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7 ] */
+ unsigned capacity;
+ unsigned nargs;
+
+ /* [ WORD 8 ] */
+ tree result;
+
+ /* [ WORD 9 ] */
+ struct phi_arg_d GTY ((length ("%h.nargs"))) args[1];
+};
+
+
+/* GIMPLE_RESX, GIMPLE_EH_DISPATCH */
+
+struct GTY((tag("GSS_EH_CTRL")))
+ gimple_statement_eh_ctrl : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7 ]
+ Exception region number. */
+ int region;
+};
+
+struct GTY((tag("GSS_EH_CTRL")))
+ gresx : public gimple_statement_eh_ctrl
+{
+ /* No extra fields; adds invariant:
+ stmt->code == GIMPLE_RESX. */
+};
+
+struct GTY((tag("GSS_EH_CTRL")))
+ geh_dispatch : public gimple_statement_eh_ctrl
+{
+ /* No extra fields; adds invariant:
+       stmt->code == GIMPLE_EH_DISPATCH.  */
+};
+
+
+/* GIMPLE_TRY */
+
+struct GTY((tag("GSS_TRY")))
+ gtry : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7 ]
+ Expression to evaluate. */
+ gimple_seq eval;
+
+ /* [ WORD 8 ]
+ Cleanup expression. */
+ gimple_seq cleanup;
+};
+
+/* Kind of GIMPLE_TRY statements. */
+enum gimple_try_flags
+{
+ /* A try/catch. */
+ GIMPLE_TRY_CATCH = 1 << 0,
+
+ /* A try/finally. */
+ GIMPLE_TRY_FINALLY = 1 << 1,
+ GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY,
+
+ /* Analogous to TRY_CATCH_IS_CLEANUP. */
+ GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2
+};
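+
+/* Usage sketch (assumption): the kind bits live in the statement
+   subcode, so a try/finally can be recognized with
+
+     if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
+       ...
+
+   where gimple_try_kind masks the subcode with GIMPLE_TRY_KIND.  */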
+
+/* GIMPLE_WITH_CLEANUP_EXPR */
+
+struct GTY((tag("GSS_WCE")))
+ gimple_statement_wce : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be
+ executed if an exception is thrown, not on normal exit of its
+ scope. This flag is analogous to the CLEANUP_EH_ONLY flag
+ in TARGET_EXPRs. */
+
+ /* [ WORD 7 ]
+ Cleanup expression. */
+ gimple_seq cleanup;
+};
+
+
+/* GIMPLE_ASM */
+
+struct GTY((tag("GSS_ASM")))
+ gasm : public gimple_statement_with_memory_ops_base
+{
+ /* [ WORD 1-9 ] : base class */
+
+ /* [ WORD 10 ]
+ __asm__ statement. */
+ const char *string;
+
+ /* [ WORD 11 ]
+ Number of inputs, outputs, clobbers, labels. */
+ unsigned char ni;
+ unsigned char no;
+ unsigned char nc;
+ unsigned char nl;
+
+ /* [ WORD 12 ]
+ Operand vector. NOTE! This must always be the last field
+ of this structure. In particular, this means that this
+ structure cannot be embedded inside another one. */
+ tree GTY((length ("%h.num_ops"))) op[1];
+};
+
+/* GIMPLE_OMP_CRITICAL */
+
+struct GTY((tag("GSS_OMP_CRITICAL")))
+ gomp_critical : public gimple_statement_omp
+{
+ /* [ WORD 1-7 ] : base class */
+
+ /* [ WORD 8 ] */
+ tree clauses;
+
+ /* [ WORD 9 ]
+ Critical section name. */
+ tree name;
+};
+
+
+struct GTY(()) gimple_omp_for_iter {
+ /* Condition code. */
+ enum tree_code cond;
+
+ /* Index variable. */
+ tree index;
+
+ /* Initial value. */
+ tree initial;
+
+ /* Final value. */
+ tree final;
+
+ /* Increment. */
+ tree incr;
+};
+
+/* GIMPLE_OMP_FOR */
+
+struct GTY((tag("GSS_OMP_FOR")))
+ gomp_for : public gimple_statement_omp
+{
+ /* [ WORD 1-7 ] : base class */
+
+ /* [ WORD 8 ] */
+ tree clauses;
+
+ /* [ WORD 9 ]
+ Number of elements in iter array. */
+ size_t collapse;
+
+ /* [ WORD 10 ] */
+ struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter;
+
+ /* [ WORD 11 ]
+ Pre-body evaluated before the loop body begins. */
+ gimple_seq pre_body;
+};
+
+
+/* GIMPLE_OMP_PARALLEL, GIMPLE_OMP_TARGET, GIMPLE_OMP_TASK, GIMPLE_OMP_TEAMS */
+
+struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
+ gimple_statement_omp_parallel_layout : public gimple_statement_omp
+{
+ /* [ WORD 1-7 ] : base class */
+
+ /* [ WORD 8 ]
+ Clauses. */
+ tree clauses;
+
+ /* [ WORD 9 ]
+ Child function holding the body of the parallel region. */
+ tree child_fn;
+
+ /* [ WORD 10 ]
+ Shared data argument. */
+ tree data_arg;
+};
+
+/* GIMPLE_OMP_PARALLEL, GIMPLE_OMP_TASK or GIMPLE_OMP_TEAMS */
+struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
+ gimple_statement_omp_taskreg : public gimple_statement_omp_parallel_layout
+{
+ /* No extra fields; adds invariant:
+ stmt->code == GIMPLE_OMP_PARALLEL
+ || stmt->code == GIMPLE_OMP_TASK
+ || stmt->code == GIMPLE_OMP_TEAMS. */
+};
+
+/* GIMPLE_OMP_PARALLEL */
+struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
+ gomp_parallel : public gimple_statement_omp_taskreg
+{
+ /* No extra fields; adds invariant:
+ stmt->code == GIMPLE_OMP_PARALLEL. */
+};
+
+/* GIMPLE_OMP_TARGET */
+struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
+ gomp_target : public gimple_statement_omp_parallel_layout
+{
+ /* No extra fields; adds invariant:
+ stmt->code == GIMPLE_OMP_TARGET. */
+};
+
+/* GIMPLE_OMP_TASK */
+
+struct GTY((tag("GSS_OMP_TASK")))
+ gomp_task : public gimple_statement_omp_taskreg
+{
+ /* [ WORD 1-10 ] : base class */
+
+ /* [ WORD 11 ]
+ Child function holding firstprivate initialization if needed. */
+ tree copy_fn;
+
+ /* [ WORD 12-13 ]
+ Size and alignment in bytes of the argument data block. */
+ tree arg_size;
+ tree arg_align;
+};
+
+
+/* GIMPLE_OMP_SECTION */
+/* Uses struct gimple_statement_omp. */
+
+
+/* GIMPLE_OMP_SECTIONS */
+
+struct GTY((tag("GSS_OMP_SECTIONS")))
+ gomp_sections : public gimple_statement_omp
+{
+ /* [ WORD 1-7 ] : base class */
+
+ /* [ WORD 8 ] */
+ tree clauses;
+
+ /* [ WORD 9 ]
+ The control variable used for deciding which of the sections to
+ execute. */
+ tree control;
+};
+
+/* GIMPLE_OMP_CONTINUE.
+
+ Note: This does not inherit from gimple_statement_omp, because we
+ do not need the body field. */
+
+struct GTY((tag("GSS_OMP_CONTINUE")))
+ gomp_continue : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7 ] */
+ tree control_def;
+
+ /* [ WORD 8 ] */
+ tree control_use;
+};
+
+/* GIMPLE_OMP_SINGLE, GIMPLE_OMP_ORDERED, GIMPLE_OMP_TASKGROUP,
+ GIMPLE_OMP_SCAN, GIMPLE_OMP_MASKED, GIMPLE_OMP_SCOPE. */
+
+struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
+ gimple_statement_omp_single_layout : public gimple_statement_omp
+{
+ /* [ WORD 1-7 ] : base class */
+
+ /* [ WORD 8 ] */
+ tree clauses;
+};
+
+struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
+ gomp_single : public gimple_statement_omp_single_layout
+{
+ /* No extra fields; adds invariant:
+ stmt->code == GIMPLE_OMP_SINGLE. */
+};
+
+struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
+ gomp_teams : public gimple_statement_omp_taskreg
+{
+ /* No extra fields; adds invariant:
+ stmt->code == GIMPLE_OMP_TEAMS. */
+};
+
+struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
+ gomp_ordered : public gimple_statement_omp_single_layout
+{
+ /* No extra fields; adds invariant:
+ stmt->code == GIMPLE_OMP_ORDERED. */
+};
+
+struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
+ gomp_scan : public gimple_statement_omp_single_layout
+{
+ /* No extra fields; adds invariant:
+ stmt->code == GIMPLE_OMP_SCAN. */
+};
+
+
+/* GIMPLE_OMP_ATOMIC_LOAD.
+   Note: This is based on gimple, not gimple_statement_omp, because
+   the latter contains a sequence, which we don't need here.  */
+
+struct GTY((tag("GSS_OMP_ATOMIC_LOAD")))
+ gomp_atomic_load : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7-8 ] */
+ tree rhs, lhs;
+};
+
+/* GIMPLE_OMP_ATOMIC_STORE.
+ See note on GIMPLE_OMP_ATOMIC_LOAD. */
+
+struct GTY((tag("GSS_OMP_ATOMIC_STORE_LAYOUT")))
+ gimple_statement_omp_atomic_store_layout : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7 ] */
+ tree val;
+};
+
+struct GTY((tag("GSS_OMP_ATOMIC_STORE_LAYOUT")))
+ gomp_atomic_store :
+ public gimple_statement_omp_atomic_store_layout
+{
+ /* No extra fields; adds invariant:
+ stmt->code == GIMPLE_OMP_ATOMIC_STORE. */
+};
+
+struct GTY((tag("GSS_OMP_ATOMIC_STORE_LAYOUT")))
+ gimple_statement_omp_return :
+ public gimple_statement_omp_atomic_store_layout
+{
+ /* No extra fields; adds invariant:
+ stmt->code == GIMPLE_OMP_RETURN. */
+};
+
+/* Assumptions. */
+
+struct GTY((tag("GSS_ASSUME")))
+ gimple_statement_assume : public gimple
+{
+ /* [ WORD 1-6 ] : base class */
+
+ /* [ WORD 7 ] */
+ tree guard;
+
+ /* [ WORD 8 ] */
+ gimple_seq body;
+};
+
+/* GIMPLE_TRANSACTION. */
+
+/* Bits to be stored in the GIMPLE_TRANSACTION subcode. */
+
+/* The __transaction_atomic was declared [[outer]] or it is
+ __transaction_relaxed. */
+#define GTMA_IS_OUTER (1u << 0)
+#define GTMA_IS_RELAXED (1u << 1)
+#define GTMA_DECLARATION_MASK (GTMA_IS_OUTER | GTMA_IS_RELAXED)
+
+/* The transaction is seen to have an abort.  */
+#define GTMA_HAVE_ABORT (1u << 2)
+/* The transaction is seen to have loads or stores. */
+#define GTMA_HAVE_LOAD (1u << 3)
+#define GTMA_HAVE_STORE (1u << 4)
+/* The transaction MAY enter serial irrevocable mode in its dynamic scope. */
+#define GTMA_MAY_ENTER_IRREVOCABLE (1u << 5)
+/* The transaction WILL enter serial irrevocable mode.
+ An irrevocable block post-dominates the entire transaction, such
+ that all invocations of the transaction will go serial-irrevocable.
+ In such case, we don't bother instrumenting the transaction, and
+ tell the runtime that it should begin the transaction in
+ serial-irrevocable mode. */
+#define GTMA_DOES_GO_IRREVOCABLE (1u << 6)
+/* The transaction contains no instrumentation code whatsoever, most
+ likely because it is guaranteed to go irrevocable upon entry. */
+#define GTMA_HAS_NO_INSTRUMENTATION (1u << 7)
+
+struct GTY((tag("GSS_TRANSACTION")))
+ gtransaction : public gimple_statement_with_memory_ops_base
+{
+ /* [ WORD 1-9 ] : base class */
+
+ /* [ WORD 10 ] */
+ gimple_seq body;
+
+ /* [ WORD 11-13 ] */
+ tree label_norm;
+ tree label_uninst;
+ tree label_over;
+};
+
+#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM,
+enum gimple_statement_structure_enum {
+#include "gsstruct.def"
+ LAST_GSS_ENUM
+};
+#undef DEFGSSTRUCT
+
+/* A statement with the invariant that
+ stmt->code == GIMPLE_COND
+ i.e. a conditional jump statement. */
+
+struct GTY((tag("GSS_WITH_OPS")))
+ gcond : public gimple_statement_with_ops
+{
+ /* no additional fields; this uses the layout for GSS_WITH_OPS. */
+ static const enum gimple_code code_ = GIMPLE_COND;
+};
+
+/* A statement with the invariant that
+ stmt->code == GIMPLE_DEBUG
+ i.e. a debug statement. */
+
+struct GTY((tag("GSS_WITH_OPS")))
+ gdebug : public gimple_statement_with_ops
+{
+ /* no additional fields; this uses the layout for GSS_WITH_OPS. */
+};
+
+/* A statement with the invariant that
+ stmt->code == GIMPLE_GOTO
+ i.e. a goto statement. */
+
+struct GTY((tag("GSS_WITH_OPS")))
+ ggoto : public gimple_statement_with_ops
+{
+ /* no additional fields; this uses the layout for GSS_WITH_OPS. */
+};
+
+/* A statement with the invariant that
+ stmt->code == GIMPLE_LABEL
+ i.e. a label statement. */
+
+struct GTY((tag("GSS_WITH_OPS")))
+ glabel : public gimple_statement_with_ops
+{
+ /* no additional fields; this uses the layout for GSS_WITH_OPS. */
+};
+
+/* A statement with the invariant that
+ stmt->code == GIMPLE_SWITCH
+ i.e. a switch statement. */
+
+struct GTY((tag("GSS_WITH_OPS")))
+ gswitch : public gimple_statement_with_ops
+{
+ /* no additional fields; this uses the layout for GSS_WITH_OPS. */
+};
+
+/* A statement with the invariant that
+ stmt->code == GIMPLE_ASSIGN
+ i.e. an assignment statement. */
+
+struct GTY((tag("GSS_WITH_MEM_OPS")))
+ gassign : public gimple_statement_with_memory_ops
+{
+ static const enum gimple_code code_ = GIMPLE_ASSIGN;
+ /* no additional fields; this uses the layout for GSS_WITH_MEM_OPS. */
+};
+
+/* A statement with the invariant that
+ stmt->code == GIMPLE_RETURN
+ i.e. a return statement. */
+
+struct GTY((tag("GSS_WITH_MEM_OPS")))
+ greturn : public gimple_statement_with_memory_ops
+{
+ /* no additional fields; this uses the layout for GSS_WITH_MEM_OPS. */
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <gasm *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_ASM;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gassign *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_ASSIGN;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gassign *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_ASSIGN;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gbind *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_BIND;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gcall *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_CALL;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gcatch *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_CATCH;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gcond *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_COND;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gcond *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_COND;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gdebug *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_DEBUG;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gdebug *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_DEBUG;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <ggoto *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_GOTO;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const ggoto *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_GOTO;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <glabel *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_LABEL;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const glabel *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_LABEL;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gresx *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_RESX;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <geh_dispatch *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_EH_DISPATCH;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <geh_else *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_EH_ELSE;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const geh_else *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_EH_ELSE;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <geh_filter *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_EH_FILTER;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <geh_mnt *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_EH_MUST_NOT_THROW;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const geh_mnt *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_EH_MUST_NOT_THROW;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gomp_atomic_load *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_ATOMIC_LOAD;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gomp_atomic_store *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_ATOMIC_STORE;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_omp_return *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_RETURN;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gomp_continue *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_CONTINUE;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gomp_critical *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_CRITICAL;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gomp_ordered *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_ORDERED;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gomp_scan *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_SCAN;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gomp_for *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_FOR;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_omp_taskreg *>::test (gimple *gs)
+{
+ return (gs->code == GIMPLE_OMP_PARALLEL
+ || gs->code == GIMPLE_OMP_TASK
+ || gs->code == GIMPLE_OMP_TEAMS);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gomp_parallel *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_PARALLEL;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gomp_target *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_TARGET;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gomp_sections *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_SECTIONS;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gomp_single *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_SINGLE;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gomp_teams *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_TEAMS;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gomp_task *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_TASK;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gphi *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_PHI;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <greturn *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_RETURN;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gswitch *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_SWITCH;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gswitch *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_SWITCH;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_assume *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_ASSUME;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gtransaction *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_TRANSACTION;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gtry *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_TRY;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gtry *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_TRY;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_wce *>::test (gimple *gs)
+{
+ return gs->code == GIMPLE_WITH_CLEANUP_EXPR;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gasm *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_ASM;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gbind *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_BIND;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gcall *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_CALL;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gcatch *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_CATCH;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gresx *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_RESX;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const geh_dispatch *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_EH_DISPATCH;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const geh_filter *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_EH_FILTER;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gomp_atomic_load *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_ATOMIC_LOAD;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gomp_atomic_store *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_ATOMIC_STORE;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_omp_return *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_RETURN;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gomp_continue *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_CONTINUE;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gomp_critical *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_CRITICAL;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gomp_ordered *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_ORDERED;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gomp_scan *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_SCAN;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gomp_for *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_FOR;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_omp_taskreg *>::test (const gimple *gs)
+{
+ return (gs->code == GIMPLE_OMP_PARALLEL
+ || gs->code == GIMPLE_OMP_TASK
+ || gs->code == GIMPLE_OMP_TEAMS);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gomp_parallel *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_PARALLEL;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gomp_target *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_TARGET;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gomp_sections *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_SECTIONS;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gomp_single *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_SINGLE;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gomp_teams *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_TEAMS;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gomp_task *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_OMP_TASK;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gphi *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_PHI;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const greturn *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_RETURN;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_assume *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_ASSUME;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gtransaction *>::test (const gimple *gs)
+{
+ return gs->code == GIMPLE_TRANSACTION;
+}
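+
+/* Usage sketch (assumption): these specializations are what let the
+   generic casting helpers operate on gimple statements, e.g.
+
+     if (gassign *assign = dyn_cast <gassign *> (stmt))
+       return gimple_assign_lhs (assign);
+     else if (is_a <gcall *> (stmt))
+       return gimple_call_lhs (as_a <gcall *> (stmt));
+
+   with as_a <> asserting the test in checked builds.  */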
+
+/* Offset in bytes to the location of the operand vector.
+ Zero if there is no operand vector for this tuple structure. */
+extern size_t const gimple_ops_offset_[];
+
+/* Map GIMPLE codes to GSS codes. */
+extern enum gimple_statement_structure_enum const gss_for_code_[];
+
+/* This variable holds the currently expanded gimple statement for purposes
+   of communicating the profile info to the builtin expanders.  */
+extern gimple *currently_expanding_gimple_stmt;
+
+size_t gimple_size (enum gimple_code code, unsigned num_ops = 0);
+void gimple_init (gimple *g, enum gimple_code code, unsigned num_ops);
+gimple *gimple_alloc (enum gimple_code, unsigned CXX_MEM_STAT_INFO);
+greturn *gimple_build_return (tree);
+void gimple_call_reset_alias_info (gcall *);
+gcall *gimple_build_call_vec (tree, const vec<tree> &);
+gcall *gimple_build_call (tree, unsigned, ...);
+gcall *gimple_build_call_valist (tree, unsigned, va_list);
+gcall *gimple_build_call_internal (enum internal_fn, unsigned, ...);
+gcall *gimple_build_call_internal_vec (enum internal_fn, const vec<tree> &);
+gcall *gimple_build_call_from_tree (tree, tree);
+gassign *gimple_build_assign (tree, tree CXX_MEM_STAT_INFO);
+gassign *gimple_build_assign (tree, enum tree_code,
+ tree, tree, tree CXX_MEM_STAT_INFO);
+gassign *gimple_build_assign (tree, enum tree_code,
+ tree, tree CXX_MEM_STAT_INFO);
+gassign *gimple_build_assign (tree, enum tree_code, tree CXX_MEM_STAT_INFO);
+gcond *gimple_build_cond (enum tree_code, tree, tree, tree, tree);
+gcond *gimple_build_cond_from_tree (tree, tree, tree);
+void gimple_cond_set_condition_from_tree (gcond *, tree);
+glabel *gimple_build_label (tree label);
+ggoto *gimple_build_goto (tree dest);
+gimple *gimple_build_nop (void);
+gbind *gimple_build_bind (tree, gimple_seq, tree);
+gasm *gimple_build_asm_vec (const char *, vec<tree, va_gc> *,
+ vec<tree, va_gc> *, vec<tree, va_gc> *,
+ vec<tree, va_gc> *);
+gcatch *gimple_build_catch (tree, gimple_seq);
+geh_filter *gimple_build_eh_filter (tree, gimple_seq);
+geh_mnt *gimple_build_eh_must_not_throw (tree);
+geh_else *gimple_build_eh_else (gimple_seq, gimple_seq);
+gtry *gimple_build_try (gimple_seq, gimple_seq,
+ enum gimple_try_flags);
+gimple *gimple_build_wce (gimple_seq);
+gresx *gimple_build_resx (int);
+gswitch *gimple_build_switch_nlabels (unsigned, tree, tree);
+gswitch *gimple_build_switch (tree, tree, const vec<tree> &);
+geh_dispatch *gimple_build_eh_dispatch (int);
+gdebug *gimple_build_debug_bind (tree, tree, gimple * CXX_MEM_STAT_INFO);
+gdebug *gimple_build_debug_source_bind (tree, tree, gimple * CXX_MEM_STAT_INFO);
+gdebug *gimple_build_debug_begin_stmt (tree, location_t CXX_MEM_STAT_INFO);
+gdebug *gimple_build_debug_inline_entry (tree, location_t CXX_MEM_STAT_INFO);
+gomp_critical *gimple_build_omp_critical (gimple_seq, tree, tree);
+gomp_for *gimple_build_omp_for (gimple_seq, int, tree, size_t, gimple_seq);
+gomp_parallel *gimple_build_omp_parallel (gimple_seq, tree, tree, tree);
+gomp_task *gimple_build_omp_task (gimple_seq, tree, tree, tree, tree,
+ tree, tree);
+gimple *gimple_build_omp_section (gimple_seq);
+gimple *gimple_build_omp_scope (gimple_seq, tree);
+gimple *gimple_build_omp_master (gimple_seq);
+gimple *gimple_build_omp_masked (gimple_seq, tree);
+gimple *gimple_build_omp_taskgroup (gimple_seq, tree);
+gomp_continue *gimple_build_omp_continue (tree, tree);
+gomp_ordered *gimple_build_omp_ordered (gimple_seq, tree);
+gimple *gimple_build_omp_return (bool);
+gomp_scan *gimple_build_omp_scan (gimple_seq, tree);
+gomp_sections *gimple_build_omp_sections (gimple_seq, tree);
+gimple *gimple_build_omp_sections_switch (void);
+gomp_single *gimple_build_omp_single (gimple_seq, tree);
+gomp_target *gimple_build_omp_target (gimple_seq, int, tree);
+gomp_teams *gimple_build_omp_teams (gimple_seq, tree);
+gomp_atomic_load *gimple_build_omp_atomic_load (tree, tree,
+ enum omp_memory_order);
+gomp_atomic_store *gimple_build_omp_atomic_store (tree, enum omp_memory_order);
+gimple *gimple_build_assume (tree, gimple_seq);
+gtransaction *gimple_build_transaction (gimple_seq);
+extern void gimple_seq_add_stmt (gimple_seq *, gimple *);
+extern void gimple_seq_add_stmt_without_update (gimple_seq *, gimple *);
+void gimple_seq_add_seq (gimple_seq *, gimple_seq);
+void gimple_seq_add_seq_without_update (gimple_seq *, gimple_seq);
+extern void annotate_all_with_location_after (gimple_seq, gimple_stmt_iterator,
+ location_t);
+extern void annotate_all_with_location (gimple_seq, location_t);
+bool empty_body_p (gimple_seq);
+gimple_seq gimple_seq_copy (gimple_seq);
+bool gimple_call_same_target_p (const gimple *, const gimple *);
+int gimple_call_flags (const gimple *);
+int gimple_call_arg_flags (const gcall *, unsigned);
+int gimple_call_retslot_flags (const gcall *);
+int gimple_call_static_chain_flags (const gcall *);
+int gimple_call_return_flags (const gcall *);
+bool gimple_call_nonnull_result_p (gcall *);
+tree gimple_call_nonnull_arg (gcall *);
+bool gimple_assign_copy_p (gimple *);
+bool gimple_assign_ssa_name_copy_p (gimple *);
+bool gimple_assign_unary_nop_p (gimple *);
+void gimple_set_bb (gimple *, basic_block);
+void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree);
+void gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *, enum tree_code,
+ tree, tree, tree);
+tree gimple_get_lhs (const gimple *);
+void gimple_set_lhs (gimple *, tree);
+gimple *gimple_copy (gimple *);
+void gimple_move_vops (gimple *, gimple *);
+bool gimple_has_side_effects (const gimple *);
+bool gimple_could_trap_p_1 (const gimple *, bool, bool);
+bool gimple_could_trap_p (const gimple *);
+bool gimple_assign_rhs_could_trap_p (gimple *);
+extern void dump_gimple_statistics (void);
+unsigned get_gimple_rhs_num_ops (enum tree_code);
+gcall *gimple_call_copy_skip_args (gcall *, bitmap);
+extern bool gimple_compare_field_offset (tree, tree);
+extern tree gimple_unsigned_type (tree);
+extern tree gimple_signed_type (tree);
+extern alias_set_type gimple_get_alias_set (tree);
+extern bool gimple_ior_addresses_taken (bitmap, gimple *);
+extern bool gimple_builtin_call_types_compatible_p (const gimple *, tree);
+extern combined_fn gimple_call_combined_fn (const gimple *);
+extern bool gimple_call_operator_delete_p (const gcall *);
+extern bool gimple_call_builtin_p (const gimple *);
+extern bool gimple_call_builtin_p (const gimple *, enum built_in_class);
+extern bool gimple_call_builtin_p (const gimple *, enum built_in_function);
+extern bool gimple_asm_clobbers_memory_p (const gasm *);
+extern void dump_decl_set (FILE *, bitmap);
+extern bool nonfreeing_call_p (gimple *);
+extern bool nonbarrier_call_p (gimple *);
+extern bool infer_nonnull_range (gimple *, tree);
+extern bool infer_nonnull_range_by_dereference (gimple *, tree);
+extern bool infer_nonnull_range_by_attribute (gimple *, tree);
+extern void sort_case_labels (vec<tree> &);
+extern void preprocess_case_label_vec_for_gimple (vec<tree> &, tree, tree *);
+extern void gimple_seq_set_location (gimple_seq, location_t);
+extern void gimple_seq_discard (gimple_seq);
+extern void maybe_remove_unused_call_args (struct function *, gimple *);
+extern bool gimple_inexpensive_call_p (gcall *);
+extern bool stmt_can_terminate_bb_p (gimple *);
+extern location_t gimple_or_expr_nonartificial_location (gimple *, tree);
+gcall *gimple_build_builtin_unreachable (location_t);
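+
+/* Construction sketch (assumption; tmp, a and b are hypothetical trees):
+   a statement sequence for "tmp = a + b; return tmp;" could be built as
+
+     gassign *add = gimple_build_assign (tmp, PLUS_EXPR, a, b);
+     greturn *ret = gimple_build_return (tmp);
+     gimple_seq seq = NULL;
+     gimple_seq_add_stmt (&seq, add);
+     gimple_seq_add_stmt (&seq, ret);
+
+   using only the builders and sequence helpers declared above.  */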
+
+/* Return the disposition for a warning (or all warnings by default)
+ for a statement. */
+extern bool warning_suppressed_p (const gimple *, opt_code = all_warnings)
+ ATTRIBUTE_NONNULL (1);
+/* Set the disposition for a warning (or all warnings by default)
+ at a location to enabled by default. */
+extern void suppress_warning (gimple *, opt_code = all_warnings,
+ bool = true) ATTRIBUTE_NONNULL (1);
+
+/* Copy the warning disposition mapping from one statement to another. */
+extern void copy_warning (gimple *, const gimple *)
+ ATTRIBUTE_NONNULL (1) ATTRIBUTE_NONNULL (2);
+/* Copy the warning disposition mapping from an expression to a statement. */
+extern void copy_warning (gimple *, const_tree)
+ ATTRIBUTE_NONNULL (1) ATTRIBUTE_NONNULL (2);
+/* Copy the warning disposition mapping from a statement to an expression. */
+extern void copy_warning (tree, const gimple *)
+ ATTRIBUTE_NONNULL (1) ATTRIBUTE_NONNULL (2);
+
+/* Formal (expression) temporary table handling: multiple occurrences of
+ the same scalar expression are evaluated into the same temporary. */
+
+typedef struct gimple_temp_hash_elt
+{
+ tree val; /* Key */
+ tree temp; /* Value */
+} elt_t;
+
+/* Get the number of the next statement uid to be allocated. */
+inline unsigned int
+gimple_stmt_max_uid (struct function *fn)
+{
+ return fn->last_stmt_uid;
+}
+
+/* Set the number of the next statement uid to be allocated. */
+inline void
+set_gimple_stmt_max_uid (struct function *fn, unsigned int maxid)
+{
+ fn->last_stmt_uid = maxid;
+}
+
+/* Allocate and return the next statement uid, advancing the counter.  */
+inline unsigned int
+inc_gimple_stmt_max_uid (struct function *fn)
+{
+ return fn->last_stmt_uid++;
+}
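+
+/* Usage sketch (assumption): passes needing dense statement ids usually
+   walk the function once doing
+
+     gimple_set_uid (stmt, inc_gimple_stmt_max_uid (cfun));
+
+   treating any pre-existing uid values as garbage.  */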
+
+/* Return the first node in GIMPLE sequence S. */
+
+inline gimple_seq_node
+gimple_seq_first (gimple_seq s)
+{
+ return s;
+}
+
+
+/* Return the first statement in GIMPLE sequence S. */
+
+inline gimple *
+gimple_seq_first_stmt (gimple_seq s)
+{
+ gimple_seq_node n = gimple_seq_first (s);
+ return n;
+}
+
+/* Return the first statement in GIMPLE sequence S as a gbind *,
+ verifying that it has code GIMPLE_BIND in a checked build. */
+
+inline gbind *
+gimple_seq_first_stmt_as_a_bind (gimple_seq s)
+{
+ gimple_seq_node n = gimple_seq_first (s);
+ return as_a <gbind *> (n);
+}
+
+
+/* Return the last node in GIMPLE sequence S. */
+
+inline gimple_seq_node
+gimple_seq_last (gimple_seq s)
+{
+ return s ? s->prev : NULL;
+}
+
+
+/* Return the last statement in GIMPLE sequence S. */
+
+inline gimple *
+gimple_seq_last_stmt (gimple_seq s)
+{
+ gimple_seq_node n = gimple_seq_last (s);
+ return n;
+}
+
+
+/* Set the last node in GIMPLE sequence *PS to LAST. */
+
+inline void
+gimple_seq_set_last (gimple_seq *ps, gimple_seq_node last)
+{
+ (*ps)->prev = last;
+}
+
+
+/* Set the first node in GIMPLE sequence *PS to FIRST. */
+
+inline void
+gimple_seq_set_first (gimple_seq *ps, gimple_seq_node first)
+{
+ *ps = first;
+}
+
+
+/* Return true if GIMPLE sequence S is empty. */
+
+inline bool
+gimple_seq_empty_p (gimple_seq s)
+{
+ return s == NULL;
+}
+
+/* Allocate a new sequence and initialize its first element with STMT. */
+
+inline gimple_seq
+gimple_seq_alloc_with_stmt (gimple *stmt)
+{
+ gimple_seq seq = NULL;
+ gimple_seq_add_stmt (&seq, stmt);
+ return seq;
+}
+
+
+/* Returns the sequence of statements in BB. */
+
+inline gimple_seq
+bb_seq (const_basic_block bb)
+{
+ return (!(bb->flags & BB_RTL)) ? bb->il.gimple.seq : NULL;
+}
+
+inline gimple_seq *
+bb_seq_addr (basic_block bb)
+{
+ return (!(bb->flags & BB_RTL)) ? &bb->il.gimple.seq : NULL;
+}
+
+/* Sets the sequence of statements in BB to SEQ. */
+
+inline void
+set_bb_seq (basic_block bb, gimple_seq seq)
+{
+ gcc_checking_assert (!(bb->flags & BB_RTL));
+ bb->il.gimple.seq = seq;
+}
+
+
+/* Return the code for GIMPLE statement G. */
+
+inline enum gimple_code
+gimple_code (const gimple *g)
+{
+ return g->code;
+}
+
+
+/* Return the GSS code used by a GIMPLE code. */
+
+inline enum gimple_statement_structure_enum
+gss_for_code (enum gimple_code code)
+{
+ gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE);
+ return gss_for_code_[code];
+}
+
+
+/* Return which GSS code is used by GS. */
+
+inline enum gimple_statement_structure_enum
+gimple_statement_structure (gimple *gs)
+{
+ return gss_for_code (gimple_code (gs));
+}
+
+
+/* Return true if statement G has sub-statements. This is only true for
+ High GIMPLE statements. */
+
+inline bool
+gimple_has_substatements (gimple *g)
+{
+ switch (gimple_code (g))
+ {
+ case GIMPLE_ASSUME:
+ case GIMPLE_BIND:
+ case GIMPLE_CATCH:
+ case GIMPLE_EH_FILTER:
+ case GIMPLE_EH_ELSE:
+ case GIMPLE_TRY:
+ case GIMPLE_OMP_FOR:
+ case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
+ case GIMPLE_OMP_TASKGROUP:
+ case GIMPLE_OMP_ORDERED:
+ case GIMPLE_OMP_SECTION:
+ case GIMPLE_OMP_PARALLEL:
+ case GIMPLE_OMP_TASK:
+ case GIMPLE_OMP_SCOPE:
+ case GIMPLE_OMP_SECTIONS:
+ case GIMPLE_OMP_SINGLE:
+ case GIMPLE_OMP_TARGET:
+ case GIMPLE_OMP_TEAMS:
+ case GIMPLE_OMP_CRITICAL:
+ case GIMPLE_WITH_CLEANUP_EXPR:
+ case GIMPLE_TRANSACTION:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+
+/* Return the basic block holding statement G. */
+
+inline basic_block
+gimple_bb (const gimple *g)
+{
+ return g->bb;
+}
+
+
+/* Return the lexical scope block holding statement G. */
+
+inline tree
+gimple_block (const gimple *g)
+{
+ return LOCATION_BLOCK (g->location);
+}
+
+/* Forward declare. */
+inline void gimple_set_location (gimple *, location_t);
+
+/* Set BLOCK to be the lexical scope block holding statement G. */
+
+inline void
+gimple_set_block (gimple *g, tree block)
+{
+ gimple_set_location (g, set_block (g->location, block));
+}
+
+/* Return location information for statement G. */
+
+inline location_t
+gimple_location (const gimple *g)
+{
+ return g->location;
+}
+
+/* Return location information for statement G if G is not NULL.
+ Otherwise, UNKNOWN_LOCATION is returned. */
+
+inline location_t
+gimple_location_safe (const gimple *g)
+{
+ return g ? gimple_location (g) : UNKNOWN_LOCATION;
+}
+
+/* Set location information for statement G. */
+
+inline void
+gimple_set_location (gimple *g, location_t location)
+{
+ /* Copy the no-warning data to the statement location. */
+ if (g->location != UNKNOWN_LOCATION)
+ copy_warning (location, g->location);
+ g->location = location;
+}
+
+/* Return address of the location information for statement G. */
+
+inline location_t *
+gimple_location_ptr (gimple *g)
+{
+ return &g->location;
+}
+
+
+/* Return true if G contains location information. */
+
+inline bool
+gimple_has_location (const gimple *g)
+{
+ return LOCATION_LOCUS (gimple_location (g)) != UNKNOWN_LOCATION;
+}
+
+
+/* Return non-artificial location information for statement G. */
+
+inline location_t
+gimple_nonartificial_location (const gimple *g)
+{
+ location_t *ploc = NULL;
+
+ if (tree block = gimple_block (g))
+ ploc = block_nonartificial_location (block);
+
+ return ploc ? *ploc : gimple_location (g);
+}
+
+
+/* Return the file name of the location of STMT. */
+
+inline const char *
+gimple_filename (const gimple *stmt)
+{
+ return LOCATION_FILE (gimple_location (stmt));
+}
+
+
+/* Return the line number of the location of STMT. */
+
+inline int
+gimple_lineno (const gimple *stmt)
+{
+ return LOCATION_LINE (gimple_location (stmt));
+}
+
+
+/* Determine whether SEQ is a singleton. */
+
+inline bool
+gimple_seq_singleton_p (gimple_seq seq)
+{
+ return ((gimple_seq_first (seq) != NULL)
+ && (gimple_seq_first (seq) == gimple_seq_last (seq)));
+}
+
+/* Return true if no warnings should be emitted for statement STMT. */
+
+inline bool
+gimple_no_warning_p (const gimple *stmt)
+{
+ return stmt->no_warning;
+}
+
+/* Set the no_warning flag of STMT to NO_WARNING. */
+
+inline void
+gimple_set_no_warning (gimple *stmt, bool no_warning)
+{
+ stmt->no_warning = (unsigned) no_warning;
+}
+
+/* Set the visited status on statement STMT to VISITED_P.
+
+ Please note that this 'visited' property of the gimple statement is
+ supposed to be undefined at pass boundaries. This means that a
+ given pass should not assume it contains any useful value when the
+ pass starts and thus can set it to any value it sees fit.
+
+ You can learn more about the visited property of the gimple
+ statement by reading the comments of the 'visited' data member of
+ struct gimple.
+ */
+
+inline void
+gimple_set_visited (gimple *stmt, bool visited_p)
+{
+ stmt->visited = (unsigned) visited_p;
+}
+
+
+/* Return the visited status for statement STMT.
+
+ Please note that this 'visited' property of the gimple statement is
+ supposed to be undefined at pass boundaries. This means that a
+ given pass should not assume it contains any useful value when the
+ pass starts and thus can set it to any value it sees fit.
+
+ You can learn more about the visited property of the gimple
+ statement by reading the comments of the 'visited' data member of
+ struct gimple. */
+
+inline bool
+gimple_visited_p (gimple *stmt)
+{
+ return stmt->visited;
+}
+
+
+/* Set pass local flag PLF on statement STMT to VAL_P.
+
+ Please note that this PLF property of the gimple statement is
+ supposed to be undefined at pass boundaries. This means that a
+ given pass should not assume it contains any useful value when the
+ pass starts and thus can set it to any value it sees fit.
+
+ You can learn more about the PLF property by reading the comment of
+ the 'plf' data member of struct gimple_statement_structure. */
+
+inline void
+gimple_set_plf (gimple *stmt, enum plf_mask plf, bool val_p)
+{
+ if (val_p)
+ stmt->plf |= (unsigned int) plf;
+ else
+ stmt->plf &= ~((unsigned int) plf);
+}
+
+
+/* Return the value of pass local flag PLF on statement STMT.
+
+ Please note that this 'plf' property of the gimple statement is
+ supposed to be undefined at pass boundaries. This means that a
+ given pass should not assume it contains any useful value when the
+ pass starts and thus can set it to any value it sees fit.
+
+ You can learn more about the plf property by reading the comment of
+ the 'plf' data member of struct gimple_statement_structure. */
+
+inline unsigned int
+gimple_plf (gimple *stmt, enum plf_mask plf)
+{
+ return stmt->plf & ((unsigned int) plf);
+}
+
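+/* Usage sketch (illustrative only): a pass may use the pass-local
+   flags to mark statements it has already processed.  STMT is assumed
+   to be a valid statement and GF_PLF_1 one of the plf_mask values;
+   skip is a hypothetical helper:
+
+     gimple_set_plf (stmt, GF_PLF_1, true);
+     ...
+     if (gimple_plf (stmt, GF_PLF_1))
+       skip (stmt);
+
+   Both 'visited' and 'plf' are undefined at pass boundaries, so set
+   them before testing them.  */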
+
+/* Set the UID of statement.
+
+ Please note that this UID property is supposed to be undefined at
+ pass boundaries. This means that a given pass should not assume it
+ contains any useful value when the pass starts and thus can set it
+ to any value it sees fit. */
+
+inline void
+gimple_set_uid (gimple *g, unsigned uid)
+{
+ g->uid = uid;
+}
+
+
+/* Return the UID of statement.
+
+ Please note that this UID property is supposed to be undefined at
+ pass boundaries. This means that a given pass should not assume it
+ contains any useful value when the pass starts and thus can set it
+ to any value it sees fit. */
+
+inline unsigned
+gimple_uid (const gimple *g)
+{
+ return g->uid;
+}
+
+
+/* Make statement G a singleton sequence. */
+
+inline void
+gimple_init_singleton (gimple *g)
+{
+ g->next = NULL;
+ g->prev = g;
+}
+
+
+/* Return true if GIMPLE statement G has register or memory operands. */
+
+inline bool
+gimple_has_ops (const gimple *g)
+{
+ return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_with_ops *>::test (const gimple *gs)
+{
+ return gimple_has_ops (gs);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_with_ops *>::test (gimple *gs)
+{
+ return gimple_has_ops (gs);
+}
+
+/* Return true if GIMPLE statement G has memory operands. */
+
+inline bool
+gimple_has_mem_ops (const gimple *g)
+{
+ return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const gimple_statement_with_memory_ops *>::test (const gimple *gs)
+{
+ return gimple_has_mem_ops (gs);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <gimple_statement_with_memory_ops *>::test (gimple *gs)
+{
+ return gimple_has_mem_ops (gs);
+}
+
+/* Return the set of USE operands for statement G. */
+
+inline struct use_optype_d *
+gimple_use_ops (const gimple *g)
+{
+ const gimple_statement_with_ops *ops_stmt =
+ dyn_cast <const gimple_statement_with_ops *> (g);
+ if (!ops_stmt)
+ return NULL;
+ return ops_stmt->use_ops;
+}
+
+
+/* Set USE to be the set of USE operands for statement G. */
+
+inline void
+gimple_set_use_ops (gimple *g, struct use_optype_d *use)
+{
+ gimple_statement_with_ops *ops_stmt =
+ as_a <gimple_statement_with_ops *> (g);
+ ops_stmt->use_ops = use;
+}
+
+
+/* Return the single VUSE operand of the statement G. */
+
+inline tree
+gimple_vuse (const gimple *g)
+{
+ const gimple_statement_with_memory_ops *mem_ops_stmt =
+ dyn_cast <const gimple_statement_with_memory_ops *> (g);
+ if (!mem_ops_stmt)
+ return NULL_TREE;
+ return mem_ops_stmt->vuse;
+}
+
+/* Return the single VDEF operand of the statement G. */
+
+inline tree
+gimple_vdef (const gimple *g)
+{
+ const gimple_statement_with_memory_ops *mem_ops_stmt =
+ dyn_cast <const gimple_statement_with_memory_ops *> (g);
+ if (!mem_ops_stmt)
+ return NULL_TREE;
+ return mem_ops_stmt->vdef;
+}
+
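+/* Usage sketch (illustrative only): with virtual operands up to date,
+   a statement that writes memory carries a VDEF while a pure load
+   carries only a VUSE, so a valid statement STMT can be classified as
+
+     if (gimple_vdef (stmt))
+       handle_store (stmt);
+     else if (gimple_vuse (stmt))
+       handle_load (stmt);
+
+   where handle_store and handle_load are hypothetical helpers.  */
+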
+/* Return the single VUSE operand of the statement G. */
+
+inline tree *
+gimple_vuse_ptr (gimple *g)
+{
+ gimple_statement_with_memory_ops *mem_ops_stmt =
+ dyn_cast <gimple_statement_with_memory_ops *> (g);
+ if (!mem_ops_stmt)
+ return NULL;
+ return &mem_ops_stmt->vuse;
+}
+
+/* Return the single VDEF operand of the statement G. */
+
+inline tree *
+gimple_vdef_ptr (gimple *g)
+{
+ gimple_statement_with_memory_ops *mem_ops_stmt =
+ dyn_cast <gimple_statement_with_memory_ops *> (g);
+ if (!mem_ops_stmt)
+ return NULL;
+ return &mem_ops_stmt->vdef;
+}
+
+/* Set the single VUSE operand of the statement G. */
+
+inline void
+gimple_set_vuse (gimple *g, tree vuse)
+{
+ gimple_statement_with_memory_ops *mem_ops_stmt =
+ as_a <gimple_statement_with_memory_ops *> (g);
+ mem_ops_stmt->vuse = vuse;
+}
+
+/* Set the single VDEF operand of the statement G. */
+
+inline void
+gimple_set_vdef (gimple *g, tree vdef)
+{
+ gimple_statement_with_memory_ops *mem_ops_stmt =
+ as_a <gimple_statement_with_memory_ops *> (g);
+ mem_ops_stmt->vdef = vdef;
+}
+
+
+/* Return true if statement G has operands and the modified field has
+ been set. */
+
+inline bool
+gimple_modified_p (const gimple *g)
+{
+ return (gimple_has_ops (g)) ? (bool) g->modified : false;
+}
+
+
+/* Set the MODIFIED flag to MODIFIEDP, iff the gimple statement G has
+ a MODIFIED field. */
+
+inline void
+gimple_set_modified (gimple *s, bool modifiedp)
+{
+ if (gimple_has_ops (s))
+ s->modified = (unsigned) modifiedp;
+}
+
+
+/* Return true if statement STMT contains volatile operands. */
+
+inline bool
+gimple_has_volatile_ops (const gimple *stmt)
+{
+ if (gimple_has_mem_ops (stmt))
+ return stmt->has_volatile_ops;
+ else
+ return false;
+}
+
+
+/* Set the HAS_VOLATILE_OPS flag to VOLATILEP. */
+
+inline void
+gimple_set_has_volatile_ops (gimple *stmt, bool volatilep)
+{
+ if (gimple_has_mem_ops (stmt))
+ stmt->has_volatile_ops = (unsigned) volatilep;
+}
+
+/* Return true if STMT is in a transaction. */
+
+inline bool
+gimple_in_transaction (const gimple *stmt)
+{
+ return bb_in_transaction (gimple_bb (stmt));
+}
+
+/* Return true if statement STMT may access memory. */
+
+inline bool
+gimple_references_memory_p (gimple *stmt)
+{
+ return gimple_has_mem_ops (stmt) && gimple_vuse (stmt);
+}
+
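+/* Usage sketch (illustrative only): code motion passes typically
+   combine the predicates above, e.g. requiring
+
+     !gimple_has_volatile_ops (stmt) && !gimple_references_memory_p (stmt)
+
+   before moving STMT (assumed to be a valid statement).  */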
+
+/* Return the subcode for OMP statement S. */
+
+inline unsigned
+gimple_omp_subcode (const gimple *s)
+{
+ gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
+ && gimple_code (s) <= GIMPLE_OMP_ORDERED);
+ return s->subcode;
+}
+
+/* Set the subcode for OMP statement S to SUBCODE. */
+
+inline void
+gimple_omp_set_subcode (gimple *s, unsigned int subcode)
+{
+ /* We only have 16 bits for the subcode. Assert that we are not
+ overflowing it. */
+ gcc_gimple_checking_assert (subcode < (1 << 16));
+ s->subcode = subcode;
+}
+
+/* Set the nowait flag on OMP_RETURN statement S. */
+
+inline void
+gimple_omp_return_set_nowait (gimple *s)
+{
+ GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
+ s->subcode |= GF_OMP_RETURN_NOWAIT;
+}
+
+
+/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
+ flag set. */
+
+inline bool
+gimple_omp_return_nowait_p (const gimple *g)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
+ return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0;
+}
+
+
+/* Set the LHS of OMP return. */
+
+inline void
+gimple_omp_return_set_lhs (gimple *g, tree lhs)
+{
+ gimple_statement_omp_return *omp_return_stmt =
+ as_a <gimple_statement_omp_return *> (g);
+ omp_return_stmt->val = lhs;
+}
+
+
+/* Get the LHS of OMP return. */
+
+inline tree
+gimple_omp_return_lhs (const gimple *g)
+{
+ const gimple_statement_omp_return *omp_return_stmt =
+ as_a <const gimple_statement_omp_return *> (g);
+ return omp_return_stmt->val;
+}
+
+
+/* Return a pointer to the LHS of OMP return. */
+
+inline tree *
+gimple_omp_return_lhs_ptr (gimple *g)
+{
+ gimple_statement_omp_return *omp_return_stmt =
+ as_a <gimple_statement_omp_return *> (g);
+ return &omp_return_stmt->val;
+}
+
+
+/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
+ flag set. */
+
+inline bool
+gimple_omp_section_last_p (const gimple *g)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
+ return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0;
+}
+
+
+/* Set the GF_OMP_SECTION_LAST flag on G. */
+
+inline void
+gimple_omp_section_set_last (gimple *g)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
+ g->subcode |= GF_OMP_SECTION_LAST;
+}
+
+
+/* Return true if OMP ordered construct is stand-alone
+ (G has the GF_OMP_ORDERED_STANDALONE flag set). */
+
+inline bool
+gimple_omp_ordered_standalone_p (const gimple *g)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_ORDERED);
+ return (gimple_omp_subcode (g) & GF_OMP_ORDERED_STANDALONE) != 0;
+}
+
+
+/* Set the GF_OMP_ORDERED_STANDALONE flag on G. */
+
+inline void
+gimple_omp_ordered_standalone (gimple *g)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_ORDERED);
+ g->subcode |= GF_OMP_ORDERED_STANDALONE;
+}
+
+
+/* Return true if OMP parallel statement G has the
+ GF_OMP_PARALLEL_COMBINED flag set. */
+
+inline bool
+gimple_omp_parallel_combined_p (const gimple *g)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
+ return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0;
+}
+
+
+/* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean
+ value of COMBINED_P. */
+
+inline void
+gimple_omp_parallel_set_combined_p (gimple *g, bool combined_p)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
+ if (combined_p)
+ g->subcode |= GF_OMP_PARALLEL_COMBINED;
+ else
+ g->subcode &= ~GF_OMP_PARALLEL_COMBINED;
+}
+
+
+/* Return true if OMP atomic load/store statement G has the
+ GF_OMP_ATOMIC_NEED_VALUE flag set. */
+
+inline bool
+gimple_omp_atomic_need_value_p (const gimple *g)
+{
+ if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
+ GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
+ return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_NEED_VALUE) != 0;
+}
+
+
+/* Set the GF_OMP_ATOMIC_NEED_VALUE flag on G. */
+
+inline void
+gimple_omp_atomic_set_need_value (gimple *g)
+{
+ if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
+ GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
+ g->subcode |= GF_OMP_ATOMIC_NEED_VALUE;
+}
+
+
+/* Return true if OMP atomic load/store statement G has the
+ GF_OMP_ATOMIC_WEAK flag set. */
+
+inline bool
+gimple_omp_atomic_weak_p (const gimple *g)
+{
+ if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
+ GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
+ return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_WEAK) != 0;
+}
+
+
+/* Set the GF_OMP_ATOMIC_WEAK flag on G. */
+
+inline void
+gimple_omp_atomic_set_weak (gimple *g)
+{
+ if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
+ GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
+ g->subcode |= GF_OMP_ATOMIC_WEAK;
+}
+
+
+/* Return the memory order of the OMP atomic load/store statement G. */
+
+inline enum omp_memory_order
+gimple_omp_atomic_memory_order (const gimple *g)
+{
+ if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
+ GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
+ return (enum omp_memory_order)
+ (gimple_omp_subcode (g) & GF_OMP_ATOMIC_MEMORY_ORDER);
+}
+
+
+/* Set the memory order on G. */
+
+inline void
+gimple_omp_atomic_set_memory_order (gimple *g, enum omp_memory_order mo)
+{
+ if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
+ GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
+ g->subcode = ((g->subcode & ~GF_OMP_ATOMIC_MEMORY_ORDER)
+ | (mo & GF_OMP_ATOMIC_MEMORY_ORDER));
+}
+
+
+/* Return the number of operands for statement GS. */
+
+inline unsigned
+gimple_num_ops (const gimple *gs)
+{
+ return gs->num_ops;
+}
+
+
+/* Set the number of operands for statement GS. */
+
+inline void
+gimple_set_num_ops (gimple *gs, unsigned num_ops)
+{
+ gs->num_ops = num_ops;
+}
+
+
+/* Return the array of operands for statement GS. */
+
+inline tree *
+gimple_ops (gimple *gs)
+{
+ size_t off;
+
+ /* All the tuples have their operand vector at the very bottom
+ of the structure. Note that those structures that do not
+ have an operand vector have a zero offset. */
+ off = gimple_ops_offset_[gimple_statement_structure (gs)];
+ gcc_gimple_checking_assert (off != 0);
+
+ return (tree *) ((char *) gs + off);
+}
+
+
+/* Return operand I for statement GS. */
+
+inline tree
+gimple_op (const gimple *gs, unsigned i)
+{
+ if (gimple_has_ops (gs))
+ {
+ gcc_gimple_checking_assert (i < gimple_num_ops (gs));
+ return gimple_ops (CONST_CAST_GIMPLE (gs))[i];
+ }
+ else
+ return NULL_TREE;
+}
+
+/* Return a pointer to operand I for statement GS. */
+
+inline tree *
+gimple_op_ptr (gimple *gs, unsigned i)
+{
+ if (gimple_has_ops (gs))
+ {
+ gcc_gimple_checking_assert (i < gimple_num_ops (gs));
+ return gimple_ops (gs) + i;
+ }
+ else
+ return NULL;
+}
+
+/* Set operand I of statement GS to OP. */
+
+inline void
+gimple_set_op (gimple *gs, unsigned i, tree op)
+{
+ gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs));
+
+ /* Note. It may be tempting to assert that OP matches
+ is_gimple_operand, but that would be wrong. Different tuples
+ accept slightly different sets of tree operands. Each caller
+ should perform its own validation. */
+ gimple_ops (gs)[i] = op;
+}
+
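+/* Usage sketch (illustrative only): walk every tree operand of a
+   valid statement STMT; process_op is a hypothetical callback:
+
+     for (unsigned i = 0; i < gimple_num_ops (stmt); i++)
+       if (tree op = gimple_op (stmt, i))
+         process_op (op);
+
+   The NULL check matters because individual operand slots (e.g. the
+   lhs of a call) may be empty.  */
+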
+/* Return true if GS is a GIMPLE_ASSIGN. */
+
+inline bool
+is_gimple_assign (const gimple *gs)
+{
+ return gimple_code (gs) == GIMPLE_ASSIGN;
+}
+
+/* Determine if expression CODE is one of the valid expressions that can
+ be used on the RHS of GIMPLE assignments. */
+
+inline enum gimple_rhs_class
+get_gimple_rhs_class (enum tree_code code)
+{
+ return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code];
+}
+
+/* Return the LHS of assignment statement GS. */
+
+inline tree
+gimple_assign_lhs (const gassign *gs)
+{
+ return gs->op[0];
+}
+
+inline tree
+gimple_assign_lhs (const gimple *gs)
+{
+ const gassign *ass = GIMPLE_CHECK2<const gassign *> (gs);
+ return gimple_assign_lhs (ass);
+}
+
+
+/* Return a pointer to the LHS of assignment statement GS. */
+
+inline tree *
+gimple_assign_lhs_ptr (gassign *gs)
+{
+ return &gs->op[0];
+}
+
+inline tree *
+gimple_assign_lhs_ptr (gimple *gs)
+{
+ gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
+ return gimple_assign_lhs_ptr (ass);
+}
+
+
+/* Set LHS to be the LHS operand of assignment statement GS. */
+
+inline void
+gimple_assign_set_lhs (gassign *gs, tree lhs)
+{
+ gs->op[0] = lhs;
+
+ if (lhs && TREE_CODE (lhs) == SSA_NAME)
+ SSA_NAME_DEF_STMT (lhs) = gs;
+}
+
+inline void
+gimple_assign_set_lhs (gimple *gs, tree lhs)
+{
+ gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
+ gimple_assign_set_lhs (ass, lhs);
+}
+
+
+/* Return the first operand on the RHS of assignment statement GS. */
+
+inline tree
+gimple_assign_rhs1 (const gassign *gs)
+{
+ return gs->op[1];
+}
+
+inline tree
+gimple_assign_rhs1 (const gimple *gs)
+{
+ const gassign *ass = GIMPLE_CHECK2<const gassign *> (gs);
+ return gimple_assign_rhs1 (ass);
+}
+
+
+/* Return a pointer to the first operand on the RHS of assignment
+ statement GS. */
+
+inline tree *
+gimple_assign_rhs1_ptr (gassign *gs)
+{
+ return &gs->op[1];
+}
+
+inline tree *
+gimple_assign_rhs1_ptr (gimple *gs)
+{
+ gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
+ return gimple_assign_rhs1_ptr (ass);
+}
+
+/* Set RHS to be the first operand on the RHS of assignment statement GS. */
+
+inline void
+gimple_assign_set_rhs1 (gassign *gs, tree rhs)
+{
+ gs->op[1] = rhs;
+}
+
+inline void
+gimple_assign_set_rhs1 (gimple *gs, tree rhs)
+{
+ gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
+ gimple_assign_set_rhs1 (ass, rhs);
+}
+
+
+/* Return the second operand on the RHS of assignment statement GS.
+   If GS does not have a second RHS operand, NULL_TREE is returned
+   instead. */
+
+inline tree
+gimple_assign_rhs2 (const gassign *gs)
+{
+ if (gimple_num_ops (gs) >= 3)
+ return gs->op[2];
+ else
+ return NULL_TREE;
+}
+
+inline tree
+gimple_assign_rhs2 (const gimple *gs)
+{
+ const gassign *ass = GIMPLE_CHECK2<const gassign *> (gs);
+ return gimple_assign_rhs2 (ass);
+}
+
+
+/* Return a pointer to the second operand on the RHS of assignment
+ statement GS. */
+
+inline tree *
+gimple_assign_rhs2_ptr (gassign *gs)
+{
+ gcc_gimple_checking_assert (gimple_num_ops (gs) >= 3);
+ return &gs->op[2];
+}
+
+inline tree *
+gimple_assign_rhs2_ptr (gimple *gs)
+{
+ gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
+ return gimple_assign_rhs2_ptr (ass);
+}
+
+
+/* Set RHS to be the second operand on the RHS of assignment statement GS. */
+
+inline void
+gimple_assign_set_rhs2 (gassign *gs, tree rhs)
+{
+ gcc_gimple_checking_assert (gimple_num_ops (gs) >= 3);
+ gs->op[2] = rhs;
+}
+
+inline void
+gimple_assign_set_rhs2 (gimple *gs, tree rhs)
+{
+ gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
+  gimple_assign_set_rhs2 (ass, rhs);
+}
+
+/* Return the third operand on the RHS of assignment statement GS.
+   If GS does not have a third RHS operand, NULL_TREE is returned
+   instead. */
+
+inline tree
+gimple_assign_rhs3 (const gassign *gs)
+{
+ if (gimple_num_ops (gs) >= 4)
+ return gs->op[3];
+ else
+ return NULL_TREE;
+}
+
+inline tree
+gimple_assign_rhs3 (const gimple *gs)
+{
+ const gassign *ass = GIMPLE_CHECK2<const gassign *> (gs);
+ return gimple_assign_rhs3 (ass);
+}
+
+/* Return a pointer to the third operand on the RHS of assignment
+ statement GS. */
+
+inline tree *
+gimple_assign_rhs3_ptr (gimple *gs)
+{
+ gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
+ gcc_gimple_checking_assert (gimple_num_ops (gs) >= 4);
+ return &ass->op[3];
+}
+
+
+/* Set RHS to be the third operand on the RHS of assignment statement GS. */
+
+inline void
+gimple_assign_set_rhs3 (gassign *gs, tree rhs)
+{
+ gcc_gimple_checking_assert (gimple_num_ops (gs) >= 4);
+ gs->op[3] = rhs;
+}
+
+inline void
+gimple_assign_set_rhs3 (gimple *gs, tree rhs)
+{
+ gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
+ gimple_assign_set_rhs3 (ass, rhs);
+}
+
+
+/* A wrapper around 3 operand gimple_assign_set_rhs_with_ops, for callers
+ which expect to see only two operands. */
+
+inline void
+gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
+ tree op1, tree op2)
+{
+ gimple_assign_set_rhs_with_ops (gsi, code, op1, op2, NULL);
+}
+
+/* A wrapper around 3 operand gimple_assign_set_rhs_with_ops, for callers
+   which expect to see only one operand. */
+
+inline void
+gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
+ tree op1)
+{
+ gimple_assign_set_rhs_with_ops (gsi, code, op1, NULL, NULL);
+}
+
+/* Returns true if GS is a nontemporal move. */
+
+inline bool
+gimple_assign_nontemporal_move_p (const gassign *gs)
+{
+ return gs->nontemporal_move;
+}
+
+/* Sets nontemporal move flag of GS to NONTEMPORAL. */
+
+inline void
+gimple_assign_set_nontemporal_move (gimple *gs, bool nontemporal)
+{
+ GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
+ gs->nontemporal_move = nontemporal;
+}
+
+
+/* Return the code of the expression computed on the rhs of assignment
+ statement GS. In case that the RHS is a single object, returns the
+ tree code of the object. */
+
+inline enum tree_code
+gimple_assign_rhs_code (const gassign *gs)
+{
+ enum tree_code code = (enum tree_code) gs->subcode;
+ /* While we initially set subcode to the TREE_CODE of the rhs for
+ GIMPLE_SINGLE_RHS assigns we do not update that subcode to stay
+ in sync when we rewrite stmts into SSA form or do SSA propagations. */
+ if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
+ code = TREE_CODE (gs->op[1]);
+
+ return code;
+}
+
+inline enum tree_code
+gimple_assign_rhs_code (const gimple *gs)
+{
+ const gassign *ass = GIMPLE_CHECK2<const gassign *> (gs);
+ return gimple_assign_rhs_code (ass);
+}
+
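+/* Usage sketch (illustrative only): passes typically dispatch on the
+   shape of an assignment's RHS; for a valid gassign ASS:
+
+     switch (get_gimple_rhs_class (gimple_assign_rhs_code (ass)))
+       {
+       case GIMPLE_TERNARY_RHS:  ... rhs1, rhs2 and rhs3 are valid ...
+       case GIMPLE_BINARY_RHS:   ... rhs1 and rhs2 are valid ...
+       case GIMPLE_UNARY_RHS:    ... only rhs1 is valid ...
+       case GIMPLE_SINGLE_RHS:   ... rhs1 is the whole RHS ...
+       default: gcc_unreachable ();
+       }  */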
+
+/* Set CODE to be the code for the expression computed on the RHS of
+ assignment S. */
+
+inline void
+gimple_assign_set_rhs_code (gimple *s, enum tree_code code)
+{
+ GIMPLE_CHECK (s, GIMPLE_ASSIGN);
+ s->subcode = code;
+}
+
+
+/* Return the gimple rhs class of the code of the expression computed on
+ the rhs of assignment statement GS.
+ This will never return GIMPLE_INVALID_RHS. */
+
+inline enum gimple_rhs_class
+gimple_assign_rhs_class (const gimple *gs)
+{
+ return get_gimple_rhs_class (gimple_assign_rhs_code (gs));
+}
+
+/* Return true if GS is an assignment with a singleton RHS, i.e.,
+ there is no operator associated with the assignment itself.
+ Unlike gimple_assign_copy_p, this predicate returns true for
+ any RHS operand, including those that perform an operation
+ and do not have the semantics of a copy, such as COND_EXPR. */
+
+inline bool
+gimple_assign_single_p (const gimple *gs)
+{
+ return (is_gimple_assign (gs)
+ && gimple_assign_rhs_class (gs) == GIMPLE_SINGLE_RHS);
+}
+
+/* Return true if GS performs a store to its lhs. */
+
+inline bool
+gimple_store_p (const gimple *gs)
+{
+ tree lhs = gimple_get_lhs (gs);
+ return lhs && !is_gimple_reg (lhs);
+}
+
+/* Return true if GS is an assignment that loads from its rhs1. */
+
+inline bool
+gimple_assign_load_p (const gimple *gs)
+{
+ tree rhs;
+ if (!gimple_assign_single_p (gs))
+ return false;
+ rhs = gimple_assign_rhs1 (gs);
+ if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
+ return true;
+ rhs = get_base_address (rhs);
+ return (DECL_P (rhs)
+ || TREE_CODE (rhs) == MEM_REF || TREE_CODE (rhs) == TARGET_MEM_REF);
+}
+
+
+/* Return true if S is a type-cast assignment. */
+
+inline bool
+gimple_assign_cast_p (const gimple *s)
+{
+ if (is_gimple_assign (s))
+ {
+ enum tree_code sc = gimple_assign_rhs_code (s);
+ return CONVERT_EXPR_CODE_P (sc)
+ || sc == VIEW_CONVERT_EXPR
+ || sc == FIX_TRUNC_EXPR;
+ }
+
+ return false;
+}
+
+/* Return true if S is a clobber statement. */
+
+inline bool
+gimple_clobber_p (const gimple *s)
+{
+ return gimple_assign_single_p (s)
+ && TREE_CLOBBER_P (gimple_assign_rhs1 (s));
+}
+
+/* Return true if S is a clobber statement. */
+
+inline bool
+gimple_clobber_p (const gimple *s, enum clobber_kind kind)
+{
+ return gimple_clobber_p (s)
+ && CLOBBER_KIND (gimple_assign_rhs1 (s)) == kind;
+}
+
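+/* Usage sketch (illustrative only): the predicates above compose into
+   a simple memory-access classification of a valid statement STMT;
+   clobbers are tested first because they also satisfy gimple_store_p:
+
+     if (gimple_clobber_p (stmt))
+       ;  (end of storage lifetime, not a real store)
+     else if (gimple_store_p (stmt))
+       ;  (writes memory through its lhs)
+     else if (gimple_assign_load_p (stmt))
+       ;  (reads memory through its rhs1)  */
+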
+/* Return true if GS is a GIMPLE_CALL. */
+
+inline bool
+is_gimple_call (const gimple *gs)
+{
+ return gimple_code (gs) == GIMPLE_CALL;
+}
+
+/* Return the LHS of call statement GS. */
+
+inline tree
+gimple_call_lhs (const gcall *gs)
+{
+ return gs->op[0];
+}
+
+inline tree
+gimple_call_lhs (const gimple *gs)
+{
+ const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
+ return gimple_call_lhs (gc);
+}
+
+
+/* Return a pointer to the LHS of call statement GS. */
+
+inline tree *
+gimple_call_lhs_ptr (gcall *gs)
+{
+ return &gs->op[0];
+}
+
+inline tree *
+gimple_call_lhs_ptr (gimple *gs)
+{
+ gcall *gc = GIMPLE_CHECK2<gcall *> (gs);
+ return gimple_call_lhs_ptr (gc);
+}
+
+
+/* Set LHS to be the LHS operand of call statement GS. */
+
+inline void
+gimple_call_set_lhs (gcall *gs, tree lhs)
+{
+ gs->op[0] = lhs;
+ if (lhs && TREE_CODE (lhs) == SSA_NAME)
+ SSA_NAME_DEF_STMT (lhs) = gs;
+}
+
+inline void
+gimple_call_set_lhs (gimple *gs, tree lhs)
+{
+ gcall *gc = GIMPLE_CHECK2<gcall *> (gs);
+ gimple_call_set_lhs (gc, lhs);
+}
+
+
+/* Return true if call GS calls an internal-only function, as enumerated
+ by internal_fn. */
+
+inline bool
+gimple_call_internal_p (const gcall *gs)
+{
+ return (gs->subcode & GF_CALL_INTERNAL) != 0;
+}
+
+inline bool
+gimple_call_internal_p (const gimple *gs)
+{
+ const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
+ return gimple_call_internal_p (gc);
+}
+
+/* Return true if call GS is marked as nocf_check. */
+
+inline bool
+gimple_call_nocf_check_p (const gcall *gs)
+{
+ return (gs->subcode & GF_CALL_NOCF_CHECK) != 0;
+}
+
+/* Mark statement GS as nocf_check call. */
+
+inline void
+gimple_call_set_nocf_check (gcall *gs, bool nocf_check)
+{
+ if (nocf_check)
+ gs->subcode |= GF_CALL_NOCF_CHECK;
+ else
+ gs->subcode &= ~GF_CALL_NOCF_CHECK;
+}
+
+/* Return the target of internal call GS. */
+
+inline enum internal_fn
+gimple_call_internal_fn (const gcall *gs)
+{
+ gcc_gimple_checking_assert (gimple_call_internal_p (gs));
+ return gs->u.internal_fn;
+}
+
+inline enum internal_fn
+gimple_call_internal_fn (const gimple *gs)
+{
+ const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
+ return gimple_call_internal_fn (gc);
+}
+
+/* Return true if GS is an internal call to IFN_UNIQUE. */
+
+inline bool
+gimple_call_internal_unique_p (const gcall *gs)
+{
+ return gimple_call_internal_fn (gs) == IFN_UNIQUE;
+}
+
+inline bool
+gimple_call_internal_unique_p (const gimple *gs)
+{
+ const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
+ return gimple_call_internal_unique_p (gc);
+}
+
+/* Return true if GS is an internal function FN. */
+
+inline bool
+gimple_call_internal_p (const gimple *gs, internal_fn fn)
+{
+ return (is_gimple_call (gs)
+ && gimple_call_internal_p (gs)
+ && gimple_call_internal_fn (gs) == fn);
+}
+
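+/* Usage sketch (illustrative only): the overload just above makes it
+   safe to test any statement for one specific internal function:
+
+     if (gimple_call_internal_p (stmt, IFN_UNIQUE))
+       ...
+
+   since it first verifies that STMT is a call at all.  */
+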
+/* If CTRL_ALTERING_P is true, mark GIMPLE_CALL S as a statement that
+   could alter control flow. */
+
+inline void
+gimple_call_set_ctrl_altering (gcall *s, bool ctrl_altering_p)
+{
+ if (ctrl_altering_p)
+ s->subcode |= GF_CALL_CTRL_ALTERING;
+ else
+ s->subcode &= ~GF_CALL_CTRL_ALTERING;
+}
+
+inline void
+gimple_call_set_ctrl_altering (gimple *s, bool ctrl_altering_p)
+{
+ gcall *gc = GIMPLE_CHECK2<gcall *> (s);
+ gimple_call_set_ctrl_altering (gc, ctrl_altering_p);
+}
+
+/* Return true if call GS calls a function whose GF_CALL_CTRL_ALTERING
+   flag is set.  Such a call cannot appear in the middle of a basic
+   block. */
+
+inline bool
+gimple_call_ctrl_altering_p (const gcall *gs)
+{
+ return (gs->subcode & GF_CALL_CTRL_ALTERING) != 0;
+}
+
+inline bool
+gimple_call_ctrl_altering_p (const gimple *gs)
+{
+ const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
+ return gimple_call_ctrl_altering_p (gc);
+}
+
+
+/* Return the function type of the function called by GS. */
+
+inline tree
+gimple_call_fntype (const gcall *gs)
+{
+ if (gimple_call_internal_p (gs))
+ return NULL_TREE;
+ return gs->u.fntype;
+}
+
+inline tree
+gimple_call_fntype (const gimple *gs)
+{
+ const gcall *call_stmt = GIMPLE_CHECK2<const gcall *> (gs);
+ return gimple_call_fntype (call_stmt);
+}
+
+/* Set the type of the function called by CALL_STMT to FNTYPE. */
+
+inline void
+gimple_call_set_fntype (gcall *call_stmt, tree fntype)
+{
+ gcc_gimple_checking_assert (!gimple_call_internal_p (call_stmt));
+ call_stmt->u.fntype = fntype;
+}
+
+
+/* Return the tree node representing the function called by call
+ statement GS. */
+
+inline tree
+gimple_call_fn (const gcall *gs)
+{
+ return gs->op[1];
+}
+
+inline tree
+gimple_call_fn (const gimple *gs)
+{
+ const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
+ return gimple_call_fn (gc);
+}
+
+/* Return a pointer to the tree node representing the function called by call
+ statement GS. */
+
+inline tree *
+gimple_call_fn_ptr (gcall *gs)
+{
+ return &gs->op[1];
+}
+
+inline tree *
+gimple_call_fn_ptr (gimple *gs)
+{
+ gcall *gc = GIMPLE_CHECK2<gcall *> (gs);
+ return gimple_call_fn_ptr (gc);
+}
+
+
+/* Set FN to be the function called by call statement GS. */
+
+inline void
+gimple_call_set_fn (gcall *gs, tree fn)
+{
+ gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
+ gs->op[1] = fn;
+}
+
+
+/* Set FNDECL to be the function called by call statement GS. */
+
+inline void
+gimple_call_set_fndecl (gcall *gs, tree decl)
+{
+ gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
+ gs->op[1] = build1_loc (gimple_location (gs), ADDR_EXPR,
+ build_pointer_type (TREE_TYPE (decl)), decl);
+}
+
+inline void
+gimple_call_set_fndecl (gimple *gs, tree decl)
+{
+ gcall *gc = GIMPLE_CHECK2<gcall *> (gs);
+ gimple_call_set_fndecl (gc, decl);
+}
+
+
+/* Set internal function FN to be the function called by call statement
+   CALL_STMT. */
+
+inline void
+gimple_call_set_internal_fn (gcall *call_stmt, enum internal_fn fn)
+{
+ gcc_gimple_checking_assert (gimple_call_internal_p (call_stmt));
+ call_stmt->u.internal_fn = fn;
+}
+
+
+/* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it.
+ Otherwise return NULL. This function is analogous to
+ get_callee_fndecl in tree land. */
+
+inline tree
+gimple_call_fndecl (const gcall *gs)
+{
+ return gimple_call_addr_fndecl (gimple_call_fn (gs));
+}
+
+inline tree
+gimple_call_fndecl (const gimple *gs)
+{
+ const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
+ return gimple_call_fndecl (gc);
+}
+
+
+/* Return the type returned by call statement GS. */
+
+inline tree
+gimple_call_return_type (const gcall *gs)
+{
+ tree type = gimple_call_fntype (gs);
+
+ if (type == NULL_TREE)
+ return TREE_TYPE (gimple_call_lhs (gs));
+
+ /* The type returned by a function is the type of its
+ function type. */
+ return TREE_TYPE (type);
+}
+
+
+/* Return the static chain for call statement GS. */
+
+inline tree
+gimple_call_chain (const gcall *gs)
+{
+ return gs->op[2];
+}
+
+inline tree
+gimple_call_chain (const gimple *gs)
+{
+ const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
+ return gimple_call_chain (gc);
+}
+
+
+/* Return a pointer to the static chain for call statement CALL_STMT. */
+
+inline tree *
+gimple_call_chain_ptr (gcall *call_stmt)
+{
+ return &call_stmt->op[2];
+}
+
+/* Set CHAIN to be the static chain for call statement CALL_STMT. */
+
+inline void
+gimple_call_set_chain (gcall *call_stmt, tree chain)
+{
+ call_stmt->op[2] = chain;
+}
+
+
+/* Return the number of arguments used by call statement GS. */
+
+inline unsigned
+gimple_call_num_args (const gcall *gs)
+{
+ return gimple_num_ops (gs) - 3;
+}
+
+inline unsigned
+gimple_call_num_args (const gimple *gs)
+{
+ const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
+ return gimple_call_num_args (gc);
+}
+
+
+/* Return the argument at position INDEX for call statement GS. */
+
+inline tree
+gimple_call_arg (const gcall *gs, unsigned index)
+{
+ gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 3);
+ return gs->op[index + 3];
+}
+
+inline tree
+gimple_call_arg (const gimple *gs, unsigned index)
+{
+ const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
+ return gimple_call_arg (gc, index);
+}
+
+
+/* Return a pointer to the argument at position INDEX for call
+ statement GS. */
+
+inline tree *
+gimple_call_arg_ptr (gcall *gs, unsigned index)
+{
+ gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 3);
+ return &gs->op[index + 3];
+}
+
+inline tree *
+gimple_call_arg_ptr (gimple *gs, unsigned index)
+{
+ gcall *gc = GIMPLE_CHECK2<gcall *> (gs);
+ return gimple_call_arg_ptr (gc, index);
+}
+
+
+/* Set ARG to be the argument at position INDEX for call statement GS. */
+
+inline void
+gimple_call_set_arg (gcall *gs, unsigned index, tree arg)
+{
+ gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 3);
+ gs->op[index + 3] = arg;
+}
+
+inline void
+gimple_call_set_arg (gimple *gs, unsigned index, tree arg)
+{
+ gcall *gc = GIMPLE_CHECK2<gcall *> (gs);
+ gimple_call_set_arg (gc, index, arg);
+}
+
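+/* Usage sketch (illustrative only): as the accessors above encode, a
+   gcall's operand vector is laid out as op[0] = lhs, op[1] = callee,
+   op[2] = static chain and op[3 ...] = arguments, hence the "+ 3".
+   To walk the arguments of a valid gcall CALL with a hypothetical
+   callback process_arg:
+
+     for (unsigned i = 0; i < gimple_call_num_args (call); i++)
+       process_arg (gimple_call_arg (call, i));  */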
+
+/* If TAIL_P is true, mark call statement S as being a tail call
+ (i.e., a call just before the exit of a function). These calls are
+   candidates for tail call optimization. */
+
+inline void
+gimple_call_set_tail (gcall *s, bool tail_p)
+{
+ if (tail_p)
+ s->subcode |= GF_CALL_TAILCALL;
+ else
+ s->subcode &= ~GF_CALL_TAILCALL;
+}
+
+
+/* Return true if GIMPLE_CALL S is marked as a tail call. */
+
+inline bool
+gimple_call_tail_p (const gcall *s)
+{
+ return (s->subcode & GF_CALL_TAILCALL) != 0;
+}
+
+/* Mark (or clear) call statement S as requiring tail call optimization. */
+
+inline void
+gimple_call_set_must_tail (gcall *s, bool must_tail_p)
+{
+ if (must_tail_p)
+ s->subcode |= GF_CALL_MUST_TAIL_CALL;
+ else
+ s->subcode &= ~GF_CALL_MUST_TAIL_CALL;
+}
+
+/* Return true if call statement has been marked as requiring
+ tail call optimization. */
+
+inline bool
+gimple_call_must_tail_p (const gcall *s)
+{
+ return (s->subcode & GF_CALL_MUST_TAIL_CALL) != 0;
+}
+
+/* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return
+ slot optimization. This transformation uses the target of the call
+ expansion as the return slot for calls that return in memory. */
+
+inline void
+gimple_call_set_return_slot_opt (gcall *s, bool return_slot_opt_p)
+{
+ if (return_slot_opt_p)
+ s->subcode |= GF_CALL_RETURN_SLOT_OPT;
+ else
+ s->subcode &= ~GF_CALL_RETURN_SLOT_OPT;
+}
+
+
+/* Return true if S is marked for return slot optimization. */
+
+inline bool
+gimple_call_return_slot_opt_p (const gcall *s)
+{
+ return (s->subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
+}
+
+
+/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
+ thunk to the thunked-to function. */
+
+inline void
+gimple_call_set_from_thunk (gcall *s, bool from_thunk_p)
+{
+ if (from_thunk_p)
+ s->subcode |= GF_CALL_FROM_THUNK;
+ else
+ s->subcode &= ~GF_CALL_FROM_THUNK;
+}
+
+
+/* Return true if GIMPLE_CALL S is a jump from a thunk. */
+
+inline bool
+gimple_call_from_thunk_p (gcall *s)
+{
+ return (s->subcode & GF_CALL_FROM_THUNK) != 0;
+}
+
+
+/* If FROM_NEW_OR_DELETE_P is true, mark GIMPLE_CALL S as being a call
+ to operator new or delete created from a new or delete expression. */
+
+inline void
+gimple_call_set_from_new_or_delete (gcall *s, bool from_new_or_delete_p)
+{
+ if (from_new_or_delete_p)
+ s->subcode |= GF_CALL_FROM_NEW_OR_DELETE;
+ else
+ s->subcode &= ~GF_CALL_FROM_NEW_OR_DELETE;
+}
+
+
+/* Return true if GIMPLE_CALL S is a call to operator new or delete
+   created from a new or delete expression. */
+
+inline bool
+gimple_call_from_new_or_delete (const gcall *s)
+{
+ return (s->subcode & GF_CALL_FROM_NEW_OR_DELETE) != 0;
+}
+
+
+/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the
+ argument pack in its argument list. */
+
+inline void
+gimple_call_set_va_arg_pack (gcall *s, bool pass_arg_pack_p)
+{
+ if (pass_arg_pack_p)
+ s->subcode |= GF_CALL_VA_ARG_PACK;
+ else
+ s->subcode &= ~GF_CALL_VA_ARG_PACK;
+}
+
+
+/* Return true if GIMPLE_CALL S is a stdarg call that needs the
+ argument pack in its argument list. */
+
+inline bool
+gimple_call_va_arg_pack_p (const gcall *s)
+{
+ return (s->subcode & GF_CALL_VA_ARG_PACK) != 0;
+}
+
+
+/* Return true if S is a noreturn call. */
+
+inline bool
+gimple_call_noreturn_p (const gcall *s)
+{
+ return (gimple_call_flags (s) & ECF_NORETURN) != 0;
+}
+
+inline bool
+gimple_call_noreturn_p (const gimple *s)
+{
+ const gcall *gc = GIMPLE_CHECK2<const gcall *> (s);
+ return gimple_call_noreturn_p (gc);
+}
+
+
+/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw
+ even if the called function can throw in other cases. */
+
+inline void
+gimple_call_set_nothrow (gcall *s, bool nothrow_p)
+{
+ if (nothrow_p)
+ s->subcode |= GF_CALL_NOTHROW;
+ else
+ s->subcode &= ~GF_CALL_NOTHROW;
+}
+
+/* Return true if S is a nothrow call. */
+
+inline bool
+gimple_call_nothrow_p (gcall *s)
+{
+ return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
+}
+
+/* If FOR_VAR is true, GIMPLE_CALL S is a call to builtin_alloca that
+ is known to be emitted for VLA objects. Those are wrapped by
+ stack_save/stack_restore calls and hence can't lead to unbounded
+ stack growth even when they occur in loops. */
+
+inline void
+gimple_call_set_alloca_for_var (gcall *s, bool for_var)
+{
+ if (for_var)
+ s->subcode |= GF_CALL_ALLOCA_FOR_VAR;
+ else
+ s->subcode &= ~GF_CALL_ALLOCA_FOR_VAR;
+}
+
+/* Return true if S is a call to builtin_alloca emitted for VLA objects. */
+
+inline bool
+gimple_call_alloca_for_var_p (gcall *s)
+{
+ return (s->subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
+}
+
+inline bool
+gimple_call_alloca_for_var_p (gimple *s)
+{
+ const gcall *gc = GIMPLE_CHECK2<gcall *> (s);
+ return (gc->subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
+}
+
+/* If BY_DESCRIPTOR_P is true, GIMPLE_CALL S is an indirect call for which
+ pointers to nested function are descriptors instead of trampolines. */
+
+inline void
+gimple_call_set_by_descriptor (gcall *s, bool by_descriptor_p)
+{
+ if (by_descriptor_p)
+ s->subcode |= GF_CALL_BY_DESCRIPTOR;
+ else
+ s->subcode &= ~GF_CALL_BY_DESCRIPTOR;
+}
+
+/* Return true if S is a by-descriptor call. */
+
+inline bool
+gimple_call_by_descriptor_p (gcall *s)
+{
+ return (s->subcode & GF_CALL_BY_DESCRIPTOR) != 0;
+}
+
+/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL. */
+
+inline void
+gimple_call_copy_flags (gcall *dest_call, gcall *orig_call)
+{
+ dest_call->subcode = orig_call->subcode;
+}
+
+
+/* Return a pointer to the points-to solution for the set of call-used
+ variables of the call CALL_STMT. */
+
+inline struct pt_solution *
+gimple_call_use_set (gcall *call_stmt)
+{
+ return &call_stmt->call_used;
+}
+
+/* As above, but const. */
+
+inline const pt_solution *
+gimple_call_use_set (const gcall *call_stmt)
+{
+ return &call_stmt->call_used;
+}
+
+/* Return a pointer to the points-to solution for the set of call-used
+ variables of the call CALL_STMT. */
+
+inline struct pt_solution *
+gimple_call_clobber_set (gcall *call_stmt)
+{
+ return &call_stmt->call_clobbered;
+}
+
+/* As above, but const. */
+
+inline const pt_solution *
+gimple_call_clobber_set (const gcall *call_stmt)
+{
+ return &call_stmt->call_clobbered;
+}
+
+
+/* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a
+ non-NULL lhs. */
+
+inline bool
+gimple_has_lhs (const gimple *stmt)
+{
+ if (is_gimple_assign (stmt))
+ return true;
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ return gimple_call_lhs (call) != NULL_TREE;
+ return false;
+}
+
+
+/* Return the code of the predicate computed by conditional statement GS. */
+
+inline enum tree_code
+gimple_cond_code (const gcond *gs)
+{
+ return (enum tree_code) gs->subcode;
+}
+
+inline enum tree_code
+gimple_cond_code (const gimple *gs)
+{
+ const gcond *gc = GIMPLE_CHECK2<const gcond *> (gs);
+ return gimple_cond_code (gc);
+}
+
+
+/* Set CODE to be the predicate code for the conditional statement GS. */
+
+inline void
+gimple_cond_set_code (gcond *gs, enum tree_code code)
+{
+ gs->subcode = code;
+}
+
+
+/* Return the LHS of the predicate computed by conditional statement GS. */
+
+inline tree
+gimple_cond_lhs (const gcond *gs)
+{
+ return gs->op[0];
+}
+
+inline tree
+gimple_cond_lhs (const gimple *gs)
+{
+ const gcond *gc = GIMPLE_CHECK2<const gcond *> (gs);
+ return gimple_cond_lhs (gc);
+}
+
+/* Return the pointer to the LHS of the predicate computed by conditional
+ statement GS. */
+
+inline tree *
+gimple_cond_lhs_ptr (gcond *gs)
+{
+ return &gs->op[0];
+}
+
+/* Set LHS to be the LHS operand of the predicate computed by
+ conditional statement GS. */
+
+inline void
+gimple_cond_set_lhs (gcond *gs, tree lhs)
+{
+ gs->op[0] = lhs;
+}
+
+
+/* Return the RHS operand of the predicate computed by conditional GS. */
+
+inline tree
+gimple_cond_rhs (const gcond *gs)
+{
+ return gs->op[1];
+}
+
+inline tree
+gimple_cond_rhs (const gimple *gs)
+{
+ const gcond *gc = GIMPLE_CHECK2<const gcond *> (gs);
+ return gimple_cond_rhs (gc);
+}
+
+/* Return the pointer to the RHS operand of the predicate computed by
+ conditional GS. */
+
+inline tree *
+gimple_cond_rhs_ptr (gcond *gs)
+{
+ return &gs->op[1];
+}
+
+
+/* Set RHS to be the RHS operand of the predicate computed by
+ conditional statement GS. */
+
+inline void
+gimple_cond_set_rhs (gcond *gs, tree rhs)
+{
+ gs->op[1] = rhs;
+}
+
+
+/* Return the label used by conditional statement GS when its
+ predicate evaluates to true. */
+
+inline tree
+gimple_cond_true_label (const gcond *gs)
+{
+ return gs->op[2];
+}
+
+
+/* Set LABEL to be the label used by conditional statement GS when its
+ predicate evaluates to true. */
+
+inline void
+gimple_cond_set_true_label (gcond *gs, tree label)
+{
+ gs->op[2] = label;
+}
+
+
+/* Set LABEL to be the label used by conditional statement GS when its
+ predicate evaluates to false. */
+
+inline void
+gimple_cond_set_false_label (gcond *gs, tree label)
+{
+ gs->op[3] = label;
+}
+
+
+/* Return the label used by conditional statement GS when its
+ predicate evaluates to false. */
+
+inline tree
+gimple_cond_false_label (const gcond *gs)
+{
+ return gs->op[3];
+}
+
+
+/* Set the conditional GS to be of the form 'if (0 != 0)',
+   i.e. always false. */
+
+inline void
+gimple_cond_make_false (gcond *gs)
+{
+ gimple_cond_set_lhs (gs, boolean_false_node);
+ gimple_cond_set_rhs (gs, boolean_false_node);
+ gs->subcode = NE_EXPR;
+}
+
+
+/* Set the conditional GS to be of the form 'if (1 != 0)',
+   i.e. always true. */
+
+inline void
+gimple_cond_make_true (gcond *gs)
+{
+ gimple_cond_set_lhs (gs, boolean_true_node);
+ gimple_cond_set_rhs (gs, boolean_false_node);
+ gs->subcode = NE_EXPR;
+}
+
+/* Check if conditional statement GS is of the form 'if (1 == 1)',
+   'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)'. */
+
+inline bool
+gimple_cond_true_p (const gcond *gs)
+{
+ tree lhs = gimple_cond_lhs (gs);
+ tree rhs = gimple_cond_rhs (gs);
+ enum tree_code code = gimple_cond_code (gs);
+
+ if (lhs != boolean_true_node && lhs != boolean_false_node)
+ return false;
+
+ if (rhs != boolean_true_node && rhs != boolean_false_node)
+ return false;
+
+ if (code == NE_EXPR && lhs != rhs)
+ return true;
+
+ if (code == EQ_EXPR && lhs == rhs)
+ return true;
+
+ return false;
+}
+
+/* Check if conditional statement GS is of the form 'if (1 != 1)',
+   'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)'. */
+
+inline bool
+gimple_cond_false_p (const gcond *gs)
+{
+ tree lhs = gimple_cond_lhs (gs);
+ tree rhs = gimple_cond_rhs (gs);
+ enum tree_code code = gimple_cond_code (gs);
+
+ if (lhs != boolean_true_node && lhs != boolean_false_node)
+ return false;
+
+ if (rhs != boolean_true_node && rhs != boolean_false_node)
+ return false;
+
+ if (code == NE_EXPR && lhs == rhs)
+ return true;
+
+ if (code == EQ_EXPR && lhs != rhs)
+ return true;
+
+ return false;
+}
+
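+/* Usage sketch (illustrative only): a pass that proves a condition
+   constant can canonicalize a valid gcond COND and recognize the
+   result later:
+
+     gimple_cond_make_true (cond);
+     ...
+     if (gimple_cond_true_p (cond))
+       ;  (the true edge is always taken)  */
+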
+/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS. */
+
+inline void
+gimple_cond_set_condition (gcond *stmt, enum tree_code code, tree lhs,
+ tree rhs)
+{
+ gimple_cond_set_code (stmt, code);
+ gimple_cond_set_lhs (stmt, lhs);
+ gimple_cond_set_rhs (stmt, rhs);
+}
+
+
+/* Return the tree code for the expression computed by STMT. This is
+ only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN. For
+ GIMPLE_CALL, return CALL_EXPR as the expression code for
+ consistency. This is useful when the caller needs to deal with the
+ three kinds of computation that GIMPLE supports. */
+
+inline enum tree_code
+gimple_expr_code (const gimple *stmt)
+{
+ if (const gassign *ass = dyn_cast<const gassign *> (stmt))
+ return gimple_assign_rhs_code (ass);
+ if (const gcond *cond = dyn_cast<const gcond *> (stmt))
+ return gimple_cond_code (cond);
+ else
+ {
+ gcc_gimple_checking_assert (gimple_code (stmt) == GIMPLE_CALL);
+ return CALL_EXPR;
+ }
+}
+
+
+/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS. */
+
+inline tree
+gimple_label_label (const glabel *gs)
+{
+ return gs->op[0];
+}
+
+
+/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
+ GS. */
+
+inline void
+gimple_label_set_label (glabel *gs, tree label)
+{
+ gs->op[0] = label;
+}
+
+
+/* Return the destination of the unconditional jump GS. */
+
+inline tree
+gimple_goto_dest (const gimple *gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_GOTO);
+ return gimple_op (gs, 0);
+}
+
+
+/* Set DEST to be the destination of the unconditional jump GS. */
+
+inline void
+gimple_goto_set_dest (ggoto *gs, tree dest)
+{
+ gs->op[0] = dest;
+}
+
+
+/* Return the variables declared in the GIMPLE_BIND statement GS. */
+
+inline tree
+gimple_bind_vars (const gbind *bind_stmt)
+{
+ return bind_stmt->vars;
+}
+
+
+/* Set VARS to be the set of variables declared in the GIMPLE_BIND
+ statement GS. */
+
+inline void
+gimple_bind_set_vars (gbind *bind_stmt, tree vars)
+{
+ bind_stmt->vars = vars;
+}
+
+
+/* Append VARS to the set of variables declared in the GIMPLE_BIND
+ statement GS. */
+
+inline void
+gimple_bind_append_vars (gbind *bind_stmt, tree vars)
+{
+ bind_stmt->vars = chainon (bind_stmt->vars, vars);
+}
+
+
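+/* Return a pointer to the GIMPLE sequence contained in the GIMPLE_BIND
+   statement BIND_STMT. */
+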
+inline gimple_seq *
+gimple_bind_body_ptr (gbind *bind_stmt)
+{
+ return &bind_stmt->body;
+}
+
+/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS. */
+
+inline gimple_seq
+gimple_bind_body (const gbind *gs)
+{
+ return *gimple_bind_body_ptr (const_cast <gbind *> (gs));
+}
+
+
+/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
+ statement GS. */
+
+inline void
+gimple_bind_set_body (gbind *bind_stmt, gimple_seq seq)
+{
+ bind_stmt->body = seq;
+}
+
+
+/* Append a statement to the end of a GIMPLE_BIND's body. */
+
+inline void
+gimple_bind_add_stmt (gbind *bind_stmt, gimple *stmt)
+{
+ gimple_seq_add_stmt (&bind_stmt->body, stmt);
+}
+
+
+/* Append a sequence of statements to the end of a GIMPLE_BIND's body. */
+
+inline void
+gimple_bind_add_seq (gbind *bind_stmt, gimple_seq seq)
+{
+ gimple_seq_add_seq (&bind_stmt->body, seq);
+}
+
+
+/* Return the TREE_BLOCK node associated with GIMPLE_BIND statement
+ GS. This is analogous to the BIND_EXPR_BLOCK field in trees. */
+
+inline tree
+gimple_bind_block (const gbind *bind_stmt)
+{
+ return bind_stmt->block;
+}
+
+
+/* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND
+ statement GS. */
+
+inline void
+gimple_bind_set_block (gbind *bind_stmt, tree block)
+{
+ gcc_gimple_checking_assert (block == NULL_TREE
+ || TREE_CODE (block) == BLOCK);
+ bind_stmt->block = block;
+}
+
+
+/* Return the number of input operands for GIMPLE_ASM ASM_STMT. */
+
+inline unsigned
+gimple_asm_ninputs (const gasm *asm_stmt)
+{
+ return asm_stmt->ni;
+}
+
+
+/* Return the number of output operands for GIMPLE_ASM ASM_STMT. */
+
+inline unsigned
+gimple_asm_noutputs (const gasm *asm_stmt)
+{
+ return asm_stmt->no;
+}
+
+
+/* Return the number of clobber operands for GIMPLE_ASM ASM_STMT. */
+
+inline unsigned
+gimple_asm_nclobbers (const gasm *asm_stmt)
+{
+ return asm_stmt->nc;
+}
+
+/* Return the number of label operands for GIMPLE_ASM ASM_STMT. */
+
+inline unsigned
+gimple_asm_nlabels (const gasm *asm_stmt)
+{
+ return asm_stmt->nl;
+}
+
+/* Return input operand INDEX of GIMPLE_ASM ASM_STMT. */
+
+inline tree
+gimple_asm_input_op (const gasm *asm_stmt, unsigned index)
+{
+ gcc_gimple_checking_assert (index < asm_stmt->ni);
+ return asm_stmt->op[index + asm_stmt->no];
+}
+
+/* Set IN_OP to be input operand INDEX in GIMPLE_ASM ASM_STMT. */
+
+inline void
+gimple_asm_set_input_op (gasm *asm_stmt, unsigned index, tree in_op)
+{
+ gcc_gimple_checking_assert (index < asm_stmt->ni
+ && TREE_CODE (in_op) == TREE_LIST);
+ asm_stmt->op[index + asm_stmt->no] = in_op;
+}
+
+
+/* Return output operand INDEX of GIMPLE_ASM ASM_STMT. */
+
+inline tree
+gimple_asm_output_op (const gasm *asm_stmt, unsigned index)
+{
+ gcc_gimple_checking_assert (index < asm_stmt->no);
+ return asm_stmt->op[index];
+}
+
+/* Set OUT_OP to be output operand INDEX in GIMPLE_ASM ASM_STMT. */
+
+inline void
+gimple_asm_set_output_op (gasm *asm_stmt, unsigned index, tree out_op)
+{
+ gcc_gimple_checking_assert (index < asm_stmt->no
+ && TREE_CODE (out_op) == TREE_LIST);
+ asm_stmt->op[index] = out_op;
+}
+
+
+/* Return clobber operand INDEX of GIMPLE_ASM ASM_STMT. */
+
+inline tree
+gimple_asm_clobber_op (const gasm *asm_stmt, unsigned index)
+{
+ gcc_gimple_checking_assert (index < asm_stmt->nc);
+ return asm_stmt->op[index + asm_stmt->ni + asm_stmt->no];
+}
+
+
+/* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM ASM_STMT. */
+
+inline void
+gimple_asm_set_clobber_op (gasm *asm_stmt, unsigned index, tree clobber_op)
+{
+ gcc_gimple_checking_assert (index < asm_stmt->nc
+ && TREE_CODE (clobber_op) == TREE_LIST);
+ asm_stmt->op[index + asm_stmt->ni + asm_stmt->no] = clobber_op;
+}
+
+/* Return label operand INDEX of GIMPLE_ASM ASM_STMT. */
+
+inline tree
+gimple_asm_label_op (const gasm *asm_stmt, unsigned index)
+{
+ gcc_gimple_checking_assert (index < asm_stmt->nl);
+ return asm_stmt->op[index + asm_stmt->no + asm_stmt->ni + asm_stmt->nc];
+}
+
+/* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM ASM_STMT. */
+
+inline void
+gimple_asm_set_label_op (gasm *asm_stmt, unsigned index, tree label_op)
+{
+ gcc_gimple_checking_assert (index < asm_stmt->nl
+ && TREE_CODE (label_op) == TREE_LIST);
+ asm_stmt->op[index + asm_stmt->no + asm_stmt->ni + asm_stmt->nc] = label_op;
+}
+
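+/* Usage sketch (illustrative only): as the index arithmetic above
+   encodes, a gasm's operand vector holds outputs first, then inputs,
+   then clobbers, then labels, and each operand is a TREE_LIST.  To
+   walk the inputs of a valid gasm ASM_STMT:
+
+     for (unsigned i = 0; i < gimple_asm_ninputs (asm_stmt); i++)
+       {
+         tree in = gimple_asm_input_op (asm_stmt, i);
+         ...  (constraint and value live in the TREE_LIST IN)  ...
+       }  */
+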
+/* Return the string representing the assembly instruction in
+ GIMPLE_ASM ASM_STMT. */
+
+inline const char *
+gimple_asm_string (const gasm *asm_stmt)
+{
+ return asm_stmt->string;
+}
+
+
+/* Return true if ASM_STMT is marked volatile. */
+
+inline bool
+gimple_asm_volatile_p (const gasm *asm_stmt)
+{
+ return (asm_stmt->subcode & GF_ASM_VOLATILE) != 0;
+}
+
+
+/* If VOLATILE_P is true, mark asm statement ASM_STMT as volatile. */
+
+inline void
+gimple_asm_set_volatile (gasm *asm_stmt, bool volatile_p)
+{
+ if (volatile_p)
+ asm_stmt->subcode |= GF_ASM_VOLATILE;
+ else
+ asm_stmt->subcode &= ~GF_ASM_VOLATILE;
+}
+
+
+/* Return true if ASM_STMT is marked inline. */
+
+inline bool
+gimple_asm_inline_p (const gasm *asm_stmt)
+{
+ return (asm_stmt->subcode & GF_ASM_INLINE) != 0;
+}
+
+
+/* If INLINE_P is true, mark asm statement ASM_STMT as inline. */
+
+inline void
+gimple_asm_set_inline (gasm *asm_stmt, bool inline_p)
+{
+ if (inline_p)
+ asm_stmt->subcode |= GF_ASM_INLINE;
+ else
+ asm_stmt->subcode &= ~GF_ASM_INLINE;
+}
+
+
+/* If INPUT_P is true, mark asm ASM_STMT as an ASM_INPUT. */
+
+inline void
+gimple_asm_set_input (gasm *asm_stmt, bool input_p)
+{
+ if (input_p)
+ asm_stmt->subcode |= GF_ASM_INPUT;
+ else
+ asm_stmt->subcode &= ~GF_ASM_INPUT;
+}
+
+
+/* Return true if asm ASM_STMT is an ASM_INPUT. */
+
+inline bool
+gimple_asm_input_p (const gasm *asm_stmt)
+{
+ return (asm_stmt->subcode & GF_ASM_INPUT) != 0;
+}
+
+
+/* Return the types handled by GIMPLE_CATCH statement CATCH_STMT. */
+
+inline tree
+gimple_catch_types (const gcatch *catch_stmt)
+{
+ return catch_stmt->types;
+}
+
+
+/* Return a pointer to the types handled by GIMPLE_CATCH statement
+   CATCH_STMT. */
+
+inline tree *
+gimple_catch_types_ptr (gcatch *catch_stmt)
+{
+ return &catch_stmt->types;
+}
+
+
+/* Return a pointer to the GIMPLE sequence representing the body of
+ the handler of GIMPLE_CATCH statement CATCH_STMT. */
+
+inline gimple_seq *
+gimple_catch_handler_ptr (gcatch *catch_stmt)
+{
+ return &catch_stmt->handler;
+}
+
+
+/* Return the GIMPLE sequence representing the body of the handler of
+ GIMPLE_CATCH statement CATCH_STMT. */
+
+inline gimple_seq
+gimple_catch_handler (const gcatch *catch_stmt)
+{
+ return *gimple_catch_handler_ptr (const_cast <gcatch *> (catch_stmt));
+}
+
+
+/* Set T to be the set of types handled by GIMPLE_CATCH CATCH_STMT. */
+
+inline void
+gimple_catch_set_types (gcatch *catch_stmt, tree t)
+{
+ catch_stmt->types = t;
+}
+
+
+/* Set HANDLER to be the body of GIMPLE_CATCH CATCH_STMT. */
+
+inline void
+gimple_catch_set_handler (gcatch *catch_stmt, gimple_seq handler)
+{
+ catch_stmt->handler = handler;
+}
+
+
+/* Return the types handled by GIMPLE_EH_FILTER statement GS. */
+
+inline tree
+gimple_eh_filter_types (const gimple *gs)
+{
+ const geh_filter *eh_filter_stmt = as_a <const geh_filter *> (gs);
+ return eh_filter_stmt->types;
+}
+
+
+/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
+ GS. */
+
+inline tree *
+gimple_eh_filter_types_ptr (gimple *gs)
+{
+ geh_filter *eh_filter_stmt = as_a <geh_filter *> (gs);
+ return &eh_filter_stmt->types;
+}
+
+
+/* Return a pointer to the sequence of statements to execute when the
+   GIMPLE_EH_FILTER statement GS fails. */
+
+inline gimple_seq *
+gimple_eh_filter_failure_ptr (gimple *gs)
+{
+ geh_filter *eh_filter_stmt = as_a <geh_filter *> (gs);
+ return &eh_filter_stmt->failure;
+}
+
+
+/* Return the sequence of statements to execute when the
+   GIMPLE_EH_FILTER statement GS fails. */
+
+inline gimple_seq
+gimple_eh_filter_failure (const gimple *gs)
+{
+ return *gimple_eh_filter_failure_ptr (const_cast <gimple *> (gs));
+}
+
+
+/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER
+ EH_FILTER_STMT. */
+
+inline void
+gimple_eh_filter_set_types (geh_filter *eh_filter_stmt, tree types)
+{
+ eh_filter_stmt->types = types;
+}
+
+
+/* Set FAILURE to be the sequence of statements to execute on failure
+ for GIMPLE_EH_FILTER EH_FILTER_STMT. */
+
+inline void
+gimple_eh_filter_set_failure (geh_filter *eh_filter_stmt,
+ gimple_seq failure)
+{
+ eh_filter_stmt->failure = failure;
+}
+
+/* Get the function decl to be called by the MUST_NOT_THROW region. */
+
+inline tree
+gimple_eh_must_not_throw_fndecl (const geh_mnt *eh_mnt_stmt)
+{
+ return eh_mnt_stmt->fndecl;
+}
+
+/* Set the function decl to be called by GS to DECL. */
+
+inline void
+gimple_eh_must_not_throw_set_fndecl (geh_mnt *eh_mnt_stmt,
+ tree decl)
+{
+ eh_mnt_stmt->fndecl = decl;
+}
+
+/* GIMPLE_EH_ELSE accessors. */
+
+inline gimple_seq *
+gimple_eh_else_n_body_ptr (geh_else *eh_else_stmt)
+{
+ return &eh_else_stmt->n_body;
+}
+
+inline gimple_seq
+gimple_eh_else_n_body (const geh_else *eh_else_stmt)
+{
+ return *gimple_eh_else_n_body_ptr (const_cast <geh_else *> (eh_else_stmt));
+}
+
+inline gimple_seq *
+gimple_eh_else_e_body_ptr (geh_else *eh_else_stmt)
+{
+ return &eh_else_stmt->e_body;
+}
+
+inline gimple_seq
+gimple_eh_else_e_body (const geh_else *eh_else_stmt)
+{
+ return *gimple_eh_else_e_body_ptr (const_cast <geh_else *> (eh_else_stmt));
+}
+
+inline void
+gimple_eh_else_set_n_body (geh_else *eh_else_stmt, gimple_seq seq)
+{
+ eh_else_stmt->n_body = seq;
+}
+
+inline void
+gimple_eh_else_set_e_body (geh_else *eh_else_stmt, gimple_seq seq)
+{
+ eh_else_stmt->e_body = seq;
+}
+
+/* GIMPLE_TRY accessors. */
+
+/* Return the kind of try block represented by GIMPLE_TRY GS. This is
+ either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY. */
+
+inline enum gimple_try_flags
+gimple_try_kind (const gimple *gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_TRY);
+ return (enum gimple_try_flags) (gs->subcode & GIMPLE_TRY_KIND);
+}
+
+
+/* Set the kind of try block represented by GIMPLE_TRY GS. */
+
+inline void
+gimple_try_set_kind (gtry *gs, enum gimple_try_flags kind)
+{
+ gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH
+ || kind == GIMPLE_TRY_FINALLY);
+ if (gimple_try_kind (gs) != kind)
+ gs->subcode = (unsigned int) kind;
+}
+
+
+/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */
+
+inline bool
+gimple_try_catch_is_cleanup (const gimple *gs)
+{
+ gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
+ return (gs->subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
+}
+
+
+/* Return a pointer to the sequence of statements used as the
+ body for GIMPLE_TRY GS. */
+
+inline gimple_seq *
+gimple_try_eval_ptr (gimple *gs)
+{
+ gtry *try_stmt = as_a <gtry *> (gs);
+ return &try_stmt->eval;
+}
+
+
+/* Return the sequence of statements used as the body for GIMPLE_TRY GS. */
+
+inline gimple_seq
+gimple_try_eval (const gimple *gs)
+{
+ return *gimple_try_eval_ptr (const_cast <gimple *> (gs));
+}
+
+
+/* Return a pointer to the sequence of statements used as the cleanup body for
+ GIMPLE_TRY GS. */
+
+inline gimple_seq *
+gimple_try_cleanup_ptr (gimple *gs)
+{
+ gtry *try_stmt = as_a <gtry *> (gs);
+ return &try_stmt->cleanup;
+}
+
+
+/* Return the sequence of statements used as the cleanup body for
+ GIMPLE_TRY GS. */
+
+inline gimple_seq
+gimple_try_cleanup (const gimple *gs)
+{
+ return *gimple_try_cleanup_ptr (const_cast <gimple *> (gs));
+}
+
+
+/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */
+
+inline void
+gimple_try_set_catch_is_cleanup (gtry *g, bool catch_is_cleanup)
+{
+ gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
+ if (catch_is_cleanup)
+ g->subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
+ else
+ g->subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
+}
+
+
+/* Set EVAL to be the sequence of statements to use as the body for
+ GIMPLE_TRY TRY_STMT. */
+
+inline void
+gimple_try_set_eval (gtry *try_stmt, gimple_seq eval)
+{
+ try_stmt->eval = eval;
+}
+
+
+/* Set CLEANUP to be the sequence of statements to use as the cleanup
+ body for GIMPLE_TRY TRY_STMT. */
+
+inline void
+gimple_try_set_cleanup (gtry *try_stmt, gimple_seq cleanup)
+{
+ try_stmt->cleanup = cleanup;
+}
+
+
+/* Return a pointer to the cleanup sequence for cleanup statement GS. */
+
+inline gimple_seq *
+gimple_wce_cleanup_ptr (gimple *gs)
+{
+ gimple_statement_wce *wce_stmt = as_a <gimple_statement_wce *> (gs);
+ return &wce_stmt->cleanup;
+}
+
+
+/* Return the cleanup sequence for cleanup statement GS. */
+
+inline gimple_seq
+gimple_wce_cleanup (gimple *gs)
+{
+ return *gimple_wce_cleanup_ptr (gs);
+}
+
+
+/* Set CLEANUP to be the cleanup sequence for GS. */
+
+inline void
+gimple_wce_set_cleanup (gimple *gs, gimple_seq cleanup)
+{
+ gimple_statement_wce *wce_stmt = as_a <gimple_statement_wce *> (gs);
+ wce_stmt->cleanup = cleanup;
+}
+
+
+/* Return the CLEANUP_EH_ONLY flag for a WCE tuple. */
+
+inline bool
+gimple_wce_cleanup_eh_only (const gimple *gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
+ return gs->subcode != 0;
+}
+
+
+/* Set the CLEANUP_EH_ONLY flag for a WCE tuple. */
+
+inline void
+gimple_wce_set_cleanup_eh_only (gimple *gs, bool eh_only_p)
+{
+ GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
+ gs->subcode = (unsigned int) eh_only_p;
+}
+
+
+/* Return the maximum number of arguments supported by GIMPLE_PHI GS. */
+
+inline unsigned
+gimple_phi_capacity (const gimple *gs)
+{
+ const gphi *phi_stmt = as_a <const gphi *> (gs);
+ return phi_stmt->capacity;
+}
+
+
+/* Return the number of arguments in GIMPLE_PHI GS. This must always
+ be exactly the number of incoming edges for the basic block holding
+ GS. */
+
+inline unsigned
+gimple_phi_num_args (const gimple *gs)
+{
+ const gphi *phi_stmt = as_a <const gphi *> (gs);
+ return phi_stmt->nargs;
+}
+
+
+/* Return the SSA name created by GIMPLE_PHI GS. */
+
+inline tree
+gimple_phi_result (const gphi *gs)
+{
+ return gs->result;
+}
+
+inline tree
+gimple_phi_result (const gimple *gs)
+{
+ const gphi *phi_stmt = as_a <const gphi *> (gs);
+ return gimple_phi_result (phi_stmt);
+}
+
+/* Return a pointer to the SSA name created by GIMPLE_PHI GS. */
+
+inline tree *
+gimple_phi_result_ptr (gphi *gs)
+{
+ return &gs->result;
+}
+
+inline tree *
+gimple_phi_result_ptr (gimple *gs)
+{
+ gphi *phi_stmt = as_a <gphi *> (gs);
+ return gimple_phi_result_ptr (phi_stmt);
+}
+
+/* Set RESULT to be the SSA name created by GIMPLE_PHI PHI. */
+
+inline void
+gimple_phi_set_result (gphi *phi, tree result)
+{
+ phi->result = result;
+ if (result && TREE_CODE (result) == SSA_NAME)
+ SSA_NAME_DEF_STMT (result) = phi;
+}
+
+
+/* Return the PHI argument corresponding to incoming edge INDEX for
+ GIMPLE_PHI GS. */
+
+inline struct phi_arg_d *
+gimple_phi_arg (gphi *gs, unsigned index)
+{
+ gcc_gimple_checking_assert (index < gs->nargs);
+ return &(gs->args[index]);
+}
+
+inline const phi_arg_d *
+gimple_phi_arg (const gphi *gs, unsigned index)
+{
+ gcc_gimple_checking_assert (index < gs->nargs);
+ return &(gs->args[index]);
+}
+
+inline struct phi_arg_d *
+gimple_phi_arg (gimple *gs, unsigned index)
+{
+ gphi *phi_stmt = as_a <gphi *> (gs);
+ return gimple_phi_arg (phi_stmt, index);
+}
+
+/* Set PHIARG to be the argument corresponding to incoming edge INDEX
+ for GIMPLE_PHI PHI. */
+
+inline void
+gimple_phi_set_arg (gphi *phi, unsigned index, struct phi_arg_d * phiarg)
+{
+ gcc_gimple_checking_assert (index < phi->nargs);
+ phi->args[index] = *phiarg;
+}
+
+/* Return the PHI nodes for basic block BB, or NULL if there are no
+ PHI nodes. */
+
+inline gimple_seq
+phi_nodes (const_basic_block bb)
+{
+ gcc_checking_assert (!(bb->flags & BB_RTL));
+ return bb->il.gimple.phi_nodes;
+}
+
+/* Return a pointer to the PHI nodes for basic block BB. */
+
+inline gimple_seq *
+phi_nodes_ptr (basic_block bb)
+{
+ gcc_checking_assert (!(bb->flags & BB_RTL));
+ return &bb->il.gimple.phi_nodes;
+}
+
+/* Return the tree operand for argument INDEX of PHI node GS. */
+
+inline tree
+gimple_phi_arg_def (const gphi *gs, size_t index)
+{
+ return gimple_phi_arg (gs, index)->def;
+}
+
+inline tree
+gimple_phi_arg_def (gimple *gs, size_t index)
+{
+ return gimple_phi_arg (gs, index)->def;
+}
+
+
+/* Return a pointer to the tree operand for argument INDEX of phi node PHI. */
+
+inline tree *
+gimple_phi_arg_def_ptr (gphi *phi, size_t index)
+{
+ return &gimple_phi_arg (phi, index)->def;
+}
+
+/* Return the edge associated with argument I of phi node PHI. */
+
+inline edge
+gimple_phi_arg_edge (const gphi *phi, size_t i)
+{
+ return EDGE_PRED (gimple_bb (phi), i);
+}
+
+/* Return the source location of gimple argument I of phi node PHI. */
+
+inline location_t
+gimple_phi_arg_location (const gphi *phi, size_t i)
+{
+ return gimple_phi_arg (phi, i)->locus;
+}
+
+/* Return the source location of the argument on edge E of phi node PHI. */
+
+inline location_t
+gimple_phi_arg_location_from_edge (gphi *phi, edge e)
+{
+ return gimple_phi_arg (phi, e->dest_idx)->locus;
+}
+
+/* Set the source location of gimple argument I of phi node PHI to LOC. */
+
+inline void
+gimple_phi_arg_set_location (gphi *phi, size_t i, location_t loc)
+{
+ gimple_phi_arg (phi, i)->locus = loc;
+}
+
+/* Return address of source location of gimple argument I of phi node PHI. */
+
+inline location_t *
+gimple_phi_arg_location_ptr (gphi *phi, size_t i)
+{
+ return &gimple_phi_arg (phi, i)->locus;
+}
+
+/* Return TRUE if argument I of phi node PHI has a location record. */
+
+inline bool
+gimple_phi_arg_has_location (const gphi *phi, size_t i)
+{
+ return gimple_phi_arg_location (phi, i) != UNKNOWN_LOCATION;
+}
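+
+/* Example (illustrative): visit every argument of PHI together with its
+   incoming edge; PHI is assumed to be a valid gphi, e.g. obtained from
+   a gphi_iterator:
+
+       for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
+         {
+           tree arg = gimple_phi_arg_def (phi, i);
+           edge e = gimple_phi_arg_edge (phi, i);
+           ...
+         }  */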
+
+/* Return the number of arguments that can be accessed by gimple_arg. */
+
+inline unsigned
+gimple_num_args (const gimple *gs)
+{
+ if (auto phi = dyn_cast<const gphi *> (gs))
+ return gimple_phi_num_args (phi);
+ if (auto call = dyn_cast<const gcall *> (gs))
+ return gimple_call_num_args (call);
+ return gimple_num_ops (as_a <const gassign *> (gs)) - 1;
+}
+
+/* GS must be an assignment, a call, or a PHI.
+ If it's an assignment, return rhs operand I.
+ If it's a call, return function argument I.
+ If it's a PHI, return the value of PHI argument I. */
+
+inline tree
+gimple_arg (const gimple *gs, unsigned int i)
+{
+ if (auto phi = dyn_cast<const gphi *> (gs))
+ return gimple_phi_arg_def (phi, i);
+ if (auto call = dyn_cast<const gcall *> (gs))
+ return gimple_call_arg (call, i);
+ return gimple_op (as_a <const gassign *> (gs), i + 1);
+}
+
+/* Return a pointer to gimple_arg (GS, I). */
+
+inline tree *
+gimple_arg_ptr (gimple *gs, unsigned int i)
+{
+ if (auto phi = dyn_cast<gphi *> (gs))
+ return gimple_phi_arg_def_ptr (phi, i);
+ if (auto call = dyn_cast<gcall *> (gs))
+ return gimple_call_arg_ptr (call, i);
+ return gimple_op_ptr (as_a <gassign *> (gs), i + 1);
+}
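+
+/* Example (illustrative): together with gimple_num_args, gimple_arg
+   gives uniform access to the inputs of an assignment, call or PHI;
+   process stands for a hypothetical consumer:
+
+       for (unsigned i = 0; i < gimple_num_args (stmt); i++)
+         process (gimple_arg (stmt, i));  */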
+
+/* Return the region number for GIMPLE_RESX RESX_STMT. */
+
+inline int
+gimple_resx_region (const gresx *resx_stmt)
+{
+ return resx_stmt->region;
+}
+
+/* Set REGION to be the region number for GIMPLE_RESX RESX_STMT. */
+
+inline void
+gimple_resx_set_region (gresx *resx_stmt, int region)
+{
+ resx_stmt->region = region;
+}
+
+/* Return the region number for GIMPLE_EH_DISPATCH EH_DISPATCH_STMT. */
+
+inline int
+gimple_eh_dispatch_region (const geh_dispatch *eh_dispatch_stmt)
+{
+ return eh_dispatch_stmt->region;
+}
+
+/* Set REGION to be the region number for GIMPLE_EH_DISPATCH
+ EH_DISPATCH_STMT. */
+
+inline void
+gimple_eh_dispatch_set_region (geh_dispatch *eh_dispatch_stmt, int region)
+{
+ eh_dispatch_stmt->region = region;
+}
+
+/* Return the number of labels associated with the switch statement GS. */
+
+inline unsigned
+gimple_switch_num_labels (const gswitch *gs)
+{
+ unsigned num_ops;
+ GIMPLE_CHECK (gs, GIMPLE_SWITCH);
+ num_ops = gimple_num_ops (gs);
+ gcc_gimple_checking_assert (num_ops > 1);
+ return num_ops - 1;
+}
+
+
+/* Set NLABELS to be the number of labels for the switch statement GS. */
+
+inline void
+gimple_switch_set_num_labels (gswitch *g, unsigned nlabels)
+{
+ GIMPLE_CHECK (g, GIMPLE_SWITCH);
+ gimple_set_num_ops (g, nlabels + 1);
+}
+
+
+/* Return the index variable used by the switch statement GS. */
+
+inline tree
+gimple_switch_index (const gswitch *gs)
+{
+ return gs->op[0];
+}
+
+
+/* Return a pointer to the index variable for the switch statement GS. */
+
+inline tree *
+gimple_switch_index_ptr (gswitch *gs)
+{
+ return &gs->op[0];
+}
+
+
+/* Set INDEX to be the index variable for switch statement GS. */
+
+inline void
+gimple_switch_set_index (gswitch *gs, tree index)
+{
+ gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
+ gs->op[0] = index;
+}
+
+
+/* Return the label numbered INDEX.  The default label is at index 0,
+   followed by the case labels of the switch statement. */
+
+inline tree
+gimple_switch_label (const gswitch *gs, unsigned index)
+{
+ gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1);
+ return gs->op[index + 1];
+}
+
+/* Set the label numbered INDEX to LABEL.  Index 0 is always the default
+   label. */
+
+inline void
+gimple_switch_set_label (gswitch *gs, unsigned index, tree label)
+{
+ gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1
+ && (label == NULL_TREE
+ || TREE_CODE (label) == CASE_LABEL_EXPR));
+ gs->op[index + 1] = label;
+}
+
+/* Return the default label for a switch statement. */
+
+inline tree
+gimple_switch_default_label (const gswitch *gs)
+{
+ tree label = gimple_switch_label (gs, 0);
+ gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
+ return label;
+}
+
+/* Set the default label for a switch statement. */
+
+inline void
+gimple_switch_set_default_label (gswitch *gs, tree label)
+{
+ gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
+ gimple_switch_set_label (gs, 0, label);
+}
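+
+/* Example (illustrative): walk the case labels of switch statement GS,
+   skipping the default label at index 0:
+
+       for (unsigned i = 1; i < gimple_switch_num_labels (gs); i++)
+         {
+           tree label = gimple_switch_label (gs, i);
+           tree low = CASE_LOW (label);
+           tree high = CASE_HIGH (label);
+           ...
+         }  */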
+
+/* Return true if GS is a GIMPLE_DEBUG statement. */
+
+inline bool
+is_gimple_debug (const gimple *gs)
+{
+ return gimple_code (gs) == GIMPLE_DEBUG;
+}
+
+
+/* Return the first nondebug statement in GIMPLE sequence S. */
+
+inline gimple *
+gimple_seq_first_nondebug_stmt (gimple_seq s)
+{
+ gimple_seq_node n = gimple_seq_first (s);
+ while (n && is_gimple_debug (n))
+ n = n->next;
+ return n;
+}
+
+
+/* Return the last nondebug statement in GIMPLE sequence S. */
+
+inline gimple *
+gimple_seq_last_nondebug_stmt (gimple_seq s)
+{
+ gimple_seq_node n;
+ for (n = gimple_seq_last (s);
+ n && is_gimple_debug (n);
+ n = n->prev)
+ if (n == s)
+ return NULL;
+ return n;
+}
+
+
+/* Return true if S is a GIMPLE_DEBUG BIND statement. */
+
+inline bool
+gimple_debug_bind_p (const gimple *s)
+{
+ if (is_gimple_debug (s))
+ return s->subcode == GIMPLE_DEBUG_BIND;
+
+ return false;
+}
+
+/* Return the variable bound in a GIMPLE_DEBUG bind statement. */
+
+inline tree
+gimple_debug_bind_get_var (const gimple *dbg)
+{
+ GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
+ gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
+ return gimple_op (dbg, 0);
+}
+
+/* Return the value bound to the variable in a GIMPLE_DEBUG bind
+ statement. */
+
+inline tree
+gimple_debug_bind_get_value (const gimple *dbg)
+{
+ GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
+ gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
+ return gimple_op (dbg, 1);
+}
+
+/* Return a pointer to the value bound to the variable in a
+ GIMPLE_DEBUG bind statement. */
+
+inline tree *
+gimple_debug_bind_get_value_ptr (gimple *dbg)
+{
+ GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
+ gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
+ return gimple_op_ptr (dbg, 1);
+}
+
+/* Set the variable bound in a GIMPLE_DEBUG bind statement. */
+
+inline void
+gimple_debug_bind_set_var (gimple *dbg, tree var)
+{
+ GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
+ gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
+ gimple_set_op (dbg, 0, var);
+}
+
+/* Set the value bound to the variable in a GIMPLE_DEBUG bind
+ statement. */
+
+inline void
+gimple_debug_bind_set_value (gimple *dbg, tree value)
+{
+ GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
+ gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
+ gimple_set_op (dbg, 1, value);
+}
+
+/* The second operand of a GIMPLE_DEBUG_BIND, when the value was
+ optimized away. */
+#define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */
+
+/* Remove the value bound to the variable in a GIMPLE_DEBUG bind
+ statement. */
+
+inline void
+gimple_debug_bind_reset_value (gimple *dbg)
+{
+ GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
+ gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
+ gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE);
+}
+
+/* Return true if the GIMPLE_DEBUG bind statement is bound to a
+ value. */
+
+inline bool
+gimple_debug_bind_has_value_p (gimple *dbg)
+{
+ GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
+ gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
+ return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE;
+}
+
+#undef GIMPLE_DEBUG_BIND_NOVALUE
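+
+/* Example (illustrative): inspect a debug bind while guarding against
+   a value that has been optimized away; use stands for a hypothetical
+   consumer:
+
+       if (gimple_debug_bind_p (stmt))
+         {
+           tree var = gimple_debug_bind_get_var (stmt);
+           if (gimple_debug_bind_has_value_p (stmt))
+             use (var, gimple_debug_bind_get_value (stmt));
+         }  */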
+
+/* Return true if S is a GIMPLE_DEBUG SOURCE BIND statement. */
+
+inline bool
+gimple_debug_source_bind_p (const gimple *s)
+{
+ if (is_gimple_debug (s))
+ return s->subcode == GIMPLE_DEBUG_SOURCE_BIND;
+
+ return false;
+}
+
+/* Return the variable bound in a GIMPLE_DEBUG source bind statement. */
+
+inline tree
+gimple_debug_source_bind_get_var (const gimple *dbg)
+{
+ GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
+ gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
+ return gimple_op (dbg, 0);
+}
+
+/* Return the value bound to the variable in a GIMPLE_DEBUG source bind
+ statement. */
+
+inline tree
+gimple_debug_source_bind_get_value (const gimple *dbg)
+{
+ GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
+ gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
+ return gimple_op (dbg, 1);
+}
+
+/* Return a pointer to the value bound to the variable in a
+ GIMPLE_DEBUG source bind statement. */
+
+inline tree *
+gimple_debug_source_bind_get_value_ptr (gimple *dbg)
+{
+ GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
+ gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
+ return gimple_op_ptr (dbg, 1);
+}
+
+/* Set the variable bound in a GIMPLE_DEBUG source bind statement. */
+
+inline void
+gimple_debug_source_bind_set_var (gimple *dbg, tree var)
+{
+ GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
+ gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
+ gimple_set_op (dbg, 0, var);
+}
+
+/* Set the value bound to the variable in a GIMPLE_DEBUG source bind
+ statement. */
+
+inline void
+gimple_debug_source_bind_set_value (gimple *dbg, tree value)
+{
+ GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
+ gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
+ gimple_set_op (dbg, 1, value);
+}
+
+/* Return true if S is a GIMPLE_DEBUG BEGIN_STMT statement. */
+
+inline bool
+gimple_debug_begin_stmt_p (const gimple *s)
+{
+ if (is_gimple_debug (s))
+ return s->subcode == GIMPLE_DEBUG_BEGIN_STMT;
+
+ return false;
+}
+
+/* Return true if S is a GIMPLE_DEBUG INLINE_ENTRY statement. */
+
+inline bool
+gimple_debug_inline_entry_p (const gimple *s)
+{
+ if (is_gimple_debug (s))
+ return s->subcode == GIMPLE_DEBUG_INLINE_ENTRY;
+
+ return false;
+}
+
+/* Return true if S is a GIMPLE_DEBUG non-binding marker statement. */
+
+inline bool
+gimple_debug_nonbind_marker_p (const gimple *s)
+{
+ if (is_gimple_debug (s))
+ return s->subcode == GIMPLE_DEBUG_BEGIN_STMT
+ || s->subcode == GIMPLE_DEBUG_INLINE_ENTRY;
+
+ return false;
+}
+
+/* Return the line number for statement STMT, or -1 if we have no line
+   number information for it. */
+inline int
+get_lineno (const gimple *stmt)
+{
+ location_t loc;
+
+ if (!stmt)
+ return -1;
+
+ loc = gimple_location (stmt);
+ if (loc == UNKNOWN_LOCATION)
+ return -1;
+
+ return LOCATION_LINE (loc);
+}
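+
+/* Example (illustrative): get_lineno folds the null-statement and
+   unknown-location checks, so diagnostic code only needs one test;
+   this assumes dump_file is an open dump stream:
+
+       int line = get_lineno (stmt);
+       if (line != -1)
+         fprintf (dump_file, "at line %d\n", line);  */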
+
+/* Return a pointer to the body for the OMP statement GS. */
+
+inline gimple_seq *
+gimple_omp_body_ptr (gimple *gs)
+{
+ return &static_cast <gimple_statement_omp *> (gs)->body;
+}
+
+/* Return the body for the OMP statement GS. */
+
+inline gimple_seq
+gimple_omp_body (const gimple *gs)
+{
+ return *gimple_omp_body_ptr (const_cast <gimple *> (gs));
+}
+
+/* Set BODY to be the body for the OMP statement GS. */
+
+inline void
+gimple_omp_set_body (gimple *gs, gimple_seq body)
+{
+ static_cast <gimple_statement_omp *> (gs)->body = body;
+}
+
+
+/* Return the name associated with OMP_CRITICAL statement CRIT_STMT. */
+
+inline tree
+gimple_omp_critical_name (const gomp_critical *crit_stmt)
+{
+ return crit_stmt->name;
+}
+
+
+/* Return a pointer to the name associated with OMP critical statement
+ CRIT_STMT. */
+
+inline tree *
+gimple_omp_critical_name_ptr (gomp_critical *crit_stmt)
+{
+ return &crit_stmt->name;
+}
+
+
+/* Set NAME to be the name associated with OMP critical statement
+ CRIT_STMT. */
+
+inline void
+gimple_omp_critical_set_name (gomp_critical *crit_stmt, tree name)
+{
+ crit_stmt->name = name;
+}
+
+
+/* Return the clauses associated with OMP_CRITICAL statement CRIT_STMT. */
+
+inline tree
+gimple_omp_critical_clauses (const gomp_critical *crit_stmt)
+{
+ return crit_stmt->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP critical statement
+ CRIT_STMT. */
+
+inline tree *
+gimple_omp_critical_clauses_ptr (gomp_critical *crit_stmt)
+{
+ return &crit_stmt->clauses;
+}
+
+
+/* Set CLAUSES to be the clauses associated with OMP critical statement
+ CRIT_STMT. */
+
+inline void
+gimple_omp_critical_set_clauses (gomp_critical *crit_stmt, tree clauses)
+{
+ crit_stmt->clauses = clauses;
+}
+
+
+/* Return the clauses associated with OMP_ORDERED statement ORD_STMT. */
+
+inline tree
+gimple_omp_ordered_clauses (const gomp_ordered *ord_stmt)
+{
+ return ord_stmt->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP ordered statement
+ ORD_STMT. */
+
+inline tree *
+gimple_omp_ordered_clauses_ptr (gomp_ordered *ord_stmt)
+{
+ return &ord_stmt->clauses;
+}
+
+
+/* Set CLAUSES to be the clauses associated with OMP ordered statement
+ ORD_STMT. */
+
+inline void
+gimple_omp_ordered_set_clauses (gomp_ordered *ord_stmt, tree clauses)
+{
+ ord_stmt->clauses = clauses;
+}
+
+
+/* Return the clauses associated with OMP_SCAN statement SCAN_STMT. */
+
+inline tree
+gimple_omp_scan_clauses (const gomp_scan *scan_stmt)
+{
+ return scan_stmt->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP scan statement
+   SCAN_STMT. */
+
+inline tree *
+gimple_omp_scan_clauses_ptr (gomp_scan *scan_stmt)
+{
+ return &scan_stmt->clauses;
+}
+
+
+/* Set CLAUSES to be the clauses associated with OMP scan statement
+   SCAN_STMT. */
+
+inline void
+gimple_omp_scan_set_clauses (gomp_scan *scan_stmt, tree clauses)
+{
+ scan_stmt->clauses = clauses;
+}
+
+
+/* Return the clauses associated with OMP_TASKGROUP statement GS. */
+
+inline tree
+gimple_omp_taskgroup_clauses (const gimple *gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_OMP_TASKGROUP);
+ return
+ static_cast <const gimple_statement_omp_single_layout *> (gs)->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP taskgroup statement
+ GS. */
+
+inline tree *
+gimple_omp_taskgroup_clauses_ptr (gimple *gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_OMP_TASKGROUP);
+ return &static_cast <gimple_statement_omp_single_layout *> (gs)->clauses;
+}
+
+
+/* Set CLAUSES to be the clauses associated with OMP taskgroup statement
+ GS. */
+
+inline void
+gimple_omp_taskgroup_set_clauses (gimple *gs, tree clauses)
+{
+ GIMPLE_CHECK (gs, GIMPLE_OMP_TASKGROUP);
+ static_cast <gimple_statement_omp_single_layout *> (gs)->clauses
+ = clauses;
+}
+
+
+/* Return the clauses associated with OMP_MASKED statement GS. */
+
+inline tree
+gimple_omp_masked_clauses (const gimple *gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_OMP_MASKED);
+ return
+ static_cast <const gimple_statement_omp_single_layout *> (gs)->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP masked statement
+ GS. */
+
+inline tree *
+gimple_omp_masked_clauses_ptr (gimple *gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_OMP_MASKED);
+ return &static_cast <gimple_statement_omp_single_layout *> (gs)->clauses;
+}
+
+
+/* Set CLAUSES to be the clauses associated with OMP masked statement
+ GS. */
+
+inline void
+gimple_omp_masked_set_clauses (gimple *gs, tree clauses)
+{
+ GIMPLE_CHECK (gs, GIMPLE_OMP_MASKED);
+ static_cast <gimple_statement_omp_single_layout *> (gs)->clauses
+ = clauses;
+}
+
+
+/* Return the clauses associated with OMP_SCOPE statement GS. */
+
+inline tree
+gimple_omp_scope_clauses (const gimple *gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_OMP_SCOPE);
+ return
+ static_cast <const gimple_statement_omp_single_layout *> (gs)->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP scope statement
+ GS. */
+
+inline tree *
+gimple_omp_scope_clauses_ptr (gimple *gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_OMP_SCOPE);
+ return &static_cast <gimple_statement_omp_single_layout *> (gs)->clauses;
+}
+
+
+/* Set CLAUSES to be the clauses associated with OMP scope statement
+ GS. */
+
+inline void
+gimple_omp_scope_set_clauses (gimple *gs, tree clauses)
+{
+ GIMPLE_CHECK (gs, GIMPLE_OMP_SCOPE);
+ static_cast <gimple_statement_omp_single_layout *> (gs)->clauses
+ = clauses;
+}
+
+
+/* Return the kind of the OMP_FOR statement G. */
+
+inline int
+gimple_omp_for_kind (const gimple *g)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
+ return (gimple_omp_subcode (g) & GF_OMP_FOR_KIND_MASK);
+}
+
+
+/* Set the kind of the OMP_FOR statement G. */
+
+inline void
+gimple_omp_for_set_kind (gomp_for *g, int kind)
+{
+ g->subcode = (g->subcode & ~GF_OMP_FOR_KIND_MASK)
+ | (kind & GF_OMP_FOR_KIND_MASK);
+}
+
+
+/* Return true if OMP_FOR statement G has the
+ GF_OMP_FOR_COMBINED flag set. */
+
+inline bool
+gimple_omp_for_combined_p (const gimple *g)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
+ return (gimple_omp_subcode (g) & GF_OMP_FOR_COMBINED) != 0;
+}
+
+
+/* Set the GF_OMP_FOR_COMBINED field in the OMP_FOR statement G depending on
+ the boolean value of COMBINED_P. */
+
+inline void
+gimple_omp_for_set_combined_p (gomp_for *g, bool combined_p)
+{
+ if (combined_p)
+ g->subcode |= GF_OMP_FOR_COMBINED;
+ else
+ g->subcode &= ~GF_OMP_FOR_COMBINED;
+}
+
+
+/* Return true if the OMP_FOR statement G has the
+ GF_OMP_FOR_COMBINED_INTO flag set. */
+
+inline bool
+gimple_omp_for_combined_into_p (const gimple *g)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
+ return (gimple_omp_subcode (g) & GF_OMP_FOR_COMBINED_INTO) != 0;
+}
+
+
+/* Set the GF_OMP_FOR_COMBINED_INTO field in the OMP_FOR statement G depending
+ on the boolean value of COMBINED_P. */
+
+inline void
+gimple_omp_for_set_combined_into_p (gomp_for *g, bool combined_p)
+{
+ if (combined_p)
+ g->subcode |= GF_OMP_FOR_COMBINED_INTO;
+ else
+ g->subcode &= ~GF_OMP_FOR_COMBINED_INTO;
+}
+
+
+/* Return the clauses associated with the OMP_FOR statement GS. */
+
+inline tree
+gimple_omp_for_clauses (const gimple *gs)
+{
+ const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
+ return omp_for_stmt->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with the OMP_FOR statement
+ GS. */
+
+inline tree *
+gimple_omp_for_clauses_ptr (gimple *gs)
+{
+ gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
+ return &omp_for_stmt->clauses;
+}
+
+
+/* Set CLAUSES to be the list of clauses associated with the OMP_FOR statement
+ GS. */
+
+inline void
+gimple_omp_for_set_clauses (gimple *gs, tree clauses)
+{
+ gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
+ omp_for_stmt->clauses = clauses;
+}
+
+
+/* Get the collapse count of the OMP_FOR statement GS. */
+
+inline size_t
+gimple_omp_for_collapse (const gimple *gs)
+{
+ const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
+ return omp_for_stmt->collapse;
+}
+
+
+/* Return the condition code associated with the OMP_FOR statement GS. */
+
+inline enum tree_code
+gimple_omp_for_cond (const gimple *gs, size_t i)
+{
+ const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return omp_for_stmt->iter[i].cond;
+}
+
+
+/* Set COND to be the condition code for the OMP_FOR statement GS. */
+
+inline void
+gimple_omp_for_set_cond (gimple *gs, size_t i, enum tree_code cond)
+{
+ gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
+ gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison
+ && i < omp_for_stmt->collapse);
+ omp_for_stmt->iter[i].cond = cond;
+}
+
+
+/* Return the index variable for the OMP_FOR statement GS. */
+
+inline tree
+gimple_omp_for_index (const gimple *gs, size_t i)
+{
+ const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return omp_for_stmt->iter[i].index;
+}
+
+
+/* Return a pointer to the index variable for the OMP_FOR statement GS. */
+
+inline tree *
+gimple_omp_for_index_ptr (gimple *gs, size_t i)
+{
+ gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return &omp_for_stmt->iter[i].index;
+}
+
+
+/* Set INDEX to be the index variable for the OMP_FOR statement GS. */
+
+inline void
+gimple_omp_for_set_index (gimple *gs, size_t i, tree index)
+{
+ gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ omp_for_stmt->iter[i].index = index;
+}
+
+
+/* Return the initial value for the OMP_FOR statement GS. */
+
+inline tree
+gimple_omp_for_initial (const gimple *gs, size_t i)
+{
+ const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return omp_for_stmt->iter[i].initial;
+}
+
+
+/* Return a pointer to the initial value for the OMP_FOR statement GS. */
+
+inline tree *
+gimple_omp_for_initial_ptr (gimple *gs, size_t i)
+{
+ gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return &omp_for_stmt->iter[i].initial;
+}
+
+
+/* Set INITIAL to be the initial value for the OMP_FOR statement GS. */
+
+inline void
+gimple_omp_for_set_initial (gimple *gs, size_t i, tree initial)
+{
+ gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ omp_for_stmt->iter[i].initial = initial;
+}
+
+
+/* Return the final value for the OMP_FOR statement GS. */
+
+inline tree
+gimple_omp_for_final (const gimple *gs, size_t i)
+{
+ const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return omp_for_stmt->iter[i].final;
+}
+
+
+/* Return a pointer to the final value for the OMP_FOR statement GS. */
+
+inline tree *
+gimple_omp_for_final_ptr (gimple *gs, size_t i)
+{
+ gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return &omp_for_stmt->iter[i].final;
+}
+
+
+/* Set FINAL to be the final value for the OMP_FOR statement GS. */
+
+inline void
+gimple_omp_for_set_final (gimple *gs, size_t i, tree final)
+{
+ gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ omp_for_stmt->iter[i].final = final;
+}
+
+
+/* Return the increment value for the OMP_FOR statement GS. */
+
+inline tree
+gimple_omp_for_incr (const gimple *gs, size_t i)
+{
+ const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return omp_for_stmt->iter[i].incr;
+}
+
+
+/* Return a pointer to the increment value for the OMP_FOR statement GS. */
+
+inline tree *
+gimple_omp_for_incr_ptr (gimple *gs, size_t i)
+{
+ gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ return &omp_for_stmt->iter[i].incr;
+}
+
+
+/* Set INCR to be the increment value for the OMP_FOR statement GS. */
+
+inline void
+gimple_omp_for_set_incr (gimple *gs, size_t i, tree incr)
+{
+ gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
+ gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
+ omp_for_stmt->iter[i].incr = incr;
+}
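+
+/* Example (illustrative): read back the complete iteration descriptor
+   of every collapsed dimension of the OMP_FOR statement GS:
+
+       for (size_t i = 0; i < gimple_omp_for_collapse (gs); i++)
+         {
+           tree iv = gimple_omp_for_index (gs, i);
+           tree init = gimple_omp_for_initial (gs, i);
+           tree fini = gimple_omp_for_final (gs, i);
+           tree step = gimple_omp_for_incr (gs, i);
+           enum tree_code cond = gimple_omp_for_cond (gs, i);
+           ...
+         }  */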
+
+
+/* Return a pointer to the sequence of statements to execute before the OMP_FOR
+ statement GS starts. */
+
+inline gimple_seq *
+gimple_omp_for_pre_body_ptr (gimple *gs)
+{
+ gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
+ return &omp_for_stmt->pre_body;
+}
+
+
+/* Return the sequence of statements to execute before the OMP_FOR
+ statement GS starts. */
+
+inline gimple_seq
+gimple_omp_for_pre_body (const gimple *gs)
+{
+ return *gimple_omp_for_pre_body_ptr (const_cast <gimple *> (gs));
+}
+
+
+/* Set PRE_BODY to be the sequence of statements to execute before the
+ OMP_FOR statement GS starts. */
+
+inline void
+gimple_omp_for_set_pre_body (gimple *gs, gimple_seq pre_body)
+{
+ gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
+ omp_for_stmt->pre_body = pre_body;
+}
+
+/* Return the clauses associated with OMP_PARALLEL GS. */
+
+inline tree
+gimple_omp_parallel_clauses (const gimple *gs)
+{
+ const gomp_parallel *omp_parallel_stmt = as_a <const gomp_parallel *> (gs);
+ return omp_parallel_stmt->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP_PARALLEL_STMT. */
+
+inline tree *
+gimple_omp_parallel_clauses_ptr (gomp_parallel *omp_parallel_stmt)
+{
+ return &omp_parallel_stmt->clauses;
+}
+
+
+/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL_STMT. */
+
+inline void
+gimple_omp_parallel_set_clauses (gomp_parallel *omp_parallel_stmt,
+ tree clauses)
+{
+ omp_parallel_stmt->clauses = clauses;
+}
+
+
+/* Return the child function used to hold the body of OMP_PARALLEL_STMT. */
+
+inline tree
+gimple_omp_parallel_child_fn (const gomp_parallel *omp_parallel_stmt)
+{
+ return omp_parallel_stmt->child_fn;
+}
+
+/* Return a pointer to the child function used to hold the body of
+ OMP_PARALLEL_STMT. */
+
+inline tree *
+gimple_omp_parallel_child_fn_ptr (gomp_parallel *omp_parallel_stmt)
+{
+ return &omp_parallel_stmt->child_fn;
+}
+
+
+/* Set CHILD_FN to be the child function for OMP_PARALLEL_STMT. */
+
+inline void
+gimple_omp_parallel_set_child_fn (gomp_parallel *omp_parallel_stmt,
+ tree child_fn)
+{
+ omp_parallel_stmt->child_fn = child_fn;
+}
+
+
+/* Return the artificial argument used to send variables and values
+   from the parent to the child threads in OMP_PARALLEL_STMT. */
+
+inline tree
+gimple_omp_parallel_data_arg (const gomp_parallel *omp_parallel_stmt)
+{
+ return omp_parallel_stmt->data_arg;
+}
+
+
+/* Return a pointer to the data argument for OMP_PARALLEL_STMT. */
+
+inline tree *
+gimple_omp_parallel_data_arg_ptr (gomp_parallel *omp_parallel_stmt)
+{
+ return &omp_parallel_stmt->data_arg;
+}
+
+
+/* Set DATA_ARG to be the data argument for OMP_PARALLEL_STMT. */
+
+inline void
+gimple_omp_parallel_set_data_arg (gomp_parallel *omp_parallel_stmt,
+ tree data_arg)
+{
+ omp_parallel_stmt->data_arg = data_arg;
+}
+
+/* Return the clauses associated with OMP_TASK GS. */
+
+inline tree
+gimple_omp_task_clauses (const gimple *gs)
+{
+ const gomp_task *omp_task_stmt = as_a <const gomp_task *> (gs);
+ return omp_task_stmt->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP_TASK GS. */
+
+inline tree *
+gimple_omp_task_clauses_ptr (gimple *gs)
+{
+ gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
+ return &omp_task_stmt->clauses;
+}
+
+
+/* Set CLAUSES to be the list of clauses associated with OMP_TASK
+ GS. */
+
+inline void
+gimple_omp_task_set_clauses (gimple *gs, tree clauses)
+{
+ gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
+ omp_task_stmt->clauses = clauses;
+}
+
+
+/* Return true if OMP task statement G has the
+ GF_OMP_TASK_TASKLOOP flag set. */
+
+inline bool
+gimple_omp_task_taskloop_p (const gimple *g)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_TASK);
+ return (gimple_omp_subcode (g) & GF_OMP_TASK_TASKLOOP) != 0;
+}
+
+
+/* Set the GF_OMP_TASK_TASKLOOP field in G depending on the boolean
+ value of TASKLOOP_P. */
+
+inline void
+gimple_omp_task_set_taskloop_p (gimple *g, bool taskloop_p)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_TASK);
+ if (taskloop_p)
+ g->subcode |= GF_OMP_TASK_TASKLOOP;
+ else
+ g->subcode &= ~GF_OMP_TASK_TASKLOOP;
+}
+
+
+/* Return true if OMP task statement G has the
+ GF_OMP_TASK_TASKWAIT flag set. */
+
+inline bool
+gimple_omp_task_taskwait_p (const gimple *g)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_TASK);
+ return (gimple_omp_subcode (g) & GF_OMP_TASK_TASKWAIT) != 0;
+}
+
+
+/* Set the GF_OMP_TASK_TASKWAIT field in G depending on the boolean
+ value of TASKWAIT_P. */
+
+inline void
+gimple_omp_task_set_taskwait_p (gimple *g, bool taskwait_p)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_TASK);
+ if (taskwait_p)
+ g->subcode |= GF_OMP_TASK_TASKWAIT;
+ else
+ g->subcode &= ~GF_OMP_TASK_TASKWAIT;
+}
+
+
+/* Return the child function used to hold the body of OMP_TASK GS. */
+
+inline tree
+gimple_omp_task_child_fn (const gimple *gs)
+{
+ const gomp_task *omp_task_stmt = as_a <const gomp_task *> (gs);
+ return omp_task_stmt->child_fn;
+}
+
+/* Return a pointer to the child function used to hold the body of
+ OMP_TASK GS. */
+
+inline tree *
+gimple_omp_task_child_fn_ptr (gimple *gs)
+{
+ gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
+ return &omp_task_stmt->child_fn;
+}
+
+
+/* Set CHILD_FN to be the child function for OMP_TASK GS. */
+
+inline void
+gimple_omp_task_set_child_fn (gimple *gs, tree child_fn)
+{
+ gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
+ omp_task_stmt->child_fn = child_fn;
+}
+
+
+/* Return the artificial argument used to send variables and values
+   from the parent to the child threads in OMP_TASK GS. */
+
+inline tree
+gimple_omp_task_data_arg (const gimple *gs)
+{
+ const gomp_task *omp_task_stmt = as_a <const gomp_task *> (gs);
+ return omp_task_stmt->data_arg;
+}
+
+
+/* Return a pointer to the data argument for OMP_TASK GS. */
+
+inline tree *
+gimple_omp_task_data_arg_ptr (gimple *gs)
+{
+ gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
+ return &omp_task_stmt->data_arg;
+}
+
+
+/* Set DATA_ARG to be the data argument for OMP_TASK GS. */
+
+inline void
+gimple_omp_task_set_data_arg (gimple *gs, tree data_arg)
+{
+ gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
+ omp_task_stmt->data_arg = data_arg;
+}
+
+
+/* Return the clauses associated with the OMP taskreg statement GS,
+   i.e. an OMP_PARALLEL, OMP_TASK or OMP_TEAMS. */
+
+inline tree
+gimple_omp_taskreg_clauses (const gimple *gs)
+{
+ const gimple_statement_omp_taskreg *omp_taskreg_stmt
+ = as_a <const gimple_statement_omp_taskreg *> (gs);
+ return omp_taskreg_stmt->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with the OMP taskreg
+   statement GS. */
+
+inline tree *
+gimple_omp_taskreg_clauses_ptr (gimple *gs)
+{
+ gimple_statement_omp_taskreg *omp_taskreg_stmt
+ = as_a <gimple_statement_omp_taskreg *> (gs);
+ return &omp_taskreg_stmt->clauses;
+}
+
+
+/* Set CLAUSES to be the list of clauses associated with the OMP taskreg
+   statement GS. */
+
+inline void
+gimple_omp_taskreg_set_clauses (gimple *gs, tree clauses)
+{
+ gimple_statement_omp_taskreg *omp_taskreg_stmt
+ = as_a <gimple_statement_omp_taskreg *> (gs);
+ omp_taskreg_stmt->clauses = clauses;
+}
+
+
+/* Return the child function used to hold the body of the OMP taskreg
+   statement GS. */
+
+inline tree
+gimple_omp_taskreg_child_fn (const gimple *gs)
+{
+ const gimple_statement_omp_taskreg *omp_taskreg_stmt
+ = as_a <const gimple_statement_omp_taskreg *> (gs);
+ return omp_taskreg_stmt->child_fn;
+}
+
+/* Return a pointer to the child function used to hold the body of
+   the OMP taskreg statement GS. */
+
+inline tree *
+gimple_omp_taskreg_child_fn_ptr (gimple *gs)
+{
+ gimple_statement_omp_taskreg *omp_taskreg_stmt
+ = as_a <gimple_statement_omp_taskreg *> (gs);
+ return &omp_taskreg_stmt->child_fn;
+}
+
+
+/* Set CHILD_FN to be the child function for the OMP taskreg statement GS. */
+
+inline void
+gimple_omp_taskreg_set_child_fn (gimple *gs, tree child_fn)
+{
+ gimple_statement_omp_taskreg *omp_taskreg_stmt
+ = as_a <gimple_statement_omp_taskreg *> (gs);
+ omp_taskreg_stmt->child_fn = child_fn;
+}
+
+
+/* Return the artificial argument used to send variables and values
+   from the parent to the child threads in the OMP taskreg statement GS. */
+
+inline tree
+gimple_omp_taskreg_data_arg (const gimple *gs)
+{
+ const gimple_statement_omp_taskreg *omp_taskreg_stmt
+ = as_a <const gimple_statement_omp_taskreg *> (gs);
+ return omp_taskreg_stmt->data_arg;
+}
+
+
+/* Return a pointer to the data argument for the OMP taskreg statement GS. */
+
+inline tree *
+gimple_omp_taskreg_data_arg_ptr (gimple *gs)
+{
+ gimple_statement_omp_taskreg *omp_taskreg_stmt
+ = as_a <gimple_statement_omp_taskreg *> (gs);
+ return &omp_taskreg_stmt->data_arg;
+}
+
+
+/* Set DATA_ARG to be the data argument for the OMP taskreg statement GS. */
+
+inline void
+gimple_omp_taskreg_set_data_arg (gimple *gs, tree data_arg)
+{
+ gimple_statement_omp_taskreg *omp_taskreg_stmt
+ = as_a <gimple_statement_omp_taskreg *> (gs);
+ omp_taskreg_stmt->data_arg = data_arg;
+}
+
+
+/* Return the copy function used to hold the body of OMP_TASK GS. */
+
+inline tree
+gimple_omp_task_copy_fn (const gimple *gs)
+{
+ const gomp_task *omp_task_stmt = as_a <const gomp_task *> (gs);
+ return omp_task_stmt->copy_fn;
+}
+
+/* Return a pointer to the copy function used to hold the body of
+ OMP_TASK GS. */
+
+inline tree *
+gimple_omp_task_copy_fn_ptr (gimple *gs)
+{
+ gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
+ return &omp_task_stmt->copy_fn;
+}
+
+
+/* Set COPY_FN to be the copy function for OMP_TASK GS. */
+
+inline void
+gimple_omp_task_set_copy_fn (gimple *gs, tree copy_fn)
+{
+ gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
+ omp_task_stmt->copy_fn = copy_fn;
+}
+
+
+/* Return the size of the data block in bytes in OMP_TASK GS. */
+
+inline tree
+gimple_omp_task_arg_size (const gimple *gs)
+{
+ const gomp_task *omp_task_stmt = as_a <const gomp_task *> (gs);
+ return omp_task_stmt->arg_size;
+}
+
+
+/* Return a pointer to the data block size for OMP_TASK GS. */
+
+inline tree *
+gimple_omp_task_arg_size_ptr (gimple *gs)
+{
+ gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
+ return &omp_task_stmt->arg_size;
+}
+
+
+/* Set ARG_SIZE to be the data block size for OMP_TASK GS. */
+
+inline void
+gimple_omp_task_set_arg_size (gimple *gs, tree arg_size)
+{
+ gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
+ omp_task_stmt->arg_size = arg_size;
+}
+
+
+/* Return the alignment of the data block in bytes in OMP_TASK GS. */
+
+inline tree
+gimple_omp_task_arg_align (const gimple *gs)
+{
+ const gomp_task *omp_task_stmt = as_a <const gomp_task *> (gs);
+ return omp_task_stmt->arg_align;
+}
+
+
+/* Return a pointer to the data block alignment for OMP_TASK GS. */
+
+inline tree *
+gimple_omp_task_arg_align_ptr (gimple *gs)
+{
+ gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
+ return &omp_task_stmt->arg_align;
+}
+
+
+/* Set ARG_ALIGN to be the data block alignment for OMP_TASK GS. */
+
+inline void
+gimple_omp_task_set_arg_align (gimple *gs, tree arg_align)
+{
+ gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
+ omp_task_stmt->arg_align = arg_align;
+}
+
+
+/* Return the clauses associated with OMP_SINGLE GS. */
+
+inline tree
+gimple_omp_single_clauses (const gimple *gs)
+{
+ const gomp_single *omp_single_stmt = as_a <const gomp_single *> (gs);
+ return omp_single_stmt->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP_SINGLE GS. */
+
+inline tree *
+gimple_omp_single_clauses_ptr (gimple *gs)
+{
+ gomp_single *omp_single_stmt = as_a <gomp_single *> (gs);
+ return &omp_single_stmt->clauses;
+}
+
+
+/* Set CLAUSES to be the clauses associated with OMP_SINGLE_STMT. */
+
+inline void
+gimple_omp_single_set_clauses (gomp_single *omp_single_stmt, tree clauses)
+{
+ omp_single_stmt->clauses = clauses;
+}
+
+
+/* Return the clauses associated with OMP_TARGET GS. */
+
+inline tree
+gimple_omp_target_clauses (const gimple *gs)
+{
+ const gomp_target *omp_target_stmt = as_a <const gomp_target *> (gs);
+ return omp_target_stmt->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP_TARGET GS. */
+
+inline tree *
+gimple_omp_target_clauses_ptr (gimple *gs)
+{
+ gomp_target *omp_target_stmt = as_a <gomp_target *> (gs);
+ return &omp_target_stmt->clauses;
+}
+
+
+/* Set CLAUSES to be the clauses associated with OMP_TARGET_STMT. */
+
+inline void
+gimple_omp_target_set_clauses (gomp_target *omp_target_stmt,
+ tree clauses)
+{
+ omp_target_stmt->clauses = clauses;
+}
+
+
+/* Return the kind of the OMP_TARGET G. */
+
+inline int
+gimple_omp_target_kind (const gimple *g)
+{
+ GIMPLE_CHECK (g, GIMPLE_OMP_TARGET);
+ return (gimple_omp_subcode (g) & GF_OMP_TARGET_KIND_MASK);
+}
+
+
+/* Set the kind of the OMP_TARGET G. */
+
+inline void
+gimple_omp_target_set_kind (gomp_target *g, int kind)
+{
+ g->subcode = (g->subcode & ~GF_OMP_TARGET_KIND_MASK)
+ | (kind & GF_OMP_TARGET_KIND_MASK);
+}
+
+
+/* Return the child function used to hold the body of OMP_TARGET_STMT. */
+
+inline tree
+gimple_omp_target_child_fn (const gomp_target *omp_target_stmt)
+{
+ return omp_target_stmt->child_fn;
+}
+
+/* Return a pointer to the child function used to hold the body of
+ OMP_TARGET_STMT. */
+
+inline tree *
+gimple_omp_target_child_fn_ptr (gomp_target *omp_target_stmt)
+{
+ return &omp_target_stmt->child_fn;
+}
+
+
+/* Set CHILD_FN to be the child function for OMP_TARGET_STMT. */
+
+inline void
+gimple_omp_target_set_child_fn (gomp_target *omp_target_stmt,
+ tree child_fn)
+{
+ omp_target_stmt->child_fn = child_fn;
+}
+
+
+/* Return the artificial argument used to send variables and values
+   from the parent to the child threads in OMP_TARGET_STMT. */
+
+inline tree
+gimple_omp_target_data_arg (const gomp_target *omp_target_stmt)
+{
+ return omp_target_stmt->data_arg;
+}
+
+
+/* Return a pointer to the data argument for OMP_TARGET_STMT. */
+
+inline tree *
+gimple_omp_target_data_arg_ptr (gomp_target *omp_target_stmt)
+{
+ return &omp_target_stmt->data_arg;
+}
+
+
+/* Set DATA_ARG to be the data argument for OMP_TARGET_STMT. */
+
+inline void
+gimple_omp_target_set_data_arg (gomp_target *omp_target_stmt,
+ tree data_arg)
+{
+ omp_target_stmt->data_arg = data_arg;
+}
+
+
+/* Return the clauses associated with OMP_TEAMS GS. */
+
+inline tree
+gimple_omp_teams_clauses (const gimple *gs)
+{
+ const gomp_teams *omp_teams_stmt = as_a <const gomp_teams *> (gs);
+ return omp_teams_stmt->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP_TEAMS GS. */
+
+inline tree *
+gimple_omp_teams_clauses_ptr (gimple *gs)
+{
+ gomp_teams *omp_teams_stmt = as_a <gomp_teams *> (gs);
+ return &omp_teams_stmt->clauses;
+}
+
+
+/* Set CLAUSES to be the clauses associated with OMP_TEAMS_STMT. */
+
+inline void
+gimple_omp_teams_set_clauses (gomp_teams *omp_teams_stmt, tree clauses)
+{
+ omp_teams_stmt->clauses = clauses;
+}
+
+/* Return the child function used to hold the body of OMP_TEAMS_STMT. */
+
+inline tree
+gimple_omp_teams_child_fn (const gomp_teams *omp_teams_stmt)
+{
+ return omp_teams_stmt->child_fn;
+}
+
+/* Return a pointer to the child function used to hold the body of
+ OMP_TEAMS_STMT. */
+
+inline tree *
+gimple_omp_teams_child_fn_ptr (gomp_teams *omp_teams_stmt)
+{
+ return &omp_teams_stmt->child_fn;
+}
+
+
+/* Set CHILD_FN to be the child function for OMP_TEAMS_STMT. */
+
+inline void
+gimple_omp_teams_set_child_fn (gomp_teams *omp_teams_stmt, tree child_fn)
+{
+ omp_teams_stmt->child_fn = child_fn;
+}
+
+
+/* Return the artificial argument used to send variables and values
+   from the parent to the child threads in OMP_TEAMS_STMT. */
+
+inline tree
+gimple_omp_teams_data_arg (const gomp_teams *omp_teams_stmt)
+{
+ return omp_teams_stmt->data_arg;
+}
+
+
+/* Return a pointer to the data argument for OMP_TEAMS_STMT. */
+
+inline tree *
+gimple_omp_teams_data_arg_ptr (gomp_teams *omp_teams_stmt)
+{
+ return &omp_teams_stmt->data_arg;
+}
+
+
+/* Set DATA_ARG to be the data argument for OMP_TEAMS_STMT. */
+
+inline void
+gimple_omp_teams_set_data_arg (gomp_teams *omp_teams_stmt, tree data_arg)
+{
+ omp_teams_stmt->data_arg = data_arg;
+}
+
+/* Return the host flag of an OMP_TEAMS_STMT. */
+
+inline bool
+gimple_omp_teams_host (const gomp_teams *omp_teams_stmt)
+{
+ return (gimple_omp_subcode (omp_teams_stmt) & GF_OMP_TEAMS_HOST) != 0;
+}
+
+/* Set host flag of an OMP_TEAMS_STMT to VALUE. */
+
+inline void
+gimple_omp_teams_set_host (gomp_teams *omp_teams_stmt, bool value)
+{
+ if (value)
+ omp_teams_stmt->subcode |= GF_OMP_TEAMS_HOST;
+ else
+ omp_teams_stmt->subcode &= ~GF_OMP_TEAMS_HOST;
+}
+
+/* Return the clauses associated with OMP_SECTIONS GS. */
+
+inline tree
+gimple_omp_sections_clauses (const gimple *gs)
+{
+ const gomp_sections *omp_sections_stmt = as_a <const gomp_sections *> (gs);
+ return omp_sections_stmt->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP_SECTIONS GS. */
+
+inline tree *
+gimple_omp_sections_clauses_ptr (gimple *gs)
+{
+ gomp_sections *omp_sections_stmt = as_a <gomp_sections *> (gs);
+ return &omp_sections_stmt->clauses;
+}
+
+
+/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS
+ GS. */
+
+inline void
+gimple_omp_sections_set_clauses (gimple *gs, tree clauses)
+{
+ gomp_sections *omp_sections_stmt = as_a <gomp_sections *> (gs);
+ omp_sections_stmt->clauses = clauses;
+}
+
+
+/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
+ in GS. */
+
+inline tree
+gimple_omp_sections_control (const gimple *gs)
+{
+ const gomp_sections *omp_sections_stmt = as_a <const gomp_sections *> (gs);
+ return omp_sections_stmt->control;
+}
+
+
+/* Return a pointer to the control variable associated with the
+   GIMPLE_OMP_SECTIONS in GS. */
+
+inline tree *
+gimple_omp_sections_control_ptr (gimple *gs)
+{
+ gomp_sections *omp_sections_stmt = as_a <gomp_sections *> (gs);
+ return &omp_sections_stmt->control;
+}
+
+
+/* Set CONTROL to be the control variable associated with the
+   GIMPLE_OMP_SECTIONS in GS. */
+
+inline void
+gimple_omp_sections_set_control (gimple *gs, tree control)
+{
+ gomp_sections *omp_sections_stmt = as_a <gomp_sections *> (gs);
+ omp_sections_stmt->control = control;
+}
+
+
+/* Set the value being stored in an atomic store. */
+
+inline void
+gimple_omp_atomic_store_set_val (gomp_atomic_store *store_stmt, tree val)
+{
+ store_stmt->val = val;
+}
+
+
+/* Return the value being stored in an atomic store. */
+
+inline tree
+gimple_omp_atomic_store_val (const gomp_atomic_store *store_stmt)
+{
+ return store_stmt->val;
+}
+
+
+/* Return a pointer to the value being stored in an atomic store. */
+
+inline tree *
+gimple_omp_atomic_store_val_ptr (gomp_atomic_store *store_stmt)
+{
+ return &store_stmt->val;
+}
+
+
+/* Set the LHS of an atomic load. */
+
+inline void
+gimple_omp_atomic_load_set_lhs (gomp_atomic_load *load_stmt, tree lhs)
+{
+ load_stmt->lhs = lhs;
+}
+
+
+/* Get the LHS of an atomic load. */
+
+inline tree
+gimple_omp_atomic_load_lhs (const gomp_atomic_load *load_stmt)
+{
+ return load_stmt->lhs;
+}
+
+
+/* Return a pointer to the LHS of an atomic load. */
+
+inline tree *
+gimple_omp_atomic_load_lhs_ptr (gomp_atomic_load *load_stmt)
+{
+ return &load_stmt->lhs;
+}
+
+
+/* Set the RHS of an atomic load. */
+
+inline void
+gimple_omp_atomic_load_set_rhs (gomp_atomic_load *load_stmt, tree rhs)
+{
+ load_stmt->rhs = rhs;
+}
+
+
+/* Get the RHS of an atomic load. */
+
+inline tree
+gimple_omp_atomic_load_rhs (const gomp_atomic_load *load_stmt)
+{
+ return load_stmt->rhs;
+}
+
+
+/* Return a pointer to the RHS of an atomic load. */
+
+inline tree *
+gimple_omp_atomic_load_rhs_ptr (gomp_atomic_load *load_stmt)
+{
+ return &load_stmt->rhs;
+}
+
+
+/* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE. */
+
+inline tree
+gimple_omp_continue_control_def (const gomp_continue *cont_stmt)
+{
+ return cont_stmt->control_def;
+}
+
+/* The same as above, but return the address. */
+
+inline tree *
+gimple_omp_continue_control_def_ptr (gomp_continue *cont_stmt)
+{
+ return &cont_stmt->control_def;
+}
+
+/* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE. */
+
+inline void
+gimple_omp_continue_set_control_def (gomp_continue *cont_stmt, tree def)
+{
+ cont_stmt->control_def = def;
+}
+
+
+/* Get the use of the control variable in a GIMPLE_OMP_CONTINUE. */
+
+inline tree
+gimple_omp_continue_control_use (const gomp_continue *cont_stmt)
+{
+ return cont_stmt->control_use;
+}
+
+
+/* The same as above, but return the address. */
+
+inline tree *
+gimple_omp_continue_control_use_ptr (gomp_continue *cont_stmt)
+{
+ return &cont_stmt->control_use;
+}
+
+
+/* Set the use of the control variable in a GIMPLE_OMP_CONTINUE. */
+
+inline void
+gimple_omp_continue_set_control_use (gomp_continue *cont_stmt, tree use)
+{
+ cont_stmt->control_use = use;
+}
+
+/* Return the guard associated with the GIMPLE_ASSUME statement GS. */
+
+inline tree
+gimple_assume_guard (const gimple *gs)
+{
+ const gimple_statement_assume *assume_stmt
+ = as_a <const gimple_statement_assume *> (gs);
+ return assume_stmt->guard;
+}
+
+/* Set the guard associated with the GIMPLE_ASSUME statement GS. */
+
+inline void
+gimple_assume_set_guard (gimple *gs, tree guard)
+{
+ gimple_statement_assume *assume_stmt = as_a <gimple_statement_assume *> (gs);
+ assume_stmt->guard = guard;
+}
+
+inline tree *
+gimple_assume_guard_ptr (gimple *gs)
+{
+ gimple_statement_assume *assume_stmt = as_a <gimple_statement_assume *> (gs);
+ return &assume_stmt->guard;
+}
+
+/* Return the address of the GIMPLE sequence contained in the GIMPLE_ASSUME
+ statement GS. */
+
+inline gimple_seq *
+gimple_assume_body_ptr (gimple *gs)
+{
+ gimple_statement_assume *assume_stmt = as_a <gimple_statement_assume *> (gs);
+ return &assume_stmt->body;
+}
+
+/* Return the GIMPLE sequence contained in the GIMPLE_ASSUME statement GS. */
+
+inline gimple_seq
+gimple_assume_body (const gimple *gs)
+{
+ const gimple_statement_assume *assume_stmt
+ = as_a <const gimple_statement_assume *> (gs);
+ return assume_stmt->body;
+}
+
+/* Return a pointer to the body for the GIMPLE_TRANSACTION statement
+ TRANSACTION_STMT. */
+
+inline gimple_seq *
+gimple_transaction_body_ptr (gtransaction *transaction_stmt)
+{
+ return &transaction_stmt->body;
+}
+
+/* Return the body for the GIMPLE_TRANSACTION statement TRANSACTION_STMT. */
+
+inline gimple_seq
+gimple_transaction_body (const gtransaction *transaction_stmt)
+{
+ return transaction_stmt->body;
+}
+
+/* Return the label associated with a GIMPLE_TRANSACTION. */
+
+inline tree
+gimple_transaction_label_norm (const gtransaction *transaction_stmt)
+{
+ return transaction_stmt->label_norm;
+}
+
+inline tree *
+gimple_transaction_label_norm_ptr (gtransaction *transaction_stmt)
+{
+ return &transaction_stmt->label_norm;
+}
+
+inline tree
+gimple_transaction_label_uninst (const gtransaction *transaction_stmt)
+{
+ return transaction_stmt->label_uninst;
+}
+
+inline tree *
+gimple_transaction_label_uninst_ptr (gtransaction *transaction_stmt)
+{
+ return &transaction_stmt->label_uninst;
+}
+
+inline tree
+gimple_transaction_label_over (const gtransaction *transaction_stmt)
+{
+ return transaction_stmt->label_over;
+}
+
+inline tree *
+gimple_transaction_label_over_ptr (gtransaction *transaction_stmt)
+{
+ return &transaction_stmt->label_over;
+}
+
+/* Return the subcode associated with a GIMPLE_TRANSACTION. */
+
+inline unsigned int
+gimple_transaction_subcode (const gtransaction *transaction_stmt)
+{
+ return transaction_stmt->subcode;
+}
+
+/* Set BODY to be the body for the GIMPLE_TRANSACTION statement
+ TRANSACTION_STMT. */
+
+inline void
+gimple_transaction_set_body (gtransaction *transaction_stmt,
+ gimple_seq body)
+{
+ transaction_stmt->body = body;
+}
+
+/* Set the label associated with a GIMPLE_TRANSACTION. */
+
+inline void
+gimple_transaction_set_label_norm (gtransaction *transaction_stmt, tree label)
+{
+ transaction_stmt->label_norm = label;
+}
+
+inline void
+gimple_transaction_set_label_uninst (gtransaction *transaction_stmt, tree label)
+{
+ transaction_stmt->label_uninst = label;
+}
+
+inline void
+gimple_transaction_set_label_over (gtransaction *transaction_stmt, tree label)
+{
+ transaction_stmt->label_over = label;
+}
+
+/* Set the subcode associated with a GIMPLE_TRANSACTION. */
+
+inline void
+gimple_transaction_set_subcode (gtransaction *transaction_stmt,
+ unsigned int subcode)
+{
+ transaction_stmt->subcode = subcode;
+}
+
+/* Return a pointer to the return value for GIMPLE_RETURN GS. */
+
+inline tree *
+gimple_return_retval_ptr (greturn *gs)
+{
+ return &gs->op[0];
+}
+
+/* Return the return value for GIMPLE_RETURN GS. */
+
+inline tree
+gimple_return_retval (const greturn *gs)
+{
+ return gs->op[0];
+}
+
+
+/* Set RETVAL to be the return value for GIMPLE_RETURN GS. */
+
+inline void
+gimple_return_set_retval (greturn *gs, tree retval)
+{
+ gs->op[0] = retval;
+}
+
+
+/* Returns true when the gimple statement STMT is any of the OMP types. */
+
+#define CASE_GIMPLE_OMP \
+ case GIMPLE_OMP_PARALLEL: \
+ case GIMPLE_OMP_TASK: \
+ case GIMPLE_OMP_FOR: \
+ case GIMPLE_OMP_SECTIONS: \
+ case GIMPLE_OMP_SECTIONS_SWITCH: \
+ case GIMPLE_OMP_SINGLE: \
+ case GIMPLE_OMP_TARGET: \
+ case GIMPLE_OMP_TEAMS: \
+ case GIMPLE_OMP_SCOPE: \
+ case GIMPLE_OMP_SECTION: \
+ case GIMPLE_OMP_MASTER: \
+ case GIMPLE_OMP_MASKED: \
+ case GIMPLE_OMP_TASKGROUP: \
+ case GIMPLE_OMP_ORDERED: \
+ case GIMPLE_OMP_CRITICAL: \
+ case GIMPLE_OMP_SCAN: \
+ case GIMPLE_OMP_RETURN: \
+ case GIMPLE_OMP_ATOMIC_LOAD: \
+ case GIMPLE_OMP_ATOMIC_STORE: \
+ case GIMPLE_OMP_CONTINUE
+
+inline bool
+is_gimple_omp (const gimple *stmt)
+{
+ switch (gimple_code (stmt))
+ {
+ CASE_GIMPLE_OMP:
+ return true;
+ default:
+ return false;
+ }
+}
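+
+/* Example (illustrative): CASE_GIMPLE_OMP can also be used directly in
+   a switch over gimple_code; handle_omp stands for a hypothetical
+   handler:
+
+       switch (gimple_code (stmt))
+         {
+         CASE_GIMPLE_OMP:
+           handle_omp (stmt);
+           break;
+         default:
+           break;
+         }  */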
+
+/* Return true if the OMP gimple statement STMT is any of the OpenACC types
+ specifically. */
+
+inline bool
+is_gimple_omp_oacc (const gimple *stmt)
+{
+ gcc_assert (is_gimple_omp (stmt));
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_OMP_ATOMIC_LOAD:
+ case GIMPLE_OMP_ATOMIC_STORE:
+ case GIMPLE_OMP_CONTINUE:
+ case GIMPLE_OMP_RETURN:
+ /* Codes shared between OpenACC and OpenMP cannot be used to disambiguate
+ the two. */
+ gcc_unreachable ();
+
+ case GIMPLE_OMP_FOR:
+ switch (gimple_omp_for_kind (stmt))
+ {
+ case GF_OMP_FOR_KIND_OACC_LOOP:
+ return true;
+ default:
+ return false;
+ }
+ case GIMPLE_OMP_TARGET:
+ switch (gimple_omp_target_kind (stmt))
+ {
+ case GF_OMP_TARGET_KIND_OACC_PARALLEL:
+ case GF_OMP_TARGET_KIND_OACC_KERNELS:
+ case GF_OMP_TARGET_KIND_OACC_SERIAL:
+ case GF_OMP_TARGET_KIND_OACC_DATA:
+ case GF_OMP_TARGET_KIND_OACC_UPDATE:
+ case GF_OMP_TARGET_KIND_OACC_ENTER_DATA:
+ case GF_OMP_TARGET_KIND_OACC_EXIT_DATA:
+ case GF_OMP_TARGET_KIND_OACC_DECLARE:
+ case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
+ case GF_OMP_TARGET_KIND_OACC_PARALLEL_KERNELS_PARALLELIZED:
+ case GF_OMP_TARGET_KIND_OACC_PARALLEL_KERNELS_GANG_SINGLE:
+ case GF_OMP_TARGET_KIND_OACC_DATA_KERNELS:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+
+/* Return true if the OMP gimple statement STMT is offloaded. */
+
+inline bool
+is_gimple_omp_offloaded (const gimple *stmt)
+{
+ gcc_assert (is_gimple_omp (stmt));
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_OMP_TARGET:
+ switch (gimple_omp_target_kind (stmt))
+ {
+ case GF_OMP_TARGET_KIND_REGION:
+ case GF_OMP_TARGET_KIND_OACC_PARALLEL:
+ case GF_OMP_TARGET_KIND_OACC_KERNELS:
+ case GF_OMP_TARGET_KIND_OACC_SERIAL:
+ case GF_OMP_TARGET_KIND_OACC_PARALLEL_KERNELS_PARALLELIZED:
+ case GF_OMP_TARGET_KIND_OACC_PARALLEL_KERNELS_GANG_SINGLE:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+
+/* Returns TRUE if statement G is a GIMPLE_NOP. */
+
+inline bool
+gimple_nop_p (const gimple *g)
+{
+ return gimple_code (g) == GIMPLE_NOP;
+}
+
+
+/* Return true if GS is a GIMPLE_RESX. */
+
+inline bool
+is_gimple_resx (const gimple *gs)
+{
+ return gimple_code (gs) == GIMPLE_RESX;
+}
+
+
+/* Enum and arrays used for allocation stats. Keep in sync with
+ gimple.cc:gimple_alloc_kind_names. */
+enum gimple_alloc_kind
+{
+ gimple_alloc_kind_assign, /* Assignments. */
+ gimple_alloc_kind_phi, /* PHI nodes. */
+ gimple_alloc_kind_cond, /* Conditionals. */
+ gimple_alloc_kind_rest, /* Everything else. */
+ gimple_alloc_kind_all
+};
+
+extern uint64_t gimple_alloc_counts[];
+extern uint64_t gimple_alloc_sizes[];
+
+/* Return the allocation kind for a given stmt CODE. */
+inline enum gimple_alloc_kind
+gimple_alloc_kind (enum gimple_code code)
+{
+ switch (code)
+ {
+ case GIMPLE_ASSIGN:
+ return gimple_alloc_kind_assign;
+ case GIMPLE_PHI:
+ return gimple_alloc_kind_phi;
+ case GIMPLE_COND:
+ return gimple_alloc_kind_cond;
+ default:
+ return gimple_alloc_kind_rest;
+ }
+}
+
+/* Return true if a location should not be emitted for this statement
+ by annotate_all_with_location. */
+
+inline bool
+gimple_do_not_emit_location_p (gimple *g)
+{
+ return gimple_plf (g, GF_PLF_1);
+}
+
+/* Mark statement G so a location will not be emitted by
+ annotate_one_with_location. */
+
+inline void
+gimple_set_do_not_emit_location (gimple *g)
+{
+ /* The PLF flags are initialized to 0 when a new tuple is created,
+ so no need to initialize it anywhere. */
+ gimple_set_plf (g, GF_PLF_1, true);
+}
+
+#endif /* GCC_GIMPLE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimplify-me.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimplify-me.h
new file mode 100644
index 0000000..50b4802
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimplify-me.h
@@ -0,0 +1,37 @@
+/* Header file for middle end gimplification.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLIFY_ME_H
+#define GCC_GIMPLIFY_ME_H
+
+/* Validation of GIMPLE expressions.  Note that these predicates only check
+   the basic form of the expression; they don't recurse to make sure that
+   underlying nodes are also of the right form. */
+typedef bool (*gimple_predicate)(tree);
+
+extern tree force_gimple_operand_1 (tree, gimple_seq *, gimple_predicate, tree);
+extern tree force_gimple_operand (tree, gimple_seq *, bool, tree);
+extern tree force_gimple_operand_gsi_1 (gimple_stmt_iterator *, tree,
+ gimple_predicate, tree,
+ bool, enum gsi_iterator_update);
+extern tree force_gimple_operand_gsi (gimple_stmt_iterator *, tree, bool, tree,
+ bool, enum gsi_iterator_update);
+extern void gimple_regimplify_operands (gimple *, gimple_stmt_iterator *);
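+
+/* Example (illustrative): force EXPR into a simple gimple operand and
+   insert any statements this generates before the statement that GSI
+   currently points to:
+
+       expr = force_gimple_operand_gsi (&gsi, expr, true, NULL_TREE,
+                                        true, GSI_SAME_STMT);  */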
+
+#endif /* GCC_GIMPLIFY_ME_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimplify.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimplify.h
new file mode 100644
index 0000000..f4a3eea
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gimplify.h
@@ -0,0 +1,92 @@
+/* Header file for gimplification.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GIMPLIFY_H
+#define GCC_GIMPLIFY_H
+
+/* Validation of GIMPLE expressions. Note that these predicates only check
+ the basic form of the expression; they don't recurse to make sure that
+ underlying nodes are also of the right form. */
+typedef bool (*gimple_predicate)(tree);
+
+/* FIXME we should deduce this from the predicate. */
+enum fallback {
+ fb_none = 0, /* Do not generate a temporary. */
+
+ fb_rvalue = 1, /* Generate an rvalue to hold the result of a
+ gimplified expression. */
+
+ fb_lvalue = 2, /* Generate an lvalue to hold the result of a
+ gimplified expression. */
+
+ fb_mayfail = 4, /* Gimplification may fail. Error issued
+ afterwards. */
+ fb_either = fb_rvalue | fb_lvalue
+};
+
+typedef int fallback_t;
+
+enum gimplify_status {
+ GS_ERROR = -2, /* Something Bad Seen. */
+ GS_UNHANDLED = -1, /* A langhook result for "I dunno". */
+ GS_OK = 0, /* We did something, maybe more to do. */
+ GS_ALL_DONE = 1 /* The expression is fully gimplified. */
+};
+
+extern void free_gimplify_stack (void);
+extern void push_gimplify_context (bool in_ssa = false,
+ bool rhs_cond_ok = false);
+extern void pop_gimplify_context (gimple *);
+extern gbind *gimple_current_bind_expr (void);
+extern vec<gbind *> gimple_bind_expr_stack (void);
+extern void gimplify_and_add (tree, gimple_seq *);
+extern tree get_formal_tmp_var (tree, gimple_seq *);
+extern tree get_initialized_tmp_var (tree, gimple_seq *, gimple_seq * = NULL,
+ bool = true);
+extern void declare_vars (tree, gimple *, bool);
+extern void gimple_add_tmp_var (tree);
+extern void gimple_add_tmp_var_fn (struct function *, tree);
+extern void copy_if_shared (tree *, void * = NULL);
+extern tree unshare_expr (tree);
+extern tree unshare_expr_without_location (tree);
+extern tree voidify_wrapper_expr (tree, tree);
+extern tree build_and_jump (tree *);
+extern enum gimplify_status gimplify_self_mod_expr (tree *, gimple_seq *,
+ gimple_seq *, bool, tree);
+extern tree gimple_boolify (tree);
+extern gimple_predicate rhs_predicate_for (tree);
+extern bool gimplify_stmt (tree *, gimple_seq *);
+extern void omp_firstprivatize_variable (struct gimplify_omp_ctx *, tree);
+extern enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *,
+ bool (*) (tree), fallback_t);
+
+int omp_construct_selector_matches (enum tree_code *, int, int *);
+
+extern void gimplify_type_sizes (tree, gimple_seq *);
+extern void gimplify_one_sizepos (tree *, gimple_seq *);
+extern gbind *gimplify_body (tree, bool);
+extern enum gimplify_status gimplify_arg (tree *, gimple_seq *, location_t,
+ bool = true);
+extern void gimplify_function_tree (tree);
+extern enum gimplify_status gimplify_va_arg_expr (tree *, gimple_seq *,
+ gimple_seq *);
+extern bool generic_expr_could_trap_p (tree expr);
+gimple *gimplify_assign (tree, tree, gimple_seq *);
+
+#endif /* GCC_GIMPLIFY_H */
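
gimplify_expr is the workhorse declared above: it rewrites *EXPR_P until the
given predicate accepts it, emitting setup code into the pre-queue, with the
fallback mask saying what kind of temporary may be created. A minimal sketch
(the wrapper name is hypothetical; is_gimple_val is the stock predicate from
gimple-expr.h):

/* Hypothetical sketch: reduce *EXPR_P to something satisfying
   is_gimple_val, emitting any setup statements into PRE.  */
static enum gimplify_status
to_gimple_val (tree *expr_p, gimple_seq *pre)
{
  return gimplify_expr (expr_p, pre, NULL, is_gimple_val, fb_rvalue);
}
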
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/glimits.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/glimits.h
new file mode 100644
index 0000000..5aadbaa
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/glimits.h
@@ -0,0 +1,163 @@
+/* Copyright (C) 1991-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef _LIMITS_H___
+#define _LIMITS_H___
+
+/* Number of bits in a `char'. */
+#undef CHAR_BIT
+#define CHAR_BIT __CHAR_BIT__
+
+/* Maximum length of a multibyte character. */
+#ifndef MB_LEN_MAX
+#define MB_LEN_MAX 1
+#endif
+
+/* Minimum and maximum values a `signed char' can hold. */
+#undef SCHAR_MIN
+#define SCHAR_MIN (-SCHAR_MAX - 1)
+#undef SCHAR_MAX
+#define SCHAR_MAX __SCHAR_MAX__
+
+/* Maximum value an `unsigned char' can hold. (Minimum is 0). */
+#undef UCHAR_MAX
+#if __SCHAR_MAX__ == __INT_MAX__
+# define UCHAR_MAX (SCHAR_MAX * 2U + 1U)
+#else
+# define UCHAR_MAX (SCHAR_MAX * 2 + 1)
+#endif
+
+/* Minimum and maximum values a `char' can hold. */
+#ifdef __CHAR_UNSIGNED__
+# undef CHAR_MIN
+# if __SCHAR_MAX__ == __INT_MAX__
+# define CHAR_MIN 0U
+# else
+# define CHAR_MIN 0
+# endif
+# undef CHAR_MAX
+# define CHAR_MAX UCHAR_MAX
+#else
+# undef CHAR_MIN
+# define CHAR_MIN SCHAR_MIN
+# undef CHAR_MAX
+# define CHAR_MAX SCHAR_MAX
+#endif
+
+/* Minimum and maximum values a `signed short int' can hold. */
+#undef SHRT_MIN
+#define SHRT_MIN (-SHRT_MAX - 1)
+#undef SHRT_MAX
+#define SHRT_MAX __SHRT_MAX__
+
+/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */
+#undef USHRT_MAX
+#if __SHRT_MAX__ == __INT_MAX__
+# define USHRT_MAX (SHRT_MAX * 2U + 1U)
+#else
+# define USHRT_MAX (SHRT_MAX * 2 + 1)
+#endif
+
+/* Minimum and maximum values a `signed int' can hold. */
+#undef INT_MIN
+#define INT_MIN (-INT_MAX - 1)
+#undef INT_MAX
+#define INT_MAX __INT_MAX__
+
+/* Maximum value an `unsigned int' can hold. (Minimum is 0). */
+#undef UINT_MAX
+#define UINT_MAX (INT_MAX * 2U + 1U)
+
+/* Minimum and maximum values a `signed long int' can hold.
+ (Same as `int'). */
+#undef LONG_MIN
+#define LONG_MIN (-LONG_MAX - 1L)
+#undef LONG_MAX
+#define LONG_MAX __LONG_MAX__
+
+/* Maximum value an `unsigned long int' can hold. (Minimum is 0). */
+#undef ULONG_MAX
+#define ULONG_MAX (LONG_MAX * 2UL + 1UL)
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+/* Minimum and maximum values a `signed long long int' can hold. */
+# undef LLONG_MIN
+# define LLONG_MIN (-LLONG_MAX - 1LL)
+# undef LLONG_MAX
+# define LLONG_MAX __LONG_LONG_MAX__
+
+/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */
+# undef ULLONG_MAX
+# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL)
+#endif
+
+#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__)
+/* Minimum and maximum values a `signed long long int' can hold. */
+# undef LONG_LONG_MIN
+# define LONG_LONG_MIN (-LONG_LONG_MAX - 1LL)
+# undef LONG_LONG_MAX
+# define LONG_LONG_MAX __LONG_LONG_MAX__
+
+/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */
+# undef ULONG_LONG_MAX
+# define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1ULL)
+#endif
+
+#if (defined __STDC_WANT_IEC_60559_BFP_EXT__ \
+ || (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L))
+/* TS 18661-1 / C2X widths of integer types. */
+# undef CHAR_WIDTH
+# define CHAR_WIDTH __SCHAR_WIDTH__
+# undef SCHAR_WIDTH
+# define SCHAR_WIDTH __SCHAR_WIDTH__
+# undef UCHAR_WIDTH
+# define UCHAR_WIDTH __SCHAR_WIDTH__
+# undef SHRT_WIDTH
+# define SHRT_WIDTH __SHRT_WIDTH__
+# undef USHRT_WIDTH
+# define USHRT_WIDTH __SHRT_WIDTH__
+# undef INT_WIDTH
+# define INT_WIDTH __INT_WIDTH__
+# undef UINT_WIDTH
+# define UINT_WIDTH __INT_WIDTH__
+# undef LONG_WIDTH
+# define LONG_WIDTH __LONG_WIDTH__
+# undef ULONG_WIDTH
+# define ULONG_WIDTH __LONG_WIDTH__
+# undef LLONG_WIDTH
+# define LLONG_WIDTH __LONG_LONG_WIDTH__
+# undef ULLONG_WIDTH
+# define ULLONG_WIDTH __LONG_LONG_WIDTH__
+#endif
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L
+/* C2X width and limit of _Bool. */
+# undef BOOL_MAX
+# define BOOL_MAX 1
+# undef BOOL_WIDTH
+# define BOOL_WIDTH 1
+
+# define __STDC_VERSION_LIMITS_H__ 202311L
+#endif
+
+#endif /* _LIMITS_H___ */
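
Note that every *_MIN above is derived from the corresponding *_MAX rather
than written as a literal: a constant like 2147483648 does not fit in an int,
so spelling INT_MIN as (-INT_MAX - 1) keeps the whole expression inside the
type. A small self-contained check:

#include <limits.h>
#include <stdio.h>

int
main (void)
{
  /* INT_MIN expands to (-INT_MAX - 1); the positive literal would not
     fit in an int, so the minimum is spelled as an expression.  */
  printf ("int: %d .. %d\n", INT_MIN, INT_MAX);
  printf ("unsigned char max: %u\n", (unsigned int) UCHAR_MAX);
  return 0;
}
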
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gomp-constants.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gomp-constants.h
new file mode 100644
index 0000000..1b9b07d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gomp-constants.h
@@ -0,0 +1,354 @@
+/* Communication between GCC and libgomp.
+
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+ Contributed by Mentor Embedded.
+
+ This file is part of the GNU Offloading and Multi Processing Library
+ (libgomp).
+
+ Libgomp is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GOMP_CONSTANTS_H
+#define GOMP_CONSTANTS_H 1
+
+/* Memory mapping types. */
+
+/* Map kinds fit in one byte; GOMP_MAP_LAST is the first value past that range. */
+#define GOMP_MAP_LAST (1 << 8)
+
+#define GOMP_MAP_FLAG_TO (1 << 0)
+#define GOMP_MAP_FLAG_FROM (1 << 1)
+/* Special map kinds, enumerated starting here. */
+#define GOMP_MAP_FLAG_SPECIAL_0 (1 << 2)
+#define GOMP_MAP_FLAG_SPECIAL_1 (1 << 3)
+#define GOMP_MAP_FLAG_SPECIAL_2 (1 << 4)
+#define GOMP_MAP_FLAG_SPECIAL_3 (1 << 5)
+#define GOMP_MAP_FLAG_SPECIAL_4 (1 << 6)
+#define GOMP_MAP_FLAG_SPECIAL (GOMP_MAP_FLAG_SPECIAL_1 \
+ | GOMP_MAP_FLAG_SPECIAL_0)
+#define GOMP_MAP_DEEP_COPY (GOMP_MAP_FLAG_SPECIAL_4 \
+ | GOMP_MAP_FLAG_SPECIAL_2)
+/* This value indicates the map was created implicitly according to
+ OpenMP rules. */
+#define GOMP_MAP_IMPLICIT (GOMP_MAP_FLAG_SPECIAL_3 \
+ | GOMP_MAP_FLAG_SPECIAL_4)
+/* Mask for entire set of special map kind bits. */
+#define GOMP_MAP_FLAG_SPECIAL_BITS (GOMP_MAP_FLAG_SPECIAL_0 \
+ | GOMP_MAP_FLAG_SPECIAL_1 \
+ | GOMP_MAP_FLAG_SPECIAL_2 \
+ | GOMP_MAP_FLAG_SPECIAL_3 \
+ | GOMP_MAP_FLAG_SPECIAL_4)
+/* Flag to force a specific behavior (or else, trigger a run-time error). */
+#define GOMP_MAP_FLAG_FORCE (1 << 7)
+
+enum gomp_map_kind
+ {
+ /* If not already present, allocate. */
+ GOMP_MAP_ALLOC = 0,
+ /* ..., and copy to device. */
+ GOMP_MAP_TO = (GOMP_MAP_ALLOC | GOMP_MAP_FLAG_TO),
+ /* ..., and copy from device. */
+ GOMP_MAP_FROM = (GOMP_MAP_ALLOC | GOMP_MAP_FLAG_FROM),
+ /* ..., and copy to and from device. */
+ GOMP_MAP_TOFROM = (GOMP_MAP_TO | GOMP_MAP_FROM),
+ /* The following kind is an internal only map kind, used for pointer based
+ array sections. OMP_CLAUSE_SIZE for these is not the pointer size,
+ which is implicitly POINTER_SIZE_UNITS, but the bias. */
+ GOMP_MAP_POINTER = (GOMP_MAP_FLAG_SPECIAL_0 | 0),
+ /* Also internal, behaves like GOMP_MAP_TO, but additionally any
+ GOMP_MAP_POINTER records consecutive after it which have addresses
+ falling into that range will not be ignored if GOMP_MAP_TO_PSET wasn't
+ mapped already.
+ For OpenACC attach operations (e.g. copyin of struct members),
+ GOMP_MAP_TO_PSET is followed by a single GOMP_MAP_ATTACH mapping
+ instead. */
+ GOMP_MAP_TO_PSET = (GOMP_MAP_FLAG_SPECIAL_0 | 1),
+ /* Must already be present. */
+ GOMP_MAP_FORCE_PRESENT = (GOMP_MAP_FLAG_SPECIAL_0 | 2),
+ /* Deallocate a mapping, without copying from device. */
+ GOMP_MAP_DELETE = (GOMP_MAP_FLAG_SPECIAL_0 | 3),
+ /* Is a device pointer. OMP_CLAUSE_SIZE for these is unused; is implicitly
+ POINTER_SIZE_UNITS. */
+ GOMP_MAP_FORCE_DEVICEPTR = (GOMP_MAP_FLAG_SPECIAL_1 | 0),
+ /* OpenACC device_resident. */
+ GOMP_MAP_DEVICE_RESIDENT = (GOMP_MAP_FLAG_SPECIAL_1 | 1),
+ /* OpenACC link. */
+ GOMP_MAP_LINK = (GOMP_MAP_FLAG_SPECIAL_1 | 2),
+ /* Use device data if present, fall back to host address otherwise. */
+ GOMP_MAP_IF_PRESENT = (GOMP_MAP_FLAG_SPECIAL_1 | 3),
+ /* Do not map, copy bits for firstprivate instead. */
+ GOMP_MAP_FIRSTPRIVATE = (GOMP_MAP_FLAG_SPECIAL | 0),
+ /* Similarly, but store the value in the pointer rather than
+ pointed by the pointer. */
+ GOMP_MAP_FIRSTPRIVATE_INT = (GOMP_MAP_FLAG_SPECIAL | 1),
+ /* Pointer translate host address into device address and copy that
+ back to host. */
+ GOMP_MAP_USE_DEVICE_PTR = (GOMP_MAP_FLAG_SPECIAL | 2),
+ /* Allocate a zero length array section. Prefer next non-zero length
+ mapping over previous non-zero length mapping over zero length mapping
+ at the address. If not already mapped, do nothing (and pointer translate
+ to NULL). */
+ GOMP_MAP_ZERO_LEN_ARRAY_SECTION = (GOMP_MAP_FLAG_SPECIAL | 3),
+ /* Allocate. */
+ GOMP_MAP_FORCE_ALLOC = (GOMP_MAP_FLAG_FORCE | GOMP_MAP_ALLOC),
+ /* ..., and copy to device. */
+ GOMP_MAP_FORCE_TO = (GOMP_MAP_FLAG_FORCE | GOMP_MAP_TO),
+ /* ..., and copy from device. */
+ GOMP_MAP_FORCE_FROM = (GOMP_MAP_FLAG_FORCE | GOMP_MAP_FROM),
+ /* ..., and copy to and from device. */
+ GOMP_MAP_FORCE_TOFROM = (GOMP_MAP_FLAG_FORCE | GOMP_MAP_TOFROM),
+ /* Like GOMP_MAP_USE_DEVICE_PTR above, translate a host to a device
+ address. If translation fails because the target is not mapped,
+ continue using the host address. */
+ GOMP_MAP_USE_DEVICE_PTR_IF_PRESENT = (GOMP_MAP_FLAG_SPECIAL_2 | 0),
+ /* If not already present, allocate. And unconditionally copy to
+ device. */
+ GOMP_MAP_ALWAYS_TO = (GOMP_MAP_FLAG_SPECIAL_2 | GOMP_MAP_TO),
+ /* If not already present, allocate. And unconditionally copy from
+ device. */
+ GOMP_MAP_ALWAYS_FROM = (GOMP_MAP_FLAG_SPECIAL_2
+ | GOMP_MAP_FROM),
+ /* If not already present, allocate. And unconditionally copy to and from
+ device. */
+ GOMP_MAP_ALWAYS_TOFROM = (GOMP_MAP_FLAG_SPECIAL_2
+ | GOMP_MAP_TOFROM),
+ /* Map a sparse struct; the address is the base of the structure, alignment
+ its required alignment, and size is the number of adjacent entries
+ that belong to the struct. The adjacent entries should be sorted by
+ increasing address, so it is easy to determine lowest needed address
+ (address of the first adjacent entry) and highest needed address
+ (address of the last adjacent entry plus its size). */
+ GOMP_MAP_STRUCT = (GOMP_MAP_FLAG_SPECIAL_2
+ | GOMP_MAP_FLAG_SPECIAL | 0),
+ /* On a location of a pointer/reference that is assumed to be already mapped
+ earlier, store the translated address of the preceding mapping.
+ No refcount is bumped by this, and the store is done unconditionally. */
+ GOMP_MAP_ALWAYS_POINTER = (GOMP_MAP_FLAG_SPECIAL_2
+ | GOMP_MAP_FLAG_SPECIAL | 1),
+ /* Like GOMP_MAP_POINTER, but allow zero-length array section, i.e. set to
+ NULL if target is not mapped. */
+ GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION
+ = (GOMP_MAP_FLAG_SPECIAL_2
+ | GOMP_MAP_FLAG_SPECIAL | 2),
+ /* Forced deallocation of zero length array section. */
+ GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION
+ = (GOMP_MAP_FLAG_SPECIAL_2
+ | GOMP_MAP_FLAG_SPECIAL | 3),
+ /* Decrement usage count and deallocate if zero. */
+ GOMP_MAP_RELEASE = (GOMP_MAP_FLAG_SPECIAL_2
+ | GOMP_MAP_DELETE),
+ /* The attach/detach mappings below use the OMP_CLAUSE_SIZE field as a
+ bias. This will typically be zero, except when mapping an array slice
+ with a non-zero base. In that case the bias will indicate the
+ (positive) difference between the start of the actual mapped data and
+ the "virtual" origin of the array.
+ In OpenACC, attach a pointer to a mapped struct field. */
+ GOMP_MAP_ATTACH = (GOMP_MAP_DEEP_COPY | 0),
+ /* In OpenACC, detach a pointer to a mapped struct field. */
+ GOMP_MAP_DETACH = (GOMP_MAP_DEEP_COPY | 1),
+ /* In OpenACC, forcibly detach a pointer to a mapped struct field. */
+ GOMP_MAP_FORCE_DETACH = (GOMP_MAP_DEEP_COPY
+ | GOMP_MAP_FLAG_FORCE | 1),
+
+ /* Like GOMP_MAP_ATTACH, but allow attaching to zero-length array sections
+ (i.e. set to NULL when the array section is not mapped). Currently only used
+ by OpenMP. */
+ GOMP_MAP_ATTACH_ZERO_LENGTH_ARRAY_SECTION
+ = (GOMP_MAP_DEEP_COPY | 2),
+
+ /* Internal to GCC, not used in libgomp. */
+ /* Do not map, but pointer assign a pointer instead. */
+ GOMP_MAP_FIRSTPRIVATE_POINTER = (GOMP_MAP_LAST | 1),
+ /* Do not map, but pointer assign a reference instead. */
+ GOMP_MAP_FIRSTPRIVATE_REFERENCE = (GOMP_MAP_LAST | 2),
+ /* An attach or detach operation. Rewritten to the appropriate type during
+ gimplification, depending on directive (i.e. "enter data" or
+ parallel/kernels region vs. "exit data"). */
+ GOMP_MAP_ATTACH_DETACH = (GOMP_MAP_LAST | 3)
+ };
+
+#define GOMP_MAP_COPY_TO_P(X) \
+ (!((X) & GOMP_MAP_FLAG_SPECIAL) \
+ && ((X) & GOMP_MAP_FLAG_TO))
+
+#define GOMP_MAP_COPY_FROM_P(X) \
+ (!((X) & GOMP_MAP_FLAG_SPECIAL) \
+ && ((X) & GOMP_MAP_FLAG_FROM))
+
+#define GOMP_MAP_ALWAYS_POINTER_P(X) \
+ ((X) == GOMP_MAP_ALWAYS_POINTER)
+
+#define GOMP_MAP_POINTER_P(X) \
+ ((X) == GOMP_MAP_POINTER \
+ || (X) == GOMP_MAP_POINTER_TO_ZERO_LENGTH_ARRAY_SECTION)
+
+#define GOMP_MAP_ALWAYS_TO_P(X) \
+ (((X) == GOMP_MAP_ALWAYS_TO) || ((X) == GOMP_MAP_ALWAYS_TOFROM))
+
+#define GOMP_MAP_ALWAYS_FROM_P(X) \
+ (((X) == GOMP_MAP_ALWAYS_FROM) || ((X) == GOMP_MAP_ALWAYS_TOFROM))
+
+#define GOMP_MAP_ALWAYS_P(X) \
+ (GOMP_MAP_ALWAYS_TO_P (X) || ((X) == GOMP_MAP_ALWAYS_FROM))
+
+#define GOMP_MAP_IMPLICIT_P(X) \
+ (((X) & GOMP_MAP_FLAG_SPECIAL_BITS) == GOMP_MAP_IMPLICIT)
+
+
+/* Asynchronous behavior. Keep in sync with
+ libgomp/{openacc.h,openacc.f90,openacc_lib.h}:acc_async_t. */
+
+#define GOMP_ASYNC_NOVAL -1
+#define GOMP_ASYNC_SYNC -2
+
+
+/* Device codes. Keep in sync with
+ libgomp/{openacc.h,openacc.f90,openacc_lib.h}:acc_device_t as well as
+ libgomp/libgomp-plugin.h. */
+#define GOMP_DEVICE_NONE 0
+#define GOMP_DEVICE_DEFAULT 1
+#define GOMP_DEVICE_HOST 2
+/* #define GOMP_DEVICE_HOST_NONSHM 3 removed. */
+#define GOMP_DEVICE_NOT_HOST 4
+#define GOMP_DEVICE_NVIDIA_PTX 5
+/* #define GOMP_DEVICE_INTEL_MIC 6 removed. */
+/* #define GOMP_DEVICE_HSA 7 removed. */
+#define GOMP_DEVICE_GCN 8
+
+/* We have a compatibility issue. OpenMP 5.2 introduced
+ omp_initial_device with a value of -1, which clashes with our
+ GOMP_DEVICE_ICV, so we need to remap user supplied device
+ ids, -1 (aka omp_initial_device) to GOMP_DEVICE_HOST_FALLBACK,
+ and -2 (one of many non-conforming device numbers, but with
+ OMP_TARGET_OFFLOAD=mandatory needs to be treated as
+ omp_invalid_device) to -3 (so that for dev_num >= -2U we can
+ subtract 1). -4 is then what we use for omp_invalid_device,
+ which unlike the other non-conforming device numbers results
+ in a fatal error regardless of OMP_TARGET_OFFLOAD. */
+#define GOMP_DEVICE_ICV -1
+#define GOMP_DEVICE_HOST_FALLBACK -2
+#define GOMP_DEVICE_INVALID -4
+
+/* GOMP_task/GOMP_taskloop* flags argument. */
+#define GOMP_TASK_FLAG_UNTIED (1 << 0)
+#define GOMP_TASK_FLAG_FINAL (1 << 1)
+#define GOMP_TASK_FLAG_MERGEABLE (1 << 2)
+#define GOMP_TASK_FLAG_DEPEND (1 << 3)
+#define GOMP_TASK_FLAG_PRIORITY (1 << 4)
+#define GOMP_TASK_FLAG_UP (1 << 8)
+#define GOMP_TASK_FLAG_GRAINSIZE (1 << 9)
+#define GOMP_TASK_FLAG_IF (1 << 10)
+#define GOMP_TASK_FLAG_NOGROUP (1 << 11)
+#define GOMP_TASK_FLAG_REDUCTION (1 << 12)
+#define GOMP_TASK_FLAG_DETACH (1 << 13)
+#define GOMP_TASK_FLAG_STRICT (1 << 14)
+
+/* GOMP_target{_ext,update_ext,enter_exit_data} flags argument. */
+#define GOMP_TARGET_FLAG_NOWAIT (1 << 0)
+#define GOMP_TARGET_FLAG_EXIT_DATA (1 << 1)
+/* Internal to libgomp. */
+#define GOMP_TARGET_FLAG_UPDATE (1U << 31)
+
+
+/* OpenACC construct flags. */
+
+/* Force host fallback execution. */
+#define GOACC_FLAG_HOST_FALLBACK (1 << 0)
+
+/* For legacy reasons, in the ABI, the GOACC_FLAGs are encoded as an inverted
+ bitmask. */
+#define GOACC_FLAGS_MARSHAL_OP BIT_NOT_EXPR
+#define GOACC_FLAGS_UNMARSHAL(X) (~(X))
+
+
+/* Versions of libgomp and device-specific plugins. GOMP_VERSION
+ should be incremented whenever an ABI-incompatible change is introduced
+ to the plugin interface defined in libgomp/libgomp.h. */
+#define GOMP_VERSION 2
+#define GOMP_VERSION_NVIDIA_PTX 1
+#define GOMP_VERSION_GCN 3
+
+#define GOMP_VERSION_PACK(LIB, DEV) (((LIB) << 16) | (DEV))
+#define GOMP_VERSION_LIB(PACK) (((PACK) >> 16) & 0xffff)
+#define GOMP_VERSION_DEV(PACK) ((PACK) & 0xffff)
+
+#define GOMP_DIM_GANG 0
+#define GOMP_DIM_WORKER 1
+#define GOMP_DIM_VECTOR 2
+#define GOMP_DIM_MAX 3
+#define GOMP_DIM_MASK(X) (1u << (X))
+
+/* Variadic launch arguments. End of list is marked by a zero. */
+#define GOMP_LAUNCH_DIM 1 /* Launch dimensions, op = mask */
+#define GOMP_LAUNCH_ASYNC 2 /* Async, op = cst val if not MAX */
+#define GOMP_LAUNCH_WAIT 3 /* Waits, op = num waits. */
+#define GOMP_LAUNCH_CODE_SHIFT 28
+#define GOMP_LAUNCH_DEVICE_SHIFT 16
+#define GOMP_LAUNCH_OP_SHIFT 0
+#define GOMP_LAUNCH_PACK(CODE,DEVICE,OP) \
+ (((CODE) << GOMP_LAUNCH_CODE_SHIFT) \
+ | ((DEVICE) << GOMP_LAUNCH_DEVICE_SHIFT) \
+ | ((OP) << GOMP_LAUNCH_OP_SHIFT))
+#define GOMP_LAUNCH_CODE(X) (((X) >> GOMP_LAUNCH_CODE_SHIFT) & 0xf)
+#define GOMP_LAUNCH_DEVICE(X) (((X) >> GOMP_LAUNCH_DEVICE_SHIFT) & 0xfff)
+#define GOMP_LAUNCH_OP(X) (((X) >> GOMP_LAUNCH_OP_SHIFT) & 0xffff)
+#define GOMP_LAUNCH_OP_MAX 0xffff
+
+/* Bitmask to apply in order to find out the intended device of a target
+ argument. */
+#define GOMP_TARGET_ARG_DEVICE_MASK ((1 << 7) - 1)
+/* The target argument is significant for all devices. */
+#define GOMP_TARGET_ARG_DEVICE_ALL 0
+
+/* Flag set when the value is stored in the subsequent element of the
+ device-specific argument array. */
+#define GOMP_TARGET_ARG_SUBSEQUENT_PARAM (1 << 7)
+
+/* Bitmask to apply to a target argument to find out the value identifier. */
+#define GOMP_TARGET_ARG_ID_MASK (((1 << 8) - 1) << 8)
+/* Target argument index of NUM_TEAMS. */
+#define GOMP_TARGET_ARG_NUM_TEAMS (1 << 8)
+/* Target argument index of THREAD_LIMIT. */
+#define GOMP_TARGET_ARG_THREAD_LIMIT (2 << 8)
+
+/* If the value is directly embedded in the target argument, it should be
+ at most 16 bits wide and shifted by this many bits. */
+#define GOMP_TARGET_ARG_VALUE_SHIFT 16
+
+/* Dependence types in omp_depend_t objects. */
+#define GOMP_DEPEND_IN 1
+#define GOMP_DEPEND_OUT 2
+#define GOMP_DEPEND_INOUT 3
+#define GOMP_DEPEND_MUTEXINOUTSET 4
+#define GOMP_DEPEND_INOUTSET 5
+
+/* Flag values for OpenMP 'requires' directive features. */
+#define GOMP_REQUIRES_UNIFIED_ADDRESS 0x10
+#define GOMP_REQUIRES_UNIFIED_SHARED_MEMORY 0x20
+#define GOMP_REQUIRES_REVERSE_OFFLOAD 0x80
+#define GOMP_REQUIRES_TARGET_USED 0x200
+
+/* HSA specific data structures. */
+
+/* Identifiers of device-specific target arguments. */
+#define GOMP_TARGET_ARG_HSA_KERNEL_ATTRIBUTES (1 << 8)
+
+#endif
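
The map-kind predicates and the version packing macros above are pure bit
manipulation, so their behavior can be checked in isolation; a minimal sketch
compiled against this header:

#include <assert.h>
#include "gomp-constants.h"

int
main (void)
{
  /* GOMP_MAP_TOFROM sets both direction bits and no special bits, so
     both copy predicates hold for it.  */
  assert (GOMP_MAP_COPY_TO_P (GOMP_MAP_TOFROM));
  assert (GOMP_MAP_COPY_FROM_P (GOMP_MAP_TOFROM));

  /* Versions pack as LIB in the high 16 bits, DEV in the low 16.  */
  int pack = GOMP_VERSION_PACK (GOMP_VERSION, GOMP_VERSION_GCN);
  assert (GOMP_VERSION_LIB (pack) == GOMP_VERSION);
  assert (GOMP_VERSION_DEV (pack) == GOMP_VERSION_GCN);
  return 0;
}
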
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/graph.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/graph.h
new file mode 100644
index 0000000..8e68a35
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/graph.h
@@ -0,0 +1,27 @@
+/* Header file for graph routines.
+ Copyright (C) 1999-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GRAPH_H
+#define GCC_GRAPH_H
+
+extern void print_graph_cfg (const char *, struct function *);
+extern void clean_graph_dump_file (const char *);
+extern void finish_graph_dump_file (const char *);
+
+#endif /* ! GCC_GRAPH_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/graphds.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/graphds.h
new file mode 100644
index 0000000..40efde5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/graphds.h
@@ -0,0 +1,69 @@
+/* Graph representation.
+ Copyright (C) 2007-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GRAPHDS_H
+#define GCC_GRAPHDS_H
+
+/* Structure representing edge of a graph. */
+
+struct graph_edge
+{
+ int src, dest; /* Source and destination. */
+ struct graph_edge *pred_next, *succ_next;
+ /* Next edge in predecessor and successor lists. */
+ void *data; /* Data attached to the edge. */
+};
+
+/* Structure representing vertex of a graph. */
+
+struct vertex
+{
+ struct graph_edge *pred, *succ;
+ /* Lists of predecessors and successors. */
+ int component; /* Number of dfs restarts before reaching the
+ vertex. */
+ int post; /* Postorder number. */
+ void *data; /* Data attached to the vertex. */
+};
+
+/* Structure representing a graph. */
+
+struct graph
+{
+ int n_vertices; /* Number of vertices. */
+ struct vertex *vertices; /* The vertices. */
+ struct obstack ob; /* Obstack for vertex and edge allocation. */
+};
+
+struct graph *new_graph (int);
+void dump_graph (FILE *, struct graph *);
+struct graph_edge *add_edge (struct graph *, int, int);
+void identify_vertices (struct graph *, int, int);
+typedef bool (*skip_edge_callback) (struct graph_edge *);
+int graphds_dfs (struct graph *, int *, int,
+ vec<int> *, bool, bitmap, skip_edge_callback = NULL);
+int graphds_scc (struct graph *, bitmap, skip_edge_callback = NULL,
+ vec<int> * = NULL);
+void graphds_domtree (struct graph *, int, int *, int *, int *);
+typedef void (*graphds_edge_callback) (struct graph *,
+ struct graph_edge *, void *);
+void for_each_edge (struct graph *, graphds_edge_callback, void *);
+void free_graph (struct graph *g);
+
+#endif /* GCC_GRAPHDS_H */
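
A minimal sketch of the API above: build a three-vertex cycle and ask for its
strongly connected components. This assumes graphds_scc writes each vertex's
component number into vertices[i].component and that a NULL bitmap means the
whole graph is considered; the function name is hypothetical.

/* Hypothetical sketch: a three-vertex cycle collapses to one SCC.  */
static int
demo_scc (void)
{
  struct graph *g = new_graph (3);
  add_edge (g, 0, 1);
  add_edge (g, 1, 2);
  add_edge (g, 2, 0);
  int n = graphds_scc (g, NULL);  /* Assumed: NULL bitmap = whole graph.  */
  /* Expect n == 1 here, with g->vertices[i].component numbering the SCCs.  */
  free_graph (g);
  return n;
}
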
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/graphite.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/graphite.h
new file mode 100644
index 0000000..dc6e482
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/graphite.h
@@ -0,0 +1,452 @@
+/* Graphite polyhedral representation.
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+ Contributed by Sebastian Pop <sebastian.pop@amd.com> and
+ Tobias Grosser <grosser@fim.uni-passau.de>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GRAPHITE_POLY_H
+#define GCC_GRAPHITE_POLY_H
+
+#include "sese.h"
+
+typedef struct poly_dr *poly_dr_p;
+
+typedef struct poly_bb *poly_bb_p;
+
+typedef struct scop *scop_p;
+
+typedef unsigned graphite_dim_t;
+
+inline graphite_dim_t scop_nb_params (scop_p);
+
+/* A data reference can write or read some memory or we
+ just know it may write some memory. */
+enum poly_dr_type
+{
+ PDR_READ,
+ /* PDR_MAY_READs are represented using PDR_READ. This does not
+ limit the expressiveness. */
+ PDR_WRITE,
+ PDR_MAY_WRITE
+};
+
+struct poly_dr
+{
+ /* An identifier for this PDR. */
+ int id;
+
+ /* The number of data refs identical to this one in the PBB. */
+ int nb_refs;
+
+ /* A pointer to the gimple stmt containing this reference. */
+ gimple *stmt;
+
+ /* A pointer to the PBB that contains this data reference. */
+ poly_bb_p pbb;
+
+ enum poly_dr_type type;
+
+ /* The access polyhedron contains the polyhedral space this data
+ reference will access.
+
+ The polyhedron contains these dimensions:
+
+ - The alias set (a):
+ Every memory access is classified in at least one alias set.
+
+ - The subscripts (s_0, ..., s_n):
+ The memory is accessed using zero or more subscript dimensions.
+
+ - The iteration domain (variables and parameters)
+
+ Do not hardcode the dimensions. Use the following accessor functions:
+ - pdr_alias_set_dim
+ - pdr_subscript_dim
+ - pdr_iterator_dim
+ - pdr_parameter_dim
+
+ Example:
+
+ | int A[1335][123];
+ | int *p = malloc ();
+ |
+ | k = ...
+ | for i
+ | {
+ | if (unknown_function ())
+ | p = A;
+ | ... = p[?][?];
+ | for j
+ | A[i][j+k] = m;
+ | }
+
+ The data access A[i][j+k] in alias set "5" is described like this:
+
+ | i j k a s0 s1 1
+ | 0 0 0 1 0 0 -5 = 0
+ |-1 0 0 0 1 0 0 = 0
+ | 0 -1 -1 0 0 1 0 = 0
+ | 0 0 0 0 1 0 0 >= 0 # The last four lines describe the
+ | 0 0 0 0 0 1 0 >= 0 # array size.
+ | 0 0 0 0 -1 0 1335 >= 0
+ | 0 0 0 0 0 -1 123 >= 0
+
+ The pointer "*p" in alias set "5" and "7" is described as a union of
+ polyhedra:
+
+ | i k a s0 1
+ | 0 0 1 0 -5 = 0
+ | 0 0 0 1 0 >= 0
+
+ "or"
+
+ | i k a s0 1
+ | 0 0 1 0 -7 = 0
+ | 0 0 0 1 0 >= 0
+
+ "*p" accesses all of the object allocated with 'malloc'.
+
+ The scalar data access "m" is represented as an array with zero subscript
+ dimensions.
+
+ | i j k a 1
+ | 0 0 0 -1 15 = 0
+
+ The difference between the graphite internal format for access data and
+ the OpenScop format is in the order of columns.
+ Instead of having:
+
+ | i j k a s0 s1 1
+ | 0 0 0 1 0 0 -5 = 0
+ |-1 0 0 0 1 0 0 = 0
+ | 0 -1 -1 0 0 1 0 = 0
+ | 0 0 0 0 1 0 0 >= 0 # The last four lines describe the
+ | 0 0 0 0 0 1 0 >= 0 # array size.
+ | 0 0 0 0 -1 0 1335 >= 0
+ | 0 0 0 0 0 -1 123 >= 0
+
+ In OpenScop we have:
+
+ | a s0 s1 i j k 1
+ | 1 0 0 0 0 0 -5 = 0
+ | 0 1 0 -1 0 0 0 = 0
+ | 0 0 1 0 -1 -1 0 = 0
+ | 0 1 0 0 0 0 0 >= 0 # The last four lines describe the
+ | 0 0 1 0 0 0 0 >= 0 # array size.
+ | 0 -1 0 0 0 0 1335 >= 0
+ | 0 0 -1 0 0 0 123 >= 0
+
+ The OpenScop access function is printed as follows:
+
+ | 1 # The number of disjunct components in a union of access functions.
+ | R C O I L P # Described below.
+ | a s0 s1 i j k 1
+ | 1 0 0 0 0 0 -5 = 0
+ | 0 1 0 -1 0 0 0 = 0
+ | 0 0 1 0 -1 -1 0 = 0
+ | 0 1 0 0 0 0 0 >= 0 # The last four lines describe the
+ | 0 0 1 0 0 0 0 >= 0 # array size.
+ | 0 -1 0 0 0 0 1335 >= 0
+ | 0 0 -1 0 0 0 123 >= 0
+
+ Where:
+ - R: Number of rows.
+ - C: Number of columns.
+ - O: Number of output dimensions = alias set + number of subscripts.
+ - I: Number of input dimensions (iterators).
+ - L: Number of local (existentially quantified) dimensions.
+ - P: Number of parameters.
+
+ In the example, the vector "R C O I L P" is "7 7 3 2 0 1". */
+ isl_map *accesses;
+ isl_set *subscript_sizes;
+};
+
+#define PDR_ID(PDR) (PDR->id)
+#define PDR_NB_REFS(PDR) (PDR->nb_refs)
+#define PDR_PBB(PDR) (PDR->pbb)
+#define PDR_TYPE(PDR) (PDR->type)
+#define PDR_ACCESSES(PDR) (NULL)
+
+void new_poly_dr (poly_bb_p, gimple *, enum poly_dr_type,
+ isl_map *, isl_set *);
+void debug_pdr (poly_dr_p);
+void print_pdr (FILE *, poly_dr_p);
+
+inline bool
+pdr_read_p (poly_dr_p pdr)
+{
+ return PDR_TYPE (pdr) == PDR_READ;
+}
+
+/* Returns true when PDR is a "write". */
+
+inline bool
+pdr_write_p (poly_dr_p pdr)
+{
+ return PDR_TYPE (pdr) == PDR_WRITE;
+}
+
+/* Returns true when PDR is a "may write". */
+
+inline bool
+pdr_may_write_p (poly_dr_p pdr)
+{
+ return PDR_TYPE (pdr) == PDR_MAY_WRITE;
+}
+
+/* POLY_BB represents a blackbox in the polyhedral model. */
+
+struct poly_bb
+{
+ /* Pointer to a basic block or a statement in the compiler. */
+ gimple_poly_bb_p black_box;
+
+ /* Pointer to the SCOP containing this PBB. */
+ scop_p scop;
+
+ /* The iteration domain of this bb. The layout of this polyhedron
+ is I|G with I the iteration domain, G the context parameters.
+
+ Example:
+
+ for (i = a - 7*b + 8; i <= 3*a + 13*b + 20; i++)
+ for (j = 2; j <= 2*i + 5; j++)
+ for (k = 0; k <= 5; k++)
+ S (i,j,k)
+
+ Loop iterators: i, j, k
+ Parameters: a, b
+
+ | i >= a - 7b + 8
+ | i <= 3a + 13b + 20
+ | j >= 2
+ | j <= 2i + 5
+ | k >= 0
+ | k <= 5
+
+ The number of variables in the DOMAIN may change and is not
+ related to the number of loops in the original code. */
+ isl_set *domain;
+ isl_set *iterators;
+
+ /* The data references we access. */
+ vec<poly_dr_p> drs;
+
+ /* The last basic block generated for this pbb. */
+ basic_block new_bb;
+};
+
+#define PBB_BLACK_BOX(PBB) ((gimple_poly_bb_p) PBB->black_box)
+#define PBB_SCOP(PBB) (PBB->scop)
+#define PBB_DRS(PBB) (PBB->drs)
+
+extern poly_bb_p new_poly_bb (scop_p, gimple_poly_bb_p);
+extern void print_pbb_domain (FILE *, poly_bb_p);
+extern void print_pbb (FILE *, poly_bb_p);
+extern void print_scop_context (FILE *, scop_p);
+extern void print_scop (FILE *, scop_p);
+extern void debug_pbb_domain (poly_bb_p);
+extern void debug_pbb (poly_bb_p);
+extern void print_pdrs (FILE *, poly_bb_p);
+extern void debug_pdrs (poly_bb_p);
+extern void debug_scop_context (scop_p);
+extern void debug_scop (scop_p);
+extern void print_scop_params (FILE *, scop_p);
+extern void debug_scop_params (scop_p);
+extern void print_iteration_domain (FILE *, poly_bb_p);
+extern void print_iteration_domains (FILE *, scop_p);
+extern void debug_iteration_domain (poly_bb_p);
+extern void debug_iteration_domains (scop_p);
+extern void print_isl_set (FILE *, isl_set *);
+extern void print_isl_map (FILE *, isl_map *);
+extern void print_isl_union_map (FILE *, isl_union_map *);
+extern void print_isl_aff (FILE *, isl_aff *);
+extern void print_isl_constraint (FILE *, isl_constraint *);
+extern void print_isl_schedule (FILE *, isl_schedule *);
+extern void debug_isl_schedule (isl_schedule *);
+extern void print_isl_ast (FILE *, isl_ast_node *);
+extern void debug_isl_ast (isl_ast_node *);
+extern void debug_isl_set (isl_set *);
+extern void debug_isl_map (isl_map *);
+extern void debug_isl_union_map (isl_union_map *);
+extern void debug_isl_aff (isl_aff *);
+extern void debug_isl_constraint (isl_constraint *);
+extern void debug_gmp_value (mpz_t);
+extern void debug_scop_pbb (scop_p scop, int i);
+extern void print_schedule_ast (FILE *, __isl_keep isl_schedule *, scop_p);
+extern void debug_schedule_ast (__isl_keep isl_schedule *, scop_p);
+
+/* The basic block of the PBB. */
+
+inline basic_block
+pbb_bb (poly_bb_p pbb)
+{
+ return GBB_BB (PBB_BLACK_BOX (pbb));
+}
+
+inline int
+pbb_index (poly_bb_p pbb)
+{
+ return pbb_bb (pbb)->index;
+}
+
+/* The loop of the PBB. */
+
+inline loop_p
+pbb_loop (poly_bb_p pbb)
+{
+ return gbb_loop (PBB_BLACK_BOX (pbb));
+}
+
+/* The scop that contains the PDR. */
+
+inline scop_p
+pdr_scop (poly_dr_p pdr)
+{
+ return PBB_SCOP (PDR_PBB (pdr));
+}
+
+/* Set black box of PBB to BLACKBOX. */
+
+inline void
+pbb_set_black_box (poly_bb_p pbb, gimple_poly_bb_p black_box)
+{
+ pbb->black_box = black_box;
+}
+
+/* A helper structure to keep track of data references, polyhedral BBs, and
+ alias sets. */
+
+struct dr_info
+{
+ enum {
+ invalid_alias_set = -1
+ };
+ /* The data reference. */
+ data_reference_p dr;
+
+ /* The polyhedral BB containing this DR. */
+ poly_bb_p pbb;
+
+ /* ALIAS_SET is the SCC number assigned by a graph_dfs of the alias graph.
+ -1 is an invalid alias set. */
+ int alias_set;
+
+ /* Construct a DR_INFO from a data reference DR, an ALIAS_SET, and a PBB. */
+ dr_info (data_reference_p dr, poly_bb_p pbb,
+ int alias_set = invalid_alias_set)
+ : dr (dr), pbb (pbb), alias_set (alias_set) {}
+};
+
+/* A SCOP is a Static Control Part of the program, simple enough to be
+ represented in polyhedral form. */
+struct scop
+{
+ /* A SCOP is defined as a SESE region. */
+ sese_info_p scop_info;
+
+ /* Number of parameters in SCoP. */
+ graphite_dim_t nb_params;
+
+ /* The maximum alias set as assigned to drs by build_alias_sets. */
+ unsigned max_alias_set;
+
+ /* All the basic blocks in this scop that contain memory references
+ and that will be represented as statements in the polyhedral
+ representation. */
+ vec<poly_bb_p> pbbs;
+
+ /* All the data references in this scop. */
+ vec<dr_info> drs;
+
+ /* The context describes known restrictions concerning the parameters
+ and relations in between the parameters.
+
+ void f (int8_t a, uint16_t b) {
+ c = 2 a + b;
+ ...
+ }
+
+ Here we can add these restrictions to the context:
+
+ -128 <= a <= 127
+ 0 <= b <= 65535
+ c = 2a + b */
+ isl_set *param_context;
+
+ /* The context used internally by isl. */
+ isl_ctx *isl_context;
+
+ /* SCoP original schedule. */
+ isl_schedule *original_schedule;
+
+ /* SCoP transformed schedule. */
+ isl_schedule *transformed_schedule;
+
+ /* The data dependence relation among the data references in this scop. */
+ isl_union_map *dependence;
+};
+
+extern scop_p new_scop (edge, edge);
+extern void free_scop (scop_p);
+extern gimple_poly_bb_p new_gimple_poly_bb (basic_block, vec<data_reference_p>,
+ vec<scalar_use>, vec<tree>);
+extern bool apply_poly_transforms (scop_p);
+
+/* Set the region of SCOP to REGION. */
+
+inline void
+scop_set_region (scop_p scop, sese_info_p region)
+{
+ scop->scop_info = region;
+}
+
+/* Returns the number of parameters for SCOP. */
+
+inline graphite_dim_t
+scop_nb_params (scop_p scop)
+{
+ return scop->nb_params;
+}
+
+/* Set the number of params of SCOP to NB_PARAMS. */
+
+inline void
+scop_set_nb_params (scop_p scop, graphite_dim_t nb_params)
+{
+ scop->nb_params = nb_params;
+}
+
+extern void scop_get_dependences (scop_p scop);
+
+bool
+carries_deps (__isl_keep isl_union_map *schedule,
+ __isl_keep isl_union_map *deps,
+ int depth);
+
+extern bool build_poly_scop (scop_p);
+extern bool graphite_regenerate_ast_isl (scop_p);
+extern void build_scops (vec<scop_p> *);
+extern tree cached_scalar_evolution_in_region (const sese_l &, loop_p, tree);
+extern void dot_all_sese (FILE *, vec<sese_l> &);
+extern void dot_sese (sese_l &);
+extern void dot_cfg ();
+
+#endif
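
The three pdr_*_p predicates above partition poly_dr_type exactly, which makes
classification a pair of tests; a trivial sketch (the function name is
hypothetical):

/* Hypothetical sketch: name the kind of a polyhedral data reference.  */
static const char *
pdr_kind_name (poly_dr_p pdr)
{
  if (pdr_read_p (pdr))
    return "read";
  if (pdr_write_p (pdr))
    return "write";
  return "may-write";  /* The remaining case, pdr_may_write_p.  */
}
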
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/graphviz.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/graphviz.h
new file mode 100644
index 0000000..e6be819
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/graphviz.h
@@ -0,0 +1,59 @@
+/* Helper code for graphviz output.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GRAPHVIZ_H
+#define GCC_GRAPHVIZ_H
+
+#include "pretty-print.h" /* for ATTRIBUTE_GCC_PPDIAG. */
+
+/* A class for writing .dot output to a pretty_printer with
+ indentation to show nesting. */
+
+class graphviz_out {
+ public:
+ graphviz_out (pretty_printer *pp);
+
+ void print (const char *fmt, ...)
+ ATTRIBUTE_GCC_PPDIAG(2,3);
+ void println (const char *fmt, ...)
+ ATTRIBUTE_GCC_PPDIAG(2,3);
+
+ void indent () { m_indent++; }
+ void outdent () { m_indent--; }
+
+ void write_indent ();
+
+ void begin_tr ();
+ void end_tr ();
+
+ void begin_td ();
+ void end_td ();
+
+ void begin_trtd ();
+ void end_tdtr ();
+
+ pretty_printer *get_pp () const { return m_pp; }
+
+ private:
+ pretty_printer *m_pp;
+ int m_indent;
+};
+
+#endif /* GCC_GRAPHVIZ_H */
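
graphviz_out only tracks indentation and forwards to the pretty_printer it
wraps, so producing a .dot snippet is a matter of println calls; a minimal
sketch (the function name is hypothetical; pp_formatted_text is the stock
pretty-print accessor):

/* Hypothetical sketch: emit a two-node digraph through graphviz_out.  */
static void
demo_dot (pretty_printer *pp)
{
  graphviz_out gv (pp);
  gv.println ("digraph \"demo\" {");
  gv.indent ();
  gv.println ("a -> b;");
  gv.outdent ();
  gv.println ("}");
  /* The accumulated text can then be read via pp_formatted_text (pp).  */
}
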
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gsstruct.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gsstruct.def
new file mode 100644
index 0000000..33c033e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gsstruct.def
@@ -0,0 +1,54 @@
+/* This file contains the definitions for the gimple IR structure
+ enumeration used in GCC.
+
+ Copyright (C) 2007-2023 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez <aldyh@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* The format of this file is
+ DEFGSSTRUCT(GSS enumeration value, structure name, has-tree-operands).
+ Each enum value should correspond with a single member of the union
+ gimple_statement_d. */
+
+DEFGSSTRUCT(GSS_BASE, gimple, false)
+DEFGSSTRUCT(GSS_WITH_OPS, gimple_statement_with_ops, true)
+DEFGSSTRUCT(GSS_WITH_MEM_OPS_BASE, gimple_statement_with_memory_ops_base, false)
+DEFGSSTRUCT(GSS_WITH_MEM_OPS, gimple_statement_with_memory_ops, true)
+DEFGSSTRUCT(GSS_CALL, gcall, true)
+DEFGSSTRUCT(GSS_ASM, gasm, true)
+DEFGSSTRUCT(GSS_BIND, gbind, false)
+DEFGSSTRUCT(GSS_PHI, gphi, false)
+DEFGSSTRUCT(GSS_TRY, gtry, false)
+DEFGSSTRUCT(GSS_CATCH, gcatch, false)
+DEFGSSTRUCT(GSS_EH_FILTER, geh_filter, false)
+DEFGSSTRUCT(GSS_EH_MNT, geh_mnt, false)
+DEFGSSTRUCT(GSS_EH_CTRL, gimple_statement_eh_ctrl, false)
+DEFGSSTRUCT(GSS_EH_ELSE, geh_else, false)
+DEFGSSTRUCT(GSS_WCE, gimple_statement_wce, false)
+DEFGSSTRUCT(GSS_OMP, gimple_statement_omp, false)
+DEFGSSTRUCT(GSS_OMP_CRITICAL, gomp_critical, false)
+DEFGSSTRUCT(GSS_OMP_FOR, gomp_for, false)
+DEFGSSTRUCT(GSS_OMP_PARALLEL_LAYOUT, gimple_statement_omp_parallel_layout, false)
+DEFGSSTRUCT(GSS_OMP_TASK, gomp_task, false)
+DEFGSSTRUCT(GSS_OMP_SECTIONS, gomp_sections, false)
+DEFGSSTRUCT(GSS_OMP_SINGLE_LAYOUT, gimple_statement_omp_single_layout, false)
+DEFGSSTRUCT(GSS_OMP_CONTINUE, gomp_continue, false)
+DEFGSSTRUCT(GSS_OMP_ATOMIC_LOAD, gomp_atomic_load, false)
+DEFGSSTRUCT(GSS_OMP_ATOMIC_STORE_LAYOUT, gomp_atomic_store, false)
+DEFGSSTRUCT(GSS_ASSUME, gimple_statement_assume, false)
+DEFGSSTRUCT(GSS_TRANSACTION, gtransaction, false)
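
Like the other .def files in GCC, this list is consumed by defining
DEFGSSTRUCT, including the file, and undefining it again; gimple.h does
essentially the following to build the GSS_* enumeration:

/* Sketch of the usual .def consumption pattern.  */
enum gimple_statement_structure_enum {
#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM,
#include "gsstruct.def"
#undef DEFGSSTRUCT
  LAST_GSS_ENUM
};
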
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gsyms.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gsyms.h
new file mode 100644
index 0000000..9a79f1a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gsyms.h
@@ -0,0 +1,97 @@
+/* Copyright (C) 1992-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* For cross compilation, use the portable definitions from the COFF
+ documentation. */
+#ifndef GCC_GSYMS_H
+#define GCC_GSYMS_H
+
+#define __GNU_SYMS__
+
+enum sdb_storage_class
+{
+ C_EFCN = -1,
+ C_NULL = 0,
+ C_AUTO = 1,
+ C_EXT = 2,
+ C_STAT = 3,
+ C_REG = 4,
+ C_EXTDEF = 5,
+ C_LABEL = 6,
+ C_ULABEL = 7,
+ C_MOS = 8,
+ C_ARG = 9,
+ C_STRTAG = 10,
+ C_MOU = 11,
+ C_UNTAG = 12,
+ C_TPDEF = 13,
+ C_USTATIC = 14,
+ C_ENTAG = 15,
+ C_MOE = 16,
+ C_REGPARM = 17,
+ C_FIELD = 18,
+
+ C_BLOCK = 100,
+ C_FCN = 101,
+ C_EOS = 102,
+ C_FILE = 103,
+ C_LINE = 104,
+ C_ALIAS = 105,
+ C_HIDDEN = 106
+};
+
+enum sdb_type
+{
+ T_NULL = 0,
+ T_ARG = 1,
+ T_VOID = 1,
+ T_CHAR = 2,
+ T_SHORT = 3,
+ T_INT = 4,
+ T_LONG = 5,
+ T_FLOAT = 6,
+ T_DOUBLE = 7,
+ T_STRUCT = 8,
+ T_UNION = 9,
+ T_ENUM = 10,
+ T_MOE = 11,
+ T_UCHAR = 12,
+ T_USHORT = 13,
+ T_UINT = 14,
+ T_ULONG = 15
+};
+
+enum sdb_type_class
+{
+ DT_NON = 0,
+ DT_PTR = 1,
+ DT_FCN = 2,
+ DT_ARY = 3
+};
+
+enum sdb_masks
+{
+ N_BTMASK = 017,
+ N_TMASK = 060,
+ N_TMASK1 = 0300,
+ N_TMASK2 = 0360,
+ N_BTSHFT = 4,
+ N_TSHIFT = 2
+};
+
+#endif /* GCC_GSYMS_H */
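
In the COFF/SDB encoding these constants describe, the basic type occupies the
low N_BTSHFT bits and each 2-bit field above it records one derivation; a
small sketch of decoding a type word (the function name is hypothetical):

/* Hypothetical sketch: decode a COFF/SDB type word for "pointer to int".  */
static void
demo_decode (void)
{
  unsigned int type = (DT_PTR << N_BTSHFT) | T_INT;
  unsigned int basic = type & N_BTMASK;               /* == T_INT  */
  unsigned int deriv = (type & N_TMASK) >> N_BTSHFT;  /* == DT_PTR  */
  (void) basic; (void) deriv;
}
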
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gsyslimits.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gsyslimits.h
new file mode 100644
index 0000000..a362802
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gsyslimits.h
@@ -0,0 +1,8 @@
+/* syslimits.h stands for the system's own limits.h file.
+ If we can use it unmodified, then we install this text.
+ If fixincludes fixes it, then the fixed version is installed
+ instead of this text. */
+
+#define _GCC_NEXT_LIMITS_H /* tell gcc's limits.h to recurse */
+#include_next <limits.h>
+#undef _GCC_NEXT_LIMITS_H
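
#include_next resumes the header search in the directory after the one where
the current file was found, which is what lets this stub defer to the system's
limits.h; a hypothetical wrapper header using the same idea:

/* Hypothetical illustration: a wrapper that defers to the real header
   found later on the include path.  */
#ifndef MY_LIMITS_WRAPPER_H
#define MY_LIMITS_WRAPPER_H
#include_next <limits.h>  /* Picks up the next limits.h on the path.  */
#endif
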
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gtm-builtins.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gtm-builtins.def
new file mode 100644
index 0000000..6d5cfb9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gtm-builtins.def
@@ -0,0 +1,212 @@
+DEF_TM_BUILTIN (BUILT_IN_TM_START, "_ITM_beginTransaction",
+ BT_FN_UINT32_UINT32_VAR, ATTR_TM_NOTHROW_RT_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_COMMIT, "_ITM_commitTransaction",
+ BT_FN_VOID, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_COMMIT_EH, "_ITM_commitTransactionEH",
+ BT_FN_VOID_PTR, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_ABORT, "_ITM_abortTransaction",
+ BT_FN_VOID_INT, ATTR_TM_NORETURN_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_IRREVOCABLE, "_ITM_changeTransactionMode",
+ BT_FN_VOID_INT, ATTR_TM_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_MEMCPY, "_ITM_memcpyRtWt",
+ BT_FN_VOID_PTR_CONST_PTR_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_MEMCPY_RNWT, "_ITM_memcpyRnWt",
+ BT_FN_VOID_PTR_CONST_PTR_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_MEMCPY_RTWN, "_ITM_memcpyRtWn",
+ BT_FN_VOID_PTR_CONST_PTR_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_MEMMOVE, "_ITM_memmoveRtWt",
+ BT_FN_VOID_PTR_CONST_PTR_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_MEMSET, "_ITM_memsetW",
+ BT_FN_VOID_PTR_INT_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_GETTMCLONE_IRR, "_ITM_getTMCloneOrIrrevocable",
+ BT_FN_PTR_PTR, ATTR_TM_CONST_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_GETTMCLONE_SAFE, "_ITM_getTMCloneSafe",
+ BT_FN_PTR_PTR, ATTR_TM_CONST_NOTHROW_LIST)
+
+/* Memory allocation builtins. */
+DEF_TM_BUILTIN (BUILT_IN_TM_MALLOC, "_ITM_malloc",
+ BT_FN_PTR_SIZE, ATTR_TMPURE_MALLOC_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_CALLOC, "_ITM_calloc",
+ BT_FN_PTR_SIZE_SIZE, ATTR_TMPURE_MALLOC_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_FREE, "_ITM_free",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+
+/* Logging builtins. */
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_1, "_ITM_LU1",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_2, "_ITM_LU2",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_4, "_ITM_LU4",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_8, "_ITM_LU8",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_FLOAT, "_ITM_LF",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_DOUBLE, "_ITM_LD",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_LDOUBLE, "_ITM_LE",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG, "_ITM_LB",
+ BT_FN_VOID_VPTR_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
+
+/* These stubs should get defined in the backend if applicable. */
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOG_M64, "__builtin__ITM_LM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOG_M128, "__builtin__ITM_LM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOG_M256, "__builtin__ITM_LM256")
+
+/* Writes.
+
+ Note: The writes must appear in the following order: STORE, WAR, WAW.
+ The TM optimizations depend on this order.
+
+ BUILT_IN_TM_STORE_1 must be the first builtin.
+ BUILTIN_TM_LOAD_STORE_P depends on this. */
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_1, "_ITM_WU1",
+ BT_FN_VOID_VPTR_I1, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_1, "_ITM_WaRU1",
+ BT_FN_VOID_VPTR_I1, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_1, "_ITM_WaWU1",
+ BT_FN_VOID_VPTR_I1, ATTR_TM_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_2, "_ITM_WU2",
+ BT_FN_VOID_VPTR_I2, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_2, "_ITM_WaRU2",
+ BT_FN_VOID_VPTR_I2, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_2, "_ITM_WaWU2",
+ BT_FN_VOID_VPTR_I2, ATTR_TM_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_4, "_ITM_WU4",
+ BT_FN_VOID_VPTR_I4, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_4, "_ITM_WaRU4",
+ BT_FN_VOID_VPTR_I4, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_4, "_ITM_WaWU4",
+ BT_FN_VOID_VPTR_I4, ATTR_TM_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_8, "_ITM_WU8",
+ BT_FN_VOID_VPTR_I8, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_8, "_ITM_WaRU8",
+ BT_FN_VOID_VPTR_I8, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_8, "_ITM_WaWU8",
+ BT_FN_VOID_VPTR_I8, ATTR_TM_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_FLOAT, "_ITM_WF",
+ BT_FN_VOID_VPTR_FLOAT, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_FLOAT, "_ITM_WaRF",
+ BT_FN_VOID_VPTR_FLOAT, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_FLOAT, "_ITM_WaWF",
+ BT_FN_VOID_VPTR_FLOAT, ATTR_TM_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_DOUBLE, "_ITM_WD",
+ BT_FN_VOID_VPTR_DOUBLE, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_DOUBLE, "_ITM_WaRD",
+ BT_FN_VOID_VPTR_DOUBLE, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_DOUBLE, "_ITM_WaWD",
+ BT_FN_VOID_VPTR_DOUBLE, ATTR_TM_NOTHROW_LIST)
+
+/* These stubs should get defined in the backend if applicable. */
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_M64, "__builtin__ITM_WM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_WAR_M64, "__builtin__ITM_WaRM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_WAW_M64, "__builtin__ITM_WaWM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_M128, "__builtin__ITM_WM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_WAR_M128, "__builtin__ITM_WaRM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_WAW_M128, "__builtin__ITM_WaWM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_M256, "__builtin__ITM_WM256")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_WAR_M256, "__builtin__ITM_WaRM256")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_WAW_M256, "__builtin__ITM_WaWM256")
+
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_LDOUBLE, "_ITM_WE",
+ BT_FN_VOID_VPTR_LDOUBLE, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_LDOUBLE, "_ITM_WaRE",
+ BT_FN_VOID_VPTR_LDOUBLE, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_LDOUBLE, "_ITM_WaWE",
+ BT_FN_VOID_VPTR_LDOUBLE, ATTR_TM_NOTHROW_LIST)
+/* Note: BUILT_IN_TM_STORE_WAW_LDOUBLE must be the last TM store.
+ BUILTIN_TM_STORE_P depends on this. */
+
+/* Reads.
+
+ Note: The reads must appear in the following order: LOAD, RAR, RAW, RFW.
+ The TM optimizations depend on this order. */
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_1, "_ITM_RU1",
+ BT_FN_I1_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_1, "_ITM_RaRU1",
+ BT_FN_I1_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_1, "_ITM_RaWU1",
+ BT_FN_I1_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_1, "_ITM_RfWU1",
+ BT_FN_I1_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_2, "_ITM_RU2",
+ BT_FN_I2_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_2, "_ITM_RaRU2",
+ BT_FN_I2_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_2, "_ITM_RaWU2",
+ BT_FN_I2_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_2, "_ITM_RfWU2",
+ BT_FN_I2_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_4, "_ITM_RU4",
+ BT_FN_I4_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_4, "_ITM_RaRU4",
+ BT_FN_I4_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_4, "_ITM_RaWU4",
+ BT_FN_I4_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_4, "_ITM_RfWU4",
+ BT_FN_I4_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_8, "_ITM_RU8",
+ BT_FN_I8_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_8, "_ITM_RaRU8",
+ BT_FN_I8_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_8, "_ITM_RaWU8",
+ BT_FN_I8_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_8, "_ITM_RfWU8",
+ BT_FN_I8_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_FLOAT, "_ITM_RF",
+ BT_FN_FLOAT_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_FLOAT, "_ITM_RaRF",
+ BT_FN_FLOAT_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_FLOAT, "_ITM_RaWF",
+ BT_FN_FLOAT_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_FLOAT, "_ITM_RfWF",
+ BT_FN_FLOAT_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_DOUBLE, "_ITM_RD",
+ BT_FN_DOUBLE_CONST_DOUBLE_PTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_DOUBLE, "_ITM_RaRD",
+ BT_FN_DOUBLE_CONST_DOUBLE_PTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_DOUBLE, "_ITM_RaWD",
+ BT_FN_DOUBLE_CONST_DOUBLE_PTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_DOUBLE, "_ITM_RfWD",
+ BT_FN_DOUBLE_CONST_DOUBLE_PTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+/* These stubs should get defined in the backend if applicable. */
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_M64, "__builtin__ITM_RM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RAR_M64, "__builtin__ITM_RaRM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RAW_M64, "__builtin__ITM_RaWM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RFW_M64, "__builtin__ITM_RfWM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_M128, "__builtin__ITM_RM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RAR_M128, "__builtin__ITM_RaRM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RAW_M128, "__builtin__ITM_RaWM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RFW_M128, "__builtin__ITM_RfWM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_M256, "__builtin__ITM_RM256")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RAR_M256, "__builtin__ITM_RaRM256")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RAW_M256, "__builtin__ITM_RaWM256")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RFW_M256, "__builtin__ITM_RfWM256")
+
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_LDOUBLE, "_ITM_RE",
+ BT_FN_LDOUBLE_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_LDOUBLE, "_ITM_RaRE",
+ BT_FN_LDOUBLE_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_LDOUBLE, "_ITM_RaWE",
+ BT_FN_LDOUBLE_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_LDOUBLE, "_ITM_RfWE",
+ BT_FN_LDOUBLE_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+/* Note: BUILT_IN_TM_LOAD_RFW_LDOUBLE must be the last TM load as well
+ as the last builtin. BUILTIN_TM_LOAD_STORE_P and BUILTIN_TM_LOAD_P
+ depend on this. */
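+/* Sketch for illustration (the real definitions live outside this
+   file, e.g. in gimple.h): both predicates are plain enum range
+   checks, along the lines of
+
+     #define BUILTIN_TM_LOAD_P(FN) \
+       ((FN) >= BUILT_IN_TM_LOAD_1 \
+        && (FN) <= BUILT_IN_TM_LOAD_RFW_LDOUBLE)
+
+   which is why any TM load added after BUILT_IN_TM_LOAD_RFW_LDOUBLE
+   would silently escape the check.  */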
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gtype-desc.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gtype-desc.h
new file mode 100644
index 0000000..5a4f4a9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/gtype-desc.h
@@ -0,0 +1,3853 @@
+/* Type information for GCC.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This file is machine generated. Do not edit. */
+
+/* GC marker procedures. */
+/* Macros and declarations. */
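+/* Reader's note on the generated pattern below: each pointer type T
+   gets a marker macro gt_ggc_m_<N><T>, where <N> is the character
+   count of the mangled type name T (gengtype's naming scheme).  The
+   macro null-checks its argument and forwards it to the type's marker
+   routine, so e.g. gt_ggc_m_11bitmap_head (p) calls
+   gt_ggc_mx_bitmap_head (p) only when p is non-null.  */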
+#define gt_ggc_m_9tree_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_tree_node (X);\
+ } while (0)
+#define gt_ggc_mx_tree_node gt_ggc_mx_lang_tree_node
+#define gt_ggc_m_9line_maps(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_line_maps (X);\
+ } while (0)
+extern void gt_ggc_mx_line_maps (void *);
+#define gt_ggc_m_9cpp_token(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cpp_token (X);\
+ } while (0)
+extern void gt_ggc_mx_cpp_token (void *);
+#define gt_ggc_m_9cpp_macro(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cpp_macro (X);\
+ } while (0)
+extern void gt_ggc_mx_cpp_macro (void *);
+#define gt_ggc_m_13string_concat(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_string_concat (X);\
+ } while (0)
+extern void gt_ggc_mx_string_concat (void *);
+#define gt_ggc_m_16string_concat_db(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_string_concat_db (X);\
+ } while (0)
+extern void gt_ggc_mx_string_concat_db (void *);
+#define gt_ggc_m_38hash_map_location_hash_string_concat__(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_location_hash_string_concat__ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_location_hash_string_concat__ (void *);
+#define gt_ggc_m_11bitmap_head(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_bitmap_head (X);\
+ } while (0)
+extern void gt_ggc_mx_bitmap_head (void *);
+#define gt_ggc_m_7rtx_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_rtx_def (X);\
+ } while (0)
+extern void gt_ggc_mx_rtx_def (void *);
+#define gt_ggc_m_9rtvec_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_rtvec_def (X);\
+ } while (0)
+extern void gt_ggc_mx_rtvec_def (void *);
+#define gt_ggc_m_6gimple(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_gimple (X);\
+ } while (0)
+extern void gt_ggc_mx_gimple (void *);
+#define gt_ggc_m_11symtab_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_symtab_node (X);\
+ } while (0)
+extern void gt_ggc_mx_symtab_node (void *);
+#define gt_ggc_m_11cgraph_edge(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cgraph_edge (X);\
+ } while (0)
+extern void gt_ggc_mx_cgraph_edge (void *);
+#define gt_ggc_m_7section(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_section (X);\
+ } while (0)
+extern void gt_ggc_mx_section (void *);
+#define gt_ggc_m_16cl_target_option(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cl_target_option (X);\
+ } while (0)
+extern void gt_ggc_mx_cl_target_option (void *);
+#define gt_ggc_m_15cl_optimization(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cl_optimization (X);\
+ } while (0)
+extern void gt_ggc_mx_cl_optimization (void *);
+#define gt_ggc_m_8edge_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_edge_def (X);\
+ } while (0)
+extern void gt_ggc_mx_edge_def (void *);
+#define gt_ggc_m_15basic_block_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_basic_block_def (X);\
+ } while (0)
+extern void gt_ggc_mx_basic_block_def (void *);
+#define gt_ggc_m_16machine_function(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_machine_function (X);\
+ } while (0)
+extern void gt_ggc_mx_machine_function (void *);
+#define gt_ggc_m_14bitmap_element(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_bitmap_element (X);\
+ } while (0)
+extern void gt_ggc_mx_bitmap_element (void *);
+#define gt_ggc_m_34generic_wide_int_wide_int_storage_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_generic_wide_int_wide_int_storage_ (X);\
+ } while (0)
+extern void gt_ggc_mx_generic_wide_int_wide_int_storage_ (void *);
+#define gt_ggc_m_13coverage_data(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_coverage_data (X);\
+ } while (0)
+extern void gt_ggc_mx_coverage_data (void *);
+#define gt_ggc_m_9mem_attrs(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_mem_attrs (X);\
+ } while (0)
+extern void gt_ggc_mx_mem_attrs (void *);
+#define gt_ggc_m_9reg_attrs(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_reg_attrs (X);\
+ } while (0)
+extern void gt_ggc_mx_reg_attrs (void *);
+#define gt_ggc_m_12object_block(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_object_block (X);\
+ } while (0)
+extern void gt_ggc_mx_object_block (void *);
+#define gt_ggc_m_14vec_rtx_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_rtx_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_rtx_va_gc_ (void *);
+#define gt_ggc_m_11fixed_value(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_fixed_value (X);\
+ } while (0)
+extern void gt_ggc_mx_fixed_value (void *);
+#define gt_ggc_m_23constant_descriptor_rtx(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_constant_descriptor_rtx (X);\
+ } while (0)
+extern void gt_ggc_mx_constant_descriptor_rtx (void *);
+#define gt_ggc_m_8function(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_function (X);\
+ } while (0)
+extern void gt_ggc_mx_function (void *);
+#define gt_ggc_m_10target_rtl(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_target_rtl (X);\
+ } while (0)
+extern void gt_ggc_mx_target_rtl (void *);
+#define gt_ggc_m_15cgraph_rtl_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cgraph_rtl_info (X);\
+ } while (0)
+extern void gt_ggc_mx_cgraph_rtl_info (void *);
+#define gt_ggc_m_42hash_map_tree_tree_decl_tree_cache_traits_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_tree_tree_decl_tree_cache_traits_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_tree_tree_decl_tree_cache_traits_ (void *);
+#define gt_ggc_m_42hash_map_tree_tree_type_tree_cache_traits_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_tree_tree_type_tree_cache_traits_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_tree_tree_type_tree_cache_traits_ (void *);
+#define gt_ggc_m_36hash_map_tree_tree_decl_tree_traits_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_tree_tree_decl_tree_traits_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_tree_tree_decl_tree_traits_ (void *);
+#define gt_ggc_m_12ptr_info_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ptr_info_def (X);\
+ } while (0)
+extern void gt_ggc_mx_ptr_info_def (void *);
+#define gt_ggc_m_19irange_storage_slot(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_irange_storage_slot (X);\
+ } while (0)
+extern void gt_ggc_mx_irange_storage_slot (void *);
+#define gt_ggc_m_10die_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_die_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_die_struct (void *);
+#define gt_ggc_m_26vec_constructor_elt_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_constructor_elt_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_constructor_elt_va_gc_ (void *);
+#define gt_ggc_m_19frange_storage_slot(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_frange_storage_slot (X);\
+ } while (0)
+extern void gt_ggc_mx_frange_storage_slot (void *);
+#define gt_ggc_m_15vec_tree_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_tree_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_tree_va_gc_ (void *);
+#define gt_ggc_m_9lang_type(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_lang_type (X);\
+ } while (0)
+extern void gt_ggc_mx_lang_type (void *);
+#define gt_ggc_m_9lang_decl(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_lang_decl (X);\
+ } while (0)
+extern void gt_ggc_mx_lang_decl (void *);
+#define gt_ggc_m_24tree_statement_list_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_tree_statement_list_node (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_statement_list_node (void *);
+#define gt_ggc_m_14target_globals(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_target_globals (X);\
+ } while (0)
+extern void gt_ggc_mx_target_globals (void *);
+#define gt_ggc_m_14lang_tree_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_lang_tree_node (X);\
+ } while (0)
+extern void gt_ggc_mx_lang_tree_node (void *);
+#define gt_ggc_m_8tree_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_tree_map (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_map (void *);
+#define gt_ggc_m_13tree_decl_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_tree_decl_map (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_decl_map (void *);
+#define gt_ggc_m_12tree_int_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_tree_int_map (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_int_map (void *);
+#define gt_ggc_m_12tree_vec_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_tree_vec_map (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_vec_map (void *);
+#define gt_ggc_m_21vec_alias_pair_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_alias_pair_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_alias_pair_va_gc_ (void *);
+#define gt_ggc_m_13libfunc_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_libfunc_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_libfunc_entry (void *);
+#define gt_ggc_m_26hash_table_libfunc_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_libfunc_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_libfunc_hasher_ (void *);
+#define gt_ggc_m_15target_libfuncs(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_target_libfuncs (X);\
+ } while (0)
+extern void gt_ggc_mx_target_libfuncs (void *);
+#define gt_ggc_m_14sequence_stack(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_sequence_stack (X);\
+ } while (0)
+extern void gt_ggc_mx_sequence_stack (void *);
+#define gt_ggc_m_20vec_rtx_insn__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_rtx_insn__va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_rtx_insn__va_gc_ (void *);
+#define gt_ggc_m_18call_site_record_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_call_site_record_d (X);\
+ } while (0)
+extern void gt_ggc_mx_call_site_record_d (void *);
+#define gt_ggc_m_16vec_uchar_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_uchar_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_uchar_va_gc_ (void *);
+#define gt_ggc_m_27vec_call_site_record_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_call_site_record_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_call_site_record_va_gc_ (void *);
+#define gt_ggc_m_9gimple_df(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_gimple_df (X);\
+ } while (0)
+extern void gt_ggc_mx_gimple_df (void *);
+#define gt_ggc_m_11dw_fde_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_dw_fde_node (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_fde_node (void *);
+#define gt_ggc_m_17rtx_constant_pool(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_rtx_constant_pool (X);\
+ } while (0)
+extern void gt_ggc_mx_rtx_constant_pool (void *);
+#define gt_ggc_m_11frame_space(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_frame_space (X);\
+ } while (0)
+extern void gt_ggc_mx_frame_space (void *);
+#define gt_ggc_m_26vec_callinfo_callee_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_callinfo_callee_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_callinfo_callee_va_gc_ (void *);
+#define gt_ggc_m_26vec_callinfo_dalloc_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_callinfo_dalloc_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_callinfo_dalloc_va_gc_ (void *);
+#define gt_ggc_m_11stack_usage(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_stack_usage (X);\
+ } while (0)
+extern void gt_ggc_mx_stack_usage (void *);
+#define gt_ggc_m_9eh_status(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_eh_status (X);\
+ } while (0)
+extern void gt_ggc_mx_eh_status (void *);
+#define gt_ggc_m_18control_flow_graph(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_control_flow_graph (X);\
+ } while (0)
+extern void gt_ggc_mx_control_flow_graph (void *);
+#define gt_ggc_m_5loops(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_loops (X);\
+ } while (0)
+extern void gt_ggc_mx_loops (void *);
+#define gt_ggc_m_17language_function(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_language_function (X);\
+ } while (0)
+extern void gt_ggc_mx_language_function (void *);
+#define gt_ggc_m_14hash_set_tree_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_set_tree_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_set_tree_ (void *);
+#define gt_ggc_m_24types_used_by_vars_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_types_used_by_vars_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_types_used_by_vars_entry (void *);
+#define gt_ggc_m_28hash_table_used_type_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_used_type_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_used_type_hasher_ (void *);
+#define gt_ggc_m_13nb_iter_bound(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_nb_iter_bound (X);\
+ } while (0)
+extern void gt_ggc_mx_nb_iter_bound (void *);
+#define gt_ggc_m_9loop_exit(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_loop_exit (X);\
+ } while (0)
+extern void gt_ggc_mx_loop_exit (void *);
+#define gt_ggc_m_4loop(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_loop (X);\
+ } while (0)
+extern void gt_ggc_mx_loop (void *);
+#define gt_ggc_m_10control_iv(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_control_iv (X);\
+ } while (0)
+extern void gt_ggc_mx_control_iv (void *);
+#define gt_ggc_m_17vec_loop_p_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_loop_p_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_loop_p_va_gc_ (void *);
+#define gt_ggc_m_10niter_desc(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_niter_desc (X);\
+ } while (0)
+extern void gt_ggc_mx_niter_desc (void *);
+#define gt_ggc_m_28hash_table_loop_exit_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_loop_exit_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_loop_exit_hasher_ (void *);
+#define gt_ggc_m_22vec_basic_block_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_basic_block_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_basic_block_va_gc_ (void *);
+#define gt_ggc_m_11rtl_bb_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_rtl_bb_info (X);\
+ } while (0)
+extern void gt_ggc_mx_rtl_bb_info (void *);
+#define gt_ggc_m_15vec_edge_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_edge_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_edge_va_gc_ (void *);
+#define gt_ggc_m_18section_hash_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_section_hash_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_section_hash_entry (void *);
+#define gt_ggc_m_18lto_file_decl_data(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_lto_file_decl_data (X);\
+ } while (0)
+extern void gt_ggc_mx_lto_file_decl_data (void *);
+#define gt_ggc_m_15ipa_replace_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ipa_replace_map (X);\
+ } while (0)
+extern void gt_ggc_mx_ipa_replace_map (void *);
+#define gt_ggc_m_17cgraph_simd_clone(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cgraph_simd_clone (X);\
+ } while (0)
+extern void gt_ggc_mx_cgraph_simd_clone (void *);
+#define gt_ggc_m_28cgraph_function_version_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cgraph_function_version_info (X);\
+ } while (0)
+extern void gt_ggc_mx_cgraph_function_version_info (void *);
+#define gt_ggc_m_30hash_table_cgraph_edge_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_cgraph_edge_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_cgraph_edge_hasher_ (void *);
+#define gt_ggc_m_25cgraph_indirect_call_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cgraph_indirect_call_info (X);\
+ } while (0)
+extern void gt_ggc_mx_cgraph_indirect_call_info (void *);
+#define gt_ggc_m_8asm_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_asm_node (X);\
+ } while (0)
+extern void gt_ggc_mx_asm_node (void *);
+#define gt_ggc_m_10thunk_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_thunk_info (X);\
+ } while (0)
+extern void gt_ggc_mx_thunk_info (void *);
+#define gt_ggc_m_29function_summary_thunk_info__(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_function_summary_thunk_info__ (X);\
+ } while (0)
+extern void gt_ggc_mx_function_summary_thunk_info__ (void *);
+#define gt_ggc_m_10clone_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_clone_info (X);\
+ } while (0)
+extern void gt_ggc_mx_clone_info (void *);
+#define gt_ggc_m_29function_summary_clone_info__(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_function_summary_clone_info__ (X);\
+ } while (0)
+extern void gt_ggc_mx_function_summary_clone_info__ (void *);
+#define gt_ggc_m_12symbol_table(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_symbol_table (X);\
+ } while (0)
+extern void gt_ggc_mx_symbol_table (void *);
+#define gt_ggc_m_31hash_table_section_name_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_section_name_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_section_name_hasher_ (void *);
+#define gt_ggc_m_26hash_table_asmname_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_asmname_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_asmname_hasher_ (void *);
+#define gt_ggc_m_42hash_map_symtab_node__symbol_priority_map_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_symtab_node__symbol_priority_map_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_symtab_node__symbol_priority_map_ (void *);
+#define gt_ggc_m_24constant_descriptor_tree(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_constant_descriptor_tree (X);\
+ } while (0)
+extern void gt_ggc_mx_constant_descriptor_tree (void *);
+#define gt_ggc_m_28vec_unprocessed_thunk_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_unprocessed_thunk_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_unprocessed_thunk_va_gc_ (void *);
+#define gt_ggc_m_27vec_ipa_replace_map__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_ipa_replace_map__va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_ipa_replace_map__va_gc_ (void *);
+#define gt_ggc_m_21ipa_param_adjustments(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ipa_param_adjustments (X);\
+ } while (0)
+extern void gt_ggc_mx_ipa_param_adjustments (void *);
+#define gt_ggc_m_28hash_map_alias_set_hash_int_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_alias_set_hash_int_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_alias_set_hash_int_ (void *);
+#define gt_ggc_m_15alias_set_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_alias_set_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_alias_set_entry (void *);
+#define gt_ggc_m_27vec_alias_set_entry__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_alias_set_entry__va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_alias_set_entry__va_gc_ (void *);
+#define gt_ggc_m_35hash_table_function_version_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_function_version_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_function_version_hasher_ (void *);
+#define gt_ggc_m_17lto_in_decl_state(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_lto_in_decl_state (X);\
+ } while (0)
+extern void gt_ggc_mx_lto_in_decl_state (void *);
+#define gt_ggc_m_35hash_table_ipa_bit_ggc_hash_traits_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_ipa_bit_ggc_hash_traits_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_ipa_bit_ggc_hash_traits_ (void *);
+#define gt_ggc_m_34hash_table_ipa_vr_ggc_hash_traits_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_ipa_vr_ggc_hash_traits_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_ipa_vr_ggc_hash_traits_ (void *);
+#define gt_ggc_m_15ipa_node_params(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ipa_node_params (X);\
+ } while (0)
+extern void gt_ggc_mx_ipa_node_params (void *);
+#define gt_ggc_m_13ipa_edge_args(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ipa_edge_args (X);\
+ } while (0)
+extern void gt_ggc_mx_ipa_edge_args (void *);
+#define gt_ggc_m_14ipa_fn_summary(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ipa_fn_summary (X);\
+ } while (0)
+extern void gt_ggc_mx_ipa_fn_summary (void *);
+#define gt_ggc_m_10odr_type_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_odr_type_d (X);\
+ } while (0)
+extern void gt_ggc_mx_odr_type_d (void *);
+#define gt_ggc_m_29vec_ipa_adjusted_param_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_ipa_adjusted_param_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_ipa_adjusted_param_va_gc_ (void *);
+#define gt_ggc_m_12param_access(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_param_access (X);\
+ } while (0)
+extern void gt_ggc_mx_param_access (void *);
+#define gt_ggc_m_24vec_param_access__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_param_access__va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_param_access__va_gc_ (void *);
+#define gt_ggc_m_17isra_func_summary(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_isra_func_summary (X);\
+ } while (0)
+extern void gt_ggc_mx_isra_func_summary (void *);
+#define gt_ggc_m_26vec_isra_param_desc_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_isra_param_desc_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_isra_param_desc_va_gc_ (void *);
+#define gt_ggc_m_26ipa_sra_function_summaries(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ipa_sra_function_summaries (X);\
+ } while (0)
+extern void gt_ggc_mx_ipa_sra_function_summaries (void *);
+#define gt_ggc_m_27modref_tree_alias_set_type_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_modref_tree_alias_set_type_ (X);\
+ } while (0)
+extern void gt_ggc_mx_modref_tree_alias_set_type_ (void *);
+#define gt_ggc_m_14modref_summary(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_modref_summary (X);\
+ } while (0)
+extern void gt_ggc_mx_modref_summary (void *);
+#define gt_ggc_m_18modref_summary_lto(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_modref_summary_lto (X);\
+ } while (0)
+extern void gt_ggc_mx_modref_summary_lto (void *);
+#define gt_ggc_m_44fast_function_summary_modref_summary__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_fast_function_summary_modref_summary__va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_fast_function_summary_modref_summary__va_gc_ (void *);
+#define gt_ggc_m_48fast_function_summary_modref_summary_lto__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_fast_function_summary_modref_summary_lto__va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_fast_function_summary_modref_summary_lto__va_gc_ (void *);
+#define gt_ggc_m_17modref_tree_tree_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_modref_tree_tree_ (X);\
+ } while (0)
+extern void gt_ggc_mx_modref_tree_tree_ (void *);
+#define gt_ggc_m_37hash_map_location_hash_nowarn_spec_t_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_location_hash_nowarn_spec_t_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_location_hash_nowarn_spec_t_ (void *);
+#define gt_ggc_m_11dw_cfi_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_dw_cfi_node (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_cfi_node (void *);
+#define gt_ggc_m_17dw_loc_descr_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_dw_loc_descr_node (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_loc_descr_node (void *);
+#define gt_ggc_m_18dw_loc_list_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_dw_loc_list_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_loc_list_struct (void *);
+#define gt_ggc_m_18dw_discr_list_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_dw_discr_list_node (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_discr_list_node (void *);
+#define gt_ggc_m_15dw_cfa_location(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_dw_cfa_location (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_cfa_location (void *);
+#define gt_ggc_m_21vec_dw_cfi_ref_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_dw_cfi_ref_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_dw_cfi_ref_va_gc_ (void *);
+#define gt_ggc_m_16addr_table_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_addr_table_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_addr_table_entry (void *);
+#define gt_ggc_m_20indirect_string_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_indirect_string_node (X);\
+ } while (0)
+extern void gt_ggc_mx_indirect_string_node (void *);
+#define gt_ggc_m_15dwarf_file_data(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_dwarf_file_data (X);\
+ } while (0)
+extern void gt_ggc_mx_dwarf_file_data (void *);
+#define gt_ggc_m_20hash_map_char__tree_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_char__tree_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_char__tree_ (void *);
+#define gt_ggc_m_10dw_cfi_row(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_dw_cfi_row (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_cfi_row (void *);
+#define gt_ggc_m_17reg_saved_in_data(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_reg_saved_in_data (X);\
+ } while (0)
+extern void gt_ggc_mx_reg_saved_in_data (void *);
+#define gt_ggc_m_21vec_dw_fde_ref_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_dw_fde_ref_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_dw_fde_ref_va_gc_ (void *);
+#define gt_ggc_m_34hash_table_indirect_string_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_indirect_string_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_indirect_string_hasher_ (void *);
+#define gt_ggc_m_16vec_char__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_char__va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_char__va_gc_ (void *);
+#define gt_ggc_m_16comdat_type_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_comdat_type_node (X);\
+ } while (0)
+extern void gt_ggc_mx_comdat_type_node (void *);
+#define gt_ggc_m_29vec_dw_line_info_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_dw_line_info_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_dw_line_info_entry_va_gc_ (void *);
+#define gt_ggc_m_18dw_line_info_table(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_dw_line_info_table (X);\
+ } while (0)
+extern void gt_ggc_mx_dw_line_info_table (void *);
+#define gt_ggc_m_23vec_dw_attr_node_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_dw_attr_node_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_dw_attr_node_va_gc_ (void *);
+#define gt_ggc_m_16limbo_die_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_limbo_die_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_limbo_die_struct (void *);
+#define gt_ggc_m_29hash_table_dwarf_file_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_dwarf_file_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_dwarf_file_hasher_ (void *);
+#define gt_ggc_m_27hash_table_decl_die_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_decl_die_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_decl_die_hasher_ (void *);
+#define gt_ggc_m_21vec_dw_die_ref_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_dw_die_ref_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_dw_die_ref_va_gc_ (void *);
+#define gt_ggc_m_21variable_value_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_variable_value_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_variable_value_struct (void *);
+#define gt_ggc_m_33hash_table_variable_value_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_variable_value_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_variable_value_hasher_ (void *);
+#define gt_ggc_m_28hash_table_block_die_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_block_die_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_block_die_hasher_ (void *);
+#define gt_ggc_m_12var_loc_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_var_loc_node (X);\
+ } while (0)
+extern void gt_ggc_mx_var_loc_node (void *);
+#define gt_ggc_m_16var_loc_list_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_var_loc_list_def (X);\
+ } while (0)
+extern void gt_ggc_mx_var_loc_list_def (void *);
+#define gt_ggc_m_17call_arg_loc_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_call_arg_loc_node (X);\
+ } while (0)
+extern void gt_ggc_mx_call_arg_loc_node (void *);
+#define gt_ggc_m_27hash_table_decl_loc_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_decl_loc_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_decl_loc_hasher_ (void *);
+#define gt_ggc_m_22cached_dw_loc_list_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cached_dw_loc_list_def (X);\
+ } while (0)
+extern void gt_ggc_mx_cached_dw_loc_list_def (void *);
+#define gt_ggc_m_30hash_table_dw_loc_list_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_dw_loc_list_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_dw_loc_list_hasher_ (void *);
+#define gt_ggc_m_30vec_dw_line_info_table__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_dw_line_info_table__va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_dw_line_info_table__va_gc_ (void *);
+#define gt_ggc_m_24vec_pubname_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_pubname_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_pubname_entry_va_gc_ (void *);
+#define gt_ggc_m_24vec_macinfo_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_macinfo_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_macinfo_entry_va_gc_ (void *);
+#define gt_ggc_m_20vec_dw_ranges_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_dw_ranges_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_dw_ranges_va_gc_ (void *);
+#define gt_ggc_m_29vec_dw_ranges_by_label_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_dw_ranges_by_label_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_dw_ranges_by_label_va_gc_ (void *);
+#define gt_ggc_m_24vec_die_arg_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_die_arg_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_die_arg_entry_va_gc_ (void *);
+#define gt_ggc_m_23hash_table_addr_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_addr_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_addr_hasher_ (void *);
+#define gt_ggc_m_27hash_map_tree_sym_off_pair_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_tree_sym_off_pair_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_tree_sym_off_pair_ (void *);
+#define gt_ggc_m_17inline_entry_data(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_inline_entry_data (X);\
+ } while (0)
+extern void gt_ggc_mx_inline_entry_data (void *);
+#define gt_ggc_m_36hash_table_inline_entry_data_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_inline_entry_data_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_inline_entry_data_hasher_ (void *);
+#define gt_ggc_m_10ctf_string(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ctf_string (X);\
+ } while (0)
+extern void gt_ggc_mx_ctf_string (void *);
+#define gt_ggc_m_9ctf_dmdef(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ctf_dmdef (X);\
+ } while (0)
+extern void gt_ggc_mx_ctf_dmdef (void *);
+#define gt_ggc_m_12ctf_func_arg(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ctf_func_arg (X);\
+ } while (0)
+extern void gt_ggc_mx_ctf_func_arg (void *);
+#define gt_ggc_m_9ctf_dtdef(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ctf_dtdef (X);\
+ } while (0)
+extern void gt_ggc_mx_ctf_dtdef (void *);
+#define gt_ggc_m_9ctf_dvdef(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ctf_dvdef (X);\
+ } while (0)
+extern void gt_ggc_mx_ctf_dvdef (void *);
+#define gt_ggc_m_27hash_table_ctfc_dtd_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_ctfc_dtd_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_ctfc_dtd_hasher_ (void *);
+#define gt_ggc_m_27hash_table_ctfc_dvd_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_ctfc_dvd_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_ctfc_dvd_hasher_ (void *);
+#define gt_ggc_m_13ctf_container(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ctf_container (X);\
+ } while (0)
+extern void gt_ggc_mx_ctf_container (void *);
+#define gt_ggc_m_32hash_map_ctf_dvdef_ref_unsigned_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_ctf_dvdef_ref_unsigned_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_ctf_dvdef_ref_unsigned_ (void *);
+#define gt_ggc_m_24vec_ctf_dtdef_ref_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_ctf_dtdef_ref_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_ctf_dtdef_ref_va_gc_ (void *);
+#define gt_ggc_m_9temp_slot(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_temp_slot (X);\
+ } while (0)
+extern void gt_ggc_mx_temp_slot (void *);
+#define gt_ggc_m_20initial_value_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_initial_value_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_initial_value_struct (void *);
+#define gt_ggc_m_22vec_temp_slot_p_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_temp_slot_p_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_temp_slot_p_va_gc_ (void *);
+#define gt_ggc_m_28hash_table_const_int_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_const_int_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_const_int_hasher_ (void *);
+#define gt_ggc_m_33hash_table_const_wide_int_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_const_wide_int_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_const_wide_int_hasher_ (void *);
+#define gt_ggc_m_33hash_table_const_poly_int_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_const_poly_int_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_const_poly_int_hasher_ (void *);
+#define gt_ggc_m_27hash_table_reg_attr_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_reg_attr_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_reg_attr_hasher_ (void *);
+#define gt_ggc_m_31hash_table_const_double_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_const_double_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_const_double_hasher_ (void *);
+#define gt_ggc_m_30hash_table_const_fixed_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_const_fixed_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_const_fixed_hasher_ (void *);
+#define gt_ggc_m_11eh_region_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_eh_region_d (X);\
+ } while (0)
+extern void gt_ggc_mx_eh_region_d (void *);
+#define gt_ggc_m_16eh_landing_pad_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_eh_landing_pad_d (X);\
+ } while (0)
+extern void gt_ggc_mx_eh_landing_pad_d (void *);
+#define gt_ggc_m_10eh_catch_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_eh_catch_d (X);\
+ } while (0)
+extern void gt_ggc_mx_eh_catch_d (void *);
+#define gt_ggc_m_20vec_eh_region_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_eh_region_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_eh_region_va_gc_ (void *);
+#define gt_ggc_m_25vec_eh_landing_pad_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_eh_landing_pad_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_eh_landing_pad_va_gc_ (void *);
+#define gt_ggc_m_21hash_map_gimple__int_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_gimple__int_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_gimple__int_ (void *);
+#define gt_ggc_m_29hash_table_insn_cache_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_insn_cache_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_insn_cache_hasher_ (void *);
+#define gt_ggc_m_23temp_slot_address_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_temp_slot_address_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_temp_slot_address_entry (void *);
+#define gt_ggc_m_31hash_table_temp_address_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_temp_address_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_temp_address_hasher_ (void *);
+#define gt_ggc_m_24hash_map_tree_hash_tree_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_tree_hash_tree_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_tree_hash_tree_ (void *);
+#define gt_ggc_m_11test_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_test_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_test_struct (void *);
+#define gt_ggc_m_14test_of_length(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_test_of_length (X);\
+ } while (0)
+extern void gt_ggc_mx_test_of_length (void *);
+#define gt_ggc_m_10test_other(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_test_other (X);\
+ } while (0)
+extern void gt_ggc_mx_test_other (void *);
+#define gt_ggc_m_13test_of_union(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_test_of_union (X);\
+ } while (0)
+extern void gt_ggc_mx_test_of_union (void *);
+#define gt_ggc_m_12example_base(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_example_base (X);\
+ } while (0)
+extern void gt_ggc_mx_example_base (void *);
+#define gt_ggc_m_9test_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_test_node (X);\
+ } while (0)
+extern void gt_ggc_mx_test_node (void *);
+#define gt_ggc_m_11user_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_user_struct (X);\
+ } while (0)
+extern void gt_ggc_mx_user_struct (void *);
+#define gt_ggc_m_31hash_table_libfunc_decl_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_libfunc_decl_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_libfunc_decl_hasher_ (void *);
+#define gt_ggc_m_16string_pool_data(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_string_pool_data (X);\
+ } while (0)
+extern void gt_ggc_mx_string_pool_data (void *);
+#define gt_ggc_m_9type_hash(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_type_hash (X);\
+ } while (0)
+extern void gt_ggc_mx_type_hash (void *);
+#define gt_ggc_m_29hash_table_type_cache_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_type_cache_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_type_cache_hasher_ (void *);
+#define gt_ggc_m_26hash_table_int_cst_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_int_cst_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_int_cst_hasher_ (void *);
+#define gt_ggc_m_31hash_table_poly_int_cst_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_poly_int_cst_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_poly_int_cst_hasher_ (void *);
+#define gt_ggc_m_28hash_table_cl_option_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_cl_option_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_cl_option_hasher_ (void *);
+#define gt_ggc_m_38hash_table_tree_decl_map_cache_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_tree_decl_map_cache_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_tree_decl_map_cache_hasher_ (void *);
+#define gt_ggc_m_37hash_table_tree_vec_map_cache_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_tree_vec_map_cache_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_tree_vec_map_cache_hasher_ (void *);
+#define gt_ggc_m_26hash_table_section_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_section_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_section_hasher_ (void *);
+#define gt_ggc_m_31hash_table_object_block_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_object_block_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_object_block_hasher_ (void *);
+#define gt_ggc_m_34hash_table_tree_descriptor_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_tree_descriptor_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_tree_descriptor_hasher_ (void *);
+#define gt_ggc_m_33hash_table_const_rtx_desc_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_const_rtx_desc_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_const_rtx_desc_hasher_ (void *);
+#define gt_ggc_m_27hash_table_tm_clone_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_tm_clone_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_tm_clone_hasher_ (void *);
+#define gt_ggc_m_15tm_restart_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_tm_restart_node (X);\
+ } while (0)
+extern void gt_ggc_mx_tm_restart_node (void *);
+#define gt_ggc_m_19hash_map_tree_tree_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_tree_tree_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_tree_tree_ (void *);
+#define gt_ggc_m_27hash_table_ssa_name_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_ssa_name_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_ssa_name_hasher_ (void *);
+#define gt_ggc_m_29hash_table_tm_restart_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_tm_restart_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_tm_restart_hasher_ (void *);
+#define gt_ggc_m_28vec_mem_addr_template_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_mem_addr_template_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_mem_addr_template_va_gc_ (void *);
+#define gt_ggc_m_13scev_info_str(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_scev_info_str (X);\
+ } while (0)
+extern void gt_ggc_mx_scev_info_str (void *);
+#define gt_ggc_m_28hash_table_scev_info_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_scev_info_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_scev_info_hasher_ (void *);
+#define gt_ggc_m_20ssa_operand_memory_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ssa_operand_memory_d (X);\
+ } while (0)
+extern void gt_ggc_mx_ssa_operand_memory_d (void *);
+#define gt_ggc_m_36vec_omp_declare_variant_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_omp_declare_variant_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_omp_declare_variant_entry_va_gc_ (void *);
+#define gt_ggc_m_30omp_declare_variant_base_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_omp_declare_variant_base_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_omp_declare_variant_base_entry (void *);
+#define gt_ggc_m_38hash_table_omp_declare_variant_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_omp_declare_variant_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_omp_declare_variant_hasher_ (void *);
+#define gt_ggc_m_42hash_table_omp_declare_variant_alt_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_omp_declare_variant_alt_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_omp_declare_variant_alt_hasher_ (void *);
+#define gt_ggc_m_24hash_map_char__unsigned_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_char__unsigned_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_char__unsigned_ (void *);
+#define gt_ggc_m_18vec_gimple__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_gimple__va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_gimple__va_gc_ (void *);
+#define gt_ggc_m_12int_range_1_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_int_range_1_ (X);\
+ } while (0)
+extern void gt_ggc_mx_int_range_1_ (void *);
+#define gt_ggc_m_26vec_ipa_agg_jf_item_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_ipa_agg_jf_item_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_ipa_agg_jf_item_va_gc_ (void *);
+#define gt_ggc_m_19ipcp_transformation(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ipcp_transformation (X);\
+ } while (0)
+extern void gt_ggc_mx_ipcp_transformation (void *);
+#define gt_ggc_m_8ipa_bits(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ipa_bits (X);\
+ } while (0)
+extern void gt_ggc_mx_ipa_bits (void *);
+#define gt_ggc_m_31vec_ipa_param_descriptor_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_ipa_param_descriptor_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_ipa_param_descriptor_va_gc_ (void *);
+#define gt_ggc_m_27vec_ipa_argagg_value_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_ipa_argagg_value_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_ipa_argagg_value_va_gc_ (void *);
+#define gt_ggc_m_20vec_ipa_bits__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_ipa_bits__va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_ipa_bits__va_gc_ (void *);
+#define gt_ggc_m_17vec_ipa_vr_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_ipa_vr_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_ipa_vr_va_gc_ (void *);
+#define gt_ggc_m_24vec_ipa_jump_func_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_ipa_jump_func_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_ipa_jump_func_va_gc_ (void *);
+#define gt_ggc_m_39vec_ipa_polymorphic_call_context_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_ipa_polymorphic_call_context_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_ipa_polymorphic_call_context_va_gc_ (void *);
+#define gt_ggc_m_17ipa_node_params_t(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ipa_node_params_t (X);\
+ } while (0)
+extern void gt_ggc_mx_ipa_node_params_t (void *);
+#define gt_ggc_m_19ipa_edge_args_sum_t(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_ipa_edge_args_sum_t (X);\
+ } while (0)
+extern void gt_ggc_mx_ipa_edge_args_sum_t (void *);
+#define gt_ggc_m_38function_summary_ipcp_transformation__(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_function_summary_ipcp_transformation__ (X);\
+ } while (0)
+extern void gt_ggc_mx_function_summary_ipcp_transformation__ (void *);
+#define gt_ggc_m_29hash_table_tm_wrapper_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_tm_wrapper_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_tm_wrapper_hasher_ (void *);
+#define gt_ggc_m_29hash_table_decl_state_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_decl_state_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_decl_state_hasher_ (void *);
+#define gt_ggc_m_23vec_expr_eval_op_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_expr_eval_op_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_expr_eval_op_va_gc_ (void *);
+#define gt_ggc_m_20vec_condition_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_condition_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_condition_va_gc_ (void *);
+#define gt_ggc_m_37vec_ipa_freqcounting_predicate_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_ipa_freqcounting_predicate_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_ipa_freqcounting_predicate_va_gc_ (void *);
+#define gt_ggc_m_44fast_function_summary_ipa_fn_summary__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_fast_function_summary_ipa_fn_summary__va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_fast_function_summary_ipa_fn_summary__va_gc_ (void *);
+#define gt_ggc_m_13tree_type_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_tree_type_map (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_type_map (void *);
+#define gt_ggc_m_38hash_table_tree_type_map_cache_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_tree_type_map_cache_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_tree_type_map_cache_hasher_ (void *);
+#define gt_ggc_m_19vec_odr_type_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_odr_type_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_odr_type_va_gc_ (void *);
+#define gt_ggc_m_35hash_table_value_annotation_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_value_annotation_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_value_annotation_hasher_ (void *);
+#define gt_ggc_m_27vec_Entity_Id_va_gc_atomic_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_Entity_Id_va_gc_atomic_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_Entity_Id_va_gc_atomic_ (void *);
+#define gt_ggc_m_19tree_entity_vec_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_tree_entity_vec_map (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_entity_vec_map (void *);
+#define gt_ggc_m_29hash_table_dummy_type_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_dummy_type_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_dummy_type_hasher_ (void *);
+#define gt_ggc_m_11parm_attr_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_parm_attr_d (X);\
+ } while (0)
+extern void gt_ggc_mx_parm_attr_d (void *);
+#define gt_ggc_m_20vec_parm_attr_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_parm_attr_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_parm_attr_va_gc_ (void *);
+#define gt_ggc_m_10stmt_group(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_stmt_group (X);\
+ } while (0)
+extern void gt_ggc_mx_stmt_group (void *);
+#define gt_ggc_m_9elab_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_elab_info (X);\
+ } while (0)
+extern void gt_ggc_mx_elab_info (void *);
+#define gt_ggc_m_18range_check_info_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_range_check_info_d (X);\
+ } while (0)
+extern void gt_ggc_mx_range_check_info_d (void *);
+#define gt_ggc_m_27vec_range_check_info_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_range_check_info_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_range_check_info_va_gc_ (void *);
+#define gt_ggc_m_11loop_info_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_loop_info_d (X);\
+ } while (0)
+extern void gt_ggc_mx_loop_info_d (void *);
+#define gt_ggc_m_20vec_loop_info_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_loop_info_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_loop_info_va_gc_ (void *);
+#define gt_ggc_m_18gnat_binding_level(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_gnat_binding_level (X);\
+ } while (0)
+extern void gt_ggc_mx_gnat_binding_level (void *);
+#define gt_ggc_m_18packable_type_hash(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_packable_type_hash (X);\
+ } while (0)
+extern void gt_ggc_mx_packable_type_hash (void *);
+#define gt_ggc_m_32hash_table_packable_type_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_packable_type_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_packable_type_hasher_ (void *);
+#define gt_ggc_m_13pad_type_hash(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_pad_type_hash (X);\
+ } while (0)
+extern void gt_ggc_mx_pad_type_hash (void *);
+#define gt_ggc_m_27hash_table_pad_type_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_pad_type_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_pad_type_hasher_ (void *);
+#define gt_ggc_m_12c_label_vars(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_c_label_vars (X);\
+ } while (0)
+extern void gt_ggc_mx_c_label_vars (void *);
+#define gt_ggc_m_9c_binding(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_c_binding (X);\
+ } while (0)
+extern void gt_ggc_mx_c_binding (void *);
+#define gt_ggc_m_7c_scope(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_c_scope (X);\
+ } while (0)
+extern void gt_ggc_mx_c_scope (void *);
+#define gt_ggc_m_15c_goto_bindings(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_c_goto_bindings (X);\
+ } while (0)
+extern void gt_ggc_mx_c_goto_bindings (void *);
+#define gt_ggc_m_28vec_c_goto_bindings_p_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_c_goto_bindings_p_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_c_goto_bindings_p_va_gc_ (void *);
+#define gt_ggc_m_15c_inline_static(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_c_inline_static (X);\
+ } while (0)
+extern void gt_ggc_mx_c_inline_static (void *);
+#define gt_ggc_m_18sorted_fields_type(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_sorted_fields_type (X);\
+ } while (0)
+extern void gt_ggc_mx_sorted_fields_type (void *);
+#define gt_ggc_m_23vec_const_char_p_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_const_char_p_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_const_char_p_va_gc_ (void *);
+#define gt_ggc_m_22vec_tree_gc_vec_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_tree_gc_vec_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_tree_gc_vec_va_gc_ (void *);
+#define gt_ggc_m_11align_stack(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_align_stack (X);\
+ } while (0)
+extern void gt_ggc_mx_align_stack (void *);
+#define gt_ggc_m_23vec_pending_weak_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_pending_weak_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_pending_weak_va_gc_ (void *);
+#define gt_ggc_m_31vec_pending_redefinition_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_pending_redefinition_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_pending_redefinition_va_gc_ (void *);
+#define gt_ggc_m_9opt_stack(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_opt_stack (X);\
+ } while (0)
+extern void gt_ggc_mx_opt_stack (void *);
+#define gt_ggc_m_8c_parser(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_c_parser (X);\
+ } while (0)
+extern void gt_ggc_mx_c_parser (void *);
+#define gt_ggc_m_18vec_c_token_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_c_token_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_c_token_va_gc_ (void *);
+#define gt_ggc_m_36vec_c_omp_declare_target_attr_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_c_omp_declare_target_attr_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_c_omp_declare_target_attr_va_gc_ (void *);
+#define gt_ggc_m_16cp_binding_level(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cp_binding_level (X);\
+ } while (0)
+extern void gt_ggc_mx_cp_binding_level (void *);
+#define gt_ggc_m_11cxx_binding(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cxx_binding (X);\
+ } while (0)
+extern void gt_ggc_mx_cxx_binding (void *);
+#define gt_ggc_m_27vec_cp_class_binding_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_cp_class_binding_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_cp_class_binding_va_gc_ (void *);
+#define gt_ggc_m_14cp_token_cache(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cp_token_cache (X);\
+ } while (0)
+extern void gt_ggc_mx_cp_token_cache (void *);
+#define gt_ggc_m_32vec_deferred_access_check_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_deferred_access_check_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_deferred_access_check_va_gc_ (void *);
+#define gt_ggc_m_28vec_cxx_saved_binding_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_cxx_saved_binding_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_cxx_saved_binding_va_gc_ (void *);
+#define gt_ggc_m_37vec_cp_omp_declare_target_attr_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_cp_omp_declare_target_attr_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_cp_omp_declare_target_attr_va_gc_ (void *);
+#define gt_ggc_m_36vec_cp_omp_begin_assumes_data_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_cp_omp_begin_assumes_data_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_cp_omp_begin_assumes_data_va_gc_ (void *);
+#define gt_ggc_m_11saved_scope(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_saved_scope (X);\
+ } while (0)
+extern void gt_ggc_mx_saved_scope (void *);
+#define gt_ggc_m_17named_label_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_named_label_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_named_label_entry (void *);
+#define gt_ggc_m_28hash_table_named_label_hash_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_named_label_hash_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_named_label_hash_ (void *);
+#define gt_ggc_m_11tree_pair_s(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_tree_pair_s (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_pair_s (void *);
+#define gt_ggc_m_22vec_tree_pair_s_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_tree_pair_s_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_tree_pair_s_va_gc_ (void *);
+#define gt_ggc_m_27hash_table_named_decl_hash_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_named_decl_hash_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_named_decl_hash_ (void *);
+#define gt_ggc_m_10spec_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_spec_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_spec_entry (void *);
+#define gt_ggc_m_11tinst_level(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_tinst_level (X);\
+ } while (0)
+extern void gt_ggc_mx_tinst_level (void *);
+#define gt_ggc_m_12module_state(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_module_state (X);\
+ } while (0)
+extern void gt_ggc_mx_module_state (void *);
+#define gt_ggc_m_16constexpr_fundef(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_constexpr_fundef (X);\
+ } while (0)
+extern void gt_ggc_mx_constexpr_fundef (void *);
+#define gt_ggc_m_10tree_check(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_tree_check (X);\
+ } while (0)
+extern void gt_ggc_mx_tree_check (void *);
+#define gt_ggc_m_19vec_cp_token_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_cp_token_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_cp_token_va_gc_ (void *);
+#define gt_ggc_m_8cp_lexer(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cp_lexer (X);\
+ } while (0)
+extern void gt_ggc_mx_cp_lexer (void *);
+#define gt_ggc_m_31vec_cp_default_arg_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_cp_default_arg_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_cp_default_arg_entry_va_gc_ (void *);
+#define gt_ggc_m_17cp_parser_context(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cp_parser_context (X);\
+ } while (0)
+extern void gt_ggc_mx_cp_parser_context (void *);
+#define gt_ggc_m_38vec_cp_unparsed_functions_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_cp_unparsed_functions_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_cp_unparsed_functions_entry_va_gc_ (void *);
+#define gt_ggc_m_9cp_parser(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_cp_parser (X);\
+ } while (0)
+extern void gt_ggc_mx_cp_parser (void *);
+#define gt_ggc_m_18hash_map_tree_int_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_tree_int_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_tree_int_ (void *);
+#define gt_ggc_m_35hash_table_constexpr_fundef_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_constexpr_fundef_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_constexpr_fundef_hasher_ (void *);
+#define gt_ggc_m_14constexpr_call(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_constexpr_call (X);\
+ } while (0)
+extern void gt_ggc_mx_constexpr_call (void *);
+#define gt_ggc_m_33hash_table_constexpr_call_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_constexpr_call_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_constexpr_call_hasher_ (void *);
+#define gt_ggc_m_10norm_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_norm_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_norm_entry (void *);
+#define gt_ggc_m_23hash_table_norm_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_norm_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_norm_hasher_ (void *);
+#define gt_ggc_m_23hash_table_atom_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_atom_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_atom_hasher_ (void *);
+#define gt_ggc_m_9sat_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_sat_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_sat_entry (void *);
+#define gt_ggc_m_22hash_table_sat_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_sat_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_sat_hasher_ (void *);
+#define gt_ggc_m_14coroutine_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_coroutine_info (X);\
+ } while (0)
+extern void gt_ggc_mx_coroutine_info (void *);
+#define gt_ggc_m_33hash_table_coroutine_info_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_coroutine_info_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_coroutine_info_hasher_ (void *);
+#define gt_ggc_m_27source_location_table_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_source_location_table_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_source_location_table_entry (void *);
+#define gt_ggc_m_44hash_table_source_location_table_entry_hash_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_source_location_table_entry_hash_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_source_location_table_entry_hash_ (void *);
+#define gt_ggc_m_21named_label_use_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_named_label_use_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_named_label_use_entry (void *);
+#define gt_ggc_m_25vec_incomplete_var_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_incomplete_var_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_incomplete_var_va_gc_ (void *);
+#define gt_ggc_m_27hash_table_typename_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_typename_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_typename_hasher_ (void *);
+#define gt_ggc_m_29hash_table_mangled_decl_hash_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_mangled_decl_hash_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_mangled_decl_hash_ (void *);
+#define gt_ggc_m_43hash_map_unsigned_tree_priority_map_traits_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_unsigned_tree_priority_map_traits_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_unsigned_tree_priority_map_traits_ (void *);
+#define gt_ggc_m_27vec_pending_noexcept_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_pending_noexcept_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_pending_noexcept_va_gc_ (void *);
+#define gt_ggc_m_27vec_lambda_sig_count_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_lambda_sig_count_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_lambda_sig_count_va_gc_ (void *);
+#define gt_ggc_m_31vec_lambda_discriminator_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_lambda_discriminator_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_lambda_discriminator_va_gc_ (void *);
+#define gt_ggc_m_28hash_table_conv_type_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_conv_type_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_conv_type_hasher_ (void *);
+#define gt_ggc_m_17subsumption_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_subsumption_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_subsumption_entry (void *);
+#define gt_ggc_m_30hash_table_subsumption_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_subsumption_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_subsumption_hasher_ (void *);
+#define gt_ggc_m_8slurping(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_slurping (X);\
+ } while (0)
+extern void gt_ggc_mx_slurping (void *);
+#define gt_ggc_m_24vec_module_state__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_module_state__va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_module_state__va_gc_ (void *);
+#define gt_ggc_m_29hash_table_module_state_hash_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_module_state_hash_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_module_state_hash_ (void *);
+#define gt_ggc_m_33hash_table_note_def_cache_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_note_def_cache_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_note_def_cache_hasher_ (void *);
+#define gt_ggc_m_23vec_macro_export_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_macro_export_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_macro_export_va_gc_ (void *);
+#define gt_ggc_m_16pending_template(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_pending_template (X);\
+ } while (0)
+extern void gt_ggc_mx_pending_template (void *);
+#define gt_ggc_m_23hash_table_spec_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_spec_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_spec_hasher_ (void *);
+#define gt_ggc_m_22hash_table_ctp_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_ctp_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_ctp_hasher_ (void *);
+#define gt_ggc_m_26hash_map_tree_tree_pair_p_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_tree_tree_pair_p_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_tree_tree_pair_p_ (void *);
+#define gt_ggc_m_18vec_tinfo_s_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_tinfo_s_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_tinfo_s_va_gc_ (void *);
+#define gt_ggc_m_26vec_deferred_access_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_deferred_access_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_deferred_access_va_gc_ (void *);
+#define gt_ggc_m_30hash_table_cplus_array_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_cplus_array_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_cplus_array_hasher_ (void *);
+#define gt_ggc_m_23hash_table_list_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_list_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_list_hasher_ (void *);
+#define gt_ggc_m_9Statement(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_Statement (X);\
+ } while (0)
+extern void gt_ggc_mx_Statement (void *);
+#define gt_ggc_m_13binding_level(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_binding_level (X);\
+ } while (0)
+extern void gt_ggc_mx_binding_level (void *);
+#define gt_ggc_m_17d_label_use_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_d_label_use_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_d_label_use_entry (void *);
+#define gt_ggc_m_34hash_map_Statement__d_label_entry_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_map_Statement__d_label_entry_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_map_Statement__d_label_entry_ (void *);
+#define gt_ggc_m_25hash_table_module_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_module_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_module_hasher_ (void *);
+#define gt_ggc_m_17module_htab_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_module_htab_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_module_htab_entry (void *);
+#define gt_ggc_m_30hash_table_module_decl_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_module_decl_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_module_decl_hasher_ (void *);
+#define gt_ggc_m_7rtenode(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_rtenode (X);\
+ } while (0)
+extern void gt_ggc_mx_rtenode (void *);
+#define gt_ggc_m_19vec_rtenode__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_rtenode__va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_rtenode__va_gc_ (void *);
+#define gt_ggc_m_18struct_constructor(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_struct_constructor (X);\
+ } while (0)
+extern void gt_ggc_mx_struct_constructor (void *);
+#define gt_ggc_m_10array_desc(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_array_desc (X);\
+ } while (0)
+extern void gt_ggc_mx_array_desc (void *);
+#define gt_ggc_m_16objc_map_private(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_objc_map_private (X);\
+ } while (0)
+extern void gt_ggc_mx_objc_map_private (void *);
+#define gt_ggc_m_12hashed_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hashed_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_hashed_entry (void *);
+#define gt_ggc_m_16hashed_attribute(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hashed_attribute (X);\
+ } while (0)
+extern void gt_ggc_mx_hashed_attribute (void *);
+#define gt_ggc_m_9imp_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_imp_entry (X);\
+ } while (0)
+extern void gt_ggc_mx_imp_entry (void *);
+#define gt_ggc_m_17string_descriptor(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_string_descriptor (X);\
+ } while (0)
+extern void gt_ggc_mx_string_descriptor (void *);
+#define gt_ggc_m_30hash_table_objc_string_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_hash_table_objc_string_hasher_ (X);\
+ } while (0)
+extern void gt_ggc_mx_hash_table_objc_string_hasher_ (void *);
+#define gt_ggc_m_27vec_ident_data_tuple_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_ident_data_tuple_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_ident_data_tuple_va_gc_ (void *);
+#define gt_ggc_m_23vec_msgref_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_msgref_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_msgref_entry_va_gc_ (void *);
+#define gt_ggc_m_26vec_prot_list_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_prot_list_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_prot_list_entry_va_gc_ (void *);
+#define gt_ggc_m_24vec_ivarref_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_ggc_mx_vec_ivarref_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_ggc_mx_vec_ivarref_entry_va_gc_ (void *);
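The macros above close out the gengtype-generated GGC root-marking table: each gt_ggc_m_<N><type> macro null-checks a root pointer and dispatches to that type's gt_ggc_mx_<type> walker, with <N> being the length of the type name (e.g. "cp_lexer" is 8 characters, hence gt_ggc_m_8cp_lexer). A minimal standalone sketch of the same pattern, using a hypothetical type my_node and a stub walker in place of the real collector:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical GC-managed type; stands in for tree_node, cp_lexer, etc.  */
struct my_node { struct my_node *next; };

/* Stub for the walker gengtype would emit: the real one marks the
   object live and recurses into its GC-visible pointer fields.  */
static void
gt_ggc_mx_my_node (void *x_p)
{
  printf ("marking %p\n", x_p);
}

/* Same shape as the generated macros: "7" is strlen ("my_node"), and
   the do { ... } while (0) wrapper makes the null-guarded call behave
   as a single statement.  */
#define gt_ggc_m_7my_node(X) do { \
    if ((intptr_t)(X) != 0) gt_ggc_mx_my_node (X);\
  } while (0)

int
main (void)
{
  struct my_node n = { 0 };
  gt_ggc_m_7my_node (&n);   /* non-null root: dispatches to the walker */
  gt_ggc_m_7my_node (NULL); /* null root: no call is made */
  return 0;
}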
+
+/* functions code */
+
+/* PCH type-walking procedures. */
+/* Macros and declarations. */
+#define gt_pch_n_9tree_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_tree_node (X);\
+ } while (0)
+#define gt_pch_nx_tree_node gt_pch_nx_lang_tree_node
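The one irregular entry here is the alias: gt_pch_nx_tree_node is mapped onto gt_pch_nx_lang_tree_node, so PCH walks of generic tree_node pointers route through the front end's language-specific walker. Unlike the GGC markers, the gt_pch_n_* family supports precompiled-header serialization: each walker first notes the object's address for the PCH writer, then recurses into pointer fields. A hedged sketch of that note-then-recurse shape, again with the hypothetical my_node and a simplified stand-in for GCC's gt_pch_note_object (the real function takes additional arguments):

#include <stdint.h>
#include <stdio.h>

struct my_node { struct my_node *next; };

/* Simplified stand-in for gt_pch_note_object: record an object so the
   PCH writer can later relocate it; returns nonzero on first sight.  */
static int
gt_pch_note_object (void *obj)
{
  printf ("noting %p\n", obj);
  return 1;
}

static void gt_pch_nx_my_node (void *);

/* Mirrors the generated gt_pch_n_* macros above.  */
#define gt_pch_n_7my_node(X) do { \
    if ((intptr_t)(X) != 0) gt_pch_nx_my_node (X);\
  } while (0)

/* Sketch of a gengtype-style PCH walker: note the object, then walk
   its pointer fields through the matching macro.  */
static void
gt_pch_nx_my_node (void *x_p)
{
  struct my_node *x = (struct my_node *) x_p;
  if (gt_pch_note_object (x))
    gt_pch_n_7my_node (x->next);
}

int
main (void)
{
  struct my_node b = { 0 };
  struct my_node a = { &b };
  gt_pch_n_7my_node (&a);  /* notes a, then b; stops at the null tail */
  return 0;
}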
+#define gt_pch_n_9line_maps(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_line_maps (X);\
+ } while (0)
+extern void gt_pch_nx_line_maps (void *);
+#define gt_pch_n_9cpp_token(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cpp_token (X);\
+ } while (0)
+extern void gt_pch_nx_cpp_token (void *);
+#define gt_pch_n_9cpp_macro(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cpp_macro (X);\
+ } while (0)
+extern void gt_pch_nx_cpp_macro (void *);
+#define gt_pch_n_13string_concat(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_string_concat (X);\
+ } while (0)
+extern void gt_pch_nx_string_concat (void *);
+#define gt_pch_n_16string_concat_db(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_string_concat_db (X);\
+ } while (0)
+extern void gt_pch_nx_string_concat_db (void *);
+#define gt_pch_n_38hash_map_location_hash_string_concat__(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_location_hash_string_concat__ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_location_hash_string_concat__ (void *);
+#define gt_pch_n_11bitmap_head(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_bitmap_head (X);\
+ } while (0)
+extern void gt_pch_nx_bitmap_head (void *);
+#define gt_pch_n_7rtx_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_rtx_def (X);\
+ } while (0)
+extern void gt_pch_nx_rtx_def (void *);
+#define gt_pch_n_9rtvec_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_rtvec_def (X);\
+ } while (0)
+extern void gt_pch_nx_rtvec_def (void *);
+#define gt_pch_n_6gimple(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_gimple (X);\
+ } while (0)
+extern void gt_pch_nx_gimple (void *);
+#define gt_pch_n_11symtab_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_symtab_node (X);\
+ } while (0)
+extern void gt_pch_nx_symtab_node (void *);
+#define gt_pch_n_11cgraph_edge(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cgraph_edge (X);\
+ } while (0)
+extern void gt_pch_nx_cgraph_edge (void *);
+#define gt_pch_n_7section(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_section (X);\
+ } while (0)
+extern void gt_pch_nx_section (void *);
+#define gt_pch_n_16cl_target_option(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cl_target_option (X);\
+ } while (0)
+extern void gt_pch_nx_cl_target_option (void *);
+#define gt_pch_n_15cl_optimization(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cl_optimization (X);\
+ } while (0)
+extern void gt_pch_nx_cl_optimization (void *);
+#define gt_pch_n_8edge_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_edge_def (X);\
+ } while (0)
+extern void gt_pch_nx_edge_def (void *);
+#define gt_pch_n_15basic_block_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_basic_block_def (X);\
+ } while (0)
+extern void gt_pch_nx_basic_block_def (void *);
+#define gt_pch_n_16machine_function(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_machine_function (X);\
+ } while (0)
+extern void gt_pch_nx_machine_function (void *);
+#define gt_pch_n_14bitmap_element(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_bitmap_element (X);\
+ } while (0)
+extern void gt_pch_nx_bitmap_element (void *);
+#define gt_pch_n_34generic_wide_int_wide_int_storage_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_generic_wide_int_wide_int_storage_ (X);\
+ } while (0)
+extern void gt_pch_nx_generic_wide_int_wide_int_storage_ (void *);
+#define gt_pch_n_13coverage_data(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_coverage_data (X);\
+ } while (0)
+extern void gt_pch_nx_coverage_data (void *);
+#define gt_pch_n_9mem_attrs(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_mem_attrs (X);\
+ } while (0)
+extern void gt_pch_nx_mem_attrs (void *);
+#define gt_pch_n_9reg_attrs(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_reg_attrs (X);\
+ } while (0)
+extern void gt_pch_nx_reg_attrs (void *);
+#define gt_pch_n_12object_block(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_object_block (X);\
+ } while (0)
+extern void gt_pch_nx_object_block (void *);
+#define gt_pch_n_14vec_rtx_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_rtx_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_rtx_va_gc_ (void *);
+#define gt_pch_n_11fixed_value(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_fixed_value (X);\
+ } while (0)
+extern void gt_pch_nx_fixed_value (void *);
+#define gt_pch_n_23constant_descriptor_rtx(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_constant_descriptor_rtx (X);\
+ } while (0)
+extern void gt_pch_nx_constant_descriptor_rtx (void *);
+#define gt_pch_n_8function(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_function (X);\
+ } while (0)
+extern void gt_pch_nx_function (void *);
+#define gt_pch_n_10target_rtl(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_target_rtl (X);\
+ } while (0)
+extern void gt_pch_nx_target_rtl (void *);
+#define gt_pch_n_15cgraph_rtl_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cgraph_rtl_info (X);\
+ } while (0)
+extern void gt_pch_nx_cgraph_rtl_info (void *);
+#define gt_pch_n_42hash_map_tree_tree_decl_tree_cache_traits_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_tree_tree_decl_tree_cache_traits_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_tree_tree_decl_tree_cache_traits_ (void *);
+#define gt_pch_n_42hash_map_tree_tree_type_tree_cache_traits_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_tree_tree_type_tree_cache_traits_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_tree_tree_type_tree_cache_traits_ (void *);
+#define gt_pch_n_36hash_map_tree_tree_decl_tree_traits_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_tree_tree_decl_tree_traits_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_tree_tree_decl_tree_traits_ (void *);
+#define gt_pch_n_12ptr_info_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ptr_info_def (X);\
+ } while (0)
+extern void gt_pch_nx_ptr_info_def (void *);
+#define gt_pch_n_19irange_storage_slot(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_irange_storage_slot (X);\
+ } while (0)
+extern void gt_pch_nx_irange_storage_slot (void *);
+#define gt_pch_n_10die_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_die_struct (X);\
+ } while (0)
+extern void gt_pch_nx_die_struct (void *);
+#define gt_pch_n_26vec_constructor_elt_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_constructor_elt_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_constructor_elt_va_gc_ (void *);
+#define gt_pch_n_19frange_storage_slot(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_frange_storage_slot (X);\
+ } while (0)
+extern void gt_pch_nx_frange_storage_slot (void *);
+#define gt_pch_n_15vec_tree_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_tree_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_tree_va_gc_ (void *);
+#define gt_pch_n_9lang_type(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_lang_type (X);\
+ } while (0)
+extern void gt_pch_nx_lang_type (void *);
+#define gt_pch_n_9lang_decl(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_lang_decl (X);\
+ } while (0)
+extern void gt_pch_nx_lang_decl (void *);
+#define gt_pch_n_24tree_statement_list_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_tree_statement_list_node (X);\
+ } while (0)
+extern void gt_pch_nx_tree_statement_list_node (void *);
+#define gt_pch_n_14target_globals(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_target_globals (X);\
+ } while (0)
+extern void gt_pch_nx_target_globals (void *);
+#define gt_pch_n_14lang_tree_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_lang_tree_node (X);\
+ } while (0)
+extern void gt_pch_nx_lang_tree_node (void *);
+#define gt_pch_n_8tree_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_tree_map (X);\
+ } while (0)
+extern void gt_pch_nx_tree_map (void *);
+#define gt_pch_n_13tree_decl_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_tree_decl_map (X);\
+ } while (0)
+extern void gt_pch_nx_tree_decl_map (void *);
+#define gt_pch_n_12tree_int_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_tree_int_map (X);\
+ } while (0)
+extern void gt_pch_nx_tree_int_map (void *);
+#define gt_pch_n_12tree_vec_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_tree_vec_map (X);\
+ } while (0)
+extern void gt_pch_nx_tree_vec_map (void *);
+#define gt_pch_n_21vec_alias_pair_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_alias_pair_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_alias_pair_va_gc_ (void *);
+#define gt_pch_n_13libfunc_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_libfunc_entry (X);\
+ } while (0)
+extern void gt_pch_nx_libfunc_entry (void *);
+#define gt_pch_n_26hash_table_libfunc_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_libfunc_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_libfunc_hasher_ (void *);
+#define gt_pch_n_15target_libfuncs(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_target_libfuncs (X);\
+ } while (0)
+extern void gt_pch_nx_target_libfuncs (void *);
+#define gt_pch_n_14sequence_stack(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_sequence_stack (X);\
+ } while (0)
+extern void gt_pch_nx_sequence_stack (void *);
+#define gt_pch_n_20vec_rtx_insn__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_rtx_insn__va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_rtx_insn__va_gc_ (void *);
+#define gt_pch_n_18call_site_record_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_call_site_record_d (X);\
+ } while (0)
+extern void gt_pch_nx_call_site_record_d (void *);
+#define gt_pch_n_16vec_uchar_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_uchar_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_uchar_va_gc_ (void *);
+#define gt_pch_n_27vec_call_site_record_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_call_site_record_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_call_site_record_va_gc_ (void *);
+#define gt_pch_n_9gimple_df(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_gimple_df (X);\
+ } while (0)
+extern void gt_pch_nx_gimple_df (void *);
+#define gt_pch_n_11dw_fde_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_dw_fde_node (X);\
+ } while (0)
+extern void gt_pch_nx_dw_fde_node (void *);
+#define gt_pch_n_17rtx_constant_pool(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_rtx_constant_pool (X);\
+ } while (0)
+extern void gt_pch_nx_rtx_constant_pool (void *);
+#define gt_pch_n_11frame_space(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_frame_space (X);\
+ } while (0)
+extern void gt_pch_nx_frame_space (void *);
+#define gt_pch_n_26vec_callinfo_callee_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_callinfo_callee_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_callinfo_callee_va_gc_ (void *);
+#define gt_pch_n_26vec_callinfo_dalloc_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_callinfo_dalloc_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_callinfo_dalloc_va_gc_ (void *);
+#define gt_pch_n_11stack_usage(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_stack_usage (X);\
+ } while (0)
+extern void gt_pch_nx_stack_usage (void *);
+#define gt_pch_n_9eh_status(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_eh_status (X);\
+ } while (0)
+extern void gt_pch_nx_eh_status (void *);
+#define gt_pch_n_18control_flow_graph(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_control_flow_graph (X);\
+ } while (0)
+extern void gt_pch_nx_control_flow_graph (void *);
+#define gt_pch_n_5loops(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_loops (X);\
+ } while (0)
+extern void gt_pch_nx_loops (void *);
+#define gt_pch_n_17language_function(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_language_function (X);\
+ } while (0)
+extern void gt_pch_nx_language_function (void *);
+#define gt_pch_n_14hash_set_tree_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_set_tree_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_set_tree_ (void *);
+#define gt_pch_n_24types_used_by_vars_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_types_used_by_vars_entry (X);\
+ } while (0)
+extern void gt_pch_nx_types_used_by_vars_entry (void *);
+#define gt_pch_n_28hash_table_used_type_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_used_type_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_used_type_hasher_ (void *);
+#define gt_pch_n_13nb_iter_bound(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_nb_iter_bound (X);\
+ } while (0)
+extern void gt_pch_nx_nb_iter_bound (void *);
+#define gt_pch_n_9loop_exit(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_loop_exit (X);\
+ } while (0)
+extern void gt_pch_nx_loop_exit (void *);
+#define gt_pch_n_4loop(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_loop (X);\
+ } while (0)
+extern void gt_pch_nx_loop (void *);
+#define gt_pch_n_10control_iv(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_control_iv (X);\
+ } while (0)
+extern void gt_pch_nx_control_iv (void *);
+#define gt_pch_n_17vec_loop_p_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_loop_p_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_loop_p_va_gc_ (void *);
+#define gt_pch_n_10niter_desc(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_niter_desc (X);\
+ } while (0)
+extern void gt_pch_nx_niter_desc (void *);
+#define gt_pch_n_28hash_table_loop_exit_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_loop_exit_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_loop_exit_hasher_ (void *);
+#define gt_pch_n_22vec_basic_block_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_basic_block_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_basic_block_va_gc_ (void *);
+#define gt_pch_n_11rtl_bb_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_rtl_bb_info (X);\
+ } while (0)
+extern void gt_pch_nx_rtl_bb_info (void *);
+#define gt_pch_n_15vec_edge_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_edge_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_edge_va_gc_ (void *);
+#define gt_pch_n_18section_hash_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_section_hash_entry (X);\
+ } while (0)
+extern void gt_pch_nx_section_hash_entry (void *);
+#define gt_pch_n_18lto_file_decl_data(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_lto_file_decl_data (X);\
+ } while (0)
+extern void gt_pch_nx_lto_file_decl_data (void *);
+#define gt_pch_n_15ipa_replace_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ipa_replace_map (X);\
+ } while (0)
+extern void gt_pch_nx_ipa_replace_map (void *);
+#define gt_pch_n_17cgraph_simd_clone(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cgraph_simd_clone (X);\
+ } while (0)
+extern void gt_pch_nx_cgraph_simd_clone (void *);
+#define gt_pch_n_28cgraph_function_version_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cgraph_function_version_info (X);\
+ } while (0)
+extern void gt_pch_nx_cgraph_function_version_info (void *);
+#define gt_pch_n_30hash_table_cgraph_edge_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_cgraph_edge_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_cgraph_edge_hasher_ (void *);
+#define gt_pch_n_25cgraph_indirect_call_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cgraph_indirect_call_info (X);\
+ } while (0)
+extern void gt_pch_nx_cgraph_indirect_call_info (void *);
+#define gt_pch_n_8asm_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_asm_node (X);\
+ } while (0)
+extern void gt_pch_nx_asm_node (void *);
+#define gt_pch_n_10thunk_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_thunk_info (X);\
+ } while (0)
+extern void gt_pch_nx_thunk_info (void *);
+#define gt_pch_n_29function_summary_thunk_info__(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_function_summary_thunk_info__ (X);\
+ } while (0)
+extern void gt_pch_nx_function_summary_thunk_info__ (void *);
+#define gt_pch_n_10clone_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_clone_info (X);\
+ } while (0)
+extern void gt_pch_nx_clone_info (void *);
+#define gt_pch_n_29function_summary_clone_info__(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_function_summary_clone_info__ (X);\
+ } while (0)
+extern void gt_pch_nx_function_summary_clone_info__ (void *);
+#define gt_pch_n_12symbol_table(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_symbol_table (X);\
+ } while (0)
+extern void gt_pch_nx_symbol_table (void *);
+#define gt_pch_n_31hash_table_section_name_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_section_name_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_section_name_hasher_ (void *);
+#define gt_pch_n_26hash_table_asmname_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_asmname_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_asmname_hasher_ (void *);
+#define gt_pch_n_42hash_map_symtab_node__symbol_priority_map_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_symtab_node__symbol_priority_map_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_symtab_node__symbol_priority_map_ (void *);
+#define gt_pch_n_24constant_descriptor_tree(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_constant_descriptor_tree (X);\
+ } while (0)
+extern void gt_pch_nx_constant_descriptor_tree (void *);
+#define gt_pch_n_28vec_unprocessed_thunk_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_unprocessed_thunk_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_unprocessed_thunk_va_gc_ (void *);
+#define gt_pch_n_27vec_ipa_replace_map__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_ipa_replace_map__va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_ipa_replace_map__va_gc_ (void *);
+#define gt_pch_n_21ipa_param_adjustments(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ipa_param_adjustments (X);\
+ } while (0)
+extern void gt_pch_nx_ipa_param_adjustments (void *);
+#define gt_pch_n_28hash_map_alias_set_hash_int_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_alias_set_hash_int_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_alias_set_hash_int_ (void *);
+#define gt_pch_n_15alias_set_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_alias_set_entry (X);\
+ } while (0)
+extern void gt_pch_nx_alias_set_entry (void *);
+#define gt_pch_n_27vec_alias_set_entry__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_alias_set_entry__va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_alias_set_entry__va_gc_ (void *);
+#define gt_pch_n_35hash_table_function_version_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_function_version_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_function_version_hasher_ (void *);
+#define gt_pch_n_17lto_in_decl_state(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_lto_in_decl_state (X);\
+ } while (0)
+extern void gt_pch_nx_lto_in_decl_state (void *);
+#define gt_pch_n_35hash_table_ipa_bit_ggc_hash_traits_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_ipa_bit_ggc_hash_traits_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_ipa_bit_ggc_hash_traits_ (void *);
+#define gt_pch_n_34hash_table_ipa_vr_ggc_hash_traits_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_ipa_vr_ggc_hash_traits_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_ipa_vr_ggc_hash_traits_ (void *);
+#define gt_pch_n_15ipa_node_params(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ipa_node_params (X);\
+ } while (0)
+extern void gt_pch_nx_ipa_node_params (void *);
+#define gt_pch_n_13ipa_edge_args(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ipa_edge_args (X);\
+ } while (0)
+extern void gt_pch_nx_ipa_edge_args (void *);
+#define gt_pch_n_14ipa_fn_summary(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ipa_fn_summary (X);\
+ } while (0)
+extern void gt_pch_nx_ipa_fn_summary (void *);
+#define gt_pch_n_10odr_type_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_odr_type_d (X);\
+ } while (0)
+extern void gt_pch_nx_odr_type_d (void *);
+#define gt_pch_n_29vec_ipa_adjusted_param_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_ipa_adjusted_param_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_ipa_adjusted_param_va_gc_ (void *);
+#define gt_pch_n_12param_access(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_param_access (X);\
+ } while (0)
+extern void gt_pch_nx_param_access (void *);
+#define gt_pch_n_24vec_param_access__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_param_access__va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_param_access__va_gc_ (void *);
+#define gt_pch_n_17isra_func_summary(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_isra_func_summary (X);\
+ } while (0)
+extern void gt_pch_nx_isra_func_summary (void *);
+#define gt_pch_n_26vec_isra_param_desc_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_isra_param_desc_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_isra_param_desc_va_gc_ (void *);
+#define gt_pch_n_26ipa_sra_function_summaries(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ipa_sra_function_summaries (X);\
+ } while (0)
+extern void gt_pch_nx_ipa_sra_function_summaries (void *);
+#define gt_pch_n_27modref_tree_alias_set_type_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_modref_tree_alias_set_type_ (X);\
+ } while (0)
+extern void gt_pch_nx_modref_tree_alias_set_type_ (void *);
+#define gt_pch_n_14modref_summary(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_modref_summary (X);\
+ } while (0)
+extern void gt_pch_nx_modref_summary (void *);
+#define gt_pch_n_18modref_summary_lto(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_modref_summary_lto (X);\
+ } while (0)
+extern void gt_pch_nx_modref_summary_lto (void *);
+#define gt_pch_n_44fast_function_summary_modref_summary__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_fast_function_summary_modref_summary__va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_fast_function_summary_modref_summary__va_gc_ (void *);
+#define gt_pch_n_48fast_function_summary_modref_summary_lto__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_fast_function_summary_modref_summary_lto__va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_fast_function_summary_modref_summary_lto__va_gc_ (void *);
+#define gt_pch_n_17modref_tree_tree_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_modref_tree_tree_ (X);\
+ } while (0)
+extern void gt_pch_nx_modref_tree_tree_ (void *);
+#define gt_pch_n_37hash_map_location_hash_nowarn_spec_t_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_location_hash_nowarn_spec_t_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_location_hash_nowarn_spec_t_ (void *);
+#define gt_pch_n_11dw_cfi_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_dw_cfi_node (X);\
+ } while (0)
+extern void gt_pch_nx_dw_cfi_node (void *);
+#define gt_pch_n_17dw_loc_descr_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_dw_loc_descr_node (X);\
+ } while (0)
+extern void gt_pch_nx_dw_loc_descr_node (void *);
+#define gt_pch_n_18dw_loc_list_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_dw_loc_list_struct (X);\
+ } while (0)
+extern void gt_pch_nx_dw_loc_list_struct (void *);
+#define gt_pch_n_18dw_discr_list_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_dw_discr_list_node (X);\
+ } while (0)
+extern void gt_pch_nx_dw_discr_list_node (void *);
+#define gt_pch_n_15dw_cfa_location(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_dw_cfa_location (X);\
+ } while (0)
+extern void gt_pch_nx_dw_cfa_location (void *);
+#define gt_pch_n_21vec_dw_cfi_ref_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_dw_cfi_ref_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_dw_cfi_ref_va_gc_ (void *);
+#define gt_pch_n_16addr_table_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_addr_table_entry (X);\
+ } while (0)
+extern void gt_pch_nx_addr_table_entry (void *);
+#define gt_pch_n_20indirect_string_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_indirect_string_node (X);\
+ } while (0)
+extern void gt_pch_nx_indirect_string_node (void *);
+#define gt_pch_n_15dwarf_file_data(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_dwarf_file_data (X);\
+ } while (0)
+extern void gt_pch_nx_dwarf_file_data (void *);
+#define gt_pch_n_20hash_map_char__tree_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_char__tree_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_char__tree_ (void *);
+#define gt_pch_n_10dw_cfi_row(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_dw_cfi_row (X);\
+ } while (0)
+extern void gt_pch_nx_dw_cfi_row (void *);
+#define gt_pch_n_17reg_saved_in_data(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_reg_saved_in_data (X);\
+ } while (0)
+extern void gt_pch_nx_reg_saved_in_data (void *);
+#define gt_pch_n_21vec_dw_fde_ref_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_dw_fde_ref_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_dw_fde_ref_va_gc_ (void *);
+#define gt_pch_n_34hash_table_indirect_string_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_indirect_string_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_indirect_string_hasher_ (void *);
+#define gt_pch_n_16vec_char__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_char__va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_char__va_gc_ (void *);
+#define gt_pch_n_16comdat_type_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_comdat_type_node (X);\
+ } while (0)
+extern void gt_pch_nx_comdat_type_node (void *);
+#define gt_pch_n_29vec_dw_line_info_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_dw_line_info_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_dw_line_info_entry_va_gc_ (void *);
+#define gt_pch_n_18dw_line_info_table(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_dw_line_info_table (X);\
+ } while (0)
+extern void gt_pch_nx_dw_line_info_table (void *);
+#define gt_pch_n_23vec_dw_attr_node_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_dw_attr_node_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_dw_attr_node_va_gc_ (void *);
+#define gt_pch_n_16limbo_die_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_limbo_die_struct (X);\
+ } while (0)
+extern void gt_pch_nx_limbo_die_struct (void *);
+#define gt_pch_n_29hash_table_dwarf_file_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_dwarf_file_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_dwarf_file_hasher_ (void *);
+#define gt_pch_n_27hash_table_decl_die_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_decl_die_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_decl_die_hasher_ (void *);
+#define gt_pch_n_21vec_dw_die_ref_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_dw_die_ref_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_dw_die_ref_va_gc_ (void *);
+#define gt_pch_n_21variable_value_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_variable_value_struct (X);\
+ } while (0)
+extern void gt_pch_nx_variable_value_struct (void *);
+#define gt_pch_n_33hash_table_variable_value_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_variable_value_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_variable_value_hasher_ (void *);
+#define gt_pch_n_28hash_table_block_die_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_block_die_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_block_die_hasher_ (void *);
+#define gt_pch_n_12var_loc_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_var_loc_node (X);\
+ } while (0)
+extern void gt_pch_nx_var_loc_node (void *);
+#define gt_pch_n_16var_loc_list_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_var_loc_list_def (X);\
+ } while (0)
+extern void gt_pch_nx_var_loc_list_def (void *);
+#define gt_pch_n_17call_arg_loc_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_call_arg_loc_node (X);\
+ } while (0)
+extern void gt_pch_nx_call_arg_loc_node (void *);
+#define gt_pch_n_27hash_table_decl_loc_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_decl_loc_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_decl_loc_hasher_ (void *);
+#define gt_pch_n_22cached_dw_loc_list_def(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cached_dw_loc_list_def (X);\
+ } while (0)
+extern void gt_pch_nx_cached_dw_loc_list_def (void *);
+#define gt_pch_n_30hash_table_dw_loc_list_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_dw_loc_list_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_dw_loc_list_hasher_ (void *);
+#define gt_pch_n_30vec_dw_line_info_table__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_dw_line_info_table__va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_dw_line_info_table__va_gc_ (void *);
+#define gt_pch_n_24vec_pubname_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_pubname_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_pubname_entry_va_gc_ (void *);
+#define gt_pch_n_24vec_macinfo_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_macinfo_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_macinfo_entry_va_gc_ (void *);
+#define gt_pch_n_20vec_dw_ranges_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_dw_ranges_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_dw_ranges_va_gc_ (void *);
+#define gt_pch_n_29vec_dw_ranges_by_label_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_dw_ranges_by_label_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_dw_ranges_by_label_va_gc_ (void *);
+#define gt_pch_n_24vec_die_arg_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_die_arg_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_die_arg_entry_va_gc_ (void *);
+#define gt_pch_n_23hash_table_addr_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_addr_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_addr_hasher_ (void *);
+#define gt_pch_n_27hash_map_tree_sym_off_pair_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_tree_sym_off_pair_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_tree_sym_off_pair_ (void *);
+#define gt_pch_n_17inline_entry_data(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_inline_entry_data (X);\
+ } while (0)
+extern void gt_pch_nx_inline_entry_data (void *);
+#define gt_pch_n_36hash_table_inline_entry_data_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_inline_entry_data_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_inline_entry_data_hasher_ (void *);
+#define gt_pch_n_10ctf_string(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ctf_string (X);\
+ } while (0)
+extern void gt_pch_nx_ctf_string (void *);
+#define gt_pch_n_9ctf_dmdef(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ctf_dmdef (X);\
+ } while (0)
+extern void gt_pch_nx_ctf_dmdef (void *);
+#define gt_pch_n_12ctf_func_arg(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ctf_func_arg (X);\
+ } while (0)
+extern void gt_pch_nx_ctf_func_arg (void *);
+#define gt_pch_n_9ctf_dtdef(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ctf_dtdef (X);\
+ } while (0)
+extern void gt_pch_nx_ctf_dtdef (void *);
+#define gt_pch_n_9ctf_dvdef(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ctf_dvdef (X);\
+ } while (0)
+extern void gt_pch_nx_ctf_dvdef (void *);
+#define gt_pch_n_27hash_table_ctfc_dtd_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_ctfc_dtd_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_ctfc_dtd_hasher_ (void *);
+#define gt_pch_n_27hash_table_ctfc_dvd_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_ctfc_dvd_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_ctfc_dvd_hasher_ (void *);
+#define gt_pch_n_13ctf_container(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ctf_container (X);\
+ } while (0)
+extern void gt_pch_nx_ctf_container (void *);
+#define gt_pch_n_32hash_map_ctf_dvdef_ref_unsigned_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_ctf_dvdef_ref_unsigned_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_ctf_dvdef_ref_unsigned_ (void *);
+#define gt_pch_n_24vec_ctf_dtdef_ref_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_ctf_dtdef_ref_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_ctf_dtdef_ref_va_gc_ (void *);
+#define gt_pch_n_9temp_slot(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_temp_slot (X);\
+ } while (0)
+extern void gt_pch_nx_temp_slot (void *);
+#define gt_pch_n_20initial_value_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_initial_value_struct (X);\
+ } while (0)
+extern void gt_pch_nx_initial_value_struct (void *);
+#define gt_pch_n_22vec_temp_slot_p_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_temp_slot_p_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_temp_slot_p_va_gc_ (void *);
+#define gt_pch_n_28hash_table_const_int_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_const_int_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_const_int_hasher_ (void *);
+#define gt_pch_n_33hash_table_const_wide_int_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_const_wide_int_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_const_wide_int_hasher_ (void *);
+#define gt_pch_n_33hash_table_const_poly_int_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_const_poly_int_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_const_poly_int_hasher_ (void *);
+#define gt_pch_n_27hash_table_reg_attr_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_reg_attr_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_reg_attr_hasher_ (void *);
+#define gt_pch_n_31hash_table_const_double_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_const_double_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_const_double_hasher_ (void *);
+#define gt_pch_n_30hash_table_const_fixed_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_const_fixed_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_const_fixed_hasher_ (void *);
+#define gt_pch_n_11eh_region_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_eh_region_d (X);\
+ } while (0)
+extern void gt_pch_nx_eh_region_d (void *);
+#define gt_pch_n_16eh_landing_pad_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_eh_landing_pad_d (X);\
+ } while (0)
+extern void gt_pch_nx_eh_landing_pad_d (void *);
+#define gt_pch_n_10eh_catch_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_eh_catch_d (X);\
+ } while (0)
+extern void gt_pch_nx_eh_catch_d (void *);
+#define gt_pch_n_20vec_eh_region_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_eh_region_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_eh_region_va_gc_ (void *);
+#define gt_pch_n_25vec_eh_landing_pad_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_eh_landing_pad_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_eh_landing_pad_va_gc_ (void *);
+#define gt_pch_n_21hash_map_gimple__int_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_gimple__int_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_gimple__int_ (void *);
+#define gt_pch_n_29hash_table_insn_cache_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_insn_cache_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_insn_cache_hasher_ (void *);
+#define gt_pch_n_23temp_slot_address_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_temp_slot_address_entry (X);\
+ } while (0)
+extern void gt_pch_nx_temp_slot_address_entry (void *);
+#define gt_pch_n_31hash_table_temp_address_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_temp_address_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_temp_address_hasher_ (void *);
+#define gt_pch_n_24hash_map_tree_hash_tree_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_tree_hash_tree_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_tree_hash_tree_ (void *);
+#define gt_pch_n_11test_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_test_struct (X);\
+ } while (0)
+extern void gt_pch_nx_test_struct (void *);
+#define gt_pch_n_14test_of_length(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_test_of_length (X);\
+ } while (0)
+extern void gt_pch_nx_test_of_length (void *);
+#define gt_pch_n_10test_other(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_test_other (X);\
+ } while (0)
+extern void gt_pch_nx_test_other (void *);
+#define gt_pch_n_13test_of_union(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_test_of_union (X);\
+ } while (0)
+extern void gt_pch_nx_test_of_union (void *);
+#define gt_pch_n_12example_base(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_example_base (X);\
+ } while (0)
+extern void gt_pch_nx_example_base (void *);
+#define gt_pch_n_9test_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_test_node (X);\
+ } while (0)
+extern void gt_pch_nx_test_node (void *);
+#define gt_pch_n_11user_struct(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_user_struct (X);\
+ } while (0)
+extern void gt_pch_nx_user_struct (void *);
+#define gt_pch_n_31hash_table_libfunc_decl_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_libfunc_decl_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_libfunc_decl_hasher_ (void *);
+#define gt_pch_n_16string_pool_data(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_string_pool_data (X);\
+ } while (0)
+extern void gt_pch_nx_string_pool_data (void *);
+#define gt_pch_n_9type_hash(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_type_hash (X);\
+ } while (0)
+extern void gt_pch_nx_type_hash (void *);
+#define gt_pch_n_29hash_table_type_cache_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_type_cache_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_type_cache_hasher_ (void *);
+#define gt_pch_n_26hash_table_int_cst_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_int_cst_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_int_cst_hasher_ (void *);
+#define gt_pch_n_31hash_table_poly_int_cst_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_poly_int_cst_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_poly_int_cst_hasher_ (void *);
+#define gt_pch_n_28hash_table_cl_option_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_cl_option_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_cl_option_hasher_ (void *);
+#define gt_pch_n_38hash_table_tree_decl_map_cache_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_tree_decl_map_cache_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_tree_decl_map_cache_hasher_ (void *);
+#define gt_pch_n_37hash_table_tree_vec_map_cache_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_tree_vec_map_cache_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_tree_vec_map_cache_hasher_ (void *);
+#define gt_pch_n_26hash_table_section_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_section_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_section_hasher_ (void *);
+#define gt_pch_n_31hash_table_object_block_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_object_block_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_object_block_hasher_ (void *);
+#define gt_pch_n_34hash_table_tree_descriptor_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_tree_descriptor_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_tree_descriptor_hasher_ (void *);
+#define gt_pch_n_33hash_table_const_rtx_desc_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_const_rtx_desc_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_const_rtx_desc_hasher_ (void *);
+#define gt_pch_n_27hash_table_tm_clone_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_tm_clone_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_tm_clone_hasher_ (void *);
+#define gt_pch_n_15tm_restart_node(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_tm_restart_node (X);\
+ } while (0)
+extern void gt_pch_nx_tm_restart_node (void *);
+#define gt_pch_n_19hash_map_tree_tree_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_tree_tree_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_tree_tree_ (void *);
+#define gt_pch_n_27hash_table_ssa_name_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_ssa_name_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_ssa_name_hasher_ (void *);
+#define gt_pch_n_29hash_table_tm_restart_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_tm_restart_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_tm_restart_hasher_ (void *);
+#define gt_pch_n_28vec_mem_addr_template_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_mem_addr_template_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_mem_addr_template_va_gc_ (void *);
+#define gt_pch_n_13scev_info_str(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_scev_info_str (X);\
+ } while (0)
+extern void gt_pch_nx_scev_info_str (void *);
+#define gt_pch_n_28hash_table_scev_info_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_scev_info_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_scev_info_hasher_ (void *);
+#define gt_pch_n_20ssa_operand_memory_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ssa_operand_memory_d (X);\
+ } while (0)
+extern void gt_pch_nx_ssa_operand_memory_d (void *);
+#define gt_pch_n_36vec_omp_declare_variant_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_omp_declare_variant_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_omp_declare_variant_entry_va_gc_ (void *);
+#define gt_pch_n_30omp_declare_variant_base_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_omp_declare_variant_base_entry (X);\
+ } while (0)
+extern void gt_pch_nx_omp_declare_variant_base_entry (void *);
+#define gt_pch_n_38hash_table_omp_declare_variant_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_omp_declare_variant_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_omp_declare_variant_hasher_ (void *);
+#define gt_pch_n_42hash_table_omp_declare_variant_alt_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_omp_declare_variant_alt_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_omp_declare_variant_alt_hasher_ (void *);
+#define gt_pch_n_24hash_map_char__unsigned_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_char__unsigned_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_char__unsigned_ (void *);
+#define gt_pch_n_18vec_gimple__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_gimple__va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_gimple__va_gc_ (void *);
+#define gt_pch_n_12int_range_1_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_int_range_1_ (X);\
+ } while (0)
+extern void gt_pch_nx_int_range_1_ (void *);
+#define gt_pch_n_26vec_ipa_agg_jf_item_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_ipa_agg_jf_item_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_ipa_agg_jf_item_va_gc_ (void *);
+#define gt_pch_n_19ipcp_transformation(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ipcp_transformation (X);\
+ } while (0)
+extern void gt_pch_nx_ipcp_transformation (void *);
+#define gt_pch_n_8ipa_bits(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ipa_bits (X);\
+ } while (0)
+extern void gt_pch_nx_ipa_bits (void *);
+#define gt_pch_n_31vec_ipa_param_descriptor_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_ipa_param_descriptor_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_ipa_param_descriptor_va_gc_ (void *);
+#define gt_pch_n_27vec_ipa_argagg_value_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_ipa_argagg_value_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_ipa_argagg_value_va_gc_ (void *);
+#define gt_pch_n_20vec_ipa_bits__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_ipa_bits__va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_ipa_bits__va_gc_ (void *);
+#define gt_pch_n_17vec_ipa_vr_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_ipa_vr_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_ipa_vr_va_gc_ (void *);
+#define gt_pch_n_24vec_ipa_jump_func_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_ipa_jump_func_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_ipa_jump_func_va_gc_ (void *);
+#define gt_pch_n_39vec_ipa_polymorphic_call_context_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_ipa_polymorphic_call_context_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_ipa_polymorphic_call_context_va_gc_ (void *);
+#define gt_pch_n_17ipa_node_params_t(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ipa_node_params_t (X);\
+ } while (0)
+extern void gt_pch_nx_ipa_node_params_t (void *);
+#define gt_pch_n_19ipa_edge_args_sum_t(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_ipa_edge_args_sum_t (X);\
+ } while (0)
+extern void gt_pch_nx_ipa_edge_args_sum_t (void *);
+#define gt_pch_n_38function_summary_ipcp_transformation__(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_function_summary_ipcp_transformation__ (X);\
+ } while (0)
+extern void gt_pch_nx_function_summary_ipcp_transformation__ (void *);
+#define gt_pch_n_29hash_table_tm_wrapper_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_tm_wrapper_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_tm_wrapper_hasher_ (void *);
+#define gt_pch_n_29hash_table_decl_state_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_decl_state_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_decl_state_hasher_ (void *);
+#define gt_pch_n_23vec_expr_eval_op_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_expr_eval_op_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_expr_eval_op_va_gc_ (void *);
+#define gt_pch_n_20vec_condition_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_condition_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_condition_va_gc_ (void *);
+#define gt_pch_n_37vec_ipa_freqcounting_predicate_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_ipa_freqcounting_predicate_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_ipa_freqcounting_predicate_va_gc_ (void *);
+#define gt_pch_n_44fast_function_summary_ipa_fn_summary__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_fast_function_summary_ipa_fn_summary__va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_fast_function_summary_ipa_fn_summary__va_gc_ (void *);
+#define gt_pch_n_13tree_type_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_tree_type_map (X);\
+ } while (0)
+extern void gt_pch_nx_tree_type_map (void *);
+#define gt_pch_n_38hash_table_tree_type_map_cache_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_tree_type_map_cache_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_tree_type_map_cache_hasher_ (void *);
+#define gt_pch_n_19vec_odr_type_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_odr_type_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_odr_type_va_gc_ (void *);
+#define gt_pch_n_35hash_table_value_annotation_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_value_annotation_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_value_annotation_hasher_ (void *);
+#define gt_pch_n_27vec_Entity_Id_va_gc_atomic_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_Entity_Id_va_gc_atomic_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_Entity_Id_va_gc_atomic_ (void *);
+#define gt_pch_n_19tree_entity_vec_map(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_tree_entity_vec_map (X);\
+ } while (0)
+extern void gt_pch_nx_tree_entity_vec_map (void *);
+#define gt_pch_n_29hash_table_dummy_type_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_dummy_type_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_dummy_type_hasher_ (void *);
+#define gt_pch_n_11parm_attr_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_parm_attr_d (X);\
+ } while (0)
+extern void gt_pch_nx_parm_attr_d (void *);
+#define gt_pch_n_20vec_parm_attr_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_parm_attr_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_parm_attr_va_gc_ (void *);
+#define gt_pch_n_10stmt_group(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_stmt_group (X);\
+ } while (0)
+extern void gt_pch_nx_stmt_group (void *);
+#define gt_pch_n_9elab_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_elab_info (X);\
+ } while (0)
+extern void gt_pch_nx_elab_info (void *);
+#define gt_pch_n_18range_check_info_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_range_check_info_d (X);\
+ } while (0)
+extern void gt_pch_nx_range_check_info_d (void *);
+#define gt_pch_n_27vec_range_check_info_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_range_check_info_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_range_check_info_va_gc_ (void *);
+#define gt_pch_n_11loop_info_d(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_loop_info_d (X);\
+ } while (0)
+extern void gt_pch_nx_loop_info_d (void *);
+#define gt_pch_n_20vec_loop_info_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_loop_info_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_loop_info_va_gc_ (void *);
+#define gt_pch_n_18gnat_binding_level(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_gnat_binding_level (X);\
+ } while (0)
+extern void gt_pch_nx_gnat_binding_level (void *);
+#define gt_pch_n_18packable_type_hash(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_packable_type_hash (X);\
+ } while (0)
+extern void gt_pch_nx_packable_type_hash (void *);
+#define gt_pch_n_32hash_table_packable_type_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_packable_type_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_packable_type_hasher_ (void *);
+#define gt_pch_n_13pad_type_hash(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_pad_type_hash (X);\
+ } while (0)
+extern void gt_pch_nx_pad_type_hash (void *);
+#define gt_pch_n_27hash_table_pad_type_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_pad_type_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_pad_type_hasher_ (void *);
+#define gt_pch_n_12c_label_vars(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_c_label_vars (X);\
+ } while (0)
+extern void gt_pch_nx_c_label_vars (void *);
+#define gt_pch_n_9c_binding(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_c_binding (X);\
+ } while (0)
+extern void gt_pch_nx_c_binding (void *);
+#define gt_pch_n_7c_scope(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_c_scope (X);\
+ } while (0)
+extern void gt_pch_nx_c_scope (void *);
+#define gt_pch_n_15c_goto_bindings(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_c_goto_bindings (X);\
+ } while (0)
+extern void gt_pch_nx_c_goto_bindings (void *);
+#define gt_pch_n_28vec_c_goto_bindings_p_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_c_goto_bindings_p_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_c_goto_bindings_p_va_gc_ (void *);
+#define gt_pch_n_15c_inline_static(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_c_inline_static (X);\
+ } while (0)
+extern void gt_pch_nx_c_inline_static (void *);
+#define gt_pch_n_18sorted_fields_type(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_sorted_fields_type (X);\
+ } while (0)
+extern void gt_pch_nx_sorted_fields_type (void *);
+#define gt_pch_n_23vec_const_char_p_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_const_char_p_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_const_char_p_va_gc_ (void *);
+#define gt_pch_n_22vec_tree_gc_vec_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_tree_gc_vec_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_tree_gc_vec_va_gc_ (void *);
+#define gt_pch_n_11align_stack(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_align_stack (X);\
+ } while (0)
+extern void gt_pch_nx_align_stack (void *);
+#define gt_pch_n_23vec_pending_weak_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_pending_weak_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_pending_weak_va_gc_ (void *);
+#define gt_pch_n_31vec_pending_redefinition_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_pending_redefinition_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_pending_redefinition_va_gc_ (void *);
+#define gt_pch_n_9opt_stack(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_opt_stack (X);\
+ } while (0)
+extern void gt_pch_nx_opt_stack (void *);
+#define gt_pch_n_8c_parser(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_c_parser (X);\
+ } while (0)
+extern void gt_pch_nx_c_parser (void *);
+#define gt_pch_n_18vec_c_token_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_c_token_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_c_token_va_gc_ (void *);
+#define gt_pch_n_36vec_c_omp_declare_target_attr_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_c_omp_declare_target_attr_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_c_omp_declare_target_attr_va_gc_ (void *);
+#define gt_pch_n_16cp_binding_level(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cp_binding_level (X);\
+ } while (0)
+extern void gt_pch_nx_cp_binding_level (void *);
+#define gt_pch_n_11cxx_binding(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cxx_binding (X);\
+ } while (0)
+extern void gt_pch_nx_cxx_binding (void *);
+#define gt_pch_n_27vec_cp_class_binding_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_cp_class_binding_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_cp_class_binding_va_gc_ (void *);
+#define gt_pch_n_14cp_token_cache(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cp_token_cache (X);\
+ } while (0)
+extern void gt_pch_nx_cp_token_cache (void *);
+#define gt_pch_n_32vec_deferred_access_check_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_deferred_access_check_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_deferred_access_check_va_gc_ (void *);
+#define gt_pch_n_28vec_cxx_saved_binding_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_cxx_saved_binding_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_cxx_saved_binding_va_gc_ (void *);
+#define gt_pch_n_37vec_cp_omp_declare_target_attr_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_cp_omp_declare_target_attr_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_cp_omp_declare_target_attr_va_gc_ (void *);
+#define gt_pch_n_36vec_cp_omp_begin_assumes_data_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_cp_omp_begin_assumes_data_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_cp_omp_begin_assumes_data_va_gc_ (void *);
+#define gt_pch_n_11saved_scope(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_saved_scope (X);\
+ } while (0)
+extern void gt_pch_nx_saved_scope (void *);
+#define gt_pch_n_17named_label_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_named_label_entry (X);\
+ } while (0)
+extern void gt_pch_nx_named_label_entry (void *);
+#define gt_pch_n_28hash_table_named_label_hash_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_named_label_hash_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_named_label_hash_ (void *);
+#define gt_pch_n_11tree_pair_s(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_tree_pair_s (X);\
+ } while (0)
+extern void gt_pch_nx_tree_pair_s (void *);
+#define gt_pch_n_22vec_tree_pair_s_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_tree_pair_s_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_tree_pair_s_va_gc_ (void *);
+#define gt_pch_n_27hash_table_named_decl_hash_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_named_decl_hash_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_named_decl_hash_ (void *);
+#define gt_pch_n_10spec_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_spec_entry (X);\
+ } while (0)
+extern void gt_pch_nx_spec_entry (void *);
+#define gt_pch_n_11tinst_level(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_tinst_level (X);\
+ } while (0)
+extern void gt_pch_nx_tinst_level (void *);
+#define gt_pch_n_12module_state(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_module_state (X);\
+ } while (0)
+extern void gt_pch_nx_module_state (void *);
+#define gt_pch_n_16constexpr_fundef(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_constexpr_fundef (X);\
+ } while (0)
+extern void gt_pch_nx_constexpr_fundef (void *);
+#define gt_pch_n_10tree_check(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_tree_check (X);\
+ } while (0)
+extern void gt_pch_nx_tree_check (void *);
+#define gt_pch_n_19vec_cp_token_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_cp_token_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_cp_token_va_gc_ (void *);
+#define gt_pch_n_8cp_lexer(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cp_lexer (X);\
+ } while (0)
+extern void gt_pch_nx_cp_lexer (void *);
+#define gt_pch_n_31vec_cp_default_arg_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_cp_default_arg_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_cp_default_arg_entry_va_gc_ (void *);
+#define gt_pch_n_17cp_parser_context(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cp_parser_context (X);\
+ } while (0)
+extern void gt_pch_nx_cp_parser_context (void *);
+#define gt_pch_n_38vec_cp_unparsed_functions_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_cp_unparsed_functions_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_cp_unparsed_functions_entry_va_gc_ (void *);
+#define gt_pch_n_9cp_parser(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_cp_parser (X);\
+ } while (0)
+extern void gt_pch_nx_cp_parser (void *);
+#define gt_pch_n_18hash_map_tree_int_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_tree_int_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_tree_int_ (void *);
+#define gt_pch_n_35hash_table_constexpr_fundef_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_constexpr_fundef_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_constexpr_fundef_hasher_ (void *);
+#define gt_pch_n_14constexpr_call(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_constexpr_call (X);\
+ } while (0)
+extern void gt_pch_nx_constexpr_call (void *);
+#define gt_pch_n_33hash_table_constexpr_call_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_constexpr_call_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_constexpr_call_hasher_ (void *);
+#define gt_pch_n_10norm_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_norm_entry (X);\
+ } while (0)
+extern void gt_pch_nx_norm_entry (void *);
+#define gt_pch_n_23hash_table_norm_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_norm_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_norm_hasher_ (void *);
+#define gt_pch_n_23hash_table_atom_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_atom_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_atom_hasher_ (void *);
+#define gt_pch_n_9sat_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_sat_entry (X);\
+ } while (0)
+extern void gt_pch_nx_sat_entry (void *);
+#define gt_pch_n_22hash_table_sat_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_sat_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_sat_hasher_ (void *);
+#define gt_pch_n_14coroutine_info(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_coroutine_info (X);\
+ } while (0)
+extern void gt_pch_nx_coroutine_info (void *);
+#define gt_pch_n_33hash_table_coroutine_info_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_coroutine_info_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_coroutine_info_hasher_ (void *);
+#define gt_pch_n_27source_location_table_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_source_location_table_entry (X);\
+ } while (0)
+extern void gt_pch_nx_source_location_table_entry (void *);
+#define gt_pch_n_44hash_table_source_location_table_entry_hash_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_source_location_table_entry_hash_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_source_location_table_entry_hash_ (void *);
+#define gt_pch_n_21named_label_use_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_named_label_use_entry (X);\
+ } while (0)
+extern void gt_pch_nx_named_label_use_entry (void *);
+#define gt_pch_n_25vec_incomplete_var_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_incomplete_var_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_incomplete_var_va_gc_ (void *);
+#define gt_pch_n_27hash_table_typename_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_typename_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_typename_hasher_ (void *);
+#define gt_pch_n_29hash_table_mangled_decl_hash_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_mangled_decl_hash_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_mangled_decl_hash_ (void *);
+#define gt_pch_n_43hash_map_unsigned_tree_priority_map_traits_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_unsigned_tree_priority_map_traits_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_unsigned_tree_priority_map_traits_ (void *);
+#define gt_pch_n_27vec_pending_noexcept_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_pending_noexcept_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_pending_noexcept_va_gc_ (void *);
+#define gt_pch_n_27vec_lambda_sig_count_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_lambda_sig_count_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_lambda_sig_count_va_gc_ (void *);
+#define gt_pch_n_31vec_lambda_discriminator_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_lambda_discriminator_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_lambda_discriminator_va_gc_ (void *);
+#define gt_pch_n_28hash_table_conv_type_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_conv_type_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_conv_type_hasher_ (void *);
+#define gt_pch_n_17subsumption_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_subsumption_entry (X);\
+ } while (0)
+extern void gt_pch_nx_subsumption_entry (void *);
+#define gt_pch_n_30hash_table_subsumption_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_subsumption_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_subsumption_hasher_ (void *);
+#define gt_pch_n_8slurping(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_slurping (X);\
+ } while (0)
+extern void gt_pch_nx_slurping (void *);
+#define gt_pch_n_24vec_module_state__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_module_state__va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_module_state__va_gc_ (void *);
+#define gt_pch_n_29hash_table_module_state_hash_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_module_state_hash_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_module_state_hash_ (void *);
+#define gt_pch_n_33hash_table_note_def_cache_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_note_def_cache_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_note_def_cache_hasher_ (void *);
+#define gt_pch_n_23vec_macro_export_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_macro_export_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_macro_export_va_gc_ (void *);
+#define gt_pch_n_16pending_template(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_pending_template (X);\
+ } while (0)
+extern void gt_pch_nx_pending_template (void *);
+#define gt_pch_n_23hash_table_spec_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_spec_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_spec_hasher_ (void *);
+#define gt_pch_n_22hash_table_ctp_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_ctp_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_ctp_hasher_ (void *);
+#define gt_pch_n_26hash_map_tree_tree_pair_p_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_tree_tree_pair_p_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_tree_tree_pair_p_ (void *);
+#define gt_pch_n_18vec_tinfo_s_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_tinfo_s_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_tinfo_s_va_gc_ (void *);
+#define gt_pch_n_26vec_deferred_access_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_deferred_access_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_deferred_access_va_gc_ (void *);
+#define gt_pch_n_30hash_table_cplus_array_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_cplus_array_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_cplus_array_hasher_ (void *);
+#define gt_pch_n_23hash_table_list_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_list_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_list_hasher_ (void *);
+#define gt_pch_n_9Statement(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_Statement (X);\
+ } while (0)
+extern void gt_pch_nx_Statement (void *);
+#define gt_pch_n_13binding_level(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_binding_level (X);\
+ } while (0)
+extern void gt_pch_nx_binding_level (void *);
+#define gt_pch_n_17d_label_use_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_d_label_use_entry (X);\
+ } while (0)
+extern void gt_pch_nx_d_label_use_entry (void *);
+#define gt_pch_n_34hash_map_Statement__d_label_entry_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_map_Statement__d_label_entry_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_map_Statement__d_label_entry_ (void *);
+#define gt_pch_n_25hash_table_module_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_module_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_module_hasher_ (void *);
+#define gt_pch_n_17module_htab_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_module_htab_entry (X);\
+ } while (0)
+extern void gt_pch_nx_module_htab_entry (void *);
+#define gt_pch_n_30hash_table_module_decl_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_module_decl_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_module_decl_hasher_ (void *);
+#define gt_pch_n_7rtenode(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_rtenode (X);\
+ } while (0)
+extern void gt_pch_nx_rtenode (void *);
+#define gt_pch_n_19vec_rtenode__va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_rtenode__va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_rtenode__va_gc_ (void *);
+#define gt_pch_n_18struct_constructor(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_struct_constructor (X);\
+ } while (0)
+extern void gt_pch_nx_struct_constructor (void *);
+#define gt_pch_n_10array_desc(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_array_desc (X);\
+ } while (0)
+extern void gt_pch_nx_array_desc (void *);
+#define gt_pch_n_16objc_map_private(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_objc_map_private (X);\
+ } while (0)
+extern void gt_pch_nx_objc_map_private (void *);
+#define gt_pch_n_12hashed_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hashed_entry (X);\
+ } while (0)
+extern void gt_pch_nx_hashed_entry (void *);
+#define gt_pch_n_16hashed_attribute(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hashed_attribute (X);\
+ } while (0)
+extern void gt_pch_nx_hashed_attribute (void *);
+#define gt_pch_n_9imp_entry(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_imp_entry (X);\
+ } while (0)
+extern void gt_pch_nx_imp_entry (void *);
+#define gt_pch_n_17string_descriptor(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_string_descriptor (X);\
+ } while (0)
+extern void gt_pch_nx_string_descriptor (void *);
+#define gt_pch_n_30hash_table_objc_string_hasher_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_hash_table_objc_string_hasher_ (X);\
+ } while (0)
+extern void gt_pch_nx_hash_table_objc_string_hasher_ (void *);
+#define gt_pch_n_27vec_ident_data_tuple_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_ident_data_tuple_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_ident_data_tuple_va_gc_ (void *);
+#define gt_pch_n_23vec_msgref_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_msgref_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_msgref_entry_va_gc_ (void *);
+#define gt_pch_n_26vec_prot_list_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_prot_list_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_prot_list_entry_va_gc_ (void *);
+#define gt_pch_n_24vec_ivarref_entry_va_gc_(X) do { \
+ if ((intptr_t)(X) != 0) gt_pch_nx_vec_ivarref_entry_va_gc_ (X);\
+ } while (0)
+extern void gt_pch_nx_vec_ivarref_entry_va_gc_ (void *);
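Every note macro in the run above has the same shape: do nothing for a null pointer, otherwise hand the object to its matching gt_pch_nx_* walker so it is registered for precompiled-header (PCH) streaming. The decimal infix in each name is simply the length of the type name that follows (gt_pch_n_8ipa_bits, gt_pch_n_12c_label_vars), which is how gengtype mangles identifiers. A minimal self-contained sketch of the pattern, using a hypothetical type name my_node rather than any of the real types declared here:

    #include <stdint.h>   /* the generated macros test (intptr_t)(X) */

    /* Hypothetical walker; the real ones are the extern
       declarations emitted above.  */
    extern void gt_pch_nx_my_node (void *);

    /* Same shape as the generated note macros; the leading 7 is
       strlen ("my_node"), matching gengtype's name mangling.  */
    #define gt_pch_n_7my_node(X) do { \
        if ((intptr_t)(X) != 0) gt_pch_nx_my_node (X);\
      } while (0)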
+
+/* functions code */
+
+/* Local pointer-walking routines. */
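Each gt_pch_p_* routine declared below takes the object currently being processed, a pointer to the candidate block, a gt_pointer_operator callback, and an opaque cookie, and applies the callback to every GC pointer field inside the object so the PCH machinery can relocate it. A rough, self-contained sketch of what one such routine does, with a hypothetical struct my_node and a locally defined callback type standing in for the real typedef in GCC's ggc.h (whose exact arity has varied between releases):

    /* Illustrative stand-in for the typedef in ggc.h (assumption,
       not the toolchain's exact signature).  */
    typedef void (*gt_pointer_operator) (void *addr, void *cookie);

    struct my_node { struct my_node *next; };  /* hypothetical GC type */

    void
    gt_pch_p_7my_node (void *this_obj, void *x_p,
                       gt_pointer_operator op, void *cookie)
    {
      struct my_node *x = (struct my_node *) x_p;
      if ((void *) x == this_obj)       /* walk fields of the noted object */
        op (&x->next, cookie);          /* let the operator rewrite the slot */
    }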
+#define gt_pch_p_9tree_node gt_pch_p_14lang_tree_node
+extern void gt_pch_p_9line_maps
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9cpp_token
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9cpp_macro
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_13string_concat
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16string_concat_db
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_38hash_map_location_hash_string_concat__
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11bitmap_head
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtx_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtx_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtx_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtx_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtx_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtx_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtx_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtx_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtx_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtx_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtx_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtx_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtx_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9rtvec_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11symtab_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11symtab_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11symtab_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11cgraph_edge
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7section
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16cl_target_option
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15cl_optimization
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_8edge_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15basic_block_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16machine_function
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_14bitmap_element
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_34generic_wide_int_wide_int_storage_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_13coverage_data
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9mem_attrs
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9reg_attrs
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12object_block
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_14vec_rtx_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11fixed_value
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_23constant_descriptor_rtx
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_8function
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10target_rtl
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15cgraph_rtl_info
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_42hash_map_tree_tree_decl_tree_cache_traits_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_42hash_map_tree_tree_type_tree_cache_traits_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_36hash_map_tree_tree_decl_tree_traits_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12ptr_info_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_19irange_storage_slot
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10die_struct
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_26vec_constructor_elt_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_19frange_storage_slot
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15vec_tree_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9lang_type
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9lang_decl
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_24tree_statement_list_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_14target_globals
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_14lang_tree_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_8tree_map
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_13tree_decl_map
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12tree_int_map
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12tree_vec_map
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_21vec_alias_pair_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_13libfunc_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_26hash_table_libfunc_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15target_libfuncs
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_14sequence_stack
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_20vec_rtx_insn__va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18call_site_record_d
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16vec_uchar_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27vec_call_site_record_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9gimple_df
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11dw_fde_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17rtx_constant_pool
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11frame_space
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_26vec_callinfo_callee_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_26vec_callinfo_dalloc_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11stack_usage
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9eh_status
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18control_flow_graph
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_5loops
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17language_function
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_14hash_set_tree_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_24types_used_by_vars_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28hash_table_used_type_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_13nb_iter_bound
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9loop_exit
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_4loop
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10control_iv
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17vec_loop_p_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10niter_desc
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28hash_table_loop_exit_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_22vec_basic_block_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11rtl_bb_info
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15vec_edge_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18section_hash_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18lto_file_decl_data
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15ipa_replace_map
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17cgraph_simd_clone
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28cgraph_function_version_info
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_30hash_table_cgraph_edge_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_25cgraph_indirect_call_info
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_8asm_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10thunk_info
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29function_summary_thunk_info__
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10clone_info
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29function_summary_clone_info__
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12symbol_table
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_31hash_table_section_name_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_26hash_table_asmname_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_42hash_map_symtab_node__symbol_priority_map_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_24constant_descriptor_tree
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28vec_unprocessed_thunk_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27vec_ipa_replace_map__va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_21ipa_param_adjustments
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28hash_map_alias_set_hash_int_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15alias_set_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27vec_alias_set_entry__va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_35hash_table_function_version_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17lto_in_decl_state
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_35hash_table_ipa_bit_ggc_hash_traits_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_34hash_table_ipa_vr_ggc_hash_traits_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15ipa_node_params
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_13ipa_edge_args
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_14ipa_fn_summary
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10odr_type_d
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29vec_ipa_adjusted_param_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12param_access
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_24vec_param_access__va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17isra_func_summary
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_26vec_isra_param_desc_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_26ipa_sra_function_summaries
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27modref_tree_alias_set_type_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_14modref_summary
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18modref_summary_lto
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_44fast_function_summary_modref_summary__va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_48fast_function_summary_modref_summary_lto__va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17modref_tree_tree_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_37hash_map_location_hash_nowarn_spec_t_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11dw_cfi_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17dw_loc_descr_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18dw_loc_list_struct
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18dw_discr_list_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15dw_cfa_location
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_21vec_dw_cfi_ref_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16addr_table_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_20indirect_string_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15dwarf_file_data
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_20hash_map_char__tree_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10dw_cfi_row
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17reg_saved_in_data
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_21vec_dw_fde_ref_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_34hash_table_indirect_string_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16vec_char__va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16comdat_type_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29vec_dw_line_info_entry_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18dw_line_info_table
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_23vec_dw_attr_node_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16limbo_die_struct
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29hash_table_dwarf_file_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27hash_table_decl_die_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_21vec_dw_die_ref_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_21variable_value_struct
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_33hash_table_variable_value_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28hash_table_block_die_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12var_loc_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16var_loc_list_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17call_arg_loc_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27hash_table_decl_loc_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_22cached_dw_loc_list_def
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_30hash_table_dw_loc_list_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_30vec_dw_line_info_table__va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_24vec_pubname_entry_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_24vec_macinfo_entry_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_20vec_dw_ranges_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29vec_dw_ranges_by_label_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_24vec_die_arg_entry_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_23hash_table_addr_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27hash_map_tree_sym_off_pair_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17inline_entry_data
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_36hash_table_inline_entry_data_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10ctf_string
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9ctf_dmdef
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12ctf_func_arg
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9ctf_dtdef
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9ctf_dvdef
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27hash_table_ctfc_dtd_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27hash_table_ctfc_dvd_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_13ctf_container
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_32hash_map_ctf_dvdef_ref_unsigned_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_24vec_ctf_dtdef_ref_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9temp_slot
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_20initial_value_struct
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_22vec_temp_slot_p_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28hash_table_const_int_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_33hash_table_const_wide_int_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_33hash_table_const_poly_int_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27hash_table_reg_attr_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_31hash_table_const_double_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_30hash_table_const_fixed_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11eh_region_d
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16eh_landing_pad_d
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10eh_catch_d
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_20vec_eh_region_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_25vec_eh_landing_pad_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_21hash_map_gimple__int_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29hash_table_insn_cache_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_23temp_slot_address_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_31hash_table_temp_address_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_24hash_map_tree_hash_tree_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11test_struct
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_14test_of_length
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10test_other
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_13test_of_union
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12example_base
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12example_base
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12example_base
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9test_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11user_struct
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_31hash_table_libfunc_decl_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16string_pool_data
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9type_hash
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29hash_table_type_cache_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_26hash_table_int_cst_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_31hash_table_poly_int_cst_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28hash_table_cl_option_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_38hash_table_tree_decl_map_cache_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_37hash_table_tree_vec_map_cache_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_26hash_table_section_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_31hash_table_object_block_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_34hash_table_tree_descriptor_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_33hash_table_const_rtx_desc_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27hash_table_tm_clone_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_6gimple
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15tm_restart_node
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_19hash_map_tree_tree_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27hash_table_ssa_name_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29hash_table_tm_restart_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28vec_mem_addr_template_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_13scev_info_str
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28hash_table_scev_info_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_20ssa_operand_memory_d
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_36vec_omp_declare_variant_entry_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_30omp_declare_variant_base_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_38hash_table_omp_declare_variant_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_42hash_table_omp_declare_variant_alt_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_24hash_map_char__unsigned_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18vec_gimple__va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12int_range_1_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_26vec_ipa_agg_jf_item_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_19ipcp_transformation
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_8ipa_bits
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_31vec_ipa_param_descriptor_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27vec_ipa_argagg_value_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_20vec_ipa_bits__va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17vec_ipa_vr_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_24vec_ipa_jump_func_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_39vec_ipa_polymorphic_call_context_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17ipa_node_params_t
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_19ipa_edge_args_sum_t
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_38function_summary_ipcp_transformation__
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29hash_table_tm_wrapper_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29hash_table_decl_state_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_23vec_expr_eval_op_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_20vec_condition_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_37vec_ipa_freqcounting_predicate_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_44fast_function_summary_ipa_fn_summary__va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_13tree_type_map
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_38hash_table_tree_type_map_cache_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_19vec_odr_type_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_35hash_table_value_annotation_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27vec_Entity_Id_va_gc_atomic_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_19tree_entity_vec_map
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29hash_table_dummy_type_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11parm_attr_d
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_20vec_parm_attr_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10stmt_group
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9elab_info
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18range_check_info_d
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27vec_range_check_info_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11loop_info_d
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_20vec_loop_info_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18gnat_binding_level
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18packable_type_hash
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_32hash_table_packable_type_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_13pad_type_hash
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27hash_table_pad_type_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12c_label_vars
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9c_binding
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7c_scope
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15c_goto_bindings
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28vec_c_goto_bindings_p_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_15c_inline_static
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18sorted_fields_type
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_23vec_const_char_p_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_22vec_tree_gc_vec_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11align_stack
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_23vec_pending_weak_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_31vec_pending_redefinition_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9opt_stack
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_8c_parser
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18vec_c_token_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_36vec_c_omp_declare_target_attr_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16cp_binding_level
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11cxx_binding
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27vec_cp_class_binding_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_14cp_token_cache
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_32vec_deferred_access_check_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28vec_cxx_saved_binding_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_37vec_cp_omp_declare_target_attr_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_36vec_cp_omp_begin_assumes_data_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11saved_scope
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17named_label_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28hash_table_named_label_hash_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11tree_pair_s
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_22vec_tree_pair_s_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27hash_table_named_decl_hash_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10spec_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_11tinst_level
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12module_state
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16constexpr_fundef
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10tree_check
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_19vec_cp_token_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_8cp_lexer
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_31vec_cp_default_arg_entry_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17cp_parser_context
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_38vec_cp_unparsed_functions_entry_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9cp_parser
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18hash_map_tree_int_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_35hash_table_constexpr_fundef_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_14constexpr_call
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_33hash_table_constexpr_call_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10norm_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_23hash_table_norm_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_23hash_table_atom_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9sat_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_22hash_table_sat_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_14coroutine_info
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_33hash_table_coroutine_info_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27source_location_table_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_44hash_table_source_location_table_entry_hash_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_21named_label_use_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_25vec_incomplete_var_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27hash_table_typename_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29hash_table_mangled_decl_hash_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_43hash_map_unsigned_tree_priority_map_traits_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27vec_pending_noexcept_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27vec_lambda_sig_count_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_31vec_lambda_discriminator_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_28hash_table_conv_type_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17subsumption_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_30hash_table_subsumption_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_8slurping
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_24vec_module_state__va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_29hash_table_module_state_hash_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_33hash_table_note_def_cache_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_23vec_macro_export_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16pending_template
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_23hash_table_spec_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_22hash_table_ctp_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_26hash_map_tree_tree_pair_p_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18vec_tinfo_s_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_26vec_deferred_access_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_30hash_table_cplus_array_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_23hash_table_list_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9Statement
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_13binding_level
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17d_label_use_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_34hash_map_Statement__d_label_entry_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_25hash_table_module_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17module_htab_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_30hash_table_module_decl_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_7rtenode
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_19vec_rtenode__va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_18struct_constructor
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_10array_desc
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16objc_map_private
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_12hashed_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_16hashed_attribute
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_9imp_entry
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_17string_descriptor
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_30hash_table_objc_string_hasher_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_27vec_ident_data_tuple_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_23vec_msgref_entry_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_26vec_prot_list_entry_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
+extern void gt_pch_p_24vec_ivarref_entry_va_gc_
+ (void *, void *, gt_pointer_operator, void *);
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hard-reg-set.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hard-reg-set.h
new file mode 100644
index 0000000..b0bb9bc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hard-reg-set.h
@@ -0,0 +1,527 @@
+/* Sets (bit vectors) of hard registers, and operations on them.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_HARD_REG_SET_H
+#define GCC_HARD_REG_SET_H
+
+#include "array-traits.h"
+
+/* Define the type of a set of hard registers. */
+
+/* HARD_REG_ELT_TYPE is a typedef of the unsigned integral type which
+ will be used for hard reg sets, either alone or in an array.
+
+   If all of the target machine's hard registers fit in a single
+   HARD_REG_ELT_TYPE, HARD_REG_SET is a typedef for that type.
+   Otherwise, it is a struct wrapping a suitably sized array of
+   HARD_REG_ELT_TYPEs, and HARD_REG_SET_LONGS is defined as how many.
+
+ Note that lots of code assumes that the first part of a regset is
+ the same format as a HARD_REG_SET. To help make sure this is true,
+ we only try the widest fast integer mode (HOST_WIDEST_FAST_INT)
+ instead of all the smaller types. This approach loses only if
+ there are very few registers and then only in the few cases where
+ we have an array of HARD_REG_SETs, so it needn't be as complex as
+ it used to be. */
+
+typedef unsigned HOST_WIDEST_FAST_INT HARD_REG_ELT_TYPE;
+
+#if FIRST_PSEUDO_REGISTER <= HOST_BITS_PER_WIDEST_FAST_INT
+
+typedef HARD_REG_ELT_TYPE HARD_REG_SET;
+typedef const HARD_REG_SET const_hard_reg_set;
+
+#else
+
+#define HARD_REG_SET_LONGS \
+ ((FIRST_PSEUDO_REGISTER + HOST_BITS_PER_WIDEST_FAST_INT - 1) \
+ / HOST_BITS_PER_WIDEST_FAST_INT)
+
+struct HARD_REG_SET
+{
+ HARD_REG_SET
+ operator~ () const
+ {
+ HARD_REG_SET res;
+ for (unsigned int i = 0; i < ARRAY_SIZE (elts); ++i)
+ res.elts[i] = ~elts[i];
+ return res;
+ }
+
+ HARD_REG_SET
+ operator& (const HARD_REG_SET &other) const
+ {
+ HARD_REG_SET res;
+ for (unsigned int i = 0; i < ARRAY_SIZE (elts); ++i)
+ res.elts[i] = elts[i] & other.elts[i];
+ return res;
+ }
+
+ HARD_REG_SET &
+ operator&= (const HARD_REG_SET &other)
+ {
+ for (unsigned int i = 0; i < ARRAY_SIZE (elts); ++i)
+ elts[i] &= other.elts[i];
+ return *this;
+ }
+
+ HARD_REG_SET
+ operator| (const HARD_REG_SET &other) const
+ {
+ HARD_REG_SET res;
+ for (unsigned int i = 0; i < ARRAY_SIZE (elts); ++i)
+ res.elts[i] = elts[i] | other.elts[i];
+ return res;
+ }
+
+ HARD_REG_SET &
+ operator|= (const HARD_REG_SET &other)
+ {
+ for (unsigned int i = 0; i < ARRAY_SIZE (elts); ++i)
+ elts[i] |= other.elts[i];
+ return *this;
+ }
+
+ bool
+ operator== (const HARD_REG_SET &other) const
+ {
+ HARD_REG_ELT_TYPE bad = 0;
+ for (unsigned int i = 0; i < ARRAY_SIZE (elts); ++i)
+ bad |= (elts[i] ^ other.elts[i]);
+ return bad == 0;
+ }
+
+ bool
+ operator!= (const HARD_REG_SET &other) const
+ {
+ return !operator== (other);
+ }
+
+ HARD_REG_ELT_TYPE elts[HARD_REG_SET_LONGS];
+};
+typedef const HARD_REG_SET &const_hard_reg_set;
+
+template<>
+struct array_traits<HARD_REG_SET>
+{
+ typedef HARD_REG_ELT_TYPE element_type;
+ static const bool has_constant_size = true;
+ static const size_t constant_size = HARD_REG_SET_LONGS;
+ static const element_type *base (const HARD_REG_SET &x) { return x.elts; }
+ static size_t size (const HARD_REG_SET &) { return HARD_REG_SET_LONGS; }
+};
+
+#endif
+
+/* HARD_REG_SET wrapped into a structure, to make it possible to
+ use HARD_REG_SET even in APIs that should not include
+ hard-reg-set.h. */
+struct hard_reg_set_container
+{
+ HARD_REG_SET set;
+};
+
+/* HARD_CONST is used to cast a constant to the appropriate type
+ for use with a HARD_REG_SET. */
+
+#define HARD_CONST(X) ((HARD_REG_ELT_TYPE) (X))
+
+/* Define macros SET_HARD_REG_BIT, CLEAR_HARD_REG_BIT and TEST_HARD_REG_BIT
+ to set, clear or test one bit in a hard reg set of type HARD_REG_SET.
+ All three take two arguments: the set and the register number.
+
+ In the case where sets are arrays of longs, the first argument
+ is actually a pointer to a long.
+
+ Define two macros for initializing a set:
+ CLEAR_HARD_REG_SET and SET_HARD_REG_SET.
+ These take just one argument.
+
+ Also define:
+
+ hard_reg_set_subset_p (X, Y), which returns true if X is a subset of Y.
+ hard_reg_set_intersect_p (X, Y), which returns true if X and Y intersect.
+ hard_reg_set_empty_p (X), which returns true if X is empty. */
+
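+/* For example, building a scratch set and testing it against another
+   set might look like this (a minimal sketch):
+
+     HARD_REG_SET scratch;
+     CLEAR_HARD_REG_SET (scratch);
+     SET_HARD_REG_BIT (scratch, 0);
+     if (TEST_HARD_REG_BIT (scratch, 0)
+	 && hard_reg_set_subset_p (scratch, fixed_reg_set))
+       ...
+
+   The same source compiles whether HARD_REG_SET is a scalar or a
+   struct, which is why both implementations below expose identical
+   macro and function interfaces.  */
+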
+#define UHOST_BITS_PER_WIDE_INT ((unsigned) HOST_BITS_PER_WIDEST_FAST_INT)
+
+#if FIRST_PSEUDO_REGISTER <= HOST_BITS_PER_WIDEST_FAST_INT
+
+#define SET_HARD_REG_BIT(SET, BIT) \
+ ((SET) |= HARD_CONST (1) << (BIT))
+#define CLEAR_HARD_REG_BIT(SET, BIT) \
+ ((SET) &= ~(HARD_CONST (1) << (BIT)))
+#define TEST_HARD_REG_BIT(SET, BIT) \
+ (!!((SET) & (HARD_CONST (1) << (BIT))))
+
+#define CLEAR_HARD_REG_SET(TO) ((TO) = HARD_CONST (0))
+#define SET_HARD_REG_SET(TO) ((TO) = ~ HARD_CONST (0))
+
+inline bool
+hard_reg_set_subset_p (const_hard_reg_set x, const_hard_reg_set y)
+{
+ return (x & ~y) == HARD_CONST (0);
+}
+
+inline bool
+hard_reg_set_intersect_p (const_hard_reg_set x, const_hard_reg_set y)
+{
+ return (x & y) != HARD_CONST (0);
+}
+
+inline bool
+hard_reg_set_empty_p (const_hard_reg_set x)
+{
+ return x == HARD_CONST (0);
+}
+
+#else
+
+inline void
+SET_HARD_REG_BIT (HARD_REG_SET &set, unsigned int bit)
+{
+ set.elts[bit / UHOST_BITS_PER_WIDE_INT]
+ |= HARD_CONST (1) << (bit % UHOST_BITS_PER_WIDE_INT);
+}
+
+inline void
+CLEAR_HARD_REG_BIT (HARD_REG_SET &set, unsigned int bit)
+{
+ set.elts[bit / UHOST_BITS_PER_WIDE_INT]
+ &= ~(HARD_CONST (1) << (bit % UHOST_BITS_PER_WIDE_INT));
+}
+
+inline bool
+TEST_HARD_REG_BIT (const_hard_reg_set set, unsigned int bit)
+{
+ return (set.elts[bit / UHOST_BITS_PER_WIDE_INT]
+ & (HARD_CONST (1) << (bit % UHOST_BITS_PER_WIDE_INT)));
+}
+
+inline void
+CLEAR_HARD_REG_SET (HARD_REG_SET &set)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE (set.elts); ++i)
+ set.elts[i] = 0;
+}
+
+inline void
+SET_HARD_REG_SET (HARD_REG_SET &set)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE (set.elts); ++i)
+ set.elts[i] = -1;
+}
+
+inline bool
+hard_reg_set_subset_p (const_hard_reg_set x, const_hard_reg_set y)
+{
+ HARD_REG_ELT_TYPE bad = 0;
+ for (unsigned int i = 0; i < ARRAY_SIZE (x.elts); ++i)
+ bad |= (x.elts[i] & ~y.elts[i]);
+ return bad == 0;
+}
+
+inline bool
+hard_reg_set_intersect_p (const_hard_reg_set x, const_hard_reg_set y)
+{
+ HARD_REG_ELT_TYPE good = 0;
+ for (unsigned int i = 0; i < ARRAY_SIZE (x.elts); ++i)
+ good |= (x.elts[i] & y.elts[i]);
+ return good != 0;
+}
+
+inline bool
+hard_reg_set_empty_p (const_hard_reg_set x)
+{
+ HARD_REG_ELT_TYPE bad = 0;
+ for (unsigned int i = 0; i < ARRAY_SIZE (x.elts); ++i)
+ bad |= x.elts[i];
+ return bad == 0;
+}
+#endif
+
+/* Iterator for hard register sets. */
+
+struct hard_reg_set_iterator
+{
+ /* Pointer to the current element. */
+ const HARD_REG_ELT_TYPE *pelt;
+
+ /* The length of the set. */
+ unsigned short length;
+
+ /* Word within the current element. */
+ unsigned short word_no;
+
+ /* Contents of the actually processed word. When finding next bit
+ it is shifted right, so that the actual bit is always the least
+ significant bit of ACTUAL. */
+ HARD_REG_ELT_TYPE bits;
+};
+
+#define HARD_REG_ELT_BITS UHOST_BITS_PER_WIDE_INT
+
+/* The implementation of the iterator functions is fully analogous to
+ the bitmap iterators. */
+inline void
+hard_reg_set_iter_init (hard_reg_set_iterator *iter, const_hard_reg_set set,
+ unsigned min, unsigned *regno)
+{
+#ifdef HARD_REG_SET_LONGS
+ iter->pelt = set.elts;
+ iter->length = HARD_REG_SET_LONGS;
+#else
+ iter->pelt = &set;
+ iter->length = 1;
+#endif
+ iter->word_no = min / HARD_REG_ELT_BITS;
+ if (iter->word_no < iter->length)
+ {
+ iter->bits = iter->pelt[iter->word_no];
+ iter->bits >>= min % HARD_REG_ELT_BITS;
+
+ /* This is required for correct search of the next bit. */
+ min += !iter->bits;
+ }
+ *regno = min;
+}
+
+inline bool
+hard_reg_set_iter_set (hard_reg_set_iterator *iter, unsigned *regno)
+{
+ while (1)
+ {
+ /* Return false when we're advanced past the end of the set. */
+ if (iter->word_no >= iter->length)
+ return false;
+
+ if (iter->bits)
+ {
+ /* Find the correct bit and return it. */
+ while (!(iter->bits & 1))
+ {
+ iter->bits >>= 1;
+ *regno += 1;
+ }
+ return (*regno < FIRST_PSEUDO_REGISTER);
+ }
+
+ /* Round to the beginning of the next word. */
+ *regno = (*regno + HARD_REG_ELT_BITS - 1);
+ *regno -= *regno % HARD_REG_ELT_BITS;
+
+ /* Find the next non-zero word. */
+ while (++iter->word_no < iter->length)
+ {
+ iter->bits = iter->pelt[iter->word_no];
+ if (iter->bits)
+ break;
+ *regno += HARD_REG_ELT_BITS;
+ }
+ }
+}
+
+inline void
+hard_reg_set_iter_next (hard_reg_set_iterator *iter, unsigned *regno)
+{
+ iter->bits >>= 1;
+ *regno += 1;
+}
+
+#define EXECUTE_IF_SET_IN_HARD_REG_SET(SET, MIN, REGNUM, ITER) \
+ for (hard_reg_set_iter_init (&(ITER), (SET), (MIN), &(REGNUM)); \
+ hard_reg_set_iter_set (&(ITER), &(REGNUM)); \
+ hard_reg_set_iter_next (&(ITER), &(REGNUM)))
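+
+/* For example, to print the name of every register in a set (a minimal
+   sketch using reg_names, defined further below):
+
+     hard_reg_set_iterator iter;
+     unsigned int regno;
+     EXECUTE_IF_SET_IN_HARD_REG_SET (fixed_reg_set, 0, regno, iter)
+       fprintf (stderr, "%s\n", reg_names[regno]);  */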
+
+
+/* Define some standard sets of registers. */
+
+/* Indexed by hard register number, contains 1 for registers
+ that are being used for global register decls.
+ These must be exempt from ordinary flow analysis
+ and are also considered fixed. */
+
+extern char global_regs[FIRST_PSEUDO_REGISTER];
+
+extern HARD_REG_SET global_reg_set;
+
+class simplifiable_subreg;
+class subreg_shape;
+
+struct simplifiable_subregs_hasher : nofree_ptr_hash <simplifiable_subreg>
+{
+ typedef const subreg_shape *compare_type;
+
+ static inline hashval_t hash (const simplifiable_subreg *);
+ static inline bool equal (const simplifiable_subreg *, const subreg_shape *);
+};
+
+struct target_hard_regs {
+ void finalize ();
+
+ /* The set of registers that actually exist on the current target. */
+ HARD_REG_SET x_accessible_reg_set;
+
+ /* The set of registers that should be considered to be register
+ operands. It is a subset of x_accessible_reg_set. */
+ HARD_REG_SET x_operand_reg_set;
+
+ /* Indexed by hard register number, contains 1 for registers
+     that are fixed use (stack pointer, pc, frame pointer, etc.).
+     These are the registers that cannot be used to allocate
+     a pseudo reg for general use.  */
+ char x_fixed_regs[FIRST_PSEUDO_REGISTER];
+
+ /* The same info as a HARD_REG_SET. */
+ HARD_REG_SET x_fixed_reg_set;
+
+ /* Indexed by hard register number, contains 1 for registers
+ that are fixed use or are clobbered by function calls.
+ These are the registers that cannot be used to allocate
+ a pseudo reg whose life crosses calls. */
+ char x_call_used_regs[FIRST_PSEUDO_REGISTER];
+
+ /* For targets that use reload rather than LRA, this is the set
+ of registers that we are able to save and restore around calls
+ (i.e. those for which we know a suitable mode and set of
+ load/store instructions exist). For LRA targets it contains
+ all registers.
+
+ This is legacy information and should be removed if all targets
+ switch to LRA. */
+ HARD_REG_SET x_savable_regs;
+
+ /* Contains registers that are fixed use -- i.e. in fixed_reg_set -- but
+ only if they are not merely part of that set because they are global
+ regs. Global regs that are not otherwise fixed can still take part
+ in register allocation. */
+ HARD_REG_SET x_fixed_nonglobal_reg_set;
+
+ /* Contains 1 for registers that are set or clobbered by calls. */
+ /* ??? Ideally, this would be just call_used_regs plus global_regs, but
+ for someone's bright idea to have call_used_regs strictly include
+ fixed_regs. Which leaves us guessing as to the set of fixed_regs
+ that are actually preserved. We know for sure that those associated
+ with the local stack frame are safe, but scant others. */
+ HARD_REG_SET x_regs_invalidated_by_call;
+
+ /* Table of register numbers in the order in which to try to use them. */
+ int x_reg_alloc_order[FIRST_PSEUDO_REGISTER];
+
+ /* The inverse of reg_alloc_order. */
+ int x_inv_reg_alloc_order[FIRST_PSEUDO_REGISTER];
+
+ /* For each reg class, a HARD_REG_SET saying which registers are in it. */
+ HARD_REG_SET x_reg_class_contents[N_REG_CLASSES];
+
+ /* For each reg class, a boolean saying whether the class contains only
+ fixed registers. */
+ bool x_class_only_fixed_regs[N_REG_CLASSES];
+
+ /* For each reg class, number of regs it contains. */
+ unsigned int x_reg_class_size[N_REG_CLASSES];
+
+ /* For each reg class, table listing all the classes contained in it. */
+ enum reg_class x_reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES];
+
+ /* For each pair of reg classes,
+ a largest reg class contained in their union. */
+ enum reg_class x_reg_class_subunion[N_REG_CLASSES][N_REG_CLASSES];
+
+ /* For each pair of reg classes,
+ the smallest reg class that contains their union. */
+ enum reg_class x_reg_class_superunion[N_REG_CLASSES][N_REG_CLASSES];
+
+ /* Vector indexed by hardware reg giving its name. */
+ const char *x_reg_names[FIRST_PSEUDO_REGISTER];
+
+ /* Records which registers can form a particular subreg, with the subreg
+ being identified by its outer mode, inner mode and offset. */
+ hash_table <simplifiable_subregs_hasher> *x_simplifiable_subregs;
+};
+
+extern struct target_hard_regs default_target_hard_regs;
+#if SWITCHABLE_TARGET
+extern struct target_hard_regs *this_target_hard_regs;
+#else
+#define this_target_hard_regs (&default_target_hard_regs)
+#endif
+
+#define accessible_reg_set \
+ (this_target_hard_regs->x_accessible_reg_set)
+#define operand_reg_set \
+ (this_target_hard_regs->x_operand_reg_set)
+#define fixed_regs \
+ (this_target_hard_regs->x_fixed_regs)
+#define fixed_reg_set \
+ (this_target_hard_regs->x_fixed_reg_set)
+#define fixed_nonglobal_reg_set \
+ (this_target_hard_regs->x_fixed_nonglobal_reg_set)
+#ifdef IN_TARGET_CODE
+#define call_used_regs \
+ (this_target_hard_regs->x_call_used_regs)
+#endif
+#define savable_regs \
+ (this_target_hard_regs->x_savable_regs)
+#ifdef IN_TARGET_CODE
+#define regs_invalidated_by_call \
+ (this_target_hard_regs->x_regs_invalidated_by_call)
+#define call_used_or_fixed_regs \
+ (regs_invalidated_by_call | fixed_reg_set)
+#endif
+#define reg_alloc_order \
+ (this_target_hard_regs->x_reg_alloc_order)
+#define inv_reg_alloc_order \
+ (this_target_hard_regs->x_inv_reg_alloc_order)
+#define reg_class_contents \
+ (this_target_hard_regs->x_reg_class_contents)
+#define class_only_fixed_regs \
+ (this_target_hard_regs->x_class_only_fixed_regs)
+#define reg_class_size \
+ (this_target_hard_regs->x_reg_class_size)
+#define reg_class_subclasses \
+ (this_target_hard_regs->x_reg_class_subclasses)
+#define reg_class_subunion \
+ (this_target_hard_regs->x_reg_class_subunion)
+#define reg_class_superunion \
+ (this_target_hard_regs->x_reg_class_superunion)
+#define reg_names \
+ (this_target_hard_regs->x_reg_names)
+
+/* Vector indexed by reg class giving its name. */
+
+extern const char * reg_class_names[];
+
+/* Given a hard register REGN, a FROM mode and a TO mode, return true if
+ REGN can change from mode FROM to mode TO. */
+#define REG_CAN_CHANGE_MODE_P(REGN, FROM, TO) \
+ (targetm.can_change_mode_class (FROM, TO, REGNO_REG_CLASS (REGN)))
+
+#ifdef IN_TARGET_CODE
+/* Return true if register REGNO is either fixed or call-used
+ (aka call-clobbered). */
+
+inline bool
+call_used_or_fixed_reg_p (unsigned int regno)
+{
+ return fixed_regs[regno] || this_target_hard_regs->x_call_used_regs[regno];
+}
+#endif
+
+#endif /* ! GCC_HARD_REG_SET_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-map-traits.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-map-traits.h
new file mode 100644
index 0000000..52f6d4c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-map-traits.h
@@ -0,0 +1,194 @@
+/* A hash map traits.
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef HASH_MAP_TRAITS_H
+#define HASH_MAP_TRAITS_H
+
+/* Because mem-stats.h uses the default hashmap traits, we have to
+   put the class in this separate header file.  */
+
+#include "hash-traits.h"
+
+/* Implement hash_map traits for a key with hash traits H. Empty and
+ deleted map entries are represented as empty and deleted keys. */
+
+template <typename H, typename Value>
+struct simple_hashmap_traits
+{
+ typedef typename H::value_type key_type;
+ static const bool maybe_mx = true;
+ static inline hashval_t hash (const key_type &);
+ static inline bool equal_keys (const key_type &, const key_type &);
+ template <typename T> static inline void remove (T &);
+ static const bool empty_zero_p = H::empty_zero_p;
+ template <typename T> static inline bool is_empty (const T &);
+ template <typename T> static inline bool is_deleted (const T &);
+ template <typename T> static inline void mark_empty (T &);
+ template <typename T> static inline void mark_deleted (T &);
+};
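+
+/* For example, a map keyed by nonnegative ints could reserve -1 and -2
+   as the empty and deleted markers (a sketch; int_hash is declared in
+   hash-traits.h, and line_to_decl is just an illustrative name):
+
+     typedef simple_hashmap_traits <int_hash <int, -1, -2>, tree>
+       int_map_traits;
+     hash_map <int, tree, int_map_traits> line_to_decl;  */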
+
+template <typename H, typename Value>
+inline hashval_t
+simple_hashmap_traits <H, Value>::hash (const key_type &h)
+{
+ return H::hash (h);
+}
+
+template <typename H, typename Value>
+inline bool
+simple_hashmap_traits <H, Value>::equal_keys (const key_type &k1,
+ const key_type &k2)
+{
+ return H::equal (k1, k2);
+}
+
+template <typename H, typename Value>
+template <typename T>
+inline void
+simple_hashmap_traits <H, Value>::remove (T &entry)
+{
+ H::remove (entry.m_key);
+ entry.m_value.~Value ();
+}
+
+template <typename H, typename Value>
+template <typename T>
+inline bool
+simple_hashmap_traits <H, Value>::is_empty (const T &entry)
+{
+ return H::is_empty (entry.m_key);
+}
+
+template <typename H, typename Value>
+template <typename T>
+inline bool
+simple_hashmap_traits <H, Value>::is_deleted (const T &entry)
+{
+ return H::is_deleted (entry.m_key);
+}
+
+template <typename H, typename Value>
+template <typename T>
+inline void
+simple_hashmap_traits <H, Value>::mark_empty (T &entry)
+{
+ H::mark_empty (entry.m_key);
+}
+
+template <typename H, typename Value>
+template <typename T>
+inline void
+simple_hashmap_traits <H, Value>::mark_deleted (T &entry)
+{
+ H::mark_deleted (entry.m_key);
+}
+
+template <typename H, typename Value>
+struct simple_cache_map_traits: public simple_hashmap_traits<H,Value>
+{
+ static const bool maybe_mx = false;
+};
+
+/* Implement traits for a hash_map with keys of type Key and values of
+ type Value for cases in which the key cannot represent empty and
+ deleted slots. Instead record empty and deleted entries in Value. */
+
+template <typename Key, typename Value>
+struct unbounded_hashmap_traits
+{
+ typedef typename Key::value_type key_type;
+
+ static hashval_t hash (const typename Key::value_type &);
+ static bool equal_keys (const typename Key::value_type &,
+ const typename Key::compare_type &);
+
+ template <typename T> static inline void remove (T &);
+ static const bool empty_zero_p = default_hash_traits <Value>::empty_zero_p;
+ template <typename T> static inline bool is_empty (const T &);
+ template <typename T> static inline bool is_deleted (const T &);
+ template <typename T> static inline void mark_empty (T &);
+ template <typename T> static inline void mark_deleted (T &);
+};
+
+template <typename Key, typename Value>
+inline hashval_t
+unbounded_hashmap_traits <Key, Value>
+::hash (const typename Key::value_type &key)
+{
+ return Key::hash (key);
+}
+
+template <typename Key, typename Value>
+inline bool
+unbounded_hashmap_traits <Key, Value>
+::equal_keys (const typename Key::value_type &x,
+ const typename Key::compare_type &y)
+{
+ return Key::equal (x, y);
+}
+
+template <typename Key, typename Value>
+template <typename T>
+inline void
+unbounded_hashmap_traits <Key, Value>::remove (T &entry)
+{
+ default_hash_traits <Value>::remove (entry.m_value);
+}
+
+template <typename Key, typename Value>
+template <typename T>
+inline bool
+unbounded_hashmap_traits <Key, Value>::is_empty (const T &entry)
+{
+ return default_hash_traits <Value>::is_empty (entry.m_value);
+}
+
+template <typename Key, typename Value>
+template <typename T>
+inline bool
+unbounded_hashmap_traits <Key, Value>::is_deleted (const T &entry)
+{
+ return default_hash_traits <Value>::is_deleted (entry.m_value);
+}
+
+template <typename Key, typename Value>
+template <typename T>
+inline void
+unbounded_hashmap_traits <Key, Value>::mark_empty (T &entry)
+{
+ default_hash_traits <Value>::mark_empty (entry.m_value);
+}
+
+template <typename Key, typename Value>
+template <typename T>
+inline void
+unbounded_hashmap_traits <Key, Value>::mark_deleted (T &entry)
+{
+ default_hash_traits <Value>::mark_deleted (entry.m_value);
+}
+
+/* Implement traits for a hash_map from integer type Key to Value in
+ cases where Key has no spare values for recording empty and deleted
+ slots. */
+
+template <typename Key, typename Value>
+using unbounded_int_hashmap_traits
+ = unbounded_hashmap_traits <int_hash_base <Key>, Value>;
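+
+/* For example, to key a map by HOST_WIDE_INT where every key value is
+   legal, recording the empty/deleted state in the pointer value
+   instead (a sketch):
+
+     hash_map <HOST_WIDE_INT, tree,
+	       unbounded_int_hashmap_traits <HOST_WIDE_INT, tree> > m;  */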
+
+#endif // HASH_MAP_TRAITS_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-map.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-map.h
new file mode 100644
index 0000000..1c5189b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-map.h
@@ -0,0 +1,388 @@
+/* A type-safe hash map.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef hash_map_h
+#define hash_map_h
+
+/* Class hash_map is a hash-value based container mapping objects of
+ KeyId type to those of the Value type.
+ Both KeyId and Value may be non-trivial (non-POD) types provided
+   a suitable Traits class.  A few default Traits specializations are
+ provided for basic types such as integers, pointers, and std::pair.
+ Inserted elements are value-initialized either to zero for POD types
+ or by invoking their default ctor. Removed elements are destroyed
+ by invoking their dtor. On hash_map destruction all elements are
+ removed. Objects of hash_map type are copy-constructible but not
+ assignable. */
+
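+/* A typical use looks like this (a minimal sketch; decl stands for any
+   tree key):
+
+     hash_map <tree, unsigned> counts;
+     bool existed = counts.put (decl, 1);  // false on first insertion
+     if (unsigned *p = counts.get (decl))
+       ++*p;				   // get returns NULL when absent
+*/
+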
+const size_t default_hash_map_size = 13;
+template<typename KeyId, typename Value,
+ typename Traits /* = simple_hashmap_traits<default_hash_traits<Key>,
+ Value> */>
+class GTY((user)) hash_map
+{
+ typedef typename Traits::key_type Key;
+ struct hash_entry
+ {
+ Key m_key;
+ Value m_value;
+
+ typedef hash_entry value_type;
+ typedef Key compare_type;
+
+ static hashval_t hash (const hash_entry &e)
+ {
+ return Traits::hash (e.m_key);
+ }
+
+ static bool equal (const hash_entry &a, const Key &b)
+ {
+ return Traits::equal_keys (a.m_key, b);
+ }
+
+ static void remove (hash_entry &e) { Traits::remove (e); }
+
+ static void mark_deleted (hash_entry &e) { Traits::mark_deleted (e); }
+
+ static bool is_deleted (const hash_entry &e)
+ {
+ return Traits::is_deleted (e);
+ }
+
+ static const bool empty_zero_p = Traits::empty_zero_p;
+ static void mark_empty (hash_entry &e) { Traits::mark_empty (e); }
+ static bool is_empty (const hash_entry &e) { return Traits::is_empty (e); }
+
+ static void ggc_mx (hash_entry &e)
+ {
+ gt_ggc_mx (e.m_key);
+ gt_ggc_mx (e.m_value);
+ }
+
+ static void ggc_maybe_mx (hash_entry &e)
+ {
+ if (Traits::maybe_mx)
+ ggc_mx (e);
+ }
+
+ static void pch_nx (hash_entry &e)
+ {
+ gt_pch_nx (e.m_key);
+ gt_pch_nx (e.m_value);
+ }
+
+ static void pch_nx (hash_entry &e, gt_pointer_operator op, void *c)
+ {
+ pch_nx_helper (e.m_key, op, c);
+ pch_nx_helper (e.m_value, op, c);
+ }
+
+ static int keep_cache_entry (hash_entry &e)
+ {
+ return ggc_marked_p (e.m_key);
+ }
+
+ private:
+ template<typename T>
+ static void
+ pch_nx_helper (T &x, gt_pointer_operator op, void *cookie)
+ {
+ gt_pch_nx (&x, op, cookie);
+ }
+
+ template<typename T>
+ static void
+ pch_nx_helper (T *&x, gt_pointer_operator op, void *cookie)
+ {
+ op (&x, NULL, cookie);
+ }
+
+ /* The overloads below should match those in ggc.h. */
+#define DEFINE_PCH_HELPER(T) \
+ static void pch_nx_helper (T, gt_pointer_operator, void *) { }
+
+ DEFINE_PCH_HELPER (bool);
+ DEFINE_PCH_HELPER (char);
+ DEFINE_PCH_HELPER (signed char);
+ DEFINE_PCH_HELPER (unsigned char);
+ DEFINE_PCH_HELPER (short);
+ DEFINE_PCH_HELPER (unsigned short);
+ DEFINE_PCH_HELPER (int);
+ DEFINE_PCH_HELPER (unsigned int);
+ DEFINE_PCH_HELPER (long);
+ DEFINE_PCH_HELPER (unsigned long);
+ DEFINE_PCH_HELPER (long long);
+ DEFINE_PCH_HELPER (unsigned long long);
+
+#undef DEFINE_PCH_HELPER
+ };
+
+public:
+ explicit hash_map (size_t n = default_hash_map_size, bool ggc = false,
+ bool sanitize_eq_and_hash = true,
+ bool gather_mem_stats = GATHER_STATISTICS
+ CXX_MEM_STAT_INFO)
+ : m_table (n, ggc, sanitize_eq_and_hash, gather_mem_stats,
+ HASH_MAP_ORIGIN PASS_MEM_STAT)
+ {
+ }
+
+ explicit hash_map (const hash_map &h, bool ggc = false,
+ bool sanitize_eq_and_hash = true,
+ bool gather_mem_stats = GATHER_STATISTICS
+ CXX_MEM_STAT_INFO)
+ : m_table (h.m_table, ggc, sanitize_eq_and_hash, gather_mem_stats,
+ HASH_MAP_ORIGIN PASS_MEM_STAT) {}
+
+ /* Create a hash_map in ggc memory. */
+ static hash_map *create_ggc (size_t size = default_hash_map_size,
+ bool gather_mem_stats = GATHER_STATISTICS
+ CXX_MEM_STAT_INFO)
+ {
+ hash_map *map = ggc_alloc<hash_map> ();
+ new (map) hash_map (size, true, true, gather_mem_stats PASS_MEM_STAT);
+ return map;
+ }
+
+ /* If key k isn't already in the map add key k with value v to the map, and
+ return false. Otherwise set the value of the entry for key k to be v and
+ return true. */
+
+ bool put (const Key &k, const Value &v)
+ {
+ hash_entry *e = m_table.find_slot_with_hash (k, Traits::hash (k),
+ INSERT);
+ bool ins = Traits::is_empty (*e);
+ if (ins)
+ {
+ e->m_key = k;
+ new ((void *)&e->m_value) Value (v);
+ gcc_checking_assert (!Traits::is_empty (*e)
+ && !Traits::is_deleted (*e));
+ }
+ else
+ e->m_value = v;
+
+ return !ins;
+ }
+
+ /* If the passed in key is in the map return pointer to its value
+ otherwise NULL. */
+
+ Value *get (const Key &k)
+ {
+ hash_entry &e = m_table.find_with_hash (k, Traits::hash (k));
+ return Traits::is_empty (e) ? NULL : &e.m_value;
+ }
+
+ /* Return a reference to the value for the passed in key, creating the entry
+ if it doesn't already exist. If existed is not NULL then it is set to
+ false if the key was not previously in the map, and true otherwise. */
+
+ Value &get_or_insert (const Key &k, bool *existed = NULL)
+ {
+ hash_entry *e = m_table.find_slot_with_hash (k, Traits::hash (k),
+ INSERT);
+ bool ins = Traits::is_empty (*e);
+ if (ins)
+ {
+ e->m_key = k;
+ new ((void *)&e->m_value) Value ();
+ gcc_checking_assert (!Traits::is_empty (*e)
+ && !Traits::is_deleted (*e));
+ }
+
+ if (existed != NULL)
+ *existed = !ins;
+
+ return e->m_value;
+ }
+
+ void remove (const Key &k)
+ {
+ m_table.remove_elt_with_hash (k, Traits::hash (k));
+ }
+
+  /* Call the callback on each pair of key and value with the passed-in
+     arg until either the callback returns false or all pairs have been
+     seen.  The traversal is unordered.  */
+
+ template<typename Arg, bool (*f)(const typename Traits::key_type &,
+ const Value &, Arg)>
+ void traverse (Arg a) const
+ {
+ for (typename hash_table<hash_entry>::iterator iter = m_table.begin ();
+ iter != m_table.end (); ++iter)
+ if (!f ((*iter).m_key, (*iter).m_value, a))
+ break;
+ }
+
+ template<typename Arg, bool (*f)(const typename Traits::key_type &,
+ Value *, Arg)>
+ void traverse (Arg a) const
+ {
+ for (typename hash_table<hash_entry>::iterator iter = m_table.begin ();
+ iter != m_table.end (); ++iter)
+ if (!f ((*iter).m_key, &(*iter).m_value, a))
+ break;
+ }
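+
+  /* For example, given a hash_map <tree, unsigned> named counts and a
+     callback bool dump_one (const tree &, const unsigned &, FILE *),
+     a dump would be (a sketch):
+
+       counts.traverse <FILE *, dump_one> (stderr);  */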
+
+ size_t elements () const { return m_table.elements (); }
+
+ void empty () { m_table.empty(); }
+
+ /* Return true when there are no elements in this hash map. */
+ bool is_empty () const { return m_table.is_empty (); }
+
+ class iterator
+ {
+ public:
+ explicit iterator (const typename hash_table<hash_entry>::iterator &iter) :
+ m_iter (iter) {}
+
+ iterator &operator++ ()
+ {
+ ++m_iter;
+ return *this;
+ }
+
+    /* Can't use std::pair here, because GCC releases before 4.3 don't handle
+ std::pair where template parameters are references well.
+ See PR86739. */
+ class reference_pair {
+ public:
+ const Key &first;
+ Value &second;
+
+ reference_pair (const Key &key, Value &value) : first (key), second (value) {}
+
+ template <typename K, typename V>
+ operator std::pair<K, V> () const { return std::pair<K, V> (first, second); }
+ };
+
+ reference_pair operator* ()
+ {
+ hash_entry &e = *m_iter;
+ return reference_pair (e.m_key, e.m_value);
+ }
+
+ bool operator== (const iterator &other) const
+ {
+ return m_iter == other.m_iter;
+ }
+
+ bool operator != (const iterator &other) const
+ {
+ return m_iter != other.m_iter;
+ }
+
+ private:
+ typename hash_table<hash_entry>::iterator m_iter;
+ };
+
+ /* Standard iterator retrieval methods. */
+
+ iterator begin () const { return iterator (m_table.begin ()); }
+ iterator end () const { return iterator (m_table.end ()); }
+
+private:
+
+ template<typename T, typename U, typename V> friend void gt_ggc_mx (hash_map<T, U, V> *);
+ template<typename T, typename U, typename V> friend void gt_pch_nx (hash_map<T, U, V> *);
+ template<typename T, typename U, typename V> friend void gt_pch_nx (hash_map<T, U, V> *, gt_pointer_operator, void *);
+ template<typename T, typename U, typename V> friend void gt_cleare_cache (hash_map<T, U, V> *);
+
+ hash_table<hash_entry> m_table;
+};
+
+/* ggc marking routines. */
+
+template<typename K, typename V, typename H>
+inline void
+gt_ggc_mx (hash_map<K, V, H> *h)
+{
+ gt_ggc_mx (&h->m_table);
+}
+
+template<typename K, typename V, typename H>
+inline void
+gt_pch_nx (hash_map<K, V, H> *h)
+{
+ gt_pch_nx (&h->m_table);
+}
+
+template<typename K, typename V, typename H>
+inline void
+gt_cleare_cache (hash_map<K, V, H> *h)
+{
+ if (h)
+ gt_cleare_cache (&h->m_table);
+}
+
+template<typename K, typename V, typename H>
+inline void
+gt_pch_nx (hash_map<K, V, H> *h, gt_pointer_operator op, void *cookie)
+{
+ op (&h->m_table.m_entries, NULL, cookie);
+}
+
+enum hm_alloc { hm_heap = false, hm_ggc = true };
+template<bool ggc, typename K, typename V, typename H>
+inline hash_map<K,V,H> *
+hash_map_maybe_create (hash_map<K,V,H> *&h,
+ size_t size = default_hash_map_size)
+{
+ if (!h)
+ {
+ if (ggc)
+ h = hash_map<K,V,H>::create_ggc (size);
+ else
+ h = new hash_map<K,V,H> (size);
+ }
+ return h;
+}
+
+/* Like h->get, but handles null h. */
+template<typename K, typename V, typename H>
+inline V*
+hash_map_safe_get (hash_map<K,V,H> *h, const K& k)
+{
+ return h ? h->get (k) : NULL;
+}
+
+/* Like h->get, but handles null h. */
+template<bool ggc, typename K, typename V, typename H>
+inline V&
+hash_map_safe_get_or_insert (hash_map<K,V,H> *&h, const K& k, bool *e = NULL,
+ size_t size = default_hash_map_size)
+{
+ return hash_map_maybe_create<ggc> (h, size)->get_or_insert (k, e);
+}
+
+/* Like h->put, but handles null h. */
+template<bool ggc, typename K, typename V, typename H>
+inline bool
+hash_map_safe_put (hash_map<K,V,H> *&h, const K& k, const V& v,
+ size_t size = default_hash_map_size)
+{
+ return hash_map_maybe_create<ggc> (h, size)->put (k, v);
+}
+
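+/* Usage sketch: these helpers let a lazily created map live behind a
+   plain pointer, e.g. a cache allocated only on first use (compute is
+   a placeholder):
+
+     static GTY(()) hash_map<tree, tree> *cache;
+     ...
+     if (tree *val = hash_map_safe_get (cache, key))
+       return *val;
+     hash_map_safe_put<hm_ggc> (cache, key, compute (key));
+*/
+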
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-set.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-set.h
new file mode 100644
index 0000000..f6125eb
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-set.h
@@ -0,0 +1,217 @@
+/* A type-safe hash set.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef hash_set_h
+#define hash_set_h
+
+/* Class hash_set is a hash-value based container for objects of
+ KeyId type.
+   KeyId may be a non-trivial (non-POD) type provided a suitable Traits
+ class. Default Traits specializations are provided for basic types
+ such as integers, pointers, and std::pair. Inserted elements are
+ value-initialized either to zero for POD types or by invoking their
+ default ctor. Removed elements are destroyed by invoking their dtor.
+ On hash_set destruction all elements are removed. Objects of
+ hash_set type are copy-constructible but not assignable. */
+
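+/* A typical use looks like this (a minimal sketch; walk and decl are
+   placeholders):
+
+     hash_set <tree> visited;
+     if (!visited.add (decl))	// add returns false the first time,
+       walk (decl);		// so each decl is processed only once.
+*/
+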
+template<typename KeyId, bool Lazy = false,
+ typename Traits = default_hash_traits<KeyId> >
+class hash_set
+{
+public:
+ typedef typename Traits::value_type Key;
+ explicit hash_set (size_t n = 13, bool ggc = false CXX_MEM_STAT_INFO)
+ : m_table (n, ggc, true, GATHER_STATISTICS, HASH_SET_ORIGIN PASS_MEM_STAT) {}
+
+ /* Create a hash_set in gc memory with space for at least n elements. */
+
+ static hash_set *
+ create_ggc (size_t n)
+ {
+ hash_set *set = ggc_alloc<hash_set> ();
+ new (set) hash_set (n, true);
+ return set;
+ }
+
+  /* If key k isn't already in the set add it, and
+     return false.  Otherwise return true.  */
+
+ bool add (const Key &k)
+ {
+ Key *e = m_table.find_slot_with_hash (k, Traits::hash (k), INSERT);
+ bool existed = !Traits::is_empty (*e);
+ if (!existed)
+ {
+ new (e) Key (k);
+ // Catch attempts to insert e.g. a NULL pointer.
+ gcc_checking_assert (!Traits::is_empty (*e)
+ && !Traits::is_deleted (*e));
+ }
+
+ return existed;
+ }
+
+  /* Return true if the passed-in key is in the set.  */
+
+ bool contains (const Key &k)
+ {
+ if (Lazy)
+ return (m_table.find_slot_with_hash (k, Traits::hash (k), NO_INSERT)
+ != NULL);
+ Key &e = m_table.find_with_hash (k, Traits::hash (k));
+ return !Traits::is_empty (e);
+ }
+
+ void remove (const Key &k)
+ {
+ m_table.remove_elt_with_hash (k, Traits::hash (k));
+ }
+
+  /* Call the callback on each element with the passed-in arg.  */
+
+ template<typename Arg, bool (*f)(const typename Traits::value_type &, Arg)>
+ void traverse (Arg a) const
+ {
+ for (typename hash_table<Traits, Lazy>::iterator iter = m_table.begin ();
+ iter != m_table.end (); ++iter)
+ f (*iter, a);
+ }
+
+ /* Return the number of elements in the set. */
+
+ size_t elements () const { return m_table.elements (); }
+
+ /* Clear the hash table. */
+
+ void empty () { m_table.empty (); }
+
+ /* Return true when there are no elements in this hash set. */
+ bool is_empty () const { return m_table.is_empty (); }
+
+ class iterator
+ {
+ public:
+ explicit iterator (const typename hash_table<Traits,
+ Lazy>::iterator &iter) :
+ m_iter (iter) {}
+
+ iterator &operator++ ()
+ {
+ ++m_iter;
+ return *this;
+ }
+
+ Key
+ operator* ()
+ {
+ return *m_iter;
+ }
+
+ bool
+ operator != (const iterator &other) const
+ {
+ return m_iter != other.m_iter;
+ }
+
+ private:
+ typename hash_table<Traits, Lazy>::iterator m_iter;
+ };
+
+ /* Standard iterator retrieval methods. */
+
+ iterator begin () const { return iterator (m_table.begin ()); }
+ iterator end () const { return iterator (m_table.end ()); }
+
+
+private:
+
+ template<typename T, typename U>
+ friend void gt_ggc_mx (hash_set<T, false, U> *);
+ template<typename T, typename U>
+ friend void gt_pch_nx (hash_set<T, false, U> *);
+ template<typename T, typename U>
+ friend void gt_pch_nx (hash_set<T, false, U> *, gt_pointer_operator, void *);
+
+ hash_table<Traits, Lazy> m_table;
+};
+
+/* Generic hash_set<TYPE> debug helper.
+
+ This needs to be instantiated for each hash_set<TYPE> used throughout
+ the compiler like this:
+
+ DEFINE_DEBUG_HASH_SET (TYPE)
+
+ The reason we have a debug_helper() is because GDB can't
+ disambiguate a plain call to debug(some_hash), and it must be called
+ like debug<TYPE>(some_hash). */
+template<typename T>
+void
+debug_helper (hash_set<T> &ref)
+{
+ for (typename hash_set<T>::iterator it = ref.begin ();
+ it != ref.end (); ++it)
+ {
+ debug_slim (*it);
+ fputc ('\n', stderr);
+ }
+}
+
+#define DEFINE_DEBUG_HASH_SET(T) \
+ template void debug_helper (hash_set<T> &); \
+ DEBUG_FUNCTION void \
+ debug (hash_set<T> &ref) \
+ { \
+ debug_helper <T> (ref); \
+ } \
+ DEBUG_FUNCTION void \
+ debug (hash_set<T> *ptr) \
+ { \
+ if (ptr) \
+ debug (*ptr); \
+ else \
+ fprintf (stderr, "<nil>\n"); \
+ }
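+
+/* For example, a translation unit that wants a GDB-callable dump of
+   hash_set<tree> would contain
+
+     DEFINE_DEBUG_HASH_SET (tree)
+
+   and rely on an existing debug_slim overload for tree.  */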
+
+/* ggc marking routines. */
+
+template<typename K, typename H>
+inline void
+gt_ggc_mx (hash_set<K, false, H> *h)
+{
+ gt_ggc_mx (&h->m_table);
+}
+
+template<typename K, typename H>
+inline void
+gt_pch_nx (hash_set<K, false, H> *h)
+{
+ gt_pch_nx (&h->m_table);
+}
+
+template<typename K, typename H>
+inline void
+gt_pch_nx (hash_set<K, false, H> *h, gt_pointer_operator op, void *cookie)
+{
+ op (&h->m_table.m_entries, NULL, cookie);
+}
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-table.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-table.h
new file mode 100644
index 0000000..c0c6e1c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-table.h
@@ -0,0 +1,1321 @@
+/* A type-safe hash table template.
+ Copyright (C) 2012-2023 Free Software Foundation, Inc.
+ Contributed by Lawrence Crowl <crowl@google.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* This file implements a typed hash table.
+ The implementation borrows from libiberty's htab_t in hashtab.h.
+
+
+ INTRODUCTION TO TYPES
+
+ Users of the hash table generally need to be aware of three types.
+
+ 1. The type being placed into the hash table. This type is called
+ the value type.
+
+ 2. The type used to describe how to handle the value type within
+ the hash table. This descriptor type provides the hash table with
+ several things.
+
+ - A typedef named 'value_type' to the value type (from above).
+ Provided a suitable Descriptor class it may be a user-defined,
+ non-POD type.
+
+ - A static member function named 'hash' that takes a value_type
+ (or 'const value_type &') and returns a hashval_t value.
+
+ - A typedef named 'compare_type' that is used to test when a value
+ is found. This type is the comparison type. Usually, it will be
+ the same as value_type and may be a user-defined, non-POD type.
+ If it is not the same type, you must generally explicitly compute
+ hash values and pass them to the hash table.
+
+ - A static member function named 'equal' that takes a value_type
+ and a compare_type, and returns a bool. Both arguments can be
+ const references.
+
+      - A static function named 'remove' that takes a value_type pointer
+ and frees the memory allocated by it. This function is used when
+ individual elements of the table need to be disposed of (e.g.,
+ when deleting a hash table, removing elements from the table, etc).
+
+ - An optional static function named 'keep_cache_entry'. This
+ function is provided only for garbage-collected elements that
+        are not marked by the normal gc mark pass.  It describes what
+        should happen to the element at the end of the gc mark phase.
+ The return value should be:
+ - 0 if the element should be deleted
+ - 1 if the element should be kept and needs to be marked
+ - -1 if the element should be kept and is already marked.
+ Returning -1 rather than 1 is purely an optimization.
+
+ 3. The type of the hash table itself. (More later.)
+
+ In very special circumstances, users may need to know about a fourth type.
+
+ 4. The template type used to describe how hash table memory
+ is allocated. This type is called the allocator type. It is
+ parameterized on the value type. It provides two functions:
+
+ - A static member function named 'data_alloc'. This function
+ allocates the data elements in the table.
+
+ - A static member function named 'data_free'. This function
+ deallocates the data elements in the table.
+
+   Hash tables are instantiated with two type arguments.
+
+ * The descriptor type, (2) above.
+
+ * The allocator type, (4) above. In general, you will not need to
+ provide your own allocator type. By default, hash tables will use
+ the class template xcallocator, which uses malloc/free for allocation.
+
+
+ DEFINING A DESCRIPTOR TYPE
+
+ The first task in using the hash table is to describe the element type.
+ We compose this into a few steps.
+
+ 1. Decide on a removal policy for values stored in the table.
+ hash-traits.h provides class templates for the four most common
+ policies:
+
+ * typed_free_remove implements the static 'remove' member function
+ by calling free().
+
+ * typed_noop_remove implements the static 'remove' member function
+ by doing nothing.
+
+ * ggc_remove implements the static 'remove' member by doing nothing,
+ but instead provides routines for gc marking and for PCH streaming.
+ Use this for garbage-collected data that needs to be preserved across
+ collections.
+
+ * ggc_cache_remove is like ggc_remove, except that it does not
+ mark the entries during the normal gc mark phase. Instead it
+ uses 'keep_cache_entry' (described above) to keep elements that
+ were not collected and delete those that were. Use this for
+ garbage-collected caches that should not in themselves stop
+ the data from being collected.
+
+ You can use these policies by simply deriving the descriptor type
+   from one of those class templates, with the appropriate argument.
+
+ Otherwise, you need to write the static 'remove' member function
+ in the descriptor class.
+
+ 2. Choose a hash function. Write the static 'hash' member function.
+
+ 3. Decide whether the lookup function should take as input an object
+ of type value_type or something more restricted. Define compare_type
+ accordingly.
+
+ 4. Choose an equality testing function 'equal' that compares a value_type
+ and a compare_type.
+
+ If your elements are pointers, it is usually easiest to start with one
+ of the generic pointer descriptors described below and override the bits
+ you need to change.
+
+ AN EXAMPLE DESCRIPTOR TYPE
+
+ Suppose you want to put some_type into the hash table. You could define
+ the descriptor type as follows.
+
+ struct some_type_hasher : nofree_ptr_hash <some_type>
+ // Deriving from nofree_ptr_hash means that we get a 'remove' that does
+ // nothing. This choice is good for raw values.
+ {
+ static inline hashval_t hash (const value_type *);
+ static inline bool equal (const value_type *, const compare_type *);
+ };
+
+ inline hashval_t
+ some_type_hasher::hash (const value_type *e)
+ { ... compute and return a hash value for E ... }
+
+ inline bool
+ some_type_hasher::equal (const value_type *p1, const compare_type *p2)
+ { ... compare P1 vs P2. Return true if they are the 'same' ... }
+
+
+ AN EXAMPLE HASH_TABLE DECLARATION
+
+ To instantiate a hash table for some_type:
+
+ hash_table <some_type_hasher> some_type_hash_table;
+
+ There is no need to mention some_type directly, as the hash table will
+ obtain it using some_type_hasher::value_type.
+
+ You can then use any of the functions in hash_table's public interface.
+ See hash_table for details. The interface is very similar to libiberty's
+ htab_t.
+
+ If a hash table is used only in some rare cases, it is possible
+ to construct the hash_table lazily before first use. This is done
+ through:
+
+ hash_table <some_type_hasher, true> some_type_hash_table;
+
+ which will cause whatever methods actually need the allocated entries
+ array to allocate it later.
+
+
+ EASY DESCRIPTORS FOR POINTERS
+
+ There are four descriptors for pointer elements, one for each of
+ the removal policies above:
+
+ * nofree_ptr_hash (based on typed_noop_remove)
+ * free_ptr_hash (based on typed_free_remove)
+ * ggc_ptr_hash (based on ggc_remove)
+ * ggc_cache_ptr_hash (based on ggc_cache_remove)
+
+ These descriptors hash and compare elements by their pointer value,
+ rather than what they point to. So, to instantiate a hash table over
+ pointers to whatever_type, without freeing the whatever_types, use:
+
+ hash_table <nofree_ptr_hash <whatever_type> > whatever_type_hash_table;
+
+
+ HASH TABLE ITERATORS
+
+ The hash table provides standard C++ iterators. For example, consider a
+ hash table of some_info. We wish to consume each element of the table:
+
+ extern void consume (some_info *);
+
+ We define a convenience typedef and the hash table:
+
+ typedef hash_table <some_info_hasher> info_table_type;
+ info_table_type info_table;
+
+ Then we write the loop in typical C++ style:
+
+ for (info_table_type::iterator iter = info_table.begin ();
+ iter != info_table.end ();
+ ++iter)
+ if ((*iter).status == INFO_READY)
+ consume (&*iter);
+
+ Or with common sub-expression elimination:
+
+ for (info_table_type::iterator iter = info_table.begin ();
+ iter != info_table.end ();
+ ++iter)
+ {
+ some_info &elem = *iter;
+ if (elem.status == INFO_READY)
+ consume (&elem);
+ }
+
+ One can also use a more typical GCC style:
+
+ typedef some_info *some_info_p;
+ some_info *elem_ptr;
+ info_table_type::iterator iter;
+ FOR_EACH_HASH_TABLE_ELEMENT (info_table, elem_ptr, some_info_p, iter)
+ if (elem_ptr->status == INFO_READY)
+ consume (elem_ptr);
+
+*/
+
+
+#ifndef TYPED_HASHTAB_H
+#define TYPED_HASHTAB_H
+
+#include "statistics.h"
+#include "ggc.h"
+#include "vec.h"
+#include "hashtab.h"
+#include "inchash.h"
+#include "mem-stats-traits.h"
+#include "hash-traits.h"
+#include "hash-map-traits.h"
+
+template<typename, typename, typename> class hash_map;
+template<typename, bool, typename> class hash_set;
+
+/* The ordinary memory allocator. */
+/* FIXME (crowl): This allocator may be extracted for wider sharing later. */
+
+template <typename Type>
+struct xcallocator
+{
+ static Type *data_alloc (size_t count);
+ static void data_free (Type *memory);
+};
+
+
+/* Allocate memory for COUNT data blocks. */
+
+template <typename Type>
+inline Type *
+xcallocator <Type>::data_alloc (size_t count)
+{
+ return static_cast <Type *> (xcalloc (count, sizeof (Type)));
+}
+
+
+/* Free memory for data blocks. */
+
+template <typename Type>
+inline void
+xcallocator <Type>::data_free (Type *memory)
+{
+ return ::free (memory);
+}
+
+
+/* Table of primes and their inversion information. */
+
+struct prime_ent
+{
+ hashval_t prime;
+ hashval_t inv;
+ hashval_t inv_m2; /* inverse of prime-2 */
+ hashval_t shift;
+};
+
+extern struct prime_ent const prime_tab[];
+
+/* Limit number of comparisons when calling hash_table<>::verify. */
+extern unsigned int hash_table_sanitize_eq_limit;
+
+/* Functions for computing hash table indexes. */
+
+extern unsigned int hash_table_higher_prime_index (unsigned long n)
+ ATTRIBUTE_PURE;
+
+extern ATTRIBUTE_NORETURN ATTRIBUTE_COLD void hashtab_chk_error ();
+
+/* Return X % Y using multiplicative inverse values INV and SHIFT.
+
+ The multiplicative inverses computed above are for 32-bit types,
+   and require that we be able to compute a highpart multiply.
+
+ FIX: I am not at all convinced that
+ 3 loads, 2 multiplications, 3 shifts, and 3 additions
+ will be faster than
+ 1 load and 1 modulus
+ on modern systems running a compiler. */
+
+inline hashval_t
+mul_mod (hashval_t x, hashval_t y, hashval_t inv, int shift)
+{
+ hashval_t t1, t2, t3, t4, q, r;
+
+ t1 = ((uint64_t)x * inv) >> 32;
+ t2 = x - t1;
+ t3 = t2 >> 1;
+ t4 = t1 + t3;
+ q = t4 >> shift;
+ r = x - (q * y);
+
+ return r;
+}
+
+/* Compute the primary table index for HASH given current prime index. */
+
+inline hashval_t
+hash_table_mod1 (hashval_t hash, unsigned int index)
+{
+ const struct prime_ent *p = &prime_tab[index];
+ gcc_checking_assert (sizeof (hashval_t) * CHAR_BIT <= 32);
+ return mul_mod (hash, p->prime, p->inv, p->shift);
+}
+
+/* Compute the secondary table index for HASH given current prime index. */
+
+inline hashval_t
+hash_table_mod2 (hashval_t hash, unsigned int index)
+{
+ const struct prime_ent *p = &prime_tab[index];
+ gcc_checking_assert (sizeof (hashval_t) * CHAR_BIT <= 32);
+ return 1 + mul_mod (hash, p->prime - 2, p->inv_m2, p->shift);
+}
+
+class mem_usage;
+
+/* User-facing hash table type.
+
+ The table stores elements of type Descriptor::value_type and uses
+ the static descriptor functions described at the top of the file
+ to hash, compare and remove elements.
+
+ Specify the template Allocator to allocate and free memory.
+ The default is xcallocator.
+
+   Storage is an implementation detail and should not be used outside the
+   hash table code.  An illustrative usage sketch follows the class
+   declaration below.  */
+template <typename Descriptor, bool Lazy = false,
+ template<typename Type> class Allocator = xcallocator>
+class hash_table
+{
+ typedef typename Descriptor::value_type value_type;
+ typedef typename Descriptor::compare_type compare_type;
+
+public:
+ explicit hash_table (size_t, bool ggc = false,
+ bool sanitize_eq_and_hash = true,
+ bool gather_mem_stats = GATHER_STATISTICS,
+ mem_alloc_origin origin = HASH_TABLE_ORIGIN
+ CXX_MEM_STAT_INFO);
+ explicit hash_table (const hash_table &, bool ggc = false,
+ bool sanitize_eq_and_hash = true,
+ bool gather_mem_stats = GATHER_STATISTICS,
+ mem_alloc_origin origin = HASH_TABLE_ORIGIN
+ CXX_MEM_STAT_INFO);
+ ~hash_table ();
+
+ /* Create a hash_table in gc memory. */
+ static hash_table *
+ create_ggc (size_t n, bool sanitize_eq_and_hash = true CXX_MEM_STAT_INFO)
+ {
+ hash_table *table = ggc_alloc<hash_table> ();
+ new (table) hash_table (n, true, sanitize_eq_and_hash, GATHER_STATISTICS,
+ HASH_TABLE_ORIGIN PASS_MEM_STAT);
+ return table;
+ }
+
+ /* Current size (in entries) of the hash table. */
+ size_t size () const { return m_size; }
+
+ /* Return the current number of elements in this hash table. */
+ size_t elements () const { return m_n_elements - m_n_deleted; }
+
+  /* Return the current number of elements in this hash table,
+     including deleted elements. */
+ size_t elements_with_deleted () const { return m_n_elements; }
+
+ /* This function clears all entries in this hash table. */
+ void empty () { if (elements ()) empty_slow (); }
+
+ /* Return true when there are no elements in this hash table. */
+ bool is_empty () const { return elements () == 0; }
+
+ /* This function clears a specified SLOT in a hash table. It is
+ useful when you've already done the lookup and don't want to do it
+ again. */
+ void clear_slot (value_type *);
+
+ /* This function searches for a hash table entry equal to the given
+ COMPARABLE element starting with the given HASH value. It cannot
+ be used to insert or delete an element. */
+ value_type &find_with_hash (const compare_type &, hashval_t);
+
+  /* Like find_with_hash, but compute the hash value from the element. */
+ value_type &find (const value_type &value)
+ {
+ return find_with_hash (value, Descriptor::hash (value));
+ }
+
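+  /* Like find_slot_with_hash, but compute the hash value from the
+     element. */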
+ value_type *find_slot (const value_type &value, insert_option insert)
+ {
+ return find_slot_with_hash (value, Descriptor::hash (value), insert);
+ }
+
+ /* This function searches for a hash table slot containing an entry
+ equal to the given COMPARABLE element and starting with the given
+ HASH. To delete an entry, call this with insert=NO_INSERT, then
+ call clear_slot on the slot returned (possibly after doing some
+ checks). To insert an entry, call this with insert=INSERT, then
+ write the value you want into the returned slot. When inserting an
+ entry, NULL may be returned if memory allocation fails. */
+ value_type *find_slot_with_hash (const compare_type &comparable,
+ hashval_t hash, enum insert_option insert);
+
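+  /* For illustration (a hypothetical sketch, not part of this header),
+     the insert and delete protocols described above, for a table of ints:
+
+       int *slot = table.find_slot_with_hash (key, hash, INSERT);
+       if (slot)
+         *slot = key;               // insertion: write into the slot
+
+       slot = table.find_slot_with_hash (key, hash, NO_INSERT);
+       if (slot)
+         table.clear_slot (slot);   // deletion: clear the found slot
+  */
+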
+ /* This function deletes an element with the given COMPARABLE value
+ from hash table starting with the given HASH. If there is no
+ matching element in the hash table, this function does nothing. */
+ void remove_elt_with_hash (const compare_type &, hashval_t);
+
+ /* Like remove_elt_with_hash, but compute the hash value from the
+ element. */
+ void remove_elt (const value_type &value)
+ {
+ remove_elt_with_hash (value, Descriptor::hash (value));
+ }
+
+ /* This function scans over the entire hash table calling CALLBACK for
+ each live entry. If CALLBACK returns false, the iteration stops.
+ ARGUMENT is passed as CALLBACK's second argument. */
+ template <typename Argument,
+ int (*Callback) (value_type *slot, Argument argument)>
+ void traverse_noresize (Argument argument);
+
+  /* Like traverse_noresize, but resizes the table when it is too empty,
+     to improve the efficiency of subsequent calls. */
+ template <typename Argument,
+ int (*Callback) (value_type *slot, Argument argument)>
+ void traverse (Argument argument);
+
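+  /* A hypothetical sketch (not part of this header) of a traversal
+     callback that counts the live entries in a table of ints:
+
+       static int count_entry (int *slot, int *n) { ++*n; return 1; }
+       ...
+       int n = 0;
+       table.traverse<int *, count_entry> (&n);
+  */
+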
+ class iterator
+ {
+ public:
+ iterator () : m_slot (NULL), m_limit (NULL) {}
+
+ iterator (value_type *slot, value_type *limit) :
+ m_slot (slot), m_limit (limit) {}
+
+ inline value_type &operator * () { return *m_slot; }
+ void slide ();
+ inline iterator &operator ++ ();
+ bool operator != (const iterator &other) const
+ {
+ return m_slot != other.m_slot || m_limit != other.m_limit;
+ }
+
+ private:
+ value_type *m_slot;
+ value_type *m_limit;
+ };
+
+ iterator begin () const
+ {
+ if (Lazy && m_entries == NULL)
+ return iterator ();
+ check_complete_insertion ();
+ iterator iter (m_entries, m_entries + m_size);
+ iter.slide ();
+ return iter;
+ }
+
+ iterator end () const { return iterator (); }
+
+ double collisions () const
+ {
+ return m_searches ? static_cast <double> (m_collisions) / m_searches : 0;
+ }
+
+private:
+ /* FIXME: Make the class assignable. See pr90959. */
+ void operator= (hash_table&);
+
+ template<typename T> friend void gt_ggc_mx (hash_table<T> *);
+ template<typename T> friend void gt_pch_nx (hash_table<T> *);
+ template<typename T> friend void
+ hashtab_entry_note_pointers (void *, void *, gt_pointer_operator, void *);
+ template<typename T, typename U, typename V> friend void
+ gt_pch_nx (hash_map<T, U, V> *, gt_pointer_operator, void *);
+ template<typename T, typename U>
+ friend void gt_pch_nx (hash_set<T, false, U> *, gt_pointer_operator, void *);
+ template<typename T> friend void gt_pch_nx (hash_table<T> *,
+ gt_pointer_operator, void *);
+
+ template<typename T> friend void gt_cleare_cache (hash_table<T> *);
+
+ void empty_slow ();
+
+ value_type *alloc_entries (size_t n CXX_MEM_STAT_INFO) const;
+ value_type *find_empty_slot_for_expand (hashval_t);
+ void verify (const compare_type &comparable, hashval_t hash);
+ bool too_empty_p (unsigned int);
+ void expand ();
+ static bool is_deleted (value_type &v)
+ {
+ /* Traits are supposed to avoid recognizing elements as both empty
+ and deleted, but to fail safe in case custom traits fail to do
+ that, make sure we never test for is_deleted without having
+ first ruled out is_empty. */
+ gcc_checking_assert (!Descriptor::is_empty (v));
+ return Descriptor::is_deleted (v);
+ }
+
+ static bool is_empty (value_type &v)
+ {
+ return Descriptor::is_empty (v);
+ }
+
+ static void mark_deleted (value_type &v)
+ {
+ Descriptor::mark_deleted (v);
+ /* Traits are supposed to refuse to set elements as deleted if
+ those would be indistinguishable from empty, but to fail safe
+ in case custom traits fail to do that, check that the
+ just-deleted element does not look empty. */
+ gcc_checking_assert (!Descriptor::is_empty (v));
+ }
+
+ static void mark_empty (value_type &v)
+ {
+ Descriptor::mark_empty (v);
+ }
+
+public:
+ void check_complete_insertion () const
+ {
+#if CHECKING_P
+ if (!m_inserting_slot)
+ return;
+
+ gcc_checking_assert (m_inserting_slot >= &m_entries[0]
+ && m_inserting_slot < &m_entries[m_size]);
+
+ if (!is_empty (*m_inserting_slot))
+ m_inserting_slot = NULL;
+ else
+ gcc_unreachable ();
+#endif
+ }
+
+private:
+ value_type *check_insert_slot (value_type *ret)
+ {
+#if CHECKING_P
+ gcc_checking_assert (is_empty (*ret));
+ m_inserting_slot = ret;
+#endif
+ return ret;
+ }
+
+#if CHECKING_P
+ mutable value_type *m_inserting_slot;
+#endif
+
+ /* Table itself. */
+ value_type *m_entries;
+
+ size_t m_size;
+
+ /* Current number of elements including also deleted elements. */
+ size_t m_n_elements;
+
+ /* Current number of deleted elements in the table. */
+ size_t m_n_deleted;
+
+  /* The following member is used for debugging.  Its value is the
+     number of calls to `htab_find_slot' made on this hash table. */
+ unsigned int m_searches;
+
+  /* The following member is used for debugging.  Its value is the
+     number of collisions resolved over the lifetime of the hash table. */
+ unsigned int m_collisions;
+
+ /* Current size (in entries) of the hash table, as an index into the
+ table of primes. */
+ unsigned int m_size_prime_index;
+
+  /* True if m_entries is stored in ggc memory. */
+ bool m_ggc;
+
+ /* True if the table should be sanitized for equal and hash functions. */
+ bool m_sanitize_eq_and_hash;
+
+  /* True if we should gather memory statistics for the table. */
+#if GATHER_STATISTICS
+ bool m_gather_mem_stats;
+#else
+ static const bool m_gather_mem_stats = false;
+#endif
+};
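+
+/* An illustrative usage sketch (not part of this header), assuming the
+   int_hash traits from hash-traits.h with hypothetical spare values -1
+   (empty) and -2 (deleted):
+
+     typedef hash_table<int_hash<int, -1, -2> > int_table;
+     int_table t (13);
+     int *slot = t.find_slot (42, INSERT);
+     *slot = 42;
+     if (t.elements () == 1)
+       t.remove_elt (42);
+*/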
+
+/* As mem-stats.h heavily utilizes hash maps (hash tables), we have to include
+ mem-stats.h after hash_table declaration. */
+
+#include "mem-stats.h"
+#include "hash-map.h"
+
+extern mem_alloc_description<mem_usage>& hash_table_usage (void);
+
+/* Support function for statistics. */
+extern void dump_hash_table_loc_statistics (void);
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+hash_table<Descriptor, Lazy, Allocator>::hash_table (size_t size, bool ggc,
+ bool sanitize_eq_and_hash,
+ bool gather_mem_stats
+ ATTRIBUTE_UNUSED,
+ mem_alloc_origin origin
+ MEM_STAT_DECL) :
+#if CHECKING_P
+ m_inserting_slot (0),
+#endif
+ m_n_elements (0), m_n_deleted (0), m_searches (0), m_collisions (0),
+ m_ggc (ggc), m_sanitize_eq_and_hash (sanitize_eq_and_hash)
+#if GATHER_STATISTICS
+ , m_gather_mem_stats (gather_mem_stats)
+#endif
+{
+ unsigned int size_prime_index;
+
+ size_prime_index = hash_table_higher_prime_index (size);
+ size = prime_tab[size_prime_index].prime;
+
+ if (m_gather_mem_stats)
+ hash_table_usage ().register_descriptor (this, origin, ggc
+ FINAL_PASS_MEM_STAT);
+
+ if (Lazy)
+ m_entries = NULL;
+ else
+ m_entries = alloc_entries (size PASS_MEM_STAT);
+ m_size = size;
+ m_size_prime_index = size_prime_index;
+}
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+hash_table<Descriptor, Lazy, Allocator>::hash_table (const hash_table &h,
+ bool ggc,
+ bool sanitize_eq_and_hash,
+ bool gather_mem_stats
+ ATTRIBUTE_UNUSED,
+ mem_alloc_origin origin
+ MEM_STAT_DECL) :
+#if CHECKING_P
+ m_inserting_slot (0),
+#endif
+ m_n_elements (h.m_n_elements), m_n_deleted (h.m_n_deleted),
+ m_searches (0), m_collisions (0), m_ggc (ggc),
+ m_sanitize_eq_and_hash (sanitize_eq_and_hash)
+#if GATHER_STATISTICS
+ , m_gather_mem_stats (gather_mem_stats)
+#endif
+{
+ h.check_complete_insertion ();
+
+ size_t size = h.m_size;
+
+ if (m_gather_mem_stats)
+ hash_table_usage ().register_descriptor (this, origin, ggc
+ FINAL_PASS_MEM_STAT);
+
+ if (Lazy && h.m_entries == NULL)
+ m_entries = NULL;
+ else
+ {
+ value_type *nentries = alloc_entries (size PASS_MEM_STAT);
+ for (size_t i = 0; i < size; ++i)
+ {
+ value_type &entry = h.m_entries[i];
+ if (is_empty (entry))
+ continue;
+ else if (is_deleted (entry))
+ mark_deleted (nentries[i]);
+ else
+ new ((void*) (nentries + i)) value_type (entry);
+ }
+ m_entries = nentries;
+ }
+ m_size = size;
+ m_size_prime_index = h.m_size_prime_index;
+}
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+hash_table<Descriptor, Lazy, Allocator>::~hash_table ()
+{
+ check_complete_insertion ();
+
+ if (!Lazy || m_entries)
+ {
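+      /* Walk backwards; the unsigned index wraps past zero to a value
+         >= m_size, which terminates the loop.  */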
+ for (size_t i = m_size - 1; i < m_size; i--)
+ if (!is_empty (m_entries[i]) && !is_deleted (m_entries[i]))
+ Descriptor::remove (m_entries[i]);
+
+ if (!m_ggc)
+ Allocator <value_type> ::data_free (m_entries);
+ else
+ ggc_free (m_entries);
+ if (m_gather_mem_stats)
+ hash_table_usage ().release_instance_overhead (this,
+ sizeof (value_type)
+ * m_size, true);
+ }
+ else if (m_gather_mem_stats)
+ hash_table_usage ().unregister_descriptor (this);
+}
+
+/* This function returns an array of empty hash table elements. */
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+inline typename hash_table<Descriptor, Lazy, Allocator>::value_type *
+hash_table<Descriptor, Lazy,
+ Allocator>::alloc_entries (size_t n MEM_STAT_DECL) const
+{
+ value_type *nentries;
+
+ if (m_gather_mem_stats)
+ hash_table_usage ().register_instance_overhead (sizeof (value_type) * n, this);
+
+ if (!m_ggc)
+ nentries = Allocator <value_type> ::data_alloc (n);
+ else
+ nentries = ::ggc_cleared_vec_alloc<value_type> (n PASS_MEM_STAT);
+
+ gcc_assert (nentries != NULL);
+ if (!Descriptor::empty_zero_p)
+ for (size_t i = 0; i < n; i++)
+ mark_empty (nentries[i]);
+
+ return nentries;
+}
+
+/* Similar to find_slot, but without several unwanted side effects:
+ - Does not call equal when it finds an existing entry.
+ - Does not change the count of elements/searches/collisions in the
+ hash table.
+ This function also assumes there are no deleted entries in the table.
+ HASH is the hash value for the element to be inserted. */
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+typename hash_table<Descriptor, Lazy, Allocator>::value_type *
+hash_table<Descriptor, Lazy,
+ Allocator>::find_empty_slot_for_expand (hashval_t hash)
+{
+ hashval_t index = hash_table_mod1 (hash, m_size_prime_index);
+ size_t size = m_size;
+ value_type *slot = m_entries + index;
+ hashval_t hash2;
+
+ if (is_empty (*slot))
+ return slot;
+ gcc_checking_assert (!is_deleted (*slot));
+
+ hash2 = hash_table_mod2 (hash, m_size_prime_index);
+ for (;;)
+ {
+ index += hash2;
+ if (index >= size)
+ index -= size;
+
+ slot = m_entries + index;
+ if (is_empty (*slot))
+ return slot;
+ gcc_checking_assert (!is_deleted (*slot));
+ }
+}
+
+/* Return true if the current table is excessively big for ELTS elements. */
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+inline bool
+hash_table<Descriptor, Lazy, Allocator>::too_empty_p (unsigned int elts)
+{
+ return elts * 8 < m_size && m_size > 32;
+}
+
+/* The following function changes the size of the memory allocated for
+   the entries, and re-inserts the table elements.  The occupancy of the
+   table after the call will be about 50%.  Naturally the hash table must
+   already exist.  Note also that the entries are moved, so pointers into
+   the old storage become invalid.  If memory allocation fails, this
+   function will abort. */
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+void
+hash_table<Descriptor, Lazy, Allocator>::expand ()
+{
+ check_complete_insertion ();
+
+ value_type *oentries = m_entries;
+ unsigned int oindex = m_size_prime_index;
+ size_t osize = size ();
+ value_type *olimit = oentries + osize;
+ size_t elts = elements ();
+
+ /* Resize only when table after removal of unused elements is either
+ too full or too empty. */
+ unsigned int nindex;
+ size_t nsize;
+ if (elts * 2 > osize || too_empty_p (elts))
+ {
+ nindex = hash_table_higher_prime_index (elts * 2);
+ nsize = prime_tab[nindex].prime;
+ }
+ else
+ {
+ nindex = oindex;
+ nsize = osize;
+ }
+
+ value_type *nentries = alloc_entries (nsize);
+
+ if (m_gather_mem_stats)
+ hash_table_usage ().release_instance_overhead (this, sizeof (value_type)
+ * osize);
+
+ size_t n_deleted = m_n_deleted;
+
+ m_entries = nentries;
+ m_size = nsize;
+ m_size_prime_index = nindex;
+ m_n_elements -= m_n_deleted;
+ m_n_deleted = 0;
+
+ size_t n_elements = m_n_elements;
+
+ value_type *p = oentries;
+ do
+ {
+ value_type &x = *p;
+
+ if (is_empty (x))
+ ;
+ else if (is_deleted (x))
+ n_deleted--;
+ else
+ {
+ n_elements--;
+ value_type *q = find_empty_slot_for_expand (Descriptor::hash (x));
+ new ((void*) q) value_type (std::move (x));
+ /* After the resources of 'x' have been moved to a new object at 'q',
+ we now have to destroy the 'x' object, to end its lifetime. */
+ x.~value_type ();
+ }
+
+ p++;
+ }
+ while (p < olimit);
+
+ gcc_checking_assert (!n_elements && !n_deleted);
+
+ if (!m_ggc)
+ Allocator <value_type> ::data_free (oentries);
+ else
+ ggc_free (oentries);
+}
+
+/* Implements empty() in cases where it isn't a no-op. */
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+void
+hash_table<Descriptor, Lazy, Allocator>::empty_slow ()
+{
+ check_complete_insertion ();
+
+ size_t size = m_size;
+ size_t nsize = size;
+ value_type *entries = m_entries;
+
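+  /* Walk backwards; the unsigned index wraps past zero to a value
+     >= size, which terminates the loop.  */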
+ for (size_t i = size - 1; i < size; i--)
+ if (!is_empty (entries[i]) && !is_deleted (entries[i]))
+ Descriptor::remove (entries[i]);
+
+  /* Instead of clearing a megabyte of memory, downsize the table. */
+ if (size > 1024*1024 / sizeof (value_type))
+ nsize = 1024 / sizeof (value_type);
+ else if (too_empty_p (m_n_elements))
+ nsize = m_n_elements * 2;
+
+ if (nsize != size)
+ {
+ unsigned int nindex = hash_table_higher_prime_index (nsize);
+
+ nsize = prime_tab[nindex].prime;
+
+ if (!m_ggc)
+ Allocator <value_type> ::data_free (m_entries);
+ else
+ ggc_free (m_entries);
+
+ m_entries = alloc_entries (nsize);
+ m_size = nsize;
+ m_size_prime_index = nindex;
+ }
+ else if (Descriptor::empty_zero_p)
+ memset ((void *) entries, 0, size * sizeof (value_type));
+ else
+ for (size_t i = 0; i < size; i++)
+ mark_empty (entries[i]);
+
+ m_n_deleted = 0;
+ m_n_elements = 0;
+}
+
+/* This function clears a specified SLOT in a hash table. It is
+ useful when you've already done the lookup and don't want to do it
+ again. */
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+void
+hash_table<Descriptor, Lazy, Allocator>::clear_slot (value_type *slot)
+{
+ check_complete_insertion ();
+
+ gcc_checking_assert (!(slot < m_entries || slot >= m_entries + size ()
+ || is_empty (*slot) || is_deleted (*slot)));
+
+ Descriptor::remove (*slot);
+
+ mark_deleted (*slot);
+ m_n_deleted++;
+}
+
+/* This function searches for a hash table entry equal to the given
+ COMPARABLE element starting with the given HASH value. It cannot
+ be used to insert or delete an element. */
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+typename hash_table<Descriptor, Lazy, Allocator>::value_type &
+hash_table<Descriptor, Lazy, Allocator>
+::find_with_hash (const compare_type &comparable, hashval_t hash)
+{
+ m_searches++;
+ size_t size = m_size;
+ hashval_t index = hash_table_mod1 (hash, m_size_prime_index);
+
+ if (Lazy && m_entries == NULL)
+ m_entries = alloc_entries (size);
+
+ check_complete_insertion ();
+
+#if CHECKING_P
+ if (m_sanitize_eq_and_hash)
+ verify (comparable, hash);
+#endif
+
+ value_type *entry = &m_entries[index];
+ if (is_empty (*entry)
+ || (!is_deleted (*entry) && Descriptor::equal (*entry, comparable)))
+ return *entry;
+
+ hashval_t hash2 = hash_table_mod2 (hash, m_size_prime_index);
+ for (;;)
+ {
+ m_collisions++;
+ index += hash2;
+ if (index >= size)
+ index -= size;
+
+ entry = &m_entries[index];
+ if (is_empty (*entry)
+ || (!is_deleted (*entry) && Descriptor::equal (*entry, comparable)))
+ return *entry;
+ }
+}
+
+/* This function searches for a hash table slot containing an entry
+ equal to the given COMPARABLE element and starting with the given
+ HASH. To delete an entry, call this with insert=NO_INSERT, then
+ call clear_slot on the slot returned (possibly after doing some
+ checks). To insert an entry, call this with insert=INSERT, then
+ write the value you want into the returned slot. When inserting an
+ entry, NULL may be returned if memory allocation fails. */
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+typename hash_table<Descriptor, Lazy, Allocator>::value_type *
+hash_table<Descriptor, Lazy, Allocator>
+::find_slot_with_hash (const compare_type &comparable, hashval_t hash,
+ enum insert_option insert)
+{
+ if (Lazy && m_entries == NULL)
+ {
+ if (insert == INSERT)
+ m_entries = alloc_entries (m_size);
+ else
+ return NULL;
+ }
+ if (insert == INSERT && m_size * 3 <= m_n_elements * 4)
+ expand ();
+ else
+ check_complete_insertion ();
+
+#if CHECKING_P
+ if (m_sanitize_eq_and_hash)
+ verify (comparable, hash);
+#endif
+
+ m_searches++;
+ value_type *first_deleted_slot = NULL;
+ hashval_t index = hash_table_mod1 (hash, m_size_prime_index);
+ hashval_t hash2 = hash_table_mod2 (hash, m_size_prime_index);
+ value_type *entry = &m_entries[index];
+ size_t size = m_size;
+ if (is_empty (*entry))
+ goto empty_entry;
+ else if (is_deleted (*entry))
+ first_deleted_slot = &m_entries[index];
+ else if (Descriptor::equal (*entry, comparable))
+ return &m_entries[index];
+
+ for (;;)
+ {
+ m_collisions++;
+ index += hash2;
+ if (index >= size)
+ index -= size;
+
+ entry = &m_entries[index];
+ if (is_empty (*entry))
+ goto empty_entry;
+ else if (is_deleted (*entry))
+ {
+ if (!first_deleted_slot)
+ first_deleted_slot = &m_entries[index];
+ }
+ else if (Descriptor::equal (*entry, comparable))
+ return &m_entries[index];
+ }
+
+ empty_entry:
+ if (insert == NO_INSERT)
+ return NULL;
+
+ if (first_deleted_slot)
+ {
+ m_n_deleted--;
+ mark_empty (*first_deleted_slot);
+ return check_insert_slot (first_deleted_slot);
+ }
+
+ m_n_elements++;
+ return check_insert_slot (&m_entries[index]);
+}
+
+/* Verify that all existing elements in the hash table which are
+ equal to COMPARABLE have an equal HASH value provided as argument.
+ Also check that the hash table element counts are correct. */
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+void
+hash_table<Descriptor, Lazy, Allocator>
+::verify (const compare_type &comparable, hashval_t hash)
+{
+ size_t n_elements = m_n_elements;
+ size_t n_deleted = m_n_deleted;
+ for (size_t i = 0; i < MIN (hash_table_sanitize_eq_limit, m_size); i++)
+ {
+ value_type *entry = &m_entries[i];
+ if (!is_empty (*entry))
+ {
+ n_elements--;
+ if (is_deleted (*entry))
+ n_deleted--;
+ else if (hash != Descriptor::hash (*entry)
+ && Descriptor::equal (*entry, comparable))
+ hashtab_chk_error ();
+ }
+ }
+ if (hash_table_sanitize_eq_limit >= m_size)
+ gcc_checking_assert (!n_elements && !n_deleted);
+}
+
+/* This function deletes an element with the given COMPARABLE value
+ from hash table starting with the given HASH. If there is no
+ matching element in the hash table, this function does nothing. */
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+void
+hash_table<Descriptor, Lazy, Allocator>
+::remove_elt_with_hash (const compare_type &comparable, hashval_t hash)
+{
+ check_complete_insertion ();
+
+ value_type *slot = find_slot_with_hash (comparable, hash, NO_INSERT);
+ if (slot == NULL)
+ return;
+
+ Descriptor::remove (*slot);
+
+ mark_deleted (*slot);
+ m_n_deleted++;
+}
+
+/* This function scans over the entire hash table calling CALLBACK for
+ each live entry. If CALLBACK returns false, the iteration stops.
+ ARGUMENT is passed as CALLBACK's second argument. */
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+template<typename Argument,
+ int (*Callback)
+ (typename hash_table<Descriptor, Lazy, Allocator>::value_type *slot,
+ Argument argument)>
+void
+hash_table<Descriptor, Lazy, Allocator>::traverse_noresize (Argument argument)
+{
+ if (Lazy && m_entries == NULL)
+ return;
+
+ check_complete_insertion ();
+
+ value_type *slot = m_entries;
+ value_type *limit = slot + size ();
+
+ do
+ {
+ value_type &x = *slot;
+
+ if (!is_empty (x) && !is_deleted (x))
+ if (! Callback (slot, argument))
+ break;
+ }
+ while (++slot < limit);
+}
+
+/* Like traverse_noresize, but resizes the table when it is too empty,
+   to improve the efficiency of subsequent calls. */
+
+template <typename Descriptor, bool Lazy,
+ template <typename Type> class Allocator>
+template <typename Argument,
+ int (*Callback)
+ (typename hash_table<Descriptor, Lazy, Allocator>::value_type *slot,
+ Argument argument)>
+void
+hash_table<Descriptor, Lazy, Allocator>::traverse (Argument argument)
+{
+ if (too_empty_p (elements ()) && (!Lazy || m_entries))
+ expand ();
+
+ traverse_noresize <Argument, Callback> (argument);
+}
+
+/* Slide down the iterator slots until an active entry is found. */
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+void
+hash_table<Descriptor, Lazy, Allocator>::iterator::slide ()
+{
+ for ( ; m_slot < m_limit; ++m_slot )
+ {
+ value_type &x = *m_slot;
+ if (!is_empty (x) && !is_deleted (x))
+ return;
+ }
+ m_slot = NULL;
+ m_limit = NULL;
+}
+
+/* Bump the iterator. */
+
+template<typename Descriptor, bool Lazy,
+ template<typename Type> class Allocator>
+inline typename hash_table<Descriptor, Lazy, Allocator>::iterator &
+hash_table<Descriptor, Lazy, Allocator>::iterator::operator ++ ()
+{
+ ++m_slot;
+ slide ();
+ return *this;
+}
+
+
+/* Iterate through the elements of hash_table HTAB,
+ using hash_table <....>::iterator ITER,
+ storing each element in RESULT, which is of type TYPE. */
+
+#define FOR_EACH_HASH_TABLE_ELEMENT(HTAB, RESULT, TYPE, ITER) \
+ for ((ITER) = (HTAB).begin (); \
+ (ITER) != (HTAB).end () ? (RESULT = *(ITER) , true) : false; \
+ ++(ITER))
+
+/* ggc walking routines. */
+
+template<typename E>
+inline void
+gt_ggc_mx (hash_table<E> *h)
+{
+ typedef hash_table<E> table;
+
+ if (!ggc_test_and_set_mark (h->m_entries))
+ return;
+
+ for (size_t i = 0; i < h->m_size; i++)
+ {
+ if (table::is_empty (h->m_entries[i])
+ || table::is_deleted (h->m_entries[i]))
+ continue;
+
+      /* Use ggc_maybe_mx so we don't mark right away for cache tables;
+         we'll mark in gt_cleare_cache if appropriate. */
+ E::ggc_maybe_mx (h->m_entries[i]);
+ }
+}
+
+template<typename D>
+inline void
+hashtab_entry_note_pointers (void *obj, void *h, gt_pointer_operator op,
+ void *cookie)
+{
+ hash_table<D> *map = static_cast<hash_table<D> *> (h);
+ gcc_checking_assert (map->m_entries == obj);
+ for (size_t i = 0; i < map->m_size; i++)
+ {
+ typedef hash_table<D> table;
+ if (table::is_empty (map->m_entries[i])
+ || table::is_deleted (map->m_entries[i]))
+ continue;
+
+ D::pch_nx (map->m_entries[i], op, cookie);
+ }
+}
+
+template<typename D>
+void
+gt_pch_nx (hash_table<D> *h)
+{
+ h->check_complete_insertion ();
+ bool success
+ = gt_pch_note_object (h->m_entries, h, hashtab_entry_note_pointers<D>);
+ gcc_checking_assert (success);
+ for (size_t i = 0; i < h->m_size; i++)
+ {
+ if (hash_table<D>::is_empty (h->m_entries[i])
+ || hash_table<D>::is_deleted (h->m_entries[i]))
+ continue;
+
+ D::pch_nx (h->m_entries[i]);
+ }
+}
+
+template<typename D>
+inline void
+gt_pch_nx (hash_table<D> *h, gt_pointer_operator op, void *cookie)
+{
+ op (&h->m_entries, NULL, cookie);
+}
+
+template<typename H>
+inline void
+gt_cleare_cache (hash_table<H> *h)
+{
+ typedef hash_table<H> table;
+ if (!h)
+ return;
+
+ for (typename table::iterator iter = h->begin (); iter != h->end (); ++iter)
+ if (!table::is_empty (*iter) && !table::is_deleted (*iter))
+ {
+ int res = H::keep_cache_entry (*iter);
+ if (res == 0)
+ h->clear_slot (&*iter);
+ else if (res != -1)
+ H::ggc_mx (*iter);
+ }
+}
+
+#endif /* TYPED_HASHTAB_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-traits.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-traits.h
new file mode 100644
index 0000000..47d80f5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hash-traits.h
@@ -0,0 +1,471 @@
+/* Traits for hashable types.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef hash_traits_h
+#define hash_traits_h
+
+/* Helpful type for removing with free. */
+
+template <typename Type>
+struct typed_free_remove
+{
+ static inline void remove (Type *p);
+};
+
+template <typename Type>
+struct typed_const_free_remove
+{
+ static inline void remove (const Type *p);
+};
+
+/* Remove with free. */
+
+template <typename Type>
+inline void
+typed_free_remove <Type>::remove (Type *p)
+{
+ free (p);
+}
+
+template <typename Type>
+inline void
+typed_const_free_remove <Type>::remove (const Type *p)
+{
+ free (const_cast <Type *> (p));
+}
+
+/* Helpful type for removing with delete. */
+
+template <typename Type>
+struct typed_delete_remove
+{
+ static inline void remove (Type *p);
+};
+
+
+/* Remove with delete. */
+
+template <typename Type>
+inline void
+typed_delete_remove <Type>::remove (Type *p)
+{
+ delete p;
+}
+
+/* Helpful type for a no-op remove. */
+
+template <typename Type>
+struct typed_noop_remove
+{
+ static inline void remove (Type &);
+};
+
+
+/* Remove doing nothing. */
+
+template <typename Type>
+inline void
+typed_noop_remove <Type>::remove (Type &)
+{
+}
+
+/* Base traits for integer type Type, providing just the hash and
+ comparison functionality. */
+
+template <typename Type>
+struct int_hash_base : typed_noop_remove <Type>
+{
+ typedef Type value_type;
+ typedef Type compare_type;
+
+ static inline hashval_t hash (value_type);
+ static inline bool equal (value_type existing, value_type candidate);
+};
+
+template <typename Type>
+inline hashval_t
+int_hash_base <Type>::hash (value_type x)
+{
+ return x;
+}
+
+template <typename Type>
+inline bool
+int_hash_base <Type>::equal (value_type x, value_type y)
+{
+ return x == y;
+}
+
+/* Hasher for integer type Type in which Empty is a spare value that can be
+ used to mark empty slots. If Deleted != Empty then Deleted is another
+ spare value that can be used for deleted slots; if Deleted == Empty then
+ hash table entries cannot be deleted. */
+
+template <typename Type, Type Empty, Type Deleted = Empty>
+struct int_hash : int_hash_base <Type>
+{
+ typedef Type value_type;
+ typedef Type compare_type;
+
+ static inline void mark_deleted (Type &);
+ static const bool empty_zero_p = Empty == 0;
+ static inline void mark_empty (Type &);
+ static inline bool is_deleted (Type);
+ static inline bool is_empty (Type);
+};
+
+template <typename Type, Type Empty, Type Deleted>
+inline void
+int_hash <Type, Empty, Deleted>::mark_deleted (Type &x)
+{
+ gcc_assert (Empty != Deleted);
+ x = Deleted;
+}
+
+template <typename Type, Type Empty, Type Deleted>
+inline void
+int_hash <Type, Empty, Deleted>::mark_empty (Type &x)
+{
+ x = Empty;
+}
+
+template <typename Type, Type Empty, Type Deleted>
+inline bool
+int_hash <Type, Empty, Deleted>::is_deleted (Type x)
+{
+ return Empty != Deleted && x == Deleted;
+}
+
+template <typename Type, Type Empty, Type Deleted>
+inline bool
+int_hash <Type, Empty, Deleted>::is_empty (Type x)
+{
+ return x == Empty;
+}
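+
+/* For instance (an illustrative sketch, not part of this header):
+
+     int_hash<int, -1, -2>  // -1 marks empty slots, -2 deleted slots
+     int_hash<int, 0>       // 0 marks empty; entries cannot be deleted
+*/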
+
+/* Pointer hasher based on pointer equality. Other types of pointer hash
+ can inherit this and override the hash and equal functions with some
+ other form of equality (such as string equality). */
+
+template <typename Type>
+struct pointer_hash
+{
+ typedef Type *value_type;
+ typedef Type *compare_type;
+
+ static inline hashval_t hash (const value_type &);
+ static inline bool equal (const value_type &existing,
+ const compare_type &candidate);
+ static inline void mark_deleted (Type *&);
+ static const bool empty_zero_p = true;
+ static inline void mark_empty (Type *&);
+ static inline bool is_deleted (Type *);
+ static inline bool is_empty (Type *);
+};
+
+template <typename Type>
+inline hashval_t
+pointer_hash <Type>::hash (const value_type &candidate)
+{
+ /* This is a really poor hash function, but it is what the current code uses,
+ so I am reusing it to avoid an additional axis in testing. */
+ return (hashval_t) ((intptr_t)candidate >> 3);
+}
+
+template <typename Type>
+inline bool
+pointer_hash <Type>::equal (const value_type &existing,
+ const compare_type &candidate)
+{
+ return existing == candidate;
+}
+
+template <typename Type>
+inline void
+pointer_hash <Type>::mark_deleted (Type *&e)
+{
+ e = reinterpret_cast<Type *> (1);
+}
+
+template <typename Type>
+inline void
+pointer_hash <Type>::mark_empty (Type *&e)
+{
+ e = NULL;
+}
+
+template <typename Type>
+inline bool
+pointer_hash <Type>::is_deleted (Type *e)
+{
+ return e == reinterpret_cast<Type *> (1);
+}
+
+template <typename Type>
+inline bool
+pointer_hash <Type>::is_empty (Type *e)
+{
+ return e == NULL;
+}
+
+/* Hasher for "const char *" strings, using string rather than pointer
+ equality. */
+
+struct string_hash : pointer_hash <const char>
+{
+ static inline hashval_t hash (const char *);
+ static inline bool equal (const char *, const char *);
+};
+
+inline hashval_t
+string_hash::hash (const char *id)
+{
+ return htab_hash_string (id);
+}
+
+inline bool
+string_hash::equal (const char *id1, const char *id2)
+{
+ return strcmp (id1, id2) == 0;
+}
+
+/* Remover and marker for entries in gc memory. */
+
+template<typename T>
+struct ggc_remove
+{
+ static void remove (T &) {}
+
+ static void
+ ggc_mx (T &p)
+ {
+ extern void gt_ggc_mx (T &);
+ gt_ggc_mx (p);
+ }
+
+ /* Overridden in ggc_cache_remove. */
+ static void
+ ggc_maybe_mx (T &p)
+ {
+ ggc_mx (p);
+ }
+
+ static void
+ pch_nx (T &p)
+ {
+ extern void gt_pch_nx (T &);
+ gt_pch_nx (p);
+ }
+
+ static void
+ pch_nx (T &p, gt_pointer_operator op, void *cookie)
+ {
+ op (&p, NULL, cookie);
+ }
+};
+
+/* Remover and marker for "cache" entries in gc memory. These entries can
+ be deleted if there are no non-cache references to the data. */
+
+template<typename T>
+struct ggc_cache_remove : ggc_remove<T>
+{
+ /* Entries are weakly held because this is for caches. */
+ static void ggc_maybe_mx (T &) {}
+
+ static int
+ keep_cache_entry (T &e)
+ {
+ return ggc_marked_p (e) ? -1 : 0;
+ }
+};
+
+/* Traits for pointer elements that should not be freed when an element
+ is deleted. */
+
+template <typename T>
+struct nofree_ptr_hash : pointer_hash <T>, typed_noop_remove <T *> {};
+
+/* Traits for pointer elements that should be freed via free() when an
+ element is deleted. */
+
+template <typename T>
+struct free_ptr_hash : pointer_hash <T>, typed_free_remove <T> {};
+
+/* Traits for pointer elements that should be freed via the delete
+   operator when an element is deleted. */
+
+template <typename T>
+struct delete_ptr_hash : pointer_hash <T>, typed_delete_remove <T> {};
+
+/* Traits for elements that point to gc memory. The pointed-to data
+ must be kept across collections. */
+
+template <typename T>
+struct ggc_ptr_hash : pointer_hash <T>, ggc_remove <T *> {};
+
+/* Traits for elements that point to gc memory. The elements don't
+ in themselves keep the pointed-to data alive and they can be deleted
+ if the pointed-to data is going to be collected. */
+
+template <typename T>
+struct ggc_cache_ptr_hash : pointer_hash <T>, ggc_cache_remove <T *> {};
+
+/* Traits for string elements that should be freed when an element is
+ deleted. */
+
+struct free_string_hash : string_hash, typed_const_free_remove <char> {};
+
+/* Traits for string elements that should not be freed when an element
+ is deleted. */
+
+struct nofree_string_hash : string_hash, typed_noop_remove <const char *> {};
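+
+/* An illustrative sketch (with a hypothetical struct foo, not part of
+   this header) of picking removal behavior by trait:
+
+     hash_table<nofree_ptr_hash<foo> > borrowed;  // never freed
+     hash_table<free_ptr_hash<foo> > malloced;    // free () on removal
+     hash_table<delete_ptr_hash<foo> > newed;     // delete on removal
+*/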
+
+/* Traits for pairs of values, using the first to record empty and
+ deleted slots. */
+
+template <typename T1, typename T2>
+struct pair_hash
+{
+ typedef std::pair <typename T1::value_type,
+ typename T2::value_type> value_type;
+ typedef std::pair <typename T1::compare_type,
+ typename T2::compare_type> compare_type;
+
+ static inline hashval_t hash (const value_type &);
+ static inline bool equal (const value_type &, const compare_type &);
+ static inline void remove (value_type &);
+ static inline void mark_deleted (value_type &);
+ static const bool empty_zero_p = T1::empty_zero_p;
+ static inline void mark_empty (value_type &);
+ static inline bool is_deleted (const value_type &);
+ static inline bool is_empty (const value_type &);
+};
+
+template <typename T1, typename T2>
+inline hashval_t
+pair_hash <T1, T2>::hash (const value_type &x)
+{
+ return iterative_hash_hashval_t (T1::hash (x.first), T2::hash (x.second));
+}
+
+template <typename T1, typename T2>
+inline bool
+pair_hash <T1, T2>::equal (const value_type &x, const compare_type &y)
+{
+ return T1::equal (x.first, y.first) && T2::equal (x.second, y.second);
+}
+
+template <typename T1, typename T2>
+inline void
+pair_hash <T1, T2>::remove (value_type &x)
+{
+ T1::remove (x.first);
+ T2::remove (x.second);
+}
+
+template <typename T1, typename T2>
+inline void
+pair_hash <T1, T2>::mark_deleted (value_type &x)
+{
+ T1::mark_deleted (x.first);
+}
+
+template <typename T1, typename T2>
+inline void
+pair_hash <T1, T2>::mark_empty (value_type &x)
+{
+ T1::mark_empty (x.first);
+}
+
+template <typename T1, typename T2>
+inline bool
+pair_hash <T1, T2>::is_deleted (const value_type &x)
+{
+ return T1::is_deleted (x.first);
+}
+
+template <typename T1, typename T2>
+inline bool
+pair_hash <T1, T2>::is_empty (const value_type &x)
+{
+ return T1::is_empty (x.first);
+}
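+
+/* For example (an illustrative sketch with a hypothetical struct foo,
+   not part of this header), hashing (foo *, int) pairs, with the
+   pointer half marking empty and deleted slots:
+
+     typedef pair_hash<nofree_ptr_hash<foo>, int_hash<int, -1> > foo_int_hash;
+     hash_table<foo_int_hash> table (17);
+*/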
+
+/* Base traits for vectors, providing just the hash and comparison
+ functionality. Type gives the corresponding traits for the element
+ type. */
+
+template <typename Type>
+struct vec_hash_base
+{
+ typedef vec<typename Type::value_type> value_type;
+ typedef vec<typename Type::compare_type> compare_type;
+
+ static inline hashval_t hash (value_type);
+ static inline bool equal (value_type, compare_type);
+};
+
+template <typename Type>
+inline hashval_t
+vec_hash_base <Type>::hash (value_type x)
+{
+ inchash::hash hstate;
+ hstate.add_int (x.length ());
+ for (auto &value : x)
+ hstate.merge_hash (Type::hash (value));
+ return hstate.end ();
+}
+
+template <typename Type>
+inline bool
+vec_hash_base <Type>::equal (value_type x, compare_type y)
+{
+ if (x.length () != y.length ())
+ return false;
+ for (unsigned int i = 0; i < x.length (); ++i)
+ if (!Type::equal (x[i], y[i]))
+ return false;
+ return true;
+}
+
+/* Traits for vectors whose contents should be freed normally. */
+
+template <typename Type>
+struct vec_free_hash_base : vec_hash_base <Type>
+{
+ static void remove (typename vec_hash_base <Type>::value_type &);
+};
+
+template <typename Type>
+void
+vec_free_hash_base <Type>
+::remove (typename vec_hash_base <Type>::value_type &x)
+{
+  for (auto &value : x)
+    Type::remove (value);
+ x.release ();
+}
+
+template <typename T> struct default_hash_traits : T {};
+
+template <typename T>
+struct default_hash_traits <T *> : ggc_ptr_hash <T> {};
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hashtab.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hashtab.h
new file mode 100644
index 0000000..2d78282
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hashtab.h
@@ -0,0 +1,207 @@
+/* An expandable hash table datatype.
+ Copyright (C) 1999-2023 Free Software Foundation, Inc.
+ Contributed by Vladimir Makarov (vmakarov@cygnus.com).
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* This package implements basic hash table functionality. It is possible
+ to search for an entry, create an entry and destroy an entry.
+
+ Elements in the table are generic pointers.
+
+ The size of the table is not fixed; if the occupancy of the table
+ grows too high the hash table will be expanded.
+
+   The abstract data implementation is based on generalized Algorithm D
+   from Knuth's book "The art of computer programming".  The hash table
+   is expanded by creating a new hash table and transferring elements
+   from the old table to the new one. */
+
+#ifndef __HASHTAB_H__
+#define __HASHTAB_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "ansidecl.h"
+
+/* The type for a hash code. */
+typedef unsigned int hashval_t;
+
+/* Callback function pointer types. */
+
+/* Calculate hash of a table entry. */
+typedef hashval_t (*htab_hash) (const void *);
+
+/* Compare a table entry with a possible entry. The entry already in
+ the table always comes first, so the second element can be of a
+ different type (but in this case htab_find and htab_find_slot
+ cannot be used; instead the variants that accept a hash value
+ must be used). */
+typedef int (*htab_eq) (const void *, const void *);
+
+/* Cleanup function called whenever a live element is removed from
+ the hash table. */
+typedef void (*htab_del) (void *);
+
+/* Function called by htab_traverse for each live element. The first
+ arg is the slot of the element (which can be passed to htab_clear_slot
+ if desired), the second arg is the auxiliary pointer handed to
+ htab_traverse. Return 1 to continue scan, 0 to stop. */
+typedef int (*htab_trav) (void **, void *);
+
+/* Memory-allocation function, with the same functionality as calloc().
+   Iff it returns NULL, the hash table implementation will pass an error
+   code back to the user; if your code doesn't handle errors, it is best
+   to use xcalloc instead. */
+typedef void *(*htab_alloc) (size_t, size_t);
+
+/* We also need a free() routine. */
+typedef void (*htab_free) (void *);
+
+/* Memory allocation and deallocation; variants which take an extra
+ argument. */
+typedef void *(*htab_alloc_with_arg) (void *, size_t, size_t);
+typedef void (*htab_free_with_arg) (void *, void *);
+
+/* This macro defines the reserved value for an empty table entry. */
+
+#define HTAB_EMPTY_ENTRY ((void *) 0)
+
+/* This macro defines the reserved value for a table entry that
+   contained a deleted element. */
+
+#define HTAB_DELETED_ENTRY ((void *) 1)
+
+/* Hash tables are of the following type.  Knowledge of the structure
+   (implementation) of this type is not needed to use the hash tables.
+   All work with a hash table should be done only through the functions
+   mentioned below.  The size of this structure is subject to change. */
+
+struct htab {
+ /* Pointer to hash function. */
+ htab_hash hash_f;
+
+ /* Pointer to comparison function. */
+ htab_eq eq_f;
+
+ /* Pointer to cleanup function. */
+ htab_del del_f;
+
+ /* Table itself. */
+ void **entries;
+
+ /* Current size (in entries) of the hash table. */
+ size_t size;
+
+ /* Current number of elements including also deleted elements. */
+ size_t n_elements;
+
+ /* Current number of deleted elements in the table. */
+ size_t n_deleted;
+
+  /* The following member is used for debugging.  Its value is the
+     number of calls to `htab_find_slot' made on this hash table. */
+ unsigned int searches;
+
+  /* The following member is used for debugging.  Its value is the
+     number of collisions resolved over the lifetime of the hash table. */
+ unsigned int collisions;
+
+ /* Pointers to allocate/free functions. */
+ htab_alloc alloc_f;
+ htab_free free_f;
+
+ /* Alternate allocate/free functions, which take an extra argument. */
+ void *alloc_arg;
+ htab_alloc_with_arg alloc_with_arg_f;
+ htab_free_with_arg free_with_arg_f;
+
+ /* Current size (in entries) of the hash table, as an index into the
+ table of primes. */
+ unsigned int size_prime_index;
+};
+
+typedef struct htab *htab_t;
+
+/* An enum saying whether we insert into the hash table or not. */
+enum insert_option {NO_INSERT, INSERT};
+
+/* The prototypes of the package functions. */
+
+extern htab_t htab_create_alloc (size_t, htab_hash,
+ htab_eq, htab_del,
+ htab_alloc, htab_free);
+
+extern htab_t htab_create_alloc_ex (size_t, htab_hash,
+ htab_eq, htab_del,
+ void *, htab_alloc_with_arg,
+ htab_free_with_arg);
+
+extern htab_t htab_create_typed_alloc (size_t, htab_hash, htab_eq, htab_del,
+ htab_alloc, htab_alloc, htab_free);
+
+/* Backward-compatibility functions. */
+extern htab_t htab_create (size_t, htab_hash, htab_eq, htab_del);
+extern htab_t htab_try_create (size_t, htab_hash, htab_eq, htab_del);
+
+extern void htab_set_functions_ex (htab_t, htab_hash,
+ htab_eq, htab_del,
+ void *, htab_alloc_with_arg,
+ htab_free_with_arg);
+
+extern void htab_delete (htab_t);
+extern void htab_empty (htab_t);
+
+extern void * htab_find (htab_t, const void *);
+extern void ** htab_find_slot (htab_t, const void *, enum insert_option);
+extern void * htab_find_with_hash (htab_t, const void *, hashval_t);
+extern void ** htab_find_slot_with_hash (htab_t, const void *,
+ hashval_t, enum insert_option);
+extern void htab_clear_slot (htab_t, void **);
+extern void htab_remove_elt (htab_t, const void *);
+extern void htab_remove_elt_with_hash (htab_t, const void *, hashval_t);
+
+extern void htab_traverse (htab_t, htab_trav, void *);
+extern void htab_traverse_noresize (htab_t, htab_trav, void *);
+
+extern size_t htab_size (htab_t);
+extern size_t htab_elements (htab_t);
+extern double htab_collisions (htab_t);
+
+/* A hash function for pointers. */
+extern htab_hash htab_hash_pointer;
+
+/* An equality function for pointers. */
+extern htab_eq htab_eq_pointer;
+
+/* A hash function for null-terminated strings. */
+extern hashval_t htab_hash_string (const void *);
+
+/* An equality function for null-terminated strings. */
+extern int htab_eq_string (const void *, const void *);
+
+/* An iterative hash function for arbitrary data. */
+extern hashval_t iterative_hash (const void *, size_t, hashval_t);
+/* Shorthand for hashing something with an intrinsic size. */
+#define iterative_hash_object(OB,INIT) iterative_hash (&OB, sizeof (OB), INIT)
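+
+/* A minimal usage sketch (illustrative only, not part of this header;
+   xstrdup is from libiberty):
+
+     htab_t t = htab_create (31, htab_hash_string, htab_eq_string, free);
+     void **slot = htab_find_slot (t, "key", INSERT);
+     if (slot != NULL && *slot == HTAB_EMPTY_ENTRY)
+       *slot = xstrdup ("key");
+     htab_delete (t);
+*/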
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __HASHTAB_H__ */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/highlev-plugin-common.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/highlev-plugin-common.h
new file mode 100644
index 0000000..653b536
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/highlev-plugin-common.h
@@ -0,0 +1,33 @@
+/* Interface for high-level plugins in GCC - Parts common between GCC,
+ ICI and high-level plugins.
+
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+
+ Contributed by INRIA.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef HIGHLEV_PLUGIN_COMMON_H
+#define HIGHLEV_PLUGIN_COMMON_H
+
+/* Return codes for invoke_plugin_callbacks / call_plugin_event. */
+#define PLUGEVT_SUCCESS 0
+#define PLUGEVT_NO_EVENTS 1
+#define PLUGEVT_NO_SUCH_EVENT 2
+#define PLUGEVT_NO_CALLBACK 3
+
+#endif /* HIGHLEV_PLUGIN_COMMON_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hooks.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hooks.h
new file mode 100644
index 0000000..6aa01fc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hooks.h
@@ -0,0 +1,137 @@
+/* General-purpose hooks.
+ Copyright (C) 2002-2023 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding! */
+
+#ifndef GCC_HOOKS_H
+#define GCC_HOOKS_H
+
+
+extern bool hook_bool_void_false (void);
+extern bool hook_bool_void_true (void);
+extern bool hook_bool_bool_false (bool);
+extern bool hook_bool_bool_gcc_optionsp_false (bool, struct gcc_options *);
+extern bool hook_bool_const_int_const_int_true (const int, const int);
+extern bool hook_bool_mode_false (machine_mode);
+extern bool hook_bool_mode_true (machine_mode);
+extern bool hook_bool_mode_mode_true (machine_mode, machine_mode);
+extern bool hook_bool_mode_const_rtx_false (machine_mode, const_rtx);
+extern bool hook_bool_mode_const_rtx_true (machine_mode, const_rtx);
+extern bool hook_bool_mode_rtx_false (machine_mode, rtx);
+extern bool hook_bool_mode_rtx_true (machine_mode, rtx);
+extern bool hook_bool_const_rtx_insn_const_rtx_insn_true (const rtx_insn *,
+ const rtx_insn *);
+extern bool hook_bool_mode_uhwi_false (machine_mode,
+ unsigned HOST_WIDE_INT);
+extern bool hook_bool_puint64_puint64_true (poly_uint64, poly_uint64);
+extern bool hook_bool_uint_uint_mode_false (unsigned int, unsigned int,
+ machine_mode);
+extern bool hook_bool_uint_mode_true (unsigned int, machine_mode);
+extern bool hook_bool_tree_false (tree);
+extern bool hook_bool_const_tree_false (const_tree);
+extern bool hook_bool_const_tree_const_tree_true (const_tree, const_tree);
+extern bool hook_bool_tree_true (tree);
+extern bool hook_bool_const_tree_true (const_tree);
+extern bool hook_bool_gsiptr_false (gimple_stmt_iterator *);
+extern bool hook_bool_const_tree_hwi_hwi_const_tree_false (const_tree,
+ HOST_WIDE_INT,
+ HOST_WIDE_INT,
+ const_tree);
+extern bool hook_bool_const_tree_hwi_hwi_const_tree_true (const_tree,
+ HOST_WIDE_INT,
+ HOST_WIDE_INT,
+ const_tree);
+extern bool hook_bool_rtx_insn_true (rtx_insn *);
+extern bool hook_bool_rtx_false (rtx);
+extern bool hook_bool_rtx_insn_int_false (rtx_insn *, int);
+extern bool hook_bool_uintp_uintp_false (unsigned int *, unsigned int *);
+extern bool hook_bool_reg_class_t_false (reg_class_t regclass);
+extern bool hook_bool_mode_mode_reg_class_t_true (machine_mode, machine_mode,
+ reg_class_t);
+extern bool hook_bool_mode_reg_class_t_reg_class_t_false (machine_mode,
+ reg_class_t,
+ reg_class_t);
+extern bool hook_bool_rtx_mode_int_int_intp_bool_false (rtx, machine_mode,
+ int, int, int *, bool);
+extern bool hook_bool_tree_tree_false (tree, tree);
+extern bool hook_bool_tree_tree_true (tree, tree);
+extern bool hook_bool_tree_bool_false (tree, bool);
+extern bool hook_bool_wint_wint_uint_bool_true (const widest_int &,
+ const widest_int &,
+ unsigned int, bool);
+
+extern void hook_void_void (void);
+extern void hook_void_constcharptr (const char *);
+extern void hook_void_rtx_insn_int (rtx_insn *, int);
+extern void hook_void_FILEptr_constcharptr (FILE *, const char *);
+extern void hook_void_FILEptr_constcharptr_const_tree (FILE *, const char *,
+ const_tree);
+extern bool hook_bool_FILEptr_rtx_false (FILE *, rtx);
+extern void hook_void_rtx_tree (rtx, tree);
+extern void hook_void_FILEptr_tree (FILE *, tree);
+extern void hook_void_tree (tree);
+extern void hook_void_tree_treeptr (tree, tree *);
+extern void hook_void_int_int (int, int);
+extern void hook_void_gcc_optionsp (struct gcc_options *);
+extern bool hook_bool_uint_true (unsigned int);
+extern bool hook_bool_uint_uintp_false (unsigned int, unsigned int *);
+
+extern int hook_int_uint_mode_1 (unsigned int, machine_mode);
+extern int hook_int_const_tree_0 (const_tree);
+extern int hook_int_const_tree_const_tree_1 (const_tree, const_tree);
+extern int hook_int_rtx_0 (rtx);
+extern int hook_int_rtx_1 (rtx);
+extern int hook_int_rtx_insn_0 (rtx_insn *);
+extern int hook_int_rtx_insn_unreachable (rtx_insn *);
+extern int hook_int_rtx_bool_0 (rtx, bool);
+extern int hook_int_rtx_mode_as_bool_0 (rtx, machine_mode, addr_space_t,
+ bool);
+
+extern HOST_WIDE_INT hook_hwi_void_0 (void);
+
+extern tree hook_tree_const_tree_null (const_tree);
+extern tree hook_tree_void_null (void);
+
+extern tree hook_tree_tree_bool_null (tree, bool);
+extern tree hook_tree_tree_tree_null (tree, tree);
+extern tree hook_tree_tree_tree_tree_null (tree, tree, tree);
+extern tree hook_tree_tree_int_treep_bool_null (tree, int, tree *, bool);
+extern tree hook_tree_treeptr_tree_tree_int_boolptr_null (tree *, tree, tree, int, bool *);
+
+extern unsigned hook_uint_void_0 (void);
+extern unsigned int hook_uint_mode_0 (machine_mode);
+
+extern bool default_can_output_mi_thunk_no_vcall (const_tree, HOST_WIDE_INT,
+ HOST_WIDE_INT, const_tree);
+
+extern rtx hook_rtx_rtx_identity (rtx);
+extern rtx hook_rtx_rtx_null (rtx);
+extern rtx hook_rtx_tree_int_null (tree, int);
+
+extern char *hook_charptr_void_null (void);
+extern const char *hook_constcharptr_void_null (void);
+extern const char *hook_constcharptr_const_tree_null (const_tree);
+extern const char *hook_constcharptr_const_rtx_insn_null (const rtx_insn *);
+extern const char *hook_constcharptr_const_tree_const_tree_null (const_tree, const_tree);
+extern const char *hook_constcharptr_int_const_tree_null (int, const_tree);
+extern const char *hook_constcharptr_int_const_tree_const_tree_null (int, const_tree, const_tree);
+
+extern opt_machine_mode hook_optmode_mode_uhwi_none (machine_mode,
+ unsigned HOST_WIDE_INT);
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hosthooks-def.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hosthooks-def.h
new file mode 100644
index 0000000..4768062
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hosthooks-def.h
@@ -0,0 +1,51 @@
+/* Default macros to initialize the host_hooks data structure.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_HOST_HOOKS_DEF_H
+#define GCC_HOST_HOOKS_DEF_H
+
+#include "hooks.h"
+
+#define HOST_HOOKS_EXTRA_SIGNALS hook_void_void
+#if HAVE_MMAP_FILE
+#define HOST_HOOKS_GT_PCH_GET_ADDRESS mmap_gt_pch_get_address
+#define HOST_HOOKS_GT_PCH_USE_ADDRESS mmap_gt_pch_use_address
+#else
+#define HOST_HOOKS_GT_PCH_GET_ADDRESS default_gt_pch_get_address
+#define HOST_HOOKS_GT_PCH_USE_ADDRESS default_gt_pch_use_address
+#endif
+
+#define HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY \
+ default_gt_pch_alloc_granularity
+
+extern void* default_gt_pch_get_address (size_t, int);
+extern int default_gt_pch_use_address (void *&, size_t, int, size_t);
+extern size_t default_gt_pch_alloc_granularity (void);
+extern void* mmap_gt_pch_get_address (size_t, int);
+extern int mmap_gt_pch_use_address (void *&, size_t, int, size_t);
+
+/* The structure is defined in hosthooks.h. */
+#define HOST_HOOKS_INITIALIZER { \
+ HOST_HOOKS_EXTRA_SIGNALS, \
+ HOST_HOOKS_GT_PCH_GET_ADDRESS, \
+ HOST_HOOKS_GT_PCH_USE_ADDRESS, \
+ HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY \
+}
+
+#endif /* GCC_HOST_HOOKS_DEF_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hosthooks.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hosthooks.h
new file mode 100644
index 0000000..f8ba635
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hosthooks.h
@@ -0,0 +1,50 @@
+/* The host_hooks data structure.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_HOST_HOOKS_H
+#define GCC_HOST_HOOKS_H
+
+struct host_hooks
+{
+ void (*extra_signals) (void);
+
+ /* Identify an address that's likely to be free in a subsequent invocation
+ of the compiler. The area should be able to hold SIZE bytes. FD is an
+ open file descriptor if the host would like to probe with mmap. */
+ void * (*gt_pch_get_address) (size_t size, int fd);
+
+ /* ADDR is an address returned by gt_pch_get_address. Attempt to allocate
+ SIZE bytes at the same address (preferably) or some other address
+ and load it with the data from FD at OFFSET. Return -1 if we couldn't
+ allocate memory, otherwise update ADDR to the actual address where it got
+ allocated, return 0 if the memory is allocated but the data not loaded,
+ return 1 if done. */
+ int (*gt_pch_use_address) (void *&addr, size_t size, int fd, size_t offset);
+
+ /* Return the alignment required for allocating virtual memory. Usually
+ this is the same as pagesize. */
+ size_t (*gt_pch_alloc_granularity) (void);
+
+ /* Whenever you add entries here, make sure you adjust hosthooks-def.h. */
+};
+
+/* Each host provides its own. */
+extern const struct host_hooks host_hooks;
+
+#endif /* GCC_HOST_HOOKS_H */
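The -1/0/1 protocol of gt_pch_use_address is easiest to see in code. Below is a hedged sketch of the documented contract only, not GCC's actual default implementation; plain malloc/lseek/read stand in for whatever mapping strategy a real host uses:

#include <stdlib.h>
#include <unistd.h>

static int
sketch_gt_pch_use_address (void *&addr, size_t size, int fd, size_t offset)
{
  void *p = malloc (size);      /* any address is acceptable in this sketch */
  if (p == NULL)
    return -1;                  /* couldn't allocate memory */
  addr = p;                     /* report where the memory actually landed */
  if (lseek (fd, (off_t) offset, SEEK_SET) < 0
      || read (fd, addr, size) != (ssize_t) size)
    return 0;                   /* allocated, but data not loaded */
  return 1;                     /* done */
}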
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hw-doloop.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hw-doloop.h
new file mode 100644
index 0000000..b683cad
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hw-doloop.h
@@ -0,0 +1,160 @@
+/* Code to analyze doloop loops in order for targets to perform late
+ optimizations converting doloops to other forms of hardware loops.
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_HW_DOLOOP_H
+#define GCC_HW_DOLOOP_H
+
+/* We need to keep a vector of loops.  */
+typedef struct hwloop_info_d *hwloop_info;
+
+/* Information about a loop we have found (or are in the process of
+ finding). */
+struct GTY (()) hwloop_info_d
+{
+ /* loop number, for dumps */
+ int loop_no;
+
+ /* Next loop in the graph. */
+ hwloop_info next;
+
+ /* Vector of blocks only within the loop, including those within
+ inner loops. */
+ vec<basic_block> blocks;
+
+ /* Same information in a bitmap. */
+ bitmap block_bitmap;
+
+ /* Vector of inner loops within this loop. Includes loops of every
+ nesting level. */
+ vec<hwloop_info> loops;
+
+ /* All edges that jump into the loop. */
+ vec<edge, va_gc> *incoming;
+
+ /* The ports currently using this infrastructure can typically
+ handle two cases: all incoming edges have the same destination
+ block, or all incoming edges have the same source block. These
+ two members are set to the common source or destination we found,
+ or NULL if different blocks were found. If both are NULL the
+ loop can't be optimized. */
+ basic_block incoming_src;
+ basic_block incoming_dest;
+
+ /* First block in the loop. This is the one branched to by the loop_end
+ insn. */
+ basic_block head;
+
+ /* Last block in the loop (the one with the loop_end insn). */
+ basic_block tail;
+
+ /* The successor block of the loop. This is the one the loop_end insn
+ falls into. */
+ basic_block successor;
+
+ /* The last instruction in the tail. */
+ rtx_insn *last_insn;
+
+ /* The loop_end insn. */
+ rtx_insn *loop_end;
+
+ /* The iteration register. */
+ rtx iter_reg;
+
+ /* The new label placed at the beginning of the loop. */
+ rtx_insn *start_label;
+
+ /* The new label placed at the end of the loop. */
+ rtx end_label;
+
+ /* The length of the loop. */
+ int length;
+
+ /* The nesting depth of the loop. Innermost loops are given a depth
+ of 1. Only successfully optimized doloops are counted; if an inner
+ loop was marked as bad, it does not increase the depth of its parent
+ loop.
+ This value is valid when the target's optimize function is called. */
+ int depth;
+
+ /* True if we can't optimize this loop. */
+ bool bad;
+
+ /* True if we have visited this loop during the optimization phase. */
+ bool visited;
+
+ /* The following values are collected before calling the target's optimize
+ function and are not valid earlier. */
+
+ /* Record information about control flow: whether the loop has calls
+ or asm statements, whether it has edges that jump out of the loop,
+ or edges that jump within the loop. */
+ bool has_call;
+ bool has_asm;
+ bool jumps_within;
+ bool jumps_outof;
+
+ /* True if there is an instruction other than the doloop_end which uses the
+ iteration register. */
+ bool iter_reg_used;
+ /* True if the iteration register lives past the doloop instruction. */
+ bool iter_reg_used_outside;
+
+ /* Hard registers set at any point in the loop, except for the loop counter
+ register's set in the doloop_end instruction. */
+ HARD_REG_SET regs_set_in_loop;
+};
+
+/* A set of hooks to be defined by a target that wants to use the reorg_loops
+ functionality.
+
+ reorg_loops is intended to handle cases where special hardware loop
+ setup instructions are required before the loop, for example to set
+ up loop counter registers that are not exposed to the register
+ allocator, or to inform the hardware about loop bounds.
+
+ reorg_loops performs analysis to discover loop_end patterns created
+ by the earlier loop-doloop pass, and sets up a hwloop_info
+ structure for each such insn it finds. It then tries to discover
+ the basic blocks containing the loop by tracking the lifetime of
+ the iteration register.
+
+ If a valid loop can't be found, the FAIL function is called;
+ otherwise the OPT function is called for each loop, visiting
+ innermost loops first and ascending. */
+struct hw_doloop_hooks
+{
+ /* Examine INSN. If it is a suitable doloop_end pattern, return the
+ iteration register, which should be a single hard register.
+ Otherwise, return NULL_RTX. */
+ rtx (*end_pattern_reg) (rtx_insn *insn);
+ /* Optimize LOOP. The target should perform any additional analysis
+ (e.g. checking that the loop isn't too long), and then perform
+ its transformations. Return true if successful, false if the
+ loop should be marked bad. If it returns false, the FAIL
+ function is called. */
+ bool (*opt) (hwloop_info loop);
+ /* Handle a loop that was marked bad for any reason. This could be
+ used to split the doloop_end pattern. */
+ void (*fail) (hwloop_info loop);
+};
+
+extern void reorg_loops (bool, struct hw_doloop_hooks *);
+
+#endif /* GCC_HW_DOLOOP_H */
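To make the hook protocol concrete, here is a sketch of how a target might wire up reorg_loops from its machine-dependent reorg pass. It is loosely modeled on existing ports; every function body is an illustrative stub, not a real port:

static rtx
sketch_end_pattern_reg (rtx_insn *insn)
{
  /* Recognize this port's doloop_end pattern and return the hard
     register it decrements; NULL_RTX rejects the insn.  */
  if (!JUMP_P (insn))
    return NULL_RTX;
  /* ... port-specific pattern matching elided ... */
  return NULL_RTX;
}

static bool
sketch_hwloop_optimize (hwloop_info loop)
{
  /* Reject loops the hardware can't handle; returning false routes
     the loop to the fail hook below.  */
  if (loop->has_call || loop->jumps_outof)
    return false;
  /* ... emit the hardware loop setup before loop->start_label ... */
  return true;
}

static void
sketch_hwloop_fail (hwloop_info loop)
{
  /* Typically splits the doloop_end back into decrement-and-branch.  */
}

static struct hw_doloop_hooks sketch_doloop_hooks =
{
  sketch_end_pattern_reg,
  sketch_hwloop_optimize,
  sketch_hwloop_fail
};

/* From the port's machine-dependent reorg:
     reorg_loops (true, &sketch_doloop_hooks);  */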
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hwint.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hwint.h
new file mode 100644
index 0000000..427e9b9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/hwint.h
@@ -0,0 +1,378 @@
+/* HOST_WIDE_INT definitions for the GNU compiler.
+ Copyright (C) 1998-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ Provide definitions for macros which depend on HOST_BITS_PER_INT
+ and HOST_BITS_PER_LONG. */
+
+#ifndef GCC_HWINT_H
+#define GCC_HWINT_H
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR CHAR_BIT
+#define HOST_BITS_PER_SHORT (CHAR_BIT * SIZEOF_SHORT)
+#define HOST_BITS_PER_INT (CHAR_BIT * SIZEOF_INT)
+#define HOST_BITS_PER_LONG (CHAR_BIT * SIZEOF_LONG)
+#define HOST_BITS_PER_PTR (CHAR_BIT * SIZEOF_VOID_P)
+
+/* The string that should be inserted into a printf style format to
+ indicate a "long" operand. */
+#ifndef HOST_LONG_FORMAT
+#define HOST_LONG_FORMAT "l"
+#endif
+
+/* The string that should be inserted into a printf style format to
+ indicate a "long long" operand. */
+#ifndef HOST_LONG_LONG_FORMAT
+#define HOST_LONG_LONG_FORMAT "ll"
+#endif
+
+/* If HAVE_LONG_LONG and SIZEOF_LONG_LONG aren't defined, but
+ GCC_VERSION >= 3000, assume this is the second or later stage of a
+ bootstrap, we do have long long, and it's 64 bits. (This is
+ required by C99; we do have some ports that violate that assumption
+ but they're all cross-compile-only.) Just in case, force a
+ constraint violation if that assumption is incorrect. */
+#if !defined HAVE_LONG_LONG
+# if GCC_VERSION >= 3000
+# define HAVE_LONG_LONG 1
+# define SIZEOF_LONG_LONG 8
+extern char sizeof_long_long_must_be_8[sizeof (long long) == 8 ? 1 : -1];
+# endif
+#endif
+
+#ifdef HAVE_LONG_LONG
+# define HOST_BITS_PER_LONGLONG (CHAR_BIT * SIZEOF_LONG_LONG)
+#endif
+
+/* Set HOST_WIDE_INT; this should always be 64 bits.
+ The underlying type is matched to that of int64_t and assumed
+ to be either long or long long. */
+
+#define HOST_BITS_PER_WIDE_INT 64
+#if INT64_T_IS_LONG
+# define HOST_WIDE_INT long
+# define HOST_WIDE_INT_C(X) X ## L
+#else
+# if HOST_BITS_PER_LONGLONG == 64
+# define HOST_WIDE_INT long long
+# define HOST_WIDE_INT_C(X) X ## LL
+# else
+ #error "Unable to find a suitable type for HOST_WIDE_INT"
+# endif
+#endif
+
+#define HOST_WIDE_INT_UC(X) HOST_WIDE_INT_C (X ## U)
+#define HOST_WIDE_INT_0 HOST_WIDE_INT_C (0)
+#define HOST_WIDE_INT_0U HOST_WIDE_INT_UC (0)
+#define HOST_WIDE_INT_1 HOST_WIDE_INT_C (1)
+#define HOST_WIDE_INT_1U HOST_WIDE_INT_UC (1)
+#define HOST_WIDE_INT_M1 HOST_WIDE_INT_C (-1)
+#define HOST_WIDE_INT_M1U HOST_WIDE_INT_UC (-1)
+
+/* This is a magic identifier which allows GCC to figure out the type
+ of HOST_WIDE_INT for %wd specifier checks. You must issue this
+ typedef before using the __asm_fprintf__ format attribute. */
+typedef HOST_WIDE_INT __gcc_host_wide_int__;
+
+/* Provide C99 <inttypes.h> style format definitions for 64 bits. */
+#ifndef HAVE_INTTYPES_H
+#if INT64_T_IS_LONG
+# define GCC_PRI64 HOST_LONG_FORMAT
+#else
+# define GCC_PRI64 HOST_LONG_LONG_FORMAT
+#endif
+#undef PRId64
+#define PRId64 GCC_PRI64 "d"
+#undef PRIi64
+#define PRIi64 GCC_PRI64 "i"
+#undef PRIo64
+#define PRIo64 GCC_PRI64 "o"
+#undef PRIu64
+#define PRIu64 GCC_PRI64 "u"
+#undef PRIx64
+#define PRIx64 GCC_PRI64 "x"
+#undef PRIX64
+#define PRIX64 GCC_PRI64 "X"
+#endif
+
+/* Various printf format strings for HOST_WIDE_INT. */
+
+#if INT64_T_IS_LONG
+# define HOST_WIDE_INT_PRINT HOST_LONG_FORMAT
+# define HOST_WIDE_INT_PRINT_C "L"
+#else
+# define HOST_WIDE_INT_PRINT HOST_LONG_LONG_FORMAT
+# define HOST_WIDE_INT_PRINT_C "LL"
+#endif
+
+#define HOST_WIDE_INT_PRINT_DEC "%" PRId64
+#define HOST_WIDE_INT_PRINT_DEC_C "%" PRId64 HOST_WIDE_INT_PRINT_C
+#define HOST_WIDE_INT_PRINT_UNSIGNED "%" PRIu64
+#define HOST_WIDE_INT_PRINT_HEX "%#" PRIx64
+#define HOST_WIDE_INT_PRINT_HEX_PURE "%" PRIx64
+#define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%" PRIx64 "%016" PRIx64
+#define HOST_WIDE_INT_PRINT_PADDED_HEX "%016" PRIx64
+
+/* Define HOST_WIDEST_FAST_INT to the widest integer type supported
+ efficiently in hardware. (That is, the widest integer type that fits
+ in a hardware register.) Normally this is "long" but on some hosts it
+ should be "long long" or "__int64". This is no convenient way to
+ autodetect this, so such systems must set a flag in config.host; see there
+ for details. */
+
+#ifdef USE_LONG_LONG_FOR_WIDEST_FAST_INT
+# ifdef HAVE_LONG_LONG
+# define HOST_WIDEST_FAST_INT long long
+# define HOST_BITS_PER_WIDEST_FAST_INT HOST_BITS_PER_LONGLONG
+# else
+# error "Your host said it wanted to use long long but that does not exist"
+# endif
+#else
+# define HOST_WIDEST_FAST_INT long
+# define HOST_BITS_PER_WIDEST_FAST_INT HOST_BITS_PER_LONG
+#endif
+
+/* Inline functions operating on HOST_WIDE_INT. */
+
+/* Return X with all but the lowest bit masked off. */
+
+inline unsigned HOST_WIDE_INT
+least_bit_hwi (unsigned HOST_WIDE_INT x)
+{
+ return (x & -x);
+}
+
+/* True if X is zero or a power of two. */
+
+inline bool
+pow2_or_zerop (unsigned HOST_WIDE_INT x)
+{
+ return least_bit_hwi (x) == x;
+}
+
+/* True if X is a power of two. */
+
+inline bool
+pow2p_hwi (unsigned HOST_WIDE_INT x)
+{
+ return x && pow2_or_zerop (x);
+}
+
+#if GCC_VERSION < 3004
+
+extern int clz_hwi (unsigned HOST_WIDE_INT x);
+extern int ctz_hwi (unsigned HOST_WIDE_INT x);
+extern int ffs_hwi (unsigned HOST_WIDE_INT x);
+
+/* Return the number of set bits in X. */
+extern int popcount_hwi (unsigned HOST_WIDE_INT x);
+
+/* Return log2, or -1 if not exact. */
+extern int exact_log2 (unsigned HOST_WIDE_INT);
+
+/* Return floor of log2, with -1 for zero. */
+extern int floor_log2 (unsigned HOST_WIDE_INT);
+
+/* Return the smallest n such that 2**n >= X. */
+extern int ceil_log2 (unsigned HOST_WIDE_INT);
+
+#else /* GCC_VERSION >= 3004 */
+
+/* For convenience, define 0 -> word_size. */
+inline int
+clz_hwi (unsigned HOST_WIDE_INT x)
+{
+ if (x == 0)
+ return HOST_BITS_PER_WIDE_INT;
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+ return __builtin_clzl (x);
+# elif HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONGLONG
+ return __builtin_clzll (x);
+# else
+ return __builtin_clz (x);
+# endif
+}
+
+inline int
+ctz_hwi (unsigned HOST_WIDE_INT x)
+{
+ if (x == 0)
+ return HOST_BITS_PER_WIDE_INT;
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+ return __builtin_ctzl (x);
+# elif HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONGLONG
+ return __builtin_ctzll (x);
+# else
+ return __builtin_ctz (x);
+# endif
+}
+
+inline int
+ffs_hwi (unsigned HOST_WIDE_INT x)
+{
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+ return __builtin_ffsl (x);
+# elif HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONGLONG
+ return __builtin_ffsll (x);
+# else
+ return __builtin_ffs (x);
+# endif
+}
+
+inline int
+popcount_hwi (unsigned HOST_WIDE_INT x)
+{
+# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
+ return __builtin_popcountl (x);
+# elif HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONGLONG
+ return __builtin_popcountll (x);
+# else
+ return __builtin_popcount (x);
+# endif
+}
+
+inline int
+floor_log2 (unsigned HOST_WIDE_INT x)
+{
+ return HOST_BITS_PER_WIDE_INT - 1 - clz_hwi (x);
+}
+
+inline int
+ceil_log2 (unsigned HOST_WIDE_INT x)
+{
+ return x == 0 ? 0 : floor_log2 (x - 1) + 1;
+}
+
+inline int
+exact_log2 (unsigned HOST_WIDE_INT x)
+{
+ return pow2p_hwi (x) ? ctz_hwi (x) : -1;
+}
+
+#endif /* GCC_VERSION >= 3004 */
+
+#define HOST_WIDE_INT_MIN (HOST_WIDE_INT) \
+ (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1))
+#define HOST_WIDE_INT_MAX (~(HOST_WIDE_INT_MIN))
+
+extern HOST_WIDE_INT abs_hwi (HOST_WIDE_INT);
+extern unsigned HOST_WIDE_INT absu_hwi (HOST_WIDE_INT);
+extern HOST_WIDE_INT gcd (HOST_WIDE_INT, HOST_WIDE_INT);
+extern HOST_WIDE_INT pos_mul_hwi (HOST_WIDE_INT, HOST_WIDE_INT);
+extern HOST_WIDE_INT mul_hwi (HOST_WIDE_INT, HOST_WIDE_INT);
+extern HOST_WIDE_INT least_common_multiple (HOST_WIDE_INT, HOST_WIDE_INT);
+
+/* Like ctz_hwi, except returns -1 when x == 0. */
+
+inline int
+ctz_or_zero (unsigned HOST_WIDE_INT x)
+{
+ return ffs_hwi (x) - 1;
+}
+
+/* Sign extend SRC starting from PREC. */
+
+inline HOST_WIDE_INT
+sext_hwi (HOST_WIDE_INT src, unsigned int prec)
+{
+ if (prec == HOST_BITS_PER_WIDE_INT)
+ return src;
+ else
+#if defined (__GNUC__)
+ {
+ /* Take the faster path if the implementation-defined bits it's relying
+ on are implemented the way we expect them to be. Namely, conversion
+ from unsigned to signed preserves bit pattern, and right shift of
+ a signed value propagates the sign bit.
+ We have to convert from signed to unsigned and back, because when left
+ shifting signed values, any overflow is undefined behavior. */
+ gcc_checking_assert (prec < HOST_BITS_PER_WIDE_INT);
+ int shift = HOST_BITS_PER_WIDE_INT - prec;
+ return ((HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) src << shift)) >> shift;
+ }
+#else
+ {
+ /* Fall back to the slower, well defined path otherwise. */
+ gcc_checking_assert (prec < HOST_BITS_PER_WIDE_INT);
+ HOST_WIDE_INT sign_mask = HOST_WIDE_INT_1 << (prec - 1);
+ HOST_WIDE_INT value_mask = (HOST_WIDE_INT_1U << prec) - HOST_WIDE_INT_1U;
+ return (((src & value_mask) ^ sign_mask) - sign_mask);
+ }
+#endif
+}
+
+/* Zero extend SRC starting from PREC. */
+inline unsigned HOST_WIDE_INT
+zext_hwi (unsigned HOST_WIDE_INT src, unsigned int prec)
+{
+ if (prec == HOST_BITS_PER_WIDE_INT)
+ return src;
+ else
+ {
+ gcc_checking_assert (prec < HOST_BITS_PER_WIDE_INT);
+ return src & ((HOST_WIDE_INT_1U << prec) - 1);
+ }
+}
+
+/* Compute the absolute value of X. */
+
+inline HOST_WIDE_INT
+abs_hwi (HOST_WIDE_INT x)
+{
+ gcc_checking_assert (x != HOST_WIDE_INT_MIN);
+ return x >= 0 ? x : -x;
+}
+
+/* Compute the absolute value of X as an unsigned type. */
+
+inline unsigned HOST_WIDE_INT
+absu_hwi (HOST_WIDE_INT x)
+{
+ return x >= 0 ? (unsigned HOST_WIDE_INT)x : -(unsigned HOST_WIDE_INT)x;
+}
+
+/* Compute the sum of signed A and B and indicate in *OVERFLOW whether
+ that operation overflowed. */
+
+inline HOST_WIDE_INT
+add_hwi (HOST_WIDE_INT a, HOST_WIDE_INT b, bool *overflow)
+{
+#if GCC_VERSION < 11000
+ unsigned HOST_WIDE_INT result = a + (unsigned HOST_WIDE_INT)b;
+ if ((((result ^ a) & (result ^ b))
+ >> (HOST_BITS_PER_WIDE_INT - 1)) & 1)
+ *overflow = true;
+ else
+ *overflow = false;
+ return result;
+#else
+ HOST_WIDE_INT result;
+ *overflow = __builtin_add_overflow (a, b, &result);
+ return result;
+#endif
+}
+
+/* Compute the product of signed A and B and indicate in *OVERFLOW whether
+ that operation overflowed. */
+
+inline HOST_WIDE_INT
+mul_hwi (HOST_WIDE_INT a, HOST_WIDE_INT b, bool *overflow)
+{
+#if GCC_VERSION < 11000
+ unsigned HOST_WIDE_INT result = a * (unsigned HOST_WIDE_INT)b;
+ if ((a == -1 && b == HOST_WIDE_INT_MIN)
+ || (a != 0 && (HOST_WIDE_INT)result / a != b))
+ *overflow = true;
+ else
+ *overflow = false;
+ return result;
+#else
+ HOST_WIDE_INT result;
+ *overflow = __builtin_mul_overflow (a, b, &result);
+ return result;
+#endif
+}
+
+#endif /* ! GCC_HWINT_H */
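A few worked examples of the inline helpers above, with the values they produce for the usual 64-bit HOST_WIDE_INT (the function and variable names are illustrative):

static void
hwint_examples (void)
{
  /* Sign-extend the low 8 bits: 0xff read as an 8-bit value is -1.  */
  HOST_WIDE_INT s = sext_hwi (0xff, 8);            /* s == -1 */

  /* Zero-extend keeps only the low PREC bits.  */
  unsigned HOST_WIDE_INT z = zext_hwi (0x1ff, 8);  /* z == 0xff */

  /* exact_log2 is -1 unless its argument is a power of two.  */
  int a = exact_log2 (64);                         /* a == 6 */
  int b = exact_log2 (96);                         /* b == -1 */

  /* least_bit_hwi isolates the lowest set bit.  */
  unsigned HOST_WIDE_INT l = least_bit_hwi (12);   /* l == 4 */

  /* add_hwi reports signed overflow through its last argument.  */
  bool ovf;
  HOST_WIDE_INT sum = add_hwi (HOST_WIDE_INT_MAX, 1, &ovf); /* ovf == true */
}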
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ifcvt.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ifcvt.h
new file mode 100644
index 0000000..be1385a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ifcvt.h
@@ -0,0 +1,121 @@
+/* If-conversion header file.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_IFCVT_H
+#define GCC_IFCVT_H
+
+/* Structure to group all of the information to process IF-THEN and
+ IF-THEN-ELSE blocks for the conditional execution support. */
+
+struct ce_if_block
+{
+ basic_block test_bb; /* First test block. */
+ basic_block then_bb; /* THEN block. */
+ basic_block else_bb; /* ELSE block or NULL. */
+ basic_block join_bb; /* Join THEN/ELSE blocks. */
+ basic_block last_test_bb; /* Last bb to hold && or || tests. */
+ int num_multiple_test_blocks; /* # of && and || basic blocks. */
+ int num_and_and_blocks; /* # of && blocks. */
+ int num_or_or_blocks; /* # of || blocks. */
+ int num_multiple_test_insns; /* # of insns in && and || blocks. */
+ int and_and_p; /* Complex test is &&. */
+ int num_then_insns; /* # of insns in THEN block. */
+ int num_else_insns; /* # of insns in ELSE block. */
+ int pass; /* Pass number. */
+};
+
+/* Used by noce_process_if_block to communicate with its subroutines.
+
+ The subroutines know that A and B may be evaluated freely. They
+ know that X is a register. They should insert new instructions
+ before cond_earliest. */
+
+struct noce_if_info
+{
+ /* The basic blocks that make up the IF-THEN-{ELSE-,}JOIN block. */
+ basic_block test_bb, then_bb, else_bb, join_bb;
+
+ /* The jump that ends TEST_BB. */
+ rtx_insn *jump;
+
+ /* The jump condition. */
+ rtx cond;
+
+ /* Reversed jump condition. */
+ rtx rev_cond;
+
+ /* New insns should be inserted before this one. */
+ rtx_insn *cond_earliest;
+
+ /* Insns in the THEN and ELSE block. There is always just this
+ one insn in those blocks. The insns are single_set insns.
+ If there was no ELSE block, INSN_B is the last insn before
+ COND_EARLIEST, or NULL_RTX. In the former case, the insn
+ operands are still valid, as if INSN_B was moved down below
+ the jump. */
+ rtx_insn *insn_a, *insn_b;
+
+ /* The SET_SRC of INSN_A and INSN_B. */
+ rtx a, b;
+
+ /* The SET_DEST of INSN_A. */
+ rtx x;
+
+ /* The original set destination that the THEN and ELSE basic blocks finally
+ write their result to. */
+ rtx orig_x;
+ /* True if this if block is not canonical. In the canonical form of
+ if blocks, the THEN_BB is the block reached via the fallthru edge
+ from TEST_BB. For the noce transformations, we allow the symmetric
+ form as well. */
+ bool then_else_reversed;
+
+ /* True if THEN_BB is conditional on !COND rather than COND.
+ This is used if:
+
+ - JUMP branches to THEN_BB on COND
+ - JUMP falls through to JOIN_BB on !COND
+ - COND cannot be reversed. */
+ bool cond_inverted;
+
+ /* True if the contents of then_bb and else_bb are a
+ simple single set instruction. */
+ bool then_simple;
+ bool else_simple;
+
+ /* True if we're optimizing the control block for speed, false if
+ we're optimizing for size. */
+ bool speed_p;
+
+ /* An estimate of the original costs. When optimizing for size, this is the
+ combined cost of COND, JUMP and the costs for THEN_BB and ELSE_BB.
+ When optimizing for speed, we use the costs of COND plus weighted average
+ of the costs for THEN_BB and ELSE_BB, as computed in the next field. */
+ unsigned int original_cost;
+
+ /* Maximum permissible cost for the unconditional sequence we should
+ generate to replace this branch. */
+ unsigned int max_seq_cost;
+
+ /* The name of the noce transform that succeeded in if-converting
+ this structure. Used for debugging. */
+ const char *transform_name;
+};
+
+#endif /* GCC_IFCVT_H */
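As a concrete illustration of the fields above, the canonical block shape that noce_process_if_block receives can be sketched like this (labels follow the struct members; the ELSE side is absent in the plain IF-THEN form):

/*
   test_bb:  ...
             if (!COND) goto else_bb;   <- jump; cond == COND
   then_bb:  x = a;                     <- insn_a, reached by fallthru
             goto join_bb;
   else_bb:  x = b;                     <- insn_b
   join_bb:  ...

   A successful noce transform replaces the branch with an unconditional
   sequence such as "x = COND ? a : b", accepted only while its cost stays
   within max_seq_cost.  */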
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/inchash.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/inchash.h
new file mode 100644
index 0000000..bf76308
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/inchash.h
@@ -0,0 +1,211 @@
+/* An incremental hash abstract data type.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef INCHASH_H
+#define INCHASH_H 1
+
+
+/* This file implements an incremental hash function ADT, to be used
+ by code that incrementally hashes a lot of unrelated data
+ (not in a single memory block) into a single value. The goal
+ is to make it easy to plug in efficient hash algorithms.
+ Currently it just implements the plain old jhash based
+ incremental hash from gcc's tree.cc. */
+
+hashval_t iterative_hash_host_wide_int (HOST_WIDE_INT, hashval_t);
+hashval_t iterative_hash_hashval_t (hashval_t, hashval_t);
+
+namespace inchash
+{
+
+class hash
+{
+ public:
+
+ /* Start incremental hashing, optionally with SEED. */
+ hash (hashval_t seed = 0)
+ {
+ val = seed;
+ bits = 0;
+ }
+
+ /* End incremental hashing and provide the final value. */
+ hashval_t end ()
+ {
+ return val;
+ }
+
+ /* Add unsigned value V. */
+ void add_int (unsigned v)
+ {
+ val = iterative_hash_hashval_t (v, val);
+ }
+
+ /* Add polynomial value V, treating each element as an unsigned int. */
+ template<unsigned int N, typename T>
+ void add_poly_int (const poly_int_pod<N, T> &v)
+ {
+ for (unsigned int i = 0; i < N; ++i)
+ add_int (v.coeffs[i]);
+ }
+
+ /* Add HOST_WIDE_INT value V. */
+ void add_hwi (HOST_WIDE_INT v)
+ {
+ val = iterative_hash_host_wide_int (v, val);
+ }
+
+ /* Add polynomial value V, treating each element as a HOST_WIDE_INT. */
+ template<unsigned int N, typename T>
+ void add_poly_hwi (const poly_int_pod<N, T> &v)
+ {
+ for (unsigned int i = 0; i < N; ++i)
+ add_hwi (v.coeffs[i]);
+ }
+
+ /* Add wide_int-based value V. */
+ template<typename T>
+ void add_wide_int (const generic_wide_int<T> &x)
+ {
+ add_int (x.get_len ());
+ for (unsigned i = 0; i < x.get_len (); i++)
+ add_hwi (x.sext_elt (i));
+ }
+
+ /* Hash in pointer PTR. */
+ void add_ptr (const void *ptr)
+ {
+ add (&ptr, sizeof (ptr));
+ }
+
+ /* Add a memory block DATA with size LEN. */
+ void add (const void *data, size_t len)
+ {
+ val = iterative_hash (data, len, val);
+ }
+
+ /* Merge hash value OTHER. */
+ void merge_hash (hashval_t other)
+ {
+ val = iterative_hash_hashval_t (other, val);
+ }
+
+ /* Hash in state from other inchash OTHER. */
+ void merge (hash &other)
+ {
+ merge_hash (other.val);
+ }
+
+ template<class T> void add_object(T &obj)
+ {
+ add (&obj, sizeof(T));
+ }
+
+ /* Support for accumulating boolean flags. */
+
+ void add_flag (bool flag)
+ {
+ bits = (bits << 1) | flag;
+ }
+
+ void commit_flag ()
+ {
+ add_int (bits);
+ bits = 0;
+ }
+
+ /* Support for commutative hashing. Add A and B in a defined order
+ based on their value. This is useful for hashing commutative
+ expressions, so that A+B and B+A get the same hash. */
+
+ void add_commutative (hash &a, hash &b)
+ {
+ if (a.end() > b.end())
+ {
+ merge (b);
+ merge (a);
+ }
+ else
+ {
+ merge (a);
+ merge (b);
+ }
+ }
+
+ private:
+ hashval_t val;
+ unsigned bits;
+};
+
+}
+
+/* Borrowed from hashtab.c iterative_hash implementation. */
+#define mix(a,b,c) \
+{ \
+ a -= b; a -= c; a ^= (c>>13); \
+ b -= c; b -= a; b ^= (a<< 8); \
+ c -= a; c -= b; c ^= ((b&0xffffffff)>>13); \
+ a -= b; a -= c; a ^= ((c&0xffffffff)>>12); \
+ b -= c; b -= a; b = (b ^ (a<<16)) & 0xffffffff; \
+ c -= a; c -= b; c = (c ^ (b>> 5)) & 0xffffffff; \
+ a -= b; a -= c; a = (a ^ (c>> 3)) & 0xffffffff; \
+ b -= c; b -= a; b = (b ^ (a<<10)) & 0xffffffff; \
+ c -= a; c -= b; c = (c ^ (b>>15)) & 0xffffffff; \
+}
+
+
+/* Produce good hash value combining VAL and VAL2. */
+inline
+hashval_t
+iterative_hash_hashval_t (hashval_t val, hashval_t val2)
+{
+ /* the golden ratio; an arbitrary value. */
+ hashval_t a = 0x9e3779b9;
+
+ mix (a, val, val2);
+ return val2;
+}
+
+/* Produce good hash value combining VAL and VAL2. */
+
+inline
+hashval_t
+iterative_hash_host_wide_int (HOST_WIDE_INT val, hashval_t val2)
+{
+ if (sizeof (HOST_WIDE_INT) == sizeof (hashval_t))
+ return iterative_hash_hashval_t (val, val2);
+ else
+ {
+ hashval_t a = (hashval_t) val;
+ /* Avoid warnings about shifting of more than the width of the type on
+ hosts that won't execute this path. */
+ int zero = 0;
+ hashval_t b = (hashval_t) (val >> (sizeof (hashval_t) * 8 + zero));
+ mix (a, b, val2);
+ if (sizeof (HOST_WIDE_INT) > 2 * sizeof (hashval_t))
+ {
+ hashval_t a = (hashval_t) (val >> (sizeof (hashval_t) * 16 + zero));
+ hashval_t b = (hashval_t) (val >> (sizeof (hashval_t) * 24 + zero));
+ mix (a, b, val2);
+ }
+ return val2;
+ }
+}
+
+#endif
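A minimal usage sketch of the class above (hash_point and its parameters are hypothetical; only members shown in this header are used):

#include <string.h>

static hashval_t
hash_point (int x, int y, bool negated, const char *tag)
{
  inchash::hash h (/* seed = */ 17);
  h.add_int (x);
  h.add_int (y);
  h.add_flag (negated);
  h.commit_flag ();             /* fold the accumulated flag bits in */
  h.add (tag, strlen (tag));    /* arbitrary memory block */
  return h.end ();
}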
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/incpath.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/incpath.h
new file mode 100644
index 0000000..9d19222
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/incpath.h
@@ -0,0 +1,46 @@
+/* Set up combined include path for the preprocessor.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_INCPATH_H
+#define GCC_INCPATH_H
+
+/* Various fragments of include path. */
+enum incpath_kind {
+ INC_QUOTE = 0, /* include "foo" */
+ INC_BRACKET, /* include <foo> */
+ INC_SYSTEM, /* sys-include */
+ INC_AFTER, /* post-sysinclude */
+ INC_MAX
+};
+
+extern void split_quote_chain (void);
+extern void add_path (char *, incpath_kind, int, bool);
+extern void register_include_chains (cpp_reader *, const char *,
+ const char *, const char *,
+ int, int, int);
+extern void add_cpp_dir_path (struct cpp_dir *, incpath_kind);
+extern struct cpp_dir *get_added_cpp_dirs (incpath_kind);
+
+struct target_c_incpath_s {
+ /* Do extra includes processing. STDINC is false iff -nostdinc was given. */
+ void (*extra_pre_includes) (const char *, const char *, int);
+ void (*extra_includes) (const char *, const char *, int);
+};
+
+extern struct target_c_incpath_s target_c_incpath;
+
+#endif /* GCC_INCPATH_H */
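A sketch of how a target might populate these hooks, following the pattern of GCC's per-target *-c.cc files; the directory and function names are illustrative, and reading add_path's int argument as the C++-awareness flag and the bool as the user-supplied-path flag is an assumption based on its usual GCC usage:

static void
sketch_extra_pre_includes (const char *, const char *, int)
{
  /* Nothing to do before the standard includes in this sketch.  */
}

static void
sketch_extra_includes (const char *, const char *, int stdinc)
{
  if (stdinc)
    add_path (xstrdup ("/opt/my-target/include"), INC_SYSTEM,
              /* cxx_aware */ 0, /* user_supplied_p */ false);
}

struct target_c_incpath_s target_c_incpath
  = { sketch_extra_pre_includes, sketch_extra_includes };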
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/input.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/input.h
new file mode 100644
index 0000000..d1087b7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/input.h
@@ -0,0 +1,292 @@
+/* Declarations for variables relating to reading the source file.
+ Used by parsers, lexical analyzers, and error message routines.
+ Copyright (C) 1993-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_INPUT_H
+#define GCC_INPUT_H
+
+#include "line-map.h"
+
+extern GTY(()) class line_maps *line_table;
+extern GTY(()) class line_maps *saved_line_table;
+
+/* A value which will never be used to represent a real location. */
+#define UNKNOWN_LOCATION ((location_t) 0)
+
+/* The location for declarations in "<built-in>" */
+#define BUILTINS_LOCATION ((location_t) 1)
+
+/* Returns the translated string referring to the special location. */
+const char *special_fname_builtin ();
+
+/* line-map.cc reserves RESERVED_LOCATION_COUNT to the user. Ensure
+ both UNKNOWN_LOCATION and BUILTINS_LOCATION fit into that. */
+STATIC_ASSERT (BUILTINS_LOCATION < RESERVED_LOCATION_COUNT);
+
+/* Hasher for 'location_t' values satisfying '!RESERVED_LOCATION_P', thus able
+ to use 'UNKNOWN_LOCATION'/'BUILTINS_LOCATION' as spare values for
+ 'Empty'/'Deleted'. */
+/* Per PR103157 "'gengtype': 'typedef' causing infinite-recursion code to be
+ generated", don't use
+ typedef int_hash<location_t, UNKNOWN_LOCATION, BUILTINS_LOCATION>
+ location_hash;
+ here.
+
+ It works for a single-use case, but when using a 'struct'-based variant
+ struct location_hash
+ : int_hash<location_t, UNKNOWN_LOCATION, BUILTINS_LOCATION> {};
+ in more than one place, 'gengtype' generates duplicate functions (thus:
+ "error: redefinition of 'void gt_ggc_mx(location_hash&)'" etc.).
+ Attempting to mark that one up with GTY options, we run into a 'gengtype'
+ "parse error: expected '{', have '<'", which probably falls into category
+ "understanding of C++ is limited", as documented in 'gcc/doc/gty.texi'.
+
+ Thus, use a plain ol' '#define':
+*/
+#define location_hash int_hash<location_t, UNKNOWN_LOCATION, BUILTINS_LOCATION>
+
+extern bool is_location_from_builtin_token (location_t);
+extern expanded_location expand_location (location_t);
+
+class cpp_char_column_policy;
+
+extern int
+location_compute_display_column (expanded_location exploc,
+ const cpp_char_column_policy &policy);
+
+/* A class capturing the bounds of a buffer, to allow for run-time
+ bounds-checking in a checked build. */
+
+class char_span
+{
+ public:
+ char_span (const char *ptr, size_t n_elts) : m_ptr (ptr), m_n_elts (n_elts) {}
+
+ /* Test for a non-NULL pointer. */
+ operator bool() const { return m_ptr; }
+
+ /* Get length, not including any 0-terminator (which may not be,
+ in fact, present). */
+ size_t length () const { return m_n_elts; }
+
+ const char *get_buffer () const { return m_ptr; }
+
+ char operator[] (int idx) const
+ {
+ gcc_assert (idx >= 0);
+ gcc_assert ((size_t)idx < m_n_elts);
+ return m_ptr[idx];
+ }
+
+ char_span subspan (int offset, int n_elts) const
+ {
+ gcc_assert (offset >= 0);
+ gcc_assert (offset < (int)m_n_elts);
+ gcc_assert (n_elts >= 0);
+ gcc_assert (offset + n_elts <= (int)m_n_elts);
+ return char_span (m_ptr + offset, n_elts);
+ }
+
+ char *xstrdup () const
+ {
+ return ::xstrndup (m_ptr, m_n_elts);
+ }
+
+ private:
+ const char *m_ptr;
+ size_t m_n_elts;
+};
+
+extern char_span location_get_source_line (const char *file_path, int line);
+extern char *get_source_text_between (location_t, location_t);
+extern char_span get_source_file_content (const char *file_path);
+
+extern bool location_missing_trailing_newline (const char *file_path);
+
+/* Forward decl of slot within file_cache, so that the definition doesn't
+ need to be in this header. */
+class file_cache_slot;
+
+/* A cache of source files for use when emitting diagnostics
+ (and in a few places in the C/C++ frontends).
+
+ Results are only valid until the next call to the cache, as
+ slots can be evicted.
+
+ Filenames are stored by pointer, and so must outlive the cache
+ instance. */
+
+class file_cache
+{
+ public:
+ file_cache ();
+ ~file_cache ();
+
+ file_cache_slot *lookup_or_add_file (const char *file_path);
+ void forcibly_evict_file (const char *file_path);
+
+ /* See comments in diagnostic.h about the input conversion context. */
+ struct input_context
+ {
+ diagnostic_input_charset_callback ccb;
+ bool should_skip_bom;
+ };
+ void initialize_input_context (diagnostic_input_charset_callback ccb,
+ bool should_skip_bom);
+
+ private:
+ file_cache_slot *evicted_cache_tab_entry (unsigned *highest_use_count);
+ file_cache_slot *add_file (const char *file_path);
+ file_cache_slot *lookup_file (const char *file_path);
+
+ private:
+ static const size_t num_file_slots = 16;
+ file_cache_slot *m_file_slots;
+ input_context in_context;
+};
+
+extern expanded_location
+expand_location_to_spelling_point (location_t,
+ enum location_aspect aspect
+ = LOCATION_ASPECT_CARET);
+extern location_t expansion_point_location_if_in_system_header (location_t);
+extern location_t expansion_point_location (location_t);
+
+extern location_t input_location;
+
+extern location_t location_with_discriminator (location_t, int);
+extern bool has_discriminator (location_t);
+extern int get_discriminator_from_loc (location_t);
+
+#define LOCATION_FILE(LOC) ((expand_location (LOC)).file)
+#define LOCATION_LINE(LOC) ((expand_location (LOC)).line)
+#define LOCATION_COLUMN(LOC)((expand_location (LOC)).column)
+#define LOCATION_LOCUS(LOC) \
+ ((IS_ADHOC_LOC (LOC)) ? get_location_from_adhoc_loc (line_table, LOC) \
+ : (LOC))
+#define LOCATION_BLOCK(LOC) \
+ ((tree) ((IS_ADHOC_LOC (LOC)) ? get_data_from_adhoc_loc (line_table, (LOC)) \
+ : NULL))
+#define RESERVED_LOCATION_P(LOC) \
+ (LOCATION_LOCUS (LOC) < RESERVED_LOCATION_COUNT)
+
+/* Return a positive value if LOCATION is the locus of a token that is
+ located in a system header, 0 otherwise. It returns 1 if LOCATION
+ is the locus of a token that is located in a system header, and 2
+ if LOCATION is the locus of a token located in a C system header
+ that therefore needs to be extern "C" protected in C++.
+
+ Note that this function returns 1 if LOCATION belongs to a token
+ that is part of a macro replacement-list defined in a system
+ header, but expanded in a non-system file. */
+
+inline int
+in_system_header_at (location_t loc)
+{
+ return linemap_location_in_system_header_p (line_table, loc);
+}
+
+/* Return true if LOCATION is the locus of a token that
+ comes from a macro expansion, false otherwise. */
+
+inline bool
+from_macro_expansion_at (location_t loc)
+{
+ return linemap_location_from_macro_expansion_p (line_table, loc);
+}
+
+/* Return true if LOCATION is the locus of a token that comes from
+ a macro definition, false otherwise. This differs from from_macro_expansion_at
+ in its treatment of macro arguments, for which this returns false. */
+
+inline bool
+from_macro_definition_at (location_t loc)
+{
+ return linemap_location_from_macro_definition_p (line_table, loc);
+}
+
+inline location_t
+get_pure_location (location_t loc)
+{
+ return get_pure_location (line_table, loc);
+}
+
+/* Get the start of any range encoded within location LOC. */
+
+inline location_t
+get_start (location_t loc)
+{
+ return get_range_from_loc (line_table, loc).m_start;
+}
+
+/* Get the endpoint of any range encoded within location LOC. */
+
+inline location_t
+get_finish (location_t loc)
+{
+ return get_range_from_loc (line_table, loc).m_finish;
+}
+
+extern location_t make_location (location_t caret,
+ location_t start, location_t finish);
+extern location_t make_location (location_t caret, source_range src_range);
+
+void dump_line_table_statistics (void);
+
+void dump_location_info (FILE *stream);
+
+void diagnostics_file_cache_fini (void);
+
+void diagnostics_file_cache_forcibly_evict_file (const char *file_path);
+
+class GTY(()) string_concat
+{
+public:
+ string_concat (int num, location_t *locs);
+
+ int m_num;
+ location_t * GTY ((atomic)) m_locs;
+};
+
+class GTY(()) string_concat_db
+{
+ public:
+ string_concat_db ();
+ void record_string_concatenation (int num, location_t *locs);
+
+ bool get_string_concatenation (location_t loc,
+ int *out_num,
+ location_t **out_locs);
+
+ private:
+ static location_t get_key_loc (location_t loc);
+
+ /* For the fields to be private, we must grant access to the
+ generated code in gtype-desc.cc. */
+
+ friend void ::gt_ggc_mx_string_concat_db (void *x_p);
+ friend void ::gt_pch_nx_string_concat_db (void *x_p);
+ friend void ::gt_pch_p_16string_concat_db (void *this_obj, void *x_p,
+ gt_pointer_operator op,
+ void *cookie);
+
+ hash_map <location_hash, string_concat *> *m_table;
+};
+
+#endif
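A usage sketch tying expand_location, the file cache, and char_span together (it assumes GCC's usual system.h environment for fprintf/free; the function name is illustrative):

static void
sketch_print_source_line (location_t loc)
{
  expanded_location xl = expand_location (loc);
  if (xl.file == NULL)
    return;
  char_span line = location_get_source_line (xl.file, xl.line);
  if (!line)
    return;                     /* line not available from the file cache */
  char *text = line.xstrdup (); /* NUL-terminated copy of the span */
  fprintf (stderr, "%s:%d:%d: %s\n", xl.file, xl.line, xl.column, text);
  free (text);
}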
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-addr.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-addr.h
new file mode 100644
index 0000000..baf4d99
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-addr.h
@@ -0,0 +1,63 @@
+/* Macros to support INSN_ADDRESSES
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_INSN_ADDR_H
+#define GCC_INSN_ADDR_H
+
+extern vec<int> insn_addresses_;
+extern int insn_current_address;
+
+#define INSN_ADDRESSES(id) (insn_addresses_[id])
+#define INSN_ADDRESSES_ALLOC(size) \
+ do \
+ { \
+ insn_addresses_.create (size); \
+ insn_addresses_.safe_grow_cleared (size, true); \
+ memset (insn_addresses_.address (), \
+ 0, sizeof (int) * size); \
+ } \
+ while (0)
+#define INSN_ADDRESSES_FREE() (insn_addresses_.release ())
+#define INSN_ADDRESSES_SET_P() (insn_addresses_.exists ())
+#define INSN_ADDRESSES_SIZE() (insn_addresses_.length ())
+
+inline void
+insn_addresses_new (rtx_insn *insn, int insn_addr)
+{
+ unsigned insn_uid = INSN_UID ((insn));
+
+ if (INSN_ADDRESSES_SET_P ())
+ {
+ size_t size = INSN_ADDRESSES_SIZE ();
+ if (size <= insn_uid)
+ {
+ int *p;
+ insn_addresses_.safe_grow (insn_uid + 1, true);
+ p = insn_addresses_.address ();
+ memset (&p[size],
+ 0, sizeof (int) * (insn_uid + 1 - size));
+ }
+ INSN_ADDRESSES (insn_uid) = insn_addr;
+ }
+}
+
+#define INSN_ADDRESSES_NEW(insn, addr) \
+ (insn_addresses_new (insn, addr))
+
+#endif /* ! GCC_INSN_ADDR_H */
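A usage sketch of the machinery above, in the style of the address assignment done by shorten_branches (the fixed 4-byte encoding is an illustrative assumption):

static void
sketch_assign_insn_addresses (rtx_insn *first)
{
  int addr = 0;
  INSN_ADDRESSES_ALLOC (get_max_uid ());
  for (rtx_insn *insn = first; insn; insn = NEXT_INSN (insn))
    {
      INSN_ADDRESSES (INSN_UID (insn)) = addr;
      addr += 4;                /* assume every insn encodes in 4 bytes */
    }
  /* ... consumers read INSN_ADDRESSES (uid) here ... */
  INSN_ADDRESSES_FREE ();
}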
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-attr-common.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-attr-common.h
new file mode 100644
index 0000000..f31bb62
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-attr-common.h
@@ -0,0 +1,66 @@
+/* Generated automatically by the program `genattr-common'
+ from the machine description file `md'. */
+
+#ifndef GCC_INSN_ATTR_COMMON_H
+#define GCC_INSN_ATTR_COMMON_H
+
+enum attr_nonce_enabled {NONCE_ENABLED_NO, NONCE_ENABLED_YES};
+enum attr_ce_enabled {CE_ENABLED_NO, CE_ENABLED_YES};
+enum attr_tune {TUNE_ARM8, TUNE_ARM810, TUNE_STRONGARM, TUNE_FA526, TUNE_FA626, TUNE_ARM7TDMI, TUNE_ARM710T, TUNE_ARM9, TUNE_ARM9TDMI, TUNE_ARM920T, TUNE_ARM10TDMI, TUNE_ARM9E, TUNE_ARM10E, TUNE_XSCALE, TUNE_IWMMXT, TUNE_IWMMXT2, TUNE_FA606TE, TUNE_FA626TE, TUNE_FMP626, TUNE_FA726TE, TUNE_ARM926EJS, TUNE_ARM1026EJS, TUNE_ARM1136JS, TUNE_ARM1136JFS, TUNE_ARM1176JZS, TUNE_ARM1176JZFS, TUNE_MPCORENOVFP, TUNE_MPCORE, TUNE_ARM1156T2S, TUNE_ARM1156T2FS, TUNE_CORTEXM1, TUNE_CORTEXM0, TUNE_CORTEXM0PLUS, TUNE_CORTEXM1SMALLMULTIPLY, TUNE_CORTEXM0SMALLMULTIPLY, TUNE_CORTEXM0PLUSSMALLMULTIPLY, TUNE_GENERICV7A, TUNE_CORTEXA5, TUNE_CORTEXA7, TUNE_CORTEXA8, TUNE_CORTEXA9, TUNE_CORTEXA12, TUNE_CORTEXA15, TUNE_CORTEXA17, TUNE_CORTEXR4, TUNE_CORTEXR4F, TUNE_CORTEXR5, TUNE_CORTEXR7, TUNE_CORTEXR8, TUNE_CORTEXM7, TUNE_CORTEXM4, TUNE_CORTEXM3, TUNE_MARVELL_PJ4, TUNE_CORTEXA15CORTEXA7, TUNE_CORTEXA17CORTEXA7, TUNE_CORTEXA32, TUNE_CORTEXA35, TUNE_CORTEXA53, TUNE_CORTEXA57, TUNE_CORTEXA72, TUNE_CORTEXA73, TUNE_EXYNOSM1, TUNE_XGENE1, TUNE_CORTEXA57CORTEXA53, TUNE_CORTEXA72CORTEXA53, TUNE_CORTEXA73CORTEXA35, TUNE_CORTEXA73CORTEXA53, TUNE_CORTEXA55, TUNE_CORTEXA75, TUNE_CORTEXA76, TUNE_CORTEXA76AE, TUNE_CORTEXA77, TUNE_CORTEXA78, TUNE_CORTEXA78AE, TUNE_CORTEXA78C, TUNE_CORTEXA710, TUNE_CORTEXX1, TUNE_CORTEXX1C, TUNE_NEOVERSEN1, TUNE_CORTEXA75CORTEXA55, TUNE_CORTEXA76CORTEXA55, TUNE_NEOVERSEV1, TUNE_NEOVERSEN2, TUNE_CORTEXM23, TUNE_CORTEXM33, TUNE_CORTEXM35P, TUNE_CORTEXM55, TUNE_STARMC1, TUNE_CORTEXM85, TUNE_CORTEXR52, TUNE_CORTEXR52PLUS};
+enum attr_autodetect_type {AUTODETECT_TYPE_NONE, AUTODETECT_TYPE_ALU_SHIFT_LSL_OP2, AUTODETECT_TYPE_ALU_SHIFT_LSR_OP2, AUTODETECT_TYPE_ALU_SHIFT_ASR_OP2, AUTODETECT_TYPE_ALU_SHIFT_MUL_OP3, AUTODETECT_TYPE_ALU_SHIFT_OPERATOR1, AUTODETECT_TYPE_ALU_SHIFT_OPERATOR2, AUTODETECT_TYPE_ALU_SHIFT_OPERATOR3, AUTODETECT_TYPE_ALU_SHIFT_OPERATOR4};
+enum attr_type {TYPE_ADC_IMM, TYPE_ADC_REG, TYPE_ADCS_IMM, TYPE_ADCS_REG, TYPE_ADR, TYPE_ALU_EXT, TYPE_ALU_IMM, TYPE_ALU_SREG, TYPE_ALU_SHIFT_IMM_LSL_1TO4, TYPE_ALU_SHIFT_IMM_OTHER, TYPE_ALU_SHIFT_REG, TYPE_ALU_DSP_REG, TYPE_ALUS_EXT, TYPE_ALUS_IMM, TYPE_ALUS_SREG, TYPE_ALUS_SHIFT_IMM, TYPE_ALUS_SHIFT_REG, TYPE_BFM, TYPE_BFX, TYPE_BLOCK, TYPE_BRANCH, TYPE_CALL, TYPE_CLZ, TYPE_NO_INSN, TYPE_CSEL, TYPE_CRC, TYPE_EXTEND, TYPE_F_CVT, TYPE_F_CVTF2I, TYPE_F_CVTI2F, TYPE_F_FLAG, TYPE_F_LOADD, TYPE_F_LOADS, TYPE_F_MCR, TYPE_F_MCRR, TYPE_F_MINMAXD, TYPE_F_MINMAXS, TYPE_F_MRC, TYPE_F_MRRC, TYPE_F_RINTD, TYPE_F_RINTS, TYPE_F_STORED, TYPE_F_STORES, TYPE_FADDD, TYPE_FADDS, TYPE_FCCMPD, TYPE_FCCMPS, TYPE_FCMPD, TYPE_FCMPS, TYPE_FCONSTD, TYPE_FCONSTS, TYPE_FCSEL, TYPE_FDIVD, TYPE_FDIVS, TYPE_FFARITHD, TYPE_FFARITHS, TYPE_FFMAD, TYPE_FFMAS, TYPE_FLOAT, TYPE_FMACD, TYPE_FMACS, TYPE_FMOV, TYPE_FMULD, TYPE_FMULS, TYPE_FSQRTS, TYPE_FSQRTD, TYPE_LOAD_ACQ, TYPE_LOAD_BYTE, TYPE_LOAD_4, TYPE_LOAD_8, TYPE_LOAD_12, TYPE_LOAD_16, TYPE_LOGIC_IMM, TYPE_LOGIC_REG, TYPE_LOGIC_SHIFT_IMM, TYPE_LOGIC_SHIFT_REG, TYPE_LOGICS_IMM, TYPE_LOGICS_REG, TYPE_LOGICS_SHIFT_IMM, TYPE_LOGICS_SHIFT_REG, TYPE_MLA, TYPE_MLAS, TYPE_MOV_IMM, TYPE_MOV_REG, TYPE_MOV_SHIFT, TYPE_MOV_SHIFT_REG, TYPE_MRS, TYPE_MUL, TYPE_MULS, TYPE_MULTIPLE, TYPE_MVN_IMM, TYPE_MVN_REG, TYPE_MVN_SHIFT, TYPE_MVN_SHIFT_REG, TYPE_NOP, TYPE_RBIT, TYPE_REV, TYPE_ROTATE_IMM, TYPE_SDIV, TYPE_SHIFT_IMM, TYPE_SHIFT_REG, TYPE_SMLAD, TYPE_SMLADX, TYPE_SMLAL, TYPE_SMLALD, TYPE_SMLALS, TYPE_SMLALXY, TYPE_SMLAWX, TYPE_SMLAWY, TYPE_SMLAXY, TYPE_SMLSD, TYPE_SMLSDX, TYPE_SMLSLD, TYPE_SMMLA, TYPE_SMMUL, TYPE_SMMULR, TYPE_SMUAD, TYPE_SMUADX, TYPE_SMULL, TYPE_SMULLS, TYPE_SMULWY, TYPE_SMULXY, TYPE_SMUSD, TYPE_SMUSDX, TYPE_STORE_REL, TYPE_STORE_4, TYPE_STORE_8, TYPE_STORE_12, TYPE_STORE_16, TYPE_TRAP, TYPE_UDIV, TYPE_UMAAL, TYPE_UMLAL, TYPE_UMLALS, TYPE_UMULL, TYPE_UMULLS, TYPE_UNTYPED, TYPE_WMMX_TANDC, TYPE_WMMX_TBCST, TYPE_WMMX_TEXTRC, TYPE_WMMX_TEXTRM, TYPE_WMMX_TINSR, TYPE_WMMX_TMCR, TYPE_WMMX_TMCRR, TYPE_WMMX_TMIA, TYPE_WMMX_TMIAPH, TYPE_WMMX_TMIAXY, TYPE_WMMX_TMRC, TYPE_WMMX_TMRRC, TYPE_WMMX_TMOVMSK, TYPE_WMMX_TORC, TYPE_WMMX_TORVSC, TYPE_WMMX_WABS, TYPE_WMMX_WABSDIFF, TYPE_WMMX_WACC, TYPE_WMMX_WADD, TYPE_WMMX_WADDBHUS, TYPE_WMMX_WADDSUBHX, TYPE_WMMX_WALIGNI, TYPE_WMMX_WALIGNR, TYPE_WMMX_WAND, TYPE_WMMX_WANDN, TYPE_WMMX_WAVG2, TYPE_WMMX_WAVG4, TYPE_WMMX_WCMPEQ, TYPE_WMMX_WCMPGT, TYPE_WMMX_WMAC, TYPE_WMMX_WMADD, TYPE_WMMX_WMAX, TYPE_WMMX_WMERGE, TYPE_WMMX_WMIAWXY, TYPE_WMMX_WMIAXY, TYPE_WMMX_WMIN, TYPE_WMMX_WMOV, TYPE_WMMX_WMUL, TYPE_WMMX_WMULW, TYPE_WMMX_WLDR, TYPE_WMMX_WOR, TYPE_WMMX_WPACK, TYPE_WMMX_WQMIAXY, TYPE_WMMX_WQMULM, TYPE_WMMX_WQMULWM, TYPE_WMMX_WROR, TYPE_WMMX_WSAD, TYPE_WMMX_WSHUFH, TYPE_WMMX_WSLL, TYPE_WMMX_WSRA, TYPE_WMMX_WSRL, TYPE_WMMX_WSTR, TYPE_WMMX_WSUB, TYPE_WMMX_WSUBADDHX, TYPE_WMMX_WUNPCKEH, TYPE_WMMX_WUNPCKEL, TYPE_WMMX_WUNPCKIH, TYPE_WMMX_WUNPCKIL, TYPE_WMMX_WXOR, TYPE_NEON_ADD, TYPE_NEON_ADD_Q, TYPE_NEON_ADD_WIDEN, TYPE_NEON_ADD_LONG, TYPE_NEON_QADD, TYPE_NEON_QADD_Q, TYPE_NEON_ADD_HALVE, TYPE_NEON_ADD_HALVE_Q, TYPE_NEON_ADD_HALVE_NARROW_Q, TYPE_NEON_SUB, TYPE_NEON_SUB_Q, TYPE_NEON_SUB_WIDEN, TYPE_NEON_SUB_LONG, TYPE_NEON_QSUB, TYPE_NEON_QSUB_Q, TYPE_NEON_SUB_HALVE, TYPE_NEON_SUB_HALVE_Q, TYPE_NEON_SUB_HALVE_NARROW_Q, TYPE_NEON_FCADD, TYPE_NEON_FCMLA, TYPE_NEON_ABS, TYPE_NEON_ABS_Q, TYPE_NEON_DOT, TYPE_NEON_DOT_Q, TYPE_NEON_NEG, TYPE_NEON_NEG_Q, TYPE_NEON_QNEG, TYPE_NEON_QNEG_Q, TYPE_NEON_QABS, TYPE_NEON_QABS_Q, TYPE_NEON_ABD, TYPE_NEON_ABD_Q, TYPE_NEON_ABD_LONG, TYPE_NEON_MINMAX, TYPE_NEON_MINMAX_Q, TYPE_NEON_COMPARE, TYPE_NEON_COMPARE_Q, TYPE_NEON_COMPARE_ZERO, TYPE_NEON_COMPARE_ZERO_Q, TYPE_NEON_ARITH_ACC, TYPE_NEON_ARITH_ACC_Q, TYPE_NEON_REDUC_ADD, TYPE_NEON_REDUC_ADD_Q, TYPE_NEON_REDUC_ADD_LONG, TYPE_NEON_REDUC_ADD_ACC, TYPE_NEON_REDUC_ADD_ACC_Q, TYPE_NEON_REDUC_MINMAX, TYPE_NEON_REDUC_MINMAX_Q, TYPE_NEON_LOGIC, TYPE_NEON_LOGIC_Q, TYPE_NEON_TST, TYPE_NEON_TST_Q, TYPE_NEON_SHIFT_IMM, TYPE_NEON_SHIFT_IMM_Q, TYPE_NEON_SHIFT_IMM_NARROW_Q, TYPE_NEON_SHIFT_IMM_LONG, TYPE_NEON_SHIFT_REG, TYPE_NEON_SHIFT_REG_Q, TYPE_NEON_SHIFT_ACC, TYPE_NEON_SHIFT_ACC_Q, TYPE_NEON_SAT_SHIFT_IMM, TYPE_NEON_SAT_SHIFT_IMM_Q, TYPE_NEON_SAT_SHIFT_IMM_NARROW_Q, TYPE_NEON_SAT_SHIFT_REG, TYPE_NEON_SAT_SHIFT_REG_Q, TYPE_NEON_INS, TYPE_NEON_INS_Q, TYPE_NEON_MOVE, TYPE_NEON_MOVE_Q, TYPE_NEON_MOVE_NARROW_Q, TYPE_NEON_PERMUTE, TYPE_NEON_PERMUTE_Q, TYPE_NEON_ZIP, TYPE_NEON_ZIP_Q, TYPE_NEON_TBL1, TYPE_NEON_TBL1_Q, TYPE_NEON_TBL2, TYPE_NEON_TBL2_Q, TYPE_NEON_TBL3, TYPE_NEON_TBL3_Q, TYPE_NEON_TBL4, TYPE_NEON_TBL4_Q, TYPE_NEON_BSL, TYPE_NEON_BSL_Q, TYPE_NEON_CLS, TYPE_NEON_CLS_Q, TYPE_NEON_CNT, TYPE_NEON_CNT_Q, TYPE_NEON_DUP, TYPE_NEON_DUP_Q, TYPE_NEON_EXT, TYPE_NEON_EXT_Q, TYPE_NEON_RBIT, TYPE_NEON_RBIT_Q, TYPE_NEON_REV, TYPE_NEON_REV_Q, TYPE_NEON_MUL_B, TYPE_NEON_MUL_B_Q, TYPE_NEON_MUL_H, TYPE_NEON_MUL_H_Q, TYPE_NEON_MUL_S, TYPE_NEON_MUL_S_Q, TYPE_NEON_MUL_B_LONG, TYPE_NEON_MUL_H_LONG, TYPE_NEON_MUL_S_LONG, TYPE_NEON_MUL_D_LONG, TYPE_NEON_MUL_H_SCALAR, TYPE_NEON_MUL_H_SCALAR_Q, TYPE_NEON_MUL_S_SCALAR, TYPE_NEON_MUL_S_SCALAR_Q, TYPE_NEON_MUL_H_SCALAR_LONG, TYPE_NEON_MUL_S_SCALAR_LONG, TYPE_NEON_SAT_MUL_B, TYPE_NEON_SAT_MUL_B_Q, TYPE_NEON_SAT_MUL_H, TYPE_NEON_SAT_MUL_H_Q, TYPE_NEON_SAT_MUL_S, TYPE_NEON_SAT_MUL_S_Q, TYPE_NEON_SAT_MUL_B_LONG, TYPE_NEON_SAT_MUL_H_LONG, TYPE_NEON_SAT_MUL_S_LONG, TYPE_NEON_SAT_MUL_H_SCALAR, TYPE_NEON_SAT_MUL_H_SCALAR_Q, TYPE_NEON_SAT_MUL_S_SCALAR, TYPE_NEON_SAT_MUL_S_SCALAR_Q, TYPE_NEON_SAT_MUL_H_SCALAR_LONG, TYPE_NEON_SAT_MUL_S_SCALAR_LONG, TYPE_NEON_MLA_B, TYPE_NEON_MLA_B_Q, TYPE_NEON_MLA_H, TYPE_NEON_MLA_H_Q, TYPE_NEON_MLA_S, TYPE_NEON_MLA_S_Q, TYPE_NEON_MLA_B_LONG, TYPE_NEON_MLA_H_LONG, TYPE_NEON_MLA_S_LONG, TYPE_NEON_MLA_H_SCALAR, TYPE_NEON_MLA_H_SCALAR_Q, TYPE_NEON_MLA_S_SCALAR, TYPE_NEON_MLA_S_SCALAR_Q, TYPE_NEON_MLA_H_SCALAR_LONG, TYPE_NEON_MLA_S_SCALAR_LONG, TYPE_NEON_SAT_MLA_B_LONG, TYPE_NEON_SAT_MLA_H_LONG, TYPE_NEON_SAT_MLA_S_LONG, TYPE_NEON_SAT_MLA_H_SCALAR_LONG, TYPE_NEON_SAT_MLA_S_SCALAR_LONG, TYPE_NEON_TO_GP, TYPE_NEON_TO_GP_Q, TYPE_NEON_FROM_GP, TYPE_NEON_FROM_GP_Q, TYPE_NEON_LDR, TYPE_NEON_LDP, TYPE_NEON_LDP_Q, TYPE_NEON_LOAD1_1REG, TYPE_NEON_LOAD1_1REG_Q, TYPE_NEON_LOAD1_2REG, TYPE_NEON_LOAD1_2REG_Q, TYPE_NEON_LOAD1_3REG, TYPE_NEON_LOAD1_3REG_Q, TYPE_NEON_LOAD1_4REG, TYPE_NEON_LOAD1_4REG_Q, TYPE_NEON_LOAD1_ALL_LANES, TYPE_NEON_LOAD1_ALL_LANES_Q, TYPE_NEON_LOAD1_ONE_LANE, TYPE_NEON_LOAD1_ONE_LANE_Q, TYPE_NEON_LOAD2_2REG, TYPE_NEON_LOAD2_2REG_Q, TYPE_NEON_LOAD2_4REG, TYPE_NEON_LOAD2_4REG_Q, TYPE_NEON_LOAD2_ALL_LANES, TYPE_NEON_LOAD2_ALL_LANES_Q, TYPE_NEON_LOAD2_ONE_LANE, TYPE_NEON_LOAD2_ONE_LANE_Q, TYPE_NEON_LOAD3_3REG, TYPE_NEON_LOAD3_3REG_Q, TYPE_NEON_LOAD3_ALL_LANES, TYPE_NEON_LOAD3_ALL_LANES_Q, TYPE_NEON_LOAD3_ONE_LANE, TYPE_NEON_LOAD3_ONE_LANE_Q, TYPE_NEON_LOAD4_4REG, TYPE_NEON_LOAD4_4REG_Q, TYPE_NEON_LOAD4_ALL_LANES, TYPE_NEON_LOAD4_ALL_LANES_Q, TYPE_NEON_LOAD4_ONE_LANE, TYPE_NEON_LOAD4_ONE_LANE_Q, TYPE_NEON_STR, TYPE_NEON_STP, TYPE_NEON_STP_Q, TYPE_NEON_STORE1_1REG, TYPE_NEON_STORE1_1REG_Q, TYPE_NEON_STORE1_2REG, TYPE_NEON_STORE1_2REG_Q, TYPE_NEON_STORE1_3REG, TYPE_NEON_STORE1_3REG_Q, TYPE_NEON_STORE1_4REG, TYPE_NEON_STORE1_4REG_Q, TYPE_NEON_STORE1_ONE_LANE, TYPE_NEON_STORE1_ONE_LANE_Q, TYPE_NEON_STORE2_2REG, TYPE_NEON_STORE2_2REG_Q, TYPE_NEON_STORE2_4REG, TYPE_NEON_STORE2_4REG_Q, TYPE_NEON_STORE2_ONE_LANE, TYPE_NEON_STORE2_ONE_LANE_Q, TYPE_NEON_STORE3_3REG, TYPE_NEON_STORE3_3REG_Q, TYPE_NEON_STORE3_ONE_LANE, TYPE_NEON_STORE3_ONE_LANE_Q, TYPE_NEON_STORE4_4REG, TYPE_NEON_STORE4_4REG_Q, TYPE_NEON_STORE4_ONE_LANE, TYPE_NEON_STORE4_ONE_LANE_Q, TYPE_NEON_FP_ABS_S, TYPE_NEON_FP_ABS_S_Q, TYPE_NEON_FP_ABS_D, TYPE_NEON_FP_ABS_D_Q, TYPE_NEON_FP_NEG_S, TYPE_NEON_FP_NEG_S_Q, TYPE_NEON_FP_NEG_D, TYPE_NEON_FP_NEG_D_Q, TYPE_NEON_FP_ABD_S, TYPE_NEON_FP_ABD_S_Q, TYPE_NEON_FP_ABD_D, TYPE_NEON_FP_ABD_D_Q, TYPE_NEON_FP_ADDSUB_S, TYPE_NEON_FP_ADDSUB_S_Q, TYPE_NEON_FP_ADDSUB_D, TYPE_NEON_FP_ADDSUB_D_Q, TYPE_NEON_FP_COMPARE_S, TYPE_NEON_FP_COMPARE_S_Q, TYPE_NEON_FP_COMPARE_D, TYPE_NEON_FP_COMPARE_D_Q, TYPE_NEON_FP_MINMAX_S, TYPE_NEON_FP_MINMAX_S_Q, TYPE_NEON_FP_MINMAX_D, TYPE_NEON_FP_MINMAX_D_Q, TYPE_NEON_FP_REDUC_ADD_S, TYPE_NEON_FP_REDUC_ADD_S_Q, TYPE_NEON_FP_REDUC_ADD_D, TYPE_NEON_FP_REDUC_ADD_D_Q, TYPE_NEON_FP_REDUC_MINMAX_S, TYPE_NEON_FP_REDUC_MINMAX_S_Q, TYPE_NEON_FP_REDUC_MINMAX_D, TYPE_NEON_FP_REDUC_MINMAX_D_Q, TYPE_NEON_FP_CVT_NARROW_S_Q, TYPE_NEON_FP_CVT_NARROW_D_Q, TYPE_NEON_FP_CVT_WIDEN_H, TYPE_NEON_FP_CVT_WIDEN_S, TYPE_NEON_FP_TO_INT_S, TYPE_NEON_FP_TO_INT_S_Q, TYPE_NEON_FP_TO_INT_D, TYPE_NEON_FP_TO_INT_D_Q, TYPE_NEON_INT_TO_FP_S, TYPE_NEON_INT_TO_FP_S_Q, TYPE_NEON_INT_TO_FP_D, TYPE_NEON_INT_TO_FP_D_Q, TYPE_NEON_FP_ROUND_S, TYPE_NEON_FP_ROUND_S_Q, TYPE_NEON_FP_ROUND_D, TYPE_NEON_FP_ROUND_D_Q, TYPE_NEON_FP_RECPE_S, TYPE_NEON_FP_RECPE_S_Q, TYPE_NEON_FP_RECPE_D, TYPE_NEON_FP_RECPE_D_Q, TYPE_NEON_FP_RECPS_S, TYPE_NEON_FP_RECPS_S_Q, TYPE_NEON_FP_RECPS_D, TYPE_NEON_FP_RECPS_D_Q, TYPE_NEON_FP_RECPX_S, TYPE_NEON_FP_RECPX_S_Q, TYPE_NEON_FP_RECPX_D, TYPE_NEON_FP_RECPX_D_Q, TYPE_NEON_FP_RSQRTE_S, TYPE_NEON_FP_RSQRTE_S_Q, TYPE_NEON_FP_RSQRTE_D, TYPE_NEON_FP_RSQRTE_D_Q, TYPE_NEON_FP_RSQRTS_S, TYPE_NEON_FP_RSQRTS_S_Q, TYPE_NEON_FP_RSQRTS_D, TYPE_NEON_FP_RSQRTS_D_Q, TYPE_NEON_FP_MUL_S, TYPE_NEON_FP_MUL_S_Q, TYPE_NEON_FP_MUL_S_SCALAR, TYPE_NEON_FP_MUL_S_SCALAR_Q, TYPE_NEON_FP_MUL_D, TYPE_NEON_FP_MUL_D_Q, TYPE_NEON_FP_MUL_D_SCALAR_Q, TYPE_NEON_FP_MLA_S, TYPE_NEON_FP_MLA_S_Q, TYPE_NEON_FP_MLA_S_SCALAR, TYPE_NEON_FP_MLA_S_SCALAR_Q, TYPE_NEON_FP_MLA_D, TYPE_NEON_FP_MLA_D_Q, TYPE_NEON_FP_MLA_D_SCALAR_Q, TYPE_NEON_FP_SQRT_S, TYPE_NEON_FP_SQRT_S_Q, TYPE_NEON_FP_SQRT_D, TYPE_NEON_FP_SQRT_D_Q, TYPE_NEON_FP_DIV_S, TYPE_NEON_FP_DIV_S_Q, TYPE_NEON_FP_DIV_D, TYPE_NEON_FP_DIV_D_Q, TYPE_CRYPTO_AESE, TYPE_CRYPTO_AESMC, TYPE_CRYPTO_SHA1_XOR, TYPE_CRYPTO_SHA1_FAST, TYPE_CRYPTO_SHA1_SLOW, TYPE_CRYPTO_SHA256_FAST, TYPE_CRYPTO_SHA256_SLOW, TYPE_CRYPTO_PMULL, TYPE_CRYPTO_SHA512, TYPE_CRYPTO_SHA3, TYPE_CRYPTO_SM3, TYPE_CRYPTO_SM4, TYPE_COPROC, TYPE_TME, TYPE_MEMTAG, TYPE_LS64, TYPE_MVE_MOVE, TYPE_MVE_STORE, TYPE_MVE_LOAD};
+enum attr_mul32 {MUL32_NO, MUL32_YES};
+enum attr_widen_mul64 {WIDEN_MUL64_NO, WIDEN_MUL64_YES};
+enum attr_is_neon_type {IS_NEON_TYPE_YES, IS_NEON_TYPE_NO};
+enum attr_is_mve_type {IS_MVE_TYPE_YES, IS_MVE_TYPE_NO};
+enum attr_is_thumb {IS_THUMB_YES, IS_THUMB_NO};
+enum attr_is_arch6 {IS_ARCH6_NO, IS_ARCH6_YES};
+enum attr_is_thumb1 {IS_THUMB1_YES, IS_THUMB1_NO};
+enum attr_predicable_short_it {PREDICABLE_SHORT_IT_NO, PREDICABLE_SHORT_IT_YES};
+enum attr_enabled_for_short_it {ENABLED_FOR_SHORT_IT_NO, ENABLED_FOR_SHORT_IT_YES};
+enum attr_required_for_purecode {REQUIRED_FOR_PURECODE_NO, REQUIRED_FOR_PURECODE_YES};
+enum attr_fp {FP_NO, FP_YES};
+enum attr_fpu {FPU_NONE, FPU_VFP};
+enum attr_predicated {PREDICATED_YES, PREDICATED_NO};
+enum attr_arch {ARCH_ANY, ARCH_A, ARCH_T, ARCH_32, ARCH_T1, ARCH_T2, ARCH_V6, ARCH_NOV6, ARCH_V6T2, ARCH_V8MB, ARCH_FIX_VLLDM, ARCH_IWMMXT, ARCH_IWMMXT2, ARCH_ARMV6_OR_VFPV3, ARCH_NEON, ARCH_MVE};
+enum attr_arch_enabled {ARCH_ENABLED_NO, ARCH_ENABLED_YES};
+enum attr_opt {OPT_ANY, OPT_SPEED, OPT_SIZE};
+enum attr_opt_enabled {OPT_ENABLED_NO, OPT_ENABLED_YES};
+enum attr_use_literal_pool {USE_LITERAL_POOL_NO, USE_LITERAL_POOL_YES};
+enum attr_enabled {ENABLED_NO, ENABLED_YES};
+enum attr_ldsched {LDSCHED_NO, LDSCHED_YES};
+enum attr_conds {CONDS_USE, CONDS_SET, CONDS_CLOB, CONDS_UNCONDITIONAL, CONDS_NOCOND};
+enum attr_predicable {PREDICABLE_NO, PREDICABLE_YES};
+enum attr_model_wbuf {MODEL_WBUF_NO, MODEL_WBUF_YES};
+enum attr_write_conflict {WRITE_CONFLICT_NO, WRITE_CONFLICT_YES};
+enum attr_core_cycles {CORE_CYCLES_SINGLE, CORE_CYCLES_MULTI};
+enum attr_far_jump {FAR_JUMP_YES, FAR_JUMP_NO};
+enum attr_tune_cortexr4 {TUNE_CORTEXR4_YES, TUNE_CORTEXR4_NO};
+enum attr_generic_sched {GENERIC_SCHED_YES, GENERIC_SCHED_NO};
+enum attr_generic_vfp {GENERIC_VFP_YES, GENERIC_VFP_NO};
+enum attr_marvell_f_iwmmxt {MARVELL_F_IWMMXT_YES, MARVELL_F_IWMMXT_NO};
+enum attr_wmmxt_shift {WMMXT_SHIFT_YES, WMMXT_SHIFT_NO};
+enum attr_wmmxt_pack {WMMXT_PACK_YES, WMMXT_PACK_NO};
+enum attr_wmmxt_mult_c1 {WMMXT_MULT_C1_YES, WMMXT_MULT_C1_NO};
+enum attr_wmmxt_mult_c2 {WMMXT_MULT_C2_YES, WMMXT_MULT_C2_NO};
+enum attr_wmmxt_alu_c1 {WMMXT_ALU_C1_YES, WMMXT_ALU_C1_NO};
+enum attr_wmmxt_alu_c2 {WMMXT_ALU_C2_YES, WMMXT_ALU_C2_NO};
+enum attr_wmmxt_alu_c3 {WMMXT_ALU_C3_YES, WMMXT_ALU_C3_NO};
+enum attr_wmmxt_transfer_c1 {WMMXT_TRANSFER_C1_YES, WMMXT_TRANSFER_C1_NO};
+enum attr_wmmxt_transfer_c2 {WMMXT_TRANSFER_C2_YES, WMMXT_TRANSFER_C2_NO};
+enum attr_wmmxt_transfer_c3 {WMMXT_TRANSFER_C3_YES, WMMXT_TRANSFER_C3_NO};
+enum attr_vfp10 {VFP10_YES, VFP10_NO};
+enum attr_cortex_a7_neon_type {CORTEX_A7_NEON_TYPE_NEON_MUL, CORTEX_A7_NEON_TYPE_NEON_MLA, CORTEX_A7_NEON_TYPE_NEON_OTHER};
+enum attr_cortex_a8_neon_type {CORTEX_A8_NEON_TYPE_NEON_INT_1, CORTEX_A8_NEON_TYPE_NEON_INT_2, CORTEX_A8_NEON_TYPE_NEON_INT_3, CORTEX_A8_NEON_TYPE_NEON_INT_4, CORTEX_A8_NEON_TYPE_NEON_INT_5, CORTEX_A8_NEON_TYPE_NEON_VQNEG_VQABS, CORTEX_A8_NEON_TYPE_NEON_BIT_OPS_Q, CORTEX_A8_NEON_TYPE_NEON_VABA, CORTEX_A8_NEON_TYPE_NEON_VABA_QQQ, CORTEX_A8_NEON_TYPE_NEON_VMOV, CORTEX_A8_NEON_TYPE_NEON_MUL_DDD_8_16_QDD_16_8_LONG_32_16_LONG, CORTEX_A8_NEON_TYPE_NEON_MUL_QQQ_8_16_32_DDD_32, CORTEX_A8_NEON_TYPE_NEON_MUL_QDD_64_32_LONG_QQD_16_DDD_32_SCALAR_64_32_LONG_SCALAR, CORTEX_A8_NEON_TYPE_NEON_MLA_DDD_8_16_QDD_16_8_LONG_32_16_LONG, CORTEX_A8_NEON_TYPE_NEON_MLA_QQQ_8_16, CORTEX_A8_NEON_TYPE_NEON_MLA_DDD_32_QQD_16_DDD_32_SCALAR_QDD_64_32_LONG_SCALAR_QDD_64_32_LONG, CORTEX_A8_NEON_TYPE_NEON_MLA_QQQ_32_QQD_32_SCALAR, CORTEX_A8_NEON_TYPE_NEON_MUL_DDD_16_SCALAR_32_16_LONG_SCALAR, CORTEX_A8_NEON_TYPE_NEON_MUL_QQD_32_SCALAR, CORTEX_A8_NEON_TYPE_NEON_MLA_DDD_16_SCALAR_QDD_32_16_LONG_SCALAR, CORTEX_A8_NEON_TYPE_NEON_SHIFT_1, CORTEX_A8_NEON_TYPE_NEON_SHIFT_2, CORTEX_A8_NEON_TYPE_NEON_SHIFT_3, CORTEX_A8_NEON_TYPE_NEON_VQSHL_VRSHL_VQRSHL_QQQ, CORTEX_A8_NEON_TYPE_NEON_VSRA_VRSRA, CORTEX_A8_NEON_TYPE_NEON_FP_VADD_DDD_VABS_DD, CORTEX_A8_NEON_TYPE_NEON_FP_VADD_QQQ_VABS_QQ, CORTEX_A8_NEON_TYPE_NEON_FP_VSUM, CORTEX_A8_NEON_TYPE_NEON_FP_VMUL_DDD, CORTEX_A8_NEON_TYPE_NEON_FP_VMUL_QQD, CORTEX_A8_NEON_TYPE_NEON_FP_VMLA_DDD, CORTEX_A8_NEON_TYPE_NEON_FP_VMLA_QQQ, CORTEX_A8_NEON_TYPE_NEON_FP_VMLA_DDD_SCALAR, CORTEX_A8_NEON_TYPE_NEON_FP_VMLA_QQQ_SCALAR, CORTEX_A8_NEON_TYPE_NEON_FP_VRECPS_VRSQRTS_DDD, CORTEX_A8_NEON_TYPE_NEON_FP_VRECPS_VRSQRTS_QQQ, CORTEX_A8_NEON_TYPE_NEON_BP_SIMPLE, CORTEX_A8_NEON_TYPE_NEON_BP_2CYCLE, CORTEX_A8_NEON_TYPE_NEON_BP_3CYCLE, CORTEX_A8_NEON_TYPE_NEON_LDR, CORTEX_A8_NEON_TYPE_NEON_STR, CORTEX_A8_NEON_TYPE_NEON_VLD1_1_2_REGS, CORTEX_A8_NEON_TYPE_NEON_VLD1_3_4_REGS, CORTEX_A8_NEON_TYPE_NEON_VLD2_2_REGS_VLD1_VLD2_ALL_LANES, CORTEX_A8_NEON_TYPE_NEON_VLD2_4_REGS, CORTEX_A8_NEON_TYPE_NEON_VLD3_VLD4, CORTEX_A8_NEON_TYPE_NEON_VST1_1_2_REGS_VST2_2_REGS, CORTEX_A8_NEON_TYPE_NEON_VST1_3_4_REGS, CORTEX_A8_NEON_TYPE_NEON_VST2_4_REGS_VST3_VST4, CORTEX_A8_NEON_TYPE_NEON_VLD1_VLD2_LANE, CORTEX_A8_NEON_TYPE_NEON_VLD3_VLD4_LANE, CORTEX_A8_NEON_TYPE_NEON_VST1_VST2_LANE, CORTEX_A8_NEON_TYPE_NEON_VST3_VST4_LANE, CORTEX_A8_NEON_TYPE_NEON_VLD3_VLD4_ALL_LANES, CORTEX_A8_NEON_TYPE_NEON_MCR, CORTEX_A8_NEON_TYPE_NEON_MCR_2_MCRR, CORTEX_A8_NEON_TYPE_NEON_MRC, CORTEX_A8_NEON_TYPE_NEON_MRRC, CORTEX_A8_NEON_TYPE_NEON_LDM_2, CORTEX_A8_NEON_TYPE_NEON_STM_2, CORTEX_A8_NEON_TYPE_NONE, CORTEX_A8_NEON_TYPE_UNKNOWN};
+enum attr_cortex_a9_neon_type {CORTEX_A9_NEON_TYPE_NEON_INT_1, CORTEX_A9_NEON_TYPE_NEON_INT_2, CORTEX_A9_NEON_TYPE_NEON_INT_3, CORTEX_A9_NEON_TYPE_NEON_INT_4, CORTEX_A9_NEON_TYPE_NEON_INT_5, CORTEX_A9_NEON_TYPE_NEON_VQNEG_VQABS, CORTEX_A9_NEON_TYPE_NEON_BIT_OPS_Q, CORTEX_A9_NEON_TYPE_NEON_VABA, CORTEX_A9_NEON_TYPE_NEON_VABA_QQQ, CORTEX_A9_NEON_TYPE_NEON_VMOV, CORTEX_A9_NEON_TYPE_NEON_MUL_DDD_8_16_QDD_16_8_LONG_32_16_LONG, CORTEX_A9_NEON_TYPE_NEON_MUL_QQQ_8_16_32_DDD_32, CORTEX_A9_NEON_TYPE_NEON_MUL_QDD_64_32_LONG_QQD_16_DDD_32_SCALAR_64_32_LONG_SCALAR, CORTEX_A9_NEON_TYPE_NEON_MLA_DDD_8_16_QDD_16_8_LONG_32_16_LONG, CORTEX_A9_NEON_TYPE_NEON_MLA_QQQ_8_16, CORTEX_A9_NEON_TYPE_NEON_MLA_DDD_32_QQD_16_DDD_32_SCALAR_QDD_64_32_LONG_SCALAR_QDD_64_32_LONG, CORTEX_A9_NEON_TYPE_NEON_MLA_QQQ_32_QQD_32_SCALAR, CORTEX_A9_NEON_TYPE_NEON_MUL_DDD_16_SCALAR_32_16_LONG_SCALAR, CORTEX_A9_NEON_TYPE_NEON_MUL_QQD_32_SCALAR, CORTEX_A9_NEON_TYPE_NEON_MLA_DDD_16_SCALAR_QDD_32_16_LONG_SCALAR, CORTEX_A9_NEON_TYPE_NEON_SHIFT_1, CORTEX_A9_NEON_TYPE_NEON_SHIFT_2, CORTEX_A9_NEON_TYPE_NEON_SHIFT_3, CORTEX_A9_NEON_TYPE_NEON_VQSHL_VRSHL_VQRSHL_QQQ, CORTEX_A9_NEON_TYPE_NEON_VSRA_VRSRA, CORTEX_A9_NEON_TYPE_NEON_FP_VADD_DDD_VABS_DD, CORTEX_A9_NEON_TYPE_NEON_FP_VADD_QQQ_VABS_QQ, CORTEX_A9_NEON_TYPE_NEON_FP_VSUM, CORTEX_A9_NEON_TYPE_NEON_FP_VMUL_DDD, CORTEX_A9_NEON_TYPE_NEON_FP_VMUL_QQD, CORTEX_A9_NEON_TYPE_NEON_FP_VMLA_DDD, CORTEX_A9_NEON_TYPE_NEON_FP_VMLA_QQQ, CORTEX_A9_NEON_TYPE_NEON_FP_VMLA_DDD_SCALAR, CORTEX_A9_NEON_TYPE_NEON_FP_VMLA_QQQ_SCALAR, CORTEX_A9_NEON_TYPE_NEON_FP_VRECPS_VRSQRTS_DDD, CORTEX_A9_NEON_TYPE_NEON_FP_VRECPS_VRSQRTS_QQQ, CORTEX_A9_NEON_TYPE_NEON_BP_SIMPLE, CORTEX_A9_NEON_TYPE_NEON_BP_2CYCLE, CORTEX_A9_NEON_TYPE_NEON_BP_3CYCLE, CORTEX_A9_NEON_TYPE_NEON_LDR, CORTEX_A9_NEON_TYPE_NEON_STR, CORTEX_A9_NEON_TYPE_NEON_VLD1_1_2_REGS, CORTEX_A9_NEON_TYPE_NEON_VLD1_3_4_REGS, CORTEX_A9_NEON_TYPE_NEON_VLD2_2_REGS_VLD1_VLD2_ALL_LANES, CORTEX_A9_NEON_TYPE_NEON_VLD2_4_REGS, CORTEX_A9_NEON_TYPE_NEON_VLD3_VLD4, CORTEX_A9_NEON_TYPE_NEON_VST1_1_2_REGS_VST2_2_REGS, CORTEX_A9_NEON_TYPE_NEON_VST1_3_4_REGS, CORTEX_A9_NEON_TYPE_NEON_VST2_4_REGS_VST3_VST4, CORTEX_A9_NEON_TYPE_NEON_VLD1_VLD2_LANE, CORTEX_A9_NEON_TYPE_NEON_VLD3_VLD4_LANE, CORTEX_A9_NEON_TYPE_NEON_VST1_VST2_LANE, CORTEX_A9_NEON_TYPE_NEON_VST3_VST4_LANE, CORTEX_A9_NEON_TYPE_NEON_VLD3_VLD4_ALL_LANES, CORTEX_A9_NEON_TYPE_NEON_MCR, CORTEX_A9_NEON_TYPE_NEON_MCR_2_MCRR, CORTEX_A9_NEON_TYPE_NEON_MRC, CORTEX_A9_NEON_TYPE_NEON_MRRC, CORTEX_A9_NEON_TYPE_NEON_LDM_2, CORTEX_A9_NEON_TYPE_NEON_STM_2, CORTEX_A9_NEON_TYPE_NONE, CORTEX_A9_NEON_TYPE_UNKNOWN};
+enum attr_cortex_a15_neon_type {CORTEX_A15_NEON_TYPE_NEON_ABD, CORTEX_A15_NEON_TYPE_NEON_ABD_Q, CORTEX_A15_NEON_TYPE_NEON_ARITH_ACC, CORTEX_A15_NEON_TYPE_NEON_ARITH_ACC_Q, CORTEX_A15_NEON_TYPE_NEON_ARITH_BASIC, CORTEX_A15_NEON_TYPE_NEON_ARITH_COMPLEX, CORTEX_A15_NEON_TYPE_NEON_REDUC_ADD_ACC, CORTEX_A15_NEON_TYPE_NEON_MULTIPLY, CORTEX_A15_NEON_TYPE_NEON_MULTIPLY_Q, CORTEX_A15_NEON_TYPE_NEON_MULTIPLY_LONG, CORTEX_A15_NEON_TYPE_NEON_MLA, CORTEX_A15_NEON_TYPE_NEON_MLA_Q, CORTEX_A15_NEON_TYPE_NEON_MLA_LONG, CORTEX_A15_NEON_TYPE_NEON_SAT_MLA_LONG, CORTEX_A15_NEON_TYPE_NEON_SHIFT_ACC, CORTEX_A15_NEON_TYPE_NEON_SHIFT_IMM_BASIC, CORTEX_A15_NEON_TYPE_NEON_SHIFT_IMM_COMPLEX, CORTEX_A15_NEON_TYPE_NEON_SHIFT_REG_BASIC, CORTEX_A15_NEON_TYPE_NEON_SHIFT_REG_BASIC_Q, CORTEX_A15_NEON_TYPE_NEON_SHIFT_REG_COMPLEX, CORTEX_A15_NEON_TYPE_NEON_SHIFT_REG_COMPLEX_Q, CORTEX_A15_NEON_TYPE_NEON_FP_NEGABS, CORTEX_A15_NEON_TYPE_NEON_FP_ARITH, CORTEX_A15_NEON_TYPE_NEON_FP_ARITH_Q, CORTEX_A15_NEON_TYPE_NEON_FP_CVT_INT, CORTEX_A15_NEON_TYPE_NEON_FP_CVT_INT_Q, CORTEX_A15_NEON_TYPE_NEON_FP_CVT16, CORTEX_A15_NEON_TYPE_NEON_FP_MINMAX, CORTEX_A15_NEON_TYPE_NEON_FP_MUL, CORTEX_A15_NEON_TYPE_NEON_FP_MUL_Q, CORTEX_A15_NEON_TYPE_NEON_FP_MLA, CORTEX_A15_NEON_TYPE_NEON_FP_MLA_Q, CORTEX_A15_NEON_TYPE_NEON_FP_RECPE_RSQRTE, CORTEX_A15_NEON_TYPE_NEON_FP_RECPE_RSQRTE_Q, CORTEX_A15_NEON_TYPE_NEON_BITOPS, CORTEX_A15_NEON_TYPE_NEON_BITOPS_Q, CORTEX_A15_NEON_TYPE_NEON_FROM_GP, CORTEX_A15_NEON_TYPE_NEON_FROM_GP_Q, CORTEX_A15_NEON_TYPE_NEON_MOVE, CORTEX_A15_NEON_TYPE_NEON_TBL3_TBL4, CORTEX_A15_NEON_TYPE_NEON_ZIP_Q, CORTEX_A15_NEON_TYPE_NEON_TO_GP, CORTEX_A15_NEON_TYPE_NEON_LOAD_A, CORTEX_A15_NEON_TYPE_NEON_LOAD_B, CORTEX_A15_NEON_TYPE_NEON_LOAD_C, CORTEX_A15_NEON_TYPE_NEON_LOAD_D, CORTEX_A15_NEON_TYPE_NEON_LOAD_E, CORTEX_A15_NEON_TYPE_NEON_LOAD_F, CORTEX_A15_NEON_TYPE_NEON_STORE_A, CORTEX_A15_NEON_TYPE_NEON_STORE_B, CORTEX_A15_NEON_TYPE_NEON_STORE_C, CORTEX_A15_NEON_TYPE_NEON_STORE_D, CORTEX_A15_NEON_TYPE_NEON_STORE_E, CORTEX_A15_NEON_TYPE_NEON_STORE_F, CORTEX_A15_NEON_TYPE_NEON_STORE_G, CORTEX_A15_NEON_TYPE_NEON_STORE_H, CORTEX_A15_NEON_TYPE_UNKNOWN};
+enum attr_cortex_a17_neon_type {CORTEX_A17_NEON_TYPE_NEON_ABD, CORTEX_A17_NEON_TYPE_NEON_ABD_Q, CORTEX_A17_NEON_TYPE_NEON_ARITH_ACC, CORTEX_A17_NEON_TYPE_NEON_ARITH_ACC_Q, CORTEX_A17_NEON_TYPE_NEON_ARITH_BASIC, CORTEX_A17_NEON_TYPE_NEON_ARITH_COMPLEX, CORTEX_A17_NEON_TYPE_NEON_REDUC_ADD_ACC, CORTEX_A17_NEON_TYPE_NEON_MULTIPLY, CORTEX_A17_NEON_TYPE_NEON_MULTIPLY_Q, CORTEX_A17_NEON_TYPE_NEON_MULTIPLY_LONG, CORTEX_A17_NEON_TYPE_NEON_MLA, CORTEX_A17_NEON_TYPE_NEON_MLA_Q, CORTEX_A17_NEON_TYPE_NEON_MLA_LONG, CORTEX_A17_NEON_TYPE_NEON_SAT_MLA_LONG, CORTEX_A17_NEON_TYPE_NEON_SHIFT_ACC, CORTEX_A17_NEON_TYPE_NEON_SHIFT_IMM_BASIC, CORTEX_A17_NEON_TYPE_NEON_SHIFT_IMM_COMPLEX, CORTEX_A17_NEON_TYPE_NEON_SHIFT_REG_BASIC, CORTEX_A17_NEON_TYPE_NEON_SHIFT_REG_BASIC_Q, CORTEX_A17_NEON_TYPE_NEON_SHIFT_REG_COMPLEX, CORTEX_A17_NEON_TYPE_NEON_SHIFT_REG_COMPLEX_Q, CORTEX_A17_NEON_TYPE_NEON_FP_NEGABS, CORTEX_A17_NEON_TYPE_NEON_FP_ARITH, CORTEX_A17_NEON_TYPE_NEON_FP_ARITH_Q, CORTEX_A17_NEON_TYPE_NEON_FP_CVT_INT, CORTEX_A17_NEON_TYPE_NEON_FP_CVT_INT_Q, CORTEX_A17_NEON_TYPE_NEON_FP_CVT16, CORTEX_A17_NEON_TYPE_NEON_FP_MINMAX, CORTEX_A17_NEON_TYPE_NEON_FP_MUL, CORTEX_A17_NEON_TYPE_NEON_FP_MUL_Q, CORTEX_A17_NEON_TYPE_NEON_FP_MLA, CORTEX_A17_NEON_TYPE_NEON_FP_MLA_Q, CORTEX_A17_NEON_TYPE_NEON_FP_RECPE_RSQRTE, CORTEX_A17_NEON_TYPE_NEON_FP_RECPE_RSQRTE_Q, CORTEX_A17_NEON_TYPE_NEON_BITOPS, CORTEX_A17_NEON_TYPE_NEON_BITOPS_Q, CORTEX_A17_NEON_TYPE_NEON_FROM_GP, CORTEX_A17_NEON_TYPE_NEON_FROM_GP_Q, CORTEX_A17_NEON_TYPE_NEON_MOVE, CORTEX_A17_NEON_TYPE_NEON_TBL3_TBL4, CORTEX_A17_NEON_TYPE_NEON_ZIP_Q, CORTEX_A17_NEON_TYPE_NEON_TO_GP, CORTEX_A17_NEON_TYPE_NEON_LOAD_A, CORTEX_A17_NEON_TYPE_NEON_LOAD_B, CORTEX_A17_NEON_TYPE_NEON_LOAD_C, CORTEX_A17_NEON_TYPE_NEON_LOAD_D, CORTEX_A17_NEON_TYPE_NEON_LOAD_E, CORTEX_A17_NEON_TYPE_NEON_LOAD_F, CORTEX_A17_NEON_TYPE_NEON_LOAD_G, CORTEX_A17_NEON_TYPE_NEON_LOAD_H, CORTEX_A17_NEON_TYPE_NEON_STORE_A, CORTEX_A17_NEON_TYPE_NEON_STORE_B, CORTEX_A17_NEON_TYPE_UNKNOWN};
+enum attr_cortex_a53_advsimd_type {CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_ALU, CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_ALU_Q, CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_MUL, CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_MUL_Q, CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_DIV_S, CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_DIV_S_Q, CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_DIV_D, CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_DIV_D_Q, CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_LOAD_64, CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_STORE_64, CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_LOAD_128, CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_STORE_128, CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_LOAD_LOTS, CORTEX_A53_ADVSIMD_TYPE_ADVSIMD_STORE_LOTS, CORTEX_A53_ADVSIMD_TYPE_UNKNOWN};
+enum attr_cortex_a57_neon_type {CORTEX_A57_NEON_TYPE_NEON_ABD, CORTEX_A57_NEON_TYPE_NEON_ABD_Q, CORTEX_A57_NEON_TYPE_NEON_ARITH_ACC, CORTEX_A57_NEON_TYPE_NEON_ARITH_ACC_Q, CORTEX_A57_NEON_TYPE_NEON_ARITH_BASIC, CORTEX_A57_NEON_TYPE_NEON_ARITH_COMPLEX, CORTEX_A57_NEON_TYPE_NEON_REDUC_ADD_ACC, CORTEX_A57_NEON_TYPE_NEON_MULTIPLY, CORTEX_A57_NEON_TYPE_NEON_MULTIPLY_Q, CORTEX_A57_NEON_TYPE_NEON_MULTIPLY_LONG, CORTEX_A57_NEON_TYPE_NEON_MLA, CORTEX_A57_NEON_TYPE_NEON_MLA_Q, CORTEX_A57_NEON_TYPE_NEON_MLA_LONG, CORTEX_A57_NEON_TYPE_NEON_SAT_MLA_LONG, CORTEX_A57_NEON_TYPE_NEON_SHIFT_ACC, CORTEX_A57_NEON_TYPE_NEON_SHIFT_IMM_BASIC, CORTEX_A57_NEON_TYPE_NEON_SHIFT_IMM_COMPLEX, CORTEX_A57_NEON_TYPE_NEON_SHIFT_REG_BASIC, CORTEX_A57_NEON_TYPE_NEON_SHIFT_REG_BASIC_Q, CORTEX_A57_NEON_TYPE_NEON_SHIFT_REG_COMPLEX, CORTEX_A57_NEON_TYPE_NEON_SHIFT_REG_COMPLEX_Q, CORTEX_A57_NEON_TYPE_NEON_FP_NEGABS, CORTEX_A57_NEON_TYPE_NEON_FP_ARITH, CORTEX_A57_NEON_TYPE_NEON_FP_ARITH_Q, CORTEX_A57_NEON_TYPE_NEON_FP_REDUCTIONS_Q, CORTEX_A57_NEON_TYPE_NEON_FP_CVT_INT, CORTEX_A57_NEON_TYPE_NEON_FP_CVT_INT_Q, CORTEX_A57_NEON_TYPE_NEON_FP_CVT16, CORTEX_A57_NEON_TYPE_NEON_FP_MINMAX, CORTEX_A57_NEON_TYPE_NEON_FP_MUL, CORTEX_A57_NEON_TYPE_NEON_FP_MUL_Q, CORTEX_A57_NEON_TYPE_NEON_FP_MLA, CORTEX_A57_NEON_TYPE_NEON_FP_MLA_Q, CORTEX_A57_NEON_TYPE_NEON_FP_RECPE_RSQRTE, CORTEX_A57_NEON_TYPE_NEON_FP_RECPE_RSQRTE_Q, CORTEX_A57_NEON_TYPE_NEON_FP_RECPS_RSQRTS, CORTEX_A57_NEON_TYPE_NEON_FP_RECPS_RSQRTS_Q, CORTEX_A57_NEON_TYPE_NEON_BITOPS, CORTEX_A57_NEON_TYPE_NEON_BITOPS_Q, CORTEX_A57_NEON_TYPE_NEON_FROM_GP, CORTEX_A57_NEON_TYPE_NEON_FROM_GP_Q, CORTEX_A57_NEON_TYPE_NEON_MOVE, CORTEX_A57_NEON_TYPE_NEON_TBL3_TBL4, CORTEX_A57_NEON_TYPE_NEON_ZIP_Q, CORTEX_A57_NEON_TYPE_NEON_TO_GP, CORTEX_A57_NEON_TYPE_NEON_LOAD_A, CORTEX_A57_NEON_TYPE_NEON_LOAD_B, CORTEX_A57_NEON_TYPE_NEON_LOAD_C, CORTEX_A57_NEON_TYPE_NEON_LOAD_D, CORTEX_A57_NEON_TYPE_NEON_LOAD_E, CORTEX_A57_NEON_TYPE_NEON_LOAD_F, CORTEX_A57_NEON_TYPE_NEON_STORE_A, CORTEX_A57_NEON_TYPE_NEON_STORE_B, CORTEX_A57_NEON_TYPE_NEON_STORE_COMPLEX, CORTEX_A57_NEON_TYPE_UNKNOWN};
+enum attr_exynos_m1_neon_type {EXYNOS_M1_NEON_TYPE_NEON_ARITH_SIMPLE, EXYNOS_M1_NEON_TYPE_NEON_ARITH_BASIC, EXYNOS_M1_NEON_TYPE_NEON_ARITH_COMPLEX, EXYNOS_M1_NEON_TYPE_NEON_MULTIPLY, EXYNOS_M1_NEON_TYPE_NEON_MLA, EXYNOS_M1_NEON_TYPE_NEON_MLA_Q, EXYNOS_M1_NEON_TYPE_NEON_MLA_LONG, EXYNOS_M1_NEON_TYPE_NEON_SAT_MLA_LONG, EXYNOS_M1_NEON_TYPE_NEON_SHIFT_ACC, EXYNOS_M1_NEON_TYPE_NEON_SHIFT_IMM_BASIC, EXYNOS_M1_NEON_TYPE_NEON_SHIFT_IMM_COMPLEX, EXYNOS_M1_NEON_TYPE_NEON_SHIFT_REG_BASIC, EXYNOS_M1_NEON_TYPE_NEON_SHIFT_REG_BASIC_Q, EXYNOS_M1_NEON_TYPE_NEON_SHIFT_REG_COMPLEX, EXYNOS_M1_NEON_TYPE_NEON_SHIFT_REG_COMPLEX_Q, EXYNOS_M1_NEON_TYPE_NEON_FP_UNARY, EXYNOS_M1_NEON_TYPE_NEON_FP_ADD, EXYNOS_M1_NEON_TYPE_NEON_FP_ABD, EXYNOS_M1_NEON_TYPE_NEON_FP_COMPARE, EXYNOS_M1_NEON_TYPE_NEON_FP_REDUC_MINMAX, EXYNOS_M1_NEON_TYPE_NEON_FP_REDUC_ADD, EXYNOS_M1_NEON_TYPE_NEON_FP_ROUND, EXYNOS_M1_NEON_TYPE_NEON_FP_CVT, EXYNOS_M1_NEON_TYPE_NEON_FP_MINMAX, EXYNOS_M1_NEON_TYPE_NEON_FP_MUL, EXYNOS_M1_NEON_TYPE_NEON_FP_MUL_Q, EXYNOS_M1_NEON_TYPE_NEON_FP_MLA, EXYNOS_M1_NEON_TYPE_NEON_FP_MLA_Q, EXYNOS_M1_NEON_TYPE_NEON_FP_ESTIMATE, EXYNOS_M1_NEON_TYPE_NEON_FP_ESTIMATEX, EXYNOS_M1_NEON_TYPE_NEON_FP_STEP, EXYNOS_M1_NEON_TYPE_NEON_BITOPS, EXYNOS_M1_NEON_TYPE_NEON_BITOPS_Q, EXYNOS_M1_NEON_TYPE_NEON_BITINS, EXYNOS_M1_NEON_TYPE_NEON_TO_GP, EXYNOS_M1_NEON_TYPE_NEON_FROM_GP, EXYNOS_M1_NEON_TYPE_NEON_MOVE, EXYNOS_M1_NEON_TYPE_NEON_TBL, EXYNOS_M1_NEON_TYPE_NEON_LOAD1_1, EXYNOS_M1_NEON_TYPE_NEON_LOAD1_2, EXYNOS_M1_NEON_TYPE_NEON_LOAD1_3, EXYNOS_M1_NEON_TYPE_NEON_LOAD1_4, EXYNOS_M1_NEON_TYPE_NEON_LOAD1_ONE, EXYNOS_M1_NEON_TYPE_NEON_LOAD1_ALL, EXYNOS_M1_NEON_TYPE_NEON_LOAD2_2, EXYNOS_M1_NEON_TYPE_NEON_LOAD2_ONE, EXYNOS_M1_NEON_TYPE_NEON_LOAD2_ALL, EXYNOS_M1_NEON_TYPE_NEON_LOAD3_3, EXYNOS_M1_NEON_TYPE_NEON_LOAD3_ONE, EXYNOS_M1_NEON_TYPE_NEON_LOAD3_ALL, EXYNOS_M1_NEON_TYPE_NEON_LOAD4_4, EXYNOS_M1_NEON_TYPE_NEON_LOAD4_ONE, EXYNOS_M1_NEON_TYPE_NEON_LOAD4_ALL, EXYNOS_M1_NEON_TYPE_NEON_STORE, EXYNOS_M1_NEON_TYPE_NEON_STORE1_1, EXYNOS_M1_NEON_TYPE_NEON_STORE1_2, EXYNOS_M1_NEON_TYPE_NEON_STORE1_3, EXYNOS_M1_NEON_TYPE_NEON_STORE1_4, EXYNOS_M1_NEON_TYPE_NEON_STORE1_ONE, EXYNOS_M1_NEON_TYPE_NEON_STORE2_2, EXYNOS_M1_NEON_TYPE_NEON_STORE2_ONE, EXYNOS_M1_NEON_TYPE_NEON_STORE3_3, EXYNOS_M1_NEON_TYPE_NEON_STORE3_ONE, EXYNOS_M1_NEON_TYPE_NEON_STORE4_4, EXYNOS_M1_NEON_TYPE_NEON_STORE4_ONE, EXYNOS_M1_NEON_TYPE_UNKNOWN};
+enum attr_add_setq {ADD_SETQ_NO, ADD_SETQ_YES};
+enum attr_vqh_mnem {VQH_MNEM_VADD, VQH_MNEM_VMIN, VQH_MNEM_VMAX};
+#define INSN_SCHEDULING
+#define DELAY_SLOTS 0
+
+#endif /* GCC_INSN_ATTR_COMMON_H */
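The enums above only name the attribute values; the per-insn accessor functions that return them are declared in the companion insn-attr.h, imported next. As a hypothetical sketch (not part of the imported toolchain), a GCC plugin might combine the two headers roughly like this, assuming the plugin include path is set up and `insn' is supplied by the pass being run:

/* Hypothetical plugin snippet: classify an insn with the generated
   attribute accessors.  The surrounding plugin boilerplate
   (gcc-plugin.h, rtl.h includes, pass registration) is elided.  */
#include "insn-attr.h"   /* also pulls in insn-attr-common.h */

static bool
vfp_predicable_p (rtx_insn *insn)
{
  /* get_attr_fpu () is a per-target query (no insn argument), while
     get_attr_predicable () is per-insn, matching the declarations
     in insn-attr.h below.  */
  return get_attr_fpu () == FPU_VFP
	 && get_attr_predicable (insn) == PREDICABLE_YES;
}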
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-attr.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-attr.h
new file mode 100644
index 0000000..cfbe1b0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-attr.h
@@ -0,0 +1,370 @@
+/* Generated automatically by the program `genattr'
+ from the machine description file `md'. */
+
+#ifndef GCC_INSN_ATTR_H
+#define GCC_INSN_ATTR_H
+
+#include "insn-attr-common.h"
+
+#define HAVE_ATTR_nonce_enabled 1
+extern enum attr_nonce_enabled get_attr_nonce_enabled (rtx_insn *);
+
+#define HAVE_ATTR_ce_enabled 1
+extern enum attr_ce_enabled get_attr_ce_enabled (rtx_insn *);
+
+#define HAVE_ATTR_tune 1
+extern enum attr_tune get_attr_tune (void);
+
+#define HAVE_ATTR_autodetect_type 1
+extern enum attr_autodetect_type get_attr_autodetect_type (rtx_insn *);
+
+#define HAVE_ATTR_type 1
+extern enum attr_type get_attr_type (rtx_insn *);
+
+#define HAVE_ATTR_mul32 1
+extern enum attr_mul32 get_attr_mul32 (rtx_insn *);
+
+#define HAVE_ATTR_widen_mul64 1
+extern enum attr_widen_mul64 get_attr_widen_mul64 (rtx_insn *);
+
+#define HAVE_ATTR_is_neon_type 1
+extern enum attr_is_neon_type get_attr_is_neon_type (rtx_insn *);
+
+#define HAVE_ATTR_is_mve_type 1
+extern enum attr_is_mve_type get_attr_is_mve_type (rtx_insn *);
+
+#define HAVE_ATTR_is_thumb 1
+extern enum attr_is_thumb get_attr_is_thumb (void);
+
+#define HAVE_ATTR_is_arch6 1
+extern enum attr_is_arch6 get_attr_is_arch6 (void);
+
+#define HAVE_ATTR_is_thumb1 1
+extern enum attr_is_thumb1 get_attr_is_thumb1 (void);
+
+#define HAVE_ATTR_predicable_short_it 1
+extern enum attr_predicable_short_it get_attr_predicable_short_it (rtx_insn *);
+
+#define HAVE_ATTR_enabled_for_short_it 1
+extern enum attr_enabled_for_short_it get_attr_enabled_for_short_it (rtx_insn *);
+
+#define HAVE_ATTR_required_for_purecode 1
+extern enum attr_required_for_purecode get_attr_required_for_purecode (rtx_insn *);
+
+#define HAVE_ATTR_shift 1
+extern int get_attr_shift (rtx_insn *);
+#define HAVE_ATTR_fp 1
+extern enum attr_fp get_attr_fp (rtx_insn *);
+
+#define HAVE_ATTR_fpu 1
+extern enum attr_fpu get_attr_fpu (void);
+
+#define HAVE_ATTR_predicated 1
+extern enum attr_predicated get_attr_predicated (rtx_insn *);
+
+#define HAVE_ATTR_length 1
+extern int get_attr_length (rtx_insn *);
+extern void shorten_branches (rtx_insn *);
+extern int insn_default_length (rtx_insn *);
+extern int insn_min_length (rtx_insn *);
+extern int insn_variable_length_p (rtx_insn *);
+extern int insn_current_length (rtx_insn *);
+
+#include "insn-addr.h"
+
+#define HAVE_ATTR_arch 1
+extern enum attr_arch get_attr_arch (rtx_insn *);
+
+#define HAVE_ATTR_arch_enabled 1
+extern enum attr_arch_enabled get_attr_arch_enabled (rtx_insn *);
+
+#define HAVE_ATTR_opt 1
+extern enum attr_opt get_attr_opt (rtx_insn *);
+
+#define HAVE_ATTR_opt_enabled 1
+extern enum attr_opt_enabled get_attr_opt_enabled (rtx_insn *);
+
+#define HAVE_ATTR_use_literal_pool 1
+extern enum attr_use_literal_pool get_attr_use_literal_pool (rtx_insn *);
+
+#define HAVE_ATTR_enabled 1
+extern enum attr_enabled get_attr_enabled (rtx_insn *);
+
+#define HAVE_ATTR_arm_pool_range 1
+extern int get_attr_arm_pool_range (rtx_insn *);
+#define HAVE_ATTR_thumb2_pool_range 1
+extern int get_attr_thumb2_pool_range (rtx_insn *);
+#define HAVE_ATTR_arm_neg_pool_range 1
+extern int get_attr_arm_neg_pool_range (rtx_insn *);
+#define HAVE_ATTR_thumb2_neg_pool_range 1
+extern int get_attr_thumb2_neg_pool_range (rtx_insn *);
+#define HAVE_ATTR_pool_range 1
+extern int get_attr_pool_range (rtx_insn *);
+#define HAVE_ATTR_neg_pool_range 1
+extern int get_attr_neg_pool_range (rtx_insn *);
+#define HAVE_ATTR_ldsched 1
+extern enum attr_ldsched get_attr_ldsched (void);
+
+#define HAVE_ATTR_conds 1
+extern enum attr_conds get_attr_conds (rtx_insn *);
+
+#define HAVE_ATTR_predicable 1
+extern enum attr_predicable get_attr_predicable (rtx_insn *);
+
+#define HAVE_ATTR_model_wbuf 1
+extern enum attr_model_wbuf get_attr_model_wbuf (void);
+
+#define HAVE_ATTR_write_conflict 1
+extern enum attr_write_conflict get_attr_write_conflict (rtx_insn *);
+
+#define HAVE_ATTR_core_cycles 1
+extern enum attr_core_cycles get_attr_core_cycles (rtx_insn *);
+
+#define HAVE_ATTR_far_jump 1
+extern enum attr_far_jump get_attr_far_jump (rtx_insn *);
+
+#define HAVE_ATTR_ce_count 1
+extern int get_attr_ce_count (rtx_insn *);
+#define HAVE_ATTR_tune_cortexr4 1
+extern enum attr_tune_cortexr4 get_attr_tune_cortexr4 (void);
+
+#define HAVE_ATTR_generic_sched 1
+extern enum attr_generic_sched get_attr_generic_sched (void);
+
+#define HAVE_ATTR_generic_vfp 1
+extern enum attr_generic_vfp get_attr_generic_vfp (void);
+
+#define HAVE_ATTR_marvell_f_iwmmxt 1
+extern enum attr_marvell_f_iwmmxt get_attr_marvell_f_iwmmxt (void);
+
+#define HAVE_ATTR_wmmxt_shift 1
+extern enum attr_wmmxt_shift get_attr_wmmxt_shift (rtx_insn *);
+
+#define HAVE_ATTR_wmmxt_pack 1
+extern enum attr_wmmxt_pack get_attr_wmmxt_pack (rtx_insn *);
+
+#define HAVE_ATTR_wmmxt_mult_c1 1
+extern enum attr_wmmxt_mult_c1 get_attr_wmmxt_mult_c1 (rtx_insn *);
+
+#define HAVE_ATTR_wmmxt_mult_c2 1
+extern enum attr_wmmxt_mult_c2 get_attr_wmmxt_mult_c2 (rtx_insn *);
+
+#define HAVE_ATTR_wmmxt_alu_c1 1
+extern enum attr_wmmxt_alu_c1 get_attr_wmmxt_alu_c1 (rtx_insn *);
+
+#define HAVE_ATTR_wmmxt_alu_c2 1
+extern enum attr_wmmxt_alu_c2 get_attr_wmmxt_alu_c2 (rtx_insn *);
+
+#define HAVE_ATTR_wmmxt_alu_c3 1
+extern enum attr_wmmxt_alu_c3 get_attr_wmmxt_alu_c3 (rtx_insn *);
+
+#define HAVE_ATTR_wmmxt_transfer_c1 1
+extern enum attr_wmmxt_transfer_c1 get_attr_wmmxt_transfer_c1 (rtx_insn *);
+
+#define HAVE_ATTR_wmmxt_transfer_c2 1
+extern enum attr_wmmxt_transfer_c2 get_attr_wmmxt_transfer_c2 (rtx_insn *);
+
+#define HAVE_ATTR_wmmxt_transfer_c3 1
+extern enum attr_wmmxt_transfer_c3 get_attr_wmmxt_transfer_c3 (rtx_insn *);
+
+#define HAVE_ATTR_vfp10 1
+extern enum attr_vfp10 get_attr_vfp10 (void);
+
+#define HAVE_ATTR_cortex_a7_neon_type 1
+extern enum attr_cortex_a7_neon_type get_attr_cortex_a7_neon_type (rtx_insn *);
+
+#define HAVE_ATTR_cortex_a8_neon_type 1
+extern enum attr_cortex_a8_neon_type get_attr_cortex_a8_neon_type (rtx_insn *);
+
+#define HAVE_ATTR_cortex_a9_neon_type 1
+extern enum attr_cortex_a9_neon_type get_attr_cortex_a9_neon_type (rtx_insn *);
+
+#define HAVE_ATTR_cortex_a15_neon_type 1
+extern enum attr_cortex_a15_neon_type get_attr_cortex_a15_neon_type (rtx_insn *);
+
+#define HAVE_ATTR_cortex_a17_neon_type 1
+extern enum attr_cortex_a17_neon_type get_attr_cortex_a17_neon_type (rtx_insn *);
+
+#define HAVE_ATTR_cortex_a53_advsimd_type 1
+extern enum attr_cortex_a53_advsimd_type get_attr_cortex_a53_advsimd_type (rtx_insn *);
+
+#define HAVE_ATTR_cortex_a57_neon_type 1
+extern enum attr_cortex_a57_neon_type get_attr_cortex_a57_neon_type (rtx_insn *);
+
+#define HAVE_ATTR_exynos_m1_neon_type 1
+extern enum attr_exynos_m1_neon_type get_attr_exynos_m1_neon_type (rtx_insn *);
+
+#define HAVE_ATTR_add_setq 1
+extern enum attr_add_setq get_attr_add_setq (rtx_insn *);
+
+#define HAVE_ATTR_vqh_mnem 1
+extern enum attr_vqh_mnem get_attr_vqh_mnem (rtx_insn *);
+
+extern int num_delay_slots (rtx_insn *);
+extern int eligible_for_delay (rtx_insn *, int, rtx_insn *, int);
+
+extern int const_num_delay_slots (rtx_insn *);
+
+#define ANNUL_IFTRUE_SLOTS 0
+extern int eligible_for_annul_true (rtx_insn *, int, rtx_insn *, int);
+#define ANNUL_IFFALSE_SLOTS 0
+extern int eligible_for_annul_false (rtx_insn *, int, rtx_insn *, int);
+
+/* DFA-based pipeline interface. */
+#ifndef AUTOMATON_ALTS
+#define AUTOMATON_ALTS 0
+#endif
+
+
+#ifndef AUTOMATON_STATE_ALTS
+#define AUTOMATON_STATE_ALTS 0
+#endif
+
+#ifndef CPU_UNITS_QUERY
+#define CPU_UNITS_QUERY 0
+#endif
+
+#define init_sched_attrs() do { } while (0)
+
+/* Internal insn code number used by automata. */
+extern int internal_dfa_insn_code (rtx_insn *);
+
+/* Insn latency time defined in define_insn_reservation. */
+extern int insn_default_latency (rtx_insn *);
+
+/* Return nonzero if there is a bypass for the given insn,
+   which is a data producer. */
+
+/* Insn latency time for data consumed by the 2nd insn.
+   Use this function if bypass_p returns nonzero for
+   the 1st insn. */
+extern int insn_latency (rtx_insn *, rtx_insn *);
+
+/* Maximal possible insn latency time over all bypasses for this insn.
+   Use this function if bypass_p returns nonzero for
+   the 1st insn. */
+extern int maximal_insn_latency (rtx_insn *);
+
+
+#if AUTOMATON_ALTS
+/* The following function returns the number of alternative
+   reservations of the given insn.  It may be used for better
+   insn scheduling heuristics. */
+extern int insn_alts (rtx);
+
+#endif
+
+/* Maximal possible number of insns waiting for results being
+   produced by insns whose execution is not finished. */
+extern const int max_insn_queue_index;
+
+/* Pointer to data describing the current state of the DFA. */
+typedef void *state_t;
+
+/* Size of the state data in bytes. */
+extern int state_size (void);
+
+/* Initialize the given DFA state, i.e. set it up as if
+   no functional units were reserved. */
+extern void state_reset (state_t);
+/* The following function returns a negative value if the
+   given insn can be issued in the processor state described
+   by the given DFA state.  In that case, the DFA state is
+   changed to reflect the current and future reservations of
+   the insn.  Otherwise the function returns the minimal time
+   delay before the insn can be issued.  This delay may be
+   zero for superscalar or VLIW processors.  If the second
+   parameter is NULL, the function changes the given DFA state
+   as if a new processor cycle had started. */
+extern int state_transition (state_t, rtx);
+
+#if AUTOMATON_STATE_ALTS
+/* The following function returns the number of possible
+   alternative reservations of the given insn in the given
+   DFA state.  It may be used for better insn scheduling
+   heuristics.  The function is only defined when the macro
+   AUTOMATON_STATE_ALTS is defined, because its
+   implementation may require much memory. */
+extern int state_alts (state_t, rtx);
+#endif
+
+extern int min_issue_delay (state_t, rtx_insn *);
+/* The following function returns nonzero if no insn
+   can be issued in the current DFA state. */
+extern int state_dead_lock_p (state_t);
+/* The function returns the minimal delay before the 2nd insn
+   can be issued after issuing the 1st insn in the given DFA
+   state.  The 1st insn must be issuable in the given state
+   (i.e. state_transition should return a negative value for
+   the insn and the state).  Data dependencies between
+   the insns are ignored by the function. */
+extern int min_insn_conflict_delay (state_t, rtx_insn *, rtx_insn *);
+/* The following function outputs the reservations for the
+   given insn as they are described in the corresponding
+   define_insn_reservation. */
+extern void print_reservation (FILE *, rtx_insn *);
+
+#if CPU_UNITS_QUERY
+/* The following function returns the code of the functional
+   unit with the given name (see define_cpu_unit). */
+extern int get_cpu_unit_code (const char *);
+/* The following function returns nonzero if the functional
+   unit with the given code is currently reserved in the given
+   DFA state. */
+extern int cpu_unit_reservation_p (state_t, int);
+#endif
+
+/* The following function returns true if the insn
+   has a DFA reservation. */
+extern bool insn_has_dfa_reservation_p (rtx_insn *);
+
+/* Clear the insn code cache.  It should be called if there
+   is a chance that a condition value in a
+   define_insn_reservation has changed since the
+   last call to dfa_start. */
+extern void dfa_clean_insn_cache (void);
+
+extern void dfa_clear_single_insn_cache (rtx_insn *);
+
+/* Initialize and finalize work with the DFA.  They should be
+   called as the first and the last interface
+   functions, respectively. */
+extern void dfa_start (void);
+extern void dfa_finish (void);
+#ifndef HAVE_ATTR_length
+#define HAVE_ATTR_length 0
+#endif
+#ifndef HAVE_ATTR_enabled
+#define HAVE_ATTR_enabled 0
+#endif
+#ifndef HAVE_ATTR_preferred_for_size
+#define HAVE_ATTR_preferred_for_size 0
+#endif
+#ifndef HAVE_ATTR_preferred_for_speed
+#define HAVE_ATTR_preferred_for_speed 0
+#endif
+#if !HAVE_ATTR_length
+extern int hook_int_rtx_insn_unreachable (rtx_insn *);
+#define insn_default_length hook_int_rtx_insn_unreachable
+#define insn_min_length hook_int_rtx_insn_unreachable
+#define insn_variable_length_p hook_int_rtx_insn_unreachable
+#define insn_current_length hook_int_rtx_insn_unreachable
+#include "insn-addr.h"
+#endif
+extern int hook_int_rtx_1 (rtx);
+#if !HAVE_ATTR_enabled
+#define get_attr_enabled hook_int_rtx_1
+#endif
+#if !HAVE_ATTR_preferred_for_size
+#define get_attr_preferred_for_size hook_int_rtx_1
+#endif
+#if !HAVE_ATTR_preferred_for_speed
+#define get_attr_preferred_for_speed hook_int_rtx_1
+#endif
+
+
+#define ATTR_FLAG_forward 0x1
+#define ATTR_FLAG_backward 0x2
+
+#endif /* GCC_INSN_ATTR_H */
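Taken together, the DFA declarations above define the whole scheduling handshake: dfa_start and dfa_finish bracket the session, state_reset clears a state buffer of state_size bytes, and state_transition either issues an insn (negative return, state updated) or reports the minimal delay, advancing one cycle when its second argument is NULL. A minimal sketch of that driving loop, assuming a hypothetical next_ready_insn() supplies the insn stream (illustrative only, not code from the import):

#include <stdlib.h>

extern rtx_insn *next_ready_insn (void);  /* assumed helper, not GCC API */

static void
drive_dfa_sketch (void)
{
  state_t state = malloc (state_size ()); /* one DFA state buffer */
  dfa_start ();                           /* first interface call */
  state_reset (state);                    /* no units reserved yet */
  for (rtx_insn *insn = next_ready_insn (); insn; insn = next_ready_insn ())
    /* Negative return: issued, state updated.  Non-negative: the
       minimal delay in cycles, so advance the clock and retry.  */
    while (state_transition (state, insn) >= 0)
      state_transition (state, NULL);     /* start a new cycle */
  dfa_finish ();                          /* last interface call */
  free (state);
}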
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-codes.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-codes.h
new file mode 100644
index 0000000..0a531b2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-codes.h
@@ -0,0 +1,5598 @@
+/* Generated automatically by the program `gencodes'
+ from the machine description file `md'. */
+
+#ifndef GCC_INSN_CODES_H
+#define GCC_INSN_CODES_H
+
+enum insn_code {
+ CODE_FOR_nothing = 0,
+ CODE_FOR_addsi3_compareV_reg = 8,
+ CODE_FOR_subvsi3_intmin = 10,
+ CODE_FOR_addsi3_compareV_imm = 11,
+ CODE_FOR_addsi3_compareV_imm_nosum = 12,
+ CODE_FOR_addsi3_compare0 = 13,
+ CODE_FOR_cmpsi2_addneg = 16,
+ CODE_FOR_addsi3_compare_op1 = 17,
+ CODE_FOR_addsi3_carryin = 21,
+ CODE_FOR_add0si3_carryin = 22,
+ CODE_FOR_subsi3_compare1 = 26,
+ CODE_FOR_subvsi3 = 27,
+ CODE_FOR_subvsi3_imm1 = 28,
+ CODE_FOR_subsi3_carryin = 29,
+ CODE_FOR_rsbsi_carryin_reg = 30,
+ CODE_FOR_add_not_shift_cin = 32,
+ CODE_FOR_cmpsi3_carryin_CC_NVout = 33,
+ CODE_FOR_cmpsi3_carryin_CC_Bout = 34,
+ CODE_FOR_cmpsi3_imm_carryin_CC_NVout = 35,
+ CODE_FOR_cmpsi3_imm_carryin_CC_Bout = 36,
+ CODE_FOR_cmpsi3_0_carryin_CC_NVout = 37,
+ CODE_FOR_cmpsi3_0_carryin_CC_Bout = 38,
+ CODE_FOR_subsi3_compare0 = 46,
+ CODE_FOR_subsi3_compare = 47,
+ CODE_FOR_rsb_imm_compare = 48,
+ CODE_FOR_rsb_imm_compare_scratch = 49,
+ CODE_FOR_rscsi3_CC_NVout_scratch = 50,
+ CODE_FOR_rscsi3_CC_Bout_scratch = 51,
+ CODE_FOR_usubvsi3_borrow = 52,
+ CODE_FOR_usubvsi3_borrow_imm = 53,
+ CODE_FOR_subvsi3_borrow = 54,
+ CODE_FOR_subvsi3_borrow_imm = 55,
+ CODE_FOR_smull = 67,
+ CODE_FOR_umull = 68,
+ CODE_FOR_smlal = 69,
+ CODE_FOR_umlal = 70,
+ CODE_FOR_mulhisi3 = 73,
+ CODE_FOR_arm_smlabb_setq = 78,
+ CODE_FOR_maddhisi4tb = 79,
+ CODE_FOR_arm_smlatb_setq = 80,
+ CODE_FOR_maddhisi4tt = 81,
+ CODE_FOR_arm_smlatt_setq = 82,
+ CODE_FOR_maddhidi4 = 83,
+ CODE_FOR_arm_smlawb_insn = 86,
+ CODE_FOR_arm_smlawb_setq_insn = 87,
+ CODE_FOR_arm_smlawt_insn = 88,
+ CODE_FOR_arm_smlawt_setq_insn = 89,
+ CODE_FOR_insv_zero = 98,
+ CODE_FOR_insv_t2 = 99,
+ CODE_FOR_andsi_notsi_si = 104,
+ CODE_FOR_andsi_not_shiftsi_si = 105,
+ CODE_FOR_andsi_not_shiftsi_si_scc_no_reuse = 106,
+ CODE_FOR_andsi_not_shiftsi_si_scc = 107,
+ CODE_FOR_arm_qadd_insn = 127,
+ CODE_FOR_arm_qadd_setq_insn = 128,
+ CODE_FOR_arm_qsub_insn = 129,
+ CODE_FOR_arm_qsub_setq_insn = 130,
+ CODE_FOR_arm_get_apsr = 131,
+ CODE_FOR_arm_set_apsr = 132,
+ CODE_FOR_satsi_smin = 133,
+ CODE_FOR_satsi_smin_setq = 134,
+ CODE_FOR_satsi_smax = 135,
+ CODE_FOR_satsi_smax_setq = 136,
+ CODE_FOR_arm_cx1si = 139,
+ CODE_FOR_arm_cx1di = 140,
+ CODE_FOR_arm_cx1asi = 141,
+ CODE_FOR_arm_cx1adi = 142,
+ CODE_FOR_arm_cx2si = 143,
+ CODE_FOR_arm_cx2di = 144,
+ CODE_FOR_arm_cx2asi = 145,
+ CODE_FOR_arm_cx2adi = 146,
+ CODE_FOR_arm_cx3si = 147,
+ CODE_FOR_arm_cx3di = 148,
+ CODE_FOR_arm_cx3asi = 149,
+ CODE_FOR_arm_cx3adi = 150,
+ CODE_FOR_unaligned_loaddi = 157,
+ CODE_FOR_unaligned_loadsi = 158,
+ CODE_FOR_unaligned_loadhis = 159,
+ CODE_FOR_unaligned_loadhiu = 160,
+ CODE_FOR_unaligned_storedi = 161,
+ CODE_FOR_unaligned_storesi = 162,
+ CODE_FOR_unaligned_storehi = 163,
+ CODE_FOR_extzv_t2 = 165,
+ CODE_FOR_divsi3 = 166,
+ CODE_FOR_udivsi3 = 167,
+ CODE_FOR_negsi2_0compare = 169,
+ CODE_FOR_negsi2_carryin = 170,
+ CODE_FOR_arm_sxtb16 = 190,
+ CODE_FOR_arm_uxtb16 = 191,
+ CODE_FOR_arm_qadd8 = 192,
+ CODE_FOR_arm_qsub8 = 193,
+ CODE_FOR_arm_shadd8 = 194,
+ CODE_FOR_arm_shsub8 = 195,
+ CODE_FOR_arm_uhadd8 = 196,
+ CODE_FOR_arm_uhsub8 = 197,
+ CODE_FOR_arm_uqadd8 = 198,
+ CODE_FOR_arm_uqsub8 = 199,
+ CODE_FOR_arm_qadd16 = 200,
+ CODE_FOR_arm_qasx = 201,
+ CODE_FOR_arm_qsax = 202,
+ CODE_FOR_arm_qsub16 = 203,
+ CODE_FOR_arm_shadd16 = 204,
+ CODE_FOR_arm_shasx = 205,
+ CODE_FOR_arm_shsax = 206,
+ CODE_FOR_arm_shsub16 = 207,
+ CODE_FOR_arm_uhadd16 = 208,
+ CODE_FOR_arm_uhasx = 209,
+ CODE_FOR_arm_uhsax = 210,
+ CODE_FOR_arm_uhsub16 = 211,
+ CODE_FOR_arm_uqadd16 = 212,
+ CODE_FOR_arm_uqasx = 213,
+ CODE_FOR_arm_uqsax = 214,
+ CODE_FOR_arm_uqsub16 = 215,
+ CODE_FOR_arm_smusd = 216,
+ CODE_FOR_arm_smusdx = 217,
+ CODE_FOR_arm_sxtab16 = 218,
+ CODE_FOR_arm_uxtab16 = 219,
+ CODE_FOR_arm_usad8 = 220,
+ CODE_FOR_arm_usada8 = 221,
+ CODE_FOR_arm_smlald = 222,
+ CODE_FOR_arm_smlaldx = 223,
+ CODE_FOR_arm_smlsld = 224,
+ CODE_FOR_arm_smlsldx = 225,
+ CODE_FOR_arm_sadd8 = 226,
+ CODE_FOR_arm_ssub8 = 227,
+ CODE_FOR_arm_uadd8 = 228,
+ CODE_FOR_arm_usub8 = 229,
+ CODE_FOR_arm_sadd16 = 230,
+ CODE_FOR_arm_sasx = 231,
+ CODE_FOR_arm_ssax = 232,
+ CODE_FOR_arm_ssub16 = 233,
+ CODE_FOR_arm_uadd16 = 234,
+ CODE_FOR_arm_uasx = 235,
+ CODE_FOR_arm_usax = 236,
+ CODE_FOR_arm_usub16 = 237,
+ CODE_FOR_arm_smlad_insn = 238,
+ CODE_FOR_arm_smlad_setq_insn = 239,
+ CODE_FOR_arm_smladx_insn = 240,
+ CODE_FOR_arm_smladx_setq_insn = 241,
+ CODE_FOR_arm_smlsd_insn = 242,
+ CODE_FOR_arm_smlsd_setq_insn = 243,
+ CODE_FOR_arm_smlsdx_insn = 244,
+ CODE_FOR_arm_smlsdx_setq_insn = 245,
+ CODE_FOR_arm_smuad_insn = 246,
+ CODE_FOR_arm_smuad_setq_insn = 247,
+ CODE_FOR_arm_smuadx_insn = 248,
+ CODE_FOR_arm_smuadx_setq_insn = 249,
+ CODE_FOR_arm_ssat16_insn = 250,
+ CODE_FOR_arm_ssat16_setq_insn = 251,
+ CODE_FOR_arm_usat16_insn = 252,
+ CODE_FOR_arm_usat16_setq_insn = 253,
+ CODE_FOR_arm_sel = 254,
+ CODE_FOR_pic_load_addr_unified = 258,
+ CODE_FOR_pic_load_addr_32bit = 259,
+ CODE_FOR_pic_load_addr_thumb1 = 260,
+ CODE_FOR_pic_add_dot_plus_four = 261,
+ CODE_FOR_pic_add_dot_plus_eight = 262,
+ CODE_FOR_tls_load_dot_plus_eight = 263,
+ CODE_FOR_pic_offset_arm = CODE_FOR_nothing,
+ CODE_FOR_arm_cond_branch = 277,
+ CODE_FOR_restore_pic_register_after_call = 289,
+ CODE_FOR_blockage = 305,
+ CODE_FOR_probe_stack = 306,
+ CODE_FOR_probe_stack_range = 307,
+ CODE_FOR_arm_stack_protect_test_insn = 311,
+ CODE_FOR_stack_protect_set_tls = 312,
+ CODE_FOR_stack_protect_test_tls = 313,
+ CODE_FOR_nop = 317,
+ CODE_FOR_trap = 318,
+ CODE_FOR_movcond_addsi = 350,
+ CODE_FOR_movcond = 351,
+ CODE_FOR_stack_tie = 385,
+ CODE_FOR_align_4 = 391,
+ CODE_FOR_align_8 = 392,
+ CODE_FOR_consttable_end = 393,
+ CODE_FOR_consttable_1 = 394,
+ CODE_FOR_consttable_2 = 395,
+ CODE_FOR_consttable_4 = 396,
+ CODE_FOR_consttable_8 = 397,
+ CODE_FOR_consttable_16 = 398,
+ CODE_FOR_clzsi2 = 399,
+ CODE_FOR_rbitsi2 = 400,
+ CODE_FOR_ctzsi2 = 401,
+ CODE_FOR_prefetch = 402,
+ CODE_FOR_force_register_use = 403,
+ CODE_FOR_arm_eh_return = 404,
+ CODE_FOR_load_tp_hard = 405,
+ CODE_FOR_reload_tp_hard = 406,
+ CODE_FOR_load_tp_soft_fdpic = 407,
+ CODE_FOR_load_tp_soft = 408,
+ CODE_FOR_tlscall = 409,
+ CODE_FOR_arm_rev16si2 = 414,
+ CODE_FOR_arm_rev16si2_alt = 415,
+ CODE_FOR_arm_crc32b = 422,
+ CODE_FOR_arm_crc32h = 423,
+ CODE_FOR_arm_crc32w = 424,
+ CODE_FOR_arm_crc32cb = 425,
+ CODE_FOR_arm_crc32ch = 426,
+ CODE_FOR_arm_crc32cw = 427,
+ CODE_FOR_arm_cdp = 488,
+ CODE_FOR_arm_cdp2 = 489,
+ CODE_FOR_arm_mcr = 498,
+ CODE_FOR_arm_mcr2 = 499,
+ CODE_FOR_arm_mrc = 500,
+ CODE_FOR_arm_mrc2 = 501,
+ CODE_FOR_arm_mcrr = 502,
+ CODE_FOR_arm_mcrr2 = 503,
+ CODE_FOR_arm_mrrc = 504,
+ CODE_FOR_arm_mrrc2 = 505,
+ CODE_FOR_pac_nop = 507,
+ CODE_FOR_pacbti_nop = 508,
+ CODE_FOR_aut_nop = 509,
+ CODE_FOR_bti_nop = 510,
+ CODE_FOR_mve_vshlq_sv8qi = 511,
+ CODE_FOR_mve_vshlq_uv8qi = 512,
+ CODE_FOR_mve_vshlq_sv16qi = 513,
+ CODE_FOR_mve_vshlq_uv16qi = 514,
+ CODE_FOR_mve_vshlq_sv4hi = 515,
+ CODE_FOR_mve_vshlq_uv4hi = 516,
+ CODE_FOR_mve_vshlq_sv8hi = 517,
+ CODE_FOR_mve_vshlq_uv8hi = 518,
+ CODE_FOR_mve_vshlq_sv2si = 519,
+ CODE_FOR_mve_vshlq_uv2si = 520,
+ CODE_FOR_mve_vshlq_sv4si = 521,
+ CODE_FOR_mve_vshlq_uv4si = 522,
+ CODE_FOR_tbcstv8qi = 523,
+ CODE_FOR_tbcstv4hi = 524,
+ CODE_FOR_tbcstv2si = 525,
+ CODE_FOR_iwmmxt_iordi3 = 526,
+ CODE_FOR_iwmmxt_xordi3 = 527,
+ CODE_FOR_iwmmxt_anddi3 = 528,
+ CODE_FOR_iwmmxt_nanddi3 = 529,
+ CODE_FOR_movv2si_internal = 533,
+ CODE_FOR_movv4hi_internal = 534,
+ CODE_FOR_movv8qi_internal = 535,
+ CODE_FOR_ssaddv8qi3 = 548,
+ CODE_FOR_ssaddv4hi3 = 549,
+ CODE_FOR_ssaddv2si3 = 550,
+ CODE_FOR_usaddv8qi3 = 551,
+ CODE_FOR_usaddv4hi3 = 552,
+ CODE_FOR_usaddv2si3 = 553,
+ CODE_FOR_sssubv8qi3 = 557,
+ CODE_FOR_sssubv4hi3 = 558,
+ CODE_FOR_sssubv2si3 = 559,
+ CODE_FOR_ussubv8qi3 = 560,
+ CODE_FOR_ussubv4hi3 = 561,
+ CODE_FOR_ussubv2si3 = 562,
+ CODE_FOR_smulv4hi3_highpart = 564,
+ CODE_FOR_umulv4hi3_highpart = 565,
+ CODE_FOR_iwmmxt_wmacs = 566,
+ CODE_FOR_iwmmxt_wmacsz = 567,
+ CODE_FOR_iwmmxt_wmacu = 568,
+ CODE_FOR_iwmmxt_wmacuz = 569,
+ CODE_FOR_iwmmxt_clrdi = 570,
+ CODE_FOR_iwmmxt_clrv8qi = 571,
+ CODE_FOR_iwmmxt_clrv4hi = 572,
+ CODE_FOR_iwmmxt_clrv2si = 573,
+ CODE_FOR_iwmmxt_uavgrndv8qi3 = 574,
+ CODE_FOR_iwmmxt_uavgrndv4hi3 = 575,
+ CODE_FOR_iwmmxt_uavgv8qi3 = 576,
+ CODE_FOR_iwmmxt_uavgv4hi3 = 577,
+ CODE_FOR_iwmmxt_tinsrb = 578,
+ CODE_FOR_iwmmxt_tinsrh = 579,
+ CODE_FOR_iwmmxt_tinsrw = 580,
+ CODE_FOR_iwmmxt_textrmub = 581,
+ CODE_FOR_iwmmxt_textrmsb = 582,
+ CODE_FOR_iwmmxt_textrmuh = 583,
+ CODE_FOR_iwmmxt_textrmsh = 584,
+ CODE_FOR_iwmmxt_textrmw = 585,
+ CODE_FOR_iwmmxt_wshufh = 586,
+ CODE_FOR_eqv8qi3 = 587,
+ CODE_FOR_eqv4hi3 = 588,
+ CODE_FOR_eqv2si3 = 589,
+ CODE_FOR_gtuv8qi3 = 590,
+ CODE_FOR_gtuv4hi3 = 591,
+ CODE_FOR_gtuv2si3 = 592,
+ CODE_FOR_gtv8qi3 = 593,
+ CODE_FOR_gtv4hi3 = 594,
+ CODE_FOR_gtv2si3 = 595,
+ CODE_FOR_iwmmxt_wpackhss = 608,
+ CODE_FOR_iwmmxt_wpackwss = 609,
+ CODE_FOR_iwmmxt_wpackdss = 610,
+ CODE_FOR_iwmmxt_wpackhus = 611,
+ CODE_FOR_iwmmxt_wpackwus = 612,
+ CODE_FOR_iwmmxt_wpackdus = 613,
+ CODE_FOR_iwmmxt_wunpckihb = 614,
+ CODE_FOR_iwmmxt_wunpckihh = 615,
+ CODE_FOR_iwmmxt_wunpckihw = 616,
+ CODE_FOR_iwmmxt_wunpckilb = 617,
+ CODE_FOR_iwmmxt_wunpckilh = 618,
+ CODE_FOR_iwmmxt_wunpckilw = 619,
+ CODE_FOR_iwmmxt_wunpckehub = 620,
+ CODE_FOR_iwmmxt_wunpckehuh = 621,
+ CODE_FOR_iwmmxt_wunpckehuw = 622,
+ CODE_FOR_iwmmxt_wunpckehsb = 623,
+ CODE_FOR_iwmmxt_wunpckehsh = 624,
+ CODE_FOR_iwmmxt_wunpckehsw = 625,
+ CODE_FOR_iwmmxt_wunpckelub = 626,
+ CODE_FOR_iwmmxt_wunpckeluh = 627,
+ CODE_FOR_iwmmxt_wunpckeluw = 628,
+ CODE_FOR_iwmmxt_wunpckelsb = 629,
+ CODE_FOR_iwmmxt_wunpckelsh = 630,
+ CODE_FOR_iwmmxt_wunpckelsw = 631,
+ CODE_FOR_rorv4hi3 = 632,
+ CODE_FOR_rorv2si3 = 633,
+ CODE_FOR_rordi3 = 634,
+ CODE_FOR_ashrv4hi3_iwmmxt = 635,
+ CODE_FOR_ashrv2si3_iwmmxt = 636,
+ CODE_FOR_ashrdi3_iwmmxt = 637,
+ CODE_FOR_lshrv4hi3_iwmmxt = 638,
+ CODE_FOR_lshrv2si3_iwmmxt = 639,
+ CODE_FOR_lshrdi3_iwmmxt = 640,
+ CODE_FOR_ashlv4hi3_iwmmxt = 641,
+ CODE_FOR_ashlv2si3_iwmmxt = 642,
+ CODE_FOR_ashldi3_iwmmxt = 643,
+ CODE_FOR_rorv4hi3_di = 644,
+ CODE_FOR_rorv2si3_di = 645,
+ CODE_FOR_rordi3_di = 646,
+ CODE_FOR_ashrv4hi3_di = 647,
+ CODE_FOR_ashrv2si3_di = 648,
+ CODE_FOR_ashrdi3_di = 649,
+ CODE_FOR_lshrv4hi3_di = 650,
+ CODE_FOR_lshrv2si3_di = 651,
+ CODE_FOR_lshrdi3_di = 652,
+ CODE_FOR_ashlv4hi3_di = 653,
+ CODE_FOR_ashlv2si3_di = 654,
+ CODE_FOR_ashldi3_di = 655,
+ CODE_FOR_iwmmxt_wmadds = 656,
+ CODE_FOR_iwmmxt_wmaddu = 657,
+ CODE_FOR_iwmmxt_tmia = 658,
+ CODE_FOR_iwmmxt_tmiaph = 659,
+ CODE_FOR_iwmmxt_tmiabb = 660,
+ CODE_FOR_iwmmxt_tmiatb = 661,
+ CODE_FOR_iwmmxt_tmiabt = 662,
+ CODE_FOR_iwmmxt_tmiatt = 663,
+ CODE_FOR_iwmmxt_tmovmskb = 664,
+ CODE_FOR_iwmmxt_tmovmskh = 665,
+ CODE_FOR_iwmmxt_tmovmskw = 666,
+ CODE_FOR_iwmmxt_waccb = 667,
+ CODE_FOR_iwmmxt_wacch = 668,
+ CODE_FOR_iwmmxt_waccw = 669,
+ CODE_FOR_iwmmxt_waligni = 670,
+ CODE_FOR_iwmmxt_walignr = 671,
+ CODE_FOR_iwmmxt_walignr0 = 672,
+ CODE_FOR_iwmmxt_walignr1 = 673,
+ CODE_FOR_iwmmxt_walignr2 = 674,
+ CODE_FOR_iwmmxt_walignr3 = 675,
+ CODE_FOR_iwmmxt_wsadb = 676,
+ CODE_FOR_iwmmxt_wsadh = 677,
+ CODE_FOR_iwmmxt_wsadbz = 678,
+ CODE_FOR_iwmmxt_wsadhz = 679,
+ CODE_FOR_iwmmxt_wabsv2si3 = 680,
+ CODE_FOR_iwmmxt_wabsv4hi3 = 681,
+ CODE_FOR_iwmmxt_wabsv8qi3 = 682,
+ CODE_FOR_iwmmxt_wabsdiffb = 683,
+ CODE_FOR_iwmmxt_wabsdiffh = 684,
+ CODE_FOR_iwmmxt_wabsdiffw = 685,
+ CODE_FOR_iwmmxt_waddsubhx = 686,
+ CODE_FOR_iwmmxt_wsubaddhx = 687,
+ CODE_FOR_addcv4hi3 = 688,
+ CODE_FOR_addcv2si3 = 689,
+ CODE_FOR_iwmmxt_avg4 = 690,
+ CODE_FOR_iwmmxt_avg4r = 691,
+ CODE_FOR_iwmmxt_wmaddsx = 692,
+ CODE_FOR_iwmmxt_wmaddux = 693,
+ CODE_FOR_iwmmxt_wmaddsn = 694,
+ CODE_FOR_iwmmxt_wmaddun = 695,
+ CODE_FOR_iwmmxt_wmulwsm = 696,
+ CODE_FOR_iwmmxt_wmulwum = 697,
+ CODE_FOR_iwmmxt_wmulsmr = 698,
+ CODE_FOR_iwmmxt_wmulumr = 699,
+ CODE_FOR_iwmmxt_wmulwsmr = 700,
+ CODE_FOR_iwmmxt_wmulwumr = 701,
+ CODE_FOR_iwmmxt_wmulwl = 702,
+ CODE_FOR_iwmmxt_wqmulm = 703,
+ CODE_FOR_iwmmxt_wqmulwm = 704,
+ CODE_FOR_iwmmxt_wqmulmr = 705,
+ CODE_FOR_iwmmxt_wqmulwmr = 706,
+ CODE_FOR_iwmmxt_waddbhusm = 707,
+ CODE_FOR_iwmmxt_waddbhusl = 708,
+ CODE_FOR_iwmmxt_wqmiabb = 709,
+ CODE_FOR_iwmmxt_wqmiabt = 710,
+ CODE_FOR_iwmmxt_wqmiatb = 711,
+ CODE_FOR_iwmmxt_wqmiatt = 712,
+ CODE_FOR_iwmmxt_wqmiabbn = 713,
+ CODE_FOR_iwmmxt_wqmiabtn = 714,
+ CODE_FOR_iwmmxt_wqmiatbn = 715,
+ CODE_FOR_iwmmxt_wqmiattn = 716,
+ CODE_FOR_iwmmxt_wmiabb = 717,
+ CODE_FOR_iwmmxt_wmiabt = 718,
+ CODE_FOR_iwmmxt_wmiatb = 719,
+ CODE_FOR_iwmmxt_wmiatt = 720,
+ CODE_FOR_iwmmxt_wmiabbn = 721,
+ CODE_FOR_iwmmxt_wmiabtn = 722,
+ CODE_FOR_iwmmxt_wmiatbn = 723,
+ CODE_FOR_iwmmxt_wmiattn = 724,
+ CODE_FOR_iwmmxt_wmiawbb = 725,
+ CODE_FOR_iwmmxt_wmiawbt = 726,
+ CODE_FOR_iwmmxt_wmiawtb = 727,
+ CODE_FOR_iwmmxt_wmiawtt = 728,
+ CODE_FOR_iwmmxt_wmiawbbn = 729,
+ CODE_FOR_iwmmxt_wmiawbtn = 730,
+ CODE_FOR_iwmmxt_wmiawtbn = 731,
+ CODE_FOR_iwmmxt_wmiawttn = 732,
+ CODE_FOR_iwmmxt_wmerge = 733,
+ CODE_FOR_iwmmxt_tandcv2si3 = 734,
+ CODE_FOR_iwmmxt_tandcv4hi3 = 735,
+ CODE_FOR_iwmmxt_tandcv8qi3 = 736,
+ CODE_FOR_iwmmxt_torcv2si3 = 737,
+ CODE_FOR_iwmmxt_torcv4hi3 = 738,
+ CODE_FOR_iwmmxt_torcv8qi3 = 739,
+ CODE_FOR_iwmmxt_torvscv2si3 = 740,
+ CODE_FOR_iwmmxt_torvscv4hi3 = 741,
+ CODE_FOR_iwmmxt_torvscv8qi3 = 742,
+ CODE_FOR_iwmmxt_textrcv2si3 = 743,
+ CODE_FOR_iwmmxt_textrcv4hi3 = 744,
+ CODE_FOR_iwmmxt_textrcv8qi3 = 745,
+ CODE_FOR_abshf2 = 779,
+ CODE_FOR_neghf2 = 780,
+ CODE_FOR_neon_vrndhf = 781,
+ CODE_FOR_neon_vrndahf = 782,
+ CODE_FOR_neon_vrndmhf = 783,
+ CODE_FOR_neon_vrndnhf = 784,
+ CODE_FOR_neon_vrndphf = 785,
+ CODE_FOR_neon_vrndxhf = 786,
+ CODE_FOR_neon_vrndihf = 787,
+ CODE_FOR_addhf3 = 788,
+ CODE_FOR_subhf3 = 791,
+ CODE_FOR_divhf3 = 794,
+ CODE_FOR_mulhf3 = 797,
+ CODE_FOR_fmahf4 = 818,
+ CODE_FOR_fmasf4 = 819,
+ CODE_FOR_fmadf4 = 820,
+ CODE_FOR_fmsubhf4_fp16 = 821,
+ CODE_FOR_extendhfsf2 = 832,
+ CODE_FOR_truncsfhf2 = 835,
+ CODE_FOR_fixuns_truncsfsi2 = 838,
+ CODE_FOR_fixuns_truncdfsi2 = 839,
+ CODE_FOR_floatunssisf2 = 842,
+ CODE_FOR_floatunssidf2 = 843,
+ CODE_FOR_neon_vsqrthf = 844,
+ CODE_FOR_neon_vrsqrtshf = 845,
+ CODE_FOR_push_fpsysreg_insn = 849,
+ CODE_FOR_pop_fpsysreg_insn = 850,
+ CODE_FOR_lazy_store_multiple_insn = 852,
+ CODE_FOR_lazy_load_multiple_insn = 853,
+ CODE_FOR_neon_vcvthshf = 867,
+ CODE_FOR_neon_vcvthuhf = 868,
+ CODE_FOR_neon_vcvthssi = 869,
+ CODE_FOR_neon_vcvthusi = 870,
+ CODE_FOR_neon_vcvths_nhf_unspec = 871,
+ CODE_FOR_neon_vcvthu_nhf_unspec = 872,
+ CODE_FOR_neon_vcvths_nsi_unspec = 873,
+ CODE_FOR_neon_vcvthu_nsi_unspec = 874,
+ CODE_FOR_neon_vcvtahssi = 875,
+ CODE_FOR_neon_vcvtahusi = 876,
+ CODE_FOR_neon_vcvtmhssi = 877,
+ CODE_FOR_neon_vcvtmhusi = 878,
+ CODE_FOR_neon_vcvtnhssi = 879,
+ CODE_FOR_neon_vcvtnhusi = 880,
+ CODE_FOR_neon_vcvtphssi = 881,
+ CODE_FOR_neon_vcvtphusi = 882,
+ CODE_FOR_btruncsf2 = 884,
+ CODE_FOR_ceilsf2 = 885,
+ CODE_FOR_floorsf2 = 886,
+ CODE_FOR_nearbyintsf2 = 887,
+ CODE_FOR_rintsf2 = 888,
+ CODE_FOR_roundsf2 = 889,
+ CODE_FOR_btruncdf2 = 890,
+ CODE_FOR_ceildf2 = 891,
+ CODE_FOR_floordf2 = 892,
+ CODE_FOR_nearbyintdf2 = 893,
+ CODE_FOR_rintdf2 = 894,
+ CODE_FOR_rounddf2 = 895,
+ CODE_FOR_lceilsfsi2 = 896,
+ CODE_FOR_lfloorsfsi2 = 897,
+ CODE_FOR_lroundsfsi2 = 898,
+ CODE_FOR_lceilusfsi2 = 899,
+ CODE_FOR_lfloorusfsi2 = 900,
+ CODE_FOR_lroundusfsi2 = 901,
+ CODE_FOR_lceildfsi2 = 902,
+ CODE_FOR_lfloordfsi2 = 903,
+ CODE_FOR_lrounddfsi2 = 904,
+ CODE_FOR_lceiludfsi2 = 905,
+ CODE_FOR_lfloorudfsi2 = 906,
+ CODE_FOR_lroundudfsi2 = 907,
+ CODE_FOR_smaxsf3 = 908,
+ CODE_FOR_smaxdf3 = 909,
+ CODE_FOR_sminsf3 = 910,
+ CODE_FOR_smindf3 = 911,
+ CODE_FOR_neon_vmaxnmhf = 912,
+ CODE_FOR_neon_vminnmhf = 913,
+ CODE_FOR_fmaxsf3 = 914,
+ CODE_FOR_fminsf3 = 915,
+ CODE_FOR_fmaxdf3 = 916,
+ CODE_FOR_fmindf3 = 917,
+ CODE_FOR_set_fpscr = 918,
+ CODE_FOR_get_fpscr = 919,
+ CODE_FOR_no_literal_pool_df_immediate = 920,
+ CODE_FOR_no_literal_pool_sf_immediate = 921,
+ CODE_FOR_arm_vcx1si = 922,
+ CODE_FOR_arm_vcx1di = 923,
+ CODE_FOR_arm_vcx1asi = 924,
+ CODE_FOR_arm_vcx1adi = 925,
+ CODE_FOR_arm_vcx2si = 926,
+ CODE_FOR_arm_vcx2di = 927,
+ CODE_FOR_arm_vcx2asi = 928,
+ CODE_FOR_arm_vcx2adi = 929,
+ CODE_FOR_arm_vcx3si = 930,
+ CODE_FOR_arm_vcx3di = 931,
+ CODE_FOR_arm_vcx3asi = 932,
+ CODE_FOR_arm_vcx3adi = 933,
+ CODE_FOR_thumb1_subsi3_insn = 937,
+ CODE_FOR_thumb1_bicsi3 = 941,
+ CODE_FOR_thumb1_extendhisi2 = 956,
+ CODE_FOR_thumb1_extendqisi2 = 957,
+ CODE_FOR_cpymem12b = 965,
+ CODE_FOR_cpymem8b = 966,
+ CODE_FOR_thumb1_cbz = 967,
+ CODE_FOR_cbranchsi4_insn = 968,
+ CODE_FOR_cbranchsi4_scratch = 969,
+ CODE_FOR_cstoresi_nltu_thumb1 = 980,
+ CODE_FOR_cstoresi_ltu_thumb1 = 981,
+ CODE_FOR_thumb1_addsi3_addgeu = 982,
+ CODE_FOR_thumb1_casesi_dispatch = 992,
+ CODE_FOR_prologue_thumb1_interwork = 994,
+ CODE_FOR_thumb_eh_return = 998,
+ CODE_FOR_thumb1_stack_protect_test_insn = 999,
+ CODE_FOR_tls_load_dot_plus_four = 1008,
+ CODE_FOR_thumb2_zero_extendqisi2_v6 = 1039,
+ CODE_FOR_thumb2_eh_return = 1044,
+ CODE_FOR_thumb2_addsi3_compare0 = 1052,
+ CODE_FOR_thumb2_asrl = 1065,
+ CODE_FOR_thumb2_lsll = 1066,
+ CODE_FOR_thumb2_lsrl = 1067,
+ CODE_FOR_dls_insn = 1069,
+ CODE_FOR_unaligned_storev8qi = 1070,
+ CODE_FOR_vec_setv8qi_internal = 1118,
+ CODE_FOR_vec_setv4hi_internal = 1119,
+ CODE_FOR_vec_setv4hf_internal = 1120,
+ CODE_FOR_vec_setv4bf_internal = 1121,
+ CODE_FOR_vec_setv2si_internal = 1122,
+ CODE_FOR_vec_setv2sf_internal = 1123,
+ CODE_FOR_vec_setv16qi_internal = 1124,
+ CODE_FOR_vec_setv8hi_internal = 1125,
+ CODE_FOR_vec_setv8hf_internal = 1126,
+ CODE_FOR_vec_setv4si_internal = 1127,
+ CODE_FOR_vec_setv4sf_internal = 1128,
+ CODE_FOR_vec_setv2di_internal = 1129,
+ CODE_FOR_vec_extractv8qiqi = 1130,
+ CODE_FOR_vec_extractv4hihi = 1131,
+ CODE_FOR_vec_extractv4hfhf = 1132,
+ CODE_FOR_vec_extractv4bfbf = 1133,
+ CODE_FOR_vec_extractv2sisi = 1134,
+ CODE_FOR_vec_extractv2sfsf = 1135,
+ CODE_FOR_neon_vec_extractv16qiqi = 1136,
+ CODE_FOR_neon_vec_extractv8hihi = 1137,
+ CODE_FOR_neon_vec_extractv8hfhf = 1138,
+ CODE_FOR_neon_vec_extractv4sisi = 1139,
+ CODE_FOR_neon_vec_extractv4sfsf = 1140,
+ CODE_FOR_neon_vec_extractv2didi = 1141,
+ CODE_FOR_mulv8qi3addv8qi_neon = 1172,
+ CODE_FOR_mulv16qi3addv16qi_neon = 1173,
+ CODE_FOR_mulv4hi3addv4hi_neon = 1174,
+ CODE_FOR_mulv8hi3addv8hi_neon = 1175,
+ CODE_FOR_mulv2si3addv2si_neon = 1176,
+ CODE_FOR_mulv4si3addv4si_neon = 1177,
+ CODE_FOR_mulv2sf3addv2sf_neon = 1178,
+ CODE_FOR_mulv4sf3addv4sf_neon = 1179,
+ CODE_FOR_mulv8hf3addv8hf_neon = 1180,
+ CODE_FOR_mulv4hf3addv4hf_neon = 1181,
+ CODE_FOR_mulv8qi3negv8qiaddv8qi_neon = 1182,
+ CODE_FOR_mulv16qi3negv16qiaddv16qi_neon = 1183,
+ CODE_FOR_mulv4hi3negv4hiaddv4hi_neon = 1184,
+ CODE_FOR_mulv8hi3negv8hiaddv8hi_neon = 1185,
+ CODE_FOR_mulv2si3negv2siaddv2si_neon = 1186,
+ CODE_FOR_mulv4si3negv4siaddv4si_neon = 1187,
+ CODE_FOR_mulv2sf3negv2sfaddv2sf_neon = 1188,
+ CODE_FOR_mulv4sf3negv4sfaddv4sf_neon = 1189,
+ CODE_FOR_fmav2sf4 = 1190,
+ CODE_FOR_fmav4sf4 = 1191,
+ CODE_FOR_fmav2sf4_intrinsic = 1192,
+ CODE_FOR_fmav4sf4_intrinsic = 1193,
+ CODE_FOR_fmav8hf4 = 1194,
+ CODE_FOR_fmav4hf4 = 1195,
+ CODE_FOR_fmsubv2sf4_intrinsic = 1198,
+ CODE_FOR_fmsubv4sf4_intrinsic = 1199,
+ CODE_FOR_fmsubv8hf4_intrinsic = 1200,
+ CODE_FOR_fmsubv4hf4_intrinsic = 1201,
+ CODE_FOR_neon_vrintpv2sf = 1202,
+ CODE_FOR_neon_vrintzv2sf = 1203,
+ CODE_FOR_neon_vrintmv2sf = 1204,
+ CODE_FOR_neon_vrintxv2sf = 1205,
+ CODE_FOR_neon_vrintav2sf = 1206,
+ CODE_FOR_neon_vrintnv2sf = 1207,
+ CODE_FOR_neon_vrintpv4sf = 1208,
+ CODE_FOR_neon_vrintzv4sf = 1209,
+ CODE_FOR_neon_vrintmv4sf = 1210,
+ CODE_FOR_neon_vrintxv4sf = 1211,
+ CODE_FOR_neon_vrintav4sf = 1212,
+ CODE_FOR_neon_vrintnv4sf = 1213,
+ CODE_FOR_neon_vcvtpv2sfv2si = 1214,
+ CODE_FOR_neon_vcvtmv2sfv2si = 1215,
+ CODE_FOR_neon_vcvtav2sfv2si = 1216,
+ CODE_FOR_neon_vcvtpuv2sfv2si = 1217,
+ CODE_FOR_neon_vcvtmuv2sfv2si = 1218,
+ CODE_FOR_neon_vcvtauv2sfv2si = 1219,
+ CODE_FOR_neon_vcvtpv4sfv4si = 1220,
+ CODE_FOR_neon_vcvtmv4sfv4si = 1221,
+ CODE_FOR_neon_vcvtav4sfv4si = 1222,
+ CODE_FOR_neon_vcvtpuv4sfv4si = 1223,
+ CODE_FOR_neon_vcvtmuv4sfv4si = 1224,
+ CODE_FOR_neon_vcvtauv4sfv4si = 1225,
+ CODE_FOR_iorv8qi3_neon = 1226,
+ CODE_FOR_iorv16qi3_neon = 1227,
+ CODE_FOR_iorv4hi3_neon = 1228,
+ CODE_FOR_iorv8hi3_neon = 1229,
+ CODE_FOR_iorv2si3_neon = 1230,
+ CODE_FOR_iorv4si3_neon = 1231,
+ CODE_FOR_iorv4hf3_neon = 1232,
+ CODE_FOR_iorv8hf3_neon = 1233,
+ CODE_FOR_iorv2sf3_neon = 1234,
+ CODE_FOR_iorv4sf3_neon = 1235,
+ CODE_FOR_iorv2di3_neon = 1236,
+ CODE_FOR_andv8qi3_neon = 1237,
+ CODE_FOR_andv16qi3_neon = 1238,
+ CODE_FOR_andv4hi3_neon = 1239,
+ CODE_FOR_andv8hi3_neon = 1240,
+ CODE_FOR_andv2si3_neon = 1241,
+ CODE_FOR_andv4si3_neon = 1242,
+ CODE_FOR_andv4hf3_neon = 1243,
+ CODE_FOR_andv8hf3_neon = 1244,
+ CODE_FOR_andv2sf3_neon = 1245,
+ CODE_FOR_andv4sf3_neon = 1246,
+ CODE_FOR_andv2di3_neon = 1247,
+ CODE_FOR_ornv8qi3_neon = 1248,
+ CODE_FOR_ornv16qi3_neon = 1249,
+ CODE_FOR_ornv4hi3_neon = 1250,
+ CODE_FOR_ornv8hi3_neon = 1251,
+ CODE_FOR_ornv2si3_neon = 1252,
+ CODE_FOR_ornv4si3_neon = 1253,
+ CODE_FOR_ornv4hf3_neon = 1254,
+ CODE_FOR_ornv8hf3_neon = 1255,
+ CODE_FOR_ornv2sf3_neon = 1256,
+ CODE_FOR_ornv4sf3_neon = 1257,
+ CODE_FOR_ornv2di3_neon = 1258,
+ CODE_FOR_bicv8qi3_neon = 1259,
+ CODE_FOR_bicv16qi3_neon = 1260,
+ CODE_FOR_bicv4hi3_neon = 1261,
+ CODE_FOR_bicv8hi3_neon = 1262,
+ CODE_FOR_bicv2si3_neon = 1263,
+ CODE_FOR_bicv4si3_neon = 1264,
+ CODE_FOR_bicv4hf3_neon = 1265,
+ CODE_FOR_bicv8hf3_neon = 1266,
+ CODE_FOR_bicv2sf3_neon = 1267,
+ CODE_FOR_bicv4sf3_neon = 1268,
+ CODE_FOR_bicv2di3_neon = 1269,
+ CODE_FOR_xorv8qi3_neon = 1270,
+ CODE_FOR_xorv16qi3_neon = 1271,
+ CODE_FOR_xorv4hi3_neon = 1272,
+ CODE_FOR_xorv8hi3_neon = 1273,
+ CODE_FOR_xorv2si3_neon = 1274,
+ CODE_FOR_xorv4si3_neon = 1275,
+ CODE_FOR_xorv4hf3_neon = 1276,
+ CODE_FOR_xorv8hf3_neon = 1277,
+ CODE_FOR_xorv2sf3_neon = 1278,
+ CODE_FOR_xorv4sf3_neon = 1279,
+ CODE_FOR_xorv2di3_neon = 1280,
+ CODE_FOR_one_cmplv8qi2_neon = 1281,
+ CODE_FOR_one_cmplv16qi2_neon = 1282,
+ CODE_FOR_one_cmplv4hi2_neon = 1283,
+ CODE_FOR_one_cmplv8hi2_neon = 1284,
+ CODE_FOR_one_cmplv2si2_neon = 1285,
+ CODE_FOR_one_cmplv4si2_neon = 1286,
+ CODE_FOR_one_cmplv4hf2_neon = 1287,
+ CODE_FOR_one_cmplv8hf2_neon = 1288,
+ CODE_FOR_one_cmplv2sf2_neon = 1289,
+ CODE_FOR_one_cmplv4sf2_neon = 1290,
+ CODE_FOR_one_cmplv2di2_neon = 1291,
+ CODE_FOR_neon_absv8qi2 = 1292,
+ CODE_FOR_neon_absv16qi2 = 1293,
+ CODE_FOR_neon_absv4hi2 = 1294,
+ CODE_FOR_neon_absv8hi2 = 1295,
+ CODE_FOR_neon_absv2si2 = 1296,
+ CODE_FOR_neon_absv4si2 = 1297,
+ CODE_FOR_neon_absv2sf2 = 1298,
+ CODE_FOR_neon_absv4sf2 = 1299,
+ CODE_FOR_neon_negv8qi2 = 1300,
+ CODE_FOR_neon_negv16qi2 = 1301,
+ CODE_FOR_neon_negv4hi2 = 1302,
+ CODE_FOR_neon_negv8hi2 = 1303,
+ CODE_FOR_neon_negv2si2 = 1304,
+ CODE_FOR_neon_negv4si2 = 1305,
+ CODE_FOR_neon_negv2sf2 = 1306,
+ CODE_FOR_neon_negv4sf2 = 1307,
+ CODE_FOR_neon_absv8hf2 = 1308,
+ CODE_FOR_neon_negv8hf2 = 1309,
+ CODE_FOR_neon_absv4hf2 = 1310,
+ CODE_FOR_neon_negv4hf2 = 1311,
+ CODE_FOR_neon_vrndv8hf = 1312,
+ CODE_FOR_neon_vrndav8hf = 1313,
+ CODE_FOR_neon_vrndmv8hf = 1314,
+ CODE_FOR_neon_vrndnv8hf = 1315,
+ CODE_FOR_neon_vrndpv8hf = 1316,
+ CODE_FOR_neon_vrndxv8hf = 1317,
+ CODE_FOR_neon_vrndv4hf = 1318,
+ CODE_FOR_neon_vrndav4hf = 1319,
+ CODE_FOR_neon_vrndmv4hf = 1320,
+ CODE_FOR_neon_vrndnv4hf = 1321,
+ CODE_FOR_neon_vrndpv4hf = 1322,
+ CODE_FOR_neon_vrndxv4hf = 1323,
+ CODE_FOR_neon_vrsqrtev8hf = 1324,
+ CODE_FOR_neon_vrsqrtev4hf = 1325,
+ CODE_FOR_vashrv8qi3_imm = 1354,
+ CODE_FOR_vashrv16qi3_imm = 1355,
+ CODE_FOR_vashrv4hi3_imm = 1356,
+ CODE_FOR_vashrv8hi3_imm = 1357,
+ CODE_FOR_vashrv2si3_imm = 1358,
+ CODE_FOR_vashrv4si3_imm = 1359,
+ CODE_FOR_vlshrv8qi3_imm = 1360,
+ CODE_FOR_vlshrv16qi3_imm = 1361,
+ CODE_FOR_vlshrv4hi3_imm = 1362,
+ CODE_FOR_vlshrv8hi3_imm = 1363,
+ CODE_FOR_vlshrv2si3_imm = 1364,
+ CODE_FOR_vlshrv4si3_imm = 1365,
+ CODE_FOR_ashlv8qi3_signed = 1366,
+ CODE_FOR_ashlv16qi3_signed = 1367,
+ CODE_FOR_ashlv4hi3_signed = 1368,
+ CODE_FOR_ashlv8hi3_signed = 1369,
+ CODE_FOR_ashlv2si3_signed = 1370,
+ CODE_FOR_ashlv4si3_signed = 1371,
+ CODE_FOR_ashlv2di3_signed = 1372,
+ CODE_FOR_ashlv8qi3_unsigned = 1373,
+ CODE_FOR_ashlv16qi3_unsigned = 1374,
+ CODE_FOR_ashlv4hi3_unsigned = 1375,
+ CODE_FOR_ashlv8hi3_unsigned = 1376,
+ CODE_FOR_ashlv2si3_unsigned = 1377,
+ CODE_FOR_ashlv4si3_unsigned = 1378,
+ CODE_FOR_ashlv2di3_unsigned = 1379,
+ CODE_FOR_neon_load_count = 1380,
+ CODE_FOR_vec_sel_widen_ssum_lov16qiv8qi3 = 1381,
+ CODE_FOR_vec_sel_widen_ssum_lov8hiv4hi3 = 1382,
+ CODE_FOR_vec_sel_widen_ssum_lov4siv2si3 = 1383,
+ CODE_FOR_vec_sel_widen_ssum_hiv16qiv8qi3 = 1384,
+ CODE_FOR_vec_sel_widen_ssum_hiv8hiv4hi3 = 1385,
+ CODE_FOR_vec_sel_widen_ssum_hiv4siv2si3 = 1386,
+ CODE_FOR_widen_ssumv8qi3 = 1387,
+ CODE_FOR_widen_ssumv4hi3 = 1388,
+ CODE_FOR_widen_ssumv2si3 = 1389,
+ CODE_FOR_vec_sel_widen_usum_lov16qiv8qi3 = 1390,
+ CODE_FOR_vec_sel_widen_usum_lov8hiv4hi3 = 1391,
+ CODE_FOR_vec_sel_widen_usum_lov4siv2si3 = 1392,
+ CODE_FOR_vec_sel_widen_usum_hiv16qiv8qi3 = 1393,
+ CODE_FOR_vec_sel_widen_usum_hiv8hiv4hi3 = 1394,
+ CODE_FOR_vec_sel_widen_usum_hiv4siv2si3 = 1395,
+ CODE_FOR_widen_usumv8qi3 = 1396,
+ CODE_FOR_widen_usumv4hi3 = 1397,
+ CODE_FOR_widen_usumv2si3 = 1398,
+ CODE_FOR_quad_halves_plusv4si = 1399,
+ CODE_FOR_quad_halves_sminv4si = 1400,
+ CODE_FOR_quad_halves_smaxv4si = 1401,
+ CODE_FOR_quad_halves_uminv4si = 1402,
+ CODE_FOR_quad_halves_umaxv4si = 1403,
+ CODE_FOR_quad_halves_plusv4sf = 1404,
+ CODE_FOR_quad_halves_sminv4sf = 1405,
+ CODE_FOR_quad_halves_smaxv4sf = 1406,
+ CODE_FOR_quad_halves_plusv8hi = 1407,
+ CODE_FOR_quad_halves_sminv8hi = 1408,
+ CODE_FOR_quad_halves_smaxv8hi = 1409,
+ CODE_FOR_quad_halves_uminv8hi = 1410,
+ CODE_FOR_quad_halves_umaxv8hi = 1411,
+ CODE_FOR_quad_halves_plusv16qi = 1412,
+ CODE_FOR_quad_halves_sminv16qi = 1413,
+ CODE_FOR_quad_halves_smaxv16qi = 1414,
+ CODE_FOR_quad_halves_uminv16qi = 1415,
+ CODE_FOR_quad_halves_umaxv16qi = 1416,
+ CODE_FOR_arm_reduc_plus_internal_v2di = 1417,
+ CODE_FOR_neon_vpadd_internalv8qi = 1418,
+ CODE_FOR_neon_vpadd_internalv4hi = 1419,
+ CODE_FOR_neon_vpadd_internalv2si = 1420,
+ CODE_FOR_neon_vpadd_internalv2sf = 1421,
+ CODE_FOR_neon_vpaddv4hf = 1422,
+ CODE_FOR_neon_vpsminv8qi = 1423,
+ CODE_FOR_neon_vpsminv4hi = 1424,
+ CODE_FOR_neon_vpsminv2si = 1425,
+ CODE_FOR_neon_vpsminv2sf = 1426,
+ CODE_FOR_neon_vpsmaxv8qi = 1427,
+ CODE_FOR_neon_vpsmaxv4hi = 1428,
+ CODE_FOR_neon_vpsmaxv2si = 1429,
+ CODE_FOR_neon_vpsmaxv2sf = 1430,
+ CODE_FOR_neon_vpuminv8qi = 1431,
+ CODE_FOR_neon_vpuminv4hi = 1432,
+ CODE_FOR_neon_vpuminv2si = 1433,
+ CODE_FOR_neon_vpumaxv8qi = 1434,
+ CODE_FOR_neon_vpumaxv4hi = 1435,
+ CODE_FOR_neon_vpumaxv2si = 1436,
+ CODE_FOR_neon_vaddv2sf_unspec = 1453,
+ CODE_FOR_neon_vaddv4sf_unspec = 1454,
+ CODE_FOR_neon_vaddlsv8qi = 1455,
+ CODE_FOR_neon_vaddluv8qi = 1456,
+ CODE_FOR_neon_vaddlsv4hi = 1457,
+ CODE_FOR_neon_vaddluv4hi = 1458,
+ CODE_FOR_neon_vaddlsv2si = 1459,
+ CODE_FOR_neon_vaddluv2si = 1460,
+ CODE_FOR_neon_vaddwsv8qi = 1461,
+ CODE_FOR_neon_vaddwuv8qi = 1462,
+ CODE_FOR_neon_vaddwsv4hi = 1463,
+ CODE_FOR_neon_vaddwuv4hi = 1464,
+ CODE_FOR_neon_vaddwsv2si = 1465,
+ CODE_FOR_neon_vaddwuv2si = 1466,
+ CODE_FOR_neon_vrhaddsv8qi = 1467,
+ CODE_FOR_neon_vrhadduv8qi = 1468,
+ CODE_FOR_neon_vhaddsv8qi = 1469,
+ CODE_FOR_neon_vhadduv8qi = 1470,
+ CODE_FOR_neon_vrhaddsv16qi = 1471,
+ CODE_FOR_neon_vrhadduv16qi = 1472,
+ CODE_FOR_neon_vhaddsv16qi = 1473,
+ CODE_FOR_neon_vhadduv16qi = 1474,
+ CODE_FOR_neon_vrhaddsv4hi = 1475,
+ CODE_FOR_neon_vrhadduv4hi = 1476,
+ CODE_FOR_neon_vhaddsv4hi = 1477,
+ CODE_FOR_neon_vhadduv4hi = 1478,
+ CODE_FOR_neon_vrhaddsv8hi = 1479,
+ CODE_FOR_neon_vrhadduv8hi = 1480,
+ CODE_FOR_neon_vhaddsv8hi = 1481,
+ CODE_FOR_neon_vhadduv8hi = 1482,
+ CODE_FOR_neon_vrhaddsv2si = 1483,
+ CODE_FOR_neon_vrhadduv2si = 1484,
+ CODE_FOR_neon_vhaddsv2si = 1485,
+ CODE_FOR_neon_vhadduv2si = 1486,
+ CODE_FOR_neon_vrhaddsv4si = 1487,
+ CODE_FOR_neon_vrhadduv4si = 1488,
+ CODE_FOR_neon_vhaddsv4si = 1489,
+ CODE_FOR_neon_vhadduv4si = 1490,
+ CODE_FOR_neon_vqaddsv8qi = 1491,
+ CODE_FOR_neon_vqadduv8qi = 1492,
+ CODE_FOR_neon_vqaddsv16qi = 1493,
+ CODE_FOR_neon_vqadduv16qi = 1494,
+ CODE_FOR_neon_vqaddsv4hi = 1495,
+ CODE_FOR_neon_vqadduv4hi = 1496,
+ CODE_FOR_neon_vqaddsv8hi = 1497,
+ CODE_FOR_neon_vqadduv8hi = 1498,
+ CODE_FOR_neon_vqaddsv2si = 1499,
+ CODE_FOR_neon_vqadduv2si = 1500,
+ CODE_FOR_neon_vqaddsv4si = 1501,
+ CODE_FOR_neon_vqadduv4si = 1502,
+ CODE_FOR_neon_vqaddsdi = 1503,
+ CODE_FOR_neon_vqaddudi = 1504,
+ CODE_FOR_neon_vqaddsv2di = 1505,
+ CODE_FOR_neon_vqadduv2di = 1506,
+ CODE_FOR_neon_vaddhnv8hi = 1507,
+ CODE_FOR_neon_vraddhnv8hi = 1508,
+ CODE_FOR_neon_vaddhnv4si = 1509,
+ CODE_FOR_neon_vraddhnv4si = 1510,
+ CODE_FOR_neon_vaddhnv2di = 1511,
+ CODE_FOR_neon_vraddhnv2di = 1512,
+ CODE_FOR_neon_vmulpv8qi = 1513,
+ CODE_FOR_neon_vmulpv16qi = 1514,
+ CODE_FOR_neon_vmulfv2sf = 1515,
+ CODE_FOR_neon_vmulfv4sf = 1516,
+ CODE_FOR_neon_vmulfv8hf = 1517,
+ CODE_FOR_neon_vmulfv4hf = 1518,
+ CODE_FOR_vfmal_lowv2sf_intrinsic = 1519,
+ CODE_FOR_vfmal_lowv4sf_intrinsic = 1520,
+ CODE_FOR_vfmsl_highv2sf_intrinsic = 1521,
+ CODE_FOR_vfmsl_highv4sf_intrinsic = 1522,
+ CODE_FOR_vfmal_highv2sf_intrinsic = 1523,
+ CODE_FOR_vfmal_highv4sf_intrinsic = 1524,
+ CODE_FOR_vfmsl_lowv2sf_intrinsic = 1525,
+ CODE_FOR_vfmsl_lowv4sf_intrinsic = 1526,
+ CODE_FOR_vfmal_lane_lowv2sf_intrinsic = 1527,
+ CODE_FOR_vfmal_lane_lowv4sf_intrinsic = 1528,
+ CODE_FOR_vfmal_lane_lowv8hfv2sf_intrinsic = 1529,
+ CODE_FOR_vfmal_lane_lowv4hfv4sf_intrinsic = 1530,
+ CODE_FOR_vfmal_lane_highv8hfv2sf_intrinsic = 1531,
+ CODE_FOR_vfmal_lane_highv4hfv4sf_intrinsic = 1532,
+ CODE_FOR_vfmal_lane_highv2sf_intrinsic = 1533,
+ CODE_FOR_vfmal_lane_highv4sf_intrinsic = 1534,
+ CODE_FOR_vfmsl_lane_lowv2sf_intrinsic = 1535,
+ CODE_FOR_vfmsl_lane_lowv4sf_intrinsic = 1536,
+ CODE_FOR_vfmsl_lane_lowv8hfv2sf_intrinsic = 1537,
+ CODE_FOR_vfmsl_lane_lowv4hfv4sf_intrinsic = 1538,
+ CODE_FOR_vfmsl_lane_highv8hfv2sf_intrinsic = 1539,
+ CODE_FOR_vfmsl_lane_highv4hfv4sf_intrinsic = 1540,
+ CODE_FOR_vfmsl_lane_highv2sf_intrinsic = 1541,
+ CODE_FOR_vfmsl_lane_highv4sf_intrinsic = 1542,
+ CODE_FOR_neon_vmlav8qi_unspec = 1543,
+ CODE_FOR_neon_vmlav16qi_unspec = 1544,
+ CODE_FOR_neon_vmlav4hi_unspec = 1545,
+ CODE_FOR_neon_vmlav8hi_unspec = 1546,
+ CODE_FOR_neon_vmlav2si_unspec = 1547,
+ CODE_FOR_neon_vmlav4si_unspec = 1548,
+ CODE_FOR_neon_vmlav2sf_unspec = 1549,
+ CODE_FOR_neon_vmlav4sf_unspec = 1550,
+ CODE_FOR_neon_vmlalsv8qi = 1551,
+ CODE_FOR_neon_vmlaluv8qi = 1552,
+ CODE_FOR_neon_vmlalsv4hi = 1553,
+ CODE_FOR_neon_vmlaluv4hi = 1554,
+ CODE_FOR_neon_vmlalsv2si = 1555,
+ CODE_FOR_neon_vmlaluv2si = 1556,
+ CODE_FOR_neon_vmlsv8qi_unspec = 1557,
+ CODE_FOR_neon_vmlsv16qi_unspec = 1558,
+ CODE_FOR_neon_vmlsv4hi_unspec = 1559,
+ CODE_FOR_neon_vmlsv8hi_unspec = 1560,
+ CODE_FOR_neon_vmlsv2si_unspec = 1561,
+ CODE_FOR_neon_vmlsv4si_unspec = 1562,
+ CODE_FOR_neon_vmlsv2sf_unspec = 1563,
+ CODE_FOR_neon_vmlsv4sf_unspec = 1564,
+ CODE_FOR_neon_vmlslsv8qi = 1565,
+ CODE_FOR_neon_vmlsluv8qi = 1566,
+ CODE_FOR_neon_vmlslsv4hi = 1567,
+ CODE_FOR_neon_vmlsluv4hi = 1568,
+ CODE_FOR_neon_vmlslsv2si = 1569,
+ CODE_FOR_neon_vmlsluv2si = 1570,
+ CODE_FOR_neon_vqdmulhv4hi = 1571,
+ CODE_FOR_neon_vqrdmulhv4hi = 1572,
+ CODE_FOR_neon_vqdmulhv2si = 1573,
+ CODE_FOR_neon_vqrdmulhv2si = 1574,
+ CODE_FOR_neon_vqdmulhv8hi = 1575,
+ CODE_FOR_neon_vqrdmulhv8hi = 1576,
+ CODE_FOR_neon_vqdmulhv4si = 1577,
+ CODE_FOR_neon_vqrdmulhv4si = 1578,
+ CODE_FOR_neon_vqrdmlahv4hi = 1579,
+ CODE_FOR_neon_vqrdmlshv4hi = 1580,
+ CODE_FOR_neon_vqrdmlahv2si = 1581,
+ CODE_FOR_neon_vqrdmlshv2si = 1582,
+ CODE_FOR_neon_vqrdmlahv8hi = 1583,
+ CODE_FOR_neon_vqrdmlshv8hi = 1584,
+ CODE_FOR_neon_vqrdmlahv4si = 1585,
+ CODE_FOR_neon_vqrdmlshv4si = 1586,
+ CODE_FOR_neon_vqdmlalv4hi = 1587,
+ CODE_FOR_neon_vqdmlalv2si = 1588,
+ CODE_FOR_neon_vqdmlslv4hi = 1589,
+ CODE_FOR_neon_vqdmlslv2si = 1590,
+ CODE_FOR_neon_vmullsv8qi = 1591,
+ CODE_FOR_neon_vmulluv8qi = 1592,
+ CODE_FOR_neon_vmullpv8qi = 1593,
+ CODE_FOR_neon_vmullsv4hi = 1594,
+ CODE_FOR_neon_vmulluv4hi = 1595,
+ CODE_FOR_neon_vmullpv4hi = 1596,
+ CODE_FOR_neon_vmullsv2si = 1597,
+ CODE_FOR_neon_vmulluv2si = 1598,
+ CODE_FOR_neon_vmullpv2si = 1599,
+ CODE_FOR_neon_vqdmullv4hi = 1600,
+ CODE_FOR_neon_vqdmullv2si = 1601,
+ CODE_FOR_neon_vsubv2sf_unspec = 1602,
+ CODE_FOR_neon_vsubv4sf_unspec = 1603,
+ CODE_FOR_neon_vsublsv8qi = 1604,
+ CODE_FOR_neon_vsubluv8qi = 1605,
+ CODE_FOR_neon_vsublsv4hi = 1606,
+ CODE_FOR_neon_vsubluv4hi = 1607,
+ CODE_FOR_neon_vsublsv2si = 1608,
+ CODE_FOR_neon_vsubluv2si = 1609,
+ CODE_FOR_neon_vsubwsv8qi = 1610,
+ CODE_FOR_neon_vsubwuv8qi = 1611,
+ CODE_FOR_neon_vsubwsv4hi = 1612,
+ CODE_FOR_neon_vsubwuv4hi = 1613,
+ CODE_FOR_neon_vsubwsv2si = 1614,
+ CODE_FOR_neon_vsubwuv2si = 1615,
+ CODE_FOR_neon_vqsubsv8qi = 1616,
+ CODE_FOR_neon_vqsubuv8qi = 1617,
+ CODE_FOR_neon_vqsubsv16qi = 1618,
+ CODE_FOR_neon_vqsubuv16qi = 1619,
+ CODE_FOR_neon_vqsubsv4hi = 1620,
+ CODE_FOR_neon_vqsubuv4hi = 1621,
+ CODE_FOR_neon_vqsubsv8hi = 1622,
+ CODE_FOR_neon_vqsubuv8hi = 1623,
+ CODE_FOR_neon_vqsubsv2si = 1624,
+ CODE_FOR_neon_vqsubuv2si = 1625,
+ CODE_FOR_neon_vqsubsv4si = 1626,
+ CODE_FOR_neon_vqsubuv4si = 1627,
+ CODE_FOR_neon_vqsubsdi = 1628,
+ CODE_FOR_neon_vqsubudi = 1629,
+ CODE_FOR_neon_vqsubsv2di = 1630,
+ CODE_FOR_neon_vqsubuv2di = 1631,
+ CODE_FOR_neon_vhsubsv8qi = 1632,
+ CODE_FOR_neon_vhsubuv8qi = 1633,
+ CODE_FOR_neon_vhsubsv16qi = 1634,
+ CODE_FOR_neon_vhsubuv16qi = 1635,
+ CODE_FOR_neon_vhsubsv4hi = 1636,
+ CODE_FOR_neon_vhsubuv4hi = 1637,
+ CODE_FOR_neon_vhsubsv8hi = 1638,
+ CODE_FOR_neon_vhsubuv8hi = 1639,
+ CODE_FOR_neon_vhsubsv2si = 1640,
+ CODE_FOR_neon_vhsubuv2si = 1641,
+ CODE_FOR_neon_vhsubsv4si = 1642,
+ CODE_FOR_neon_vhsubuv4si = 1643,
+ CODE_FOR_neon_vsubhnv8hi = 1644,
+ CODE_FOR_neon_vrsubhnv8hi = 1645,
+ CODE_FOR_neon_vsubhnv4si = 1646,
+ CODE_FOR_neon_vrsubhnv4si = 1647,
+ CODE_FOR_neon_vsubhnv2di = 1648,
+ CODE_FOR_neon_vrsubhnv2di = 1649,
+ CODE_FOR_neon_vceqv8qi_insn = 1650,
+ CODE_FOR_neon_vcgtv8qi_insn = 1651,
+ CODE_FOR_neon_vcgev8qi_insn = 1652,
+ CODE_FOR_neon_vclev8qi_insn = 1653,
+ CODE_FOR_neon_vcltv8qi_insn = 1654,
+ CODE_FOR_neon_vceqv16qi_insn = 1655,
+ CODE_FOR_neon_vcgtv16qi_insn = 1656,
+ CODE_FOR_neon_vcgev16qi_insn = 1657,
+ CODE_FOR_neon_vclev16qi_insn = 1658,
+ CODE_FOR_neon_vcltv16qi_insn = 1659,
+ CODE_FOR_neon_vceqv4hi_insn = 1660,
+ CODE_FOR_neon_vcgtv4hi_insn = 1661,
+ CODE_FOR_neon_vcgev4hi_insn = 1662,
+ CODE_FOR_neon_vclev4hi_insn = 1663,
+ CODE_FOR_neon_vcltv4hi_insn = 1664,
+ CODE_FOR_neon_vceqv8hi_insn = 1665,
+ CODE_FOR_neon_vcgtv8hi_insn = 1666,
+ CODE_FOR_neon_vcgev8hi_insn = 1667,
+ CODE_FOR_neon_vclev8hi_insn = 1668,
+ CODE_FOR_neon_vcltv8hi_insn = 1669,
+ CODE_FOR_neon_vceqv2si_insn = 1670,
+ CODE_FOR_neon_vcgtv2si_insn = 1671,
+ CODE_FOR_neon_vcgev2si_insn = 1672,
+ CODE_FOR_neon_vclev2si_insn = 1673,
+ CODE_FOR_neon_vcltv2si_insn = 1674,
+ CODE_FOR_neon_vceqv4si_insn = 1675,
+ CODE_FOR_neon_vcgtv4si_insn = 1676,
+ CODE_FOR_neon_vcgev4si_insn = 1677,
+ CODE_FOR_neon_vclev4si_insn = 1678,
+ CODE_FOR_neon_vcltv4si_insn = 1679,
+ CODE_FOR_neon_vceqv2sf_insn = 1680,
+ CODE_FOR_neon_vcgtv2sf_insn = 1681,
+ CODE_FOR_neon_vcgev2sf_insn = 1682,
+ CODE_FOR_neon_vclev2sf_insn = 1683,
+ CODE_FOR_neon_vcltv2sf_insn = 1684,
+ CODE_FOR_neon_vceqv4sf_insn = 1685,
+ CODE_FOR_neon_vcgtv4sf_insn = 1686,
+ CODE_FOR_neon_vcgev4sf_insn = 1687,
+ CODE_FOR_neon_vclev4sf_insn = 1688,
+ CODE_FOR_neon_vcltv4sf_insn = 1689,
+ CODE_FOR_neon_vceqv2sf_insn_unspec = 1690,
+ CODE_FOR_neon_vcgtv2sf_insn_unspec = 1691,
+ CODE_FOR_neon_vcgev2sf_insn_unspec = 1692,
+ CODE_FOR_neon_vcltv2sf_insn_unspec = 1693,
+ CODE_FOR_neon_vclev2sf_insn_unspec = 1694,
+ CODE_FOR_neon_vceqv4sf_insn_unspec = 1695,
+ CODE_FOR_neon_vcgtv4sf_insn_unspec = 1696,
+ CODE_FOR_neon_vcgev4sf_insn_unspec = 1697,
+ CODE_FOR_neon_vcltv4sf_insn_unspec = 1698,
+ CODE_FOR_neon_vclev4sf_insn_unspec = 1699,
+ CODE_FOR_neon_vceqv8hf_fp16insn = 1700,
+ CODE_FOR_neon_vcgtv8hf_fp16insn = 1701,
+ CODE_FOR_neon_vcgev8hf_fp16insn = 1702,
+ CODE_FOR_neon_vclev8hf_fp16insn = 1703,
+ CODE_FOR_neon_vcltv8hf_fp16insn = 1704,
+ CODE_FOR_neon_vceqv4hf_fp16insn = 1705,
+ CODE_FOR_neon_vcgtv4hf_fp16insn = 1706,
+ CODE_FOR_neon_vcgev4hf_fp16insn = 1707,
+ CODE_FOR_neon_vclev4hf_fp16insn = 1708,
+ CODE_FOR_neon_vcltv4hf_fp16insn = 1709,
+ CODE_FOR_neon_vceqv8hf_fp16insn_unspec = 1710,
+ CODE_FOR_neon_vcgtv8hf_fp16insn_unspec = 1711,
+ CODE_FOR_neon_vcgev8hf_fp16insn_unspec = 1712,
+ CODE_FOR_neon_vcltv8hf_fp16insn_unspec = 1713,
+ CODE_FOR_neon_vclev8hf_fp16insn_unspec = 1714,
+ CODE_FOR_neon_vceqv4hf_fp16insn_unspec = 1715,
+ CODE_FOR_neon_vcgtv4hf_fp16insn_unspec = 1716,
+ CODE_FOR_neon_vcgev4hf_fp16insn_unspec = 1717,
+ CODE_FOR_neon_vcltv4hf_fp16insn_unspec = 1718,
+ CODE_FOR_neon_vclev4hf_fp16insn_unspec = 1719,
+ CODE_FOR_neon_vcgtuv8qi = 1720,
+ CODE_FOR_neon_vcgeuv8qi = 1721,
+ CODE_FOR_neon_vcgtuv16qi = 1722,
+ CODE_FOR_neon_vcgeuv16qi = 1723,
+ CODE_FOR_neon_vcgtuv4hi = 1724,
+ CODE_FOR_neon_vcgeuv4hi = 1725,
+ CODE_FOR_neon_vcgtuv8hi = 1726,
+ CODE_FOR_neon_vcgeuv8hi = 1727,
+ CODE_FOR_neon_vcgtuv2si = 1728,
+ CODE_FOR_neon_vcgeuv2si = 1729,
+ CODE_FOR_neon_vcgtuv4si = 1730,
+ CODE_FOR_neon_vcgeuv4si = 1731,
+ CODE_FOR_neon_vcagtv2sf_insn = 1732,
+ CODE_FOR_neon_vcagev2sf_insn = 1733,
+ CODE_FOR_neon_vcaltv2sf_insn = 1734,
+ CODE_FOR_neon_vcalev2sf_insn = 1735,
+ CODE_FOR_neon_vcagtv4sf_insn = 1736,
+ CODE_FOR_neon_vcagev4sf_insn = 1737,
+ CODE_FOR_neon_vcaltv4sf_insn = 1738,
+ CODE_FOR_neon_vcalev4sf_insn = 1739,
+ CODE_FOR_neon_vcagev2sf_insn_unspec = 1740,
+ CODE_FOR_neon_vcagtv2sf_insn_unspec = 1741,
+ CODE_FOR_neon_vcalev2sf_insn_unspec = 1742,
+ CODE_FOR_neon_vcaltv2sf_insn_unspec = 1743,
+ CODE_FOR_neon_vcagev4sf_insn_unspec = 1744,
+ CODE_FOR_neon_vcagtv4sf_insn_unspec = 1745,
+ CODE_FOR_neon_vcalev4sf_insn_unspec = 1746,
+ CODE_FOR_neon_vcaltv4sf_insn_unspec = 1747,
+ CODE_FOR_neon_vcagtv8hf_fp16insn = 1748,
+ CODE_FOR_neon_vcagev8hf_fp16insn = 1749,
+ CODE_FOR_neon_vcaltv8hf_fp16insn = 1750,
+ CODE_FOR_neon_vcalev8hf_fp16insn = 1751,
+ CODE_FOR_neon_vcagtv4hf_fp16insn = 1752,
+ CODE_FOR_neon_vcagev4hf_fp16insn = 1753,
+ CODE_FOR_neon_vcaltv4hf_fp16insn = 1754,
+ CODE_FOR_neon_vcalev4hf_fp16insn = 1755,
+ CODE_FOR_neon_vcagev8hf_fp16insn_unspec = 1756,
+ CODE_FOR_neon_vcagtv8hf_fp16insn_unspec = 1757,
+ CODE_FOR_neon_vcalev8hf_fp16insn_unspec = 1758,
+ CODE_FOR_neon_vcaltv8hf_fp16insn_unspec = 1759,
+ CODE_FOR_neon_vcagev4hf_fp16insn_unspec = 1760,
+ CODE_FOR_neon_vcagtv4hf_fp16insn_unspec = 1761,
+ CODE_FOR_neon_vcalev4hf_fp16insn_unspec = 1762,
+ CODE_FOR_neon_vcaltv4hf_fp16insn_unspec = 1763,
+ CODE_FOR_neon_vtst_combinev8qi = 1764,
+ CODE_FOR_neon_vtst_combinev16qi = 1765,
+ CODE_FOR_neon_vtst_combinev4hi = 1766,
+ CODE_FOR_neon_vtst_combinev8hi = 1767,
+ CODE_FOR_neon_vtst_combinev2si = 1768,
+ CODE_FOR_neon_vtst_combinev4si = 1769,
+ CODE_FOR_neon_vabdsv8qi = 1770,
+ CODE_FOR_neon_vabduv8qi = 1771,
+ CODE_FOR_neon_vabdsv16qi = 1772,
+ CODE_FOR_neon_vabduv16qi = 1773,
+ CODE_FOR_neon_vabdsv4hi = 1774,
+ CODE_FOR_neon_vabduv4hi = 1775,
+ CODE_FOR_neon_vabdsv8hi = 1776,
+ CODE_FOR_neon_vabduv8hi = 1777,
+ CODE_FOR_neon_vabdsv2si = 1778,
+ CODE_FOR_neon_vabduv2si = 1779,
+ CODE_FOR_neon_vabdsv4si = 1780,
+ CODE_FOR_neon_vabduv4si = 1781,
+ CODE_FOR_neon_vabdv8hf = 1782,
+ CODE_FOR_neon_vabdv4hf = 1783,
+ CODE_FOR_neon_vabdfv2sf = 1784,
+ CODE_FOR_neon_vabdfv4sf = 1785,
+ CODE_FOR_neon_vabdlsv8qi = 1786,
+ CODE_FOR_neon_vabdluv8qi = 1787,
+ CODE_FOR_neon_vabdlsv4hi = 1788,
+ CODE_FOR_neon_vabdluv4hi = 1789,
+ CODE_FOR_neon_vabdlsv2si = 1790,
+ CODE_FOR_neon_vabdluv2si = 1791,
+ CODE_FOR_neon_vabasv8qi = 1792,
+ CODE_FOR_neon_vabauv8qi = 1793,
+ CODE_FOR_neon_vabasv16qi = 1794,
+ CODE_FOR_neon_vabauv16qi = 1795,
+ CODE_FOR_neon_vabasv4hi = 1796,
+ CODE_FOR_neon_vabauv4hi = 1797,
+ CODE_FOR_neon_vabasv8hi = 1798,
+ CODE_FOR_neon_vabauv8hi = 1799,
+ CODE_FOR_neon_vabasv2si = 1800,
+ CODE_FOR_neon_vabauv2si = 1801,
+ CODE_FOR_neon_vabasv4si = 1802,
+ CODE_FOR_neon_vabauv4si = 1803,
+ CODE_FOR_neon_vabalsv8qi = 1804,
+ CODE_FOR_neon_vabaluv8qi = 1805,
+ CODE_FOR_neon_vabalsv4hi = 1806,
+ CODE_FOR_neon_vabaluv4hi = 1807,
+ CODE_FOR_neon_vabalsv2si = 1808,
+ CODE_FOR_neon_vabaluv2si = 1809,
+ CODE_FOR_neon_vmaxsv8qi = 1810,
+ CODE_FOR_neon_vmaxuv8qi = 1811,
+ CODE_FOR_neon_vminsv8qi = 1812,
+ CODE_FOR_neon_vminuv8qi = 1813,
+ CODE_FOR_neon_vmaxsv16qi = 1814,
+ CODE_FOR_neon_vmaxuv16qi = 1815,
+ CODE_FOR_neon_vminsv16qi = 1816,
+ CODE_FOR_neon_vminuv16qi = 1817,
+ CODE_FOR_neon_vmaxsv4hi = 1818,
+ CODE_FOR_neon_vmaxuv4hi = 1819,
+ CODE_FOR_neon_vminsv4hi = 1820,
+ CODE_FOR_neon_vminuv4hi = 1821,
+ CODE_FOR_neon_vmaxsv8hi = 1822,
+ CODE_FOR_neon_vmaxuv8hi = 1823,
+ CODE_FOR_neon_vminsv8hi = 1824,
+ CODE_FOR_neon_vminuv8hi = 1825,
+ CODE_FOR_neon_vmaxsv2si = 1826,
+ CODE_FOR_neon_vmaxuv2si = 1827,
+ CODE_FOR_neon_vminsv2si = 1828,
+ CODE_FOR_neon_vminuv2si = 1829,
+ CODE_FOR_neon_vmaxsv4si = 1830,
+ CODE_FOR_neon_vmaxuv4si = 1831,
+ CODE_FOR_neon_vminsv4si = 1832,
+ CODE_FOR_neon_vminuv4si = 1833,
+ CODE_FOR_neon_vmaxfv2sf = 1834,
+ CODE_FOR_neon_vminfv2sf = 1835,
+ CODE_FOR_neon_vmaxfv4sf = 1836,
+ CODE_FOR_neon_vminfv4sf = 1837,
+ CODE_FOR_neon_vmaxfv8hf = 1838,
+ CODE_FOR_neon_vminfv8hf = 1839,
+ CODE_FOR_neon_vmaxfv4hf = 1840,
+ CODE_FOR_neon_vminfv4hf = 1841,
+ CODE_FOR_neon_vpmaxfv4hf = 1842,
+ CODE_FOR_neon_vpminfv4hf = 1843,
+ CODE_FOR_neon_vmaxnmv8hf = 1844,
+ CODE_FOR_neon_vminnmv8hf = 1845,
+ CODE_FOR_neon_vmaxnmv4hf = 1846,
+ CODE_FOR_neon_vminnmv4hf = 1847,
+ CODE_FOR_neon_vmaxnmv2sf = 1848,
+ CODE_FOR_neon_vminnmv2sf = 1849,
+ CODE_FOR_neon_vmaxnmv4sf = 1850,
+ CODE_FOR_neon_vminnmv4sf = 1851,
+ CODE_FOR_fmaxv2sf3 = 1852,
+ CODE_FOR_fminv2sf3 = 1853,
+ CODE_FOR_fmaxv4sf3 = 1854,
+ CODE_FOR_fminv4sf3 = 1855,
+ CODE_FOR_neon_vpaddlsv8qi = 1856,
+ CODE_FOR_neon_vpaddluv8qi = 1857,
+ CODE_FOR_neon_vpaddlsv16qi = 1858,
+ CODE_FOR_neon_vpaddluv16qi = 1859,
+ CODE_FOR_neon_vpaddlsv4hi = 1860,
+ CODE_FOR_neon_vpaddluv4hi = 1861,
+ CODE_FOR_neon_vpaddlsv8hi = 1862,
+ CODE_FOR_neon_vpaddluv8hi = 1863,
+ CODE_FOR_neon_vpaddlsv2si = 1864,
+ CODE_FOR_neon_vpaddluv2si = 1865,
+ CODE_FOR_neon_vpaddlsv4si = 1866,
+ CODE_FOR_neon_vpaddluv4si = 1867,
+ CODE_FOR_neon_vpadalsv8qi = 1868,
+ CODE_FOR_neon_vpadaluv8qi = 1869,
+ CODE_FOR_neon_vpadalsv16qi = 1870,
+ CODE_FOR_neon_vpadaluv16qi = 1871,
+ CODE_FOR_neon_vpadalsv4hi = 1872,
+ CODE_FOR_neon_vpadaluv4hi = 1873,
+ CODE_FOR_neon_vpadalsv8hi = 1874,
+ CODE_FOR_neon_vpadaluv8hi = 1875,
+ CODE_FOR_neon_vpadalsv2si = 1876,
+ CODE_FOR_neon_vpadaluv2si = 1877,
+ CODE_FOR_neon_vpadalsv4si = 1878,
+ CODE_FOR_neon_vpadaluv4si = 1879,
+ CODE_FOR_neon_vpmaxsv8qi = 1880,
+ CODE_FOR_neon_vpmaxuv8qi = 1881,
+ CODE_FOR_neon_vpminsv8qi = 1882,
+ CODE_FOR_neon_vpminuv8qi = 1883,
+ CODE_FOR_neon_vpmaxsv4hi = 1884,
+ CODE_FOR_neon_vpmaxuv4hi = 1885,
+ CODE_FOR_neon_vpminsv4hi = 1886,
+ CODE_FOR_neon_vpminuv4hi = 1887,
+ CODE_FOR_neon_vpmaxsv2si = 1888,
+ CODE_FOR_neon_vpmaxuv2si = 1889,
+ CODE_FOR_neon_vpminsv2si = 1890,
+ CODE_FOR_neon_vpminuv2si = 1891,
+ CODE_FOR_neon_vpmaxfv2sf = 1892,
+ CODE_FOR_neon_vpminfv2sf = 1893,
+ CODE_FOR_neon_vpmaxfv4sf = 1894,
+ CODE_FOR_neon_vpminfv4sf = 1895,
+ CODE_FOR_neon_vrecpsv2sf = 1896,
+ CODE_FOR_neon_vrecpsv4sf = 1897,
+ CODE_FOR_neon_vrecpsv8hf = 1898,
+ CODE_FOR_neon_vrecpsv4hf = 1899,
+ CODE_FOR_neon_vrsqrtsv2sf = 1900,
+ CODE_FOR_neon_vrsqrtsv4sf = 1901,
+ CODE_FOR_neon_vrsqrtsv8hf = 1902,
+ CODE_FOR_neon_vrsqrtsv4hf = 1903,
+ CODE_FOR_neon_vqabsv8qi = 1904,
+ CODE_FOR_neon_vqabsv16qi = 1905,
+ CODE_FOR_neon_vqabsv4hi = 1906,
+ CODE_FOR_neon_vqabsv8hi = 1907,
+ CODE_FOR_neon_vqabsv2si = 1908,
+ CODE_FOR_neon_vqabsv4si = 1909,
+ CODE_FOR_neon_bswapv4hi = 1910,
+ CODE_FOR_neon_bswapv8hi = 1911,
+ CODE_FOR_neon_bswapv2si = 1912,
+ CODE_FOR_neon_bswapv4si = 1913,
+ CODE_FOR_neon_bswapv2di = 1914,
+ CODE_FOR_neon_vcadd90v4hf = 1915,
+ CODE_FOR_neon_vcadd270v4hf = 1916,
+ CODE_FOR_neon_vcadd90v8hf = 1917,
+ CODE_FOR_neon_vcadd270v8hf = 1918,
+ CODE_FOR_neon_vcadd90v2sf = 1919,
+ CODE_FOR_neon_vcadd270v2sf = 1920,
+ CODE_FOR_neon_vcadd90v4sf = 1921,
+ CODE_FOR_neon_vcadd270v4sf = 1922,
+ CODE_FOR_neon_vcmla0v4hf = 1923,
+ CODE_FOR_neon_vcmla90v4hf = 1924,
+ CODE_FOR_neon_vcmla180v4hf = 1925,
+ CODE_FOR_neon_vcmla270v4hf = 1926,
+ CODE_FOR_neon_vcmla0v8hf = 1927,
+ CODE_FOR_neon_vcmla90v8hf = 1928,
+ CODE_FOR_neon_vcmla180v8hf = 1929,
+ CODE_FOR_neon_vcmla270v8hf = 1930,
+ CODE_FOR_neon_vcmla0v2sf = 1931,
+ CODE_FOR_neon_vcmla90v2sf = 1932,
+ CODE_FOR_neon_vcmla180v2sf = 1933,
+ CODE_FOR_neon_vcmla270v2sf = 1934,
+ CODE_FOR_neon_vcmla0v4sf = 1935,
+ CODE_FOR_neon_vcmla90v4sf = 1936,
+ CODE_FOR_neon_vcmla180v4sf = 1937,
+ CODE_FOR_neon_vcmla270v4sf = 1938,
+ CODE_FOR_neon_vcmla_lane0v4hf = 1939,
+ CODE_FOR_neon_vcmla_lane90v4hf = 1940,
+ CODE_FOR_neon_vcmla_lane180v4hf = 1941,
+ CODE_FOR_neon_vcmla_lane270v4hf = 1942,
+ CODE_FOR_neon_vcmla_lane0v8hf = 1943,
+ CODE_FOR_neon_vcmla_lane90v8hf = 1944,
+ CODE_FOR_neon_vcmla_lane180v8hf = 1945,
+ CODE_FOR_neon_vcmla_lane270v8hf = 1946,
+ CODE_FOR_neon_vcmla_lane0v2sf = 1947,
+ CODE_FOR_neon_vcmla_lane90v2sf = 1948,
+ CODE_FOR_neon_vcmla_lane180v2sf = 1949,
+ CODE_FOR_neon_vcmla_lane270v2sf = 1950,
+ CODE_FOR_neon_vcmla_lane0v4sf = 1951,
+ CODE_FOR_neon_vcmla_lane90v4sf = 1952,
+ CODE_FOR_neon_vcmla_lane180v4sf = 1953,
+ CODE_FOR_neon_vcmla_lane270v4sf = 1954,
+ CODE_FOR_neon_vcmla_laneq0v2sf = 1955,
+ CODE_FOR_neon_vcmla_laneq90v2sf = 1956,
+ CODE_FOR_neon_vcmla_laneq180v2sf = 1957,
+ CODE_FOR_neon_vcmla_laneq270v2sf = 1958,
+ CODE_FOR_neon_vcmla_laneq0v4hf = 1959,
+ CODE_FOR_neon_vcmla_laneq90v4hf = 1960,
+ CODE_FOR_neon_vcmla_laneq180v4hf = 1961,
+ CODE_FOR_neon_vcmla_laneq270v4hf = 1962,
+ CODE_FOR_neon_vcmlaq_lane0v8hf = 1963,
+ CODE_FOR_neon_vcmlaq_lane90v8hf = 1964,
+ CODE_FOR_neon_vcmlaq_lane180v8hf = 1965,
+ CODE_FOR_neon_vcmlaq_lane270v8hf = 1966,
+ CODE_FOR_neon_vcmlaq_lane0v4sf = 1967,
+ CODE_FOR_neon_vcmlaq_lane90v4sf = 1968,
+ CODE_FOR_neon_vcmlaq_lane180v4sf = 1969,
+ CODE_FOR_neon_vcmlaq_lane270v4sf = 1970,
+ CODE_FOR_sdot_prodv8qi = 1971,
+ CODE_FOR_udot_prodv8qi = 1972,
+ CODE_FOR_sdot_prodv16qi = 1973,
+ CODE_FOR_udot_prodv16qi = 1974,
+ CODE_FOR_neon_usdotv8qi = 1975,
+ CODE_FOR_neon_usdotv16qi = 1976,
+ CODE_FOR_neon_sdot_lanev8qi = 1977,
+ CODE_FOR_neon_udot_lanev8qi = 1978,
+ CODE_FOR_neon_sdot_lanev16qi = 1979,
+ CODE_FOR_neon_udot_lanev16qi = 1980,
+ CODE_FOR_neon_sdot_laneqv8qi = 1981,
+ CODE_FOR_neon_udot_laneqv8qi = 1982,
+ CODE_FOR_neon_sdot_laneqv16qi = 1983,
+ CODE_FOR_neon_udot_laneqv16qi = 1984,
+ CODE_FOR_neon_usdot_lanev8qi = 1985,
+ CODE_FOR_neon_sudot_lanev8qi = 1986,
+ CODE_FOR_neon_usdot_lanev16qi = 1987,
+ CODE_FOR_neon_sudot_lanev16qi = 1988,
+ CODE_FOR_neon_usdot_laneqv8qi = 1989,
+ CODE_FOR_neon_sudot_laneqv8qi = 1990,
+ CODE_FOR_neon_usdot_laneqv16qi = 1991,
+ CODE_FOR_neon_sudot_laneqv16qi = 1992,
+ CODE_FOR_neon_vqnegv8qi = 1993,
+ CODE_FOR_neon_vqnegv16qi = 1994,
+ CODE_FOR_neon_vqnegv4hi = 1995,
+ CODE_FOR_neon_vqnegv8hi = 1996,
+ CODE_FOR_neon_vqnegv2si = 1997,
+ CODE_FOR_neon_vqnegv4si = 1998,
+ CODE_FOR_neon_vclsv8qi = 1999,
+ CODE_FOR_neon_vclsv16qi = 2000,
+ CODE_FOR_neon_vclsv4hi = 2001,
+ CODE_FOR_neon_vclsv8hi = 2002,
+ CODE_FOR_neon_vclsv2si = 2003,
+ CODE_FOR_neon_vclsv4si = 2004,
+ CODE_FOR_neon_vclzv8qi = 2005,
+ CODE_FOR_neon_vclzv16qi = 2006,
+ CODE_FOR_neon_vclzv4hi = 2007,
+ CODE_FOR_neon_vclzv8hi = 2008,
+ CODE_FOR_neon_vclzv2si = 2009,
+ CODE_FOR_neon_vclzv4si = 2010,
+ CODE_FOR_popcountv8qi2 = 2011,
+ CODE_FOR_popcountv16qi2 = 2012,
+ CODE_FOR_neon_vrecpev8hf = 2013,
+ CODE_FOR_neon_vrecpev4hf = 2014,
+ CODE_FOR_neon_vrecpev2si = 2015,
+ CODE_FOR_neon_vrecpev2sf = 2016,
+ CODE_FOR_neon_vrecpev4si = 2017,
+ CODE_FOR_neon_vrecpev4sf = 2018,
+ CODE_FOR_neon_vrsqrtev2si = 2019,
+ CODE_FOR_neon_vrsqrtev2sf = 2020,
+ CODE_FOR_neon_vrsqrtev4si = 2021,
+ CODE_FOR_neon_vrsqrtev4sf = 2022,
+ CODE_FOR_neon_vget_lanev8qi_sext_internal = 2023,
+ CODE_FOR_neon_vget_lanev4hi_sext_internal = 2024,
+ CODE_FOR_neon_vget_lanev2si_sext_internal = 2025,
+ CODE_FOR_neon_vget_lanev2sf_sext_internal = 2026,
+ CODE_FOR_neon_vget_lanev8qi_zext_internal = 2027,
+ CODE_FOR_neon_vget_lanev4hi_zext_internal = 2028,
+ CODE_FOR_neon_vget_lanev2si_zext_internal = 2029,
+ CODE_FOR_neon_vget_lanev2sf_zext_internal = 2030,
+ CODE_FOR_neon_vget_lanev16qi_sext_internal = 2031,
+ CODE_FOR_neon_vget_lanev8hi_sext_internal = 2032,
+ CODE_FOR_neon_vget_lanev8hf_sext_internal = 2033,
+ CODE_FOR_neon_vget_lanev4si_sext_internal = 2034,
+ CODE_FOR_neon_vget_lanev4sf_sext_internal = 2035,
+ CODE_FOR_neon_vget_lanev16qi_zext_internal = 2036,
+ CODE_FOR_neon_vget_lanev8hi_zext_internal = 2037,
+ CODE_FOR_neon_vget_lanev8hf_zext_internal = 2038,
+ CODE_FOR_neon_vget_lanev4si_zext_internal = 2039,
+ CODE_FOR_neon_vget_lanev4sf_zext_internal = 2040,
+ CODE_FOR_neon_vdup_nv8qi = 2041,
+ CODE_FOR_neon_vdup_nv4hi = 2042,
+ CODE_FOR_neon_vdup_nv16qi = 2043,
+ CODE_FOR_neon_vdup_nv8hi = 2044,
+ CODE_FOR_neon_vdup_nv4hf = 2045,
+ CODE_FOR_neon_vdup_nv8hf = 2046,
+ CODE_FOR_neon_vdup_nv4bf = 2047,
+ CODE_FOR_neon_vdup_nv8bf = 2048,
+ CODE_FOR_neon_vdup_nv2si = 2049,
+ CODE_FOR_neon_vdup_nv2sf = 2050,
+ CODE_FOR_neon_vdup_nv4si = 2051,
+ CODE_FOR_neon_vdup_nv4sf = 2052,
+ CODE_FOR_neon_vdup_nv2di = 2053,
+ CODE_FOR_neon_vdup_lanev8qi_internal = 2054,
+ CODE_FOR_neon_vdup_lanev16qi_internal = 2055,
+ CODE_FOR_neon_vdup_lanev4hi_internal = 2056,
+ CODE_FOR_neon_vdup_lanev8hi_internal = 2057,
+ CODE_FOR_neon_vdup_lanev2si_internal = 2058,
+ CODE_FOR_neon_vdup_lanev4si_internal = 2059,
+ CODE_FOR_neon_vdup_lanev2sf_internal = 2060,
+ CODE_FOR_neon_vdup_lanev4sf_internal = 2061,
+ CODE_FOR_neon_vdup_lanev8hf_internal = 2062,
+ CODE_FOR_neon_vdup_lanev4hf_internal = 2063,
+ CODE_FOR_neon_vdup_lanev4bf_internal = 2064,
+ CODE_FOR_neon_vdup_lanev8bf_internal = 2065,
+ CODE_FOR_neon_vcombinev8qi = 2080,
+ CODE_FOR_neon_vcombinev4hi = 2081,
+ CODE_FOR_neon_vcombinev4hf = 2082,
+ CODE_FOR_neon_vcombinev4bf = 2083,
+ CODE_FOR_neon_vcombinev2si = 2084,
+ CODE_FOR_neon_vcombinev2sf = 2085,
+ CODE_FOR_neon_vcombinedi = 2086,
+ CODE_FOR_floatv2siv2sf2 = 2087,
+ CODE_FOR_floatv4siv4sf2 = 2088,
+ CODE_FOR_floatunsv2siv2sf2 = 2089,
+ CODE_FOR_floatunsv4siv4sf2 = 2090,
+ CODE_FOR_fix_truncv2sfv2si2 = 2091,
+ CODE_FOR_fix_truncv4sfv4si2 = 2092,
+ CODE_FOR_fixuns_truncv2sfv2si2 = 2093,
+ CODE_FOR_fixuns_truncv4sfv4si2 = 2094,
+ CODE_FOR_neon_vcvtsv2sf = 2095,
+ CODE_FOR_neon_vcvtuv2sf = 2096,
+ CODE_FOR_neon_vcvtsv4sf = 2097,
+ CODE_FOR_neon_vcvtuv4sf = 2098,
+ CODE_FOR_neon_vcvtsv2si = 2099,
+ CODE_FOR_neon_vcvtuv2si = 2100,
+ CODE_FOR_neon_vcvtsv4si = 2101,
+ CODE_FOR_neon_vcvtuv4si = 2102,
+ CODE_FOR_neon_vcvtv4sfv4hf = 2103,
+ CODE_FOR_neon_vcvtv4hfv4sf = 2104,
+ CODE_FOR_neon_vcvtsv4hi = 2105,
+ CODE_FOR_neon_vcvtuv4hi = 2106,
+ CODE_FOR_neon_vcvtsv8hi = 2107,
+ CODE_FOR_neon_vcvtuv8hi = 2108,
+ CODE_FOR_neon_vcvtsv8hf = 2109,
+ CODE_FOR_neon_vcvtuv8hf = 2110,
+ CODE_FOR_neon_vcvtsv4hf = 2111,
+ CODE_FOR_neon_vcvtuv4hf = 2112,
+ CODE_FOR_neon_vcvts_nv2sf = 2113,
+ CODE_FOR_neon_vcvtu_nv2sf = 2114,
+ CODE_FOR_neon_vcvts_nv4sf = 2115,
+ CODE_FOR_neon_vcvtu_nv4sf = 2116,
+ CODE_FOR_neon_vcvts_nv8hf = 2117,
+ CODE_FOR_neon_vcvtu_nv8hf = 2118,
+ CODE_FOR_neon_vcvts_nv4hf = 2119,
+ CODE_FOR_neon_vcvtu_nv4hf = 2120,
+ CODE_FOR_neon_vcvts_nv2si = 2121,
+ CODE_FOR_neon_vcvtu_nv2si = 2122,
+ CODE_FOR_neon_vcvts_nv4si = 2123,
+ CODE_FOR_neon_vcvtu_nv4si = 2124,
+ CODE_FOR_neon_vcvts_nv4hi = 2125,
+ CODE_FOR_neon_vcvtu_nv4hi = 2126,
+ CODE_FOR_neon_vcvts_nv8hi = 2127,
+ CODE_FOR_neon_vcvtu_nv8hi = 2128,
+ CODE_FOR_neon_vcvtasv8hf = 2129,
+ CODE_FOR_neon_vcvtauv8hf = 2130,
+ CODE_FOR_neon_vcvtmsv8hf = 2131,
+ CODE_FOR_neon_vcvtmuv8hf = 2132,
+ CODE_FOR_neon_vcvtnsv8hf = 2133,
+ CODE_FOR_neon_vcvtnuv8hf = 2134,
+ CODE_FOR_neon_vcvtpsv8hf = 2135,
+ CODE_FOR_neon_vcvtpuv8hf = 2136,
+ CODE_FOR_neon_vcvtasv4hf = 2137,
+ CODE_FOR_neon_vcvtauv4hf = 2138,
+ CODE_FOR_neon_vcvtmsv4hf = 2139,
+ CODE_FOR_neon_vcvtmuv4hf = 2140,
+ CODE_FOR_neon_vcvtnsv4hf = 2141,
+ CODE_FOR_neon_vcvtnuv4hf = 2142,
+ CODE_FOR_neon_vcvtpsv4hf = 2143,
+ CODE_FOR_neon_vcvtpuv4hf = 2144,
+ CODE_FOR_neon_vmovnv8hi = 2145,
+ CODE_FOR_neon_vmovnv4si = 2146,
+ CODE_FOR_neon_vmovnv2di = 2147,
+ CODE_FOR_neon_vqmovnsv8hi = 2148,
+ CODE_FOR_neon_vqmovnuv8hi = 2149,
+ CODE_FOR_neon_vqmovnsv4si = 2150,
+ CODE_FOR_neon_vqmovnuv4si = 2151,
+ CODE_FOR_neon_vqmovnsv2di = 2152,
+ CODE_FOR_neon_vqmovnuv2di = 2153,
+ CODE_FOR_neon_vqmovunv8hi = 2154,
+ CODE_FOR_neon_vqmovunv4si = 2155,
+ CODE_FOR_neon_vqmovunv2di = 2156,
+ CODE_FOR_neon_vmovlsv8qi = 2157,
+ CODE_FOR_neon_vmovluv8qi = 2158,
+ CODE_FOR_neon_vmovlsv4hi = 2159,
+ CODE_FOR_neon_vmovluv4hi = 2160,
+ CODE_FOR_neon_vmovlsv2si = 2161,
+ CODE_FOR_neon_vmovluv2si = 2162,
+ CODE_FOR_neon_vmul_lanev4hi = 2163,
+ CODE_FOR_neon_vmul_lanev2si = 2164,
+ CODE_FOR_neon_vmul_lanev2sf = 2165,
+ CODE_FOR_neon_vmul_lanev8hi = 2166,
+ CODE_FOR_neon_vmul_lanev4si = 2167,
+ CODE_FOR_neon_vmul_lanev4sf = 2168,
+ CODE_FOR_neon_vmul_lanev8hf = 2169,
+ CODE_FOR_neon_vmul_lanev4hf = 2170,
+ CODE_FOR_neon_vmulls_lanev4hi = 2171,
+ CODE_FOR_neon_vmullu_lanev4hi = 2172,
+ CODE_FOR_neon_vmulls_lanev2si = 2173,
+ CODE_FOR_neon_vmullu_lanev2si = 2174,
+ CODE_FOR_neon_vqdmull_lanev4hi = 2175,
+ CODE_FOR_neon_vqdmull_lanev2si = 2176,
+ CODE_FOR_neon_vqdmulh_lanev8hi = 2177,
+ CODE_FOR_neon_vqrdmulh_lanev8hi = 2178,
+ CODE_FOR_neon_vqdmulh_lanev4si = 2179,
+ CODE_FOR_neon_vqrdmulh_lanev4si = 2180,
+ CODE_FOR_neon_vqdmulh_lanev4hi = 2181,
+ CODE_FOR_neon_vqrdmulh_lanev4hi = 2182,
+ CODE_FOR_neon_vqdmulh_lanev2si = 2183,
+ CODE_FOR_neon_vqrdmulh_lanev2si = 2184,
+ CODE_FOR_neon_vqrdmlah_lanev8hi = 2185,
+ CODE_FOR_neon_vqrdmlsh_lanev8hi = 2186,
+ CODE_FOR_neon_vqrdmlah_lanev4si = 2187,
+ CODE_FOR_neon_vqrdmlsh_lanev4si = 2188,
+ CODE_FOR_neon_vqrdmlah_lanev4hi = 2189,
+ CODE_FOR_neon_vqrdmlsh_lanev4hi = 2190,
+ CODE_FOR_neon_vqrdmlah_lanev2si = 2191,
+ CODE_FOR_neon_vqrdmlsh_lanev2si = 2192,
+ CODE_FOR_neon_vmla_lanev4hi = 2193,
+ CODE_FOR_neon_vmla_lanev2si = 2194,
+ CODE_FOR_neon_vmla_lanev2sf = 2195,
+ CODE_FOR_neon_vmla_lanev8hi = 2196,
+ CODE_FOR_neon_vmla_lanev4si = 2197,
+ CODE_FOR_neon_vmla_lanev4sf = 2198,
+ CODE_FOR_neon_vmlals_lanev4hi = 2199,
+ CODE_FOR_neon_vmlalu_lanev4hi = 2200,
+ CODE_FOR_neon_vmlals_lanev2si = 2201,
+ CODE_FOR_neon_vmlalu_lanev2si = 2202,
+ CODE_FOR_neon_vqdmlal_lanev4hi = 2203,
+ CODE_FOR_neon_vqdmlal_lanev2si = 2204,
+ CODE_FOR_neon_vmls_lanev4hi = 2205,
+ CODE_FOR_neon_vmls_lanev2si = 2206,
+ CODE_FOR_neon_vmls_lanev2sf = 2207,
+ CODE_FOR_neon_vmls_lanev8hi = 2208,
+ CODE_FOR_neon_vmls_lanev4si = 2209,
+ CODE_FOR_neon_vmls_lanev4sf = 2210,
+ CODE_FOR_neon_vmlsls_lanev4hi = 2211,
+ CODE_FOR_neon_vmlslu_lanev4hi = 2212,
+ CODE_FOR_neon_vmlsls_lanev2si = 2213,
+ CODE_FOR_neon_vmlslu_lanev2si = 2214,
+ CODE_FOR_neon_vqdmlsl_lanev4hi = 2215,
+ CODE_FOR_neon_vqdmlsl_lanev2si = 2216,
+ CODE_FOR_neon_vextv8qi = 2217,
+ CODE_FOR_neon_vextv16qi = 2218,
+ CODE_FOR_neon_vextv4hi = 2219,
+ CODE_FOR_neon_vextv8hi = 2220,
+ CODE_FOR_neon_vextv2si = 2221,
+ CODE_FOR_neon_vextv4si = 2222,
+ CODE_FOR_neon_vextv4hf = 2223,
+ CODE_FOR_neon_vextv8hf = 2224,
+ CODE_FOR_neon_vextv4bf = 2225,
+ CODE_FOR_neon_vextv8bf = 2226,
+ CODE_FOR_neon_vextv2sf = 2227,
+ CODE_FOR_neon_vextv4sf = 2228,
+ CODE_FOR_neon_vextdi = 2229,
+ CODE_FOR_neon_vextv2di = 2230,
+ CODE_FOR_neon_vrev64v8qi = 2231,
+ CODE_FOR_neon_vrev64v16qi = 2232,
+ CODE_FOR_neon_vrev64v4hi = 2233,
+ CODE_FOR_neon_vrev64v8hi = 2234,
+ CODE_FOR_neon_vrev64v2si = 2235,
+ CODE_FOR_neon_vrev64v4si = 2236,
+ CODE_FOR_neon_vrev64v4hf = 2237,
+ CODE_FOR_neon_vrev64v8hf = 2238,
+ CODE_FOR_neon_vrev64v2sf = 2239,
+ CODE_FOR_neon_vrev64v4sf = 2240,
+ CODE_FOR_neon_vrev64v2di = 2241,
+ CODE_FOR_neon_vrev32v8qi = 2242,
+ CODE_FOR_neon_vrev32v4hi = 2243,
+ CODE_FOR_neon_vrev32v16qi = 2244,
+ CODE_FOR_neon_vrev32v8hi = 2245,
+ CODE_FOR_neon_vrev16v8qi = 2246,
+ CODE_FOR_neon_vrev16v16qi = 2247,
+ CODE_FOR_neon_vbslv8qi_internal = 2248,
+ CODE_FOR_neon_vbslv16qi_internal = 2249,
+ CODE_FOR_neon_vbslv4hi_internal = 2250,
+ CODE_FOR_neon_vbslv8hi_internal = 2251,
+ CODE_FOR_neon_vbslv2si_internal = 2252,
+ CODE_FOR_neon_vbslv4si_internal = 2253,
+ CODE_FOR_neon_vbslv4hf_internal = 2254,
+ CODE_FOR_neon_vbslv8hf_internal = 2255,
+ CODE_FOR_neon_vbslv4bf_internal = 2256,
+ CODE_FOR_neon_vbslv8bf_internal = 2257,
+ CODE_FOR_neon_vbslv2sf_internal = 2258,
+ CODE_FOR_neon_vbslv4sf_internal = 2259,
+ CODE_FOR_neon_vbsldi_internal = 2260,
+ CODE_FOR_neon_vbslv2di_internal = 2261,
+ CODE_FOR_neon_vshlsv8qi = 2262,
+ CODE_FOR_neon_vshluv8qi = 2263,
+ CODE_FOR_neon_vrshlsv8qi = 2264,
+ CODE_FOR_neon_vrshluv8qi = 2265,
+ CODE_FOR_neon_vshlsv16qi = 2266,
+ CODE_FOR_neon_vshluv16qi = 2267,
+ CODE_FOR_neon_vrshlsv16qi = 2268,
+ CODE_FOR_neon_vrshluv16qi = 2269,
+ CODE_FOR_neon_vshlsv4hi = 2270,
+ CODE_FOR_neon_vshluv4hi = 2271,
+ CODE_FOR_neon_vrshlsv4hi = 2272,
+ CODE_FOR_neon_vrshluv4hi = 2273,
+ CODE_FOR_neon_vshlsv8hi = 2274,
+ CODE_FOR_neon_vshluv8hi = 2275,
+ CODE_FOR_neon_vrshlsv8hi = 2276,
+ CODE_FOR_neon_vrshluv8hi = 2277,
+ CODE_FOR_neon_vshlsv2si = 2278,
+ CODE_FOR_neon_vshluv2si = 2279,
+ CODE_FOR_neon_vrshlsv2si = 2280,
+ CODE_FOR_neon_vrshluv2si = 2281,
+ CODE_FOR_neon_vshlsv4si = 2282,
+ CODE_FOR_neon_vshluv4si = 2283,
+ CODE_FOR_neon_vrshlsv4si = 2284,
+ CODE_FOR_neon_vrshluv4si = 2285,
+ CODE_FOR_neon_vshlsdi = 2286,
+ CODE_FOR_neon_vshludi = 2287,
+ CODE_FOR_neon_vrshlsdi = 2288,
+ CODE_FOR_neon_vrshludi = 2289,
+ CODE_FOR_neon_vshlsv2di = 2290,
+ CODE_FOR_neon_vshluv2di = 2291,
+ CODE_FOR_neon_vrshlsv2di = 2292,
+ CODE_FOR_neon_vrshluv2di = 2293,
+ CODE_FOR_neon_vqshlsv8qi = 2294,
+ CODE_FOR_neon_vqshluv8qi = 2295,
+ CODE_FOR_neon_vqrshlsv8qi = 2296,
+ CODE_FOR_neon_vqrshluv8qi = 2297,
+ CODE_FOR_neon_vqshlsv16qi = 2298,
+ CODE_FOR_neon_vqshluv16qi = 2299,
+ CODE_FOR_neon_vqrshlsv16qi = 2300,
+ CODE_FOR_neon_vqrshluv16qi = 2301,
+ CODE_FOR_neon_vqshlsv4hi = 2302,
+ CODE_FOR_neon_vqshluv4hi = 2303,
+ CODE_FOR_neon_vqrshlsv4hi = 2304,
+ CODE_FOR_neon_vqrshluv4hi = 2305,
+ CODE_FOR_neon_vqshlsv8hi = 2306,
+ CODE_FOR_neon_vqshluv8hi = 2307,
+ CODE_FOR_neon_vqrshlsv8hi = 2308,
+ CODE_FOR_neon_vqrshluv8hi = 2309,
+ CODE_FOR_neon_vqshlsv2si = 2310,
+ CODE_FOR_neon_vqshluv2si = 2311,
+ CODE_FOR_neon_vqrshlsv2si = 2312,
+ CODE_FOR_neon_vqrshluv2si = 2313,
+ CODE_FOR_neon_vqshlsv4si = 2314,
+ CODE_FOR_neon_vqshluv4si = 2315,
+ CODE_FOR_neon_vqrshlsv4si = 2316,
+ CODE_FOR_neon_vqrshluv4si = 2317,
+ CODE_FOR_neon_vqshlsdi = 2318,
+ CODE_FOR_neon_vqshludi = 2319,
+ CODE_FOR_neon_vqrshlsdi = 2320,
+ CODE_FOR_neon_vqrshludi = 2321,
+ CODE_FOR_neon_vqshlsv2di = 2322,
+ CODE_FOR_neon_vqshluv2di = 2323,
+ CODE_FOR_neon_vqrshlsv2di = 2324,
+ CODE_FOR_neon_vqrshluv2di = 2325,
+ CODE_FOR_neon_vshrs_nv8qi = 2326,
+ CODE_FOR_neon_vshru_nv8qi = 2327,
+ CODE_FOR_neon_vrshrs_nv8qi = 2328,
+ CODE_FOR_neon_vrshru_nv8qi = 2329,
+ CODE_FOR_neon_vshrs_nv16qi = 2330,
+ CODE_FOR_neon_vshru_nv16qi = 2331,
+ CODE_FOR_neon_vrshrs_nv16qi = 2332,
+ CODE_FOR_neon_vrshru_nv16qi = 2333,
+ CODE_FOR_neon_vshrs_nv4hi = 2334,
+ CODE_FOR_neon_vshru_nv4hi = 2335,
+ CODE_FOR_neon_vrshrs_nv4hi = 2336,
+ CODE_FOR_neon_vrshru_nv4hi = 2337,
+ CODE_FOR_neon_vshrs_nv8hi = 2338,
+ CODE_FOR_neon_vshru_nv8hi = 2339,
+ CODE_FOR_neon_vrshrs_nv8hi = 2340,
+ CODE_FOR_neon_vrshru_nv8hi = 2341,
+ CODE_FOR_neon_vshrs_nv2si = 2342,
+ CODE_FOR_neon_vshru_nv2si = 2343,
+ CODE_FOR_neon_vrshrs_nv2si = 2344,
+ CODE_FOR_neon_vrshru_nv2si = 2345,
+ CODE_FOR_neon_vshrs_nv4si = 2346,
+ CODE_FOR_neon_vshru_nv4si = 2347,
+ CODE_FOR_neon_vrshrs_nv4si = 2348,
+ CODE_FOR_neon_vrshru_nv4si = 2349,
+ CODE_FOR_neon_vshrs_ndi = 2350,
+ CODE_FOR_neon_vshru_ndi = 2351,
+ CODE_FOR_neon_vrshrs_ndi = 2352,
+ CODE_FOR_neon_vrshru_ndi = 2353,
+ CODE_FOR_neon_vshrs_nv2di = 2354,
+ CODE_FOR_neon_vshru_nv2di = 2355,
+ CODE_FOR_neon_vrshrs_nv2di = 2356,
+ CODE_FOR_neon_vrshru_nv2di = 2357,
+ CODE_FOR_neon_vshrn_nv8hi = 2358,
+ CODE_FOR_neon_vrshrn_nv8hi = 2359,
+ CODE_FOR_neon_vshrn_nv4si = 2360,
+ CODE_FOR_neon_vrshrn_nv4si = 2361,
+ CODE_FOR_neon_vshrn_nv2di = 2362,
+ CODE_FOR_neon_vrshrn_nv2di = 2363,
+ CODE_FOR_neon_vqshrns_nv8hi = 2364,
+ CODE_FOR_neon_vqshrnu_nv8hi = 2365,
+ CODE_FOR_neon_vqrshrns_nv8hi = 2366,
+ CODE_FOR_neon_vqrshrnu_nv8hi = 2367,
+ CODE_FOR_neon_vqshrns_nv4si = 2368,
+ CODE_FOR_neon_vqshrnu_nv4si = 2369,
+ CODE_FOR_neon_vqrshrns_nv4si = 2370,
+ CODE_FOR_neon_vqrshrnu_nv4si = 2371,
+ CODE_FOR_neon_vqshrns_nv2di = 2372,
+ CODE_FOR_neon_vqshrnu_nv2di = 2373,
+ CODE_FOR_neon_vqrshrns_nv2di = 2374,
+ CODE_FOR_neon_vqrshrnu_nv2di = 2375,
+ CODE_FOR_neon_vqshrun_nv8hi = 2376,
+ CODE_FOR_neon_vqrshrun_nv8hi = 2377,
+ CODE_FOR_neon_vqshrun_nv4si = 2378,
+ CODE_FOR_neon_vqrshrun_nv4si = 2379,
+ CODE_FOR_neon_vqshrun_nv2di = 2380,
+ CODE_FOR_neon_vqrshrun_nv2di = 2381,
+ CODE_FOR_neon_vshl_nv8qi = 2382,
+ CODE_FOR_neon_vshl_nv16qi = 2383,
+ CODE_FOR_neon_vshl_nv4hi = 2384,
+ CODE_FOR_neon_vshl_nv8hi = 2385,
+ CODE_FOR_neon_vshl_nv2si = 2386,
+ CODE_FOR_neon_vshl_nv4si = 2387,
+ CODE_FOR_neon_vshl_ndi = 2388,
+ CODE_FOR_neon_vshl_nv2di = 2389,
+ CODE_FOR_neon_vqshl_s_nv8qi = 2390,
+ CODE_FOR_neon_vqshl_u_nv8qi = 2391,
+ CODE_FOR_neon_vqshl_s_nv16qi = 2392,
+ CODE_FOR_neon_vqshl_u_nv16qi = 2393,
+ CODE_FOR_neon_vqshl_s_nv4hi = 2394,
+ CODE_FOR_neon_vqshl_u_nv4hi = 2395,
+ CODE_FOR_neon_vqshl_s_nv8hi = 2396,
+ CODE_FOR_neon_vqshl_u_nv8hi = 2397,
+ CODE_FOR_neon_vqshl_s_nv2si = 2398,
+ CODE_FOR_neon_vqshl_u_nv2si = 2399,
+ CODE_FOR_neon_vqshl_s_nv4si = 2400,
+ CODE_FOR_neon_vqshl_u_nv4si = 2401,
+ CODE_FOR_neon_vqshl_s_ndi = 2402,
+ CODE_FOR_neon_vqshl_u_ndi = 2403,
+ CODE_FOR_neon_vqshl_s_nv2di = 2404,
+ CODE_FOR_neon_vqshl_u_nv2di = 2405,
+ CODE_FOR_neon_vqshlu_nv8qi = 2406,
+ CODE_FOR_neon_vqshlu_nv16qi = 2407,
+ CODE_FOR_neon_vqshlu_nv4hi = 2408,
+ CODE_FOR_neon_vqshlu_nv8hi = 2409,
+ CODE_FOR_neon_vqshlu_nv2si = 2410,
+ CODE_FOR_neon_vqshlu_nv4si = 2411,
+ CODE_FOR_neon_vqshlu_ndi = 2412,
+ CODE_FOR_neon_vqshlu_nv2di = 2413,
+ CODE_FOR_neon_vshlls_nv8qi = 2414,
+ CODE_FOR_neon_vshllu_nv8qi = 2415,
+ CODE_FOR_neon_vshlls_nv4hi = 2416,
+ CODE_FOR_neon_vshllu_nv4hi = 2417,
+ CODE_FOR_neon_vshlls_nv2si = 2418,
+ CODE_FOR_neon_vshllu_nv2si = 2419,
+ CODE_FOR_neon_vsras_nv8qi = 2420,
+ CODE_FOR_neon_vsrau_nv8qi = 2421,
+ CODE_FOR_neon_vrsras_nv8qi = 2422,
+ CODE_FOR_neon_vrsrau_nv8qi = 2423,
+ CODE_FOR_neon_vsras_nv16qi = 2424,
+ CODE_FOR_neon_vsrau_nv16qi = 2425,
+ CODE_FOR_neon_vrsras_nv16qi = 2426,
+ CODE_FOR_neon_vrsrau_nv16qi = 2427,
+ CODE_FOR_neon_vsras_nv4hi = 2428,
+ CODE_FOR_neon_vsrau_nv4hi = 2429,
+ CODE_FOR_neon_vrsras_nv4hi = 2430,
+ CODE_FOR_neon_vrsrau_nv4hi = 2431,
+ CODE_FOR_neon_vsras_nv8hi = 2432,
+ CODE_FOR_neon_vsrau_nv8hi = 2433,
+ CODE_FOR_neon_vrsras_nv8hi = 2434,
+ CODE_FOR_neon_vrsrau_nv8hi = 2435,
+ CODE_FOR_neon_vsras_nv2si = 2436,
+ CODE_FOR_neon_vsrau_nv2si = 2437,
+ CODE_FOR_neon_vrsras_nv2si = 2438,
+ CODE_FOR_neon_vrsrau_nv2si = 2439,
+ CODE_FOR_neon_vsras_nv4si = 2440,
+ CODE_FOR_neon_vsrau_nv4si = 2441,
+ CODE_FOR_neon_vrsras_nv4si = 2442,
+ CODE_FOR_neon_vrsrau_nv4si = 2443,
+ CODE_FOR_neon_vsras_ndi = 2444,
+ CODE_FOR_neon_vsrau_ndi = 2445,
+ CODE_FOR_neon_vrsras_ndi = 2446,
+ CODE_FOR_neon_vrsrau_ndi = 2447,
+ CODE_FOR_neon_vsras_nv2di = 2448,
+ CODE_FOR_neon_vsrau_nv2di = 2449,
+ CODE_FOR_neon_vrsras_nv2di = 2450,
+ CODE_FOR_neon_vrsrau_nv2di = 2451,
+ CODE_FOR_neon_vsri_nv8qi = 2452,
+ CODE_FOR_neon_vsri_nv16qi = 2453,
+ CODE_FOR_neon_vsri_nv4hi = 2454,
+ CODE_FOR_neon_vsri_nv8hi = 2455,
+ CODE_FOR_neon_vsri_nv2si = 2456,
+ CODE_FOR_neon_vsri_nv4si = 2457,
+ CODE_FOR_neon_vsri_ndi = 2458,
+ CODE_FOR_neon_vsri_nv2di = 2459,
+ CODE_FOR_neon_vsli_nv8qi = 2460,
+ CODE_FOR_neon_vsli_nv16qi = 2461,
+ CODE_FOR_neon_vsli_nv4hi = 2462,
+ CODE_FOR_neon_vsli_nv8hi = 2463,
+ CODE_FOR_neon_vsli_nv2si = 2464,
+ CODE_FOR_neon_vsli_nv4si = 2465,
+ CODE_FOR_neon_vsli_ndi = 2466,
+ CODE_FOR_neon_vsli_nv2di = 2467,
+ CODE_FOR_neon_vtbl1v8qi = 2468,
+ CODE_FOR_neon_vtbl2v8qi = 2469,
+ CODE_FOR_neon_vtbl3v8qi = 2470,
+ CODE_FOR_neon_vtbl4v8qi = 2471,
+ CODE_FOR_neon_vtbl1v16qi = 2472,
+ CODE_FOR_neon_vtbl2v16qi = 2473,
+ CODE_FOR_neon_vcombinev16qi = 2474,
+ CODE_FOR_neon_vtbx1v8qi = 2475,
+ CODE_FOR_neon_vtbx2v8qi = 2476,
+ CODE_FOR_neon_vtbx3v8qi = 2477,
+ CODE_FOR_neon_vtbx4v8qi = 2478,
+ CODE_FOR_neon_vld1v8qi = 2509,
+ CODE_FOR_neon_vld1v16qi = 2510,
+ CODE_FOR_neon_vld1v4hi = 2511,
+ CODE_FOR_neon_vld1v8hi = 2512,
+ CODE_FOR_neon_vld1v2si = 2513,
+ CODE_FOR_neon_vld1v4si = 2514,
+ CODE_FOR_neon_vld1v4hf = 2515,
+ CODE_FOR_neon_vld1v8hf = 2516,
+ CODE_FOR_neon_vld1v4bf = 2517,
+ CODE_FOR_neon_vld1v8bf = 2518,
+ CODE_FOR_neon_vld1v2sf = 2519,
+ CODE_FOR_neon_vld1v4sf = 2520,
+ CODE_FOR_neon_vld1di = 2521,
+ CODE_FOR_neon_vld1v2di = 2522,
+ CODE_FOR_neon_vld1_lanev8qi = 2523,
+ CODE_FOR_neon_vld1_lanev4hi = 2524,
+ CODE_FOR_neon_vld1_lanev4hf = 2525,
+ CODE_FOR_neon_vld1_lanev4bf = 2526,
+ CODE_FOR_neon_vld1_lanev2si = 2527,
+ CODE_FOR_neon_vld1_lanev2sf = 2528,
+ CODE_FOR_neon_vld1_lanedi = 2529,
+ CODE_FOR_neon_vld1_lanev16qi = 2530,
+ CODE_FOR_neon_vld1_lanev8hi = 2531,
+ CODE_FOR_neon_vld1_lanev8hf = 2532,
+ CODE_FOR_neon_vld1_lanev8bf = 2533,
+ CODE_FOR_neon_vld1_lanev4si = 2534,
+ CODE_FOR_neon_vld1_lanev4sf = 2535,
+ CODE_FOR_neon_vld1_lanev2di = 2536,
+ CODE_FOR_neon_vld1_dupv8qi = 2537,
+ CODE_FOR_neon_vld1_dupv4hi = 2538,
+ CODE_FOR_neon_vld1_dupv4hf = 2539,
+ CODE_FOR_neon_vld1_dupv4bf = 2540,
+ CODE_FOR_neon_vld1_dupv2si = 2541,
+ CODE_FOR_neon_vld1_dupv2sf = 2542,
+ CODE_FOR_neon_vld1_dupv16qi = 2543,
+ CODE_FOR_neon_vld1_dupv8hi = 2544,
+ CODE_FOR_neon_vld1_dupv8hf = 2545,
+ CODE_FOR_neon_vld1_dupv4si = 2546,
+ CODE_FOR_neon_vld1_dupv4sf = 2547,
+ CODE_FOR_neon_vld1_dupv2di = 2548,
+ CODE_FOR_neon_vst1v8qi = 2549,
+ CODE_FOR_neon_vst1v16qi = 2550,
+ CODE_FOR_neon_vst1v4hi = 2551,
+ CODE_FOR_neon_vst1v8hi = 2552,
+ CODE_FOR_neon_vst1v2si = 2553,
+ CODE_FOR_neon_vst1v4si = 2554,
+ CODE_FOR_neon_vst1v4hf = 2555,
+ CODE_FOR_neon_vst1v8hf = 2556,
+ CODE_FOR_neon_vst1v4bf = 2557,
+ CODE_FOR_neon_vst1v8bf = 2558,
+ CODE_FOR_neon_vst1v2sf = 2559,
+ CODE_FOR_neon_vst1v4sf = 2560,
+ CODE_FOR_neon_vst1di = 2561,
+ CODE_FOR_neon_vst1v2di = 2562,
+ CODE_FOR_neon_vst1_lanev8qi = 2563,
+ CODE_FOR_neon_vst1_lanev4hi = 2564,
+ CODE_FOR_neon_vst1_lanev4hf = 2565,
+ CODE_FOR_neon_vst1_lanev4bf = 2566,
+ CODE_FOR_neon_vst1_lanev2si = 2567,
+ CODE_FOR_neon_vst1_lanev2sf = 2568,
+ CODE_FOR_neon_vst1_lanedi = 2569,
+ CODE_FOR_neon_vst1_lanev16qi = 2570,
+ CODE_FOR_neon_vst1_lanev8hi = 2571,
+ CODE_FOR_neon_vst1_lanev8hf = 2572,
+ CODE_FOR_neon_vst1_lanev8bf = 2573,
+ CODE_FOR_neon_vst1_lanev4si = 2574,
+ CODE_FOR_neon_vst1_lanev4sf = 2575,
+ CODE_FOR_neon_vst1_lanev2di = 2576,
+ CODE_FOR_neon_vld2v8qi = 2577,
+ CODE_FOR_neon_vld2v4hi = 2578,
+ CODE_FOR_neon_vld2v4hf = 2579,
+ CODE_FOR_neon_vld2v4bf = 2580,
+ CODE_FOR_neon_vld2v2si = 2581,
+ CODE_FOR_neon_vld2v2sf = 2582,
+ CODE_FOR_neon_vld2di = 2583,
+ CODE_FOR_neon_vld2v16qi = 2584,
+ CODE_FOR_neon_vld2v8hi = 2585,
+ CODE_FOR_neon_vld2v8hf = 2586,
+ CODE_FOR_neon_vld2v8bf = 2587,
+ CODE_FOR_neon_vld2v4si = 2588,
+ CODE_FOR_neon_vld2v4sf = 2589,
+ CODE_FOR_neon_vld2_lanev8qi = 2590,
+ CODE_FOR_neon_vld2_lanev4hi = 2591,
+ CODE_FOR_neon_vld2_lanev4hf = 2592,
+ CODE_FOR_neon_vld2_lanev4bf = 2593,
+ CODE_FOR_neon_vld2_lanev2si = 2594,
+ CODE_FOR_neon_vld2_lanev2sf = 2595,
+ CODE_FOR_neon_vld2_lanev8hi = 2596,
+ CODE_FOR_neon_vld2_lanev8hf = 2597,
+ CODE_FOR_neon_vld2_lanev4si = 2598,
+ CODE_FOR_neon_vld2_lanev4sf = 2599,
+ CODE_FOR_neon_vld2_lanev8bf = 2600,
+ CODE_FOR_neon_vld2_dupv8qi = 2601,
+ CODE_FOR_neon_vld2_dupv4hi = 2602,
+ CODE_FOR_neon_vld2_dupv4hf = 2603,
+ CODE_FOR_neon_vld2_dupv4bf = 2604,
+ CODE_FOR_neon_vld2_dupv2si = 2605,
+ CODE_FOR_neon_vld2_dupv2sf = 2606,
+ CODE_FOR_neon_vld2_dupdi = 2607,
+ CODE_FOR_neon_vld2_dupv8bf = 2608,
+ CODE_FOR_neon_vst2v8qi = 2609,
+ CODE_FOR_neon_vst2v4hi = 2610,
+ CODE_FOR_neon_vst2v4hf = 2611,
+ CODE_FOR_neon_vst2v4bf = 2612,
+ CODE_FOR_neon_vst2v2si = 2613,
+ CODE_FOR_neon_vst2v2sf = 2614,
+ CODE_FOR_neon_vst2di = 2615,
+ CODE_FOR_neon_vst2v16qi = 2616,
+ CODE_FOR_neon_vst2v8hi = 2617,
+ CODE_FOR_neon_vst2v8hf = 2618,
+ CODE_FOR_neon_vst2v8bf = 2619,
+ CODE_FOR_neon_vst2v4si = 2620,
+ CODE_FOR_neon_vst2v4sf = 2621,
+ CODE_FOR_neon_vst2_lanev8qi = 2622,
+ CODE_FOR_neon_vst2_lanev4hi = 2623,
+ CODE_FOR_neon_vst2_lanev4hf = 2624,
+ CODE_FOR_neon_vst2_lanev4bf = 2625,
+ CODE_FOR_neon_vst2_lanev2si = 2626,
+ CODE_FOR_neon_vst2_lanev2sf = 2627,
+ CODE_FOR_neon_vst2_lanev8hi = 2628,
+ CODE_FOR_neon_vst2_lanev8hf = 2629,
+ CODE_FOR_neon_vst2_lanev4si = 2630,
+ CODE_FOR_neon_vst2_lanev4sf = 2631,
+ CODE_FOR_neon_vst2_lanev8bf = 2632,
+ CODE_FOR_neon_vld3v8qi = 2633,
+ CODE_FOR_neon_vld3v4hi = 2634,
+ CODE_FOR_neon_vld3v4hf = 2635,
+ CODE_FOR_neon_vld3v4bf = 2636,
+ CODE_FOR_neon_vld3v2si = 2637,
+ CODE_FOR_neon_vld3v2sf = 2638,
+ CODE_FOR_neon_vld3di = 2639,
+ CODE_FOR_neon_vld3qav16qi = 2640,
+ CODE_FOR_neon_vld3qav8hi = 2641,
+ CODE_FOR_neon_vld3qav8hf = 2642,
+ CODE_FOR_neon_vld3qav8bf = 2643,
+ CODE_FOR_neon_vld3qav4si = 2644,
+ CODE_FOR_neon_vld3qav4sf = 2645,
+ CODE_FOR_neon_vld3qbv16qi = 2646,
+ CODE_FOR_neon_vld3qbv8hi = 2647,
+ CODE_FOR_neon_vld3qbv8hf = 2648,
+ CODE_FOR_neon_vld3qbv8bf = 2649,
+ CODE_FOR_neon_vld3qbv4si = 2650,
+ CODE_FOR_neon_vld3qbv4sf = 2651,
+ CODE_FOR_neon_vld3_lanev8qi = 2652,
+ CODE_FOR_neon_vld3_lanev4hi = 2653,
+ CODE_FOR_neon_vld3_lanev4hf = 2654,
+ CODE_FOR_neon_vld3_lanev4bf = 2655,
+ CODE_FOR_neon_vld3_lanev2si = 2656,
+ CODE_FOR_neon_vld3_lanev2sf = 2657,
+ CODE_FOR_neon_vld3_lanev8hi = 2658,
+ CODE_FOR_neon_vld3_lanev8hf = 2659,
+ CODE_FOR_neon_vld3_lanev4si = 2660,
+ CODE_FOR_neon_vld3_lanev4sf = 2661,
+ CODE_FOR_neon_vld3_lanev8bf = 2662,
+ CODE_FOR_neon_vld3_dupv8qi = 2663,
+ CODE_FOR_neon_vld3_dupv4hi = 2664,
+ CODE_FOR_neon_vld3_dupv4hf = 2665,
+ CODE_FOR_neon_vld3_dupv4bf = 2666,
+ CODE_FOR_neon_vld3_dupv2si = 2667,
+ CODE_FOR_neon_vld3_dupv2sf = 2668,
+ CODE_FOR_neon_vld3_dupdi = 2669,
+ CODE_FOR_neon_vld3_dupv8bf = 2670,
+ CODE_FOR_neon_vst3v8qi = 2671,
+ CODE_FOR_neon_vst3v4hi = 2672,
+ CODE_FOR_neon_vst3v4hf = 2673,
+ CODE_FOR_neon_vst3v4bf = 2674,
+ CODE_FOR_neon_vst3v2si = 2675,
+ CODE_FOR_neon_vst3v2sf = 2676,
+ CODE_FOR_neon_vst3di = 2677,
+ CODE_FOR_neon_vst3qav16qi = 2678,
+ CODE_FOR_neon_vst3qav8hi = 2679,
+ CODE_FOR_neon_vst3qav8hf = 2680,
+ CODE_FOR_neon_vst3qav8bf = 2681,
+ CODE_FOR_neon_vst3qav4si = 2682,
+ CODE_FOR_neon_vst3qav4sf = 2683,
+ CODE_FOR_neon_vst3qbv16qi = 2684,
+ CODE_FOR_neon_vst3qbv8hi = 2685,
+ CODE_FOR_neon_vst3qbv8hf = 2686,
+ CODE_FOR_neon_vst3qbv8bf = 2687,
+ CODE_FOR_neon_vst3qbv4si = 2688,
+ CODE_FOR_neon_vst3qbv4sf = 2689,
+ CODE_FOR_neon_vst3_lanev8qi = 2690,
+ CODE_FOR_neon_vst3_lanev4hi = 2691,
+ CODE_FOR_neon_vst3_lanev4hf = 2692,
+ CODE_FOR_neon_vst3_lanev4bf = 2693,
+ CODE_FOR_neon_vst3_lanev2si = 2694,
+ CODE_FOR_neon_vst3_lanev2sf = 2695,
+ CODE_FOR_neon_vst3_lanev8hi = 2696,
+ CODE_FOR_neon_vst3_lanev8hf = 2697,
+ CODE_FOR_neon_vst3_lanev4si = 2698,
+ CODE_FOR_neon_vst3_lanev4sf = 2699,
+ CODE_FOR_neon_vst3_lanev8bf = 2700,
+ CODE_FOR_neon_vld4v8qi = 2701,
+ CODE_FOR_neon_vld4v4hi = 2702,
+ CODE_FOR_neon_vld4v4hf = 2703,
+ CODE_FOR_neon_vld4v4bf = 2704,
+ CODE_FOR_neon_vld4v2si = 2705,
+ CODE_FOR_neon_vld4v2sf = 2706,
+ CODE_FOR_neon_vld4di = 2707,
+ CODE_FOR_neon_vld4qav16qi = 2708,
+ CODE_FOR_neon_vld4qav8hi = 2709,
+ CODE_FOR_neon_vld4qav8hf = 2710,
+ CODE_FOR_neon_vld4qav8bf = 2711,
+ CODE_FOR_neon_vld4qav4si = 2712,
+ CODE_FOR_neon_vld4qav4sf = 2713,
+ CODE_FOR_neon_vld4qbv16qi = 2714,
+ CODE_FOR_neon_vld4qbv8hi = 2715,
+ CODE_FOR_neon_vld4qbv8hf = 2716,
+ CODE_FOR_neon_vld4qbv8bf = 2717,
+ CODE_FOR_neon_vld4qbv4si = 2718,
+ CODE_FOR_neon_vld4qbv4sf = 2719,
+ CODE_FOR_neon_vld4_lanev8qi = 2720,
+ CODE_FOR_neon_vld4_lanev4hi = 2721,
+ CODE_FOR_neon_vld4_lanev4hf = 2722,
+ CODE_FOR_neon_vld4_lanev4bf = 2723,
+ CODE_FOR_neon_vld4_lanev2si = 2724,
+ CODE_FOR_neon_vld4_lanev2sf = 2725,
+ CODE_FOR_neon_vld4_lanev8hi = 2726,
+ CODE_FOR_neon_vld4_lanev8hf = 2727,
+ CODE_FOR_neon_vld4_lanev4si = 2728,
+ CODE_FOR_neon_vld4_lanev4sf = 2729,
+ CODE_FOR_neon_vld4_lanev8bf = 2730,
+ CODE_FOR_neon_vld4_dupv8qi = 2731,
+ CODE_FOR_neon_vld4_dupv4hi = 2732,
+ CODE_FOR_neon_vld4_dupv4hf = 2733,
+ CODE_FOR_neon_vld4_dupv4bf = 2734,
+ CODE_FOR_neon_vld4_dupv2si = 2735,
+ CODE_FOR_neon_vld4_dupv2sf = 2736,
+ CODE_FOR_neon_vld4_dupdi = 2737,
+ CODE_FOR_neon_vld4_dupv8bf = 2738,
+ CODE_FOR_neon_vst4v8qi = 2739,
+ CODE_FOR_neon_vst4v4hi = 2740,
+ CODE_FOR_neon_vst4v4hf = 2741,
+ CODE_FOR_neon_vst4v4bf = 2742,
+ CODE_FOR_neon_vst4v2si = 2743,
+ CODE_FOR_neon_vst4v2sf = 2744,
+ CODE_FOR_neon_vst4di = 2745,
+ CODE_FOR_neon_vst4qav16qi = 2746,
+ CODE_FOR_neon_vst4qav8hi = 2747,
+ CODE_FOR_neon_vst4qav8hf = 2748,
+ CODE_FOR_neon_vst4qav8bf = 2749,
+ CODE_FOR_neon_vst4qav4si = 2750,
+ CODE_FOR_neon_vst4qav4sf = 2751,
+ CODE_FOR_neon_vst4qbv16qi = 2752,
+ CODE_FOR_neon_vst4qbv8hi = 2753,
+ CODE_FOR_neon_vst4qbv8hf = 2754,
+ CODE_FOR_neon_vst4qbv8bf = 2755,
+ CODE_FOR_neon_vst4qbv4si = 2756,
+ CODE_FOR_neon_vst4qbv4sf = 2757,
+ CODE_FOR_neon_vst4_lanev8qi = 2758,
+ CODE_FOR_neon_vst4_lanev4hi = 2759,
+ CODE_FOR_neon_vst4_lanev4hf = 2760,
+ CODE_FOR_neon_vst4_lanev4bf = 2761,
+ CODE_FOR_neon_vst4_lanev2si = 2762,
+ CODE_FOR_neon_vst4_lanev2sf = 2763,
+ CODE_FOR_neon_vst4_lanev8hi = 2764,
+ CODE_FOR_neon_vst4_lanev8hf = 2765,
+ CODE_FOR_neon_vst4_lanev4si = 2766,
+ CODE_FOR_neon_vst4_lanev4sf = 2767,
+ CODE_FOR_neon_vst4_lanev8bf = 2768,
+ CODE_FOR_neon_vec_unpacks_lo_v16qi = 2769,
+ CODE_FOR_neon_vec_unpacku_lo_v16qi = 2770,
+ CODE_FOR_neon_vec_unpacks_lo_v8hi = 2771,
+ CODE_FOR_neon_vec_unpacku_lo_v8hi = 2772,
+ CODE_FOR_neon_vec_unpacks_lo_v4si = 2773,
+ CODE_FOR_neon_vec_unpacku_lo_v4si = 2774,
+ CODE_FOR_neon_vec_unpacks_hi_v16qi = 2775,
+ CODE_FOR_neon_vec_unpacku_hi_v16qi = 2776,
+ CODE_FOR_neon_vec_unpacks_hi_v8hi = 2777,
+ CODE_FOR_neon_vec_unpacku_hi_v8hi = 2778,
+ CODE_FOR_neon_vec_unpacks_hi_v4si = 2779,
+ CODE_FOR_neon_vec_unpacku_hi_v4si = 2780,
+ CODE_FOR_neon_vec_smult_lo_v16qi = 2781,
+ CODE_FOR_neon_vec_umult_lo_v16qi = 2782,
+ CODE_FOR_neon_vec_smult_lo_v8hi = 2783,
+ CODE_FOR_neon_vec_umult_lo_v8hi = 2784,
+ CODE_FOR_neon_vec_smult_lo_v4si = 2785,
+ CODE_FOR_neon_vec_umult_lo_v4si = 2786,
+ CODE_FOR_neon_vec_smult_hi_v16qi = 2787,
+ CODE_FOR_neon_vec_umult_hi_v16qi = 2788,
+ CODE_FOR_neon_vec_smult_hi_v8hi = 2789,
+ CODE_FOR_neon_vec_umult_hi_v8hi = 2790,
+ CODE_FOR_neon_vec_smult_hi_v4si = 2791,
+ CODE_FOR_neon_vec_umult_hi_v4si = 2792,
+ CODE_FOR_neon_vec_sshiftl_v8qi = 2793,
+ CODE_FOR_neon_vec_ushiftl_v8qi = 2794,
+ CODE_FOR_neon_vec_sshiftl_v4hi = 2795,
+ CODE_FOR_neon_vec_ushiftl_v4hi = 2796,
+ CODE_FOR_neon_vec_sshiftl_v2si = 2797,
+ CODE_FOR_neon_vec_ushiftl_v2si = 2798,
+ CODE_FOR_neon_unpacks_v8qi = 2799,
+ CODE_FOR_neon_unpacku_v8qi = 2800,
+ CODE_FOR_neon_unpacks_v4hi = 2801,
+ CODE_FOR_neon_unpacku_v4hi = 2802,
+ CODE_FOR_neon_unpacks_v2si = 2803,
+ CODE_FOR_neon_unpacku_v2si = 2804,
+ CODE_FOR_neon_vec_smult_v8qi = 2805,
+ CODE_FOR_neon_vec_umult_v8qi = 2806,
+ CODE_FOR_neon_vec_smult_v4hi = 2807,
+ CODE_FOR_neon_vec_umult_v4hi = 2808,
+ CODE_FOR_neon_vec_smult_v2si = 2809,
+ CODE_FOR_neon_vec_umult_v2si = 2810,
+ CODE_FOR_vec_pack_trunc_v8hi = 2811,
+ CODE_FOR_vec_pack_trunc_v4si = 2812,
+ CODE_FOR_vec_pack_trunc_v2di = 2813,
+ CODE_FOR_neon_vec_pack_trunc_v8hi = 2814,
+ CODE_FOR_neon_vec_pack_trunc_v4si = 2815,
+ CODE_FOR_neon_vec_pack_trunc_v2di = 2816,
+ CODE_FOR_neon_vabdv4hf_2 = 2817,
+ CODE_FOR_neon_vabdv8hf_2 = 2818,
+ CODE_FOR_neon_vabdv2sf_2 = 2819,
+ CODE_FOR_neon_vabdv4sf_2 = 2820,
+ CODE_FOR_neon_vabdv4hf_3 = 2821,
+ CODE_FOR_neon_vabdv8hf_3 = 2822,
+ CODE_FOR_neon_vabdv2sf_3 = 2823,
+ CODE_FOR_neon_vabdv4sf_3 = 2824,
+ CODE_FOR_neon_smmlav16qi = 2825,
+ CODE_FOR_neon_ummlav16qi = 2826,
+ CODE_FOR_neon_usmmlav16qi = 2827,
+ CODE_FOR_neon_vbfdotv2sf = 2828,
+ CODE_FOR_neon_vbfdotv4sf = 2829,
+ CODE_FOR_neon_vbfdot_lanev4bfv2sf = 2830,
+ CODE_FOR_neon_vbfdot_lanev4bfv4sf = 2831,
+ CODE_FOR_neon_vbfdot_lanev8bfv2sf = 2832,
+ CODE_FOR_neon_vbfdot_lanev8bfv4sf = 2833,
+ CODE_FOR_neon_vbfcvtv4sfv4bf = 2834,
+ CODE_FOR_neon_vbfcvtv4sfv8bf = 2835,
+ CODE_FOR_neon_vbfcvtv4sf_highv8bf = 2836,
+ CODE_FOR_neon_vbfcvtsf = 2837,
+ CODE_FOR_neon_vbfcvtv4bf = 2838,
+ CODE_FOR_neon_vbfcvtv8bf = 2839,
+ CODE_FOR_neon_vbfcvt_highv8bf = 2840,
+ CODE_FOR_neon_vbfcvtbf_cvtmodev2si = 2841,
+ CODE_FOR_neon_vbfcvtbf_cvtmodesf = 2842,
+ CODE_FOR_neon_vmmlav8bf = 2843,
+ CODE_FOR_neon_vfmabv8bf = 2844,
+ CODE_FOR_neon_vfmatv8bf = 2845,
+ CODE_FOR_neon_vfmab_lanev8bf = 2846,
+ CODE_FOR_neon_vfmat_lanev8bf = 2847,
+ CODE_FOR_crypto_aesmc = 2848,
+ CODE_FOR_crypto_aesimc = 2849,
+ CODE_FOR_aes_op_protect = 2852,
+ CODE_FOR_aes_op_protect_neon_vld1v16qi = 2854,
+ CODE_FOR_crypto_sha1su1 = 2861,
+ CODE_FOR_crypto_sha256su0 = 2862,
+ CODE_FOR_crypto_sha1su0 = 2863,
+ CODE_FOR_crypto_sha256h = 2864,
+ CODE_FOR_crypto_sha256h2 = 2865,
+ CODE_FOR_crypto_sha256su1 = 2866,
+ CODE_FOR_crypto_sha1h_lb = 2867,
+ CODE_FOR_crypto_vmullp64 = 2868,
+ CODE_FOR_crypto_sha1c_lb = 2869,
+ CODE_FOR_crypto_sha1m_lb = 2870,
+ CODE_FOR_crypto_sha1p_lb = 2871,
+ CODE_FOR_atomic_loadqi = 2873,
+ CODE_FOR_atomic_loadhi = 2874,
+ CODE_FOR_atomic_loadsi = 2875,
+ CODE_FOR_atomic_storeqi = 2876,
+ CODE_FOR_atomic_storehi = 2877,
+ CODE_FOR_atomic_storesi = 2878,
+ CODE_FOR_arm_atomic_loaddi2_ldrd = 2879,
+ CODE_FOR_atomic_compare_and_swap32qi_1 = 2880,
+ CODE_FOR_atomic_compare_and_swap32hi_1 = 2881,
+ CODE_FOR_atomic_compare_and_swapt1qi_1 = 2882,
+ CODE_FOR_atomic_compare_and_swapt1hi_1 = 2883,
+ CODE_FOR_atomic_compare_and_swap32si_1 = 2884,
+ CODE_FOR_atomic_compare_and_swap32di_1 = 2885,
+ CODE_FOR_atomic_compare_and_swapt1si_1 = 2886,
+ CODE_FOR_atomic_compare_and_swapt1di_1 = 2887,
+ CODE_FOR_atomic_exchangeqi = 2888,
+ CODE_FOR_atomic_exchangehi = 2889,
+ CODE_FOR_atomic_exchangesi = 2890,
+ CODE_FOR_atomic_exchangedi = 2891,
+ CODE_FOR_atomic_addqi = 2892,
+ CODE_FOR_atomic_subqi = 2893,
+ CODE_FOR_atomic_orqi = 2894,
+ CODE_FOR_atomic_xorqi = 2895,
+ CODE_FOR_atomic_andqi = 2896,
+ CODE_FOR_atomic_addhi = 2897,
+ CODE_FOR_atomic_subhi = 2898,
+ CODE_FOR_atomic_orhi = 2899,
+ CODE_FOR_atomic_xorhi = 2900,
+ CODE_FOR_atomic_andhi = 2901,
+ CODE_FOR_atomic_addsi = 2902,
+ CODE_FOR_atomic_subsi = 2903,
+ CODE_FOR_atomic_orsi = 2904,
+ CODE_FOR_atomic_xorsi = 2905,
+ CODE_FOR_atomic_andsi = 2906,
+ CODE_FOR_atomic_adddi = 2907,
+ CODE_FOR_atomic_subdi = 2908,
+ CODE_FOR_atomic_ordi = 2909,
+ CODE_FOR_atomic_xordi = 2910,
+ CODE_FOR_atomic_anddi = 2911,
+ CODE_FOR_atomic_nandqi = 2912,
+ CODE_FOR_atomic_nandhi = 2913,
+ CODE_FOR_atomic_nandsi = 2914,
+ CODE_FOR_atomic_nanddi = 2915,
+ CODE_FOR_atomic_fetch_addqi = 2916,
+ CODE_FOR_atomic_fetch_subqi = 2917,
+ CODE_FOR_atomic_fetch_orqi = 2918,
+ CODE_FOR_atomic_fetch_xorqi = 2919,
+ CODE_FOR_atomic_fetch_andqi = 2920,
+ CODE_FOR_atomic_fetch_addhi = 2921,
+ CODE_FOR_atomic_fetch_subhi = 2922,
+ CODE_FOR_atomic_fetch_orhi = 2923,
+ CODE_FOR_atomic_fetch_xorhi = 2924,
+ CODE_FOR_atomic_fetch_andhi = 2925,
+ CODE_FOR_atomic_fetch_addsi = 2926,
+ CODE_FOR_atomic_fetch_subsi = 2927,
+ CODE_FOR_atomic_fetch_orsi = 2928,
+ CODE_FOR_atomic_fetch_xorsi = 2929,
+ CODE_FOR_atomic_fetch_andsi = 2930,
+ CODE_FOR_atomic_fetch_adddi = 2931,
+ CODE_FOR_atomic_fetch_subdi = 2932,
+ CODE_FOR_atomic_fetch_ordi = 2933,
+ CODE_FOR_atomic_fetch_xordi = 2934,
+ CODE_FOR_atomic_fetch_anddi = 2935,
+ CODE_FOR_atomic_fetch_nandqi = 2936,
+ CODE_FOR_atomic_fetch_nandhi = 2937,
+ CODE_FOR_atomic_fetch_nandsi = 2938,
+ CODE_FOR_atomic_fetch_nanddi = 2939,
+ CODE_FOR_atomic_add_fetchqi = 2940,
+ CODE_FOR_atomic_sub_fetchqi = 2941,
+ CODE_FOR_atomic_or_fetchqi = 2942,
+ CODE_FOR_atomic_xor_fetchqi = 2943,
+ CODE_FOR_atomic_and_fetchqi = 2944,
+ CODE_FOR_atomic_add_fetchhi = 2945,
+ CODE_FOR_atomic_sub_fetchhi = 2946,
+ CODE_FOR_atomic_or_fetchhi = 2947,
+ CODE_FOR_atomic_xor_fetchhi = 2948,
+ CODE_FOR_atomic_and_fetchhi = 2949,
+ CODE_FOR_atomic_add_fetchsi = 2950,
+ CODE_FOR_atomic_sub_fetchsi = 2951,
+ CODE_FOR_atomic_or_fetchsi = 2952,
+ CODE_FOR_atomic_xor_fetchsi = 2953,
+ CODE_FOR_atomic_and_fetchsi = 2954,
+ CODE_FOR_atomic_add_fetchdi = 2955,
+ CODE_FOR_atomic_sub_fetchdi = 2956,
+ CODE_FOR_atomic_or_fetchdi = 2957,
+ CODE_FOR_atomic_xor_fetchdi = 2958,
+ CODE_FOR_atomic_and_fetchdi = 2959,
+ CODE_FOR_atomic_nand_fetchqi = 2960,
+ CODE_FOR_atomic_nand_fetchhi = 2961,
+ CODE_FOR_atomic_nand_fetchsi = 2962,
+ CODE_FOR_atomic_nand_fetchdi = 2963,
+ CODE_FOR_arm_load_exclusiveqi = 2964,
+ CODE_FOR_arm_load_exclusivehi = 2965,
+ CODE_FOR_arm_load_acquire_exclusiveqi = 2966,
+ CODE_FOR_arm_load_acquire_exclusivehi = 2967,
+ CODE_FOR_arm_load_exclusivesi = 2968,
+ CODE_FOR_arm_load_acquire_exclusivesi = 2969,
+ CODE_FOR_arm_load_exclusivedi = 2970,
+ CODE_FOR_arm_load_acquire_exclusivedi = 2971,
+ CODE_FOR_arm_store_exclusiveqi = 2972,
+ CODE_FOR_arm_store_exclusivehi = 2973,
+ CODE_FOR_arm_store_exclusivesi = 2974,
+ CODE_FOR_arm_store_exclusivedi = 2975,
+ CODE_FOR_arm_store_release_exclusivedi = 2976,
+ CODE_FOR_arm_store_release_exclusiveqi = 2977,
+ CODE_FOR_arm_store_release_exclusivehi = 2978,
+ CODE_FOR_arm_store_release_exclusivesi = 2979,
+ CODE_FOR_addqq3 = 2980,
+ CODE_FOR_addhq3 = 2981,
+ CODE_FOR_addsq3 = 2982,
+ CODE_FOR_adduqq3 = 2983,
+ CODE_FOR_adduhq3 = 2984,
+ CODE_FOR_addusq3 = 2985,
+ CODE_FOR_addha3 = 2986,
+ CODE_FOR_addsa3 = 2987,
+ CODE_FOR_adduha3 = 2988,
+ CODE_FOR_addusa3 = 2989,
+ CODE_FOR_usaddv4uqq3 = 2993,
+ CODE_FOR_usaddv2uhq3 = 2994,
+ CODE_FOR_usadduqq3 = 2995,
+ CODE_FOR_usadduhq3 = 2996,
+ CODE_FOR_usaddv2uha3 = 2997,
+ CODE_FOR_usadduha3 = 2998,
+ CODE_FOR_subqq3 = 3007,
+ CODE_FOR_subhq3 = 3008,
+ CODE_FOR_subsq3 = 3009,
+ CODE_FOR_subuqq3 = 3010,
+ CODE_FOR_subuhq3 = 3011,
+ CODE_FOR_subusq3 = 3012,
+ CODE_FOR_subha3 = 3013,
+ CODE_FOR_subsa3 = 3014,
+ CODE_FOR_subuha3 = 3015,
+ CODE_FOR_subusa3 = 3016,
+ CODE_FOR_ussubv4uqq3 = 3020,
+ CODE_FOR_ussubv2uhq3 = 3021,
+ CODE_FOR_ussubuqq3 = 3022,
+ CODE_FOR_ussubuhq3 = 3023,
+ CODE_FOR_ussubv2uha3 = 3024,
+ CODE_FOR_ussubuha3 = 3025,
+ CODE_FOR_arm_ssatsihi_shift = 3036,
+ CODE_FOR_arm_usatsihi = 3037,
+ CODE_FOR_mve_vst4qv16qi = 3053,
+ CODE_FOR_mve_vst4qv8hi = 3054,
+ CODE_FOR_mve_vst4qv4si = 3055,
+ CODE_FOR_mve_vst4qv8hf = 3056,
+ CODE_FOR_mve_vst4qv4sf = 3057,
+ CODE_FOR_mve_vrndq_m_fv8hf = 3058,
+ CODE_FOR_mve_vrndq_m_fv4sf = 3059,
+ CODE_FOR_mve_vrndxq_fv8hf = 3060,
+ CODE_FOR_mve_vrndxq_fv4sf = 3061,
+ CODE_FOR_mve_vrndq_fv8hf = 3062,
+ CODE_FOR_mve_vrndq_fv4sf = 3063,
+ CODE_FOR_mve_vrndpq_fv8hf = 3064,
+ CODE_FOR_mve_vrndpq_fv4sf = 3065,
+ CODE_FOR_mve_vrndnq_fv8hf = 3066,
+ CODE_FOR_mve_vrndnq_fv4sf = 3067,
+ CODE_FOR_mve_vrndmq_fv8hf = 3068,
+ CODE_FOR_mve_vrndmq_fv4sf = 3069,
+ CODE_FOR_mve_vrndaq_fv8hf = 3070,
+ CODE_FOR_mve_vrndaq_fv4sf = 3071,
+ CODE_FOR_mve_vrev64q_fv8hf = 3072,
+ CODE_FOR_mve_vrev64q_fv4sf = 3073,
+ CODE_FOR_mve_vnegq_fv8hf = 3074,
+ CODE_FOR_mve_vnegq_fv4sf = 3075,
+ CODE_FOR_mve_vdupq_n_fv8hf = 3076,
+ CODE_FOR_mve_vdupq_n_fv4sf = 3077,
+ CODE_FOR_mve_vabsq_fv8hf = 3078,
+ CODE_FOR_mve_vabsq_fv4sf = 3079,
+ CODE_FOR_mve_vrev32q_fv8hf = 3080,
+ CODE_FOR_mve_vcvttq_f32_f16v4sf = 3081,
+ CODE_FOR_mve_vcvtbq_f32_f16v4sf = 3082,
+ CODE_FOR_mve_vcvtq_to_f_sv8hf = 3083,
+ CODE_FOR_mve_vcvtq_to_f_uv8hf = 3084,
+ CODE_FOR_mve_vcvtq_to_f_sv4sf = 3085,
+ CODE_FOR_mve_vcvtq_to_f_uv4sf = 3086,
+ CODE_FOR_mve_vrev64q_sv16qi = 3087,
+ CODE_FOR_mve_vrev64q_uv16qi = 3088,
+ CODE_FOR_mve_vrev64q_sv8hi = 3089,
+ CODE_FOR_mve_vrev64q_uv8hi = 3090,
+ CODE_FOR_mve_vrev64q_sv4si = 3091,
+ CODE_FOR_mve_vrev64q_uv4si = 3092,
+ CODE_FOR_mve_vcvtq_from_f_sv8hi = 3093,
+ CODE_FOR_mve_vcvtq_from_f_uv8hi = 3094,
+ CODE_FOR_mve_vcvtq_from_f_sv4si = 3095,
+ CODE_FOR_mve_vcvtq_from_f_uv4si = 3096,
+ CODE_FOR_mve_vqnegq_sv16qi = 3097,
+ CODE_FOR_mve_vqnegq_sv8hi = 3098,
+ CODE_FOR_mve_vqnegq_sv4si = 3099,
+ CODE_FOR_mve_vqabsq_sv16qi = 3100,
+ CODE_FOR_mve_vqabsq_sv8hi = 3101,
+ CODE_FOR_mve_vqabsq_sv4si = 3102,
+ CODE_FOR_mve_vnegq_sv16qi = 3103,
+ CODE_FOR_mve_vnegq_sv8hi = 3104,
+ CODE_FOR_mve_vnegq_sv4si = 3105,
+ CODE_FOR_mve_vmvnq_uv16qi = 3106,
+ CODE_FOR_mve_vmvnq_uv8hi = 3107,
+ CODE_FOR_mve_vmvnq_uv4si = 3108,
+ CODE_FOR_mve_vdupq_n_uv16qi = 3109,
+ CODE_FOR_mve_vdupq_n_sv16qi = 3110,
+ CODE_FOR_mve_vdupq_n_uv8hi = 3111,
+ CODE_FOR_mve_vdupq_n_sv8hi = 3112,
+ CODE_FOR_mve_vdupq_n_uv4si = 3113,
+ CODE_FOR_mve_vdupq_n_sv4si = 3114,
+ CODE_FOR_mve_vclzq_sv16qi = 3115,
+ CODE_FOR_mve_vclzq_sv8hi = 3116,
+ CODE_FOR_mve_vclzq_sv4si = 3117,
+ CODE_FOR_mve_vclsq_sv16qi = 3118,
+ CODE_FOR_mve_vclsq_sv8hi = 3119,
+ CODE_FOR_mve_vclsq_sv4si = 3120,
+ CODE_FOR_mve_vaddvq_uv16qi = 3121,
+ CODE_FOR_mve_vaddvq_sv16qi = 3122,
+ CODE_FOR_mve_vaddvq_uv8hi = 3123,
+ CODE_FOR_mve_vaddvq_sv8hi = 3124,
+ CODE_FOR_mve_vaddvq_uv4si = 3125,
+ CODE_FOR_mve_vaddvq_sv4si = 3126,
+ CODE_FOR_mve_vabsq_sv16qi = 3127,
+ CODE_FOR_mve_vabsq_sv8hi = 3128,
+ CODE_FOR_mve_vabsq_sv4si = 3129,
+ CODE_FOR_mve_vrev32q_uv16qi = 3130,
+ CODE_FOR_mve_vrev32q_sv16qi = 3131,
+ CODE_FOR_mve_vrev32q_uv8hi = 3132,
+ CODE_FOR_mve_vrev32q_sv8hi = 3133,
+ CODE_FOR_mve_vmovltq_uv16qi = 3134,
+ CODE_FOR_mve_vmovltq_sv16qi = 3135,
+ CODE_FOR_mve_vmovltq_uv8hi = 3136,
+ CODE_FOR_mve_vmovltq_sv8hi = 3137,
+ CODE_FOR_mve_vmovlbq_sv16qi = 3138,
+ CODE_FOR_mve_vmovlbq_uv16qi = 3139,
+ CODE_FOR_mve_vmovlbq_sv8hi = 3140,
+ CODE_FOR_mve_vmovlbq_uv8hi = 3141,
+ CODE_FOR_mve_vcvtpq_sv8hi = 3142,
+ CODE_FOR_mve_vcvtpq_uv8hi = 3143,
+ CODE_FOR_mve_vcvtpq_sv4si = 3144,
+ CODE_FOR_mve_vcvtpq_uv4si = 3145,
+ CODE_FOR_mve_vcvtnq_sv8hi = 3146,
+ CODE_FOR_mve_vcvtnq_uv8hi = 3147,
+ CODE_FOR_mve_vcvtnq_sv4si = 3148,
+ CODE_FOR_mve_vcvtnq_uv4si = 3149,
+ CODE_FOR_mve_vcvtmq_sv8hi = 3150,
+ CODE_FOR_mve_vcvtmq_uv8hi = 3151,
+ CODE_FOR_mve_vcvtmq_sv4si = 3152,
+ CODE_FOR_mve_vcvtmq_uv4si = 3153,
+ CODE_FOR_mve_vcvtaq_uv8hi = 3154,
+ CODE_FOR_mve_vcvtaq_sv8hi = 3155,
+ CODE_FOR_mve_vcvtaq_uv4si = 3156,
+ CODE_FOR_mve_vcvtaq_sv4si = 3157,
+ CODE_FOR_mve_vmvnq_n_uv8hi = 3158,
+ CODE_FOR_mve_vmvnq_n_sv8hi = 3159,
+ CODE_FOR_mve_vmvnq_n_uv4si = 3160,
+ CODE_FOR_mve_vmvnq_n_sv4si = 3161,
+ CODE_FOR_mve_vrev16q_uv16qi = 3162,
+ CODE_FOR_mve_vrev16q_sv16qi = 3163,
+ CODE_FOR_mve_vaddlvq_uv4si = 3164,
+ CODE_FOR_mve_vaddlvq_sv4si = 3165,
+ CODE_FOR_mve_vctp8qv16bi = 3166,
+ CODE_FOR_mve_vctp16qv8bi = 3167,
+ CODE_FOR_mve_vctp32qv4bi = 3168,
+ CODE_FOR_mve_vctp64qv2qi = 3169,
+ CODE_FOR_mve_vpnotv16bi = 3170,
+ CODE_FOR_mve_vsubq_n_fv8hf = 3171,
+ CODE_FOR_mve_vsubq_n_fv4sf = 3172,
+ CODE_FOR_mve_vbrsrq_n_fv8hf = 3173,
+ CODE_FOR_mve_vbrsrq_n_fv4sf = 3174,
+ CODE_FOR_mve_vcvtq_n_to_f_sv8hf = 3175,
+ CODE_FOR_mve_vcvtq_n_to_f_uv8hf = 3176,
+ CODE_FOR_mve_vcvtq_n_to_f_sv4sf = 3177,
+ CODE_FOR_mve_vcvtq_n_to_f_uv4sf = 3178,
+ CODE_FOR_mve_vcreateq_fv8hf = 3179,
+ CODE_FOR_mve_vcreateq_fv4sf = 3180,
+ CODE_FOR_mve_vcreateq_uv16qi = 3181,
+ CODE_FOR_mve_vcreateq_sv16qi = 3182,
+ CODE_FOR_mve_vcreateq_uv8hi = 3183,
+ CODE_FOR_mve_vcreateq_sv8hi = 3184,
+ CODE_FOR_mve_vcreateq_uv4si = 3185,
+ CODE_FOR_mve_vcreateq_sv4si = 3186,
+ CODE_FOR_mve_vcreateq_uv2di = 3187,
+ CODE_FOR_mve_vcreateq_sv2di = 3188,
+ CODE_FOR_mve_vshrq_n_sv16qi = 3189,
+ CODE_FOR_mve_vshrq_n_uv16qi = 3190,
+ CODE_FOR_mve_vshrq_n_sv8hi = 3191,
+ CODE_FOR_mve_vshrq_n_uv8hi = 3192,
+ CODE_FOR_mve_vshrq_n_sv4si = 3193,
+ CODE_FOR_mve_vshrq_n_uv4si = 3194,
+ CODE_FOR_mve_vshrq_n_sv16qi_imm = 3195,
+ CODE_FOR_mve_vshrq_n_sv8hi_imm = 3196,
+ CODE_FOR_mve_vshrq_n_sv4si_imm = 3197,
+ CODE_FOR_mve_vshrq_n_uv16qi_imm = 3198,
+ CODE_FOR_mve_vshrq_n_uv8hi_imm = 3199,
+ CODE_FOR_mve_vshrq_n_uv4si_imm = 3200,
+ CODE_FOR_mve_vcvtq_n_from_f_sv8hi = 3201,
+ CODE_FOR_mve_vcvtq_n_from_f_uv8hi = 3202,
+ CODE_FOR_mve_vcvtq_n_from_f_sv4si = 3203,
+ CODE_FOR_mve_vcvtq_n_from_f_uv4si = 3204,
+ CODE_FOR_mve_vaddlvq_p_sv4si = 3205,
+ CODE_FOR_mve_vaddlvq_p_uv4si = 3206,
+ CODE_FOR_mve_vcmpeqq_v16qi = 3207,
+ CODE_FOR_mve_vcmpgeq_v16qi = 3208,
+ CODE_FOR_mve_vcmpcsq_v16qi = 3209,
+ CODE_FOR_mve_vcmpgtq_v16qi = 3210,
+ CODE_FOR_mve_vcmphiq_v16qi = 3211,
+ CODE_FOR_mve_vcmpleq_v16qi = 3212,
+ CODE_FOR_mve_vcmpltq_v16qi = 3213,
+ CODE_FOR_mve_vcmpneq_v16qi = 3214,
+ CODE_FOR_mve_vcmpeqq_v8hi = 3215,
+ CODE_FOR_mve_vcmpgeq_v8hi = 3216,
+ CODE_FOR_mve_vcmpcsq_v8hi = 3217,
+ CODE_FOR_mve_vcmpgtq_v8hi = 3218,
+ CODE_FOR_mve_vcmphiq_v8hi = 3219,
+ CODE_FOR_mve_vcmpleq_v8hi = 3220,
+ CODE_FOR_mve_vcmpltq_v8hi = 3221,
+ CODE_FOR_mve_vcmpneq_v8hi = 3222,
+ CODE_FOR_mve_vcmpeqq_v4si = 3223,
+ CODE_FOR_mve_vcmpgeq_v4si = 3224,
+ CODE_FOR_mve_vcmpcsq_v4si = 3225,
+ CODE_FOR_mve_vcmpgtq_v4si = 3226,
+ CODE_FOR_mve_vcmphiq_v4si = 3227,
+ CODE_FOR_mve_vcmpleq_v4si = 3228,
+ CODE_FOR_mve_vcmpltq_v4si = 3229,
+ CODE_FOR_mve_vcmpneq_v4si = 3230,
+ CODE_FOR_mve_vcmpeqq_n_v16qi = 3231,
+ CODE_FOR_mve_vcmpgeq_n_v16qi = 3232,
+ CODE_FOR_mve_vcmpcsq_n_v16qi = 3233,
+ CODE_FOR_mve_vcmpgtq_n_v16qi = 3234,
+ CODE_FOR_mve_vcmphiq_n_v16qi = 3235,
+ CODE_FOR_mve_vcmpleq_n_v16qi = 3236,
+ CODE_FOR_mve_vcmpltq_n_v16qi = 3237,
+ CODE_FOR_mve_vcmpneq_n_v16qi = 3238,
+ CODE_FOR_mve_vcmpeqq_n_v8hi = 3239,
+ CODE_FOR_mve_vcmpgeq_n_v8hi = 3240,
+ CODE_FOR_mve_vcmpcsq_n_v8hi = 3241,
+ CODE_FOR_mve_vcmpgtq_n_v8hi = 3242,
+ CODE_FOR_mve_vcmphiq_n_v8hi = 3243,
+ CODE_FOR_mve_vcmpleq_n_v8hi = 3244,
+ CODE_FOR_mve_vcmpltq_n_v8hi = 3245,
+ CODE_FOR_mve_vcmpneq_n_v8hi = 3246,
+ CODE_FOR_mve_vcmpeqq_n_v4si = 3247,
+ CODE_FOR_mve_vcmpgeq_n_v4si = 3248,
+ CODE_FOR_mve_vcmpcsq_n_v4si = 3249,
+ CODE_FOR_mve_vcmpgtq_n_v4si = 3250,
+ CODE_FOR_mve_vcmphiq_n_v4si = 3251,
+ CODE_FOR_mve_vcmpleq_n_v4si = 3252,
+ CODE_FOR_mve_vcmpltq_n_v4si = 3253,
+ CODE_FOR_mve_vcmpneq_n_v4si = 3254,
+ CODE_FOR_mve_vabdq_sv16qi = 3255,
+ CODE_FOR_mve_vabdq_uv16qi = 3256,
+ CODE_FOR_mve_vabdq_sv8hi = 3257,
+ CODE_FOR_mve_vabdq_uv8hi = 3258,
+ CODE_FOR_mve_vabdq_sv4si = 3259,
+ CODE_FOR_mve_vabdq_uv4si = 3260,
+ CODE_FOR_mve_vaddq_n_sv16qi = 3261,
+ CODE_FOR_mve_vaddq_n_uv16qi = 3262,
+ CODE_FOR_mve_vaddq_n_sv8hi = 3263,
+ CODE_FOR_mve_vaddq_n_uv8hi = 3264,
+ CODE_FOR_mve_vaddq_n_sv4si = 3265,
+ CODE_FOR_mve_vaddq_n_uv4si = 3266,
+ CODE_FOR_mve_vaddvaq_sv16qi = 3267,
+ CODE_FOR_mve_vaddvaq_uv16qi = 3268,
+ CODE_FOR_mve_vaddvaq_sv8hi = 3269,
+ CODE_FOR_mve_vaddvaq_uv8hi = 3270,
+ CODE_FOR_mve_vaddvaq_sv4si = 3271,
+ CODE_FOR_mve_vaddvaq_uv4si = 3272,
+ CODE_FOR_mve_vaddvq_p_uv16qi = 3273,
+ CODE_FOR_mve_vaddvq_p_sv16qi = 3274,
+ CODE_FOR_mve_vaddvq_p_uv8hi = 3275,
+ CODE_FOR_mve_vaddvq_p_sv8hi = 3276,
+ CODE_FOR_mve_vaddvq_p_uv4si = 3277,
+ CODE_FOR_mve_vaddvq_p_sv4si = 3278,
+ CODE_FOR_mve_vandq_uv16qi = 3279,
+ CODE_FOR_mve_vandq_uv8hi = 3280,
+ CODE_FOR_mve_vandq_uv4si = 3281,
+ CODE_FOR_mve_vbicq_uv16qi = 3282,
+ CODE_FOR_mve_vbicq_uv8hi = 3283,
+ CODE_FOR_mve_vbicq_uv4si = 3284,
+ CODE_FOR_mve_vbrsrq_n_uv16qi = 3285,
+ CODE_FOR_mve_vbrsrq_n_sv16qi = 3286,
+ CODE_FOR_mve_vbrsrq_n_uv8hi = 3287,
+ CODE_FOR_mve_vbrsrq_n_sv8hi = 3288,
+ CODE_FOR_mve_vbrsrq_n_uv4si = 3289,
+ CODE_FOR_mve_vbrsrq_n_sv4si = 3290,
+ CODE_FOR_mve_vcaddq_rot90v16qi = 3291,
+ CODE_FOR_mve_vcaddq_rot270v16qi = 3292,
+ CODE_FOR_mve_vcaddq_rot90v8hi = 3293,
+ CODE_FOR_mve_vcaddq_rot270v8hi = 3294,
+ CODE_FOR_mve_vcaddq_rot90v4si = 3295,
+ CODE_FOR_mve_vcaddq_rot270v4si = 3296,
+ CODE_FOR_mve_veorq_uv16qi = 3297,
+ CODE_FOR_mve_veorq_uv8hi = 3298,
+ CODE_FOR_mve_veorq_uv4si = 3299,
+ CODE_FOR_mve_vhaddq_n_uv16qi = 3300,
+ CODE_FOR_mve_vhaddq_n_sv16qi = 3301,
+ CODE_FOR_mve_vhaddq_n_uv8hi = 3302,
+ CODE_FOR_mve_vhaddq_n_sv8hi = 3303,
+ CODE_FOR_mve_vhaddq_n_uv4si = 3304,
+ CODE_FOR_mve_vhaddq_n_sv4si = 3305,
+ CODE_FOR_mve_vhaddq_sv16qi = 3306,
+ CODE_FOR_mve_vhaddq_uv16qi = 3307,
+ CODE_FOR_mve_vhaddq_sv8hi = 3308,
+ CODE_FOR_mve_vhaddq_uv8hi = 3309,
+ CODE_FOR_mve_vhaddq_sv4si = 3310,
+ CODE_FOR_mve_vhaddq_uv4si = 3311,
+ CODE_FOR_mve_vhcaddq_rot270_sv16qi = 3312,
+ CODE_FOR_mve_vhcaddq_rot270_sv8hi = 3313,
+ CODE_FOR_mve_vhcaddq_rot270_sv4si = 3314,
+ CODE_FOR_mve_vhcaddq_rot90_sv16qi = 3315,
+ CODE_FOR_mve_vhcaddq_rot90_sv8hi = 3316,
+ CODE_FOR_mve_vhcaddq_rot90_sv4si = 3317,
+ CODE_FOR_mve_vhsubq_n_uv16qi = 3318,
+ CODE_FOR_mve_vhsubq_n_sv16qi = 3319,
+ CODE_FOR_mve_vhsubq_n_uv8hi = 3320,
+ CODE_FOR_mve_vhsubq_n_sv8hi = 3321,
+ CODE_FOR_mve_vhsubq_n_uv4si = 3322,
+ CODE_FOR_mve_vhsubq_n_sv4si = 3323,
+ CODE_FOR_mve_vhsubq_sv16qi = 3324,
+ CODE_FOR_mve_vhsubq_uv16qi = 3325,
+ CODE_FOR_mve_vhsubq_sv8hi = 3326,
+ CODE_FOR_mve_vhsubq_uv8hi = 3327,
+ CODE_FOR_mve_vhsubq_sv4si = 3328,
+ CODE_FOR_mve_vhsubq_uv4si = 3329,
+ CODE_FOR_mve_vmaxaq_sv16qi = 3330,
+ CODE_FOR_mve_vmaxaq_sv8hi = 3331,
+ CODE_FOR_mve_vmaxaq_sv4si = 3332,
+ CODE_FOR_mve_vmaxavq_sv16qi = 3333,
+ CODE_FOR_mve_vmaxavq_sv8hi = 3334,
+ CODE_FOR_mve_vmaxavq_sv4si = 3335,
+ CODE_FOR_mve_vmaxq_sv16qi = 3336,
+ CODE_FOR_mve_vmaxq_sv8hi = 3337,
+ CODE_FOR_mve_vmaxq_sv4si = 3338,
+ CODE_FOR_mve_vmaxq_uv16qi = 3339,
+ CODE_FOR_mve_vmaxq_uv8hi = 3340,
+ CODE_FOR_mve_vmaxq_uv4si = 3341,
+ CODE_FOR_mve_vmaxvq_uv16qi = 3342,
+ CODE_FOR_mve_vmaxvq_sv16qi = 3343,
+ CODE_FOR_mve_vmaxvq_uv8hi = 3344,
+ CODE_FOR_mve_vmaxvq_sv8hi = 3345,
+ CODE_FOR_mve_vmaxvq_uv4si = 3346,
+ CODE_FOR_mve_vmaxvq_sv4si = 3347,
+ CODE_FOR_mve_vminaq_sv16qi = 3348,
+ CODE_FOR_mve_vminaq_sv8hi = 3349,
+ CODE_FOR_mve_vminaq_sv4si = 3350,
+ CODE_FOR_mve_vminavq_sv16qi = 3351,
+ CODE_FOR_mve_vminavq_sv8hi = 3352,
+ CODE_FOR_mve_vminavq_sv4si = 3353,
+ CODE_FOR_mve_vminq_sv16qi = 3354,
+ CODE_FOR_mve_vminq_sv8hi = 3355,
+ CODE_FOR_mve_vminq_sv4si = 3356,
+ CODE_FOR_mve_vminq_uv16qi = 3357,
+ CODE_FOR_mve_vminq_uv8hi = 3358,
+ CODE_FOR_mve_vminq_uv4si = 3359,
+ CODE_FOR_mve_vminvq_uv16qi = 3360,
+ CODE_FOR_mve_vminvq_sv16qi = 3361,
+ CODE_FOR_mve_vminvq_uv8hi = 3362,
+ CODE_FOR_mve_vminvq_sv8hi = 3363,
+ CODE_FOR_mve_vminvq_uv4si = 3364,
+ CODE_FOR_mve_vminvq_sv4si = 3365,
+ CODE_FOR_mve_vmladavq_uv16qi = 3366,
+ CODE_FOR_mve_vmladavq_sv16qi = 3367,
+ CODE_FOR_mve_vmladavq_uv8hi = 3368,
+ CODE_FOR_mve_vmladavq_sv8hi = 3369,
+ CODE_FOR_mve_vmladavq_uv4si = 3370,
+ CODE_FOR_mve_vmladavq_sv4si = 3371,
+ CODE_FOR_mve_vmladavxq_sv16qi = 3372,
+ CODE_FOR_mve_vmladavxq_sv8hi = 3373,
+ CODE_FOR_mve_vmladavxq_sv4si = 3374,
+ CODE_FOR_mve_vmlsdavq_sv16qi = 3375,
+ CODE_FOR_mve_vmlsdavq_sv8hi = 3376,
+ CODE_FOR_mve_vmlsdavq_sv4si = 3377,
+ CODE_FOR_mve_vmlsdavxq_sv16qi = 3378,
+ CODE_FOR_mve_vmlsdavxq_sv8hi = 3379,
+ CODE_FOR_mve_vmlsdavxq_sv4si = 3380,
+ CODE_FOR_mve_vmulhq_sv16qi = 3381,
+ CODE_FOR_mve_vmulhq_uv16qi = 3382,
+ CODE_FOR_mve_vmulhq_sv8hi = 3383,
+ CODE_FOR_mve_vmulhq_uv8hi = 3384,
+ CODE_FOR_mve_vmulhq_sv4si = 3385,
+ CODE_FOR_mve_vmulhq_uv4si = 3386,
+ CODE_FOR_mve_vmullbq_int_uv16qi = 3387,
+ CODE_FOR_mve_vmullbq_int_sv16qi = 3388,
+ CODE_FOR_mve_vmullbq_int_uv8hi = 3389,
+ CODE_FOR_mve_vmullbq_int_sv8hi = 3390,
+ CODE_FOR_mve_vmullbq_int_uv4si = 3391,
+ CODE_FOR_mve_vmullbq_int_sv4si = 3392,
+ CODE_FOR_mve_vmulltq_int_uv16qi = 3393,
+ CODE_FOR_mve_vmulltq_int_sv16qi = 3394,
+ CODE_FOR_mve_vmulltq_int_uv8hi = 3395,
+ CODE_FOR_mve_vmulltq_int_sv8hi = 3396,
+ CODE_FOR_mve_vmulltq_int_uv4si = 3397,
+ CODE_FOR_mve_vmulltq_int_sv4si = 3398,
+ CODE_FOR_mve_vmulq_n_uv16qi = 3399,
+ CODE_FOR_mve_vmulq_n_sv16qi = 3400,
+ CODE_FOR_mve_vmulq_n_uv8hi = 3401,
+ CODE_FOR_mve_vmulq_n_sv8hi = 3402,
+ CODE_FOR_mve_vmulq_n_uv4si = 3403,
+ CODE_FOR_mve_vmulq_n_sv4si = 3404,
+ CODE_FOR_mve_vmulq_uv16qi = 3405,
+ CODE_FOR_mve_vmulq_sv16qi = 3406,
+ CODE_FOR_mve_vmulq_uv8hi = 3407,
+ CODE_FOR_mve_vmulq_sv8hi = 3408,
+ CODE_FOR_mve_vmulq_uv4si = 3409,
+ CODE_FOR_mve_vmulq_sv4si = 3410,
+ CODE_FOR_mve_vmulqv16qi = 3411,
+ CODE_FOR_mve_vmulqv8hi = 3412,
+ CODE_FOR_mve_vmulqv4si = 3413,
+ CODE_FOR_mve_vornq_sv16qi = 3414,
+ CODE_FOR_mve_vornq_sv8hi = 3415,
+ CODE_FOR_mve_vornq_sv4si = 3416,
+ CODE_FOR_mve_vorrq_sv16qi = 3417,
+ CODE_FOR_mve_vorrq_sv8hi = 3418,
+ CODE_FOR_mve_vorrq_sv4si = 3419,
+ CODE_FOR_mve_vqaddq_n_sv16qi = 3420,
+ CODE_FOR_mve_vqaddq_n_uv16qi = 3421,
+ CODE_FOR_mve_vqaddq_n_sv8hi = 3422,
+ CODE_FOR_mve_vqaddq_n_uv8hi = 3423,
+ CODE_FOR_mve_vqaddq_n_sv4si = 3424,
+ CODE_FOR_mve_vqaddq_n_uv4si = 3425,
+ CODE_FOR_mve_vqaddq_uv16qi = 3426,
+ CODE_FOR_mve_vqaddq_sv16qi = 3427,
+ CODE_FOR_mve_vqaddq_uv8hi = 3428,
+ CODE_FOR_mve_vqaddq_sv8hi = 3429,
+ CODE_FOR_mve_vqaddq_uv4si = 3430,
+ CODE_FOR_mve_vqaddq_sv4si = 3431,
+ CODE_FOR_mve_vqdmulhq_n_sv16qi = 3432,
+ CODE_FOR_mve_vqdmulhq_n_sv8hi = 3433,
+ CODE_FOR_mve_vqdmulhq_n_sv4si = 3434,
+ CODE_FOR_mve_vqdmulhq_sv16qi = 3435,
+ CODE_FOR_mve_vqdmulhq_sv8hi = 3436,
+ CODE_FOR_mve_vqdmulhq_sv4si = 3437,
+ CODE_FOR_mve_vqrdmulhq_n_sv16qi = 3438,
+ CODE_FOR_mve_vqrdmulhq_n_sv8hi = 3439,
+ CODE_FOR_mve_vqrdmulhq_n_sv4si = 3440,
+ CODE_FOR_mve_vqrdmulhq_sv16qi = 3441,
+ CODE_FOR_mve_vqrdmulhq_sv8hi = 3442,
+ CODE_FOR_mve_vqrdmulhq_sv4si = 3443,
+ CODE_FOR_mve_vqrshlq_n_sv16qi = 3444,
+ CODE_FOR_mve_vqrshlq_n_uv16qi = 3445,
+ CODE_FOR_mve_vqrshlq_n_sv8hi = 3446,
+ CODE_FOR_mve_vqrshlq_n_uv8hi = 3447,
+ CODE_FOR_mve_vqrshlq_n_sv4si = 3448,
+ CODE_FOR_mve_vqrshlq_n_uv4si = 3449,
+ CODE_FOR_mve_vqrshlq_sv16qi = 3450,
+ CODE_FOR_mve_vqrshlq_uv16qi = 3451,
+ CODE_FOR_mve_vqrshlq_sv8hi = 3452,
+ CODE_FOR_mve_vqrshlq_uv8hi = 3453,
+ CODE_FOR_mve_vqrshlq_sv4si = 3454,
+ CODE_FOR_mve_vqrshlq_uv4si = 3455,
+ CODE_FOR_mve_vqshlq_n_sv16qi = 3456,
+ CODE_FOR_mve_vqshlq_n_uv16qi = 3457,
+ CODE_FOR_mve_vqshlq_n_sv8hi = 3458,
+ CODE_FOR_mve_vqshlq_n_uv8hi = 3459,
+ CODE_FOR_mve_vqshlq_n_sv4si = 3460,
+ CODE_FOR_mve_vqshlq_n_uv4si = 3461,
+ CODE_FOR_mve_vqshlq_r_uv16qi = 3462,
+ CODE_FOR_mve_vqshlq_r_sv16qi = 3463,
+ CODE_FOR_mve_vqshlq_r_uv8hi = 3464,
+ CODE_FOR_mve_vqshlq_r_sv8hi = 3465,
+ CODE_FOR_mve_vqshlq_r_uv4si = 3466,
+ CODE_FOR_mve_vqshlq_r_sv4si = 3467,
+ CODE_FOR_mve_vqshlq_sv16qi = 3468,
+ CODE_FOR_mve_vqshlq_uv16qi = 3469,
+ CODE_FOR_mve_vqshlq_sv8hi = 3470,
+ CODE_FOR_mve_vqshlq_uv8hi = 3471,
+ CODE_FOR_mve_vqshlq_sv4si = 3472,
+ CODE_FOR_mve_vqshlq_uv4si = 3473,
+ CODE_FOR_mve_vqshluq_n_sv16qi = 3474,
+ CODE_FOR_mve_vqshluq_n_sv8hi = 3475,
+ CODE_FOR_mve_vqshluq_n_sv4si = 3476,
+ CODE_FOR_mve_vqsubq_n_sv16qi = 3477,
+ CODE_FOR_mve_vqsubq_n_uv16qi = 3478,
+ CODE_FOR_mve_vqsubq_n_sv8hi = 3479,
+ CODE_FOR_mve_vqsubq_n_uv8hi = 3480,
+ CODE_FOR_mve_vqsubq_n_sv4si = 3481,
+ CODE_FOR_mve_vqsubq_n_uv4si = 3482,
+ CODE_FOR_mve_vqsubq_uv16qi = 3483,
+ CODE_FOR_mve_vqsubq_sv16qi = 3484,
+ CODE_FOR_mve_vqsubq_uv8hi = 3485,
+ CODE_FOR_mve_vqsubq_sv8hi = 3486,
+ CODE_FOR_mve_vqsubq_uv4si = 3487,
+ CODE_FOR_mve_vqsubq_sv4si = 3488,
+ CODE_FOR_mve_vrhaddq_sv16qi = 3489,
+ CODE_FOR_mve_vrhaddq_uv16qi = 3490,
+ CODE_FOR_mve_vrhaddq_sv8hi = 3491,
+ CODE_FOR_mve_vrhaddq_uv8hi = 3492,
+ CODE_FOR_mve_vrhaddq_sv4si = 3493,
+ CODE_FOR_mve_vrhaddq_uv4si = 3494,
+ CODE_FOR_mve_vrmulhq_sv16qi = 3495,
+ CODE_FOR_mve_vrmulhq_uv16qi = 3496,
+ CODE_FOR_mve_vrmulhq_sv8hi = 3497,
+ CODE_FOR_mve_vrmulhq_uv8hi = 3498,
+ CODE_FOR_mve_vrmulhq_sv4si = 3499,
+ CODE_FOR_mve_vrmulhq_uv4si = 3500,
+ CODE_FOR_mve_vrshlq_n_uv16qi = 3501,
+ CODE_FOR_mve_vrshlq_n_sv16qi = 3502,
+ CODE_FOR_mve_vrshlq_n_uv8hi = 3503,
+ CODE_FOR_mve_vrshlq_n_sv8hi = 3504,
+ CODE_FOR_mve_vrshlq_n_uv4si = 3505,
+ CODE_FOR_mve_vrshlq_n_sv4si = 3506,
+ CODE_FOR_mve_vrshlq_sv16qi = 3507,
+ CODE_FOR_mve_vrshlq_uv16qi = 3508,
+ CODE_FOR_mve_vrshlq_sv8hi = 3509,
+ CODE_FOR_mve_vrshlq_uv8hi = 3510,
+ CODE_FOR_mve_vrshlq_sv4si = 3511,
+ CODE_FOR_mve_vrshlq_uv4si = 3512,
+ CODE_FOR_mve_vrshrq_n_sv16qi = 3513,
+ CODE_FOR_mve_vrshrq_n_uv16qi = 3514,
+ CODE_FOR_mve_vrshrq_n_sv8hi = 3515,
+ CODE_FOR_mve_vrshrq_n_uv8hi = 3516,
+ CODE_FOR_mve_vrshrq_n_sv4si = 3517,
+ CODE_FOR_mve_vrshrq_n_uv4si = 3518,
+ CODE_FOR_mve_vshlq_n_uv16qi = 3519,
+ CODE_FOR_mve_vshlq_n_sv16qi = 3520,
+ CODE_FOR_mve_vshlq_n_uv8hi = 3521,
+ CODE_FOR_mve_vshlq_n_sv8hi = 3522,
+ CODE_FOR_mve_vshlq_n_uv4si = 3523,
+ CODE_FOR_mve_vshlq_n_sv4si = 3524,
+ CODE_FOR_mve_vshlq_r_sv16qi = 3525,
+ CODE_FOR_mve_vshlq_r_uv16qi = 3526,
+ CODE_FOR_mve_vshlq_r_sv8hi = 3527,
+ CODE_FOR_mve_vshlq_r_uv8hi = 3528,
+ CODE_FOR_mve_vshlq_r_sv4si = 3529,
+ CODE_FOR_mve_vshlq_r_uv4si = 3530,
+ CODE_FOR_mve_vsubq_n_sv16qi = 3531,
+ CODE_FOR_mve_vsubq_n_uv16qi = 3532,
+ CODE_FOR_mve_vsubq_n_sv8hi = 3533,
+ CODE_FOR_mve_vsubq_n_uv8hi = 3534,
+ CODE_FOR_mve_vsubq_n_sv4si = 3535,
+ CODE_FOR_mve_vsubq_n_uv4si = 3536,
+ CODE_FOR_mve_vsubq_sv16qi = 3537,
+ CODE_FOR_mve_vsubq_uv16qi = 3538,
+ CODE_FOR_mve_vsubq_sv8hi = 3539,
+ CODE_FOR_mve_vsubq_uv8hi = 3540,
+ CODE_FOR_mve_vsubq_sv4si = 3541,
+ CODE_FOR_mve_vsubq_uv4si = 3542,
+ CODE_FOR_mve_vsubqv16qi = 3543,
+ CODE_FOR_mve_vsubqv8hi = 3544,
+ CODE_FOR_mve_vsubqv4si = 3545,
+ CODE_FOR_mve_vabdq_fv8hf = 3546,
+ CODE_FOR_mve_vabdq_fv4sf = 3547,
+ CODE_FOR_mve_vaddlvaq_sv4si = 3548,
+ CODE_FOR_mve_vaddlvaq_uv4si = 3549,
+ CODE_FOR_mve_vaddq_n_fv8hf = 3550,
+ CODE_FOR_mve_vaddq_n_fv4sf = 3551,
+ CODE_FOR_mve_vandq_fv8hf = 3552,
+ CODE_FOR_mve_vandq_fv4sf = 3553,
+ CODE_FOR_mve_vbicq_fv8hf = 3554,
+ CODE_FOR_mve_vbicq_fv4sf = 3555,
+ CODE_FOR_mve_vbicq_n_sv8hi = 3556,
+ CODE_FOR_mve_vbicq_n_uv8hi = 3557,
+ CODE_FOR_mve_vbicq_n_sv4si = 3558,
+ CODE_FOR_mve_vbicq_n_uv4si = 3559,
+ CODE_FOR_mve_vcaddq_rot90v8hf = 3560,
+ CODE_FOR_mve_vcaddq_rot270v8hf = 3561,
+ CODE_FOR_mve_vcaddq_rot90v4sf = 3562,
+ CODE_FOR_mve_vcaddq_rot270v4sf = 3563,
+ CODE_FOR_mve_vcmpeqq_fv8hf = 3564,
+ CODE_FOR_mve_vcmpgeq_fv8hf = 3565,
+ CODE_FOR_mve_vcmpgtq_fv8hf = 3566,
+ CODE_FOR_mve_vcmpleq_fv8hf = 3567,
+ CODE_FOR_mve_vcmpltq_fv8hf = 3568,
+ CODE_FOR_mve_vcmpneq_fv8hf = 3569,
+ CODE_FOR_mve_vcmpeqq_fv4sf = 3570,
+ CODE_FOR_mve_vcmpgeq_fv4sf = 3571,
+ CODE_FOR_mve_vcmpgtq_fv4sf = 3572,
+ CODE_FOR_mve_vcmpleq_fv4sf = 3573,
+ CODE_FOR_mve_vcmpltq_fv4sf = 3574,
+ CODE_FOR_mve_vcmpneq_fv4sf = 3575,
+ CODE_FOR_mve_vcmpeqq_n_fv8hf = 3576,
+ CODE_FOR_mve_vcmpgeq_n_fv8hf = 3577,
+ CODE_FOR_mve_vcmpgtq_n_fv8hf = 3578,
+ CODE_FOR_mve_vcmpleq_n_fv8hf = 3579,
+ CODE_FOR_mve_vcmpltq_n_fv8hf = 3580,
+ CODE_FOR_mve_vcmpneq_n_fv8hf = 3581,
+ CODE_FOR_mve_vcmpeqq_n_fv4sf = 3582,
+ CODE_FOR_mve_vcmpgeq_n_fv4sf = 3583,
+ CODE_FOR_mve_vcmpgtq_n_fv4sf = 3584,
+ CODE_FOR_mve_vcmpleq_n_fv4sf = 3585,
+ CODE_FOR_mve_vcmpltq_n_fv4sf = 3586,
+ CODE_FOR_mve_vcmpneq_n_fv4sf = 3587,
+ CODE_FOR_mve_vcmulqv8hf = 3588,
+ CODE_FOR_mve_vcmulq_rot90v8hf = 3589,
+ CODE_FOR_mve_vcmulq_rot180v8hf = 3590,
+ CODE_FOR_mve_vcmulq_rot270v8hf = 3591,
+ CODE_FOR_mve_vcmulqv4sf = 3592,
+ CODE_FOR_mve_vcmulq_rot90v4sf = 3593,
+ CODE_FOR_mve_vcmulq_rot180v4sf = 3594,
+ CODE_FOR_mve_vcmulq_rot270v4sf = 3595,
+ CODE_FOR_mve_vctp8q_mv16bi = 3596,
+ CODE_FOR_mve_vctp16q_mv8bi = 3597,
+ CODE_FOR_mve_vctp32q_mv4bi = 3598,
+ CODE_FOR_mve_vctp64q_mv2qi = 3599,
+ CODE_FOR_mve_vcvtbq_f16_f32v8hf = 3600,
+ CODE_FOR_mve_vcvttq_f16_f32v8hf = 3601,
+ CODE_FOR_mve_veorq_fv8hf = 3602,
+ CODE_FOR_mve_veorq_fv4sf = 3603,
+ CODE_FOR_mve_vmaxnmaq_fv8hf = 3604,
+ CODE_FOR_mve_vmaxnmaq_fv4sf = 3605,
+ CODE_FOR_mve_vmaxnmavq_fv8hf = 3606,
+ CODE_FOR_mve_vmaxnmavq_fv4sf = 3607,
+ CODE_FOR_mve_vmaxnmq_fv8hf = 3608,
+ CODE_FOR_mve_vmaxnmq_fv4sf = 3609,
+ CODE_FOR_mve_vmaxnmvq_fv8hf = 3610,
+ CODE_FOR_mve_vmaxnmvq_fv4sf = 3611,
+ CODE_FOR_mve_vminnmaq_fv8hf = 3612,
+ CODE_FOR_mve_vminnmaq_fv4sf = 3613,
+ CODE_FOR_mve_vminnmavq_fv8hf = 3614,
+ CODE_FOR_mve_vminnmavq_fv4sf = 3615,
+ CODE_FOR_mve_vminnmq_fv8hf = 3616,
+ CODE_FOR_mve_vminnmq_fv4sf = 3617,
+ CODE_FOR_mve_vminnmvq_fv8hf = 3618,
+ CODE_FOR_mve_vminnmvq_fv4sf = 3619,
+ CODE_FOR_mve_vmlaldavq_uv8hi = 3620,
+ CODE_FOR_mve_vmlaldavq_sv8hi = 3621,
+ CODE_FOR_mve_vmlaldavq_uv4si = 3622,
+ CODE_FOR_mve_vmlaldavq_sv4si = 3623,
+ CODE_FOR_mve_vmlaldavxq_sv8hi = 3624,
+ CODE_FOR_mve_vmlaldavxq_sv4si = 3625,
+ CODE_FOR_mve_vmlsldavq_sv8hi = 3626,
+ CODE_FOR_mve_vmlsldavq_sv4si = 3627,
+ CODE_FOR_mve_vmlsldavxq_sv8hi = 3628,
+ CODE_FOR_mve_vmlsldavxq_sv4si = 3629,
+ CODE_FOR_mve_vmovnbq_uv8hi = 3630,
+ CODE_FOR_mve_vmovnbq_sv8hi = 3631,
+ CODE_FOR_mve_vmovnbq_uv4si = 3632,
+ CODE_FOR_mve_vmovnbq_sv4si = 3633,
+ CODE_FOR_mve_vmovntq_sv8hi = 3634,
+ CODE_FOR_mve_vmovntq_uv8hi = 3635,
+ CODE_FOR_mve_vmovntq_sv4si = 3636,
+ CODE_FOR_mve_vmovntq_uv4si = 3637,
+ CODE_FOR_mve_vmulq_fv8hf = 3638,
+ CODE_FOR_mve_vmulq_fv4sf = 3639,
+ CODE_FOR_mve_vmulq_n_fv8hf = 3640,
+ CODE_FOR_mve_vmulq_n_fv4sf = 3641,
+ CODE_FOR_mve_vornq_fv8hf = 3642,
+ CODE_FOR_mve_vornq_fv4sf = 3643,
+ CODE_FOR_mve_vorrq_fv8hf = 3644,
+ CODE_FOR_mve_vorrq_fv4sf = 3645,
+ CODE_FOR_mve_vorrq_n_uv8hi = 3646,
+ CODE_FOR_mve_vorrq_n_sv8hi = 3647,
+ CODE_FOR_mve_vorrq_n_uv4si = 3648,
+ CODE_FOR_mve_vorrq_n_sv4si = 3649,
+ CODE_FOR_mve_vqdmullbq_n_sv8hi = 3650,
+ CODE_FOR_mve_vqdmullbq_n_sv4si = 3651,
+ CODE_FOR_mve_vqdmullbq_sv8hi = 3652,
+ CODE_FOR_mve_vqdmullbq_sv4si = 3653,
+ CODE_FOR_mve_vqdmulltq_n_sv8hi = 3654,
+ CODE_FOR_mve_vqdmulltq_n_sv4si = 3655,
+ CODE_FOR_mve_vqdmulltq_sv8hi = 3656,
+ CODE_FOR_mve_vqdmulltq_sv4si = 3657,
+ CODE_FOR_mve_vqmovnbq_uv8hi = 3658,
+ CODE_FOR_mve_vqmovnbq_sv8hi = 3659,
+ CODE_FOR_mve_vqmovnbq_uv4si = 3660,
+ CODE_FOR_mve_vqmovnbq_sv4si = 3661,
+ CODE_FOR_mve_vqmovntq_uv8hi = 3662,
+ CODE_FOR_mve_vqmovntq_sv8hi = 3663,
+ CODE_FOR_mve_vqmovntq_uv4si = 3664,
+ CODE_FOR_mve_vqmovntq_sv4si = 3665,
+ CODE_FOR_mve_vqmovunbq_sv8hi = 3666,
+ CODE_FOR_mve_vqmovunbq_sv4si = 3667,
+ CODE_FOR_mve_vqmovuntq_sv8hi = 3668,
+ CODE_FOR_mve_vqmovuntq_sv4si = 3669,
+ CODE_FOR_mve_vrmlaldavhxq_sv4si = 3670,
+ CODE_FOR_mve_vrmlsldavhq_sv4si = 3671,
+ CODE_FOR_mve_vrmlsldavhxq_sv4si = 3672,
+ CODE_FOR_mve_vshllbq_n_sv16qi = 3673,
+ CODE_FOR_mve_vshllbq_n_uv16qi = 3674,
+ CODE_FOR_mve_vshllbq_n_sv8hi = 3675,
+ CODE_FOR_mve_vshllbq_n_uv8hi = 3676,
+ CODE_FOR_mve_vshlltq_n_uv16qi = 3677,
+ CODE_FOR_mve_vshlltq_n_sv16qi = 3678,
+ CODE_FOR_mve_vshlltq_n_uv8hi = 3679,
+ CODE_FOR_mve_vshlltq_n_sv8hi = 3680,
+ CODE_FOR_mve_vsubq_fv8hf = 3681,
+ CODE_FOR_mve_vsubq_fv4sf = 3682,
+ CODE_FOR_mve_vmulltq_poly_pv16qi = 3683,
+ CODE_FOR_mve_vmulltq_poly_pv8hi = 3684,
+ CODE_FOR_mve_vmullbq_poly_pv16qi = 3685,
+ CODE_FOR_mve_vmullbq_poly_pv8hi = 3686,
+ CODE_FOR_mve_vrmlaldavhq_uv4si = 3687,
+ CODE_FOR_mve_vrmlaldavhq_sv4si = 3688,
+ CODE_FOR_mve_vbicq_m_n_sv8hi = 3689,
+ CODE_FOR_mve_vbicq_m_n_uv8hi = 3690,
+ CODE_FOR_mve_vbicq_m_n_sv4si = 3691,
+ CODE_FOR_mve_vbicq_m_n_uv4si = 3692,
+ CODE_FOR_mve_vcmpeqq_m_fv8hf = 3693,
+ CODE_FOR_mve_vcmpeqq_m_fv4sf = 3694,
+ CODE_FOR_mve_vcvtaq_m_sv8hi = 3695,
+ CODE_FOR_mve_vcvtaq_m_uv8hi = 3696,
+ CODE_FOR_mve_vcvtaq_m_sv4si = 3697,
+ CODE_FOR_mve_vcvtaq_m_uv4si = 3698,
+ CODE_FOR_mve_vcvtq_m_to_f_sv8hf = 3699,
+ CODE_FOR_mve_vcvtq_m_to_f_uv8hf = 3700,
+ CODE_FOR_mve_vcvtq_m_to_f_sv4sf = 3701,
+ CODE_FOR_mve_vcvtq_m_to_f_uv4sf = 3702,
+ CODE_FOR_mve_vqrshrnbq_n_uv8hi = 3703,
+ CODE_FOR_mve_vqrshrnbq_n_sv8hi = 3704,
+ CODE_FOR_mve_vqrshrnbq_n_uv4si = 3705,
+ CODE_FOR_mve_vqrshrnbq_n_sv4si = 3706,
+ CODE_FOR_mve_vqrshrunbq_n_sv8hi = 3707,
+ CODE_FOR_mve_vqrshrunbq_n_sv4si = 3708,
+ CODE_FOR_mve_vrmlaldavhaq_sv4si = 3709,
+ CODE_FOR_mve_vrmlaldavhaq_uv4si = 3710,
+ CODE_FOR_mve_vabavq_sv16qi = 3711,
+ CODE_FOR_mve_vabavq_uv16qi = 3712,
+ CODE_FOR_mve_vabavq_sv8hi = 3713,
+ CODE_FOR_mve_vabavq_uv8hi = 3714,
+ CODE_FOR_mve_vabavq_sv4si = 3715,
+ CODE_FOR_mve_vabavq_uv4si = 3716,
+ CODE_FOR_mve_vshlcq_sv16qi = 3717,
+ CODE_FOR_mve_vshlcq_uv16qi = 3718,
+ CODE_FOR_mve_vshlcq_sv8hi = 3719,
+ CODE_FOR_mve_vshlcq_uv8hi = 3720,
+ CODE_FOR_mve_vshlcq_sv4si = 3721,
+ CODE_FOR_mve_vshlcq_uv4si = 3722,
+ CODE_FOR_mve_vabsq_m_sv16qi = 3723,
+ CODE_FOR_mve_vabsq_m_sv8hi = 3724,
+ CODE_FOR_mve_vabsq_m_sv4si = 3725,
+ CODE_FOR_mve_vaddvaq_p_sv16qi = 3726,
+ CODE_FOR_mve_vaddvaq_p_uv16qi = 3727,
+ CODE_FOR_mve_vaddvaq_p_sv8hi = 3728,
+ CODE_FOR_mve_vaddvaq_p_uv8hi = 3729,
+ CODE_FOR_mve_vaddvaq_p_sv4si = 3730,
+ CODE_FOR_mve_vaddvaq_p_uv4si = 3731,
+ CODE_FOR_mve_vclsq_m_sv16qi = 3732,
+ CODE_FOR_mve_vclsq_m_sv8hi = 3733,
+ CODE_FOR_mve_vclsq_m_sv4si = 3734,
+ CODE_FOR_mve_vclzq_m_sv16qi = 3735,
+ CODE_FOR_mve_vclzq_m_uv16qi = 3736,
+ CODE_FOR_mve_vclzq_m_sv8hi = 3737,
+ CODE_FOR_mve_vclzq_m_uv8hi = 3738,
+ CODE_FOR_mve_vclzq_m_sv4si = 3739,
+ CODE_FOR_mve_vclzq_m_uv4si = 3740,
+ CODE_FOR_mve_vcmpcsq_m_n_uv16qi = 3741,
+ CODE_FOR_mve_vcmpcsq_m_n_uv8hi = 3742,
+ CODE_FOR_mve_vcmpcsq_m_n_uv4si = 3743,
+ CODE_FOR_mve_vcmpcsq_m_uv16qi = 3744,
+ CODE_FOR_mve_vcmpcsq_m_uv8hi = 3745,
+ CODE_FOR_mve_vcmpcsq_m_uv4si = 3746,
+ CODE_FOR_mve_vcmpeqq_m_n_sv16qi = 3747,
+ CODE_FOR_mve_vcmpeqq_m_n_uv16qi = 3748,
+ CODE_FOR_mve_vcmpeqq_m_n_sv8hi = 3749,
+ CODE_FOR_mve_vcmpeqq_m_n_uv8hi = 3750,
+ CODE_FOR_mve_vcmpeqq_m_n_sv4si = 3751,
+ CODE_FOR_mve_vcmpeqq_m_n_uv4si = 3752,
+ CODE_FOR_mve_vcmpeqq_m_sv16qi = 3753,
+ CODE_FOR_mve_vcmpeqq_m_uv16qi = 3754,
+ CODE_FOR_mve_vcmpeqq_m_sv8hi = 3755,
+ CODE_FOR_mve_vcmpeqq_m_uv8hi = 3756,
+ CODE_FOR_mve_vcmpeqq_m_sv4si = 3757,
+ CODE_FOR_mve_vcmpeqq_m_uv4si = 3758,
+ CODE_FOR_mve_vcmpgeq_m_n_sv16qi = 3759,
+ CODE_FOR_mve_vcmpgeq_m_n_sv8hi = 3760,
+ CODE_FOR_mve_vcmpgeq_m_n_sv4si = 3761,
+ CODE_FOR_mve_vcmpgeq_m_sv16qi = 3762,
+ CODE_FOR_mve_vcmpgeq_m_sv8hi = 3763,
+ CODE_FOR_mve_vcmpgeq_m_sv4si = 3764,
+ CODE_FOR_mve_vcmpgtq_m_n_sv16qi = 3765,
+ CODE_FOR_mve_vcmpgtq_m_n_sv8hi = 3766,
+ CODE_FOR_mve_vcmpgtq_m_n_sv4si = 3767,
+ CODE_FOR_mve_vcmpgtq_m_sv16qi = 3768,
+ CODE_FOR_mve_vcmpgtq_m_sv8hi = 3769,
+ CODE_FOR_mve_vcmpgtq_m_sv4si = 3770,
+ CODE_FOR_mve_vcmphiq_m_n_uv16qi = 3771,
+ CODE_FOR_mve_vcmphiq_m_n_uv8hi = 3772,
+ CODE_FOR_mve_vcmphiq_m_n_uv4si = 3773,
+ CODE_FOR_mve_vcmphiq_m_uv16qi = 3774,
+ CODE_FOR_mve_vcmphiq_m_uv8hi = 3775,
+ CODE_FOR_mve_vcmphiq_m_uv4si = 3776,
+ CODE_FOR_mve_vcmpleq_m_n_sv16qi = 3777,
+ CODE_FOR_mve_vcmpleq_m_n_sv8hi = 3778,
+ CODE_FOR_mve_vcmpleq_m_n_sv4si = 3779,
+ CODE_FOR_mve_vcmpleq_m_sv16qi = 3780,
+ CODE_FOR_mve_vcmpleq_m_sv8hi = 3781,
+ CODE_FOR_mve_vcmpleq_m_sv4si = 3782,
+ CODE_FOR_mve_vcmpltq_m_n_sv16qi = 3783,
+ CODE_FOR_mve_vcmpltq_m_n_sv8hi = 3784,
+ CODE_FOR_mve_vcmpltq_m_n_sv4si = 3785,
+ CODE_FOR_mve_vcmpltq_m_sv16qi = 3786,
+ CODE_FOR_mve_vcmpltq_m_sv8hi = 3787,
+ CODE_FOR_mve_vcmpltq_m_sv4si = 3788,
+ CODE_FOR_mve_vcmpneq_m_n_sv16qi = 3789,
+ CODE_FOR_mve_vcmpneq_m_n_uv16qi = 3790,
+ CODE_FOR_mve_vcmpneq_m_n_sv8hi = 3791,
+ CODE_FOR_mve_vcmpneq_m_n_uv8hi = 3792,
+ CODE_FOR_mve_vcmpneq_m_n_sv4si = 3793,
+ CODE_FOR_mve_vcmpneq_m_n_uv4si = 3794,
+ CODE_FOR_mve_vcmpneq_m_sv16qi = 3795,
+ CODE_FOR_mve_vcmpneq_m_uv16qi = 3796,
+ CODE_FOR_mve_vcmpneq_m_sv8hi = 3797,
+ CODE_FOR_mve_vcmpneq_m_uv8hi = 3798,
+ CODE_FOR_mve_vcmpneq_m_sv4si = 3799,
+ CODE_FOR_mve_vcmpneq_m_uv4si = 3800,
+ CODE_FOR_mve_vdupq_m_n_sv16qi = 3801,
+ CODE_FOR_mve_vdupq_m_n_uv16qi = 3802,
+ CODE_FOR_mve_vdupq_m_n_sv8hi = 3803,
+ CODE_FOR_mve_vdupq_m_n_uv8hi = 3804,
+ CODE_FOR_mve_vdupq_m_n_sv4si = 3805,
+ CODE_FOR_mve_vdupq_m_n_uv4si = 3806,
+ CODE_FOR_mve_vmaxaq_m_sv16qi = 3807,
+ CODE_FOR_mve_vmaxaq_m_sv8hi = 3808,
+ CODE_FOR_mve_vmaxaq_m_sv4si = 3809,
+ CODE_FOR_mve_vmaxavq_p_sv16qi = 3810,
+ CODE_FOR_mve_vmaxavq_p_sv8hi = 3811,
+ CODE_FOR_mve_vmaxavq_p_sv4si = 3812,
+ CODE_FOR_mve_vmaxvq_p_sv16qi = 3813,
+ CODE_FOR_mve_vmaxvq_p_uv16qi = 3814,
+ CODE_FOR_mve_vmaxvq_p_sv8hi = 3815,
+ CODE_FOR_mve_vmaxvq_p_uv8hi = 3816,
+ CODE_FOR_mve_vmaxvq_p_sv4si = 3817,
+ CODE_FOR_mve_vmaxvq_p_uv4si = 3818,
+ CODE_FOR_mve_vminaq_m_sv16qi = 3819,
+ CODE_FOR_mve_vminaq_m_sv8hi = 3820,
+ CODE_FOR_mve_vminaq_m_sv4si = 3821,
+ CODE_FOR_mve_vminavq_p_sv16qi = 3822,
+ CODE_FOR_mve_vminavq_p_sv8hi = 3823,
+ CODE_FOR_mve_vminavq_p_sv4si = 3824,
+ CODE_FOR_mve_vminvq_p_sv16qi = 3825,
+ CODE_FOR_mve_vminvq_p_uv16qi = 3826,
+ CODE_FOR_mve_vminvq_p_sv8hi = 3827,
+ CODE_FOR_mve_vminvq_p_uv8hi = 3828,
+ CODE_FOR_mve_vminvq_p_sv4si = 3829,
+ CODE_FOR_mve_vminvq_p_uv4si = 3830,
+ CODE_FOR_mve_vmladavaq_sv16qi = 3831,
+ CODE_FOR_mve_vmladavaq_uv16qi = 3832,
+ CODE_FOR_mve_vmladavaq_sv8hi = 3833,
+ CODE_FOR_mve_vmladavaq_uv8hi = 3834,
+ CODE_FOR_mve_vmladavaq_sv4si = 3835,
+ CODE_FOR_mve_vmladavaq_uv4si = 3836,
+ CODE_FOR_mve_vmladavq_p_sv16qi = 3837,
+ CODE_FOR_mve_vmladavq_p_uv16qi = 3838,
+ CODE_FOR_mve_vmladavq_p_sv8hi = 3839,
+ CODE_FOR_mve_vmladavq_p_uv8hi = 3840,
+ CODE_FOR_mve_vmladavq_p_sv4si = 3841,
+ CODE_FOR_mve_vmladavq_p_uv4si = 3842,
+ CODE_FOR_mve_vmladavxq_p_sv16qi = 3843,
+ CODE_FOR_mve_vmladavxq_p_sv8hi = 3844,
+ CODE_FOR_mve_vmladavxq_p_sv4si = 3845,
+ CODE_FOR_mve_vmlaq_n_sv16qi = 3846,
+ CODE_FOR_mve_vmlaq_n_uv16qi = 3847,
+ CODE_FOR_mve_vmlaq_n_sv8hi = 3848,
+ CODE_FOR_mve_vmlaq_n_uv8hi = 3849,
+ CODE_FOR_mve_vmlaq_n_sv4si = 3850,
+ CODE_FOR_mve_vmlaq_n_uv4si = 3851,
+ CODE_FOR_mve_vmlasq_n_sv16qi = 3852,
+ CODE_FOR_mve_vmlasq_n_uv16qi = 3853,
+ CODE_FOR_mve_vmlasq_n_sv8hi = 3854,
+ CODE_FOR_mve_vmlasq_n_uv8hi = 3855,
+ CODE_FOR_mve_vmlasq_n_sv4si = 3856,
+ CODE_FOR_mve_vmlasq_n_uv4si = 3857,
+ CODE_FOR_mve_vmlsdavq_p_sv16qi = 3858,
+ CODE_FOR_mve_vmlsdavq_p_sv8hi = 3859,
+ CODE_FOR_mve_vmlsdavq_p_sv4si = 3860,
+ CODE_FOR_mve_vmlsdavxq_p_sv16qi = 3861,
+ CODE_FOR_mve_vmlsdavxq_p_sv8hi = 3862,
+ CODE_FOR_mve_vmlsdavxq_p_sv4si = 3863,
+ CODE_FOR_mve_vmvnq_m_sv16qi = 3864,
+ CODE_FOR_mve_vmvnq_m_uv16qi = 3865,
+ CODE_FOR_mve_vmvnq_m_sv8hi = 3866,
+ CODE_FOR_mve_vmvnq_m_uv8hi = 3867,
+ CODE_FOR_mve_vmvnq_m_sv4si = 3868,
+ CODE_FOR_mve_vmvnq_m_uv4si = 3869,
+ CODE_FOR_mve_vnegq_m_sv16qi = 3870,
+ CODE_FOR_mve_vnegq_m_sv8hi = 3871,
+ CODE_FOR_mve_vnegq_m_sv4si = 3872,
+ CODE_FOR_mve_vpselq_sv16qi = 3873,
+ CODE_FOR_mve_vpselq_uv16qi = 3874,
+ CODE_FOR_mve_vpselq_sv8hi = 3875,
+ CODE_FOR_mve_vpselq_uv8hi = 3876,
+ CODE_FOR_mve_vpselq_sv4si = 3877,
+ CODE_FOR_mve_vpselq_uv4si = 3878,
+ CODE_FOR_mve_vpselq_sv2di = 3879,
+ CODE_FOR_mve_vpselq_uv2di = 3880,
+ CODE_FOR_mve_vqabsq_m_sv16qi = 3881,
+ CODE_FOR_mve_vqabsq_m_sv8hi = 3882,
+ CODE_FOR_mve_vqabsq_m_sv4si = 3883,
+ CODE_FOR_mve_vqdmlahq_n_sv16qi = 3884,
+ CODE_FOR_mve_vqdmlahq_n_sv8hi = 3885,
+ CODE_FOR_mve_vqdmlahq_n_sv4si = 3886,
+ CODE_FOR_mve_vqdmlashq_n_sv16qi = 3887,
+ CODE_FOR_mve_vqdmlashq_n_sv8hi = 3888,
+ CODE_FOR_mve_vqdmlashq_n_sv4si = 3889,
+ CODE_FOR_mve_vqnegq_m_sv16qi = 3890,
+ CODE_FOR_mve_vqnegq_m_sv8hi = 3891,
+ CODE_FOR_mve_vqnegq_m_sv4si = 3892,
+ CODE_FOR_mve_vqrdmladhq_sv16qi = 3893,
+ CODE_FOR_mve_vqrdmladhq_sv8hi = 3894,
+ CODE_FOR_mve_vqrdmladhq_sv4si = 3895,
+ CODE_FOR_mve_vqrdmladhxq_sv16qi = 3896,
+ CODE_FOR_mve_vqrdmladhxq_sv8hi = 3897,
+ CODE_FOR_mve_vqrdmladhxq_sv4si = 3898,
+ CODE_FOR_mve_vqrdmlahq_n_sv16qi = 3899,
+ CODE_FOR_mve_vqrdmlahq_n_sv8hi = 3900,
+ CODE_FOR_mve_vqrdmlahq_n_sv4si = 3901,
+ CODE_FOR_mve_vqrdmlashq_n_sv16qi = 3902,
+ CODE_FOR_mve_vqrdmlashq_n_sv8hi = 3903,
+ CODE_FOR_mve_vqrdmlashq_n_sv4si = 3904,
+ CODE_FOR_mve_vqrdmlsdhq_sv16qi = 3905,
+ CODE_FOR_mve_vqrdmlsdhq_sv8hi = 3906,
+ CODE_FOR_mve_vqrdmlsdhq_sv4si = 3907,
+ CODE_FOR_mve_vqrdmlsdhxq_sv16qi = 3908,
+ CODE_FOR_mve_vqrdmlsdhxq_sv8hi = 3909,
+ CODE_FOR_mve_vqrdmlsdhxq_sv4si = 3910,
+ CODE_FOR_mve_vqrshlq_m_n_sv16qi = 3911,
+ CODE_FOR_mve_vqrshlq_m_n_uv16qi = 3912,
+ CODE_FOR_mve_vqrshlq_m_n_sv8hi = 3913,
+ CODE_FOR_mve_vqrshlq_m_n_uv8hi = 3914,
+ CODE_FOR_mve_vqrshlq_m_n_sv4si = 3915,
+ CODE_FOR_mve_vqrshlq_m_n_uv4si = 3916,
+ CODE_FOR_mve_vqshlq_m_r_sv16qi = 3917,
+ CODE_FOR_mve_vqshlq_m_r_uv16qi = 3918,
+ CODE_FOR_mve_vqshlq_m_r_sv8hi = 3919,
+ CODE_FOR_mve_vqshlq_m_r_uv8hi = 3920,
+ CODE_FOR_mve_vqshlq_m_r_sv4si = 3921,
+ CODE_FOR_mve_vqshlq_m_r_uv4si = 3922,
+ CODE_FOR_mve_vrev64q_m_sv16qi = 3923,
+ CODE_FOR_mve_vrev64q_m_uv16qi = 3924,
+ CODE_FOR_mve_vrev64q_m_sv8hi = 3925,
+ CODE_FOR_mve_vrev64q_m_uv8hi = 3926,
+ CODE_FOR_mve_vrev64q_m_sv4si = 3927,
+ CODE_FOR_mve_vrev64q_m_uv4si = 3928,
+ CODE_FOR_mve_vrshlq_m_n_sv16qi = 3929,
+ CODE_FOR_mve_vrshlq_m_n_uv16qi = 3930,
+ CODE_FOR_mve_vrshlq_m_n_sv8hi = 3931,
+ CODE_FOR_mve_vrshlq_m_n_uv8hi = 3932,
+ CODE_FOR_mve_vrshlq_m_n_sv4si = 3933,
+ CODE_FOR_mve_vrshlq_m_n_uv4si = 3934,
+ CODE_FOR_mve_vshlq_m_r_sv16qi = 3935,
+ CODE_FOR_mve_vshlq_m_r_uv16qi = 3936,
+ CODE_FOR_mve_vshlq_m_r_sv8hi = 3937,
+ CODE_FOR_mve_vshlq_m_r_uv8hi = 3938,
+ CODE_FOR_mve_vshlq_m_r_sv4si = 3939,
+ CODE_FOR_mve_vshlq_m_r_uv4si = 3940,
+ CODE_FOR_mve_vsliq_n_sv16qi = 3941,
+ CODE_FOR_mve_vsliq_n_uv16qi = 3942,
+ CODE_FOR_mve_vsliq_n_sv8hi = 3943,
+ CODE_FOR_mve_vsliq_n_uv8hi = 3944,
+ CODE_FOR_mve_vsliq_n_sv4si = 3945,
+ CODE_FOR_mve_vsliq_n_uv4si = 3946,
+ CODE_FOR_mve_vsriq_n_sv16qi = 3947,
+ CODE_FOR_mve_vsriq_n_uv16qi = 3948,
+ CODE_FOR_mve_vsriq_n_sv8hi = 3949,
+ CODE_FOR_mve_vsriq_n_uv8hi = 3950,
+ CODE_FOR_mve_vsriq_n_sv4si = 3951,
+ CODE_FOR_mve_vsriq_n_uv4si = 3952,
+ CODE_FOR_mve_vqdmlsdhxq_sv16qi = 3953,
+ CODE_FOR_mve_vqdmlsdhxq_sv8hi = 3954,
+ CODE_FOR_mve_vqdmlsdhxq_sv4si = 3955,
+ CODE_FOR_mve_vqdmlsdhq_sv16qi = 3956,
+ CODE_FOR_mve_vqdmlsdhq_sv8hi = 3957,
+ CODE_FOR_mve_vqdmlsdhq_sv4si = 3958,
+ CODE_FOR_mve_vqdmladhxq_sv16qi = 3959,
+ CODE_FOR_mve_vqdmladhxq_sv8hi = 3960,
+ CODE_FOR_mve_vqdmladhxq_sv4si = 3961,
+ CODE_FOR_mve_vqdmladhq_sv16qi = 3962,
+ CODE_FOR_mve_vqdmladhq_sv8hi = 3963,
+ CODE_FOR_mve_vqdmladhq_sv4si = 3964,
+ CODE_FOR_mve_vmlsdavaxq_sv16qi = 3965,
+ CODE_FOR_mve_vmlsdavaxq_sv8hi = 3966,
+ CODE_FOR_mve_vmlsdavaxq_sv4si = 3967,
+ CODE_FOR_mve_vmlsdavaq_sv16qi = 3968,
+ CODE_FOR_mve_vmlsdavaq_sv8hi = 3969,
+ CODE_FOR_mve_vmlsdavaq_sv4si = 3970,
+ CODE_FOR_mve_vmladavaxq_sv16qi = 3971,
+ CODE_FOR_mve_vmladavaxq_sv8hi = 3972,
+ CODE_FOR_mve_vmladavaxq_sv4si = 3973,
+ CODE_FOR_mve_vabsq_m_fv8hf = 3974,
+ CODE_FOR_mve_vabsq_m_fv4sf = 3975,
+ CODE_FOR_mve_vaddlvaq_p_uv4si = 3976,
+ CODE_FOR_mve_vaddlvaq_p_sv4si = 3977,
+ CODE_FOR_mve_vcmlaqv8hf = 3978,
+ CODE_FOR_mve_vcmlaq_rot90v8hf = 3979,
+ CODE_FOR_mve_vcmlaq_rot180v8hf = 3980,
+ CODE_FOR_mve_vcmlaq_rot270v8hf = 3981,
+ CODE_FOR_mve_vcmlaqv4sf = 3982,
+ CODE_FOR_mve_vcmlaq_rot90v4sf = 3983,
+ CODE_FOR_mve_vcmlaq_rot180v4sf = 3984,
+ CODE_FOR_mve_vcmlaq_rot270v4sf = 3985,
+ CODE_FOR_mve_vcmpeqq_m_n_fv8hf = 3986,
+ CODE_FOR_mve_vcmpeqq_m_n_fv4sf = 3987,
+ CODE_FOR_mve_vcmpgeq_m_fv8hf = 3988,
+ CODE_FOR_mve_vcmpgeq_m_fv4sf = 3989,
+ CODE_FOR_mve_vcmpgeq_m_n_fv8hf = 3990,
+ CODE_FOR_mve_vcmpgeq_m_n_fv4sf = 3991,
+ CODE_FOR_mve_vcmpgtq_m_fv8hf = 3992,
+ CODE_FOR_mve_vcmpgtq_m_fv4sf = 3993,
+ CODE_FOR_mve_vcmpgtq_m_n_fv8hf = 3994,
+ CODE_FOR_mve_vcmpgtq_m_n_fv4sf = 3995,
+ CODE_FOR_mve_vcmpleq_m_fv8hf = 3996,
+ CODE_FOR_mve_vcmpleq_m_fv4sf = 3997,
+ CODE_FOR_mve_vcmpleq_m_n_fv8hf = 3998,
+ CODE_FOR_mve_vcmpleq_m_n_fv4sf = 3999,
+ CODE_FOR_mve_vcmpltq_m_fv8hf = 4000,
+ CODE_FOR_mve_vcmpltq_m_fv4sf = 4001,
+ CODE_FOR_mve_vcmpltq_m_n_fv8hf = 4002,
+ CODE_FOR_mve_vcmpltq_m_n_fv4sf = 4003,
+ CODE_FOR_mve_vcmpneq_m_fv8hf = 4004,
+ CODE_FOR_mve_vcmpneq_m_fv4sf = 4005,
+ CODE_FOR_mve_vcmpneq_m_n_fv8hf = 4006,
+ CODE_FOR_mve_vcmpneq_m_n_fv4sf = 4007,
+ CODE_FOR_mve_vcvtbq_m_f16_f32v8hf = 4008,
+ CODE_FOR_mve_vcvtbq_m_f32_f16v4sf = 4009,
+ CODE_FOR_mve_vcvttq_m_f16_f32v8hf = 4010,
+ CODE_FOR_mve_vcvttq_m_f32_f16v4sf = 4011,
+ CODE_FOR_mve_vdupq_m_n_fv8hf = 4012,
+ CODE_FOR_mve_vdupq_m_n_fv4sf = 4013,
+ CODE_FOR_mve_vfmaq_fv8hf = 4014,
+ CODE_FOR_mve_vfmaq_fv4sf = 4015,
+ CODE_FOR_mve_vfmaq_n_fv8hf = 4016,
+ CODE_FOR_mve_vfmaq_n_fv4sf = 4017,
+ CODE_FOR_mve_vfmasq_n_fv8hf = 4018,
+ CODE_FOR_mve_vfmasq_n_fv4sf = 4019,
+ CODE_FOR_mve_vfmsq_fv8hf = 4020,
+ CODE_FOR_mve_vfmsq_fv4sf = 4021,
+ CODE_FOR_mve_vmaxnmaq_m_fv8hf = 4022,
+ CODE_FOR_mve_vmaxnmaq_m_fv4sf = 4023,
+ CODE_FOR_mve_vmaxnmavq_p_fv8hf = 4024,
+ CODE_FOR_mve_vmaxnmavq_p_fv4sf = 4025,
+ CODE_FOR_mve_vmaxnmvq_p_fv8hf = 4026,
+ CODE_FOR_mve_vmaxnmvq_p_fv4sf = 4027,
+ CODE_FOR_mve_vminnmaq_m_fv8hf = 4028,
+ CODE_FOR_mve_vminnmaq_m_fv4sf = 4029,
+ CODE_FOR_mve_vminnmavq_p_fv8hf = 4030,
+ CODE_FOR_mve_vminnmavq_p_fv4sf = 4031,
+ CODE_FOR_mve_vminnmvq_p_fv8hf = 4032,
+ CODE_FOR_mve_vminnmvq_p_fv4sf = 4033,
+ CODE_FOR_mve_vmlaldavaq_sv8hi = 4034,
+ CODE_FOR_mve_vmlaldavaq_uv8hi = 4035,
+ CODE_FOR_mve_vmlaldavaq_sv4si = 4036,
+ CODE_FOR_mve_vmlaldavaq_uv4si = 4037,
+ CODE_FOR_mve_vmlaldavaxq_sv8hi = 4038,
+ CODE_FOR_mve_vmlaldavaxq_sv4si = 4039,
+ CODE_FOR_mve_vmlaldavq_p_uv8hi = 4040,
+ CODE_FOR_mve_vmlaldavq_p_sv8hi = 4041,
+ CODE_FOR_mve_vmlaldavq_p_uv4si = 4042,
+ CODE_FOR_mve_vmlaldavq_p_sv4si = 4043,
+ CODE_FOR_mve_vmlaldavxq_p_sv8hi = 4044,
+ CODE_FOR_mve_vmlaldavxq_p_sv4si = 4045,
+ CODE_FOR_mve_vmlsldavaq_sv8hi = 4046,
+ CODE_FOR_mve_vmlsldavaq_sv4si = 4047,
+ CODE_FOR_mve_vmlsldavaxq_sv8hi = 4048,
+ CODE_FOR_mve_vmlsldavaxq_sv4si = 4049,
+ CODE_FOR_mve_vmlsldavq_p_sv8hi = 4050,
+ CODE_FOR_mve_vmlsldavq_p_sv4si = 4051,
+ CODE_FOR_mve_vmlsldavxq_p_sv8hi = 4052,
+ CODE_FOR_mve_vmlsldavxq_p_sv4si = 4053,
+ CODE_FOR_mve_vmovlbq_m_uv16qi = 4054,
+ CODE_FOR_mve_vmovlbq_m_sv16qi = 4055,
+ CODE_FOR_mve_vmovlbq_m_uv8hi = 4056,
+ CODE_FOR_mve_vmovlbq_m_sv8hi = 4057,
+ CODE_FOR_mve_vmovltq_m_uv16qi = 4058,
+ CODE_FOR_mve_vmovltq_m_sv16qi = 4059,
+ CODE_FOR_mve_vmovltq_m_uv8hi = 4060,
+ CODE_FOR_mve_vmovltq_m_sv8hi = 4061,
+ CODE_FOR_mve_vmovnbq_m_uv8hi = 4062,
+ CODE_FOR_mve_vmovnbq_m_sv8hi = 4063,
+ CODE_FOR_mve_vmovnbq_m_uv4si = 4064,
+ CODE_FOR_mve_vmovnbq_m_sv4si = 4065,
+ CODE_FOR_mve_vmovntq_m_uv8hi = 4066,
+ CODE_FOR_mve_vmovntq_m_sv8hi = 4067,
+ CODE_FOR_mve_vmovntq_m_uv4si = 4068,
+ CODE_FOR_mve_vmovntq_m_sv4si = 4069,
+ CODE_FOR_mve_vmvnq_m_n_uv8hi = 4070,
+ CODE_FOR_mve_vmvnq_m_n_sv8hi = 4071,
+ CODE_FOR_mve_vmvnq_m_n_uv4si = 4072,
+ CODE_FOR_mve_vmvnq_m_n_sv4si = 4073,
+ CODE_FOR_mve_vnegq_m_fv8hf = 4074,
+ CODE_FOR_mve_vnegq_m_fv4sf = 4075,
+ CODE_FOR_mve_vorrq_m_n_sv8hi = 4076,
+ CODE_FOR_mve_vorrq_m_n_uv8hi = 4077,
+ CODE_FOR_mve_vorrq_m_n_sv4si = 4078,
+ CODE_FOR_mve_vorrq_m_n_uv4si = 4079,
+ CODE_FOR_mve_vpselq_fv8hf = 4080,
+ CODE_FOR_mve_vpselq_fv4sf = 4081,
+ CODE_FOR_mve_vqmovnbq_m_sv8hi = 4082,
+ CODE_FOR_mve_vqmovnbq_m_uv8hi = 4083,
+ CODE_FOR_mve_vqmovnbq_m_sv4si = 4084,
+ CODE_FOR_mve_vqmovnbq_m_uv4si = 4085,
+ CODE_FOR_mve_vqmovntq_m_uv8hi = 4086,
+ CODE_FOR_mve_vqmovntq_m_sv8hi = 4087,
+ CODE_FOR_mve_vqmovntq_m_uv4si = 4088,
+ CODE_FOR_mve_vqmovntq_m_sv4si = 4089,
+ CODE_FOR_mve_vqmovunbq_m_sv8hi = 4090,
+ CODE_FOR_mve_vqmovunbq_m_sv4si = 4091,
+ CODE_FOR_mve_vqmovuntq_m_sv8hi = 4092,
+ CODE_FOR_mve_vqmovuntq_m_sv4si = 4093,
+ CODE_FOR_mve_vqrshrntq_n_uv8hi = 4094,
+ CODE_FOR_mve_vqrshrntq_n_sv8hi = 4095,
+ CODE_FOR_mve_vqrshrntq_n_uv4si = 4096,
+ CODE_FOR_mve_vqrshrntq_n_sv4si = 4097,
+ CODE_FOR_mve_vqrshruntq_n_sv8hi = 4098,
+ CODE_FOR_mve_vqrshruntq_n_sv4si = 4099,
+ CODE_FOR_mve_vqshrnbq_n_uv8hi = 4100,
+ CODE_FOR_mve_vqshrnbq_n_sv8hi = 4101,
+ CODE_FOR_mve_vqshrnbq_n_uv4si = 4102,
+ CODE_FOR_mve_vqshrnbq_n_sv4si = 4103,
+ CODE_FOR_mve_vqshrntq_n_uv8hi = 4104,
+ CODE_FOR_mve_vqshrntq_n_sv8hi = 4105,
+ CODE_FOR_mve_vqshrntq_n_uv4si = 4106,
+ CODE_FOR_mve_vqshrntq_n_sv4si = 4107,
+ CODE_FOR_mve_vqshrunbq_n_sv8hi = 4108,
+ CODE_FOR_mve_vqshrunbq_n_sv4si = 4109,
+ CODE_FOR_mve_vqshruntq_n_sv8hi = 4110,
+ CODE_FOR_mve_vqshruntq_n_sv4si = 4111,
+ CODE_FOR_mve_vrev32q_m_fv8hf = 4112,
+ CODE_FOR_mve_vrev32q_m_sv16qi = 4113,
+ CODE_FOR_mve_vrev32q_m_uv16qi = 4114,
+ CODE_FOR_mve_vrev32q_m_sv8hi = 4115,
+ CODE_FOR_mve_vrev32q_m_uv8hi = 4116,
+ CODE_FOR_mve_vrev64q_m_fv8hf = 4117,
+ CODE_FOR_mve_vrev64q_m_fv4sf = 4118,
+ CODE_FOR_mve_vrmlaldavhaxq_sv4si = 4119,
+ CODE_FOR_mve_vrmlaldavhxq_p_sv4si = 4120,
+ CODE_FOR_mve_vrmlsldavhaxq_sv4si = 4121,
+ CODE_FOR_mve_vrmlsldavhq_p_sv4si = 4122,
+ CODE_FOR_mve_vrmlsldavhxq_p_sv4si = 4123,
+ CODE_FOR_mve_vrndaq_m_fv8hf = 4124,
+ CODE_FOR_mve_vrndaq_m_fv4sf = 4125,
+ CODE_FOR_mve_vrndmq_m_fv8hf = 4126,
+ CODE_FOR_mve_vrndmq_m_fv4sf = 4127,
+ CODE_FOR_mve_vrndnq_m_fv8hf = 4128,
+ CODE_FOR_mve_vrndnq_m_fv4sf = 4129,
+ CODE_FOR_mve_vrndpq_m_fv8hf = 4130,
+ CODE_FOR_mve_vrndpq_m_fv4sf = 4131,
+ CODE_FOR_mve_vrndxq_m_fv8hf = 4132,
+ CODE_FOR_mve_vrndxq_m_fv4sf = 4133,
+ CODE_FOR_mve_vrshrnbq_n_sv8hi = 4134,
+ CODE_FOR_mve_vrshrnbq_n_uv8hi = 4135,
+ CODE_FOR_mve_vrshrnbq_n_sv4si = 4136,
+ CODE_FOR_mve_vrshrnbq_n_uv4si = 4137,
+ CODE_FOR_mve_vrshrntq_n_uv8hi = 4138,
+ CODE_FOR_mve_vrshrntq_n_sv8hi = 4139,
+ CODE_FOR_mve_vrshrntq_n_uv4si = 4140,
+ CODE_FOR_mve_vrshrntq_n_sv4si = 4141,
+ CODE_FOR_mve_vshrnbq_n_uv8hi = 4142,
+ CODE_FOR_mve_vshrnbq_n_sv8hi = 4143,
+ CODE_FOR_mve_vshrnbq_n_uv4si = 4144,
+ CODE_FOR_mve_vshrnbq_n_sv4si = 4145,
+ CODE_FOR_mve_vshrntq_n_sv8hi = 4146,
+ CODE_FOR_mve_vshrntq_n_uv8hi = 4147,
+ CODE_FOR_mve_vshrntq_n_sv4si = 4148,
+ CODE_FOR_mve_vshrntq_n_uv4si = 4149,
+ CODE_FOR_mve_vcvtmq_m_sv8hi = 4150,
+ CODE_FOR_mve_vcvtmq_m_uv8hi = 4151,
+ CODE_FOR_mve_vcvtmq_m_sv4si = 4152,
+ CODE_FOR_mve_vcvtmq_m_uv4si = 4153,
+ CODE_FOR_mve_vcvtpq_m_sv8hi = 4154,
+ CODE_FOR_mve_vcvtpq_m_uv8hi = 4155,
+ CODE_FOR_mve_vcvtpq_m_sv4si = 4156,
+ CODE_FOR_mve_vcvtpq_m_uv4si = 4157,
+ CODE_FOR_mve_vcvtnq_m_sv8hi = 4158,
+ CODE_FOR_mve_vcvtnq_m_uv8hi = 4159,
+ CODE_FOR_mve_vcvtnq_m_sv4si = 4160,
+ CODE_FOR_mve_vcvtnq_m_uv4si = 4161,
+ CODE_FOR_mve_vcvtq_m_n_from_f_sv8hi = 4162,
+ CODE_FOR_mve_vcvtq_m_n_from_f_uv8hi = 4163,
+ CODE_FOR_mve_vcvtq_m_n_from_f_sv4si = 4164,
+ CODE_FOR_mve_vcvtq_m_n_from_f_uv4si = 4165,
+ CODE_FOR_mve_vrev16q_m_sv16qi = 4166,
+ CODE_FOR_mve_vrev16q_m_uv16qi = 4167,
+ CODE_FOR_mve_vcvtq_m_from_f_uv8hi = 4168,
+ CODE_FOR_mve_vcvtq_m_from_f_sv8hi = 4169,
+ CODE_FOR_mve_vcvtq_m_from_f_uv4si = 4170,
+ CODE_FOR_mve_vcvtq_m_from_f_sv4si = 4171,
+ CODE_FOR_mve_vrmlaldavhq_p_sv4si = 4172,
+ CODE_FOR_mve_vrmlaldavhq_p_uv4si = 4173,
+ CODE_FOR_mve_vrmlsldavhaq_sv4si = 4174,
+ CODE_FOR_mve_vabavq_p_sv16qi = 4175,
+ CODE_FOR_mve_vabavq_p_uv16qi = 4176,
+ CODE_FOR_mve_vabavq_p_sv8hi = 4177,
+ CODE_FOR_mve_vabavq_p_uv8hi = 4178,
+ CODE_FOR_mve_vabavq_p_sv4si = 4179,
+ CODE_FOR_mve_vabavq_p_uv4si = 4180,
+ CODE_FOR_mve_vqshluq_m_n_sv16qi = 4181,
+ CODE_FOR_mve_vqshluq_m_n_sv8hi = 4182,
+ CODE_FOR_mve_vqshluq_m_n_sv4si = 4183,
+ CODE_FOR_mve_vshlq_m_sv16qi = 4184,
+ CODE_FOR_mve_vshlq_m_uv16qi = 4185,
+ CODE_FOR_mve_vshlq_m_sv8hi = 4186,
+ CODE_FOR_mve_vshlq_m_uv8hi = 4187,
+ CODE_FOR_mve_vshlq_m_sv4si = 4188,
+ CODE_FOR_mve_vshlq_m_uv4si = 4189,
+ CODE_FOR_mve_vsriq_m_n_sv16qi = 4190,
+ CODE_FOR_mve_vsriq_m_n_uv16qi = 4191,
+ CODE_FOR_mve_vsriq_m_n_sv8hi = 4192,
+ CODE_FOR_mve_vsriq_m_n_uv8hi = 4193,
+ CODE_FOR_mve_vsriq_m_n_sv4si = 4194,
+ CODE_FOR_mve_vsriq_m_n_uv4si = 4195,
+ CODE_FOR_mve_vsubq_m_uv16qi = 4196,
+ CODE_FOR_mve_vsubq_m_sv16qi = 4197,
+ CODE_FOR_mve_vsubq_m_uv8hi = 4198,
+ CODE_FOR_mve_vsubq_m_sv8hi = 4199,
+ CODE_FOR_mve_vsubq_m_uv4si = 4200,
+ CODE_FOR_mve_vsubq_m_sv4si = 4201,
+ CODE_FOR_mve_vcvtq_m_n_to_f_uv8hf = 4202,
+ CODE_FOR_mve_vcvtq_m_n_to_f_sv8hf = 4203,
+ CODE_FOR_mve_vcvtq_m_n_to_f_uv4sf = 4204,
+ CODE_FOR_mve_vcvtq_m_n_to_f_sv4sf = 4205,
+ CODE_FOR_mve_vabdq_m_sv16qi = 4206,
+ CODE_FOR_mve_vabdq_m_uv16qi = 4207,
+ CODE_FOR_mve_vabdq_m_sv8hi = 4208,
+ CODE_FOR_mve_vabdq_m_uv8hi = 4209,
+ CODE_FOR_mve_vabdq_m_sv4si = 4210,
+ CODE_FOR_mve_vabdq_m_uv4si = 4211,
+ CODE_FOR_mve_vaddq_m_n_sv16qi = 4212,
+ CODE_FOR_mve_vaddq_m_n_uv16qi = 4213,
+ CODE_FOR_mve_vaddq_m_n_sv8hi = 4214,
+ CODE_FOR_mve_vaddq_m_n_uv8hi = 4215,
+ CODE_FOR_mve_vaddq_m_n_sv4si = 4216,
+ CODE_FOR_mve_vaddq_m_n_uv4si = 4217,
+ CODE_FOR_mve_vaddq_m_uv16qi = 4218,
+ CODE_FOR_mve_vaddq_m_sv16qi = 4219,
+ CODE_FOR_mve_vaddq_m_uv8hi = 4220,
+ CODE_FOR_mve_vaddq_m_sv8hi = 4221,
+ CODE_FOR_mve_vaddq_m_uv4si = 4222,
+ CODE_FOR_mve_vaddq_m_sv4si = 4223,
+ CODE_FOR_mve_vandq_m_uv16qi = 4224,
+ CODE_FOR_mve_vandq_m_sv16qi = 4225,
+ CODE_FOR_mve_vandq_m_uv8hi = 4226,
+ CODE_FOR_mve_vandq_m_sv8hi = 4227,
+ CODE_FOR_mve_vandq_m_uv4si = 4228,
+ CODE_FOR_mve_vandq_m_sv4si = 4229,
+ CODE_FOR_mve_vbicq_m_uv16qi = 4230,
+ CODE_FOR_mve_vbicq_m_sv16qi = 4231,
+ CODE_FOR_mve_vbicq_m_uv8hi = 4232,
+ CODE_FOR_mve_vbicq_m_sv8hi = 4233,
+ CODE_FOR_mve_vbicq_m_uv4si = 4234,
+ CODE_FOR_mve_vbicq_m_sv4si = 4235,
+ CODE_FOR_mve_vbrsrq_m_n_uv16qi = 4236,
+ CODE_FOR_mve_vbrsrq_m_n_sv16qi = 4237,
+ CODE_FOR_mve_vbrsrq_m_n_uv8hi = 4238,
+ CODE_FOR_mve_vbrsrq_m_n_sv8hi = 4239,
+ CODE_FOR_mve_vbrsrq_m_n_uv4si = 4240,
+ CODE_FOR_mve_vbrsrq_m_n_sv4si = 4241,
+ CODE_FOR_mve_vcaddq_rot270_m_uv16qi = 4242,
+ CODE_FOR_mve_vcaddq_rot270_m_sv16qi = 4243,
+ CODE_FOR_mve_vcaddq_rot270_m_uv8hi = 4244,
+ CODE_FOR_mve_vcaddq_rot270_m_sv8hi = 4245,
+ CODE_FOR_mve_vcaddq_rot270_m_uv4si = 4246,
+ CODE_FOR_mve_vcaddq_rot270_m_sv4si = 4247,
+ CODE_FOR_mve_vcaddq_rot90_m_uv16qi = 4248,
+ CODE_FOR_mve_vcaddq_rot90_m_sv16qi = 4249,
+ CODE_FOR_mve_vcaddq_rot90_m_uv8hi = 4250,
+ CODE_FOR_mve_vcaddq_rot90_m_sv8hi = 4251,
+ CODE_FOR_mve_vcaddq_rot90_m_uv4si = 4252,
+ CODE_FOR_mve_vcaddq_rot90_m_sv4si = 4253,
+ CODE_FOR_mve_veorq_m_sv16qi = 4254,
+ CODE_FOR_mve_veorq_m_uv16qi = 4255,
+ CODE_FOR_mve_veorq_m_sv8hi = 4256,
+ CODE_FOR_mve_veorq_m_uv8hi = 4257,
+ CODE_FOR_mve_veorq_m_sv4si = 4258,
+ CODE_FOR_mve_veorq_m_uv4si = 4259,
+ CODE_FOR_mve_vhaddq_m_n_sv16qi = 4260,
+ CODE_FOR_mve_vhaddq_m_n_uv16qi = 4261,
+ CODE_FOR_mve_vhaddq_m_n_sv8hi = 4262,
+ CODE_FOR_mve_vhaddq_m_n_uv8hi = 4263,
+ CODE_FOR_mve_vhaddq_m_n_sv4si = 4264,
+ CODE_FOR_mve_vhaddq_m_n_uv4si = 4265,
+ CODE_FOR_mve_vhaddq_m_sv16qi = 4266,
+ CODE_FOR_mve_vhaddq_m_uv16qi = 4267,
+ CODE_FOR_mve_vhaddq_m_sv8hi = 4268,
+ CODE_FOR_mve_vhaddq_m_uv8hi = 4269,
+ CODE_FOR_mve_vhaddq_m_sv4si = 4270,
+ CODE_FOR_mve_vhaddq_m_uv4si = 4271,
+ CODE_FOR_mve_vhsubq_m_n_sv16qi = 4272,
+ CODE_FOR_mve_vhsubq_m_n_uv16qi = 4273,
+ CODE_FOR_mve_vhsubq_m_n_sv8hi = 4274,
+ CODE_FOR_mve_vhsubq_m_n_uv8hi = 4275,
+ CODE_FOR_mve_vhsubq_m_n_sv4si = 4276,
+ CODE_FOR_mve_vhsubq_m_n_uv4si = 4277,
+ CODE_FOR_mve_vhsubq_m_sv16qi = 4278,
+ CODE_FOR_mve_vhsubq_m_uv16qi = 4279,
+ CODE_FOR_mve_vhsubq_m_sv8hi = 4280,
+ CODE_FOR_mve_vhsubq_m_uv8hi = 4281,
+ CODE_FOR_mve_vhsubq_m_sv4si = 4282,
+ CODE_FOR_mve_vhsubq_m_uv4si = 4283,
+ CODE_FOR_mve_vmaxq_m_sv16qi = 4284,
+ CODE_FOR_mve_vmaxq_m_uv16qi = 4285,
+ CODE_FOR_mve_vmaxq_m_sv8hi = 4286,
+ CODE_FOR_mve_vmaxq_m_uv8hi = 4287,
+ CODE_FOR_mve_vmaxq_m_sv4si = 4288,
+ CODE_FOR_mve_vmaxq_m_uv4si = 4289,
+ CODE_FOR_mve_vminq_m_sv16qi = 4290,
+ CODE_FOR_mve_vminq_m_uv16qi = 4291,
+ CODE_FOR_mve_vminq_m_sv8hi = 4292,
+ CODE_FOR_mve_vminq_m_uv8hi = 4293,
+ CODE_FOR_mve_vminq_m_sv4si = 4294,
+ CODE_FOR_mve_vminq_m_uv4si = 4295,
+ CODE_FOR_mve_vmladavaq_p_uv16qi = 4296,
+ CODE_FOR_mve_vmladavaq_p_sv16qi = 4297,
+ CODE_FOR_mve_vmladavaq_p_uv8hi = 4298,
+ CODE_FOR_mve_vmladavaq_p_sv8hi = 4299,
+ CODE_FOR_mve_vmladavaq_p_uv4si = 4300,
+ CODE_FOR_mve_vmladavaq_p_sv4si = 4301,
+ CODE_FOR_mve_vmlaq_m_n_sv16qi = 4302,
+ CODE_FOR_mve_vmlaq_m_n_uv16qi = 4303,
+ CODE_FOR_mve_vmlaq_m_n_sv8hi = 4304,
+ CODE_FOR_mve_vmlaq_m_n_uv8hi = 4305,
+ CODE_FOR_mve_vmlaq_m_n_sv4si = 4306,
+ CODE_FOR_mve_vmlaq_m_n_uv4si = 4307,
+ CODE_FOR_mve_vmlasq_m_n_uv16qi = 4308,
+ CODE_FOR_mve_vmlasq_m_n_sv16qi = 4309,
+ CODE_FOR_mve_vmlasq_m_n_uv8hi = 4310,
+ CODE_FOR_mve_vmlasq_m_n_sv8hi = 4311,
+ CODE_FOR_mve_vmlasq_m_n_uv4si = 4312,
+ CODE_FOR_mve_vmlasq_m_n_sv4si = 4313,
+ CODE_FOR_mve_vmulhq_m_sv16qi = 4314,
+ CODE_FOR_mve_vmulhq_m_uv16qi = 4315,
+ CODE_FOR_mve_vmulhq_m_sv8hi = 4316,
+ CODE_FOR_mve_vmulhq_m_uv8hi = 4317,
+ CODE_FOR_mve_vmulhq_m_sv4si = 4318,
+ CODE_FOR_mve_vmulhq_m_uv4si = 4319,
+ CODE_FOR_mve_vmullbq_int_m_uv16qi = 4320,
+ CODE_FOR_mve_vmullbq_int_m_sv16qi = 4321,
+ CODE_FOR_mve_vmullbq_int_m_uv8hi = 4322,
+ CODE_FOR_mve_vmullbq_int_m_sv8hi = 4323,
+ CODE_FOR_mve_vmullbq_int_m_uv4si = 4324,
+ CODE_FOR_mve_vmullbq_int_m_sv4si = 4325,
+ CODE_FOR_mve_vmulltq_int_m_sv16qi = 4326,
+ CODE_FOR_mve_vmulltq_int_m_uv16qi = 4327,
+ CODE_FOR_mve_vmulltq_int_m_sv8hi = 4328,
+ CODE_FOR_mve_vmulltq_int_m_uv8hi = 4329,
+ CODE_FOR_mve_vmulltq_int_m_sv4si = 4330,
+ CODE_FOR_mve_vmulltq_int_m_uv4si = 4331,
+ CODE_FOR_mve_vmulq_m_n_uv16qi = 4332,
+ CODE_FOR_mve_vmulq_m_n_sv16qi = 4333,
+ CODE_FOR_mve_vmulq_m_n_uv8hi = 4334,
+ CODE_FOR_mve_vmulq_m_n_sv8hi = 4335,
+ CODE_FOR_mve_vmulq_m_n_uv4si = 4336,
+ CODE_FOR_mve_vmulq_m_n_sv4si = 4337,
+ CODE_FOR_mve_vmulq_m_sv16qi = 4338,
+ CODE_FOR_mve_vmulq_m_uv16qi = 4339,
+ CODE_FOR_mve_vmulq_m_sv8hi = 4340,
+ CODE_FOR_mve_vmulq_m_uv8hi = 4341,
+ CODE_FOR_mve_vmulq_m_sv4si = 4342,
+ CODE_FOR_mve_vmulq_m_uv4si = 4343,
+ CODE_FOR_mve_vornq_m_uv16qi = 4344,
+ CODE_FOR_mve_vornq_m_sv16qi = 4345,
+ CODE_FOR_mve_vornq_m_uv8hi = 4346,
+ CODE_FOR_mve_vornq_m_sv8hi = 4347,
+ CODE_FOR_mve_vornq_m_uv4si = 4348,
+ CODE_FOR_mve_vornq_m_sv4si = 4349,
+ CODE_FOR_mve_vorrq_m_sv16qi = 4350,
+ CODE_FOR_mve_vorrq_m_uv16qi = 4351,
+ CODE_FOR_mve_vorrq_m_sv8hi = 4352,
+ CODE_FOR_mve_vorrq_m_uv8hi = 4353,
+ CODE_FOR_mve_vorrq_m_sv4si = 4354,
+ CODE_FOR_mve_vorrq_m_uv4si = 4355,
+ CODE_FOR_mve_vqaddq_m_n_uv16qi = 4356,
+ CODE_FOR_mve_vqaddq_m_n_sv16qi = 4357,
+ CODE_FOR_mve_vqaddq_m_n_uv8hi = 4358,
+ CODE_FOR_mve_vqaddq_m_n_sv8hi = 4359,
+ CODE_FOR_mve_vqaddq_m_n_uv4si = 4360,
+ CODE_FOR_mve_vqaddq_m_n_sv4si = 4361,
+ CODE_FOR_mve_vqaddq_m_uv16qi = 4362,
+ CODE_FOR_mve_vqaddq_m_sv16qi = 4363,
+ CODE_FOR_mve_vqaddq_m_uv8hi = 4364,
+ CODE_FOR_mve_vqaddq_m_sv8hi = 4365,
+ CODE_FOR_mve_vqaddq_m_uv4si = 4366,
+ CODE_FOR_mve_vqaddq_m_sv4si = 4367,
+ CODE_FOR_mve_vqdmlahq_m_n_sv16qi = 4368,
+ CODE_FOR_mve_vqdmlahq_m_n_sv8hi = 4369,
+ CODE_FOR_mve_vqdmlahq_m_n_sv4si = 4370,
+ CODE_FOR_mve_vqdmlashq_m_n_sv16qi = 4371,
+ CODE_FOR_mve_vqdmlashq_m_n_sv8hi = 4372,
+ CODE_FOR_mve_vqdmlashq_m_n_sv4si = 4373,
+ CODE_FOR_mve_vqrdmlahq_m_n_sv16qi = 4374,
+ CODE_FOR_mve_vqrdmlahq_m_n_sv8hi = 4375,
+ CODE_FOR_mve_vqrdmlahq_m_n_sv4si = 4376,
+ CODE_FOR_mve_vqrdmlashq_m_n_sv16qi = 4377,
+ CODE_FOR_mve_vqrdmlashq_m_n_sv8hi = 4378,
+ CODE_FOR_mve_vqrdmlashq_m_n_sv4si = 4379,
+ CODE_FOR_mve_vqrshlq_m_uv16qi = 4380,
+ CODE_FOR_mve_vqrshlq_m_sv16qi = 4381,
+ CODE_FOR_mve_vqrshlq_m_uv8hi = 4382,
+ CODE_FOR_mve_vqrshlq_m_sv8hi = 4383,
+ CODE_FOR_mve_vqrshlq_m_uv4si = 4384,
+ CODE_FOR_mve_vqrshlq_m_sv4si = 4385,
+ CODE_FOR_mve_vqshlq_m_n_sv16qi = 4386,
+ CODE_FOR_mve_vqshlq_m_n_uv16qi = 4387,
+ CODE_FOR_mve_vqshlq_m_n_sv8hi = 4388,
+ CODE_FOR_mve_vqshlq_m_n_uv8hi = 4389,
+ CODE_FOR_mve_vqshlq_m_n_sv4si = 4390,
+ CODE_FOR_mve_vqshlq_m_n_uv4si = 4391,
+ CODE_FOR_mve_vqshlq_m_uv16qi = 4392,
+ CODE_FOR_mve_vqshlq_m_sv16qi = 4393,
+ CODE_FOR_mve_vqshlq_m_uv8hi = 4394,
+ CODE_FOR_mve_vqshlq_m_sv8hi = 4395,
+ CODE_FOR_mve_vqshlq_m_uv4si = 4396,
+ CODE_FOR_mve_vqshlq_m_sv4si = 4397,
+ CODE_FOR_mve_vqsubq_m_n_uv16qi = 4398,
+ CODE_FOR_mve_vqsubq_m_n_sv16qi = 4399,
+ CODE_FOR_mve_vqsubq_m_n_uv8hi = 4400,
+ CODE_FOR_mve_vqsubq_m_n_sv8hi = 4401,
+ CODE_FOR_mve_vqsubq_m_n_uv4si = 4402,
+ CODE_FOR_mve_vqsubq_m_n_sv4si = 4403,
+ CODE_FOR_mve_vqsubq_m_uv16qi = 4404,
+ CODE_FOR_mve_vqsubq_m_sv16qi = 4405,
+ CODE_FOR_mve_vqsubq_m_uv8hi = 4406,
+ CODE_FOR_mve_vqsubq_m_sv8hi = 4407,
+ CODE_FOR_mve_vqsubq_m_uv4si = 4408,
+ CODE_FOR_mve_vqsubq_m_sv4si = 4409,
+ CODE_FOR_mve_vrhaddq_m_uv16qi = 4410,
+ CODE_FOR_mve_vrhaddq_m_sv16qi = 4411,
+ CODE_FOR_mve_vrhaddq_m_uv8hi = 4412,
+ CODE_FOR_mve_vrhaddq_m_sv8hi = 4413,
+ CODE_FOR_mve_vrhaddq_m_uv4si = 4414,
+ CODE_FOR_mve_vrhaddq_m_sv4si = 4415,
+ CODE_FOR_mve_vrmulhq_m_uv16qi = 4416,
+ CODE_FOR_mve_vrmulhq_m_sv16qi = 4417,
+ CODE_FOR_mve_vrmulhq_m_uv8hi = 4418,
+ CODE_FOR_mve_vrmulhq_m_sv8hi = 4419,
+ CODE_FOR_mve_vrmulhq_m_uv4si = 4420,
+ CODE_FOR_mve_vrmulhq_m_sv4si = 4421,
+ CODE_FOR_mve_vrshlq_m_sv16qi = 4422,
+ CODE_FOR_mve_vrshlq_m_uv16qi = 4423,
+ CODE_FOR_mve_vrshlq_m_sv8hi = 4424,
+ CODE_FOR_mve_vrshlq_m_uv8hi = 4425,
+ CODE_FOR_mve_vrshlq_m_sv4si = 4426,
+ CODE_FOR_mve_vrshlq_m_uv4si = 4427,
+ CODE_FOR_mve_vrshrq_m_n_sv16qi = 4428,
+ CODE_FOR_mve_vrshrq_m_n_uv16qi = 4429,
+ CODE_FOR_mve_vrshrq_m_n_sv8hi = 4430,
+ CODE_FOR_mve_vrshrq_m_n_uv8hi = 4431,
+ CODE_FOR_mve_vrshrq_m_n_sv4si = 4432,
+ CODE_FOR_mve_vrshrq_m_n_uv4si = 4433,
+ CODE_FOR_mve_vshlq_m_n_sv16qi = 4434,
+ CODE_FOR_mve_vshlq_m_n_uv16qi = 4435,
+ CODE_FOR_mve_vshlq_m_n_sv8hi = 4436,
+ CODE_FOR_mve_vshlq_m_n_uv8hi = 4437,
+ CODE_FOR_mve_vshlq_m_n_sv4si = 4438,
+ CODE_FOR_mve_vshlq_m_n_uv4si = 4439,
+ CODE_FOR_mve_vshrq_m_n_sv16qi = 4440,
+ CODE_FOR_mve_vshrq_m_n_uv16qi = 4441,
+ CODE_FOR_mve_vshrq_m_n_sv8hi = 4442,
+ CODE_FOR_mve_vshrq_m_n_uv8hi = 4443,
+ CODE_FOR_mve_vshrq_m_n_sv4si = 4444,
+ CODE_FOR_mve_vshrq_m_n_uv4si = 4445,
+ CODE_FOR_mve_vsliq_m_n_uv16qi = 4446,
+ CODE_FOR_mve_vsliq_m_n_sv16qi = 4447,
+ CODE_FOR_mve_vsliq_m_n_uv8hi = 4448,
+ CODE_FOR_mve_vsliq_m_n_sv8hi = 4449,
+ CODE_FOR_mve_vsliq_m_n_uv4si = 4450,
+ CODE_FOR_mve_vsliq_m_n_sv4si = 4451,
+ CODE_FOR_mve_vsubq_m_n_sv16qi = 4452,
+ CODE_FOR_mve_vsubq_m_n_uv16qi = 4453,
+ CODE_FOR_mve_vsubq_m_n_sv8hi = 4454,
+ CODE_FOR_mve_vsubq_m_n_uv8hi = 4455,
+ CODE_FOR_mve_vsubq_m_n_sv4si = 4456,
+ CODE_FOR_mve_vsubq_m_n_uv4si = 4457,
+ CODE_FOR_mve_vhcaddq_rot270_m_sv16qi = 4458,
+ CODE_FOR_mve_vhcaddq_rot270_m_sv8hi = 4459,
+ CODE_FOR_mve_vhcaddq_rot270_m_sv4si = 4460,
+ CODE_FOR_mve_vhcaddq_rot90_m_sv16qi = 4461,
+ CODE_FOR_mve_vhcaddq_rot90_m_sv8hi = 4462,
+ CODE_FOR_mve_vhcaddq_rot90_m_sv4si = 4463,
+ CODE_FOR_mve_vmladavaxq_p_sv16qi = 4464,
+ CODE_FOR_mve_vmladavaxq_p_sv8hi = 4465,
+ CODE_FOR_mve_vmladavaxq_p_sv4si = 4466,
+ CODE_FOR_mve_vmlsdavaq_p_sv16qi = 4467,
+ CODE_FOR_mve_vmlsdavaq_p_sv8hi = 4468,
+ CODE_FOR_mve_vmlsdavaq_p_sv4si = 4469,
+ CODE_FOR_mve_vmlsdavaxq_p_sv16qi = 4470,
+ CODE_FOR_mve_vmlsdavaxq_p_sv8hi = 4471,
+ CODE_FOR_mve_vmlsdavaxq_p_sv4si = 4472,
+ CODE_FOR_mve_vqdmladhq_m_sv16qi = 4473,
+ CODE_FOR_mve_vqdmladhq_m_sv8hi = 4474,
+ CODE_FOR_mve_vqdmladhq_m_sv4si = 4475,
+ CODE_FOR_mve_vqdmladhxq_m_sv16qi = 4476,
+ CODE_FOR_mve_vqdmladhxq_m_sv8hi = 4477,
+ CODE_FOR_mve_vqdmladhxq_m_sv4si = 4478,
+ CODE_FOR_mve_vqdmlsdhq_m_sv16qi = 4479,
+ CODE_FOR_mve_vqdmlsdhq_m_sv8hi = 4480,
+ CODE_FOR_mve_vqdmlsdhq_m_sv4si = 4481,
+ CODE_FOR_mve_vqdmlsdhxq_m_sv16qi = 4482,
+ CODE_FOR_mve_vqdmlsdhxq_m_sv8hi = 4483,
+ CODE_FOR_mve_vqdmlsdhxq_m_sv4si = 4484,
+ CODE_FOR_mve_vqdmulhq_m_n_sv16qi = 4485,
+ CODE_FOR_mve_vqdmulhq_m_n_sv8hi = 4486,
+ CODE_FOR_mve_vqdmulhq_m_n_sv4si = 4487,
+ CODE_FOR_mve_vqdmulhq_m_sv16qi = 4488,
+ CODE_FOR_mve_vqdmulhq_m_sv8hi = 4489,
+ CODE_FOR_mve_vqdmulhq_m_sv4si = 4490,
+ CODE_FOR_mve_vqrdmladhq_m_sv16qi = 4491,
+ CODE_FOR_mve_vqrdmladhq_m_sv8hi = 4492,
+ CODE_FOR_mve_vqrdmladhq_m_sv4si = 4493,
+ CODE_FOR_mve_vqrdmladhxq_m_sv16qi = 4494,
+ CODE_FOR_mve_vqrdmladhxq_m_sv8hi = 4495,
+ CODE_FOR_mve_vqrdmladhxq_m_sv4si = 4496,
+ CODE_FOR_mve_vqrdmlsdhq_m_sv16qi = 4497,
+ CODE_FOR_mve_vqrdmlsdhq_m_sv8hi = 4498,
+ CODE_FOR_mve_vqrdmlsdhq_m_sv4si = 4499,
+ CODE_FOR_mve_vqrdmlsdhxq_m_sv16qi = 4500,
+ CODE_FOR_mve_vqrdmlsdhxq_m_sv8hi = 4501,
+ CODE_FOR_mve_vqrdmlsdhxq_m_sv4si = 4502,
+ CODE_FOR_mve_vqrdmulhq_m_n_sv16qi = 4503,
+ CODE_FOR_mve_vqrdmulhq_m_n_sv8hi = 4504,
+ CODE_FOR_mve_vqrdmulhq_m_n_sv4si = 4505,
+ CODE_FOR_mve_vqrdmulhq_m_sv16qi = 4506,
+ CODE_FOR_mve_vqrdmulhq_m_sv8hi = 4507,
+ CODE_FOR_mve_vqrdmulhq_m_sv4si = 4508,
+ CODE_FOR_mve_vmlaldavaq_p_uv8hi = 4509,
+ CODE_FOR_mve_vmlaldavaq_p_sv8hi = 4510,
+ CODE_FOR_mve_vmlaldavaq_p_uv4si = 4511,
+ CODE_FOR_mve_vmlaldavaq_p_sv4si = 4512,
+ CODE_FOR_mve_vmlaldavaxq_p_sv8hi = 4513,
+ CODE_FOR_mve_vmlaldavaxq_p_sv4si = 4514,
+ CODE_FOR_mve_vqrshrnbq_m_n_uv8hi = 4515,
+ CODE_FOR_mve_vqrshrnbq_m_n_sv8hi = 4516,
+ CODE_FOR_mve_vqrshrnbq_m_n_uv4si = 4517,
+ CODE_FOR_mve_vqrshrnbq_m_n_sv4si = 4518,
+ CODE_FOR_mve_vqrshrntq_m_n_sv8hi = 4519,
+ CODE_FOR_mve_vqrshrntq_m_n_uv8hi = 4520,
+ CODE_FOR_mve_vqrshrntq_m_n_sv4si = 4521,
+ CODE_FOR_mve_vqrshrntq_m_n_uv4si = 4522,
+ CODE_FOR_mve_vqshrnbq_m_n_uv8hi = 4523,
+ CODE_FOR_mve_vqshrnbq_m_n_sv8hi = 4524,
+ CODE_FOR_mve_vqshrnbq_m_n_uv4si = 4525,
+ CODE_FOR_mve_vqshrnbq_m_n_sv4si = 4526,
+ CODE_FOR_mve_vqshrntq_m_n_sv8hi = 4527,
+ CODE_FOR_mve_vqshrntq_m_n_uv8hi = 4528,
+ CODE_FOR_mve_vqshrntq_m_n_sv4si = 4529,
+ CODE_FOR_mve_vqshrntq_m_n_uv4si = 4530,
+ CODE_FOR_mve_vrmlaldavhaq_p_sv4si = 4531,
+ CODE_FOR_mve_vrshrnbq_m_n_uv8hi = 4532,
+ CODE_FOR_mve_vrshrnbq_m_n_sv8hi = 4533,
+ CODE_FOR_mve_vrshrnbq_m_n_uv4si = 4534,
+ CODE_FOR_mve_vrshrnbq_m_n_sv4si = 4535,
+ CODE_FOR_mve_vrshrntq_m_n_uv8hi = 4536,
+ CODE_FOR_mve_vrshrntq_m_n_sv8hi = 4537,
+ CODE_FOR_mve_vrshrntq_m_n_uv4si = 4538,
+ CODE_FOR_mve_vrshrntq_m_n_sv4si = 4539,
+ CODE_FOR_mve_vshllbq_m_n_uv16qi = 4540,
+ CODE_FOR_mve_vshllbq_m_n_sv16qi = 4541,
+ CODE_FOR_mve_vshllbq_m_n_uv8hi = 4542,
+ CODE_FOR_mve_vshllbq_m_n_sv8hi = 4543,
+ CODE_FOR_mve_vshlltq_m_n_uv16qi = 4544,
+ CODE_FOR_mve_vshlltq_m_n_sv16qi = 4545,
+ CODE_FOR_mve_vshlltq_m_n_uv8hi = 4546,
+ CODE_FOR_mve_vshlltq_m_n_sv8hi = 4547,
+ CODE_FOR_mve_vshrnbq_m_n_sv8hi = 4548,
+ CODE_FOR_mve_vshrnbq_m_n_uv8hi = 4549,
+ CODE_FOR_mve_vshrnbq_m_n_sv4si = 4550,
+ CODE_FOR_mve_vshrnbq_m_n_uv4si = 4551,
+ CODE_FOR_mve_vshrntq_m_n_sv8hi = 4552,
+ CODE_FOR_mve_vshrntq_m_n_uv8hi = 4553,
+ CODE_FOR_mve_vshrntq_m_n_sv4si = 4554,
+ CODE_FOR_mve_vshrntq_m_n_uv4si = 4555,
+ CODE_FOR_mve_vmlsldavaq_p_sv8hi = 4556,
+ CODE_FOR_mve_vmlsldavaq_p_sv4si = 4557,
+ CODE_FOR_mve_vmlsldavaxq_p_sv8hi = 4558,
+ CODE_FOR_mve_vmlsldavaxq_p_sv4si = 4559,
+ CODE_FOR_mve_vmullbq_poly_m_pv16qi = 4560,
+ CODE_FOR_mve_vmullbq_poly_m_pv8hi = 4561,
+ CODE_FOR_mve_vmulltq_poly_m_pv16qi = 4562,
+ CODE_FOR_mve_vmulltq_poly_m_pv8hi = 4563,
+ CODE_FOR_mve_vqdmullbq_m_n_sv8hi = 4564,
+ CODE_FOR_mve_vqdmullbq_m_n_sv4si = 4565,
+ CODE_FOR_mve_vqdmullbq_m_sv8hi = 4566,
+ CODE_FOR_mve_vqdmullbq_m_sv4si = 4567,
+ CODE_FOR_mve_vqdmulltq_m_n_sv8hi = 4568,
+ CODE_FOR_mve_vqdmulltq_m_n_sv4si = 4569,
+ CODE_FOR_mve_vqdmulltq_m_sv8hi = 4570,
+ CODE_FOR_mve_vqdmulltq_m_sv4si = 4571,
+ CODE_FOR_mve_vqrshrunbq_m_n_sv8hi = 4572,
+ CODE_FOR_mve_vqrshrunbq_m_n_sv4si = 4573,
+ CODE_FOR_mve_vqrshruntq_m_n_sv8hi = 4574,
+ CODE_FOR_mve_vqrshruntq_m_n_sv4si = 4575,
+ CODE_FOR_mve_vqshrunbq_m_n_sv8hi = 4576,
+ CODE_FOR_mve_vqshrunbq_m_n_sv4si = 4577,
+ CODE_FOR_mve_vqshruntq_m_n_sv8hi = 4578,
+ CODE_FOR_mve_vqshruntq_m_n_sv4si = 4579,
+ CODE_FOR_mve_vrmlaldavhaq_p_uv4si = 4580,
+ CODE_FOR_mve_vrmlaldavhaxq_p_sv4si = 4581,
+ CODE_FOR_mve_vrmlsldavhaq_p_sv4si = 4582,
+ CODE_FOR_mve_vrmlsldavhaxq_p_sv4si = 4583,
+ CODE_FOR_mve_vabdq_m_fv8hf = 4584,
+ CODE_FOR_mve_vabdq_m_fv4sf = 4585,
+ CODE_FOR_mve_vaddq_m_fv8hf = 4586,
+ CODE_FOR_mve_vaddq_m_fv4sf = 4587,
+ CODE_FOR_mve_vaddq_m_n_fv8hf = 4588,
+ CODE_FOR_mve_vaddq_m_n_fv4sf = 4589,
+ CODE_FOR_mve_vandq_m_fv8hf = 4590,
+ CODE_FOR_mve_vandq_m_fv4sf = 4591,
+ CODE_FOR_mve_vbicq_m_fv8hf = 4592,
+ CODE_FOR_mve_vbicq_m_fv4sf = 4593,
+ CODE_FOR_mve_vbrsrq_m_n_fv8hf = 4594,
+ CODE_FOR_mve_vbrsrq_m_n_fv4sf = 4595,
+ CODE_FOR_mve_vcaddq_rot270_m_fv8hf = 4596,
+ CODE_FOR_mve_vcaddq_rot270_m_fv4sf = 4597,
+ CODE_FOR_mve_vcaddq_rot90_m_fv8hf = 4598,
+ CODE_FOR_mve_vcaddq_rot90_m_fv4sf = 4599,
+ CODE_FOR_mve_vcmlaq_m_fv8hf = 4600,
+ CODE_FOR_mve_vcmlaq_m_fv4sf = 4601,
+ CODE_FOR_mve_vcmlaq_rot180_m_fv8hf = 4602,
+ CODE_FOR_mve_vcmlaq_rot180_m_fv4sf = 4603,
+ CODE_FOR_mve_vcmlaq_rot270_m_fv8hf = 4604,
+ CODE_FOR_mve_vcmlaq_rot270_m_fv4sf = 4605,
+ CODE_FOR_mve_vcmlaq_rot90_m_fv8hf = 4606,
+ CODE_FOR_mve_vcmlaq_rot90_m_fv4sf = 4607,
+ CODE_FOR_mve_vcmulq_m_fv8hf = 4608,
+ CODE_FOR_mve_vcmulq_m_fv4sf = 4609,
+ CODE_FOR_mve_vcmulq_rot180_m_fv8hf = 4610,
+ CODE_FOR_mve_vcmulq_rot180_m_fv4sf = 4611,
+ CODE_FOR_mve_vcmulq_rot270_m_fv8hf = 4612,
+ CODE_FOR_mve_vcmulq_rot270_m_fv4sf = 4613,
+ CODE_FOR_mve_vcmulq_rot90_m_fv8hf = 4614,
+ CODE_FOR_mve_vcmulq_rot90_m_fv4sf = 4615,
+ CODE_FOR_mve_veorq_m_fv8hf = 4616,
+ CODE_FOR_mve_veorq_m_fv4sf = 4617,
+ CODE_FOR_mve_vfmaq_m_fv8hf = 4618,
+ CODE_FOR_mve_vfmaq_m_fv4sf = 4619,
+ CODE_FOR_mve_vfmaq_m_n_fv8hf = 4620,
+ CODE_FOR_mve_vfmaq_m_n_fv4sf = 4621,
+ CODE_FOR_mve_vfmasq_m_n_fv8hf = 4622,
+ CODE_FOR_mve_vfmasq_m_n_fv4sf = 4623,
+ CODE_FOR_mve_vfmsq_m_fv8hf = 4624,
+ CODE_FOR_mve_vfmsq_m_fv4sf = 4625,
+ CODE_FOR_mve_vmaxnmq_m_fv8hf = 4626,
+ CODE_FOR_mve_vmaxnmq_m_fv4sf = 4627,
+ CODE_FOR_mve_vminnmq_m_fv8hf = 4628,
+ CODE_FOR_mve_vminnmq_m_fv4sf = 4629,
+ CODE_FOR_mve_vmulq_m_fv8hf = 4630,
+ CODE_FOR_mve_vmulq_m_fv4sf = 4631,
+ CODE_FOR_mve_vmulq_m_n_fv8hf = 4632,
+ CODE_FOR_mve_vmulq_m_n_fv4sf = 4633,
+ CODE_FOR_mve_vornq_m_fv8hf = 4634,
+ CODE_FOR_mve_vornq_m_fv4sf = 4635,
+ CODE_FOR_mve_vorrq_m_fv8hf = 4636,
+ CODE_FOR_mve_vorrq_m_fv4sf = 4637,
+ CODE_FOR_mve_vsubq_m_fv8hf = 4638,
+ CODE_FOR_mve_vsubq_m_fv4sf = 4639,
+ CODE_FOR_mve_vsubq_m_n_fv8hf = 4640,
+ CODE_FOR_mve_vsubq_m_n_fv4sf = 4641,
+ CODE_FOR_mve_vstrbq_sv16qi = 4642,
+ CODE_FOR_mve_vstrbq_uv16qi = 4643,
+ CODE_FOR_mve_vstrbq_sv8hi = 4644,
+ CODE_FOR_mve_vstrbq_uv8hi = 4645,
+ CODE_FOR_mve_vstrbq_sv4si = 4646,
+ CODE_FOR_mve_vstrbq_uv4si = 4647,
+ CODE_FOR_mve_vstrbq_scatter_offset_sv16qi_insn = 4648,
+ CODE_FOR_mve_vstrbq_scatter_offset_uv16qi_insn = 4649,
+ CODE_FOR_mve_vstrbq_scatter_offset_sv8hi_insn = 4650,
+ CODE_FOR_mve_vstrbq_scatter_offset_uv8hi_insn = 4651,
+ CODE_FOR_mve_vstrbq_scatter_offset_sv4si_insn = 4652,
+ CODE_FOR_mve_vstrbq_scatter_offset_uv4si_insn = 4653,
+ CODE_FOR_mve_vstrwq_scatter_base_sv4si = 4654,
+ CODE_FOR_mve_vstrwq_scatter_base_uv4si = 4655,
+ CODE_FOR_mve_vldrbq_gather_offset_sv16qi = 4656,
+ CODE_FOR_mve_vldrbq_gather_offset_uv16qi = 4657,
+ CODE_FOR_mve_vldrbq_gather_offset_sv8hi = 4658,
+ CODE_FOR_mve_vldrbq_gather_offset_uv8hi = 4659,
+ CODE_FOR_mve_vldrbq_gather_offset_sv4si = 4660,
+ CODE_FOR_mve_vldrbq_gather_offset_uv4si = 4661,
+ CODE_FOR_mve_vldrbq_sv16qi = 4662,
+ CODE_FOR_mve_vldrbq_uv16qi = 4663,
+ CODE_FOR_mve_vldrbq_sv8hi = 4664,
+ CODE_FOR_mve_vldrbq_uv8hi = 4665,
+ CODE_FOR_mve_vldrbq_sv4si = 4666,
+ CODE_FOR_mve_vldrbq_uv4si = 4667,
+ CODE_FOR_mve_vldrwq_gather_base_sv4si = 4668,
+ CODE_FOR_mve_vldrwq_gather_base_uv4si = 4669,
+ CODE_FOR_mve_vstrbq_scatter_offset_p_sv16qi_insn = 4670,
+ CODE_FOR_mve_vstrbq_scatter_offset_p_uv16qi_insn = 4671,
+ CODE_FOR_mve_vstrbq_scatter_offset_p_sv8hi_insn = 4672,
+ CODE_FOR_mve_vstrbq_scatter_offset_p_uv8hi_insn = 4673,
+ CODE_FOR_mve_vstrbq_scatter_offset_p_sv4si_insn = 4674,
+ CODE_FOR_mve_vstrbq_scatter_offset_p_uv4si_insn = 4675,
+ CODE_FOR_mve_vstrwq_scatter_base_p_sv4si = 4676,
+ CODE_FOR_mve_vstrwq_scatter_base_p_uv4si = 4677,
+ CODE_FOR_mve_vstrbq_p_sv16qi = 4678,
+ CODE_FOR_mve_vstrbq_p_uv16qi = 4679,
+ CODE_FOR_mve_vstrbq_p_sv8hi = 4680,
+ CODE_FOR_mve_vstrbq_p_uv8hi = 4681,
+ CODE_FOR_mve_vstrbq_p_sv4si = 4682,
+ CODE_FOR_mve_vstrbq_p_uv4si = 4683,
+ CODE_FOR_mve_vldrbq_gather_offset_z_sv16qi = 4684,
+ CODE_FOR_mve_vldrbq_gather_offset_z_uv16qi = 4685,
+ CODE_FOR_mve_vldrbq_gather_offset_z_sv8hi = 4686,
+ CODE_FOR_mve_vldrbq_gather_offset_z_uv8hi = 4687,
+ CODE_FOR_mve_vldrbq_gather_offset_z_sv4si = 4688,
+ CODE_FOR_mve_vldrbq_gather_offset_z_uv4si = 4689,
+ CODE_FOR_mve_vldrbq_z_sv16qi = 4690,
+ CODE_FOR_mve_vldrbq_z_uv16qi = 4691,
+ CODE_FOR_mve_vldrbq_z_sv8hi = 4692,
+ CODE_FOR_mve_vldrbq_z_uv8hi = 4693,
+ CODE_FOR_mve_vldrbq_z_sv4si = 4694,
+ CODE_FOR_mve_vldrbq_z_uv4si = 4695,
+ CODE_FOR_mve_vldrwq_gather_base_z_sv4si = 4696,
+ CODE_FOR_mve_vldrwq_gather_base_z_uv4si = 4697,
+ CODE_FOR_mve_vldrhq_fv8hf = 4698,
+ CODE_FOR_mve_vldrhq_gather_offset_sv8hi = 4699,
+ CODE_FOR_mve_vldrhq_gather_offset_uv8hi = 4700,
+ CODE_FOR_mve_vldrhq_gather_offset_sv4si = 4701,
+ CODE_FOR_mve_vldrhq_gather_offset_uv4si = 4702,
+ CODE_FOR_mve_vldrhq_gather_offset_z_sv8hi = 4703,
+ CODE_FOR_mve_vldrhq_gather_offset_z_uv8hi = 4704,
+ CODE_FOR_mve_vldrhq_gather_offset_z_sv4si = 4705,
+ CODE_FOR_mve_vldrhq_gather_offset_z_uv4si = 4706,
+ CODE_FOR_mve_vldrhq_gather_shifted_offset_sv8hi = 4707,
+ CODE_FOR_mve_vldrhq_gather_shifted_offset_uv8hi = 4708,
+ CODE_FOR_mve_vldrhq_gather_shifted_offset_sv4si = 4709,
+ CODE_FOR_mve_vldrhq_gather_shifted_offset_uv4si = 4710,
+ CODE_FOR_mve_vldrhq_gather_shifted_offset_z_sv8hi = 4711,
+ CODE_FOR_mve_vldrhq_gather_shifted_offset_z_uv8hi = 4712,
+ CODE_FOR_mve_vldrhq_gather_shifted_offset_z_sv4si = 4713,
+ CODE_FOR_mve_vldrhq_gather_shifted_offset_z_uv4si = 4714,
+ CODE_FOR_mve_vldrhq_sv8hi = 4715,
+ CODE_FOR_mve_vldrhq_uv8hi = 4716,
+ CODE_FOR_mve_vldrhq_sv4si = 4717,
+ CODE_FOR_mve_vldrhq_uv4si = 4718,
+ CODE_FOR_mve_vldrhq_z_fv8hf = 4719,
+ CODE_FOR_mve_vldrhq_z_sv8hi = 4720,
+ CODE_FOR_mve_vldrhq_z_uv8hi = 4721,
+ CODE_FOR_mve_vldrhq_z_sv4si = 4722,
+ CODE_FOR_mve_vldrhq_z_uv4si = 4723,
+ CODE_FOR_mve_vldrwq_fv4sf = 4724,
+ CODE_FOR_mve_vldrwq_sv4si = 4725,
+ CODE_FOR_mve_vldrwq_uv4si = 4726,
+ CODE_FOR_mve_vldrwq_z_fv4sf = 4727,
+ CODE_FOR_mve_vldrwq_z_sv4si = 4728,
+ CODE_FOR_mve_vldrwq_z_uv4si = 4729,
+ CODE_FOR_mve_vldrdq_gather_base_sv2di = 4730,
+ CODE_FOR_mve_vldrdq_gather_base_uv2di = 4731,
+ CODE_FOR_mve_vldrdq_gather_base_z_sv2di = 4732,
+ CODE_FOR_mve_vldrdq_gather_base_z_uv2di = 4733,
+ CODE_FOR_mve_vldrdq_gather_offset_sv2di = 4734,
+ CODE_FOR_mve_vldrdq_gather_offset_uv2di = 4735,
+ CODE_FOR_mve_vldrdq_gather_offset_z_sv2di = 4736,
+ CODE_FOR_mve_vldrdq_gather_offset_z_uv2di = 4737,
+ CODE_FOR_mve_vldrdq_gather_shifted_offset_sv2di = 4738,
+ CODE_FOR_mve_vldrdq_gather_shifted_offset_uv2di = 4739,
+ CODE_FOR_mve_vldrdq_gather_shifted_offset_z_sv2di = 4740,
+ CODE_FOR_mve_vldrdq_gather_shifted_offset_z_uv2di = 4741,
+ CODE_FOR_mve_vldrhq_gather_offset_fv8hf = 4742,
+ CODE_FOR_mve_vldrhq_gather_offset_z_fv8hf = 4743,
+ CODE_FOR_mve_vldrhq_gather_shifted_offset_fv8hf = 4744,
+ CODE_FOR_mve_vldrhq_gather_shifted_offset_z_fv8hf = 4745,
+ CODE_FOR_mve_vldrwq_gather_base_fv4sf = 4746,
+ CODE_FOR_mve_vldrwq_gather_base_z_fv4sf = 4747,
+ CODE_FOR_mve_vldrwq_gather_offset_fv4sf = 4748,
+ CODE_FOR_mve_vldrwq_gather_offset_sv4si = 4749,
+ CODE_FOR_mve_vldrwq_gather_offset_uv4si = 4750,
+ CODE_FOR_mve_vldrwq_gather_offset_z_fv4sf = 4751,
+ CODE_FOR_mve_vldrwq_gather_offset_z_sv4si = 4752,
+ CODE_FOR_mve_vldrwq_gather_offset_z_uv4si = 4753,
+ CODE_FOR_mve_vldrwq_gather_shifted_offset_fv4sf = 4754,
+ CODE_FOR_mve_vldrwq_gather_shifted_offset_sv4si = 4755,
+ CODE_FOR_mve_vldrwq_gather_shifted_offset_uv4si = 4756,
+ CODE_FOR_mve_vldrwq_gather_shifted_offset_z_fv4sf = 4757,
+ CODE_FOR_mve_vldrwq_gather_shifted_offset_z_sv4si = 4758,
+ CODE_FOR_mve_vldrwq_gather_shifted_offset_z_uv4si = 4759,
+ CODE_FOR_mve_vstrhq_fv8hf = 4760,
+ CODE_FOR_mve_vstrhq_p_fv8hf = 4761,
+ CODE_FOR_mve_vstrhq_p_sv8hi = 4762,
+ CODE_FOR_mve_vstrhq_p_uv8hi = 4763,
+ CODE_FOR_mve_vstrhq_p_sv4si = 4764,
+ CODE_FOR_mve_vstrhq_p_uv4si = 4765,
+ CODE_FOR_mve_vstrhq_scatter_offset_p_sv8hi_insn = 4766,
+ CODE_FOR_mve_vstrhq_scatter_offset_p_uv8hi_insn = 4767,
+ CODE_FOR_mve_vstrhq_scatter_offset_p_sv4si_insn = 4768,
+ CODE_FOR_mve_vstrhq_scatter_offset_p_uv4si_insn = 4769,
+ CODE_FOR_mve_vstrhq_scatter_offset_sv8hi_insn = 4770,
+ CODE_FOR_mve_vstrhq_scatter_offset_uv8hi_insn = 4771,
+ CODE_FOR_mve_vstrhq_scatter_offset_sv4si_insn = 4772,
+ CODE_FOR_mve_vstrhq_scatter_offset_uv4si_insn = 4773,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_p_sv8hi_insn = 4774,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_p_uv8hi_insn = 4775,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_p_sv4si_insn = 4776,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_p_uv4si_insn = 4777,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_sv8hi_insn = 4778,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_uv8hi_insn = 4779,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_sv4si_insn = 4780,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_uv4si_insn = 4781,
+ CODE_FOR_mve_vstrhq_sv8hi = 4782,
+ CODE_FOR_mve_vstrhq_uv8hi = 4783,
+ CODE_FOR_mve_vstrhq_sv4si = 4784,
+ CODE_FOR_mve_vstrhq_uv4si = 4785,
+ CODE_FOR_mve_vstrwq_fv4sf = 4786,
+ CODE_FOR_mve_vstrwq_p_fv4sf = 4787,
+ CODE_FOR_mve_vstrwq_p_sv4si = 4788,
+ CODE_FOR_mve_vstrwq_p_uv4si = 4789,
+ CODE_FOR_mve_vstrwq_sv4si = 4790,
+ CODE_FOR_mve_vstrwq_uv4si = 4791,
+ CODE_FOR_mve_vstrdq_scatter_base_p_sv2di = 4792,
+ CODE_FOR_mve_vstrdq_scatter_base_p_uv2di = 4793,
+ CODE_FOR_mve_vstrdq_scatter_base_sv2di = 4794,
+ CODE_FOR_mve_vstrdq_scatter_base_uv2di = 4795,
+ CODE_FOR_mve_vstrdq_scatter_offset_p_sv2di_insn = 4796,
+ CODE_FOR_mve_vstrdq_scatter_offset_p_uv2di_insn = 4797,
+ CODE_FOR_mve_vstrdq_scatter_offset_sv2di_insn = 4798,
+ CODE_FOR_mve_vstrdq_scatter_offset_uv2di_insn = 4799,
+ CODE_FOR_mve_vstrdq_scatter_shifted_offset_p_sv2di_insn = 4800,
+ CODE_FOR_mve_vstrdq_scatter_shifted_offset_p_uv2di_insn = 4801,
+ CODE_FOR_mve_vstrdq_scatter_shifted_offset_sv2di_insn = 4802,
+ CODE_FOR_mve_vstrdq_scatter_shifted_offset_uv2di_insn = 4803,
+ CODE_FOR_mve_vstrhq_scatter_offset_fv8hf_insn = 4804,
+ CODE_FOR_mve_vstrhq_scatter_offset_p_fv8hf_insn = 4805,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_fv8hf_insn = 4806,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_p_fv8hf_insn = 4807,
+ CODE_FOR_mve_vstrwq_scatter_base_fv4sf = 4808,
+ CODE_FOR_mve_vstrwq_scatter_base_p_fv4sf = 4809,
+ CODE_FOR_mve_vstrwq_scatter_offset_fv4sf_insn = 4810,
+ CODE_FOR_mve_vstrwq_scatter_offset_p_fv4sf_insn = 4811,
+ CODE_FOR_mve_vstrwq_scatter_offset_p_sv4si_insn = 4812,
+ CODE_FOR_mve_vstrwq_scatter_offset_p_uv4si_insn = 4813,
+ CODE_FOR_mve_vstrwq_scatter_offset_sv4si_insn = 4814,
+ CODE_FOR_mve_vstrwq_scatter_offset_uv4si_insn = 4815,
+ CODE_FOR_mve_vstrwq_scatter_shifted_offset_fv4sf_insn = 4816,
+ CODE_FOR_mve_vstrwq_scatter_shifted_offset_p_fv4sf_insn = 4817,
+ CODE_FOR_mve_vstrwq_scatter_shifted_offset_p_sv4si_insn = 4818,
+ CODE_FOR_mve_vstrwq_scatter_shifted_offset_p_uv4si_insn = 4819,
+ CODE_FOR_mve_vstrwq_scatter_shifted_offset_sv4si_insn = 4820,
+ CODE_FOR_mve_vstrwq_scatter_shifted_offset_uv4si_insn = 4821,
+ CODE_FOR_mve_vaddqv16qi = 4822,
+ CODE_FOR_mve_vaddqv8hi = 4823,
+ CODE_FOR_mve_vaddqv4si = 4824,
+ CODE_FOR_mve_vaddq_fv8hf = 4825,
+ CODE_FOR_mve_vaddq_fv4sf = 4826,
+ CODE_FOR_mve_vidupq_uv16qi_insn = 4827,
+ CODE_FOR_mve_vidupq_uv8hi_insn = 4828,
+ CODE_FOR_mve_vidupq_uv4si_insn = 4829,
+ CODE_FOR_mve_vidupq_m_wb_uv16qi_insn = 4830,
+ CODE_FOR_mve_vidupq_m_wb_uv8hi_insn = 4831,
+ CODE_FOR_mve_vidupq_m_wb_uv4si_insn = 4832,
+ CODE_FOR_mve_vddupq_uv16qi_insn = 4833,
+ CODE_FOR_mve_vddupq_uv8hi_insn = 4834,
+ CODE_FOR_mve_vddupq_uv4si_insn = 4835,
+ CODE_FOR_mve_vddupq_m_wb_uv16qi_insn = 4836,
+ CODE_FOR_mve_vddupq_m_wb_uv8hi_insn = 4837,
+ CODE_FOR_mve_vddupq_m_wb_uv4si_insn = 4838,
+ CODE_FOR_mve_vdwdupq_wb_uv16qi_insn = 4839,
+ CODE_FOR_mve_vdwdupq_wb_uv8hi_insn = 4840,
+ CODE_FOR_mve_vdwdupq_wb_uv4si_insn = 4841,
+ CODE_FOR_mve_vdwdupq_m_wb_uv16qi_insn = 4842,
+ CODE_FOR_mve_vdwdupq_m_wb_uv8hi_insn = 4843,
+ CODE_FOR_mve_vdwdupq_m_wb_uv4si_insn = 4844,
+ CODE_FOR_mve_viwdupq_wb_uv16qi_insn = 4845,
+ CODE_FOR_mve_viwdupq_wb_uv8hi_insn = 4846,
+ CODE_FOR_mve_viwdupq_wb_uv4si_insn = 4847,
+ CODE_FOR_mve_viwdupq_m_wb_uv16qi_insn = 4848,
+ CODE_FOR_mve_viwdupq_m_wb_uv8hi_insn = 4849,
+ CODE_FOR_mve_viwdupq_m_wb_uv4si_insn = 4850,
+ CODE_FOR_mve_vstrwq_scatter_base_wb_sv4si = 4851,
+ CODE_FOR_mve_vstrwq_scatter_base_wb_uv4si = 4852,
+ CODE_FOR_mve_vstrwq_scatter_base_wb_p_sv4si = 4853,
+ CODE_FOR_mve_vstrwq_scatter_base_wb_p_uv4si = 4854,
+ CODE_FOR_mve_vstrwq_scatter_base_wb_fv4sf = 4855,
+ CODE_FOR_mve_vstrwq_scatter_base_wb_p_fv4sf = 4856,
+ CODE_FOR_mve_vstrdq_scatter_base_wb_sv2di = 4857,
+ CODE_FOR_mve_vstrdq_scatter_base_wb_uv2di = 4858,
+ CODE_FOR_mve_vstrdq_scatter_base_wb_p_sv2di = 4859,
+ CODE_FOR_mve_vstrdq_scatter_base_wb_p_uv2di = 4860,
+ CODE_FOR_mve_vldrwq_gather_base_wb_sv4si_insn = 4861,
+ CODE_FOR_mve_vldrwq_gather_base_wb_uv4si_insn = 4862,
+ CODE_FOR_mve_vldrwq_gather_base_wb_z_sv4si_insn = 4863,
+ CODE_FOR_mve_vldrwq_gather_base_wb_z_uv4si_insn = 4864,
+ CODE_FOR_mve_vldrwq_gather_base_wb_fv4sf_insn = 4865,
+ CODE_FOR_mve_vldrwq_gather_base_wb_z_fv4sf_insn = 4866,
+ CODE_FOR_mve_vldrdq_gather_base_wb_sv2di_insn = 4867,
+ CODE_FOR_mve_vldrdq_gather_base_wb_uv2di_insn = 4868,
+ CODE_FOR_get_fpscr_nzcvqc = 4869,
+ CODE_FOR_set_fpscr_nzcvqc = 4870,
+ CODE_FOR_mve_vldrdq_gather_base_wb_z_sv2di_insn = 4871,
+ CODE_FOR_mve_vldrdq_gather_base_wb_z_uv2di_insn = 4872,
+ CODE_FOR_mve_vadciq_m_uv4si = 4873,
+ CODE_FOR_mve_vadciq_m_sv4si = 4874,
+ CODE_FOR_mve_vadciq_uv4si = 4875,
+ CODE_FOR_mve_vadciq_sv4si = 4876,
+ CODE_FOR_mve_vadcq_m_uv4si = 4877,
+ CODE_FOR_mve_vadcq_m_sv4si = 4878,
+ CODE_FOR_mve_vadcq_uv4si = 4879,
+ CODE_FOR_mve_vadcq_sv4si = 4880,
+ CODE_FOR_mve_vsbciq_m_uv4si = 4881,
+ CODE_FOR_mve_vsbciq_m_sv4si = 4882,
+ CODE_FOR_mve_vsbciq_uv4si = 4883,
+ CODE_FOR_mve_vsbciq_sv4si = 4884,
+ CODE_FOR_mve_vsbcq_m_uv4si = 4885,
+ CODE_FOR_mve_vsbcq_m_sv4si = 4886,
+ CODE_FOR_mve_vsbcq_uv4si = 4887,
+ CODE_FOR_mve_vsbcq_sv4si = 4888,
+ CODE_FOR_mve_vst2qv16qi = 4889,
+ CODE_FOR_mve_vst2qv8hi = 4890,
+ CODE_FOR_mve_vst2qv4si = 4891,
+ CODE_FOR_mve_vst2qv8hf = 4892,
+ CODE_FOR_mve_vst2qv4sf = 4893,
+ CODE_FOR_mve_vld2qv16qi = 4894,
+ CODE_FOR_mve_vld2qv8hi = 4895,
+ CODE_FOR_mve_vld2qv4si = 4896,
+ CODE_FOR_mve_vld2qv8hf = 4897,
+ CODE_FOR_mve_vld2qv4sf = 4898,
+ CODE_FOR_mve_vld4qv16qi = 4899,
+ CODE_FOR_mve_vld4qv8hi = 4900,
+ CODE_FOR_mve_vld4qv4si = 4901,
+ CODE_FOR_mve_vld4qv8hf = 4902,
+ CODE_FOR_mve_vld4qv4sf = 4903,
+ CODE_FOR_mve_vec_extractv16qiqi = 4904,
+ CODE_FOR_mve_vec_extractv8hihi = 4905,
+ CODE_FOR_mve_vec_extractv4sisi = 4906,
+ CODE_FOR_mve_vec_extractv8hfhf = 4907,
+ CODE_FOR_mve_vec_extractv4sfsf = 4908,
+ CODE_FOR_mve_vec_extractv2didi = 4909,
+ CODE_FOR_mve_vec_setv16qi_internal = 4916,
+ CODE_FOR_mve_vec_setv8hi_internal = 4917,
+ CODE_FOR_mve_vec_setv8hf_internal = 4918,
+ CODE_FOR_mve_vec_setv4si_internal = 4919,
+ CODE_FOR_mve_vec_setv4sf_internal = 4920,
+ CODE_FOR_mve_vec_setv2di_internal = 4921,
+ CODE_FOR_mve_uqrshll_sat64_di = 4922,
+ CODE_FOR_mve_uqrshll_sat48_di = 4923,
+ CODE_FOR_mve_sqrshrl_sat64_di = 4924,
+ CODE_FOR_mve_sqrshrl_sat48_di = 4925,
+ CODE_FOR_mve_uqrshl_si = 4926,
+ CODE_FOR_mve_sqrshr_si = 4927,
+ CODE_FOR_mve_uqshll_di = 4928,
+ CODE_FOR_mve_urshrl_di = 4929,
+ CODE_FOR_mve_uqshl_si = 4930,
+ CODE_FOR_mve_urshr_si = 4931,
+ CODE_FOR_mve_sqshl_si = 4932,
+ CODE_FOR_mve_srshr_si = 4933,
+ CODE_FOR_mve_srshrl_di = 4934,
+ CODE_FOR_mve_sqshll_di = 4935,
+ CODE_FOR_mve_vshlcq_m_sv16qi = 4936,
+ CODE_FOR_mve_vshlcq_m_uv16qi = 4937,
+ CODE_FOR_mve_vshlcq_m_sv8hi = 4938,
+ CODE_FOR_mve_vshlcq_m_uv8hi = 4939,
+ CODE_FOR_mve_vshlcq_m_sv4si = 4940,
+ CODE_FOR_mve_vshlcq_m_uv4si = 4941,
+ CODE_FOR_arm_vcx1qv16qi = 4942,
+ CODE_FOR_arm_vcx1qav16qi = 4943,
+ CODE_FOR_arm_vcx2qv16qi = 4944,
+ CODE_FOR_arm_vcx2qav16qi = 4945,
+ CODE_FOR_arm_vcx3qv16qi = 4946,
+ CODE_FOR_arm_vcx3qav16qi = 4947,
+ CODE_FOR_arm_vcx1q_p_v16qi = 4948,
+ CODE_FOR_arm_vcx1qa_p_v16qi = 4949,
+ CODE_FOR_arm_vcx2q_p_v16qi = 4950,
+ CODE_FOR_arm_vcx2qa_p_v16qi = 4951,
+ CODE_FOR_arm_vcx3q_p_v16qi = 4952,
+ CODE_FOR_arm_vcx3qa_p_v16qi = 4953,
+ CODE_FOR_adddi3 = 4964,
+ CODE_FOR_addvsi4 = 4965,
+ CODE_FOR_addvdi4 = 4966,
+ CODE_FOR_addsi3_cin_vout_reg = 4967,
+ CODE_FOR_addsi3_cin_vout_imm = 4968,
+ CODE_FOR_addsi3_cin_vout_0 = 4969,
+ CODE_FOR_uaddvsi4 = 4970,
+ CODE_FOR_uaddvdi4 = 4971,
+ CODE_FOR_addsi3_cin_cout_reg = 4972,
+ CODE_FOR_addsi3_cin_cout_imm = 4973,
+ CODE_FOR_addsi3_cin_cout_0 = 4974,
+ CODE_FOR_addsi3 = 4975,
+ CODE_FOR_subvsi4 = 4976,
+ CODE_FOR_subvdi4 = 4977,
+ CODE_FOR_usubvsi4 = 4978,
+ CODE_FOR_usubvdi4 = 4979,
+ CODE_FOR_addsf3 = 4980,
+ CODE_FOR_adddf3 = 4981,
+ CODE_FOR_subdi3 = 4982,
+ CODE_FOR_subsi3 = 4983,
+ CODE_FOR_subsf3 = 4984,
+ CODE_FOR_subdf3 = 4985,
+ CODE_FOR_mulhi3 = 4986,
+ CODE_FOR_mulsi3 = 4987,
+ CODE_FOR_mulsidi3 = 4988,
+ CODE_FOR_umulsidi3 = 4989,
+ CODE_FOR_maddsidi4 = 4990,
+ CODE_FOR_umaddsidi4 = 4991,
+ CODE_FOR_smulsi3_highpart = 4992,
+ CODE_FOR_umulsi3_highpart = 4993,
+ CODE_FOR_maddhisi4 = 4994,
+ CODE_FOR_arm_smlabb = 4995,
+ CODE_FOR_arm_smlatb = 4996,
+ CODE_FOR_arm_smlatt = 4997,
+ CODE_FOR_arm_smlawb = 4998,
+ CODE_FOR_arm_smlawt = 4999,
+ CODE_FOR_mulsf3 = 5000,
+ CODE_FOR_muldf3 = 5001,
+ CODE_FOR_divsf3 = 5002,
+ CODE_FOR_divdf3 = 5003,
+ CODE_FOR_anddi3 = 5004,
+ CODE_FOR_iordi3 = 5005,
+ CODE_FOR_xordi3 = 5006,
+ CODE_FOR_one_cmpldi2 = 5007,
+ CODE_FOR_andsi3 = 5008,
+ CODE_FOR_insv = 5009,
+ CODE_FOR_iorsi3 = 5010,
+ CODE_FOR_xorsi3 = 5011,
+ CODE_FOR_smaxsi3 = 5012,
+ CODE_FOR_sminsi3 = 5013,
+ CODE_FOR_umaxsi3 = 5014,
+ CODE_FOR_uminsi3 = 5015,
+ CODE_FOR_arm_qadd = 5016,
+ CODE_FOR_arm_qsub = 5017,
+ CODE_FOR_arm_ssat = 5018,
+ CODE_FOR_arm_usat = 5019,
+ CODE_FOR_arm_saturation_occurred = 5020,
+ CODE_FOR_arm_set_saturation = 5021,
+ CODE_FOR_ashldi3 = 5022,
+ CODE_FOR_ashlsi3 = 5023,
+ CODE_FOR_ashrdi3 = 5024,
+ CODE_FOR_ashrsi3 = 5025,
+ CODE_FOR_lshrdi3 = 5026,
+ CODE_FOR_lshrsi3 = 5027,
+ CODE_FOR_rotlsi3 = 5028,
+ CODE_FOR_rotrsi3 = 5029,
+ CODE_FOR_extzv = 5030,
+ CODE_FOR_extzv_t1 = 5031,
+ CODE_FOR_extv = 5032,
+ CODE_FOR_extv_regsi = 5033,
+ CODE_FOR_negvsi3 = 5034,
+ CODE_FOR_negvdi3 = 5035,
+ CODE_FOR_negsi2 = 5036,
+ CODE_FOR_negsf2 = 5037,
+ CODE_FOR_negdf2 = 5038,
+ CODE_FOR_abssi2 = 5039,
+ CODE_FOR_abssf2 = 5040,
+ CODE_FOR_absdf2 = 5041,
+ CODE_FOR_sqrtsf2 = 5042,
+ CODE_FOR_sqrtdf2 = 5043,
+ CODE_FOR_one_cmplsi2 = 5044,
+ CODE_FOR_floatsihf2 = 5045,
+ CODE_FOR_floatdihf2 = 5046,
+ CODE_FOR_floatsisf2 = 5047,
+ CODE_FOR_floatsidf2 = 5048,
+ CODE_FOR_fix_trunchfsi2 = 5049,
+ CODE_FOR_fix_trunchfdi2 = 5050,
+ CODE_FOR_fix_truncsfsi2 = 5051,
+ CODE_FOR_fix_truncdfsi2 = 5052,
+ CODE_FOR_truncdfsf2 = 5053,
+ CODE_FOR_truncdfhf2 = 5054,
+ CODE_FOR_zero_extendqidi2 = 5055,
+ CODE_FOR_zero_extendhidi2 = 5056,
+ CODE_FOR_zero_extendsidi2 = 5057,
+ CODE_FOR_extendqidi2 = 5058,
+ CODE_FOR_extendhidi2 = 5059,
+ CODE_FOR_extendsidi2 = 5060,
+ CODE_FOR_zero_extendhisi2 = 5061,
+ CODE_FOR_zero_extendqisi2 = 5062,
+ CODE_FOR_extendhisi2 = 5063,
+ CODE_FOR_extendhisi2_mem = 5064,
+ CODE_FOR_extendqihi2 = 5065,
+ CODE_FOR_extendqisi2 = 5066,
+ CODE_FOR_arm_smlad = 5067,
+ CODE_FOR_arm_smladx = 5068,
+ CODE_FOR_arm_smlsd = 5069,
+ CODE_FOR_arm_smlsdx = 5070,
+ CODE_FOR_arm_smuad = 5071,
+ CODE_FOR_arm_smuadx = 5072,
+ CODE_FOR_arm_ssat16 = 5073,
+ CODE_FOR_arm_usat16 = 5074,
+ CODE_FOR_extendsfdf2 = 5075,
+ CODE_FOR_extendhfdf2 = 5076,
+ CODE_FOR_movdi = 5077,
+ CODE_FOR_movsi = 5078,
+ CODE_FOR_calculate_pic_address = 5079,
+ CODE_FOR_builtin_setjmp_receiver = 5080,
+ CODE_FOR_storehi = 5081,
+ CODE_FOR_storehi_bigend = 5082,
+ CODE_FOR_storeinthi = 5083,
+ CODE_FOR_storehi_single_op = 5084,
+ CODE_FOR_movhi = 5085,
+ CODE_FOR_movhi_bytes = 5086,
+ CODE_FOR_movhi_bigend = 5087,
+ CODE_FOR_reload_outhi = 5088,
+ CODE_FOR_reload_inhi = 5089,
+ CODE_FOR_movqi = 5090,
+ CODE_FOR_movhf = 5091,
+ CODE_FOR_movbf = 5092,
+ CODE_FOR_movsf = 5093,
+ CODE_FOR_movdf = 5094,
+ CODE_FOR_reload_outdf = 5095,
+ CODE_FOR_load_multiple = 5096,
+ CODE_FOR_store_multiple = 5097,
+ CODE_FOR_setmemsi = 5098,
+ CODE_FOR_cpymemqi = 5099,
+ CODE_FOR_cbranchsi4 = 5100,
+ CODE_FOR_cbranchsf4 = 5101,
+ CODE_FOR_cbranchdf4 = 5102,
+ CODE_FOR_cbranchdi4 = 5103,
+ CODE_FOR_cbranch_cc = 5104,
+ CODE_FOR_cstore_cc = 5105,
+ CODE_FOR_cstoresi4 = 5106,
+ CODE_FOR_cstorehf4 = 5107,
+ CODE_FOR_cstoresf4 = 5108,
+ CODE_FOR_cstoredf4 = 5109,
+ CODE_FOR_cstoredi4 = 5110,
+ CODE_FOR_movsicc = 5111,
+ CODE_FOR_movhfcc = 5112,
+ CODE_FOR_movsfcc = 5113,
+ CODE_FOR_movdfcc = 5114,
+ CODE_FOR_jump = 5115,
+ CODE_FOR_call = 5116,
+ CODE_FOR_call_internal = 5117,
+ CODE_FOR_nonsecure_call_internal = 5118,
+ CODE_FOR_call_value = 5119,
+ CODE_FOR_call_value_internal = 5120,
+ CODE_FOR_nonsecure_call_value_internal = 5121,
+ CODE_FOR_sibcall_internal = 5122,
+ CODE_FOR_sibcall = 5123,
+ CODE_FOR_sibcall_value_internal = 5124,
+ CODE_FOR_sibcall_value = 5125,
+ CODE_FOR_return = 5126,
+ CODE_FOR_simple_return = 5127,
+ CODE_FOR_return_addr_mask = 5128,
+ CODE_FOR_untyped_call = 5129,
+ CODE_FOR_untyped_return = 5130,
+ CODE_FOR_stack_protect_combined_set = 5131,
+ CODE_FOR_stack_protect_combined_test = 5132,
+ CODE_FOR_stack_protect_set = 5133,
+ CODE_FOR_stack_protect_test = 5134,
+ CODE_FOR_casesi = 5135,
+ CODE_FOR_arm_casesi_internal = 5136,
+ CODE_FOR_indirect_jump = 5137,
+ CODE_FOR_prologue = 5138,
+ CODE_FOR_epilogue = 5139,
+ CODE_FOR_sibcall_epilogue = 5140,
+ CODE_FOR_eh_epilogue = 5141,
+ CODE_FOR_eh_return = 5142,
+ CODE_FOR_get_thread_pointersi = 5143,
+ CODE_FOR_arm_legacy_rev = 5144,
+ CODE_FOR_thumb_legacy_rev = 5145,
+ CODE_FOR_modsi3 = 5146,
+ CODE_FOR_bswapsi2 = 5147,
+ CODE_FOR_bswaphi2 = 5148,
+ CODE_FOR_copysignsf3 = 5149,
+ CODE_FOR_copysigndf3 = 5150,
+ CODE_FOR_movmisaligndi = 5151,
+ CODE_FOR_movmisalignhi = 5152,
+ CODE_FOR_movmisalignsi = 5153,
+ CODE_FOR_arm_ldc = 5154,
+ CODE_FOR_arm_ldc2 = 5155,
+ CODE_FOR_arm_ldcl = 5156,
+ CODE_FOR_arm_ldc2l = 5157,
+ CODE_FOR_arm_stc = 5158,
+ CODE_FOR_arm_stc2 = 5159,
+ CODE_FOR_arm_stcl = 5160,
+ CODE_FOR_arm_stc2l = 5161,
+ CODE_FOR_speculation_barrier = 5162,
+ CODE_FOR_movv16qi = 5163,
+ CODE_FOR_movv8hi = 5164,
+ CODE_FOR_movv4si = 5165,
+ CODE_FOR_movv4sf = 5166,
+ CODE_FOR_movv2di = 5167,
+ CODE_FOR_movv2si = 5168,
+ CODE_FOR_movv4hi = 5169,
+ CODE_FOR_movv8qi = 5170,
+ CODE_FOR_movv2sf = 5171,
+ CODE_FOR_movv8hf = 5172,
+ CODE_FOR_addv8qi3 = 5173,
+ CODE_FOR_addv16qi3 = 5174,
+ CODE_FOR_addv4hi3 = 5175,
+ CODE_FOR_addv8hi3 = 5176,
+ CODE_FOR_addv2si3 = 5177,
+ CODE_FOR_addv4si3 = 5178,
+ CODE_FOR_addv4hf3 = 5179,
+ CODE_FOR_addv8hf3 = 5180,
+ CODE_FOR_addv2sf3 = 5181,
+ CODE_FOR_addv4sf3 = 5182,
+ CODE_FOR_addv2di3 = 5183,
+ CODE_FOR_subv8qi3 = 5184,
+ CODE_FOR_subv16qi3 = 5185,
+ CODE_FOR_subv4hi3 = 5186,
+ CODE_FOR_subv8hi3 = 5187,
+ CODE_FOR_subv2si3 = 5188,
+ CODE_FOR_subv4si3 = 5189,
+ CODE_FOR_subv4hf3 = 5190,
+ CODE_FOR_subv8hf3 = 5191,
+ CODE_FOR_subv2sf3 = 5192,
+ CODE_FOR_subv4sf3 = 5193,
+ CODE_FOR_subv2di3 = 5194,
+ CODE_FOR_mulv8qi3 = 5195,
+ CODE_FOR_mulv16qi3 = 5196,
+ CODE_FOR_mulv4hi3 = 5197,
+ CODE_FOR_mulv8hi3 = 5198,
+ CODE_FOR_mulv2si3 = 5199,
+ CODE_FOR_mulv4si3 = 5200,
+ CODE_FOR_mulv2sf3 = 5201,
+ CODE_FOR_mulv4sf3 = 5202,
+ CODE_FOR_mulv8hf3 = 5203,
+ CODE_FOR_mulv4hf3 = 5204,
+ CODE_FOR_sminv2si3 = 5205,
+ CODE_FOR_sminv4hi3 = 5206,
+ CODE_FOR_sminv8qi3 = 5207,
+ CODE_FOR_sminv2sf3 = 5208,
+ CODE_FOR_sminv4si3 = 5209,
+ CODE_FOR_sminv8hi3 = 5210,
+ CODE_FOR_sminv16qi3 = 5211,
+ CODE_FOR_sminv4sf3 = 5212,
+ CODE_FOR_uminv2si3 = 5213,
+ CODE_FOR_uminv4hi3 = 5214,
+ CODE_FOR_uminv8qi3 = 5215,
+ CODE_FOR_uminv4si3 = 5216,
+ CODE_FOR_uminv8hi3 = 5217,
+ CODE_FOR_uminv16qi3 = 5218,
+ CODE_FOR_smaxv2si3 = 5219,
+ CODE_FOR_smaxv4hi3 = 5220,
+ CODE_FOR_smaxv8qi3 = 5221,
+ CODE_FOR_smaxv2sf3 = 5222,
+ CODE_FOR_smaxv4si3 = 5223,
+ CODE_FOR_smaxv8hi3 = 5224,
+ CODE_FOR_smaxv16qi3 = 5225,
+ CODE_FOR_smaxv4sf3 = 5226,
+ CODE_FOR_umaxv2si3 = 5227,
+ CODE_FOR_umaxv4hi3 = 5228,
+ CODE_FOR_umaxv8qi3 = 5229,
+ CODE_FOR_umaxv4si3 = 5230,
+ CODE_FOR_umaxv8hi3 = 5231,
+ CODE_FOR_umaxv16qi3 = 5232,
+ CODE_FOR_vec_permv8qi = 5233,
+ CODE_FOR_vec_permv16qi = 5234,
+ CODE_FOR_vec_extractv16qiqi = 5235,
+ CODE_FOR_vec_extractv8hihi = 5236,
+ CODE_FOR_vec_extractv8hfhf = 5237,
+ CODE_FOR_vec_extractv4sisi = 5238,
+ CODE_FOR_vec_extractv4sfsf = 5239,
+ CODE_FOR_vec_extractv2didi = 5240,
+ CODE_FOR_vec_setv16qi = 5241,
+ CODE_FOR_vec_setv8hi = 5242,
+ CODE_FOR_vec_setv8hf = 5243,
+ CODE_FOR_vec_setv4si = 5244,
+ CODE_FOR_vec_setv4sf = 5245,
+ CODE_FOR_vec_setv2di = 5246,
+ CODE_FOR_andv8qi3 = 5247,
+ CODE_FOR_andv16qi3 = 5248,
+ CODE_FOR_andv4hi3 = 5249,
+ CODE_FOR_andv8hi3 = 5250,
+ CODE_FOR_andv2si3 = 5251,
+ CODE_FOR_andv4si3 = 5252,
+ CODE_FOR_andv4hf3 = 5253,
+ CODE_FOR_andv8hf3 = 5254,
+ CODE_FOR_andv2sf3 = 5255,
+ CODE_FOR_andv4sf3 = 5256,
+ CODE_FOR_andv2di3 = 5257,
+ CODE_FOR_iorv8qi3 = 5258,
+ CODE_FOR_iorv16qi3 = 5259,
+ CODE_FOR_iorv4hi3 = 5260,
+ CODE_FOR_iorv8hi3 = 5261,
+ CODE_FOR_iorv2si3 = 5262,
+ CODE_FOR_iorv4si3 = 5263,
+ CODE_FOR_iorv4hf3 = 5264,
+ CODE_FOR_iorv8hf3 = 5265,
+ CODE_FOR_iorv2sf3 = 5266,
+ CODE_FOR_iorv4sf3 = 5267,
+ CODE_FOR_iorv2di3 = 5268,
+ CODE_FOR_xorv8qi3 = 5269,
+ CODE_FOR_xorv16qi3 = 5270,
+ CODE_FOR_xorv4hi3 = 5271,
+ CODE_FOR_xorv8hi3 = 5272,
+ CODE_FOR_xorv2si3 = 5273,
+ CODE_FOR_xorv4si3 = 5274,
+ CODE_FOR_xorv4hf3 = 5275,
+ CODE_FOR_xorv8hf3 = 5276,
+ CODE_FOR_xorv2sf3 = 5277,
+ CODE_FOR_xorv4sf3 = 5278,
+ CODE_FOR_xorv2di3 = 5279,
+ CODE_FOR_one_cmplv8qi2 = 5280,
+ CODE_FOR_one_cmplv16qi2 = 5281,
+ CODE_FOR_one_cmplv4hi2 = 5282,
+ CODE_FOR_one_cmplv8hi2 = 5283,
+ CODE_FOR_one_cmplv2si2 = 5284,
+ CODE_FOR_one_cmplv4si2 = 5285,
+ CODE_FOR_one_cmplv4hf2 = 5286,
+ CODE_FOR_one_cmplv8hf2 = 5287,
+ CODE_FOR_one_cmplv2sf2 = 5288,
+ CODE_FOR_one_cmplv4sf2 = 5289,
+ CODE_FOR_one_cmplv2di2 = 5290,
+ CODE_FOR_absv8qi2 = 5291,
+ CODE_FOR_negv8qi2 = 5292,
+ CODE_FOR_absv16qi2 = 5293,
+ CODE_FOR_negv16qi2 = 5294,
+ CODE_FOR_absv4hi2 = 5295,
+ CODE_FOR_negv4hi2 = 5296,
+ CODE_FOR_absv8hi2 = 5297,
+ CODE_FOR_negv8hi2 = 5298,
+ CODE_FOR_absv2si2 = 5299,
+ CODE_FOR_negv2si2 = 5300,
+ CODE_FOR_absv4si2 = 5301,
+ CODE_FOR_negv4si2 = 5302,
+ CODE_FOR_absv2sf2 = 5303,
+ CODE_FOR_negv2sf2 = 5304,
+ CODE_FOR_absv4sf2 = 5305,
+ CODE_FOR_negv4sf2 = 5306,
+ CODE_FOR_absv8hf2 = 5307,
+ CODE_FOR_negv8hf2 = 5308,
+ CODE_FOR_absv4hf2 = 5309,
+ CODE_FOR_negv4hf2 = 5310,
+ CODE_FOR_cadd90v4hf3 = 5311,
+ CODE_FOR_cadd270v4hf3 = 5312,
+ CODE_FOR_cadd90v8hf3 = 5313,
+ CODE_FOR_cadd270v8hf3 = 5314,
+ CODE_FOR_cadd90v2sf3 = 5315,
+ CODE_FOR_cadd270v2sf3 = 5316,
+ CODE_FOR_cadd90v4sf3 = 5317,
+ CODE_FOR_cadd270v4sf3 = 5318,
+ CODE_FOR_cmulv8hf3 = 5319,
+ CODE_FOR_cmul_conjv8hf3 = 5320,
+ CODE_FOR_cmulv4sf3 = 5321,
+ CODE_FOR_cmul_conjv4sf3 = 5322,
+ CODE_FOR_arm_vcmla0v4hf = 5323,
+ CODE_FOR_arm_vcmla90v4hf = 5324,
+ CODE_FOR_arm_vcmla180v4hf = 5325,
+ CODE_FOR_arm_vcmla270v4hf = 5326,
+ CODE_FOR_arm_vcmla0v8hf = 5327,
+ CODE_FOR_arm_vcmla90v8hf = 5328,
+ CODE_FOR_arm_vcmla180v8hf = 5329,
+ CODE_FOR_arm_vcmla270v8hf = 5330,
+ CODE_FOR_arm_vcmla0v2sf = 5331,
+ CODE_FOR_arm_vcmla90v2sf = 5332,
+ CODE_FOR_arm_vcmla180v2sf = 5333,
+ CODE_FOR_arm_vcmla270v2sf = 5334,
+ CODE_FOR_arm_vcmla0v4sf = 5335,
+ CODE_FOR_arm_vcmla90v4sf = 5336,
+ CODE_FOR_arm_vcmla180v4sf = 5337,
+ CODE_FOR_arm_vcmla270v4sf = 5338,
+ CODE_FOR_cmlav4hf4 = 5339,
+ CODE_FOR_cmla_conjv4hf4 = 5340,
+ CODE_FOR_cmlsv4hf4 = 5341,
+ CODE_FOR_cmls_conjv4hf4 = 5342,
+ CODE_FOR_cmlav8hf4 = 5343,
+ CODE_FOR_cmla_conjv8hf4 = 5344,
+ CODE_FOR_cmlsv8hf4 = 5345,
+ CODE_FOR_cmls_conjv8hf4 = 5346,
+ CODE_FOR_cmlav2sf4 = 5347,
+ CODE_FOR_cmla_conjv2sf4 = 5348,
+ CODE_FOR_cmlsv2sf4 = 5349,
+ CODE_FOR_cmls_conjv2sf4 = 5350,
+ CODE_FOR_cmlav4sf4 = 5351,
+ CODE_FOR_cmla_conjv4sf4 = 5352,
+ CODE_FOR_cmlsv4sf4 = 5353,
+ CODE_FOR_cmls_conjv4sf4 = 5354,
+ CODE_FOR_movmisalignv8qi = 5355,
+ CODE_FOR_movmisalignv16qi = 5356,
+ CODE_FOR_movmisalignv4hi = 5357,
+ CODE_FOR_movmisalignv8hi = 5358,
+ CODE_FOR_movmisalignv2si = 5359,
+ CODE_FOR_movmisalignv4si = 5360,
+ CODE_FOR_movmisalignv4hf = 5361,
+ CODE_FOR_movmisalignv8hf = 5362,
+ CODE_FOR_movmisalignv2sf = 5363,
+ CODE_FOR_movmisalignv4sf = 5364,
+ CODE_FOR_movmisalignv2di = 5365,
+ CODE_FOR_vashlv8qi3 = 5366,
+ CODE_FOR_vashlv16qi3 = 5367,
+ CODE_FOR_vashlv4hi3 = 5368,
+ CODE_FOR_vashlv8hi3 = 5369,
+ CODE_FOR_vashlv2si3 = 5370,
+ CODE_FOR_vashlv4si3 = 5371,
+ CODE_FOR_vashrv8qi3 = 5372,
+ CODE_FOR_vashrv16qi3 = 5373,
+ CODE_FOR_vashrv4hi3 = 5374,
+ CODE_FOR_vashrv8hi3 = 5375,
+ CODE_FOR_vashrv2si3 = 5376,
+ CODE_FOR_vashrv4si3 = 5377,
+ CODE_FOR_vlshrv8qi3 = 5378,
+ CODE_FOR_vlshrv16qi3 = 5379,
+ CODE_FOR_vlshrv4hi3 = 5380,
+ CODE_FOR_vlshrv8hi3 = 5381,
+ CODE_FOR_vlshrv2si3 = 5382,
+ CODE_FOR_vlshrv4si3 = 5383,
+ CODE_FOR_vcondv8qiv8qi = 5384,
+ CODE_FOR_vcondv16qiv16qi = 5385,
+ CODE_FOR_vcondv4hiv4hi = 5386,
+ CODE_FOR_vcondv8hiv8hi = 5387,
+ CODE_FOR_vcondv2siv2si = 5388,
+ CODE_FOR_vcondv4siv4si = 5389,
+ CODE_FOR_vcondv2sfv2sf = 5390,
+ CODE_FOR_vcondv4sfv4sf = 5391,
+ CODE_FOR_vcondv8hfv8hf = 5392,
+ CODE_FOR_vcondv4hfv4hf = 5393,
+ CODE_FOR_vcondv2sfv2si = 5394,
+ CODE_FOR_vcondv2siv2sf = 5395,
+ CODE_FOR_vcondv4sfv4si = 5396,
+ CODE_FOR_vcondv4siv4sf = 5397,
+ CODE_FOR_vcondv4hfv4hi = 5398,
+ CODE_FOR_vcondv4hiv4hf = 5399,
+ CODE_FOR_vcondv8hfv8hi = 5400,
+ CODE_FOR_vcondv8hiv8hf = 5401,
+ CODE_FOR_vconduv8qiv8qi = 5402,
+ CODE_FOR_vconduv16qiv16qi = 5403,
+ CODE_FOR_vconduv4hiv4hi = 5404,
+ CODE_FOR_vconduv8hiv8hi = 5405,
+ CODE_FOR_vconduv2siv2si = 5406,
+ CODE_FOR_vconduv4siv4si = 5407,
+ CODE_FOR_vconduv2sfv2si = 5408,
+ CODE_FOR_vconduv4sfv4si = 5409,
+ CODE_FOR_vec_load_lanesoiv16qi = 5410,
+ CODE_FOR_vec_load_lanesoiv8hi = 5411,
+ CODE_FOR_vec_load_lanesoiv8hf = 5412,
+ CODE_FOR_vec_load_lanesoiv4si = 5413,
+ CODE_FOR_vec_load_lanesoiv4sf = 5414,
+ CODE_FOR_vec_store_lanesoiv16qi = 5415,
+ CODE_FOR_vec_store_lanesoiv8hi = 5416,
+ CODE_FOR_vec_store_lanesoiv8hf = 5417,
+ CODE_FOR_vec_store_lanesoiv4si = 5418,
+ CODE_FOR_vec_store_lanesoiv4sf = 5419,
+ CODE_FOR_vec_load_lanesxiv16qi = 5420,
+ CODE_FOR_vec_load_lanesxiv8hi = 5421,
+ CODE_FOR_vec_load_lanesxiv8hf = 5422,
+ CODE_FOR_vec_load_lanesxiv4si = 5423,
+ CODE_FOR_vec_load_lanesxiv4sf = 5424,
+ CODE_FOR_vec_store_lanesxiv16qi = 5425,
+ CODE_FOR_vec_store_lanesxiv8hi = 5426,
+ CODE_FOR_vec_store_lanesxiv8hf = 5427,
+ CODE_FOR_vec_store_lanesxiv4si = 5428,
+ CODE_FOR_vec_store_lanesxiv4sf = 5429,
+ CODE_FOR_reduc_plus_scal_v16qi = 5430,
+ CODE_FOR_reduc_plus_scal_v8hi = 5431,
+ CODE_FOR_reduc_plus_scal_v4si = 5432,
+ CODE_FOR_reduc_plus_scal_v4sf = 5433,
+ CODE_FOR_avgv16qi3_floor = 5434,
+ CODE_FOR_avgv8hi3_floor = 5435,
+ CODE_FOR_avgv4si3_floor = 5436,
+ CODE_FOR_uavgv16qi3_floor = 5437,
+ CODE_FOR_uavgv8hi3_floor = 5438,
+ CODE_FOR_uavgv4si3_floor = 5439,
+ CODE_FOR_avgv16qi3_ceil = 5440,
+ CODE_FOR_avgv8hi3_ceil = 5441,
+ CODE_FOR_avgv4si3_ceil = 5442,
+ CODE_FOR_uavgv16qi3_ceil = 5443,
+ CODE_FOR_uavgv8hi3_ceil = 5444,
+ CODE_FOR_uavgv4si3_ceil = 5445,
+ CODE_FOR_clzv8qi2 = 5446,
+ CODE_FOR_clzv16qi2 = 5447,
+ CODE_FOR_clzv4hi2 = 5448,
+ CODE_FOR_clzv8hi2 = 5449,
+ CODE_FOR_clzv2si2 = 5450,
+ CODE_FOR_clzv4si2 = 5451,
+ CODE_FOR_vec_initv8qiqi = 5452,
+ CODE_FOR_vec_initv16qiqi = 5453,
+ CODE_FOR_vec_initv4hihi = 5454,
+ CODE_FOR_vec_initv8hihi = 5455,
+ CODE_FOR_vec_initv2sisi = 5456,
+ CODE_FOR_vec_initv4sisi = 5457,
+ CODE_FOR_vec_initv4hfhf = 5458,
+ CODE_FOR_vec_initv8hfhf = 5459,
+ CODE_FOR_vec_initv4bfbf = 5460,
+ CODE_FOR_vec_initv8bfbf = 5461,
+ CODE_FOR_vec_initv2sfsf = 5462,
+ CODE_FOR_vec_initv4sfsf = 5463,
+ CODE_FOR_vec_initdidi = 5464,
+ CODE_FOR_vec_initv2didi = 5465,
+ CODE_FOR_iwmmxt_setwcgr0 = 5466,
+ CODE_FOR_iwmmxt_setwcgr1 = 5467,
+ CODE_FOR_iwmmxt_setwcgr2 = 5468,
+ CODE_FOR_iwmmxt_setwcgr3 = 5469,
+ CODE_FOR_iwmmxt_getwcgr0 = 5470,
+ CODE_FOR_iwmmxt_getwcgr1 = 5471,
+ CODE_FOR_iwmmxt_getwcgr2 = 5472,
+ CODE_FOR_iwmmxt_getwcgr3 = 5473,
+ CODE_FOR_neon_vabshf = 5474,
+ CODE_FOR_neon_vfmahf = 5475,
+ CODE_FOR_neon_vfmshf = 5476,
+ CODE_FOR_neon_vcvths_nhf = 5477,
+ CODE_FOR_neon_vcvthu_nhf = 5478,
+ CODE_FOR_neon_vcvths_nsi = 5479,
+ CODE_FOR_neon_vcvthu_nsi = 5480,
+ CODE_FOR_thumb_movhi_clobber = 5481,
+ CODE_FOR_cbranchqi4 = 5482,
+ CODE_FOR_cbranchsi4_neg_late = 5483,
+ CODE_FOR_cstoresi_eq0_thumb1 = 5484,
+ CODE_FOR_cstoresi_ne0_thumb1 = 5485,
+ CODE_FOR_thumb1_casesi_internal_pic = 5486,
+ CODE_FOR_tablejump = 5487,
+ CODE_FOR_thumb2_casesi_internal = 5488,
+ CODE_FOR_thumb2_casesi_internal_pic = 5489,
+ CODE_FOR_doloop_end = 5490,
+ CODE_FOR_doloop_begin = 5491,
+ CODE_FOR_movti = 5492,
+ CODE_FOR_movei = 5493,
+ CODE_FOR_movoi = 5494,
+ CODE_FOR_movci = 5495,
+ CODE_FOR_movxi = 5496,
+ CODE_FOR_movv4hf = 5497,
+ CODE_FOR_movv4bf = 5498,
+ CODE_FOR_movv8bf = 5499,
+ CODE_FOR_divv2sf3 = 5500,
+ CODE_FOR_divv4sf3 = 5501,
+ CODE_FOR_ceilv2sf2 = 5502,
+ CODE_FOR_btruncv2sf2 = 5503,
+ CODE_FOR_floorv2sf2 = 5504,
+ CODE_FOR_rintv2sf2 = 5505,
+ CODE_FOR_roundv2sf2 = 5506,
+ CODE_FOR_roundevenv2sf2 = 5507,
+ CODE_FOR_ceilv4sf2 = 5508,
+ CODE_FOR_btruncv4sf2 = 5509,
+ CODE_FOR_floorv4sf2 = 5510,
+ CODE_FOR_rintv4sf2 = 5511,
+ CODE_FOR_roundv4sf2 = 5512,
+ CODE_FOR_roundevenv4sf2 = 5513,
+ CODE_FOR_lceilv2sfv2si2 = 5514,
+ CODE_FOR_lfloorv2sfv2si2 = 5515,
+ CODE_FOR_lroundv2sfv2si2 = 5516,
+ CODE_FOR_lceiluv2sfv2si2 = 5517,
+ CODE_FOR_lflooruv2sfv2si2 = 5518,
+ CODE_FOR_lrounduv2sfv2si2 = 5519,
+ CODE_FOR_lceilv4sfv4si2 = 5520,
+ CODE_FOR_lfloorv4sfv4si2 = 5521,
+ CODE_FOR_lroundv4sfv4si2 = 5522,
+ CODE_FOR_lceiluv4sfv4si2 = 5523,
+ CODE_FOR_lflooruv4sfv4si2 = 5524,
+ CODE_FOR_lrounduv4sfv4si2 = 5525,
+ CODE_FOR_neon_vabsv8hf = 5526,
+ CODE_FOR_neon_vnegv8hf = 5527,
+ CODE_FOR_neon_vabsv4hf = 5528,
+ CODE_FOR_neon_vnegv4hf = 5529,
+ CODE_FOR_widen_ssumv16qi3 = 5530,
+ CODE_FOR_widen_ssumv8hi3 = 5531,
+ CODE_FOR_widen_ssumv4si3 = 5532,
+ CODE_FOR_widen_usumv16qi3 = 5533,
+ CODE_FOR_widen_usumv8hi3 = 5534,
+ CODE_FOR_widen_usumv4si3 = 5535,
+ CODE_FOR_move_hi_quad_v2di = 5536,
+ CODE_FOR_move_hi_quad_v2df = 5537,
+ CODE_FOR_move_hi_quad_v16qi = 5538,
+ CODE_FOR_move_hi_quad_v8hi = 5539,
+ CODE_FOR_move_hi_quad_v4si = 5540,
+ CODE_FOR_move_hi_quad_v4sf = 5541,
+ CODE_FOR_move_lo_quad_v2di = 5542,
+ CODE_FOR_move_lo_quad_v2df = 5543,
+ CODE_FOR_move_lo_quad_v16qi = 5544,
+ CODE_FOR_move_lo_quad_v8hi = 5545,
+ CODE_FOR_move_lo_quad_v4si = 5546,
+ CODE_FOR_move_lo_quad_v4sf = 5547,
+ CODE_FOR_reduc_plus_scal_v8qi = 5548,
+ CODE_FOR_reduc_plus_scal_v4hi = 5549,
+ CODE_FOR_reduc_plus_scal_v2si = 5550,
+ CODE_FOR_reduc_plus_scal_v2sf = 5551,
+ CODE_FOR_reduc_plus_scal_v2di = 5552,
+ CODE_FOR_reduc_smin_scal_v8qi = 5553,
+ CODE_FOR_reduc_smin_scal_v4hi = 5554,
+ CODE_FOR_reduc_smin_scal_v2si = 5555,
+ CODE_FOR_reduc_smin_scal_v2sf = 5556,
+ CODE_FOR_reduc_smin_scal_v16qi = 5557,
+ CODE_FOR_reduc_smin_scal_v8hi = 5558,
+ CODE_FOR_reduc_smin_scal_v4si = 5559,
+ CODE_FOR_reduc_smin_scal_v4sf = 5560,
+ CODE_FOR_reduc_smax_scal_v8qi = 5561,
+ CODE_FOR_reduc_smax_scal_v4hi = 5562,
+ CODE_FOR_reduc_smax_scal_v2si = 5563,
+ CODE_FOR_reduc_smax_scal_v2sf = 5564,
+ CODE_FOR_reduc_smax_scal_v16qi = 5565,
+ CODE_FOR_reduc_smax_scal_v8hi = 5566,
+ CODE_FOR_reduc_smax_scal_v4si = 5567,
+ CODE_FOR_reduc_smax_scal_v4sf = 5568,
+ CODE_FOR_reduc_umin_scal_v8qi = 5569,
+ CODE_FOR_reduc_umin_scal_v4hi = 5570,
+ CODE_FOR_reduc_umin_scal_v2si = 5571,
+ CODE_FOR_reduc_umin_scal_v16qi = 5572,
+ CODE_FOR_reduc_umin_scal_v8hi = 5573,
+ CODE_FOR_reduc_umin_scal_v4si = 5574,
+ CODE_FOR_reduc_umax_scal_v8qi = 5575,
+ CODE_FOR_reduc_umax_scal_v4hi = 5576,
+ CODE_FOR_reduc_umax_scal_v2si = 5577,
+ CODE_FOR_reduc_umax_scal_v16qi = 5578,
+ CODE_FOR_reduc_umax_scal_v8hi = 5579,
+ CODE_FOR_reduc_umax_scal_v4si = 5580,
+ CODE_FOR_vec_cmpv8qiv8qi = 5581,
+ CODE_FOR_vec_cmpv16qiv16qi = 5582,
+ CODE_FOR_vec_cmpv4hiv4hi = 5583,
+ CODE_FOR_vec_cmpv8hiv8hi = 5584,
+ CODE_FOR_vec_cmpv2siv2si = 5585,
+ CODE_FOR_vec_cmpv4siv4si = 5586,
+ CODE_FOR_vec_cmpv2sfv2si = 5587,
+ CODE_FOR_vec_cmpv4sfv4si = 5588,
+ CODE_FOR_vec_cmpv8hfv8hi = 5589,
+ CODE_FOR_vec_cmpv4hfv4hi = 5590,
+ CODE_FOR_vec_cmpuv8qiv8qi = 5591,
+ CODE_FOR_vec_cmpuv16qiv16qi = 5592,
+ CODE_FOR_vec_cmpuv4hiv4hi = 5593,
+ CODE_FOR_vec_cmpuv8hiv8hi = 5594,
+ CODE_FOR_vec_cmpuv2siv2si = 5595,
+ CODE_FOR_vec_cmpuv4siv4si = 5596,
+ CODE_FOR_vcond_mask_v8qiv8qi = 5597,
+ CODE_FOR_vcond_mask_v16qiv16qi = 5598,
+ CODE_FOR_vcond_mask_v4hiv4hi = 5599,
+ CODE_FOR_vcond_mask_v8hiv8hi = 5600,
+ CODE_FOR_vcond_mask_v2siv2si = 5601,
+ CODE_FOR_vcond_mask_v4siv4si = 5602,
+ CODE_FOR_vcond_mask_v2sfv2si = 5603,
+ CODE_FOR_vcond_mask_v4sfv4si = 5604,
+ CODE_FOR_vcond_mask_v8hfv8hi = 5605,
+ CODE_FOR_vcond_mask_v4hfv4hi = 5606,
+ CODE_FOR_neon_vaddv2sf = 5607,
+ CODE_FOR_neon_vaddv4sf = 5608,
+ CODE_FOR_neon_vaddv8hf = 5609,
+ CODE_FOR_neon_vaddv4hf = 5610,
+ CODE_FOR_neon_vsubv8hf = 5611,
+ CODE_FOR_neon_vsubv4hf = 5612,
+ CODE_FOR_neon_vmlav8qi = 5613,
+ CODE_FOR_neon_vmlav16qi = 5614,
+ CODE_FOR_neon_vmlav4hi = 5615,
+ CODE_FOR_neon_vmlav8hi = 5616,
+ CODE_FOR_neon_vmlav2si = 5617,
+ CODE_FOR_neon_vmlav4si = 5618,
+ CODE_FOR_neon_vmlav2sf = 5619,
+ CODE_FOR_neon_vmlav4sf = 5620,
+ CODE_FOR_neon_vfmav2sf = 5621,
+ CODE_FOR_neon_vfmav4sf = 5622,
+ CODE_FOR_neon_vfmav8hf = 5623,
+ CODE_FOR_neon_vfmav4hf = 5624,
+ CODE_FOR_neon_vfmsv2sf = 5625,
+ CODE_FOR_neon_vfmsv4sf = 5626,
+ CODE_FOR_neon_vfmsv8hf = 5627,
+ CODE_FOR_neon_vfmsv4hf = 5628,
+ CODE_FOR_neon_vfmal_lowv2sf = 5629,
+ CODE_FOR_neon_vfmal_highv2sf = 5630,
+ CODE_FOR_neon_vfmsl_lowv2sf = 5631,
+ CODE_FOR_neon_vfmsl_highv2sf = 5632,
+ CODE_FOR_neon_vfmal_lowv4sf = 5633,
+ CODE_FOR_neon_vfmal_highv4sf = 5634,
+ CODE_FOR_neon_vfmsl_lowv4sf = 5635,
+ CODE_FOR_neon_vfmsl_highv4sf = 5636,
+ CODE_FOR_neon_vfmal_lane_lowv2sf = 5637,
+ CODE_FOR_neon_vfmal_lane_highv2sf = 5638,
+ CODE_FOR_neon_vfmsl_lane_lowv2sf = 5639,
+ CODE_FOR_neon_vfmsl_lane_highv2sf = 5640,
+ CODE_FOR_neon_vfmal_lane_lowv4sf = 5641,
+ CODE_FOR_neon_vfmal_lane_highv4sf = 5642,
+ CODE_FOR_neon_vfmsl_lane_lowv4sf = 5643,
+ CODE_FOR_neon_vfmsl_lane_highv4sf = 5644,
+ CODE_FOR_neon_vfmal_lane_lowv8hfv2sf = 5645,
+ CODE_FOR_neon_vfmal_lane_highv8hfv2sf = 5646,
+ CODE_FOR_neon_vfmsl_lane_lowv8hfv2sf = 5647,
+ CODE_FOR_neon_vfmsl_lane_highv8hfv2sf = 5648,
+ CODE_FOR_neon_vfmal_lane_lowv4hfv4sf = 5649,
+ CODE_FOR_neon_vfmal_lane_highv4hfv4sf = 5650,
+ CODE_FOR_neon_vfmsl_lane_lowv4hfv4sf = 5651,
+ CODE_FOR_neon_vfmsl_lane_highv4hfv4sf = 5652,
+ CODE_FOR_neon_vmlsv8qi = 5653,
+ CODE_FOR_neon_vmlsv16qi = 5654,
+ CODE_FOR_neon_vmlsv4hi = 5655,
+ CODE_FOR_neon_vmlsv8hi = 5656,
+ CODE_FOR_neon_vmlsv2si = 5657,
+ CODE_FOR_neon_vmlsv4si = 5658,
+ CODE_FOR_neon_vmlsv2sf = 5659,
+ CODE_FOR_neon_vmlsv4sf = 5660,
+ CODE_FOR_neon_vsubv2sf = 5661,
+ CODE_FOR_neon_vsubv4sf = 5662,
+ CODE_FOR_neon_vceqv8qi = 5663,
+ CODE_FOR_neon_vcgtv8qi = 5664,
+ CODE_FOR_neon_vcgev8qi = 5665,
+ CODE_FOR_neon_vclev8qi = 5666,
+ CODE_FOR_neon_vcltv8qi = 5667,
+ CODE_FOR_neon_vceqv16qi = 5668,
+ CODE_FOR_neon_vcgtv16qi = 5669,
+ CODE_FOR_neon_vcgev16qi = 5670,
+ CODE_FOR_neon_vclev16qi = 5671,
+ CODE_FOR_neon_vcltv16qi = 5672,
+ CODE_FOR_neon_vceqv4hi = 5673,
+ CODE_FOR_neon_vcgtv4hi = 5674,
+ CODE_FOR_neon_vcgev4hi = 5675,
+ CODE_FOR_neon_vclev4hi = 5676,
+ CODE_FOR_neon_vcltv4hi = 5677,
+ CODE_FOR_neon_vceqv8hi = 5678,
+ CODE_FOR_neon_vcgtv8hi = 5679,
+ CODE_FOR_neon_vcgev8hi = 5680,
+ CODE_FOR_neon_vclev8hi = 5681,
+ CODE_FOR_neon_vcltv8hi = 5682,
+ CODE_FOR_neon_vceqv2si = 5683,
+ CODE_FOR_neon_vcgtv2si = 5684,
+ CODE_FOR_neon_vcgev2si = 5685,
+ CODE_FOR_neon_vclev2si = 5686,
+ CODE_FOR_neon_vcltv2si = 5687,
+ CODE_FOR_neon_vceqv4si = 5688,
+ CODE_FOR_neon_vcgtv4si = 5689,
+ CODE_FOR_neon_vcgev4si = 5690,
+ CODE_FOR_neon_vclev4si = 5691,
+ CODE_FOR_neon_vcltv4si = 5692,
+ CODE_FOR_neon_vceqv2sf = 5693,
+ CODE_FOR_neon_vcgtv2sf = 5694,
+ CODE_FOR_neon_vcgev2sf = 5695,
+ CODE_FOR_neon_vclev2sf = 5696,
+ CODE_FOR_neon_vcltv2sf = 5697,
+ CODE_FOR_neon_vceqv4sf = 5698,
+ CODE_FOR_neon_vcgtv4sf = 5699,
+ CODE_FOR_neon_vcgev4sf = 5700,
+ CODE_FOR_neon_vclev4sf = 5701,
+ CODE_FOR_neon_vcltv4sf = 5702,
+ CODE_FOR_neon_vceqv8hf = 5703,
+ CODE_FOR_neon_vcgtv8hf = 5704,
+ CODE_FOR_neon_vcgev8hf = 5705,
+ CODE_FOR_neon_vclev8hf = 5706,
+ CODE_FOR_neon_vcltv8hf = 5707,
+ CODE_FOR_neon_vceqv4hf = 5708,
+ CODE_FOR_neon_vcgtv4hf = 5709,
+ CODE_FOR_neon_vcgev4hf = 5710,
+ CODE_FOR_neon_vclev4hf = 5711,
+ CODE_FOR_neon_vcltv4hf = 5712,
+ CODE_FOR_neon_vcagtv2sf = 5713,
+ CODE_FOR_neon_vcagev2sf = 5714,
+ CODE_FOR_neon_vcaltv2sf = 5715,
+ CODE_FOR_neon_vcalev2sf = 5716,
+ CODE_FOR_neon_vcagtv4sf = 5717,
+ CODE_FOR_neon_vcagev4sf = 5718,
+ CODE_FOR_neon_vcaltv4sf = 5719,
+ CODE_FOR_neon_vcalev4sf = 5720,
+ CODE_FOR_neon_vcagtv8hf = 5721,
+ CODE_FOR_neon_vcagev8hf = 5722,
+ CODE_FOR_neon_vcaltv8hf = 5723,
+ CODE_FOR_neon_vcalev8hf = 5724,
+ CODE_FOR_neon_vcagtv4hf = 5725,
+ CODE_FOR_neon_vcagev4hf = 5726,
+ CODE_FOR_neon_vcaltv4hf = 5727,
+ CODE_FOR_neon_vcalev4hf = 5728,
+ CODE_FOR_neon_vceqzv8hf = 5729,
+ CODE_FOR_neon_vcgtzv8hf = 5730,
+ CODE_FOR_neon_vcgezv8hf = 5731,
+ CODE_FOR_neon_vclezv8hf = 5732,
+ CODE_FOR_neon_vcltzv8hf = 5733,
+ CODE_FOR_neon_vceqzv4hf = 5734,
+ CODE_FOR_neon_vcgtzv4hf = 5735,
+ CODE_FOR_neon_vcgezv4hf = 5736,
+ CODE_FOR_neon_vclezv4hf = 5737,
+ CODE_FOR_neon_vcltzv4hf = 5738,
+ CODE_FOR_ssadv16qi = 5739,
+ CODE_FOR_usadv16qi = 5740,
+ CODE_FOR_neon_vpaddv8qi = 5741,
+ CODE_FOR_neon_vpaddv4hi = 5742,
+ CODE_FOR_neon_vpaddv2si = 5743,
+ CODE_FOR_neon_vpaddv2sf = 5744,
+ CODE_FOR_neon_vabsv8qi = 5745,
+ CODE_FOR_neon_vabsv16qi = 5746,
+ CODE_FOR_neon_vabsv4hi = 5747,
+ CODE_FOR_neon_vabsv8hi = 5748,
+ CODE_FOR_neon_vabsv2si = 5749,
+ CODE_FOR_neon_vabsv4si = 5750,
+ CODE_FOR_neon_vabsv2sf = 5751,
+ CODE_FOR_neon_vabsv4sf = 5752,
+ CODE_FOR_neon_vnegv8qi = 5753,
+ CODE_FOR_neon_vnegv16qi = 5754,
+ CODE_FOR_neon_vnegv4hi = 5755,
+ CODE_FOR_neon_vnegv8hi = 5756,
+ CODE_FOR_neon_vnegv2si = 5757,
+ CODE_FOR_neon_vnegv4si = 5758,
+ CODE_FOR_neon_vnegv2sf = 5759,
+ CODE_FOR_neon_vnegv4sf = 5760,
+ CODE_FOR_cmulv2sf3 = 5761,
+ CODE_FOR_cmul_conjv2sf3 = 5762,
+ CODE_FOR_cmulv4hf3 = 5763,
+ CODE_FOR_cmul_conjv4hf3 = 5764,
+ CODE_FOR_neon_sdotv8qi = 5765,
+ CODE_FOR_neon_udotv8qi = 5766,
+ CODE_FOR_neon_sdotv16qi = 5767,
+ CODE_FOR_neon_udotv16qi = 5768,
+ CODE_FOR_usdot_prodv8qi = 5769,
+ CODE_FOR_usdot_prodv16qi = 5770,
+ CODE_FOR_copysignv2sf3 = 5771,
+ CODE_FOR_copysignv4sf3 = 5772,
+ CODE_FOR_neon_vcntv8qi = 5773,
+ CODE_FOR_neon_vcntv16qi = 5774,
+ CODE_FOR_neon_vmvnv8qi = 5775,
+ CODE_FOR_neon_vmvnv16qi = 5776,
+ CODE_FOR_neon_vmvnv4hi = 5777,
+ CODE_FOR_neon_vmvnv8hi = 5778,
+ CODE_FOR_neon_vmvnv2si = 5779,
+ CODE_FOR_neon_vmvnv4si = 5780,
+ CODE_FOR_neon_vget_lanev8qi = 5781,
+ CODE_FOR_neon_vget_lanev16qi = 5782,
+ CODE_FOR_neon_vget_lanev4hi = 5783,
+ CODE_FOR_neon_vget_lanev8hi = 5784,
+ CODE_FOR_neon_vget_lanev2si = 5785,
+ CODE_FOR_neon_vget_lanev4si = 5786,
+ CODE_FOR_neon_vget_lanev2sf = 5787,
+ CODE_FOR_neon_vget_lanev4sf = 5788,
+ CODE_FOR_neon_vget_laneuv8qi = 5789,
+ CODE_FOR_neon_vget_laneuv16qi = 5790,
+ CODE_FOR_neon_vget_laneuv4hi = 5791,
+ CODE_FOR_neon_vget_laneuv8hi = 5792,
+ CODE_FOR_neon_vget_laneuv2si = 5793,
+ CODE_FOR_neon_vget_laneuv4si = 5794,
+ CODE_FOR_neon_vget_lanedi = 5795,
+ CODE_FOR_neon_vget_lanev2di = 5796,
+ CODE_FOR_neon_vset_lanev8qi = 5797,
+ CODE_FOR_neon_vset_lanev16qi = 5798,
+ CODE_FOR_neon_vset_lanev4hi = 5799,
+ CODE_FOR_neon_vset_lanev8hi = 5800,
+ CODE_FOR_neon_vset_lanev2si = 5801,
+ CODE_FOR_neon_vset_lanev4si = 5802,
+ CODE_FOR_neon_vset_lanev4hf = 5803,
+ CODE_FOR_neon_vset_lanev8hf = 5804,
+ CODE_FOR_neon_vset_lanev2sf = 5805,
+ CODE_FOR_neon_vset_lanev4sf = 5806,
+ CODE_FOR_neon_vset_lanev2di = 5807,
+ CODE_FOR_neon_vset_lanedi = 5808,
+ CODE_FOR_neon_vcreatev8qi = 5809,
+ CODE_FOR_neon_vcreatev4hi = 5810,
+ CODE_FOR_neon_vcreatev2si = 5811,
+ CODE_FOR_neon_vcreatev2sf = 5812,
+ CODE_FOR_neon_vcreatedi = 5813,
+ CODE_FOR_neon_vdup_ndi = 5814,
+ CODE_FOR_neon_vdup_lanev8qi = 5815,
+ CODE_FOR_neon_vdup_lanev16qi = 5816,
+ CODE_FOR_neon_vdup_lanev4hi = 5817,
+ CODE_FOR_neon_vdup_lanev8hi = 5818,
+ CODE_FOR_neon_vdup_lanev2si = 5819,
+ CODE_FOR_neon_vdup_lanev4si = 5820,
+ CODE_FOR_neon_vdup_lanev2sf = 5821,
+ CODE_FOR_neon_vdup_lanev4sf = 5822,
+ CODE_FOR_neon_vdup_lanev8hf = 5823,
+ CODE_FOR_neon_vdup_lanev4hf = 5824,
+ CODE_FOR_neon_vdup_lanev4bf = 5825,
+ CODE_FOR_neon_vdup_lanev8bf = 5826,
+ CODE_FOR_neon_vdup_lanedi = 5827,
+ CODE_FOR_neon_vdup_lanev2di = 5828,
+ CODE_FOR_neon_vget_highv16qi = 5829,
+ CODE_FOR_neon_vget_highv8hi = 5830,
+ CODE_FOR_neon_vget_highv8hf = 5831,
+ CODE_FOR_neon_vget_highv8bf = 5832,
+ CODE_FOR_neon_vget_highv4si = 5833,
+ CODE_FOR_neon_vget_highv4sf = 5834,
+ CODE_FOR_neon_vget_highv2di = 5835,
+ CODE_FOR_neon_vget_lowv16qi = 5836,
+ CODE_FOR_neon_vget_lowv8hi = 5837,
+ CODE_FOR_neon_vget_lowv8hf = 5838,
+ CODE_FOR_neon_vget_lowv8bf = 5839,
+ CODE_FOR_neon_vget_lowv4si = 5840,
+ CODE_FOR_neon_vget_lowv4sf = 5841,
+ CODE_FOR_neon_vget_lowv2di = 5842,
+ CODE_FOR_neon_vmul_nv4hi = 5843,
+ CODE_FOR_neon_vmul_nv2si = 5844,
+ CODE_FOR_neon_vmul_nv2sf = 5845,
+ CODE_FOR_neon_vmul_nv8hi = 5846,
+ CODE_FOR_neon_vmul_nv4si = 5847,
+ CODE_FOR_neon_vmul_nv4sf = 5848,
+ CODE_FOR_neon_vmul_nv8hf = 5849,
+ CODE_FOR_neon_vmul_nv4hf = 5850,
+ CODE_FOR_neon_vmulls_nv4hi = 5851,
+ CODE_FOR_neon_vmulls_nv2si = 5852,
+ CODE_FOR_neon_vmullu_nv4hi = 5853,
+ CODE_FOR_neon_vmullu_nv2si = 5854,
+ CODE_FOR_neon_vqdmull_nv4hi = 5855,
+ CODE_FOR_neon_vqdmull_nv2si = 5856,
+ CODE_FOR_neon_vqdmulh_nv4hi = 5857,
+ CODE_FOR_neon_vqdmulh_nv2si = 5858,
+ CODE_FOR_neon_vqrdmulh_nv4hi = 5859,
+ CODE_FOR_neon_vqrdmulh_nv2si = 5860,
+ CODE_FOR_neon_vqdmulh_nv8hi = 5861,
+ CODE_FOR_neon_vqdmulh_nv4si = 5862,
+ CODE_FOR_neon_vqrdmulh_nv8hi = 5863,
+ CODE_FOR_neon_vqrdmulh_nv4si = 5864,
+ CODE_FOR_neon_vmla_nv4hi = 5865,
+ CODE_FOR_neon_vmla_nv2si = 5866,
+ CODE_FOR_neon_vmla_nv2sf = 5867,
+ CODE_FOR_neon_vmla_nv8hi = 5868,
+ CODE_FOR_neon_vmla_nv4si = 5869,
+ CODE_FOR_neon_vmla_nv4sf = 5870,
+ CODE_FOR_neon_vmlals_nv4hi = 5871,
+ CODE_FOR_neon_vmlals_nv2si = 5872,
+ CODE_FOR_neon_vmlalu_nv4hi = 5873,
+ CODE_FOR_neon_vmlalu_nv2si = 5874,
+ CODE_FOR_neon_vqdmlal_nv4hi = 5875,
+ CODE_FOR_neon_vqdmlal_nv2si = 5876,
+ CODE_FOR_neon_vmls_nv4hi = 5877,
+ CODE_FOR_neon_vmls_nv2si = 5878,
+ CODE_FOR_neon_vmls_nv2sf = 5879,
+ CODE_FOR_neon_vmls_nv8hi = 5880,
+ CODE_FOR_neon_vmls_nv4si = 5881,
+ CODE_FOR_neon_vmls_nv4sf = 5882,
+ CODE_FOR_neon_vmlsls_nv4hi = 5883,
+ CODE_FOR_neon_vmlsls_nv2si = 5884,
+ CODE_FOR_neon_vmlslu_nv4hi = 5885,
+ CODE_FOR_neon_vmlslu_nv2si = 5886,
+ CODE_FOR_neon_vqdmlsl_nv4hi = 5887,
+ CODE_FOR_neon_vqdmlsl_nv2si = 5888,
+ CODE_FOR_neon_vbslv8qi = 5889,
+ CODE_FOR_neon_vbslv16qi = 5890,
+ CODE_FOR_neon_vbslv4hi = 5891,
+ CODE_FOR_neon_vbslv8hi = 5892,
+ CODE_FOR_neon_vbslv2si = 5893,
+ CODE_FOR_neon_vbslv4si = 5894,
+ CODE_FOR_neon_vbslv4hf = 5895,
+ CODE_FOR_neon_vbslv8hf = 5896,
+ CODE_FOR_neon_vbslv4bf = 5897,
+ CODE_FOR_neon_vbslv8bf = 5898,
+ CODE_FOR_neon_vbslv2sf = 5899,
+ CODE_FOR_neon_vbslv4sf = 5900,
+ CODE_FOR_neon_vbsldi = 5901,
+ CODE_FOR_neon_vbslv2di = 5902,
+ CODE_FOR_neon_vtrnv8qi_internal = 5903,
+ CODE_FOR_neon_vtrnv16qi_internal = 5904,
+ CODE_FOR_neon_vtrnv4hi_internal = 5905,
+ CODE_FOR_neon_vtrnv8hi_internal = 5906,
+ CODE_FOR_neon_vtrnv2si_internal = 5907,
+ CODE_FOR_neon_vtrnv4si_internal = 5908,
+ CODE_FOR_neon_vtrnv2sf_internal = 5909,
+ CODE_FOR_neon_vtrnv4sf_internal = 5910,
+ CODE_FOR_neon_vtrnv8hf_internal = 5911,
+ CODE_FOR_neon_vtrnv4hf_internal = 5912,
+ CODE_FOR_neon_vzipv8qi_internal = 5913,
+ CODE_FOR_neon_vzipv16qi_internal = 5914,
+ CODE_FOR_neon_vzipv4hi_internal = 5915,
+ CODE_FOR_neon_vzipv8hi_internal = 5916,
+ CODE_FOR_neon_vzipv2si_internal = 5917,
+ CODE_FOR_neon_vzipv4si_internal = 5918,
+ CODE_FOR_neon_vzipv2sf_internal = 5919,
+ CODE_FOR_neon_vzipv4sf_internal = 5920,
+ CODE_FOR_neon_vzipv8hf_internal = 5921,
+ CODE_FOR_neon_vzipv4hf_internal = 5922,
+ CODE_FOR_neon_vuzpv8qi_internal = 5923,
+ CODE_FOR_neon_vuzpv16qi_internal = 5924,
+ CODE_FOR_neon_vuzpv4hi_internal = 5925,
+ CODE_FOR_neon_vuzpv8hi_internal = 5926,
+ CODE_FOR_neon_vuzpv2si_internal = 5927,
+ CODE_FOR_neon_vuzpv4si_internal = 5928,
+ CODE_FOR_neon_vuzpv2sf_internal = 5929,
+ CODE_FOR_neon_vuzpv4sf_internal = 5930,
+ CODE_FOR_neon_vuzpv8hf_internal = 5931,
+ CODE_FOR_neon_vuzpv4hf_internal = 5932,
+ CODE_FOR_vec_load_lanesv8qiv8qi = 5933,
+ CODE_FOR_vec_load_lanesv16qiv16qi = 5934,
+ CODE_FOR_vec_load_lanesv4hiv4hi = 5935,
+ CODE_FOR_vec_load_lanesv8hiv8hi = 5936,
+ CODE_FOR_vec_load_lanesv2siv2si = 5937,
+ CODE_FOR_vec_load_lanesv4siv4si = 5938,
+ CODE_FOR_vec_load_lanesv4hfv4hf = 5939,
+ CODE_FOR_vec_load_lanesv8hfv8hf = 5940,
+ CODE_FOR_vec_load_lanesv4bfv4bf = 5941,
+ CODE_FOR_vec_load_lanesv8bfv8bf = 5942,
+ CODE_FOR_vec_load_lanesv2sfv2sf = 5943,
+ CODE_FOR_vec_load_lanesv4sfv4sf = 5944,
+ CODE_FOR_vec_load_lanesdidi = 5945,
+ CODE_FOR_vec_load_lanesv2div2di = 5946,
+ CODE_FOR_neon_vld1_dupdi = 5947,
+ CODE_FOR_vec_store_lanesv8qiv8qi = 5948,
+ CODE_FOR_vec_store_lanesv16qiv16qi = 5949,
+ CODE_FOR_vec_store_lanesv4hiv4hi = 5950,
+ CODE_FOR_vec_store_lanesv8hiv8hi = 5951,
+ CODE_FOR_vec_store_lanesv2siv2si = 5952,
+ CODE_FOR_vec_store_lanesv4siv4si = 5953,
+ CODE_FOR_vec_store_lanesv4hfv4hf = 5954,
+ CODE_FOR_vec_store_lanesv8hfv8hf = 5955,
+ CODE_FOR_vec_store_lanesv4bfv4bf = 5956,
+ CODE_FOR_vec_store_lanesv8bfv8bf = 5957,
+ CODE_FOR_vec_store_lanesv2sfv2sf = 5958,
+ CODE_FOR_vec_store_lanesv4sfv4sf = 5959,
+ CODE_FOR_vec_store_lanesdidi = 5960,
+ CODE_FOR_vec_store_lanesv2div2di = 5961,
+ CODE_FOR_vec_load_lanestiv8qi = 5962,
+ CODE_FOR_vec_load_lanestiv4hi = 5963,
+ CODE_FOR_vec_load_lanestiv4hf = 5964,
+ CODE_FOR_vec_load_lanestiv4bf = 5965,
+ CODE_FOR_vec_load_lanestiv2si = 5966,
+ CODE_FOR_vec_load_lanestiv2sf = 5967,
+ CODE_FOR_vec_load_lanestidi = 5968,
+ CODE_FOR_vec_store_lanestiv8qi = 5969,
+ CODE_FOR_vec_store_lanestiv4hi = 5970,
+ CODE_FOR_vec_store_lanestiv4hf = 5971,
+ CODE_FOR_vec_store_lanestiv4bf = 5972,
+ CODE_FOR_vec_store_lanestiv2si = 5973,
+ CODE_FOR_vec_store_lanestiv2sf = 5974,
+ CODE_FOR_vec_store_lanestidi = 5975,
+ CODE_FOR_vec_load_laneseiv8qi = 5976,
+ CODE_FOR_vec_load_laneseiv4hi = 5977,
+ CODE_FOR_vec_load_laneseiv4hf = 5978,
+ CODE_FOR_vec_load_laneseiv4bf = 5979,
+ CODE_FOR_vec_load_laneseiv2si = 5980,
+ CODE_FOR_vec_load_laneseiv2sf = 5981,
+ CODE_FOR_vec_load_laneseidi = 5982,
+ CODE_FOR_vec_load_lanesciv16qi = 5983,
+ CODE_FOR_vec_load_lanesciv8hi = 5984,
+ CODE_FOR_vec_load_lanesciv8hf = 5985,
+ CODE_FOR_vec_load_lanesciv4si = 5986,
+ CODE_FOR_vec_load_lanesciv4sf = 5987,
+ CODE_FOR_neon_vld3v16qi = 5988,
+ CODE_FOR_neon_vld3v8hi = 5989,
+ CODE_FOR_neon_vld3v8hf = 5990,
+ CODE_FOR_neon_vld3v8bf = 5991,
+ CODE_FOR_neon_vld3v4si = 5992,
+ CODE_FOR_neon_vld3v4sf = 5993,
+ CODE_FOR_vec_store_laneseiv8qi = 5994,
+ CODE_FOR_vec_store_laneseiv4hi = 5995,
+ CODE_FOR_vec_store_laneseiv4hf = 5996,
+ CODE_FOR_vec_store_laneseiv4bf = 5997,
+ CODE_FOR_vec_store_laneseiv2si = 5998,
+ CODE_FOR_vec_store_laneseiv2sf = 5999,
+ CODE_FOR_vec_store_laneseidi = 6000,
+ CODE_FOR_vec_store_lanesciv16qi = 6001,
+ CODE_FOR_vec_store_lanesciv8hi = 6002,
+ CODE_FOR_vec_store_lanesciv8hf = 6003,
+ CODE_FOR_vec_store_lanesciv4si = 6004,
+ CODE_FOR_vec_store_lanesciv4sf = 6005,
+ CODE_FOR_neon_vst3v16qi = 6006,
+ CODE_FOR_neon_vst3v8hi = 6007,
+ CODE_FOR_neon_vst3v8hf = 6008,
+ CODE_FOR_neon_vst3v8bf = 6009,
+ CODE_FOR_neon_vst3v4si = 6010,
+ CODE_FOR_neon_vst3v4sf = 6011,
+ CODE_FOR_vec_load_lanesoiv8qi = 6012,
+ CODE_FOR_vec_load_lanesoiv4hi = 6013,
+ CODE_FOR_vec_load_lanesoiv4hf = 6014,
+ CODE_FOR_vec_load_lanesoiv4bf = 6015,
+ CODE_FOR_vec_load_lanesoiv2si = 6016,
+ CODE_FOR_vec_load_lanesoiv2sf = 6017,
+ CODE_FOR_vec_load_lanesoidi = 6018,
+ CODE_FOR_neon_vld4v16qi = 6019,
+ CODE_FOR_neon_vld4v8hi = 6020,
+ CODE_FOR_neon_vld4v8hf = 6021,
+ CODE_FOR_neon_vld4v8bf = 6022,
+ CODE_FOR_neon_vld4v4si = 6023,
+ CODE_FOR_neon_vld4v4sf = 6024,
+ CODE_FOR_vec_store_lanesoiv8qi = 6025,
+ CODE_FOR_vec_store_lanesoiv4hi = 6026,
+ CODE_FOR_vec_store_lanesoiv4hf = 6027,
+ CODE_FOR_vec_store_lanesoiv4bf = 6028,
+ CODE_FOR_vec_store_lanesoiv2si = 6029,
+ CODE_FOR_vec_store_lanesoiv2sf = 6030,
+ CODE_FOR_vec_store_lanesoidi = 6031,
+ CODE_FOR_neon_vst4v16qi = 6032,
+ CODE_FOR_neon_vst4v8hi = 6033,
+ CODE_FOR_neon_vst4v8hf = 6034,
+ CODE_FOR_neon_vst4v8bf = 6035,
+ CODE_FOR_neon_vst4v4si = 6036,
+ CODE_FOR_neon_vst4v4sf = 6037,
+ CODE_FOR_vec_unpacks_hi_v16qi = 6038,
+ CODE_FOR_vec_unpacku_hi_v16qi = 6039,
+ CODE_FOR_vec_unpacks_hi_v8hi = 6040,
+ CODE_FOR_vec_unpacku_hi_v8hi = 6041,
+ CODE_FOR_vec_unpacks_hi_v4si = 6042,
+ CODE_FOR_vec_unpacku_hi_v4si = 6043,
+ CODE_FOR_vec_unpacks_lo_v16qi = 6044,
+ CODE_FOR_vec_unpacku_lo_v16qi = 6045,
+ CODE_FOR_vec_unpacks_lo_v8hi = 6046,
+ CODE_FOR_vec_unpacku_lo_v8hi = 6047,
+ CODE_FOR_vec_unpacks_lo_v4si = 6048,
+ CODE_FOR_vec_unpacku_lo_v4si = 6049,
+ CODE_FOR_vec_widen_smult_lo_v16qi = 6050,
+ CODE_FOR_vec_widen_umult_lo_v16qi = 6051,
+ CODE_FOR_vec_widen_smult_lo_v8hi = 6052,
+ CODE_FOR_vec_widen_umult_lo_v8hi = 6053,
+ CODE_FOR_vec_widen_smult_lo_v4si = 6054,
+ CODE_FOR_vec_widen_umult_lo_v4si = 6055,
+ CODE_FOR_vec_widen_smult_hi_v16qi = 6056,
+ CODE_FOR_vec_widen_umult_hi_v16qi = 6057,
+ CODE_FOR_vec_widen_smult_hi_v8hi = 6058,
+ CODE_FOR_vec_widen_umult_hi_v8hi = 6059,
+ CODE_FOR_vec_widen_smult_hi_v4si = 6060,
+ CODE_FOR_vec_widen_umult_hi_v4si = 6061,
+ CODE_FOR_vec_widen_sshiftl_lo_v16qi = 6062,
+ CODE_FOR_vec_widen_ushiftl_lo_v16qi = 6063,
+ CODE_FOR_vec_widen_sshiftl_lo_v8hi = 6064,
+ CODE_FOR_vec_widen_ushiftl_lo_v8hi = 6065,
+ CODE_FOR_vec_widen_sshiftl_lo_v4si = 6066,
+ CODE_FOR_vec_widen_ushiftl_lo_v4si = 6067,
+ CODE_FOR_vec_widen_sshiftl_hi_v16qi = 6068,
+ CODE_FOR_vec_widen_ushiftl_hi_v16qi = 6069,
+ CODE_FOR_vec_widen_sshiftl_hi_v8hi = 6070,
+ CODE_FOR_vec_widen_ushiftl_hi_v8hi = 6071,
+ CODE_FOR_vec_widen_sshiftl_hi_v4si = 6072,
+ CODE_FOR_vec_widen_ushiftl_hi_v4si = 6073,
+ CODE_FOR_vec_unpacks_lo_v8qi = 6074,
+ CODE_FOR_vec_unpacku_lo_v8qi = 6075,
+ CODE_FOR_vec_unpacks_lo_v4hi = 6076,
+ CODE_FOR_vec_unpacku_lo_v4hi = 6077,
+ CODE_FOR_vec_unpacks_lo_v2si = 6078,
+ CODE_FOR_vec_unpacku_lo_v2si = 6079,
+ CODE_FOR_vec_unpacks_hi_v8qi = 6080,
+ CODE_FOR_vec_unpacku_hi_v8qi = 6081,
+ CODE_FOR_vec_unpacks_hi_v4hi = 6082,
+ CODE_FOR_vec_unpacku_hi_v4hi = 6083,
+ CODE_FOR_vec_unpacks_hi_v2si = 6084,
+ CODE_FOR_vec_unpacku_hi_v2si = 6085,
+ CODE_FOR_vec_widen_smult_hi_v8qi = 6086,
+ CODE_FOR_vec_widen_umult_hi_v8qi = 6087,
+ CODE_FOR_vec_widen_smult_hi_v4hi = 6088,
+ CODE_FOR_vec_widen_umult_hi_v4hi = 6089,
+ CODE_FOR_vec_widen_smult_hi_v2si = 6090,
+ CODE_FOR_vec_widen_umult_hi_v2si = 6091,
+ CODE_FOR_vec_widen_smult_lo_v8qi = 6092,
+ CODE_FOR_vec_widen_umult_lo_v8qi = 6093,
+ CODE_FOR_vec_widen_smult_lo_v4hi = 6094,
+ CODE_FOR_vec_widen_umult_lo_v4hi = 6095,
+ CODE_FOR_vec_widen_smult_lo_v2si = 6096,
+ CODE_FOR_vec_widen_umult_lo_v2si = 6097,
+ CODE_FOR_vec_widen_sshiftl_hi_v8qi = 6098,
+ CODE_FOR_vec_widen_ushiftl_hi_v8qi = 6099,
+ CODE_FOR_vec_widen_sshiftl_hi_v4hi = 6100,
+ CODE_FOR_vec_widen_ushiftl_hi_v4hi = 6101,
+ CODE_FOR_vec_widen_sshiftl_hi_v2si = 6102,
+ CODE_FOR_vec_widen_ushiftl_hi_v2si = 6103,
+ CODE_FOR_vec_widen_sshiftl_lo_v8qi = 6104,
+ CODE_FOR_vec_widen_ushiftl_lo_v8qi = 6105,
+ CODE_FOR_vec_widen_sshiftl_lo_v4hi = 6106,
+ CODE_FOR_vec_widen_ushiftl_lo_v4hi = 6107,
+ CODE_FOR_vec_widen_sshiftl_lo_v2si = 6108,
+ CODE_FOR_vec_widen_ushiftl_lo_v2si = 6109,
+ CODE_FOR_vec_pack_trunc_v4hi = 6110,
+ CODE_FOR_vec_pack_trunc_v2si = 6111,
+ CODE_FOR_vec_pack_trunc_di = 6112,
+ CODE_FOR_neon_vbfcvtbf = 6113,
+ CODE_FOR_neon_vfmab_laneqv8bf = 6114,
+ CODE_FOR_neon_vfmat_laneqv8bf = 6115,
+ CODE_FOR_crypto_aesd = 6116,
+ CODE_FOR_crypto_aese = 6117,
+ CODE_FOR_crypto_sha1h = 6118,
+ CODE_FOR_crypto_sha1c = 6119,
+ CODE_FOR_crypto_sha1m = 6120,
+ CODE_FOR_crypto_sha1p = 6121,
+ CODE_FOR_memory_barrier = 6122,
+ CODE_FOR_atomic_loaddi = 6123,
+ CODE_FOR_atomic_compare_and_swapqi = 6124,
+ CODE_FOR_atomic_compare_and_swaphi = 6125,
+ CODE_FOR_atomic_compare_and_swapsi = 6126,
+ CODE_FOR_atomic_compare_and_swapdi = 6127,
+ CODE_FOR_addv4qq3 = 6128,
+ CODE_FOR_addv2hq3 = 6129,
+ CODE_FOR_addv2ha3 = 6130,
+ CODE_FOR_ssaddv4qq3 = 6131,
+ CODE_FOR_ssaddv2hq3 = 6132,
+ CODE_FOR_ssaddqq3 = 6133,
+ CODE_FOR_ssaddhq3 = 6134,
+ CODE_FOR_ssaddv2ha3 = 6135,
+ CODE_FOR_ssaddha3 = 6136,
+ CODE_FOR_ssaddsq3 = 6137,
+ CODE_FOR_ssaddsa3 = 6138,
+ CODE_FOR_subv4qq3 = 6139,
+ CODE_FOR_subv2hq3 = 6140,
+ CODE_FOR_subv2ha3 = 6141,
+ CODE_FOR_sssubv4qq3 = 6142,
+ CODE_FOR_sssubv2hq3 = 6143,
+ CODE_FOR_sssubqq3 = 6144,
+ CODE_FOR_sssubhq3 = 6145,
+ CODE_FOR_sssubv2ha3 = 6146,
+ CODE_FOR_sssubha3 = 6147,
+ CODE_FOR_sssubsq3 = 6148,
+ CODE_FOR_sssubsa3 = 6149,
+ CODE_FOR_mulqq3 = 6150,
+ CODE_FOR_mulhq3 = 6151,
+ CODE_FOR_mulsq3 = 6152,
+ CODE_FOR_mulsa3 = 6153,
+ CODE_FOR_mulusa3 = 6154,
+ CODE_FOR_ssmulsa3 = 6155,
+ CODE_FOR_usmulusa3 = 6156,
+ CODE_FOR_mulha3 = 6157,
+ CODE_FOR_muluha3 = 6158,
+ CODE_FOR_ssmulha3 = 6159,
+ CODE_FOR_usmuluha3 = 6160,
+ CODE_FOR_mve_vmvnq_sv16qi = 6161,
+ CODE_FOR_mve_vmvnq_sv8hi = 6162,
+ CODE_FOR_mve_vmvnq_sv4si = 6163,
+ CODE_FOR_mve_vclzq_uv16qi = 6164,
+ CODE_FOR_mve_vclzq_uv8hi = 6165,
+ CODE_FOR_mve_vclzq_uv4si = 6166,
+ CODE_FOR_mve_vandq_sv16qi = 6167,
+ CODE_FOR_mve_vandq_sv8hi = 6168,
+ CODE_FOR_mve_vandq_sv4si = 6169,
+ CODE_FOR_mve_vbicq_sv16qi = 6170,
+ CODE_FOR_mve_vbicq_sv8hi = 6171,
+ CODE_FOR_mve_vbicq_sv4si = 6172,
+ CODE_FOR_cadd90v16qi3 = 6173,
+ CODE_FOR_cadd270v16qi3 = 6174,
+ CODE_FOR_cadd90v8hi3 = 6175,
+ CODE_FOR_cadd270v8hi3 = 6176,
+ CODE_FOR_cadd90v4si3 = 6177,
+ CODE_FOR_cadd270v4si3 = 6178,
+ CODE_FOR_mve_veorq_sv16qi = 6179,
+ CODE_FOR_mve_veorq_sv8hi = 6180,
+ CODE_FOR_mve_veorq_sv4si = 6181,
+ CODE_FOR_mve_vornq_uv16qi = 6182,
+ CODE_FOR_mve_vornq_uv8hi = 6183,
+ CODE_FOR_mve_vornq_uv4si = 6184,
+ CODE_FOR_mve_vorrq_uv16qi = 6185,
+ CODE_FOR_mve_vorrq_uv8hi = 6186,
+ CODE_FOR_mve_vorrq_uv4si = 6187,
+ CODE_FOR_mve_vshlcq_vec_sv16qi = 6188,
+ CODE_FOR_mve_vshlcq_vec_uv16qi = 6189,
+ CODE_FOR_mve_vshlcq_vec_sv8hi = 6190,
+ CODE_FOR_mve_vshlcq_vec_uv8hi = 6191,
+ CODE_FOR_mve_vshlcq_vec_sv4si = 6192,
+ CODE_FOR_mve_vshlcq_vec_uv4si = 6193,
+ CODE_FOR_mve_vshlcq_carry_sv16qi = 6194,
+ CODE_FOR_mve_vshlcq_carry_uv16qi = 6195,
+ CODE_FOR_mve_vshlcq_carry_sv8hi = 6196,
+ CODE_FOR_mve_vshlcq_carry_uv8hi = 6197,
+ CODE_FOR_mve_vshlcq_carry_sv4si = 6198,
+ CODE_FOR_mve_vshlcq_carry_uv4si = 6199,
+ CODE_FOR_mve_vstrbq_scatter_offset_sv16qi = 6200,
+ CODE_FOR_mve_vstrbq_scatter_offset_uv16qi = 6201,
+ CODE_FOR_mve_vstrbq_scatter_offset_sv8hi = 6202,
+ CODE_FOR_mve_vstrbq_scatter_offset_uv8hi = 6203,
+ CODE_FOR_mve_vstrbq_scatter_offset_sv4si = 6204,
+ CODE_FOR_mve_vstrbq_scatter_offset_uv4si = 6205,
+ CODE_FOR_mve_vstrbq_scatter_offset_p_sv16qi = 6206,
+ CODE_FOR_mve_vstrbq_scatter_offset_p_uv16qi = 6207,
+ CODE_FOR_mve_vstrbq_scatter_offset_p_sv8hi = 6208,
+ CODE_FOR_mve_vstrbq_scatter_offset_p_uv8hi = 6209,
+ CODE_FOR_mve_vstrbq_scatter_offset_p_sv4si = 6210,
+ CODE_FOR_mve_vstrbq_scatter_offset_p_uv4si = 6211,
+ CODE_FOR_mve_vld1q_fv8hf = 6212,
+ CODE_FOR_mve_vld1q_fv4sf = 6213,
+ CODE_FOR_mve_vld1q_sv16qi = 6214,
+ CODE_FOR_mve_vld1q_uv16qi = 6215,
+ CODE_FOR_mve_vld1q_sv8hi = 6216,
+ CODE_FOR_mve_vld1q_uv8hi = 6217,
+ CODE_FOR_mve_vld1q_sv4si = 6218,
+ CODE_FOR_mve_vld1q_uv4si = 6219,
+ CODE_FOR_mve_vstrhq_scatter_offset_p_sv8hi = 6220,
+ CODE_FOR_mve_vstrhq_scatter_offset_p_uv8hi = 6221,
+ CODE_FOR_mve_vstrhq_scatter_offset_p_sv4si = 6222,
+ CODE_FOR_mve_vstrhq_scatter_offset_p_uv4si = 6223,
+ CODE_FOR_mve_vstrhq_scatter_offset_sv8hi = 6224,
+ CODE_FOR_mve_vstrhq_scatter_offset_uv8hi = 6225,
+ CODE_FOR_mve_vstrhq_scatter_offset_sv4si = 6226,
+ CODE_FOR_mve_vstrhq_scatter_offset_uv4si = 6227,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_p_sv8hi = 6228,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_p_uv8hi = 6229,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_p_sv4si = 6230,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_p_uv4si = 6231,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_sv8hi = 6232,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_uv8hi = 6233,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_sv4si = 6234,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_uv4si = 6235,
+ CODE_FOR_mve_vst1q_fv8hf = 6236,
+ CODE_FOR_mve_vst1q_fv4sf = 6237,
+ CODE_FOR_mve_vst1q_sv16qi = 6238,
+ CODE_FOR_mve_vst1q_uv16qi = 6239,
+ CODE_FOR_mve_vst1q_sv8hi = 6240,
+ CODE_FOR_mve_vst1q_uv8hi = 6241,
+ CODE_FOR_mve_vst1q_sv4si = 6242,
+ CODE_FOR_mve_vst1q_uv4si = 6243,
+ CODE_FOR_mve_vstrdq_scatter_offset_p_sv2di = 6244,
+ CODE_FOR_mve_vstrdq_scatter_offset_p_uv2di = 6245,
+ CODE_FOR_mve_vstrdq_scatter_offset_sv2di = 6246,
+ CODE_FOR_mve_vstrdq_scatter_offset_uv2di = 6247,
+ CODE_FOR_mve_vstrdq_scatter_shifted_offset_p_sv2di = 6248,
+ CODE_FOR_mve_vstrdq_scatter_shifted_offset_p_uv2di = 6249,
+ CODE_FOR_mve_vstrdq_scatter_shifted_offset_sv2di = 6250,
+ CODE_FOR_mve_vstrdq_scatter_shifted_offset_uv2di = 6251,
+ CODE_FOR_mve_vstrhq_scatter_offset_fv8hf = 6252,
+ CODE_FOR_mve_vstrhq_scatter_offset_p_fv8hf = 6253,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_fv8hf = 6254,
+ CODE_FOR_mve_vstrhq_scatter_shifted_offset_p_fv8hf = 6255,
+ CODE_FOR_mve_vstrwq_scatter_offset_fv4sf = 6256,
+ CODE_FOR_mve_vstrwq_scatter_offset_p_fv4sf = 6257,
+ CODE_FOR_mve_vstrwq_scatter_offset_p_sv4si = 6258,
+ CODE_FOR_mve_vstrwq_scatter_offset_p_uv4si = 6259,
+ CODE_FOR_mve_vstrwq_scatter_offset_sv4si = 6260,
+ CODE_FOR_mve_vstrwq_scatter_offset_uv4si = 6261,
+ CODE_FOR_mve_vstrwq_scatter_shifted_offset_fv4sf = 6262,
+ CODE_FOR_mve_vstrwq_scatter_shifted_offset_p_fv4sf = 6263,
+ CODE_FOR_mve_vstrwq_scatter_shifted_offset_p_sv4si = 6264,
+ CODE_FOR_mve_vstrwq_scatter_shifted_offset_p_uv4si = 6265,
+ CODE_FOR_mve_vstrwq_scatter_shifted_offset_sv4si = 6266,
+ CODE_FOR_mve_vstrwq_scatter_shifted_offset_uv4si = 6267,
+ CODE_FOR_mve_vidupq_n_uv16qi = 6268,
+ CODE_FOR_mve_vidupq_n_uv8hi = 6269,
+ CODE_FOR_mve_vidupq_n_uv4si = 6270,
+ CODE_FOR_mve_vidupq_m_n_uv16qi = 6271,
+ CODE_FOR_mve_vidupq_m_n_uv8hi = 6272,
+ CODE_FOR_mve_vidupq_m_n_uv4si = 6273,
+ CODE_FOR_mve_vddupq_n_uv16qi = 6274,
+ CODE_FOR_mve_vddupq_n_uv8hi = 6275,
+ CODE_FOR_mve_vddupq_n_uv4si = 6276,
+ CODE_FOR_mve_vddupq_m_n_uv16qi = 6277,
+ CODE_FOR_mve_vddupq_m_n_uv8hi = 6278,
+ CODE_FOR_mve_vddupq_m_n_uv4si = 6279,
+ CODE_FOR_mve_vdwdupq_n_uv16qi = 6280,
+ CODE_FOR_mve_vdwdupq_n_uv8hi = 6281,
+ CODE_FOR_mve_vdwdupq_n_uv4si = 6282,
+ CODE_FOR_mve_vdwdupq_wb_uv16qi = 6283,
+ CODE_FOR_mve_vdwdupq_wb_uv8hi = 6284,
+ CODE_FOR_mve_vdwdupq_wb_uv4si = 6285,
+ CODE_FOR_mve_vdwdupq_m_n_uv16qi = 6286,
+ CODE_FOR_mve_vdwdupq_m_n_uv8hi = 6287,
+ CODE_FOR_mve_vdwdupq_m_n_uv4si = 6288,
+ CODE_FOR_mve_vdwdupq_m_wb_uv16qi = 6289,
+ CODE_FOR_mve_vdwdupq_m_wb_uv8hi = 6290,
+ CODE_FOR_mve_vdwdupq_m_wb_uv4si = 6291,
+ CODE_FOR_mve_viwdupq_n_uv16qi = 6292,
+ CODE_FOR_mve_viwdupq_n_uv8hi = 6293,
+ CODE_FOR_mve_viwdupq_n_uv4si = 6294,
+ CODE_FOR_mve_viwdupq_wb_uv16qi = 6295,
+ CODE_FOR_mve_viwdupq_wb_uv8hi = 6296,
+ CODE_FOR_mve_viwdupq_wb_uv4si = 6297,
+ CODE_FOR_mve_viwdupq_m_n_uv16qi = 6298,
+ CODE_FOR_mve_viwdupq_m_n_uv8hi = 6299,
+ CODE_FOR_mve_viwdupq_m_n_uv4si = 6300,
+ CODE_FOR_mve_viwdupq_m_wb_uv16qi = 6301,
+ CODE_FOR_mve_viwdupq_m_wb_uv8hi = 6302,
+ CODE_FOR_mve_viwdupq_m_wb_uv4si = 6303,
+ CODE_FOR_mve_vldrwq_gather_base_wb_sv4si = 6304,
+ CODE_FOR_mve_vldrwq_gather_base_wb_uv4si = 6305,
+ CODE_FOR_mve_vldrwq_gather_base_nowb_sv4si = 6306,
+ CODE_FOR_mve_vldrwq_gather_base_nowb_uv4si = 6307,
+ CODE_FOR_mve_vldrwq_gather_base_wb_z_sv4si = 6308,
+ CODE_FOR_mve_vldrwq_gather_base_wb_z_uv4si = 6309,
+ CODE_FOR_mve_vldrwq_gather_base_nowb_z_sv4si = 6310,
+ CODE_FOR_mve_vldrwq_gather_base_nowb_z_uv4si = 6311,
+ CODE_FOR_mve_vldrwq_gather_base_wb_fv4sf = 6312,
+ CODE_FOR_mve_vldrwq_gather_base_nowb_fv4sf = 6313,
+ CODE_FOR_mve_vldrwq_gather_base_wb_z_fv4sf = 6314,
+ CODE_FOR_mve_vldrwq_gather_base_nowb_z_fv4sf = 6315,
+ CODE_FOR_mve_vldrdq_gather_base_wb_sv2di = 6316,
+ CODE_FOR_mve_vldrdq_gather_base_wb_uv2di = 6317,
+ CODE_FOR_mve_vldrdq_gather_base_nowb_sv2di = 6318,
+ CODE_FOR_mve_vldrdq_gather_base_nowb_uv2di = 6319,
+ CODE_FOR_mve_vldrdq_gather_base_wb_z_sv2di = 6320,
+ CODE_FOR_mve_vldrdq_gather_base_wb_z_uv2di = 6321,
+ CODE_FOR_mve_vldrdq_gather_base_nowb_z_sv2di = 6322,
+ CODE_FOR_mve_vldrdq_gather_base_nowb_z_uv2di = 6323,
+ CODE_FOR_mve_vshlcq_m_vec_sv16qi = 6324,
+ CODE_FOR_mve_vshlcq_m_vec_uv16qi = 6325,
+ CODE_FOR_mve_vshlcq_m_vec_sv8hi = 6326,
+ CODE_FOR_mve_vshlcq_m_vec_uv8hi = 6327,
+ CODE_FOR_mve_vshlcq_m_vec_sv4si = 6328,
+ CODE_FOR_mve_vshlcq_m_vec_uv4si = 6329,
+ CODE_FOR_mve_vshlcq_m_carry_sv16qi = 6330,
+ CODE_FOR_mve_vshlcq_m_carry_uv16qi = 6331,
+ CODE_FOR_mve_vshlcq_m_carry_sv8hi = 6332,
+ CODE_FOR_mve_vshlcq_m_carry_uv8hi = 6333,
+ CODE_FOR_mve_vshlcq_m_carry_sv4si = 6334,
+ CODE_FOR_mve_vshlcq_m_carry_uv4si = 6335,
+ CODE_FOR_movv16bi = 6336,
+ CODE_FOR_movv8bi = 6337,
+ CODE_FOR_movv4bi = 6338,
+ CODE_FOR_movv2qi = 6339,
+ CODE_FOR_vec_cmpv16qiv16bi = 6340,
+ CODE_FOR_vec_cmpv8hiv8bi = 6341,
+ CODE_FOR_vec_cmpv4siv4bi = 6342,
+ CODE_FOR_vec_cmpv8hfv8bi = 6343,
+ CODE_FOR_vec_cmpv4sfv4bi = 6344,
+ CODE_FOR_vec_cmpuv16qiv16bi = 6345,
+ CODE_FOR_vec_cmpuv8hiv8bi = 6346,
+ CODE_FOR_vec_cmpuv4siv4bi = 6347,
+ CODE_FOR_vcond_mask_v16qiv16bi = 6348,
+ CODE_FOR_vcond_mask_v8hiv8bi = 6349,
+ CODE_FOR_vcond_mask_v4siv4bi = 6350,
+ CODE_FOR_vcond_mask_v8hfv8bi = 6351,
+ CODE_FOR_vcond_mask_v4sfv4bi = 6352
+};
+
+const unsigned int NUM_INSN_CODES = 7041;
+#endif /* GCC_INSN_CODES_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-config.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-config.h
new file mode 100644
index 0000000..0551f35
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-config.h
@@ -0,0 +1,20 @@
+/* Generated automatically by the program `genconfig'
+ from the machine description file `md'. */
+
+#ifndef GCC_INSN_CONFIG_H
+#define GCC_INSN_CONFIG_H
+
+#define MAX_RECOG_OPERANDS 30
+#define MAX_DUP_OPERANDS 6
+#ifndef MAX_INSNS_PER_SPLIT
+#define MAX_INSNS_PER_SPLIT 4
+#endif
+#define HAVE_conditional_move 1
+#define HAVE_conditional_execution 1
+#define HAVE_lo_sum 1
+#define HAVE_rotatert 1
+#define HAVE_peephole 0
+#define HAVE_peephole2 1
+#define MAX_INSNS_PER_PEEP2 8
+
+#endif /* GCC_INSN_CONFIG_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-constants.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-constants.h
new file mode 100644
index 0000000..e2c8e7b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-constants.h
@@ -0,0 +1,1214 @@
+/* Generated automatically by the program `genconstants'
+ from the machine description file `md'. */
+
+#ifndef GCC_INSN_CONSTANTS_H
+#define GCC_INSN_CONSTANTS_H
+
+#define WCGR0 96
+#define DOM_CC_X_AND_Y 0
+#define WCGR1 97
+#define SP_REGNUM 13
+#define APSRGE_REGNUM 105
+#define VFPCC_REGNUM 101
+#define CMP_CMN 2
+#define CMP_CMP 0
+#define NUM_OF_COND_CMP 4
+#define R0_REGNUM 0
+#define VPR_REGNUM 106
+#define RA_AUTH_CODE 107
+#define DOM_CC_X_OR_Y 2
+#define APSRQ_REGNUM 104
+#define CMN_CMN 3
+#define CMN_CMP 1
+#define FDPIC_REGNUM 9
+#define CC_REGNUM 100
+#define LAST_ARM_REGNUM 15
+#define PC_REGNUM 15
+#define R4_REGNUM 4
+#define LR_REGNUM 14
+#define R1_REGNUM 1
+#define DOM_CC_NX_OR_Y 1
+#define IP_REGNUM 12
+#define WCGR2 98
+#define WCGR3 99
+
+enum unspec {
+ UNSPEC_PUSH_MULT = 0,
+ UNSPEC_PIC_SYM = 1,
+ UNSPEC_PIC_BASE = 2,
+ UNSPEC_PRLG_STK = 3,
+ UNSPEC_REGISTER_USE = 4,
+ UNSPEC_CHECK_ARCH = 5,
+ UNSPEC_WSHUFH = 6,
+ UNSPEC_WACC = 7,
+ UNSPEC_TMOVMSK = 8,
+ UNSPEC_WSAD = 9,
+ UNSPEC_WSADZ = 10,
+ UNSPEC_WMACS = 11,
+ UNSPEC_WMACU = 12,
+ UNSPEC_WMACSZ = 13,
+ UNSPEC_WMACUZ = 14,
+ UNSPEC_CLRDI = 15,
+ UNSPEC_WALIGNI = 16,
+ UNSPEC_TLS = 17,
+ UNSPEC_PIC_LABEL = 18,
+ UNSPEC_PIC_OFFSET = 19,
+ UNSPEC_GOTSYM_OFF = 20,
+ UNSPEC_THUMB1_CASESI = 21,
+ UNSPEC_RBIT = 22,
+ UNSPEC_SYMBOL_OFFSET = 23,
+ UNSPEC_MEMORY_BARRIER = 24,
+ UNSPEC_UNALIGNED_LOAD = 25,
+ UNSPEC_UNALIGNED_STORE = 26,
+ UNSPEC_PIC_UNIFIED = 27,
+ UNSPEC_Q_SET = 28,
+ UNSPEC_GE_SET = 29,
+ UNSPEC_APSR_READ = 30,
+ UNSPEC_LL = 31,
+ UNSPEC_VRINTZ = 32,
+ UNSPEC_VRINTP = 33,
+ UNSPEC_VRINTM = 34,
+ UNSPEC_VRINTR = 35,
+ UNSPEC_VRINTX = 36,
+ UNSPEC_VRINTA = 37,
+ UNSPEC_PROBE_STACK = 38,
+ UNSPEC_NONSECURE_MEM = 39,
+ UNSPEC_SP_SET = 40,
+ UNSPEC_SP_TEST = 41,
+ UNSPEC_PIC_RESTORE = 42,
+ UNSPEC_SXTAB16 = 43,
+ UNSPEC_UXTAB16 = 44,
+ UNSPEC_SXTB16 = 45,
+ UNSPEC_UXTB16 = 46,
+ UNSPEC_QADD8 = 47,
+ UNSPEC_QSUB8 = 48,
+ UNSPEC_SHADD8 = 49,
+ UNSPEC_SHSUB8 = 50,
+ UNSPEC_UHADD8 = 51,
+ UNSPEC_UHSUB8 = 52,
+ UNSPEC_UQADD8 = 53,
+ UNSPEC_UQSUB8 = 54,
+ UNSPEC_QADD16 = 55,
+ UNSPEC_QASX = 56,
+ UNSPEC_QSAX = 57,
+ UNSPEC_QSUB16 = 58,
+ UNSPEC_SHADD16 = 59,
+ UNSPEC_SHASX = 60,
+ UNSPEC_SHSAX = 61,
+ UNSPEC_SHSUB16 = 62,
+ UNSPEC_UHADD16 = 63,
+ UNSPEC_UHASX = 64,
+ UNSPEC_UHSAX = 65,
+ UNSPEC_UHSUB16 = 66,
+ UNSPEC_UQADD16 = 67,
+ UNSPEC_UQASX = 68,
+ UNSPEC_UQSAX = 69,
+ UNSPEC_UQSUB16 = 70,
+ UNSPEC_SMUSD = 71,
+ UNSPEC_SMUSDX = 72,
+ UNSPEC_USAD8 = 73,
+ UNSPEC_USADA8 = 74,
+ UNSPEC_SMLALD = 75,
+ UNSPEC_SMLALDX = 76,
+ UNSPEC_SMLSLD = 77,
+ UNSPEC_SMLSLDX = 78,
+ UNSPEC_SMLAWB = 79,
+ UNSPEC_SMLAWT = 80,
+ UNSPEC_SEL = 81,
+ UNSPEC_SADD8 = 82,
+ UNSPEC_SSUB8 = 83,
+ UNSPEC_UADD8 = 84,
+ UNSPEC_USUB8 = 85,
+ UNSPEC_SADD16 = 86,
+ UNSPEC_SASX = 87,
+ UNSPEC_SSAX = 88,
+ UNSPEC_SSUB16 = 89,
+ UNSPEC_UADD16 = 90,
+ UNSPEC_UASX = 91,
+ UNSPEC_USAX = 92,
+ UNSPEC_USUB16 = 93,
+ UNSPEC_SMLAD = 94,
+ UNSPEC_SMLADX = 95,
+ UNSPEC_SMLSD = 96,
+ UNSPEC_SMLSDX = 97,
+ UNSPEC_SMUAD = 98,
+ UNSPEC_SMUADX = 99,
+ UNSPEC_SSAT16 = 100,
+ UNSPEC_USAT16 = 101,
+ UNSPEC_CDE = 102,
+ UNSPEC_CDEA = 103,
+ UNSPEC_VCDE = 104,
+ UNSPEC_VCDEA = 105,
+ UNSPEC_DLS = 106,
+ UNSPEC_PAC_NOP = 107,
+ UNSPEC_WADDC = 108,
+ UNSPEC_WABS = 109,
+ UNSPEC_WQMULWMR = 110,
+ UNSPEC_WQMULMR = 111,
+ UNSPEC_WQMULWM = 112,
+ UNSPEC_WQMULM = 113,
+ UNSPEC_WQMIAxyn = 114,
+ UNSPEC_WQMIAxy = 115,
+ UNSPEC_TANDC = 116,
+ UNSPEC_TORC = 117,
+ UNSPEC_TORVSC = 118,
+ UNSPEC_TEXTRC = 119,
+ UNSPEC_GET_FPSCR_NZCVQC = 120,
+ UNSPEC_ASHIFT_SIGNED = 121,
+ UNSPEC_ASHIFT_UNSIGNED = 122,
+ UNSPEC_CRC32B = 123,
+ UNSPEC_CRC32H = 124,
+ UNSPEC_CRC32W = 125,
+ UNSPEC_CRC32CB = 126,
+ UNSPEC_CRC32CH = 127,
+ UNSPEC_CRC32CW = 128,
+ UNSPEC_AESD = 129,
+ UNSPEC_AESE = 130,
+ UNSPEC_AESIMC = 131,
+ UNSPEC_AESMC = 132,
+ UNSPEC_AES_PROTECT = 133,
+ UNSPEC_SHA1C = 134,
+ UNSPEC_SHA1M = 135,
+ UNSPEC_SHA1P = 136,
+ UNSPEC_SHA1H = 137,
+ UNSPEC_SHA1SU0 = 138,
+ UNSPEC_SHA1SU1 = 139,
+ UNSPEC_SHA256H = 140,
+ UNSPEC_SHA256H2 = 141,
+ UNSPEC_SHA256SU0 = 142,
+ UNSPEC_SHA256SU1 = 143,
+ UNSPEC_VMULLP64 = 144,
+ UNSPEC_LOAD_COUNT = 145,
+ UNSPEC_VABAL_S = 146,
+ UNSPEC_VABAL_U = 147,
+ UNSPEC_VABD_F = 148,
+ UNSPEC_VABD_S = 149,
+ UNSPEC_VABD_U = 150,
+ UNSPEC_VABDL_S = 151,
+ UNSPEC_VABDL_U = 152,
+ UNSPEC_VADD = 153,
+ UNSPEC_VADDHN = 154,
+ UNSPEC_VRADDHN = 155,
+ UNSPEC_VADDL_S = 156,
+ UNSPEC_VADDL_U = 157,
+ UNSPEC_VADDW_S = 158,
+ UNSPEC_VADDW_U = 159,
+ UNSPEC_VBSL = 160,
+ UNSPEC_VCAGE = 161,
+ UNSPEC_VCAGT = 162,
+ UNSPEC_VCALE = 163,
+ UNSPEC_VCALT = 164,
+ UNSPEC_VCEQ = 165,
+ UNSPEC_VCGE = 166,
+ UNSPEC_VCGEU = 167,
+ UNSPEC_VCGT = 168,
+ UNSPEC_VCGTU = 169,
+ UNSPEC_VCLS = 170,
+ UNSPEC_VCONCAT = 171,
+ UNSPEC_VCVT = 172,
+ UNSPEC_VCVT_S = 173,
+ UNSPEC_VCVT_U = 174,
+ UNSPEC_VCVT_S_N = 175,
+ UNSPEC_VCVT_U_N = 176,
+ UNSPEC_VCVT_HF_S_N = 177,
+ UNSPEC_VCVT_HF_U_N = 178,
+ UNSPEC_VCVT_SI_S_N = 179,
+ UNSPEC_VCVT_SI_U_N = 180,
+ UNSPEC_VCVTH_S = 181,
+ UNSPEC_VCVTH_U = 182,
+ UNSPEC_VCVTA_S = 183,
+ UNSPEC_VCVTA_U = 184,
+ UNSPEC_VCVTM_S = 185,
+ UNSPEC_VCVTM_U = 186,
+ UNSPEC_VCVTN_S = 187,
+ UNSPEC_VCVTN_U = 188,
+ UNSPEC_VCVTP_S = 189,
+ UNSPEC_VCVTP_U = 190,
+ UNSPEC_VEXT = 191,
+ UNSPEC_VHADD_S = 192,
+ UNSPEC_VHADD_U = 193,
+ UNSPEC_VRHADD_S = 194,
+ UNSPEC_VRHADD_U = 195,
+ UNSPEC_VHSUB_S = 196,
+ UNSPEC_VHSUB_U = 197,
+ UNSPEC_VLD1 = 198,
+ UNSPEC_VLD1_LANE = 199,
+ UNSPEC_VLD2 = 200,
+ UNSPEC_VLD2_DUP = 201,
+ UNSPEC_VLD2_LANE = 202,
+ UNSPEC_VLD3 = 203,
+ UNSPEC_VLD3A = 204,
+ UNSPEC_VLD3B = 205,
+ UNSPEC_VLD3_DUP = 206,
+ UNSPEC_VLD3_LANE = 207,
+ UNSPEC_VLD4 = 208,
+ UNSPEC_VLD4A = 209,
+ UNSPEC_VLD4B = 210,
+ UNSPEC_VLD4_DUP = 211,
+ UNSPEC_VLD4_LANE = 212,
+ UNSPEC_VMAX = 213,
+ UNSPEC_VMAX_U = 214,
+ UNSPEC_VMAXNM = 215,
+ UNSPEC_VMIN = 216,
+ UNSPEC_VMIN_U = 217,
+ UNSPEC_VMINNM = 218,
+ UNSPEC_VMLA = 219,
+ UNSPEC_VMLA_LANE = 220,
+ UNSPEC_VMLAL_S = 221,
+ UNSPEC_VMLAL_U = 222,
+ UNSPEC_VMLAL_S_LANE = 223,
+ UNSPEC_VMLAL_U_LANE = 224,
+ UNSPEC_VMLS = 225,
+ UNSPEC_VMLS_LANE = 226,
+ UNSPEC_VMLSL_S = 227,
+ UNSPEC_VMLSL_U = 228,
+ UNSPEC_VMLSL_S_LANE = 229,
+ UNSPEC_VMLSL_U_LANE = 230,
+ UNSPEC_VMLSL_LANE = 231,
+ UNSPEC_VFMA_LANE = 232,
+ UNSPEC_VFMS_LANE = 233,
+ UNSPEC_VMOVL_S = 234,
+ UNSPEC_VMOVL_U = 235,
+ UNSPEC_VMOVN = 236,
+ UNSPEC_VMUL = 237,
+ UNSPEC_VMULL_P = 238,
+ UNSPEC_VMULL_S = 239,
+ UNSPEC_VMULL_U = 240,
+ UNSPEC_VMUL_LANE = 241,
+ UNSPEC_VMULL_S_LANE = 242,
+ UNSPEC_VMULL_U_LANE = 243,
+ UNSPEC_VPADAL_S = 244,
+ UNSPEC_VPADAL_U = 245,
+ UNSPEC_VPADD = 246,
+ UNSPEC_VPADDL_S = 247,
+ UNSPEC_VPADDL_U = 248,
+ UNSPEC_VPMAX = 249,
+ UNSPEC_VPMAX_U = 250,
+ UNSPEC_VPMIN = 251,
+ UNSPEC_VPMIN_U = 252,
+ UNSPEC_VPSMAX = 253,
+ UNSPEC_VPSMIN = 254,
+ UNSPEC_VPUMAX = 255,
+ UNSPEC_VPUMIN = 256,
+ UNSPEC_VQABS = 257,
+ UNSPEC_VQADD_S = 258,
+ UNSPEC_VQADD_U = 259,
+ UNSPEC_VQDMLAL = 260,
+ UNSPEC_VQDMLAL_LANE = 261,
+ UNSPEC_VQDMLSL = 262,
+ UNSPEC_VQDMLSL_LANE = 263,
+ UNSPEC_VQDMULH = 264,
+ UNSPEC_VQDMULH_LANE = 265,
+ UNSPEC_VQRDMULH = 266,
+ UNSPEC_VQRDMULH_LANE = 267,
+ UNSPEC_VQDMULL = 268,
+ UNSPEC_VQDMULL_LANE = 269,
+ UNSPEC_VQMOVN_S = 270,
+ UNSPEC_VQMOVN_U = 271,
+ UNSPEC_VQMOVUN = 272,
+ UNSPEC_VQNEG = 273,
+ UNSPEC_VQSHL_S = 274,
+ UNSPEC_VQSHL_U = 275,
+ UNSPEC_VQRSHL_S = 276,
+ UNSPEC_VQRSHL_U = 277,
+ UNSPEC_VQSHL_S_N = 278,
+ UNSPEC_VQSHL_U_N = 279,
+ UNSPEC_VQSHLU_N = 280,
+ UNSPEC_VQSHRN_S_N = 281,
+ UNSPEC_VQSHRN_U_N = 282,
+ UNSPEC_VQRSHRN_S_N = 283,
+ UNSPEC_VQRSHRN_U_N = 284,
+ UNSPEC_VQSHRUN_N = 285,
+ UNSPEC_VQRSHRUN_N = 286,
+ UNSPEC_VQSUB_S = 287,
+ UNSPEC_VQSUB_U = 288,
+ UNSPEC_VRECPE = 289,
+ UNSPEC_VRECPS = 290,
+ UNSPEC_VREV16 = 291,
+ UNSPEC_VREV32 = 292,
+ UNSPEC_VREV64 = 293,
+ UNSPEC_VRSQRTE = 294,
+ UNSPEC_VRSQRTS = 295,
+ UNSPEC_VSHL_S = 296,
+ UNSPEC_VSHL_U = 297,
+ UNSPEC_VRSHL_S = 298,
+ UNSPEC_VRSHL_U = 299,
+ UNSPEC_VSHLL_S_N = 300,
+ UNSPEC_VSHLL_U_N = 301,
+ UNSPEC_VSHL_N = 302,
+ UNSPEC_VSHR_S_N = 303,
+ UNSPEC_VSHR_U_N = 304,
+ UNSPEC_VRSHR_S_N = 305,
+ UNSPEC_VRSHR_U_N = 306,
+ UNSPEC_VSHRN_N = 307,
+ UNSPEC_VRSHRN_N = 308,
+ UNSPEC_VSLI = 309,
+ UNSPEC_VSRA_S_N = 310,
+ UNSPEC_VSRA_U_N = 311,
+ UNSPEC_VRSRA_S_N = 312,
+ UNSPEC_VRSRA_U_N = 313,
+ UNSPEC_VSRI = 314,
+ UNSPEC_VST1 = 315,
+ UNSPEC_VST1_LANE = 316,
+ UNSPEC_VST2 = 317,
+ UNSPEC_VST2_LANE = 318,
+ UNSPEC_VST3 = 319,
+ UNSPEC_VST3A = 320,
+ UNSPEC_VST3B = 321,
+ UNSPEC_VST3_LANE = 322,
+ UNSPEC_VST4 = 323,
+ UNSPEC_VST4A = 324,
+ UNSPEC_VST4B = 325,
+ UNSPEC_VST4_LANE = 326,
+ UNSPEC_VSTRUCTDUMMY = 327,
+ UNSPEC_VSUB = 328,
+ UNSPEC_VSUBHN = 329,
+ UNSPEC_VRSUBHN = 330,
+ UNSPEC_VSUBL_S = 331,
+ UNSPEC_VSUBL_U = 332,
+ UNSPEC_VSUBW_S = 333,
+ UNSPEC_VSUBW_U = 334,
+ UNSPEC_VTBL = 335,
+ UNSPEC_VTBX = 336,
+ UNSPEC_VTRN1 = 337,
+ UNSPEC_VTRN2 = 338,
+ UNSPEC_VTST = 339,
+ UNSPEC_VUZP1 = 340,
+ UNSPEC_VUZP2 = 341,
+ UNSPEC_VZIP1 = 342,
+ UNSPEC_VZIP2 = 343,
+ UNSPEC_MISALIGNED_ACCESS = 344,
+ UNSPEC_VCLE = 345,
+ UNSPEC_VCLT = 346,
+ UNSPEC_NVRINTZ = 347,
+ UNSPEC_NVRINTP = 348,
+ UNSPEC_NVRINTM = 349,
+ UNSPEC_NVRINTX = 350,
+ UNSPEC_NVRINTA = 351,
+ UNSPEC_NVRINTN = 352,
+ UNSPEC_VQRDMLAH = 353,
+ UNSPEC_VQRDMLSH = 354,
+ UNSPEC_VRND = 355,
+ UNSPEC_VRNDA = 356,
+ UNSPEC_VRNDI = 357,
+ UNSPEC_VRNDM = 358,
+ UNSPEC_VRNDN = 359,
+ UNSPEC_VRNDP = 360,
+ UNSPEC_VRNDX = 361,
+ UNSPEC_DOT_S = 362,
+ UNSPEC_DOT_U = 363,
+ UNSPEC_DOT_US = 364,
+ UNSPEC_DOT_SU = 365,
+ UNSPEC_VFML_LO = 366,
+ UNSPEC_VFML_HI = 367,
+ UNSPEC_VCADD90 = 368,
+ UNSPEC_VCADD270 = 369,
+ UNSPEC_VCMLA = 370,
+ UNSPEC_VCMLA90 = 371,
+ UNSPEC_VCMLA180 = 372,
+ UNSPEC_VCMLA270 = 373,
+ UNSPEC_VCMLA_CONJ = 374,
+ UNSPEC_VCMLA180_CONJ = 375,
+ UNSPEC_VCMUL = 376,
+ UNSPEC_VCMUL90 = 377,
+ UNSPEC_VCMUL180 = 378,
+ UNSPEC_VCMUL270 = 379,
+ UNSPEC_VCMUL_CONJ = 380,
+ UNSPEC_MATMUL_S = 381,
+ UNSPEC_MATMUL_U = 382,
+ UNSPEC_MATMUL_US = 383,
+ UNSPEC_BFCVT = 384,
+ UNSPEC_BFCVT_HIGH = 385,
+ UNSPEC_BFMMLA = 386,
+ UNSPEC_BFMAB = 387,
+ UNSPEC_BFMAT = 388,
+ VST4Q = 389,
+ VRNDXQ_F = 390,
+ VRNDQ_F = 391,
+ VRNDPQ_F = 392,
+ VRNDNQ_F = 393,
+ VRNDMQ_F = 394,
+ VRNDAQ_F = 395,
+ VREV64Q_F = 396,
+ VDUPQ_N_F = 397,
+ VREV32Q_F = 398,
+ VCVTTQ_F32_F16 = 399,
+ VCVTBQ_F32_F16 = 400,
+ VCVTQ_TO_F_S = 401,
+ VQNEGQ_S = 402,
+ VCVTQ_TO_F_U = 403,
+ VREV16Q_S = 404,
+ VREV16Q_U = 405,
+ VADDLVQ_S = 406,
+ VMVNQ_N_S = 407,
+ VMVNQ_N_U = 408,
+ VCVTAQ_S = 409,
+ VCVTAQ_U = 410,
+ VREV64Q_S = 411,
+ VREV64Q_U = 412,
+ VQABSQ_S = 413,
+ VDUPQ_N_U = 414,
+ VDUPQ_N_S = 415,
+ VCLSQ_S = 416,
+ VADDVQ_S = 417,
+ VADDVQ_U = 418,
+ VREV32Q_U = 419,
+ VREV32Q_S = 420,
+ VMOVLTQ_U = 421,
+ VMOVLTQ_S = 422,
+ VMOVLBQ_S = 423,
+ VMOVLBQ_U = 424,
+ VCVTQ_FROM_F_S = 425,
+ VCVTQ_FROM_F_U = 426,
+ VCVTPQ_S = 427,
+ VCVTPQ_U = 428,
+ VCVTNQ_S = 429,
+ VCVTNQ_U = 430,
+ VCVTMQ_S = 431,
+ VCVTMQ_U = 432,
+ VADDLVQ_U = 433,
+ VCTP = 434,
+ VCTP_M = 435,
+ VPNOT = 436,
+ VCREATEQ_F = 437,
+ VCVTQ_N_TO_F_S = 438,
+ VCVTQ_N_TO_F_U = 439,
+ VBRSRQ_N_F = 440,
+ VSUBQ_N_F = 441,
+ VCREATEQ_U = 442,
+ VCREATEQ_S = 443,
+ VSHRQ_N_S = 444,
+ VSHRQ_N_U = 445,
+ VCVTQ_N_FROM_F_S = 446,
+ VCVTQ_N_FROM_F_U = 447,
+ VADDLVQ_P_S = 448,
+ VADDLVQ_P_U = 449,
+ VSHLQ_S = 450,
+ VSHLQ_U = 451,
+ VABDQ_S = 452,
+ VADDQ_N_S = 453,
+ VADDVAQ_S = 454,
+ VADDVQ_P_S = 455,
+ VBRSRQ_N_S = 456,
+ VHADDQ_S = 457,
+ VHADDQ_N_S = 458,
+ VHSUBQ_S = 459,
+ VHSUBQ_N_S = 460,
+ VMAXQ_S = 461,
+ VMAXVQ_S = 462,
+ VMINQ_S = 463,
+ VMINVQ_S = 464,
+ VMLADAVQ_S = 465,
+ VMULHQ_S = 466,
+ VMULLBQ_INT_S = 467,
+ VMULLTQ_INT_S = 468,
+ VMULQ_S = 469,
+ VMULQ_N_S = 470,
+ VQADDQ_S = 471,
+ VQADDQ_N_S = 472,
+ VQRSHLQ_S = 473,
+ VQRSHLQ_N_S = 474,
+ VQSHLQ_S = 475,
+ VQSHLQ_N_S = 476,
+ VQSHLQ_R_S = 477,
+ VQSUBQ_S = 478,
+ VQSUBQ_N_S = 479,
+ VRHADDQ_S = 480,
+ VRMULHQ_S = 481,
+ VRSHLQ_S = 482,
+ VRSHLQ_N_S = 483,
+ VRSHRQ_N_S = 484,
+ VSHLQ_N_S = 485,
+ VSHLQ_R_S = 486,
+ VSUBQ_S = 487,
+ VSUBQ_N_S = 488,
+ VABDQ_U = 489,
+ VADDQ_N_U = 490,
+ VADDVAQ_U = 491,
+ VADDVQ_P_U = 492,
+ VBRSRQ_N_U = 493,
+ VHADDQ_U = 494,
+ VHADDQ_N_U = 495,
+ VHSUBQ_U = 496,
+ VHSUBQ_N_U = 497,
+ VMAXQ_U = 498,
+ VMAXVQ_U = 499,
+ VMINQ_U = 500,
+ VMINVQ_U = 501,
+ VMLADAVQ_U = 502,
+ VMULHQ_U = 503,
+ VMULLBQ_INT_U = 504,
+ VMULLTQ_INT_U = 505,
+ VMULQ_U = 506,
+ VMULQ_N_U = 507,
+ VQADDQ_U = 508,
+ VQADDQ_N_U = 509,
+ VQRSHLQ_U = 510,
+ VQRSHLQ_N_U = 511,
+ VQSHLQ_U = 512,
+ VQSHLQ_N_U = 513,
+ VQSHLQ_R_U = 514,
+ VQSUBQ_U = 515,
+ VQSUBQ_N_U = 516,
+ VRHADDQ_U = 517,
+ VRMULHQ_U = 518,
+ VRSHLQ_U = 519,
+ VRSHLQ_N_U = 520,
+ VRSHRQ_N_U = 521,
+ VSHLQ_N_U = 522,
+ VSHLQ_R_U = 523,
+ VSUBQ_U = 524,
+ VSUBQ_N_U = 525,
+ VHCADDQ_ROT270_S = 526,
+ VHCADDQ_ROT90_S = 527,
+ VMAXAQ_S = 528,
+ VMAXAVQ_S = 529,
+ VMINAQ_S = 530,
+ VMINAVQ_S = 531,
+ VMLADAVXQ_S = 532,
+ VMLSDAVQ_S = 533,
+ VMLSDAVXQ_S = 534,
+ VQDMULHQ_N_S = 535,
+ VQDMULHQ_S = 536,
+ VQRDMULHQ_N_S = 537,
+ VQRDMULHQ_S = 538,
+ VQSHLUQ_N_S = 539,
+ VABDQ_M_S = 540,
+ VABDQ_M_U = 541,
+ VABDQ_F = 542,
+ VADDQ_N_F = 543,
+ VMAXNMAQ_F = 544,
+ VMAXNMAVQ_F = 545,
+ VMAXNMQ_F = 546,
+ VMAXNMVQ_F = 547,
+ VMINNMAQ_F = 548,
+ VMINNMAVQ_F = 549,
+ VMINNMQ_F = 550,
+ VMINNMVQ_F = 551,
+ VMULQ_F = 552,
+ VMULQ_N_F = 553,
+ VSUBQ_F = 554,
+ VADDLVAQ_U = 555,
+ VADDLVAQ_S = 556,
+ VBICQ_N_U = 557,
+ VBICQ_N_S = 558,
+ VCVTBQ_F16_F32 = 559,
+ VCVTTQ_F16_F32 = 560,
+ VMLALDAVQ_U = 561,
+ VMLALDAVXQ_U = 562,
+ VMLALDAVXQ_S = 563,
+ VMLALDAVQ_S = 564,
+ VMLSLDAVQ_S = 565,
+ VMLSLDAVXQ_S = 566,
+ VMOVNBQ_U = 567,
+ VMOVNBQ_S = 568,
+ VMOVNTQ_U = 569,
+ VMOVNTQ_S = 570,
+ VORRQ_N_S = 571,
+ VORRQ_N_U = 572,
+ VQDMULLBQ_N_S = 573,
+ VQDMULLBQ_S = 574,
+ VQDMULLTQ_N_S = 575,
+ VQDMULLTQ_S = 576,
+ VQMOVNBQ_U = 577,
+ VQMOVNBQ_S = 578,
+ VQMOVUNBQ_S = 579,
+ VQMOVUNTQ_S = 580,
+ VRMLALDAVHXQ_S = 581,
+ VRMLSLDAVHQ_S = 582,
+ VRMLSLDAVHXQ_S = 583,
+ VSHLLBQ_S = 584,
+ VSHLLBQ_U = 585,
+ VSHLLTQ_U = 586,
+ VSHLLTQ_S = 587,
+ VQMOVNTQ_U = 588,
+ VQMOVNTQ_S = 589,
+ VSHLLBQ_N_S = 590,
+ VSHLLBQ_N_U = 591,
+ VSHLLTQ_N_U = 592,
+ VSHLLTQ_N_S = 593,
+ VRMLALDAVHQ_U = 594,
+ VRMLALDAVHQ_S = 595,
+ VMULLTQ_POLY_P = 596,
+ VMULLBQ_POLY_P = 597,
+ VBICQ_M_N_S = 598,
+ VBICQ_M_N_U = 599,
+ VCMPEQQ_M_F = 600,
+ VCVTAQ_M_S = 601,
+ VCVTAQ_M_U = 602,
+ VCVTQ_M_TO_F_S = 603,
+ VCVTQ_M_TO_F_U = 604,
+ VQRSHRNBQ_N_U = 605,
+ VQRSHRNBQ_N_S = 606,
+ VQRSHRUNBQ_N_S = 607,
+ VRMLALDAVHAQ_S = 608,
+ VABAVQ_S = 609,
+ VABAVQ_U = 610,
+ VSHLCQ_S = 611,
+ VSHLCQ_U = 612,
+ VRMLALDAVHAQ_U = 613,
+ VABSQ_M_S = 614,
+ VADDVAQ_P_S = 615,
+ VADDVAQ_P_U = 616,
+ VCLSQ_M_S = 617,
+ VCLZQ_M_S = 618,
+ VCLZQ_M_U = 619,
+ VCMPCSQ_M_N_U = 620,
+ VCMPCSQ_M_U = 621,
+ VCMPEQQ_M_N_S = 622,
+ VCMPEQQ_M_N_U = 623,
+ VCMPEQQ_M_S = 624,
+ VCMPEQQ_M_U = 625,
+ VCMPGEQ_M_N_S = 626,
+ VCMPGEQ_M_S = 627,
+ VCMPGTQ_M_N_S = 628,
+ VCMPGTQ_M_S = 629,
+ VCMPHIQ_M_N_U = 630,
+ VCMPHIQ_M_U = 631,
+ VCMPLEQ_M_N_S = 632,
+ VCMPLEQ_M_S = 633,
+ VCMPLTQ_M_N_S = 634,
+ VCMPLTQ_M_S = 635,
+ VCMPNEQ_M_N_S = 636,
+ VCMPNEQ_M_N_U = 637,
+ VCMPNEQ_M_S = 638,
+ VCMPNEQ_M_U = 639,
+ VDUPQ_M_N_S = 640,
+ VDUPQ_M_N_U = 641,
+ VDWDUPQ_N_U = 642,
+ VDWDUPQ_WB_U = 643,
+ VIWDUPQ_N_U = 644,
+ VIWDUPQ_WB_U = 645,
+ VMAXAQ_M_S = 646,
+ VMAXAVQ_P_S = 647,
+ VMAXVQ_P_S = 648,
+ VMAXVQ_P_U = 649,
+ VMINAQ_M_S = 650,
+ VMINAVQ_P_S = 651,
+ VMINVQ_P_S = 652,
+ VMINVQ_P_U = 653,
+ VMLADAVAQ_S = 654,
+ VMLADAVAQ_U = 655,
+ VMLADAVQ_P_S = 656,
+ VMLADAVQ_P_U = 657,
+ VMLADAVXQ_P_S = 658,
+ VMLAQ_N_S = 659,
+ VMLAQ_N_U = 660,
+ VMLASQ_N_S = 661,
+ VMLASQ_N_U = 662,
+ VMLSDAVQ_P_S = 663,
+ VMLSDAVXQ_P_S = 664,
+ VMVNQ_M_S = 665,
+ VMVNQ_M_U = 666,
+ VNEGQ_M_S = 667,
+ VPSELQ_S = 668,
+ VPSELQ_U = 669,
+ VQABSQ_M_S = 670,
+ VQDMLAHQ_N_S = 671,
+ VQDMLASHQ_N_S = 672,
+ VQNEGQ_M_S = 673,
+ VQRDMLADHQ_S = 674,
+ VQRDMLADHXQ_S = 675,
+ VQRDMLAHQ_N_S = 676,
+ VQRDMLASHQ_N_S = 677,
+ VQRDMLSDHQ_S = 678,
+ VQRDMLSDHXQ_S = 679,
+ VQRSHLQ_M_N_S = 680,
+ VQRSHLQ_M_N_U = 681,
+ VQSHLQ_M_R_S = 682,
+ VQSHLQ_M_R_U = 683,
+ VREV64Q_M_S = 684,
+ VREV64Q_M_U = 685,
+ VRSHLQ_M_N_S = 686,
+ VRSHLQ_M_N_U = 687,
+ VSHLQ_M_R_S = 688,
+ VSHLQ_M_R_U = 689,
+ VSLIQ_N_S = 690,
+ VSLIQ_N_U = 691,
+ VSRIQ_N_S = 692,
+ VSRIQ_N_U = 693,
+ VQDMLSDHXQ_S = 694,
+ VQDMLSDHQ_S = 695,
+ VQDMLADHXQ_S = 696,
+ VQDMLADHQ_S = 697,
+ VMLSDAVAXQ_S = 698,
+ VMLSDAVAQ_S = 699,
+ VMLADAVAXQ_S = 700,
+ VCMPGEQ_M_F = 701,
+ VCMPGTQ_M_N_F = 702,
+ VMLSLDAVQ_P_S = 703,
+ VRMLALDAVHAXQ_S = 704,
+ VMLSLDAVXQ_P_S = 705,
+ VFMAQ_F = 706,
+ VMLSLDAVAQ_S = 707,
+ VQSHRUNBQ_N_S = 708,
+ VQRSHRUNTQ_N_S = 709,
+ VMINNMAQ_M_F = 710,
+ VFMASQ_N_F = 711,
+ VDUPQ_M_N_F = 712,
+ VCMPGTQ_M_F = 713,
+ VCMPLTQ_M_F = 714,
+ VRMLSLDAVHQ_P_S = 715,
+ VQSHRUNTQ_N_S = 716,
+ VABSQ_M_F = 717,
+ VMAXNMAVQ_P_F = 718,
+ VFMAQ_N_F = 719,
+ VRMLSLDAVHXQ_P_S = 720,
+ VREV32Q_M_F = 721,
+ VRMLSLDAVHAQ_S = 722,
+ VRMLSLDAVHAXQ_S = 723,
+ VCMPLTQ_M_N_F = 724,
+ VCMPNEQ_M_F = 725,
+ VRNDAQ_M_F = 726,
+ VRNDPQ_M_F = 727,
+ VADDLVAQ_P_S = 728,
+ VQMOVUNBQ_M_S = 729,
+ VCMPLEQ_M_F = 730,
+ VMLSLDAVAXQ_S = 731,
+ VRNDXQ_M_F = 732,
+ VFMSQ_F = 733,
+ VMINNMVQ_P_F = 734,
+ VMAXNMVQ_P_F = 735,
+ VPSELQ_F = 736,
+ VQMOVUNTQ_M_S = 737,
+ VREV64Q_M_F = 738,
+ VNEGQ_M_F = 739,
+ VRNDMQ_M_F = 740,
+ VCMPLEQ_M_N_F = 741,
+ VCMPGEQ_M_N_F = 742,
+ VRNDNQ_M_F = 743,
+ VMINNMAVQ_P_F = 744,
+ VCMPNEQ_M_N_F = 745,
+ VRMLALDAVHQ_P_S = 746,
+ VRMLALDAVHXQ_P_S = 747,
+ VCMPEQQ_M_N_F = 748,
+ VMAXNMAQ_M_F = 749,
+ VRNDQ_M_F = 750,
+ VMLALDAVQ_P_U = 751,
+ VMLALDAVQ_P_S = 752,
+ VQMOVNBQ_M_S = 753,
+ VQMOVNBQ_M_U = 754,
+ VMOVLTQ_M_U = 755,
+ VMOVLTQ_M_S = 756,
+ VMOVNBQ_M_U = 757,
+ VMOVNBQ_M_S = 758,
+ VRSHRNTQ_N_U = 759,
+ VRSHRNTQ_N_S = 760,
+ VORRQ_M_N_S = 761,
+ VORRQ_M_N_U = 762,
+ VREV32Q_M_S = 763,
+ VREV32Q_M_U = 764,
+ VQRSHRNTQ_N_U = 765,
+ VQRSHRNTQ_N_S = 766,
+ VMOVNTQ_M_U = 767,
+ VMOVNTQ_M_S = 768,
+ VMOVLBQ_M_U = 769,
+ VMOVLBQ_M_S = 770,
+ VMLALDAVAQ_S = 771,
+ VMLALDAVAQ_U = 772,
+ VQSHRNBQ_N_U = 773,
+ VQSHRNBQ_N_S = 774,
+ VSHRNBQ_N_U = 775,
+ VSHRNBQ_N_S = 776,
+ VRSHRNBQ_N_S = 777,
+ VRSHRNBQ_N_U = 778,
+ VMLALDAVXQ_P_U = 779,
+ VMLALDAVXQ_P_S = 780,
+ VQMOVNTQ_M_U = 781,
+ VQMOVNTQ_M_S = 782,
+ VMVNQ_M_N_U = 783,
+ VMVNQ_M_N_S = 784,
+ VQSHRNTQ_N_U = 785,
+ VQSHRNTQ_N_S = 786,
+ VMLALDAVAXQ_S = 787,
+ VMLALDAVAXQ_U = 788,
+ VSHRNTQ_N_S = 789,
+ VSHRNTQ_N_U = 790,
+ VCVTBQ_M_F16_F32 = 791,
+ VCVTBQ_M_F32_F16 = 792,
+ VCVTTQ_M_F16_F32 = 793,
+ VCVTTQ_M_F32_F16 = 794,
+ VCVTMQ_M_S = 795,
+ VCVTMQ_M_U = 796,
+ VCVTNQ_M_S = 797,
+ VCVTPQ_M_S = 798,
+ VCVTPQ_M_U = 799,
+ VCVTQ_M_N_FROM_F_S = 800,
+ VCVTNQ_M_U = 801,
+ VREV16Q_M_S = 802,
+ VREV16Q_M_U = 803,
+ VREV32Q_M = 804,
+ VCVTQ_M_FROM_F_U = 805,
+ VCVTQ_M_FROM_F_S = 806,
+ VRMLALDAVHQ_P_U = 807,
+ VADDLVAQ_P_U = 808,
+ VCVTQ_M_N_FROM_F_U = 809,
+ VQSHLUQ_M_N_S = 810,
+ VABAVQ_P_S = 811,
+ VABAVQ_P_U = 812,
+ VSHLQ_M_S = 813,
+ VSHLQ_M_U = 814,
+ VSRIQ_M_N_S = 815,
+ VSRIQ_M_N_U = 816,
+ VSUBQ_M_U = 817,
+ VSUBQ_M_S = 818,
+ VCVTQ_M_N_TO_F_U = 819,
+ VCVTQ_M_N_TO_F_S = 820,
+ VQADDQ_M_U = 821,
+ VQADDQ_M_S = 822,
+ VRSHRQ_M_N_S = 823,
+ VSUBQ_M_N_S = 824,
+ VSUBQ_M_N_U = 825,
+ VBRSRQ_M_N_S = 826,
+ VSUBQ_M_N_F = 827,
+ VBICQ_M_F = 828,
+ VHADDQ_M_U = 829,
+ VBICQ_M_U = 830,
+ VBICQ_M_S = 831,
+ VMULQ_M_N_U = 832,
+ VHADDQ_M_S = 833,
+ VORNQ_M_F = 834,
+ VMLAQ_M_N_S = 835,
+ VQSUBQ_M_U = 836,
+ VQSUBQ_M_S = 837,
+ VMLAQ_M_N_U = 838,
+ VQSUBQ_M_N_U = 839,
+ VQSUBQ_M_N_S = 840,
+ VMULLTQ_INT_M_S = 841,
+ VMULLTQ_INT_M_U = 842,
+ VMULQ_M_N_S = 843,
+ VMULQ_M_N_F = 844,
+ VMLASQ_M_N_U = 845,
+ VMLASQ_M_N_S = 846,
+ VMAXQ_M_U = 847,
+ VQRDMLAHQ_M_N_U = 848,
+ VCADDQ_ROT270_M_F = 849,
+ VCADDQ_ROT270_M_U = 850,
+ VCADDQ_ROT270_M_S = 851,
+ VQRSHLQ_M_S = 852,
+ VMULQ_M_F = 853,
+ VRHADDQ_M_U = 854,
+ VSHRQ_M_N_U = 855,
+ VRHADDQ_M_S = 856,
+ VMULQ_M_S = 857,
+ VMULQ_M_U = 858,
+ VQDMLASHQ_M_N_S = 859,
+ VQRDMLASHQ_M_N_S = 860,
+ VRSHLQ_M_S = 861,
+ VRSHLQ_M_U = 862,
+ VRSHRQ_M_N_U = 863,
+ VADDQ_M_N_F = 864,
+ VADDQ_M_N_S = 865,
+ VADDQ_M_N_U = 866,
+ VQRDMLASHQ_M_N_U = 867,
+ VMAXQ_M_S = 868,
+ VQRDMLAHQ_M_N_S = 869,
+ VORRQ_M_S = 870,
+ VORRQ_M_U = 871,
+ VORRQ_M_F = 872,
+ VQRSHLQ_M_U = 873,
+ VRMULHQ_M_U = 874,
+ VRMULHQ_M_S = 875,
+ VMINQ_M_S = 876,
+ VMINQ_M_U = 877,
+ VANDQ_M_F = 878,
+ VANDQ_M_U = 879,
+ VANDQ_M_S = 880,
+ VHSUBQ_M_N_S = 881,
+ VHSUBQ_M_N_U = 882,
+ VMULHQ_M_S = 883,
+ VMULHQ_M_U = 884,
+ VMULLBQ_INT_M_U = 885,
+ VMULLBQ_INT_M_S = 886,
+ VCADDQ_ROT90_M_F = 887,
+ VSHRQ_M_N_S = 888,
+ VADDQ_M_U = 889,
+ VSLIQ_M_N_U = 890,
+ VQADDQ_M_N_S = 891,
+ VBRSRQ_M_N_F = 892,
+ VABDQ_M_F = 893,
+ VBRSRQ_M_N_U = 894,
+ VEORQ_M_F = 895,
+ VSHLQ_M_N_S = 896,
+ VQDMLAHQ_M_N_U = 897,
+ VQDMLAHQ_M_N_S = 898,
+ VSHLQ_M_N_U = 899,
+ VMLADAVAQ_P_U = 900,
+ VMLADAVAQ_P_S = 901,
+ VSLIQ_M_N_S = 902,
+ VQSHLQ_M_U = 903,
+ VQSHLQ_M_S = 904,
+ VCADDQ_ROT90_M_U = 905,
+ VCADDQ_ROT90_M_S = 906,
+ VORNQ_M_U = 907,
+ VORNQ_M_S = 908,
+ VQSHLQ_M_N_S = 909,
+ VQSHLQ_M_N_U = 910,
+ VADDQ_M_S = 911,
+ VHADDQ_M_N_S = 912,
+ VADDQ_M_F = 913,
+ VQADDQ_M_N_U = 914,
+ VEORQ_M_S = 915,
+ VEORQ_M_U = 916,
+ VHSUBQ_M_S = 917,
+ VHSUBQ_M_U = 918,
+ VHADDQ_M_N_U = 919,
+ VHCADDQ_ROT90_M_S = 920,
+ VQRDMLSDHQ_M_S = 921,
+ VQRDMLSDHXQ_M_S = 922,
+ VQRDMLADHXQ_M_S = 923,
+ VQDMULHQ_M_S = 924,
+ VMLADAVAXQ_P_S = 925,
+ VQDMLADHXQ_M_S = 926,
+ VQRDMULHQ_M_S = 927,
+ VMLSDAVAXQ_P_S = 928,
+ VQDMULHQ_M_N_S = 929,
+ VHCADDQ_ROT270_M_S = 930,
+ VQDMLSDHQ_M_S = 931,
+ VQDMLSDHXQ_M_S = 932,
+ VMLSDAVAQ_P_S = 933,
+ VQRDMLADHQ_M_S = 934,
+ VQDMLADHQ_M_S = 935,
+ VMLALDAVAQ_P_U = 936,
+ VMLALDAVAQ_P_S = 937,
+ VQRSHRNBQ_M_N_U = 938,
+ VQRSHRNBQ_M_N_S = 939,
+ VQRSHRNTQ_M_N_S = 940,
+ VQSHRNBQ_M_N_U = 941,
+ VQSHRNBQ_M_N_S = 942,
+ VQSHRNTQ_M_N_S = 943,
+ VRSHRNBQ_M_N_U = 944,
+ VRSHRNBQ_M_N_S = 945,
+ VRSHRNTQ_M_N_U = 946,
+ VSHLLBQ_M_N_U = 947,
+ VSHLLBQ_M_N_S = 948,
+ VSHLLTQ_M_N_U = 949,
+ VSHLLTQ_M_N_S = 950,
+ VSHRNBQ_M_N_S = 951,
+ VSHRNBQ_M_N_U = 952,
+ VSHRNTQ_M_N_S = 953,
+ VSHRNTQ_M_N_U = 954,
+ VMLALDAVAXQ_P_S = 955,
+ VQRSHRNTQ_M_N_U = 956,
+ VQSHRNTQ_M_N_U = 957,
+ VRSHRNTQ_M_N_S = 958,
+ VQRDMULHQ_M_N_S = 959,
+ VRMLALDAVHAQ_P_S = 960,
+ VMLSLDAVAQ_P_S = 961,
+ VMLSLDAVAXQ_P_S = 962,
+ VMULLBQ_POLY_M_P = 963,
+ VMULLTQ_POLY_M_P = 964,
+ VQDMULLBQ_M_N_S = 965,
+ VQDMULLBQ_M_S = 966,
+ VQDMULLTQ_M_N_S = 967,
+ VQDMULLTQ_M_S = 968,
+ VQRSHRUNBQ_M_N_S = 969,
+ VQSHRUNBQ_M_N_S = 970,
+ VQSHRUNTQ_M_N_S = 971,
+ VRMLALDAVHAQ_P_U = 972,
+ VRMLALDAVHAXQ_P_S = 973,
+ VRMLSLDAVHAQ_P_S = 974,
+ VRMLSLDAVHAXQ_P_S = 975,
+ VQRSHRUNTQ_M_N_S = 976,
+ VCMLAQ_M_F = 977,
+ VCMLAQ_ROT180_M_F = 978,
+ VCMLAQ_ROT270_M_F = 979,
+ VCMLAQ_ROT90_M_F = 980,
+ VCMULQ_M_F = 981,
+ VCMULQ_ROT180_M_F = 982,
+ VCMULQ_ROT270_M_F = 983,
+ VCMULQ_ROT90_M_F = 984,
+ VFMAQ_M_F = 985,
+ VFMAQ_M_N_F = 986,
+ VFMASQ_M_N_F = 987,
+ VFMSQ_M_F = 988,
+ VMAXNMQ_M_F = 989,
+ VMINNMQ_M_F = 990,
+ VSUBQ_M_F = 991,
+ VSTRWQSB_S = 992,
+ VSTRWQSB_U = 993,
+ VSTRBQSO_S = 994,
+ VSTRBQSO_U = 995,
+ VSTRBQ_S = 996,
+ VSTRBQ_U = 997,
+ VLDRBQGO_S = 998,
+ VLDRBQGO_U = 999,
+ VLDRBQ_S = 1000,
+ VLDRBQ_U = 1001,
+ VLDRWQGB_S = 1002,
+ VLDRWQGB_U = 1003,
+ VLD1Q_F = 1004,
+ VLD1Q_S = 1005,
+ VLD1Q_U = 1006,
+ VLDRHQ_F = 1007,
+ VLDRHQGO_S = 1008,
+ VLDRHQGO_U = 1009,
+ VLDRHQGSO_S = 1010,
+ VLDRHQGSO_U = 1011,
+ VLDRHQ_S = 1012,
+ VLDRHQ_U = 1013,
+ VLDRWQ_F = 1014,
+ VLDRWQ_S = 1015,
+ VLDRWQ_U = 1016,
+ VLDRDQGB_S = 1017,
+ VLDRDQGB_U = 1018,
+ VLDRDQGO_S = 1019,
+ VLDRDQGO_U = 1020,
+ VLDRDQGSO_S = 1021,
+ VLDRDQGSO_U = 1022,
+ VLDRHQGO_F = 1023,
+ VLDRHQGSO_F = 1024,
+ VLDRWQGB_F = 1025,
+ VLDRWQGO_F = 1026,
+ VLDRWQGO_S = 1027,
+ VLDRWQGO_U = 1028,
+ VLDRWQGSO_F = 1029,
+ VLDRWQGSO_S = 1030,
+ VLDRWQGSO_U = 1031,
+ VSTRHQ_F = 1032,
+ VST1Q_S = 1033,
+ VST1Q_U = 1034,
+ VSTRHQSO_S = 1035,
+ VSTRHQ_U = 1036,
+ VSTRWQ_S = 1037,
+ VSTRWQ_U = 1038,
+ VSTRWQ_F = 1039,
+ VST1Q_F = 1040,
+ VSTRDQSB_S = 1041,
+ VSTRDQSB_U = 1042,
+ VSTRDQSO_S = 1043,
+ VSTRDQSO_U = 1044,
+ VSTRDQSSO_S = 1045,
+ VSTRDQSSO_U = 1046,
+ VSTRWQSO_S = 1047,
+ VSTRWQSO_U = 1048,
+ VSTRWQSSO_S = 1049,
+ VSTRWQSSO_U = 1050,
+ VSTRHQSO_F = 1051,
+ VSTRHQSSO_F = 1052,
+ VSTRWQSB_F = 1053,
+ VSTRWQSO_F = 1054,
+ VSTRWQSSO_F = 1055,
+ VDDUPQ = 1056,
+ VDDUPQ_M = 1057,
+ VDWDUPQ = 1058,
+ VDWDUPQ_M = 1059,
+ VIDUPQ = 1060,
+ VIDUPQ_M = 1061,
+ VIWDUPQ = 1062,
+ VIWDUPQ_M = 1063,
+ VSTRWQSBWB_S = 1064,
+ VSTRWQSBWB_U = 1065,
+ VLDRWQGBWB_S = 1066,
+ VLDRWQGBWB_U = 1067,
+ VSTRWQSBWB_F = 1068,
+ VLDRWQGBWB_F = 1069,
+ VSTRDQSBWB_S = 1070,
+ VSTRDQSBWB_U = 1071,
+ VLDRDQGBWB_S = 1072,
+ VLDRDQGBWB_U = 1073,
+ VADCQ_U = 1074,
+ VADCQ_M_U = 1075,
+ VADCQ_S = 1076,
+ VADCQ_M_S = 1077,
+ VSBCIQ_U = 1078,
+ VSBCIQ_S = 1079,
+ VSBCIQ_M_U = 1080,
+ VSBCIQ_M_S = 1081,
+ VSBCQ_U = 1082,
+ VSBCQ_S = 1083,
+ VSBCQ_M_U = 1084,
+ VSBCQ_M_S = 1085,
+ VADCIQ_U = 1086,
+ VADCIQ_M_U = 1087,
+ VADCIQ_S = 1088,
+ VADCIQ_M_S = 1089,
+ VLD2Q = 1090,
+ VLD4Q = 1091,
+ VST2Q = 1092,
+ VSHLCQ_M_U = 1093,
+ VSHLCQ_M_S = 1094,
+ VSTRHQSO_U = 1095,
+ VSTRHQSSO_S = 1096,
+ VSTRHQSSO_U = 1097,
+ VSTRHQ_S = 1098,
+ SRSHRL = 1099,
+ SRSHR = 1100,
+ URSHR = 1101,
+ URSHRL = 1102,
+ SQRSHR = 1103,
+ UQRSHL = 1104,
+ UQRSHLL_64 = 1105,
+ UQRSHLL_48 = 1106,
+ SQRSHRL_64 = 1107,
+ SQRSHRL_48 = 1108,
+ VSHLCQ_M_ = 1109
+};
+#define NUM_UNSPEC_VALUES 1110
+extern const char *const unspec_strings[];
+
+enum unspecv {
+ VUNSPEC_BLOCKAGE = 0,
+ VUNSPEC_EPILOGUE = 1,
+ VUNSPEC_THUMB1_INTERWORK = 2,
+ VUNSPEC_ALIGN = 3,
+ VUNSPEC_POOL_END = 4,
+ VUNSPEC_POOL_1 = 5,
+ VUNSPEC_POOL_2 = 6,
+ VUNSPEC_POOL_4 = 7,
+ VUNSPEC_POOL_8 = 8,
+ VUNSPEC_POOL_16 = 9,
+ VUNSPEC_TMRC = 10,
+ VUNSPEC_TMCR = 11,
+ VUNSPEC_ALIGN8 = 12,
+ VUNSPEC_WCMP_EQ = 13,
+ VUNSPEC_WCMP_GTU = 14,
+ VUNSPEC_WCMP_GT = 15,
+ VUNSPEC_EH_RETURN = 16,
+ VUNSPEC_ATOMIC_CAS = 17,
+ VUNSPEC_ATOMIC_XCHG = 18,
+ VUNSPEC_ATOMIC_OP = 19,
+ VUNSPEC_LL = 20,
+ VUNSPEC_LDRD_ATOMIC = 21,
+ VUNSPEC_SC = 22,
+ VUNSPEC_LAX = 23,
+ VUNSPEC_SLX = 24,
+ VUNSPEC_LDA = 25,
+ VUNSPEC_STL = 26,
+ VUNSPEC_GET_FPSCR = 27,
+ VUNSPEC_SET_FPSCR = 28,
+ VUNSPEC_SET_FPSCR_NZCVQC = 29,
+ VUNSPEC_PROBE_STACK_RANGE = 30,
+ VUNSPEC_CDP = 31,
+ VUNSPEC_CDP2 = 32,
+ VUNSPEC_LDC = 33,
+ VUNSPEC_LDC2 = 34,
+ VUNSPEC_LDCL = 35,
+ VUNSPEC_LDC2L = 36,
+ VUNSPEC_STC = 37,
+ VUNSPEC_STC2 = 38,
+ VUNSPEC_STCL = 39,
+ VUNSPEC_STC2L = 40,
+ VUNSPEC_MCR = 41,
+ VUNSPEC_MCR2 = 42,
+ VUNSPEC_MRC = 43,
+ VUNSPEC_MRC2 = 44,
+ VUNSPEC_MCRR = 45,
+ VUNSPEC_MCRR2 = 46,
+ VUNSPEC_MRRC = 47,
+ VUNSPEC_MRRC2 = 48,
+ VUNSPEC_SPECULATION_BARRIER = 49,
+ VUNSPEC_APSR_WRITE = 50,
+ VUNSPEC_VSTR_VLDR = 51,
+ VUNSPEC_CLRM_APSR = 52,
+ VUNSPEC_VSCCLRM_VPR = 53,
+ VUNSPEC_VLSTM = 54,
+ VUNSPEC_VLLDM = 55,
+ VUNSPEC_PACBTI_NOP = 56,
+ VUNSPEC_AUT_NOP = 57,
+ VUNSPEC_BTI_NOP = 58
+};
+#define NUM_UNSPECV_VALUES 59
+extern const char *const unspecv_strings[];
+
+#endif /* GCC_INSN_CONSTANTS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-flags.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-flags.h
new file mode 100644
index 0000000..1d3c4e8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-flags.h
@@ -0,0 +1,11510 @@
+/* Generated automatically by the program `genflags'
+ from the machine description file `md'. */
+
+#ifndef GCC_INSN_FLAGS_H
+#define GCC_INSN_FLAGS_H
+
+#define HAVE_addsi3_compareV_reg (TARGET_32BIT)
+#define HAVE_subvsi3_intmin (TARGET_32BIT)
+#define HAVE_addsi3_compareV_imm (TARGET_32BIT \
+ && INTVAL (operands[2]) == ARM_SIGN_EXTEND (INTVAL (operands[2])))
+#define HAVE_addsi3_compareV_imm_nosum (TARGET_32BIT \
+ && INTVAL (operands[1]) == ARM_SIGN_EXTEND (INTVAL (operands[1])))
+#define HAVE_addsi3_compare0 (TARGET_ARM)
+#define HAVE_cmpsi2_addneg (TARGET_32BIT \
+ && (INTVAL (operands[2]) \
+ == trunc_int_for_mode (-INTVAL (operands[3]), SImode)))
+#define HAVE_addsi3_compare_op1 (TARGET_32BIT)
+#define HAVE_addsi3_carryin (TARGET_32BIT)
+#define HAVE_add0si3_carryin (TARGET_32BIT)
+#define HAVE_subsi3_compare1 (TARGET_32BIT)
+#define HAVE_subvsi3 (TARGET_32BIT)
+#define HAVE_subvsi3_imm1 (TARGET_32BIT)
+#define HAVE_subsi3_carryin (TARGET_32BIT)
+#define HAVE_rsbsi_carryin_reg (TARGET_ARM)
+#define HAVE_add_not_shift_cin (TARGET_ARM)
+#define HAVE_cmpsi3_carryin_CC_NVout (TARGET_32BIT)
+#define HAVE_cmpsi3_carryin_CC_Bout (TARGET_32BIT)
+#define HAVE_cmpsi3_imm_carryin_CC_NVout (TARGET_32BIT)
+#define HAVE_cmpsi3_imm_carryin_CC_Bout (TARGET_32BIT)
+#define HAVE_cmpsi3_0_carryin_CC_NVout (TARGET_32BIT)
+#define HAVE_cmpsi3_0_carryin_CC_Bout (TARGET_32BIT)
+#define HAVE_subsi3_compare0 (TARGET_32BIT)
+#define HAVE_subsi3_compare (TARGET_32BIT)
+#define HAVE_rsb_imm_compare (TARGET_32BIT && ~UINTVAL (operands[1]) == UINTVAL (operands[3]))
+#define HAVE_rsb_imm_compare_scratch (TARGET_32BIT)
+#define HAVE_rscsi3_CC_NVout_scratch (TARGET_ARM)
+#define HAVE_rscsi3_CC_Bout_scratch (TARGET_ARM)
+#define HAVE_usubvsi3_borrow (TARGET_32BIT)
+#define HAVE_usubvsi3_borrow_imm (TARGET_32BIT \
+ && (UINTVAL (operands[2]) & 0xffffffff) == UINTVAL (operands[3]))
+#define HAVE_subvsi3_borrow (TARGET_32BIT)
+#define HAVE_subvsi3_borrow_imm (TARGET_32BIT \
+ && INTVAL (operands[2]) == ARM_SIGN_EXTEND (INTVAL (operands[2])))
+#define HAVE_smull (TARGET_32BIT)
+#define HAVE_umull (TARGET_32BIT)
+#define HAVE_smlal (TARGET_32BIT)
+#define HAVE_umlal (TARGET_32BIT)
+#define HAVE_mulhisi3 (TARGET_DSP_MULTIPLY)
+#define HAVE_arm_smlabb_setq (TARGET_DSP_MULTIPLY)
+#define HAVE_maddhisi4tb (TARGET_DSP_MULTIPLY && !ARM_Q_BIT_READ)
+#define HAVE_arm_smlatb_setq (TARGET_DSP_MULTIPLY)
+#define HAVE_maddhisi4tt (TARGET_DSP_MULTIPLY && !ARM_Q_BIT_READ)
+#define HAVE_arm_smlatt_setq (TARGET_DSP_MULTIPLY)
+#define HAVE_maddhidi4 (TARGET_DSP_MULTIPLY)
+#define HAVE_arm_smlawb_insn (TARGET_DSP_MULTIPLY && !ARM_Q_BIT_READ)
+#define HAVE_arm_smlawb_setq_insn (TARGET_DSP_MULTIPLY && ARM_Q_BIT_READ)
+#define HAVE_arm_smlawt_insn (TARGET_DSP_MULTIPLY && !ARM_Q_BIT_READ)
+#define HAVE_arm_smlawt_setq_insn (TARGET_DSP_MULTIPLY && ARM_Q_BIT_READ)
+#define HAVE_insv_zero (arm_arch_thumb2)
+#define HAVE_insv_t2 (arm_arch_thumb2)
+#define HAVE_andsi_notsi_si (TARGET_32BIT)
+#define HAVE_andsi_not_shiftsi_si (TARGET_32BIT)
+#define HAVE_andsi_not_shiftsi_si_scc_no_reuse (TARGET_32BIT)
+#define HAVE_andsi_not_shiftsi_si_scc (TARGET_32BIT)
+#define HAVE_arm_qadd_insn (TARGET_DSP_MULTIPLY && !ARM_Q_BIT_READ)
+#define HAVE_arm_qadd_setq_insn (TARGET_DSP_MULTIPLY && ARM_Q_BIT_READ)
+#define HAVE_arm_qsub_insn (TARGET_DSP_MULTIPLY && !ARM_Q_BIT_READ)
+#define HAVE_arm_qsub_setq_insn (TARGET_DSP_MULTIPLY && ARM_Q_BIT_READ)
+#define HAVE_arm_get_apsr (TARGET_ARM_QBIT)
+#define HAVE_arm_set_apsr (TARGET_ARM_QBIT)
+#define HAVE_satsi_smin (TARGET_32BIT && arm_arch6 && !ARM_Q_BIT_READ \
+ && arm_sat_operator_match (operands[1], operands[2], NULL, NULL))
+#define HAVE_satsi_smin_setq (TARGET_32BIT && arm_arch6 && ARM_Q_BIT_READ \
+ && arm_sat_operator_match (operands[1], operands[2], NULL, NULL))
+#define HAVE_satsi_smax (TARGET_32BIT && arm_arch6 && !ARM_Q_BIT_READ \
+ && arm_sat_operator_match (operands[2], operands[1], NULL, NULL))
+#define HAVE_satsi_smax_setq (TARGET_32BIT && arm_arch6 && ARM_Q_BIT_READ \
+ && arm_sat_operator_match (operands[2], operands[1], NULL, NULL))
+#define HAVE_arm_cx1si (TARGET_CDE)
+#define HAVE_arm_cx1di (TARGET_CDE)
+#define HAVE_arm_cx1asi (TARGET_CDE)
+#define HAVE_arm_cx1adi (TARGET_CDE)
+#define HAVE_arm_cx2si (TARGET_CDE)
+#define HAVE_arm_cx2di (TARGET_CDE)
+#define HAVE_arm_cx2asi (TARGET_CDE)
+#define HAVE_arm_cx2adi (TARGET_CDE)
+#define HAVE_arm_cx3si (TARGET_CDE)
+#define HAVE_arm_cx3di (TARGET_CDE)
+#define HAVE_arm_cx3asi (TARGET_CDE)
+#define HAVE_arm_cx3adi (TARGET_CDE)
+#define HAVE_unaligned_loaddi (TARGET_32BIT && TARGET_LDRD)
+#define HAVE_unaligned_loadsi (unaligned_access)
+#define HAVE_unaligned_loadhis (unaligned_access && TARGET_32BIT)
+#define HAVE_unaligned_loadhiu (unaligned_access)
+#define HAVE_unaligned_storedi (TARGET_32BIT && TARGET_LDRD)
+#define HAVE_unaligned_storesi (unaligned_access)
+#define HAVE_unaligned_storehi (unaligned_access)
+#define HAVE_extzv_t2 (arm_arch_thumb2 \
+ && IN_RANGE (INTVAL (operands[3]), 0, 31) \
+ && IN_RANGE (INTVAL (operands[2]), 1, 32 - INTVAL (operands[3])))
+#define HAVE_divsi3 (TARGET_IDIV)
+#define HAVE_udivsi3 (TARGET_IDIV)
+#define HAVE_negsi2_0compare (TARGET_32BIT)
+#define HAVE_negsi2_carryin (TARGET_32BIT)
+#define HAVE_arm_sxtb16 (TARGET_INT_SIMD)
+#define HAVE_arm_uxtb16 (TARGET_INT_SIMD)
+#define HAVE_arm_qadd8 (TARGET_INT_SIMD)
+#define HAVE_arm_qsub8 (TARGET_INT_SIMD)
+#define HAVE_arm_shadd8 (TARGET_INT_SIMD)
+#define HAVE_arm_shsub8 (TARGET_INT_SIMD)
+#define HAVE_arm_uhadd8 (TARGET_INT_SIMD)
+#define HAVE_arm_uhsub8 (TARGET_INT_SIMD)
+#define HAVE_arm_uqadd8 (TARGET_INT_SIMD)
+#define HAVE_arm_uqsub8 (TARGET_INT_SIMD)
+#define HAVE_arm_qadd16 (TARGET_INT_SIMD)
+#define HAVE_arm_qasx (TARGET_INT_SIMD)
+#define HAVE_arm_qsax (TARGET_INT_SIMD)
+#define HAVE_arm_qsub16 (TARGET_INT_SIMD)
+#define HAVE_arm_shadd16 (TARGET_INT_SIMD)
+#define HAVE_arm_shasx (TARGET_INT_SIMD)
+#define HAVE_arm_shsax (TARGET_INT_SIMD)
+#define HAVE_arm_shsub16 (TARGET_INT_SIMD)
+#define HAVE_arm_uhadd16 (TARGET_INT_SIMD)
+#define HAVE_arm_uhasx (TARGET_INT_SIMD)
+#define HAVE_arm_uhsax (TARGET_INT_SIMD)
+#define HAVE_arm_uhsub16 (TARGET_INT_SIMD)
+#define HAVE_arm_uqadd16 (TARGET_INT_SIMD)
+#define HAVE_arm_uqasx (TARGET_INT_SIMD)
+#define HAVE_arm_uqsax (TARGET_INT_SIMD)
+#define HAVE_arm_uqsub16 (TARGET_INT_SIMD)
+#define HAVE_arm_smusd (TARGET_INT_SIMD)
+#define HAVE_arm_smusdx (TARGET_INT_SIMD)
+#define HAVE_arm_sxtab16 (TARGET_INT_SIMD)
+#define HAVE_arm_uxtab16 (TARGET_INT_SIMD)
+#define HAVE_arm_usad8 (TARGET_INT_SIMD)
+#define HAVE_arm_usada8 (TARGET_INT_SIMD)
+#define HAVE_arm_smlald (TARGET_INT_SIMD)
+#define HAVE_arm_smlaldx (TARGET_INT_SIMD)
+#define HAVE_arm_smlsld (TARGET_INT_SIMD)
+#define HAVE_arm_smlsldx (TARGET_INT_SIMD)
+#define HAVE_arm_sadd8 (TARGET_INT_SIMD)
+#define HAVE_arm_ssub8 (TARGET_INT_SIMD)
+#define HAVE_arm_uadd8 (TARGET_INT_SIMD)
+#define HAVE_arm_usub8 (TARGET_INT_SIMD)
+#define HAVE_arm_sadd16 (TARGET_INT_SIMD)
+#define HAVE_arm_sasx (TARGET_INT_SIMD)
+#define HAVE_arm_ssax (TARGET_INT_SIMD)
+#define HAVE_arm_ssub16 (TARGET_INT_SIMD)
+#define HAVE_arm_uadd16 (TARGET_INT_SIMD)
+#define HAVE_arm_uasx (TARGET_INT_SIMD)
+#define HAVE_arm_usax (TARGET_INT_SIMD)
+#define HAVE_arm_usub16 (TARGET_INT_SIMD)
+#define HAVE_arm_smlad_insn (TARGET_INT_SIMD && !ARM_Q_BIT_READ)
+#define HAVE_arm_smlad_setq_insn (TARGET_INT_SIMD && ARM_Q_BIT_READ)
+#define HAVE_arm_smladx_insn (TARGET_INT_SIMD && !ARM_Q_BIT_READ)
+#define HAVE_arm_smladx_setq_insn (TARGET_INT_SIMD && ARM_Q_BIT_READ)
+#define HAVE_arm_smlsd_insn (TARGET_INT_SIMD && !ARM_Q_BIT_READ)
+#define HAVE_arm_smlsd_setq_insn (TARGET_INT_SIMD && ARM_Q_BIT_READ)
+#define HAVE_arm_smlsdx_insn (TARGET_INT_SIMD && !ARM_Q_BIT_READ)
+#define HAVE_arm_smlsdx_setq_insn (TARGET_INT_SIMD && ARM_Q_BIT_READ)
+#define HAVE_arm_smuad_insn (TARGET_INT_SIMD && !ARM_Q_BIT_READ)
+#define HAVE_arm_smuad_setq_insn (TARGET_INT_SIMD && ARM_Q_BIT_READ)
+#define HAVE_arm_smuadx_insn (TARGET_INT_SIMD && !ARM_Q_BIT_READ)
+#define HAVE_arm_smuadx_setq_insn (TARGET_INT_SIMD && ARM_Q_BIT_READ)
+#define HAVE_arm_ssat16_insn (TARGET_INT_SIMD && !ARM_Q_BIT_READ)
+#define HAVE_arm_ssat16_setq_insn (TARGET_INT_SIMD && ARM_Q_BIT_READ)
+#define HAVE_arm_usat16_insn (TARGET_INT_SIMD && !ARM_Q_BIT_READ)
+#define HAVE_arm_usat16_setq_insn (TARGET_INT_SIMD && ARM_Q_BIT_READ)
+#define HAVE_arm_sel (TARGET_INT_SIMD)
+#define HAVE_pic_load_addr_unified (flag_pic)
+#define HAVE_pic_load_addr_32bit (TARGET_32BIT && flag_pic)
+#define HAVE_pic_load_addr_thumb1 (TARGET_THUMB1 && flag_pic)
+#define HAVE_pic_add_dot_plus_four (TARGET_THUMB)
+#define HAVE_pic_add_dot_plus_eight (TARGET_ARM)
+#define HAVE_tls_load_dot_plus_eight (TARGET_ARM)
+#define HAVE_arm_cond_branch (TARGET_32BIT)
+#define HAVE_restore_pic_register_after_call 1
+#define HAVE_blockage 1
+#define HAVE_probe_stack (TARGET_32BIT)
+#define HAVE_probe_stack_range (TARGET_32BIT)
+#define HAVE_arm_stack_protect_test_insn (TARGET_32BIT)
+#define HAVE_stack_protect_set_tls 1
+#define HAVE_stack_protect_test_tls 1
+#define HAVE_nop 1
+#define HAVE_trap 1
+#define HAVE_movcond_addsi (TARGET_32BIT)
+#define HAVE_movcond (TARGET_ARM)
+#define HAVE_stack_tie 1
+#define HAVE_align_4 1
+#define HAVE_align_8 1
+#define HAVE_consttable_end 1
+#define HAVE_consttable_1 1
+#define HAVE_consttable_2 1
+#define HAVE_consttable_4 1
+#define HAVE_consttable_8 1
+#define HAVE_consttable_16 1
+#define HAVE_clzsi2 (TARGET_32BIT && arm_arch5t)
+#define HAVE_rbitsi2 (TARGET_32BIT && arm_arch_thumb2)
+#define HAVE_ctzsi2 (TARGET_32BIT && arm_arch_thumb2)
+#define HAVE_prefetch (TARGET_32BIT && arm_arch5te)
+#define HAVE_force_register_use 1
+#define HAVE_arm_eh_return (TARGET_ARM)
+#define HAVE_load_tp_hard (TARGET_HARD_TP)
+#define HAVE_reload_tp_hard (TARGET_HARD_TP)
+#define HAVE_load_tp_soft_fdpic (TARGET_SOFT_TP && TARGET_FDPIC)
+#define HAVE_load_tp_soft (TARGET_SOFT_TP && !TARGET_FDPIC)
+#define HAVE_tlscall (TARGET_GNU2_TLS)
+#define HAVE_arm_rev16si2 (arm_arch6 \
+ && aarch_rev16_shleft_mask_imm_p (operands[3], SImode) \
+ && aarch_rev16_shright_mask_imm_p (operands[2], SImode))
+#define HAVE_arm_rev16si2_alt (arm_arch6 \
+ && aarch_rev16_shleft_mask_imm_p (operands[3], SImode) \
+ && aarch_rev16_shright_mask_imm_p (operands[2], SImode))
+#define HAVE_arm_crc32b (TARGET_CRC32)
+#define HAVE_arm_crc32h (TARGET_CRC32)
+#define HAVE_arm_crc32w (TARGET_CRC32)
+#define HAVE_arm_crc32cb (TARGET_CRC32)
+#define HAVE_arm_crc32ch (TARGET_CRC32)
+#define HAVE_arm_crc32cw (TARGET_CRC32)
+#define HAVE_arm_cdp (arm_coproc_builtin_available (VUNSPEC_CDP))
+#define HAVE_arm_cdp2 (arm_coproc_builtin_available (VUNSPEC_CDP2))
+#define HAVE_arm_mcr (arm_coproc_builtin_available (VUNSPEC_MCR))
+#define HAVE_arm_mcr2 (arm_coproc_builtin_available (VUNSPEC_MCR2))
+#define HAVE_arm_mrc (arm_coproc_builtin_available (VUNSPEC_MRC))
+#define HAVE_arm_mrc2 (arm_coproc_builtin_available (VUNSPEC_MRC2))
+#define HAVE_arm_mcrr (arm_coproc_builtin_available (VUNSPEC_MCRR))
+#define HAVE_arm_mcrr2 (arm_coproc_builtin_available (VUNSPEC_MCRR2))
+#define HAVE_arm_mrrc (arm_coproc_builtin_available (VUNSPEC_MRRC))
+#define HAVE_arm_mrrc2 (arm_coproc_builtin_available (VUNSPEC_MRRC2))
+#define HAVE_pac_nop (arm_arch8m_main)
+#define HAVE_pacbti_nop (arm_arch8m_main)
+#define HAVE_aut_nop (arm_arch8m_main)
+#define HAVE_bti_nop (arm_arch8m_main)
+#define HAVE_mve_vshlq_sv8qi (ARM_HAVE_V8QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_mve_vshlq_uv8qi (ARM_HAVE_V8QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_mve_vshlq_sv16qi (ARM_HAVE_V16QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_mve_vshlq_uv16qi (ARM_HAVE_V16QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_mve_vshlq_sv4hi (ARM_HAVE_V4HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_mve_vshlq_uv4hi (ARM_HAVE_V4HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_mve_vshlq_sv8hi (ARM_HAVE_V8HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_mve_vshlq_uv8hi (ARM_HAVE_V8HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_mve_vshlq_sv2si (ARM_HAVE_V2SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_mve_vshlq_uv2si (ARM_HAVE_V2SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_mve_vshlq_sv4si (ARM_HAVE_V4SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_mve_vshlq_uv4si (ARM_HAVE_V4SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_tbcstv8qi (TARGET_REALLY_IWMMXT)
+#define HAVE_tbcstv4hi (TARGET_REALLY_IWMMXT)
+#define HAVE_tbcstv2si (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_iordi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_xordi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_anddi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_nanddi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_movv2si_internal (TARGET_REALLY_IWMMXT)
+#define HAVE_movv4hi_internal (TARGET_REALLY_IWMMXT)
+#define HAVE_movv8qi_internal (TARGET_REALLY_IWMMXT)
+#define HAVE_ssaddv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_ssaddv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_ssaddv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_usaddv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_usaddv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_usaddv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_sssubv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_sssubv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_sssubv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_ussubv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_ussubv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_ussubv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_smulv4hi3_highpart (TARGET_REALLY_IWMMXT)
+#define HAVE_umulv4hi3_highpart (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmacs (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmacsz (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmacu (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmacuz (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_clrdi (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_clrv8qi (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_clrv4hi (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_clrv2si (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_uavgrndv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_uavgrndv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_uavgv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_uavgv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tinsrb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tinsrh (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tinsrw (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_textrmub (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_textrmsb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_textrmuh (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_textrmsh (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_textrmw (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wshufh (TARGET_REALLY_IWMMXT)
+#define HAVE_eqv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_eqv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_eqv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_gtuv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_gtuv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_gtuv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_gtv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_gtv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_gtv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wpackhss (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wpackwss (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wpackdss (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wpackhus (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wpackwus (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wpackdus (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckihb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckihh (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckihw (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckilb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckilh (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckilw (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckehub (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckehuh (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckehuw (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckehsb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckehsh (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckehsw (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckelub (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckeluh (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckeluw (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckelsb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckelsh (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wunpckelsw (TARGET_REALLY_IWMMXT)
+#define HAVE_rorv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_rorv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_rordi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_ashrv4hi3_iwmmxt (TARGET_REALLY_IWMMXT)
+#define HAVE_ashrv2si3_iwmmxt (TARGET_REALLY_IWMMXT)
+#define HAVE_ashrdi3_iwmmxt (TARGET_REALLY_IWMMXT)
+#define HAVE_lshrv4hi3_iwmmxt (TARGET_REALLY_IWMMXT)
+#define HAVE_lshrv2si3_iwmmxt (TARGET_REALLY_IWMMXT)
+#define HAVE_lshrdi3_iwmmxt (TARGET_REALLY_IWMMXT)
+#define HAVE_ashlv4hi3_iwmmxt (TARGET_REALLY_IWMMXT)
+#define HAVE_ashlv2si3_iwmmxt (TARGET_REALLY_IWMMXT)
+#define HAVE_ashldi3_iwmmxt (TARGET_REALLY_IWMMXT)
+#define HAVE_rorv4hi3_di (TARGET_REALLY_IWMMXT)
+#define HAVE_rorv2si3_di (TARGET_REALLY_IWMMXT)
+#define HAVE_rordi3_di (TARGET_REALLY_IWMMXT)
+#define HAVE_ashrv4hi3_di (TARGET_REALLY_IWMMXT)
+#define HAVE_ashrv2si3_di (TARGET_REALLY_IWMMXT)
+#define HAVE_ashrdi3_di (TARGET_REALLY_IWMMXT)
+#define HAVE_lshrv4hi3_di (TARGET_REALLY_IWMMXT)
+#define HAVE_lshrv2si3_di (TARGET_REALLY_IWMMXT)
+#define HAVE_lshrdi3_di (TARGET_REALLY_IWMMXT)
+#define HAVE_ashlv4hi3_di (TARGET_REALLY_IWMMXT)
+#define HAVE_ashlv2si3_di (TARGET_REALLY_IWMMXT)
+#define HAVE_ashldi3_di (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmadds (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmaddu (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tmia (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tmiaph (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tmiabb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tmiatb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tmiabt (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tmiatt (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tmovmskb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tmovmskh (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tmovmskw (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_waccb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wacch (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_waccw (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_waligni (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_walignr (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_walignr0 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_walignr1 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_walignr2 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_walignr3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wsadb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wsadh (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wsadbz (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wsadhz (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wabsv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wabsv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wabsv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wabsdiffb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wabsdiffh (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wabsdiffw (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_waddsubhx (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wsubaddhx (TARGET_REALLY_IWMMXT)
+#define HAVE_addcv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_addcv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_avg4 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_avg4r (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmaddsx (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmaddux (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmaddsn (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmaddun (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmulwsm (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmulwum (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmulsmr (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmulumr (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmulwsmr (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmulwumr (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmulwl (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wqmulm (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wqmulwm (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wqmulmr (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wqmulwmr (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_waddbhusm (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_waddbhusl (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wqmiabb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wqmiabt (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wqmiatb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wqmiatt (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wqmiabbn (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wqmiabtn (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wqmiatbn (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wqmiattn (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiabb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiabt (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiatb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiatt (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiabbn (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiabtn (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiatbn (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiattn (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiawbb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiawbt (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiawtb (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiawtt (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiawbbn (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiawbtn (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiawtbn (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmiawttn (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_wmerge (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tandcv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tandcv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_tandcv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_torcv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_torcv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_torcv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_torvscv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_torvscv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_torvscv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_textrcv2si3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_textrcv4hi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_textrcv8qi3 (TARGET_REALLY_IWMMXT)
+#define HAVE_abshf2 (TARGET_VFP_FP16INST)
+#define HAVE_neghf2 (TARGET_VFP_FP16INST)
+#define HAVE_neon_vrndhf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vrndahf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vrndmhf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vrndnhf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vrndphf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vrndxhf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vrndihf (TARGET_VFP_FP16INST)
+#define HAVE_addhf3 (TARGET_VFP_FP16INST)
+#define HAVE_subhf3 (TARGET_VFP_FP16INST)
+#define HAVE_divhf3 (TARGET_VFP_FP16INST)
+#define HAVE_mulhf3 (TARGET_VFP_FP16INST)
+#define HAVE_fmahf4 (TARGET_VFP_FP16INST)
+#define HAVE_fmasf4 (TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FMA)
+#define HAVE_fmadf4 ((TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FMA && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_fmsubhf4_fp16 (TARGET_VFP_FP16INST)
+#define HAVE_extendhfsf2 (TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FP16 || TARGET_VFP_FP16INST))
+#define HAVE_truncsfhf2 (TARGET_32BIT && TARGET_HARD_FLOAT && (TARGET_FP16 || TARGET_VFP_FP16INST))
+#define HAVE_fixuns_truncsfsi2 (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_fixuns_truncdfsi2 (TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE)
+#define HAVE_floatunssisf2 (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_floatunssidf2 (TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE)
+#define HAVE_neon_vsqrthf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vrsqrtshf (TARGET_VFP_FP16INST)
+#define HAVE_push_fpsysreg_insn (TARGET_HAVE_FPCXT_CMSE && use_cmse)
+#define HAVE_pop_fpsysreg_insn (TARGET_HAVE_FPCXT_CMSE && use_cmse)
+#define HAVE_lazy_store_multiple_insn (use_cmse && reload_completed)
+#define HAVE_lazy_load_multiple_insn (use_cmse && reload_completed)
+#define HAVE_neon_vcvthshf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvthuhf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvthssi (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvthusi (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvths_nhf_unspec (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvthu_nhf_unspec (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvths_nsi_unspec (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvthu_nsi_unspec (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvtahssi (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvtahusi (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvtmhssi (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvtmhusi (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvtnhssi (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvtnhusi (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvtphssi (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvtphusi (TARGET_VFP_FP16INST)
+#define HAVE_btruncsf2 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_ceilsf2 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_floorsf2 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_nearbyintsf2 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_rintsf2 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_roundsf2 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_btruncdf2 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_ceildf2 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_floordf2 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_nearbyintdf2 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_rintdf2 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_rounddf2 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_lceilsfsi2 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_lfloorsfsi2 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_lroundsfsi2 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_lceilusfsi2 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_lfloorusfsi2 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_lroundusfsi2 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_lceildfsi2 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_lfloordfsi2 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_lrounddfsi2 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_lceiludfsi2 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_lfloorudfsi2 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_lroundudfsi2 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_smaxsf3 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_smaxdf3 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_sminsf3 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_smindf3 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_neon_vmaxnmhf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vminnmhf (TARGET_VFP_FP16INST)
+#define HAVE_fmaxsf3 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_fminsf3 (TARGET_HARD_FLOAT && TARGET_VFP5)
+#define HAVE_fmaxdf3 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_fmindf3 ((TARGET_HARD_FLOAT && TARGET_VFP5 && TARGET_VFP_DOUBLE) && (TARGET_VFP_DOUBLE))
+#define HAVE_set_fpscr (TARGET_VFP_BASE)
+#define HAVE_get_fpscr (TARGET_VFP_BASE)
+#define HAVE_no_literal_pool_df_immediate (arm_disable_literal_pool \
+ && TARGET_VFP_BASE \
+ && !arm_const_double_rtx (operands[1]) \
+ && !(TARGET_VFP_DOUBLE && vfp3_const_double_rtx (operands[1])))
+#define HAVE_no_literal_pool_sf_immediate (arm_disable_literal_pool \
+ && TARGET_VFP_BASE \
+ && !vfp3_const_double_rtx (operands[1]))
+#define HAVE_arm_vcx1si (TARGET_CDE && (TARGET_ARM_FP || TARGET_HAVE_MVE))
+#define HAVE_arm_vcx1di (TARGET_CDE && (TARGET_ARM_FP || TARGET_HAVE_MVE))
+#define HAVE_arm_vcx1asi (TARGET_CDE && (TARGET_ARM_FP || TARGET_HAVE_MVE))
+#define HAVE_arm_vcx1adi (TARGET_CDE && (TARGET_ARM_FP || TARGET_HAVE_MVE))
+#define HAVE_arm_vcx2si (TARGET_CDE && (TARGET_ARM_FP || TARGET_HAVE_MVE))
+#define HAVE_arm_vcx2di (TARGET_CDE && (TARGET_ARM_FP || TARGET_HAVE_MVE))
+#define HAVE_arm_vcx2asi (TARGET_CDE && (TARGET_ARM_FP || TARGET_HAVE_MVE))
+#define HAVE_arm_vcx2adi (TARGET_CDE && (TARGET_ARM_FP || TARGET_HAVE_MVE))
+#define HAVE_arm_vcx3si (TARGET_CDE && (TARGET_ARM_FP || TARGET_HAVE_MVE))
+#define HAVE_arm_vcx3di (TARGET_CDE && (TARGET_ARM_FP || TARGET_HAVE_MVE))
+#define HAVE_arm_vcx3asi (TARGET_CDE && (TARGET_ARM_FP || TARGET_HAVE_MVE))
+#define HAVE_arm_vcx3adi (TARGET_CDE && (TARGET_ARM_FP || TARGET_HAVE_MVE))
+#define HAVE_thumb1_subsi3_insn (TARGET_THUMB1)
+#define HAVE_thumb1_bicsi3 (TARGET_THUMB1)
+#define HAVE_thumb1_extendhisi2 (TARGET_THUMB1)
+#define HAVE_thumb1_extendqisi2 (TARGET_THUMB1)
+#define HAVE_cpymem12b (TARGET_THUMB1)
+#define HAVE_cpymem8b (TARGET_THUMB1)
+#define HAVE_thumb1_cbz (TARGET_THUMB1 && TARGET_HAVE_CBZ)
+#define HAVE_cbranchsi4_insn (TARGET_THUMB1)
+#define HAVE_cbranchsi4_scratch (TARGET_THUMB1)
+#define HAVE_cstoresi_nltu_thumb1 (TARGET_THUMB1)
+#define HAVE_cstoresi_ltu_thumb1 (TARGET_THUMB1)
+#define HAVE_thumb1_addsi3_addgeu (TARGET_THUMB1)
+#define HAVE_thumb1_casesi_dispatch (TARGET_THUMB1)
+#define HAVE_prologue_thumb1_interwork (TARGET_THUMB1)
+#define HAVE_thumb_eh_return (TARGET_THUMB1)
+#define HAVE_thumb1_stack_protect_test_insn (TARGET_THUMB1)
+#define HAVE_tls_load_dot_plus_four (TARGET_THUMB2)
+#define HAVE_thumb2_zero_extendqisi2_v6 (TARGET_THUMB2 && arm_arch6)
+#define HAVE_thumb2_eh_return (TARGET_THUMB2)
+#define HAVE_thumb2_addsi3_compare0 (TARGET_THUMB2)
+#define HAVE_thumb2_asrl (TARGET_HAVE_MVE)
+#define HAVE_thumb2_lsll (TARGET_HAVE_MVE)
+#define HAVE_thumb2_lsrl (TARGET_HAVE_MVE)
+#define HAVE_dls_insn (TARGET_32BIT && TARGET_HAVE_LOB)
+#define HAVE_unaligned_storev8qi (TARGET_NEON)
+#define HAVE_vec_setv8qi_internal (TARGET_NEON)
+#define HAVE_vec_setv4hi_internal (TARGET_NEON)
+#define HAVE_vec_setv4hf_internal (TARGET_NEON)
+#define HAVE_vec_setv4bf_internal (TARGET_NEON)
+#define HAVE_vec_setv2si_internal (TARGET_NEON)
+#define HAVE_vec_setv2sf_internal (TARGET_NEON)
+#define HAVE_vec_setv16qi_internal (TARGET_NEON)
+#define HAVE_vec_setv8hi_internal (TARGET_NEON)
+#define HAVE_vec_setv8hf_internal (TARGET_NEON)
+#define HAVE_vec_setv4si_internal (TARGET_NEON)
+#define HAVE_vec_setv4sf_internal (TARGET_NEON)
+#define HAVE_vec_setv2di_internal (TARGET_NEON)
+#define HAVE_vec_extractv8qiqi (TARGET_NEON)
+#define HAVE_vec_extractv4hihi (TARGET_NEON)
+#define HAVE_vec_extractv4hfhf (TARGET_NEON)
+#define HAVE_vec_extractv4bfbf (TARGET_NEON)
+#define HAVE_vec_extractv2sisi (TARGET_NEON)
+#define HAVE_vec_extractv2sfsf (TARGET_NEON)
+#define HAVE_neon_vec_extractv16qiqi (TARGET_NEON)
+#define HAVE_neon_vec_extractv8hihi (TARGET_NEON)
+#define HAVE_neon_vec_extractv8hfhf (TARGET_NEON)
+#define HAVE_neon_vec_extractv4sisi (TARGET_NEON)
+#define HAVE_neon_vec_extractv4sfsf (TARGET_NEON)
+#define HAVE_neon_vec_extractv2didi (TARGET_NEON)
+#define HAVE_mulv8qi3addv8qi_neon (ARM_HAVE_NEON_V8QI_ARITH)
+#define HAVE_mulv16qi3addv16qi_neon (ARM_HAVE_NEON_V16QI_ARITH)
+#define HAVE_mulv4hi3addv4hi_neon (ARM_HAVE_NEON_V4HI_ARITH)
+#define HAVE_mulv8hi3addv8hi_neon (ARM_HAVE_NEON_V8HI_ARITH)
+#define HAVE_mulv2si3addv2si_neon (ARM_HAVE_NEON_V2SI_ARITH)
+#define HAVE_mulv4si3addv4si_neon (ARM_HAVE_NEON_V4SI_ARITH)
+#define HAVE_mulv2sf3addv2sf_neon (ARM_HAVE_NEON_V2SF_ARITH)
+#define HAVE_mulv4sf3addv4sf_neon (ARM_HAVE_NEON_V4SF_ARITH)
+#define HAVE_mulv8hf3addv8hf_neon (ARM_HAVE_NEON_V8HF_ARITH)
+#define HAVE_mulv4hf3addv4hf_neon (ARM_HAVE_NEON_V4HF_ARITH)
+#define HAVE_mulv8qi3negv8qiaddv8qi_neon (ARM_HAVE_NEON_V8QI_ARITH)
+#define HAVE_mulv16qi3negv16qiaddv16qi_neon (ARM_HAVE_NEON_V16QI_ARITH)
+#define HAVE_mulv4hi3negv4hiaddv4hi_neon (ARM_HAVE_NEON_V4HI_ARITH)
+#define HAVE_mulv8hi3negv8hiaddv8hi_neon (ARM_HAVE_NEON_V8HI_ARITH)
+#define HAVE_mulv2si3negv2siaddv2si_neon (ARM_HAVE_NEON_V2SI_ARITH)
+#define HAVE_mulv4si3negv4siaddv4si_neon (ARM_HAVE_NEON_V4SI_ARITH)
+#define HAVE_mulv2sf3negv2sfaddv2sf_neon (ARM_HAVE_NEON_V2SF_ARITH)
+#define HAVE_mulv4sf3negv4sfaddv4sf_neon (ARM_HAVE_NEON_V4SF_ARITH)
+#define HAVE_fmav2sf4 (ARM_HAVE_NEON_V2SF_ARITH && TARGET_FMA)
+#define HAVE_fmav4sf4 (ARM_HAVE_NEON_V4SF_ARITH && TARGET_FMA)
+#define HAVE_fmav2sf4_intrinsic (TARGET_NEON && TARGET_FMA)
+#define HAVE_fmav4sf4_intrinsic (TARGET_NEON && TARGET_FMA)
+#define HAVE_fmav8hf4 (ARM_HAVE_NEON_V8HF_ARITH)
+#define HAVE_fmav4hf4 (ARM_HAVE_NEON_V4HF_ARITH)
+#define HAVE_fmsubv2sf4_intrinsic (TARGET_NEON && TARGET_FMA)
+#define HAVE_fmsubv4sf4_intrinsic (TARGET_NEON && TARGET_FMA)
+#define HAVE_fmsubv8hf4_intrinsic (TARGET_NEON_FP16INST)
+#define HAVE_fmsubv4hf4_intrinsic (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrintpv2sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vrintzv2sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vrintmv2sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vrintxv2sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vrintav2sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vrintnv2sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vrintpv4sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vrintzv4sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vrintmv4sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vrintxv4sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vrintav4sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vrintnv4sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vcvtpv2sfv2si (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vcvtmv2sfv2si (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vcvtav2sfv2si (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vcvtpuv2sfv2si (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vcvtmuv2sfv2si (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vcvtauv2sfv2si (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vcvtpv4sfv4si (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vcvtmv4sfv4si (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vcvtav4sfv4si (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vcvtpuv4sfv4si (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vcvtmuv4sfv4si (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vcvtauv4sfv4si (TARGET_NEON && TARGET_VFP5)
+#define HAVE_iorv8qi3_neon (TARGET_NEON)
+#define HAVE_iorv16qi3_neon (TARGET_NEON)
+#define HAVE_iorv4hi3_neon (TARGET_NEON)
+#define HAVE_iorv8hi3_neon (TARGET_NEON)
+#define HAVE_iorv2si3_neon (TARGET_NEON)
+#define HAVE_iorv4si3_neon (TARGET_NEON)
+#define HAVE_iorv4hf3_neon (TARGET_NEON)
+#define HAVE_iorv8hf3_neon (TARGET_NEON)
+#define HAVE_iorv2sf3_neon (TARGET_NEON)
+#define HAVE_iorv4sf3_neon (TARGET_NEON)
+#define HAVE_iorv2di3_neon (TARGET_NEON)
+#define HAVE_andv8qi3_neon (TARGET_NEON)
+#define HAVE_andv16qi3_neon (TARGET_NEON)
+#define HAVE_andv4hi3_neon (TARGET_NEON)
+#define HAVE_andv8hi3_neon (TARGET_NEON)
+#define HAVE_andv2si3_neon (TARGET_NEON)
+#define HAVE_andv4si3_neon (TARGET_NEON)
+#define HAVE_andv4hf3_neon (TARGET_NEON)
+#define HAVE_andv8hf3_neon (TARGET_NEON)
+#define HAVE_andv2sf3_neon (TARGET_NEON)
+#define HAVE_andv4sf3_neon (TARGET_NEON)
+#define HAVE_andv2di3_neon (TARGET_NEON)
+#define HAVE_ornv8qi3_neon (TARGET_NEON)
+#define HAVE_ornv16qi3_neon (TARGET_NEON)
+#define HAVE_ornv4hi3_neon (TARGET_NEON)
+#define HAVE_ornv8hi3_neon (TARGET_NEON)
+#define HAVE_ornv2si3_neon (TARGET_NEON)
+#define HAVE_ornv4si3_neon (TARGET_NEON)
+#define HAVE_ornv4hf3_neon (TARGET_NEON)
+#define HAVE_ornv8hf3_neon (TARGET_NEON)
+#define HAVE_ornv2sf3_neon (TARGET_NEON)
+#define HAVE_ornv4sf3_neon (TARGET_NEON)
+#define HAVE_ornv2di3_neon (TARGET_NEON)
+#define HAVE_bicv8qi3_neon (TARGET_NEON)
+#define HAVE_bicv16qi3_neon (TARGET_NEON)
+#define HAVE_bicv4hi3_neon (TARGET_NEON)
+#define HAVE_bicv8hi3_neon (TARGET_NEON)
+#define HAVE_bicv2si3_neon (TARGET_NEON)
+#define HAVE_bicv4si3_neon (TARGET_NEON)
+#define HAVE_bicv4hf3_neon (TARGET_NEON)
+#define HAVE_bicv8hf3_neon (TARGET_NEON)
+#define HAVE_bicv2sf3_neon (TARGET_NEON)
+#define HAVE_bicv4sf3_neon (TARGET_NEON)
+#define HAVE_bicv2di3_neon (TARGET_NEON)
+#define HAVE_xorv8qi3_neon (TARGET_NEON)
+#define HAVE_xorv16qi3_neon (TARGET_NEON)
+#define HAVE_xorv4hi3_neon (TARGET_NEON)
+#define HAVE_xorv8hi3_neon (TARGET_NEON)
+#define HAVE_xorv2si3_neon (TARGET_NEON)
+#define HAVE_xorv4si3_neon (TARGET_NEON)
+#define HAVE_xorv4hf3_neon (TARGET_NEON)
+#define HAVE_xorv8hf3_neon (TARGET_NEON)
+#define HAVE_xorv2sf3_neon (TARGET_NEON)
+#define HAVE_xorv4sf3_neon (TARGET_NEON)
+#define HAVE_xorv2di3_neon (TARGET_NEON)
+#define HAVE_one_cmplv8qi2_neon (TARGET_NEON)
+#define HAVE_one_cmplv16qi2_neon (TARGET_NEON)
+#define HAVE_one_cmplv4hi2_neon (TARGET_NEON)
+#define HAVE_one_cmplv8hi2_neon (TARGET_NEON)
+#define HAVE_one_cmplv2si2_neon (TARGET_NEON)
+#define HAVE_one_cmplv4si2_neon (TARGET_NEON)
+#define HAVE_one_cmplv4hf2_neon (TARGET_NEON)
+#define HAVE_one_cmplv8hf2_neon (TARGET_NEON)
+#define HAVE_one_cmplv2sf2_neon (TARGET_NEON)
+#define HAVE_one_cmplv4sf2_neon (TARGET_NEON)
+#define HAVE_one_cmplv2di2_neon (TARGET_NEON)
+#define HAVE_neon_absv8qi2 (TARGET_NEON)
+#define HAVE_neon_absv16qi2 (TARGET_NEON)
+#define HAVE_neon_absv4hi2 (TARGET_NEON)
+#define HAVE_neon_absv8hi2 (TARGET_NEON)
+#define HAVE_neon_absv2si2 (TARGET_NEON)
+#define HAVE_neon_absv4si2 (TARGET_NEON)
+#define HAVE_neon_absv2sf2 (TARGET_NEON)
+#define HAVE_neon_absv4sf2 (TARGET_NEON)
+#define HAVE_neon_negv8qi2 (TARGET_NEON)
+#define HAVE_neon_negv16qi2 (TARGET_NEON)
+#define HAVE_neon_negv4hi2 (TARGET_NEON)
+#define HAVE_neon_negv8hi2 (TARGET_NEON)
+#define HAVE_neon_negv2si2 (TARGET_NEON)
+#define HAVE_neon_negv4si2 (TARGET_NEON)
+#define HAVE_neon_negv2sf2 (TARGET_NEON)
+#define HAVE_neon_negv4sf2 (TARGET_NEON)
+#define HAVE_neon_absv8hf2 (TARGET_NEON_FP16INST)
+#define HAVE_neon_negv8hf2 (TARGET_NEON_FP16INST)
+#define HAVE_neon_absv4hf2 (TARGET_NEON_FP16INST)
+#define HAVE_neon_negv4hf2 (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrndv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrndav8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrndmv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrndnv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrndpv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrndxv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrndv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrndav4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrndmv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrndnv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrndpv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrndxv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrsqrtev8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrsqrtev4hf (TARGET_NEON_FP16INST)
+#define HAVE_vashrv8qi3_imm (TARGET_NEON)
+#define HAVE_vashrv16qi3_imm (TARGET_NEON)
+#define HAVE_vashrv4hi3_imm (TARGET_NEON)
+#define HAVE_vashrv8hi3_imm (TARGET_NEON)
+#define HAVE_vashrv2si3_imm (TARGET_NEON)
+#define HAVE_vashrv4si3_imm (TARGET_NEON)
+#define HAVE_vlshrv8qi3_imm (TARGET_NEON)
+#define HAVE_vlshrv16qi3_imm (TARGET_NEON)
+#define HAVE_vlshrv4hi3_imm (TARGET_NEON)
+#define HAVE_vlshrv8hi3_imm (TARGET_NEON)
+#define HAVE_vlshrv2si3_imm (TARGET_NEON)
+#define HAVE_vlshrv4si3_imm (TARGET_NEON)
+#define HAVE_ashlv8qi3_signed (TARGET_NEON)
+#define HAVE_ashlv16qi3_signed (TARGET_NEON)
+#define HAVE_ashlv4hi3_signed (TARGET_NEON)
+#define HAVE_ashlv8hi3_signed (TARGET_NEON)
+#define HAVE_ashlv2si3_signed (TARGET_NEON)
+#define HAVE_ashlv4si3_signed (TARGET_NEON)
+#define HAVE_ashlv2di3_signed (TARGET_NEON)
+#define HAVE_ashlv8qi3_unsigned (TARGET_NEON)
+#define HAVE_ashlv16qi3_unsigned (TARGET_NEON)
+#define HAVE_ashlv4hi3_unsigned (TARGET_NEON)
+#define HAVE_ashlv8hi3_unsigned (TARGET_NEON)
+#define HAVE_ashlv2si3_unsigned (TARGET_NEON)
+#define HAVE_ashlv4si3_unsigned (TARGET_NEON)
+#define HAVE_ashlv2di3_unsigned (TARGET_NEON)
+#define HAVE_neon_load_count (TARGET_NEON)
+#define HAVE_vec_sel_widen_ssum_lov16qiv8qi3 (TARGET_NEON)
+#define HAVE_vec_sel_widen_ssum_lov8hiv4hi3 (TARGET_NEON)
+#define HAVE_vec_sel_widen_ssum_lov4siv2si3 (TARGET_NEON)
+#define HAVE_vec_sel_widen_ssum_hiv16qiv8qi3 (TARGET_NEON)
+#define HAVE_vec_sel_widen_ssum_hiv8hiv4hi3 (TARGET_NEON)
+#define HAVE_vec_sel_widen_ssum_hiv4siv2si3 (TARGET_NEON)
+#define HAVE_widen_ssumv8qi3 (TARGET_NEON)
+#define HAVE_widen_ssumv4hi3 (TARGET_NEON)
+#define HAVE_widen_ssumv2si3 (TARGET_NEON)
+#define HAVE_vec_sel_widen_usum_lov16qiv8qi3 (TARGET_NEON)
+#define HAVE_vec_sel_widen_usum_lov8hiv4hi3 (TARGET_NEON)
+#define HAVE_vec_sel_widen_usum_lov4siv2si3 (TARGET_NEON)
+#define HAVE_vec_sel_widen_usum_hiv16qiv8qi3 (TARGET_NEON)
+#define HAVE_vec_sel_widen_usum_hiv8hiv4hi3 (TARGET_NEON)
+#define HAVE_vec_sel_widen_usum_hiv4siv2si3 (TARGET_NEON)
+#define HAVE_widen_usumv8qi3 (TARGET_NEON)
+#define HAVE_widen_usumv4hi3 (TARGET_NEON)
+#define HAVE_widen_usumv2si3 (TARGET_NEON)
+#define HAVE_quad_halves_plusv4si (TARGET_NEON)
+#define HAVE_quad_halves_sminv4si (TARGET_NEON)
+#define HAVE_quad_halves_smaxv4si (TARGET_NEON)
+#define HAVE_quad_halves_uminv4si (TARGET_NEON)
+#define HAVE_quad_halves_umaxv4si (TARGET_NEON)
+#define HAVE_quad_halves_plusv4sf (ARM_HAVE_NEON_V4SF_ARITH)
+#define HAVE_quad_halves_sminv4sf (ARM_HAVE_NEON_V4SF_ARITH)
+#define HAVE_quad_halves_smaxv4sf (ARM_HAVE_NEON_V4SF_ARITH)
+#define HAVE_quad_halves_plusv8hi (TARGET_NEON)
+#define HAVE_quad_halves_sminv8hi (TARGET_NEON)
+#define HAVE_quad_halves_smaxv8hi (TARGET_NEON)
+#define HAVE_quad_halves_uminv8hi (TARGET_NEON)
+#define HAVE_quad_halves_umaxv8hi (TARGET_NEON)
+#define HAVE_quad_halves_plusv16qi (TARGET_NEON)
+#define HAVE_quad_halves_sminv16qi (TARGET_NEON)
+#define HAVE_quad_halves_smaxv16qi (TARGET_NEON)
+#define HAVE_quad_halves_uminv16qi (TARGET_NEON)
+#define HAVE_quad_halves_umaxv16qi (TARGET_NEON)
+#define HAVE_arm_reduc_plus_internal_v2di (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vpadd_internalv8qi (TARGET_NEON)
+#define HAVE_neon_vpadd_internalv4hi (TARGET_NEON)
+#define HAVE_neon_vpadd_internalv2si (TARGET_NEON)
+#define HAVE_neon_vpadd_internalv2sf (TARGET_NEON)
+#define HAVE_neon_vpaddv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vpsminv8qi (TARGET_NEON)
+#define HAVE_neon_vpsminv4hi (TARGET_NEON)
+#define HAVE_neon_vpsminv2si (TARGET_NEON)
+#define HAVE_neon_vpsminv2sf (TARGET_NEON)
+#define HAVE_neon_vpsmaxv8qi (TARGET_NEON)
+#define HAVE_neon_vpsmaxv4hi (TARGET_NEON)
+#define HAVE_neon_vpsmaxv2si (TARGET_NEON)
+#define HAVE_neon_vpsmaxv2sf (TARGET_NEON)
+#define HAVE_neon_vpuminv8qi (TARGET_NEON)
+#define HAVE_neon_vpuminv4hi (TARGET_NEON)
+#define HAVE_neon_vpuminv2si (TARGET_NEON)
+#define HAVE_neon_vpumaxv8qi (TARGET_NEON)
+#define HAVE_neon_vpumaxv4hi (TARGET_NEON)
+#define HAVE_neon_vpumaxv2si (TARGET_NEON)
+#define HAVE_neon_vaddv2sf_unspec (TARGET_NEON)
+#define HAVE_neon_vaddv4sf_unspec (TARGET_NEON)
+#define HAVE_neon_vaddlsv8qi (TARGET_NEON)
+#define HAVE_neon_vaddluv8qi (TARGET_NEON)
+#define HAVE_neon_vaddlsv4hi (TARGET_NEON)
+#define HAVE_neon_vaddluv4hi (TARGET_NEON)
+#define HAVE_neon_vaddlsv2si (TARGET_NEON)
+#define HAVE_neon_vaddluv2si (TARGET_NEON)
+#define HAVE_neon_vaddwsv8qi (TARGET_NEON)
+#define HAVE_neon_vaddwuv8qi (TARGET_NEON)
+#define HAVE_neon_vaddwsv4hi (TARGET_NEON)
+#define HAVE_neon_vaddwuv4hi (TARGET_NEON)
+#define HAVE_neon_vaddwsv2si (TARGET_NEON)
+#define HAVE_neon_vaddwuv2si (TARGET_NEON)
+#define HAVE_neon_vrhaddsv8qi (TARGET_NEON)
+#define HAVE_neon_vrhadduv8qi (TARGET_NEON)
+#define HAVE_neon_vhaddsv8qi (TARGET_NEON)
+#define HAVE_neon_vhadduv8qi (TARGET_NEON)
+#define HAVE_neon_vrhaddsv16qi (TARGET_NEON)
+#define HAVE_neon_vrhadduv16qi (TARGET_NEON)
+#define HAVE_neon_vhaddsv16qi (TARGET_NEON)
+#define HAVE_neon_vhadduv16qi (TARGET_NEON)
+#define HAVE_neon_vrhaddsv4hi (TARGET_NEON)
+#define HAVE_neon_vrhadduv4hi (TARGET_NEON)
+#define HAVE_neon_vhaddsv4hi (TARGET_NEON)
+#define HAVE_neon_vhadduv4hi (TARGET_NEON)
+#define HAVE_neon_vrhaddsv8hi (TARGET_NEON)
+#define HAVE_neon_vrhadduv8hi (TARGET_NEON)
+#define HAVE_neon_vhaddsv8hi (TARGET_NEON)
+#define HAVE_neon_vhadduv8hi (TARGET_NEON)
+#define HAVE_neon_vrhaddsv2si (TARGET_NEON)
+#define HAVE_neon_vrhadduv2si (TARGET_NEON)
+#define HAVE_neon_vhaddsv2si (TARGET_NEON)
+#define HAVE_neon_vhadduv2si (TARGET_NEON)
+#define HAVE_neon_vrhaddsv4si (TARGET_NEON)
+#define HAVE_neon_vrhadduv4si (TARGET_NEON)
+#define HAVE_neon_vhaddsv4si (TARGET_NEON)
+#define HAVE_neon_vhadduv4si (TARGET_NEON)
+#define HAVE_neon_vqaddsv8qi (TARGET_NEON)
+#define HAVE_neon_vqadduv8qi (TARGET_NEON)
+#define HAVE_neon_vqaddsv16qi (TARGET_NEON)
+#define HAVE_neon_vqadduv16qi (TARGET_NEON)
+#define HAVE_neon_vqaddsv4hi (TARGET_NEON)
+#define HAVE_neon_vqadduv4hi (TARGET_NEON)
+#define HAVE_neon_vqaddsv8hi (TARGET_NEON)
+#define HAVE_neon_vqadduv8hi (TARGET_NEON)
+#define HAVE_neon_vqaddsv2si (TARGET_NEON)
+#define HAVE_neon_vqadduv2si (TARGET_NEON)
+#define HAVE_neon_vqaddsv4si (TARGET_NEON)
+#define HAVE_neon_vqadduv4si (TARGET_NEON)
+#define HAVE_neon_vqaddsdi (TARGET_NEON)
+#define HAVE_neon_vqaddudi (TARGET_NEON)
+#define HAVE_neon_vqaddsv2di (TARGET_NEON)
+#define HAVE_neon_vqadduv2di (TARGET_NEON)
+#define HAVE_neon_vaddhnv8hi (TARGET_NEON)
+#define HAVE_neon_vraddhnv8hi (TARGET_NEON)
+#define HAVE_neon_vaddhnv4si (TARGET_NEON)
+#define HAVE_neon_vraddhnv4si (TARGET_NEON)
+#define HAVE_neon_vaddhnv2di (TARGET_NEON)
+#define HAVE_neon_vraddhnv2di (TARGET_NEON)
+#define HAVE_neon_vmulpv8qi (TARGET_NEON)
+#define HAVE_neon_vmulpv16qi (TARGET_NEON)
+#define HAVE_neon_vmulfv2sf (TARGET_NEON)
+#define HAVE_neon_vmulfv4sf (TARGET_NEON)
+#define HAVE_neon_vmulfv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vmulfv4hf (TARGET_NEON_FP16INST)
+#define HAVE_vfmal_lowv2sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmal_lowv4sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmsl_highv2sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmsl_highv4sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmal_highv2sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmal_highv4sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmsl_lowv2sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmsl_lowv4sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmal_lane_lowv2sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmal_lane_lowv4sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmal_lane_lowv8hfv2sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmal_lane_lowv4hfv4sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmal_lane_highv8hfv2sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmal_lane_highv4hfv4sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmal_lane_highv2sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmal_lane_highv4sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmsl_lane_lowv2sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmsl_lane_lowv4sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmsl_lane_lowv8hfv2sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmsl_lane_lowv4hfv4sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmsl_lane_highv8hfv2sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmsl_lane_highv4hfv4sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmsl_lane_highv2sf_intrinsic (TARGET_FP16FML)
+#define HAVE_vfmsl_lane_highv4sf_intrinsic (TARGET_FP16FML)
+#define HAVE_neon_vmlav8qi_unspec (TARGET_NEON)
+#define HAVE_neon_vmlav16qi_unspec (TARGET_NEON)
+#define HAVE_neon_vmlav4hi_unspec (TARGET_NEON)
+#define HAVE_neon_vmlav8hi_unspec (TARGET_NEON)
+#define HAVE_neon_vmlav2si_unspec (TARGET_NEON)
+#define HAVE_neon_vmlav4si_unspec (TARGET_NEON)
+#define HAVE_neon_vmlav2sf_unspec (TARGET_NEON)
+#define HAVE_neon_vmlav4sf_unspec (TARGET_NEON)
+#define HAVE_neon_vmlalsv8qi (TARGET_NEON)
+#define HAVE_neon_vmlaluv8qi (TARGET_NEON)
+#define HAVE_neon_vmlalsv4hi (TARGET_NEON)
+#define HAVE_neon_vmlaluv4hi (TARGET_NEON)
+#define HAVE_neon_vmlalsv2si (TARGET_NEON)
+#define HAVE_neon_vmlaluv2si (TARGET_NEON)
+#define HAVE_neon_vmlsv8qi_unspec (TARGET_NEON)
+#define HAVE_neon_vmlsv16qi_unspec (TARGET_NEON)
+#define HAVE_neon_vmlsv4hi_unspec (TARGET_NEON)
+#define HAVE_neon_vmlsv8hi_unspec (TARGET_NEON)
+#define HAVE_neon_vmlsv2si_unspec (TARGET_NEON)
+#define HAVE_neon_vmlsv4si_unspec (TARGET_NEON)
+#define HAVE_neon_vmlsv2sf_unspec (TARGET_NEON)
+#define HAVE_neon_vmlsv4sf_unspec (TARGET_NEON)
+#define HAVE_neon_vmlslsv8qi (TARGET_NEON)
+#define HAVE_neon_vmlsluv8qi (TARGET_NEON)
+#define HAVE_neon_vmlslsv4hi (TARGET_NEON)
+#define HAVE_neon_vmlsluv4hi (TARGET_NEON)
+#define HAVE_neon_vmlslsv2si (TARGET_NEON)
+#define HAVE_neon_vmlsluv2si (TARGET_NEON)
+#define HAVE_neon_vqdmulhv4hi (TARGET_NEON)
+#define HAVE_neon_vqrdmulhv4hi (TARGET_NEON)
+#define HAVE_neon_vqdmulhv2si (TARGET_NEON)
+#define HAVE_neon_vqrdmulhv2si (TARGET_NEON)
+#define HAVE_neon_vqdmulhv8hi (TARGET_NEON)
+#define HAVE_neon_vqrdmulhv8hi (TARGET_NEON)
+#define HAVE_neon_vqdmulhv4si (TARGET_NEON)
+#define HAVE_neon_vqrdmulhv4si (TARGET_NEON)
+#define HAVE_neon_vqrdmlahv4hi (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlshv4hi (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlahv2si (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlshv2si (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlahv8hi (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlshv8hi (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlahv4si (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlshv4si (TARGET_NEON_RDMA)
+#define HAVE_neon_vqdmlalv4hi (TARGET_NEON)
+#define HAVE_neon_vqdmlalv2si (TARGET_NEON)
+#define HAVE_neon_vqdmlslv4hi (TARGET_NEON)
+#define HAVE_neon_vqdmlslv2si (TARGET_NEON)
+#define HAVE_neon_vmullsv8qi (TARGET_NEON)
+#define HAVE_neon_vmulluv8qi (TARGET_NEON)
+#define HAVE_neon_vmullpv8qi (TARGET_NEON)
+#define HAVE_neon_vmullsv4hi (TARGET_NEON)
+#define HAVE_neon_vmulluv4hi (TARGET_NEON)
+#define HAVE_neon_vmullpv4hi (TARGET_NEON)
+#define HAVE_neon_vmullsv2si (TARGET_NEON)
+#define HAVE_neon_vmulluv2si (TARGET_NEON)
+#define HAVE_neon_vmullpv2si (TARGET_NEON)
+#define HAVE_neon_vqdmullv4hi (TARGET_NEON)
+#define HAVE_neon_vqdmullv2si (TARGET_NEON)
+#define HAVE_neon_vsubv2sf_unspec (TARGET_NEON)
+#define HAVE_neon_vsubv4sf_unspec (TARGET_NEON)
+#define HAVE_neon_vsublsv8qi (TARGET_NEON)
+#define HAVE_neon_vsubluv8qi (TARGET_NEON)
+#define HAVE_neon_vsublsv4hi (TARGET_NEON)
+#define HAVE_neon_vsubluv4hi (TARGET_NEON)
+#define HAVE_neon_vsublsv2si (TARGET_NEON)
+#define HAVE_neon_vsubluv2si (TARGET_NEON)
+#define HAVE_neon_vsubwsv8qi (TARGET_NEON)
+#define HAVE_neon_vsubwuv8qi (TARGET_NEON)
+#define HAVE_neon_vsubwsv4hi (TARGET_NEON)
+#define HAVE_neon_vsubwuv4hi (TARGET_NEON)
+#define HAVE_neon_vsubwsv2si (TARGET_NEON)
+#define HAVE_neon_vsubwuv2si (TARGET_NEON)
+#define HAVE_neon_vqsubsv8qi (TARGET_NEON)
+#define HAVE_neon_vqsubuv8qi (TARGET_NEON)
+#define HAVE_neon_vqsubsv16qi (TARGET_NEON)
+#define HAVE_neon_vqsubuv16qi (TARGET_NEON)
+#define HAVE_neon_vqsubsv4hi (TARGET_NEON)
+#define HAVE_neon_vqsubuv4hi (TARGET_NEON)
+#define HAVE_neon_vqsubsv8hi (TARGET_NEON)
+#define HAVE_neon_vqsubuv8hi (TARGET_NEON)
+#define HAVE_neon_vqsubsv2si (TARGET_NEON)
+#define HAVE_neon_vqsubuv2si (TARGET_NEON)
+#define HAVE_neon_vqsubsv4si (TARGET_NEON)
+#define HAVE_neon_vqsubuv4si (TARGET_NEON)
+#define HAVE_neon_vqsubsdi (TARGET_NEON)
+#define HAVE_neon_vqsubudi (TARGET_NEON)
+#define HAVE_neon_vqsubsv2di (TARGET_NEON)
+#define HAVE_neon_vqsubuv2di (TARGET_NEON)
+#define HAVE_neon_vhsubsv8qi (TARGET_NEON)
+#define HAVE_neon_vhsubuv8qi (TARGET_NEON)
+#define HAVE_neon_vhsubsv16qi (TARGET_NEON)
+#define HAVE_neon_vhsubuv16qi (TARGET_NEON)
+#define HAVE_neon_vhsubsv4hi (TARGET_NEON)
+#define HAVE_neon_vhsubuv4hi (TARGET_NEON)
+#define HAVE_neon_vhsubsv8hi (TARGET_NEON)
+#define HAVE_neon_vhsubuv8hi (TARGET_NEON)
+#define HAVE_neon_vhsubsv2si (TARGET_NEON)
+#define HAVE_neon_vhsubuv2si (TARGET_NEON)
+#define HAVE_neon_vhsubsv4si (TARGET_NEON)
+#define HAVE_neon_vhsubuv4si (TARGET_NEON)
+#define HAVE_neon_vsubhnv8hi (TARGET_NEON)
+#define HAVE_neon_vrsubhnv8hi (TARGET_NEON)
+#define HAVE_neon_vsubhnv4si (TARGET_NEON)
+#define HAVE_neon_vrsubhnv4si (TARGET_NEON)
+#define HAVE_neon_vsubhnv2di (TARGET_NEON)
+#define HAVE_neon_vrsubhnv2di (TARGET_NEON)
+#define HAVE_neon_vceqv8qi_insn (TARGET_NEON && !(GET_MODE_CLASS (V8QImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgtv8qi_insn (TARGET_NEON && !(GET_MODE_CLASS (V8QImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgev8qi_insn (TARGET_NEON && !(GET_MODE_CLASS (V8QImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vclev8qi_insn (TARGET_NEON && !(GET_MODE_CLASS (V8QImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcltv8qi_insn (TARGET_NEON && !(GET_MODE_CLASS (V8QImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vceqv16qi_insn (TARGET_NEON && !(GET_MODE_CLASS (V16QImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgtv16qi_insn (TARGET_NEON && !(GET_MODE_CLASS (V16QImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgev16qi_insn (TARGET_NEON && !(GET_MODE_CLASS (V16QImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vclev16qi_insn (TARGET_NEON && !(GET_MODE_CLASS (V16QImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcltv16qi_insn (TARGET_NEON && !(GET_MODE_CLASS (V16QImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vceqv4hi_insn (TARGET_NEON && !(GET_MODE_CLASS (V4HImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgtv4hi_insn (TARGET_NEON && !(GET_MODE_CLASS (V4HImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgev4hi_insn (TARGET_NEON && !(GET_MODE_CLASS (V4HImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vclev4hi_insn (TARGET_NEON && !(GET_MODE_CLASS (V4HImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcltv4hi_insn (TARGET_NEON && !(GET_MODE_CLASS (V4HImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vceqv8hi_insn (TARGET_NEON && !(GET_MODE_CLASS (V8HImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgtv8hi_insn (TARGET_NEON && !(GET_MODE_CLASS (V8HImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgev8hi_insn (TARGET_NEON && !(GET_MODE_CLASS (V8HImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vclev8hi_insn (TARGET_NEON && !(GET_MODE_CLASS (V8HImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcltv8hi_insn (TARGET_NEON && !(GET_MODE_CLASS (V8HImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vceqv2si_insn (TARGET_NEON && !(GET_MODE_CLASS (V2SImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgtv2si_insn (TARGET_NEON && !(GET_MODE_CLASS (V2SImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgev2si_insn (TARGET_NEON && !(GET_MODE_CLASS (V2SImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vclev2si_insn (TARGET_NEON && !(GET_MODE_CLASS (V2SImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcltv2si_insn (TARGET_NEON && !(GET_MODE_CLASS (V2SImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vceqv4si_insn (TARGET_NEON && !(GET_MODE_CLASS (V4SImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgtv4si_insn (TARGET_NEON && !(GET_MODE_CLASS (V4SImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgev4si_insn (TARGET_NEON && !(GET_MODE_CLASS (V4SImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vclev4si_insn (TARGET_NEON && !(GET_MODE_CLASS (V4SImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcltv4si_insn (TARGET_NEON && !(GET_MODE_CLASS (V4SImode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vceqv2sf_insn (TARGET_NEON && !(GET_MODE_CLASS (V2SFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgtv2sf_insn (TARGET_NEON && !(GET_MODE_CLASS (V2SFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgev2sf_insn (TARGET_NEON && !(GET_MODE_CLASS (V2SFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vclev2sf_insn (TARGET_NEON && !(GET_MODE_CLASS (V2SFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcltv2sf_insn (TARGET_NEON && !(GET_MODE_CLASS (V2SFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vceqv4sf_insn (TARGET_NEON && !(GET_MODE_CLASS (V4SFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgtv4sf_insn (TARGET_NEON && !(GET_MODE_CLASS (V4SFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgev4sf_insn (TARGET_NEON && !(GET_MODE_CLASS (V4SFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vclev4sf_insn (TARGET_NEON && !(GET_MODE_CLASS (V4SFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcltv4sf_insn (TARGET_NEON && !(GET_MODE_CLASS (V4SFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vceqv2sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcgtv2sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcgev2sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcltv2sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vclev2sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vceqv4sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcgtv4sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcgev4sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcltv4sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vclev4sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vceqv8hf_fp16insn (TARGET_NEON_FP16INST \
+ && !(GET_MODE_CLASS (V8HFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgtv8hf_fp16insn (TARGET_NEON_FP16INST \
+ && !(GET_MODE_CLASS (V8HFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgev8hf_fp16insn (TARGET_NEON_FP16INST \
+ && !(GET_MODE_CLASS (V8HFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vclev8hf_fp16insn (TARGET_NEON_FP16INST \
+ && !(GET_MODE_CLASS (V8HFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcltv8hf_fp16insn (TARGET_NEON_FP16INST \
+ && !(GET_MODE_CLASS (V8HFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vceqv4hf_fp16insn (TARGET_NEON_FP16INST \
+ && !(GET_MODE_CLASS (V4HFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgtv4hf_fp16insn (TARGET_NEON_FP16INST \
+ && !(GET_MODE_CLASS (V4HFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcgev4hf_fp16insn (TARGET_NEON_FP16INST \
+ && !(GET_MODE_CLASS (V4HFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vclev4hf_fp16insn (TARGET_NEON_FP16INST \
+ && !(GET_MODE_CLASS (V4HFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vcltv4hf_fp16insn (TARGET_NEON_FP16INST \
+ && !(GET_MODE_CLASS (V4HFmode) == MODE_VECTOR_FLOAT \
+ && !flag_unsafe_math_optimizations))
+#define HAVE_neon_vceqv8hf_fp16insn_unspec (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcgtv8hf_fp16insn_unspec (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcgev8hf_fp16insn_unspec (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcltv8hf_fp16insn_unspec (TARGET_NEON_FP16INST)
+#define HAVE_neon_vclev8hf_fp16insn_unspec (TARGET_NEON_FP16INST)
+#define HAVE_neon_vceqv4hf_fp16insn_unspec (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcgtv4hf_fp16insn_unspec (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcgev4hf_fp16insn_unspec (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcltv4hf_fp16insn_unspec (TARGET_NEON_FP16INST)
+#define HAVE_neon_vclev4hf_fp16insn_unspec (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcgtuv8qi (TARGET_NEON)
+#define HAVE_neon_vcgeuv8qi (TARGET_NEON)
+#define HAVE_neon_vcgtuv16qi (TARGET_NEON)
+#define HAVE_neon_vcgeuv16qi (TARGET_NEON)
+#define HAVE_neon_vcgtuv4hi (TARGET_NEON)
+#define HAVE_neon_vcgeuv4hi (TARGET_NEON)
+#define HAVE_neon_vcgtuv8hi (TARGET_NEON)
+#define HAVE_neon_vcgeuv8hi (TARGET_NEON)
+#define HAVE_neon_vcgtuv2si (TARGET_NEON)
+#define HAVE_neon_vcgeuv2si (TARGET_NEON)
+#define HAVE_neon_vcgtuv4si (TARGET_NEON)
+#define HAVE_neon_vcgeuv4si (TARGET_NEON)
+#define HAVE_neon_vcagtv2sf_insn (TARGET_NEON && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcagev2sf_insn (TARGET_NEON && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcaltv2sf_insn (TARGET_NEON && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcalev2sf_insn (TARGET_NEON && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcagtv4sf_insn (TARGET_NEON && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcagev4sf_insn (TARGET_NEON && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcaltv4sf_insn (TARGET_NEON && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcalev4sf_insn (TARGET_NEON && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcagev2sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcagtv2sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcalev2sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcaltv2sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcagev4sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcagtv4sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcalev4sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcaltv4sf_insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcagtv8hf_fp16insn (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcagev8hf_fp16insn (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcaltv8hf_fp16insn (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcalev8hf_fp16insn (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcagtv4hf_fp16insn (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcagev4hf_fp16insn (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcaltv4hf_fp16insn (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcalev4hf_fp16insn (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)
+#define HAVE_neon_vcagev8hf_fp16insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcagtv8hf_fp16insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcalev8hf_fp16insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcaltv8hf_fp16insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcagev4hf_fp16insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcagtv4hf_fp16insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcalev4hf_fp16insn_unspec (TARGET_NEON)
+#define HAVE_neon_vcaltv4hf_fp16insn_unspec (TARGET_NEON)
+#define HAVE_neon_vtst_combinev8qi (TARGET_NEON)
+#define HAVE_neon_vtst_combinev16qi (TARGET_NEON)
+#define HAVE_neon_vtst_combinev4hi (TARGET_NEON)
+#define HAVE_neon_vtst_combinev8hi (TARGET_NEON)
+#define HAVE_neon_vtst_combinev2si (TARGET_NEON)
+#define HAVE_neon_vtst_combinev4si (TARGET_NEON)
+#define HAVE_neon_vabdsv8qi (TARGET_NEON)
+#define HAVE_neon_vabduv8qi (TARGET_NEON)
+#define HAVE_neon_vabdsv16qi (TARGET_NEON)
+#define HAVE_neon_vabduv16qi (TARGET_NEON)
+#define HAVE_neon_vabdsv4hi (TARGET_NEON)
+#define HAVE_neon_vabduv4hi (TARGET_NEON)
+#define HAVE_neon_vabdsv8hi (TARGET_NEON)
+#define HAVE_neon_vabduv8hi (TARGET_NEON)
+#define HAVE_neon_vabdsv2si (TARGET_NEON)
+#define HAVE_neon_vabduv2si (TARGET_NEON)
+#define HAVE_neon_vabdsv4si (TARGET_NEON)
+#define HAVE_neon_vabduv4si (TARGET_NEON)
+#define HAVE_neon_vabdv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vabdv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vabdfv2sf (TARGET_NEON)
+#define HAVE_neon_vabdfv4sf (TARGET_NEON)
+#define HAVE_neon_vabdlsv8qi (TARGET_NEON)
+#define HAVE_neon_vabdluv8qi (TARGET_NEON)
+#define HAVE_neon_vabdlsv4hi (TARGET_NEON)
+#define HAVE_neon_vabdluv4hi (TARGET_NEON)
+#define HAVE_neon_vabdlsv2si (TARGET_NEON)
+#define HAVE_neon_vabdluv2si (TARGET_NEON)
+#define HAVE_neon_vabasv8qi (TARGET_NEON)
+#define HAVE_neon_vabauv8qi (TARGET_NEON)
+#define HAVE_neon_vabasv16qi (TARGET_NEON)
+#define HAVE_neon_vabauv16qi (TARGET_NEON)
+#define HAVE_neon_vabasv4hi (TARGET_NEON)
+#define HAVE_neon_vabauv4hi (TARGET_NEON)
+#define HAVE_neon_vabasv8hi (TARGET_NEON)
+#define HAVE_neon_vabauv8hi (TARGET_NEON)
+#define HAVE_neon_vabasv2si (TARGET_NEON)
+#define HAVE_neon_vabauv2si (TARGET_NEON)
+#define HAVE_neon_vabasv4si (TARGET_NEON)
+#define HAVE_neon_vabauv4si (TARGET_NEON)
+#define HAVE_neon_vabalsv8qi (TARGET_NEON)
+#define HAVE_neon_vabaluv8qi (TARGET_NEON)
+#define HAVE_neon_vabalsv4hi (TARGET_NEON)
+#define HAVE_neon_vabaluv4hi (TARGET_NEON)
+#define HAVE_neon_vabalsv2si (TARGET_NEON)
+#define HAVE_neon_vabaluv2si (TARGET_NEON)
+#define HAVE_neon_vmaxsv8qi (TARGET_NEON)
+#define HAVE_neon_vmaxuv8qi (TARGET_NEON)
+#define HAVE_neon_vminsv8qi (TARGET_NEON)
+#define HAVE_neon_vminuv8qi (TARGET_NEON)
+#define HAVE_neon_vmaxsv16qi (TARGET_NEON)
+#define HAVE_neon_vmaxuv16qi (TARGET_NEON)
+#define HAVE_neon_vminsv16qi (TARGET_NEON)
+#define HAVE_neon_vminuv16qi (TARGET_NEON)
+#define HAVE_neon_vmaxsv4hi (TARGET_NEON)
+#define HAVE_neon_vmaxuv4hi (TARGET_NEON)
+#define HAVE_neon_vminsv4hi (TARGET_NEON)
+#define HAVE_neon_vminuv4hi (TARGET_NEON)
+#define HAVE_neon_vmaxsv8hi (TARGET_NEON)
+#define HAVE_neon_vmaxuv8hi (TARGET_NEON)
+#define HAVE_neon_vminsv8hi (TARGET_NEON)
+#define HAVE_neon_vminuv8hi (TARGET_NEON)
+#define HAVE_neon_vmaxsv2si (TARGET_NEON)
+#define HAVE_neon_vmaxuv2si (TARGET_NEON)
+#define HAVE_neon_vminsv2si (TARGET_NEON)
+#define HAVE_neon_vminuv2si (TARGET_NEON)
+#define HAVE_neon_vmaxsv4si (TARGET_NEON)
+#define HAVE_neon_vmaxuv4si (TARGET_NEON)
+#define HAVE_neon_vminsv4si (TARGET_NEON)
+#define HAVE_neon_vminuv4si (TARGET_NEON)
+#define HAVE_neon_vmaxfv2sf (TARGET_NEON)
+#define HAVE_neon_vminfv2sf (TARGET_NEON)
+#define HAVE_neon_vmaxfv4sf (TARGET_NEON)
+#define HAVE_neon_vminfv4sf (TARGET_NEON)
+#define HAVE_neon_vmaxfv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vminfv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vmaxfv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vminfv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vpmaxfv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vpminfv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vmaxnmv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vminnmv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vmaxnmv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vminnmv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vmaxnmv2sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vminnmv2sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vmaxnmv4sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vminnmv4sf (TARGET_NEON && TARGET_VFP5)
+#define HAVE_fmaxv2sf3 (TARGET_NEON && TARGET_VFP5)
+#define HAVE_fminv2sf3 (TARGET_NEON && TARGET_VFP5)
+#define HAVE_fmaxv4sf3 (TARGET_NEON && TARGET_VFP5)
+#define HAVE_fminv4sf3 (TARGET_NEON && TARGET_VFP5)
+#define HAVE_neon_vpaddlsv8qi (TARGET_NEON)
+#define HAVE_neon_vpaddluv8qi (TARGET_NEON)
+#define HAVE_neon_vpaddlsv16qi (TARGET_NEON)
+#define HAVE_neon_vpaddluv16qi (TARGET_NEON)
+#define HAVE_neon_vpaddlsv4hi (TARGET_NEON)
+#define HAVE_neon_vpaddluv4hi (TARGET_NEON)
+#define HAVE_neon_vpaddlsv8hi (TARGET_NEON)
+#define HAVE_neon_vpaddluv8hi (TARGET_NEON)
+#define HAVE_neon_vpaddlsv2si (TARGET_NEON)
+#define HAVE_neon_vpaddluv2si (TARGET_NEON)
+#define HAVE_neon_vpaddlsv4si (TARGET_NEON)
+#define HAVE_neon_vpaddluv4si (TARGET_NEON)
+#define HAVE_neon_vpadalsv8qi (TARGET_NEON)
+#define HAVE_neon_vpadaluv8qi (TARGET_NEON)
+#define HAVE_neon_vpadalsv16qi (TARGET_NEON)
+#define HAVE_neon_vpadaluv16qi (TARGET_NEON)
+#define HAVE_neon_vpadalsv4hi (TARGET_NEON)
+#define HAVE_neon_vpadaluv4hi (TARGET_NEON)
+#define HAVE_neon_vpadalsv8hi (TARGET_NEON)
+#define HAVE_neon_vpadaluv8hi (TARGET_NEON)
+#define HAVE_neon_vpadalsv2si (TARGET_NEON)
+#define HAVE_neon_vpadaluv2si (TARGET_NEON)
+#define HAVE_neon_vpadalsv4si (TARGET_NEON)
+#define HAVE_neon_vpadaluv4si (TARGET_NEON)
+#define HAVE_neon_vpmaxsv8qi (TARGET_NEON)
+#define HAVE_neon_vpmaxuv8qi (TARGET_NEON)
+#define HAVE_neon_vpminsv8qi (TARGET_NEON)
+#define HAVE_neon_vpminuv8qi (TARGET_NEON)
+#define HAVE_neon_vpmaxsv4hi (TARGET_NEON)
+#define HAVE_neon_vpmaxuv4hi (TARGET_NEON)
+#define HAVE_neon_vpminsv4hi (TARGET_NEON)
+#define HAVE_neon_vpminuv4hi (TARGET_NEON)
+#define HAVE_neon_vpmaxsv2si (TARGET_NEON)
+#define HAVE_neon_vpmaxuv2si (TARGET_NEON)
+#define HAVE_neon_vpminsv2si (TARGET_NEON)
+#define HAVE_neon_vpminuv2si (TARGET_NEON)
+#define HAVE_neon_vpmaxfv2sf (TARGET_NEON)
+#define HAVE_neon_vpminfv2sf (TARGET_NEON)
+#define HAVE_neon_vpmaxfv4sf (TARGET_NEON)
+#define HAVE_neon_vpminfv4sf (TARGET_NEON)
+#define HAVE_neon_vrecpsv2sf (TARGET_NEON)
+#define HAVE_neon_vrecpsv4sf (TARGET_NEON)
+#define HAVE_neon_vrecpsv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrecpsv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrsqrtsv2sf (TARGET_NEON)
+#define HAVE_neon_vrsqrtsv4sf (TARGET_NEON)
+#define HAVE_neon_vrsqrtsv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrsqrtsv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vqabsv8qi (TARGET_NEON)
+#define HAVE_neon_vqabsv16qi (TARGET_NEON)
+#define HAVE_neon_vqabsv4hi (TARGET_NEON)
+#define HAVE_neon_vqabsv8hi (TARGET_NEON)
+#define HAVE_neon_vqabsv2si (TARGET_NEON)
+#define HAVE_neon_vqabsv4si (TARGET_NEON)
+#define HAVE_neon_bswapv4hi (TARGET_NEON)
+#define HAVE_neon_bswapv8hi (TARGET_NEON)
+#define HAVE_neon_bswapv2si (TARGET_NEON)
+#define HAVE_neon_bswapv4si (TARGET_NEON)
+#define HAVE_neon_bswapv2di (TARGET_NEON)
+#define HAVE_neon_vcadd90v4hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcadd270v4hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcadd90v8hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcadd270v8hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcadd90v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcadd270v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcadd90v4sf (TARGET_COMPLEX)
+#define HAVE_neon_vcadd270v4sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla0v4hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla90v4hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla180v4hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla270v4hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla0v8hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla90v8hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla180v8hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla270v8hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla0v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla90v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla180v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla270v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla0v4sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla90v4sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla180v4sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla270v4sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_lane0v4hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla_lane90v4hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla_lane180v4hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla_lane270v4hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla_lane0v8hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla_lane90v8hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla_lane180v8hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla_lane270v8hf ((TARGET_COMPLEX) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vcmla_lane0v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_lane90v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_lane180v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_lane270v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_lane0v4sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_lane90v4sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_lane180v4sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_lane270v4sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_laneq0v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_laneq90v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_laneq180v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_laneq270v2sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_laneq0v4hf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_laneq90v4hf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_laneq180v4hf (TARGET_COMPLEX)
+#define HAVE_neon_vcmla_laneq270v4hf (TARGET_COMPLEX)
+#define HAVE_neon_vcmlaq_lane0v8hf (TARGET_COMPLEX)
+#define HAVE_neon_vcmlaq_lane90v8hf (TARGET_COMPLEX)
+#define HAVE_neon_vcmlaq_lane180v8hf (TARGET_COMPLEX)
+#define HAVE_neon_vcmlaq_lane270v8hf (TARGET_COMPLEX)
+#define HAVE_neon_vcmlaq_lane0v4sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmlaq_lane90v4sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmlaq_lane180v4sf (TARGET_COMPLEX)
+#define HAVE_neon_vcmlaq_lane270v4sf (TARGET_COMPLEX)
+#define HAVE_sdot_prodv8qi (TARGET_DOTPROD)
+#define HAVE_udot_prodv8qi (TARGET_DOTPROD)
+#define HAVE_sdot_prodv16qi (TARGET_DOTPROD)
+#define HAVE_udot_prodv16qi (TARGET_DOTPROD)
+#define HAVE_neon_usdotv8qi (TARGET_I8MM)
+#define HAVE_neon_usdotv16qi (TARGET_I8MM)
+#define HAVE_neon_sdot_lanev8qi (TARGET_DOTPROD)
+#define HAVE_neon_udot_lanev8qi (TARGET_DOTPROD)
+#define HAVE_neon_sdot_lanev16qi (TARGET_DOTPROD)
+#define HAVE_neon_udot_lanev16qi (TARGET_DOTPROD)
+#define HAVE_neon_sdot_laneqv8qi (TARGET_DOTPROD)
+#define HAVE_neon_udot_laneqv8qi (TARGET_DOTPROD)
+#define HAVE_neon_sdot_laneqv16qi (TARGET_DOTPROD)
+#define HAVE_neon_udot_laneqv16qi (TARGET_DOTPROD)
+#define HAVE_neon_usdot_lanev8qi (TARGET_I8MM)
+#define HAVE_neon_sudot_lanev8qi (TARGET_I8MM)
+#define HAVE_neon_usdot_lanev16qi (TARGET_I8MM)
+#define HAVE_neon_sudot_lanev16qi (TARGET_I8MM)
+#define HAVE_neon_usdot_laneqv8qi (TARGET_I8MM)
+#define HAVE_neon_sudot_laneqv8qi (TARGET_I8MM)
+#define HAVE_neon_usdot_laneqv16qi (TARGET_I8MM)
+#define HAVE_neon_sudot_laneqv16qi (TARGET_I8MM)
+#define HAVE_neon_vqnegv8qi (TARGET_NEON)
+#define HAVE_neon_vqnegv16qi (TARGET_NEON)
+#define HAVE_neon_vqnegv4hi (TARGET_NEON)
+#define HAVE_neon_vqnegv8hi (TARGET_NEON)
+#define HAVE_neon_vqnegv2si (TARGET_NEON)
+#define HAVE_neon_vqnegv4si (TARGET_NEON)
+#define HAVE_neon_vclsv8qi (TARGET_NEON)
+#define HAVE_neon_vclsv16qi (TARGET_NEON)
+#define HAVE_neon_vclsv4hi (TARGET_NEON)
+#define HAVE_neon_vclsv8hi (TARGET_NEON)
+#define HAVE_neon_vclsv2si (TARGET_NEON)
+#define HAVE_neon_vclsv4si (TARGET_NEON)
+#define HAVE_neon_vclzv8qi (TARGET_NEON)
+#define HAVE_neon_vclzv16qi (TARGET_NEON)
+#define HAVE_neon_vclzv4hi (TARGET_NEON)
+#define HAVE_neon_vclzv8hi (TARGET_NEON)
+#define HAVE_neon_vclzv2si (TARGET_NEON)
+#define HAVE_neon_vclzv4si (TARGET_NEON)
+#define HAVE_popcountv8qi2 (TARGET_NEON)
+#define HAVE_popcountv16qi2 (TARGET_NEON)
+#define HAVE_neon_vrecpev8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrecpev4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vrecpev2si (TARGET_NEON)
+#define HAVE_neon_vrecpev2sf (TARGET_NEON)
+#define HAVE_neon_vrecpev4si (TARGET_NEON)
+#define HAVE_neon_vrecpev4sf (TARGET_NEON)
+#define HAVE_neon_vrsqrtev2si (TARGET_NEON)
+#define HAVE_neon_vrsqrtev2sf (TARGET_NEON)
+#define HAVE_neon_vrsqrtev4si (TARGET_NEON)
+#define HAVE_neon_vrsqrtev4sf (TARGET_NEON)
+#define HAVE_neon_vget_lanev8qi_sext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev4hi_sext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev2si_sext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev2sf_sext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev8qi_zext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev4hi_zext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev2si_zext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev2sf_zext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev16qi_sext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev8hi_sext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev8hf_sext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev4si_sext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev4sf_sext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev16qi_zext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev8hi_zext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev8hf_zext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev4si_zext_internal (TARGET_NEON)
+#define HAVE_neon_vget_lanev4sf_zext_internal (TARGET_NEON)
+#define HAVE_neon_vdup_nv8qi (TARGET_NEON)
+#define HAVE_neon_vdup_nv4hi (TARGET_NEON)
+#define HAVE_neon_vdup_nv16qi (TARGET_NEON)
+#define HAVE_neon_vdup_nv8hi (TARGET_NEON)
+#define HAVE_neon_vdup_nv4hf (TARGET_NEON)
+#define HAVE_neon_vdup_nv8hf (TARGET_NEON)
+#define HAVE_neon_vdup_nv4bf (TARGET_NEON)
+#define HAVE_neon_vdup_nv8bf (TARGET_NEON)
+#define HAVE_neon_vdup_nv2si (TARGET_NEON)
+#define HAVE_neon_vdup_nv2sf (TARGET_NEON)
+#define HAVE_neon_vdup_nv4si (TARGET_NEON)
+#define HAVE_neon_vdup_nv4sf (TARGET_NEON)
+#define HAVE_neon_vdup_nv2di (TARGET_NEON)
+#define HAVE_neon_vdup_lanev8qi_internal (TARGET_NEON)
+#define HAVE_neon_vdup_lanev16qi_internal (TARGET_NEON)
+#define HAVE_neon_vdup_lanev4hi_internal (TARGET_NEON)
+#define HAVE_neon_vdup_lanev8hi_internal (TARGET_NEON)
+#define HAVE_neon_vdup_lanev2si_internal (TARGET_NEON)
+#define HAVE_neon_vdup_lanev4si_internal (TARGET_NEON)
+#define HAVE_neon_vdup_lanev2sf_internal (TARGET_NEON)
+#define HAVE_neon_vdup_lanev4sf_internal (TARGET_NEON)
+#define HAVE_neon_vdup_lanev8hf_internal (TARGET_NEON && (TARGET_FP16 || TARGET_BF16_SIMD))
+#define HAVE_neon_vdup_lanev4hf_internal (TARGET_NEON && (TARGET_FP16 || TARGET_BF16_SIMD))
+#define HAVE_neon_vdup_lanev4bf_internal (TARGET_NEON && (TARGET_FP16 || TARGET_BF16_SIMD))
+#define HAVE_neon_vdup_lanev8bf_internal (TARGET_NEON && (TARGET_FP16 || TARGET_BF16_SIMD))
+#define HAVE_neon_vcombinev8qi (TARGET_NEON)
+#define HAVE_neon_vcombinev4hi (TARGET_NEON)
+#define HAVE_neon_vcombinev4hf (TARGET_NEON)
+#define HAVE_neon_vcombinev4bf (TARGET_NEON)
+#define HAVE_neon_vcombinev2si (TARGET_NEON)
+#define HAVE_neon_vcombinev2sf (TARGET_NEON)
+#define HAVE_neon_vcombinedi (TARGET_NEON)
+#define HAVE_floatv2siv2sf2 (TARGET_NEON && !flag_rounding_math)
+#define HAVE_floatv4siv4sf2 (TARGET_NEON && !flag_rounding_math)
+#define HAVE_floatunsv2siv2sf2 (TARGET_NEON && !flag_rounding_math)
+#define HAVE_floatunsv4siv4sf2 (TARGET_NEON && !flag_rounding_math)
+#define HAVE_fix_truncv2sfv2si2 (TARGET_NEON)
+#define HAVE_fix_truncv4sfv4si2 (TARGET_NEON)
+#define HAVE_fixuns_truncv2sfv2si2 (TARGET_NEON)
+#define HAVE_fixuns_truncv4sfv4si2 (TARGET_NEON)
+#define HAVE_neon_vcvtsv2sf (TARGET_NEON)
+#define HAVE_neon_vcvtuv2sf (TARGET_NEON)
+#define HAVE_neon_vcvtsv4sf (TARGET_NEON)
+#define HAVE_neon_vcvtuv4sf (TARGET_NEON)
+#define HAVE_neon_vcvtsv2si (TARGET_NEON)
+#define HAVE_neon_vcvtuv2si (TARGET_NEON)
+#define HAVE_neon_vcvtsv4si (TARGET_NEON)
+#define HAVE_neon_vcvtuv4si (TARGET_NEON)
+#define HAVE_neon_vcvtv4sfv4hf (TARGET_NEON && TARGET_FP16)
+#define HAVE_neon_vcvtv4hfv4sf (TARGET_NEON && TARGET_FP16)
+#define HAVE_neon_vcvtsv4hi (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtuv4hi (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtsv8hi (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtuv8hi (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtsv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtuv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtsv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtuv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvts_nv2sf (TARGET_NEON)
+#define HAVE_neon_vcvtu_nv2sf (TARGET_NEON)
+#define HAVE_neon_vcvts_nv4sf (TARGET_NEON)
+#define HAVE_neon_vcvtu_nv4sf (TARGET_NEON)
+#define HAVE_neon_vcvts_nv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtu_nv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvts_nv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtu_nv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvts_nv2si (TARGET_NEON)
+#define HAVE_neon_vcvtu_nv2si (TARGET_NEON)
+#define HAVE_neon_vcvts_nv4si (TARGET_NEON)
+#define HAVE_neon_vcvtu_nv4si (TARGET_NEON)
+#define HAVE_neon_vcvts_nv4hi (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtu_nv4hi (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvts_nv8hi (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtu_nv8hi (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtasv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtauv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtmsv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtmuv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtnsv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtnuv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtpsv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtpuv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtasv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtauv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtmsv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtmuv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtnsv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtnuv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtpsv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcvtpuv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vmovnv8hi (TARGET_NEON)
+#define HAVE_neon_vmovnv4si (TARGET_NEON)
+#define HAVE_neon_vmovnv2di (TARGET_NEON)
+#define HAVE_neon_vqmovnsv8hi (TARGET_NEON)
+#define HAVE_neon_vqmovnuv8hi (TARGET_NEON)
+#define HAVE_neon_vqmovnsv4si (TARGET_NEON)
+#define HAVE_neon_vqmovnuv4si (TARGET_NEON)
+#define HAVE_neon_vqmovnsv2di (TARGET_NEON)
+#define HAVE_neon_vqmovnuv2di (TARGET_NEON)
+#define HAVE_neon_vqmovunv8hi (TARGET_NEON)
+#define HAVE_neon_vqmovunv4si (TARGET_NEON)
+#define HAVE_neon_vqmovunv2di (TARGET_NEON)
+#define HAVE_neon_vmovlsv8qi (TARGET_NEON)
+#define HAVE_neon_vmovluv8qi (TARGET_NEON)
+#define HAVE_neon_vmovlsv4hi (TARGET_NEON)
+#define HAVE_neon_vmovluv4hi (TARGET_NEON)
+#define HAVE_neon_vmovlsv2si (TARGET_NEON)
+#define HAVE_neon_vmovluv2si (TARGET_NEON)
+#define HAVE_neon_vmul_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vmul_lanev2si (TARGET_NEON)
+#define HAVE_neon_vmul_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vmul_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vmul_lanev4si (TARGET_NEON)
+#define HAVE_neon_vmul_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vmul_lanev8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vmul_lanev4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vmulls_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vmullu_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vmulls_lanev2si (TARGET_NEON)
+#define HAVE_neon_vmullu_lanev2si (TARGET_NEON)
+#define HAVE_neon_vqdmull_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vqdmull_lanev2si (TARGET_NEON)
+#define HAVE_neon_vqdmulh_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vqrdmulh_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vqdmulh_lanev4si (TARGET_NEON)
+#define HAVE_neon_vqrdmulh_lanev4si (TARGET_NEON)
+#define HAVE_neon_vqdmulh_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vqrdmulh_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vqdmulh_lanev2si (TARGET_NEON)
+#define HAVE_neon_vqrdmulh_lanev2si (TARGET_NEON)
+#define HAVE_neon_vqrdmlah_lanev8hi (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlsh_lanev8hi (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlah_lanev4si (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlsh_lanev4si (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlah_lanev4hi (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlsh_lanev4hi (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlah_lanev2si (TARGET_NEON_RDMA)
+#define HAVE_neon_vqrdmlsh_lanev2si (TARGET_NEON_RDMA)
+#define HAVE_neon_vmla_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vmla_lanev2si (TARGET_NEON)
+#define HAVE_neon_vmla_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vmla_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vmla_lanev4si (TARGET_NEON)
+#define HAVE_neon_vmla_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vmlals_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vmlalu_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vmlals_lanev2si (TARGET_NEON)
+#define HAVE_neon_vmlalu_lanev2si (TARGET_NEON)
+#define HAVE_neon_vqdmlal_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vqdmlal_lanev2si (TARGET_NEON)
+#define HAVE_neon_vmls_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vmls_lanev2si (TARGET_NEON)
+#define HAVE_neon_vmls_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vmls_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vmls_lanev4si (TARGET_NEON)
+#define HAVE_neon_vmls_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vmlsls_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vmlslu_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vmlsls_lanev2si (TARGET_NEON)
+#define HAVE_neon_vmlslu_lanev2si (TARGET_NEON)
+#define HAVE_neon_vqdmlsl_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vqdmlsl_lanev2si (TARGET_NEON)
+#define HAVE_neon_vextv8qi (TARGET_NEON)
+#define HAVE_neon_vextv16qi (TARGET_NEON)
+#define HAVE_neon_vextv4hi (TARGET_NEON)
+#define HAVE_neon_vextv8hi (TARGET_NEON)
+#define HAVE_neon_vextv2si (TARGET_NEON)
+#define HAVE_neon_vextv4si (TARGET_NEON)
+#define HAVE_neon_vextv4hf (TARGET_NEON)
+#define HAVE_neon_vextv8hf (TARGET_NEON)
+#define HAVE_neon_vextv4bf (TARGET_NEON)
+#define HAVE_neon_vextv8bf (TARGET_NEON)
+#define HAVE_neon_vextv2sf (TARGET_NEON)
+#define HAVE_neon_vextv4sf (TARGET_NEON)
+#define HAVE_neon_vextdi (TARGET_NEON)
+#define HAVE_neon_vextv2di (TARGET_NEON)
+#define HAVE_neon_vrev64v8qi (TARGET_NEON)
+#define HAVE_neon_vrev64v16qi (TARGET_NEON)
+#define HAVE_neon_vrev64v4hi (TARGET_NEON)
+#define HAVE_neon_vrev64v8hi (TARGET_NEON)
+#define HAVE_neon_vrev64v2si (TARGET_NEON)
+#define HAVE_neon_vrev64v4si (TARGET_NEON)
+#define HAVE_neon_vrev64v4hf (TARGET_NEON)
+#define HAVE_neon_vrev64v8hf (TARGET_NEON)
+#define HAVE_neon_vrev64v2sf (TARGET_NEON)
+#define HAVE_neon_vrev64v4sf (TARGET_NEON)
+#define HAVE_neon_vrev64v2di (TARGET_NEON)
+#define HAVE_neon_vrev32v8qi (TARGET_NEON)
+#define HAVE_neon_vrev32v4hi (TARGET_NEON)
+#define HAVE_neon_vrev32v16qi (TARGET_NEON)
+#define HAVE_neon_vrev32v8hi (TARGET_NEON)
+#define HAVE_neon_vrev16v8qi (TARGET_NEON)
+#define HAVE_neon_vrev16v16qi (TARGET_NEON)
+#define HAVE_neon_vbslv8qi_internal (TARGET_NEON)
+#define HAVE_neon_vbslv16qi_internal (TARGET_NEON)
+#define HAVE_neon_vbslv4hi_internal (TARGET_NEON)
+#define HAVE_neon_vbslv8hi_internal (TARGET_NEON)
+#define HAVE_neon_vbslv2si_internal (TARGET_NEON)
+#define HAVE_neon_vbslv4si_internal (TARGET_NEON)
+#define HAVE_neon_vbslv4hf_internal (TARGET_NEON)
+#define HAVE_neon_vbslv8hf_internal (TARGET_NEON)
+#define HAVE_neon_vbslv4bf_internal (TARGET_NEON)
+#define HAVE_neon_vbslv8bf_internal (TARGET_NEON)
+#define HAVE_neon_vbslv2sf_internal (TARGET_NEON)
+#define HAVE_neon_vbslv4sf_internal (TARGET_NEON)
+#define HAVE_neon_vbsldi_internal (TARGET_NEON)
+#define HAVE_neon_vbslv2di_internal (TARGET_NEON)
+#define HAVE_neon_vshlsv8qi (TARGET_NEON)
+#define HAVE_neon_vshluv8qi (TARGET_NEON)
+#define HAVE_neon_vrshlsv8qi (TARGET_NEON)
+#define HAVE_neon_vrshluv8qi (TARGET_NEON)
+#define HAVE_neon_vshlsv16qi (TARGET_NEON)
+#define HAVE_neon_vshluv16qi (TARGET_NEON)
+#define HAVE_neon_vrshlsv16qi (TARGET_NEON)
+#define HAVE_neon_vrshluv16qi (TARGET_NEON)
+#define HAVE_neon_vshlsv4hi (TARGET_NEON)
+#define HAVE_neon_vshluv4hi (TARGET_NEON)
+#define HAVE_neon_vrshlsv4hi (TARGET_NEON)
+#define HAVE_neon_vrshluv4hi (TARGET_NEON)
+#define HAVE_neon_vshlsv8hi (TARGET_NEON)
+#define HAVE_neon_vshluv8hi (TARGET_NEON)
+#define HAVE_neon_vrshlsv8hi (TARGET_NEON)
+#define HAVE_neon_vrshluv8hi (TARGET_NEON)
+#define HAVE_neon_vshlsv2si (TARGET_NEON)
+#define HAVE_neon_vshluv2si (TARGET_NEON)
+#define HAVE_neon_vrshlsv2si (TARGET_NEON)
+#define HAVE_neon_vrshluv2si (TARGET_NEON)
+#define HAVE_neon_vshlsv4si (TARGET_NEON)
+#define HAVE_neon_vshluv4si (TARGET_NEON)
+#define HAVE_neon_vrshlsv4si (TARGET_NEON)
+#define HAVE_neon_vrshluv4si (TARGET_NEON)
+#define HAVE_neon_vshlsdi (TARGET_NEON)
+#define HAVE_neon_vshludi (TARGET_NEON)
+#define HAVE_neon_vrshlsdi (TARGET_NEON)
+#define HAVE_neon_vrshludi (TARGET_NEON)
+#define HAVE_neon_vshlsv2di (TARGET_NEON)
+#define HAVE_neon_vshluv2di (TARGET_NEON)
+#define HAVE_neon_vrshlsv2di (TARGET_NEON)
+#define HAVE_neon_vrshluv2di (TARGET_NEON)
+#define HAVE_neon_vqshlsv8qi (TARGET_NEON)
+#define HAVE_neon_vqshluv8qi (TARGET_NEON)
+#define HAVE_neon_vqrshlsv8qi (TARGET_NEON)
+#define HAVE_neon_vqrshluv8qi (TARGET_NEON)
+#define HAVE_neon_vqshlsv16qi (TARGET_NEON)
+#define HAVE_neon_vqshluv16qi (TARGET_NEON)
+#define HAVE_neon_vqrshlsv16qi (TARGET_NEON)
+#define HAVE_neon_vqrshluv16qi (TARGET_NEON)
+#define HAVE_neon_vqshlsv4hi (TARGET_NEON)
+#define HAVE_neon_vqshluv4hi (TARGET_NEON)
+#define HAVE_neon_vqrshlsv4hi (TARGET_NEON)
+#define HAVE_neon_vqrshluv4hi (TARGET_NEON)
+#define HAVE_neon_vqshlsv8hi (TARGET_NEON)
+#define HAVE_neon_vqshluv8hi (TARGET_NEON)
+#define HAVE_neon_vqrshlsv8hi (TARGET_NEON)
+#define HAVE_neon_vqrshluv8hi (TARGET_NEON)
+#define HAVE_neon_vqshlsv2si (TARGET_NEON)
+#define HAVE_neon_vqshluv2si (TARGET_NEON)
+#define HAVE_neon_vqrshlsv2si (TARGET_NEON)
+#define HAVE_neon_vqrshluv2si (TARGET_NEON)
+#define HAVE_neon_vqshlsv4si (TARGET_NEON)
+#define HAVE_neon_vqshluv4si (TARGET_NEON)
+#define HAVE_neon_vqrshlsv4si (TARGET_NEON)
+#define HAVE_neon_vqrshluv4si (TARGET_NEON)
+#define HAVE_neon_vqshlsdi (TARGET_NEON)
+#define HAVE_neon_vqshludi (TARGET_NEON)
+#define HAVE_neon_vqrshlsdi (TARGET_NEON)
+#define HAVE_neon_vqrshludi (TARGET_NEON)
+#define HAVE_neon_vqshlsv2di (TARGET_NEON)
+#define HAVE_neon_vqshluv2di (TARGET_NEON)
+#define HAVE_neon_vqrshlsv2di (TARGET_NEON)
+#define HAVE_neon_vqrshluv2di (TARGET_NEON)
+#define HAVE_neon_vshrs_nv8qi (TARGET_NEON)
+#define HAVE_neon_vshru_nv8qi (TARGET_NEON)
+#define HAVE_neon_vrshrs_nv8qi (TARGET_NEON)
+#define HAVE_neon_vrshru_nv8qi (TARGET_NEON)
+#define HAVE_neon_vshrs_nv16qi (TARGET_NEON)
+#define HAVE_neon_vshru_nv16qi (TARGET_NEON)
+#define HAVE_neon_vrshrs_nv16qi (TARGET_NEON)
+#define HAVE_neon_vrshru_nv16qi (TARGET_NEON)
+#define HAVE_neon_vshrs_nv4hi (TARGET_NEON)
+#define HAVE_neon_vshru_nv4hi (TARGET_NEON)
+#define HAVE_neon_vrshrs_nv4hi (TARGET_NEON)
+#define HAVE_neon_vrshru_nv4hi (TARGET_NEON)
+#define HAVE_neon_vshrs_nv8hi (TARGET_NEON)
+#define HAVE_neon_vshru_nv8hi (TARGET_NEON)
+#define HAVE_neon_vrshrs_nv8hi (TARGET_NEON)
+#define HAVE_neon_vrshru_nv8hi (TARGET_NEON)
+#define HAVE_neon_vshrs_nv2si (TARGET_NEON)
+#define HAVE_neon_vshru_nv2si (TARGET_NEON)
+#define HAVE_neon_vrshrs_nv2si (TARGET_NEON)
+#define HAVE_neon_vrshru_nv2si (TARGET_NEON)
+#define HAVE_neon_vshrs_nv4si (TARGET_NEON)
+#define HAVE_neon_vshru_nv4si (TARGET_NEON)
+#define HAVE_neon_vrshrs_nv4si (TARGET_NEON)
+#define HAVE_neon_vrshru_nv4si (TARGET_NEON)
+#define HAVE_neon_vshrs_ndi (TARGET_NEON)
+#define HAVE_neon_vshru_ndi (TARGET_NEON)
+#define HAVE_neon_vrshrs_ndi (TARGET_NEON)
+#define HAVE_neon_vrshru_ndi (TARGET_NEON)
+#define HAVE_neon_vshrs_nv2di (TARGET_NEON)
+#define HAVE_neon_vshru_nv2di (TARGET_NEON)
+#define HAVE_neon_vrshrs_nv2di (TARGET_NEON)
+#define HAVE_neon_vrshru_nv2di (TARGET_NEON)
+#define HAVE_neon_vshrn_nv8hi (TARGET_NEON)
+#define HAVE_neon_vrshrn_nv8hi (TARGET_NEON)
+#define HAVE_neon_vshrn_nv4si (TARGET_NEON)
+#define HAVE_neon_vrshrn_nv4si (TARGET_NEON)
+#define HAVE_neon_vshrn_nv2di (TARGET_NEON)
+#define HAVE_neon_vrshrn_nv2di (TARGET_NEON)
+#define HAVE_neon_vqshrns_nv8hi (TARGET_NEON)
+#define HAVE_neon_vqshrnu_nv8hi (TARGET_NEON)
+#define HAVE_neon_vqrshrns_nv8hi (TARGET_NEON)
+#define HAVE_neon_vqrshrnu_nv8hi (TARGET_NEON)
+#define HAVE_neon_vqshrns_nv4si (TARGET_NEON)
+#define HAVE_neon_vqshrnu_nv4si (TARGET_NEON)
+#define HAVE_neon_vqrshrns_nv4si (TARGET_NEON)
+#define HAVE_neon_vqrshrnu_nv4si (TARGET_NEON)
+#define HAVE_neon_vqshrns_nv2di (TARGET_NEON)
+#define HAVE_neon_vqshrnu_nv2di (TARGET_NEON)
+#define HAVE_neon_vqrshrns_nv2di (TARGET_NEON)
+#define HAVE_neon_vqrshrnu_nv2di (TARGET_NEON)
+#define HAVE_neon_vqshrun_nv8hi (TARGET_NEON)
+#define HAVE_neon_vqrshrun_nv8hi (TARGET_NEON)
+#define HAVE_neon_vqshrun_nv4si (TARGET_NEON)
+#define HAVE_neon_vqrshrun_nv4si (TARGET_NEON)
+#define HAVE_neon_vqshrun_nv2di (TARGET_NEON)
+#define HAVE_neon_vqrshrun_nv2di (TARGET_NEON)
+#define HAVE_neon_vshl_nv8qi (TARGET_NEON)
+#define HAVE_neon_vshl_nv16qi (TARGET_NEON)
+#define HAVE_neon_vshl_nv4hi (TARGET_NEON)
+#define HAVE_neon_vshl_nv8hi (TARGET_NEON)
+#define HAVE_neon_vshl_nv2si (TARGET_NEON)
+#define HAVE_neon_vshl_nv4si (TARGET_NEON)
+#define HAVE_neon_vshl_ndi (TARGET_NEON)
+#define HAVE_neon_vshl_nv2di (TARGET_NEON)
+#define HAVE_neon_vqshl_s_nv8qi (TARGET_NEON)
+#define HAVE_neon_vqshl_u_nv8qi (TARGET_NEON)
+#define HAVE_neon_vqshl_s_nv16qi (TARGET_NEON)
+#define HAVE_neon_vqshl_u_nv16qi (TARGET_NEON)
+#define HAVE_neon_vqshl_s_nv4hi (TARGET_NEON)
+#define HAVE_neon_vqshl_u_nv4hi (TARGET_NEON)
+#define HAVE_neon_vqshl_s_nv8hi (TARGET_NEON)
+#define HAVE_neon_vqshl_u_nv8hi (TARGET_NEON)
+#define HAVE_neon_vqshl_s_nv2si (TARGET_NEON)
+#define HAVE_neon_vqshl_u_nv2si (TARGET_NEON)
+#define HAVE_neon_vqshl_s_nv4si (TARGET_NEON)
+#define HAVE_neon_vqshl_u_nv4si (TARGET_NEON)
+#define HAVE_neon_vqshl_s_ndi (TARGET_NEON)
+#define HAVE_neon_vqshl_u_ndi (TARGET_NEON)
+#define HAVE_neon_vqshl_s_nv2di (TARGET_NEON)
+#define HAVE_neon_vqshl_u_nv2di (TARGET_NEON)
+#define HAVE_neon_vqshlu_nv8qi (TARGET_NEON)
+#define HAVE_neon_vqshlu_nv16qi (TARGET_NEON)
+#define HAVE_neon_vqshlu_nv4hi (TARGET_NEON)
+#define HAVE_neon_vqshlu_nv8hi (TARGET_NEON)
+#define HAVE_neon_vqshlu_nv2si (TARGET_NEON)
+#define HAVE_neon_vqshlu_nv4si (TARGET_NEON)
+#define HAVE_neon_vqshlu_ndi (TARGET_NEON)
+#define HAVE_neon_vqshlu_nv2di (TARGET_NEON)
+#define HAVE_neon_vshlls_nv8qi (TARGET_NEON)
+#define HAVE_neon_vshllu_nv8qi (TARGET_NEON)
+#define HAVE_neon_vshlls_nv4hi (TARGET_NEON)
+#define HAVE_neon_vshllu_nv4hi (TARGET_NEON)
+#define HAVE_neon_vshlls_nv2si (TARGET_NEON)
+#define HAVE_neon_vshllu_nv2si (TARGET_NEON)
+#define HAVE_neon_vsras_nv8qi (TARGET_NEON)
+#define HAVE_neon_vsrau_nv8qi (TARGET_NEON)
+#define HAVE_neon_vrsras_nv8qi (TARGET_NEON)
+#define HAVE_neon_vrsrau_nv8qi (TARGET_NEON)
+#define HAVE_neon_vsras_nv16qi (TARGET_NEON)
+#define HAVE_neon_vsrau_nv16qi (TARGET_NEON)
+#define HAVE_neon_vrsras_nv16qi (TARGET_NEON)
+#define HAVE_neon_vrsrau_nv16qi (TARGET_NEON)
+#define HAVE_neon_vsras_nv4hi (TARGET_NEON)
+#define HAVE_neon_vsrau_nv4hi (TARGET_NEON)
+#define HAVE_neon_vrsras_nv4hi (TARGET_NEON)
+#define HAVE_neon_vrsrau_nv4hi (TARGET_NEON)
+#define HAVE_neon_vsras_nv8hi (TARGET_NEON)
+#define HAVE_neon_vsrau_nv8hi (TARGET_NEON)
+#define HAVE_neon_vrsras_nv8hi (TARGET_NEON)
+#define HAVE_neon_vrsrau_nv8hi (TARGET_NEON)
+#define HAVE_neon_vsras_nv2si (TARGET_NEON)
+#define HAVE_neon_vsrau_nv2si (TARGET_NEON)
+#define HAVE_neon_vrsras_nv2si (TARGET_NEON)
+#define HAVE_neon_vrsrau_nv2si (TARGET_NEON)
+#define HAVE_neon_vsras_nv4si (TARGET_NEON)
+#define HAVE_neon_vsrau_nv4si (TARGET_NEON)
+#define HAVE_neon_vrsras_nv4si (TARGET_NEON)
+#define HAVE_neon_vrsrau_nv4si (TARGET_NEON)
+#define HAVE_neon_vsras_ndi (TARGET_NEON)
+#define HAVE_neon_vsrau_ndi (TARGET_NEON)
+#define HAVE_neon_vrsras_ndi (TARGET_NEON)
+#define HAVE_neon_vrsrau_ndi (TARGET_NEON)
+#define HAVE_neon_vsras_nv2di (TARGET_NEON)
+#define HAVE_neon_vsrau_nv2di (TARGET_NEON)
+#define HAVE_neon_vrsras_nv2di (TARGET_NEON)
+#define HAVE_neon_vrsrau_nv2di (TARGET_NEON)
+#define HAVE_neon_vsri_nv8qi (TARGET_NEON)
+#define HAVE_neon_vsri_nv16qi (TARGET_NEON)
+#define HAVE_neon_vsri_nv4hi (TARGET_NEON)
+#define HAVE_neon_vsri_nv8hi (TARGET_NEON)
+#define HAVE_neon_vsri_nv2si (TARGET_NEON)
+#define HAVE_neon_vsri_nv4si (TARGET_NEON)
+#define HAVE_neon_vsri_ndi (TARGET_NEON)
+#define HAVE_neon_vsri_nv2di (TARGET_NEON)
+#define HAVE_neon_vsli_nv8qi (TARGET_NEON)
+#define HAVE_neon_vsli_nv16qi (TARGET_NEON)
+#define HAVE_neon_vsli_nv4hi (TARGET_NEON)
+#define HAVE_neon_vsli_nv8hi (TARGET_NEON)
+#define HAVE_neon_vsli_nv2si (TARGET_NEON)
+#define HAVE_neon_vsli_nv4si (TARGET_NEON)
+#define HAVE_neon_vsli_ndi (TARGET_NEON)
+#define HAVE_neon_vsli_nv2di (TARGET_NEON)
+#define HAVE_neon_vtbl1v8qi (TARGET_NEON)
+#define HAVE_neon_vtbl2v8qi (TARGET_NEON)
+#define HAVE_neon_vtbl3v8qi (TARGET_NEON)
+#define HAVE_neon_vtbl4v8qi (TARGET_NEON)
+#define HAVE_neon_vtbl1v16qi (TARGET_NEON)
+#define HAVE_neon_vtbl2v16qi (TARGET_NEON)
+#define HAVE_neon_vcombinev16qi (TARGET_NEON)
+#define HAVE_neon_vtbx1v8qi (TARGET_NEON)
+#define HAVE_neon_vtbx2v8qi (TARGET_NEON)
+#define HAVE_neon_vtbx3v8qi (TARGET_NEON)
+#define HAVE_neon_vtbx4v8qi (TARGET_NEON)
+#define HAVE_neon_vld1v8qi (TARGET_NEON)
+#define HAVE_neon_vld1v16qi (TARGET_NEON)
+#define HAVE_neon_vld1v4hi (TARGET_NEON)
+#define HAVE_neon_vld1v8hi (TARGET_NEON)
+#define HAVE_neon_vld1v2si (TARGET_NEON)
+#define HAVE_neon_vld1v4si (TARGET_NEON)
+#define HAVE_neon_vld1v4hf (TARGET_NEON)
+#define HAVE_neon_vld1v8hf (TARGET_NEON)
+#define HAVE_neon_vld1v4bf (TARGET_NEON)
+#define HAVE_neon_vld1v8bf (TARGET_NEON)
+#define HAVE_neon_vld1v2sf (TARGET_NEON)
+#define HAVE_neon_vld1v4sf (TARGET_NEON)
+#define HAVE_neon_vld1di (TARGET_NEON)
+#define HAVE_neon_vld1v2di (TARGET_NEON)
+#define HAVE_neon_vld1_lanev8qi (TARGET_NEON)
+#define HAVE_neon_vld1_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vld1_lanev4hf (TARGET_NEON)
+#define HAVE_neon_vld1_lanev4bf (TARGET_NEON)
+#define HAVE_neon_vld1_lanev2si (TARGET_NEON)
+#define HAVE_neon_vld1_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vld1_lanedi (TARGET_NEON)
+#define HAVE_neon_vld1_lanev16qi (TARGET_NEON)
+#define HAVE_neon_vld1_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vld1_lanev8hf (TARGET_NEON)
+#define HAVE_neon_vld1_lanev8bf (TARGET_NEON)
+#define HAVE_neon_vld1_lanev4si (TARGET_NEON)
+#define HAVE_neon_vld1_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vld1_lanev2di (TARGET_NEON)
+#define HAVE_neon_vld1_dupv8qi (TARGET_NEON)
+#define HAVE_neon_vld1_dupv4hi (TARGET_NEON)
+#define HAVE_neon_vld1_dupv4hf (TARGET_NEON)
+#define HAVE_neon_vld1_dupv4bf (TARGET_NEON)
+#define HAVE_neon_vld1_dupv2si (TARGET_NEON)
+#define HAVE_neon_vld1_dupv2sf (TARGET_NEON)
+#define HAVE_neon_vld1_dupv16qi (TARGET_NEON)
+#define HAVE_neon_vld1_dupv8hi (TARGET_NEON)
+#define HAVE_neon_vld1_dupv8hf (TARGET_NEON)
+#define HAVE_neon_vld1_dupv4si (TARGET_NEON)
+#define HAVE_neon_vld1_dupv4sf (TARGET_NEON)
+#define HAVE_neon_vld1_dupv2di (TARGET_NEON)
+#define HAVE_neon_vst1v8qi (TARGET_NEON)
+#define HAVE_neon_vst1v16qi (TARGET_NEON)
+#define HAVE_neon_vst1v4hi (TARGET_NEON)
+#define HAVE_neon_vst1v8hi (TARGET_NEON)
+#define HAVE_neon_vst1v2si (TARGET_NEON)
+#define HAVE_neon_vst1v4si (TARGET_NEON)
+#define HAVE_neon_vst1v4hf (TARGET_NEON)
+#define HAVE_neon_vst1v8hf (TARGET_NEON)
+#define HAVE_neon_vst1v4bf (TARGET_NEON)
+#define HAVE_neon_vst1v8bf (TARGET_NEON)
+#define HAVE_neon_vst1v2sf (TARGET_NEON)
+#define HAVE_neon_vst1v4sf (TARGET_NEON)
+#define HAVE_neon_vst1di (TARGET_NEON)
+#define HAVE_neon_vst1v2di (TARGET_NEON)
+#define HAVE_neon_vst1_lanev8qi (TARGET_NEON)
+#define HAVE_neon_vst1_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vst1_lanev4hf (TARGET_NEON)
+#define HAVE_neon_vst1_lanev4bf (TARGET_NEON)
+#define HAVE_neon_vst1_lanev2si (TARGET_NEON)
+#define HAVE_neon_vst1_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vst1_lanedi (TARGET_NEON)
+#define HAVE_neon_vst1_lanev16qi (TARGET_NEON)
+#define HAVE_neon_vst1_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vst1_lanev8hf (TARGET_NEON)
+#define HAVE_neon_vst1_lanev8bf (TARGET_NEON)
+#define HAVE_neon_vst1_lanev4si (TARGET_NEON)
+#define HAVE_neon_vst1_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vst1_lanev2di (TARGET_NEON)
+#define HAVE_neon_vld2v8qi (TARGET_NEON)
+#define HAVE_neon_vld2v4hi (TARGET_NEON)
+#define HAVE_neon_vld2v4hf (TARGET_NEON)
+#define HAVE_neon_vld2v4bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld2v2si (TARGET_NEON)
+#define HAVE_neon_vld2v2sf (TARGET_NEON)
+#define HAVE_neon_vld2di (TARGET_NEON)
+#define HAVE_neon_vld2v16qi (TARGET_NEON)
+#define HAVE_neon_vld2v8hi (TARGET_NEON)
+#define HAVE_neon_vld2v8hf (TARGET_NEON)
+#define HAVE_neon_vld2v8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld2v4si (TARGET_NEON)
+#define HAVE_neon_vld2v4sf (TARGET_NEON)
+#define HAVE_neon_vld2_lanev8qi (TARGET_NEON)
+#define HAVE_neon_vld2_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vld2_lanev4hf (TARGET_NEON)
+#define HAVE_neon_vld2_lanev4bf (TARGET_NEON)
+#define HAVE_neon_vld2_lanev2si (TARGET_NEON)
+#define HAVE_neon_vld2_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vld2_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vld2_lanev8hf (TARGET_NEON)
+#define HAVE_neon_vld2_lanev4si (TARGET_NEON)
+#define HAVE_neon_vld2_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vld2_lanev8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld2_dupv8qi (TARGET_NEON)
+#define HAVE_neon_vld2_dupv4hi (TARGET_NEON)
+#define HAVE_neon_vld2_dupv4hf (TARGET_NEON)
+#define HAVE_neon_vld2_dupv4bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld2_dupv2si (TARGET_NEON)
+#define HAVE_neon_vld2_dupv2sf (TARGET_NEON)
+#define HAVE_neon_vld2_dupdi (TARGET_NEON)
+#define HAVE_neon_vld2_dupv8bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vst2v8qi (TARGET_NEON)
+#define HAVE_neon_vst2v4hi (TARGET_NEON)
+#define HAVE_neon_vst2v4hf (TARGET_NEON)
+#define HAVE_neon_vst2v4bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vst2v2si (TARGET_NEON)
+#define HAVE_neon_vst2v2sf (TARGET_NEON)
+#define HAVE_neon_vst2di (TARGET_NEON)
+#define HAVE_neon_vst2v16qi (TARGET_NEON)
+#define HAVE_neon_vst2v8hi (TARGET_NEON)
+#define HAVE_neon_vst2v8hf (TARGET_NEON)
+#define HAVE_neon_vst2v8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vst2v4si (TARGET_NEON)
+#define HAVE_neon_vst2v4sf (TARGET_NEON)
+#define HAVE_neon_vst2_lanev8qi (TARGET_NEON)
+#define HAVE_neon_vst2_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vst2_lanev4hf (TARGET_NEON)
+#define HAVE_neon_vst2_lanev4bf (TARGET_NEON)
+#define HAVE_neon_vst2_lanev2si (TARGET_NEON)
+#define HAVE_neon_vst2_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vst2_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vst2_lanev8hf (TARGET_NEON)
+#define HAVE_neon_vst2_lanev4si (TARGET_NEON)
+#define HAVE_neon_vst2_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vst2_lanev8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld3v8qi (TARGET_NEON)
+#define HAVE_neon_vld3v4hi (TARGET_NEON)
+#define HAVE_neon_vld3v4hf (TARGET_NEON)
+#define HAVE_neon_vld3v4bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld3v2si (TARGET_NEON)
+#define HAVE_neon_vld3v2sf (TARGET_NEON)
+#define HAVE_neon_vld3di (TARGET_NEON)
+#define HAVE_neon_vld3qav16qi (TARGET_NEON)
+#define HAVE_neon_vld3qav8hi (TARGET_NEON)
+#define HAVE_neon_vld3qav8hf (TARGET_NEON)
+#define HAVE_neon_vld3qav8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld3qav4si (TARGET_NEON)
+#define HAVE_neon_vld3qav4sf (TARGET_NEON)
+#define HAVE_neon_vld3qbv16qi (TARGET_NEON)
+#define HAVE_neon_vld3qbv8hi (TARGET_NEON)
+#define HAVE_neon_vld3qbv8hf (TARGET_NEON)
+#define HAVE_neon_vld3qbv8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld3qbv4si (TARGET_NEON)
+#define HAVE_neon_vld3qbv4sf (TARGET_NEON)
+#define HAVE_neon_vld3_lanev8qi (TARGET_NEON)
+#define HAVE_neon_vld3_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vld3_lanev4hf (TARGET_NEON)
+#define HAVE_neon_vld3_lanev4bf (TARGET_NEON)
+#define HAVE_neon_vld3_lanev2si (TARGET_NEON)
+#define HAVE_neon_vld3_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vld3_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vld3_lanev8hf (TARGET_NEON)
+#define HAVE_neon_vld3_lanev4si (TARGET_NEON)
+#define HAVE_neon_vld3_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vld3_lanev8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld3_dupv8qi (TARGET_NEON)
+#define HAVE_neon_vld3_dupv4hi (TARGET_NEON)
+#define HAVE_neon_vld3_dupv4hf (TARGET_NEON)
+#define HAVE_neon_vld3_dupv4bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld3_dupv2si (TARGET_NEON)
+#define HAVE_neon_vld3_dupv2sf (TARGET_NEON)
+#define HAVE_neon_vld3_dupdi (TARGET_NEON)
+#define HAVE_neon_vld3_dupv8bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vst3v8qi (TARGET_NEON)
+#define HAVE_neon_vst3v4hi (TARGET_NEON)
+#define HAVE_neon_vst3v4hf (TARGET_NEON)
+#define HAVE_neon_vst3v4bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vst3v2si (TARGET_NEON)
+#define HAVE_neon_vst3v2sf (TARGET_NEON)
+#define HAVE_neon_vst3di (TARGET_NEON)
+#define HAVE_neon_vst3qav16qi (TARGET_NEON)
+#define HAVE_neon_vst3qav8hi (TARGET_NEON)
+#define HAVE_neon_vst3qav8hf (TARGET_NEON)
+#define HAVE_neon_vst3qav8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vst3qav4si (TARGET_NEON)
+#define HAVE_neon_vst3qav4sf (TARGET_NEON)
+#define HAVE_neon_vst3qbv16qi (TARGET_NEON)
+#define HAVE_neon_vst3qbv8hi (TARGET_NEON)
+#define HAVE_neon_vst3qbv8hf (TARGET_NEON)
+#define HAVE_neon_vst3qbv8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vst3qbv4si (TARGET_NEON)
+#define HAVE_neon_vst3qbv4sf (TARGET_NEON)
+#define HAVE_neon_vst3_lanev8qi (TARGET_NEON)
+#define HAVE_neon_vst3_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vst3_lanev4hf (TARGET_NEON)
+#define HAVE_neon_vst3_lanev4bf (TARGET_NEON)
+#define HAVE_neon_vst3_lanev2si (TARGET_NEON)
+#define HAVE_neon_vst3_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vst3_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vst3_lanev8hf (TARGET_NEON)
+#define HAVE_neon_vst3_lanev4si (TARGET_NEON)
+#define HAVE_neon_vst3_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vst3_lanev8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld4v8qi (TARGET_NEON)
+#define HAVE_neon_vld4v4hi (TARGET_NEON)
+#define HAVE_neon_vld4v4hf (TARGET_NEON)
+#define HAVE_neon_vld4v4bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld4v2si (TARGET_NEON)
+#define HAVE_neon_vld4v2sf (TARGET_NEON)
+#define HAVE_neon_vld4di (TARGET_NEON)
+#define HAVE_neon_vld4qav16qi (TARGET_NEON)
+#define HAVE_neon_vld4qav8hi (TARGET_NEON)
+#define HAVE_neon_vld4qav8hf (TARGET_NEON)
+#define HAVE_neon_vld4qav8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld4qav4si (TARGET_NEON)
+#define HAVE_neon_vld4qav4sf (TARGET_NEON)
+#define HAVE_neon_vld4qbv16qi (TARGET_NEON)
+#define HAVE_neon_vld4qbv8hi (TARGET_NEON)
+#define HAVE_neon_vld4qbv8hf (TARGET_NEON)
+#define HAVE_neon_vld4qbv8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld4qbv4si (TARGET_NEON)
+#define HAVE_neon_vld4qbv4sf (TARGET_NEON)
+#define HAVE_neon_vld4_lanev8qi (TARGET_NEON)
+#define HAVE_neon_vld4_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vld4_lanev4hf (TARGET_NEON)
+#define HAVE_neon_vld4_lanev4bf (TARGET_NEON)
+#define HAVE_neon_vld4_lanev2si (TARGET_NEON)
+#define HAVE_neon_vld4_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vld4_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vld4_lanev8hf (TARGET_NEON)
+#define HAVE_neon_vld4_lanev4si (TARGET_NEON)
+#define HAVE_neon_vld4_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vld4_lanev8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld4_dupv8qi (TARGET_NEON)
+#define HAVE_neon_vld4_dupv4hi (TARGET_NEON)
+#define HAVE_neon_vld4_dupv4hf (TARGET_NEON)
+#define HAVE_neon_vld4_dupv4bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld4_dupv2si (TARGET_NEON)
+#define HAVE_neon_vld4_dupv2sf (TARGET_NEON)
+#define HAVE_neon_vld4_dupdi (TARGET_NEON)
+#define HAVE_neon_vld4_dupv8bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vst4v8qi (TARGET_NEON)
+#define HAVE_neon_vst4v4hi (TARGET_NEON)
+#define HAVE_neon_vst4v4hf (TARGET_NEON)
+#define HAVE_neon_vst4v4bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vst4v2si (TARGET_NEON)
+#define HAVE_neon_vst4v2sf (TARGET_NEON)
+#define HAVE_neon_vst4di (TARGET_NEON)
+#define HAVE_neon_vst4qav16qi (TARGET_NEON)
+#define HAVE_neon_vst4qav8hi (TARGET_NEON)
+#define HAVE_neon_vst4qav8hf (TARGET_NEON)
+#define HAVE_neon_vst4qav8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vst4qav4si (TARGET_NEON)
+#define HAVE_neon_vst4qav4sf (TARGET_NEON)
+#define HAVE_neon_vst4qbv16qi (TARGET_NEON)
+#define HAVE_neon_vst4qbv8hi (TARGET_NEON)
+#define HAVE_neon_vst4qbv8hf (TARGET_NEON)
+#define HAVE_neon_vst4qbv8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vst4qbv4si (TARGET_NEON)
+#define HAVE_neon_vst4qbv4sf (TARGET_NEON)
+#define HAVE_neon_vst4_lanev8qi (TARGET_NEON)
+#define HAVE_neon_vst4_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vst4_lanev4hf (TARGET_NEON)
+#define HAVE_neon_vst4_lanev4bf (TARGET_NEON)
+#define HAVE_neon_vst4_lanev2si (TARGET_NEON)
+#define HAVE_neon_vst4_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vst4_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vst4_lanev8hf (TARGET_NEON)
+#define HAVE_neon_vst4_lanev4si (TARGET_NEON)
+#define HAVE_neon_vst4_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vst4_lanev8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vec_unpacks_lo_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_unpacku_lo_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_unpacks_lo_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_unpacku_lo_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_unpacks_lo_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_unpacku_lo_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_unpacks_hi_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_unpacku_hi_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_unpacks_hi_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_unpacku_hi_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_unpacks_hi_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_unpacku_hi_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_smult_lo_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_umult_lo_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_smult_lo_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_umult_lo_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_smult_lo_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_umult_lo_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_smult_hi_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_umult_hi_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_smult_hi_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_umult_hi_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_smult_hi_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_umult_hi_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_sshiftl_v8qi (TARGET_NEON)
+#define HAVE_neon_vec_ushiftl_v8qi (TARGET_NEON)
+#define HAVE_neon_vec_sshiftl_v4hi (TARGET_NEON)
+#define HAVE_neon_vec_ushiftl_v4hi (TARGET_NEON)
+#define HAVE_neon_vec_sshiftl_v2si (TARGET_NEON)
+#define HAVE_neon_vec_ushiftl_v2si (TARGET_NEON)
+#define HAVE_neon_unpacks_v8qi (TARGET_NEON)
+#define HAVE_neon_unpacku_v8qi (TARGET_NEON)
+#define HAVE_neon_unpacks_v4hi (TARGET_NEON)
+#define HAVE_neon_unpacku_v4hi (TARGET_NEON)
+#define HAVE_neon_unpacks_v2si (TARGET_NEON)
+#define HAVE_neon_unpacku_v2si (TARGET_NEON)
+#define HAVE_neon_vec_smult_v8qi (TARGET_NEON)
+#define HAVE_neon_vec_umult_v8qi (TARGET_NEON)
+#define HAVE_neon_vec_smult_v4hi (TARGET_NEON)
+#define HAVE_neon_vec_umult_v4hi (TARGET_NEON)
+#define HAVE_neon_vec_smult_v2si (TARGET_NEON)
+#define HAVE_neon_vec_umult_v2si (TARGET_NEON)
+#define HAVE_vec_pack_trunc_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_pack_trunc_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_pack_trunc_v2di (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_pack_trunc_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_pack_trunc_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vec_pack_trunc_v2di (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vabdv4hf_2 ((ARM_HAVE_NEON_V4HF_ARITH) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vabdv8hf_2 ((ARM_HAVE_NEON_V8HF_ARITH) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vabdv2sf_2 (ARM_HAVE_NEON_V2SF_ARITH)
+#define HAVE_neon_vabdv4sf_2 (ARM_HAVE_NEON_V4SF_ARITH)
+#define HAVE_neon_vabdv4hf_3 ((ARM_HAVE_NEON_V4HF_ARITH) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vabdv8hf_3 ((ARM_HAVE_NEON_V8HF_ARITH) && (TARGET_NEON_FP16INST))
+#define HAVE_neon_vabdv2sf_3 (ARM_HAVE_NEON_V2SF_ARITH)
+#define HAVE_neon_vabdv4sf_3 (ARM_HAVE_NEON_V4SF_ARITH)
+#define HAVE_neon_smmlav16qi (TARGET_I8MM)
+#define HAVE_neon_ummlav16qi (TARGET_I8MM)
+#define HAVE_neon_usmmlav16qi (TARGET_I8MM)
+#define HAVE_neon_vbfdotv2sf (TARGET_BF16_SIMD)
+#define HAVE_neon_vbfdotv4sf (TARGET_BF16_SIMD)
+#define HAVE_neon_vbfdot_lanev4bfv2sf (TARGET_BF16_SIMD)
+#define HAVE_neon_vbfdot_lanev4bfv4sf (TARGET_BF16_SIMD)
+#define HAVE_neon_vbfdot_lanev8bfv2sf (TARGET_BF16_SIMD)
+#define HAVE_neon_vbfdot_lanev8bfv4sf (TARGET_BF16_SIMD)
+#define HAVE_neon_vbfcvtv4sfv4bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vbfcvtv4sfv8bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vbfcvtv4sf_highv8bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vbfcvtsf (TARGET_BF16_FP)
+#define HAVE_neon_vbfcvtv4bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vbfcvtv8bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vbfcvt_highv8bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vbfcvtbf_cvtmodev2si (TARGET_BF16_FP)
+#define HAVE_neon_vbfcvtbf_cvtmodesf (TARGET_BF16_FP)
+#define HAVE_neon_vmmlav8bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vfmabv8bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vfmatv8bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vfmab_lanev8bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vfmat_lanev8bf (TARGET_BF16_SIMD)
+#define HAVE_crypto_aesmc (TARGET_CRYPTO)
+#define HAVE_crypto_aesimc (TARGET_CRYPTO)
+#define HAVE_aes_op_protect (TARGET_CRYPTO && fix_aes_erratum_1742098)
+#define HAVE_aes_op_protect_neon_vld1v16qi (TARGET_NEON)
+#define HAVE_crypto_sha1su1 (TARGET_CRYPTO)
+#define HAVE_crypto_sha256su0 (TARGET_CRYPTO)
+#define HAVE_crypto_sha1su0 (TARGET_CRYPTO)
+#define HAVE_crypto_sha256h (TARGET_CRYPTO)
+#define HAVE_crypto_sha256h2 (TARGET_CRYPTO)
+#define HAVE_crypto_sha256su1 (TARGET_CRYPTO)
+#define HAVE_crypto_sha1h_lb (TARGET_CRYPTO && INTVAL (operands[2]) == NEON_ENDIAN_LANE_N (V2SImode, 0))
+#define HAVE_crypto_vmullp64 (TARGET_CRYPTO)
+#define HAVE_crypto_sha1c_lb (TARGET_CRYPTO && INTVAL (operands[4]) == NEON_ENDIAN_LANE_N (V2SImode, 0))
+#define HAVE_crypto_sha1m_lb (TARGET_CRYPTO && INTVAL (operands[4]) == NEON_ENDIAN_LANE_N (V2SImode, 0))
+#define HAVE_crypto_sha1p_lb (TARGET_CRYPTO && INTVAL (operands[4]) == NEON_ENDIAN_LANE_N (V2SImode, 0))
+#define HAVE_atomic_loadqi (TARGET_HAVE_LDACQ)
+#define HAVE_atomic_loadhi (TARGET_HAVE_LDACQ)
+#define HAVE_atomic_loadsi (TARGET_HAVE_LDACQ)
+#define HAVE_atomic_storeqi (TARGET_HAVE_LDACQ)
+#define HAVE_atomic_storehi (TARGET_HAVE_LDACQ)
+#define HAVE_atomic_storesi (TARGET_HAVE_LDACQ)
+#define HAVE_arm_atomic_loaddi2_ldrd (ARM_DOUBLEWORD_ALIGN && TARGET_HAVE_LPAE)
+#define HAVE_atomic_compare_and_swap32qi_1 ((TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER) && (TARGET_32BIT))
+#define HAVE_atomic_compare_and_swap32hi_1 ((TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER) && (TARGET_32BIT))
+#define HAVE_atomic_compare_and_swapt1qi_1 ((TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER) && (TARGET_THUMB1))
+#define HAVE_atomic_compare_and_swapt1hi_1 ((TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER) && (TARGET_THUMB1))
+#define HAVE_atomic_compare_and_swap32si_1 ((TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER) && (TARGET_32BIT))
+#define HAVE_atomic_compare_and_swap32di_1 ((TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER) && (TARGET_32BIT))
+#define HAVE_atomic_compare_and_swapt1si_1 ((TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER) && (TARGET_THUMB1))
+#define HAVE_atomic_compare_and_swapt1di_1 ((TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER) && (TARGET_THUMB1))
+#define HAVE_atomic_exchangeqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_exchangehi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_exchangesi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_exchangedi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_addqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_subqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_orqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_xorqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_andqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_addhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_subhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_orhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_xorhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_andhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_addsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_subsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_orsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_xorsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_andsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_adddi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_subdi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_ordi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_xordi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_anddi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_nandqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_nandhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_nandsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_nanddi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_addqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_subqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_orqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_xorqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_andqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_addhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_subhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_orhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_xorhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_andhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_addsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_subsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_orsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_xorsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_andsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_adddi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_subdi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_ordi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_xordi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_anddi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_nandqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_nandhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_nandsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_fetch_nanddi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_add_fetchqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_sub_fetchqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_or_fetchqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_xor_fetchqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_and_fetchqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_add_fetchhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_sub_fetchhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_or_fetchhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_xor_fetchhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_and_fetchhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_add_fetchsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_sub_fetchsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_or_fetchsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_xor_fetchsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_and_fetchsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_add_fetchdi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_sub_fetchdi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_or_fetchdi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_xor_fetchdi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_and_fetchdi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_nand_fetchqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_nand_fetchhi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_nand_fetchsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_nand_fetchdi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_arm_load_exclusiveqi (TARGET_HAVE_LDREXBH)
+#define HAVE_arm_load_exclusivehi (TARGET_HAVE_LDREXBH)
+#define HAVE_arm_load_acquire_exclusiveqi (TARGET_HAVE_LDACQ)
+#define HAVE_arm_load_acquire_exclusivehi (TARGET_HAVE_LDACQ)
+#define HAVE_arm_load_exclusivesi (TARGET_HAVE_LDREX)
+#define HAVE_arm_load_acquire_exclusivesi (TARGET_HAVE_LDACQ)
+#define HAVE_arm_load_exclusivedi (TARGET_HAVE_LDREXD)
+#define HAVE_arm_load_acquire_exclusivedi (TARGET_HAVE_LDACQEXD && ARM_DOUBLEWORD_ALIGN)
+#define HAVE_arm_store_exclusiveqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_arm_store_exclusivehi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_arm_store_exclusivesi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_arm_store_exclusivedi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_arm_store_release_exclusivedi (TARGET_HAVE_LDACQEXD && ARM_DOUBLEWORD_ALIGN)
+#define HAVE_arm_store_release_exclusiveqi (TARGET_HAVE_LDACQ)
+#define HAVE_arm_store_release_exclusivehi (TARGET_HAVE_LDACQ)
+#define HAVE_arm_store_release_exclusivesi (TARGET_HAVE_LDACQ)
+#define HAVE_addqq3 (TARGET_32BIT)
+#define HAVE_addhq3 (TARGET_32BIT)
+#define HAVE_addsq3 (TARGET_32BIT)
+#define HAVE_adduqq3 (TARGET_32BIT)
+#define HAVE_adduhq3 (TARGET_32BIT)
+#define HAVE_addusq3 (TARGET_32BIT)
+#define HAVE_addha3 (TARGET_32BIT)
+#define HAVE_addsa3 (TARGET_32BIT)
+#define HAVE_adduha3 (TARGET_32BIT)
+#define HAVE_addusa3 (TARGET_32BIT)
+#define HAVE_usaddv4uqq3 (TARGET_INT_SIMD)
+#define HAVE_usaddv2uhq3 (TARGET_INT_SIMD)
+#define HAVE_usadduqq3 (TARGET_INT_SIMD)
+#define HAVE_usadduhq3 (TARGET_INT_SIMD)
+#define HAVE_usaddv2uha3 (TARGET_INT_SIMD)
+#define HAVE_usadduha3 (TARGET_INT_SIMD)
+#define HAVE_subqq3 (TARGET_32BIT)
+#define HAVE_subhq3 (TARGET_32BIT)
+#define HAVE_subsq3 (TARGET_32BIT)
+#define HAVE_subuqq3 (TARGET_32BIT)
+#define HAVE_subuhq3 (TARGET_32BIT)
+#define HAVE_subusq3 (TARGET_32BIT)
+#define HAVE_subha3 (TARGET_32BIT)
+#define HAVE_subsa3 (TARGET_32BIT)
+#define HAVE_subuha3 (TARGET_32BIT)
+#define HAVE_subusa3 (TARGET_32BIT)
+#define HAVE_ussubv4uqq3 (TARGET_INT_SIMD)
+#define HAVE_ussubv2uhq3 (TARGET_INT_SIMD)
+#define HAVE_ussubuqq3 (TARGET_INT_SIMD)
+#define HAVE_ussubuhq3 (TARGET_INT_SIMD)
+#define HAVE_ussubv2uha3 (TARGET_INT_SIMD)
+#define HAVE_ussubuha3 (TARGET_INT_SIMD)
+#define HAVE_arm_ssatsihi_shift (TARGET_32BIT && arm_arch6 && !ARM_Q_BIT_READ)
+#define HAVE_arm_usatsihi (TARGET_INT_SIMD && !ARM_Q_BIT_READ)
+#define HAVE_mve_vst4qv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vst4qv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vst4qv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vst4qv8hf (TARGET_HAVE_MVE)
+#define HAVE_mve_vst4qv4sf (TARGET_HAVE_MVE)
+#define HAVE_mve_vrndq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndxq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndxq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndpq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndpq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndnq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndnq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndmq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndmq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndaq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndaq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrev64q_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrev64q_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vnegq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vnegq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vdupq_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vdupq_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vabsq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vabsq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrev32q_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvttq_f32_f16v4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtbq_f32_f16v4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_to_f_sv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_to_f_uv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_to_f_sv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_to_f_uv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrev64q_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev64q_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev64q_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev64q_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev64q_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev64q_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcvtq_from_f_sv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_from_f_uv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_from_f_sv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_from_f_uv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vqnegq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqnegq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqnegq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqabsq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqabsq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqabsq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vnegq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vnegq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vnegq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vdupq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdupq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdupq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdupq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdupq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vdupq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vclzq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vclzq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vclzq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vclsq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vclsq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vclsq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vabsq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabsq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabsq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev32q_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev32q_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev32q_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev32q_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovltq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovltq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovltq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovltq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovlbq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovlbq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovlbq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovlbq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcvtpq_sv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtpq_uv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtpq_sv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtpq_uv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtnq_sv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtnq_uv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtnq_sv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtnq_uv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtmq_sv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtmq_uv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtmq_sv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtmq_uv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtaq_uv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtaq_sv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtaq_uv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtaq_sv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmvnq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev16q_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev16q_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddlvq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddlvq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vctp8qv16bi (TARGET_HAVE_MVE)
+#define HAVE_mve_vctp16qv8bi (TARGET_HAVE_MVE)
+#define HAVE_mve_vctp32qv4bi (TARGET_HAVE_MVE)
+#define HAVE_mve_vctp64qv2qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vpnotv16bi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vsubq_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vbrsrq_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vbrsrq_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_n_to_f_sv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_n_to_f_uv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_n_to_f_sv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_n_to_f_uv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcreateq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcreateq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcreateq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcreateq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcreateq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcreateq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcreateq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcreateq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcreateq_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vcreateq_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_n_sv16qi_imm (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_n_sv8hi_imm (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_n_sv4si_imm (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_n_uv16qi_imm (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_n_uv8hi_imm (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_n_uv4si_imm (TARGET_HAVE_MVE)
+#define HAVE_mve_vcvtq_n_from_f_sv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_n_from_f_uv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_n_from_f_sv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_n_from_f_uv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vaddlvq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddlvq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgeq_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpcsq_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgtq_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmphiq_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpleq_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpltq_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgeq_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpcsq_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgtq_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmphiq_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpleq_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpltq_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgeq_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpcsq_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgtq_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmphiq_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpleq_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpltq_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_n_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgeq_n_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpcsq_n_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgtq_n_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmphiq_n_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpleq_n_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpltq_n_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_n_v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_n_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgeq_n_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpcsq_n_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgtq_n_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmphiq_n_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpleq_n_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpltq_n_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_n_v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_n_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgeq_n_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpcsq_n_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgtq_n_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmphiq_n_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpleq_n_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpltq_n_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_n_v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vabdq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabdq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabdq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabdq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabdq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vabdq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvaq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvaq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvaq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvaq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvaq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvaq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvq_p_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvq_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vandq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vandq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vandq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vbrsrq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbrsrq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbrsrq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbrsrq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbrsrq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vbrsrq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot90v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot270v16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot90v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot270v8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot90v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot270v4si (TARGET_HAVE_MVE)
+#define HAVE_mve_veorq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_veorq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_veorq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhcaddq_rot270_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhcaddq_rot270_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhcaddq_rot270_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhcaddq_rot90_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhcaddq_rot90_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhcaddq_rot90_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxaq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxaq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxaq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxavq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxavq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxavq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxvq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxvq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxvq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxvq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxvq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxvq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vminaq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminaq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminaq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vminavq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminavq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminavq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vminq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vminq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vminvq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminvq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminvq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminvq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminvq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vminvq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavxq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavxq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavxq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavxq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulhq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulhq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulhq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulhq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulhq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulhq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_int_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_int_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_int_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_int_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_int_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_int_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_int_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_int_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_int_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_int_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_int_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_int_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulqv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulqv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulqv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vornq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vornq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vornq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulhq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulhq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulhq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulhq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulhq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulhq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmulhq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmulhq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmulhq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmulhq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmulhq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmulhq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_r_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_r_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_r_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_r_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_r_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_r_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshluq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshluq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshluq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrhaddq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrhaddq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrhaddq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrhaddq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrhaddq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrhaddq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmulhq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmulhq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmulhq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmulhq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmulhq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmulhq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_r_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_r_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_r_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_r_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_r_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_r_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubqv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubqv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubqv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vabdq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vabdq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vaddlvaq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddlvaq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vaddq_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vandq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vandq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vbicq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vbicq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vbicq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot90v8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcaddq_rot270v8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcaddq_rot90v4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcaddq_rot270v4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpeqq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgeq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgtq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpleq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpltq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpneq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpeqq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgeq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgtq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpleq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpltq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpneq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpeqq_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgeq_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgtq_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpleq_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpltq_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpneq_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpeqq_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgeq_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgtq_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpleq_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpltq_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpneq_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulqv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_rot90v8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_rot180v8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_rot270v8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulqv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_rot90v4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_rot180v4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_rot270v4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vctp8q_mv16bi (TARGET_HAVE_MVE)
+#define HAVE_mve_vctp16q_mv8bi (TARGET_HAVE_MVE)
+#define HAVE_mve_vctp32q_mv4bi (TARGET_HAVE_MVE)
+#define HAVE_mve_vctp64q_mv2qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcvtbq_f16_f32v8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvttq_f16_f32v8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_veorq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_veorq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmaq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmaq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmavq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmavq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmvq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmvq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmaq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmaq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmavq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmavq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmvq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmvq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmlaldavq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavxq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavxq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovnbq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovnbq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovnbq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovnbq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovntq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovntq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovntq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovntq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmulq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmulq_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmulq_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vornq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vornq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vorrq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vorrq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vorrq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmullbq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmullbq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmullbq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmullbq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulltq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulltq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulltq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulltq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovnbq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovnbq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovnbq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovnbq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovntq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovntq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovntq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovntq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovunbq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovunbq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovuntq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovuntq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlaldavhxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlsldavhq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlsldavhxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshllbq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshllbq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshllbq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshllbq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlltq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlltq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlltq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlltq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vsubq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmulltq_poly_pv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_poly_pv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_poly_pv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_poly_pv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlaldavhq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlaldavhq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpeqq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtaq_m_sv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtaq_m_uv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtaq_m_sv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtaq_m_uv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_to_f_sv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_to_f_uv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_to_f_sv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_to_f_uv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vqrshrnbq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrnbq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrnbq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrnbq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrunbq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrunbq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlaldavhaq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlaldavhaq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vabavq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabavq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabavq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabavq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabavq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vabavq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vabsq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabsq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabsq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvaq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvaq_p_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvaq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvaq_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvaq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddvaq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vclsq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vclsq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vclsq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vclzq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vclzq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vclzq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vclzq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vclzq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vclzq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpcsq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpcsq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpcsq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpcsq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpcsq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpcsq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpeqq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgeq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgeq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgeq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgeq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgeq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgeq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgtq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgtq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgtq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgtq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgtq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpgtq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmphiq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmphiq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmphiq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmphiq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmphiq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmphiq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpleq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpleq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpleq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpleq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpleq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpleq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpltq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpltq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpltq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpltq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpltq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpltq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmpneq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vdupq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdupq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdupq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdupq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdupq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vdupq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxaq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxaq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxaq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxavq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxavq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxavq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxvq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxvq_p_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxvq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxvq_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxvq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxvq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vminaq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminaq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminaq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vminavq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminavq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminavq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vminvq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminvq_p_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminvq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminvq_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminvq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vminvq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavq_p_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavq_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavxq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavxq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavxq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlasq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlasq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlasq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlasq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlasq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlasq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavxq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavxq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavxq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vnegq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vnegq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vnegq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vpselq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vpselq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vpselq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vpselq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vpselq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vpselq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vpselq_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vpselq_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vqabsq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqabsq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqabsq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlahq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlahq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlahq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlashq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlashq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlashq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqnegq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqnegq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqnegq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmladhq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmladhq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmladhq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmladhxq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmladhxq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmladhxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlahq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlahq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlahq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlashq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlashq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlashq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlsdhq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlsdhq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlsdhq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlsdhxq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlsdhxq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlsdhxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_r_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_r_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_r_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_r_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_r_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_r_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev64q_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev64q_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev64q_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev64q_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev64q_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev64q_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_r_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_r_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_r_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_r_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_r_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_r_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsliq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsliq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsliq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsliq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsliq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsliq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsriq_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsriq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsriq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsriq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsriq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsriq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlsdhxq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlsdhxq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlsdhxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlsdhq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlsdhq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlsdhq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmladhxq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmladhxq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmladhxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmladhq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmladhq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmladhq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavaxq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavaxq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavaxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavaq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavaq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavaq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaxq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaxq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vabsq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vabsq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vaddlvaq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddlvaq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcmlaqv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_rot90v8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_rot180v8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_rot270v8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaqv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_rot90v4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_rot180v4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_rot270v4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpeqq_m_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpeqq_m_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgeq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgeq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgeq_m_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgeq_m_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgtq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgtq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgtq_m_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpgtq_m_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpleq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpleq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpleq_m_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpleq_m_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpltq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpltq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpltq_m_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpltq_m_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpneq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpneq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpneq_m_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmpneq_m_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtbq_m_f16_f32v8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtbq_m_f32_f16v4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvttq_m_f16_f32v8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvttq_m_f32_f16v4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vdupq_m_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vdupq_m_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmaq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmaq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmaq_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmaq_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmasq_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmasq_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmsq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmsq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmaq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmaq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmavq_p_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmavq_p_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmvq_p_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmvq_p_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmaq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmaq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmavq_p_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmavq_p_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmvq_p_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmvq_p_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmlaldavaq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavaq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavaq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavaq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavaxq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavaxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavq_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavxq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavxq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavaq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavaq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavaxq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavaxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavxq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavxq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovlbq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovlbq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovlbq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovlbq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovltq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovltq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovltq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovltq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovnbq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovnbq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovnbq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovnbq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovntq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovntq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovntq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmovntq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vnegq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vnegq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vorrq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vpselq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vpselq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vqmovnbq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovnbq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovnbq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovnbq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovntq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovntq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovntq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovntq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovunbq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovunbq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovuntq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqmovuntq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrntq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrntq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrntq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrntq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshruntq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshruntq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrnbq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrnbq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrnbq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrnbq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrntq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrntq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrntq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrntq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrunbq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrunbq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshruntq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshruntq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev32q_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrev32q_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev32q_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev32q_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev32q_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev64q_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrev64q_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrmlaldavhaxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlaldavhxq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlsldavhaxq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlsldavhq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlsldavhxq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrndaq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndaq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndmq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndmq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndnq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndnq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndpq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndpq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndxq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrndxq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrshrnbq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrnbq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrnbq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrnbq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrntq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrntq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrntq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrntq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrnbq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrnbq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrnbq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrnbq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrntq_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrntq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrntq_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrntq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcvtmq_m_sv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtmq_m_uv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtmq_m_sv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtmq_m_uv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtpq_m_sv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtpq_m_uv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtpq_m_sv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtpq_m_uv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtnq_m_sv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtnq_m_uv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtnq_m_sv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtnq_m_uv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_n_from_f_sv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_n_from_f_uv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_n_from_f_sv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_n_from_f_uv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrev16q_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrev16q_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcvtq_m_from_f_uv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_from_f_sv8hi (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_from_f_uv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_from_f_sv4si (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vrmlaldavhq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlaldavhq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlsldavhaq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vabavq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabavq_p_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabavq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabavq_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabavq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vabavq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshluq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshluq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshluq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsriq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsriq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsriq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsriq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsriq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsriq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcvtq_m_n_to_f_uv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_n_to_f_sv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_n_to_f_uv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcvtq_m_n_to_f_sv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vabdq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabdq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabdq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabdq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vabdq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vabdq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vandq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vandq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vandq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vandq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vandq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vandq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vbrsrq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbrsrq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbrsrq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbrsrq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbrsrq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vbrsrq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot270_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot270_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot270_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot270_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot270_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot270_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot90_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot90_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot90_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot90_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot90_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vcaddq_rot90_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_veorq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_veorq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_veorq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_veorq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_veorq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_veorq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhaddq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhsubq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmaxq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vminq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vminq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vminq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaq_p_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaq_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlasq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlasq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlasq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlasq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlasq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlasq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulhq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulhq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulhq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulhq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulhq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulhq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_int_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_int_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_int_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_int_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_int_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_int_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_int_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_int_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_int_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_int_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_int_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_int_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vornq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vornq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vornq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vornq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vornq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vornq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqaddq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlahq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlahq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlahq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlashq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlashq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlashq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlahq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlahq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlahq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlashq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlashq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlashq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshlq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshlq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqsubq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrhaddq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrhaddq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrhaddq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrhaddq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrhaddq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrhaddq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmulhq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmulhq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmulhq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmulhq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmulhq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmulhq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshlq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsliq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsliq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsliq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsliq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsliq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsliq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsubq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhcaddq_rot270_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhcaddq_rot270_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhcaddq_rot270_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vhcaddq_rot90_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhcaddq_rot90_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vhcaddq_rot90_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaxq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaxq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmladavaxq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavaq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavaq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavaq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavaxq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavaxq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsdavaxq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmladhq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmladhq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmladhq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmladhxq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmladhxq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmladhxq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlsdhq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlsdhq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlsdhq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlsdhxq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlsdhxq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmlsdhxq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulhq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulhq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulhq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulhq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulhq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulhq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmladhq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmladhq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmladhq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmladhxq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmladhxq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmladhxq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlsdhq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlsdhq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlsdhq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlsdhxq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlsdhxq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmlsdhxq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmulhq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmulhq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmulhq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmulhq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmulhq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrdmulhq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavaq_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavaq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavaq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavaq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavaxq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlaldavaxq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrnbq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrnbq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrnbq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrnbq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrntq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrntq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrntq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrntq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrnbq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrnbq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrnbq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrnbq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrntq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrntq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrntq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrntq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlaldavhaq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrnbq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrnbq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrnbq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrnbq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrntq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrntq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrntq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrshrntq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshllbq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshllbq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshllbq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshllbq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlltq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlltq_m_n_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlltq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlltq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrnbq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrnbq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrnbq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrnbq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrntq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrntq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrntq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshrntq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavaq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavaq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavaxq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmlsldavaxq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_poly_m_pv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmullbq_poly_m_pv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_poly_m_pv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmulltq_poly_m_pv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmullbq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmullbq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmullbq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmullbq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulltq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulltq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulltq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqdmulltq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrunbq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshrunbq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshruntq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqrshruntq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrunbq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshrunbq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshruntq_m_n_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vqshruntq_m_n_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlaldavhaq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlaldavhaxq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlsldavhaq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vrmlsldavhaxq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vabdq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vabdq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vaddq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vaddq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vaddq_m_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vaddq_m_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vandq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vandq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vbicq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vbicq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vbrsrq_m_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vbrsrq_m_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcaddq_rot270_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcaddq_rot270_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcaddq_rot90_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcaddq_rot90_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_rot180_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_rot180_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_rot270_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_rot270_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_rot90_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmlaq_rot90_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_rot180_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_rot180_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_rot270_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_rot270_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_rot90_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vcmulq_rot90_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_veorq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_veorq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmaq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmaq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmaq_m_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmaq_m_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmasq_m_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmasq_m_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmsq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vfmsq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmaxnmq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vminnmq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmulq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmulq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmulq_m_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vmulq_m_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vornq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vornq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vorrq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vorrq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vsubq_m_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vsubq_m_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vsubq_m_n_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vsubq_m_n_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrbq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_sv16qi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_uv16qi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_sv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_sv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_base_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_base_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_gather_offset_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_gather_offset_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_gather_offset_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_gather_offset_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_gather_offset_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_gather_offset_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_p_sv16qi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_p_uv16qi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_p_sv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_p_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_p_sv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_p_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_base_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_base_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_p_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_gather_offset_z_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_gather_offset_z_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_gather_offset_z_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_gather_offset_z_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_gather_offset_z_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_gather_offset_z_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_z_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_z_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_z_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_z_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_z_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrbq_z_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_z_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_z_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrhq_gather_offset_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_offset_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_offset_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_offset_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_offset_z_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_offset_z_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_offset_z_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_offset_z_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_shifted_offset_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_shifted_offset_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_shifted_offset_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_shifted_offset_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_shifted_offset_z_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_shifted_offset_z_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_shifted_offset_z_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_shifted_offset_z_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_z_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrhq_z_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_z_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_z_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_z_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrwq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_z_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrwq_z_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_z_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_z_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_z_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_offset_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_offset_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_offset_z_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_offset_z_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_shifted_offset_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_shifted_offset_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_shifted_offset_z_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_shifted_offset_z_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrhq_gather_offset_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrhq_gather_offset_z_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrhq_gather_shifted_offset_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrhq_gather_shifted_offset_z_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrwq_gather_base_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrwq_gather_base_z_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrwq_gather_offset_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrwq_gather_offset_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_offset_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_offset_z_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrwq_gather_offset_z_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_offset_z_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_shifted_offset_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrwq_gather_shifted_offset_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_shifted_offset_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_shifted_offset_z_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrwq_gather_shifted_offset_z_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_shifted_offset_z_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrhq_p_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrhq_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_p_sv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_p_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_p_sv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_p_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_sv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_sv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_p_sv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_p_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_p_sv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_p_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_sv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_sv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_p_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_base_p_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_base_p_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_base_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_base_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_offset_p_sv2di_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_offset_p_uv2di_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_offset_sv2di_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_offset_uv2di_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_shifted_offset_p_sv2di_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_shifted_offset_p_uv2di_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_shifted_offset_sv2di_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_shifted_offset_uv2di_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_fv8hf_insn (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrhq_scatter_offset_p_fv8hf_insn (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_fv8hf_insn (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_p_fv8hf_insn (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_scatter_base_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_scatter_base_p_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_scatter_offset_fv4sf_insn (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_scatter_offset_p_fv4sf_insn (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_scatter_offset_p_sv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_offset_p_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_offset_sv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_offset_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_shifted_offset_fv4sf_insn (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_scatter_shifted_offset_p_fv4sf_insn (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_scatter_shifted_offset_p_sv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_shifted_offset_p_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_shifted_offset_sv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_shifted_offset_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddqv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddqv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddqv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vaddq_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vaddq_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vidupq_uv16qi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vidupq_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vidupq_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vidupq_m_wb_uv16qi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vidupq_m_wb_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vidupq_m_wb_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vddupq_uv16qi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vddupq_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vddupq_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vddupq_m_wb_uv16qi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vddupq_m_wb_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vddupq_m_wb_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_wb_uv16qi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_wb_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_wb_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_m_wb_uv16qi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_m_wb_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_m_wb_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_wb_uv16qi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_wb_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_wb_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_m_wb_uv16qi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_m_wb_uv8hi_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_m_wb_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_base_wb_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_base_wb_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_base_wb_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_base_wb_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_base_wb_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_scatter_base_wb_p_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrdq_scatter_base_wb_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_base_wb_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_base_wb_p_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_base_wb_p_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_wb_sv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_wb_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_wb_z_sv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_wb_z_uv4si_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_wb_fv4sf_insn (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrwq_gather_base_wb_z_fv4sf_insn (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrdq_gather_base_wb_sv2di_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_wb_uv2di_insn (TARGET_HAVE_MVE)
+#define HAVE_get_fpscr_nzcvqc (TARGET_HAVE_MVE)
+#define HAVE_set_fpscr_nzcvqc (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_wb_z_sv2di_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_wb_z_uv2di_insn (TARGET_HAVE_MVE)
+#define HAVE_mve_vadciq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vadciq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vadciq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vadciq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vadcq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vadcq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vadcq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vadcq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsbciq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsbciq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsbciq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsbciq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsbcq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsbcq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsbcq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vsbcq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vst2qv16qi ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V16QImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V16QImode)))
+#define HAVE_mve_vst2qv8hi ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V8HImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V8HImode)))
+#define HAVE_mve_vst2qv4si ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V4SImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V4SImode)))
+#define HAVE_mve_vst2qv8hf ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V8HFmode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V8HFmode)))
+#define HAVE_mve_vst2qv4sf ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V4SFmode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V4SFmode)))
+#define HAVE_mve_vld2qv16qi ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V16QImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V16QImode)))
+#define HAVE_mve_vld2qv8hi ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V8HImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V8HImode)))
+#define HAVE_mve_vld2qv4si ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V4SImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V4SImode)))
+#define HAVE_mve_vld2qv8hf ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V8HFmode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V8HFmode)))
+#define HAVE_mve_vld2qv4sf ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V4SFmode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V4SFmode)))
+#define HAVE_mve_vld4qv16qi ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V16QImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V16QImode)))
+#define HAVE_mve_vld4qv8hi ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V8HImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V8HImode)))
+#define HAVE_mve_vld4qv4si ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V4SImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V4SImode)))
+#define HAVE_mve_vld4qv8hf ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V8HFmode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V8HFmode)))
+#define HAVE_mve_vld4qv4sf ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V4SFmode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V4SFmode)))
+#define HAVE_mve_vec_extractv16qiqi ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V16QImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V16QImode)))
+#define HAVE_mve_vec_extractv8hihi ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V8HImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V8HImode)))
+#define HAVE_mve_vec_extractv4sisi ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V4SImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V4SImode)))
+#define HAVE_mve_vec_extractv8hfhf ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V8HFmode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V8HFmode)))
+#define HAVE_mve_vec_extractv4sfsf ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V4SFmode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V4SFmode)))
+#define HAVE_mve_vec_extractv2didi (TARGET_HAVE_MVE)
+#define HAVE_mve_vec_setv16qi_internal ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V16QImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V16QImode)))
+#define HAVE_mve_vec_setv8hi_internal ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V8HImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V8HImode)))
+#define HAVE_mve_vec_setv8hf_internal ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V8HFmode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V8HFmode)))
+#define HAVE_mve_vec_setv4si_internal ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V4SImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V4SImode)))
+#define HAVE_mve_vec_setv4sf_internal ((TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V4SFmode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V4SFmode)))
+#define HAVE_mve_vec_setv2di_internal (TARGET_HAVE_MVE)
+#define HAVE_mve_uqrshll_sat64_di (TARGET_HAVE_MVE)
+#define HAVE_mve_uqrshll_sat48_di (TARGET_HAVE_MVE)
+#define HAVE_mve_sqrshrl_sat64_di (TARGET_HAVE_MVE)
+#define HAVE_mve_sqrshrl_sat48_di (TARGET_HAVE_MVE)
+#define HAVE_mve_uqrshl_si (TARGET_HAVE_MVE)
+#define HAVE_mve_sqrshr_si (TARGET_HAVE_MVE)
+#define HAVE_mve_uqshll_di (TARGET_HAVE_MVE)
+#define HAVE_mve_urshrl_di (TARGET_HAVE_MVE)
+#define HAVE_mve_uqshl_si (TARGET_HAVE_MVE)
+#define HAVE_mve_urshr_si (TARGET_HAVE_MVE)
+#define HAVE_mve_sqshl_si (TARGET_HAVE_MVE)
+#define HAVE_mve_srshr_si (TARGET_HAVE_MVE)
+#define HAVE_mve_srshrl_di (TARGET_HAVE_MVE)
+#define HAVE_mve_sqshll_di (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_uv4si (TARGET_HAVE_MVE)
+#define HAVE_arm_vcx1qv16qi (TARGET_CDE && TARGET_HAVE_MVE)
+#define HAVE_arm_vcx1qav16qi (TARGET_CDE && TARGET_HAVE_MVE)
+#define HAVE_arm_vcx2qv16qi (TARGET_CDE && TARGET_HAVE_MVE)
+#define HAVE_arm_vcx2qav16qi (TARGET_CDE && TARGET_HAVE_MVE)
+#define HAVE_arm_vcx3qv16qi (TARGET_CDE && TARGET_HAVE_MVE)
+#define HAVE_arm_vcx3qav16qi (TARGET_CDE && TARGET_HAVE_MVE)
+#define HAVE_arm_vcx1q_p_v16qi (TARGET_CDE && TARGET_HAVE_MVE)
+#define HAVE_arm_vcx1qa_p_v16qi (TARGET_CDE && TARGET_HAVE_MVE)
+#define HAVE_arm_vcx2q_p_v16qi (TARGET_CDE && TARGET_HAVE_MVE)
+#define HAVE_arm_vcx2qa_p_v16qi (TARGET_CDE && TARGET_HAVE_MVE)
+#define HAVE_arm_vcx3q_p_v16qi (TARGET_CDE && TARGET_HAVE_MVE)
+#define HAVE_arm_vcx3qa_p_v16qi (TARGET_CDE && TARGET_HAVE_MVE)
+#define HAVE_adddi3 1
+#define HAVE_addvsi4 (TARGET_32BIT)
+#define HAVE_addvdi4 (TARGET_32BIT)
+#define HAVE_addsi3_cin_vout_reg (TARGET_32BIT)
+#define HAVE_addsi3_cin_vout_imm (TARGET_32BIT)
+#define HAVE_addsi3_cin_vout_0 (TARGET_32BIT)
+#define HAVE_uaddvsi4 (TARGET_32BIT)
+#define HAVE_uaddvdi4 (TARGET_32BIT)
+#define HAVE_addsi3_cin_cout_reg (TARGET_32BIT)
+#define HAVE_addsi3_cin_cout_imm (TARGET_32BIT)
+#define HAVE_addsi3_cin_cout_0 (TARGET_32BIT)
+#define HAVE_addsi3 1
+#define HAVE_subvsi4 (TARGET_32BIT)
+#define HAVE_subvdi4 (TARGET_32BIT)
+#define HAVE_usubvsi4 (TARGET_32BIT)
+#define HAVE_usubvdi4 (TARGET_32BIT)
+#define HAVE_addsf3 (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_adddf3 (TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE)
+#define HAVE_subdi3 1
+#define HAVE_subsi3 1
+#define HAVE_subsf3 (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_subdf3 (TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE)
+#define HAVE_mulhi3 (TARGET_DSP_MULTIPLY)
+#define HAVE_mulsi3 1
+#define HAVE_mulsidi3 (TARGET_32BIT)
+#define HAVE_umulsidi3 (TARGET_32BIT)
+#define HAVE_maddsidi4 (TARGET_32BIT)
+#define HAVE_umaddsidi4 (TARGET_32BIT)
+#define HAVE_smulsi3_highpart (TARGET_32BIT)
+#define HAVE_umulsi3_highpart (TARGET_32BIT)
+#define HAVE_maddhisi4 (TARGET_DSP_MULTIPLY)
+#define HAVE_arm_smlabb (TARGET_DSP_MULTIPLY)
+#define HAVE_arm_smlatb (TARGET_DSP_MULTIPLY)
+#define HAVE_arm_smlatt (TARGET_DSP_MULTIPLY)
+#define HAVE_arm_smlawb (TARGET_DSP_MULTIPLY)
+#define HAVE_arm_smlawt (TARGET_DSP_MULTIPLY)
+#define HAVE_mulsf3 (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_muldf3 (TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE)
+#define HAVE_divsf3 (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_divdf3 (TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE)
+#define HAVE_anddi3 (TARGET_32BIT)
+#define HAVE_iordi3 (TARGET_32BIT)
+#define HAVE_xordi3 (TARGET_32BIT)
+#define HAVE_one_cmpldi2 (TARGET_32BIT)
+#define HAVE_andsi3 1
+#define HAVE_insv (TARGET_ARM || arm_arch_thumb2)
+#define HAVE_iorsi3 1
+#define HAVE_xorsi3 1
+#define HAVE_smaxsi3 (TARGET_32BIT)
+#define HAVE_sminsi3 (TARGET_32BIT)
+#define HAVE_umaxsi3 (TARGET_32BIT)
+#define HAVE_uminsi3 (TARGET_32BIT)
+#define HAVE_arm_qadd (TARGET_DSP_MULTIPLY)
+#define HAVE_arm_qsub (TARGET_DSP_MULTIPLY)
+#define HAVE_arm_ssat (TARGET_32BIT && arm_arch6)
+#define HAVE_arm_usat (TARGET_32BIT && arm_arch6)
+#define HAVE_arm_saturation_occurred (TARGET_ARM_QBIT)
+#define HAVE_arm_set_saturation (TARGET_ARM_QBIT)
+#define HAVE_ashldi3 (TARGET_32BIT)
+#define HAVE_ashlsi3 1
+#define HAVE_ashrdi3 (TARGET_32BIT)
+#define HAVE_ashrsi3 1
+#define HAVE_lshrdi3 (TARGET_32BIT)
+#define HAVE_lshrsi3 1
+#define HAVE_rotlsi3 (TARGET_32BIT)
+#define HAVE_rotrsi3 1
+#define HAVE_extzv (TARGET_THUMB1 || arm_arch_thumb2)
+#define HAVE_extzv_t1 (TARGET_THUMB1)
+#define HAVE_extv (arm_arch_thumb2)
+#define HAVE_extv_regsi 1
+#define HAVE_negvsi3 (TARGET_32BIT)
+#define HAVE_negvdi3 (TARGET_32BIT)
+#define HAVE_negsi2 1
+#define HAVE_negsf2 (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_negdf2 (TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE)
+#define HAVE_abssi2 1
+#define HAVE_abssf2 (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_absdf2 (TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE)
+#define HAVE_sqrtsf2 (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_sqrtdf2 (TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE)
+#define HAVE_one_cmplsi2 1
+#define HAVE_floatsihf2 1
+#define HAVE_floatdihf2 1
+#define HAVE_floatsisf2 (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_floatsidf2 (TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE)
+#define HAVE_fix_trunchfsi2 1
+#define HAVE_fix_trunchfdi2 1
+#define HAVE_fix_truncsfsi2 (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_fix_truncdfsi2 (TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE)
+#define HAVE_truncdfsf2 (TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE)
+#define HAVE_truncdfhf2 ((TARGET_EITHER && flag_unsafe_math_optimizations) \
+ || (TARGET_32BIT && TARGET_FP16_TO_DOUBLE))
+#define HAVE_zero_extendqidi2 (TARGET_32BIT )
+#define HAVE_zero_extendhidi2 (TARGET_32BIT && arm_arch6)
+#define HAVE_zero_extendsidi2 (TARGET_32BIT )
+#define HAVE_extendqidi2 (TARGET_32BIT && arm_arch6)
+#define HAVE_extendhidi2 (TARGET_32BIT && arm_arch6)
+#define HAVE_extendsidi2 (TARGET_32BIT )
+#define HAVE_zero_extendhisi2 1
+#define HAVE_zero_extendqisi2 1
+#define HAVE_extendhisi2 1
+#define HAVE_extendhisi2_mem (TARGET_ARM)
+#define HAVE_extendqihi2 (TARGET_ARM)
+#define HAVE_extendqisi2 1
+#define HAVE_arm_smlad (TARGET_INT_SIMD)
+#define HAVE_arm_smladx (TARGET_INT_SIMD)
+#define HAVE_arm_smlsd (TARGET_INT_SIMD)
+#define HAVE_arm_smlsdx (TARGET_INT_SIMD)
+#define HAVE_arm_smuad (TARGET_INT_SIMD)
+#define HAVE_arm_smuadx (TARGET_INT_SIMD)
+#define HAVE_arm_ssat16 (TARGET_INT_SIMD)
+#define HAVE_arm_usat16 (TARGET_INT_SIMD)
+#define HAVE_extendsfdf2 (TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE)
+#define HAVE_extendhfdf2 1
+#define HAVE_movdi 1
+#define HAVE_movsi 1
+#define HAVE_calculate_pic_address (flag_pic)
+#define HAVE_builtin_setjmp_receiver (flag_pic)
+#define HAVE_storehi (TARGET_ARM)
+#define HAVE_storehi_bigend (TARGET_ARM)
+#define HAVE_storeinthi (TARGET_ARM)
+#define HAVE_storehi_single_op (TARGET_32BIT && arm_arch4)
+#define HAVE_movhi 1
+#define HAVE_movhi_bytes (TARGET_ARM)
+#define HAVE_movhi_bigend (TARGET_ARM)
+#define HAVE_reload_outhi 1
+#define HAVE_reload_inhi 1
+#define HAVE_movqi 1
+#define HAVE_movhf 1
+#define HAVE_movbf 1
+#define HAVE_movsf 1
+#define HAVE_movdf 1
+#define HAVE_reload_outdf (TARGET_THUMB2)
+#define HAVE_load_multiple (TARGET_32BIT)
+#define HAVE_store_multiple (TARGET_32BIT)
+#define HAVE_setmemsi (TARGET_32BIT)
+#define HAVE_cpymemqi 1
+#define HAVE_cbranchsi4 1
+#define HAVE_cbranchsf4 (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_cbranchdf4 (TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE)
+#define HAVE_cbranchdi4 (TARGET_32BIT)
+#define HAVE_cbranch_cc (TARGET_32BIT)
+#define HAVE_cstore_cc (TARGET_32BIT)
+#define HAVE_cstoresi4 (TARGET_32BIT || TARGET_THUMB1)
+#define HAVE_cstorehf4 (TARGET_VFP_FP16INST)
+#define HAVE_cstoresf4 (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_cstoredf4 (TARGET_32BIT && TARGET_HARD_FLOAT && !TARGET_VFP_SINGLE)
+#define HAVE_cstoredi4 (TARGET_32BIT)
+#define HAVE_movsicc (TARGET_32BIT)
+#define HAVE_movhfcc (TARGET_VFP_FP16INST)
+#define HAVE_movsfcc (TARGET_32BIT && TARGET_HARD_FLOAT)
+#define HAVE_movdfcc (TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE)
+#define HAVE_jump 1
+#define HAVE_call 1
+#define HAVE_call_internal 1
+#define HAVE_nonsecure_call_internal (use_cmse)
+#define HAVE_call_value 1
+#define HAVE_call_value_internal 1
+#define HAVE_nonsecure_call_value_internal (use_cmse)
+#define HAVE_sibcall_internal 1
+#define HAVE_sibcall (TARGET_32BIT)
+#define HAVE_sibcall_value_internal 1
+#define HAVE_sibcall_value (TARGET_32BIT)
+#define HAVE_return ((TARGET_ARM || (TARGET_THUMB2 \
+ && ARM_FUNC_TYPE (arm_current_func_type ()) == ARM_FT_NORMAL \
+ && !IS_STACKALIGN (arm_current_func_type ()))) \
+ && USE_RETURN_INSN (FALSE))
+#define HAVE_simple_return ((TARGET_ARM || (TARGET_THUMB2 \
+ && ARM_FUNC_TYPE (arm_current_func_type ()) == ARM_FT_NORMAL \
+ && !IS_STACKALIGN (arm_current_func_type ()))) \
+ && use_simple_return_p ())
+#define HAVE_return_addr_mask (TARGET_ARM)
+#define HAVE_untyped_call (TARGET_EITHER && !TARGET_FDPIC)
+#define HAVE_untyped_return (TARGET_EITHER && !TARGET_FDPIC)
+#define HAVE_stack_protect_combined_set (arm_stack_protector_guard == SSP_GLOBAL)
+#define HAVE_stack_protect_combined_test (arm_stack_protector_guard == SSP_GLOBAL)
+#define HAVE_stack_protect_set (arm_stack_protector_guard == SSP_TLSREG)
+#define HAVE_stack_protect_test (arm_stack_protector_guard == SSP_TLSREG)
+#define HAVE_casesi ((TARGET_32BIT || optimize_size || flag_pic) && !target_pure_code)
+#define HAVE_arm_casesi_internal (TARGET_ARM)
+#define HAVE_indirect_jump 1
+#define HAVE_prologue 1
+#define HAVE_epilogue 1
+#define HAVE_sibcall_epilogue (TARGET_32BIT)
+#define HAVE_eh_epilogue 1
+#define HAVE_eh_return 1
+#define HAVE_get_thread_pointersi 1
+#define HAVE_arm_legacy_rev (TARGET_32BIT)
+#define HAVE_thumb_legacy_rev (TARGET_THUMB)
+#define HAVE_modsi3 (TARGET_32BIT)
+#define HAVE_bswapsi2 (TARGET_EITHER && (arm_arch6 || !optimize_size))
+#define HAVE_bswaphi2 (arm_arch6)
+#define HAVE_copysignsf3 (TARGET_SOFT_FLOAT && arm_arch_thumb2)
+#define HAVE_copysigndf3 (TARGET_SOFT_FLOAT && arm_arch_thumb2)
+#define HAVE_movmisaligndi (unaligned_access)
+#define HAVE_movmisalignhi (unaligned_access)
+#define HAVE_movmisalignsi (unaligned_access)
+#define HAVE_arm_ldc (arm_coproc_builtin_available (VUNSPEC_LDC))
+#define HAVE_arm_ldc2 (arm_coproc_builtin_available (VUNSPEC_LDC2))
+#define HAVE_arm_ldcl (arm_coproc_builtin_available (VUNSPEC_LDCL))
+#define HAVE_arm_ldc2l (arm_coproc_builtin_available (VUNSPEC_LDC2L))
+#define HAVE_arm_stc (arm_coproc_builtin_available (VUNSPEC_STC))
+#define HAVE_arm_stc2 (arm_coproc_builtin_available (VUNSPEC_STC2))
+#define HAVE_arm_stcl (arm_coproc_builtin_available (VUNSPEC_STCL))
+#define HAVE_arm_stc2l (arm_coproc_builtin_available (VUNSPEC_STC2L))
+#define HAVE_speculation_barrier 1
+#define HAVE_movv16qi (TARGET_NEON \
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (V16QImode)) \
+ || (TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V16QImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V16QImode)))
+#define HAVE_movv8hi (TARGET_NEON \
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (V8HImode)) \
+ || (TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V8HImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V8HImode)))
+#define HAVE_movv4si (TARGET_NEON \
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (V4SImode)) \
+ || (TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V4SImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V4SImode)))
+#define HAVE_movv4sf (TARGET_NEON \
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (V4SFmode)) \
+ || (TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V4SFmode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V4SFmode)))
+#define HAVE_movv2di (TARGET_NEON \
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (V2DImode)) \
+ || (TARGET_HAVE_MVE && VALID_MVE_SI_MODE (V2DImode)) \
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (V2DImode)))
+#define HAVE_movv2si (TARGET_NEON \
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (V2SImode)))
+#define HAVE_movv4hi (TARGET_NEON \
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (V4HImode)))
+#define HAVE_movv8qi (TARGET_NEON \
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (V8QImode)))
+#define HAVE_movv2sf (TARGET_NEON \
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (V2SFmode)))
+#define HAVE_movv8hf (TARGET_NEON || TARGET_HAVE_MVE_FLOAT)
+#define HAVE_addv8qi3 (ARM_HAVE_V8QI_ARITH)
+#define HAVE_addv16qi3 (ARM_HAVE_V16QI_ARITH)
+#define HAVE_addv4hi3 (ARM_HAVE_V4HI_ARITH)
+#define HAVE_addv8hi3 (ARM_HAVE_V8HI_ARITH)
+#define HAVE_addv2si3 (ARM_HAVE_V2SI_ARITH)
+#define HAVE_addv4si3 (ARM_HAVE_V4SI_ARITH)
+#define HAVE_addv4hf3 (ARM_HAVE_V4HF_ARITH)
+#define HAVE_addv8hf3 (ARM_HAVE_V8HF_ARITH)
+#define HAVE_addv2sf3 (ARM_HAVE_V2SF_ARITH)
+#define HAVE_addv4sf3 (ARM_HAVE_V4SF_ARITH)
+#define HAVE_addv2di3 (ARM_HAVE_V2DI_ARITH)
+#define HAVE_subv8qi3 (ARM_HAVE_V8QI_ARITH)
+#define HAVE_subv16qi3 (ARM_HAVE_V16QI_ARITH)
+#define HAVE_subv4hi3 (ARM_HAVE_V4HI_ARITH)
+#define HAVE_subv8hi3 (ARM_HAVE_V8HI_ARITH)
+#define HAVE_subv2si3 (ARM_HAVE_V2SI_ARITH)
+#define HAVE_subv4si3 (ARM_HAVE_V4SI_ARITH)
+#define HAVE_subv4hf3 (ARM_HAVE_V4HF_ARITH)
+#define HAVE_subv8hf3 (ARM_HAVE_V8HF_ARITH)
+#define HAVE_subv2sf3 (ARM_HAVE_V2SF_ARITH)
+#define HAVE_subv4sf3 (ARM_HAVE_V4SF_ARITH)
+#define HAVE_subv2di3 (ARM_HAVE_V2DI_ARITH)
+#define HAVE_mulv8qi3 (ARM_HAVE_V8QI_ARITH \
+ && (!TARGET_REALLY_IWMMXT \
+ || V8QImode == V4HImode \
+ || V8QImode == V2SImode))
+#define HAVE_mulv16qi3 (ARM_HAVE_V16QI_ARITH \
+ && (!TARGET_REALLY_IWMMXT \
+ || V16QImode == V4HImode \
+ || V16QImode == V2SImode))
+#define HAVE_mulv4hi3 (ARM_HAVE_V4HI_ARITH \
+ && (!TARGET_REALLY_IWMMXT \
+ || V4HImode == V4HImode \
+ || V4HImode == V2SImode))
+#define HAVE_mulv8hi3 (ARM_HAVE_V8HI_ARITH \
+ && (!TARGET_REALLY_IWMMXT \
+ || V8HImode == V4HImode \
+ || V8HImode == V2SImode))
+#define HAVE_mulv2si3 (ARM_HAVE_V2SI_ARITH \
+ && (!TARGET_REALLY_IWMMXT \
+ || V2SImode == V4HImode \
+ || V2SImode == V2SImode))
+#define HAVE_mulv4si3 (ARM_HAVE_V4SI_ARITH \
+ && (!TARGET_REALLY_IWMMXT \
+ || V4SImode == V4HImode \
+ || V4SImode == V2SImode))
+#define HAVE_mulv2sf3 (ARM_HAVE_V2SF_ARITH \
+ && (!TARGET_REALLY_IWMMXT \
+ || V2SFmode == V4HImode \
+ || V2SFmode == V2SImode))
+#define HAVE_mulv4sf3 (ARM_HAVE_V4SF_ARITH \
+ && (!TARGET_REALLY_IWMMXT \
+ || V4SFmode == V4HImode \
+ || V4SFmode == V2SImode))
+#define HAVE_mulv8hf3 (ARM_HAVE_V8HF_ARITH \
+ && (!TARGET_REALLY_IWMMXT \
+ || V8HFmode == V4HImode \
+ || V8HFmode == V2SImode))
+#define HAVE_mulv4hf3 (ARM_HAVE_V4HF_ARITH \
+ && (!TARGET_REALLY_IWMMXT \
+ || V4HFmode == V4HImode \
+ || V4HFmode == V2SImode))
+#define HAVE_sminv2si3 (ARM_HAVE_V2SI_ARITH)
+#define HAVE_sminv4hi3 (ARM_HAVE_V4HI_ARITH)
+#define HAVE_sminv8qi3 (ARM_HAVE_V8QI_ARITH)
+#define HAVE_sminv2sf3 (ARM_HAVE_V2SF_ARITH)
+#define HAVE_sminv4si3 (ARM_HAVE_V4SI_ARITH)
+#define HAVE_sminv8hi3 (ARM_HAVE_V8HI_ARITH)
+#define HAVE_sminv16qi3 (ARM_HAVE_V16QI_ARITH)
+#define HAVE_sminv4sf3 (ARM_HAVE_V4SF_ARITH)
+#define HAVE_uminv2si3 (ARM_HAVE_V2SI_ARITH)
+#define HAVE_uminv4hi3 (ARM_HAVE_V4HI_ARITH)
+#define HAVE_uminv8qi3 (ARM_HAVE_V8QI_ARITH)
+#define HAVE_uminv4si3 (ARM_HAVE_V4SI_ARITH)
+#define HAVE_uminv8hi3 (ARM_HAVE_V8HI_ARITH)
+#define HAVE_uminv16qi3 (ARM_HAVE_V16QI_ARITH)
+#define HAVE_smaxv2si3 (ARM_HAVE_V2SI_ARITH)
+#define HAVE_smaxv4hi3 (ARM_HAVE_V4HI_ARITH)
+#define HAVE_smaxv8qi3 (ARM_HAVE_V8QI_ARITH)
+#define HAVE_smaxv2sf3 (ARM_HAVE_V2SF_ARITH)
+#define HAVE_smaxv4si3 (ARM_HAVE_V4SI_ARITH)
+#define HAVE_smaxv8hi3 (ARM_HAVE_V8HI_ARITH)
+#define HAVE_smaxv16qi3 (ARM_HAVE_V16QI_ARITH)
+#define HAVE_smaxv4sf3 (ARM_HAVE_V4SF_ARITH)
+#define HAVE_umaxv2si3 (ARM_HAVE_V2SI_ARITH)
+#define HAVE_umaxv4hi3 (ARM_HAVE_V4HI_ARITH)
+#define HAVE_umaxv8qi3 (ARM_HAVE_V8QI_ARITH)
+#define HAVE_umaxv4si3 (ARM_HAVE_V4SI_ARITH)
+#define HAVE_umaxv8hi3 (ARM_HAVE_V8HI_ARITH)
+#define HAVE_umaxv16qi3 (ARM_HAVE_V16QI_ARITH)
+#define HAVE_vec_permv8qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_permv16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_extractv16qiqi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_extractv8hihi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_extractv8hfhf (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_extractv4sisi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_extractv4sfsf (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_extractv2didi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_setv16qi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_setv8hi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_setv8hf (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_setv4si (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_setv4sf (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_setv2di (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_andv8qi3 (ARM_HAVE_V8QI_ARITH)
+#define HAVE_andv16qi3 (ARM_HAVE_V16QI_ARITH)
+#define HAVE_andv4hi3 (ARM_HAVE_V4HI_ARITH)
+#define HAVE_andv8hi3 (ARM_HAVE_V8HI_ARITH)
+#define HAVE_andv2si3 (ARM_HAVE_V2SI_ARITH)
+#define HAVE_andv4si3 (ARM_HAVE_V4SI_ARITH)
+#define HAVE_andv4hf3 (ARM_HAVE_V4HF_ARITH)
+#define HAVE_andv8hf3 (ARM_HAVE_V8HF_ARITH)
+#define HAVE_andv2sf3 (ARM_HAVE_V2SF_ARITH)
+#define HAVE_andv4sf3 (ARM_HAVE_V4SF_ARITH)
+#define HAVE_andv2di3 (ARM_HAVE_V2DI_ARITH)
+#define HAVE_iorv8qi3 (ARM_HAVE_V8QI_ARITH)
+#define HAVE_iorv16qi3 (ARM_HAVE_V16QI_ARITH)
+#define HAVE_iorv4hi3 (ARM_HAVE_V4HI_ARITH)
+#define HAVE_iorv8hi3 (ARM_HAVE_V8HI_ARITH)
+#define HAVE_iorv2si3 (ARM_HAVE_V2SI_ARITH)
+#define HAVE_iorv4si3 (ARM_HAVE_V4SI_ARITH)
+#define HAVE_iorv4hf3 (ARM_HAVE_V4HF_ARITH)
+#define HAVE_iorv8hf3 (ARM_HAVE_V8HF_ARITH)
+#define HAVE_iorv2sf3 (ARM_HAVE_V2SF_ARITH)
+#define HAVE_iorv4sf3 (ARM_HAVE_V4SF_ARITH)
+#define HAVE_iorv2di3 (ARM_HAVE_V2DI_ARITH)
+#define HAVE_xorv8qi3 (ARM_HAVE_V8QI_ARITH)
+#define HAVE_xorv16qi3 (ARM_HAVE_V16QI_ARITH)
+#define HAVE_xorv4hi3 (ARM_HAVE_V4HI_ARITH)
+#define HAVE_xorv8hi3 (ARM_HAVE_V8HI_ARITH)
+#define HAVE_xorv2si3 (ARM_HAVE_V2SI_ARITH)
+#define HAVE_xorv4si3 (ARM_HAVE_V4SI_ARITH)
+#define HAVE_xorv4hf3 (ARM_HAVE_V4HF_ARITH)
+#define HAVE_xorv8hf3 (ARM_HAVE_V8HF_ARITH)
+#define HAVE_xorv2sf3 (ARM_HAVE_V2SF_ARITH)
+#define HAVE_xorv4sf3 (ARM_HAVE_V4SF_ARITH)
+#define HAVE_xorv2di3 (ARM_HAVE_V2DI_ARITH)
+#define HAVE_one_cmplv8qi2 (ARM_HAVE_V8QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_one_cmplv16qi2 (ARM_HAVE_V16QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_one_cmplv4hi2 (ARM_HAVE_V4HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_one_cmplv8hi2 (ARM_HAVE_V8HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_one_cmplv2si2 (ARM_HAVE_V2SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_one_cmplv4si2 (ARM_HAVE_V4SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_one_cmplv4hf2 (ARM_HAVE_V4HF_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_one_cmplv8hf2 (ARM_HAVE_V8HF_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_one_cmplv2sf2 (ARM_HAVE_V2SF_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_one_cmplv4sf2 (ARM_HAVE_V4SF_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_one_cmplv2di2 (ARM_HAVE_V2DI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_absv8qi2 (ARM_HAVE_V8QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_negv8qi2 (ARM_HAVE_V8QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_absv16qi2 (ARM_HAVE_V16QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_negv16qi2 (ARM_HAVE_V16QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_absv4hi2 (ARM_HAVE_V4HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_negv4hi2 (ARM_HAVE_V4HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_absv8hi2 (ARM_HAVE_V8HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_negv8hi2 (ARM_HAVE_V8HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_absv2si2 (ARM_HAVE_V2SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_negv2si2 (ARM_HAVE_V2SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_absv4si2 (ARM_HAVE_V4SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_negv4si2 (ARM_HAVE_V4SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_absv2sf2 (ARM_HAVE_V2SF_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_negv2sf2 (ARM_HAVE_V2SF_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_absv4sf2 (ARM_HAVE_V4SF_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_negv4sf2 (ARM_HAVE_V4SF_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_absv8hf2 (ARM_HAVE_V8HF_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_negv8hf2 (ARM_HAVE_V8HF_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_absv4hf2 (ARM_HAVE_V4HF_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_negv4hf2 (ARM_HAVE_V4HF_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_cadd90v4hf3 (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_cadd270v4hf3 (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_cadd90v8hf3 (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V8HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_cadd270v8hf3 (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V8HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_cadd90v2sf3 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V2SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_cadd270v2sf3 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V2SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_cadd90v4sf3 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_cadd270v4sf3 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_cmulv8hf3 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)) \
+ && !BYTES_BIG_ENDIAN)
+#define HAVE_cmul_conjv8hf3 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)) \
+ && !BYTES_BIG_ENDIAN)
+#define HAVE_cmulv4sf3 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)) \
+ && !BYTES_BIG_ENDIAN)
+#define HAVE_cmul_conjv4sf3 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)) \
+ && !BYTES_BIG_ENDIAN)
+#define HAVE_arm_vcmla0v4hf (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_arm_vcmla90v4hf (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_arm_vcmla180v4hf (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_arm_vcmla270v4hf (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_arm_vcmla0v8hf (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V8HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_arm_vcmla90v8hf (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V8HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_arm_vcmla180v8hf (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V8HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_arm_vcmla270v8hf (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V8HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_arm_vcmla0v2sf ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V2SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_arm_vcmla90v2sf ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V2SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_arm_vcmla180v2sf ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V2SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_arm_vcmla270v2sf ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V2SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_arm_vcmla0v4sf ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_arm_vcmla90v4sf ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_arm_vcmla180v4sf ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_arm_vcmla270v4sf ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_cmlav4hf4 (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_cmla_conjv4hf4 (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_cmlsv4hf4 (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_cmls_conjv4hf4 (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_cmlav8hf4 (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V8HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_cmla_conjv8hf4 (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V8HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_cmlsv8hf4 (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V8HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_cmls_conjv8hf4 (((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V8HF_ARITH)) && !BYTES_BIG_ENDIAN) && (TARGET_NEON_FP16INST))
+#define HAVE_cmlav2sf4 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V2SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_cmla_conjv2sf4 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V2SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_cmlsv2sf4 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V2SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_cmls_conjv2sf4 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V2SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_cmlav4sf4 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_cmla_conjv4sf4 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_cmlsv4sf4 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_cmls_conjv4sf4 ((TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT \
+ && ARM_HAVE_V4SF_ARITH)) && !BYTES_BIG_ENDIAN)
+#define HAVE_movmisalignv8qi (ARM_HAVE_V8QI_LDST && !BYTES_BIG_ENDIAN \
+ && unaligned_access && !TARGET_REALLY_IWMMXT)
+#define HAVE_movmisalignv16qi (ARM_HAVE_V16QI_LDST && !BYTES_BIG_ENDIAN \
+ && unaligned_access && !TARGET_REALLY_IWMMXT)
+#define HAVE_movmisalignv4hi (ARM_HAVE_V4HI_LDST && !BYTES_BIG_ENDIAN \
+ && unaligned_access && !TARGET_REALLY_IWMMXT)
+#define HAVE_movmisalignv8hi (ARM_HAVE_V8HI_LDST && !BYTES_BIG_ENDIAN \
+ && unaligned_access && !TARGET_REALLY_IWMMXT)
+#define HAVE_movmisalignv2si (ARM_HAVE_V2SI_LDST && !BYTES_BIG_ENDIAN \
+ && unaligned_access && !TARGET_REALLY_IWMMXT)
+#define HAVE_movmisalignv4si (ARM_HAVE_V4SI_LDST && !BYTES_BIG_ENDIAN \
+ && unaligned_access && !TARGET_REALLY_IWMMXT)
+#define HAVE_movmisalignv4hf (ARM_HAVE_V4HF_LDST && !BYTES_BIG_ENDIAN \
+ && unaligned_access && !TARGET_REALLY_IWMMXT)
+#define HAVE_movmisalignv8hf (ARM_HAVE_V8HF_LDST && !BYTES_BIG_ENDIAN \
+ && unaligned_access && !TARGET_REALLY_IWMMXT)
+#define HAVE_movmisalignv2sf (ARM_HAVE_V2SF_LDST && !BYTES_BIG_ENDIAN \
+ && unaligned_access && !TARGET_REALLY_IWMMXT)
+#define HAVE_movmisalignv4sf (ARM_HAVE_V4SF_LDST && !BYTES_BIG_ENDIAN \
+ && unaligned_access && !TARGET_REALLY_IWMMXT)
+#define HAVE_movmisalignv2di (ARM_HAVE_V2DI_LDST && !BYTES_BIG_ENDIAN \
+ && unaligned_access && !TARGET_REALLY_IWMMXT)
+#define HAVE_vashlv8qi3 (ARM_HAVE_V8QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vashlv16qi3 (ARM_HAVE_V16QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vashlv4hi3 (ARM_HAVE_V4HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vashlv8hi3 (ARM_HAVE_V8HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vashlv2si3 (ARM_HAVE_V2SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vashlv4si3 (ARM_HAVE_V4SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vashrv8qi3 (ARM_HAVE_V8QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vashrv16qi3 (ARM_HAVE_V16QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vashrv4hi3 (ARM_HAVE_V4HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vashrv8hi3 (ARM_HAVE_V8HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vashrv2si3 (ARM_HAVE_V2SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vashrv4si3 (ARM_HAVE_V4SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vlshrv8qi3 (ARM_HAVE_V8QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vlshrv16qi3 (ARM_HAVE_V16QI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vlshrv4hi3 (ARM_HAVE_V4HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vlshrv8hi3 (ARM_HAVE_V8HI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vlshrv2si3 (ARM_HAVE_V2SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vlshrv4si3 (ARM_HAVE_V4SI_ARITH && !TARGET_REALLY_IWMMXT)
+#define HAVE_vcondv8qiv8qi (ARM_HAVE_V8QI_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcondv16qiv16qi (ARM_HAVE_V16QI_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcondv4hiv4hi (ARM_HAVE_V4HI_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcondv8hiv8hi (ARM_HAVE_V8HI_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcondv2siv2si (ARM_HAVE_V2SI_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcondv4siv4si (ARM_HAVE_V4SI_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcondv2sfv2sf (ARM_HAVE_V2SF_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vcondv4sfv4sf (ARM_HAVE_V4SF_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vcondv8hfv8hf (ARM_HAVE_V8HF_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vcondv4hfv4hf (ARM_HAVE_V4HF_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vcondv2sfv2si (ARM_HAVE_V2SI_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcondv2siv2sf (ARM_HAVE_V2SF_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vcondv4sfv4si (ARM_HAVE_V4SI_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcondv4siv4sf (ARM_HAVE_V4SF_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vcondv4hfv4hi (ARM_HAVE_V4HI_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcondv4hiv4hf (ARM_HAVE_V4HF_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vcondv8hfv8hi (ARM_HAVE_V8HI_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcondv8hiv8hf (ARM_HAVE_V8HF_ARITH \
+ && !TARGET_REALLY_IWMMXT \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vconduv8qiv8qi (ARM_HAVE_V8QI_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_vconduv16qiv16qi (ARM_HAVE_V16QI_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_vconduv4hiv4hi (ARM_HAVE_V4HI_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_vconduv8hiv8hi (ARM_HAVE_V8HI_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_vconduv2siv2si (ARM_HAVE_V2SI_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_vconduv4siv4si (ARM_HAVE_V4SI_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_vconduv2sfv2si (ARM_HAVE_V2SF_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_vconduv4sfv4si (ARM_HAVE_V4SF_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_vec_load_lanesoiv16qi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_load_lanesoiv8hi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_load_lanesoiv8hf (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_load_lanesoiv4si (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_load_lanesoiv4sf (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_store_lanesoiv16qi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_store_lanesoiv8hi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_store_lanesoiv8hf (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_store_lanesoiv4si (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_store_lanesoiv4sf (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_load_lanesxiv16qi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_load_lanesxiv8hi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_load_lanesxiv8hf (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_load_lanesxiv4si (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_load_lanesxiv4sf (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_store_lanesxiv16qi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_store_lanesxiv8hi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_store_lanesxiv8hf (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_store_lanesxiv4si (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_vec_store_lanesxiv4sf (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_reduc_plus_scal_v16qi (ARM_HAVE_V16QI_ARITH \
+ && !(TARGET_HAVE_MVE && FLOAT_MODE_P (V16QImode)) \
+ && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_plus_scal_v8hi (ARM_HAVE_V8HI_ARITH \
+ && !(TARGET_HAVE_MVE && FLOAT_MODE_P (V8HImode)) \
+ && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_plus_scal_v4si (ARM_HAVE_V4SI_ARITH \
+ && !(TARGET_HAVE_MVE && FLOAT_MODE_P (V4SImode)) \
+ && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_plus_scal_v4sf (ARM_HAVE_V4SF_ARITH \
+ && !(TARGET_HAVE_MVE && FLOAT_MODE_P (V4SFmode)) \
+ && !BYTES_BIG_ENDIAN)
+#define HAVE_avgv16qi3_floor (ARM_HAVE_V16QI_ARITH)
+#define HAVE_avgv8hi3_floor (ARM_HAVE_V8HI_ARITH)
+#define HAVE_avgv4si3_floor (ARM_HAVE_V4SI_ARITH)
+#define HAVE_uavgv16qi3_floor (ARM_HAVE_V16QI_ARITH)
+#define HAVE_uavgv8hi3_floor (ARM_HAVE_V8HI_ARITH)
+#define HAVE_uavgv4si3_floor (ARM_HAVE_V4SI_ARITH)
+#define HAVE_avgv16qi3_ceil (ARM_HAVE_V16QI_ARITH)
+#define HAVE_avgv8hi3_ceil (ARM_HAVE_V8HI_ARITH)
+#define HAVE_avgv4si3_ceil (ARM_HAVE_V4SI_ARITH)
+#define HAVE_uavgv16qi3_ceil (ARM_HAVE_V16QI_ARITH)
+#define HAVE_uavgv8hi3_ceil (ARM_HAVE_V8HI_ARITH)
+#define HAVE_uavgv4si3_ceil (ARM_HAVE_V4SI_ARITH)
+#define HAVE_clzv8qi2 (ARM_HAVE_V8QI_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_clzv16qi2 (ARM_HAVE_V16QI_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_clzv4hi2 (ARM_HAVE_V4HI_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_clzv8hi2 (ARM_HAVE_V8HI_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_clzv2si2 (ARM_HAVE_V2SI_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_clzv4si2 (ARM_HAVE_V4SI_ARITH \
+ && !TARGET_REALLY_IWMMXT)
+#define HAVE_vec_initv8qiqi (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (V8QImode)))
+#define HAVE_vec_initv16qiqi (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (V16QImode)))
+#define HAVE_vec_initv4hihi (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (V4HImode)))
+#define HAVE_vec_initv8hihi (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (V8HImode)))
+#define HAVE_vec_initv2sisi (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (V2SImode)))
+#define HAVE_vec_initv4sisi (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (V4SImode)))
+#define HAVE_vec_initv4hfhf (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (V4HFmode)))
+#define HAVE_vec_initv8hfhf (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (V8HFmode)))
+#define HAVE_vec_initv4bfbf (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (V4BFmode)))
+#define HAVE_vec_initv8bfbf (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (V8BFmode)))
+#define HAVE_vec_initv2sfsf (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (V2SFmode)))
+#define HAVE_vec_initv4sfsf (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (V4SFmode)))
+#define HAVE_vec_initdidi (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (DImode)))
+#define HAVE_vec_initv2didi (TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (V2DImode)))
+#define HAVE_iwmmxt_setwcgr0 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_setwcgr1 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_setwcgr2 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_setwcgr3 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_getwcgr0 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_getwcgr1 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_getwcgr2 (TARGET_REALLY_IWMMXT)
+#define HAVE_iwmmxt_getwcgr3 (TARGET_REALLY_IWMMXT)
+#define HAVE_neon_vabshf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vfmahf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vfmshf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvths_nhf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvthu_nhf (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvths_nsi (TARGET_VFP_FP16INST)
+#define HAVE_neon_vcvthu_nsi (TARGET_VFP_FP16INST)
+#define HAVE_thumb_movhi_clobber (TARGET_THUMB1)
+#define HAVE_cbranchqi4 (TARGET_THUMB1)
+#define HAVE_cbranchsi4_neg_late (TARGET_THUMB1)
+#define HAVE_cstoresi_eq0_thumb1 (TARGET_THUMB1)
+#define HAVE_cstoresi_ne0_thumb1 (TARGET_THUMB1)
+#define HAVE_thumb1_casesi_internal_pic (TARGET_THUMB1)
+#define HAVE_tablejump (TARGET_THUMB1)
+#define HAVE_thumb2_casesi_internal (TARGET_THUMB2 && !flag_pic)
+#define HAVE_thumb2_casesi_internal_pic (TARGET_THUMB2 && flag_pic)
+#define HAVE_doloop_end (TARGET_32BIT)
+#define HAVE_doloop_begin (TARGET_32BIT && TARGET_HAVE_LOB)
+#define HAVE_movti (TARGET_NEON)
+#define HAVE_movei ((TARGET_NEON || TARGET_HAVE_MVE) && (!TARGET_HAVE_MVE))
+#define HAVE_movoi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_movci ((TARGET_NEON || TARGET_HAVE_MVE) && (!TARGET_HAVE_MVE))
+#define HAVE_movxi (TARGET_NEON || TARGET_HAVE_MVE)
+#define HAVE_movv4hf (TARGET_NEON)
+#define HAVE_movv4bf (TARGET_NEON)
+#define HAVE_movv8bf (TARGET_NEON)
+#define HAVE_divv2sf3 (TARGET_NEON && !optimize_size \
+ && flag_reciprocal_math)
+#define HAVE_divv4sf3 (TARGET_NEON && !optimize_size \
+ && flag_reciprocal_math)
+#define HAVE_ceilv2sf2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_btruncv2sf2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_floorv2sf2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_rintv2sf2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_roundv2sf2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_roundevenv2sf2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_ceilv4sf2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_btruncv4sf2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_floorv4sf2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_rintv4sf2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_roundv4sf2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_roundevenv4sf2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_lceilv2sfv2si2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_lfloorv2sfv2si2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_lroundv2sfv2si2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_lceiluv2sfv2si2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_lflooruv2sfv2si2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_lrounduv2sfv2si2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_lceilv4sfv4si2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_lfloorv4sfv4si2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_lroundv4sfv4si2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_lceiluv4sfv4si2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_lflooruv4sfv4si2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_lrounduv4sfv4si2 (TARGET_NEON && TARGET_VFP5 && flag_unsafe_math_optimizations)
+#define HAVE_neon_vabsv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vnegv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vabsv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vnegv4hf (TARGET_NEON_FP16INST)
+#define HAVE_widen_ssumv16qi3 (TARGET_NEON)
+#define HAVE_widen_ssumv8hi3 (TARGET_NEON)
+#define HAVE_widen_ssumv4si3 (TARGET_NEON)
+#define HAVE_widen_usumv16qi3 (TARGET_NEON)
+#define HAVE_widen_usumv8hi3 (TARGET_NEON)
+#define HAVE_widen_usumv4si3 (TARGET_NEON)
+#define HAVE_move_hi_quad_v2di (TARGET_NEON)
+#define HAVE_move_hi_quad_v2df (TARGET_NEON)
+#define HAVE_move_hi_quad_v16qi (TARGET_NEON)
+#define HAVE_move_hi_quad_v8hi (TARGET_NEON)
+#define HAVE_move_hi_quad_v4si (TARGET_NEON)
+#define HAVE_move_hi_quad_v4sf (TARGET_NEON)
+#define HAVE_move_lo_quad_v2di (TARGET_NEON)
+#define HAVE_move_lo_quad_v2df (TARGET_NEON)
+#define HAVE_move_lo_quad_v16qi (TARGET_NEON)
+#define HAVE_move_lo_quad_v8hi (TARGET_NEON)
+#define HAVE_move_lo_quad_v4si (TARGET_NEON)
+#define HAVE_move_lo_quad_v4sf (TARGET_NEON)
+#define HAVE_reduc_plus_scal_v8qi (ARM_HAVE_NEON_V8QI_ARITH)
+#define HAVE_reduc_plus_scal_v4hi (ARM_HAVE_NEON_V4HI_ARITH)
+#define HAVE_reduc_plus_scal_v2si (ARM_HAVE_NEON_V2SI_ARITH)
+#define HAVE_reduc_plus_scal_v2sf (ARM_HAVE_NEON_V2SF_ARITH)
+#define HAVE_reduc_plus_scal_v2di (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_smin_scal_v8qi (ARM_HAVE_NEON_V8QI_ARITH)
+#define HAVE_reduc_smin_scal_v4hi (ARM_HAVE_NEON_V4HI_ARITH)
+#define HAVE_reduc_smin_scal_v2si (ARM_HAVE_NEON_V2SI_ARITH)
+#define HAVE_reduc_smin_scal_v2sf (ARM_HAVE_NEON_V2SF_ARITH)
+#define HAVE_reduc_smin_scal_v16qi (ARM_HAVE_NEON_V16QI_ARITH && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_smin_scal_v8hi (ARM_HAVE_NEON_V8HI_ARITH && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_smin_scal_v4si (ARM_HAVE_NEON_V4SI_ARITH && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_smin_scal_v4sf (ARM_HAVE_NEON_V4SF_ARITH && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_smax_scal_v8qi (ARM_HAVE_NEON_V8QI_ARITH)
+#define HAVE_reduc_smax_scal_v4hi (ARM_HAVE_NEON_V4HI_ARITH)
+#define HAVE_reduc_smax_scal_v2si (ARM_HAVE_NEON_V2SI_ARITH)
+#define HAVE_reduc_smax_scal_v2sf (ARM_HAVE_NEON_V2SF_ARITH)
+#define HAVE_reduc_smax_scal_v16qi (ARM_HAVE_NEON_V16QI_ARITH && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_smax_scal_v8hi (ARM_HAVE_NEON_V8HI_ARITH && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_smax_scal_v4si (ARM_HAVE_NEON_V4SI_ARITH && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_smax_scal_v4sf (ARM_HAVE_NEON_V4SF_ARITH && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_umin_scal_v8qi (TARGET_NEON)
+#define HAVE_reduc_umin_scal_v4hi (TARGET_NEON)
+#define HAVE_reduc_umin_scal_v2si (TARGET_NEON)
+#define HAVE_reduc_umin_scal_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_umin_scal_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_umin_scal_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_umax_scal_v8qi (TARGET_NEON)
+#define HAVE_reduc_umax_scal_v4hi (TARGET_NEON)
+#define HAVE_reduc_umax_scal_v2si (TARGET_NEON)
+#define HAVE_reduc_umax_scal_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_umax_scal_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_reduc_umax_scal_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_cmpv8qiv8qi (TARGET_NEON \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpv16qiv16qi (TARGET_NEON \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpv4hiv4hi (TARGET_NEON \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpv8hiv8hi (TARGET_NEON \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpv2siv2si (TARGET_NEON \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpv4siv4si (TARGET_NEON \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpv2sfv2si (TARGET_NEON \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpv4sfv4si (TARGET_NEON \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpv8hfv8hi (TARGET_NEON \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpv4hfv4hi (TARGET_NEON \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpuv8qiv8qi (TARGET_NEON)
+#define HAVE_vec_cmpuv16qiv16qi (TARGET_NEON)
+#define HAVE_vec_cmpuv4hiv4hi (TARGET_NEON)
+#define HAVE_vec_cmpuv8hiv8hi (TARGET_NEON)
+#define HAVE_vec_cmpuv2siv2si (TARGET_NEON)
+#define HAVE_vec_cmpuv4siv4si (TARGET_NEON)
+#define HAVE_vcond_mask_v8qiv8qi (TARGET_NEON \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcond_mask_v16qiv16qi (TARGET_NEON \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcond_mask_v4hiv4hi (TARGET_NEON \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcond_mask_v8hiv8hi (TARGET_NEON \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcond_mask_v2siv2si (TARGET_NEON \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcond_mask_v4siv4si (TARGET_NEON \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vcond_mask_v2sfv2si (TARGET_NEON \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vcond_mask_v4sfv4si (TARGET_NEON \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vcond_mask_v8hfv8hi (TARGET_NEON \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vcond_mask_v4hfv4hi (TARGET_NEON \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_neon_vaddv2sf (TARGET_NEON)
+#define HAVE_neon_vaddv4sf (TARGET_NEON)
+#define HAVE_neon_vaddv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vaddv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vsubv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vsubv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vmlav8qi (TARGET_NEON)
+#define HAVE_neon_vmlav16qi (TARGET_NEON)
+#define HAVE_neon_vmlav4hi (TARGET_NEON)
+#define HAVE_neon_vmlav8hi (TARGET_NEON)
+#define HAVE_neon_vmlav2si (TARGET_NEON)
+#define HAVE_neon_vmlav4si (TARGET_NEON)
+#define HAVE_neon_vmlav2sf (TARGET_NEON)
+#define HAVE_neon_vmlav4sf (TARGET_NEON)
+#define HAVE_neon_vfmav2sf (TARGET_NEON && TARGET_FMA)
+#define HAVE_neon_vfmav4sf (TARGET_NEON && TARGET_FMA)
+#define HAVE_neon_vfmav8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vfmav4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vfmsv2sf (TARGET_NEON && TARGET_FMA)
+#define HAVE_neon_vfmsv4sf (TARGET_NEON && TARGET_FMA)
+#define HAVE_neon_vfmsv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vfmsv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vfmal_lowv2sf (TARGET_FP16FML)
+#define HAVE_neon_vfmal_highv2sf (TARGET_FP16FML)
+#define HAVE_neon_vfmsl_lowv2sf (TARGET_FP16FML)
+#define HAVE_neon_vfmsl_highv2sf (TARGET_FP16FML)
+#define HAVE_neon_vfmal_lowv4sf (TARGET_FP16FML)
+#define HAVE_neon_vfmal_highv4sf (TARGET_FP16FML)
+#define HAVE_neon_vfmsl_lowv4sf (TARGET_FP16FML)
+#define HAVE_neon_vfmsl_highv4sf (TARGET_FP16FML)
+#define HAVE_neon_vfmal_lane_lowv2sf (TARGET_FP16FML)
+#define HAVE_neon_vfmal_lane_highv2sf (TARGET_FP16FML)
+#define HAVE_neon_vfmsl_lane_lowv2sf (TARGET_FP16FML)
+#define HAVE_neon_vfmsl_lane_highv2sf (TARGET_FP16FML)
+#define HAVE_neon_vfmal_lane_lowv4sf (TARGET_FP16FML)
+#define HAVE_neon_vfmal_lane_highv4sf (TARGET_FP16FML)
+#define HAVE_neon_vfmsl_lane_lowv4sf (TARGET_FP16FML)
+#define HAVE_neon_vfmsl_lane_highv4sf (TARGET_FP16FML)
+#define HAVE_neon_vfmal_lane_lowv8hfv2sf (TARGET_FP16FML)
+#define HAVE_neon_vfmal_lane_highv8hfv2sf (TARGET_FP16FML)
+#define HAVE_neon_vfmsl_lane_lowv8hfv2sf (TARGET_FP16FML)
+#define HAVE_neon_vfmsl_lane_highv8hfv2sf (TARGET_FP16FML)
+#define HAVE_neon_vfmal_lane_lowv4hfv4sf (TARGET_FP16FML)
+#define HAVE_neon_vfmal_lane_highv4hfv4sf (TARGET_FP16FML)
+#define HAVE_neon_vfmsl_lane_lowv4hfv4sf (TARGET_FP16FML)
+#define HAVE_neon_vfmsl_lane_highv4hfv4sf (TARGET_FP16FML)
+#define HAVE_neon_vmlsv8qi (TARGET_NEON)
+#define HAVE_neon_vmlsv16qi (TARGET_NEON)
+#define HAVE_neon_vmlsv4hi (TARGET_NEON)
+#define HAVE_neon_vmlsv8hi (TARGET_NEON)
+#define HAVE_neon_vmlsv2si (TARGET_NEON)
+#define HAVE_neon_vmlsv4si (TARGET_NEON)
+#define HAVE_neon_vmlsv2sf (TARGET_NEON)
+#define HAVE_neon_vmlsv4sf (TARGET_NEON)
+#define HAVE_neon_vsubv2sf (TARGET_NEON)
+#define HAVE_neon_vsubv4sf (TARGET_NEON)
+#define HAVE_neon_vceqv8qi (TARGET_NEON)
+#define HAVE_neon_vcgtv8qi (TARGET_NEON)
+#define HAVE_neon_vcgev8qi (TARGET_NEON)
+#define HAVE_neon_vclev8qi (TARGET_NEON)
+#define HAVE_neon_vcltv8qi (TARGET_NEON)
+#define HAVE_neon_vceqv16qi (TARGET_NEON)
+#define HAVE_neon_vcgtv16qi (TARGET_NEON)
+#define HAVE_neon_vcgev16qi (TARGET_NEON)
+#define HAVE_neon_vclev16qi (TARGET_NEON)
+#define HAVE_neon_vcltv16qi (TARGET_NEON)
+#define HAVE_neon_vceqv4hi (TARGET_NEON)
+#define HAVE_neon_vcgtv4hi (TARGET_NEON)
+#define HAVE_neon_vcgev4hi (TARGET_NEON)
+#define HAVE_neon_vclev4hi (TARGET_NEON)
+#define HAVE_neon_vcltv4hi (TARGET_NEON)
+#define HAVE_neon_vceqv8hi (TARGET_NEON)
+#define HAVE_neon_vcgtv8hi (TARGET_NEON)
+#define HAVE_neon_vcgev8hi (TARGET_NEON)
+#define HAVE_neon_vclev8hi (TARGET_NEON)
+#define HAVE_neon_vcltv8hi (TARGET_NEON)
+#define HAVE_neon_vceqv2si (TARGET_NEON)
+#define HAVE_neon_vcgtv2si (TARGET_NEON)
+#define HAVE_neon_vcgev2si (TARGET_NEON)
+#define HAVE_neon_vclev2si (TARGET_NEON)
+#define HAVE_neon_vcltv2si (TARGET_NEON)
+#define HAVE_neon_vceqv4si (TARGET_NEON)
+#define HAVE_neon_vcgtv4si (TARGET_NEON)
+#define HAVE_neon_vcgev4si (TARGET_NEON)
+#define HAVE_neon_vclev4si (TARGET_NEON)
+#define HAVE_neon_vcltv4si (TARGET_NEON)
+#define HAVE_neon_vceqv2sf (TARGET_NEON)
+#define HAVE_neon_vcgtv2sf (TARGET_NEON)
+#define HAVE_neon_vcgev2sf (TARGET_NEON)
+#define HAVE_neon_vclev2sf (TARGET_NEON)
+#define HAVE_neon_vcltv2sf (TARGET_NEON)
+#define HAVE_neon_vceqv4sf (TARGET_NEON)
+#define HAVE_neon_vcgtv4sf (TARGET_NEON)
+#define HAVE_neon_vcgev4sf (TARGET_NEON)
+#define HAVE_neon_vclev4sf (TARGET_NEON)
+#define HAVE_neon_vcltv4sf (TARGET_NEON)
+#define HAVE_neon_vceqv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcgtv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcgev8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vclev8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcltv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vceqv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcgtv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcgev4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vclev4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcltv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcagtv2sf (TARGET_NEON)
+#define HAVE_neon_vcagev2sf (TARGET_NEON)
+#define HAVE_neon_vcaltv2sf (TARGET_NEON)
+#define HAVE_neon_vcalev2sf (TARGET_NEON)
+#define HAVE_neon_vcagtv4sf (TARGET_NEON)
+#define HAVE_neon_vcagev4sf (TARGET_NEON)
+#define HAVE_neon_vcaltv4sf (TARGET_NEON)
+#define HAVE_neon_vcalev4sf (TARGET_NEON)
+#define HAVE_neon_vcagtv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcagev8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcaltv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcalev8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcagtv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcagev4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcaltv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcalev4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vceqzv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcgtzv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcgezv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vclezv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcltzv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vceqzv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcgtzv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcgezv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vclezv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vcltzv4hf (TARGET_NEON_FP16INST)
+#define HAVE_ssadv16qi (TARGET_NEON)
+#define HAVE_usadv16qi (TARGET_NEON)
+#define HAVE_neon_vpaddv8qi (TARGET_NEON)
+#define HAVE_neon_vpaddv4hi (TARGET_NEON)
+#define HAVE_neon_vpaddv2si (TARGET_NEON)
+#define HAVE_neon_vpaddv2sf (TARGET_NEON)
+#define HAVE_neon_vabsv8qi (TARGET_NEON)
+#define HAVE_neon_vabsv16qi (TARGET_NEON)
+#define HAVE_neon_vabsv4hi (TARGET_NEON)
+#define HAVE_neon_vabsv8hi (TARGET_NEON)
+#define HAVE_neon_vabsv2si (TARGET_NEON)
+#define HAVE_neon_vabsv4si (TARGET_NEON)
+#define HAVE_neon_vabsv2sf (TARGET_NEON)
+#define HAVE_neon_vabsv4sf (TARGET_NEON)
+#define HAVE_neon_vnegv8qi (TARGET_NEON)
+#define HAVE_neon_vnegv16qi (TARGET_NEON)
+#define HAVE_neon_vnegv4hi (TARGET_NEON)
+#define HAVE_neon_vnegv8hi (TARGET_NEON)
+#define HAVE_neon_vnegv2si (TARGET_NEON)
+#define HAVE_neon_vnegv4si (TARGET_NEON)
+#define HAVE_neon_vnegv2sf (TARGET_NEON)
+#define HAVE_neon_vnegv4sf (TARGET_NEON)
+#define HAVE_cmulv2sf3 (TARGET_COMPLEX && !BYTES_BIG_ENDIAN)
+#define HAVE_cmul_conjv2sf3 (TARGET_COMPLEX && !BYTES_BIG_ENDIAN)
+#define HAVE_cmulv4hf3 (TARGET_COMPLEX && !BYTES_BIG_ENDIAN)
+#define HAVE_cmul_conjv4hf3 (TARGET_COMPLEX && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_sdotv8qi (TARGET_DOTPROD)
+#define HAVE_neon_udotv8qi (TARGET_DOTPROD)
+#define HAVE_neon_sdotv16qi (TARGET_DOTPROD)
+#define HAVE_neon_udotv16qi (TARGET_DOTPROD)
+#define HAVE_usdot_prodv8qi (TARGET_I8MM)
+#define HAVE_usdot_prodv16qi (TARGET_I8MM)
+#define HAVE_copysignv2sf3 (TARGET_NEON)
+#define HAVE_copysignv4sf3 (TARGET_NEON)
+#define HAVE_neon_vcntv8qi (TARGET_NEON)
+#define HAVE_neon_vcntv16qi (TARGET_NEON)
+#define HAVE_neon_vmvnv8qi (TARGET_NEON)
+#define HAVE_neon_vmvnv16qi (TARGET_NEON)
+#define HAVE_neon_vmvnv4hi (TARGET_NEON)
+#define HAVE_neon_vmvnv8hi (TARGET_NEON)
+#define HAVE_neon_vmvnv2si (TARGET_NEON)
+#define HAVE_neon_vmvnv4si (TARGET_NEON)
+#define HAVE_neon_vget_lanev8qi (TARGET_NEON)
+#define HAVE_neon_vget_lanev16qi (TARGET_NEON)
+#define HAVE_neon_vget_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vget_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vget_lanev2si (TARGET_NEON)
+#define HAVE_neon_vget_lanev4si (TARGET_NEON)
+#define HAVE_neon_vget_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vget_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vget_laneuv8qi (TARGET_NEON)
+#define HAVE_neon_vget_laneuv16qi (TARGET_NEON)
+#define HAVE_neon_vget_laneuv4hi (TARGET_NEON)
+#define HAVE_neon_vget_laneuv8hi (TARGET_NEON)
+#define HAVE_neon_vget_laneuv2si (TARGET_NEON)
+#define HAVE_neon_vget_laneuv4si (TARGET_NEON)
+#define HAVE_neon_vget_lanedi (TARGET_NEON)
+#define HAVE_neon_vget_lanev2di (TARGET_NEON)
+#define HAVE_neon_vset_lanev8qi (TARGET_NEON)
+#define HAVE_neon_vset_lanev16qi (TARGET_NEON)
+#define HAVE_neon_vset_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vset_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vset_lanev2si (TARGET_NEON)
+#define HAVE_neon_vset_lanev4si (TARGET_NEON)
+#define HAVE_neon_vset_lanev4hf (TARGET_NEON)
+#define HAVE_neon_vset_lanev8hf (TARGET_NEON)
+#define HAVE_neon_vset_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vset_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vset_lanev2di (TARGET_NEON)
+#define HAVE_neon_vset_lanedi (TARGET_NEON)
+#define HAVE_neon_vcreatev8qi (TARGET_NEON)
+#define HAVE_neon_vcreatev4hi (TARGET_NEON)
+#define HAVE_neon_vcreatev2si (TARGET_NEON)
+#define HAVE_neon_vcreatev2sf (TARGET_NEON)
+#define HAVE_neon_vcreatedi (TARGET_NEON)
+#define HAVE_neon_vdup_ndi (TARGET_NEON)
+#define HAVE_neon_vdup_lanev8qi (TARGET_NEON)
+#define HAVE_neon_vdup_lanev16qi (TARGET_NEON)
+#define HAVE_neon_vdup_lanev4hi (TARGET_NEON)
+#define HAVE_neon_vdup_lanev8hi (TARGET_NEON)
+#define HAVE_neon_vdup_lanev2si (TARGET_NEON)
+#define HAVE_neon_vdup_lanev4si (TARGET_NEON)
+#define HAVE_neon_vdup_lanev2sf (TARGET_NEON)
+#define HAVE_neon_vdup_lanev4sf (TARGET_NEON)
+#define HAVE_neon_vdup_lanev8hf (TARGET_NEON && (TARGET_FP16 || TARGET_BF16_SIMD))
+#define HAVE_neon_vdup_lanev4hf (TARGET_NEON && (TARGET_FP16 || TARGET_BF16_SIMD))
+#define HAVE_neon_vdup_lanev4bf (TARGET_NEON && (TARGET_FP16 || TARGET_BF16_SIMD))
+#define HAVE_neon_vdup_lanev8bf (TARGET_NEON && (TARGET_FP16 || TARGET_BF16_SIMD))
+#define HAVE_neon_vdup_lanedi (TARGET_NEON)
+#define HAVE_neon_vdup_lanev2di (TARGET_NEON)
+#define HAVE_neon_vget_highv16qi (TARGET_NEON)
+#define HAVE_neon_vget_highv8hi (TARGET_NEON)
+#define HAVE_neon_vget_highv8hf (TARGET_NEON)
+#define HAVE_neon_vget_highv8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vget_highv4si (TARGET_NEON)
+#define HAVE_neon_vget_highv4sf (TARGET_NEON)
+#define HAVE_neon_vget_highv2di (TARGET_NEON)
+#define HAVE_neon_vget_lowv16qi (TARGET_NEON)
+#define HAVE_neon_vget_lowv8hi (TARGET_NEON)
+#define HAVE_neon_vget_lowv8hf (TARGET_NEON)
+#define HAVE_neon_vget_lowv8bf (TARGET_NEON)
+#define HAVE_neon_vget_lowv4si (TARGET_NEON)
+#define HAVE_neon_vget_lowv4sf (TARGET_NEON)
+#define HAVE_neon_vget_lowv2di (TARGET_NEON)
+#define HAVE_neon_vmul_nv4hi (TARGET_NEON)
+#define HAVE_neon_vmul_nv2si (TARGET_NEON)
+#define HAVE_neon_vmul_nv2sf (TARGET_NEON)
+#define HAVE_neon_vmul_nv8hi (TARGET_NEON)
+#define HAVE_neon_vmul_nv4si (TARGET_NEON)
+#define HAVE_neon_vmul_nv4sf (TARGET_NEON)
+#define HAVE_neon_vmul_nv8hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vmul_nv4hf (TARGET_NEON_FP16INST)
+#define HAVE_neon_vmulls_nv4hi (TARGET_NEON)
+#define HAVE_neon_vmulls_nv2si (TARGET_NEON)
+#define HAVE_neon_vmullu_nv4hi (TARGET_NEON)
+#define HAVE_neon_vmullu_nv2si (TARGET_NEON)
+#define HAVE_neon_vqdmull_nv4hi (TARGET_NEON)
+#define HAVE_neon_vqdmull_nv2si (TARGET_NEON)
+#define HAVE_neon_vqdmulh_nv4hi (TARGET_NEON)
+#define HAVE_neon_vqdmulh_nv2si (TARGET_NEON)
+#define HAVE_neon_vqrdmulh_nv4hi (TARGET_NEON)
+#define HAVE_neon_vqrdmulh_nv2si (TARGET_NEON)
+#define HAVE_neon_vqdmulh_nv8hi (TARGET_NEON)
+#define HAVE_neon_vqdmulh_nv4si (TARGET_NEON)
+#define HAVE_neon_vqrdmulh_nv8hi (TARGET_NEON)
+#define HAVE_neon_vqrdmulh_nv4si (TARGET_NEON)
+#define HAVE_neon_vmla_nv4hi (TARGET_NEON)
+#define HAVE_neon_vmla_nv2si (TARGET_NEON)
+#define HAVE_neon_vmla_nv2sf (TARGET_NEON)
+#define HAVE_neon_vmla_nv8hi (TARGET_NEON)
+#define HAVE_neon_vmla_nv4si (TARGET_NEON)
+#define HAVE_neon_vmla_nv4sf (TARGET_NEON)
+#define HAVE_neon_vmlals_nv4hi (TARGET_NEON)
+#define HAVE_neon_vmlals_nv2si (TARGET_NEON)
+#define HAVE_neon_vmlalu_nv4hi (TARGET_NEON)
+#define HAVE_neon_vmlalu_nv2si (TARGET_NEON)
+#define HAVE_neon_vqdmlal_nv4hi (TARGET_NEON)
+#define HAVE_neon_vqdmlal_nv2si (TARGET_NEON)
+#define HAVE_neon_vmls_nv4hi (TARGET_NEON)
+#define HAVE_neon_vmls_nv2si (TARGET_NEON)
+#define HAVE_neon_vmls_nv2sf (TARGET_NEON)
+#define HAVE_neon_vmls_nv8hi (TARGET_NEON)
+#define HAVE_neon_vmls_nv4si (TARGET_NEON)
+#define HAVE_neon_vmls_nv4sf (TARGET_NEON)
+#define HAVE_neon_vmlsls_nv4hi (TARGET_NEON)
+#define HAVE_neon_vmlsls_nv2si (TARGET_NEON)
+#define HAVE_neon_vmlslu_nv4hi (TARGET_NEON)
+#define HAVE_neon_vmlslu_nv2si (TARGET_NEON)
+#define HAVE_neon_vqdmlsl_nv4hi (TARGET_NEON)
+#define HAVE_neon_vqdmlsl_nv2si (TARGET_NEON)
+#define HAVE_neon_vbslv8qi (TARGET_NEON)
+#define HAVE_neon_vbslv16qi (TARGET_NEON)
+#define HAVE_neon_vbslv4hi (TARGET_NEON)
+#define HAVE_neon_vbslv8hi (TARGET_NEON)
+#define HAVE_neon_vbslv2si (TARGET_NEON)
+#define HAVE_neon_vbslv4si (TARGET_NEON)
+#define HAVE_neon_vbslv4hf (TARGET_NEON)
+#define HAVE_neon_vbslv8hf (TARGET_NEON)
+#define HAVE_neon_vbslv4bf (TARGET_NEON)
+#define HAVE_neon_vbslv8bf (TARGET_NEON)
+#define HAVE_neon_vbslv2sf (TARGET_NEON)
+#define HAVE_neon_vbslv4sf (TARGET_NEON)
+#define HAVE_neon_vbsldi (TARGET_NEON)
+#define HAVE_neon_vbslv2di (TARGET_NEON)
+#define HAVE_neon_vtrnv8qi_internal (TARGET_NEON)
+#define HAVE_neon_vtrnv16qi_internal (TARGET_NEON)
+#define HAVE_neon_vtrnv4hi_internal (TARGET_NEON)
+#define HAVE_neon_vtrnv8hi_internal (TARGET_NEON)
+#define HAVE_neon_vtrnv2si_internal (TARGET_NEON)
+#define HAVE_neon_vtrnv4si_internal (TARGET_NEON)
+#define HAVE_neon_vtrnv2sf_internal (TARGET_NEON)
+#define HAVE_neon_vtrnv4sf_internal (TARGET_NEON)
+#define HAVE_neon_vtrnv8hf_internal (TARGET_NEON)
+#define HAVE_neon_vtrnv4hf_internal (TARGET_NEON)
+#define HAVE_neon_vzipv8qi_internal (TARGET_NEON)
+#define HAVE_neon_vzipv16qi_internal (TARGET_NEON)
+#define HAVE_neon_vzipv4hi_internal (TARGET_NEON)
+#define HAVE_neon_vzipv8hi_internal (TARGET_NEON)
+#define HAVE_neon_vzipv2si_internal (TARGET_NEON)
+#define HAVE_neon_vzipv4si_internal (TARGET_NEON)
+#define HAVE_neon_vzipv2sf_internal (TARGET_NEON)
+#define HAVE_neon_vzipv4sf_internal (TARGET_NEON)
+#define HAVE_neon_vzipv8hf_internal (TARGET_NEON)
+#define HAVE_neon_vzipv4hf_internal (TARGET_NEON)
+#define HAVE_neon_vuzpv8qi_internal (TARGET_NEON)
+#define HAVE_neon_vuzpv16qi_internal (TARGET_NEON)
+#define HAVE_neon_vuzpv4hi_internal (TARGET_NEON)
+#define HAVE_neon_vuzpv8hi_internal (TARGET_NEON)
+#define HAVE_neon_vuzpv2si_internal (TARGET_NEON)
+#define HAVE_neon_vuzpv4si_internal (TARGET_NEON)
+#define HAVE_neon_vuzpv2sf_internal (TARGET_NEON)
+#define HAVE_neon_vuzpv4sf_internal (TARGET_NEON)
+#define HAVE_neon_vuzpv8hf_internal (TARGET_NEON)
+#define HAVE_neon_vuzpv4hf_internal (TARGET_NEON)
+#define HAVE_vec_load_lanesv8qiv8qi (TARGET_NEON)
+#define HAVE_vec_load_lanesv16qiv16qi (TARGET_NEON)
+#define HAVE_vec_load_lanesv4hiv4hi (TARGET_NEON)
+#define HAVE_vec_load_lanesv8hiv8hi (TARGET_NEON)
+#define HAVE_vec_load_lanesv2siv2si (TARGET_NEON)
+#define HAVE_vec_load_lanesv4siv4si (TARGET_NEON)
+#define HAVE_vec_load_lanesv4hfv4hf (TARGET_NEON)
+#define HAVE_vec_load_lanesv8hfv8hf (TARGET_NEON)
+#define HAVE_vec_load_lanesv4bfv4bf (TARGET_NEON)
+#define HAVE_vec_load_lanesv8bfv8bf (TARGET_NEON)
+#define HAVE_vec_load_lanesv2sfv2sf (TARGET_NEON)
+#define HAVE_vec_load_lanesv4sfv4sf (TARGET_NEON)
+#define HAVE_vec_load_lanesdidi (TARGET_NEON)
+#define HAVE_vec_load_lanesv2div2di (TARGET_NEON)
+#define HAVE_neon_vld1_dupdi (TARGET_NEON)
+#define HAVE_vec_store_lanesv8qiv8qi (TARGET_NEON)
+#define HAVE_vec_store_lanesv16qiv16qi (TARGET_NEON)
+#define HAVE_vec_store_lanesv4hiv4hi (TARGET_NEON)
+#define HAVE_vec_store_lanesv8hiv8hi (TARGET_NEON)
+#define HAVE_vec_store_lanesv2siv2si (TARGET_NEON)
+#define HAVE_vec_store_lanesv4siv4si (TARGET_NEON)
+#define HAVE_vec_store_lanesv4hfv4hf (TARGET_NEON)
+#define HAVE_vec_store_lanesv8hfv8hf (TARGET_NEON)
+#define HAVE_vec_store_lanesv4bfv4bf (TARGET_NEON)
+#define HAVE_vec_store_lanesv8bfv8bf (TARGET_NEON)
+#define HAVE_vec_store_lanesv2sfv2sf (TARGET_NEON)
+#define HAVE_vec_store_lanesv4sfv4sf (TARGET_NEON)
+#define HAVE_vec_store_lanesdidi (TARGET_NEON)
+#define HAVE_vec_store_lanesv2div2di (TARGET_NEON)
+#define HAVE_vec_load_lanestiv8qi (TARGET_NEON)
+#define HAVE_vec_load_lanestiv4hi (TARGET_NEON)
+#define HAVE_vec_load_lanestiv4hf (TARGET_NEON)
+#define HAVE_vec_load_lanestiv4bf (TARGET_NEON)
+#define HAVE_vec_load_lanestiv2si (TARGET_NEON)
+#define HAVE_vec_load_lanestiv2sf (TARGET_NEON)
+#define HAVE_vec_load_lanestidi (TARGET_NEON)
+#define HAVE_vec_store_lanestiv8qi (TARGET_NEON)
+#define HAVE_vec_store_lanestiv4hi (TARGET_NEON)
+#define HAVE_vec_store_lanestiv4hf (TARGET_NEON)
+#define HAVE_vec_store_lanestiv4bf (TARGET_NEON)
+#define HAVE_vec_store_lanestiv2si (TARGET_NEON)
+#define HAVE_vec_store_lanestiv2sf (TARGET_NEON)
+#define HAVE_vec_store_lanestidi (TARGET_NEON)
+#define HAVE_vec_load_laneseiv8qi (TARGET_NEON)
+#define HAVE_vec_load_laneseiv4hi (TARGET_NEON)
+#define HAVE_vec_load_laneseiv4hf (TARGET_NEON)
+#define HAVE_vec_load_laneseiv4bf (TARGET_NEON)
+#define HAVE_vec_load_laneseiv2si (TARGET_NEON)
+#define HAVE_vec_load_laneseiv2sf (TARGET_NEON)
+#define HAVE_vec_load_laneseidi (TARGET_NEON)
+#define HAVE_vec_load_lanesciv16qi (TARGET_NEON)
+#define HAVE_vec_load_lanesciv8hi (TARGET_NEON)
+#define HAVE_vec_load_lanesciv8hf (TARGET_NEON)
+#define HAVE_vec_load_lanesciv4si (TARGET_NEON)
+#define HAVE_vec_load_lanesciv4sf (TARGET_NEON)
+#define HAVE_neon_vld3v16qi (TARGET_NEON)
+#define HAVE_neon_vld3v8hi (TARGET_NEON)
+#define HAVE_neon_vld3v8hf (TARGET_NEON)
+#define HAVE_neon_vld3v8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld3v4si (TARGET_NEON)
+#define HAVE_neon_vld3v4sf (TARGET_NEON)
+#define HAVE_vec_store_laneseiv8qi (TARGET_NEON)
+#define HAVE_vec_store_laneseiv4hi (TARGET_NEON)
+#define HAVE_vec_store_laneseiv4hf (TARGET_NEON)
+#define HAVE_vec_store_laneseiv4bf (TARGET_NEON)
+#define HAVE_vec_store_laneseiv2si (TARGET_NEON)
+#define HAVE_vec_store_laneseiv2sf (TARGET_NEON)
+#define HAVE_vec_store_laneseidi (TARGET_NEON)
+#define HAVE_vec_store_lanesciv16qi (TARGET_NEON)
+#define HAVE_vec_store_lanesciv8hi (TARGET_NEON)
+#define HAVE_vec_store_lanesciv8hf (TARGET_NEON)
+#define HAVE_vec_store_lanesciv4si (TARGET_NEON)
+#define HAVE_vec_store_lanesciv4sf (TARGET_NEON)
+#define HAVE_neon_vst3v16qi (TARGET_NEON)
+#define HAVE_neon_vst3v8hi (TARGET_NEON)
+#define HAVE_neon_vst3v8hf (TARGET_NEON)
+#define HAVE_neon_vst3v8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vst3v4si (TARGET_NEON)
+#define HAVE_neon_vst3v4sf (TARGET_NEON)
+#define HAVE_vec_load_lanesoiv8qi (TARGET_NEON)
+#define HAVE_vec_load_lanesoiv4hi (TARGET_NEON)
+#define HAVE_vec_load_lanesoiv4hf (TARGET_NEON)
+#define HAVE_vec_load_lanesoiv4bf (TARGET_NEON)
+#define HAVE_vec_load_lanesoiv2si (TARGET_NEON)
+#define HAVE_vec_load_lanesoiv2sf (TARGET_NEON)
+#define HAVE_vec_load_lanesoidi (TARGET_NEON)
+#define HAVE_neon_vld4v16qi (TARGET_NEON)
+#define HAVE_neon_vld4v8hi (TARGET_NEON)
+#define HAVE_neon_vld4v8hf (TARGET_NEON)
+#define HAVE_neon_vld4v8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vld4v4si (TARGET_NEON)
+#define HAVE_neon_vld4v4sf (TARGET_NEON)
+#define HAVE_vec_store_lanesoiv8qi (TARGET_NEON)
+#define HAVE_vec_store_lanesoiv4hi (TARGET_NEON)
+#define HAVE_vec_store_lanesoiv4hf (TARGET_NEON)
+#define HAVE_vec_store_lanesoiv4bf (TARGET_NEON)
+#define HAVE_vec_store_lanesoiv2si (TARGET_NEON)
+#define HAVE_vec_store_lanesoiv2sf (TARGET_NEON)
+#define HAVE_vec_store_lanesoidi (TARGET_NEON)
+#define HAVE_neon_vst4v16qi (TARGET_NEON)
+#define HAVE_neon_vst4v8hi (TARGET_NEON)
+#define HAVE_neon_vst4v8hf (TARGET_NEON)
+#define HAVE_neon_vst4v8bf ((TARGET_NEON) && (TARGET_BF16_SIMD))
+#define HAVE_neon_vst4v4si (TARGET_NEON)
+#define HAVE_neon_vst4v4sf (TARGET_NEON)
+#define HAVE_vec_unpacks_hi_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_unpacku_hi_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_unpacks_hi_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_unpacku_hi_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_unpacks_hi_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_unpacku_hi_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_unpacks_lo_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_unpacku_lo_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_unpacks_lo_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_unpacku_lo_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_unpacks_lo_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_unpacku_lo_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_smult_lo_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_umult_lo_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_smult_lo_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_umult_lo_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_smult_lo_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_umult_lo_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_smult_hi_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_umult_hi_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_smult_hi_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_umult_hi_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_smult_hi_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_umult_hi_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_sshiftl_lo_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_ushiftl_lo_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_sshiftl_lo_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_ushiftl_lo_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_sshiftl_lo_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_ushiftl_lo_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_sshiftl_hi_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_ushiftl_hi_v16qi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_sshiftl_hi_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_ushiftl_hi_v8hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_sshiftl_hi_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_widen_ushiftl_hi_v4si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_unpacks_lo_v8qi (TARGET_NEON)
+#define HAVE_vec_unpacku_lo_v8qi (TARGET_NEON)
+#define HAVE_vec_unpacks_lo_v4hi (TARGET_NEON)
+#define HAVE_vec_unpacku_lo_v4hi (TARGET_NEON)
+#define HAVE_vec_unpacks_lo_v2si (TARGET_NEON)
+#define HAVE_vec_unpacku_lo_v2si (TARGET_NEON)
+#define HAVE_vec_unpacks_hi_v8qi (TARGET_NEON)
+#define HAVE_vec_unpacku_hi_v8qi (TARGET_NEON)
+#define HAVE_vec_unpacks_hi_v4hi (TARGET_NEON)
+#define HAVE_vec_unpacku_hi_v4hi (TARGET_NEON)
+#define HAVE_vec_unpacks_hi_v2si (TARGET_NEON)
+#define HAVE_vec_unpacku_hi_v2si (TARGET_NEON)
+#define HAVE_vec_widen_smult_hi_v8qi (TARGET_NEON)
+#define HAVE_vec_widen_umult_hi_v8qi (TARGET_NEON)
+#define HAVE_vec_widen_smult_hi_v4hi (TARGET_NEON)
+#define HAVE_vec_widen_umult_hi_v4hi (TARGET_NEON)
+#define HAVE_vec_widen_smult_hi_v2si (TARGET_NEON)
+#define HAVE_vec_widen_umult_hi_v2si (TARGET_NEON)
+#define HAVE_vec_widen_smult_lo_v8qi (TARGET_NEON)
+#define HAVE_vec_widen_umult_lo_v8qi (TARGET_NEON)
+#define HAVE_vec_widen_smult_lo_v4hi (TARGET_NEON)
+#define HAVE_vec_widen_umult_lo_v4hi (TARGET_NEON)
+#define HAVE_vec_widen_smult_lo_v2si (TARGET_NEON)
+#define HAVE_vec_widen_umult_lo_v2si (TARGET_NEON)
+#define HAVE_vec_widen_sshiftl_hi_v8qi (TARGET_NEON)
+#define HAVE_vec_widen_ushiftl_hi_v8qi (TARGET_NEON)
+#define HAVE_vec_widen_sshiftl_hi_v4hi (TARGET_NEON)
+#define HAVE_vec_widen_ushiftl_hi_v4hi (TARGET_NEON)
+#define HAVE_vec_widen_sshiftl_hi_v2si (TARGET_NEON)
+#define HAVE_vec_widen_ushiftl_hi_v2si (TARGET_NEON)
+#define HAVE_vec_widen_sshiftl_lo_v8qi (TARGET_NEON)
+#define HAVE_vec_widen_ushiftl_lo_v8qi (TARGET_NEON)
+#define HAVE_vec_widen_sshiftl_lo_v4hi (TARGET_NEON)
+#define HAVE_vec_widen_ushiftl_lo_v4hi (TARGET_NEON)
+#define HAVE_vec_widen_sshiftl_lo_v2si (TARGET_NEON)
+#define HAVE_vec_widen_ushiftl_lo_v2si (TARGET_NEON)
+#define HAVE_vec_pack_trunc_v4hi (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_pack_trunc_v2si (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_vec_pack_trunc_di (TARGET_NEON && !BYTES_BIG_ENDIAN)
+#define HAVE_neon_vbfcvtbf (TARGET_BF16_FP)
+#define HAVE_neon_vfmab_laneqv8bf (TARGET_BF16_SIMD)
+#define HAVE_neon_vfmat_laneqv8bf (TARGET_BF16_SIMD)
+#define HAVE_crypto_aesd (TARGET_CRYPTO)
+#define HAVE_crypto_aese (TARGET_CRYPTO)
+#define HAVE_crypto_sha1h (TARGET_CRYPTO)
+#define HAVE_crypto_sha1c (TARGET_CRYPTO)
+#define HAVE_crypto_sha1m (TARGET_CRYPTO)
+#define HAVE_crypto_sha1p (TARGET_CRYPTO)
+#define HAVE_memory_barrier (TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_loaddi ((TARGET_HAVE_LDREXD || TARGET_HAVE_LPAE || TARGET_HAVE_LDACQEXD) \
+ && ARM_DOUBLEWORD_ALIGN)
+#define HAVE_atomic_compare_and_swapqi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_compare_and_swaphi (TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_compare_and_swapsi (TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_atomic_compare_and_swapdi (TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN \
+ && TARGET_HAVE_MEMORY_BARRIER)
+#define HAVE_addv4qq3 (TARGET_INT_SIMD)
+#define HAVE_addv2hq3 (TARGET_INT_SIMD)
+#define HAVE_addv2ha3 (TARGET_INT_SIMD)
+#define HAVE_ssaddv4qq3 (TARGET_INT_SIMD)
+#define HAVE_ssaddv2hq3 (TARGET_INT_SIMD)
+#define HAVE_ssaddqq3 (TARGET_INT_SIMD)
+#define HAVE_ssaddhq3 (TARGET_INT_SIMD)
+#define HAVE_ssaddv2ha3 (TARGET_INT_SIMD)
+#define HAVE_ssaddha3 (TARGET_INT_SIMD)
+#define HAVE_ssaddsq3 (TARGET_INT_SIMD)
+#define HAVE_ssaddsa3 (TARGET_INT_SIMD)
+#define HAVE_subv4qq3 (TARGET_INT_SIMD)
+#define HAVE_subv2hq3 (TARGET_INT_SIMD)
+#define HAVE_subv2ha3 (TARGET_INT_SIMD)
+#define HAVE_sssubv4qq3 (TARGET_INT_SIMD)
+#define HAVE_sssubv2hq3 (TARGET_INT_SIMD)
+#define HAVE_sssubqq3 (TARGET_INT_SIMD)
+#define HAVE_sssubhq3 (TARGET_INT_SIMD)
+#define HAVE_sssubv2ha3 (TARGET_INT_SIMD)
+#define HAVE_sssubha3 (TARGET_INT_SIMD)
+#define HAVE_sssubsq3 (TARGET_INT_SIMD)
+#define HAVE_sssubsa3 (TARGET_INT_SIMD)
+#define HAVE_mulqq3 (TARGET_DSP_MULTIPLY && arm_arch_thumb2)
+#define HAVE_mulhq3 (TARGET_DSP_MULTIPLY && arm_arch_thumb2)
+#define HAVE_mulsq3 (TARGET_32BIT)
+#define HAVE_mulsa3 (TARGET_32BIT)
+#define HAVE_mulusa3 (TARGET_32BIT)
+#define HAVE_ssmulsa3 (TARGET_32BIT && arm_arch6)
+#define HAVE_usmulusa3 (TARGET_32BIT && arm_arch6)
+#define HAVE_mulha3 (TARGET_DSP_MULTIPLY && arm_arch_thumb2)
+#define HAVE_muluha3 (TARGET_DSP_MULTIPLY)
+#define HAVE_ssmulha3 (TARGET_32BIT && TARGET_DSP_MULTIPLY && arm_arch6)
+#define HAVE_usmuluha3 (TARGET_INT_SIMD)
+#define HAVE_mve_vmvnq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vmvnq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vclzq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vclzq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vclzq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vandq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vandq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vandq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vbicq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_cadd90v16qi3 (TARGET_HAVE_MVE && !BYTES_BIG_ENDIAN)
+#define HAVE_cadd270v16qi3 (TARGET_HAVE_MVE && !BYTES_BIG_ENDIAN)
+#define HAVE_cadd90v8hi3 (TARGET_HAVE_MVE && !BYTES_BIG_ENDIAN)
+#define HAVE_cadd270v8hi3 (TARGET_HAVE_MVE && !BYTES_BIG_ENDIAN)
+#define HAVE_cadd90v4si3 (TARGET_HAVE_MVE && !BYTES_BIG_ENDIAN)
+#define HAVE_cadd270v4si3 (TARGET_HAVE_MVE && !BYTES_BIG_ENDIAN)
+#define HAVE_mve_veorq_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_veorq_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_veorq_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vornq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vornq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vornq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vorrq_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_vec_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_vec_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_vec_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_vec_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_vec_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_vec_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_carry_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_carry_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_carry_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_carry_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_carry_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_carry_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_p_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_p_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrbq_scatter_offset_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vld1q_fv8hf (TARGET_HAVE_MVE || TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vld1q_fv4sf (TARGET_HAVE_MVE || TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vld1q_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vld1q_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vld1q_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vld1q_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vld1q_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vld1q_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_p_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_p_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vst1q_fv8hf (TARGET_HAVE_MVE || TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vst1q_fv4sf (TARGET_HAVE_MVE || TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vst1q_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vst1q_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vst1q_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vst1q_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vst1q_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vst1q_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_offset_p_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_offset_p_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_offset_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_offset_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_shifted_offset_p_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_shifted_offset_p_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_shifted_offset_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrdq_scatter_shifted_offset_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrhq_scatter_offset_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrhq_scatter_offset_p_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrhq_scatter_shifted_offset_p_fv8hf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_scatter_offset_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_scatter_offset_p_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_scatter_offset_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_offset_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_offset_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_offset_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_shifted_offset_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_scatter_shifted_offset_p_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vstrwq_scatter_shifted_offset_p_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_shifted_offset_p_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_shifted_offset_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vstrwq_scatter_shifted_offset_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vidupq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vidupq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vidupq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vidupq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vidupq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vidupq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vddupq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vddupq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vddupq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vddupq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vddupq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vddupq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_wb_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_wb_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_wb_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_m_wb_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_m_wb_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vdwdupq_m_wb_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_wb_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_wb_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_wb_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_m_n_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_m_n_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_m_n_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_m_wb_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_m_wb_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_viwdupq_m_wb_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_wb_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_wb_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_nowb_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_nowb_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_wb_z_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_wb_z_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_nowb_z_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_nowb_z_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrwq_gather_base_wb_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrwq_gather_base_nowb_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrwq_gather_base_wb_z_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrwq_gather_base_nowb_z_fv4sf (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT)
+#define HAVE_mve_vldrdq_gather_base_wb_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_wb_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_nowb_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_nowb_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_wb_z_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_wb_z_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_nowb_z_sv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vldrdq_gather_base_nowb_z_uv2di (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_vec_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_vec_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_vec_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_vec_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_vec_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_vec_uv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_carry_sv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_carry_uv16qi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_carry_sv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_carry_uv8hi (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_carry_sv4si (TARGET_HAVE_MVE)
+#define HAVE_mve_vshlcq_m_carry_uv4si (TARGET_HAVE_MVE)
+#define HAVE_movv16bi (TARGET_HAVE_MVE)
+#define HAVE_movv8bi (TARGET_HAVE_MVE)
+#define HAVE_movv4bi (TARGET_HAVE_MVE)
+#define HAVE_movv2qi (TARGET_HAVE_MVE)
+#define HAVE_vec_cmpv16qiv16bi (TARGET_HAVE_MVE \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpv8hiv8bi (TARGET_HAVE_MVE \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpv4siv4bi (TARGET_HAVE_MVE \
+ && (!false || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpv8hfv8bi (TARGET_HAVE_MVE \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpv4sfv4bi (TARGET_HAVE_MVE \
+ && (!true || flag_unsafe_math_optimizations))
+#define HAVE_vec_cmpuv16qiv16bi (TARGET_HAVE_MVE)
+#define HAVE_vec_cmpuv8hiv8bi (TARGET_HAVE_MVE)
+#define HAVE_vec_cmpuv4siv4bi (TARGET_HAVE_MVE)
+#define HAVE_vcond_mask_v16qiv16bi (TARGET_HAVE_MVE)
+#define HAVE_vcond_mask_v8hiv8bi (TARGET_HAVE_MVE)
+#define HAVE_vcond_mask_v4siv4bi (TARGET_HAVE_MVE)
+#define HAVE_vcond_mask_v8hfv8bi (TARGET_HAVE_MVE)
+#define HAVE_vcond_mask_v4sfv4bi (TARGET_HAVE_MVE)
+extern rtx gen_addsi3_compareV_reg (rtx, rtx, rtx);
+extern rtx gen_subvsi3_intmin (rtx, rtx);
+extern rtx gen_addsi3_compareV_imm (rtx, rtx, rtx);
+extern rtx gen_addsi3_compareV_imm_nosum (rtx, rtx);
+extern rtx gen_addsi3_compare0 (rtx, rtx, rtx);
+extern rtx gen_cmpsi2_addneg (rtx, rtx, rtx, rtx);
+extern rtx gen_addsi3_compare_op1 (rtx, rtx, rtx);
+extern rtx gen_addsi3_carryin (rtx, rtx, rtx, rtx);
+extern rtx gen_add0si3_carryin (rtx, rtx, rtx);
+extern rtx gen_subsi3_compare1 (rtx, rtx, rtx);
+extern rtx gen_subvsi3 (rtx, rtx, rtx);
+extern rtx gen_subvsi3_imm1 (rtx, rtx, rtx);
+extern rtx gen_subsi3_carryin (rtx, rtx, rtx, rtx);
+extern rtx gen_rsbsi_carryin_reg (rtx, rtx, rtx);
+extern rtx gen_add_not_shift_cin (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_cmpsi3_carryin_CC_NVout (rtx, rtx, rtx, rtx);
+extern rtx gen_cmpsi3_carryin_CC_Bout (rtx, rtx, rtx, rtx);
+extern rtx gen_cmpsi3_imm_carryin_CC_NVout (rtx, rtx, rtx, rtx);
+extern rtx gen_cmpsi3_imm_carryin_CC_Bout (rtx, rtx, rtx, rtx);
+extern rtx gen_cmpsi3_0_carryin_CC_NVout (rtx, rtx, rtx);
+extern rtx gen_cmpsi3_0_carryin_CC_Bout (rtx, rtx, rtx);
+extern rtx gen_subsi3_compare0 (rtx, rtx, rtx);
+extern rtx gen_subsi3_compare (rtx, rtx, rtx);
+extern rtx gen_rsb_imm_compare (rtx, rtx, rtx, rtx);
+extern rtx gen_rsb_imm_compare_scratch (rtx, rtx, rtx);
+extern rtx gen_rscsi3_CC_NVout_scratch (rtx, rtx, rtx, rtx);
+extern rtx gen_rscsi3_CC_Bout_scratch (rtx, rtx, rtx, rtx);
+extern rtx gen_usubvsi3_borrow (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_usubvsi3_borrow_imm (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_subvsi3_borrow (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_subvsi3_borrow_imm (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_smull (rtx, rtx, rtx, rtx);
+extern rtx gen_umull (rtx, rtx, rtx, rtx);
+extern rtx gen_smlal (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_umlal (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mulhisi3 (rtx, rtx, rtx);
+extern rtx gen_arm_smlabb_setq (rtx, rtx, rtx, rtx);
+extern rtx gen_maddhisi4tb (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlatb_setq (rtx, rtx, rtx, rtx);
+extern rtx gen_maddhisi4tt (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlatt_setq (rtx, rtx, rtx, rtx);
+extern rtx gen_maddhidi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlawb_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlawb_setq_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlawt_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlawt_setq_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_insv_zero (rtx, rtx, rtx);
+extern rtx gen_insv_t2 (rtx, rtx, rtx, rtx);
+extern rtx gen_andsi_notsi_si (rtx, rtx, rtx);
+extern rtx gen_andsi_not_shiftsi_si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_andsi_not_shiftsi_si_scc_no_reuse (rtx, rtx, rtx, rtx);
+extern rtx gen_andsi_not_shiftsi_si_scc (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_qadd_insn (rtx, rtx, rtx);
+extern rtx gen_arm_qadd_setq_insn (rtx, rtx, rtx);
+extern rtx gen_arm_qsub_insn (rtx, rtx, rtx);
+extern rtx gen_arm_qsub_setq_insn (rtx, rtx, rtx);
+extern rtx gen_arm_get_apsr (rtx);
+extern rtx gen_arm_set_apsr (rtx);
+extern rtx gen_satsi_smin (rtx, rtx, rtx, rtx);
+extern rtx gen_satsi_smin_setq (rtx, rtx, rtx, rtx);
+extern rtx gen_satsi_smax (rtx, rtx, rtx, rtx);
+extern rtx gen_satsi_smax_setq (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_cx1si (rtx, rtx, rtx);
+extern rtx gen_arm_cx1di (rtx, rtx, rtx);
+extern rtx gen_arm_cx1asi (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_cx1adi (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_cx2si (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_cx2di (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_cx2asi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_cx2adi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_cx3si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_cx3di (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_cx3asi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_cx3adi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_unaligned_loaddi (rtx, rtx);
+extern rtx gen_unaligned_loadsi (rtx, rtx);
+extern rtx gen_unaligned_loadhis (rtx, rtx);
+extern rtx gen_unaligned_loadhiu (rtx, rtx);
+extern rtx gen_unaligned_storedi (rtx, rtx);
+extern rtx gen_unaligned_storesi (rtx, rtx);
+extern rtx gen_unaligned_storehi (rtx, rtx);
+extern rtx gen_extzv_t2 (rtx, rtx, rtx, rtx);
+extern rtx gen_divsi3 (rtx, rtx, rtx);
+extern rtx gen_udivsi3 (rtx, rtx, rtx);
+extern rtx gen_negsi2_0compare (rtx, rtx);
+extern rtx gen_negsi2_carryin (rtx, rtx, rtx);
+extern rtx gen_arm_sxtb16 (rtx, rtx);
+extern rtx gen_arm_uxtb16 (rtx, rtx);
+extern rtx gen_arm_qadd8 (rtx, rtx, rtx);
+extern rtx gen_arm_qsub8 (rtx, rtx, rtx);
+extern rtx gen_arm_shadd8 (rtx, rtx, rtx);
+extern rtx gen_arm_shsub8 (rtx, rtx, rtx);
+extern rtx gen_arm_uhadd8 (rtx, rtx, rtx);
+extern rtx gen_arm_uhsub8 (rtx, rtx, rtx);
+extern rtx gen_arm_uqadd8 (rtx, rtx, rtx);
+extern rtx gen_arm_uqsub8 (rtx, rtx, rtx);
+extern rtx gen_arm_qadd16 (rtx, rtx, rtx);
+extern rtx gen_arm_qasx (rtx, rtx, rtx);
+extern rtx gen_arm_qsax (rtx, rtx, rtx);
+extern rtx gen_arm_qsub16 (rtx, rtx, rtx);
+extern rtx gen_arm_shadd16 (rtx, rtx, rtx);
+extern rtx gen_arm_shasx (rtx, rtx, rtx);
+extern rtx gen_arm_shsax (rtx, rtx, rtx);
+extern rtx gen_arm_shsub16 (rtx, rtx, rtx);
+extern rtx gen_arm_uhadd16 (rtx, rtx, rtx);
+extern rtx gen_arm_uhasx (rtx, rtx, rtx);
+extern rtx gen_arm_uhsax (rtx, rtx, rtx);
+extern rtx gen_arm_uhsub16 (rtx, rtx, rtx);
+extern rtx gen_arm_uqadd16 (rtx, rtx, rtx);
+extern rtx gen_arm_uqasx (rtx, rtx, rtx);
+extern rtx gen_arm_uqsax (rtx, rtx, rtx);
+extern rtx gen_arm_uqsub16 (rtx, rtx, rtx);
+extern rtx gen_arm_smusd (rtx, rtx, rtx);
+extern rtx gen_arm_smusdx (rtx, rtx, rtx);
+extern rtx gen_arm_sxtab16 (rtx, rtx, rtx);
+extern rtx gen_arm_uxtab16 (rtx, rtx, rtx);
+extern rtx gen_arm_usad8 (rtx, rtx, rtx);
+extern rtx gen_arm_usada8 (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlald (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlaldx (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlsld (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlsldx (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_sadd8 (rtx, rtx, rtx);
+extern rtx gen_arm_ssub8 (rtx, rtx, rtx);
+extern rtx gen_arm_uadd8 (rtx, rtx, rtx);
+extern rtx gen_arm_usub8 (rtx, rtx, rtx);
+extern rtx gen_arm_sadd16 (rtx, rtx, rtx);
+extern rtx gen_arm_sasx (rtx, rtx, rtx);
+extern rtx gen_arm_ssax (rtx, rtx, rtx);
+extern rtx gen_arm_ssub16 (rtx, rtx, rtx);
+extern rtx gen_arm_uadd16 (rtx, rtx, rtx);
+extern rtx gen_arm_uasx (rtx, rtx, rtx);
+extern rtx gen_arm_usax (rtx, rtx, rtx);
+extern rtx gen_arm_usub16 (rtx, rtx, rtx);
+extern rtx gen_arm_smlad_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlad_setq_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smladx_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smladx_setq_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlsd_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlsd_setq_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlsdx_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlsdx_setq_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smuad_insn (rtx, rtx, rtx);
+extern rtx gen_arm_smuad_setq_insn (rtx, rtx, rtx);
+extern rtx gen_arm_smuadx_insn (rtx, rtx, rtx);
+extern rtx gen_arm_smuadx_setq_insn (rtx, rtx, rtx);
+extern rtx gen_arm_ssat16_insn (rtx, rtx, rtx);
+extern rtx gen_arm_ssat16_setq_insn (rtx, rtx, rtx);
+extern rtx gen_arm_usat16_insn (rtx, rtx, rtx);
+extern rtx gen_arm_usat16_setq_insn (rtx, rtx, rtx);
+extern rtx gen_arm_sel (rtx, rtx, rtx);
+extern rtx gen_pic_load_addr_unified (rtx, rtx, rtx);
+extern rtx gen_pic_load_addr_32bit (rtx, rtx);
+extern rtx gen_pic_load_addr_thumb1 (rtx, rtx);
+extern rtx gen_pic_add_dot_plus_four (rtx, rtx, rtx);
+extern rtx gen_pic_add_dot_plus_eight (rtx, rtx, rtx);
+extern rtx gen_tls_load_dot_plus_eight (rtx, rtx, rtx);
+static inline rtx gen_pic_offset_arm (rtx, rtx, rtx);
+static inline rtx
+gen_pic_offset_arm(rtx ARG_UNUSED (a), rtx ARG_UNUSED (b), rtx ARG_UNUSED (c))
+{
+ return 0;
+}
+extern rtx gen_arm_cond_branch (rtx, rtx, rtx);
+extern rtx gen_restore_pic_register_after_call (rtx, rtx);
+extern rtx gen_blockage (void);
+extern rtx gen_probe_stack (rtx);
+extern rtx gen_probe_stack_range (rtx, rtx, rtx);
+extern rtx gen_arm_stack_protect_test_insn (rtx, rtx, rtx);
+extern rtx gen_stack_protect_set_tls (rtx, rtx);
+extern rtx gen_stack_protect_test_tls (rtx, rtx);
+extern rtx gen_nop (void);
+extern rtx gen_trap (void);
+extern rtx gen_movcond_addsi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_movcond (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_stack_tie (rtx, rtx);
+extern rtx gen_align_4 (void);
+extern rtx gen_align_8 (void);
+extern rtx gen_consttable_end (void);
+extern rtx gen_consttable_1 (rtx);
+extern rtx gen_consttable_2 (rtx);
+extern rtx gen_consttable_4 (rtx);
+extern rtx gen_consttable_8 (rtx);
+extern rtx gen_consttable_16 (rtx);
+extern rtx gen_clzsi2 (rtx, rtx);
+extern rtx gen_rbitsi2 (rtx, rtx);
+extern rtx gen_ctzsi2 (rtx, rtx);
+extern rtx gen_prefetch (rtx, rtx, rtx);
+extern rtx gen_force_register_use (rtx);
+extern rtx gen_arm_eh_return (rtx);
+extern rtx gen_load_tp_hard (rtx);
+extern rtx gen_reload_tp_hard (rtx);
+extern rtx gen_load_tp_soft_fdpic (void);
+extern rtx gen_load_tp_soft (void);
+extern rtx gen_tlscall (rtx, rtx);
+extern rtx gen_arm_rev16si2 (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_rev16si2_alt (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_crc32b (rtx, rtx, rtx);
+extern rtx gen_arm_crc32h (rtx, rtx, rtx);
+extern rtx gen_arm_crc32w (rtx, rtx, rtx);
+extern rtx gen_arm_crc32cb (rtx, rtx, rtx);
+extern rtx gen_arm_crc32ch (rtx, rtx, rtx);
+extern rtx gen_arm_crc32cw (rtx, rtx, rtx);
+extern rtx gen_arm_cdp (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_cdp2 (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_mcr (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_mcr2 (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_mrc (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_mrc2 (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_mcrr (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_mcrr2 (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_mrrc (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_mrrc2 (rtx, rtx, rtx, rtx);
+extern rtx gen_pac_nop (void);
+extern rtx gen_pacbti_nop (void);
+extern rtx gen_aut_nop (void);
+extern rtx gen_bti_nop (void);
+extern rtx gen_mve_vshlq_sv8qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_uv8qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_sv4hi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_uv4hi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_sv2si (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_uv2si (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_uv4si (rtx, rtx, rtx);
+extern rtx gen_tbcstv8qi (rtx, rtx);
+extern rtx gen_tbcstv4hi (rtx, rtx);
+extern rtx gen_tbcstv2si (rtx, rtx);
+extern rtx gen_iwmmxt_iordi3 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_xordi3 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_anddi3 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_nanddi3 (rtx, rtx, rtx);
+extern rtx gen_movv2si_internal (rtx, rtx);
+extern rtx gen_movv4hi_internal (rtx, rtx);
+extern rtx gen_movv8qi_internal (rtx, rtx);
+extern rtx gen_ssaddv8qi3 (rtx, rtx, rtx);
+extern rtx gen_ssaddv4hi3 (rtx, rtx, rtx);
+extern rtx gen_ssaddv2si3 (rtx, rtx, rtx);
+extern rtx gen_usaddv8qi3 (rtx, rtx, rtx);
+extern rtx gen_usaddv4hi3 (rtx, rtx, rtx);
+extern rtx gen_usaddv2si3 (rtx, rtx, rtx);
+extern rtx gen_sssubv8qi3 (rtx, rtx, rtx);
+extern rtx gen_sssubv4hi3 (rtx, rtx, rtx);
+extern rtx gen_sssubv2si3 (rtx, rtx, rtx);
+extern rtx gen_ussubv8qi3 (rtx, rtx, rtx);
+extern rtx gen_ussubv4hi3 (rtx, rtx, rtx);
+extern rtx gen_ussubv2si3 (rtx, rtx, rtx);
+extern rtx gen_smulv4hi3_highpart (rtx, rtx, rtx);
+extern rtx gen_umulv4hi3_highpart (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmacs (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmacsz (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmacu (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmacuz (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_clrdi (rtx);
+extern rtx gen_iwmmxt_clrv8qi (rtx);
+extern rtx gen_iwmmxt_clrv4hi (rtx);
+extern rtx gen_iwmmxt_clrv2si (rtx);
+extern rtx gen_iwmmxt_uavgrndv8qi3 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_uavgrndv4hi3 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_uavgv8qi3 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_uavgv4hi3 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_tinsrb (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_tinsrh (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_tinsrw (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_textrmub (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_textrmsb (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_textrmuh (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_textrmsh (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_textrmw (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wshufh (rtx, rtx, rtx);
+extern rtx gen_eqv8qi3 (rtx, rtx, rtx);
+extern rtx gen_eqv4hi3 (rtx, rtx, rtx);
+extern rtx gen_eqv2si3 (rtx, rtx, rtx);
+extern rtx gen_gtuv8qi3 (rtx, rtx, rtx);
+extern rtx gen_gtuv4hi3 (rtx, rtx, rtx);
+extern rtx gen_gtuv2si3 (rtx, rtx, rtx);
+extern rtx gen_gtv8qi3 (rtx, rtx, rtx);
+extern rtx gen_gtv4hi3 (rtx, rtx, rtx);
+extern rtx gen_gtv2si3 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wpackhss (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wpackwss (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wpackdss (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wpackhus (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wpackwus (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wpackdus (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wunpckihb (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wunpckihh (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wunpckihw (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wunpckilb (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wunpckilh (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wunpckilw (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wunpckehub (rtx, rtx);
+extern rtx gen_iwmmxt_wunpckehuh (rtx, rtx);
+extern rtx gen_iwmmxt_wunpckehuw (rtx, rtx);
+extern rtx gen_iwmmxt_wunpckehsb (rtx, rtx);
+extern rtx gen_iwmmxt_wunpckehsh (rtx, rtx);
+extern rtx gen_iwmmxt_wunpckehsw (rtx, rtx);
+extern rtx gen_iwmmxt_wunpckelub (rtx, rtx);
+extern rtx gen_iwmmxt_wunpckeluh (rtx, rtx);
+extern rtx gen_iwmmxt_wunpckeluw (rtx, rtx);
+extern rtx gen_iwmmxt_wunpckelsb (rtx, rtx);
+extern rtx gen_iwmmxt_wunpckelsh (rtx, rtx);
+extern rtx gen_iwmmxt_wunpckelsw (rtx, rtx);
+extern rtx gen_rorv4hi3 (rtx, rtx, rtx);
+extern rtx gen_rorv2si3 (rtx, rtx, rtx);
+extern rtx gen_rordi3 (rtx, rtx, rtx);
+extern rtx gen_ashrv4hi3_iwmmxt (rtx, rtx, rtx);
+extern rtx gen_ashrv2si3_iwmmxt (rtx, rtx, rtx);
+extern rtx gen_ashrdi3_iwmmxt (rtx, rtx, rtx);
+extern rtx gen_lshrv4hi3_iwmmxt (rtx, rtx, rtx);
+extern rtx gen_lshrv2si3_iwmmxt (rtx, rtx, rtx);
+extern rtx gen_lshrdi3_iwmmxt (rtx, rtx, rtx);
+extern rtx gen_ashlv4hi3_iwmmxt (rtx, rtx, rtx);
+extern rtx gen_ashlv2si3_iwmmxt (rtx, rtx, rtx);
+extern rtx gen_ashldi3_iwmmxt (rtx, rtx, rtx);
+extern rtx gen_rorv4hi3_di (rtx, rtx, rtx);
+extern rtx gen_rorv2si3_di (rtx, rtx, rtx);
+extern rtx gen_rordi3_di (rtx, rtx, rtx);
+extern rtx gen_ashrv4hi3_di (rtx, rtx, rtx);
+extern rtx gen_ashrv2si3_di (rtx, rtx, rtx);
+extern rtx gen_ashrdi3_di (rtx, rtx, rtx);
+extern rtx gen_lshrv4hi3_di (rtx, rtx, rtx);
+extern rtx gen_lshrv2si3_di (rtx, rtx, rtx);
+extern rtx gen_lshrdi3_di (rtx, rtx, rtx);
+extern rtx gen_ashlv4hi3_di (rtx, rtx, rtx);
+extern rtx gen_ashlv2si3_di (rtx, rtx, rtx);
+extern rtx gen_ashldi3_di (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmadds (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmaddu (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_tmia (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_tmiaph (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_tmiabb (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_tmiatb (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_tmiabt (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_tmiatt (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_tmovmskb (rtx, rtx);
+extern rtx gen_iwmmxt_tmovmskh (rtx, rtx);
+extern rtx gen_iwmmxt_tmovmskw (rtx, rtx);
+extern rtx gen_iwmmxt_waccb (rtx, rtx);
+extern rtx gen_iwmmxt_wacch (rtx, rtx);
+extern rtx gen_iwmmxt_waccw (rtx, rtx);
+extern rtx gen_iwmmxt_waligni (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_walignr (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_walignr0 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_walignr1 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_walignr2 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_walignr3 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wsadb (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wsadh (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wsadbz (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wsadhz (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wabsv2si3 (rtx, rtx);
+extern rtx gen_iwmmxt_wabsv4hi3 (rtx, rtx);
+extern rtx gen_iwmmxt_wabsv8qi3 (rtx, rtx);
+extern rtx gen_iwmmxt_wabsdiffb (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wabsdiffh (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wabsdiffw (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_waddsubhx (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wsubaddhx (rtx, rtx, rtx);
+extern rtx gen_addcv4hi3 (rtx, rtx, rtx);
+extern rtx gen_addcv2si3 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_avg4 (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_avg4r (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmaddsx (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmaddux (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmaddsn (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmaddun (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmulwsm (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmulwum (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmulsmr (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmulumr (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmulwsmr (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmulwumr (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmulwl (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wqmulm (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wqmulwm (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wqmulmr (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wqmulwmr (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_waddbhusm (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_waddbhusl (rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wqmiabb (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wqmiabt (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wqmiatb (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wqmiatt (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wqmiabbn (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wqmiabtn (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wqmiatbn (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wqmiattn (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiabb (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiabt (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiatb (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiatt (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiabbn (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiabtn (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiatbn (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiattn (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiawbb (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiawbt (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiawtb (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiawtt (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiawbbn (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiawbtn (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiawtbn (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmiawttn (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_wmerge (rtx, rtx, rtx, rtx);
+extern rtx gen_iwmmxt_tandcv2si3 (void);
+extern rtx gen_iwmmxt_tandcv4hi3 (void);
+extern rtx gen_iwmmxt_tandcv8qi3 (void);
+extern rtx gen_iwmmxt_torcv2si3 (void);
+extern rtx gen_iwmmxt_torcv4hi3 (void);
+extern rtx gen_iwmmxt_torcv8qi3 (void);
+extern rtx gen_iwmmxt_torvscv2si3 (void);
+extern rtx gen_iwmmxt_torvscv4hi3 (void);
+extern rtx gen_iwmmxt_torvscv8qi3 (void);
+extern rtx gen_iwmmxt_textrcv2si3 (rtx);
+extern rtx gen_iwmmxt_textrcv4hi3 (rtx);
+extern rtx gen_iwmmxt_textrcv8qi3 (rtx);
+extern rtx gen_abshf2 (rtx, rtx);
+extern rtx gen_neghf2 (rtx, rtx);
+extern rtx gen_neon_vrndhf (rtx, rtx);
+extern rtx gen_neon_vrndahf (rtx, rtx);
+extern rtx gen_neon_vrndmhf (rtx, rtx);
+extern rtx gen_neon_vrndnhf (rtx, rtx);
+extern rtx gen_neon_vrndphf (rtx, rtx);
+extern rtx gen_neon_vrndxhf (rtx, rtx);
+extern rtx gen_neon_vrndihf (rtx, rtx);
+extern rtx gen_addhf3 (rtx, rtx, rtx);
+extern rtx gen_subhf3 (rtx, rtx, rtx);
+extern rtx gen_divhf3 (rtx, rtx, rtx);
+extern rtx gen_mulhf3 (rtx, rtx, rtx);
+extern rtx gen_fmahf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_fmasf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_fmadf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_fmsubhf4_fp16 (rtx, rtx, rtx, rtx);
+extern rtx gen_extendhfsf2 (rtx, rtx);
+extern rtx gen_truncsfhf2 (rtx, rtx);
+extern rtx gen_fixuns_truncsfsi2 (rtx, rtx);
+extern rtx gen_fixuns_truncdfsi2 (rtx, rtx);
+extern rtx gen_floatunssisf2 (rtx, rtx);
+extern rtx gen_floatunssidf2 (rtx, rtx);
+extern rtx gen_neon_vsqrthf (rtx, rtx);
+extern rtx gen_neon_vrsqrtshf (rtx, rtx, rtx);
+extern rtx gen_push_fpsysreg_insn (rtx, rtx);
+extern rtx gen_pop_fpsysreg_insn (rtx, rtx);
+extern rtx gen_lazy_store_multiple_insn (rtx);
+extern rtx gen_lazy_load_multiple_insn (rtx);
+extern rtx gen_neon_vcvthshf (rtx, rtx);
+extern rtx gen_neon_vcvthuhf (rtx, rtx);
+extern rtx gen_neon_vcvthssi (rtx, rtx);
+extern rtx gen_neon_vcvthusi (rtx, rtx);
+extern rtx gen_neon_vcvths_nhf_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcvthu_nhf_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcvths_nsi_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcvthu_nsi_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcvtahssi (rtx, rtx);
+extern rtx gen_neon_vcvtahusi (rtx, rtx);
+extern rtx gen_neon_vcvtmhssi (rtx, rtx);
+extern rtx gen_neon_vcvtmhusi (rtx, rtx);
+extern rtx gen_neon_vcvtnhssi (rtx, rtx);
+extern rtx gen_neon_vcvtnhusi (rtx, rtx);
+extern rtx gen_neon_vcvtphssi (rtx, rtx);
+extern rtx gen_neon_vcvtphusi (rtx, rtx);
+extern rtx gen_btruncsf2 (rtx, rtx);
+extern rtx gen_ceilsf2 (rtx, rtx);
+extern rtx gen_floorsf2 (rtx, rtx);
+extern rtx gen_nearbyintsf2 (rtx, rtx);
+extern rtx gen_rintsf2 (rtx, rtx);
+extern rtx gen_roundsf2 (rtx, rtx);
+extern rtx gen_btruncdf2 (rtx, rtx);
+extern rtx gen_ceildf2 (rtx, rtx);
+extern rtx gen_floordf2 (rtx, rtx);
+extern rtx gen_nearbyintdf2 (rtx, rtx);
+extern rtx gen_rintdf2 (rtx, rtx);
+extern rtx gen_rounddf2 (rtx, rtx);
+extern rtx gen_lceilsfsi2 (rtx, rtx);
+extern rtx gen_lfloorsfsi2 (rtx, rtx);
+extern rtx gen_lroundsfsi2 (rtx, rtx);
+extern rtx gen_lceilusfsi2 (rtx, rtx);
+extern rtx gen_lfloorusfsi2 (rtx, rtx);
+extern rtx gen_lroundusfsi2 (rtx, rtx);
+extern rtx gen_lceildfsi2 (rtx, rtx);
+extern rtx gen_lfloordfsi2 (rtx, rtx);
+extern rtx gen_lrounddfsi2 (rtx, rtx);
+extern rtx gen_lceiludfsi2 (rtx, rtx);
+extern rtx gen_lfloorudfsi2 (rtx, rtx);
+extern rtx gen_lroundudfsi2 (rtx, rtx);
+extern rtx gen_smaxsf3 (rtx, rtx, rtx);
+extern rtx gen_smaxdf3 (rtx, rtx, rtx);
+extern rtx gen_sminsf3 (rtx, rtx, rtx);
+extern rtx gen_smindf3 (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxnmhf (rtx, rtx, rtx);
+extern rtx gen_neon_vminnmhf (rtx, rtx, rtx);
+extern rtx gen_fmaxsf3 (rtx, rtx, rtx);
+extern rtx gen_fminsf3 (rtx, rtx, rtx);
+extern rtx gen_fmaxdf3 (rtx, rtx, rtx);
+extern rtx gen_fmindf3 (rtx, rtx, rtx);
+extern rtx gen_set_fpscr (rtx);
+extern rtx gen_get_fpscr (rtx);
+extern rtx gen_no_literal_pool_df_immediate (rtx, rtx, rtx);
+extern rtx gen_no_literal_pool_sf_immediate (rtx, rtx, rtx);
+extern rtx gen_arm_vcx1si (rtx, rtx, rtx);
+extern rtx gen_arm_vcx1di (rtx, rtx, rtx);
+extern rtx gen_arm_vcx1asi (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx1adi (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx2si (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx2di (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx2asi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx2adi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx3si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx3di (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx3asi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx3adi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_thumb1_subsi3_insn (rtx, rtx, rtx);
+extern rtx gen_thumb1_bicsi3 (rtx, rtx, rtx);
+extern rtx gen_thumb1_extendhisi2 (rtx, rtx);
+extern rtx gen_thumb1_extendqisi2 (rtx, rtx);
+extern rtx gen_cpymem12b (rtx, rtx, rtx, rtx);
+extern rtx gen_cpymem8b (rtx, rtx, rtx, rtx);
+extern rtx gen_thumb1_cbz (rtx, rtx, rtx);
+extern rtx gen_cbranchsi4_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_cbranchsi4_scratch (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_cstoresi_nltu_thumb1 (rtx, rtx, rtx);
+extern rtx gen_cstoresi_ltu_thumb1 (rtx, rtx, rtx);
+extern rtx gen_thumb1_addsi3_addgeu (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_thumb1_casesi_dispatch (rtx);
+extern rtx gen_prologue_thumb1_interwork (void);
+extern rtx gen_thumb_eh_return (rtx);
+extern rtx gen_thumb1_stack_protect_test_insn (rtx, rtx, rtx);
+extern rtx gen_tls_load_dot_plus_four (rtx, rtx, rtx, rtx);
+extern rtx gen_thumb2_zero_extendqisi2_v6 (rtx, rtx);
+extern rtx gen_thumb2_eh_return (rtx);
+extern rtx gen_thumb2_addsi3_compare0 (rtx, rtx, rtx);
+extern rtx gen_thumb2_asrl (rtx, rtx);
+extern rtx gen_thumb2_lsll (rtx, rtx);
+extern rtx gen_thumb2_lsrl (rtx, rtx);
+extern rtx gen_dls_insn (rtx);
+extern rtx gen_unaligned_storev8qi (rtx, rtx);
+extern rtx gen_vec_setv8qi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_setv4hi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_setv4hf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_setv4bf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_setv2si_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_setv2sf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_setv16qi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_setv8hi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_setv8hf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_setv4si_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_setv4sf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_setv2di_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_extractv8qiqi (rtx, rtx, rtx);
+extern rtx gen_vec_extractv4hihi (rtx, rtx, rtx);
+extern rtx gen_vec_extractv4hfhf (rtx, rtx, rtx);
+extern rtx gen_vec_extractv4bfbf (rtx, rtx, rtx);
+extern rtx gen_vec_extractv2sisi (rtx, rtx, rtx);
+extern rtx gen_vec_extractv2sfsf (rtx, rtx, rtx);
+extern rtx gen_neon_vec_extractv16qiqi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_extractv8hihi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_extractv8hfhf (rtx, rtx, rtx);
+extern rtx gen_neon_vec_extractv4sisi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_extractv4sfsf (rtx, rtx, rtx);
+extern rtx gen_neon_vec_extractv2didi (rtx, rtx, rtx);
+extern rtx gen_mulv8qi3addv8qi_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv16qi3addv16qi_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv4hi3addv4hi_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv8hi3addv8hi_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv2si3addv2si_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv4si3addv4si_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv2sf3addv2sf_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv4sf3addv4sf_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv8hf3addv8hf_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv4hf3addv4hf_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv8qi3negv8qiaddv8qi_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv16qi3negv16qiaddv16qi_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv4hi3negv4hiaddv4hi_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv8hi3negv8hiaddv8hi_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv2si3negv2siaddv2si_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv4si3negv4siaddv4si_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv2sf3negv2sfaddv2sf_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_mulv4sf3negv4sfaddv4sf_neon (rtx, rtx, rtx, rtx);
+extern rtx gen_fmav2sf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_fmav4sf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_fmav2sf4_intrinsic (rtx, rtx, rtx, rtx);
+extern rtx gen_fmav4sf4_intrinsic (rtx, rtx, rtx, rtx);
+extern rtx gen_fmav8hf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_fmav4hf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_fmsubv2sf4_intrinsic (rtx, rtx, rtx, rtx);
+extern rtx gen_fmsubv4sf4_intrinsic (rtx, rtx, rtx, rtx);
+extern rtx gen_fmsubv8hf4_intrinsic (rtx, rtx, rtx, rtx);
+extern rtx gen_fmsubv4hf4_intrinsic (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrintpv2sf (rtx, rtx);
+extern rtx gen_neon_vrintzv2sf (rtx, rtx);
+extern rtx gen_neon_vrintmv2sf (rtx, rtx);
+extern rtx gen_neon_vrintxv2sf (rtx, rtx);
+extern rtx gen_neon_vrintav2sf (rtx, rtx);
+extern rtx gen_neon_vrintnv2sf (rtx, rtx);
+extern rtx gen_neon_vrintpv4sf (rtx, rtx);
+extern rtx gen_neon_vrintzv4sf (rtx, rtx);
+extern rtx gen_neon_vrintmv4sf (rtx, rtx);
+extern rtx gen_neon_vrintxv4sf (rtx, rtx);
+extern rtx gen_neon_vrintav4sf (rtx, rtx);
+extern rtx gen_neon_vrintnv4sf (rtx, rtx);
+extern rtx gen_neon_vcvtpv2sfv2si (rtx, rtx);
+extern rtx gen_neon_vcvtmv2sfv2si (rtx, rtx);
+extern rtx gen_neon_vcvtav2sfv2si (rtx, rtx);
+extern rtx gen_neon_vcvtpuv2sfv2si (rtx, rtx);
+extern rtx gen_neon_vcvtmuv2sfv2si (rtx, rtx);
+extern rtx gen_neon_vcvtauv2sfv2si (rtx, rtx);
+extern rtx gen_neon_vcvtpv4sfv4si (rtx, rtx);
+extern rtx gen_neon_vcvtmv4sfv4si (rtx, rtx);
+extern rtx gen_neon_vcvtav4sfv4si (rtx, rtx);
+extern rtx gen_neon_vcvtpuv4sfv4si (rtx, rtx);
+extern rtx gen_neon_vcvtmuv4sfv4si (rtx, rtx);
+extern rtx gen_neon_vcvtauv4sfv4si (rtx, rtx);
+extern rtx gen_iorv8qi3_neon (rtx, rtx, rtx);
+extern rtx gen_iorv16qi3_neon (rtx, rtx, rtx);
+extern rtx gen_iorv4hi3_neon (rtx, rtx, rtx);
+extern rtx gen_iorv8hi3_neon (rtx, rtx, rtx);
+extern rtx gen_iorv2si3_neon (rtx, rtx, rtx);
+extern rtx gen_iorv4si3_neon (rtx, rtx, rtx);
+extern rtx gen_iorv4hf3_neon (rtx, rtx, rtx);
+extern rtx gen_iorv8hf3_neon (rtx, rtx, rtx);
+extern rtx gen_iorv2sf3_neon (rtx, rtx, rtx);
+extern rtx gen_iorv4sf3_neon (rtx, rtx, rtx);
+extern rtx gen_iorv2di3_neon (rtx, rtx, rtx);
+extern rtx gen_andv8qi3_neon (rtx, rtx, rtx);
+extern rtx gen_andv16qi3_neon (rtx, rtx, rtx);
+extern rtx gen_andv4hi3_neon (rtx, rtx, rtx);
+extern rtx gen_andv8hi3_neon (rtx, rtx, rtx);
+extern rtx gen_andv2si3_neon (rtx, rtx, rtx);
+extern rtx gen_andv4si3_neon (rtx, rtx, rtx);
+extern rtx gen_andv4hf3_neon (rtx, rtx, rtx);
+extern rtx gen_andv8hf3_neon (rtx, rtx, rtx);
+extern rtx gen_andv2sf3_neon (rtx, rtx, rtx);
+extern rtx gen_andv4sf3_neon (rtx, rtx, rtx);
+extern rtx gen_andv2di3_neon (rtx, rtx, rtx);
+extern rtx gen_ornv8qi3_neon (rtx, rtx, rtx);
+extern rtx gen_ornv16qi3_neon (rtx, rtx, rtx);
+extern rtx gen_ornv4hi3_neon (rtx, rtx, rtx);
+extern rtx gen_ornv8hi3_neon (rtx, rtx, rtx);
+extern rtx gen_ornv2si3_neon (rtx, rtx, rtx);
+extern rtx gen_ornv4si3_neon (rtx, rtx, rtx);
+extern rtx gen_ornv4hf3_neon (rtx, rtx, rtx);
+extern rtx gen_ornv8hf3_neon (rtx, rtx, rtx);
+extern rtx gen_ornv2sf3_neon (rtx, rtx, rtx);
+extern rtx gen_ornv4sf3_neon (rtx, rtx, rtx);
+extern rtx gen_ornv2di3_neon (rtx, rtx, rtx);
+extern rtx gen_bicv8qi3_neon (rtx, rtx, rtx);
+extern rtx gen_bicv16qi3_neon (rtx, rtx, rtx);
+extern rtx gen_bicv4hi3_neon (rtx, rtx, rtx);
+extern rtx gen_bicv8hi3_neon (rtx, rtx, rtx);
+extern rtx gen_bicv2si3_neon (rtx, rtx, rtx);
+extern rtx gen_bicv4si3_neon (rtx, rtx, rtx);
+extern rtx gen_bicv4hf3_neon (rtx, rtx, rtx);
+extern rtx gen_bicv8hf3_neon (rtx, rtx, rtx);
+extern rtx gen_bicv2sf3_neon (rtx, rtx, rtx);
+extern rtx gen_bicv4sf3_neon (rtx, rtx, rtx);
+extern rtx gen_bicv2di3_neon (rtx, rtx, rtx);
+extern rtx gen_xorv8qi3_neon (rtx, rtx, rtx);
+extern rtx gen_xorv16qi3_neon (rtx, rtx, rtx);
+extern rtx gen_xorv4hi3_neon (rtx, rtx, rtx);
+extern rtx gen_xorv8hi3_neon (rtx, rtx, rtx);
+extern rtx gen_xorv2si3_neon (rtx, rtx, rtx);
+extern rtx gen_xorv4si3_neon (rtx, rtx, rtx);
+extern rtx gen_xorv4hf3_neon (rtx, rtx, rtx);
+extern rtx gen_xorv8hf3_neon (rtx, rtx, rtx);
+extern rtx gen_xorv2sf3_neon (rtx, rtx, rtx);
+extern rtx gen_xorv4sf3_neon (rtx, rtx, rtx);
+extern rtx gen_xorv2di3_neon (rtx, rtx, rtx);
+extern rtx gen_one_cmplv8qi2_neon (rtx, rtx);
+extern rtx gen_one_cmplv16qi2_neon (rtx, rtx);
+extern rtx gen_one_cmplv4hi2_neon (rtx, rtx);
+extern rtx gen_one_cmplv8hi2_neon (rtx, rtx);
+extern rtx gen_one_cmplv2si2_neon (rtx, rtx);
+extern rtx gen_one_cmplv4si2_neon (rtx, rtx);
+extern rtx gen_one_cmplv4hf2_neon (rtx, rtx);
+extern rtx gen_one_cmplv8hf2_neon (rtx, rtx);
+extern rtx gen_one_cmplv2sf2_neon (rtx, rtx);
+extern rtx gen_one_cmplv4sf2_neon (rtx, rtx);
+extern rtx gen_one_cmplv2di2_neon (rtx, rtx);
+extern rtx gen_neon_absv8qi2 (rtx, rtx);
+extern rtx gen_neon_absv16qi2 (rtx, rtx);
+extern rtx gen_neon_absv4hi2 (rtx, rtx);
+extern rtx gen_neon_absv8hi2 (rtx, rtx);
+extern rtx gen_neon_absv2si2 (rtx, rtx);
+extern rtx gen_neon_absv4si2 (rtx, rtx);
+extern rtx gen_neon_absv2sf2 (rtx, rtx);
+extern rtx gen_neon_absv4sf2 (rtx, rtx);
+extern rtx gen_neon_negv8qi2 (rtx, rtx);
+extern rtx gen_neon_negv16qi2 (rtx, rtx);
+extern rtx gen_neon_negv4hi2 (rtx, rtx);
+extern rtx gen_neon_negv8hi2 (rtx, rtx);
+extern rtx gen_neon_negv2si2 (rtx, rtx);
+extern rtx gen_neon_negv4si2 (rtx, rtx);
+extern rtx gen_neon_negv2sf2 (rtx, rtx);
+extern rtx gen_neon_negv4sf2 (rtx, rtx);
+extern rtx gen_neon_absv8hf2 (rtx, rtx);
+extern rtx gen_neon_negv8hf2 (rtx, rtx);
+extern rtx gen_neon_absv4hf2 (rtx, rtx);
+extern rtx gen_neon_negv4hf2 (rtx, rtx);
+extern rtx gen_neon_vrndv8hf (rtx, rtx);
+extern rtx gen_neon_vrndav8hf (rtx, rtx);
+extern rtx gen_neon_vrndmv8hf (rtx, rtx);
+extern rtx gen_neon_vrndnv8hf (rtx, rtx);
+extern rtx gen_neon_vrndpv8hf (rtx, rtx);
+extern rtx gen_neon_vrndxv8hf (rtx, rtx);
+extern rtx gen_neon_vrndv4hf (rtx, rtx);
+extern rtx gen_neon_vrndav4hf (rtx, rtx);
+extern rtx gen_neon_vrndmv4hf (rtx, rtx);
+extern rtx gen_neon_vrndnv4hf (rtx, rtx);
+extern rtx gen_neon_vrndpv4hf (rtx, rtx);
+extern rtx gen_neon_vrndxv4hf (rtx, rtx);
+extern rtx gen_neon_vrsqrtev8hf (rtx, rtx);
+extern rtx gen_neon_vrsqrtev4hf (rtx, rtx);
+extern rtx gen_vashrv8qi3_imm (rtx, rtx, rtx);
+extern rtx gen_vashrv16qi3_imm (rtx, rtx, rtx);
+extern rtx gen_vashrv4hi3_imm (rtx, rtx, rtx);
+extern rtx gen_vashrv8hi3_imm (rtx, rtx, rtx);
+extern rtx gen_vashrv2si3_imm (rtx, rtx, rtx);
+extern rtx gen_vashrv4si3_imm (rtx, rtx, rtx);
+extern rtx gen_vlshrv8qi3_imm (rtx, rtx, rtx);
+extern rtx gen_vlshrv16qi3_imm (rtx, rtx, rtx);
+extern rtx gen_vlshrv4hi3_imm (rtx, rtx, rtx);
+extern rtx gen_vlshrv8hi3_imm (rtx, rtx, rtx);
+extern rtx gen_vlshrv2si3_imm (rtx, rtx, rtx);
+extern rtx gen_vlshrv4si3_imm (rtx, rtx, rtx);
+extern rtx gen_ashlv8qi3_signed (rtx, rtx, rtx);
+extern rtx gen_ashlv16qi3_signed (rtx, rtx, rtx);
+extern rtx gen_ashlv4hi3_signed (rtx, rtx, rtx);
+extern rtx gen_ashlv8hi3_signed (rtx, rtx, rtx);
+extern rtx gen_ashlv2si3_signed (rtx, rtx, rtx);
+extern rtx gen_ashlv4si3_signed (rtx, rtx, rtx);
+extern rtx gen_ashlv2di3_signed (rtx, rtx, rtx);
+extern rtx gen_ashlv8qi3_unsigned (rtx, rtx, rtx);
+extern rtx gen_ashlv16qi3_unsigned (rtx, rtx, rtx);
+extern rtx gen_ashlv4hi3_unsigned (rtx, rtx, rtx);
+extern rtx gen_ashlv8hi3_unsigned (rtx, rtx, rtx);
+extern rtx gen_ashlv2si3_unsigned (rtx, rtx, rtx);
+extern rtx gen_ashlv4si3_unsigned (rtx, rtx, rtx);
+extern rtx gen_ashlv2di3_unsigned (rtx, rtx, rtx);
+extern rtx gen_neon_load_count (rtx, rtx);
+extern rtx gen_vec_sel_widen_ssum_lov16qiv8qi3 (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_sel_widen_ssum_lov8hiv4hi3 (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_sel_widen_ssum_lov4siv2si3 (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_sel_widen_ssum_hiv16qiv8qi3 (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_sel_widen_ssum_hiv8hiv4hi3 (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_sel_widen_ssum_hiv4siv2si3 (rtx, rtx, rtx, rtx);
+extern rtx gen_widen_ssumv8qi3 (rtx, rtx, rtx);
+extern rtx gen_widen_ssumv4hi3 (rtx, rtx, rtx);
+extern rtx gen_widen_ssumv2si3 (rtx, rtx, rtx);
+extern rtx gen_vec_sel_widen_usum_lov16qiv8qi3 (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_sel_widen_usum_lov8hiv4hi3 (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_sel_widen_usum_lov4siv2si3 (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_sel_widen_usum_hiv16qiv8qi3 (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_sel_widen_usum_hiv8hiv4hi3 (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_sel_widen_usum_hiv4siv2si3 (rtx, rtx, rtx, rtx);
+extern rtx gen_widen_usumv8qi3 (rtx, rtx, rtx);
+extern rtx gen_widen_usumv4hi3 (rtx, rtx, rtx);
+extern rtx gen_widen_usumv2si3 (rtx, rtx, rtx);
+extern rtx gen_quad_halves_plusv4si (rtx, rtx);
+extern rtx gen_quad_halves_sminv4si (rtx, rtx);
+extern rtx gen_quad_halves_smaxv4si (rtx, rtx);
+extern rtx gen_quad_halves_uminv4si (rtx, rtx);
+extern rtx gen_quad_halves_umaxv4si (rtx, rtx);
+extern rtx gen_quad_halves_plusv4sf (rtx, rtx);
+extern rtx gen_quad_halves_sminv4sf (rtx, rtx);
+extern rtx gen_quad_halves_smaxv4sf (rtx, rtx);
+extern rtx gen_quad_halves_plusv8hi (rtx, rtx);
+extern rtx gen_quad_halves_sminv8hi (rtx, rtx);
+extern rtx gen_quad_halves_smaxv8hi (rtx, rtx);
+extern rtx gen_quad_halves_uminv8hi (rtx, rtx);
+extern rtx gen_quad_halves_umaxv8hi (rtx, rtx);
+extern rtx gen_quad_halves_plusv16qi (rtx, rtx);
+extern rtx gen_quad_halves_sminv16qi (rtx, rtx);
+extern rtx gen_quad_halves_smaxv16qi (rtx, rtx);
+extern rtx gen_quad_halves_uminv16qi (rtx, rtx);
+extern rtx gen_quad_halves_umaxv16qi (rtx, rtx);
+extern rtx gen_arm_reduc_plus_internal_v2di (rtx, rtx);
+extern rtx gen_neon_vpadd_internalv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpadd_internalv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpadd_internalv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vpadd_internalv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vpaddv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vpsminv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpsminv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpsminv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vpsminv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vpsmaxv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpsmaxv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpsmaxv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vpsmaxv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vpuminv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpuminv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpuminv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vpumaxv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpumaxv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpumaxv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vaddv2sf_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vaddv4sf_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vaddlsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vaddluv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vaddlsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vaddluv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vaddlsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vaddluv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vaddwsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vaddwuv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vaddwsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vaddwuv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vaddwsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vaddwuv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vrhaddsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vrhadduv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vhaddsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vhadduv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vrhaddsv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vrhadduv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vhaddsv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vhadduv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vrhaddsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrhadduv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vhaddsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vhadduv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrhaddsv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrhadduv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vhaddsv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vhadduv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrhaddsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vrhadduv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vhaddsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vhadduv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vrhaddsv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vrhadduv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vhaddsv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vhadduv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqaddsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqadduv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqaddsv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqadduv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqaddsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqadduv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqaddsv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqadduv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqaddsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqadduv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqaddsv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqadduv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqaddsdi (rtx, rtx, rtx);
+extern rtx gen_neon_vqaddudi (rtx, rtx, rtx);
+extern rtx gen_neon_vqaddsv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqadduv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vaddhnv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vraddhnv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vaddhnv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vraddhnv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vaddhnv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vraddhnv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vmulpv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vmulpv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vmulfv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vmulfv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vmulfv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vmulfv4hf (rtx, rtx, rtx);
+extern rtx gen_vfmal_lowv2sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmal_lowv4sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmsl_highv2sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmsl_highv4sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmal_highv2sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmal_highv4sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmsl_lowv2sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmsl_lowv4sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmal_lane_lowv2sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmal_lane_lowv4sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmal_lane_lowv8hfv2sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmal_lane_lowv4hfv4sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmal_lane_highv8hfv2sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmal_lane_highv4hfv4sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmal_lane_highv2sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmal_lane_highv4sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmsl_lane_lowv2sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmsl_lane_lowv4sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmsl_lane_lowv8hfv2sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmsl_lane_lowv4hfv4sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmsl_lane_highv8hfv2sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmsl_lane_highv4hfv4sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmsl_lane_highv2sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vfmsl_lane_highv4sf_intrinsic (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav8qi_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav16qi_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav4hi_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav8hi_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav2si_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav4si_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav2sf_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav4sf_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlalsv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlaluv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlalsv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlaluv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlalsv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlaluv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv8qi_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv16qi_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv4hi_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv8hi_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv2si_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv4si_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv2sf_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv4sf_unspec (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlslsv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsluv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlslsv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsluv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlslsv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsluv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmulhv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmulhv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqdmulhv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmulhv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqdmulhv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmulhv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqdmulhv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmulhv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlahv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlshv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlahv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlshv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlahv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlshv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlahv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlshv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmlalv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmlalv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmlslv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmlslv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmullsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vmulluv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vmullpv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vmullsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vmulluv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vmullpv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vmullsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vmulluv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vmullpv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqdmullv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqdmullv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vsubv2sf_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vsubv4sf_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vsublsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vsubluv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vsublsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vsubluv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vsublsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vsubluv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vsubwsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vsubwuv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vsubwsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vsubwuv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vsubwsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vsubwuv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubuv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubsv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubuv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubuv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubsv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubuv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubuv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubsv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubuv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubsdi (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubudi (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubsv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqsubuv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vhsubsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vhsubuv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vhsubsv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vhsubuv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vhsubsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vhsubuv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vhsubsv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vhsubuv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vhsubsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vhsubuv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vhsubsv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vhsubuv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vsubhnv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrsubhnv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vsubhnv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vrsubhnv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vsubhnv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vrsubhnv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv8qi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv8qi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev8qi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vclev8qi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv8qi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv16qi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv16qi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev16qi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vclev16qi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv16qi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv4hi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv4hi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev4hi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vclev4hi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv4hi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv8hi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv8hi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev8hi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vclev8hi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv8hi_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv2si_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv2si_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev2si_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vclev2si_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv2si_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv4si_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv4si_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev4si_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vclev4si_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv4si_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv2sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv2sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev2sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vclev2sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv2sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv4sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv4sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev4sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vclev4sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv4sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv2sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv2sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev2sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv2sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vclev2sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv4sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv4sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev4sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv4sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vclev4sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv8hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv8hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev8hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vclev8hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv8hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv4hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv4hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev4hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vclev4hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv4hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv8hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv8hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev8hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv8hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vclev8hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv4hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv4hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev4hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv4hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vclev4hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtuv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgeuv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtuv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgeuv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtuv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgeuv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtuv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgeuv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtuv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vcgeuv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtuv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vcgeuv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vcagtv2sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcagev2sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcaltv2sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcalev2sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcagtv4sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcagev4sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcaltv4sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcalev4sf_insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcagev2sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcagtv2sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcalev2sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcaltv2sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcagev4sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcagtv4sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcalev4sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcaltv4sf_insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcagtv8hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcagev8hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcaltv8hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcalev8hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcagtv4hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcagev4hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcaltv4hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcalev4hf_fp16insn (rtx, rtx, rtx);
+extern rtx gen_neon_vcagev8hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcagtv8hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcalev8hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcaltv8hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcagev4hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcagtv4hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcalev4hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vcaltv4hf_fp16insn_unspec (rtx, rtx, rtx);
+extern rtx gen_neon_vtst_combinev8qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtst_combinev16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtst_combinev4hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtst_combinev8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtst_combinev2si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtst_combinev4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabdsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vabduv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vabdsv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vabduv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vabdsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vabduv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vabdsv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vabduv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vabdsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vabduv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vabdsv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vabduv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vabdv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vabdv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vabdfv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vabdfv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vabdlsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vabdluv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vabdlsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vabdluv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vabdlsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vabdluv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vabasv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabauv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabasv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabauv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabasv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabauv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabasv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabauv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabasv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabauv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabasv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabauv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabalsv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabaluv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabalsv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabaluv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabalsv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vabaluv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmaxsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxuv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vminsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vminuv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxsv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxuv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vminsv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vminuv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxuv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vminsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vminuv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxsv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxuv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vminsv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vminuv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxuv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vminsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vminuv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxsv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxuv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vminsv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vminuv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxfv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vminfv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxfv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vminfv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxfv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vminfv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxfv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vminfv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vpmaxfv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vpminfv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxnmv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vminnmv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxnmv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vminnmv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxnmv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vminnmv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vmaxnmv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vminnmv4sf (rtx, rtx, rtx);
+extern rtx gen_fmaxv2sf3 (rtx, rtx, rtx);
+extern rtx gen_fminv2sf3 (rtx, rtx, rtx);
+extern rtx gen_fmaxv4sf3 (rtx, rtx, rtx);
+extern rtx gen_fminv4sf3 (rtx, rtx, rtx);
+extern rtx gen_neon_vpaddlsv8qi (rtx, rtx);
+extern rtx gen_neon_vpaddluv8qi (rtx, rtx);
+extern rtx gen_neon_vpaddlsv16qi (rtx, rtx);
+extern rtx gen_neon_vpaddluv16qi (rtx, rtx);
+extern rtx gen_neon_vpaddlsv4hi (rtx, rtx);
+extern rtx gen_neon_vpaddluv4hi (rtx, rtx);
+extern rtx gen_neon_vpaddlsv8hi (rtx, rtx);
+extern rtx gen_neon_vpaddluv8hi (rtx, rtx);
+extern rtx gen_neon_vpaddlsv2si (rtx, rtx);
+extern rtx gen_neon_vpaddluv2si (rtx, rtx);
+extern rtx gen_neon_vpaddlsv4si (rtx, rtx);
+extern rtx gen_neon_vpaddluv4si (rtx, rtx);
+extern rtx gen_neon_vpadalsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpadaluv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpadalsv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpadaluv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpadalsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpadaluv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpadalsv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpadaluv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpadalsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vpadaluv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vpadalsv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vpadaluv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vpmaxsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpmaxuv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpminsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpminuv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpmaxsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpmaxuv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpminsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpminuv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpmaxsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vpmaxuv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vpminsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vpminuv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vpmaxfv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vpminfv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vpmaxfv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vpminfv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vrecpsv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vrecpsv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vrecpsv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vrecpsv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vrsqrtsv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vrsqrtsv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vrsqrtsv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vrsqrtsv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vqabsv8qi (rtx, rtx);
+extern rtx gen_neon_vqabsv16qi (rtx, rtx);
+extern rtx gen_neon_vqabsv4hi (rtx, rtx);
+extern rtx gen_neon_vqabsv8hi (rtx, rtx);
+extern rtx gen_neon_vqabsv2si (rtx, rtx);
+extern rtx gen_neon_vqabsv4si (rtx, rtx);
+extern rtx gen_neon_bswapv4hi (rtx, rtx);
+extern rtx gen_neon_bswapv8hi (rtx, rtx);
+extern rtx gen_neon_bswapv2si (rtx, rtx);
+extern rtx gen_neon_bswapv4si (rtx, rtx);
+extern rtx gen_neon_bswapv2di (rtx, rtx);
+extern rtx gen_neon_vcadd90v4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcadd270v4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcadd90v8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcadd270v8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcadd90v2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcadd270v2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcadd90v4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcadd270v4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcmla0v4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla90v4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla180v4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla270v4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla0v8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla90v8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla180v8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla270v8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla0v2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla90v2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla180v2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla270v2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla0v4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla90v4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla180v4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla270v4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane0v4hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane90v4hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane180v4hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane270v4hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane0v8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane90v8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane180v8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane270v8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane0v2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane90v2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane180v2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane270v2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane0v4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane90v4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane180v4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_lane270v4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_laneq0v2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_laneq90v2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_laneq180v2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_laneq270v2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_laneq0v4hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_laneq90v4hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_laneq180v4hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmla_laneq270v4hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmlaq_lane0v8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmlaq_lane90v8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmlaq_lane180v8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmlaq_lane270v8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmlaq_lane0v4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmlaq_lane90v4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmlaq_lane180v4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcmlaq_lane270v4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_sdot_prodv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_udot_prodv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_sdot_prodv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_udot_prodv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_usdotv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_usdotv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_sdot_lanev8qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_udot_lanev8qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_sdot_lanev16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_udot_lanev16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_sdot_laneqv8qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_udot_laneqv8qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_sdot_laneqv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_udot_laneqv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_usdot_lanev8qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_sudot_lanev8qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_usdot_lanev16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_sudot_lanev16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_usdot_laneqv8qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_sudot_laneqv8qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_usdot_laneqv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_sudot_laneqv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqnegv8qi (rtx, rtx);
+extern rtx gen_neon_vqnegv16qi (rtx, rtx);
+extern rtx gen_neon_vqnegv4hi (rtx, rtx);
+extern rtx gen_neon_vqnegv8hi (rtx, rtx);
+extern rtx gen_neon_vqnegv2si (rtx, rtx);
+extern rtx gen_neon_vqnegv4si (rtx, rtx);
+extern rtx gen_neon_vclsv8qi (rtx, rtx);
+extern rtx gen_neon_vclsv16qi (rtx, rtx);
+extern rtx gen_neon_vclsv4hi (rtx, rtx);
+extern rtx gen_neon_vclsv8hi (rtx, rtx);
+extern rtx gen_neon_vclsv2si (rtx, rtx);
+extern rtx gen_neon_vclsv4si (rtx, rtx);
+extern rtx gen_neon_vclzv8qi (rtx, rtx);
+extern rtx gen_neon_vclzv16qi (rtx, rtx);
+extern rtx gen_neon_vclzv4hi (rtx, rtx);
+extern rtx gen_neon_vclzv8hi (rtx, rtx);
+extern rtx gen_neon_vclzv2si (rtx, rtx);
+extern rtx gen_neon_vclzv4si (rtx, rtx);
+extern rtx gen_popcountv8qi2 (rtx, rtx);
+extern rtx gen_popcountv16qi2 (rtx, rtx);
+extern rtx gen_neon_vrecpev8hf (rtx, rtx);
+extern rtx gen_neon_vrecpev4hf (rtx, rtx);
+extern rtx gen_neon_vrecpev2si (rtx, rtx);
+extern rtx gen_neon_vrecpev2sf (rtx, rtx);
+extern rtx gen_neon_vrecpev4si (rtx, rtx);
+extern rtx gen_neon_vrecpev4sf (rtx, rtx);
+extern rtx gen_neon_vrsqrtev2si (rtx, rtx);
+extern rtx gen_neon_vrsqrtev2sf (rtx, rtx);
+extern rtx gen_neon_vrsqrtev4si (rtx, rtx);
+extern rtx gen_neon_vrsqrtev4sf (rtx, rtx);
+extern rtx gen_neon_vget_lanev8qi_sext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev4hi_sext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev2si_sext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev2sf_sext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev8qi_zext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev4hi_zext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev2si_zext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev2sf_zext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev16qi_sext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev8hi_sext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev8hf_sext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev4si_sext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev4sf_sext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev16qi_zext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev8hi_zext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev8hf_zext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev4si_zext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev4sf_zext_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_nv8qi (rtx, rtx);
+extern rtx gen_neon_vdup_nv4hi (rtx, rtx);
+extern rtx gen_neon_vdup_nv16qi (rtx, rtx);
+extern rtx gen_neon_vdup_nv8hi (rtx, rtx);
+extern rtx gen_neon_vdup_nv4hf (rtx, rtx);
+extern rtx gen_neon_vdup_nv8hf (rtx, rtx);
+extern rtx gen_neon_vdup_nv4bf (rtx, rtx);
+extern rtx gen_neon_vdup_nv8bf (rtx, rtx);
+extern rtx gen_neon_vdup_nv2si (rtx, rtx);
+extern rtx gen_neon_vdup_nv2sf (rtx, rtx);
+extern rtx gen_neon_vdup_nv4si (rtx, rtx);
+extern rtx gen_neon_vdup_nv4sf (rtx, rtx);
+extern rtx gen_neon_vdup_nv2di (rtx, rtx);
+extern rtx gen_neon_vdup_lanev8qi_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev16qi_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev4hi_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev8hi_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev2si_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev4si_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev2sf_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev4sf_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev8hf_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev4hf_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev4bf_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev8bf_internal (rtx, rtx, rtx);
+extern rtx gen_neon_vcombinev8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vcombinev4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcombinev4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcombinev4bf (rtx, rtx, rtx);
+extern rtx gen_neon_vcombinev2si (rtx, rtx, rtx);
+extern rtx gen_neon_vcombinev2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcombinedi (rtx, rtx, rtx);
+extern rtx gen_floatv2siv2sf2 (rtx, rtx);
+extern rtx gen_floatv4siv4sf2 (rtx, rtx);
+extern rtx gen_floatunsv2siv2sf2 (rtx, rtx);
+extern rtx gen_floatunsv4siv4sf2 (rtx, rtx);
+extern rtx gen_fix_truncv2sfv2si2 (rtx, rtx);
+extern rtx gen_fix_truncv4sfv4si2 (rtx, rtx);
+extern rtx gen_fixuns_truncv2sfv2si2 (rtx, rtx);
+extern rtx gen_fixuns_truncv4sfv4si2 (rtx, rtx);
+extern rtx gen_neon_vcvtsv2sf (rtx, rtx);
+extern rtx gen_neon_vcvtuv2sf (rtx, rtx);
+extern rtx gen_neon_vcvtsv4sf (rtx, rtx);
+extern rtx gen_neon_vcvtuv4sf (rtx, rtx);
+extern rtx gen_neon_vcvtsv2si (rtx, rtx);
+extern rtx gen_neon_vcvtuv2si (rtx, rtx);
+extern rtx gen_neon_vcvtsv4si (rtx, rtx);
+extern rtx gen_neon_vcvtuv4si (rtx, rtx);
+extern rtx gen_neon_vcvtv4sfv4hf (rtx, rtx);
+extern rtx gen_neon_vcvtv4hfv4sf (rtx, rtx);
+extern rtx gen_neon_vcvtsv4hi (rtx, rtx);
+extern rtx gen_neon_vcvtuv4hi (rtx, rtx);
+extern rtx gen_neon_vcvtsv8hi (rtx, rtx);
+extern rtx gen_neon_vcvtuv8hi (rtx, rtx);
+extern rtx gen_neon_vcvtsv8hf (rtx, rtx);
+extern rtx gen_neon_vcvtuv8hf (rtx, rtx);
+extern rtx gen_neon_vcvtsv4hf (rtx, rtx);
+extern rtx gen_neon_vcvtuv4hf (rtx, rtx);
+extern rtx gen_neon_vcvts_nv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcvtu_nv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcvts_nv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcvtu_nv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcvts_nv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcvtu_nv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcvts_nv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcvtu_nv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcvts_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vcvtu_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vcvts_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vcvtu_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vcvts_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcvtu_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcvts_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcvtu_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcvtasv8hf (rtx, rtx);
+extern rtx gen_neon_vcvtauv8hf (rtx, rtx);
+extern rtx gen_neon_vcvtmsv8hf (rtx, rtx);
+extern rtx gen_neon_vcvtmuv8hf (rtx, rtx);
+extern rtx gen_neon_vcvtnsv8hf (rtx, rtx);
+extern rtx gen_neon_vcvtnuv8hf (rtx, rtx);
+extern rtx gen_neon_vcvtpsv8hf (rtx, rtx);
+extern rtx gen_neon_vcvtpuv8hf (rtx, rtx);
+extern rtx gen_neon_vcvtasv4hf (rtx, rtx);
+extern rtx gen_neon_vcvtauv4hf (rtx, rtx);
+extern rtx gen_neon_vcvtmsv4hf (rtx, rtx);
+extern rtx gen_neon_vcvtmuv4hf (rtx, rtx);
+extern rtx gen_neon_vcvtnsv4hf (rtx, rtx);
+extern rtx gen_neon_vcvtnuv4hf (rtx, rtx);
+extern rtx gen_neon_vcvtpsv4hf (rtx, rtx);
+extern rtx gen_neon_vcvtpuv4hf (rtx, rtx);
+extern rtx gen_neon_vmovnv8hi (rtx, rtx);
+extern rtx gen_neon_vmovnv4si (rtx, rtx);
+extern rtx gen_neon_vmovnv2di (rtx, rtx);
+extern rtx gen_neon_vqmovnsv8hi (rtx, rtx);
+extern rtx gen_neon_vqmovnuv8hi (rtx, rtx);
+extern rtx gen_neon_vqmovnsv4si (rtx, rtx);
+extern rtx gen_neon_vqmovnuv4si (rtx, rtx);
+extern rtx gen_neon_vqmovnsv2di (rtx, rtx);
+extern rtx gen_neon_vqmovnuv2di (rtx, rtx);
+extern rtx gen_neon_vqmovunv8hi (rtx, rtx);
+extern rtx gen_neon_vqmovunv4si (rtx, rtx);
+extern rtx gen_neon_vqmovunv2di (rtx, rtx);
+extern rtx gen_neon_vmovlsv8qi (rtx, rtx);
+extern rtx gen_neon_vmovluv8qi (rtx, rtx);
+extern rtx gen_neon_vmovlsv4hi (rtx, rtx);
+extern rtx gen_neon_vmovluv4hi (rtx, rtx);
+extern rtx gen_neon_vmovlsv2si (rtx, rtx);
+extern rtx gen_neon_vmovluv2si (rtx, rtx);
+extern rtx gen_neon_vmul_lanev4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmul_lanev2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmul_lanev2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmul_lanev8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmul_lanev4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmul_lanev4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmul_lanev8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmul_lanev4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmulls_lanev4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmullu_lanev4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmulls_lanev2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmullu_lanev2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmull_lanev4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmull_lanev2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmulh_lanev8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmulh_lanev8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmulh_lanev4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmulh_lanev4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmulh_lanev4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmulh_lanev4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmulh_lanev2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmulh_lanev2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlah_lanev8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlsh_lanev8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlah_lanev4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlsh_lanev4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlah_lanev4hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlsh_lanev4hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlah_lanev2si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmlsh_lanev2si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmla_lanev4hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmla_lanev2si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmla_lanev2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmla_lanev8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmla_lanev4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmla_lanev4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlals_lanev4hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlalu_lanev4hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlals_lanev2si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlalu_lanev2si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmlal_lanev4hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmlal_lanev2si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmls_lanev4hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmls_lanev2si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmls_lanev2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmls_lanev8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmls_lanev4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmls_lanev4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsls_lanev4hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlslu_lanev4hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsls_lanev2si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlslu_lanev2si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmlsl_lanev4hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmlsl_lanev2si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextv4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextv4bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextv8bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextv2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextdi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vextv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrev64v8qi (rtx, rtx);
+extern rtx gen_neon_vrev64v16qi (rtx, rtx);
+extern rtx gen_neon_vrev64v4hi (rtx, rtx);
+extern rtx gen_neon_vrev64v8hi (rtx, rtx);
+extern rtx gen_neon_vrev64v2si (rtx, rtx);
+extern rtx gen_neon_vrev64v4si (rtx, rtx);
+extern rtx gen_neon_vrev64v4hf (rtx, rtx);
+extern rtx gen_neon_vrev64v8hf (rtx, rtx);
+extern rtx gen_neon_vrev64v2sf (rtx, rtx);
+extern rtx gen_neon_vrev64v4sf (rtx, rtx);
+extern rtx gen_neon_vrev64v2di (rtx, rtx);
+extern rtx gen_neon_vrev32v8qi (rtx, rtx);
+extern rtx gen_neon_vrev32v4hi (rtx, rtx);
+extern rtx gen_neon_vrev32v16qi (rtx, rtx);
+extern rtx gen_neon_vrev32v8hi (rtx, rtx);
+extern rtx gen_neon_vrev16v8qi (rtx, rtx);
+extern rtx gen_neon_vrev16v16qi (rtx, rtx);
+extern rtx gen_neon_vbslv8qi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv16qi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv4hi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv8hi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv2si_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv4si_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv4hf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv8hf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv4bf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv8bf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv2sf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv4sf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbsldi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv2di_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vshlsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vshluv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshlsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshluv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vshlsv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vshluv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshlsv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshluv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vshlsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vshluv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshlsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshluv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vshlsv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vshluv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshlsv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshluv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vshlsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vshluv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vrshlsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vrshluv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vshlsv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vshluv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vrshlsv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vrshluv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vshlsdi (rtx, rtx, rtx);
+extern rtx gen_neon_vshludi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshlsdi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshludi (rtx, rtx, rtx);
+extern rtx gen_neon_vshlsv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vshluv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vrshlsv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vrshluv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshluv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshlsv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshluv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlsv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshluv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshlsv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshluv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshluv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshlsv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshluv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlsv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshluv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshlsv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshluv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqshluv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshlsv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshluv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlsv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqshluv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshlsv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshluv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlsdi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshludi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshlsdi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshludi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlsv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqshluv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshlsv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshluv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vshrs_nv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vshru_nv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshrs_nv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshru_nv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vshrs_nv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vshru_nv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshrs_nv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshru_nv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vshrs_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vshru_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshrs_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshru_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vshrs_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vshru_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshrs_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshru_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vshrs_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vshru_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vrshrs_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vrshru_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vshrs_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vshru_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vrshrs_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vrshru_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vshrs_ndi (rtx, rtx, rtx);
+extern rtx gen_neon_vshru_ndi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshrs_ndi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshru_ndi (rtx, rtx, rtx);
+extern rtx gen_neon_vshrs_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vshru_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vrshrs_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vrshru_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vshrn_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vrshrn_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vshrn_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vrshrn_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vshrn_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vrshrn_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqshrns_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshrnu_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshrns_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshrnu_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshrns_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqshrnu_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshrns_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshrnu_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqshrns_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqshrnu_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshrns_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshrnu_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqshrun_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshrun_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshrun_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshrun_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqshrun_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqrshrun_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vshl_nv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vshl_nv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vshl_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vshl_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vshl_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vshl_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vshl_ndi (rtx, rtx, rtx);
+extern rtx gen_neon_vshl_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_s_nv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_u_nv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_s_nv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_u_nv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_s_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_u_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_s_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_u_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_s_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_u_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_s_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_u_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_s_ndi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_u_ndi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_s_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqshl_u_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlu_nv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlu_nv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlu_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlu_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlu_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlu_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlu_ndi (rtx, rtx, rtx);
+extern rtx gen_neon_vqshlu_nv2di (rtx, rtx, rtx);
+extern rtx gen_neon_vshlls_nv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vshllu_nv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vshlls_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vshllu_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vshlls_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vshllu_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vsras_nv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsrau_nv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsras_nv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsrau_nv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsras_nv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsrau_nv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsras_nv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsrau_nv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsras_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsrau_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsras_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsrau_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsras_nv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsrau_nv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsras_nv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsrau_nv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsras_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsrau_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsras_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsrau_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsras_nv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsrau_nv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsras_nv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsrau_nv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsras_ndi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsrau_ndi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsras_ndi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsrau_ndi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsras_nv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsrau_nv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsras_nv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vrsrau_nv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsri_nv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsri_nv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsri_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsri_nv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsri_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsri_nv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsri_ndi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsri_nv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsli_nv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsli_nv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsli_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsli_nv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsli_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsli_nv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsli_ndi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsli_nv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtbl1v8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vtbl2v8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vtbl3v8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vtbl4v8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vtbl1v16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vtbl2v16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vcombinev16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vtbx1v8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtbx2v8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtbx3v8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtbx4v8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1v8qi (rtx, rtx);
+extern rtx gen_neon_vld1v16qi (rtx, rtx);
+extern rtx gen_neon_vld1v4hi (rtx, rtx);
+extern rtx gen_neon_vld1v8hi (rtx, rtx);
+extern rtx gen_neon_vld1v2si (rtx, rtx);
+extern rtx gen_neon_vld1v4si (rtx, rtx);
+extern rtx gen_neon_vld1v4hf (rtx, rtx);
+extern rtx gen_neon_vld1v8hf (rtx, rtx);
+extern rtx gen_neon_vld1v4bf (rtx, rtx);
+extern rtx gen_neon_vld1v8bf (rtx, rtx);
+extern rtx gen_neon_vld1v2sf (rtx, rtx);
+extern rtx gen_neon_vld1v4sf (rtx, rtx);
+extern rtx gen_neon_vld1di (rtx, rtx);
+extern rtx gen_neon_vld1v2di (rtx, rtx);
+extern rtx gen_neon_vld1_lanev8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_lanev4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_lanev4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_lanev4bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_lanev2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_lanev2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_lanedi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_lanev16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_lanev8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_lanev8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_lanev8bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_lanev4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_lanev4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_lanev2di (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld1_dupv8qi (rtx, rtx);
+extern rtx gen_neon_vld1_dupv4hi (rtx, rtx);
+extern rtx gen_neon_vld1_dupv4hf (rtx, rtx);
+extern rtx gen_neon_vld1_dupv4bf (rtx, rtx);
+extern rtx gen_neon_vld1_dupv2si (rtx, rtx);
+extern rtx gen_neon_vld1_dupv2sf (rtx, rtx);
+extern rtx gen_neon_vld1_dupv16qi (rtx, rtx);
+extern rtx gen_neon_vld1_dupv8hi (rtx, rtx);
+extern rtx gen_neon_vld1_dupv8hf (rtx, rtx);
+extern rtx gen_neon_vld1_dupv4si (rtx, rtx);
+extern rtx gen_neon_vld1_dupv4sf (rtx, rtx);
+extern rtx gen_neon_vld1_dupv2di (rtx, rtx);
+extern rtx gen_neon_vst1v8qi (rtx, rtx);
+extern rtx gen_neon_vst1v16qi (rtx, rtx);
+extern rtx gen_neon_vst1v4hi (rtx, rtx);
+extern rtx gen_neon_vst1v8hi (rtx, rtx);
+extern rtx gen_neon_vst1v2si (rtx, rtx);
+extern rtx gen_neon_vst1v4si (rtx, rtx);
+extern rtx gen_neon_vst1v4hf (rtx, rtx);
+extern rtx gen_neon_vst1v8hf (rtx, rtx);
+extern rtx gen_neon_vst1v4bf (rtx, rtx);
+extern rtx gen_neon_vst1v8bf (rtx, rtx);
+extern rtx gen_neon_vst1v2sf (rtx, rtx);
+extern rtx gen_neon_vst1v4sf (rtx, rtx);
+extern rtx gen_neon_vst1di (rtx, rtx);
+extern rtx gen_neon_vst1v2di (rtx, rtx);
+extern rtx gen_neon_vst1_lanev8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vst1_lanev4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vst1_lanev4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vst1_lanev4bf (rtx, rtx, rtx);
+extern rtx gen_neon_vst1_lanev2si (rtx, rtx, rtx);
+extern rtx gen_neon_vst1_lanev2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vst1_lanedi (rtx, rtx, rtx);
+extern rtx gen_neon_vst1_lanev16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vst1_lanev8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vst1_lanev8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vst1_lanev8bf (rtx, rtx, rtx);
+extern rtx gen_neon_vst1_lanev4si (rtx, rtx, rtx);
+extern rtx gen_neon_vst1_lanev4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vst1_lanev2di (rtx, rtx, rtx);
+extern rtx gen_neon_vld2v8qi (rtx, rtx);
+extern rtx gen_neon_vld2v4hi (rtx, rtx);
+extern rtx gen_neon_vld2v4hf (rtx, rtx);
+extern rtx gen_neon_vld2v4bf (rtx, rtx);
+extern rtx gen_neon_vld2v2si (rtx, rtx);
+extern rtx gen_neon_vld2v2sf (rtx, rtx);
+extern rtx gen_neon_vld2di (rtx, rtx);
+extern rtx gen_neon_vld2v16qi (rtx, rtx);
+extern rtx gen_neon_vld2v8hi (rtx, rtx);
+extern rtx gen_neon_vld2v8hf (rtx, rtx);
+extern rtx gen_neon_vld2v8bf (rtx, rtx);
+extern rtx gen_neon_vld2v4si (rtx, rtx);
+extern rtx gen_neon_vld2v4sf (rtx, rtx);
+extern rtx gen_neon_vld2_lanev8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld2_lanev4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld2_lanev4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld2_lanev4bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld2_lanev2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld2_lanev2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld2_lanev8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld2_lanev8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld2_lanev4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld2_lanev4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld2_lanev8bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld2_dupv8qi (rtx, rtx);
+extern rtx gen_neon_vld2_dupv4hi (rtx, rtx);
+extern rtx gen_neon_vld2_dupv4hf (rtx, rtx);
+extern rtx gen_neon_vld2_dupv4bf (rtx, rtx);
+extern rtx gen_neon_vld2_dupv2si (rtx, rtx);
+extern rtx gen_neon_vld2_dupv2sf (rtx, rtx);
+extern rtx gen_neon_vld2_dupdi (rtx, rtx);
+extern rtx gen_neon_vld2_dupv8bf (rtx, rtx);
+extern rtx gen_neon_vst2v8qi (rtx, rtx);
+extern rtx gen_neon_vst2v4hi (rtx, rtx);
+extern rtx gen_neon_vst2v4hf (rtx, rtx);
+extern rtx gen_neon_vst2v4bf (rtx, rtx);
+extern rtx gen_neon_vst2v2si (rtx, rtx);
+extern rtx gen_neon_vst2v2sf (rtx, rtx);
+extern rtx gen_neon_vst2di (rtx, rtx);
+extern rtx gen_neon_vst2v16qi (rtx, rtx);
+extern rtx gen_neon_vst2v8hi (rtx, rtx);
+extern rtx gen_neon_vst2v8hf (rtx, rtx);
+extern rtx gen_neon_vst2v8bf (rtx, rtx);
+extern rtx gen_neon_vst2v4si (rtx, rtx);
+extern rtx gen_neon_vst2v4sf (rtx, rtx);
+extern rtx gen_neon_vst2_lanev8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vst2_lanev4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vst2_lanev4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vst2_lanev4bf (rtx, rtx, rtx);
+extern rtx gen_neon_vst2_lanev2si (rtx, rtx, rtx);
+extern rtx gen_neon_vst2_lanev2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vst2_lanev8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vst2_lanev8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vst2_lanev4si (rtx, rtx, rtx);
+extern rtx gen_neon_vst2_lanev4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vst2_lanev8bf (rtx, rtx, rtx);
+extern rtx gen_neon_vld3v8qi (rtx, rtx);
+extern rtx gen_neon_vld3v4hi (rtx, rtx);
+extern rtx gen_neon_vld3v4hf (rtx, rtx);
+extern rtx gen_neon_vld3v4bf (rtx, rtx);
+extern rtx gen_neon_vld3v2si (rtx, rtx);
+extern rtx gen_neon_vld3v2sf (rtx, rtx);
+extern rtx gen_neon_vld3di (rtx, rtx);
+extern rtx gen_neon_vld3qav16qi (rtx, rtx);
+extern rtx gen_neon_vld3qav8hi (rtx, rtx);
+extern rtx gen_neon_vld3qav8hf (rtx, rtx);
+extern rtx gen_neon_vld3qav8bf (rtx, rtx);
+extern rtx gen_neon_vld3qav4si (rtx, rtx);
+extern rtx gen_neon_vld3qav4sf (rtx, rtx);
+extern rtx gen_neon_vld3qbv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vld3qbv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vld3qbv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vld3qbv8bf (rtx, rtx, rtx);
+extern rtx gen_neon_vld3qbv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vld3qbv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vld3_lanev8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld3_lanev4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld3_lanev4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld3_lanev4bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld3_lanev2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld3_lanev2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld3_lanev8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld3_lanev8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld3_lanev4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld3_lanev4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld3_lanev8bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld3_dupv8qi (rtx, rtx);
+extern rtx gen_neon_vld3_dupv4hi (rtx, rtx);
+extern rtx gen_neon_vld3_dupv4hf (rtx, rtx);
+extern rtx gen_neon_vld3_dupv4bf (rtx, rtx);
+extern rtx gen_neon_vld3_dupv2si (rtx, rtx);
+extern rtx gen_neon_vld3_dupv2sf (rtx, rtx);
+extern rtx gen_neon_vld3_dupdi (rtx, rtx);
+extern rtx gen_neon_vld3_dupv8bf (rtx, rtx);
+extern rtx gen_neon_vst3v8qi (rtx, rtx);
+extern rtx gen_neon_vst3v4hi (rtx, rtx);
+extern rtx gen_neon_vst3v4hf (rtx, rtx);
+extern rtx gen_neon_vst3v4bf (rtx, rtx);
+extern rtx gen_neon_vst3v2si (rtx, rtx);
+extern rtx gen_neon_vst3v2sf (rtx, rtx);
+extern rtx gen_neon_vst3di (rtx, rtx);
+extern rtx gen_neon_vst3qav16qi (rtx, rtx);
+extern rtx gen_neon_vst3qav8hi (rtx, rtx);
+extern rtx gen_neon_vst3qav8hf (rtx, rtx);
+extern rtx gen_neon_vst3qav8bf (rtx, rtx);
+extern rtx gen_neon_vst3qav4si (rtx, rtx);
+extern rtx gen_neon_vst3qav4sf (rtx, rtx);
+extern rtx gen_neon_vst3qbv16qi (rtx, rtx);
+extern rtx gen_neon_vst3qbv8hi (rtx, rtx);
+extern rtx gen_neon_vst3qbv8hf (rtx, rtx);
+extern rtx gen_neon_vst3qbv8bf (rtx, rtx);
+extern rtx gen_neon_vst3qbv4si (rtx, rtx);
+extern rtx gen_neon_vst3qbv4sf (rtx, rtx);
+extern rtx gen_neon_vst3_lanev8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vst3_lanev4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vst3_lanev4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vst3_lanev4bf (rtx, rtx, rtx);
+extern rtx gen_neon_vst3_lanev2si (rtx, rtx, rtx);
+extern rtx gen_neon_vst3_lanev2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vst3_lanev8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vst3_lanev8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vst3_lanev4si (rtx, rtx, rtx);
+extern rtx gen_neon_vst3_lanev4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vst3_lanev8bf (rtx, rtx, rtx);
+extern rtx gen_neon_vld4v8qi (rtx, rtx);
+extern rtx gen_neon_vld4v4hi (rtx, rtx);
+extern rtx gen_neon_vld4v4hf (rtx, rtx);
+extern rtx gen_neon_vld4v4bf (rtx, rtx);
+extern rtx gen_neon_vld4v2si (rtx, rtx);
+extern rtx gen_neon_vld4v2sf (rtx, rtx);
+extern rtx gen_neon_vld4di (rtx, rtx);
+extern rtx gen_neon_vld4qav16qi (rtx, rtx);
+extern rtx gen_neon_vld4qav8hi (rtx, rtx);
+extern rtx gen_neon_vld4qav8hf (rtx, rtx);
+extern rtx gen_neon_vld4qav8bf (rtx, rtx);
+extern rtx gen_neon_vld4qav4si (rtx, rtx);
+extern rtx gen_neon_vld4qav4sf (rtx, rtx);
+extern rtx gen_neon_vld4qbv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vld4qbv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vld4qbv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vld4qbv8bf (rtx, rtx, rtx);
+extern rtx gen_neon_vld4qbv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vld4qbv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vld4_lanev8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld4_lanev4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld4_lanev4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld4_lanev4bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld4_lanev2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld4_lanev2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld4_lanev8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld4_lanev8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld4_lanev4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld4_lanev4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld4_lanev8bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vld4_dupv8qi (rtx, rtx);
+extern rtx gen_neon_vld4_dupv4hi (rtx, rtx);
+extern rtx gen_neon_vld4_dupv4hf (rtx, rtx);
+extern rtx gen_neon_vld4_dupv4bf (rtx, rtx);
+extern rtx gen_neon_vld4_dupv2si (rtx, rtx);
+extern rtx gen_neon_vld4_dupv2sf (rtx, rtx);
+extern rtx gen_neon_vld4_dupdi (rtx, rtx);
+extern rtx gen_neon_vld4_dupv8bf (rtx, rtx);
+extern rtx gen_neon_vst4v8qi (rtx, rtx);
+extern rtx gen_neon_vst4v4hi (rtx, rtx);
+extern rtx gen_neon_vst4v4hf (rtx, rtx);
+extern rtx gen_neon_vst4v4bf (rtx, rtx);
+extern rtx gen_neon_vst4v2si (rtx, rtx);
+extern rtx gen_neon_vst4v2sf (rtx, rtx);
+extern rtx gen_neon_vst4di (rtx, rtx);
+extern rtx gen_neon_vst4qav16qi (rtx, rtx);
+extern rtx gen_neon_vst4qav8hi (rtx, rtx);
+extern rtx gen_neon_vst4qav8hf (rtx, rtx);
+extern rtx gen_neon_vst4qav8bf (rtx, rtx);
+extern rtx gen_neon_vst4qav4si (rtx, rtx);
+extern rtx gen_neon_vst4qav4sf (rtx, rtx);
+extern rtx gen_neon_vst4qbv16qi (rtx, rtx);
+extern rtx gen_neon_vst4qbv8hi (rtx, rtx);
+extern rtx gen_neon_vst4qbv8hf (rtx, rtx);
+extern rtx gen_neon_vst4qbv8bf (rtx, rtx);
+extern rtx gen_neon_vst4qbv4si (rtx, rtx);
+extern rtx gen_neon_vst4qbv4sf (rtx, rtx);
+extern rtx gen_neon_vst4_lanev8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vst4_lanev4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vst4_lanev4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vst4_lanev4bf (rtx, rtx, rtx);
+extern rtx gen_neon_vst4_lanev2si (rtx, rtx, rtx);
+extern rtx gen_neon_vst4_lanev2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vst4_lanev8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vst4_lanev8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vst4_lanev4si (rtx, rtx, rtx);
+extern rtx gen_neon_vst4_lanev4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vst4_lanev8bf (rtx, rtx, rtx);
+extern rtx gen_neon_vec_unpacks_lo_v16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_unpacku_lo_v16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_unpacks_lo_v8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_unpacku_lo_v8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_unpacks_lo_v4si (rtx, rtx, rtx);
+extern rtx gen_neon_vec_unpacku_lo_v4si (rtx, rtx, rtx);
+extern rtx gen_neon_vec_unpacks_hi_v16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_unpacku_hi_v16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_unpacks_hi_v8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_unpacku_hi_v8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_unpacks_hi_v4si (rtx, rtx, rtx);
+extern rtx gen_neon_vec_unpacku_hi_v4si (rtx, rtx, rtx);
+extern rtx gen_neon_vec_smult_lo_v16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vec_umult_lo_v16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vec_smult_lo_v8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vec_umult_lo_v8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vec_smult_lo_v4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vec_umult_lo_v4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vec_smult_hi_v16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vec_umult_hi_v16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vec_smult_hi_v8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vec_umult_hi_v8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vec_smult_hi_v4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vec_umult_hi_v4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vec_sshiftl_v8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_ushiftl_v8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_sshiftl_v4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_ushiftl_v4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_sshiftl_v2si (rtx, rtx, rtx);
+extern rtx gen_neon_vec_ushiftl_v2si (rtx, rtx, rtx);
+extern rtx gen_neon_unpacks_v8qi (rtx, rtx);
+extern rtx gen_neon_unpacku_v8qi (rtx, rtx);
+extern rtx gen_neon_unpacks_v4hi (rtx, rtx);
+extern rtx gen_neon_unpacku_v4hi (rtx, rtx);
+extern rtx gen_neon_unpacks_v2si (rtx, rtx);
+extern rtx gen_neon_unpacku_v2si (rtx, rtx);
+extern rtx gen_neon_vec_smult_v8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_umult_v8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_smult_v4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_umult_v4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vec_smult_v2si (rtx, rtx, rtx);
+extern rtx gen_neon_vec_umult_v2si (rtx, rtx, rtx);
+extern rtx gen_vec_pack_trunc_v8hi (rtx, rtx, rtx);
+extern rtx gen_vec_pack_trunc_v4si (rtx, rtx, rtx);
+extern rtx gen_vec_pack_trunc_v2di (rtx, rtx, rtx);
+extern rtx gen_neon_vec_pack_trunc_v8hi (rtx, rtx);
+extern rtx gen_neon_vec_pack_trunc_v4si (rtx, rtx);
+extern rtx gen_neon_vec_pack_trunc_v2di (rtx, rtx);
+extern rtx gen_neon_vabdv4hf_2 (rtx, rtx, rtx);
+extern rtx gen_neon_vabdv8hf_2 (rtx, rtx, rtx);
+extern rtx gen_neon_vabdv2sf_2 (rtx, rtx, rtx);
+extern rtx gen_neon_vabdv4sf_2 (rtx, rtx, rtx);
+extern rtx gen_neon_vabdv4hf_3 (rtx, rtx, rtx);
+extern rtx gen_neon_vabdv8hf_3 (rtx, rtx, rtx);
+extern rtx gen_neon_vabdv2sf_3 (rtx, rtx, rtx);
+extern rtx gen_neon_vabdv4sf_3 (rtx, rtx, rtx);
+extern rtx gen_neon_smmlav16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_ummlav16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_usmmlav16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbfdotv2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbfdotv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbfdot_lanev4bfv2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbfdot_lanev4bfv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbfdot_lanev8bfv2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbfdot_lanev8bfv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbfcvtv4sfv4bf (rtx, rtx);
+extern rtx gen_neon_vbfcvtv4sfv8bf (rtx, rtx);
+extern rtx gen_neon_vbfcvtv4sf_highv8bf (rtx, rtx, rtx);
+extern rtx gen_neon_vbfcvtsf (rtx, rtx);
+extern rtx gen_neon_vbfcvtv4bf (rtx, rtx);
+extern rtx gen_neon_vbfcvtv8bf (rtx, rtx);
+extern rtx gen_neon_vbfcvt_highv8bf (rtx, rtx);
+extern rtx gen_neon_vbfcvtbf_cvtmodev2si (rtx, rtx);
+extern rtx gen_neon_vbfcvtbf_cvtmodesf (rtx, rtx);
+extern rtx gen_neon_vmmlav8bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmabv8bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmatv8bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmab_lanev8bf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmat_lanev8bf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_crypto_aesmc (rtx, rtx);
+extern rtx gen_crypto_aesimc (rtx, rtx);
+extern rtx gen_aes_op_protect (rtx, rtx);
+extern rtx gen_aes_op_protect_neon_vld1v16qi (rtx, rtx);
+extern rtx gen_crypto_sha1su1 (rtx, rtx, rtx);
+extern rtx gen_crypto_sha256su0 (rtx, rtx, rtx);
+extern rtx gen_crypto_sha1su0 (rtx, rtx, rtx, rtx);
+extern rtx gen_crypto_sha256h (rtx, rtx, rtx, rtx);
+extern rtx gen_crypto_sha256h2 (rtx, rtx, rtx, rtx);
+extern rtx gen_crypto_sha256su1 (rtx, rtx, rtx, rtx);
+extern rtx gen_crypto_sha1h_lb (rtx, rtx, rtx);
+extern rtx gen_crypto_vmullp64 (rtx, rtx, rtx);
+extern rtx gen_crypto_sha1c_lb (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_crypto_sha1m_lb (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_crypto_sha1p_lb (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_loadqi (rtx, rtx, rtx);
+extern rtx gen_atomic_loadhi (rtx, rtx, rtx);
+extern rtx gen_atomic_loadsi (rtx, rtx, rtx);
+extern rtx gen_atomic_storeqi (rtx, rtx, rtx);
+extern rtx gen_atomic_storehi (rtx, rtx, rtx);
+extern rtx gen_atomic_storesi (rtx, rtx, rtx);
+extern rtx gen_arm_atomic_loaddi2_ldrd (rtx, rtx);
+extern rtx gen_atomic_compare_and_swap32qi_1 (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_compare_and_swap32hi_1 (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_compare_and_swapt1qi_1 (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_compare_and_swapt1hi_1 (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_compare_and_swap32si_1 (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_compare_and_swap32di_1 (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_compare_and_swapt1si_1 (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_compare_and_swapt1di_1 (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_exchangeqi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_exchangehi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_exchangesi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_exchangedi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_addqi (rtx, rtx, rtx);
+extern rtx gen_atomic_subqi (rtx, rtx, rtx);
+extern rtx gen_atomic_orqi (rtx, rtx, rtx);
+extern rtx gen_atomic_xorqi (rtx, rtx, rtx);
+extern rtx gen_atomic_andqi (rtx, rtx, rtx);
+extern rtx gen_atomic_addhi (rtx, rtx, rtx);
+extern rtx gen_atomic_subhi (rtx, rtx, rtx);
+extern rtx gen_atomic_orhi (rtx, rtx, rtx);
+extern rtx gen_atomic_xorhi (rtx, rtx, rtx);
+extern rtx gen_atomic_andhi (rtx, rtx, rtx);
+extern rtx gen_atomic_addsi (rtx, rtx, rtx);
+extern rtx gen_atomic_subsi (rtx, rtx, rtx);
+extern rtx gen_atomic_orsi (rtx, rtx, rtx);
+extern rtx gen_atomic_xorsi (rtx, rtx, rtx);
+extern rtx gen_atomic_andsi (rtx, rtx, rtx);
+extern rtx gen_atomic_adddi (rtx, rtx, rtx);
+extern rtx gen_atomic_subdi (rtx, rtx, rtx);
+extern rtx gen_atomic_ordi (rtx, rtx, rtx);
+extern rtx gen_atomic_xordi (rtx, rtx, rtx);
+extern rtx gen_atomic_anddi (rtx, rtx, rtx);
+extern rtx gen_atomic_nandqi (rtx, rtx, rtx);
+extern rtx gen_atomic_nandhi (rtx, rtx, rtx);
+extern rtx gen_atomic_nandsi (rtx, rtx, rtx);
+extern rtx gen_atomic_nanddi (rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_addqi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_subqi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_orqi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_xorqi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_andqi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_addhi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_subhi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_orhi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_xorhi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_andhi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_addsi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_subsi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_orsi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_xorsi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_andsi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_adddi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_subdi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_ordi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_xordi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_anddi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_nandqi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_nandhi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_nandsi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_fetch_nanddi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_add_fetchqi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_sub_fetchqi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_or_fetchqi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_xor_fetchqi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_and_fetchqi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_add_fetchhi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_sub_fetchhi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_or_fetchhi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_xor_fetchhi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_and_fetchhi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_add_fetchsi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_sub_fetchsi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_or_fetchsi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_xor_fetchsi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_and_fetchsi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_add_fetchdi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_sub_fetchdi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_or_fetchdi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_xor_fetchdi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_and_fetchdi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_nand_fetchqi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_nand_fetchhi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_nand_fetchsi (rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_nand_fetchdi (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_load_exclusiveqi (rtx, rtx);
+extern rtx gen_arm_load_exclusivehi (rtx, rtx);
+extern rtx gen_arm_load_acquire_exclusiveqi (rtx, rtx);
+extern rtx gen_arm_load_acquire_exclusivehi (rtx, rtx);
+extern rtx gen_arm_load_exclusivesi (rtx, rtx);
+extern rtx gen_arm_load_acquire_exclusivesi (rtx, rtx);
+extern rtx gen_arm_load_exclusivedi (rtx, rtx);
+extern rtx gen_arm_load_acquire_exclusivedi (rtx, rtx);
+extern rtx gen_arm_store_exclusiveqi (rtx, rtx, rtx);
+extern rtx gen_arm_store_exclusivehi (rtx, rtx, rtx);
+extern rtx gen_arm_store_exclusivesi (rtx, rtx, rtx);
+extern rtx gen_arm_store_exclusivedi (rtx, rtx, rtx);
+extern rtx gen_arm_store_release_exclusivedi (rtx, rtx, rtx);
+extern rtx gen_arm_store_release_exclusiveqi (rtx, rtx, rtx);
+extern rtx gen_arm_store_release_exclusivehi (rtx, rtx, rtx);
+extern rtx gen_arm_store_release_exclusivesi (rtx, rtx, rtx);
+extern rtx gen_addqq3 (rtx, rtx, rtx);
+extern rtx gen_addhq3 (rtx, rtx, rtx);
+extern rtx gen_addsq3 (rtx, rtx, rtx);
+extern rtx gen_adduqq3 (rtx, rtx, rtx);
+extern rtx gen_adduhq3 (rtx, rtx, rtx);
+extern rtx gen_addusq3 (rtx, rtx, rtx);
+extern rtx gen_addha3 (rtx, rtx, rtx);
+extern rtx gen_addsa3 (rtx, rtx, rtx);
+extern rtx gen_adduha3 (rtx, rtx, rtx);
+extern rtx gen_addusa3 (rtx, rtx, rtx);
+extern rtx gen_usaddv4uqq3 (rtx, rtx, rtx);
+extern rtx gen_usaddv2uhq3 (rtx, rtx, rtx);
+extern rtx gen_usadduqq3 (rtx, rtx, rtx);
+extern rtx gen_usadduhq3 (rtx, rtx, rtx);
+extern rtx gen_usaddv2uha3 (rtx, rtx, rtx);
+extern rtx gen_usadduha3 (rtx, rtx, rtx);
+extern rtx gen_subqq3 (rtx, rtx, rtx);
+extern rtx gen_subhq3 (rtx, rtx, rtx);
+extern rtx gen_subsq3 (rtx, rtx, rtx);
+extern rtx gen_subuqq3 (rtx, rtx, rtx);
+extern rtx gen_subuhq3 (rtx, rtx, rtx);
+extern rtx gen_subusq3 (rtx, rtx, rtx);
+extern rtx gen_subha3 (rtx, rtx, rtx);
+extern rtx gen_subsa3 (rtx, rtx, rtx);
+extern rtx gen_subuha3 (rtx, rtx, rtx);
+extern rtx gen_subusa3 (rtx, rtx, rtx);
+extern rtx gen_ussubv4uqq3 (rtx, rtx, rtx);
+extern rtx gen_ussubv2uhq3 (rtx, rtx, rtx);
+extern rtx gen_ussubuqq3 (rtx, rtx, rtx);
+extern rtx gen_ussubuhq3 (rtx, rtx, rtx);
+extern rtx gen_ussubv2uha3 (rtx, rtx, rtx);
+extern rtx gen_ussubuha3 (rtx, rtx, rtx);
+extern rtx gen_arm_ssatsihi_shift (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_usatsihi (rtx, rtx);
+extern rtx gen_mve_vst4qv16qi (rtx, rtx);
+extern rtx gen_mve_vst4qv8hi (rtx, rtx);
+extern rtx gen_mve_vst4qv4si (rtx, rtx);
+extern rtx gen_mve_vst4qv8hf (rtx, rtx);
+extern rtx gen_mve_vst4qv4sf (rtx, rtx);
+extern rtx gen_mve_vrndq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrndq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrndxq_fv8hf (rtx, rtx);
+extern rtx gen_mve_vrndxq_fv4sf (rtx, rtx);
+extern rtx gen_mve_vrndq_fv8hf (rtx, rtx);
+extern rtx gen_mve_vrndq_fv4sf (rtx, rtx);
+extern rtx gen_mve_vrndpq_fv8hf (rtx, rtx);
+extern rtx gen_mve_vrndpq_fv4sf (rtx, rtx);
+extern rtx gen_mve_vrndnq_fv8hf (rtx, rtx);
+extern rtx gen_mve_vrndnq_fv4sf (rtx, rtx);
+extern rtx gen_mve_vrndmq_fv8hf (rtx, rtx);
+extern rtx gen_mve_vrndmq_fv4sf (rtx, rtx);
+extern rtx gen_mve_vrndaq_fv8hf (rtx, rtx);
+extern rtx gen_mve_vrndaq_fv4sf (rtx, rtx);
+extern rtx gen_mve_vrev64q_fv8hf (rtx, rtx);
+extern rtx gen_mve_vrev64q_fv4sf (rtx, rtx);
+extern rtx gen_mve_vnegq_fv8hf (rtx, rtx);
+extern rtx gen_mve_vnegq_fv4sf (rtx, rtx);
+extern rtx gen_mve_vdupq_n_fv8hf (rtx, rtx);
+extern rtx gen_mve_vdupq_n_fv4sf (rtx, rtx);
+extern rtx gen_mve_vabsq_fv8hf (rtx, rtx);
+extern rtx gen_mve_vabsq_fv4sf (rtx, rtx);
+extern rtx gen_mve_vrev32q_fv8hf (rtx, rtx);
+extern rtx gen_mve_vcvttq_f32_f16v4sf (rtx, rtx);
+extern rtx gen_mve_vcvtbq_f32_f16v4sf (rtx, rtx);
+extern rtx gen_mve_vcvtq_to_f_sv8hf (rtx, rtx);
+extern rtx gen_mve_vcvtq_to_f_uv8hf (rtx, rtx);
+extern rtx gen_mve_vcvtq_to_f_sv4sf (rtx, rtx);
+extern rtx gen_mve_vcvtq_to_f_uv4sf (rtx, rtx);
+extern rtx gen_mve_vrev64q_sv16qi (rtx, rtx);
+extern rtx gen_mve_vrev64q_uv16qi (rtx, rtx);
+extern rtx gen_mve_vrev64q_sv8hi (rtx, rtx);
+extern rtx gen_mve_vrev64q_uv8hi (rtx, rtx);
+extern rtx gen_mve_vrev64q_sv4si (rtx, rtx);
+extern rtx gen_mve_vrev64q_uv4si (rtx, rtx);
+extern rtx gen_mve_vcvtq_from_f_sv8hi (rtx, rtx);
+extern rtx gen_mve_vcvtq_from_f_uv8hi (rtx, rtx);
+extern rtx gen_mve_vcvtq_from_f_sv4si (rtx, rtx);
+extern rtx gen_mve_vcvtq_from_f_uv4si (rtx, rtx);
+extern rtx gen_mve_vqnegq_sv16qi (rtx, rtx);
+extern rtx gen_mve_vqnegq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vqnegq_sv4si (rtx, rtx);
+extern rtx gen_mve_vqabsq_sv16qi (rtx, rtx);
+extern rtx gen_mve_vqabsq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vqabsq_sv4si (rtx, rtx);
+extern rtx gen_mve_vnegq_sv16qi (rtx, rtx);
+extern rtx gen_mve_vnegq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vnegq_sv4si (rtx, rtx);
+extern rtx gen_mve_vmvnq_uv16qi (rtx, rtx);
+extern rtx gen_mve_vmvnq_uv8hi (rtx, rtx);
+extern rtx gen_mve_vmvnq_uv4si (rtx, rtx);
+extern rtx gen_mve_vdupq_n_uv16qi (rtx, rtx);
+extern rtx gen_mve_vdupq_n_sv16qi (rtx, rtx);
+extern rtx gen_mve_vdupq_n_uv8hi (rtx, rtx);
+extern rtx gen_mve_vdupq_n_sv8hi (rtx, rtx);
+extern rtx gen_mve_vdupq_n_uv4si (rtx, rtx);
+extern rtx gen_mve_vdupq_n_sv4si (rtx, rtx);
+extern rtx gen_mve_vclzq_sv16qi (rtx, rtx);
+extern rtx gen_mve_vclzq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vclzq_sv4si (rtx, rtx);
+extern rtx gen_mve_vclsq_sv16qi (rtx, rtx);
+extern rtx gen_mve_vclsq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vclsq_sv4si (rtx, rtx);
+extern rtx gen_mve_vaddvq_uv16qi (rtx, rtx);
+extern rtx gen_mve_vaddvq_sv16qi (rtx, rtx);
+extern rtx gen_mve_vaddvq_uv8hi (rtx, rtx);
+extern rtx gen_mve_vaddvq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vaddvq_uv4si (rtx, rtx);
+extern rtx gen_mve_vaddvq_sv4si (rtx, rtx);
+extern rtx gen_mve_vabsq_sv16qi (rtx, rtx);
+extern rtx gen_mve_vabsq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vabsq_sv4si (rtx, rtx);
+extern rtx gen_mve_vrev32q_uv16qi (rtx, rtx);
+extern rtx gen_mve_vrev32q_sv16qi (rtx, rtx);
+extern rtx gen_mve_vrev32q_uv8hi (rtx, rtx);
+extern rtx gen_mve_vrev32q_sv8hi (rtx, rtx);
+extern rtx gen_mve_vmovltq_uv16qi (rtx, rtx);
+extern rtx gen_mve_vmovltq_sv16qi (rtx, rtx);
+extern rtx gen_mve_vmovltq_uv8hi (rtx, rtx);
+extern rtx gen_mve_vmovltq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vmovlbq_sv16qi (rtx, rtx);
+extern rtx gen_mve_vmovlbq_uv16qi (rtx, rtx);
+extern rtx gen_mve_vmovlbq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vmovlbq_uv8hi (rtx, rtx);
+extern rtx gen_mve_vcvtpq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vcvtpq_uv8hi (rtx, rtx);
+extern rtx gen_mve_vcvtpq_sv4si (rtx, rtx);
+extern rtx gen_mve_vcvtpq_uv4si (rtx, rtx);
+extern rtx gen_mve_vcvtnq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vcvtnq_uv8hi (rtx, rtx);
+extern rtx gen_mve_vcvtnq_sv4si (rtx, rtx);
+extern rtx gen_mve_vcvtnq_uv4si (rtx, rtx);
+extern rtx gen_mve_vcvtmq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vcvtmq_uv8hi (rtx, rtx);
+extern rtx gen_mve_vcvtmq_sv4si (rtx, rtx);
+extern rtx gen_mve_vcvtmq_uv4si (rtx, rtx);
+extern rtx gen_mve_vcvtaq_uv8hi (rtx, rtx);
+extern rtx gen_mve_vcvtaq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vcvtaq_uv4si (rtx, rtx);
+extern rtx gen_mve_vcvtaq_sv4si (rtx, rtx);
+extern rtx gen_mve_vmvnq_n_uv8hi (rtx, rtx);
+extern rtx gen_mve_vmvnq_n_sv8hi (rtx, rtx);
+extern rtx gen_mve_vmvnq_n_uv4si (rtx, rtx);
+extern rtx gen_mve_vmvnq_n_sv4si (rtx, rtx);
+extern rtx gen_mve_vrev16q_uv16qi (rtx, rtx);
+extern rtx gen_mve_vrev16q_sv16qi (rtx, rtx);
+extern rtx gen_mve_vaddlvq_uv4si (rtx, rtx);
+extern rtx gen_mve_vaddlvq_sv4si (rtx, rtx);
+extern rtx gen_mve_vctp8qv16bi (rtx, rtx);
+extern rtx gen_mve_vctp16qv8bi (rtx, rtx);
+extern rtx gen_mve_vctp32qv4bi (rtx, rtx);
+extern rtx gen_mve_vctp64qv2qi (rtx, rtx);
+extern rtx gen_mve_vpnotv16bi (rtx, rtx);
+extern rtx gen_mve_vsubq_n_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_n_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_n_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_n_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_n_to_f_sv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_n_to_f_uv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_n_to_f_sv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_n_to_f_uv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcreateq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcreateq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcreateq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcreateq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcreateq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcreateq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcreateq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcreateq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcreateq_uv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vcreateq_sv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_n_sv16qi_imm (rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_n_sv8hi_imm (rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_n_sv4si_imm (rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_n_uv16qi_imm (rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_n_uv8hi_imm (rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_n_uv4si_imm (rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_n_from_f_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_n_from_f_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_n_from_f_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_n_from_f_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vaddlvq_p_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vaddlvq_p_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpcsq_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmphiq_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpcsq_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmphiq_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpcsq_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmphiq_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_n_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_n_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpcsq_n_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_n_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmphiq_n_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_n_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_n_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_n_v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_n_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_n_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpcsq_n_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_n_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmphiq_n_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_n_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_n_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_n_v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_n_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_n_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpcsq_n_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_n_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmphiq_n_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_n_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_n_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_n_v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vaddvaq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddvaq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddvaq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddvaq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddvaq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vaddvaq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vaddvq_p_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddvq_p_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddvq_p_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddvq_p_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddvq_p_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vaddvq_p_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vandq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vandq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vandq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot90v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot270v16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot90v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot270v8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot90v4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot270v4si (rtx, rtx, rtx);
+extern rtx gen_mve_veorq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_veorq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_veorq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vhcaddq_rot270_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vhcaddq_rot270_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vhcaddq_rot270_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vhcaddq_rot90_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vhcaddq_rot90_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vhcaddq_rot90_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxaq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxaq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxaq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxavq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxavq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxavq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxvq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxvq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxvq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxvq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxvq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxvq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vminaq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vminaq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vminaq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vminavq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vminavq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vminavq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vminq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vminq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vminq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vminq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vminq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vminq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vminvq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vminvq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vminvq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vminvq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vminvq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vminvq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmladavq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmladavq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmladavq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmladavq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmladavq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmladavq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmladavxq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmladavxq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmladavxq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavxq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavxq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavxq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmulhq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulhq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulhq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulhq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulhq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmulhq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_int_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_int_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_int_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_int_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_int_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_int_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_int_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_int_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_int_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_int_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_int_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_int_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmulqv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulqv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulqv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vornq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vornq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vornq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulhq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulhq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulhq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulhq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulhq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulhq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmulhq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmulhq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmulhq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmulhq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmulhq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmulhq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_r_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_r_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_r_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_r_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_r_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_r_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqshluq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshluq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqshluq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrhaddq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vrhaddq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vrhaddq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vrhaddq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vrhaddq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrhaddq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrmulhq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vrmulhq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vrmulhq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vrmulhq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vrmulhq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrmulhq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrshrq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vrshrq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vrshrq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vrshrq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vrshrq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrshrq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_r_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_r_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_r_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_r_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_r_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_r_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vsubqv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vsubqv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vsubqv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vaddlvaq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vaddlvaq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_n_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_n_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vandq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vandq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot90v8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot270v8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot90v4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot270v4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_n_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_n_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_n_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_n_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_n_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_n_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_n_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_n_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_n_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_n_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_n_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_n_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmulqv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_rot90v8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_rot180v8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_rot270v8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmulqv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_rot90v4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_rot180v4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_rot270v4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vctp8q_mv16bi (rtx, rtx, rtx);
+extern rtx gen_mve_vctp16q_mv8bi (rtx, rtx, rtx);
+extern rtx gen_mve_vctp32q_mv4bi (rtx, rtx, rtx);
+extern rtx gen_mve_vctp64q_mv2qi (rtx, rtx, rtx);
+extern rtx gen_mve_vcvtbq_f16_f32v8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vcvttq_f16_f32v8hf (rtx, rtx, rtx);
+extern rtx gen_mve_veorq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_veorq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmaq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmaq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmavq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmavq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmvq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmvq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vminnmaq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vminnmaq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vminnmavq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vminnmavq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vminnmq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vminnmq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vminnmvq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vminnmvq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavxq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavxq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavxq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavxq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmovnbq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmovnbq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmovnbq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmovnbq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmovntq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmovntq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmovntq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmovntq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_n_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_n_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vornq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vornq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmullbq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmullbq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmullbq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmullbq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulltq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulltq_n_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulltq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulltq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqmovnbq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqmovnbq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqmovnbq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqmovnbq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqmovntq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqmovntq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqmovntq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqmovntq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqmovunbq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqmovunbq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vqmovuntq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vqmovuntq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrmlaldavhxq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrmlsldavhq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrmlsldavhxq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vshllbq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshllbq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshllbq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vshllbq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlltq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlltq_n_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlltq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vshlltq_n_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_poly_pv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_poly_pv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_poly_pv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_poly_pv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vrmlaldavhq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vrmlaldavhq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_m_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_m_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_m_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_m_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtaq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtaq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtaq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtaq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_to_f_sv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_to_f_uv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_to_f_sv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_to_f_uv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrnbq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrnbq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrnbq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrnbq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrunbq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrunbq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlaldavhaq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlaldavhaq_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabavq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabavq_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabavq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabavq_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabavq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabavq_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabsq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabsq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabsq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddvaq_p_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddvaq_p_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddvaq_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddvaq_p_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddvaq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddvaq_p_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vclsq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vclsq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vclsq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vclzq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vclzq_m_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vclzq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vclzq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vclzq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vclzq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpcsq_m_n_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpcsq_m_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpcsq_m_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpcsq_m_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpcsq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpcsq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_n_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_m_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_m_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_m_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_m_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_m_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_m_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmphiq_m_n_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmphiq_m_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmphiq_m_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmphiq_m_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmphiq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmphiq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_m_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_m_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_m_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_m_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_m_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_m_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_n_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdupq_m_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdupq_m_n_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdupq_m_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdupq_m_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdupq_m_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdupq_m_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxaq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxaq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxaq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxavq_p_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxavq_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxavq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxvq_p_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxvq_p_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxvq_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxvq_p_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxvq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxvq_p_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminaq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminaq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminaq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminavq_p_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminavq_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminavq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminvq_p_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminvq_p_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminvq_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminvq_p_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminvq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminvq_p_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaq_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaq_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaq_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavq_p_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavq_p_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavq_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavq_p_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavq_p_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavxq_p_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavxq_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavxq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaq_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaq_n_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlasq_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlasq_n_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlasq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlasq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlasq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlasq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavq_p_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavq_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavxq_p_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavxq_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavxq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmvnq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmvnq_m_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmvnq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmvnq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmvnq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmvnq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vnegq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vnegq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vnegq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vpselq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vpselq_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vpselq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vpselq_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vpselq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vpselq_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vpselq_sv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vpselq_uv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqabsq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqabsq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqabsq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlahq_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlahq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlahq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlashq_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlashq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlashq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqnegq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqnegq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqnegq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmladhq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmladhq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmladhq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmladhxq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmladhxq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmladhxq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlahq_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlahq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlahq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlashq_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlashq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlashq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlsdhq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlsdhq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlsdhq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlsdhxq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlsdhxq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlsdhxq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_m_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_m_n_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_m_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_m_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_m_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_m_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_r_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_r_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_r_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_r_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_r_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_r_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev64q_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev64q_m_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev64q_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev64q_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev64q_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev64q_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_m_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_m_n_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_m_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_m_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_m_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_m_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_r_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_r_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_r_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_r_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_r_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_r_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsliq_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsliq_n_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsliq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsliq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsliq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsliq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsriq_n_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsriq_n_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsriq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsriq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsriq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsriq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlsdhxq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlsdhxq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlsdhxq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlsdhq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlsdhq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlsdhq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmladhxq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmladhxq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmladhxq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmladhq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmladhq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmladhq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavaxq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavaxq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavaxq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavaq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavaq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavaq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaxq_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaxq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaxq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabsq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabsq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddlvaq_p_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddlvaq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaqv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_rot90v8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_rot180v8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_rot270v8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaqv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_rot90v4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_rot180v4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_rot270v4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_n_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpeqq_m_n_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_m_n_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgeq_m_n_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_m_n_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpgtq_m_n_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_m_n_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpleq_m_n_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_m_n_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpltq_m_n_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_n_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmpneq_m_n_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtbq_m_f16_f32v8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtbq_m_f32_f16v4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvttq_m_f16_f32v8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvttq_m_f32_f16v4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdupq_m_n_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdupq_m_n_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmaq_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmaq_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmaq_n_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmaq_n_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmasq_n_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmasq_n_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmsq_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmsq_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmaq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmaq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmavq_p_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmavq_p_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmvq_p_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmvq_p_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminnmaq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminnmaq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminnmavq_p_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminnmavq_p_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminnmvq_p_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminnmvq_p_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavaq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavaq_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavaq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavaq_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavaxq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavaxq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavq_p_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavq_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavq_p_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavxq_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavxq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavaq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavaq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavaxq_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavaxq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavq_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavxq_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavxq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovlbq_m_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovlbq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovlbq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovlbq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovltq_m_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovltq_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovltq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovltq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovnbq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovnbq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovnbq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovnbq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovntq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovntq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovntq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmovntq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmvnq_m_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmvnq_m_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmvnq_m_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmvnq_m_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vnegq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vnegq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_m_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_m_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_m_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_m_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vpselq_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vpselq_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqmovnbq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqmovnbq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqmovnbq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqmovnbq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqmovntq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqmovntq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqmovntq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqmovntq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqmovunbq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqmovunbq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqmovuntq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqmovuntq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrntq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrntq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrntq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrntq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshruntq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshruntq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrnbq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrnbq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrnbq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrnbq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrntq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrntq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrntq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrntq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrunbq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrunbq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshruntq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshruntq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev32q_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev32q_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev32q_m_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev32q_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev32q_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev64q_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev64q_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlaldavhaxq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlaldavhxq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlsldavhaxq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlsldavhq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlsldavhxq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrndaq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrndaq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrndmq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrndmq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrndnq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrndnq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrndpq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrndpq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrndxq_m_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrndxq_m_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrnbq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrnbq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrnbq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrnbq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrntq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrntq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrntq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrntq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrnbq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrnbq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrnbq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrnbq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrntq_n_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrntq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrntq_n_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrntq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtmq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtmq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtmq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtmq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtpq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtpq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtpq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtpq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtnq_m_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtnq_m_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtnq_m_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtnq_m_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_n_from_f_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_n_from_f_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_n_from_f_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_n_from_f_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev16q_m_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrev16q_m_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_from_f_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_from_f_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_from_f_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_from_f_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlaldavhq_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlaldavhq_p_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlsldavhaq_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabavq_p_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabavq_p_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabavq_p_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabavq_p_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabavq_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabavq_p_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshluq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshluq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshluq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsriq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsriq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsriq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsriq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsriq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsriq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_n_to_f_uv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_n_to_f_sv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_n_to_f_uv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcvtq_m_n_to_f_sv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vandq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vandq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vandq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vandq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vandq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vandq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot270_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot270_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot270_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot270_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot270_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot270_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot90_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot90_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot90_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot90_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot90_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot90_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_veorq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_veorq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_veorq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_veorq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_veorq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_veorq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhaddq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhsubq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaq_p_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaq_p_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaq_p_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaq_p_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaq_p_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaq_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlasq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlasq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlasq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlasq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlasq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlasq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulhq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulhq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulhq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulhq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulhq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulhq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_int_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_int_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_int_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_int_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_int_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_int_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_int_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_int_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_int_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_int_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_int_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_int_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vornq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vornq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vornq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vornq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vornq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vornq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqaddq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlahq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlahq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlahq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlashq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlashq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlashq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlahq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlahq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlahq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlashq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlashq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlashq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshlq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshlq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqsubq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrhaddq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrhaddq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrhaddq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrhaddq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrhaddq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrhaddq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmulhq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmulhq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmulhq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmulhq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmulhq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmulhq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_m_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_m_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshlq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsliq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsliq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsliq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsliq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsliq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsliq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhcaddq_rot270_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhcaddq_rot270_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhcaddq_rot270_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhcaddq_rot90_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhcaddq_rot90_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vhcaddq_rot90_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaxq_p_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaxq_p_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmladavaxq_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavaq_p_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavaq_p_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavaq_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavaxq_p_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavaxq_p_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsdavaxq_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmladhq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmladhq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmladhq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmladhxq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmladhxq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmladhxq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlsdhq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlsdhq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlsdhq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlsdhxq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlsdhxq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmlsdhxq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulhq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulhq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulhq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulhq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulhq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulhq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmladhq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmladhq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmladhq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmladhxq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmladhxq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmladhxq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlsdhq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlsdhq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlsdhq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlsdhxq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlsdhxq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmlsdhxq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmulhq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmulhq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmulhq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmulhq_m_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmulhq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrdmulhq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavaq_p_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavaq_p_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavaq_p_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavaq_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavaxq_p_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlaldavaxq_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrnbq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrnbq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrnbq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrnbq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrntq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrntq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrntq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrntq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrnbq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrnbq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrnbq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrnbq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrntq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrntq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrntq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrntq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlaldavhaq_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrnbq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrnbq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrnbq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrnbq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrntq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrntq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrntq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrshrntq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshllbq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshllbq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshllbq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshllbq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlltq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlltq_m_n_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlltq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlltq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrnbq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrnbq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrnbq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrnbq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrntq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrntq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrntq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshrntq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavaq_p_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavaq_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavaxq_p_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmlsldavaxq_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_poly_m_pv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmullbq_poly_m_pv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_poly_m_pv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulltq_poly_m_pv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmullbq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmullbq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmullbq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmullbq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulltq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulltq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulltq_m_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqdmulltq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrunbq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshrunbq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshruntq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqrshruntq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrunbq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshrunbq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshruntq_m_n_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vqshruntq_m_n_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlaldavhaq_p_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlaldavhaxq_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlsldavhaq_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vrmlsldavhaxq_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vabdq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_n_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_m_n_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vandq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vandq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_m_n_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vbrsrq_m_n_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot270_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot270_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot90_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcaddq_rot90_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_rot180_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_rot180_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_rot270_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_rot270_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_rot90_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmlaq_rot90_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_rot180_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_rot180_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_rot270_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_rot270_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_rot90_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vcmulq_rot90_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_veorq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_veorq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmaq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmaq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmaq_m_n_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmaq_m_n_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmasq_m_n_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmasq_m_n_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmsq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vfmsq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmaxnmq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminnmq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vminnmq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_n_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vmulq_m_n_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vornq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vornq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_n_fv8hf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsubq_m_n_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_sv16qi (rtx, rtx);
+extern rtx gen_mve_vstrbq_uv16qi (rtx, rtx);
+extern rtx gen_mve_vstrbq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vstrbq_uv8hi (rtx, rtx);
+extern rtx gen_mve_vstrbq_sv4si (rtx, rtx);
+extern rtx gen_mve_vstrbq_uv4si (rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_sv16qi_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_uv16qi_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_sv8hi_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_uv8hi_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_sv4si_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_uv4si_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_base_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_base_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_gather_offset_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_gather_offset_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_gather_offset_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_gather_offset_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_gather_offset_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_gather_offset_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_sv16qi (rtx, rtx);
+extern rtx gen_mve_vldrbq_uv16qi (rtx, rtx);
+extern rtx gen_mve_vldrbq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vldrbq_uv8hi (rtx, rtx);
+extern rtx gen_mve_vldrbq_sv4si (rtx, rtx);
+extern rtx gen_mve_vldrbq_uv4si (rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_p_sv16qi_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_p_uv16qi_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_p_sv8hi_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_p_uv8hi_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_p_sv4si_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_p_uv4si_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_base_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_base_p_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_p_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_p_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_p_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_p_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_p_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_p_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_gather_offset_z_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_gather_offset_z_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_gather_offset_z_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_gather_offset_z_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_gather_offset_z_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_gather_offset_z_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_z_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_z_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_z_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_z_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_z_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrbq_z_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_z_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_z_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_fv8hf (rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_offset_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_offset_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_offset_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_offset_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_offset_z_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_offset_z_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_offset_z_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_offset_z_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_shifted_offset_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_shifted_offset_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_shifted_offset_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_shifted_offset_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_shifted_offset_z_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_shifted_offset_z_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_shifted_offset_z_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_shifted_offset_z_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vldrhq_uv8hi (rtx, rtx);
+extern rtx gen_mve_vldrhq_sv4si (rtx, rtx);
+extern rtx gen_mve_vldrhq_uv4si (rtx, rtx);
+extern rtx gen_mve_vldrhq_z_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_z_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_z_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_z_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_z_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_fv4sf (rtx, rtx);
+extern rtx gen_mve_vldrwq_sv4si (rtx, rtx);
+extern rtx gen_mve_vldrwq_uv4si (rtx, rtx);
+extern rtx gen_mve_vldrwq_z_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_z_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_z_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_sv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_uv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_z_sv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_z_uv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_offset_sv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_offset_uv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_offset_z_sv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_offset_z_uv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_shifted_offset_sv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_shifted_offset_uv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_shifted_offset_z_sv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_shifted_offset_z_uv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_offset_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_offset_z_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_shifted_offset_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vldrhq_gather_shifted_offset_z_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_z_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_offset_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_offset_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_offset_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_offset_z_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_offset_z_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_offset_z_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_shifted_offset_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_shifted_offset_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_shifted_offset_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_shifted_offset_z_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_shifted_offset_z_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_shifted_offset_z_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_fv8hf (rtx, rtx);
+extern rtx gen_mve_vstrhq_p_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_p_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_p_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_p_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_p_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_p_sv8hi_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_p_uv8hi_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_p_sv4si_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_p_uv4si_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_sv8hi_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_uv8hi_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_sv4si_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_uv4si_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_p_sv8hi_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_p_uv8hi_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_p_sv4si_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_p_uv4si_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_sv8hi_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_uv8hi_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_sv4si_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_uv4si_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vstrhq_uv8hi (rtx, rtx);
+extern rtx gen_mve_vstrhq_sv4si (rtx, rtx);
+extern rtx gen_mve_vstrhq_uv4si (rtx, rtx);
+extern rtx gen_mve_vstrwq_fv4sf (rtx, rtx);
+extern rtx gen_mve_vstrwq_p_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_p_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_p_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_sv4si (rtx, rtx);
+extern rtx gen_mve_vstrwq_uv4si (rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_base_p_sv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_base_p_uv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_base_sv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_base_uv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_offset_p_sv2di_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_offset_p_uv2di_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_offset_sv2di_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_offset_uv2di_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_shifted_offset_p_sv2di_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_shifted_offset_p_uv2di_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_shifted_offset_sv2di_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_shifted_offset_uv2di_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_fv8hf_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_p_fv8hf_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_fv8hf_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_p_fv8hf_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_base_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_base_p_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_offset_fv4sf_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_offset_p_fv4sf_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_offset_p_sv4si_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_offset_p_uv4si_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_offset_sv4si_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_offset_uv4si_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_shifted_offset_fv4sf_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_shifted_offset_p_fv4sf_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_shifted_offset_p_sv4si_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_shifted_offset_p_uv4si_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_shifted_offset_sv4si_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_shifted_offset_uv4si_insn (rtx, rtx, rtx);
+extern rtx gen_mve_vaddqv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddqv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vaddqv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vaddq_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vidupq_uv16qi_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vidupq_uv8hi_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vidupq_uv4si_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vidupq_m_wb_uv16qi_insn (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vidupq_m_wb_uv8hi_insn (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vidupq_m_wb_uv4si_insn (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vddupq_uv16qi_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vddupq_uv8hi_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vddupq_uv4si_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vddupq_m_wb_uv16qi_insn (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vddupq_m_wb_uv8hi_insn (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vddupq_m_wb_uv4si_insn (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_wb_uv16qi_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_wb_uv8hi_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_wb_uv4si_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_m_wb_uv16qi_insn (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_m_wb_uv8hi_insn (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_m_wb_uv4si_insn (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_wb_uv16qi_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_wb_uv8hi_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_wb_uv4si_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_m_wb_uv16qi_insn (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_m_wb_uv8hi_insn (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_m_wb_uv4si_insn (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_base_wb_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_base_wb_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_base_wb_p_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_base_wb_p_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_base_wb_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_base_wb_p_fv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_base_wb_sv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_base_wb_uv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_base_wb_p_sv2di (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_base_wb_p_uv2di (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_wb_sv4si_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_wb_uv4si_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_wb_z_sv4si_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_wb_z_uv4si_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_wb_fv4sf_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_wb_z_fv4sf_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_wb_sv2di_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_wb_uv2di_insn (rtx, rtx, rtx, rtx);
+extern rtx gen_get_fpscr_nzcvqc (rtx);
+extern rtx gen_set_fpscr_nzcvqc (rtx);
+extern rtx gen_mve_vldrdq_gather_base_wb_z_sv2di_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_wb_z_uv2di_insn (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vadciq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vadciq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vadciq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vadciq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vadcq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vadcq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vadcq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vadcq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vsbciq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsbciq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsbciq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vsbciq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vsbcq_m_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsbcq_m_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vsbcq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vsbcq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vst2qv16qi (rtx, rtx);
+extern rtx gen_mve_vst2qv8hi (rtx, rtx);
+extern rtx gen_mve_vst2qv4si (rtx, rtx);
+extern rtx gen_mve_vst2qv8hf (rtx, rtx);
+extern rtx gen_mve_vst2qv4sf (rtx, rtx);
+extern rtx gen_mve_vld2qv16qi (rtx, rtx);
+extern rtx gen_mve_vld2qv8hi (rtx, rtx);
+extern rtx gen_mve_vld2qv4si (rtx, rtx);
+extern rtx gen_mve_vld2qv8hf (rtx, rtx);
+extern rtx gen_mve_vld2qv4sf (rtx, rtx);
+extern rtx gen_mve_vld4qv16qi (rtx, rtx);
+extern rtx gen_mve_vld4qv8hi (rtx, rtx);
+extern rtx gen_mve_vld4qv4si (rtx, rtx);
+extern rtx gen_mve_vld4qv8hf (rtx, rtx);
+extern rtx gen_mve_vld4qv4sf (rtx, rtx);
+extern rtx gen_mve_vec_extractv16qiqi (rtx, rtx, rtx);
+extern rtx gen_mve_vec_extractv8hihi (rtx, rtx, rtx);
+extern rtx gen_mve_vec_extractv4sisi (rtx, rtx, rtx);
+extern rtx gen_mve_vec_extractv8hfhf (rtx, rtx, rtx);
+extern rtx gen_mve_vec_extractv4sfsf (rtx, rtx, rtx);
+extern rtx gen_mve_vec_extractv2didi (rtx, rtx, rtx);
+extern rtx gen_mve_vec_setv16qi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vec_setv8hi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vec_setv8hf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vec_setv4si_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vec_setv4sf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vec_setv2di_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_uqrshll_sat64_di (rtx, rtx, rtx);
+extern rtx gen_mve_uqrshll_sat48_di (rtx, rtx, rtx);
+extern rtx gen_mve_sqrshrl_sat64_di (rtx, rtx, rtx);
+extern rtx gen_mve_sqrshrl_sat48_di (rtx, rtx, rtx);
+extern rtx gen_mve_uqrshl_si (rtx, rtx, rtx);
+extern rtx gen_mve_sqrshr_si (rtx, rtx, rtx);
+extern rtx gen_mve_uqshll_di (rtx, rtx, rtx);
+extern rtx gen_mve_urshrl_di (rtx, rtx, rtx);
+extern rtx gen_mve_uqshl_si (rtx, rtx, rtx);
+extern rtx gen_mve_urshr_si (rtx, rtx, rtx);
+extern rtx gen_mve_sqshl_si (rtx, rtx, rtx);
+extern rtx gen_mve_srshr_si (rtx, rtx, rtx);
+extern rtx gen_mve_srshrl_di (rtx, rtx, rtx);
+extern rtx gen_mve_sqshll_di (rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_sv16qi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_uv16qi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_sv8hi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_uv8hi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_sv4si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_uv4si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx1qv16qi (rtx, rtx, rtx);
+extern rtx gen_arm_vcx1qav16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx2qv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx2qav16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx3qv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx3qav16qi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx1q_p_v16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx1qa_p_v16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx2q_p_v16qi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx2qa_p_v16qi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx3q_p_v16qi (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcx3qa_p_v16qi (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_adddi3 (rtx, rtx, rtx);
+extern rtx gen_addvsi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_addvdi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_addsi3_cin_vout_reg (rtx, rtx, rtx);
+extern rtx gen_addsi3_cin_vout_imm (rtx, rtx, rtx);
+extern rtx gen_addsi3_cin_vout_0 (rtx, rtx);
+extern rtx gen_uaddvsi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_uaddvdi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_addsi3_cin_cout_reg (rtx, rtx, rtx);
+extern rtx gen_addsi3_cin_cout_imm (rtx, rtx, rtx);
+extern rtx gen_addsi3_cin_cout_0 (rtx, rtx);
+extern rtx gen_addsi3 (rtx, rtx, rtx);
+extern rtx gen_subvsi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_subvdi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_usubvsi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_usubvdi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_addsf3 (rtx, rtx, rtx);
+extern rtx gen_adddf3 (rtx, rtx, rtx);
+extern rtx gen_subdi3 (rtx, rtx, rtx);
+extern rtx gen_subsi3 (rtx, rtx, rtx);
+extern rtx gen_subsf3 (rtx, rtx, rtx);
+extern rtx gen_subdf3 (rtx, rtx, rtx);
+extern rtx gen_mulhi3 (rtx, rtx, rtx);
+extern rtx gen_mulsi3 (rtx, rtx, rtx);
+extern rtx gen_mulsidi3 (rtx, rtx, rtx);
+extern rtx gen_umulsidi3 (rtx, rtx, rtx);
+extern rtx gen_maddsidi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_umaddsidi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_smulsi3_highpart (rtx, rtx, rtx);
+extern rtx gen_umulsi3_highpart (rtx, rtx, rtx);
+extern rtx gen_maddhisi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlabb (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlatb (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlatt (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlawb (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlawt (rtx, rtx, rtx, rtx);
+extern rtx gen_mulsf3 (rtx, rtx, rtx);
+extern rtx gen_muldf3 (rtx, rtx, rtx);
+extern rtx gen_divsf3 (rtx, rtx, rtx);
+extern rtx gen_divdf3 (rtx, rtx, rtx);
+extern rtx gen_anddi3 (rtx, rtx, rtx);
+extern rtx gen_iordi3 (rtx, rtx, rtx);
+extern rtx gen_xordi3 (rtx, rtx, rtx);
+extern rtx gen_one_cmpldi2 (rtx, rtx);
+extern rtx gen_andsi3 (rtx, rtx, rtx);
+extern rtx gen_insv (rtx, rtx, rtx, rtx);
+extern rtx gen_iorsi3 (rtx, rtx, rtx);
+extern rtx gen_xorsi3 (rtx, rtx, rtx);
+extern rtx gen_smaxsi3 (rtx, rtx, rtx);
+extern rtx gen_sminsi3 (rtx, rtx, rtx);
+extern rtx gen_umaxsi3 (rtx, rtx, rtx);
+extern rtx gen_uminsi3 (rtx, rtx, rtx);
+extern rtx gen_arm_qadd (rtx, rtx, rtx);
+extern rtx gen_arm_qsub (rtx, rtx, rtx);
+extern rtx gen_arm_ssat (rtx, rtx, rtx);
+extern rtx gen_arm_usat (rtx, rtx, rtx);
+extern rtx gen_arm_saturation_occurred (rtx);
+extern rtx gen_arm_set_saturation (rtx);
+extern rtx gen_ashldi3 (rtx, rtx, rtx);
+extern rtx gen_ashlsi3 (rtx, rtx, rtx);
+extern rtx gen_ashrdi3 (rtx, rtx, rtx);
+extern rtx gen_ashrsi3 (rtx, rtx, rtx);
+extern rtx gen_lshrdi3 (rtx, rtx, rtx);
+extern rtx gen_lshrsi3 (rtx, rtx, rtx);
+extern rtx gen_rotlsi3 (rtx, rtx, rtx);
+extern rtx gen_rotrsi3 (rtx, rtx, rtx);
+extern rtx gen_extzv (rtx, rtx, rtx, rtx);
+extern rtx gen_extzv_t1 (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_extv (rtx, rtx, rtx, rtx);
+extern rtx gen_extv_regsi (rtx, rtx, rtx, rtx);
+extern rtx gen_negvsi3 (rtx, rtx, rtx);
+extern rtx gen_negvdi3 (rtx, rtx, rtx);
+extern rtx gen_negsi2 (rtx, rtx);
+extern rtx gen_negsf2 (rtx, rtx);
+extern rtx gen_negdf2 (rtx, rtx);
+extern rtx gen_abssi2 (rtx, rtx);
+extern rtx gen_abssf2 (rtx, rtx);
+extern rtx gen_absdf2 (rtx, rtx);
+extern rtx gen_sqrtsf2 (rtx, rtx);
+extern rtx gen_sqrtdf2 (rtx, rtx);
+extern rtx gen_one_cmplsi2 (rtx, rtx);
+extern rtx gen_floatsihf2 (rtx, rtx);
+extern rtx gen_floatdihf2 (rtx, rtx);
+extern rtx gen_floatsisf2 (rtx, rtx);
+extern rtx gen_floatsidf2 (rtx, rtx);
+extern rtx gen_fix_trunchfsi2 (rtx, rtx);
+extern rtx gen_fix_trunchfdi2 (rtx, rtx);
+extern rtx gen_fix_truncsfsi2 (rtx, rtx);
+extern rtx gen_fix_truncdfsi2 (rtx, rtx);
+extern rtx gen_truncdfsf2 (rtx, rtx);
+extern rtx gen_truncdfhf2 (rtx, rtx);
+extern rtx gen_zero_extendqidi2 (rtx, rtx);
+extern rtx gen_zero_extendhidi2 (rtx, rtx);
+extern rtx gen_zero_extendsidi2 (rtx, rtx);
+extern rtx gen_extendqidi2 (rtx, rtx);
+extern rtx gen_extendhidi2 (rtx, rtx);
+extern rtx gen_extendsidi2 (rtx, rtx);
+extern rtx gen_zero_extendhisi2 (rtx, rtx);
+extern rtx gen_zero_extendqisi2 (rtx, rtx);
+extern rtx gen_extendhisi2 (rtx, rtx);
+extern rtx gen_extendhisi2_mem (rtx, rtx);
+extern rtx gen_extendqihi2 (rtx, rtx);
+extern rtx gen_extendqisi2 (rtx, rtx);
+extern rtx gen_arm_smlad (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smladx (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlsd (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smlsdx (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_smuad (rtx, rtx, rtx);
+extern rtx gen_arm_smuadx (rtx, rtx, rtx);
+extern rtx gen_arm_ssat16 (rtx, rtx, rtx);
+extern rtx gen_arm_usat16 (rtx, rtx, rtx);
+extern rtx gen_extendsfdf2 (rtx, rtx);
+extern rtx gen_extendhfdf2 (rtx, rtx);
+extern rtx gen_movdi (rtx, rtx);
+extern rtx gen_movsi (rtx, rtx);
+extern rtx gen_calculate_pic_address (rtx, rtx, rtx);
+extern rtx gen_builtin_setjmp_receiver (rtx);
+extern rtx gen_storehi (rtx, rtx);
+extern rtx gen_storehi_bigend (rtx, rtx);
+extern rtx gen_storeinthi (rtx, rtx);
+extern rtx gen_storehi_single_op (rtx, rtx);
+extern rtx gen_movhi (rtx, rtx);
+extern rtx gen_movhi_bytes (rtx, rtx);
+extern rtx gen_movhi_bigend (rtx, rtx);
+extern rtx gen_reload_outhi (rtx, rtx, rtx);
+extern rtx gen_reload_inhi (rtx, rtx, rtx);
+extern rtx gen_movqi (rtx, rtx);
+extern rtx gen_movhf (rtx, rtx);
+extern rtx gen_movbf (rtx, rtx);
+extern rtx gen_movsf (rtx, rtx);
+extern rtx gen_movdf (rtx, rtx);
+extern rtx gen_reload_outdf (rtx, rtx, rtx);
+extern rtx gen_load_multiple (rtx, rtx, rtx);
+extern rtx gen_store_multiple (rtx, rtx, rtx);
+extern rtx gen_setmemsi (rtx, rtx, rtx, rtx);
+extern rtx gen_cpymemqi (rtx, rtx, rtx, rtx);
+extern rtx gen_cbranchsi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cbranchsf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cbranchdf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cbranchdi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cbranch_cc (rtx, rtx, rtx, rtx);
+extern rtx gen_cstore_cc (rtx, rtx, rtx, rtx);
+extern rtx gen_cstoresi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cstorehf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cstoresf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cstoredf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cstoredi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_movsicc (rtx, rtx, rtx, rtx);
+extern rtx gen_movhfcc (rtx, rtx, rtx, rtx);
+extern rtx gen_movsfcc (rtx, rtx, rtx, rtx);
+extern rtx gen_movdfcc (rtx, rtx, rtx, rtx);
+extern rtx gen_jump (rtx);
+extern rtx gen_call (rtx, rtx, rtx);
+extern rtx gen_call_internal (rtx, rtx, rtx);
+extern rtx gen_nonsecure_call_internal (rtx, rtx, rtx);
+extern rtx gen_call_value (rtx, rtx, rtx, rtx);
+extern rtx gen_call_value_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_nonsecure_call_value_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_sibcall_internal (rtx, rtx, rtx);
+extern rtx gen_sibcall (rtx, rtx, rtx);
+extern rtx gen_sibcall_value_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_sibcall_value (rtx, rtx, rtx, rtx);
+extern rtx gen_return (void);
+extern rtx gen_simple_return (void);
+extern rtx gen_return_addr_mask (rtx);
+extern rtx gen_untyped_call (rtx, rtx, rtx);
+extern rtx gen_untyped_return (rtx, rtx);
+extern rtx gen_stack_protect_combined_set (rtx, rtx);
+extern rtx gen_stack_protect_combined_test (rtx, rtx, rtx);
+extern rtx gen_stack_protect_set (rtx, rtx);
+extern rtx gen_stack_protect_test (rtx, rtx, rtx);
+extern rtx gen_casesi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_arm_casesi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_indirect_jump (rtx);
+extern rtx gen_prologue (void);
+extern rtx gen_epilogue (void);
+extern rtx gen_sibcall_epilogue (void);
+extern rtx gen_eh_epilogue (rtx, rtx, rtx);
+extern rtx gen_eh_return (rtx);
+extern rtx gen_get_thread_pointersi (rtx);
+extern rtx gen_arm_legacy_rev (rtx, rtx, rtx, rtx);
+extern rtx gen_thumb_legacy_rev (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_modsi3 (rtx, rtx, rtx);
+extern rtx gen_bswapsi2 (rtx, rtx);
+extern rtx gen_bswaphi2 (rtx, rtx);
+extern rtx gen_copysignsf3 (rtx, rtx, rtx);
+extern rtx gen_copysigndf3 (rtx, rtx, rtx);
+extern rtx gen_movmisaligndi (rtx, rtx);
+extern rtx gen_movmisalignhi (rtx, rtx);
+extern rtx gen_movmisalignsi (rtx, rtx);
+extern rtx gen_arm_ldc (rtx, rtx, rtx);
+extern rtx gen_arm_ldc2 (rtx, rtx, rtx);
+extern rtx gen_arm_ldcl (rtx, rtx, rtx);
+extern rtx gen_arm_ldc2l (rtx, rtx, rtx);
+extern rtx gen_arm_stc (rtx, rtx, rtx);
+extern rtx gen_arm_stc2 (rtx, rtx, rtx);
+extern rtx gen_arm_stcl (rtx, rtx, rtx);
+extern rtx gen_arm_stc2l (rtx, rtx, rtx);
+extern rtx gen_speculation_barrier (void);
+extern rtx gen_movv16qi (rtx, rtx);
+extern rtx gen_movv8hi (rtx, rtx);
+extern rtx gen_movv4si (rtx, rtx);
+extern rtx gen_movv4sf (rtx, rtx);
+extern rtx gen_movv2di (rtx, rtx);
+extern rtx gen_movv2si (rtx, rtx);
+extern rtx gen_movv4hi (rtx, rtx);
+extern rtx gen_movv8qi (rtx, rtx);
+extern rtx gen_movv2sf (rtx, rtx);
+extern rtx gen_movv8hf (rtx, rtx);
+extern rtx gen_addv8qi3 (rtx, rtx, rtx);
+extern rtx gen_addv16qi3 (rtx, rtx, rtx);
+extern rtx gen_addv4hi3 (rtx, rtx, rtx);
+extern rtx gen_addv8hi3 (rtx, rtx, rtx);
+extern rtx gen_addv2si3 (rtx, rtx, rtx);
+extern rtx gen_addv4si3 (rtx, rtx, rtx);
+extern rtx gen_addv4hf3 (rtx, rtx, rtx);
+extern rtx gen_addv8hf3 (rtx, rtx, rtx);
+extern rtx gen_addv2sf3 (rtx, rtx, rtx);
+extern rtx gen_addv4sf3 (rtx, rtx, rtx);
+extern rtx gen_addv2di3 (rtx, rtx, rtx);
+extern rtx gen_subv8qi3 (rtx, rtx, rtx);
+extern rtx gen_subv16qi3 (rtx, rtx, rtx);
+extern rtx gen_subv4hi3 (rtx, rtx, rtx);
+extern rtx gen_subv8hi3 (rtx, rtx, rtx);
+extern rtx gen_subv2si3 (rtx, rtx, rtx);
+extern rtx gen_subv4si3 (rtx, rtx, rtx);
+extern rtx gen_subv4hf3 (rtx, rtx, rtx);
+extern rtx gen_subv8hf3 (rtx, rtx, rtx);
+extern rtx gen_subv2sf3 (rtx, rtx, rtx);
+extern rtx gen_subv4sf3 (rtx, rtx, rtx);
+extern rtx gen_subv2di3 (rtx, rtx, rtx);
+extern rtx gen_mulv8qi3 (rtx, rtx, rtx);
+extern rtx gen_mulv16qi3 (rtx, rtx, rtx);
+extern rtx gen_mulv4hi3 (rtx, rtx, rtx);
+extern rtx gen_mulv8hi3 (rtx, rtx, rtx);
+extern rtx gen_mulv2si3 (rtx, rtx, rtx);
+extern rtx gen_mulv4si3 (rtx, rtx, rtx);
+extern rtx gen_mulv2sf3 (rtx, rtx, rtx);
+extern rtx gen_mulv4sf3 (rtx, rtx, rtx);
+extern rtx gen_mulv8hf3 (rtx, rtx, rtx);
+extern rtx gen_mulv4hf3 (rtx, rtx, rtx);
+extern rtx gen_sminv2si3 (rtx, rtx, rtx);
+extern rtx gen_sminv4hi3 (rtx, rtx, rtx);
+extern rtx gen_sminv8qi3 (rtx, rtx, rtx);
+extern rtx gen_sminv2sf3 (rtx, rtx, rtx);
+extern rtx gen_sminv4si3 (rtx, rtx, rtx);
+extern rtx gen_sminv8hi3 (rtx, rtx, rtx);
+extern rtx gen_sminv16qi3 (rtx, rtx, rtx);
+extern rtx gen_sminv4sf3 (rtx, rtx, rtx);
+extern rtx gen_uminv2si3 (rtx, rtx, rtx);
+extern rtx gen_uminv4hi3 (rtx, rtx, rtx);
+extern rtx gen_uminv8qi3 (rtx, rtx, rtx);
+extern rtx gen_uminv4si3 (rtx, rtx, rtx);
+extern rtx gen_uminv8hi3 (rtx, rtx, rtx);
+extern rtx gen_uminv16qi3 (rtx, rtx, rtx);
+extern rtx gen_smaxv2si3 (rtx, rtx, rtx);
+extern rtx gen_smaxv4hi3 (rtx, rtx, rtx);
+extern rtx gen_smaxv8qi3 (rtx, rtx, rtx);
+extern rtx gen_smaxv2sf3 (rtx, rtx, rtx);
+extern rtx gen_smaxv4si3 (rtx, rtx, rtx);
+extern rtx gen_smaxv8hi3 (rtx, rtx, rtx);
+extern rtx gen_smaxv16qi3 (rtx, rtx, rtx);
+extern rtx gen_smaxv4sf3 (rtx, rtx, rtx);
+extern rtx gen_umaxv2si3 (rtx, rtx, rtx);
+extern rtx gen_umaxv4hi3 (rtx, rtx, rtx);
+extern rtx gen_umaxv8qi3 (rtx, rtx, rtx);
+extern rtx gen_umaxv4si3 (rtx, rtx, rtx);
+extern rtx gen_umaxv8hi3 (rtx, rtx, rtx);
+extern rtx gen_umaxv16qi3 (rtx, rtx, rtx);
+extern rtx gen_vec_permv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_permv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_extractv16qiqi (rtx, rtx, rtx);
+extern rtx gen_vec_extractv8hihi (rtx, rtx, rtx);
+extern rtx gen_vec_extractv8hfhf (rtx, rtx, rtx);
+extern rtx gen_vec_extractv4sisi (rtx, rtx, rtx);
+extern rtx gen_vec_extractv4sfsf (rtx, rtx, rtx);
+extern rtx gen_vec_extractv2didi (rtx, rtx, rtx);
+extern rtx gen_vec_setv16qi (rtx, rtx, rtx);
+extern rtx gen_vec_setv8hi (rtx, rtx, rtx);
+extern rtx gen_vec_setv8hf (rtx, rtx, rtx);
+extern rtx gen_vec_setv4si (rtx, rtx, rtx);
+extern rtx gen_vec_setv4sf (rtx, rtx, rtx);
+extern rtx gen_vec_setv2di (rtx, rtx, rtx);
+extern rtx gen_andv8qi3 (rtx, rtx, rtx);
+extern rtx gen_andv16qi3 (rtx, rtx, rtx);
+extern rtx gen_andv4hi3 (rtx, rtx, rtx);
+extern rtx gen_andv8hi3 (rtx, rtx, rtx);
+extern rtx gen_andv2si3 (rtx, rtx, rtx);
+extern rtx gen_andv4si3 (rtx, rtx, rtx);
+extern rtx gen_andv4hf3 (rtx, rtx, rtx);
+extern rtx gen_andv8hf3 (rtx, rtx, rtx);
+extern rtx gen_andv2sf3 (rtx, rtx, rtx);
+extern rtx gen_andv4sf3 (rtx, rtx, rtx);
+extern rtx gen_andv2di3 (rtx, rtx, rtx);
+extern rtx gen_iorv8qi3 (rtx, rtx, rtx);
+extern rtx gen_iorv16qi3 (rtx, rtx, rtx);
+extern rtx gen_iorv4hi3 (rtx, rtx, rtx);
+extern rtx gen_iorv8hi3 (rtx, rtx, rtx);
+extern rtx gen_iorv2si3 (rtx, rtx, rtx);
+extern rtx gen_iorv4si3 (rtx, rtx, rtx);
+extern rtx gen_iorv4hf3 (rtx, rtx, rtx);
+extern rtx gen_iorv8hf3 (rtx, rtx, rtx);
+extern rtx gen_iorv2sf3 (rtx, rtx, rtx);
+extern rtx gen_iorv4sf3 (rtx, rtx, rtx);
+extern rtx gen_iorv2di3 (rtx, rtx, rtx);
+extern rtx gen_xorv8qi3 (rtx, rtx, rtx);
+extern rtx gen_xorv16qi3 (rtx, rtx, rtx);
+extern rtx gen_xorv4hi3 (rtx, rtx, rtx);
+extern rtx gen_xorv8hi3 (rtx, rtx, rtx);
+extern rtx gen_xorv2si3 (rtx, rtx, rtx);
+extern rtx gen_xorv4si3 (rtx, rtx, rtx);
+extern rtx gen_xorv4hf3 (rtx, rtx, rtx);
+extern rtx gen_xorv8hf3 (rtx, rtx, rtx);
+extern rtx gen_xorv2sf3 (rtx, rtx, rtx);
+extern rtx gen_xorv4sf3 (rtx, rtx, rtx);
+extern rtx gen_xorv2di3 (rtx, rtx, rtx);
+extern rtx gen_one_cmplv8qi2 (rtx, rtx);
+extern rtx gen_one_cmplv16qi2 (rtx, rtx);
+extern rtx gen_one_cmplv4hi2 (rtx, rtx);
+extern rtx gen_one_cmplv8hi2 (rtx, rtx);
+extern rtx gen_one_cmplv2si2 (rtx, rtx);
+extern rtx gen_one_cmplv4si2 (rtx, rtx);
+extern rtx gen_one_cmplv4hf2 (rtx, rtx);
+extern rtx gen_one_cmplv8hf2 (rtx, rtx);
+extern rtx gen_one_cmplv2sf2 (rtx, rtx);
+extern rtx gen_one_cmplv4sf2 (rtx, rtx);
+extern rtx gen_one_cmplv2di2 (rtx, rtx);
+extern rtx gen_absv8qi2 (rtx, rtx);
+extern rtx gen_negv8qi2 (rtx, rtx);
+extern rtx gen_absv16qi2 (rtx, rtx);
+extern rtx gen_negv16qi2 (rtx, rtx);
+extern rtx gen_absv4hi2 (rtx, rtx);
+extern rtx gen_negv4hi2 (rtx, rtx);
+extern rtx gen_absv8hi2 (rtx, rtx);
+extern rtx gen_negv8hi2 (rtx, rtx);
+extern rtx gen_absv2si2 (rtx, rtx);
+extern rtx gen_negv2si2 (rtx, rtx);
+extern rtx gen_absv4si2 (rtx, rtx);
+extern rtx gen_negv4si2 (rtx, rtx);
+extern rtx gen_absv2sf2 (rtx, rtx);
+extern rtx gen_negv2sf2 (rtx, rtx);
+extern rtx gen_absv4sf2 (rtx, rtx);
+extern rtx gen_negv4sf2 (rtx, rtx);
+extern rtx gen_absv8hf2 (rtx, rtx);
+extern rtx gen_negv8hf2 (rtx, rtx);
+extern rtx gen_absv4hf2 (rtx, rtx);
+extern rtx gen_negv4hf2 (rtx, rtx);
+extern rtx gen_cadd90v4hf3 (rtx, rtx, rtx);
+extern rtx gen_cadd270v4hf3 (rtx, rtx, rtx);
+extern rtx gen_cadd90v8hf3 (rtx, rtx, rtx);
+extern rtx gen_cadd270v8hf3 (rtx, rtx, rtx);
+extern rtx gen_cadd90v2sf3 (rtx, rtx, rtx);
+extern rtx gen_cadd270v2sf3 (rtx, rtx, rtx);
+extern rtx gen_cadd90v4sf3 (rtx, rtx, rtx);
+extern rtx gen_cadd270v4sf3 (rtx, rtx, rtx);
+extern rtx gen_cmulv8hf3 (rtx, rtx, rtx);
+extern rtx gen_cmul_conjv8hf3 (rtx, rtx, rtx);
+extern rtx gen_cmulv4sf3 (rtx, rtx, rtx);
+extern rtx gen_cmul_conjv4sf3 (rtx, rtx, rtx);
+extern rtx gen_arm_vcmla0v4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla90v4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla180v4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla270v4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla0v8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla90v8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla180v8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla270v8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla0v2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla90v2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla180v2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla270v2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla0v4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla90v4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla180v4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_arm_vcmla270v4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_cmlav4hf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmla_conjv4hf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmlsv4hf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmls_conjv4hf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmlav8hf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmla_conjv8hf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmlsv8hf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmls_conjv8hf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmlav2sf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmla_conjv2sf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmlsv2sf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmls_conjv2sf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmlav4sf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmla_conjv4sf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmlsv4sf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cmls_conjv4sf4 (rtx, rtx, rtx, rtx);
+extern rtx gen_movmisalignv8qi (rtx, rtx);
+extern rtx gen_movmisalignv16qi (rtx, rtx);
+extern rtx gen_movmisalignv4hi (rtx, rtx);
+extern rtx gen_movmisalignv8hi (rtx, rtx);
+extern rtx gen_movmisalignv2si (rtx, rtx);
+extern rtx gen_movmisalignv4si (rtx, rtx);
+extern rtx gen_movmisalignv4hf (rtx, rtx);
+extern rtx gen_movmisalignv8hf (rtx, rtx);
+extern rtx gen_movmisalignv2sf (rtx, rtx);
+extern rtx gen_movmisalignv4sf (rtx, rtx);
+extern rtx gen_movmisalignv2di (rtx, rtx);
+extern rtx gen_vashlv8qi3 (rtx, rtx, rtx);
+extern rtx gen_vashlv16qi3 (rtx, rtx, rtx);
+extern rtx gen_vashlv4hi3 (rtx, rtx, rtx);
+extern rtx gen_vashlv8hi3 (rtx, rtx, rtx);
+extern rtx gen_vashlv2si3 (rtx, rtx, rtx);
+extern rtx gen_vashlv4si3 (rtx, rtx, rtx);
+extern rtx gen_vashrv8qi3 (rtx, rtx, rtx);
+extern rtx gen_vashrv16qi3 (rtx, rtx, rtx);
+extern rtx gen_vashrv4hi3 (rtx, rtx, rtx);
+extern rtx gen_vashrv8hi3 (rtx, rtx, rtx);
+extern rtx gen_vashrv2si3 (rtx, rtx, rtx);
+extern rtx gen_vashrv4si3 (rtx, rtx, rtx);
+extern rtx gen_vlshrv8qi3 (rtx, rtx, rtx);
+extern rtx gen_vlshrv16qi3 (rtx, rtx, rtx);
+extern rtx gen_vlshrv4hi3 (rtx, rtx, rtx);
+extern rtx gen_vlshrv8hi3 (rtx, rtx, rtx);
+extern rtx gen_vlshrv2si3 (rtx, rtx, rtx);
+extern rtx gen_vlshrv4si3 (rtx, rtx, rtx);
+extern rtx gen_vcondv8qiv8qi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv16qiv16qi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv4hiv4hi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv8hiv8hi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv2siv2si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv4siv4si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv2sfv2sf (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv4sfv4sf (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv8hfv8hf (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv4hfv4hf (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv2sfv2si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv2siv2sf (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv4sfv4si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv4siv4sf (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv4hfv4hi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv4hiv4hf (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv8hfv8hi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vcondv8hiv8hf (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vconduv8qiv8qi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vconduv16qiv16qi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vconduv4hiv4hi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vconduv8hiv8hi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vconduv2siv2si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vconduv4siv4si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vconduv2sfv2si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vconduv4sfv4si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_vec_load_lanesoiv16qi (rtx, rtx);
+extern rtx gen_vec_load_lanesoiv8hi (rtx, rtx);
+extern rtx gen_vec_load_lanesoiv8hf (rtx, rtx);
+extern rtx gen_vec_load_lanesoiv4si (rtx, rtx);
+extern rtx gen_vec_load_lanesoiv4sf (rtx, rtx);
+extern rtx gen_vec_store_lanesoiv16qi (rtx, rtx);
+extern rtx gen_vec_store_lanesoiv8hi (rtx, rtx);
+extern rtx gen_vec_store_lanesoiv8hf (rtx, rtx);
+extern rtx gen_vec_store_lanesoiv4si (rtx, rtx);
+extern rtx gen_vec_store_lanesoiv4sf (rtx, rtx);
+extern rtx gen_vec_load_lanesxiv16qi (rtx, rtx);
+extern rtx gen_vec_load_lanesxiv8hi (rtx, rtx);
+extern rtx gen_vec_load_lanesxiv8hf (rtx, rtx);
+extern rtx gen_vec_load_lanesxiv4si (rtx, rtx);
+extern rtx gen_vec_load_lanesxiv4sf (rtx, rtx);
+extern rtx gen_vec_store_lanesxiv16qi (rtx, rtx);
+extern rtx gen_vec_store_lanesxiv8hi (rtx, rtx);
+extern rtx gen_vec_store_lanesxiv8hf (rtx, rtx);
+extern rtx gen_vec_store_lanesxiv4si (rtx, rtx);
+extern rtx gen_vec_store_lanesxiv4sf (rtx, rtx);
+extern rtx gen_reduc_plus_scal_v16qi (rtx, rtx);
+extern rtx gen_reduc_plus_scal_v8hi (rtx, rtx);
+extern rtx gen_reduc_plus_scal_v4si (rtx, rtx);
+extern rtx gen_reduc_plus_scal_v4sf (rtx, rtx);
+extern rtx gen_avgv16qi3_floor (rtx, rtx, rtx);
+extern rtx gen_avgv8hi3_floor (rtx, rtx, rtx);
+extern rtx gen_avgv4si3_floor (rtx, rtx, rtx);
+extern rtx gen_uavgv16qi3_floor (rtx, rtx, rtx);
+extern rtx gen_uavgv8hi3_floor (rtx, rtx, rtx);
+extern rtx gen_uavgv4si3_floor (rtx, rtx, rtx);
+extern rtx gen_avgv16qi3_ceil (rtx, rtx, rtx);
+extern rtx gen_avgv8hi3_ceil (rtx, rtx, rtx);
+extern rtx gen_avgv4si3_ceil (rtx, rtx, rtx);
+extern rtx gen_uavgv16qi3_ceil (rtx, rtx, rtx);
+extern rtx gen_uavgv8hi3_ceil (rtx, rtx, rtx);
+extern rtx gen_uavgv4si3_ceil (rtx, rtx, rtx);
+extern rtx gen_clzv8qi2 (rtx, rtx);
+extern rtx gen_clzv16qi2 (rtx, rtx);
+extern rtx gen_clzv4hi2 (rtx, rtx);
+extern rtx gen_clzv8hi2 (rtx, rtx);
+extern rtx gen_clzv2si2 (rtx, rtx);
+extern rtx gen_clzv4si2 (rtx, rtx);
+extern rtx gen_vec_initv8qiqi (rtx, rtx);
+extern rtx gen_vec_initv16qiqi (rtx, rtx);
+extern rtx gen_vec_initv4hihi (rtx, rtx);
+extern rtx gen_vec_initv8hihi (rtx, rtx);
+extern rtx gen_vec_initv2sisi (rtx, rtx);
+extern rtx gen_vec_initv4sisi (rtx, rtx);
+extern rtx gen_vec_initv4hfhf (rtx, rtx);
+extern rtx gen_vec_initv8hfhf (rtx, rtx);
+extern rtx gen_vec_initv4bfbf (rtx, rtx);
+extern rtx gen_vec_initv8bfbf (rtx, rtx);
+extern rtx gen_vec_initv2sfsf (rtx, rtx);
+extern rtx gen_vec_initv4sfsf (rtx, rtx);
+extern rtx gen_vec_initdidi (rtx, rtx);
+extern rtx gen_vec_initv2didi (rtx, rtx);
+extern rtx gen_iwmmxt_setwcgr0 (rtx);
+extern rtx gen_iwmmxt_setwcgr1 (rtx);
+extern rtx gen_iwmmxt_setwcgr2 (rtx);
+extern rtx gen_iwmmxt_setwcgr3 (rtx);
+extern rtx gen_iwmmxt_getwcgr0 (rtx);
+extern rtx gen_iwmmxt_getwcgr1 (rtx);
+extern rtx gen_iwmmxt_getwcgr2 (rtx);
+extern rtx gen_iwmmxt_getwcgr3 (rtx);
+extern rtx gen_neon_vabshf (rtx, rtx);
+extern rtx gen_neon_vfmahf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmshf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcvths_nhf (rtx, rtx, rtx);
+extern rtx gen_neon_vcvthu_nhf (rtx, rtx, rtx);
+extern rtx gen_neon_vcvths_nsi (rtx, rtx, rtx);
+extern rtx gen_neon_vcvthu_nsi (rtx, rtx, rtx);
+extern rtx gen_thumb_movhi_clobber (rtx, rtx, rtx);
+extern rtx gen_cbranchqi4 (rtx, rtx, rtx, rtx);
+extern rtx gen_cbranchsi4_neg_late (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_cstoresi_eq0_thumb1 (rtx, rtx);
+extern rtx gen_cstoresi_ne0_thumb1 (rtx, rtx);
+extern rtx gen_thumb1_casesi_internal_pic (rtx, rtx, rtx, rtx);
+extern rtx gen_tablejump (rtx, rtx);
+extern rtx gen_thumb2_casesi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_thumb2_casesi_internal_pic (rtx, rtx, rtx, rtx);
+extern rtx gen_doloop_end (rtx, rtx);
+extern rtx gen_doloop_begin (rtx, rtx);
+extern rtx gen_movti (rtx, rtx);
+extern rtx gen_movei (rtx, rtx);
+extern rtx gen_movoi (rtx, rtx);
+extern rtx gen_movci (rtx, rtx);
+extern rtx gen_movxi (rtx, rtx);
+extern rtx gen_movv4hf (rtx, rtx);
+extern rtx gen_movv4bf (rtx, rtx);
+extern rtx gen_movv8bf (rtx, rtx);
+extern rtx gen_divv2sf3 (rtx, rtx, rtx);
+extern rtx gen_divv4sf3 (rtx, rtx, rtx);
+extern rtx gen_ceilv2sf2 (rtx, rtx);
+extern rtx gen_btruncv2sf2 (rtx, rtx);
+extern rtx gen_floorv2sf2 (rtx, rtx);
+extern rtx gen_rintv2sf2 (rtx, rtx);
+extern rtx gen_roundv2sf2 (rtx, rtx);
+extern rtx gen_roundevenv2sf2 (rtx, rtx);
+extern rtx gen_ceilv4sf2 (rtx, rtx);
+extern rtx gen_btruncv4sf2 (rtx, rtx);
+extern rtx gen_floorv4sf2 (rtx, rtx);
+extern rtx gen_rintv4sf2 (rtx, rtx);
+extern rtx gen_roundv4sf2 (rtx, rtx);
+extern rtx gen_roundevenv4sf2 (rtx, rtx);
+extern rtx gen_lceilv2sfv2si2 (rtx, rtx);
+extern rtx gen_lfloorv2sfv2si2 (rtx, rtx);
+extern rtx gen_lroundv2sfv2si2 (rtx, rtx);
+extern rtx gen_lceiluv2sfv2si2 (rtx, rtx);
+extern rtx gen_lflooruv2sfv2si2 (rtx, rtx);
+extern rtx gen_lrounduv2sfv2si2 (rtx, rtx);
+extern rtx gen_lceilv4sfv4si2 (rtx, rtx);
+extern rtx gen_lfloorv4sfv4si2 (rtx, rtx);
+extern rtx gen_lroundv4sfv4si2 (rtx, rtx);
+extern rtx gen_lceiluv4sfv4si2 (rtx, rtx);
+extern rtx gen_lflooruv4sfv4si2 (rtx, rtx);
+extern rtx gen_lrounduv4sfv4si2 (rtx, rtx);
+extern rtx gen_neon_vabsv8hf (rtx, rtx);
+extern rtx gen_neon_vnegv8hf (rtx, rtx);
+extern rtx gen_neon_vabsv4hf (rtx, rtx);
+extern rtx gen_neon_vnegv4hf (rtx, rtx);
+extern rtx gen_widen_ssumv16qi3 (rtx, rtx, rtx);
+extern rtx gen_widen_ssumv8hi3 (rtx, rtx, rtx);
+extern rtx gen_widen_ssumv4si3 (rtx, rtx, rtx);
+extern rtx gen_widen_usumv16qi3 (rtx, rtx, rtx);
+extern rtx gen_widen_usumv8hi3 (rtx, rtx, rtx);
+extern rtx gen_widen_usumv4si3 (rtx, rtx, rtx);
+extern rtx gen_move_hi_quad_v2di (rtx, rtx);
+extern rtx gen_move_hi_quad_v2df (rtx, rtx);
+extern rtx gen_move_hi_quad_v16qi (rtx, rtx);
+extern rtx gen_move_hi_quad_v8hi (rtx, rtx);
+extern rtx gen_move_hi_quad_v4si (rtx, rtx);
+extern rtx gen_move_hi_quad_v4sf (rtx, rtx);
+extern rtx gen_move_lo_quad_v2di (rtx, rtx);
+extern rtx gen_move_lo_quad_v2df (rtx, rtx);
+extern rtx gen_move_lo_quad_v16qi (rtx, rtx);
+extern rtx gen_move_lo_quad_v8hi (rtx, rtx);
+extern rtx gen_move_lo_quad_v4si (rtx, rtx);
+extern rtx gen_move_lo_quad_v4sf (rtx, rtx);
+extern rtx gen_reduc_plus_scal_v8qi (rtx, rtx);
+extern rtx gen_reduc_plus_scal_v4hi (rtx, rtx);
+extern rtx gen_reduc_plus_scal_v2si (rtx, rtx);
+extern rtx gen_reduc_plus_scal_v2sf (rtx, rtx);
+extern rtx gen_reduc_plus_scal_v2di (rtx, rtx);
+extern rtx gen_reduc_smin_scal_v8qi (rtx, rtx);
+extern rtx gen_reduc_smin_scal_v4hi (rtx, rtx);
+extern rtx gen_reduc_smin_scal_v2si (rtx, rtx);
+extern rtx gen_reduc_smin_scal_v2sf (rtx, rtx);
+extern rtx gen_reduc_smin_scal_v16qi (rtx, rtx);
+extern rtx gen_reduc_smin_scal_v8hi (rtx, rtx);
+extern rtx gen_reduc_smin_scal_v4si (rtx, rtx);
+extern rtx gen_reduc_smin_scal_v4sf (rtx, rtx);
+extern rtx gen_reduc_smax_scal_v8qi (rtx, rtx);
+extern rtx gen_reduc_smax_scal_v4hi (rtx, rtx);
+extern rtx gen_reduc_smax_scal_v2si (rtx, rtx);
+extern rtx gen_reduc_smax_scal_v2sf (rtx, rtx);
+extern rtx gen_reduc_smax_scal_v16qi (rtx, rtx);
+extern rtx gen_reduc_smax_scal_v8hi (rtx, rtx);
+extern rtx gen_reduc_smax_scal_v4si (rtx, rtx);
+extern rtx gen_reduc_smax_scal_v4sf (rtx, rtx);
+extern rtx gen_reduc_umin_scal_v8qi (rtx, rtx);
+extern rtx gen_reduc_umin_scal_v4hi (rtx, rtx);
+extern rtx gen_reduc_umin_scal_v2si (rtx, rtx);
+extern rtx gen_reduc_umin_scal_v16qi (rtx, rtx);
+extern rtx gen_reduc_umin_scal_v8hi (rtx, rtx);
+extern rtx gen_reduc_umin_scal_v4si (rtx, rtx);
+extern rtx gen_reduc_umax_scal_v8qi (rtx, rtx);
+extern rtx gen_reduc_umax_scal_v4hi (rtx, rtx);
+extern rtx gen_reduc_umax_scal_v2si (rtx, rtx);
+extern rtx gen_reduc_umax_scal_v16qi (rtx, rtx);
+extern rtx gen_reduc_umax_scal_v8hi (rtx, rtx);
+extern rtx gen_reduc_umax_scal_v4si (rtx, rtx);
+extern rtx gen_vec_cmpv8qiv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpv16qiv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpv4hiv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpv8hiv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpv2siv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpv4siv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpv2sfv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpv4sfv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpv8hfv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpv4hfv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpuv8qiv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpuv16qiv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpuv4hiv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpuv8hiv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpuv2siv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpuv4siv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v8qiv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v16qiv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v4hiv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v8hiv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v2siv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v4siv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v2sfv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v4sfv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v8hfv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v4hfv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vaddv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vaddv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vaddv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vaddv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vsubv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vsubv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vmlav8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlav4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmav2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmav4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmav8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmav4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsv2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsv4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmal_lowv2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmal_highv2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsl_lowv2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsl_highv2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmal_lowv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmal_highv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsl_lowv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsl_highv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmal_lane_lowv2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmal_lane_highv2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsl_lane_lowv2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsl_lane_highv2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmal_lane_lowv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmal_lane_highv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsl_lane_lowv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsl_lane_highv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmal_lane_lowv8hfv2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmal_lane_highv8hfv2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsl_lane_lowv8hfv2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsl_lane_highv8hfv2sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmal_lane_lowv4hfv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmal_lane_highv4hfv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsl_lane_lowv4hfv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmsl_lane_highv4hfv4sf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vsubv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vsubv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vclev8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vclev16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vclev4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vclev8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev2si (rtx, rtx, rtx);
+extern rtx gen_neon_vclev2si (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev4si (rtx, rtx, rtx);
+extern rtx gen_neon_vclev4si (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vclev2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vclev4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vclev8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vceqv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcgtv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcgev4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vclev4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcltv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcagtv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcagev2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcaltv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcalev2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcagtv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcagev4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcaltv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcalev4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vcagtv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcagev8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcaltv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcalev8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcagtv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcagev4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcaltv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vcalev4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vceqzv8hf (rtx, rtx);
+extern rtx gen_neon_vcgtzv8hf (rtx, rtx);
+extern rtx gen_neon_vcgezv8hf (rtx, rtx);
+extern rtx gen_neon_vclezv8hf (rtx, rtx);
+extern rtx gen_neon_vcltzv8hf (rtx, rtx);
+extern rtx gen_neon_vceqzv4hf (rtx, rtx);
+extern rtx gen_neon_vcgtzv4hf (rtx, rtx);
+extern rtx gen_neon_vcgezv4hf (rtx, rtx);
+extern rtx gen_neon_vclezv4hf (rtx, rtx);
+extern rtx gen_neon_vcltzv4hf (rtx, rtx);
+extern rtx gen_ssadv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_usadv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vpaddv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vpaddv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vpaddv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vpaddv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vabsv8qi (rtx, rtx);
+extern rtx gen_neon_vabsv16qi (rtx, rtx);
+extern rtx gen_neon_vabsv4hi (rtx, rtx);
+extern rtx gen_neon_vabsv8hi (rtx, rtx);
+extern rtx gen_neon_vabsv2si (rtx, rtx);
+extern rtx gen_neon_vabsv4si (rtx, rtx);
+extern rtx gen_neon_vabsv2sf (rtx, rtx);
+extern rtx gen_neon_vabsv4sf (rtx, rtx);
+extern rtx gen_neon_vnegv8qi (rtx, rtx);
+extern rtx gen_neon_vnegv16qi (rtx, rtx);
+extern rtx gen_neon_vnegv4hi (rtx, rtx);
+extern rtx gen_neon_vnegv8hi (rtx, rtx);
+extern rtx gen_neon_vnegv2si (rtx, rtx);
+extern rtx gen_neon_vnegv4si (rtx, rtx);
+extern rtx gen_neon_vnegv2sf (rtx, rtx);
+extern rtx gen_neon_vnegv4sf (rtx, rtx);
+extern rtx gen_cmulv2sf3 (rtx, rtx, rtx);
+extern rtx gen_cmul_conjv2sf3 (rtx, rtx, rtx);
+extern rtx gen_cmulv4hf3 (rtx, rtx, rtx);
+extern rtx gen_cmul_conjv4hf3 (rtx, rtx, rtx);
+extern rtx gen_neon_sdotv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_udotv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_sdotv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_udotv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_usdot_prodv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_usdot_prodv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_copysignv2sf3 (rtx, rtx, rtx);
+extern rtx gen_copysignv4sf3 (rtx, rtx, rtx);
+extern rtx gen_neon_vcntv8qi (rtx, rtx);
+extern rtx gen_neon_vcntv16qi (rtx, rtx);
+extern rtx gen_neon_vmvnv8qi (rtx, rtx);
+extern rtx gen_neon_vmvnv16qi (rtx, rtx);
+extern rtx gen_neon_vmvnv4hi (rtx, rtx);
+extern rtx gen_neon_vmvnv8hi (rtx, rtx);
+extern rtx gen_neon_vmvnv2si (rtx, rtx);
+extern rtx gen_neon_vmvnv4si (rtx, rtx);
+extern rtx gen_neon_vget_lanev8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev2si (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev4si (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vget_laneuv8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vget_laneuv16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vget_laneuv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vget_laneuv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vget_laneuv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vget_laneuv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanedi (rtx, rtx, rtx);
+extern rtx gen_neon_vget_lanev2di (rtx, rtx, rtx);
+extern rtx gen_neon_vset_lanev8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vset_lanev16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vset_lanev4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vset_lanev8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vset_lanev2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vset_lanev4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vset_lanev4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vset_lanev8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vset_lanev2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vset_lanev4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vset_lanev2di (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vset_lanedi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vcreatev8qi (rtx, rtx);
+extern rtx gen_neon_vcreatev4hi (rtx, rtx);
+extern rtx gen_neon_vcreatev2si (rtx, rtx);
+extern rtx gen_neon_vcreatev2sf (rtx, rtx);
+extern rtx gen_neon_vcreatedi (rtx, rtx);
+extern rtx gen_neon_vdup_ndi (rtx, rtx);
+extern rtx gen_neon_vdup_lanev8qi (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev16qi (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev2si (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev4si (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev4bf (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev8bf (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanedi (rtx, rtx, rtx);
+extern rtx gen_neon_vdup_lanev2di (rtx, rtx, rtx);
+extern rtx gen_neon_vget_highv16qi (rtx, rtx);
+extern rtx gen_neon_vget_highv8hi (rtx, rtx);
+extern rtx gen_neon_vget_highv8hf (rtx, rtx);
+extern rtx gen_neon_vget_highv8bf (rtx, rtx);
+extern rtx gen_neon_vget_highv4si (rtx, rtx);
+extern rtx gen_neon_vget_highv4sf (rtx, rtx);
+extern rtx gen_neon_vget_highv2di (rtx, rtx);
+extern rtx gen_neon_vget_lowv16qi (rtx, rtx);
+extern rtx gen_neon_vget_lowv8hi (rtx, rtx);
+extern rtx gen_neon_vget_lowv8hf (rtx, rtx);
+extern rtx gen_neon_vget_lowv8bf (rtx, rtx);
+extern rtx gen_neon_vget_lowv4si (rtx, rtx);
+extern rtx gen_neon_vget_lowv4sf (rtx, rtx);
+extern rtx gen_neon_vget_lowv2di (rtx, rtx);
+extern rtx gen_neon_vmul_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vmul_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vmul_nv2sf (rtx, rtx, rtx);
+extern rtx gen_neon_vmul_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vmul_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vmul_nv4sf (rtx, rtx, rtx);
+extern rtx gen_neon_vmul_nv8hf (rtx, rtx, rtx);
+extern rtx gen_neon_vmul_nv4hf (rtx, rtx, rtx);
+extern rtx gen_neon_vmulls_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vmulls_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vmullu_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vmullu_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqdmull_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqdmull_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqdmulh_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqdmulh_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmulh_nv4hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmulh_nv2si (rtx, rtx, rtx);
+extern rtx gen_neon_vqdmulh_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqdmulh_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmulh_nv8hi (rtx, rtx, rtx);
+extern rtx gen_neon_vqrdmulh_nv4si (rtx, rtx, rtx);
+extern rtx gen_neon_vmla_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmla_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmla_nv2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmla_nv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmla_nv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmla_nv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlals_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlals_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlalu_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlalu_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmlal_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmlal_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmls_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmls_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmls_nv2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmls_nv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmls_nv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmls_nv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsls_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlsls_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlslu_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vmlslu_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmlsl_nv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vqdmlsl_nv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv8qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv4hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv2si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv4hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv4bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv8bf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv2sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbsldi (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vbslv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtrnv8qi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtrnv16qi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtrnv4hi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtrnv8hi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtrnv2si_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtrnv4si_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtrnv2sf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtrnv4sf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtrnv8hf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vtrnv4hf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vzipv8qi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vzipv16qi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vzipv4hi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vzipv8hi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vzipv2si_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vzipv4si_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vzipv2sf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vzipv4sf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vzipv8hf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vzipv4hf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vuzpv8qi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vuzpv16qi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vuzpv4hi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vuzpv8hi_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vuzpv2si_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vuzpv4si_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vuzpv2sf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vuzpv4sf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vuzpv8hf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vuzpv4hf_internal (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_load_lanesv8qiv8qi (rtx, rtx);
+extern rtx gen_vec_load_lanesv16qiv16qi (rtx, rtx);
+extern rtx gen_vec_load_lanesv4hiv4hi (rtx, rtx);
+extern rtx gen_vec_load_lanesv8hiv8hi (rtx, rtx);
+extern rtx gen_vec_load_lanesv2siv2si (rtx, rtx);
+extern rtx gen_vec_load_lanesv4siv4si (rtx, rtx);
+extern rtx gen_vec_load_lanesv4hfv4hf (rtx, rtx);
+extern rtx gen_vec_load_lanesv8hfv8hf (rtx, rtx);
+extern rtx gen_vec_load_lanesv4bfv4bf (rtx, rtx);
+extern rtx gen_vec_load_lanesv8bfv8bf (rtx, rtx);
+extern rtx gen_vec_load_lanesv2sfv2sf (rtx, rtx);
+extern rtx gen_vec_load_lanesv4sfv4sf (rtx, rtx);
+extern rtx gen_vec_load_lanesdidi (rtx, rtx);
+extern rtx gen_vec_load_lanesv2div2di (rtx, rtx);
+extern rtx gen_neon_vld1_dupdi (rtx, rtx);
+extern rtx gen_vec_store_lanesv8qiv8qi (rtx, rtx);
+extern rtx gen_vec_store_lanesv16qiv16qi (rtx, rtx);
+extern rtx gen_vec_store_lanesv4hiv4hi (rtx, rtx);
+extern rtx gen_vec_store_lanesv8hiv8hi (rtx, rtx);
+extern rtx gen_vec_store_lanesv2siv2si (rtx, rtx);
+extern rtx gen_vec_store_lanesv4siv4si (rtx, rtx);
+extern rtx gen_vec_store_lanesv4hfv4hf (rtx, rtx);
+extern rtx gen_vec_store_lanesv8hfv8hf (rtx, rtx);
+extern rtx gen_vec_store_lanesv4bfv4bf (rtx, rtx);
+extern rtx gen_vec_store_lanesv8bfv8bf (rtx, rtx);
+extern rtx gen_vec_store_lanesv2sfv2sf (rtx, rtx);
+extern rtx gen_vec_store_lanesv4sfv4sf (rtx, rtx);
+extern rtx gen_vec_store_lanesdidi (rtx, rtx);
+extern rtx gen_vec_store_lanesv2div2di (rtx, rtx);
+extern rtx gen_vec_load_lanestiv8qi (rtx, rtx);
+extern rtx gen_vec_load_lanestiv4hi (rtx, rtx);
+extern rtx gen_vec_load_lanestiv4hf (rtx, rtx);
+extern rtx gen_vec_load_lanestiv4bf (rtx, rtx);
+extern rtx gen_vec_load_lanestiv2si (rtx, rtx);
+extern rtx gen_vec_load_lanestiv2sf (rtx, rtx);
+extern rtx gen_vec_load_lanestidi (rtx, rtx);
+extern rtx gen_vec_store_lanestiv8qi (rtx, rtx);
+extern rtx gen_vec_store_lanestiv4hi (rtx, rtx);
+extern rtx gen_vec_store_lanestiv4hf (rtx, rtx);
+extern rtx gen_vec_store_lanestiv4bf (rtx, rtx);
+extern rtx gen_vec_store_lanestiv2si (rtx, rtx);
+extern rtx gen_vec_store_lanestiv2sf (rtx, rtx);
+extern rtx gen_vec_store_lanestidi (rtx, rtx);
+extern rtx gen_vec_load_laneseiv8qi (rtx, rtx);
+extern rtx gen_vec_load_laneseiv4hi (rtx, rtx);
+extern rtx gen_vec_load_laneseiv4hf (rtx, rtx);
+extern rtx gen_vec_load_laneseiv4bf (rtx, rtx);
+extern rtx gen_vec_load_laneseiv2si (rtx, rtx);
+extern rtx gen_vec_load_laneseiv2sf (rtx, rtx);
+extern rtx gen_vec_load_laneseidi (rtx, rtx);
+extern rtx gen_vec_load_lanesciv16qi (rtx, rtx);
+extern rtx gen_vec_load_lanesciv8hi (rtx, rtx);
+extern rtx gen_vec_load_lanesciv8hf (rtx, rtx);
+extern rtx gen_vec_load_lanesciv4si (rtx, rtx);
+extern rtx gen_vec_load_lanesciv4sf (rtx, rtx);
+extern rtx gen_neon_vld3v16qi (rtx, rtx);
+extern rtx gen_neon_vld3v8hi (rtx, rtx);
+extern rtx gen_neon_vld3v8hf (rtx, rtx);
+extern rtx gen_neon_vld3v8bf (rtx, rtx);
+extern rtx gen_neon_vld3v4si (rtx, rtx);
+extern rtx gen_neon_vld3v4sf (rtx, rtx);
+extern rtx gen_vec_store_laneseiv8qi (rtx, rtx);
+extern rtx gen_vec_store_laneseiv4hi (rtx, rtx);
+extern rtx gen_vec_store_laneseiv4hf (rtx, rtx);
+extern rtx gen_vec_store_laneseiv4bf (rtx, rtx);
+extern rtx gen_vec_store_laneseiv2si (rtx, rtx);
+extern rtx gen_vec_store_laneseiv2sf (rtx, rtx);
+extern rtx gen_vec_store_laneseidi (rtx, rtx);
+extern rtx gen_vec_store_lanesciv16qi (rtx, rtx);
+extern rtx gen_vec_store_lanesciv8hi (rtx, rtx);
+extern rtx gen_vec_store_lanesciv8hf (rtx, rtx);
+extern rtx gen_vec_store_lanesciv4si (rtx, rtx);
+extern rtx gen_vec_store_lanesciv4sf (rtx, rtx);
+extern rtx gen_neon_vst3v16qi (rtx, rtx);
+extern rtx gen_neon_vst3v8hi (rtx, rtx);
+extern rtx gen_neon_vst3v8hf (rtx, rtx);
+extern rtx gen_neon_vst3v8bf (rtx, rtx);
+extern rtx gen_neon_vst3v4si (rtx, rtx);
+extern rtx gen_neon_vst3v4sf (rtx, rtx);
+extern rtx gen_vec_load_lanesoiv8qi (rtx, rtx);
+extern rtx gen_vec_load_lanesoiv4hi (rtx, rtx);
+extern rtx gen_vec_load_lanesoiv4hf (rtx, rtx);
+extern rtx gen_vec_load_lanesoiv4bf (rtx, rtx);
+extern rtx gen_vec_load_lanesoiv2si (rtx, rtx);
+extern rtx gen_vec_load_lanesoiv2sf (rtx, rtx);
+extern rtx gen_vec_load_lanesoidi (rtx, rtx);
+extern rtx gen_neon_vld4v16qi (rtx, rtx);
+extern rtx gen_neon_vld4v8hi (rtx, rtx);
+extern rtx gen_neon_vld4v8hf (rtx, rtx);
+extern rtx gen_neon_vld4v8bf (rtx, rtx);
+extern rtx gen_neon_vld4v4si (rtx, rtx);
+extern rtx gen_neon_vld4v4sf (rtx, rtx);
+extern rtx gen_vec_store_lanesoiv8qi (rtx, rtx);
+extern rtx gen_vec_store_lanesoiv4hi (rtx, rtx);
+extern rtx gen_vec_store_lanesoiv4hf (rtx, rtx);
+extern rtx gen_vec_store_lanesoiv4bf (rtx, rtx);
+extern rtx gen_vec_store_lanesoiv2si (rtx, rtx);
+extern rtx gen_vec_store_lanesoiv2sf (rtx, rtx);
+extern rtx gen_vec_store_lanesoidi (rtx, rtx);
+extern rtx gen_neon_vst4v16qi (rtx, rtx);
+extern rtx gen_neon_vst4v8hi (rtx, rtx);
+extern rtx gen_neon_vst4v8hf (rtx, rtx);
+extern rtx gen_neon_vst4v8bf (rtx, rtx);
+extern rtx gen_neon_vst4v4si (rtx, rtx);
+extern rtx gen_neon_vst4v4sf (rtx, rtx);
+extern rtx gen_vec_unpacks_hi_v16qi (rtx, rtx);
+extern rtx gen_vec_unpacku_hi_v16qi (rtx, rtx);
+extern rtx gen_vec_unpacks_hi_v8hi (rtx, rtx);
+extern rtx gen_vec_unpacku_hi_v8hi (rtx, rtx);
+extern rtx gen_vec_unpacks_hi_v4si (rtx, rtx);
+extern rtx gen_vec_unpacku_hi_v4si (rtx, rtx);
+extern rtx gen_vec_unpacks_lo_v16qi (rtx, rtx);
+extern rtx gen_vec_unpacku_lo_v16qi (rtx, rtx);
+extern rtx gen_vec_unpacks_lo_v8hi (rtx, rtx);
+extern rtx gen_vec_unpacku_lo_v8hi (rtx, rtx);
+extern rtx gen_vec_unpacks_lo_v4si (rtx, rtx);
+extern rtx gen_vec_unpacku_lo_v4si (rtx, rtx);
+extern rtx gen_vec_widen_smult_lo_v16qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_umult_lo_v16qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_smult_lo_v8hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_umult_lo_v8hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_smult_lo_v4si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_umult_lo_v4si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_smult_hi_v16qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_umult_hi_v16qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_smult_hi_v8hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_umult_hi_v8hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_smult_hi_v4si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_umult_hi_v4si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_sshiftl_lo_v16qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_ushiftl_lo_v16qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_sshiftl_lo_v8hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_ushiftl_lo_v8hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_sshiftl_lo_v4si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_ushiftl_lo_v4si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_sshiftl_hi_v16qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_ushiftl_hi_v16qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_sshiftl_hi_v8hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_ushiftl_hi_v8hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_sshiftl_hi_v4si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_ushiftl_hi_v4si (rtx, rtx, rtx);
+extern rtx gen_vec_unpacks_lo_v8qi (rtx, rtx);
+extern rtx gen_vec_unpacku_lo_v8qi (rtx, rtx);
+extern rtx gen_vec_unpacks_lo_v4hi (rtx, rtx);
+extern rtx gen_vec_unpacku_lo_v4hi (rtx, rtx);
+extern rtx gen_vec_unpacks_lo_v2si (rtx, rtx);
+extern rtx gen_vec_unpacku_lo_v2si (rtx, rtx);
+extern rtx gen_vec_unpacks_hi_v8qi (rtx, rtx);
+extern rtx gen_vec_unpacku_hi_v8qi (rtx, rtx);
+extern rtx gen_vec_unpacks_hi_v4hi (rtx, rtx);
+extern rtx gen_vec_unpacku_hi_v4hi (rtx, rtx);
+extern rtx gen_vec_unpacks_hi_v2si (rtx, rtx);
+extern rtx gen_vec_unpacku_hi_v2si (rtx, rtx);
+extern rtx gen_vec_widen_smult_hi_v8qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_umult_hi_v8qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_smult_hi_v4hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_umult_hi_v4hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_smult_hi_v2si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_umult_hi_v2si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_smult_lo_v8qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_umult_lo_v8qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_smult_lo_v4hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_umult_lo_v4hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_smult_lo_v2si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_umult_lo_v2si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_sshiftl_hi_v8qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_ushiftl_hi_v8qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_sshiftl_hi_v4hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_ushiftl_hi_v4hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_sshiftl_hi_v2si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_ushiftl_hi_v2si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_sshiftl_lo_v8qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_ushiftl_lo_v8qi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_sshiftl_lo_v4hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_ushiftl_lo_v4hi (rtx, rtx, rtx);
+extern rtx gen_vec_widen_sshiftl_lo_v2si (rtx, rtx, rtx);
+extern rtx gen_vec_widen_ushiftl_lo_v2si (rtx, rtx, rtx);
+extern rtx gen_vec_pack_trunc_v4hi (rtx, rtx, rtx);
+extern rtx gen_vec_pack_trunc_v2si (rtx, rtx, rtx);
+extern rtx gen_vec_pack_trunc_di (rtx, rtx, rtx);
+extern rtx gen_neon_vbfcvtbf (rtx, rtx);
+extern rtx gen_neon_vfmab_laneqv8bf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_neon_vfmat_laneqv8bf (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_crypto_aesd (rtx, rtx, rtx);
+extern rtx gen_crypto_aese (rtx, rtx, rtx);
+extern rtx gen_crypto_sha1h (rtx, rtx);
+extern rtx gen_crypto_sha1c (rtx, rtx, rtx, rtx);
+extern rtx gen_crypto_sha1m (rtx, rtx, rtx, rtx);
+extern rtx gen_crypto_sha1p (rtx, rtx, rtx, rtx);
+extern rtx gen_memory_barrier (void);
+extern rtx gen_atomic_loaddi (rtx, rtx, rtx);
+extern rtx gen_atomic_compare_and_swapqi (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_compare_and_swaphi (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_compare_and_swapsi (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_atomic_compare_and_swapdi (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_addv4qq3 (rtx, rtx, rtx);
+extern rtx gen_addv2hq3 (rtx, rtx, rtx);
+extern rtx gen_addv2ha3 (rtx, rtx, rtx);
+extern rtx gen_ssaddv4qq3 (rtx, rtx, rtx);
+extern rtx gen_ssaddv2hq3 (rtx, rtx, rtx);
+extern rtx gen_ssaddqq3 (rtx, rtx, rtx);
+extern rtx gen_ssaddhq3 (rtx, rtx, rtx);
+extern rtx gen_ssaddv2ha3 (rtx, rtx, rtx);
+extern rtx gen_ssaddha3 (rtx, rtx, rtx);
+extern rtx gen_ssaddsq3 (rtx, rtx, rtx);
+extern rtx gen_ssaddsa3 (rtx, rtx, rtx);
+extern rtx gen_subv4qq3 (rtx, rtx, rtx);
+extern rtx gen_subv2hq3 (rtx, rtx, rtx);
+extern rtx gen_subv2ha3 (rtx, rtx, rtx);
+extern rtx gen_sssubv4qq3 (rtx, rtx, rtx);
+extern rtx gen_sssubv2hq3 (rtx, rtx, rtx);
+extern rtx gen_sssubqq3 (rtx, rtx, rtx);
+extern rtx gen_sssubhq3 (rtx, rtx, rtx);
+extern rtx gen_sssubv2ha3 (rtx, rtx, rtx);
+extern rtx gen_sssubha3 (rtx, rtx, rtx);
+extern rtx gen_sssubsq3 (rtx, rtx, rtx);
+extern rtx gen_sssubsa3 (rtx, rtx, rtx);
+extern rtx gen_mulqq3 (rtx, rtx, rtx);
+extern rtx gen_mulhq3 (rtx, rtx, rtx);
+extern rtx gen_mulsq3 (rtx, rtx, rtx);
+extern rtx gen_mulsa3 (rtx, rtx, rtx);
+extern rtx gen_mulusa3 (rtx, rtx, rtx);
+extern rtx gen_ssmulsa3 (rtx, rtx, rtx);
+extern rtx gen_usmulusa3 (rtx, rtx, rtx);
+extern rtx gen_mulha3 (rtx, rtx, rtx);
+extern rtx gen_muluha3 (rtx, rtx, rtx);
+extern rtx gen_ssmulha3 (rtx, rtx, rtx);
+extern rtx gen_usmuluha3 (rtx, rtx, rtx);
+extern rtx gen_mve_vmvnq_sv16qi (rtx, rtx);
+extern rtx gen_mve_vmvnq_sv8hi (rtx, rtx);
+extern rtx gen_mve_vmvnq_sv4si (rtx, rtx);
+extern rtx gen_mve_vclzq_uv16qi (rtx, rtx);
+extern rtx gen_mve_vclzq_uv8hi (rtx, rtx);
+extern rtx gen_mve_vclzq_uv4si (rtx, rtx);
+extern rtx gen_mve_vandq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vandq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vandq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vbicq_sv4si (rtx, rtx, rtx);
+extern rtx gen_cadd90v16qi3 (rtx, rtx, rtx);
+extern rtx gen_cadd270v16qi3 (rtx, rtx, rtx);
+extern rtx gen_cadd90v8hi3 (rtx, rtx, rtx);
+extern rtx gen_cadd270v8hi3 (rtx, rtx, rtx);
+extern rtx gen_cadd90v4si3 (rtx, rtx, rtx);
+extern rtx gen_cadd270v4si3 (rtx, rtx, rtx);
+extern rtx gen_mve_veorq_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_veorq_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_veorq_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vornq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vornq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vornq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vorrq_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_vec_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_vec_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_vec_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_vec_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_vec_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_vec_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_carry_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_carry_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_carry_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_carry_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_carry_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_carry_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_sv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_p_sv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_p_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_p_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrbq_scatter_offset_p_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vld1q_fv8hf (rtx, rtx);
+extern rtx gen_mve_vld1q_fv4sf (rtx, rtx);
+extern rtx gen_mve_vld1q_sv16qi (rtx, rtx);
+extern rtx gen_mve_vld1q_uv16qi (rtx, rtx);
+extern rtx gen_mve_vld1q_sv8hi (rtx, rtx);
+extern rtx gen_mve_vld1q_uv8hi (rtx, rtx);
+extern rtx gen_mve_vld1q_sv4si (rtx, rtx);
+extern rtx gen_mve_vld1q_uv4si (rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_p_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_p_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_p_sv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_p_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_p_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_sv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vst1q_fv8hf (rtx, rtx);
+extern rtx gen_mve_vst1q_fv4sf (rtx, rtx);
+extern rtx gen_mve_vst1q_sv16qi (rtx, rtx);
+extern rtx gen_mve_vst1q_uv16qi (rtx, rtx);
+extern rtx gen_mve_vst1q_sv8hi (rtx, rtx);
+extern rtx gen_mve_vst1q_uv8hi (rtx, rtx);
+extern rtx gen_mve_vst1q_sv4si (rtx, rtx);
+extern rtx gen_mve_vst1q_uv4si (rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_offset_p_sv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_offset_p_uv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_offset_sv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_offset_uv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_shifted_offset_p_sv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_shifted_offset_p_uv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_shifted_offset_sv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vstrdq_scatter_shifted_offset_uv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_offset_p_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_fv8hf (rtx, rtx, rtx);
+extern rtx gen_mve_vstrhq_scatter_shifted_offset_p_fv8hf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_offset_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_offset_p_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_offset_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_offset_p_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_offset_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_offset_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_shifted_offset_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_shifted_offset_p_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_shifted_offset_p_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_shifted_offset_p_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_shifted_offset_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vstrwq_scatter_shifted_offset_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vidupq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vidupq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vidupq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vidupq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vidupq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vidupq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vddupq_n_uv16qi (rtx, rtx, rtx);
+extern rtx gen_mve_vddupq_n_uv8hi (rtx, rtx, rtx);
+extern rtx gen_mve_vddupq_n_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vddupq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vddupq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vddupq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_n_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_wb_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_wb_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_wb_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_m_wb_uv16qi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_m_wb_uv8hi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vdwdupq_m_wb_uv4si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_n_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_n_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_n_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_wb_uv16qi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_wb_uv8hi (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_wb_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_m_n_uv16qi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_m_n_uv8hi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_m_n_uv4si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_m_wb_uv16qi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_m_wb_uv8hi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_viwdupq_m_wb_uv4si (rtx, rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_wb_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_wb_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_nowb_sv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_nowb_uv4si (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_wb_z_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_wb_z_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_nowb_z_sv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_nowb_z_uv4si (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_wb_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_nowb_fv4sf (rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_wb_z_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrwq_gather_base_nowb_z_fv4sf (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_wb_sv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_wb_uv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_nowb_sv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_nowb_uv2di (rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_wb_z_sv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_wb_z_uv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_nowb_z_sv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vldrdq_gather_base_nowb_z_uv2di (rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_vec_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_vec_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_vec_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_vec_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_vec_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_vec_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_carry_sv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_carry_uv16qi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_carry_sv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_carry_uv8hi (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_carry_sv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_mve_vshlcq_m_carry_uv4si (rtx, rtx, rtx, rtx, rtx);
+extern rtx gen_movv16bi (rtx, rtx);
+extern rtx gen_movv8bi (rtx, rtx);
+extern rtx gen_movv4bi (rtx, rtx);
+extern rtx gen_movv2qi (rtx, rtx);
+extern rtx gen_vec_cmpv16qiv16bi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpv8hiv8bi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpv4siv4bi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpv8hfv8bi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpv4sfv4bi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpuv16qiv16bi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpuv8hiv8bi (rtx, rtx, rtx, rtx);
+extern rtx gen_vec_cmpuv4siv4bi (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v16qiv16bi (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v8hiv8bi (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v4siv4bi (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v8hfv8bi (rtx, rtx, rtx, rtx);
+extern rtx gen_vcond_mask_v4sfv4bi (rtx, rtx, rtx, rtx);
+
+#endif /* GCC_INSN_FLAGS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-modes-inline.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-modes-inline.h
new file mode 100644
index 0000000..e949bc4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-modes-inline.h
@@ -0,0 +1,605 @@
+/* Generated automatically from machmode.def and config/arm/arm-modes.def
+ by genmodes. */
+
+#ifndef GCC_INSN_MODES_INLINE_H
+#define GCC_INSN_MODES_INLINE_H
+
+#if !defined (USED_FOR_TARGET) && GCC_VERSION >= 4001
+
+#ifdef __cplusplus
+inline __attribute__((__always_inline__))
+#else
+extern __inline__ __attribute__((__always_inline__, __gnu_inline__))
+#endif
+poly_uint16
+mode_size_inline (machine_mode mode)
+{
+ extern const poly_uint16_pod mode_size[NUM_MACHINE_MODES];
+ gcc_assert (mode >= 0 && mode < NUM_MACHINE_MODES);
+ switch (mode)
+ {
+ case E_VOIDmode: return 0;
+ case E_BLKmode: return 0;
+ case E_CCmode: return 4;
+ case E_CC_NZmode: return 4;
+ case E_CC_Zmode: return 4;
+ case E_CC_NVmode: return 4;
+ case E_CC_SWPmode: return 4;
+ case E_CC_RSBmode: return 4;
+ case E_CCFPmode: return 4;
+ case E_CCFPEmode: return 4;
+ case E_CC_DNEmode: return 4;
+ case E_CC_DEQmode: return 4;
+ case E_CC_DLEmode: return 4;
+ case E_CC_DLTmode: return 4;
+ case E_CC_DGEmode: return 4;
+ case E_CC_DGTmode: return 4;
+ case E_CC_DLEUmode: return 4;
+ case E_CC_DLTUmode: return 4;
+ case E_CC_DGEUmode: return 4;
+ case E_CC_DGTUmode: return 4;
+ case E_CC_Cmode: return 4;
+ case E_CC_Bmode: return 4;
+ case E_CC_Nmode: return 4;
+ case E_CC_Vmode: return 4;
+ case E_CC_ADCmode: return 4;
+ case E_BImode: return 1;
+ case E_B2Imode: return 1;
+ case E_B4Imode: return 1;
+ case E_QImode: return 1;
+ case E_HImode: return 2;
+ case E_SImode: return 4;
+ case E_DImode: return 8;
+ case E_TImode: return 16;
+ case E_EImode: return 24;
+ case E_OImode: return 32;
+ case E_CImode: return 48;
+ case E_XImode: return 64;
+ case E_QQmode: return 1;
+ case E_HQmode: return 2;
+ case E_SQmode: return 4;
+ case E_DQmode: return 8;
+ case E_TQmode: return 16;
+ case E_UQQmode: return 1;
+ case E_UHQmode: return 2;
+ case E_USQmode: return 4;
+ case E_UDQmode: return 8;
+ case E_UTQmode: return 16;
+ case E_HAmode: return 2;
+ case E_SAmode: return 4;
+ case E_DAmode: return 8;
+ case E_TAmode: return 16;
+ case E_UHAmode: return 2;
+ case E_USAmode: return 4;
+ case E_UDAmode: return 8;
+ case E_UTAmode: return 16;
+ case E_HFmode: return 2;
+ case E_BFmode: return 2;
+ case E_SFmode: return 4;
+ case E_DFmode: return 8;
+ case E_SDmode: return 4;
+ case E_DDmode: return 8;
+ case E_TDmode: return 16;
+ case E_CQImode: return 2;
+ case E_CHImode: return 4;
+ case E_CSImode: return 8;
+ case E_CDImode: return 16;
+ case E_CTImode: return 32;
+ case E_CEImode: return 48;
+ case E_COImode: return 64;
+ case E_CCImode: return 96;
+ case E_CXImode: return 128;
+ case E_BCmode: return 4;
+ case E_HCmode: return 4;
+ case E_SCmode: return 8;
+ case E_DCmode: return 16;
+ case E_V16BImode: return 2;
+ case E_V8BImode: return 2;
+ case E_V4BImode: return 2;
+ case E_V2QImode: return 2;
+ case E_V4QImode: return 4;
+ case E_V2HImode: return 4;
+ case E_V8QImode: return 8;
+ case E_V4HImode: return 8;
+ case E_V2SImode: return 8;
+ case E_V16QImode: return 16;
+ case E_V8HImode: return 16;
+ case E_V4SImode: return 16;
+ case E_V2DImode: return 16;
+ case E_V4QQmode: return 4;
+ case E_V2HQmode: return 4;
+ case E_V4UQQmode: return 4;
+ case E_V2UHQmode: return 4;
+ case E_V2HAmode: return 4;
+ case E_V2UHAmode: return 4;
+ case E_V2HFmode: return 4;
+ case E_V2BFmode: return 4;
+ case E_V4HFmode: return 8;
+ case E_V4BFmode: return 8;
+ case E_V2SFmode: return 8;
+ case E_V8HFmode: return 16;
+ case E_V8BFmode: return 16;
+ case E_V4SFmode: return 16;
+ case E_V2DFmode: return 16;
+ default: return mode_size[mode];
+ }
+}
+
+#ifdef __cplusplus
+inline __attribute__((__always_inline__))
+#else
+extern __inline__ __attribute__((__always_inline__, __gnu_inline__))
+#endif
+poly_uint16
+mode_nunits_inline (machine_mode mode)
+{
+ extern const poly_uint16_pod mode_nunits[NUM_MACHINE_MODES];
+ switch (mode)
+ {
+ case E_VOIDmode: return 0;
+ case E_BLKmode: return 0;
+ case E_CCmode: return 1;
+ case E_CC_NZmode: return 1;
+ case E_CC_Zmode: return 1;
+ case E_CC_NVmode: return 1;
+ case E_CC_SWPmode: return 1;
+ case E_CC_RSBmode: return 1;
+ case E_CCFPmode: return 1;
+ case E_CCFPEmode: return 1;
+ case E_CC_DNEmode: return 1;
+ case E_CC_DEQmode: return 1;
+ case E_CC_DLEmode: return 1;
+ case E_CC_DLTmode: return 1;
+ case E_CC_DGEmode: return 1;
+ case E_CC_DGTmode: return 1;
+ case E_CC_DLEUmode: return 1;
+ case E_CC_DLTUmode: return 1;
+ case E_CC_DGEUmode: return 1;
+ case E_CC_DGTUmode: return 1;
+ case E_CC_Cmode: return 1;
+ case E_CC_Bmode: return 1;
+ case E_CC_Nmode: return 1;
+ case E_CC_Vmode: return 1;
+ case E_CC_ADCmode: return 1;
+ case E_BImode: return 1;
+ case E_B2Imode: return 1;
+ case E_B4Imode: return 1;
+ case E_QImode: return 1;
+ case E_HImode: return 1;
+ case E_SImode: return 1;
+ case E_DImode: return 1;
+ case E_TImode: return 1;
+ case E_EImode: return 1;
+ case E_OImode: return 1;
+ case E_CImode: return 1;
+ case E_XImode: return 1;
+ case E_QQmode: return 1;
+ case E_HQmode: return 1;
+ case E_SQmode: return 1;
+ case E_DQmode: return 1;
+ case E_TQmode: return 1;
+ case E_UQQmode: return 1;
+ case E_UHQmode: return 1;
+ case E_USQmode: return 1;
+ case E_UDQmode: return 1;
+ case E_UTQmode: return 1;
+ case E_HAmode: return 1;
+ case E_SAmode: return 1;
+ case E_DAmode: return 1;
+ case E_TAmode: return 1;
+ case E_UHAmode: return 1;
+ case E_USAmode: return 1;
+ case E_UDAmode: return 1;
+ case E_UTAmode: return 1;
+ case E_HFmode: return 1;
+ case E_BFmode: return 1;
+ case E_SFmode: return 1;
+ case E_DFmode: return 1;
+ case E_SDmode: return 1;
+ case E_DDmode: return 1;
+ case E_TDmode: return 1;
+ case E_CQImode: return 2;
+ case E_CHImode: return 2;
+ case E_CSImode: return 2;
+ case E_CDImode: return 2;
+ case E_CTImode: return 2;
+ case E_CEImode: return 2;
+ case E_COImode: return 2;
+ case E_CCImode: return 2;
+ case E_CXImode: return 2;
+ case E_BCmode: return 2;
+ case E_HCmode: return 2;
+ case E_SCmode: return 2;
+ case E_DCmode: return 2;
+ case E_V16BImode: return 16;
+ case E_V8BImode: return 8;
+ case E_V4BImode: return 4;
+ case E_V2QImode: return 2;
+ case E_V4QImode: return 4;
+ case E_V2HImode: return 2;
+ case E_V8QImode: return 8;
+ case E_V4HImode: return 4;
+ case E_V2SImode: return 2;
+ case E_V16QImode: return 16;
+ case E_V8HImode: return 8;
+ case E_V4SImode: return 4;
+ case E_V2DImode: return 2;
+ case E_V4QQmode: return 4;
+ case E_V2HQmode: return 2;
+ case E_V4UQQmode: return 4;
+ case E_V2UHQmode: return 2;
+ case E_V2HAmode: return 2;
+ case E_V2UHAmode: return 2;
+ case E_V2HFmode: return 2;
+ case E_V2BFmode: return 2;
+ case E_V4HFmode: return 4;
+ case E_V4BFmode: return 4;
+ case E_V2SFmode: return 2;
+ case E_V8HFmode: return 8;
+ case E_V8BFmode: return 8;
+ case E_V4SFmode: return 4;
+ case E_V2DFmode: return 2;
+ default: return mode_nunits[mode];
+ }
+}
+
+#ifdef __cplusplus
+inline __attribute__((__always_inline__))
+#else
+extern __inline__ __attribute__((__always_inline__, __gnu_inline__))
+#endif
+unsigned char
+mode_inner_inline (machine_mode mode)
+{
+ extern const unsigned char mode_inner[NUM_MACHINE_MODES];
+ gcc_assert (mode >= 0 && mode < NUM_MACHINE_MODES);
+ switch (mode)
+ {
+ case E_VOIDmode: return E_VOIDmode;
+ case E_BLKmode: return E_BLKmode;
+ case E_CCmode: return E_CCmode;
+ case E_CC_NZmode: return E_CC_NZmode;
+ case E_CC_Zmode: return E_CC_Zmode;
+ case E_CC_NVmode: return E_CC_NVmode;
+ case E_CC_SWPmode: return E_CC_SWPmode;
+ case E_CC_RSBmode: return E_CC_RSBmode;
+ case E_CCFPmode: return E_CCFPmode;
+ case E_CCFPEmode: return E_CCFPEmode;
+ case E_CC_DNEmode: return E_CC_DNEmode;
+ case E_CC_DEQmode: return E_CC_DEQmode;
+ case E_CC_DLEmode: return E_CC_DLEmode;
+ case E_CC_DLTmode: return E_CC_DLTmode;
+ case E_CC_DGEmode: return E_CC_DGEmode;
+ case E_CC_DGTmode: return E_CC_DGTmode;
+ case E_CC_DLEUmode: return E_CC_DLEUmode;
+ case E_CC_DLTUmode: return E_CC_DLTUmode;
+ case E_CC_DGEUmode: return E_CC_DGEUmode;
+ case E_CC_DGTUmode: return E_CC_DGTUmode;
+ case E_CC_Cmode: return E_CC_Cmode;
+ case E_CC_Bmode: return E_CC_Bmode;
+ case E_CC_Nmode: return E_CC_Nmode;
+ case E_CC_Vmode: return E_CC_Vmode;
+ case E_CC_ADCmode: return E_CC_ADCmode;
+ case E_BImode: return E_BImode;
+ case E_B2Imode: return E_B2Imode;
+ case E_B4Imode: return E_B4Imode;
+ case E_QImode: return E_QImode;
+ case E_HImode: return E_HImode;
+ case E_SImode: return E_SImode;
+ case E_DImode: return E_DImode;
+ case E_TImode: return E_TImode;
+ case E_EImode: return E_EImode;
+ case E_OImode: return E_OImode;
+ case E_CImode: return E_CImode;
+ case E_XImode: return E_XImode;
+ case E_QQmode: return E_QQmode;
+ case E_HQmode: return E_HQmode;
+ case E_SQmode: return E_SQmode;
+ case E_DQmode: return E_DQmode;
+ case E_TQmode: return E_TQmode;
+ case E_UQQmode: return E_UQQmode;
+ case E_UHQmode: return E_UHQmode;
+ case E_USQmode: return E_USQmode;
+ case E_UDQmode: return E_UDQmode;
+ case E_UTQmode: return E_UTQmode;
+ case E_HAmode: return E_HAmode;
+ case E_SAmode: return E_SAmode;
+ case E_DAmode: return E_DAmode;
+ case E_TAmode: return E_TAmode;
+ case E_UHAmode: return E_UHAmode;
+ case E_USAmode: return E_USAmode;
+ case E_UDAmode: return E_UDAmode;
+ case E_UTAmode: return E_UTAmode;
+ case E_HFmode: return E_HFmode;
+ case E_BFmode: return E_BFmode;
+ case E_SFmode: return E_SFmode;
+ case E_DFmode: return E_DFmode;
+ case E_SDmode: return E_SDmode;
+ case E_DDmode: return E_DDmode;
+ case E_TDmode: return E_TDmode;
+ case E_CQImode: return E_QImode;
+ case E_CHImode: return E_HImode;
+ case E_CSImode: return E_SImode;
+ case E_CDImode: return E_DImode;
+ case E_CTImode: return E_TImode;
+ case E_CEImode: return E_EImode;
+ case E_COImode: return E_OImode;
+ case E_CCImode: return E_CImode;
+ case E_CXImode: return E_XImode;
+ case E_BCmode: return E_BFmode;
+ case E_HCmode: return E_HFmode;
+ case E_SCmode: return E_SFmode;
+ case E_DCmode: return E_DFmode;
+ case E_V16BImode: return E_BImode;
+ case E_V8BImode: return E_B2Imode;
+ case E_V4BImode: return E_B4Imode;
+ case E_V2QImode: return E_QImode;
+ case E_V4QImode: return E_QImode;
+ case E_V2HImode: return E_HImode;
+ case E_V8QImode: return E_QImode;
+ case E_V4HImode: return E_HImode;
+ case E_V2SImode: return E_SImode;
+ case E_V16QImode: return E_QImode;
+ case E_V8HImode: return E_HImode;
+ case E_V4SImode: return E_SImode;
+ case E_V2DImode: return E_DImode;
+ case E_V4QQmode: return E_QQmode;
+ case E_V2HQmode: return E_HQmode;
+ case E_V4UQQmode: return E_UQQmode;
+ case E_V2UHQmode: return E_UHQmode;
+ case E_V2HAmode: return E_HAmode;
+ case E_V2UHAmode: return E_UHAmode;
+ case E_V2HFmode: return E_HFmode;
+ case E_V2BFmode: return E_BFmode;
+ case E_V4HFmode: return E_HFmode;
+ case E_V4BFmode: return E_BFmode;
+ case E_V2SFmode: return E_SFmode;
+ case E_V8HFmode: return E_HFmode;
+ case E_V8BFmode: return E_BFmode;
+ case E_V4SFmode: return E_SFmode;
+ case E_V2DFmode: return E_DFmode;
+ default: return mode_inner[mode];
+ }
+}
+
+#ifdef __cplusplus
+inline __attribute__((__always_inline__))
+#else
+extern __inline__ __attribute__((__always_inline__, __gnu_inline__))
+#endif
+unsigned char
+mode_unit_size_inline (machine_mode mode)
+{
+ extern CONST_MODE_UNIT_SIZE unsigned char mode_unit_size[NUM_MACHINE_MODES];
+ gcc_assert (mode >= 0 && mode < NUM_MACHINE_MODES);
+ switch (mode)
+ {
+ case E_VOIDmode: return 0;
+ case E_BLKmode: return 0;
+ case E_CCmode: return 4;
+ case E_CC_NZmode: return 4;
+ case E_CC_Zmode: return 4;
+ case E_CC_NVmode: return 4;
+ case E_CC_SWPmode: return 4;
+ case E_CC_RSBmode: return 4;
+ case E_CCFPmode: return 4;
+ case E_CCFPEmode: return 4;
+ case E_CC_DNEmode: return 4;
+ case E_CC_DEQmode: return 4;
+ case E_CC_DLEmode: return 4;
+ case E_CC_DLTmode: return 4;
+ case E_CC_DGEmode: return 4;
+ case E_CC_DGTmode: return 4;
+ case E_CC_DLEUmode: return 4;
+ case E_CC_DLTUmode: return 4;
+ case E_CC_DGEUmode: return 4;
+ case E_CC_DGTUmode: return 4;
+ case E_CC_Cmode: return 4;
+ case E_CC_Bmode: return 4;
+ case E_CC_Nmode: return 4;
+ case E_CC_Vmode: return 4;
+ case E_CC_ADCmode: return 4;
+ case E_BImode: return 1;
+ case E_B2Imode: return 1;
+ case E_B4Imode: return 1;
+ case E_QImode: return 1;
+ case E_HImode: return 2;
+ case E_SImode: return 4;
+ case E_DImode: return 8;
+ case E_TImode: return 16;
+ case E_EImode: return 24;
+ case E_OImode: return 32;
+ case E_CImode: return 48;
+ case E_XImode: return 64;
+ case E_QQmode: return 1;
+ case E_HQmode: return 2;
+ case E_SQmode: return 4;
+ case E_DQmode: return 8;
+ case E_TQmode: return 16;
+ case E_UQQmode: return 1;
+ case E_UHQmode: return 2;
+ case E_USQmode: return 4;
+ case E_UDQmode: return 8;
+ case E_UTQmode: return 16;
+ case E_HAmode: return 2;
+ case E_SAmode: return 4;
+ case E_DAmode: return 8;
+ case E_TAmode: return 16;
+ case E_UHAmode: return 2;
+ case E_USAmode: return 4;
+ case E_UDAmode: return 8;
+ case E_UTAmode: return 16;
+ case E_HFmode: return 2;
+ case E_BFmode: return 2;
+ case E_SFmode: return 4;
+ case E_DFmode: return 8;
+ case E_SDmode: return 4;
+ case E_DDmode: return 8;
+ case E_TDmode: return 16;
+ case E_CQImode: return 1;
+ case E_CHImode: return 2;
+ case E_CSImode: return 4;
+ case E_CDImode: return 8;
+ case E_CTImode: return 16;
+ case E_CEImode: return 24;
+ case E_COImode: return 32;
+ case E_CCImode: return 48;
+ case E_CXImode: return 64;
+ case E_BCmode: return 2;
+ case E_HCmode: return 2;
+ case E_SCmode: return 4;
+ case E_DCmode: return 8;
+ case E_V16BImode: return 1;
+ case E_V8BImode: return 1;
+ case E_V4BImode: return 1;
+ case E_V2QImode: return 1;
+ case E_V4QImode: return 1;
+ case E_V2HImode: return 2;
+ case E_V8QImode: return 1;
+ case E_V4HImode: return 2;
+ case E_V2SImode: return 4;
+ case E_V16QImode: return 1;
+ case E_V8HImode: return 2;
+ case E_V4SImode: return 4;
+ case E_V2DImode: return 8;
+ case E_V4QQmode: return 1;
+ case E_V2HQmode: return 2;
+ case E_V4UQQmode: return 1;
+ case E_V2UHQmode: return 2;
+ case E_V2HAmode: return 2;
+ case E_V2UHAmode: return 2;
+ case E_V2HFmode: return 2;
+ case E_V2BFmode: return 2;
+ case E_V4HFmode: return 2;
+ case E_V4BFmode: return 2;
+ case E_V2SFmode: return 4;
+ case E_V8HFmode: return 2;
+ case E_V8BFmode: return 2;
+ case E_V4SFmode: return 4;
+ case E_V2DFmode: return 8;
+ default: return mode_unit_size[mode];
+ }
+}
+
+#ifdef __cplusplus
+inline __attribute__((__always_inline__))
+#else
+extern __inline__ __attribute__((__always_inline__, __gnu_inline__))
+#endif
+unsigned short
+mode_unit_precision_inline (machine_mode mode)
+{
+ extern const unsigned short mode_unit_precision[NUM_MACHINE_MODES];
+ gcc_assert (mode >= 0 && mode < NUM_MACHINE_MODES);
+ switch (mode)
+ {
+ case E_VOIDmode: return 0;
+ case E_BLKmode: return 0;
+ case E_CCmode: return 4*BITS_PER_UNIT;
+ case E_CC_NZmode: return 4*BITS_PER_UNIT;
+ case E_CC_Zmode: return 4*BITS_PER_UNIT;
+ case E_CC_NVmode: return 4*BITS_PER_UNIT;
+ case E_CC_SWPmode: return 4*BITS_PER_UNIT;
+ case E_CC_RSBmode: return 4*BITS_PER_UNIT;
+ case E_CCFPmode: return 4*BITS_PER_UNIT;
+ case E_CCFPEmode: return 4*BITS_PER_UNIT;
+ case E_CC_DNEmode: return 4*BITS_PER_UNIT;
+ case E_CC_DEQmode: return 4*BITS_PER_UNIT;
+ case E_CC_DLEmode: return 4*BITS_PER_UNIT;
+ case E_CC_DLTmode: return 4*BITS_PER_UNIT;
+ case E_CC_DGEmode: return 4*BITS_PER_UNIT;
+ case E_CC_DGTmode: return 4*BITS_PER_UNIT;
+ case E_CC_DLEUmode: return 4*BITS_PER_UNIT;
+ case E_CC_DLTUmode: return 4*BITS_PER_UNIT;
+ case E_CC_DGEUmode: return 4*BITS_PER_UNIT;
+ case E_CC_DGTUmode: return 4*BITS_PER_UNIT;
+ case E_CC_Cmode: return 4*BITS_PER_UNIT;
+ case E_CC_Bmode: return 4*BITS_PER_UNIT;
+ case E_CC_Nmode: return 4*BITS_PER_UNIT;
+ case E_CC_Vmode: return 4*BITS_PER_UNIT;
+ case E_CC_ADCmode: return 4*BITS_PER_UNIT;
+ case E_BImode: return 1;
+ case E_B2Imode: return 2;
+ case E_B4Imode: return 4;
+ case E_QImode: return 1*BITS_PER_UNIT;
+ case E_HImode: return 2*BITS_PER_UNIT;
+ case E_SImode: return 4*BITS_PER_UNIT;
+ case E_DImode: return 8*BITS_PER_UNIT;
+ case E_TImode: return 16*BITS_PER_UNIT;
+ case E_EImode: return 24*BITS_PER_UNIT;
+ case E_OImode: return 32*BITS_PER_UNIT;
+ case E_CImode: return 48*BITS_PER_UNIT;
+ case E_XImode: return 64*BITS_PER_UNIT;
+ case E_QQmode: return 1*BITS_PER_UNIT;
+ case E_HQmode: return 2*BITS_PER_UNIT;
+ case E_SQmode: return 4*BITS_PER_UNIT;
+ case E_DQmode: return 8*BITS_PER_UNIT;
+ case E_TQmode: return 16*BITS_PER_UNIT;
+ case E_UQQmode: return 1*BITS_PER_UNIT;
+ case E_UHQmode: return 2*BITS_PER_UNIT;
+ case E_USQmode: return 4*BITS_PER_UNIT;
+ case E_UDQmode: return 8*BITS_PER_UNIT;
+ case E_UTQmode: return 16*BITS_PER_UNIT;
+ case E_HAmode: return 2*BITS_PER_UNIT;
+ case E_SAmode: return 4*BITS_PER_UNIT;
+ case E_DAmode: return 8*BITS_PER_UNIT;
+ case E_TAmode: return 16*BITS_PER_UNIT;
+ case E_UHAmode: return 2*BITS_PER_UNIT;
+ case E_USAmode: return 4*BITS_PER_UNIT;
+ case E_UDAmode: return 8*BITS_PER_UNIT;
+ case E_UTAmode: return 16*BITS_PER_UNIT;
+ case E_HFmode: return 2*BITS_PER_UNIT;
+ case E_BFmode: return 2*BITS_PER_UNIT;
+ case E_SFmode: return 4*BITS_PER_UNIT;
+ case E_DFmode: return 8*BITS_PER_UNIT;
+ case E_SDmode: return 4*BITS_PER_UNIT;
+ case E_DDmode: return 8*BITS_PER_UNIT;
+ case E_TDmode: return 16*BITS_PER_UNIT;
+ case E_CQImode: return 1*BITS_PER_UNIT;
+ case E_CHImode: return 2*BITS_PER_UNIT;
+ case E_CSImode: return 4*BITS_PER_UNIT;
+ case E_CDImode: return 8*BITS_PER_UNIT;
+ case E_CTImode: return 16*BITS_PER_UNIT;
+ case E_CEImode: return 24*BITS_PER_UNIT;
+ case E_COImode: return 32*BITS_PER_UNIT;
+ case E_CCImode: return 48*BITS_PER_UNIT;
+ case E_CXImode: return 64*BITS_PER_UNIT;
+ case E_BCmode: return 2*BITS_PER_UNIT;
+ case E_HCmode: return 2*BITS_PER_UNIT;
+ case E_SCmode: return 4*BITS_PER_UNIT;
+ case E_DCmode: return 8*BITS_PER_UNIT;
+ case E_V16BImode: return 1;
+ case E_V8BImode: return 2;
+ case E_V4BImode: return 4;
+ case E_V2QImode: return 1*BITS_PER_UNIT;
+ case E_V4QImode: return 1*BITS_PER_UNIT;
+ case E_V2HImode: return 2*BITS_PER_UNIT;
+ case E_V8QImode: return 1*BITS_PER_UNIT;
+ case E_V4HImode: return 2*BITS_PER_UNIT;
+ case E_V2SImode: return 4*BITS_PER_UNIT;
+ case E_V16QImode: return 1*BITS_PER_UNIT;
+ case E_V8HImode: return 2*BITS_PER_UNIT;
+ case E_V4SImode: return 4*BITS_PER_UNIT;
+ case E_V2DImode: return 8*BITS_PER_UNIT;
+ case E_V4QQmode: return 1*BITS_PER_UNIT;
+ case E_V2HQmode: return 2*BITS_PER_UNIT;
+ case E_V4UQQmode: return 1*BITS_PER_UNIT;
+ case E_V2UHQmode: return 2*BITS_PER_UNIT;
+ case E_V2HAmode: return 2*BITS_PER_UNIT;
+ case E_V2UHAmode: return 2*BITS_PER_UNIT;
+ case E_V2HFmode: return 2*BITS_PER_UNIT;
+ case E_V2BFmode: return 2*BITS_PER_UNIT;
+ case E_V4HFmode: return 2*BITS_PER_UNIT;
+ case E_V4BFmode: return 2*BITS_PER_UNIT;
+ case E_V2SFmode: return 4*BITS_PER_UNIT;
+ case E_V8HFmode: return 2*BITS_PER_UNIT;
+ case E_V8BFmode: return 2*BITS_PER_UNIT;
+ case E_V4SFmode: return 4*BITS_PER_UNIT;
+ case E_V2DFmode: return 8*BITS_PER_UNIT;
+ default: return mode_unit_precision[mode];
+ }
+}
+
+#endif /* GCC_VERSION >= 4001 */
+
+#endif /* insn-modes-inline.h */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-modes.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-modes.h
new file mode 100644
index 0000000..d473711
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-modes.h
@@ -0,0 +1,834 @@
+/* Generated automatically from machmode.def and config/arm/arm-modes.def
+ by genmodes. */
+
+#ifndef GCC_INSN_MODES_H
+#define GCC_INSN_MODES_H
+
+enum machine_mode
+{
+ E_VOIDmode, /* machmode.def:194 */
+#define HAVE_VOIDmode
+#ifdef USE_ENUM_MODES
+#define VOIDmode E_VOIDmode
+#else
+#define VOIDmode ((void) 0, E_VOIDmode)
+#endif
+ E_BLKmode, /* machmode.def:198 */
+#define HAVE_BLKmode
+#ifdef USE_ENUM_MODES
+#define BLKmode E_BLKmode
+#else
+#define BLKmode ((void) 0, E_BLKmode)
+#endif
+ E_CCmode, /* machmode.def:236 */
+#define HAVE_CCmode
+#ifdef USE_ENUM_MODES
+#define CCmode E_CCmode
+#else
+#define CCmode ((void) 0, E_CCmode)
+#endif
+ E_CC_NZmode, /* config/arm/arm-modes.def:50 */
+#define HAVE_CC_NZmode
+#ifdef USE_ENUM_MODES
+#define CC_NZmode E_CC_NZmode
+#else
+#define CC_NZmode ((void) 0, E_CC_NZmode)
+#endif
+ E_CC_Zmode, /* config/arm/arm-modes.def:51 */
+#define HAVE_CC_Zmode
+#ifdef USE_ENUM_MODES
+#define CC_Zmode E_CC_Zmode
+#else
+#define CC_Zmode ((void) 0, E_CC_Zmode)
+#endif
+ E_CC_NVmode, /* config/arm/arm-modes.def:52 */
+#define HAVE_CC_NVmode
+#ifdef USE_ENUM_MODES
+#define CC_NVmode E_CC_NVmode
+#else
+#define CC_NVmode ((void) 0, E_CC_NVmode)
+#endif
+ E_CC_SWPmode, /* config/arm/arm-modes.def:53 */
+#define HAVE_CC_SWPmode
+#ifdef USE_ENUM_MODES
+#define CC_SWPmode E_CC_SWPmode
+#else
+#define CC_SWPmode ((void) 0, E_CC_SWPmode)
+#endif
+ E_CC_RSBmode, /* config/arm/arm-modes.def:54 */
+#define HAVE_CC_RSBmode
+#ifdef USE_ENUM_MODES
+#define CC_RSBmode E_CC_RSBmode
+#else
+#define CC_RSBmode ((void) 0, E_CC_RSBmode)
+#endif
+ E_CCFPmode, /* config/arm/arm-modes.def:55 */
+#define HAVE_CCFPmode
+#ifdef USE_ENUM_MODES
+#define CCFPmode E_CCFPmode
+#else
+#define CCFPmode ((void) 0, E_CCFPmode)
+#endif
+ E_CCFPEmode, /* config/arm/arm-modes.def:56 */
+#define HAVE_CCFPEmode
+#ifdef USE_ENUM_MODES
+#define CCFPEmode E_CCFPEmode
+#else
+#define CCFPEmode ((void) 0, E_CCFPEmode)
+#endif
+ E_CC_DNEmode, /* config/arm/arm-modes.def:57 */
+#define HAVE_CC_DNEmode
+#ifdef USE_ENUM_MODES
+#define CC_DNEmode E_CC_DNEmode
+#else
+#define CC_DNEmode ((void) 0, E_CC_DNEmode)
+#endif
+ E_CC_DEQmode, /* config/arm/arm-modes.def:58 */
+#define HAVE_CC_DEQmode
+#ifdef USE_ENUM_MODES
+#define CC_DEQmode E_CC_DEQmode
+#else
+#define CC_DEQmode ((void) 0, E_CC_DEQmode)
+#endif
+ E_CC_DLEmode, /* config/arm/arm-modes.def:59 */
+#define HAVE_CC_DLEmode
+#ifdef USE_ENUM_MODES
+#define CC_DLEmode E_CC_DLEmode
+#else
+#define CC_DLEmode ((void) 0, E_CC_DLEmode)
+#endif
+ E_CC_DLTmode, /* config/arm/arm-modes.def:60 */
+#define HAVE_CC_DLTmode
+#ifdef USE_ENUM_MODES
+#define CC_DLTmode E_CC_DLTmode
+#else
+#define CC_DLTmode ((void) 0, E_CC_DLTmode)
+#endif
+ E_CC_DGEmode, /* config/arm/arm-modes.def:61 */
+#define HAVE_CC_DGEmode
+#ifdef USE_ENUM_MODES
+#define CC_DGEmode E_CC_DGEmode
+#else
+#define CC_DGEmode ((void) 0, E_CC_DGEmode)
+#endif
+ E_CC_DGTmode, /* config/arm/arm-modes.def:62 */
+#define HAVE_CC_DGTmode
+#ifdef USE_ENUM_MODES
+#define CC_DGTmode E_CC_DGTmode
+#else
+#define CC_DGTmode ((void) 0, E_CC_DGTmode)
+#endif
+ E_CC_DLEUmode, /* config/arm/arm-modes.def:63 */
+#define HAVE_CC_DLEUmode
+#ifdef USE_ENUM_MODES
+#define CC_DLEUmode E_CC_DLEUmode
+#else
+#define CC_DLEUmode ((void) 0, E_CC_DLEUmode)
+#endif
+ E_CC_DLTUmode, /* config/arm/arm-modes.def:64 */
+#define HAVE_CC_DLTUmode
+#ifdef USE_ENUM_MODES
+#define CC_DLTUmode E_CC_DLTUmode
+#else
+#define CC_DLTUmode ((void) 0, E_CC_DLTUmode)
+#endif
+ E_CC_DGEUmode, /* config/arm/arm-modes.def:65 */
+#define HAVE_CC_DGEUmode
+#ifdef USE_ENUM_MODES
+#define CC_DGEUmode E_CC_DGEUmode
+#else
+#define CC_DGEUmode ((void) 0, E_CC_DGEUmode)
+#endif
+ E_CC_DGTUmode, /* config/arm/arm-modes.def:66 */
+#define HAVE_CC_DGTUmode
+#ifdef USE_ENUM_MODES
+#define CC_DGTUmode E_CC_DGTUmode
+#else
+#define CC_DGTUmode ((void) 0, E_CC_DGTUmode)
+#endif
+ E_CC_Cmode, /* config/arm/arm-modes.def:67 */
+#define HAVE_CC_Cmode
+#ifdef USE_ENUM_MODES
+#define CC_Cmode E_CC_Cmode
+#else
+#define CC_Cmode ((void) 0, E_CC_Cmode)
+#endif
+ E_CC_Bmode, /* config/arm/arm-modes.def:68 */
+#define HAVE_CC_Bmode
+#ifdef USE_ENUM_MODES
+#define CC_Bmode E_CC_Bmode
+#else
+#define CC_Bmode ((void) 0, E_CC_Bmode)
+#endif
+ E_CC_Nmode, /* config/arm/arm-modes.def:69 */
+#define HAVE_CC_Nmode
+#ifdef USE_ENUM_MODES
+#define CC_Nmode E_CC_Nmode
+#else
+#define CC_Nmode ((void) 0, E_CC_Nmode)
+#endif
+ E_CC_Vmode, /* config/arm/arm-modes.def:70 */
+#define HAVE_CC_Vmode
+#ifdef USE_ENUM_MODES
+#define CC_Vmode E_CC_Vmode
+#else
+#define CC_Vmode ((void) 0, E_CC_Vmode)
+#endif
+ E_CC_ADCmode, /* config/arm/arm-modes.def:71 */
+#define HAVE_CC_ADCmode
+#ifdef USE_ENUM_MODES
+#define CC_ADCmode E_CC_ADCmode
+#else
+#define CC_ADCmode ((void) 0, E_CC_ADCmode)
+#endif
+ E_BImode, /* machmode.def:201 */
+#define HAVE_BImode
+#ifdef USE_ENUM_MODES
+#define BImode E_BImode
+#else
+#define BImode (scalar_int_mode ((scalar_int_mode::from_int) E_BImode))
+#endif
+ E_B2Imode, /* config/arm/arm-modes.def:88 */
+#define HAVE_B2Imode
+#ifdef USE_ENUM_MODES
+#define B2Imode E_B2Imode
+#else
+#define B2Imode (scalar_int_mode ((scalar_int_mode::from_int) E_B2Imode))
+#endif
+ E_B4Imode, /* config/arm/arm-modes.def:89 */
+#define HAVE_B4Imode
+#ifdef USE_ENUM_MODES
+#define B4Imode E_B4Imode
+#else
+#define B4Imode (scalar_int_mode ((scalar_int_mode::from_int) E_B4Imode))
+#endif
+ E_QImode, /* machmode.def:209 */
+#define HAVE_QImode
+#ifdef USE_ENUM_MODES
+#define QImode E_QImode
+#else
+#define QImode (scalar_int_mode ((scalar_int_mode::from_int) E_QImode))
+#endif
+ E_HImode, /* machmode.def:210 */
+#define HAVE_HImode
+#ifdef USE_ENUM_MODES
+#define HImode E_HImode
+#else
+#define HImode (scalar_int_mode ((scalar_int_mode::from_int) E_HImode))
+#endif
+ E_SImode, /* machmode.def:211 */
+#define HAVE_SImode
+#ifdef USE_ENUM_MODES
+#define SImode E_SImode
+#else
+#define SImode (scalar_int_mode ((scalar_int_mode::from_int) E_SImode))
+#endif
+ E_DImode, /* machmode.def:212 */
+#define HAVE_DImode
+#ifdef USE_ENUM_MODES
+#define DImode E_DImode
+#else
+#define DImode (scalar_int_mode ((scalar_int_mode::from_int) E_DImode))
+#endif
+ E_TImode, /* machmode.def:213 */
+#define HAVE_TImode
+#ifdef USE_ENUM_MODES
+#define TImode E_TImode
+#else
+#define TImode (scalar_int_mode ((scalar_int_mode::from_int) E_TImode))
+#endif
+ E_EImode, /* config/arm/arm-modes.def:104 */
+#define HAVE_EImode
+#ifdef USE_ENUM_MODES
+#define EImode E_EImode
+#else
+#define EImode (scalar_int_mode ((scalar_int_mode::from_int) E_EImode))
+#endif
+ E_OImode, /* config/arm/arm-modes.def:105 */
+#define HAVE_OImode
+#ifdef USE_ENUM_MODES
+#define OImode E_OImode
+#else
+#define OImode (scalar_int_mode ((scalar_int_mode::from_int) E_OImode))
+#endif
+ E_CImode, /* config/arm/arm-modes.def:106 */
+#define HAVE_CImode
+#ifdef USE_ENUM_MODES
+#define CImode E_CImode
+#else
+#define CImode (scalar_int_mode ((scalar_int_mode::from_int) E_CImode))
+#endif
+ E_XImode, /* config/arm/arm-modes.def:107 */
+#define HAVE_XImode
+#ifdef USE_ENUM_MODES
+#define XImode E_XImode
+#else
+#define XImode (scalar_int_mode ((scalar_int_mode::from_int) E_XImode))
+#endif
+ E_QQmode, /* machmode.def:239 */
+#define HAVE_QQmode
+#ifdef USE_ENUM_MODES
+#define QQmode E_QQmode
+#else
+#define QQmode (scalar_mode ((scalar_mode::from_int) E_QQmode))
+#endif
+ E_HQmode, /* machmode.def:240 */
+#define HAVE_HQmode
+#ifdef USE_ENUM_MODES
+#define HQmode E_HQmode
+#else
+#define HQmode (scalar_mode ((scalar_mode::from_int) E_HQmode))
+#endif
+ E_SQmode, /* machmode.def:241 */
+#define HAVE_SQmode
+#ifdef USE_ENUM_MODES
+#define SQmode E_SQmode
+#else
+#define SQmode (scalar_mode ((scalar_mode::from_int) E_SQmode))
+#endif
+ E_DQmode, /* machmode.def:242 */
+#define HAVE_DQmode
+#ifdef USE_ENUM_MODES
+#define DQmode E_DQmode
+#else
+#define DQmode (scalar_mode ((scalar_mode::from_int) E_DQmode))
+#endif
+ E_TQmode, /* machmode.def:243 */
+#define HAVE_TQmode
+#ifdef USE_ENUM_MODES
+#define TQmode E_TQmode
+#else
+#define TQmode (scalar_mode ((scalar_mode::from_int) E_TQmode))
+#endif
+ E_UQQmode, /* machmode.def:245 */
+#define HAVE_UQQmode
+#ifdef USE_ENUM_MODES
+#define UQQmode E_UQQmode
+#else
+#define UQQmode (scalar_mode ((scalar_mode::from_int) E_UQQmode))
+#endif
+ E_UHQmode, /* machmode.def:246 */
+#define HAVE_UHQmode
+#ifdef USE_ENUM_MODES
+#define UHQmode E_UHQmode
+#else
+#define UHQmode (scalar_mode ((scalar_mode::from_int) E_UHQmode))
+#endif
+ E_USQmode, /* machmode.def:247 */
+#define HAVE_USQmode
+#ifdef USE_ENUM_MODES
+#define USQmode E_USQmode
+#else
+#define USQmode (scalar_mode ((scalar_mode::from_int) E_USQmode))
+#endif
+ E_UDQmode, /* machmode.def:248 */
+#define HAVE_UDQmode
+#ifdef USE_ENUM_MODES
+#define UDQmode E_UDQmode
+#else
+#define UDQmode (scalar_mode ((scalar_mode::from_int) E_UDQmode))
+#endif
+ E_UTQmode, /* machmode.def:249 */
+#define HAVE_UTQmode
+#ifdef USE_ENUM_MODES
+#define UTQmode E_UTQmode
+#else
+#define UTQmode (scalar_mode ((scalar_mode::from_int) E_UTQmode))
+#endif
+ E_HAmode, /* machmode.def:251 */
+#define HAVE_HAmode
+#ifdef USE_ENUM_MODES
+#define HAmode E_HAmode
+#else
+#define HAmode (scalar_mode ((scalar_mode::from_int) E_HAmode))
+#endif
+ E_SAmode, /* machmode.def:252 */
+#define HAVE_SAmode
+#ifdef USE_ENUM_MODES
+#define SAmode E_SAmode
+#else
+#define SAmode (scalar_mode ((scalar_mode::from_int) E_SAmode))
+#endif
+ E_DAmode, /* machmode.def:253 */
+#define HAVE_DAmode
+#ifdef USE_ENUM_MODES
+#define DAmode E_DAmode
+#else
+#define DAmode (scalar_mode ((scalar_mode::from_int) E_DAmode))
+#endif
+ E_TAmode, /* machmode.def:254 */
+#define HAVE_TAmode
+#ifdef USE_ENUM_MODES
+#define TAmode E_TAmode
+#else
+#define TAmode (scalar_mode ((scalar_mode::from_int) E_TAmode))
+#endif
+ E_UHAmode, /* machmode.def:256 */
+#define HAVE_UHAmode
+#ifdef USE_ENUM_MODES
+#define UHAmode E_UHAmode
+#else
+#define UHAmode (scalar_mode ((scalar_mode::from_int) E_UHAmode))
+#endif
+ E_USAmode, /* machmode.def:257 */
+#define HAVE_USAmode
+#ifdef USE_ENUM_MODES
+#define USAmode E_USAmode
+#else
+#define USAmode (scalar_mode ((scalar_mode::from_int) E_USAmode))
+#endif
+ E_UDAmode, /* machmode.def:258 */
+#define HAVE_UDAmode
+#ifdef USE_ENUM_MODES
+#define UDAmode E_UDAmode
+#else
+#define UDAmode (scalar_mode ((scalar_mode::from_int) E_UDAmode))
+#endif
+ E_UTAmode, /* machmode.def:259 */
+#define HAVE_UTAmode
+#ifdef USE_ENUM_MODES
+#define UTAmode E_UTAmode
+#else
+#define UTAmode (scalar_mode ((scalar_mode::from_int) E_UTAmode))
+#endif
+ E_HFmode, /* config/arm/arm-modes.def:26 */
+#define HAVE_HFmode
+#ifdef USE_ENUM_MODES
+#define HFmode E_HFmode
+#else
+#define HFmode (scalar_float_mode ((scalar_float_mode::from_int) E_HFmode))
+#endif
+ E_BFmode, /* config/arm/arm-modes.def:81 */
+#define HAVE_BFmode
+#ifdef USE_ENUM_MODES
+#define BFmode E_BFmode
+#else
+#define BFmode (scalar_float_mode ((scalar_float_mode::from_int) E_BFmode))
+#endif
+ E_SFmode, /* machmode.def:231 */
+#define HAVE_SFmode
+#ifdef USE_ENUM_MODES
+#define SFmode E_SFmode
+#else
+#define SFmode (scalar_float_mode ((scalar_float_mode::from_int) E_SFmode))
+#endif
+ E_DFmode, /* machmode.def:232 */
+#define HAVE_DFmode
+#ifdef USE_ENUM_MODES
+#define DFmode E_DFmode
+#else
+#define DFmode (scalar_float_mode ((scalar_float_mode::from_int) E_DFmode))
+#endif
+ E_SDmode, /* machmode.def:272 */
+#define HAVE_SDmode
+#ifdef USE_ENUM_MODES
+#define SDmode E_SDmode
+#else
+#define SDmode (scalar_float_mode ((scalar_float_mode::from_int) E_SDmode))
+#endif
+ E_DDmode, /* machmode.def:273 */
+#define HAVE_DDmode
+#ifdef USE_ENUM_MODES
+#define DDmode E_DDmode
+#else
+#define DDmode (scalar_float_mode ((scalar_float_mode::from_int) E_DDmode))
+#endif
+ E_TDmode, /* machmode.def:274 */
+#define HAVE_TDmode
+#ifdef USE_ENUM_MODES
+#define TDmode E_TDmode
+#else
+#define TDmode (scalar_float_mode ((scalar_float_mode::from_int) E_TDmode))
+#endif
+ E_CQImode, /* machmode.def:267 */
+#define HAVE_CQImode
+#ifdef USE_ENUM_MODES
+#define CQImode E_CQImode
+#else
+#define CQImode (complex_mode ((complex_mode::from_int) E_CQImode))
+#endif
+ E_CHImode, /* machmode.def:267 */
+#define HAVE_CHImode
+#ifdef USE_ENUM_MODES
+#define CHImode E_CHImode
+#else
+#define CHImode (complex_mode ((complex_mode::from_int) E_CHImode))
+#endif
+ E_CSImode, /* machmode.def:267 */
+#define HAVE_CSImode
+#ifdef USE_ENUM_MODES
+#define CSImode E_CSImode
+#else
+#define CSImode (complex_mode ((complex_mode::from_int) E_CSImode))
+#endif
+ E_CDImode, /* machmode.def:267 */
+#define HAVE_CDImode
+#ifdef USE_ENUM_MODES
+#define CDImode E_CDImode
+#else
+#define CDImode (complex_mode ((complex_mode::from_int) E_CDImode))
+#endif
+ E_CTImode, /* machmode.def:267 */
+#define HAVE_CTImode
+#ifdef USE_ENUM_MODES
+#define CTImode E_CTImode
+#else
+#define CTImode (complex_mode ((complex_mode::from_int) E_CTImode))
+#endif
+ E_CEImode, /* machmode.def:267 */
+#define HAVE_CEImode
+#ifdef USE_ENUM_MODES
+#define CEImode E_CEImode
+#else
+#define CEImode (complex_mode ((complex_mode::from_int) E_CEImode))
+#endif
+ E_COImode, /* machmode.def:267 */
+#define HAVE_COImode
+#ifdef USE_ENUM_MODES
+#define COImode E_COImode
+#else
+#define COImode (complex_mode ((complex_mode::from_int) E_COImode))
+#endif
+ E_CCImode, /* machmode.def:267 */
+#define HAVE_CCImode
+#ifdef USE_ENUM_MODES
+#define CCImode E_CCImode
+#else
+#define CCImode (complex_mode ((complex_mode::from_int) E_CCImode))
+#endif
+ E_CXImode, /* machmode.def:267 */
+#define HAVE_CXImode
+#ifdef USE_ENUM_MODES
+#define CXImode E_CXImode
+#else
+#define CXImode (complex_mode ((complex_mode::from_int) E_CXImode))
+#endif
+ E_BCmode, /* machmode.def:269 */
+#define HAVE_BCmode
+#ifdef USE_ENUM_MODES
+#define BCmode E_BCmode
+#else
+#define BCmode (complex_mode ((complex_mode::from_int) E_BCmode))
+#endif
+ E_HCmode, /* machmode.def:269 */
+#define HAVE_HCmode
+#ifdef USE_ENUM_MODES
+#define HCmode E_HCmode
+#else
+#define HCmode (complex_mode ((complex_mode::from_int) E_HCmode))
+#endif
+ E_SCmode, /* machmode.def:269 */
+#define HAVE_SCmode
+#ifdef USE_ENUM_MODES
+#define SCmode E_SCmode
+#else
+#define SCmode (complex_mode ((complex_mode::from_int) E_SCmode))
+#endif
+ E_DCmode, /* machmode.def:269 */
+#define HAVE_DCmode
+#ifdef USE_ENUM_MODES
+#define DCmode E_DCmode
+#else
+#define DCmode (complex_mode ((complex_mode::from_int) E_DCmode))
+#endif
+ E_V16BImode, /* config/arm/arm-modes.def:91 */
+#define HAVE_V16BImode
+#ifdef USE_ENUM_MODES
+#define V16BImode E_V16BImode
+#else
+#define V16BImode ((void) 0, E_V16BImode)
+#endif
+ E_V8BImode, /* config/arm/arm-modes.def:92 */
+#define HAVE_V8BImode
+#ifdef USE_ENUM_MODES
+#define V8BImode E_V8BImode
+#else
+#define V8BImode ((void) 0, E_V8BImode)
+#endif
+ E_V4BImode, /* config/arm/arm-modes.def:93 */
+#define HAVE_V4BImode
+#ifdef USE_ENUM_MODES
+#define V4BImode E_V4BImode
+#else
+#define V4BImode ((void) 0, E_V4BImode)
+#endif
+ E_V2QImode, /* config/arm/arm-modes.def:94 */
+#define HAVE_V2QImode
+#ifdef USE_ENUM_MODES
+#define V2QImode E_V2QImode
+#else
+#define V2QImode ((void) 0, E_V2QImode)
+#endif
+ E_V4QImode, /* config/arm/arm-modes.def:74 */
+#define HAVE_V4QImode
+#ifdef USE_ENUM_MODES
+#define V4QImode E_V4QImode
+#else
+#define V4QImode ((void) 0, E_V4QImode)
+#endif
+ E_V2HImode, /* config/arm/arm-modes.def:74 */
+#define HAVE_V2HImode
+#ifdef USE_ENUM_MODES
+#define V2HImode E_V2HImode
+#else
+#define V2HImode ((void) 0, E_V2HImode)
+#endif
+ E_V8QImode, /* config/arm/arm-modes.def:75 */
+#define HAVE_V8QImode
+#ifdef USE_ENUM_MODES
+#define V8QImode E_V8QImode
+#else
+#define V8QImode ((void) 0, E_V8QImode)
+#endif
+ E_V4HImode, /* config/arm/arm-modes.def:75 */
+#define HAVE_V4HImode
+#ifdef USE_ENUM_MODES
+#define V4HImode E_V4HImode
+#else
+#define V4HImode ((void) 0, E_V4HImode)
+#endif
+ E_V2SImode, /* config/arm/arm-modes.def:75 */
+#define HAVE_V2SImode
+#ifdef USE_ENUM_MODES
+#define V2SImode E_V2SImode
+#else
+#define V2SImode ((void) 0, E_V2SImode)
+#endif
+ E_V16QImode, /* config/arm/arm-modes.def:76 */
+#define HAVE_V16QImode
+#ifdef USE_ENUM_MODES
+#define V16QImode E_V16QImode
+#else
+#define V16QImode ((void) 0, E_V16QImode)
+#endif
+ E_V8HImode, /* config/arm/arm-modes.def:76 */
+#define HAVE_V8HImode
+#ifdef USE_ENUM_MODES
+#define V8HImode E_V8HImode
+#else
+#define V8HImode ((void) 0, E_V8HImode)
+#endif
+ E_V4SImode, /* config/arm/arm-modes.def:76 */
+#define HAVE_V4SImode
+#ifdef USE_ENUM_MODES
+#define V4SImode E_V4SImode
+#else
+#define V4SImode ((void) 0, E_V4SImode)
+#endif
+ E_V2DImode, /* config/arm/arm-modes.def:76 */
+#define HAVE_V2DImode
+#ifdef USE_ENUM_MODES
+#define V2DImode E_V2DImode
+#else
+#define V2DImode ((void) 0, E_V2DImode)
+#endif
+ E_V4QQmode, /* config/arm/arm-modes.def:97 */
+#define HAVE_V4QQmode
+#ifdef USE_ENUM_MODES
+#define V4QQmode E_V4QQmode
+#else
+#define V4QQmode ((void) 0, E_V4QQmode)
+#endif
+ E_V2HQmode, /* config/arm/arm-modes.def:97 */
+#define HAVE_V2HQmode
+#ifdef USE_ENUM_MODES
+#define V2HQmode E_V2HQmode
+#else
+#define V2HQmode ((void) 0, E_V2HQmode)
+#endif
+ E_V4UQQmode, /* config/arm/arm-modes.def:98 */
+#define HAVE_V4UQQmode
+#ifdef USE_ENUM_MODES
+#define V4UQQmode E_V4UQQmode
+#else
+#define V4UQQmode ((void) 0, E_V4UQQmode)
+#endif
+ E_V2UHQmode, /* config/arm/arm-modes.def:98 */
+#define HAVE_V2UHQmode
+#ifdef USE_ENUM_MODES
+#define V2UHQmode E_V2UHQmode
+#else
+#define V2UHQmode ((void) 0, E_V2UHQmode)
+#endif
+ E_V2HAmode, /* config/arm/arm-modes.def:99 */
+#define HAVE_V2HAmode
+#ifdef USE_ENUM_MODES
+#define V2HAmode E_V2HAmode
+#else
+#define V2HAmode ((void) 0, E_V2HAmode)
+#endif
+ E_V2UHAmode, /* config/arm/arm-modes.def:100 */
+#define HAVE_V2UHAmode
+#ifdef USE_ENUM_MODES
+#define V2UHAmode E_V2UHAmode
+#else
+#define V2UHAmode ((void) 0, E_V2UHAmode)
+#endif
+ E_V2HFmode, /* config/arm/arm-modes.def:79 */
+#define HAVE_V2HFmode
+#ifdef USE_ENUM_MODES
+#define V2HFmode E_V2HFmode
+#else
+#define V2HFmode ((void) 0, E_V2HFmode)
+#endif
+ E_V2BFmode, /* config/arm/arm-modes.def:83 */
+#define HAVE_V2BFmode
+#ifdef USE_ENUM_MODES
+#define V2BFmode E_V2BFmode
+#else
+#define V2BFmode ((void) 0, E_V2BFmode)
+#endif
+ E_V4HFmode, /* config/arm/arm-modes.def:77 */
+#define HAVE_V4HFmode
+#ifdef USE_ENUM_MODES
+#define V4HFmode E_V4HFmode
+#else
+#define V4HFmode ((void) 0, E_V4HFmode)
+#endif
+ E_V4BFmode, /* config/arm/arm-modes.def:84 */
+#define HAVE_V4BFmode
+#ifdef USE_ENUM_MODES
+#define V4BFmode E_V4BFmode
+#else
+#define V4BFmode ((void) 0, E_V4BFmode)
+#endif
+ E_V2SFmode, /* config/arm/arm-modes.def:77 */
+#define HAVE_V2SFmode
+#ifdef USE_ENUM_MODES
+#define V2SFmode E_V2SFmode
+#else
+#define V2SFmode ((void) 0, E_V2SFmode)
+#endif
+ E_V8HFmode, /* config/arm/arm-modes.def:78 */
+#define HAVE_V8HFmode
+#ifdef USE_ENUM_MODES
+#define V8HFmode E_V8HFmode
+#else
+#define V8HFmode ((void) 0, E_V8HFmode)
+#endif
+ E_V8BFmode, /* config/arm/arm-modes.def:85 */
+#define HAVE_V8BFmode
+#ifdef USE_ENUM_MODES
+#define V8BFmode E_V8BFmode
+#else
+#define V8BFmode ((void) 0, E_V8BFmode)
+#endif
+ E_V4SFmode, /* config/arm/arm-modes.def:78 */
+#define HAVE_V4SFmode
+#ifdef USE_ENUM_MODES
+#define V4SFmode E_V4SFmode
+#else
+#define V4SFmode ((void) 0, E_V4SFmode)
+#endif
+ E_V2DFmode, /* config/arm/arm-modes.def:78 */
+#define HAVE_V2DFmode
+#ifdef USE_ENUM_MODES
+#define V2DFmode E_V2DFmode
+#else
+#define V2DFmode ((void) 0, E_V2DFmode)
+#endif
+ MAX_MACHINE_MODE,
+
+ MIN_MODE_RANDOM = E_VOIDmode,
+ MAX_MODE_RANDOM = E_BLKmode,
+
+ MIN_MODE_CC = E_CCmode,
+ MAX_MODE_CC = E_CC_ADCmode,
+
+ MIN_MODE_BOOL = E_BImode,
+ MAX_MODE_BOOL = E_B4Imode,
+
+ MIN_MODE_INT = E_QImode,
+ MAX_MODE_INT = E_XImode,
+
+ MIN_MODE_PARTIAL_INT = E_VOIDmode,
+ MAX_MODE_PARTIAL_INT = E_VOIDmode,
+
+ MIN_MODE_FRACT = E_QQmode,
+ MAX_MODE_FRACT = E_TQmode,
+
+ MIN_MODE_UFRACT = E_UQQmode,
+ MAX_MODE_UFRACT = E_UTQmode,
+
+ MIN_MODE_ACCUM = E_HAmode,
+ MAX_MODE_ACCUM = E_TAmode,
+
+ MIN_MODE_UACCUM = E_UHAmode,
+ MAX_MODE_UACCUM = E_UTAmode,
+
+ MIN_MODE_FLOAT = E_HFmode,
+ MAX_MODE_FLOAT = E_DFmode,
+
+ MIN_MODE_DECIMAL_FLOAT = E_SDmode,
+ MAX_MODE_DECIMAL_FLOAT = E_TDmode,
+
+ MIN_MODE_COMPLEX_INT = E_CQImode,
+ MAX_MODE_COMPLEX_INT = E_CXImode,
+
+ MIN_MODE_COMPLEX_FLOAT = E_BCmode,
+ MAX_MODE_COMPLEX_FLOAT = E_DCmode,
+
+ MIN_MODE_VECTOR_BOOL = E_V16BImode,
+ MAX_MODE_VECTOR_BOOL = E_V4BImode,
+
+ MIN_MODE_VECTOR_INT = E_V2QImode,
+ MAX_MODE_VECTOR_INT = E_V2DImode,
+
+ MIN_MODE_VECTOR_FRACT = E_V4QQmode,
+ MAX_MODE_VECTOR_FRACT = E_V2HQmode,
+
+ MIN_MODE_VECTOR_UFRACT = E_V4UQQmode,
+ MAX_MODE_VECTOR_UFRACT = E_V2UHQmode,
+
+ MIN_MODE_VECTOR_ACCUM = E_V2HAmode,
+ MAX_MODE_VECTOR_ACCUM = E_V2HAmode,
+
+ MIN_MODE_VECTOR_UACCUM = E_V2UHAmode,
+ MAX_MODE_VECTOR_UACCUM = E_V2UHAmode,
+
+ MIN_MODE_VECTOR_FLOAT = E_V2HFmode,
+ MAX_MODE_VECTOR_FLOAT = E_V2DFmode,
+
+ MIN_MODE_OPAQUE = E_VOIDmode,
+ MAX_MODE_OPAQUE = E_VOIDmode,
+
+ NUM_MACHINE_MODES = MAX_MACHINE_MODE
+};
+
+#define NUM_MODE_RANDOM (MAX_MODE_RANDOM - MIN_MODE_RANDOM + 1)
+#define NUM_MODE_CC (MAX_MODE_CC - MIN_MODE_CC + 1)
+#define NUM_MODE_INT (MAX_MODE_INT - MIN_MODE_INT + 1)
+#define NUM_MODE_PARTIAL_INT 0
+#define NUM_MODE_FRACT (MAX_MODE_FRACT - MIN_MODE_FRACT + 1)
+#define NUM_MODE_UFRACT (MAX_MODE_UFRACT - MIN_MODE_UFRACT + 1)
+#define NUM_MODE_ACCUM (MAX_MODE_ACCUM - MIN_MODE_ACCUM + 1)
+#define NUM_MODE_UACCUM (MAX_MODE_UACCUM - MIN_MODE_UACCUM + 1)
+#define NUM_MODE_FLOAT (MAX_MODE_FLOAT - MIN_MODE_FLOAT + 1)
+#define NUM_MODE_DECIMAL_FLOAT (MAX_MODE_DECIMAL_FLOAT - MIN_MODE_DECIMAL_FLOAT + 1)
+#define NUM_MODE_COMPLEX_INT (MAX_MODE_COMPLEX_INT - MIN_MODE_COMPLEX_INT + 1)
+#define NUM_MODE_COMPLEX_FLOAT (MAX_MODE_COMPLEX_FLOAT - MIN_MODE_COMPLEX_FLOAT + 1)
+#define NUM_MODE_VECTOR_BOOL (MAX_MODE_VECTOR_BOOL - MIN_MODE_VECTOR_BOOL + 1)
+#define NUM_MODE_VECTOR_INT (MAX_MODE_VECTOR_INT - MIN_MODE_VECTOR_INT + 1)
+#define NUM_MODE_VECTOR_FRACT (MAX_MODE_VECTOR_FRACT - MIN_MODE_VECTOR_FRACT + 1)
+#define NUM_MODE_VECTOR_UFRACT (MAX_MODE_VECTOR_UFRACT - MIN_MODE_VECTOR_UFRACT + 1)
+#define NUM_MODE_VECTOR_ACCUM (MAX_MODE_VECTOR_ACCUM - MIN_MODE_VECTOR_ACCUM + 1)
+#define NUM_MODE_VECTOR_UACCUM (MAX_MODE_VECTOR_UACCUM - MIN_MODE_VECTOR_UACCUM + 1)
+#define NUM_MODE_VECTOR_FLOAT (MAX_MODE_VECTOR_FLOAT - MIN_MODE_VECTOR_FLOAT + 1)
+#define NUM_MODE_OPAQUE 0
+
+#define CONST_MODE_NUNITS const
+#define CONST_MODE_PRECISION const
+#define CONST_MODE_SIZE const
+#define CONST_MODE_UNIT_SIZE const
+#define CONST_MODE_BASE_ALIGN const
+#define CONST_MODE_IBIT const
+#define CONST_MODE_FBIT const
+#define CONST_MODE_MASK const
+
+#define BITS_PER_UNIT (8)
+#define MAX_BITSIZE_MODE_ANY_INT (64*BITS_PER_UNIT)
+#define MAX_BITSIZE_MODE_ANY_MODE (128*BITS_PER_UNIT)
+#define NUM_INT_N_ENTS 1
+#define NUM_POLY_INT_COEFFS 1
+
+#endif /* insn-modes.h */
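(Editorial note: the generated macros above follow a deliberate pattern. With USE_ENUM_MODES defined, every mode name is the raw enumerator; by default, scalar modes are instead wrapped in small class types (scalar_int_mode, scalar_float_mode, scalar_mode, complex_mode), and the remaining modes in a "((void) 0, E_...)" comma expression, which still yields the value but is no longer usable as a constant expression. Either way, code that asks for a specific mode class gets checked at compile time. Below is a minimal standalone sketch of the wrapper half of the trick, using hypothetical mini-types rather than GCC's real machmode.h.)

/* Hypothetical mini-version of the wrapping pattern; not GCC's code.  */
#include <cassert>

enum machine_mode_enum { E_MYVOIDmode, E_MYSImode, MY_MAX_MODE };

/* Plays the role of scalar_int_mode: a thin, type-safe wrapper that
   still converts back to the raw enumerator on demand.  */
class my_scalar_int_mode
{
public:
  enum from_int { dummy = MY_MAX_MODE };
  my_scalar_int_mode (from_int m) : m_mode ((machine_mode_enum) m) {}
  operator machine_mode_enum () const { return m_mode; }
private:
  machine_mode_enum m_mode;
};

/* Mirrors "#define SImode (scalar_int_mode (...))" above.  */
#define MYSImode \
  (my_scalar_int_mode ((my_scalar_int_mode::from_int) E_MYSImode))

int
main ()
{
  my_scalar_int_mode m = MYSImode;   /* accepted: it is an int mode */
  assert ((machine_mode_enum) m == E_MYSImode);
  /* "case MYSImode:" would not compile here, which is the point: the
     wrapped macros cannot leak into constant-expression contexts the
     way bare enumerators can.  */
  return 0;
}
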
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-notes.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-notes.def
new file mode 100644
index 0000000..79735d1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/insn-notes.def
@@ -0,0 +1,98 @@
+/* Insn note definitions.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This file defines all the codes that may appear in the
+ NOTE_LINE_NUMBER field of a NOTE insn for kinds of notes that are
+ not line numbers. Source files define DEF_INSN_NOTE appropriately
+ before including this file.
+
+ We are slowly removing the concept of insn-chain notes from the
+ compiler. Adding new codes to this file is STRONGLY DISCOURAGED.
+ If you think you need one, look for other ways to express what you
+ mean, such as register notes or bits in the basic-block structure. */
+
+/* Shorthand. */
+#define INSN_NOTE(NAME) DEF_INSN_NOTE (NOTE_INSN_##NAME)
+
+/* This note is used to get rid of an insn when it isn't safe to patch
+ the insn out of the chain. */
+INSN_NOTE (DELETED)
+
+/* Generated in place of user-declared labels when they are deleted. */
+INSN_NOTE (DELETED_LABEL)
+/* Similarly, but for labels that have been present in debug stmts
+ earlier and thus will only appear with -g. These must use a
+ different label namespace. */
+INSN_NOTE (DELETED_DEBUG_LABEL)
+
+/* These are used to mark the beginning and end of a lexical block.
+ See NOTE_BLOCK and reorder_blocks. */
+INSN_NOTE (BLOCK_BEG)
+INSN_NOTE (BLOCK_END)
+
+/* This note indicates the start of the real body of the function,
+ i.e. the point just after all of the parms have been moved into
+ their homes, etc. */
+INSN_NOTE (FUNCTION_BEG)
+
+/* This marks the point immediately after the last prologue insn. */
+INSN_NOTE (PROLOGUE_END)
+
+/* This marks the point immediately prior to the first epilogue insn. */
+INSN_NOTE (EPILOGUE_BEG)
+
+/* These note where exception handling regions begin and end.
+ Uses NOTE_EH_HANDLER to identify the region in question. */
+INSN_NOTE (EH_REGION_BEG)
+INSN_NOTE (EH_REGION_END)
+
+/* The location of a variable. */
+INSN_NOTE (VAR_LOCATION)
+
+/* The beginning of a statement. */
+INSN_NOTE (BEGIN_STMT)
+
+/* The entry point for an inlined function. Its NOTE_BLOCK references
+ the lexical block whose abstract origin is the inlined function. */
+INSN_NOTE (INLINE_ENTRY)
+
+/* Record the struct for the following basic block. Uses
+ NOTE_BASIC_BLOCK. FIXME: Redundant with the basic block pointer
+ now included in every insn. NOTE: once there is no CFG anymore,
+ i.e. if BLOCK_FOR_INSN () == NULL, NOTE_BASIC_BLOCK can no longer
+ be considered reliable. */
+INSN_NOTE (BASIC_BLOCK)
+
+/* Mark the inflection point in the instruction stream where we switch
+ between hot and cold text sections. */
+INSN_NOTE (SWITCH_TEXT_SECTIONS)
+
+/* When emitting dwarf2 frame information, contains a directive that
+ should be emitted. */
+INSN_NOTE (CFI)
+
+/* When emitting dwarf2 frame information, contains the number of a debug
+ label that should be emitted. */
+INSN_NOTE (CFI_LABEL)
+
+/* This note indicates that the function context must be updated if
+ the Setjmp/Longjmp exception mechanism is used. */
+INSN_NOTE (UPDATE_SJLJ_CONTEXT)
+
+#undef INSN_NOTE
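(Editorial note: the DEF_INSN_NOTE hook above is the usual X-macro arrangement — each consumer defines the macro to taste, includes the .def file, and the same note list expands into a different shape. A sketch of the typical consumer pattern follows; it is illustrative, the real definitions live in GCC's RTL headers.)

enum insn_note
{
#define DEF_INSN_NOTE(NAME) NAME,
#include "insn-notes.def"
#undef DEF_INSN_NOTE
  NOTE_INSN_MAX
};

/* A parallel table of printable names, generated from the same list
   so the two can never drift apart.  */
static const char *const note_insn_name[] =
{
#define DEF_INSN_NOTE(NAME) #NAME,
#include "insn-notes.def"
#undef DEF_INSN_NOTE
};
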
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/int-vector-builder.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/int-vector-builder.h
new file mode 100644
index 0000000..8c6f25e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/int-vector-builder.h
@@ -0,0 +1,93 @@
+/* A class for building vector integer constants.
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_INT_VECTOR_BUILDER_H
+#define GCC_INT_VECTOR_BUILDER_H 1
+
+#include "vector-builder.h"
+
+/* This class is used to build vectors of integer type T using the same
+ encoding as tree and rtx constants. See vector_builder for more
+ details. */
+template<typename T>
+class int_vector_builder : public vector_builder<T, poly_uint64,
+ int_vector_builder<T> >
+{
+ typedef vector_builder<T, poly_uint64, int_vector_builder> parent;
+ friend class vector_builder<T, poly_uint64, int_vector_builder>;
+
+public:
+ int_vector_builder () {}
+ int_vector_builder (poly_uint64, unsigned int, unsigned int);
+
+ using parent::new_vector;
+
+private:
+ bool equal_p (T, T) const;
+ bool allow_steps_p () const { return true; }
+ bool integral_p (T) const { return true; }
+ T step (T, T) const;
+ T apply_step (T, unsigned int, T) const;
+ bool can_elide_p (T) const { return true; }
+ void note_representative (T *, T) {}
+
+ static poly_uint64 shape_nelts (poly_uint64 x) { return x; }
+};
+
+/* Create a new builder for a vector with FULL_NELTS elements.
+ Initially encode the value as NPATTERNS interleaved patterns with
+ NELTS_PER_PATTERN elements each. */
+
+template<typename T>
+inline
+int_vector_builder<T>::int_vector_builder (poly_uint64 full_nelts,
+ unsigned int npatterns,
+ unsigned int nelts_per_pattern)
+{
+ new_vector (full_nelts, npatterns, nelts_per_pattern);
+}
+
+/* Return true if elements ELT1 and ELT2 are equal. */
+
+template<typename T>
+inline bool
+int_vector_builder<T>::equal_p (T elt1, T elt2) const
+{
+ return known_eq (elt1, elt2);
+}
+
+/* Return the value of element ELT2 minus the value of element ELT1. */
+
+template<typename T>
+inline T
+int_vector_builder<T>::step (T elt1, T elt2) const
+{
+ return elt2 - elt1;
+}
+
+/* Return a vector element with the value BASE + FACTOR * STEP. */
+
+template<typename T>
+inline T
+int_vector_builder<T>::apply_step (T base, unsigned int factor, T step) const
+{
+ return base + factor * step;
+}
+
+#endif
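(Editorial note: the encoding this builder shares with tree and rtx constants stores only NPATTERNS interleaved patterns of NELTS_PER_PATTERN leading elements each; with three elements per pattern, every later element continues the arithmetic series defined by the pattern's last two stored values — which is what allow_steps_p, step and apply_step above exist for. Below is a standalone sketch of that decoding rule, assuming NELTS_PER_PATTERN >= 2 for stepped sequences; it is an illustration, not GCC's vector_builder.)

#include <cstdio>
#include <vector>

static long
decode_elt (const std::vector<long> &encoded, unsigned npatterns,
            unsigned nelts_per_pattern, unsigned i)
{
  unsigned pattern = i % npatterns;
  unsigned index = i / npatterns;   /* position within the pattern */
  if (index < nelts_per_pattern)
    return encoded[index * npatterns + pattern];
  /* Extrapolate: base is the pattern's last stored element, step the
     difference between its last two stored elements.  */
  long last = encoded[(nelts_per_pattern - 1) * npatterns + pattern];
  long prev = encoded[(nelts_per_pattern - 2) * npatterns + pattern];
  long step = last - prev;
  return last + (long) (index - (nelts_per_pattern - 1)) * step;
}

int
main ()
{
  /* One pattern with three stored elements {0, 1, 2} encodes the
     series 0, 1, 2, 3, ... for any number of elements.  */
  std::vector<long> enc = {0, 1, 2};
  for (unsigned i = 0; i < 8; ++i)
    printf ("%ld ", decode_elt (enc, 1, 3, i));
  printf ("\n");   /* prints: 0 1 2 3 4 5 6 7 */
  return 0;
}
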
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/internal-fn.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/internal-fn.def
new file mode 100644
index 0000000..7fe742c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/internal-fn.def
@@ -0,0 +1,472 @@
+/* Internal functions.
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This file specifies a list of internal "functions". These functions
+ differ from built-in functions in that they have no linkage and cannot
+ be called directly by the user. They represent operations that are only
+ synthesised by GCC itself.
+
+ Internal functions are used instead of tree codes if the operation
+ and its operands are more naturally represented as a GIMPLE_CALL
+ than a GIMPLE_ASSIGN.
+
+ Each entry in this file has one of the forms:
+
+ DEF_INTERNAL_FN (NAME, FLAGS, FNSPEC)
+ DEF_INTERNAL_OPTAB_FN (NAME, FLAGS, OPTAB, TYPE)
+ DEF_INTERNAL_SIGNED_OPTAB_FN (NAME, FLAGS, SELECTOR, SIGNED_OPTAB,
+ UNSIGNED_OPTAB, TYPE)
+ DEF_INTERNAL_FLT_FN (NAME, FLAGS, OPTAB, TYPE)
+ DEF_INTERNAL_INT_FN (NAME, FLAGS, OPTAB, TYPE)
+
+ where NAME is the name of the function, FLAGS is a set of
+ ECF_* flags and FNSPEC is a string describing the function's fnspec.
+
+ DEF_INTERNAL_OPTAB_FN defines an internal function that maps to a
+ direct optab. The function should only be called with a given
+ set of types if the associated optab is available for the modes
+ of those types. OPTAB says what optab to use (without the trailing
+ "_optab") and TYPE categorizes the optab based on its inputs and
+ outputs. The possible types of optab are:
+
+ - mask_load: currently just maskload
+ - load_lanes: currently just vec_load_lanes
+ - mask_load_lanes: currently just vec_mask_load_lanes
+ - gather_load: used for {mask_,}gather_load
+ - len_load: currently just len_load
+
+ - mask_store: currently just maskstore
+ - store_lanes: currently just vec_store_lanes
+ - mask_store_lanes: currently just vec_mask_store_lanes
+ - scatter_store: used for {mask_,}scatter_store
+ - len_store: currently just len_store
+
+ - unary: a normal unary optab, such as vec_reverse_<mode>
+ - binary: a normal binary optab, such as vec_interleave_lo_<mode>
+ - ternary: a normal ternary optab, such as fma<mode>4
+
+ - unary_convert: a single-input conversion optab, such as
+ lround<srcmode><dstmode>2.
+
+ - cond_binary: a conditional binary optab, such as cond_add<mode>
+ - cond_ternary: a conditional ternary optab, such as cond_fma_rev<mode>
+
+ - fold_left: for scalar = FN (scalar, vector), keyed off the vector mode
+ - check_ptrs: used for check_{raw,war}_ptrs
+
+ DEF_INTERNAL_SIGNED_OPTAB_FN defines an internal function that
+ maps to one of two optabs, depending on the signedness of an input.
+ SIGNED_OPTAB and UNSIGNED_OPTAB are the optabs for signed and
+ unsigned inputs respectively, both without the trailing "_optab".
+ SELECTOR says which type in the tree_pair determines the signedness.
+
+ DEF_INTERNAL_FLT_FN is like DEF_INTERNAL_OPTAB_FN, but in addition,
+ the function implements the computational part of a built-in math
+ function BUILT_IN_<NAME>{F,,L}. Unlike some built-in functions,
+ these internal functions never set errno.
+
+ DEF_INTERNAL_INT_FN is like DEF_INTERNAL_OPTAB_FN, but in addition
+ says that the function extends the C-level BUILT_IN_<NAME>{,L,LL,IMAX}
+ group of functions to any integral mode (including vector modes).
+
+ Each entry must have a corresponding expander of the form:
+
+ void expand_NAME (gimple_call stmt)
+
+ where STMT is the statement that performs the call. These are generated
+ automatically for optab functions and call out to a function or macro
+ called expand_<TYPE>_optab_fn. */
+
+#ifndef DEF_INTERNAL_FN
+#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC)
+#endif
+
+#ifndef DEF_INTERNAL_OPTAB_FN
+#define DEF_INTERNAL_OPTAB_FN(NAME, FLAGS, OPTAB, TYPE) \
+ DEF_INTERNAL_FN (NAME, FLAGS | ECF_LEAF, NULL)
+#endif
+
+#ifndef DEF_INTERNAL_SIGNED_OPTAB_FN
+#define DEF_INTERNAL_SIGNED_OPTAB_FN(NAME, FLAGS, SELECTOR, SIGNED_OPTAB, \
+ UNSIGNED_OPTAB, TYPE) \
+ DEF_INTERNAL_FN (NAME, FLAGS | ECF_LEAF, NULL)
+#endif
+
+#ifndef DEF_INTERNAL_FLT_FN
+#define DEF_INTERNAL_FLT_FN(NAME, FLAGS, OPTAB, TYPE) \
+ DEF_INTERNAL_OPTAB_FN (NAME, FLAGS, OPTAB, TYPE)
+#endif
+
+#ifndef DEF_INTERNAL_FLT_FLOATN_FN
+#define DEF_INTERNAL_FLT_FLOATN_FN(NAME, FLAGS, OPTAB, TYPE) \
+ DEF_INTERNAL_FLT_FN (NAME, FLAGS, OPTAB, TYPE)
+#endif
+
+#ifndef DEF_INTERNAL_INT_FN
+#define DEF_INTERNAL_INT_FN(NAME, FLAGS, OPTAB, TYPE) \
+ DEF_INTERNAL_OPTAB_FN (NAME, FLAGS, OPTAB, TYPE)
+#endif
+
+DEF_INTERNAL_OPTAB_FN (MASK_LOAD, ECF_PURE, maskload, mask_load)
+DEF_INTERNAL_OPTAB_FN (LOAD_LANES, ECF_CONST, vec_load_lanes, load_lanes)
+DEF_INTERNAL_OPTAB_FN (MASK_LOAD_LANES, ECF_PURE,
+ vec_mask_load_lanes, mask_load_lanes)
+
+DEF_INTERNAL_OPTAB_FN (GATHER_LOAD, ECF_PURE, gather_load, gather_load)
+DEF_INTERNAL_OPTAB_FN (MASK_GATHER_LOAD, ECF_PURE,
+ mask_gather_load, gather_load)
+
+DEF_INTERNAL_OPTAB_FN (LEN_LOAD, ECF_PURE, len_load, len_load)
+
+DEF_INTERNAL_OPTAB_FN (SCATTER_STORE, 0, scatter_store, scatter_store)
+DEF_INTERNAL_OPTAB_FN (MASK_SCATTER_STORE, 0,
+ mask_scatter_store, scatter_store)
+
+DEF_INTERNAL_OPTAB_FN (MASK_STORE, 0, maskstore, mask_store)
+DEF_INTERNAL_OPTAB_FN (STORE_LANES, ECF_CONST, vec_store_lanes, store_lanes)
+DEF_INTERNAL_OPTAB_FN (MASK_STORE_LANES, 0,
+ vec_mask_store_lanes, mask_store_lanes)
+
+DEF_INTERNAL_OPTAB_FN (VCOND, 0, vcond, vec_cond)
+DEF_INTERNAL_OPTAB_FN (VCONDU, 0, vcondu, vec_cond)
+DEF_INTERNAL_OPTAB_FN (VCONDEQ, 0, vcondeq, vec_cond)
+DEF_INTERNAL_OPTAB_FN (VCOND_MASK, 0, vcond_mask, vec_cond_mask)
+
+DEF_INTERNAL_OPTAB_FN (VEC_SET, 0, vec_set, vec_set)
+
+DEF_INTERNAL_OPTAB_FN (LEN_STORE, 0, len_store, len_store)
+
+DEF_INTERNAL_OPTAB_FN (WHILE_ULT, ECF_CONST | ECF_NOTHROW, while_ult, while)
+DEF_INTERNAL_OPTAB_FN (CHECK_RAW_PTRS, ECF_CONST | ECF_NOTHROW,
+ check_raw_ptrs, check_ptrs)
+DEF_INTERNAL_OPTAB_FN (CHECK_WAR_PTRS, ECF_CONST | ECF_NOTHROW,
+ check_war_ptrs, check_ptrs)
+
+DEF_INTERNAL_OPTAB_FN (VEC_SHL_INSERT, ECF_CONST | ECF_NOTHROW,
+ vec_shl_insert, binary)
+
+DEF_INTERNAL_OPTAB_FN (DIV_POW2, ECF_CONST | ECF_NOTHROW, sdiv_pow2, binary)
+
+DEF_INTERNAL_OPTAB_FN (FMS, ECF_CONST, fms, ternary)
+DEF_INTERNAL_OPTAB_FN (FNMA, ECF_CONST, fnma, ternary)
+DEF_INTERNAL_OPTAB_FN (FNMS, ECF_CONST, fnms, ternary)
+
+DEF_INTERNAL_SIGNED_OPTAB_FN (AVG_FLOOR, ECF_CONST | ECF_NOTHROW, first,
+ savg_floor, uavg_floor, binary)
+DEF_INTERNAL_SIGNED_OPTAB_FN (AVG_CEIL, ECF_CONST | ECF_NOTHROW, first,
+ savg_ceil, uavg_ceil, binary)
+
+DEF_INTERNAL_SIGNED_OPTAB_FN (MULH, ECF_CONST | ECF_NOTHROW, first,
+ smul_highpart, umul_highpart, binary)
+DEF_INTERNAL_SIGNED_OPTAB_FN (MULHS, ECF_CONST | ECF_NOTHROW, first,
+ smulhs, umulhs, binary)
+DEF_INTERNAL_SIGNED_OPTAB_FN (MULHRS, ECF_CONST | ECF_NOTHROW, first,
+ smulhrs, umulhrs, binary)
+
+DEF_INTERNAL_OPTAB_FN (COND_ADD, ECF_CONST, cond_add, cond_binary)
+DEF_INTERNAL_OPTAB_FN (COND_SUB, ECF_CONST, cond_sub, cond_binary)
+DEF_INTERNAL_OPTAB_FN (COND_MUL, ECF_CONST, cond_smul, cond_binary)
+DEF_INTERNAL_SIGNED_OPTAB_FN (COND_DIV, ECF_CONST, first,
+ cond_sdiv, cond_udiv, cond_binary)
+DEF_INTERNAL_SIGNED_OPTAB_FN (COND_MOD, ECF_CONST, first,
+ cond_smod, cond_umod, cond_binary)
+DEF_INTERNAL_OPTAB_FN (COND_RDIV, ECF_CONST, cond_sdiv, cond_binary)
+DEF_INTERNAL_SIGNED_OPTAB_FN (COND_MIN, ECF_CONST, first,
+ cond_smin, cond_umin, cond_binary)
+DEF_INTERNAL_SIGNED_OPTAB_FN (COND_MAX, ECF_CONST, first,
+ cond_smax, cond_umax, cond_binary)
+DEF_INTERNAL_OPTAB_FN (COND_FMIN, ECF_CONST, cond_fmin, cond_binary)
+DEF_INTERNAL_OPTAB_FN (COND_FMAX, ECF_CONST, cond_fmax, cond_binary)
+DEF_INTERNAL_OPTAB_FN (COND_AND, ECF_CONST | ECF_NOTHROW,
+ cond_and, cond_binary)
+DEF_INTERNAL_OPTAB_FN (COND_IOR, ECF_CONST | ECF_NOTHROW,
+ cond_ior, cond_binary)
+DEF_INTERNAL_OPTAB_FN (COND_XOR, ECF_CONST | ECF_NOTHROW,
+ cond_xor, cond_binary)
+DEF_INTERNAL_OPTAB_FN (COND_SHL, ECF_CONST | ECF_NOTHROW,
+ cond_ashl, cond_binary)
+DEF_INTERNAL_SIGNED_OPTAB_FN (COND_SHR, ECF_CONST | ECF_NOTHROW, first,
+ cond_ashr, cond_lshr, cond_binary)
+
+DEF_INTERNAL_OPTAB_FN (COND_FMA, ECF_CONST, cond_fma, cond_ternary)
+DEF_INTERNAL_OPTAB_FN (COND_FMS, ECF_CONST, cond_fms, cond_ternary)
+DEF_INTERNAL_OPTAB_FN (COND_FNMA, ECF_CONST, cond_fnma, cond_ternary)
+DEF_INTERNAL_OPTAB_FN (COND_FNMS, ECF_CONST, cond_fnms, cond_ternary)
+
+DEF_INTERNAL_OPTAB_FN (COND_NEG, ECF_CONST, cond_neg, cond_unary)
+
+DEF_INTERNAL_OPTAB_FN (RSQRT, ECF_CONST, rsqrt, unary)
+
+DEF_INTERNAL_OPTAB_FN (REDUC_PLUS, ECF_CONST | ECF_NOTHROW,
+ reduc_plus_scal, unary)
+DEF_INTERNAL_SIGNED_OPTAB_FN (REDUC_MAX, ECF_CONST | ECF_NOTHROW, first,
+ reduc_smax_scal, reduc_umax_scal, unary)
+DEF_INTERNAL_SIGNED_OPTAB_FN (REDUC_MIN, ECF_CONST | ECF_NOTHROW, first,
+ reduc_smin_scal, reduc_umin_scal, unary)
+DEF_INTERNAL_OPTAB_FN (REDUC_FMAX, ECF_CONST | ECF_NOTHROW,
+ reduc_fmax_scal, unary)
+DEF_INTERNAL_OPTAB_FN (REDUC_FMIN, ECF_CONST | ECF_NOTHROW,
+ reduc_fmin_scal, unary)
+DEF_INTERNAL_OPTAB_FN (REDUC_AND, ECF_CONST | ECF_NOTHROW,
+ reduc_and_scal, unary)
+DEF_INTERNAL_OPTAB_FN (REDUC_IOR, ECF_CONST | ECF_NOTHROW,
+ reduc_ior_scal, unary)
+DEF_INTERNAL_OPTAB_FN (REDUC_XOR, ECF_CONST | ECF_NOTHROW,
+ reduc_xor_scal, unary)
+
+/* Extract the last active element from a vector. */
+DEF_INTERNAL_OPTAB_FN (EXTRACT_LAST, ECF_CONST | ECF_NOTHROW,
+ extract_last, fold_left)
+
+/* Same, but return the first argument if no elements are active. */
+DEF_INTERNAL_OPTAB_FN (FOLD_EXTRACT_LAST, ECF_CONST | ECF_NOTHROW,
+ fold_extract_last, fold_extract)
+
+DEF_INTERNAL_OPTAB_FN (FOLD_LEFT_PLUS, ECF_CONST | ECF_NOTHROW,
+ fold_left_plus, fold_left)
+
+DEF_INTERNAL_OPTAB_FN (MASK_FOLD_LEFT_PLUS, ECF_CONST | ECF_NOTHROW,
+ mask_fold_left_plus, mask_fold_left)
+
+/* Unary math functions. */
+DEF_INTERNAL_FLT_FN (ACOS, ECF_CONST, acos, unary)
+DEF_INTERNAL_FLT_FN (ACOSH, ECF_CONST, acosh, unary)
+DEF_INTERNAL_FLT_FN (ASIN, ECF_CONST, asin, unary)
+DEF_INTERNAL_FLT_FN (ASINH, ECF_CONST, asinh, unary)
+DEF_INTERNAL_FLT_FN (ATAN, ECF_CONST, atan, unary)
+DEF_INTERNAL_FLT_FN (ATANH, ECF_CONST, atanh, unary)
+DEF_INTERNAL_FLT_FN (COS, ECF_CONST, cos, unary)
+DEF_INTERNAL_FLT_FN (COSH, ECF_CONST, cosh, unary)
+DEF_INTERNAL_FLT_FN (EXP, ECF_CONST, exp, unary)
+DEF_INTERNAL_FLT_FN (EXP10, ECF_CONST, exp10, unary)
+DEF_INTERNAL_FLT_FN (EXP2, ECF_CONST, exp2, unary)
+DEF_INTERNAL_FLT_FN (EXPM1, ECF_CONST, expm1, unary)
+DEF_INTERNAL_FLT_FN (LOG, ECF_CONST, log, unary)
+DEF_INTERNAL_FLT_FN (LOG10, ECF_CONST, log10, unary)
+DEF_INTERNAL_FLT_FN (LOG1P, ECF_CONST, log1p, unary)
+DEF_INTERNAL_FLT_FN (LOG2, ECF_CONST, log2, unary)
+DEF_INTERNAL_FLT_FN (LOGB, ECF_CONST, logb, unary)
+DEF_INTERNAL_FLT_FN (SIGNBIT, ECF_CONST, signbit, unary)
+DEF_INTERNAL_FLT_FN (SIGNIFICAND, ECF_CONST, significand, unary)
+DEF_INTERNAL_FLT_FN (SIN, ECF_CONST, sin, unary)
+DEF_INTERNAL_FLT_FN (SINH, ECF_CONST, sinh, unary)
+DEF_INTERNAL_FLT_FLOATN_FN (SQRT, ECF_CONST, sqrt, unary)
+DEF_INTERNAL_FLT_FN (TAN, ECF_CONST, tan, unary)
+DEF_INTERNAL_FLT_FN (TANH, ECF_CONST, tanh, unary)
+
+/* Floating-point to integer conversions.
+
+ ??? Here we preserve the I/L/LL prefix convention from the
+ corresponding built-in functions, rather than make the internal
+ functions polymorphic in both the argument and the return types.
+ Perhaps an alternative would be to pass a zero of the required
+ return type as a second parameter. */
+DEF_INTERNAL_FLT_FN (ICEIL, ECF_CONST, lceil, unary_convert)
+DEF_INTERNAL_FLT_FN (IFLOOR, ECF_CONST, lfloor, unary_convert)
+DEF_INTERNAL_FLT_FN (IRINT, ECF_CONST, lrint, unary_convert)
+DEF_INTERNAL_FLT_FN (IROUND, ECF_CONST, lround, unary_convert)
+DEF_INTERNAL_FLT_FN (LCEIL, ECF_CONST, lceil, unary_convert)
+DEF_INTERNAL_FLT_FN (LFLOOR, ECF_CONST, lfloor, unary_convert)
+DEF_INTERNAL_FLT_FN (LRINT, ECF_CONST, lrint, unary_convert)
+DEF_INTERNAL_FLT_FN (LROUND, ECF_CONST, lround, unary_convert)
+DEF_INTERNAL_FLT_FN (LLCEIL, ECF_CONST, lceil, unary_convert)
+DEF_INTERNAL_FLT_FN (LLFLOOR, ECF_CONST, lfloor, unary_convert)
+DEF_INTERNAL_FLT_FN (LLRINT, ECF_CONST, lrint, unary_convert)
+DEF_INTERNAL_FLT_FN (LLROUND, ECF_CONST, lround, unary_convert)
+
+/* FP rounding. */
+DEF_INTERNAL_FLT_FLOATN_FN (CEIL, ECF_CONST, ceil, unary)
+DEF_INTERNAL_FLT_FLOATN_FN (FLOOR, ECF_CONST, floor, unary)
+DEF_INTERNAL_FLT_FLOATN_FN (NEARBYINT, ECF_CONST, nearbyint, unary)
+DEF_INTERNAL_FLT_FLOATN_FN (RINT, ECF_CONST, rint, unary)
+DEF_INTERNAL_FLT_FLOATN_FN (ROUND, ECF_CONST, round, unary)
+DEF_INTERNAL_FLT_FLOATN_FN (ROUNDEVEN, ECF_CONST, roundeven, unary)
+DEF_INTERNAL_FLT_FLOATN_FN (TRUNC, ECF_CONST, btrunc, unary)
+
+/* Binary math functions. */
+DEF_INTERNAL_FLT_FN (ATAN2, ECF_CONST, atan2, binary)
+DEF_INTERNAL_FLT_FLOATN_FN (COPYSIGN, ECF_CONST, copysign, binary)
+DEF_INTERNAL_FLT_FN (FMOD, ECF_CONST, fmod, binary)
+DEF_INTERNAL_FLT_FN (HYPOT, ECF_CONST, hypot, binary)
+DEF_INTERNAL_FLT_FN (POW, ECF_CONST, pow, binary)
+DEF_INTERNAL_FLT_FN (REMAINDER, ECF_CONST, remainder, binary)
+DEF_INTERNAL_FLT_FN (SCALB, ECF_CONST, scalb, binary)
+DEF_INTERNAL_FLT_FLOATN_FN (FMIN, ECF_CONST, fmin, binary)
+DEF_INTERNAL_FLT_FLOATN_FN (FMAX, ECF_CONST, fmax, binary)
+DEF_INTERNAL_OPTAB_FN (XORSIGN, ECF_CONST, xorsign, binary)
+DEF_INTERNAL_OPTAB_FN (COMPLEX_ADD_ROT90, ECF_CONST, cadd90, binary)
+DEF_INTERNAL_OPTAB_FN (COMPLEX_ADD_ROT270, ECF_CONST, cadd270, binary)
+DEF_INTERNAL_OPTAB_FN (COMPLEX_MUL, ECF_CONST, cmul, binary)
+DEF_INTERNAL_OPTAB_FN (COMPLEX_MUL_CONJ, ECF_CONST, cmul_conj, binary)
+DEF_INTERNAL_OPTAB_FN (VEC_ADDSUB, ECF_CONST, vec_addsub, binary)
+DEF_INTERNAL_OPTAB_FN (VEC_FMADDSUB, ECF_CONST, vec_fmaddsub, ternary)
+DEF_INTERNAL_OPTAB_FN (VEC_FMSUBADD, ECF_CONST, vec_fmsubadd, ternary)
+
+/* FP scales. */
+DEF_INTERNAL_FLT_FN (LDEXP, ECF_CONST, ldexp, binary)
+
+/* Ternary math functions. */
+DEF_INTERNAL_FLT_FLOATN_FN (FMA, ECF_CONST, fma, ternary)
+DEF_INTERNAL_OPTAB_FN (COMPLEX_FMA, ECF_CONST, cmla, ternary)
+DEF_INTERNAL_OPTAB_FN (COMPLEX_FMA_CONJ, ECF_CONST, cmla_conj, ternary)
+DEF_INTERNAL_OPTAB_FN (COMPLEX_FMS, ECF_CONST, cmls, ternary)
+DEF_INTERNAL_OPTAB_FN (COMPLEX_FMS_CONJ, ECF_CONST, cmls_conj, ternary)
+
+/* Unary integer ops. */
+DEF_INTERNAL_INT_FN (CLRSB, ECF_CONST | ECF_NOTHROW, clrsb, unary)
+DEF_INTERNAL_INT_FN (CLZ, ECF_CONST | ECF_NOTHROW, clz, unary)
+DEF_INTERNAL_INT_FN (CTZ, ECF_CONST | ECF_NOTHROW, ctz, unary)
+DEF_INTERNAL_INT_FN (FFS, ECF_CONST | ECF_NOTHROW, ffs, unary)
+DEF_INTERNAL_INT_FN (PARITY, ECF_CONST | ECF_NOTHROW, parity, unary)
+DEF_INTERNAL_INT_FN (POPCOUNT, ECF_CONST | ECF_NOTHROW, popcount, unary)
+
+DEF_INTERNAL_FN (GOMP_TARGET_REV, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_USE_SIMT, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMT_ENTER, ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMT_ENTER_ALLOC, ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMT_EXIT, ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMT_LANE, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMT_VF, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMT_LAST_LANE, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMT_ORDERED_PRED, ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMT_VOTE_ANY, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMT_XCHG_BFLY, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMT_XCHG_IDX, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMD_LANE, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMD_VF, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMD_LAST_LANE, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMD_ORDERED_START, ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (GOMP_SIMD_ORDERED_END, ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (LOOP_VECTORIZED, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (LOOP_DIST_ALIAS, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (ANNOTATE, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (UBSAN_NULL, ECF_LEAF | ECF_NOTHROW, ". R . ")
+DEF_INTERNAL_FN (UBSAN_BOUNDS, ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (UBSAN_VPTR, ECF_LEAF | ECF_NOTHROW, ". R R . . ")
+DEF_INTERNAL_FN (UBSAN_CHECK_ADD, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (UBSAN_CHECK_SUB, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (UBSAN_CHECK_MUL, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (UBSAN_PTR, ECF_LEAF | ECF_NOTHROW, ". R . ")
+DEF_INTERNAL_FN (UBSAN_OBJECT_SIZE, ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (ABNORMAL_DISPATCHER, ECF_NORETURN, NULL)
+DEF_INTERNAL_FN (BUILTIN_EXPECT, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (HWASAN_ALLOCA_UNPOISON, ECF_LEAF | ECF_NOTHROW, ". R ")
+DEF_INTERNAL_FN (HWASAN_CHOOSE_TAG, ECF_LEAF | ECF_NOTHROW, ". ")
+DEF_INTERNAL_FN (HWASAN_CHECK, ECF_TM_PURE | ECF_LEAF | ECF_NOTHROW,
+ ". . R . . ")
+DEF_INTERNAL_FN (HWASAN_MARK, ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (HWASAN_SET_TAG,
+ ECF_TM_PURE | ECF_PURE | ECF_LEAF | ECF_NOTHROW, ". R R ")
+DEF_INTERNAL_FN (ASAN_CHECK, ECF_TM_PURE | ECF_LEAF | ECF_NOTHROW,
+ ". . R . . ")
+DEF_INTERNAL_FN (ASAN_MARK, ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (ASAN_POISON, ECF_LEAF | ECF_NOTHROW | ECF_NOVOPS, NULL)
+DEF_INTERNAL_FN (ASAN_POISON_USE, ECF_LEAF | ECF_NOTHROW | ECF_NOVOPS, NULL)
+DEF_INTERNAL_FN (ADD_OVERFLOW, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (SUB_OVERFLOW, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (MUL_OVERFLOW, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (TSAN_FUNC_EXIT, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (VA_ARG, ECF_NOTHROW | ECF_LEAF, NULL)
+DEF_INTERNAL_FN (VEC_CONVERT, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (RAWMEMCHR, ECF_PURE | ECF_LEAF | ECF_NOTHROW, NULL)
+
+/* An unduplicable, uncombinable function. Generally used to preserve
+ a CFG property in the face of jump threading, tail merging or
+ other such optimizations. The first argument distinguishes
+ between uses. See internal-fn.h for usage. */
+DEF_INTERNAL_FN (UNIQUE, ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (PHI, 0, NULL)
+
+/* A function to represent an artificial initialization to an uninitialized
+ automatic variable. */
+DEF_INTERNAL_FN (DEFERRED_INIT, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+
+/* DIM_SIZE and DIM_POS return the size of a particular compute
+ dimension and the executing thread's position within that
+ dimension. DIM_POS is pure (and not const) so that it isn't
+ thought to clobber memory and can be gcse'd within a single
+ parallel region, but not across FORK/JOIN boundaries. They take a
+ single INTEGER_CST argument. This might be overly conservative. */
+DEF_INTERNAL_FN (GOACC_DIM_SIZE, ECF_CONST | ECF_NOTHROW | ECF_LEAF, NULL)
+DEF_INTERNAL_FN (GOACC_DIM_POS, ECF_PURE | ECF_NOTHROW | ECF_LEAF, NULL)
+
+/* OpenACC looping abstraction. See internal-fn.h for usage. */
+DEF_INTERNAL_FN (GOACC_LOOP, ECF_PURE | ECF_NOTHROW, NULL)
+
+/* OpenACC reduction abstraction. See internal-fn.h for usage. */
+DEF_INTERNAL_FN (GOACC_REDUCTION, ECF_NOTHROW | ECF_LEAF, NULL)
+
+/* OpenACC tile abstraction.  Describes the spans of the element loop.
+ GOACC_TILE (num-loops, loop-no, tile-arg, tile-mask, element-mask). */
+DEF_INTERNAL_FN (GOACC_TILE, ECF_NOTHROW | ECF_LEAF, NULL)
+
+/* Set errno to EDOM, if GCC knows how to do that directly for the
+ current target. */
+DEF_INTERNAL_FN (SET_EDOM, ECF_LEAF | ECF_NOTHROW, NULL)
+
+/* Atomic functions. These don't have ECF_NOTHROW because for
+ -fnon-call-exceptions they can throw; otherwise we set
+ gimple_call_nothrow_p on them. */
+DEF_INTERNAL_FN (ATOMIC_BIT_TEST_AND_SET, ECF_LEAF, NULL)
+DEF_INTERNAL_FN (ATOMIC_BIT_TEST_AND_COMPLEMENT, ECF_LEAF, NULL)
+DEF_INTERNAL_FN (ATOMIC_BIT_TEST_AND_RESET, ECF_LEAF, NULL)
+DEF_INTERNAL_FN (ATOMIC_COMPARE_EXCHANGE, ECF_LEAF, NULL)
+DEF_INTERNAL_FN (ATOMIC_ADD_FETCH_CMP_0, ECF_LEAF, NULL)
+DEF_INTERNAL_FN (ATOMIC_SUB_FETCH_CMP_0, ECF_LEAF, NULL)
+DEF_INTERNAL_FN (ATOMIC_AND_FETCH_CMP_0, ECF_LEAF, NULL)
+DEF_INTERNAL_FN (ATOMIC_OR_FETCH_CMP_0, ECF_LEAF, NULL)
+DEF_INTERNAL_FN (ATOMIC_XOR_FETCH_CMP_0, ECF_LEAF, NULL)
+
+/* To implement [[fallthrough]]. */
+DEF_INTERNAL_FN (FALLTHROUGH, ECF_LEAF | ECF_NOTHROW, NULL)
+
+/* To implement __builtin_launder. */
+DEF_INTERNAL_FN (LAUNDER, ECF_LEAF | ECF_NOTHROW | ECF_NOVOPS, NULL)
+
+/* Divmod function. */
+DEF_INTERNAL_FN (DIVMOD, ECF_CONST | ECF_LEAF, NULL)
+
+/* For coroutines. */
+DEF_INTERNAL_FN (CO_ACTOR, ECF_NOTHROW | ECF_LEAF, NULL)
+DEF_INTERNAL_FN (CO_YIELD, ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (CO_SUSPN, ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (CO_FRAME, ECF_PURE | ECF_NOTHROW | ECF_LEAF, NULL)
+
+/* A NOP function with arbitrary arguments and return value. */
+DEF_INTERNAL_FN (NOP, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+
+/* Temporary vehicle for __builtin_shufflevector. */
+DEF_INTERNAL_FN (SHUFFLEVECTOR, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+
+/* <=> optimization. */
+DEF_INTERNAL_FN (SPACESHIP, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+
+/* [[assume (cond)]]. */
+DEF_INTERNAL_FN (ASSUME, ECF_CONST | ECF_LEAF | ECF_NOTHROW
+ | ECF_LOOPING_CONST_OR_PURE, NULL)
+
+/* For if-conversion of inbranch SIMD clones. */
+DEF_INTERNAL_FN (MASK_CALL, ECF_NOVOPS, NULL)
+
+#undef DEF_INTERNAL_INT_FN
+#undef DEF_INTERNAL_FLT_FN
+#undef DEF_INTERNAL_FLT_FLOATN_FN
+#undef DEF_INTERNAL_SIGNED_OPTAB_FN
+#undef DEF_INTERNAL_OPTAB_FN
+#undef DEF_INTERNAL_FN
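(Editorial note: because every specialized macro above defaults into DEF_INTERNAL_FN, a consumer of this file only needs to define the variant it cares about. As a hedged sketch — a hypothetical debugging helper, not part of GCC — this is how one could list just the optab-backed entries; the FLT/INT defaults forward into DEF_INTERNAL_OPTAB_FN, so those print too.)

#include <stdio.h>

static void
dump_direct_optab_fns (void)
{
/* Expand only the optab-backed entries; every other entry falls
   through the empty defaults at the top of internal-fn.def, and the
   .def file #undefs this macro again when it is done.  */
#define DEF_INTERNAL_OPTAB_FN(NAME, FLAGS, OPTAB, TYPE) \
  printf ("IFN_%s -> %s_optab\n", #NAME, #OPTAB);
#include "internal-fn.def"
}
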
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/internal-fn.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/internal-fn.h
new file mode 100644
index 0000000..08922ed
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/internal-fn.h
@@ -0,0 +1,260 @@
+/* Internal functions.
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_INTERNAL_FN_H
+#define GCC_INTERNAL_FN_H
+
+/* INTEGER_CST values for IFN_UNIQUE function arg-0.
+
+ UNSPEC: Undifferentiated UNIQUE.
+
+ FORK and JOIN mark the points at which OpenACC partitioned
+ execution is entered or exited.
+ DEP_VAR = UNIQUE ({FORK,JOIN}, DEP_VAR, AXIS)
+
+ HEAD_MARK and TAIL_MARK are used to demarcate the sequence entering
+ or leaving partitioned execution.
+ DEP_VAR = UNIQUE ({HEAD,TAIL}_MARK, REMAINING_MARKS, ...PRIMARY_FLAGS)
+
+ The PRIMARY_FLAGS only occur on the first HEAD_MARK of a sequence.
+
+ PRIVATE captures variables to be made private at the surrounding parallelism
+ level. */
+#define IFN_UNIQUE_CODES \
+ DEF(UNSPEC), \
+ DEF(OACC_FORK), DEF(OACC_JOIN), \
+ DEF(OACC_HEAD_MARK), DEF(OACC_TAIL_MARK), \
+ DEF(OACC_PRIVATE)
+
+enum ifn_unique_kind {
+#define DEF(X) IFN_UNIQUE_##X
+ IFN_UNIQUE_CODES
+#undef DEF
+};
+
+/* INTEGER_CST values for IFN_GOACC_LOOP arg-0. Allows the precise
+ stepping of the compute geometry over the loop iterations to be
+ deferred until it is known which compiler is generating the code.
+ The action is encoded in a constant first argument.
+
+ CHUNK_MAX = LOOP (CODE_CHUNKS, DIR, RANGE, STEP, CHUNK_SIZE, MASK)
+ STEP = LOOP (CODE_STEP, DIR, RANGE, STEP, CHUNK_SIZE, MASK)
+ OFFSET = LOOP (CODE_OFFSET, DIR, RANGE, STEP, CHUNK_SIZE, MASK, CHUNK_NO)
+ BOUND = LOOP (CODE_BOUND, DIR, RANGE, STEP, CHUNK_SIZE, MASK, OFFSET)
+
+ DIR - +1 for up loop, -1 for down loop
+ RANGE - Range of loop (END - BASE)
+ STEP - iteration step size
+ CHUNK_SIZE - size of chunking (constant zero for no chunking)
+ CHUNK_NO - chunk number
+ MASK - partitioning mask. */
+
+#define IFN_GOACC_LOOP_CODES \
+ DEF(CHUNKS), DEF(STEP), DEF(OFFSET), DEF(BOUND)
+enum ifn_goacc_loop_kind {
+#define DEF(X) IFN_GOACC_LOOP_##X
+ IFN_GOACC_LOOP_CODES
+#undef DEF
+};
+
+/* The GOACC_REDUCTION function defines a generic interface to support
+ gang, worker and vector reductions. All calls are of the following
+ form:
+
+ V = REDUCTION (CODE, REF_TO_RES, LOCAL_VAR, LEVEL, OP, OFFSET)
+
+ REF_TO_RES is a reference to the original reduction variable; may be NULL
+ LOCAL_VAR is the intermediate reduction variable
+ LEVEL corresponds to the GOMP_DIM of the reduction
+ OP is the tree code of the reduction operation
+ OFFSET may be used as an offset into a reduction array for the
+ reductions occurring at this level.
+ In general the return value is LOCAL_VAR, which creates a data
+ dependency between calls operating on the same reduction. */
+
+#define IFN_GOACC_REDUCTION_CODES \
+ DEF(SETUP), DEF(INIT), DEF(FINI), DEF(TEARDOWN)
+enum ifn_goacc_reduction_kind {
+#define DEF(X) IFN_GOACC_REDUCTION_##X
+ IFN_GOACC_REDUCTION_CODES
+#undef DEF
+};
+
+/* Initialize internal function tables. */
+
+extern void init_internal_fns ();
+
+/* Return the name of internal function FN. The name is only meaningful
+ for dumps; it has no linkage. */
+
+extern const char *const internal_fn_name_array[];
+
+inline const char *
+internal_fn_name (enum internal_fn fn)
+{
+ return internal_fn_name_array[(int) fn];
+}
+
+extern internal_fn lookup_internal_fn (const char *);
+
+/* Return the ECF_* flags for function FN. */
+
+extern const int internal_fn_flags_array[];
+
+inline int
+internal_fn_flags (enum internal_fn fn)
+{
+ return internal_fn_flags_array[(int) fn];
+}
+
+/* Return fnspec for function FN. */
+
+extern GTY(()) const_tree internal_fn_fnspec_array[IFN_LAST + 1];
+
+inline const_tree
+internal_fn_fnspec (enum internal_fn fn)
+{
+ return internal_fn_fnspec_array[(int) fn];
+}
+
+/* Describes an internal function that maps directly to an optab. */
+struct direct_internal_fn_info
+{
+ /* optabs can be parameterized by one or two modes. These fields describe
+ how to select those modes from the types of the return value and
+ arguments. A value of -1 says that the mode is determined by the
+ return type while a value N >= 0 says that the mode is determined by
+ the type of argument N. A value of -2 says that this internal
+ function isn't directly mapped to an optab. */
+ signed int type0 : 8;
+ signed int type1 : 8;
+ /* True if the function is pointwise, so that it can be vectorized by
+ converting the return type and all argument types to vectors of the
+ same number of elements. E.g. we can vectorize an IFN_SQRT on
+ floats as an IFN_SQRT on vectors of N floats.
+
+ This only needs 1 bit, but occupies the full 16 to ensure a nice
+ layout. */
+ unsigned int vectorizable : 16;
+};
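(Editorial note: a hedged sketch of how the type0/type1 convention documented above decodes — a hypothetical helper, not a GCC function: -1 selects the return type, N >= 0 selects the type of argument N, and -2 marks functions with no direct optab.)

static inline tree
select_direct_fn_type (const direct_internal_fn_info &info, int which,
                       tree return_type, tree *arg_types)
{
  int sel = which == 0 ? info.type0 : info.type1;
  gcc_assert (sel >= -1);        /* -2 means "not a direct optab fn" */
  return sel == -1 ? return_type : arg_types[sel];
}
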
+
+extern const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1];
+
+/* Return true if FN is mapped directly to an optab. */
+
+inline bool
+direct_internal_fn_p (internal_fn fn)
+{
+ return direct_internal_fn_array[fn].type0 >= -1;
+}
+
+/* Return true if FN is a direct internal function that can be vectorized by
+ converting the return type and all argument types to vectors of the same
+ number of elements. E.g. we can vectorize an IFN_SQRT on floats as an
+ IFN_SQRT on vectors of N floats. */
+
+inline bool
+vectorizable_internal_fn_p (internal_fn fn)
+{
+ return direct_internal_fn_array[fn].vectorizable;
+}
+
+/* Return optab information about internal function FN. Only meaningful
+ if direct_internal_fn_p (FN). */
+
+inline const direct_internal_fn_info &
+direct_internal_fn (internal_fn fn)
+{
+ gcc_checking_assert (direct_internal_fn_p (fn));
+ return direct_internal_fn_array[fn];
+}
+
+extern tree_pair direct_internal_fn_types (internal_fn, tree, tree *);
+extern tree_pair direct_internal_fn_types (internal_fn, gcall *);
+extern bool direct_internal_fn_supported_p (internal_fn, tree_pair,
+ optimization_type);
+extern bool direct_internal_fn_supported_p (internal_fn, tree,
+ optimization_type);
+extern bool direct_internal_fn_supported_p (gcall *, optimization_type);
+
+/* Return true if FN is supported for types TYPE0 and TYPE1 when the
+ optimization type is OPT_TYPE. The types are those associated with
+ the "type0" and "type1" fields of FN's direct_internal_fn_info
+ structure. */
+
+inline bool
+direct_internal_fn_supported_p (internal_fn fn, tree type0, tree type1,
+ optimization_type opt_type)
+{
+ return direct_internal_fn_supported_p (fn, tree_pair (type0, type1),
+ opt_type);
+}
+
+extern bool commutative_binary_fn_p (internal_fn);
+extern bool commutative_ternary_fn_p (internal_fn);
+extern int first_commutative_argument (internal_fn);
+extern bool associative_binary_fn_p (internal_fn);
+
+extern bool set_edom_supported_p (void);
+
+extern internal_fn get_conditional_internal_fn (tree_code);
+extern internal_fn get_conditional_internal_fn (internal_fn);
+extern tree_code conditional_internal_fn_code (internal_fn);
+extern internal_fn get_unconditional_internal_fn (internal_fn);
+extern bool can_interpret_as_conditional_op_p (gimple *, tree *,
+ tree_code *, tree (&)[3],
+ tree *);
+
+extern bool internal_load_fn_p (internal_fn);
+extern bool internal_store_fn_p (internal_fn);
+extern bool internal_gather_scatter_fn_p (internal_fn);
+extern int internal_fn_mask_index (internal_fn);
+extern int internal_fn_stored_value_index (internal_fn);
+extern bool internal_gather_scatter_fn_supported_p (internal_fn, tree,
+ tree, tree, int);
+extern bool internal_check_ptrs_fn_supported_p (internal_fn, tree,
+ poly_uint64, unsigned int);
+#define VECT_PARTIAL_BIAS_UNSUPPORTED 127
+
+extern signed char internal_len_load_store_bias (internal_fn ifn,
+ machine_mode);
+
+extern void expand_addsub_overflow (location_t, tree_code, tree, tree, tree,
+ bool, bool, bool, bool, tree *);
+extern void expand_internal_call (gcall *);
+extern void expand_internal_call (internal_fn, gcall *);
+extern void expand_PHI (internal_fn, gcall *);
+extern void expand_SHUFFLEVECTOR (internal_fn, gcall *);
+extern void expand_SPACESHIP (internal_fn, gcall *);
+extern void expand_TRAP (internal_fn, gcall *);
+extern void expand_ASSUME (internal_fn, gcall *);
+extern void expand_MASK_CALL (internal_fn, gcall *);
+
+extern bool vectorized_internal_fn_supported_p (internal_fn, tree);
+
+enum {
+ ATOMIC_OP_FETCH_CMP_0_EQ = 0,
+ ATOMIC_OP_FETCH_CMP_0_NE = 1,
+ ATOMIC_OP_FETCH_CMP_0_LT = 2,
+ ATOMIC_OP_FETCH_CMP_0_LE = 3,
+ ATOMIC_OP_FETCH_CMP_0_GT = 4,
+ ATOMIC_OP_FETCH_CMP_0_GE = 5
+};
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/intl.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/intl.h
new file mode 100644
index 0000000..ad94090
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/intl.h
@@ -0,0 +1,73 @@
+/* intl.h - internationalization
+ Copyright (C) 1998-2023 Free Software Foundation, Inc.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_INTL_H
+#define GCC_INTL_H
+
+#ifdef HAVE_LOCALE_H
+# include <locale.h>
+#endif
+
+#ifndef HAVE_SETLOCALE
+# define setlocale(category, locale) (locale)
+#endif
+
+#ifdef ENABLE_NLS
+#include <libintl.h>
+extern void gcc_init_libintl (void);
+extern size_t gcc_gettext_width (const char *);
+#else
+/* Stubs. */
+# undef textdomain
+# define textdomain(domain) (domain)
+# undef bindtextdomain
+# define bindtextdomain(domain, directory) (domain)
+# undef gettext
+# define gettext(msgid) (msgid)
+# define ngettext(singular,plural,n) fake_ngettext (singular, plural, n)
+# define gcc_init_libintl() /* nothing */
+# define gcc_gettext_width(s) strlen (s)
+
+extern const char *fake_ngettext (const char *singular, const char *plural,
+ unsigned long int n);
+
+#endif
+
+/* Used to immediately translate the argument. */
+#ifndef _
+# define _(msgid) gettext (msgid)
+#endif
+
+/* Used to mark strings that will be translated later. */
+#ifndef N_
+# define N_(msgid) msgid
+#endif
+
+/* Like N_, but for GCC diagnostic format strings. See ABOUT-GCC-NLS for
+ details. */
+#ifndef G_
+# define G_(gmsgid) gmsgid
+#endif
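(Editorial note: the division of labour between these macros is the standard gettext idiom — N_() merely marks a string for extraction, since static initializers cannot call functions, while _() performs the runtime lookup at the point of use. A small sketch with made-up data:)

/* Strings in static data can only be marked for later translation.  */
static const char *const color_names[] = { N_("red"), N_("green") };

const char *
describe_color (unsigned i)
{
  return _(color_names[i]);   /* translated here, at run time */
}
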
+
+extern char *get_spaces (const char *);
+
+extern const char *open_quote;
+extern const char *close_quote;
+extern const char *locale_encoding;
+extern bool locale_utf8;
+
+#endif /* intl.h */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-fnsummary.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-fnsummary.h
new file mode 100644
index 0000000..fcc0116
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-fnsummary.h
@@ -0,0 +1,453 @@
+/* IPA function body analysis.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Jan Hubicka
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_IPA_SUMMARY_H
+#define GCC_IPA_SUMMARY_H
+
+#include "sreal.h"
+#include "ipa-predicate.h"
+
+
+/* Hints are reasons why IPA heuristics should prefer specializing a given
+ function. They are represented as a bitmap of the following values. */
+enum ipa_hints_vals {
+ /* When specialization turns an indirect call into a direct call,
+ it is a good idea to do so. */
+ INLINE_HINT_indirect_call = 1,
+ /* Inlining may make loop iterations or loop strides known. It is a good
+ idea to do so because it enables loop optimizations. */
+ INLINE_HINT_loop_iterations = 2,
+ INLINE_HINT_loop_stride = 4,
+ /* Inlining within the same strongly connected component of the callgraph
+ is often a loss due to increased stack frame usage and prologue setup
+ costs. */
+ INLINE_HINT_same_scc = 8,
+ /* Inlining functions within a strongly connected component is not such a
+ great win. */
+ INLINE_HINT_in_scc = 16,
+ /* If a function is declared inline by the user, it may be a good idea to
+ inline it. Set by simple_edge_hints in ipa-inline-analysis.cc. */
+ INLINE_HINT_declared_inline = 32,
+ /* Programs are usually still organized for non-LTO compilation and thus
+ if functions are in different modules, inlining may not be so important.
+ Set by simple_edge_hints in ipa-inline-analysis.cc. */
+ INLINE_HINT_cross_module = 64,
+ /* We know that the callee is hot by profile. */
+ INLINE_HINT_known_hot = 128,
+ /* There is a builtin_constant_p dependent on a parameter, which is usually
+ a strong hint to inline. */
+ INLINE_HINT_builtin_constant_p = 256
+};
+
+typedef int ipa_hints;
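+
+/* A hedged usage sketch (not part of the original header): hints are
+   combined and tested as ordinary bit flags, e.g.
+
+     ipa_hints hints = INLINE_HINT_indirect_call | INLINE_HINT_known_hot;
+     if (hints & INLINE_HINT_same_scc)
+       ;  // penalize inlining within the same SCC
+*/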
+
+/* Simple description of whether a memory load or a condition refers to a load
+ from an aggregate and, if so, how and where from in the aggregate.
+ Individual fields have the same meaning as the fields with the same name in
+ struct condition. */
+
+struct agg_position_info
+{
+ HOST_WIDE_INT offset;
+ bool agg_contents;
+ bool by_ref;
+};
+
+/* Representation of function body size and time depending on the call
+ context. We keep a simple array of records, each containing a predicate
+ and the time/size to account. */
+class size_time_entry
+{
+public:
+ /* Predicate for code to be executed. */
+ ipa_predicate exec_predicate;
+ /* Predicate for value to be constant and optimized out in a specialized copy.
+ When deciding on specialization this makes it possible to see how much
+ the executed code paths will simplify. */
+ ipa_predicate nonconst_predicate;
+ int size;
+ sreal time;
+};
+
+/* Summary about function and stack frame sizes. We keep this info
+ for inline clones and also for WPA streaming. For this reason this is not
+ part of ipa_fn_summary which exists only for offline functions. */
+class ipa_size_summary
+{
+public:
+ /* Estimated stack frame consumption by the function. */
+ HOST_WIDE_INT estimated_self_stack_size;
+ /* Size of the function body. */
+ int self_size;
+ /* Estimated size of the function after inlining. */
+ int size;
+
+ ipa_size_summary ()
+ : estimated_self_stack_size (0), self_size (0), size (0)
+ {
+ }
+};
+
+/* Structure to capture how frequently some interesting events occur given a
+ particular predicate. The structure is used to estimate how often we
+ encounter loops with known iteration count or stride in various
+ contexts. */
+
+struct GTY(()) ipa_freqcounting_predicate
+{
+ /* The described event happens with this frequency... */
+ sreal freq;
+ /* ...when this predicate evaluates to true. */
+ ipa_predicate * GTY((skip)) predicate;
+};
+
+/* Function inlining information. */
+class GTY(()) ipa_fn_summary
+{
+public:
+ /* Keep all fields empty so summary dumping works during its computation.
+ This is useful for debugging. */
+ ipa_fn_summary ()
+ : min_size (0),
+ inlinable (false), single_caller (false),
+ fp_expressions (false), target_info (0),
+ estimated_stack_size (false),
+ time (0), conds (NULL),
+ size_time_table (), call_size_time_table (vNULL),
+ loop_iterations (NULL), loop_strides (NULL),
+ builtin_constant_p_parms (vNULL),
+ growth (0), scc_no (0)
+ {
+ }
+
+ /* Copy constructor. */
+ ipa_fn_summary (const ipa_fn_summary &s)
+ : min_size (s.min_size),
+ inlinable (s.inlinable), single_caller (s.single_caller),
+ fp_expressions (s.fp_expressions),
+ target_info (s.target_info),
+ estimated_stack_size (s.estimated_stack_size),
+ time (s.time), conds (s.conds), size_time_table (),
+ call_size_time_table (vNULL),
+ loop_iterations (s.loop_iterations), loop_strides (s.loop_strides),
+ builtin_constant_p_parms (s.builtin_constant_p_parms),
+ growth (s.growth), scc_no (s.scc_no)
+ {}
+
+ /* Destructor. */
+ ~ipa_fn_summary ();
+
+ /* Information about the function body itself. */
+
+ /* Minimal size increase after inlining. */
+ int min_size;
+
+ /* False when something makes inlining impossible (such as va_arg). */
+ unsigned inlinable : 1;
+ /* True when there is only one caller of the function before small function
+ inlining. */
+ unsigned int single_caller : 1;
+ /* True if function contains any floating point expressions. */
+ unsigned int fp_expressions : 1;
+ /* Like the fp_expressions field above, but holds some target-specific
+ information, such as target-specific ISA flags. Note that for
+ offloading target compilers, this field isn't streamed. */
+ unsigned int target_info;
+
+ /* Information about the function that will result after applying all the
+ inline decisions present in the callgraph. Generally kept up to
+ date only for functions that are not inline clones. */
+
+ /* Estimated stack frame consumption by the function. */
+ HOST_WIDE_INT estimated_stack_size;
+ /* Estimated runtime of function after inlining. */
+ sreal GTY((skip)) time;
+
+ /* Conditional size/time information. The summaries are being
+ merged during inlining. */
+ conditions conds;
+ /* Normal code is accounted in size_time_table, while calls are
+ accounted in call_size_time_table. This is because calls
+ are often adjusted by IPA optimizations and thus this summary
+ is generated from call summary information when needed. */
+ auto_vec<size_time_entry> GTY((skip)) size_time_table;
+ /* Unlike size_time_table, which is initialized for all summaries,
+ call_size_time_table is allocated only for functions with
+ many calls. Use efficient vl_ptr storage. */
+ vec<size_time_entry, va_heap, vl_ptr> GTY((skip)) call_size_time_table;
+
+ /* Predicates on when some loops in the function can have known bounds. */
+ vec<ipa_freqcounting_predicate, va_gc> *loop_iterations;
+ /* Predicates on when some loops in the function can have known strides. */
+ vec<ipa_freqcounting_predicate, va_gc> *loop_strides;
+ /* Parameters tested by builtin_constant_p. */
+ vec<int, va_heap, vl_ptr> GTY((skip)) builtin_constant_p_parms;
+ /* Estimated growth for inlining all copies of the function before the
+ start of small function inlining.
+ This value will get out of date as the callers are duplicated, but
+ using an up-to-date value in the badness metric would mean a lot of
+ extra expense. */
+ int growth;
+ /* Number of the SCC at the beginning of the inlining process. */
+ int scc_no;
+
+ /* Record time and size under given predicates. */
+ void account_size_time (int, sreal, const ipa_predicate &,
+ const ipa_predicate &,
+ bool call = false);
+
+ /* We keep values scaled up, so fractional sizes can be accounted. */
+ static const int size_scale = 2;
+ /* Maximal size of size_time_table before we start to be conservative. */
+ static const int max_size_time_table_size = 256;
+};
+
+class GTY((user)) ipa_fn_summary_t:
+ public fast_function_summary <ipa_fn_summary *, va_gc>
+{
+public:
+ ipa_fn_summary_t (symbol_table *symtab):
+ fast_function_summary <ipa_fn_summary *, va_gc> (symtab) {}
+
+ static ipa_fn_summary_t *create_ggc (symbol_table *symtab)
+ {
+ class ipa_fn_summary_t *summary
+ = new (ggc_alloc_no_dtor<ipa_fn_summary_t> ()) ipa_fn_summary_t (symtab);
+ summary->disable_insertion_hook ();
+ return summary;
+ }
+
+ /* Remove ipa_fn_summary for all callees of NODE. */
+ void remove_callees (cgraph_node *node);
+
+ void insert (cgraph_node *, ipa_fn_summary *) final override;
+ void remove (cgraph_node *node, ipa_fn_summary *) final override
+ {
+ remove_callees (node);
+ }
+
+ void duplicate (cgraph_node *src, cgraph_node *dst,
+ ipa_fn_summary *src_data, ipa_fn_summary *dst_data)
+ final override;
+};
+
+extern GTY(()) fast_function_summary <ipa_fn_summary *, va_gc>
+ *ipa_fn_summaries;
+
+class ipa_size_summary_t:
+ public fast_function_summary <ipa_size_summary *, va_heap>
+{
+public:
+ ipa_size_summary_t (symbol_table *symtab):
+ fast_function_summary <ipa_size_summary *, va_heap> (symtab)
+ {
+ disable_insertion_hook ();
+ }
+
+ void duplicate (cgraph_node *, cgraph_node *,
+ ipa_size_summary *src_data,
+ ipa_size_summary *dst_data) final override
+ {
+ *dst_data = *src_data;
+ }
+};
+extern fast_function_summary <ipa_size_summary *, va_heap>
+ *ipa_size_summaries;
+
+/* Information kept about callgraph edges. */
+class ipa_call_summary
+{
+public:
+ /* Keep all fields empty so summary dumping works during its computation.
+ This is useful for debugging. */
+ ipa_call_summary ()
+ : predicate (NULL), param (vNULL), call_stmt_size (0), call_stmt_time (0),
+ loop_depth (0), is_return_callee_uncaptured (false)
+ {
+ }
+
+ /* Copy constructor. */
+ ipa_call_summary (const ipa_call_summary &s):
+ predicate (s.predicate), param (s.param), call_stmt_size (s.call_stmt_size),
+ call_stmt_time (s.call_stmt_time), loop_depth (s.loop_depth),
+ is_return_callee_uncaptured (s.is_return_callee_uncaptured)
+ {
+ }
+
+ /* Default destructor. */
+ ~ipa_call_summary ();
+
+ ipa_predicate *predicate;
+ /* Vector indexed by parameters. */
+ vec<inline_param_summary> param;
+ /* Estimated size and time of the call statement. */
+ int call_stmt_size;
+ int call_stmt_time;
+ /* Depth of loop nest, 0 means no nesting. */
+ unsigned int loop_depth;
+ /* Indicates whether the caller returns the value of its callee. */
+ bool is_return_callee_uncaptured;
+};
+
+class ipa_call_summary_t: public fast_call_summary <ipa_call_summary *, va_heap>
+{
+public:
+ ipa_call_summary_t (symbol_table *symtab):
+ fast_call_summary <ipa_call_summary *, va_heap> (symtab) {}
+
+ /* Hook that is called by summary when an edge is duplicated. */
+ void duplicate (cgraph_edge *src, cgraph_edge *dst,
+ ipa_call_summary *src_data,
+ ipa_call_summary *dst_data) final override;
+};
+
+/* Estimated execution times, code sizes and other information about the
+ code executing a call described by ipa_call_context. */
+
+struct ipa_call_estimates
+{
+ /* Estimated size needed to execute call in the given context. */
+ int size;
+
+ /* Minimal size needed for the call that is independent of the call context
+ and can be used for fast estimates. */
+ int min_size;
+
+ /* Estimated time needed to execute call in the given context. */
+ sreal time;
+
+ /* Estimated time needed to execute the function when not ignoring
+ computations known to be constant in this context. */
+ sreal nonspecialized_time;
+
+ /* Further discovered reasons why to inline or specialize the given call. */
+ ipa_hints hints;
+
+ /* Frequency with which a loop with a known number of iterations is
+ encountered. Calculated with hints. */
+ sreal loops_with_known_iterations;
+
+ /* Frequency with which a loop with known strides is encountered. Calculated
+ with hints. */
+ sreal loops_with_known_strides;
+};
+
+class ipa_cached_call_context;
+
+/* This object describes the context of a call, that is, a summary of known
+ information about its parameters. The main purpose of this context is
+ to give more realistic estimates of function runtime, size and
+ inline hints. */
+class ipa_call_context
+{
+public:
+ ipa_call_context (cgraph_node *node,
+ clause_t possible_truths,
+ clause_t nonspec_possible_truths,
+ vec<inline_param_summary> inline_param_summary,
+ ipa_auto_call_arg_values *arg_values);
+ ipa_call_context ()
+ : m_node(NULL)
+ {
+ }
+ void estimate_size_and_time (ipa_call_estimates *estimates,
+ bool est_times = true, bool est_hints = true);
+ bool equal_to (const ipa_call_context &);
+ bool exists_p ()
+ {
+ return m_node != NULL;
+ }
+private:
+ /* Called function. */
+ cgraph_node *m_node;
+ /* Clause describing what predicate conditionals can be satisfied
+ in this context if function is inlined/specialized. */
+ clause_t m_possible_truths;
+ /* Clause describing what predicate conditionals can be satisfied
+ in this context if function is kept offline. */
+ clause_t m_nonspec_possible_truths;
+ /* Inline summary maintains info about change probabilities. */
+ vec<inline_param_summary> m_inline_param_summary;
+
+ /* Even after having calculated clauses, the information about argument
+ values is used to resolve indirect calls. */
+ ipa_call_arg_values m_avals;
+
+ friend ipa_cached_call_context;
+};
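+
+/* A hedged usage sketch (not part of the original header); NODE, the clause
+   values and AVALS are placeholders:
+
+     ipa_auto_call_arg_values avals;
+     ipa_call_context ctx (node, possible_truths, nonspec_truths,
+                           param_summaries, &avals);
+     ipa_call_estimates estimates;
+     ctx.estimate_size_and_time (&estimates);
+*/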
+
+/* Variant of ipa_call_context that is stored in a cache over a longer period
+ of time. */
+
+class ipa_cached_call_context : public ipa_call_context
+{
+public:
+ void duplicate_from (const ipa_call_context &ctx);
+ void release ();
+};
+
+extern fast_call_summary <ipa_call_summary *, va_heap> *ipa_call_summaries;
+
+/* In ipa-fnsummary.cc */
+void ipa_debug_fn_summary (struct cgraph_node *);
+void ipa_dump_fn_summaries (FILE *f);
+void ipa_dump_fn_summary (FILE *f, struct cgraph_node *node);
+void ipa_dump_hints (FILE *f, ipa_hints);
+void ipa_free_fn_summary (void);
+void ipa_free_size_summary (void);
+void inline_analyze_function (struct cgraph_node *node);
+void estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
+ ipa_auto_call_arg_values *avals,
+ ipa_call_estimates *estimates);
+void ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge);
+void ipa_update_overall_fn_summary (struct cgraph_node *node, bool reset = true);
+void compute_fn_summary (struct cgraph_node *, bool);
+bool refs_local_or_readonly_memory_p (tree);
+bool points_to_local_or_readonly_memory_p (tree);
+
+
+void evaluate_properties_for_edge (struct cgraph_edge *e,
+ bool inline_p,
+ clause_t *clause_ptr,
+ clause_t *nonspec_clause_ptr,
+ ipa_auto_call_arg_values *avals,
+ bool compute_contexts);
+
+void ipa_fnsummary_cc_finalize (void);
+HOST_WIDE_INT ipa_get_stack_frame_offset (struct cgraph_node *node);
+void ipa_remove_from_growth_caches (struct cgraph_edge *edge);
+
+/* Return true if EDGE is a cross module call. */
+
+inline bool
+cross_module_call_p (struct cgraph_edge *edge)
+{
+ /* Here we do not want to walk to the alias target because ICF may create
+ cross-unit aliases. */
+ if (edge->caller->unit_id == edge->callee->unit_id)
+ return false;
+ /* If the call is to a (former) comdat function or a symbol with multiple
+ extern inline definitions then treat it as an in-module call. */
+ if (edge->callee->merged_extern_inline || edge->callee->merged_comdat
+ || DECL_COMDAT (edge->callee->decl))
+ return false;
+ return true;
+}
+
+#endif /* GCC_IPA_FNSUMMARY_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-icf-gimple.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-icf-gimple.h
new file mode 100644
index 0000000..1ad6421
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-icf-gimple.h
@@ -0,0 +1,296 @@
+/* Interprocedural semantic function equality pass
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+ Contributed by Jan Hubicka <hubicka@ucw.cz> and Martin Liska <mliska@suse.cz>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Gimple identical code folding (class func_checker) is an infrastructure
+ capable of comparing two given functions. The class compares every
+ gimple statement and uses many dictionaries to map source and target
+ SSA_NAMEs, declarations and other components.
+
+ To use the infrastructure, create an instance of func_checker and call
+ a comparison function based on type of gimple statement. */
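+
+/* A hedged sketch of that usage (not part of the original header); DECL1,
+   DECL2, S1 and S2 are placeholders:
+
+     ipa_icf_gimple::func_checker checker (decl1, decl2);
+     if (gcall *c1 = dyn_cast <gcall *> (s1))
+       return checker.compare_gimple_call (c1, as_a <gcall *> (s2));
+*/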
+
+/* Prints STRING to FILE, indented by SPACE_COUNT spaces. */
+#define FPUTS_SPACES(file, space_count, string) \
+ fprintf (file, "%*s" string, space_count, " ");
+
+/* fprintf wrapper that indents the given FORMAT by SPACE_COUNT spaces
+ and calls fprintf on FILE. */
+#define FPRINTF_SPACES(file, space_count, format, ...) \
+ fprintf (file, "%*s" format, space_count, " ", ##__VA_ARGS__);
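+
+/* For example (a hedged illustration, not part of the original header),
+   FPRINTF_SPACES (dump_file, 4, "bb %i\n", 2) prints "bb 2" indented by
+   four columns. */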
+
+/* Logs a MESSAGE to dump_file, if it exists, and returns false. FUNC is the
+ name of the function and LINE is the location in the source file. */
+
+inline bool
+return_false_with_message_1 (const char *message, const char *filename,
+ const char *func, unsigned int line)
+{
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " false returned: '%s' in %s at %s:%u\n", message, func,
+ filename, line);
+ return false;
+}
+
+/* Logs a MESSAGE to dump_file, if it exists, and returns false. */
+#define return_false_with_msg(message) \
+ return_false_with_message_1 (message, __FILE__, __func__, __LINE__)
+
+/* Return false and log that false value is returned. */
+#define return_false() return_false_with_msg ("")
+
+/* Logs the return value if RESULT is false. FUNC is the name of the function
+ and LINE is the location in the source file. */
+
+inline bool
+return_with_result (bool result, const char *filename,
+ const char *func, unsigned int line)
+{
+ if (!result && dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, " false returned: '' in %s at %s:%u\n", func,
+ filename, line);
+
+ return result;
+}
+
+/* Logs return value if RESULT is false. */
+#define return_with_debug(result) return_with_result \
+ (result, __FILE__, __func__, __LINE__)
+
+/* Verbose logging function logging statements S1 and S2 of a CODE.
+ FUNC is the name of the function and LINE is the location in the source
+ file. */
+
+inline bool
+return_different_stmts_1 (gimple *s1, gimple *s2, const char *code,
+ const char *func, unsigned int line)
+{
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, " different statement for code: %s (%s:%u):\n",
+ code, func, line);
+
+ print_gimple_stmt (dump_file, s1, 3, TDF_DETAILS);
+ print_gimple_stmt (dump_file, s2, 3, TDF_DETAILS);
+ }
+
+ return false;
+}
+
+/* Verbose logging function logging statements S1 and S2 of a CODE. */
+#define return_different_stmts(s1, s2, code) \
+ return_different_stmts_1 (s1, s2, code, __func__, __LINE__)
+
+namespace ipa_icf_gimple {
+
+/* Basic block struct for semantic equality pass. */
+class sem_bb
+{
+public:
+ sem_bb (basic_block bb_, unsigned nondbg_stmt_count_, unsigned edge_count_):
+ bb (bb_), nondbg_stmt_count (nondbg_stmt_count_), edge_count (edge_count_) {}
+
+ /* Basic block the structure belongs to. */
+ basic_block bb;
+
+ /* Number of non-debug statements in the basic block. */
+ unsigned nondbg_stmt_count;
+
+ /* Number of edges connected to the block. */
+ unsigned edge_count;
+};
+
+/* A class aggregating all connections and semantic equivalents
+ for a given pair of semantic function candidates. */
+class func_checker : ao_compare
+{
+public:
+ /* Default constructor. */
+ func_checker ():
+ m_source_func_decl (NULL_TREE), m_target_func_decl (NULL_TREE),
+ m_ignored_source_nodes (NULL), m_ignored_target_nodes (NULL),
+ m_ignore_labels (false), m_tbaa (true)
+ {
+ m_source_ssa_names.create (0);
+ m_target_ssa_names.create (0);
+ }
+
+ /* Initialize internal structures for a given SOURCE_FUNC_DECL and
+ TARGET_FUNC_DECL. For special cases, one can set IGNORE_LABELS to
+ skip label comparison, and TBAA to control whether type-based alias
+ analysis info is compared.
+ Similarly, IGNORED_SOURCE_NODES and IGNORED_TARGET_NODES are sets
+ of symtab nodes that can be skipped. */
+ func_checker (tree source_func_decl, tree target_func_decl,
+ bool ignore_labels = false,
+ bool tbaa = true,
+ hash_set<symtab_node *> *ignored_source_nodes = NULL,
+ hash_set<symtab_node *> *ignored_target_nodes = NULL);
+
+ /* Memory release routine. */
+ virtual ~func_checker ();
+
+ /* Function visits all gimple labels and creates corresponding
+ mapping between basic blocks and labels. */
+ void parse_labels (sem_bb *bb);
+
+ /* Basic block equivalence comparison function that returns true if
+ basic blocks BB1 and BB2 correspond. */
+ bool compare_bb (sem_bb *bb1, sem_bb *bb2);
+
+ /* Verifies that trees T1 and T2 are equivalent from perspective of ICF. */
+ bool compare_ssa_name (const_tree t1, const_tree t2);
+
+ /* Verification function for edges E1 and E2. */
+ bool compare_edge (edge e1, edge e2);
+
+ /* Verifies for given GIMPLEs S1 and S2 that
+ call statements are semantically equivalent. */
+ bool compare_gimple_call (gcall *s1, gcall *s2);
+
+ /* Verifies for given GIMPLEs S1 and S2 that
+ assignment statements are semantically equivalent. */
+ bool compare_gimple_assign (gimple *s1, gimple *s2);
+
+ /* Verifies for given GIMPLEs S1 and S2 that
+ condition statements are semantically equivalent. */
+ bool compare_gimple_cond (gimple *s1, gimple *s2);
+
+ /* Verifies for given GIMPLE_LABEL stmts S1 and S2 that
+ label statements are semantically equivalent. */
+ bool compare_gimple_label (const glabel *s1, const glabel *s2);
+
+ /* Verifies for given GIMPLE_SWITCH stmts S1 and S2 that
+ switch statements are semantically equivalent. */
+ bool compare_gimple_switch (const gswitch *s1, const gswitch *s2);
+
+ /* Verifies for given GIMPLE_RETURN stmts S1 and S2 that
+ return statements are semantically equivalent. */
+ bool compare_gimple_return (const greturn *s1, const greturn *s2);
+
+ /* Verifies for given GIMPLEs S1 and S2 that
+ goto statements are semantically equivalent. */
+ bool compare_gimple_goto (gimple *s1, gimple *s2);
+
+ /* Verifies for given GIMPLE_RESX stmts S1 and S2 that
+ resx statements are semantically equivalent. */
+ bool compare_gimple_resx (const gresx *s1, const gresx *s2);
+
+ /* Verifies for given GIMPLE_ASM stmts S1 and S2 that ASM statements
+ are equivalent.
+ For the beginning, the pass only supports equality for
+ '__asm__ __volatile__ ("", "", "", "memory")'. */
+ bool compare_gimple_asm (const gasm *s1, const gasm *s2);
+
+ /* Verification function for declaration trees T1 and T2. */
+ bool compare_decl (const_tree t1, const_tree t2);
+
+ /* Compute hash map MAP that determines loads and stores of STMT. */
+ enum operand_access_type {OP_MEMORY, OP_NORMAL};
+ typedef hash_set<tree> operand_access_type_map;
+
+ /* Function responsible for comparison of various operands T1 and T2.
+ If these components, from functions FUNC1 and FUNC2, are equal, true
+ is returned. */
+ bool compare_operand (tree t1, tree t2, operand_access_type type);
+
+ /* Compares GIMPLE ASM inputs (or outputs) where we iterate tree chain
+ and compare both TREE_PURPOSEs and TREE_VALUEs. */
+ bool compare_asm_inputs_outputs (tree t1, tree t2,
+ operand_access_type_map *map);
+
+ /* Verifies that trees T1 and T2, representing function declarations
+ are equivalent from perspective of ICF. */
+ bool compare_function_decl (tree t1, tree t2);
+
+ /* Verifies that trees T1 and T2 do correspond. */
+ bool compare_variable_decl (const_tree t1, const_tree t2);
+
+ /* Compare loop information for basic blocks BB1 and BB2. */
+ bool compare_loops (basic_block bb1, basic_block bb2);
+
+ /* Return true if types are compatible for polymorphic call analysis.
+ COMPARE_PTR indicates if polymorphic type comparison should be
+ done for pointers, too. */
+ static bool compatible_polymorphic_types_p (tree t1, tree t2,
+ bool compare_ptr);
+
+ /* Return true if types are compatible from the perspective of ICF. */
+ static bool compatible_types_p (tree t1, tree t2);
+
+ /* Compute hash map determining access types of operands. */
+ static void classify_operands (const gimple *stmt,
+ operand_access_type_map *map);
+
+ /* Return access type of a given operand. */
+ static operand_access_type get_operand_access_type
+ (operand_access_type_map *map, tree);
+private:
+ /* Vector mapping source SSA names to target ones. */
+ vec <int> m_source_ssa_names;
+
+ /* Vector mapping target SSA names to source ones. */
+ vec <int> m_target_ssa_names;
+
+ /* Source TREE function declaration. */
+ tree m_source_func_decl;
+
+ /* Target TREE function declaration. */
+ tree m_target_func_decl;
+
+ /* Source symbol nodes that should be skipped by
+ declaration comparison. */
+ hash_set<symtab_node *> *m_ignored_source_nodes;
+
+ /* Target symbol nodes that should be skipped by
+ declaration comparison. */
+ hash_set<symtab_node *> *m_ignored_target_nodes;
+
+ /* Source to target edge map. */
+ hash_map <edge, edge> m_edge_map;
+
+ /* Source to target declaration map. */
+ hash_map <const_tree, const_tree> m_decl_map;
+
+ /* Label to basic block index mapping. */
+ hash_map <const_tree, int> m_label_bb_map;
+
+ /* Flag whether to ignore labels in the comparison. */
+ bool m_ignore_labels;
+
+ /* Flag whether we should compare type-based alias analysis info. */
+ bool m_tbaa;
+
+public:
+ /* Return true if two operands are equal. The flags fields can be used
+ to specify OEP flags described above. */
+ bool operand_equal_p (const_tree, const_tree, unsigned int flags)
+ final override;
+
+ /* Generate a hash value for an expression. This can be used iteratively
+ by passing a previous result as the HSTATE argument. */
+ void hash_operand (const_tree, inchash::hash &, unsigned flags)
+ final override;
+ void hash_operand (const_tree, inchash::hash &, unsigned flags,
+ operand_access_type access);
+};
+
+} // ipa_icf_gimple namespace
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-icf.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-icf.h
new file mode 100644
index 0000000..3a4ba29
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-icf.h
@@ -0,0 +1,680 @@
+/* Interprocedural semantic function equality pass
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+ Contributed by Jan Hubicka <hubicka@ucw.cz> and Martin Liska <mliska@suse.cz>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+namespace ipa_icf {
+class sem_item;
+
+/* Congruence class encompasses a collection of either functions or
+ read-only variables. These items are considered equivalent
+ unless proven otherwise. */
+class congruence_class
+{
+public:
+ /* Congruence class constructor for a new class with _ID. */
+ congruence_class (unsigned int _id): in_worklist (false), id (_id),
+ referenced_by_count (0)
+ {
+ }
+
+ /* Destructor. */
+ ~congruence_class ()
+ {
+ }
+
+ /* Dump function prints all class members to a FILE with an INDENT. */
+ void dump (FILE *file, unsigned int indent = 0) const;
+
+ /* Returns true if there's a member that is used from another group. */
+ bool is_class_used (void);
+
+ /* Flag used when we want to remove a class from the worklist, since
+ the delete operation is quite expensive for
+ the data structure (a linked list). */
+ bool in_worklist;
+
+ /* Vector of all group members. */
+ auto_vec <sem_item *> members;
+
+ /* Global unique class identifier. */
+ unsigned int id;
+
+ /* Total number of references to items of this class. */
+ unsigned referenced_by_count;
+};
+
+/* Semantic item type enum. */
+enum sem_item_type
+{
+ FUNC,
+ VAR
+};
+
+/* Class is a container for the address references of a symtab_node. */
+
+class symbol_compare_collection
+{
+public:
+ /* Constructor. */
+ symbol_compare_collection (symtab_node *node);
+
+ /* Destructor. */
+ ~symbol_compare_collection ()
+ {
+ m_references.release ();
+ m_interposables.release ();
+ }
+
+ /* Vector of address references. */
+ vec<symtab_node *> m_references;
+
+ /* Vector of interposable references. */
+ vec<symtab_node *> m_interposables;
+};
+
+/* Hash traits for symbol_compare_collection map. */
+
+struct symbol_compare_hash : nofree_ptr_hash <symbol_compare_collection>
+{
+ static hashval_t
+ hash (value_type v)
+ {
+ inchash::hash hstate;
+ hstate.add_int (v->m_references.length ());
+
+ for (unsigned i = 0; i < v->m_references.length (); i++)
+ hstate.add_int (v->m_references[i]->ultimate_alias_target ()->order);
+
+ hstate.add_int (v->m_interposables.length ());
+
+ for (unsigned i = 0; i < v->m_interposables.length (); i++)
+ hstate.add_int (v->m_interposables[i]->ultimate_alias_target ()->order);
+
+ return hstate.end ();
+ }
+
+ static bool
+ equal (value_type a, value_type b)
+ {
+ if (a->m_references.length () != b->m_references.length ()
+ || a->m_interposables.length () != b->m_interposables.length ())
+ return false;
+
+ for (unsigned i = 0; i < a->m_references.length (); i++)
+ if (a->m_references[i]->equal_address_to (b->m_references[i]) != 1)
+ return false;
+
+ for (unsigned i = 0; i < a->m_interposables.length (); i++)
+ if (!a->m_interposables[i]->semantically_equivalent_p
+ (b->m_interposables[i]))
+ return false;
+
+ return true;
+ }
+};
+
+/* Semantic item usage pair. */
+class sem_usage_pair
+{
+public:
+ /* Constructor for a key-value pair, where _ITEM is the key and _INDEX is the target index. */
+ sem_usage_pair (sem_item *_item, unsigned int _index);
+
+ /* Target semantic item where an item is used. */
+ sem_item *item;
+
+ /* Index of usage of such an item. */
+ unsigned int index;
+};
+
+struct sem_usage_pair_hash : pointer_hash <sem_usage_pair>
+{
+ static inline hashval_t hash (sem_usage_pair *);
+ static inline bool equal (sem_usage_pair *, sem_usage_pair *);
+};
+
+inline hashval_t
+sem_usage_pair_hash::hash (sem_usage_pair *pair)
+{
+ inchash::hash hstate;
+
+ hstate.add_ptr (pair->item);
+ hstate.add_int (pair->index);
+
+ return hstate.end ();
+}
+
+inline bool
+sem_usage_pair_hash::equal (sem_usage_pair *p1, sem_usage_pair *p2)
+{
+ return p1->item == p2->item && p1->index == p2->index;
+}
+
+struct sem_usage_hash : sem_usage_pair_hash, typed_delete_remove <sem_usage_pair> {};
+typedef hash_map<sem_usage_hash, auto_vec<sem_item *> > ref_map;
+
+typedef std::pair<symtab_node *, symtab_node *> symtab_pair;
+
+/* Semantic item is a base class that encapsulates all shared functionality
+ for both semantic function and variable items. */
+class sem_item
+{
+public:
+ /* Semantic item constructor for a node of _TYPE, where STACK is used
+ for bitmap memory allocation. */
+ sem_item (sem_item_type _type, bitmap_obstack *stack);
+
+ /* Semantic item constructor for a node of _TYPE, where STACK is used
+ for bitmap memory allocation. The item is based on symtab node _NODE. */
+ sem_item (sem_item_type _type, symtab_node *_node, bitmap_obstack *stack);
+
+ virtual ~sem_item ();
+
+ /* Dump function for debugging purpose. */
+ DEBUG_FUNCTION void dump (void);
+
+ /* Semantic item initialization function. */
+ virtual void init (ipa_icf_gimple::func_checker *) = 0;
+
+ /* Add reference to a semantic TARGET. */
+ void add_reference (ref_map *map, sem_item *target);
+
+ /* Fast equality function based on knowledge known in WPA. */
+ virtual bool equals_wpa (sem_item *item,
+ hash_map <symtab_node *, sem_item *> &ignored_nodes) = 0;
+
+ /* Returns true if the item equals to ITEM given as argument. */
+ virtual bool equals (sem_item *item,
+ hash_map <symtab_node *, sem_item *> &ignored_nodes) = 0;
+
+ /* References independent hash function. */
+ virtual hashval_t get_hash (void) = 0;
+
+ /* Set new hash value of the item. */
+ void set_hash (hashval_t hash);
+
+ /* Merges instance with an ALIAS_ITEM, where alias, thunk or redirection can
+ be applied. */
+ virtual bool merge (sem_item *alias_item) = 0;
+
+ /* Dump symbol to FILE. */
+ virtual void dump_to_file (FILE *file) = 0;
+
+ /* Update hash by address sensitive references. */
+ void update_hash_by_addr_refs (hash_map <symtab_node *,
+ sem_item *> &m_symtab_node_map);
+
+ /* Update hash by computed local hash values taken from different
+ semantic items. */
+ void update_hash_by_local_refs (hash_map <symtab_node *,
+ sem_item *> &m_symtab_node_map);
+
+ /* Return base tree that can be used for compatible_types_p and
+ contains_polymorphic_type_p comparison. */
+ static bool get_base_types (tree *t1, tree *t2);
+
+ /* Return true if target supports alias symbols. */
+ bool target_supports_symbol_aliases_p (void);
+
+ /* Item type. */
+ sem_item_type type;
+
+ /* Symtab node. */
+ symtab_node *node;
+
+ /* Declaration tree node. */
+ tree decl;
+
+ /* Number of references to semantic symbols (function calls,
+ variable references). */
+ unsigned reference_count;
+
+ /* Pointer to a congruence class the item belongs to. */
+ congruence_class *cls;
+
+ /* Index of the item in the class it belongs to. */
+ unsigned int index_in_class;
+
+ /* A bitmap with indices of all classes referencing this item. */
+ bitmap usage_index_bitmap;
+
+ /* List of tree references (either FUNC_DECL or VAR_DECL). */
+ vec <tree> tree_refs;
+
+ /* A set with symbol table references. */
+ hash_set <symtab_node *> refs_set;
+
+ /* Temporary hash used where hash values of references are added. */
+ hashval_t global_hash;
+
+ /* Number of references to this symbol. */
+ unsigned referenced_by_count;
+protected:
+ /* Cached, once calculated hash for the item. */
+
+ /* Compare properties of a symbol that do not affect the semantics of the
+ symbol itself but affect the semantics of its references.
+ If ADDRESS is true, do extra checking needed for IPA_REF_ADDR. */
+ static bool compare_referenced_symbol_properties (symtab_node *used_by,
+ symtab_node *n1,
+ symtab_node *n2,
+ bool address);
+
+ /* Hash properties compared by compare_referenced_symbol_properties. */
+ void hash_referenced_symbol_properties (symtab_node *ref,
+ inchash::hash &hstate,
+ bool address);
+
+ /* For given symbol table nodes N1 and N2, check that the FUNCTION_DECLs
+ point to the same function. Comparison can be skipped if IGNORED_NODES
+ contains these nodes. ADDRESS indicates whether the address is taken. */
+ bool compare_symbol_references (hash_map <symtab_node *, sem_item *>
+ &ignored_nodes,
+ symtab_node *n1, symtab_node *n2,
+ bool address);
+protected:
+ /* Hash of item. */
+ hashval_t m_hash;
+
+ /* Indicates whether a hash value has been set or not. */
+ bool m_hash_set;
+
+private:
+ /* Initialize internal data structures. Bitmap STACK is used for
+ bitmap memory allocation process. */
+ void setup (bitmap_obstack *stack);
+
+ /* Because types can be arbitrarily large, avoid quadratic bottleneck. */
+ static hash_map<const_tree, hashval_t> m_type_hash_cache;
+}; // class sem_item
+
+class sem_function: public sem_item
+{
+public:
+ /* Semantic function constructor that uses STACK as bitmap memory stack. */
+ sem_function (bitmap_obstack *stack);
+
+ /* Constructor based on callgraph node _NODE.
+ Bitmap STACK is used for memory allocation. */
+ sem_function (cgraph_node *_node, bitmap_obstack *stack);
+
+ ~sem_function ();
+
+ void init (ipa_icf_gimple::func_checker *) final override;
+ bool equals_wpa (sem_item *item,
+ hash_map <symtab_node *, sem_item *> &ignored_nodes)
+ final override;
+ hashval_t get_hash (void) final override;
+ bool equals (sem_item *item,
+ hash_map <symtab_node *, sem_item *> &ignored_nodes)
+ final override;
+ bool merge (sem_item *alias_item) final override;
+
+ /* Dump symbol to FILE. */
+ void dump_to_file (FILE *file) final override
+ {
+ gcc_assert (file);
+ dump_function_to_file (decl, file, TDF_DETAILS);
+ }
+
+ /* Returns cgraph_node. */
+ inline cgraph_node *get_node (void)
+ {
+ return dyn_cast <cgraph_node *> (node);
+ }
+
+ /* Improve accumulated hash for HSTATE based on a gimple statement STMT. */
+ void hash_stmt (gimple *stmt, inchash::hash &inchash);
+
+ /* Return true if polymorphic comparison must be processed. */
+ bool compare_polymorphic_p (void);
+
+ /* For a given call graph NODE, construct a new
+ semantic function item. */
+ static sem_function *parse (cgraph_node *node, bitmap_obstack *stack,
+ ipa_icf_gimple::func_checker *checker);
+
+ /* Perform additional checks needed to match types of used function
+ parameters. */
+ bool compatible_parm_types_p (tree, tree);
+
+ /* Exception handling region tree. */
+ eh_region region_tree;
+
+ /* Number of function arguments. */
+ unsigned int arg_count;
+
+ /* Total number of edges in the function. */
+ unsigned int edge_count;
+
+ /* Vector of sizes of all basic blocks. */
+ vec <unsigned int> bb_sizes;
+
+ /* Control flow graph checksum. */
+ hashval_t cfg_checksum;
+
+ /* GIMPLE codes hash value. */
+ hashval_t gcode_hash;
+
+ /* Vector of subpart of memory access types. */
+ auto_vec<tree> memory_access_types;
+
+ /* Total number of SSA names used in the function. */
+ unsigned ssa_names_size;
+
+ /* Array of structures for all basic blocks. */
+ vec <ipa_icf_gimple::sem_bb *> bb_sorted;
+
+ /* Hash of canonical types used for memory references in the
+ function. */
+ hashval_t m_alias_sets_hash;
+
+ /* Return true if parameter I may be used. */
+ bool param_used_p (unsigned int i);
+
+private:
+ /* Calculates hash value based on a BASIC_BLOCK. */
+ hashval_t get_bb_hash (const ipa_icf_gimple::sem_bb *basic_block);
+
+ /* For given basic blocks BB1 and BB2 (from functions FUNC1 and FUNC2),
+ return true if the phi nodes in these blocks are semantically
+ equivalent. */
+ bool compare_phi_node (basic_block bb1, basic_block bb2);
+
+ /* Using the basic block dictionary BB_DICT, return true if the SOURCE
+ index corresponds to TARGET. */
+ bool bb_dict_test (vec<int> *bb_dict, int source, int target);
+
+ /* If cgraph edges E1 and E2 are indirect calls, verify that
+ ICF flags are the same. */
+ bool compare_edge_flags (cgraph_edge *e1, cgraph_edge *e2);
+
+ /* Processes function equality comparison. */
+ bool equals_private (sem_item *item);
+
+ /* Function checker stores binding between functions. */
+ ipa_icf_gimple::func_checker *m_checker;
+
+ /* COMPARED_FUNC is a function that we compare to. */
+ sem_function *m_compared_func;
+}; // class sem_function
+
+class sem_variable: public sem_item
+{
+public:
+ /* Semantic variable constructor that uses STACK as bitmap memory stack. */
+ sem_variable (bitmap_obstack *stack);
+
+ /* Constructor based on varpool node _NODE.
+ Bitmap STACK is used for memory allocation. */
+
+ sem_variable (varpool_node *_node, bitmap_obstack *stack);
+
+ /* Semantic variable initialization function. */
+ void init (ipa_icf_gimple::func_checker *) final override;
+
+ hashval_t get_hash (void) final override;
+ bool merge (sem_item *alias_item) final override;
+ void dump_to_file (FILE *file) final override;
+ bool equals (sem_item *item,
+ hash_map <symtab_node *, sem_item *> &ignored_nodes)
+ final override;
+
+ /* Fast equality variable based on knowledge known in WPA. */
+ bool equals_wpa (sem_item *item,
+ hash_map <symtab_node *, sem_item *> &ignored_nodes)
+ final override;
+
+ /* Returns varpool_node. */
+ inline varpool_node *get_node (void)
+ {
+ return dyn_cast <varpool_node *> (node);
+ }
+
+ /* Parser function that visits a varpool NODE. */
+ static sem_variable *parse (varpool_node *node, bitmap_obstack *stack,
+ ipa_icf_gimple::func_checker *checker);
+
+private:
+ /* Compares trees T1 and T2 for semantic equality. */
+ static bool equals (tree t1, tree t2);
+}; // class sem_variable
+
+class sem_item_optimizer;
+
+struct congruence_class_group
+{
+ hashval_t hash;
+ sem_item_type type;
+ vec <congruence_class *> classes;
+};
+
+/* Congruence class set structure. */
+struct congruence_class_hash : nofree_ptr_hash <congruence_class_group>
+{
+ static inline hashval_t hash (const congruence_class_group *item)
+ {
+ return item->hash;
+ }
+
+ static inline int equal (const congruence_class_group *item1,
+ const congruence_class_group *item2)
+ {
+ return item1->hash == item2->hash && item1->type == item2->type;
+ }
+};
+
+struct traverse_split_pair
+{
+ sem_item_optimizer *optimizer;
+ class congruence_class *cls;
+};
+
+/* Semantic item optimizer includes all top-level logic
+ related to semantic equality comparison. */
+class sem_item_optimizer
+{
+public:
+ sem_item_optimizer ();
+ ~sem_item_optimizer ();
+
+ /* Function responsible for visiting all potential functions and
+ read-only variables that can be merged. */
+ void parse_funcs_and_vars (void);
+
+ /* Optimizer entry point. Returns true if a merge
+ operation was processed. */
+ bool execute (void);
+
+ /* Dump function. */
+ void dump (void);
+
+ /* Verify congruence classes if checking is enabled. */
+ void checking_verify_classes (void);
+
+ /* Verify congruence classes. */
+ void verify_classes (void);
+
+ /* Write IPA ICF summary for symbols. */
+ void write_summary (void);
+
+ /* Read IPA ICF summary for symbols. */
+ void read_summary (void);
+
+ /* Callgraph removal hook called for a NODE with a custom DATA. */
+ static void cgraph_removal_hook (cgraph_node *node, void *data);
+
+ /* Varpool removal hook called for a NODE with a custom DATA. */
+ static void varpool_removal_hook (varpool_node *node, void *data);
+
+ /* Worklist of congruence classes that can potentially
+ refine classes of congruence. */
+ fibonacci_heap<unsigned, congruence_class> worklist;
+
+ /* Remove semantic ITEM and release memory. */
+ void remove_item (sem_item *item);
+
+ /* Remove symtab NODE triggered by symtab removal hooks. */
+ void remove_symtab_node (symtab_node *node);
+
+ /* Register callgraph and varpool hooks. */
+ void register_hooks (void);
+
+ /* Unregister callgraph and varpool hooks. */
+ void unregister_hooks (void);
+
+ /* Adds CLS to a hashtable indexed by its hash value. */
+ void add_class (congruence_class *cls);
+
+ /* Gets a congruence class group based on given HASH value and TYPE. */
+ congruence_class_group *get_group_by_hash (hashval_t hash,
+ sem_item_type type);
+private:
+
+ /* For each semantic item, append hash values of references. */
+ void update_hash_by_addr_refs ();
+
+ /* Update hash by canonical types of memory accesses. */
+ void update_hash_by_memory_access_type ();
+
+ /* Congruence classes are built by hash value. */
+ void build_hash_based_classes (void);
+
+ /* Initialize semantic items in classes that have more than one element.
+ In the case of WPA, we load function bodies. */
+ unsigned int parse_nonsingleton_classes (void);
+
+ /* Equality function for semantic items is used to subdivide existing
+ classes. If IN_WPA, fast equality function is invoked. */
+ void subdivide_classes_by_equality (bool in_wpa = false);
+
+ /* Subdivide classes by address and interposable references
+ that members of the class reference.
+ An example is a pair of functions that have an address
+ taken within a function. If these addresses are different, the class
+ is split. */
+ unsigned subdivide_classes_by_sensitive_refs();
+
+ /* Debug function that prints all information about congruence classes. */
+ void dump_cong_classes (void);
+
+ /* Build references according to call graph. */
+ void build_graph (void);
+
+ /* Iterative congruence reduction function. */
+ void process_cong_reduction (void);
+
+ /* After reduction is done, we can declare all items in a group
+ to be equal. PREV_CLASS_COUNT is the number of classes
+ before reduction. Returns true if a merge operation
+ was processed. LOADED_SYMBOLS is the number of symbols that were loaded
+ in WPA. */
+ bool merge_classes (unsigned int prev_class_count,
+ unsigned int loaded_symbols);
+
+ /* Fixup points to analysis info. */
+ void fixup_points_to_sets (void);
+
+ /* Fixup points to set PT. */
+ void fixup_pt_set (struct pt_solution *pt);
+
+ /* Adds a newly created congruence class CLS to worklist. */
+ void worklist_push (congruence_class *cls);
+
+ /* Pops a class from worklist. */
+ congruence_class *worklist_pop ();
+
+ /* Every usage of a congruence class CLS is a candidate that can split the
+ collection of classes. Bitmap stack BMSTACK is used for bitmap
+ allocation. */
+ void do_congruence_step (congruence_class *cls);
+
+ /* Tests whether the class CLS, used as the INDEXth operand, splits any
+ congruence classes. Bitmap stack BMSTACK is used for bitmap allocation. */
+ bool do_congruence_step_for_index (congruence_class *cls, unsigned int index);
+
+ /* Makes pairing between a congruence class CLS and semantic ITEM. */
+ static void add_item_to_class (congruence_class *cls, sem_item *item);
+
+ /* Traverse function for disposing of the split map. CLS is the congruence
+ class and B is the bitmap slot we want to release. PAIR is a mandatory,
+ but unused, argument. */
+ static bool release_split_map (congruence_class * const &cls, bitmap const &b,
+ traverse_split_pair *pair);
+
+ /* Process the split operation for a congruence class CLS,
+ where bitmap B splits the congruence class members. PAIR is used
+ as the argument of the split pair. */
+ static bool traverse_congruence_split (congruence_class * const &cls,
+ bitmap const &b,
+ traverse_split_pair *pair);
+
+ /* Compare function for sorting pairs in do_congruence_step_f. */
+ static int sort_congruence_split (const void *, const void *);
+
+ /* Reads a section from LTO stream file FILE_DATA. Input block for DATA
+ contains LEN bytes. */
+ void read_section (lto_file_decl_data *file_data, const char *data,
+ size_t len);
+
+ /* Removes all callgraph and varpool nodes that are marked by symtab
+ as deleted. */
+ void filter_removed_items (void);
+
+ /* Vector of semantic items. */
+ vec <sem_item *> m_items;
+
+ /* A set containing all items removed by hooks. */
+ hash_set <symtab_node *> m_removed_items_set;
+
+ /* Hashtable of congruence classes. */
+ hash_table <congruence_class_hash> m_classes;
+
+ /* Count of congruence classes. */
+ unsigned int m_classes_count;
+
+ /* Map data structure maps symtab nodes to semantic items. */
+ hash_map <symtab_node *, sem_item *> m_symtab_node_map;
+
+ /* Set to true if a splitter class is removed. */
+ bool splitter_class_removed;
+
+ /* Global unique class id counter. */
+ static unsigned int class_id;
+
+ /* Callgraph node removal hook holder. */
+ cgraph_node_hook_list *m_cgraph_node_hooks;
+
+ /* Varpool node removal hook holder. */
+ varpool_node_hook_list *m_varpool_node_hooks;
+
+ /* Bitmap stack. */
+ bitmap_obstack m_bmstack;
+
+ /* Vector of merged variables. Needed for fixup of points-to-analysis
+ info. */
+ vec <symtab_pair> m_merged_variables;
+
+ /* Hash map with all references. */
+ ref_map m_references;
+}; // class sem_item_optimizer
+
+} // ipa_icf namespace
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-inline.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-inline.h
new file mode 100644
index 0000000..70de89a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-inline.h
@@ -0,0 +1,134 @@
+/* Inlining decision heuristics.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Jan Hubicka
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_IPA_INLINE_H
+#define GCC_IPA_INLINE_H
+
+/* Data we cache about callgraph edges during inlining to avoid expensive
+ re-computations during the greedy algorithm. */
+class edge_growth_cache_entry
+{
+public:
+ sreal time, nonspec_time;
+ int size;
+ ipa_hints hints;
+
+ edge_growth_cache_entry()
+ : size (0), hints (0) {}
+
+ edge_growth_cache_entry(int64_t time, int64_t nonspec_time,
+ int size, ipa_hints hints)
+ : time (time), nonspec_time (nonspec_time), size (size),
+ hints (hints) {}
+};
+
+extern fast_call_summary<edge_growth_cache_entry *, va_heap> *edge_growth_cache;
+
+/* In ipa-inline-analysis.cc */
+int estimate_size_after_inlining (struct cgraph_node *, struct cgraph_edge *);
+int estimate_growth (struct cgraph_node *);
+bool growth_positive_p (struct cgraph_node *, struct cgraph_edge *, int);
+int do_estimate_edge_size (struct cgraph_edge *edge);
+sreal do_estimate_edge_time (struct cgraph_edge *edge, sreal *nonspec_time = NULL);
+ipa_hints do_estimate_edge_hints (struct cgraph_edge *edge);
+void reset_node_cache (struct cgraph_node *node);
+void initialize_growth_caches ();
+void free_growth_caches (void);
+
+/* In ipa-inline.cc */
+unsigned int early_inliner (function *fun);
+bool inline_account_function_p (struct cgraph_node *node);
+
+
+/* In ipa-inline-transform.cc */
+bool inline_call (struct cgraph_edge *, bool, vec<cgraph_edge *> *, int *, bool,
+ bool *callee_removed = NULL);
+unsigned int inline_transform (struct cgraph_node *);
+void clone_inlined_nodes (struct cgraph_edge *e, bool, bool, int *);
+
+extern int ncalls_inlined;
+extern int nfunctions_inlined;
+extern function_summary <tree *> *ipa_saved_clone_sources;
+
+/* Return estimated size of the inline sequence of EDGE. */
+
+inline int
+estimate_edge_size (struct cgraph_edge *edge)
+{
+ edge_growth_cache_entry *entry;
+ if (edge_growth_cache == NULL
+ || (entry = edge_growth_cache->get (edge)) == NULL
+ || entry->size == 0)
+ return do_estimate_edge_size (edge);
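+ /* Non-negative sizes appear to be cached with a +1 bias so that zero can
+ mean "no cached value"; the subtraction below undoes that bias. */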
+ return entry->size - (entry->size > 0);
+}
+
+/* Return lower bound on estimated callee growth after inlining EDGE. */
+
+inline int
+estimate_min_edge_growth (struct cgraph_edge *edge)
+{
+ ipa_call_summary *s = ipa_call_summaries->get (edge);
+ struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
+ return (ipa_fn_summaries->get (callee)->min_size - s->call_stmt_size);
+}
+
+/* Return estimated callee growth after inlining EDGE. */
+
+inline int
+estimate_edge_growth (struct cgraph_edge *edge)
+{
+ ipa_call_summary *s = ipa_call_summaries->get (edge);
+ gcc_checking_assert (s->call_stmt_size || !edge->callee->analyzed);
+ return (estimate_edge_size (edge) - s->call_stmt_size);
+}
+
+/* Return estimated callee runtime increase after inlining
+ EDGE. */
+
+inline sreal
+estimate_edge_time (struct cgraph_edge *edge, sreal *nonspec_time = NULL)
+{
+ edge_growth_cache_entry *entry;
+ if (edge_growth_cache == NULL
+ || (entry = edge_growth_cache->get (edge)) == NULL
+ || entry->time == 0)
+ return do_estimate_edge_time (edge, nonspec_time);
+ if (nonspec_time)
+ *nonspec_time = edge_growth_cache->get (edge)->nonspec_time;
+ return entry->time;
+}
+
+
+/* Return estimated hints for inlining
+ EDGE. */
+
+inline ipa_hints
+estimate_edge_hints (struct cgraph_edge *edge)
+{
+ edge_growth_cache_entry *entry;
+ if (edge_growth_cache == NULL
+ || (entry = edge_growth_cache->get (edge)) == NULL
+ || entry->hints == 0)
+ return do_estimate_edge_hints (edge);
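+ /* Hints appear to be cached with the same +1 bias, so that zero can mean
+ "no cached value". */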
+ return entry->hints - 1;
+}
+
+#endif /* GCC_IPA_INLINE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-modref-tree.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-modref-tree.h
new file mode 100644
index 0000000..8354e86
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-modref-tree.h
@@ -0,0 +1,766 @@
+/* Data structure for the modref pass.
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+ Contributed by David Cepelik and Jan Hubicka
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* modref_tree represents a decision tree that can be used by the alias
+ analysis oracle to determine whether a given memory access can be affected
+ by a function call. For every function we collect two trees, one for loads
+ and the other for stores. The tree consists of the following levels:
+
+ 1) Base: this level represents the base alias set of the access and refers
+ to sons (ref nodes). The flag all_refs means that all possible references
+ are aliasing.
+
+ Because for LTO streaming we need to stream types rather than alias sets,
+ modref_base_node is implemented as a template.
+ 2) Ref: this level represents the ref alias set and links to accesses unless
+ the all_refs flag is set.
+ Again, ref is a template to allow LTO streaming.
+ 3) Access: this level represents info about individual accesses. Presently
+ we record whether the access is through a dereference of a function
+ parameter and, if so, we record the access range.
+*/
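+
+/* A hedged illustration (not part of the original header) of the three
+   levels above, for a store through function parameter 0:
+
+     base node (alias set of the pointed-to type)
+       -> ref node (alias set of the accessed field)
+            -> access { parm_index = 0, parm_offset in bytes,
+                        offset/size/max_size in bits }
+*/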
+
+#ifndef GCC_MODREF_TREE_H
+#define GCC_MODREF_TREE_H
+
+struct ipa_modref_summary;
+
+/* Parm indexes greater than or equal to 0 are normal parms.
+ Some negative values have special meaning. */
+enum modref_special_parms {
+ MODREF_UNKNOWN_PARM = -1,
+ MODREF_STATIC_CHAIN_PARM = -2,
+ MODREF_RETSLOT_PARM = -3,
+ /* Used for bases that point to memory that escapes from the function. */
+ MODREF_GLOBAL_MEMORY_PARM = -4,
+ /* Used in modref_parm_map for references which can be removed
+ from the summary during a summary update since they now point to local
+ memory. */
+ MODREF_LOCAL_MEMORY_PARM = -5
+};
+
+/* Modref records accesses relative to function parameters.
+ This is the entry for a single access, specifying its base and access range.
+
+ Accesses can be collected into boundedly sized arrays using
+ modref_access_node::insert. */
+struct GTY(()) modref_access_node
+{
+ /* Access range information (in bits). */
+ poly_int64 offset;
+ poly_int64 size;
+ poly_int64 max_size;
+
+ /* Offset from parameter pointer to the base of the access (in bytes). */
+ poly_int64 parm_offset;
+
+ /* Index of parameter which specifies the base of access. -1 if base is not
+ a function parameter. */
+ int parm_index;
+ bool parm_offset_known;
+ /* Number of times interval was extended during dataflow.
+ This has to be limited in order to keep dataflow finite. */
+ unsigned char adjustments;
+
+ /* Return true if access node holds some useful info. */
+ bool useful_p () const
+ {
+ return parm_index != MODREF_UNKNOWN_PARM;
+ }
+ /* Return true if access can be used to determine a kill. */
+ bool useful_for_kill_p () const
+ {
+ return parm_offset_known && parm_index != MODREF_UNKNOWN_PARM
+ && parm_index != MODREF_GLOBAL_MEMORY_PARM
+ && parm_index != MODREF_RETSLOT_PARM && known_size_p (size)
+ && known_eq (max_size, size)
+ && known_gt (size, 0);
+ }
+ /* Dump range to debug OUT. */
+ void dump (FILE *out);
+ /* Return true if both accesses are the same. */
+ bool operator == (modref_access_node &a) const;
+ /* Return true if range info is useful. */
+ bool range_info_useful_p () const;
+ /* Return tree corresponding to parameter of the range in STMT. */
+ tree get_call_arg (const gcall *stmt) const;
+ /* Build ao_ref corresponding to the access and return true if successful. */
+ bool get_ao_ref (const gcall *stmt, class ao_ref *ref) const;
+ /* Stream access to OB. */
+ void stream_out (struct output_block *ob) const;
+ /* Stream access in from IB. */
+ static modref_access_node stream_in (struct lto_input_block *ib);
+ /* Insert A into vector ACCESSES. Limit size of vector to MAX_ACCESSES and
+ if RECORD_ADJUSTMENT is true keep track of adjustment counts.
+     Return 0 if nothing changed, 1 if insertion succeeded and -1 if it
+     failed.  */
+ static int insert (vec <modref_access_node, va_gc> *&accesses,
+ modref_access_node a, size_t max_accesses,
+ bool record_adjustments);
+ /* Same as insert but for kills where we are conservative the other way
+ around: if information is lost, the kill is lost. */
+ static bool insert_kill (vec<modref_access_node> &kills,
+ modref_access_node &a, bool record_adjustments);
+private:
+ bool contains (const modref_access_node &) const;
+ bool contains_for_kills (const modref_access_node &) const;
+ void update (poly_int64, poly_int64, poly_int64, poly_int64, bool);
+ bool update_for_kills (poly_int64, poly_int64, poly_int64,
+ poly_int64, poly_int64, bool);
+ bool merge (const modref_access_node &, bool);
+ bool merge_for_kills (const modref_access_node &, bool);
+ static bool closer_pair_p (const modref_access_node &,
+ const modref_access_node &,
+ const modref_access_node &,
+ const modref_access_node &);
+ void forced_merge (const modref_access_node &, bool);
+ void update2 (poly_int64, poly_int64, poly_int64, poly_int64,
+ poly_int64, poly_int64, poly_int64, bool);
+ bool combined_offsets (const modref_access_node &,
+ poly_int64 *, poly_int64 *, poly_int64 *) const;
+ static void try_merge_with (vec <modref_access_node, va_gc> *&, size_t);
+};
+
+/* Access node specifying no useful info. */
+const modref_access_node unspecified_modref_access_node
+ = {0, -1, -1, 0, MODREF_UNKNOWN_PARM, false, 0};
+
+template <typename T>
+struct GTY((user)) modref_ref_node
+{
+ T ref;
+ bool every_access;
+ vec <modref_access_node, va_gc> *accesses;
+
+ modref_ref_node (T ref):
+ ref (ref),
+ every_access (false),
+ accesses (NULL)
+ {}
+
+ /* Collapse the tree. */
+ void collapse ()
+ {
+ vec_free (accesses);
+ accesses = NULL;
+ every_access = true;
+ }
+
+  /* Insert access A.
+     Collapse the tree if it has more than MAX_ACCESSES entries.
+     If RECORD_ADJUSTMENTS is true, avoid too many interval extensions.
+     Return true if the record was changed.  */
+ bool insert_access (modref_access_node a, size_t max_accesses,
+ bool record_adjustments)
+ {
+ /* If this base->ref pair has no access information, bail out. */
+ if (every_access)
+ return false;
+
+    /* Only the following kinds of parameters need to be tracked.
+ We do not track return slots because they are seen as a direct store
+ in the caller. */
+ gcc_checking_assert (a.parm_index >= 0
+ || a.parm_index == MODREF_STATIC_CHAIN_PARM
+ || a.parm_index == MODREF_GLOBAL_MEMORY_PARM
+ || a.parm_index == MODREF_UNKNOWN_PARM);
+
+ if (!a.useful_p ())
+ {
+ if (!every_access)
+ {
+ collapse ();
+ return true;
+ }
+ return false;
+ }
+
+ int ret = modref_access_node::insert (accesses, a, max_accesses,
+ record_adjustments);
+ if (ret == -1)
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ "--param modref-max-accesses limit reached; collapsing\n");
+ collapse ();
+ }
+ return ret != 0;
+ }
+};
+
+/* Base of an access. */
+template <typename T>
+struct GTY((user)) modref_base_node
+{
+ T base;
+ vec <modref_ref_node <T> *, va_gc> *refs;
+ bool every_ref;
+
+ modref_base_node (T base):
+ base (base),
+ refs (NULL),
+ every_ref (false) {}
+
+ /* Search REF; return NULL if failed. */
+ modref_ref_node <T> *search (T ref)
+ {
+ size_t i;
+ modref_ref_node <T> *n;
+ FOR_EACH_VEC_SAFE_ELT (refs, i, n)
+ if (n->ref == ref)
+ return n;
+ return NULL;
+ }
+
+ /* Insert REF; collapse tree if there are more than MAX_REFS.
+ Return inserted ref and if CHANGED is non-null set it to true if
+ something changed. */
+ modref_ref_node <T> *insert_ref (T ref, size_t max_refs,
+ bool *changed = NULL)
+ {
+ modref_ref_node <T> *ref_node;
+
+ /* If the node is collapsed, don't do anything. */
+ if (every_ref)
+ return NULL;
+
+ /* Otherwise, insert a node for the ref of the access under the base. */
+ ref_node = search (ref);
+ if (ref_node)
+ return ref_node;
+
+    /* We always allow inserting ref 0.  For non-0 refs there is an upper
+       limit on the number of entries; if it is exceeded,
+       drop the ref conservatively to 0.  */
+ if (ref && refs && refs->length () >= max_refs)
+ {
+ if (dump_file)
+ fprintf (dump_file, "--param modref-max-refs limit reached;"
+ " using 0\n");
+ ref = 0;
+ ref_node = search (ref);
+ if (ref_node)
+ return ref_node;
+ }
+
+ if (changed)
+ *changed = true;
+
+ ref_node = new (ggc_alloc <modref_ref_node <T> > ())modref_ref_node <T>
+ (ref);
+ vec_safe_push (refs, ref_node);
+ return ref_node;
+ }
+
+ void collapse ()
+ {
+ size_t i;
+ modref_ref_node <T> *r;
+
+ if (refs)
+ {
+ FOR_EACH_VEC_SAFE_ELT (refs, i, r)
+ {
+ r->collapse ();
+ ggc_free (r);
+ }
+ vec_free (refs);
+ }
+ refs = NULL;
+ every_ref = true;
+ }
+};
+
+/* Map translating parameters across function call. */
+
+struct modref_parm_map
+{
+ /* Default constructor. */
+ modref_parm_map ()
+ : parm_index (MODREF_UNKNOWN_PARM), parm_offset_known (false), parm_offset ()
+ {}
+
+ /* Index of parameter we translate to.
+ Values from special_params enum are permitted too. */
+ int parm_index;
+ bool parm_offset_known;
+ poly_int64 parm_offset;
+};
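+
+/* For instance (hypothetical indexes and offsets), when merging the summary
+   of the call
+
+     foo (q + 1);   // q is the caller's second parameter, an int *
+
+   into the caller, the map entry for foo's parameter 0 would be roughly
+   { parm_index = 1, parm_offset_known = true, parm_offset = 4 }, so that
+   accesses recorded relative to foo's first parameter are re-expressed
+   relative to the caller's parameters.  */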
+
+/* Access tree for a single function. */
+template <typename T>
+struct GTY((user)) modref_tree
+{
+ vec <modref_base_node <T> *, va_gc> *bases;
+ bool every_base;
+
+ modref_tree ():
+ bases (NULL),
+ every_base (false) {}
+
+  /* Insert BASE; collapse tree if there are more than MAX_BASES.
+ Return inserted base and if CHANGED is non-null set it to true if
+ something changed.
+ If table gets full, try to insert REF instead. */
+
+ modref_base_node <T> *insert_base (T base, T ref,
+ unsigned int max_bases,
+ bool *changed = NULL)
+ {
+ modref_base_node <T> *base_node;
+
+ /* If the node is collapsed, don't do anything. */
+ if (every_base)
+ return NULL;
+
+ /* Otherwise, insert a node for the base of the access into the tree. */
+ base_node = search (base);
+ if (base_node)
+ return base_node;
+
+    /* We always allow inserting base 0.  For a non-0 base there is an upper
+       limit on the number of entries; if it is exceeded, drop the base
+       conservatively to REF, and if that still does not fit, to 0.  */
+ if (base && bases && bases->length () >= max_bases)
+ {
+ base_node = search (ref);
+ if (base_node)
+ {
+ if (dump_file)
+ fprintf (dump_file, "--param modref-max-bases"
+ " limit reached; using ref\n");
+ return base_node;
+ }
+ if (dump_file)
+ fprintf (dump_file, "--param modref-max-bases"
+ " limit reached; using 0\n");
+ base = 0;
+ base_node = search (base);
+ if (base_node)
+ return base_node;
+ }
+
+ if (changed)
+ *changed = true;
+
+ base_node = new (ggc_alloc <modref_base_node <T> > ())
+ modref_base_node <T> (base);
+ vec_safe_push (bases, base_node);
+ return base_node;
+ }
+
+ /* Insert memory access to the tree.
+ Return true if something changed. */
+ bool insert (unsigned int max_bases,
+ unsigned int max_refs,
+ unsigned int max_accesses,
+ T base, T ref, modref_access_node a,
+ bool record_adjustments)
+ {
+ if (every_base)
+ return false;
+
+ bool changed = false;
+
+ /* We may end up with max_size being less than size for accesses past the
+ end of array. Those are undefined and safe to ignore. */
+ if (a.range_info_useful_p ()
+ && known_size_p (a.size) && known_size_p (a.max_size)
+ && known_lt (a.max_size, a.size))
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ " - Paradoxical range. Ignoring\n");
+ return false;
+ }
+ if (known_size_p (a.size)
+ && known_eq (a.size, 0))
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ " - Zero size. Ignoring\n");
+ return false;
+ }
+ if (known_size_p (a.max_size)
+ && known_eq (a.max_size, 0))
+ {
+ if (dump_file)
+ fprintf (dump_file,
+ " - Zero max_size. Ignoring\n");
+ return false;
+ }
+ gcc_checking_assert (!known_size_p (a.max_size)
+ || !known_le (a.max_size, 0));
+
+ /* No useful information tracked; collapse everything. */
+ if (!base && !ref && !a.useful_p ())
+ {
+ collapse ();
+ return true;
+ }
+
+ modref_base_node <T> *base_node
+ = insert_base (base, ref, max_bases, &changed);
+ base = base_node->base;
+    /* If the table got full, we may end up with a useless base.  */
+ if (!base && !ref && !a.useful_p ())
+ {
+ collapse ();
+ return true;
+ }
+ if (base_node->every_ref)
+ return changed;
+ gcc_checking_assert (search (base) != NULL);
+
+ /* No useful ref info tracked; collapse base. */
+ if (!ref && !a.useful_p ())
+ {
+ base_node->collapse ();
+ return true;
+ }
+
+ modref_ref_node <T> *ref_node
+ = base_node->insert_ref (ref, max_refs, &changed);
+ ref = ref_node->ref;
+
+ if (ref_node->every_access)
+ return changed;
+ changed |= ref_node->insert_access (a, max_accesses,
+ record_adjustments);
+ /* See if we failed to add useful access. */
+ if (ref_node->every_access)
+ {
+ /* Collapse everything if there is no useful base and ref. */
+ if (!base && !ref)
+ {
+ collapse ();
+ gcc_checking_assert (changed);
+ }
+ /* Collapse base if there is no useful ref. */
+ else if (!ref)
+ {
+ base_node->collapse ();
+ gcc_checking_assert (changed);
+ }
+ }
+ return changed;
+ }
+
+ /* Insert memory access to the tree.
+ Return true if something changed. */
+ bool insert (tree fndecl,
+ T base, T ref, const modref_access_node &a,
+ bool record_adjustments)
+ {
+ return insert (opt_for_fn (fndecl, param_modref_max_bases),
+ opt_for_fn (fndecl, param_modref_max_refs),
+ opt_for_fn (fndecl, param_modref_max_accesses),
+ base, ref, a, record_adjustments);
+ }
+
+ /* Remove tree branches that are not useful (i.e. they will always pass). */
+
+ void cleanup ()
+ {
+ size_t i, j;
+ modref_base_node <T> *base_node;
+ modref_ref_node <T> *ref_node;
+
+ if (!bases)
+ return;
+
+ for (i = 0; vec_safe_iterate (bases, i, &base_node);)
+ {
+ if (base_node->refs)
+ for (j = 0; vec_safe_iterate (base_node->refs, j, &ref_node);)
+ {
+ if (!ref_node->every_access
+ && (!ref_node->accesses
+ || !ref_node->accesses->length ()))
+ {
+ base_node->refs->unordered_remove (j);
+ vec_free (ref_node->accesses);
+ ggc_delete (ref_node);
+ }
+ else
+ j++;
+ }
+ if (!base_node->every_ref
+ && (!base_node->refs || !base_node->refs->length ()))
+ {
+ bases->unordered_remove (i);
+ vec_free (base_node->refs);
+ ggc_delete (base_node);
+ }
+ else
+ i++;
+ }
+ if (bases && !bases->length ())
+ {
+ vec_free (bases);
+ bases = NULL;
+ }
+ }
+
+ /* Merge OTHER into the tree.
+ PARM_MAP, if non-NULL, maps parm indexes of callee to caller.
+     Similarly STATIC_CHAIN_MAP, if non-NULL, maps the static chain of the
+     callee to the caller.
+ Return true if something has changed. */
+ bool merge (unsigned int max_bases,
+ unsigned int max_refs,
+ unsigned int max_accesses,
+ modref_tree <T> *other, vec <modref_parm_map> *parm_map,
+ modref_parm_map *static_chain_map,
+ bool record_accesses,
+ bool promote_unknown_to_global = false)
+ {
+ if (!other || every_base)
+ return false;
+ if (other->every_base)
+ {
+ collapse ();
+ return true;
+ }
+
+ bool changed = false;
+ size_t i, j, k;
+ modref_base_node <T> *base_node, *my_base_node;
+ modref_ref_node <T> *ref_node;
+ modref_access_node *access_node;
+ bool release = false;
+
+    /* For self-recursive functions we may end up merging a summary into
+       itself; produce a copy first so we do not modify the summary under
+       our own hands.  */
+ if (other == this)
+ {
+ release = true;
+ other = modref_tree<T>::create_ggc ();
+ other->copy_from (this);
+ }
+
+ FOR_EACH_VEC_SAFE_ELT (other->bases, i, base_node)
+ {
+ if (base_node->every_ref)
+ {
+ my_base_node = insert_base (base_node->base, 0,
+ max_bases, &changed);
+ if (my_base_node && !my_base_node->every_ref)
+ {
+ my_base_node->collapse ();
+ cleanup ();
+ changed = true;
+ }
+ }
+ else
+ FOR_EACH_VEC_SAFE_ELT (base_node->refs, j, ref_node)
+ {
+ if (ref_node->every_access)
+ {
+ changed |= insert (max_bases, max_refs, max_accesses,
+ base_node->base,
+ ref_node->ref,
+ unspecified_modref_access_node,
+ record_accesses);
+ }
+ else
+ FOR_EACH_VEC_SAFE_ELT (ref_node->accesses, k, access_node)
+ {
+ modref_access_node a = *access_node;
+
+ if (a.parm_index != MODREF_UNKNOWN_PARM
+ && a.parm_index != MODREF_GLOBAL_MEMORY_PARM
+ && parm_map)
+ {
+ if (a.parm_index >= (int)parm_map->length ())
+ a.parm_index = MODREF_UNKNOWN_PARM;
+ else
+ {
+ modref_parm_map &m
+ = a.parm_index == MODREF_STATIC_CHAIN_PARM
+ ? *static_chain_map
+ : (*parm_map) [a.parm_index];
+ if (m.parm_index == MODREF_LOCAL_MEMORY_PARM)
+ continue;
+ a.parm_offset += m.parm_offset;
+ a.parm_offset_known &= m.parm_offset_known;
+ a.parm_index = m.parm_index;
+ }
+ }
+ if (a.parm_index == MODREF_UNKNOWN_PARM
+ && promote_unknown_to_global)
+ a.parm_index = MODREF_GLOBAL_MEMORY_PARM;
+ changed |= insert (max_bases, max_refs, max_accesses,
+ base_node->base, ref_node->ref,
+ a, record_accesses);
+ }
+ }
+ }
+ if (release)
+ ggc_delete (other);
+ return changed;
+ }
+
+ /* Merge OTHER into the tree.
+ PARM_MAP, if non-NULL, maps parm indexes of callee to caller.
+     Similarly STATIC_CHAIN_MAP, if non-NULL, maps the static chain of the
+     callee to the caller.
+ Return true if something has changed. */
+ bool merge (tree fndecl,
+ modref_tree <T> *other, vec <modref_parm_map> *parm_map,
+ modref_parm_map *static_chain_map,
+ bool record_accesses,
+ bool promote_unknown_to_global = false)
+ {
+ return merge (opt_for_fn (fndecl, param_modref_max_bases),
+ opt_for_fn (fndecl, param_modref_max_refs),
+ opt_for_fn (fndecl, param_modref_max_accesses),
+ other, parm_map, static_chain_map, record_accesses,
+ promote_unknown_to_global);
+ }
+
+ /* Copy OTHER to THIS. */
+ void copy_from (modref_tree <T> *other)
+ {
+ merge (INT_MAX, INT_MAX, INT_MAX, other, NULL, NULL, false);
+ }
+
+ /* Search BASE in tree; return NULL if failed. */
+ modref_base_node <T> *search (T base)
+ {
+ size_t i;
+ modref_base_node <T> *n;
+ FOR_EACH_VEC_SAFE_ELT (bases, i, n)
+ if (n->base == base)
+ return n;
+ return NULL;
+ }
+
+ /* Return true if tree contains access to global memory. */
+ bool global_access_p ()
+ {
+ size_t i, j, k;
+ modref_base_node <T> *base_node;
+ modref_ref_node <T> *ref_node;
+ modref_access_node *access_node;
+ if (every_base)
+ return true;
+ FOR_EACH_VEC_SAFE_ELT (bases, i, base_node)
+ {
+ if (base_node->every_ref)
+ return true;
+ FOR_EACH_VEC_SAFE_ELT (base_node->refs, j, ref_node)
+ {
+ if (ref_node->every_access)
+ return true;
+ FOR_EACH_VEC_SAFE_ELT (ref_node->accesses, k, access_node)
+ if (access_node->parm_index == MODREF_UNKNOWN_PARM
+ || access_node->parm_index == MODREF_GLOBAL_MEMORY_PARM)
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /* Return ggc allocated instance. We explicitly call destructors via
+ ggc_delete and do not want finalizers to be registered and
+ called at the garbage collection time. */
+ static modref_tree<T> *create_ggc ()
+ {
+ return new (ggc_alloc_no_dtor<modref_tree<T>> ())
+ modref_tree<T> ();
+ }
+
+ /* Remove all records and mark tree to alias with everything. */
+ void collapse ()
+ {
+ size_t i;
+ modref_base_node <T> *n;
+
+ if (bases)
+ {
+ FOR_EACH_VEC_SAFE_ELT (bases, i, n)
+ {
+ n->collapse ();
+ ggc_free (n);
+ }
+ vec_free (bases);
+ }
+ bases = NULL;
+ every_base = true;
+ }
+
+ /* Release memory. */
+ ~modref_tree ()
+ {
+ collapse ();
+ }
+
+  /* Update parameter indexes in the tree according to MAP.  */
+ void
+ remap_params (vec <int> *map)
+ {
+ size_t i;
+ modref_base_node <T> *base_node;
+ FOR_EACH_VEC_SAFE_ELT (bases, i, base_node)
+ {
+ size_t j;
+ modref_ref_node <T> *ref_node;
+ FOR_EACH_VEC_SAFE_ELT (base_node->refs, j, ref_node)
+ {
+ size_t k;
+ modref_access_node *access_node;
+ FOR_EACH_VEC_SAFE_ELT (ref_node->accesses, k, access_node)
+ if (access_node->parm_index >= 0)
+ {
+ if (access_node->parm_index < (int)map->length ())
+ access_node->parm_index = (*map)[access_node->parm_index];
+ else
+ access_node->parm_index = MODREF_UNKNOWN_PARM;
+ }
+ }
+ }
+ }
+};
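+
+/* A minimal usage sketch (assuming the internal APIs declared above;
+   BASE_SET/REF_SET and the access values are illustrative):
+
+     modref_tree<alias_set_type> *stores
+       = modref_tree<alias_set_type>::create_ggc ();
+     // Record a known 32-bit store at offset 0 from parameter 0.
+     modref_access_node a = {0, 32, 32, 0, 0, true, 0};
+     stores->insert (current_function_decl, base_set, ref_set, a, false);
+     if (stores->global_access_p ())
+       ;  // conservatively assume global memory is written
+     ggc_delete (stores);
+*/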
+
+void gt_ggc_mx (modref_tree <int>* const&);
+void gt_ggc_mx (modref_tree <tree_node*>* const&);
+void gt_pch_nx (modref_tree <int>* const&);
+void gt_pch_nx (modref_tree <tree_node*>* const&);
+void gt_pch_nx (modref_tree <int>* const&, gt_pointer_operator op, void *cookie);
+void gt_pch_nx (modref_tree <tree_node*>* const&, gt_pointer_operator op,
+ void *cookie);
+
+void gt_ggc_mx (modref_base_node <int>*);
+void gt_ggc_mx (modref_base_node <tree_node*>* &);
+void gt_pch_nx (modref_base_node <int>* const&);
+void gt_pch_nx (modref_base_node <tree_node*>* const&);
+void gt_pch_nx (modref_base_node <int>* const&, gt_pointer_operator op,
+ void *cookie);
+void gt_pch_nx (modref_base_node <tree_node*>* const&, gt_pointer_operator op,
+ void *cookie);
+
+void gt_ggc_mx (modref_ref_node <int>*);
+void gt_ggc_mx (modref_ref_node <tree_node*>* &);
+void gt_pch_nx (modref_ref_node <int>* const&);
+void gt_pch_nx (modref_ref_node <tree_node*>* const&);
+void gt_pch_nx (modref_ref_node <int>* const&, gt_pointer_operator op,
+ void *cookie);
+void gt_pch_nx (modref_ref_node <tree_node*>* const&, gt_pointer_operator op,
+ void *cookie);
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-modref.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-modref.h
new file mode 100644
index 0000000..2a2d31e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-modref.h
@@ -0,0 +1,131 @@
+/* Search for references that a function loads or stores.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef IPA_MODREF_H
+#define IPA_MODREF_H
+
+typedef modref_tree <alias_set_type> modref_records;
+typedef unsigned short eaf_flags_t;
+
+/* Single function summary. */
+
+struct GTY(()) modref_summary
+{
+  /* Loads and stores in the function (transitively closed to all callees).  */
+ modref_records *loads;
+ modref_records *stores;
+ auto_vec<modref_access_node> GTY((skip)) kills;
+ auto_vec<eaf_flags_t> GTY((skip)) arg_flags;
+
+ eaf_flags_t retslot_flags;
+ eaf_flags_t static_chain_flags;
+
+ unsigned writes_errno : 1;
+  /* Side effects do not include memory loads and stores, which are
+     expressed using the loads, stores and calls_interposable fields.  */
+  unsigned side_effects : 1;
+  /* If true, the function cannot be CSE optimized because it may behave
+     differently even if invoked with the same inputs.  */
+  unsigned nondeterministic : 1;
+  /* If true, the function may read any reachable memory but not use
+     it for anything useful.  This may happen e.g. when interposing a
+     function having an optimized-out conditional with an unoptimized one.
+
+     In this situation the loads summary is not useful for DSE but
+     it is still useful for CSE.  */
+ unsigned calls_interposable : 1;
+
+ /* Flags computed by finalize method. */
+
+ /* Total number of accesses in loads tree. */
+ unsigned int load_accesses;
+  /* global_memory_read is not set for functions calling functions
+     with !binds_to_current_def which, after interposition, may read global
+     memory but do nothing useful with it (except for crashing if some
+     stores are optimized out).  */
+ unsigned global_memory_read : 1;
+ unsigned global_memory_written : 1;
+ unsigned try_dse : 1;
+
+
+ modref_summary ();
+ ~modref_summary ();
+ void dump (FILE *);
+ bool useful_p (int ecf_flags, bool check_flags = true);
+ void finalize (tree);
+};
+
+modref_summary *get_modref_function_summary (cgraph_node *func);
+modref_summary *get_modref_function_summary (gcall *call, bool *interposed);
+void ipa_modref_cc_finalize ();
+void ipa_merge_modref_summary_after_inlining (cgraph_edge *e);
+
+/* All flags that are implied by the ECF_CONST functions. */
+static const int implicit_const_eaf_flags
+ = EAF_NO_DIRECT_CLOBBER | EAF_NO_INDIRECT_CLOBBER
+ | EAF_NO_DIRECT_ESCAPE | EAF_NO_INDIRECT_ESCAPE
+ | EAF_NO_DIRECT_READ | EAF_NO_INDIRECT_READ
+ | EAF_NOT_RETURNED_INDIRECTLY;
+
+/* All flags that are implied by the ECF_PURE function. */
+static const int implicit_pure_eaf_flags
+ = EAF_NO_DIRECT_CLOBBER | EAF_NO_INDIRECT_CLOBBER
+ | EAF_NO_DIRECT_ESCAPE | EAF_NO_INDIRECT_ESCAPE;
+
+/* All flags implied when we know we can ignore stores (e.g. when handling
+   a call to a noreturn function).  */
+static const int ignore_stores_eaf_flags
+ = EAF_NO_DIRECT_CLOBBER | EAF_NO_INDIRECT_CLOBBER
+ | EAF_NO_DIRECT_ESCAPE | EAF_NO_INDIRECT_ESCAPE;
+
+/* Return slot is write-only. */
+static const int implicit_retslot_eaf_flags
+ = EAF_NO_DIRECT_READ | EAF_NO_INDIRECT_READ
+ | EAF_NO_INDIRECT_ESCAPE | EAF_NO_INDIRECT_CLOBBER
+ | EAF_NOT_RETURNED_INDIRECTLY;
+
+/* If a function does not bind to the current def (e.g. it is inline in a
+   comdat section), the modref analysis may not match the behavior of the
+   function that will later be symbol-interposed to.  All side effects must
+   match; however, it is possible that the other function body contains
+   more loads which may trap.
+   MODREF_FLAGS are flags determined by analysis of the function body while
+   FLAGS are flags known otherwise (e.g. by fnspec, pure/const attributes
+   etc.)  */
+inline int
+interposable_eaf_flags (int modref_flags, int flags)
+{
+ /* If parameter was previously unused, we know it is only read
+ and its value is not used. */
+ if ((modref_flags & EAF_UNUSED) && !(flags & EAF_UNUSED))
+ {
+ modref_flags &= ~EAF_UNUSED;
+ modref_flags |= EAF_NO_DIRECT_ESCAPE | EAF_NO_INDIRECT_ESCAPE
+ | EAF_NOT_RETURNED_DIRECTLY | EAF_NOT_RETURNED_INDIRECTLY
+ | EAF_NO_DIRECT_CLOBBER | EAF_NO_INDIRECT_CLOBBER;
+ }
+  /* We cannot determine that the value is not read at all.  */
+ if ((modref_flags & EAF_NO_DIRECT_READ) && !(flags & EAF_NO_DIRECT_READ))
+ modref_flags &= ~EAF_NO_DIRECT_READ;
+ if ((modref_flags & EAF_NO_INDIRECT_READ) && !(flags & EAF_NO_INDIRECT_READ))
+ modref_flags &= ~EAF_NO_INDIRECT_READ;
+ return modref_flags;
+}
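+
+/* A worked example (hypothetical flags): if analysis of an interposable
+   function body proved EAF_UNUSED | EAF_NO_DIRECT_READ for an argument
+   while the declaration itself guarantees nothing, then
+
+     interposable_eaf_flags (EAF_UNUSED | EAF_NO_DIRECT_READ, 0)
+
+   drops EAF_UNUSED (keeping only the no-clobber/no-escape/not-returned
+   bits it implies) and drops EAF_NO_DIRECT_READ, because the interposing
+   body may actually read the value.  */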
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-param-manipulation.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-param-manipulation.h
new file mode 100644
index 0000000..9cc957a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-param-manipulation.h
@@ -0,0 +1,445 @@
+/* Manipulation of formal and actual parameters of functions and function
+ calls.
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>.
+
+
+
+This file defines classes and other data structures that are used to manipulate
+the prototype of a function, especially to create, remove or split its formal
+parameters, but also to remove its return value, and to adjust its call
+statements correspondingly.
+
+The most basic one is a vector of structures ipa_adjusted_param.  It is simply
+a description of what the new parameters should look like after the
+transformation and in what way they relate to the previous ones (if at all).
+Such a relation to an old parameter can be an outright copy or an IPA-SRA
+replacement.  If an old parameter is not listed or otherwise mentioned, it is
+removed as unused or at least unnecessary.  Note that this most basic
+structure does not work for modifying calls of functions with a variable
+number of arguments.
+
+Class ipa_param_adjustments is only a little more than a thin encapsulation of
+a vector of ipa_adjusted_param.  Along with this vector it contains an index
+of the first potential vararg argument and a boolean flag saying whether the
+return value should be removed or not.  Moreover, the class contains the
+method modify_call which can transform a call statement so that it correctly
+calls a modified function.  These two data structures were designed to have a
+small memory footprint because they are allocated for each clone of a call
+graph node that has its prototype changed and live until the end of the IPA
+clone materialization and call redirection phase.
+
+On the other hand, class ipa_param_body_adjustments can afford to allocate
+more data because its life span is much shorter: it is allocated and destroyed
+in the course of materialization of each single clone that needs it, or only
+when a particular pass needs to change a function it is operating on.  This
+class has various methods required to change the function declaration and the
+body of the function according to instructions given either by class
+ipa_param_adjustments or by only a vector of ipa_adjusted_params.
+
+When these classes are used in the context of call graph clone materialization
+and subsequent call statement redirection - which is the point at which we
+modify arguments in call statements - they need to cooperate with each other in
+order to handle what we refer to as pass-through (IPA-SRA) splits. These are
+situations when a formal parameter of one function is split into several
+smaller ones and some of them are then passed on in a call to another function
+because the formal parameter of this callee has also been split.
+
+Consider a simple example:
+
+struct S {int a, b, c;};
+struct Z {int x; S s;};
+
+foo (S s)
+{
+ use (s.b);
+}
+
+bar (Z z)
+{
+ use (z.s.a);
+ foo (z.s);
+}
+
+baz ()
+{
+ bar (*global);
+}
+
+Both bar and foo would have their parameter split. Foo would receive one
+replacement representing s.b. Function bar would see its parameter split into
+one replacement representing z.s.a and another representing z.s.b which would
+be passed on to foo. It would be a so called pass-through split IPA-SRA
+replacement, one which is passed in a call as an actual argument to another
+IPA-SRA replacement in another function.
+
+Note that the call chain in the example can be arbitrarily long and recursive
+and that any function in it can be cloned by another IPA pass and any number of
+adjacent functions in the call chain can be inlined into each other. Call
+redirection takes place only after bodies of the function have been modified by
+all of the above.
+
+Call redirection has to be able to find the right decl or SSA_NAME that
+corresponds to the transitive split in the caller. The SSA names are assigned
+right after clone materialization/modification and cannot be "added" to call
+arguments at any later point.  Moreover, if the caller has been inlined, the
+SSA_NAMEs in question no longer belong to PARM_DECLs but to VAR_DECLs,
+indistinguishable from any others.
+
+Therefore, when clone materialization finds a call statement which it knows is
+a part of a transitive split, it will simply add as arguments all new "split"
+replacements (that have an offset greater than or equal to that of the
+original call argument):
+
+ foo (repl_for_a, repl_for_b, <rest of original arguments>);
+
+It will also store into ipa_edge_modification_info (which is internal to
+ipa-param-manipulation.cc) information about which replacement is which and
+where original arguments are. Call redirection will then invoke
+ipa_param_adjustments::modify_call which will access this information and
+eliminate all replacements which the callee does not expect (repl_for_a in our
+example above). In between these two steps, however, a call statement might
+have extraneous arguments. */
+
+#ifndef IPA_PARAM_MANIPULATION_H
+#define IPA_PARAM_MANIPULATION_H
+
+/* Indices into ipa_param_prefixes to identify a human-readable prefix for newly
+ synthesized parameters. Keep in sync with the array. */
+enum ipa_param_name_prefix_indices
+ {
+ IPA_PARAM_PREFIX_SYNTH,
+ IPA_PARAM_PREFIX_ISRA,
+ IPA_PARAM_PREFIX_SIMD,
+ IPA_PARAM_PREFIX_MASK,
+ IPA_PARAM_PREFIX_COUNT
+};
+
+/* We do not support manipulating functions with more than
+ 1<<IPA_PARAM_MAX_INDEX_BITS parameters. */
+#define IPA_PARAM_MAX_INDEX_BITS 16
+
+/* Operation to be performed for the parameter in ipa_adjusted_param
+   below.  */
+
+enum ipa_parm_op
+{
+ /* Do not use or you will trigger an assert. */
+ IPA_PARAM_OP_UNDEFINED,
+
+ /* This new parameter is an unmodified parameter at index base_index. */
+ IPA_PARAM_OP_COPY,
+
+ /* This describes a brand new parameter. If it somehow relates to any
+ original parameters, the user needs to manage the transition itself. */
+ IPA_PARAM_OP_NEW,
+
+ /* Split parameter as indicated by fields base_index, offset and type. */
+ IPA_PARAM_OP_SPLIT
+};
+
+/* Structure that describes one parameter of a function after transformation.
+ Omitted parameters will be removed. */
+
+struct GTY(()) ipa_adjusted_param
+{
+  /* Type of the new parameter.  Required for all operations except
+     IPA_PARAM_OP_COPY, when the original type will be preserved.  */
+ tree type;
+
+ /* Alias reference type to be used in MEM_REFs when adjusting caller
+     arguments.  Required for the IPA_PARAM_OP_SPLIT operation.  */
+ tree alias_ptr_type;
+
+ /* Offset into the original parameter (for the cases when the new parameter
+     is a component of an original one).  Required for the IPA_PARAM_OP_SPLIT
+ operation. */
+ unsigned unit_offset;
+
+ /* Zero based index of the original parameter this one is based on. Required
+ for IPA_PARAM_OP_COPY and IPA_PARAM_OP_SPLIT, users of IPA_PARAM_OP_NEW
+ only need to specify it if they use replacement lookup provided by
+ ipa_param_body_adjustments. */
+ unsigned base_index : IPA_PARAM_MAX_INDEX_BITS;
+
+ /* Zero based index of the parameter this one is based on in the previous
+ clone. If there is no previous clone, it must be equal to base_index. */
+ unsigned prev_clone_index : IPA_PARAM_MAX_INDEX_BITS;
+
+ /* Specify the operation, if any, to be performed on the parameter. */
+ enum ipa_parm_op op : 2;
+
+ /* If set, this structure describes a parameter copied over from a previous
+ IPA clone, any transformations are thus not to be re-done. */
+ unsigned prev_clone_adjustment : 1;
+
+ /* Index into ipa_param_prefixes specifying a prefix to be used with
+ DECL_NAMEs of newly synthesized parameters. */
+ unsigned param_prefix_index : 2;
+
+ /* Storage order of the original parameter (for the cases when the new
+ parameter is a component of an original one). */
+ unsigned reverse : 1;
+
+ /* A bit free for the user. */
+ unsigned user_flag : 1;
+};
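+
+/* For instance (field values illustrative), for foo (S s) from the example
+   in the comment at the top of this file, IPA-SRA replacing the whole
+   aggregate by just s.b (an int at unit offset 4) would describe the single
+   new parameter roughly as
+
+     { type = int, alias_ptr_type = int *, unit_offset = 4,
+       base_index = 0, prev_clone_index = 0, op = IPA_PARAM_OP_SPLIT,
+       prev_clone_adjustment = 0, param_prefix_index = IPA_PARAM_PREFIX_ISRA,
+       reverse = 0, user_flag = 0 }.  */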
+
+void ipa_dump_adjusted_parameters (FILE *f,
+ vec<ipa_adjusted_param, va_gc> *adj_params);
+
+/* Class used to record planned modifications to parameters of a function and
+ also to perform necessary modifications at the caller side at the gimple
+ level. Used to describe all cgraph node clones that have their parameters
+ changed, therefore the class should only have a small memory footprint. */
+
+class GTY(()) ipa_param_adjustments
+{
+public:
+  /* Constructor from NEW_PARAMS showing what the new parameters should look
+     like, plus copying any pre-existing actual arguments starting from the
+     argument with index ALWAYS_COPY_START (if non-negative; a negative value
+     means do not copy anything beyond what is described in NEW_PARAMS), and
+     SKIP_RETURN, which indicates that the function should return void after
+     the transformation.  */
+
+ ipa_param_adjustments (vec<ipa_adjusted_param, va_gc> *new_params,
+ int always_copy_start, bool skip_return)
+ : m_adj_params (new_params), m_always_copy_start (always_copy_start),
+ m_skip_return (skip_return)
+ {}
+
+ /* Modify a call statement arguments (and possibly remove the return value)
+ as described in the data fields of this class. */
+ gcall *modify_call (cgraph_edge *cs, bool update_references);
+  /* Return true if the first parameter is left intact.  */
+ bool first_param_intact_p ();
+ /* Build a function type corresponding to the modified call. */
+ tree build_new_function_type (tree old_type, bool type_is_original_p);
+ /* Build a declaration corresponding to the target of the modified call. */
+ tree adjust_decl (tree orig_decl);
+ /* Fill a vector marking which parameters are intact by the described
+ modifications. */
+ void get_surviving_params (vec<bool> *surviving_params);
+ /* Fill a vector with new indices of surviving original parameters. */
+ void get_updated_indices (vec<int> *new_indices);
+ /* Return the original index for the given new parameter index. Return a
+ negative number if not available. */
+ int get_original_index (int newidx);
+
+ void dump (FILE *f);
+ void debug ();
+
+ /* How the known part of arguments should look like. */
+ vec<ipa_adjusted_param, va_gc> *m_adj_params;
+
+ /* If non-negative, copy any arguments starting at this offset without any
+ modifications so that functions with variable number of arguments can be
+     modified.  This number should be equal to the number of original formal
+ parameters. */
+ int m_always_copy_start;
+ /* If true, make the function not return any value. */
+ bool m_skip_return;
+
+ static bool type_attribute_allowed_p (tree);
+private:
+ ipa_param_adjustments () {}
+
+ void init (vec<tree> *cur_params);
+ int get_max_base_index ();
+ bool method2func_p (tree orig_type);
+};
+
+/* Structure used to map expressions accessing split or replaced parameters to
+ new PARM_DECLs. */
+
+struct ipa_param_body_replacement
+{
+ /* The old decl of the original parameter. */
+ tree base;
+ /* The new decl it should be replaced with. */
+ tree repl;
+ /* Users of ipa_param_body_adjustments that modify standalone functions
+ outside of IPA clone materialization can use the following field for their
+ internal purposes. */
+ tree dummy;
+ /* The offset within BASE that REPL represents. */
+ unsigned unit_offset;
+};
+
+struct ipa_replace_map;
+
+/* Class used when actually performing adjustments to formal parameters of a
+   function to map accesses that need to be replaced to replacements.  The
+   class attempts to work in two very different sets of circumstances: as a
+   part of tree-inline.cc's tree_function_versioning machinery to clone
+   functions (when M_ID is not NULL) and in a standalone fashion, modifying an
+   existing function in place (when M_ID is NULL).  While a lot of stuff is
+   handled in a unified way in both modes, there are many aspects of the
+   process that require distinct paths.  */
+
+class ipa_param_body_adjustments
+{
+public:
+ /* Constructor to use from within tree-inline. */
+ ipa_param_body_adjustments (ipa_param_adjustments *adjustments,
+ tree fndecl, tree old_fndecl,
+ struct copy_body_data *id, tree *vars,
+ vec<ipa_replace_map *, va_gc> *tree_map);
+ /* Constructor to use for modifying a function outside of tree-inline from an
+ instance of ipa_param_adjustments. */
+ ipa_param_body_adjustments (ipa_param_adjustments *adjustments,
+ tree fndecl);
+ /* Constructor to use for modifying a function outside of tree-inline from a
+ simple vector of desired parameter modification. */
+ ipa_param_body_adjustments (vec<ipa_adjusted_param, va_gc> *adj_params,
+ tree fndecl);
+
+ /* The do-it-all function for modifying a function outside of
+ tree-inline. */
+ bool perform_cfun_body_modifications ();
+
+ /* Change the PARM_DECLs. */
+ void modify_formal_parameters ();
+ /* Register a REPLACEMENT for accesses to BASE at UNIT_OFFSET. */
+ void register_replacement (tree base, unsigned unit_offset, tree replacement);
+ /* Register a replacement decl for the transformation done in APM. */
+ void register_replacement (ipa_adjusted_param *apm, tree replacement);
+ /* Sort m_replacements and set m_sorted_replacements_p to true. Users that
+ call register_replacement themselves must call the method before any
+ lookup and thus also any statement or expression modification. */
+ void sort_replacements ();
+ /* Lookup a replacement for a given offset within a given parameter. */
+ tree lookup_replacement (tree base, unsigned unit_offset);
+ /* Lookup a replacement for an expression, if there is one. */
+ ipa_param_body_replacement *get_expr_replacement (tree expr,
+ bool ignore_default_def);
+ /* Lookup the new base for surviving names previously belonging to a
+ parameter. */
+ tree get_replacement_ssa_base (tree old_decl);
+ /* Modify a statement. */
+ bool modify_gimple_stmt (gimple **stmt, gimple_seq *extra_stmts,
+ gimple *orig_stmt);
+ /* Return the new chain of parameters. */
+ tree get_new_param_chain ();
+  /* Replace all occurrences of SSAs in m_dead_ssa_debug_equiv in *T with what
+ they are mapped to. */
+ void remap_with_debug_expressions (tree *t);
+
+ /* If there are any initialization statements that need to be emitted into
+     the basic block BB right at the start of the new function, do so.  */
+ void append_init_stmts (basic_block bb);
+
+ /* Pointers to data structures defining how the function should be
+ modified. */
+ vec<ipa_adjusted_param, va_gc> *m_adj_params;
+ ipa_param_adjustments *m_adjustments;
+
+ /* Vector of old parameter declarations that must have their debug bind
+ statements re-mapped and debug decls created. */
+
+ auto_vec<tree, 16> m_reset_debug_decls;
+
+ /* Sets of statements and SSA_NAMEs that only manipulate data from parameters
+ removed because they are not necessary. */
+ hash_set<gimple *> m_dead_stmts;
+ hash_set<tree> m_dead_ssas;
+
+ /* Mapping from DCEd SSAs to what their potential debug_binds should be. */
+ hash_map<tree, tree> m_dead_ssa_debug_equiv;
+  /* Mapping from DCEd statements to debug expressions that will be placed on
+     the RHS of the debug statements that will replace them.  */
+
+private:
+ void common_initialization (tree old_fndecl, tree *vars,
+ vec<ipa_replace_map *, va_gc> *tree_map);
+ tree carry_over_param (tree t);
+ unsigned get_base_index (ipa_adjusted_param *apm);
+ ipa_param_body_replacement *lookup_replacement_1 (tree base,
+ unsigned unit_offset);
+ ipa_param_body_replacement *lookup_first_base_replacement (tree base);
+ tree replace_removed_params_ssa_names (tree old_name, gimple *stmt);
+ bool modify_expression (tree *expr_p, bool convert);
+ bool modify_assignment (gimple *stmt, gimple_seq *extra_stmts);
+ bool modify_call_stmt (gcall **stmt_p, gimple *orig_stmt);
+ bool modify_cfun_body ();
+ void reset_debug_stmts ();
+ void mark_dead_statements (tree dead_param, vec<tree> *debugstack);
+ bool prepare_debug_expressions (tree dead_ssa);
+
+ /* Declaration of the function that is being transformed. */
+
+ tree m_fndecl;
+
+ /* If non-NULL, the tree-inline master data structure guiding materialization
+ of the current clone. */
+ struct copy_body_data *m_id;
+
+ /* Vector of old parameter declarations (before changing them). */
+
+ auto_vec<tree, 16> m_oparms;
+
+ /* Vector of parameter declarations the function will have after
+ transformation. */
+
+ auto_vec<tree, 16> m_new_decls;
+
+ /* If the function type has non-NULL TYPE_ARG_TYPES, this is the vector of
+ these types after transformation, otherwise an empty one. */
+
+ auto_vec<tree, 16> m_new_types;
+
+ /* Vector of structures telling how to replace old parameters in the
+     function body.  TODO: Even though there are usually only a few, should
+     we use a hash?  */
+
+ auto_vec<ipa_param_body_replacement, 16> m_replacements;
+
+ /* List of initialization assignments to be put at the beginning of the
+ cloned function to deal with split aggregates which however have known
+ constant value and so their PARM_DECL disappears. */
+
+ auto_vec<gimple *, 8> m_split_agg_csts_inits;
+
+ /* Vector for remapping SSA_BASES from old parameter declarations that are
+ being removed as a part of the transformation. Before a new VAR_DECL is
+ created, it holds the old PARM_DECL, once the variable is built it is
+ stored here. */
+
+ auto_vec<tree> m_removed_decls;
+
+ /* Hash to quickly lookup the item in m_removed_decls given the old decl. */
+
+ hash_map<tree, unsigned> m_removed_map;
+
+  /* True iff the transformed function is a class method that is about to lose
+ its this pointer and must be converted to a normal function. */
+
+ bool m_method2func;
+
+  /* True if m_replacements have been sorted since the last insertion.  */
+
+ bool m_sorted_replacements_p;
+};
+
+void push_function_arg_decls (vec<tree> *args, tree fndecl);
+void push_function_arg_types (vec<tree> *types, tree fntype);
+void ipa_verify_edge_has_no_modifications (cgraph_edge *cs);
+void ipa_edge_modifications_finalize ();
+
+
+#endif /* IPA_PARAM_MANIPULATION_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-predicate.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-predicate.h
new file mode 100644
index 0000000..2882bf8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-predicate.h
@@ -0,0 +1,273 @@
+/* IPA predicates.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Jan Hubicka
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Representation of inline parameters that do depend on the context the
+   function is inlined into (i.e. known constant values of function
+   parameters).
+
+   Conditions that are interesting for the function body are collected into
+   the CONDS vector.  They are of a simple kind: a mathematical transformation
+   of a function parameter, T(function_param), in which the parameter occurs
+   only once and the other operands are IPA invariant.  The conditions are
+   then referred to by predicates.  */
+
+
+/* A simplified representation of a tree node, for unary, binary and ternary
+   operations.  Computations on a parameter are decomposed into a series of
+   structures of this kind.  */
+struct GTY(()) expr_eval_op
+{
+ /* Result type of expression. */
+ tree type;
+ /* Constant operands in expression, there are at most two. */
+ tree val[2];
+ /* Index of parameter operand in expression. */
+ unsigned index : 2;
+ /* Operation code of expression. */
+ ENUM_BITFIELD(tree_code) code : 16;
+};
+
+typedef vec<expr_eval_op, va_gc> *expr_eval_ops;
+
+struct GTY(()) condition
+{
+ /* If agg_contents is set, this is the offset from which the used data was
+ loaded. */
+ HOST_WIDE_INT offset;
+ /* Type of the access reading the data (or the PARM_DECL SSA_NAME). */
+ tree type;
+ tree val;
+ int operand_num;
+ ENUM_BITFIELD(tree_code) code : 16;
+ /* Set if the used data were loaded from an aggregate parameter or from
+ data received by reference. */
+ unsigned agg_contents : 1;
+ /* If agg_contents is set, this differentiates between loads from data
+ passed by reference and by value. */
+ unsigned by_ref : 1;
+ /* A set of sequential operations on the parameter, which can be seen as
+ a mathematical function on the parameter. */
+ expr_eval_ops param_ops;
+};
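+
+/* For instance (values illustrative), a guard such as
+
+     if ((param & 255) == 7)
+
+   on the function's third parameter could be captured as a condition with
+   operand_num = 2, code = EQ_EXPR, val = 7 and a single param_ops entry
+   applying BIT_AND_EXPR with constant 255 to the parameter.  */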
+
+/* Information kept about parameter of call site. */
+struct inline_param_summary
+{
+  /* REG_BR_PROB_BASE based probability that the parameter will change in
+     between two invocations of the call.
+     E.g. loop invariant parameters get
+     REG_BR_PROB_BASE/estimated_iterations and regular
+     parameters REG_BR_PROB_BASE.
+
+ Value 0 is reserved for compile time invariants. */
+ short change_prob;
+ unsigned points_to_local_or_readonly_memory : 1;
+ bool equal_to (const inline_param_summary &other) const
+ {
+ return change_prob == other.change_prob
+ && points_to_local_or_readonly_memory
+ == other.points_to_local_or_readonly_memory;
+ }
+ bool useless_p (void) const
+ {
+ return change_prob == REG_BR_PROB_BASE
+ && !points_to_local_or_readonly_memory;
+ }
+};
+
+typedef vec<condition, va_gc> *conditions;
+
+/* Predicates are used to represent properties of a function (such as its
+   runtime cost) which depend on the context the function is called in.
+
+   Predicates are logical formulas in conjunctive-disjunctive form consisting
+   of clauses which are bitmaps specifying a set of conditions, at least one
+   of which must be true for the clause to be satisfied.  Physically they are
+   represented as an array of clauses terminated by 0.
+
+   In order to make a predicate (possibly) true, all of its clauses must
+   be (possibly) true.  To make a clause (possibly) true, one of the
+   conditions it mentions must be (possibly) true.
+
+   There are fixed bounds on the number of clauses and conditions, and all the
+   manipulation functions are conservative in the positive direction; i.e. we
+   may lose precision by thinking that a predicate may be true even when it
+   is not.  */
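+
+/* For example (constructed by hand for illustration; real predicates are
+   built via set_to_cond and add_clause below), the formula
+   "(cond0 OR cond1) AND cond2" over dynamic conditions 0, 1 and 2 would be
+   stored as
+
+     m_clause[0] = (1 << (0 + first_dynamic_condition))
+                   | (1 << (1 + first_dynamic_condition));
+     m_clause[1] = 1 << (2 + first_dynamic_condition);
+     m_clause[2] = 0;
+*/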
+
+typedef uint32_t clause_t;
+class ipa_predicate
+{
+public:
+ enum predicate_conditions
+ {
+ false_condition = 0,
+ not_inlined_condition = 1,
+ first_dynamic_condition = 2
+ };
+
+  /* Maximal number of conditions a predicate can refer to.  This is limited
+     by clause_t being 32-bit.  */
+ static const int num_conditions = 32;
+
+ /* Special condition code we use to represent test that operand is compile
+ time constant. */
+ static const tree_code is_not_constant = ERROR_MARK;
+
+  /* Special condition code we use to represent the test that an operand is
+     not changed across invocations of the function.  When an operand is
+     IS_NOT_CONSTANT it is always CHANGED; however, e.g. loop invariants can
+     be NOT_CHANGED for a given percentage of executions even when they are
+     not compile time constants.  */
+ static const tree_code changed = IDENTIFIER_NODE;
+
+
+
+  /* Initialize predicate either to true or false depending on P.  */
+ inline ipa_predicate (bool p = true)
+ {
+ if (p)
+ /* True predicate. */
+ m_clause[0] = 0;
+ else
+ /* False predicate. */
+ set_to_cond (false_condition);
+ }
+
+ /* Sanity check that we do not mix pointers to predicates with predicates. */
+ inline ipa_predicate (ipa_predicate *)
+ {
+ gcc_unreachable ();
+ }
+
+ /* Return predicate testing condition I. */
+ static inline ipa_predicate predicate_testing_cond (int i)
+ {
+ ipa_predicate p;
+ p.set_to_cond (i + first_dynamic_condition);
+ return p;
+ }
+
+ /* Return predicate testing that function was not inlined. */
+ static ipa_predicate not_inlined (void)
+ {
+ ipa_predicate p;
+ p.set_to_cond (not_inlined_condition);
+ return p;
+ }
+
+ /* Compute logical and of ipa_predicates. */
+ ipa_predicate & operator &= (const ipa_predicate &);
+ inline ipa_predicate operator &(const ipa_predicate &p) const
+ {
+ ipa_predicate ret = *this;
+ ret &= p;
+ return ret;
+ }
+
+  /* Compute logical or of ipa_predicates.  This is not an operator because
+     the extra parameter CONDITIONS is needed.  */
+ ipa_predicate or_with (conditions, const ipa_predicate &) const;
+
+ /* Return true if ipa_predicates are known to be equal. */
+ inline bool operator==(const ipa_predicate &p2) const
+ {
+ int i;
+ for (i = 0; m_clause[i]; i++)
+ {
+ gcc_checking_assert (i < max_clauses);
+ gcc_checking_assert (m_clause[i] > m_clause[i + 1]);
+ gcc_checking_assert (!p2.m_clause[i]
+ || p2.m_clause[i] > p2.m_clause[i + 1]);
+ if (m_clause[i] != p2.m_clause[i])
+ return false;
+ }
+ return !p2.m_clause[i];
+ }
+
+ /* Return true if predicates are known to be true or false depending
+ on COND. */
+ inline bool operator==(const bool cond) const
+ {
+ if (cond)
+ return !m_clause[0];
+ if (m_clause[0] == (1 << false_condition))
+ {
+ gcc_checking_assert (!m_clause[1]
+ && m_clause[0] == 1
+ << false_condition);
+ return true;
+ }
+ return false;
+ }
+
+ inline bool operator!=(const ipa_predicate &p2) const
+ {
+ return !(*this == p2);
+ }
+
+ inline bool operator!=(const bool cond) const
+ {
+ return !(*this == cond);
+ }
+
+ /* Evaluate if predicate is known to be false given the clause of possible
+ truths. */
+ bool evaluate (clause_t) const;
+
+ /* Estimate probability that predicate will be true in a given context. */
+ int probability (conditions, clause_t, vec<inline_param_summary>) const;
+
+ /* Dump predicate to F. Output newline if nl. */
+ void dump (FILE *f, conditions, bool nl=true) const;
+ void DEBUG_FUNCTION debug (conditions) const;
+
+ /* Return ipa_predicate equal to THIS after duplication. */
+ ipa_predicate remap_after_duplication (clause_t);
+
+ /* Return ipa_predicate equal to THIS after inlining. */
+ ipa_predicate remap_after_inlining (class ipa_fn_summary *,
+ ipa_node_params *params_summary,
+ ipa_fn_summary *,
+ const vec<int> &,
+ const vec<HOST_WIDE_INT> &,
+ clause_t, const ipa_predicate &);
+
+ void stream_in (lto_input_block *);
+ void stream_out (output_block *);
+
+private:
+ static const int max_clauses = 8;
+ clause_t m_clause[max_clauses + 1];
+
+ /* Initialize predicate to one testing single condition number COND. */
+ inline void set_to_cond (int cond)
+ {
+ m_clause[0] = 1 << cond;
+ m_clause[1] = 0;
+ }
+
+ void add_clause (conditions conditions, clause_t);
+};
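+
+/* A minimal usage sketch (interfaces as declared above; COND_INDEX is a
+   hypothetical index into the conditions vector):
+
+     ipa_predicate p
+       = ipa_predicate::predicate_testing_cond (cond_index)
+         & ipa_predicate::not_inlined ();
+     if (p != false)
+       ;  // account the size/time guarded by P
+*/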
+
+void dump_condition (FILE *f, conditions conditions, int cond);
+ipa_predicate add_condition (ipa_fn_summary *summary,
+ ipa_node_params *params_summary,
+ int operand_num,
+ tree type, struct agg_position_info *aggpos,
+ enum tree_code code, tree val,
+ expr_eval_ops param_ops = NULL);
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-prop.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-prop.h
new file mode 100644
index 0000000..7eb5c8f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-prop.h
@@ -0,0 +1,1204 @@
+/* Interprocedural analyses.
+ Copyright (C) 2005-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef IPA_PROP_H
+#define IPA_PROP_H
+
+/* The following definitions and interfaces are used by
+   interprocedural analyses of parameters.  */
+
+#define IPA_UNDESCRIBED_USE -1
+
+/* Index identifying an actual argument or a formal parameter may have only
+   this many bits.  */
+
+#define IPA_PROP_ARG_INDEX_LIMIT_BITS 16
+
+/* ipa-prop.cc stuff (ipa-cp, indirect inlining): */
+
+/* A jump function for a callsite represents the values passed as actual
+   arguments of the callsite.  They were originally proposed in a paper called
+   "Interprocedural Constant Propagation" by David Callahan, Keith D. Cooper,
+   Ken Kennedy and Linda Torczon in Comp86, pp. 152-161.  There are three main
+   types of values:
+
+   Pass-through - the caller's formal parameter is passed as an actual
+                  argument, possibly with one simple operation performed on it.
+   Constant - a constant (is_gimple_ip_invariant) is passed as an actual
+              argument.
+ Unknown - neither of the above.
+
+   IPA_JF_LOAD_AGG is a compound pass-through jump function, in which the
+   primary operation on the formal parameter is a memory dereference that
+   loads a value from a part of an aggregate, which is represented or pointed
+   to by the formal parameter.  Moreover, an additional unary/binary operation
+   can be applied to the loaded value, and the final result is passed as the
+   actual argument of the callee (e.g. *(param_1(D) + 4) op 24).  It is meant
+   to describe the use of an aggregate parameter or by-reference parameter in
+   argument passing, commonly found in C++ and Fortran.
+
+ IPA_JF_ANCESTOR is a special pass-through jump function, which means that
+ the result is an address of a part of the object pointed to by the formal
+ parameter to which the function refers. It is mainly intended to represent
+ getting addresses of ancestor fields in C++
+ (e.g. &this_1(D)->D.1766.D.1756). Note that if the original pointer is
+ NULL, ancestor jump function must behave like a simple pass-through.
+
+ Other pass-through functions can either simply pass on an unchanged formal
+ parameter or can apply one simple binary operation to it (such jump
+ functions are called polynomial).
+
+   Jump functions are computed in ipa-prop.cc by function
+   ipa_compute_jump_functions.  Some information can be lost and jump
+ functions degraded accordingly when inlining, see
+ update_call_notes_after_inlining in the same file. */
+
+enum jump_func_type
+{
+ IPA_JF_UNKNOWN = 0, /* newly allocated and zeroed jump functions default */
+  IPA_JF_CONST,             /* represented by field constant */
+ IPA_JF_PASS_THROUGH, /* represented by field pass_through */
+ IPA_JF_LOAD_AGG, /* represented by field load_agg */
+ IPA_JF_ANCESTOR /* represented by field ancestor */
+};
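+
+/* For instance (illustrative), in a caller with formal parameter x, the call
+
+     foo (x, x + 2, 7);
+
+   would get three jump functions: an IPA_JF_PASS_THROUGH with NOP_EXPR for
+   the first argument, an IPA_JF_PASS_THROUGH with PLUS_EXPR and operand 2
+   for the second, and an IPA_JF_CONST holding 7 for the third.  */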
+
+struct ipa_cst_ref_desc;
+
+/* Structure holding data required to describe a constant jump function. */
+struct GTY(()) ipa_constant_data
+{
+ /* The value of the constant. */
+ tree value;
+ /* Pointer to the structure that describes the reference. */
+ struct ipa_cst_ref_desc GTY((skip)) *rdesc;
+};
+
+/* Structure holding data required to describe a pass-through jump function. */
+
+struct GTY(()) ipa_pass_through_data
+{
+ /* If an operation is to be performed on the original parameter, this is the
+ second (constant) operand. */
+ tree operand;
+ /* Number of the caller's formal parameter being passed. */
+ int formal_id;
+ /* Operation that is performed on the argument before it is passed on.
+ Special values which have other meaning than in normal contexts:
+ - NOP_EXPR means no operation, not even type conversion.
+ - ASSERT_EXPR means that only the value in operand is allowed to pass
+ through (without any change), for all other values the result is
+ unknown.
+ Otherwise operation must be a simple binary or unary arithmetic operation
+ where the caller's parameter is the first operand and (for binary
+ operations) the operand field from this structure is the second one. */
+ enum tree_code operation;
+ /* When the passed value is a pointer, it is set to true only when we are
+ certain that no write to the object it points to has occurred since the
+     caller function started execution, except for changes noted in the
+ aggregate part of the jump function (see description of
+ ipa_agg_jump_function). The flag is used only when the operation is
+ NOP_EXPR. */
+ unsigned agg_preserved : 1;
+ /* Set when the edge has already been used to decrement an appropriate
+ reference description counter and should not be decremented again. */
+ unsigned refdesc_decremented : 1;
+};
+
+/* Structure holding data required to describe a load-value-from-aggregate
+ jump function. */
+
+struct GTY(()) ipa_load_agg_data
+{
+ /* Inherit from pass through jump function, describing unary/binary
+ operation on the value loaded from aggregate that is represented or
+ pointed to by the formal parameter, specified by formal_id in this
+ pass_through jump function data structure. */
+ struct ipa_pass_through_data pass_through;
+ /* Type of the value loaded from the aggregate. */
+ tree type;
+ /* Offset at which the value is located within the aggregate. */
+ HOST_WIDE_INT offset;
+ /* True if loaded by reference (the aggregate is pointed to by the formal
+ parameter) or false if loaded by value (the aggregate is represented
+ by the formal parameter). */
+ bool by_ref;
+};
+
+/* Structure holding data required to describe an ancestor pass-through
+ jump function. */
+
+struct GTY(()) ipa_ancestor_jf_data
+{
+ /* Offset of the field representing the ancestor. */
+ HOST_WIDE_INT offset;
+ /* Number of the caller's formal parameter being passed. */
+ int formal_id;
+ /* Flag with the same meaning like agg_preserve in ipa_pass_through_data. */
+ unsigned agg_preserved : 1;
+ /* When set, the operation should not have any effect on NULL pointers. */
+ unsigned keep_null : 1;
+};
+
+/* A jump function for an aggregate part at a given offset, which describes how
+ its content value is generated. All unlisted positions are assumed to have a
+ value defined in an unknown way. */
+
+struct GTY(()) ipa_agg_jf_item
+{
+ /* The offset for the aggregate part. */
+ HOST_WIDE_INT offset;
+
+ /* Data type of the aggregate part. */
+ tree type;
+
+ /* Jump function type. */
+ enum jump_func_type jftype;
+
+ /* Represents a value of the jump function. constant holds the actual
+ constant of a constant jump function. pass_through is used only for
+ simple pass-through jump functions. load_agg is used for
+ load-value-from-aggregate jump functions. */
+ union jump_func_agg_value
+ {
+ tree GTY ((tag ("IPA_JF_CONST"))) constant;
+ struct ipa_pass_through_data GTY ((tag ("IPA_JF_PASS_THROUGH"))) pass_through;
+ struct ipa_load_agg_data GTY ((tag ("IPA_JF_LOAD_AGG"))) load_agg;
+ } GTY ((desc ("%1.jftype"))) value;
+};
+
+/* Jump functions describing a set of aggregate contents. */
+
+struct GTY(()) ipa_agg_jump_function
+{
+ /* Descriptions of the individual jump function items. */
+ vec<ipa_agg_jf_item, va_gc> *items;
+ /* True if the data was passed by reference (as opposed to by value). */
+ bool by_ref;
+};
+
+class ipcp_transformation;
+class ipa_auto_call_arg_values;
+class ipa_call_arg_values;
+
+/* Element of a vector describing aggregate values for a number of arguments in
+ a particular context, be it a call or the aggregate constants that a node is
+ specialized for. */
+
+struct GTY(()) ipa_argagg_value
+{
+ /* The constant value. In the contexts where the list of known values is
+ being pruned, NULL means a variable value. */
+ tree value;
+ /* Unit offset within the aggregate. */
+ unsigned unit_offset;
+ /* Index of the parameter, as it was in the original function (i.e. needs
+ remapping after parameter modification is carried out as part of clone
+ materialization). */
+ unsigned index : IPA_PROP_ARG_INDEX_LIMIT_BITS;
+ /* Whether the value was passed by reference. */
+ unsigned by_ref : 1;
+};
+
+/* A view into a sorted list of aggregate values in a particular context, be it
+ a call or the aggregate constants that a node is specialized for. The
+ actual data is stored in the vector this has been constructed from. */
+
+class ipa_argagg_value_list
+{
+public:
+ ipa_argagg_value_list () = delete;
+ ipa_argagg_value_list (const vec<ipa_argagg_value, va_gc> *values)
+ : m_elts (values)
+ {}
+ ipa_argagg_value_list (const vec<ipa_argagg_value> *values)
+ : m_elts (*values)
+ {}
+ ipa_argagg_value_list (const ipa_auto_call_arg_values *aavals);
+ ipa_argagg_value_list (const ipa_call_arg_values *gavals);
+ ipa_argagg_value_list (const ipcp_transformation *tinfo);
+
+ /* Return the aggregate constant stored for INDEX at UNIT_OFFSET, provided
+ it is passed by reference or by value as specified by BY_REF, or
+ NULL_TREE otherwise. */
+
+ tree get_value (int index, unsigned unit_offset, bool by_ref) const;
+
+ /* Return the aggregate constant stored for INDEX at UNIT_OFFSET, not
+ performing any check of whether the value is passed by reference. Return
+ NULL_TREE if there is no such constant. */
+
+ tree get_value (int index, unsigned unit_offset) const;
+
+ /* Return the item describing a constant stored for INDEX at UNIT_OFFSET or
+ NULL if there is no such constant. */
+
+ const ipa_argagg_value *get_elt (int index, unsigned unit_offset) const;
+
+
+ /* Return the first item describing a constant stored for parameter with
+ INDEX, regardless of offset or reference, or NULL if there is no such
+ constant. */
+
+ const ipa_argagg_value *get_elt_for_index (int index) const;
+
+ /* Return true if there is an aggregate constant referring to a value passed
+ in or by parameter with INDEX (at any offset, whether by reference or
+ not). */
+
+ bool value_for_index_p (int index) const
+ {
+ return !!get_elt_for_index (index);
+ }
+
+ /* Return true if all elements present in OTHER are also present in this
+ list. */
+
+ bool superset_of_p (const ipa_argagg_value_list &other) const;
+
+ /* Push all items in this list that describe parameter SRC_INDEX into RES as
+ ones describing DEST_INDEX while subtracting UNIT_DELTA from their unit
+ offsets but skip those which would end up with a negative offset. */
+
+ void push_adjusted_values (unsigned src_index, unsigned dest_index,
+ unsigned unit_delta,
+ vec<ipa_argagg_value> *res) const;
+
+ /* Dump aggregate constants to FILE. */
+
+ void dump (FILE *f);
+
+ /* Dump aggregate constants to stderr. */
+
+ void DEBUG_FUNCTION debug ();
+
+ /* Array slice pointing to the actual storage. */
+
+ array_slice<const ipa_argagg_value> m_elts;
+};
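+
+/* Usage sketch (illustrative only): assuming AGGVALS is a
+ vec<ipa_argagg_value, va_gc> * that has been filled in elsewhere, a view
+ can be constructed and queried without copying the underlying data:
+
+ ipa_argagg_value_list avl (aggvals);
+ tree cst = avl.get_value (0, 16, true);
+
+ which yields the constant known to be passed by reference in parameter 0
+ at unit offset 16, or NULL_TREE if there is none. */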
+
+/* Information about zero/non-zero bits. */
+class GTY(()) ipa_bits
+{
+public:
+ /* The propagated value. */
+ widest_int value;
+ /* Mask corresponding to the value.
+ Similar to ccp_lattice_t: if the Xth bit of mask is 0,
+ the Xth bit of value is a known constant. */
+ widest_int mask;
+};
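+
+/* Worked example (illustrative only): after a statement such as
+ x = y & 0xF0, bits 4-7 of x are unknown (copied from y) while all other
+ bits are known to be zero. That would be encoded as value == 0 (every
+ known bit is zero) and mask == 0xF0 (only bits 4-7 are unknown). */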
+
+/* Info about value ranges. */
+
+class GTY(()) ipa_vr
+{
+public:
+ /* The data fields below are valid only if known is true. */
+ bool known;
+ enum value_range_kind type;
+ wide_int min;
+ wide_int max;
+ bool nonzero_p (tree) const;
+};
+
+/* A jump function for a callsite represents the values passed as actual
+ arguments of the callsite. See enum jump_func_type for the various
+ types of jump functions supported. */
+struct GTY (()) ipa_jump_func
+{
+ /* Aggregate jump function description. See struct ipa_agg_jump_function
+ and its description. */
+ struct ipa_agg_jump_function agg;
+
+ /* Information about zero/non-zero bits. The pointed-to structure is shared
+ between different jump functions. Use ipa_set_jfunc_bits to set this
+ field. */
+ class ipa_bits *bits;
+
+ /* Information about value range, containing valid data only when vr_known is
+ true. The pointed-to structure is shared between different jump
+ functions. Use ipa_set_jfunc_vr to set this field. */
+ value_range *m_vr;
+
+ enum jump_func_type type;
+ /* Represents a value of a jump function. constant holds the actual
+ constant of a constant jump function, pass_through describes a simple
+ pass-through jump function and ancestor describes an ancestor jump
+ function. */
+ union jump_func_value
+ {
+ struct ipa_constant_data GTY ((tag ("IPA_JF_CONST"))) constant;
+ struct ipa_pass_through_data GTY ((tag ("IPA_JF_PASS_THROUGH"))) pass_through;
+ struct ipa_ancestor_jf_data GTY ((tag ("IPA_JF_ANCESTOR"))) ancestor;
+ } GTY ((desc ("%1.type"))) value;
+};
+
+
+/* Return the constant stored in a constant jump function JFUNC. */
+
+inline tree
+ipa_get_jf_constant (struct ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_CONST);
+ return jfunc->value.constant.value;
+}
+
+inline struct ipa_cst_ref_desc *
+ipa_get_jf_constant_rdesc (struct ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_CONST);
+ return jfunc->value.constant.rdesc;
+}
+
+/* Make JFUNC not participate in any further reference counting. */
+
+inline void
+ipa_zap_jf_refdesc (ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_CONST);
+ jfunc->value.constant.rdesc = NULL;
+}
+
+/* Return the operand of a pass-through jump function JFUNC. */
+
+inline tree
+ipa_get_jf_pass_through_operand (struct ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_PASS_THROUGH);
+ return jfunc->value.pass_through.operand;
+}
+
+/* Return the number of the caller's formal parameter that a pass through jump
+ function JFUNC refers to. */
+
+inline int
+ipa_get_jf_pass_through_formal_id (struct ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_PASS_THROUGH);
+ return jfunc->value.pass_through.formal_id;
+}
+
+/* Return operation of a pass through jump function JFUNC. */
+
+inline enum tree_code
+ipa_get_jf_pass_through_operation (struct ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_PASS_THROUGH);
+ return jfunc->value.pass_through.operation;
+}
+
+/* Return the agg_preserved flag of a pass through jump function JFUNC. */
+
+inline bool
+ipa_get_jf_pass_through_agg_preserved (struct ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_PASS_THROUGH);
+ return jfunc->value.pass_through.agg_preserved;
+}
+
+/* Return the refdesc_decremented flag of a pass through jump function
+ JFUNC. */
+
+inline bool
+ipa_get_jf_pass_through_refdesc_decremented (struct ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_PASS_THROUGH);
+ return jfunc->value.pass_through.refdesc_decremented;
+}
+
+/* Set the refdesc_decremented flag of a pass through jump function JFUNC to
+ VALUE. */
+
+inline void
+ipa_set_jf_pass_through_refdesc_decremented (ipa_jump_func *jfunc, bool value)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_PASS_THROUGH);
+ jfunc->value.pass_through.refdesc_decremented = value;
+}
+
+/* Return true if pass through jump function JFUNC preserves type
+ information. */
+
+inline bool
+ipa_get_jf_pass_through_type_preserved (struct ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_PASS_THROUGH);
+ return jfunc->value.pass_through.agg_preserved;
+}
+
+/* Return the offset of an ancestor jump function JFUNC. */
+
+inline HOST_WIDE_INT
+ipa_get_jf_ancestor_offset (struct ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_ANCESTOR);
+ return jfunc->value.ancestor.offset;
+}
+
+/* Return the number of the caller's formal parameter that an ancestor jump
+ function JFUNC refers to. */
+
+inline int
+ipa_get_jf_ancestor_formal_id (struct ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_ANCESTOR);
+ return jfunc->value.ancestor.formal_id;
+}
+
+/* Return the agg_preserved flag of an ancestor jump function JFUNC. */
+
+inline bool
+ipa_get_jf_ancestor_agg_preserved (struct ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_ANCESTOR);
+ return jfunc->value.ancestor.agg_preserved;
+}
+
+/* Return true if ancestor jump function JFUNC preserves type information. */
+
+inline bool
+ipa_get_jf_ancestor_type_preserved (struct ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_ANCESTOR);
+ return jfunc->value.ancestor.agg_preserved;
+}
+
+/* Return, assuming JFUNC is an ancestor jump function, whether we should
+ first check the formal parameter for non-NULLness, unless it does not
+ matter because the offset is zero anyway. */
+
+inline bool
+ipa_get_jf_ancestor_keep_null (struct ipa_jump_func *jfunc)
+{
+ gcc_checking_assert (jfunc->type == IPA_JF_ANCESTOR);
+ return jfunc->value.ancestor.keep_null;
+}
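+
+/* Usage sketch (illustrative only; the handle_* callees are hypothetical):
+ the accessors above are meant to be used after dispatching on the jump
+ function type, e.g.:
+
+ switch (jfunc->type)
+ {
+ case IPA_JF_CONST:
+ handle_constant (ipa_get_jf_constant (jfunc));
+ break;
+ case IPA_JF_PASS_THROUGH:
+ if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
+ handle_param_copy (ipa_get_jf_pass_through_formal_id (jfunc));
+ break;
+ case IPA_JF_ANCESTOR:
+ handle_ancestor (ipa_get_jf_ancestor_formal_id (jfunc),
+ ipa_get_jf_ancestor_offset (jfunc));
+ break;
+ default:
+ break;
+ }
+*/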
+
+/* Class for allocating a bundle of various potentially known properties about
+ actual arguments of a particular call, on the stack for the usual case and
+ on the heap only if there are unusually many arguments. The data is
+ deallocated when the instance of this class goes out of scope or is
+ otherwise destructed. */
+
+class ipa_auto_call_arg_values
+{
+public:
+ /* If m_known_vals (vector of known "scalar" values) is sufficiently long,
+ return its element at INDEX, otherwise return NULL. */
+ tree safe_sval_at (int index)
+ {
+ if ((unsigned) index < m_known_vals.length ())
+ return m_known_vals[index];
+ return NULL;
+ }
+
+ /* Vector describing known values of parameters. */
+ auto_vec<tree, 32> m_known_vals;
+
+ /* Vector describing known polymorphic call contexts. */
+ auto_vec<ipa_polymorphic_call_context, 32> m_known_contexts;
+
+ /* Vector describing known aggregate values. */
+ auto_vec<ipa_argagg_value, 32> m_known_aggs;
+
+ /* Vector describing known value ranges of arguments. */
+ auto_vec<value_range, 32> m_known_value_ranges;
+};
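+
+/* Usage sketch (illustrative only): a caller typically declares an instance
+ on the stack, fills the vectors and then queries them; all storage is
+ released automatically when the instance goes out of scope:
+
+ ipa_auto_call_arg_values avals;
+ avals.m_known_vals.safe_push (some_constant);
+ tree val = avals.safe_sval_at (0);
+
+ Here some_constant stands for any tree known for the first argument. */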
+
+inline
+ipa_argagg_value_list
+::ipa_argagg_value_list (const ipa_auto_call_arg_values *aavals)
+ : m_elts (aavals->m_known_aggs)
+{}
+
+/* Class bundling the various potentially known properties about actual
+ arguments of a particular call. This variant does not deallocate the
+ bundled data in any way as the vectors can either be pointing to vectors in
+ ipa_auto_call_arg_values or be allocated independently. */
+
+class ipa_call_arg_values
+{
+public:
+ /* Default constructor, setting the vectors to empty ones. */
+ ipa_call_arg_values ()
+ {}
+
+ /* Construct this general variant of the bundle from the variant which uses
+ auto_vecs to hold the vectors. This means that vectors of objects
+ constructed with this constructor should not be changed because if they
+ get reallocated, the member vectors and the underlying auto_vecs would get
+ out of sync. */
+ ipa_call_arg_values (ipa_auto_call_arg_values *aavals)
+ : m_known_vals (aavals->m_known_vals.to_vec_legacy ()),
+ m_known_contexts (aavals->m_known_contexts.to_vec_legacy ()),
+ m_known_aggs (aavals->m_known_aggs.to_vec_legacy ()),
+ m_known_value_ranges (aavals->m_known_value_ranges.to_vec_legacy ())
+ {}
+
+ /* If m_known_vals (vector of known "scalar" values) is sufficiently long,
+ return its element at INDEX, otherwise return NULL. */
+ tree safe_sval_at (int index)
+ {
+ if ((unsigned) index < m_known_vals.length ())
+ return m_known_vals[index];
+ return NULL;
+ }
+
+ /* Vector describing known values of parameters. */
+ vec<tree> m_known_vals = vNULL;
+
+ /* Vector describing known polymorphic call contexts. */
+ vec<ipa_polymorphic_call_context> m_known_contexts = vNULL;
+
+ /* Vector describing known aggregate values. */
+ vec<ipa_argagg_value> m_known_aggs = vNULL;
+
+ /* Vector describing known value ranges of arguments. */
+ vec<value_range> m_known_value_ranges = vNULL;
+};
+
+inline
+ipa_argagg_value_list
+::ipa_argagg_value_list (const ipa_call_arg_values *gavals)
+ : m_elts (gavals->m_known_aggs)
+{}
+
+/* Summary describing a single formal parameter. */
+
+struct GTY(()) ipa_param_descriptor
+{
+ /* In analysis and modification phase, this is the PARAM_DECL of this
+ parameter, in IPA LTO phase, this is the type of the described
+ parameter or NULL if not known. Do not read this field directly but
+ through ipa_get_param and ipa_get_type as appropriate. */
+ tree decl_or_type;
+ /* If all uses of the parameter are described by ipa-prop structures, this
+ says how many there are. If any use could not be described by means of
+ ipa-prop structures (which include the load_dereferenced flag below), this is
+ IPA_UNDESCRIBED_USE. */
+ int controlled_uses;
+ unsigned int move_cost : 27;
+ /* The parameter is used. */
+ unsigned used : 1;
+ unsigned used_by_ipa_predicates : 1;
+ unsigned used_by_indirect_call : 1;
+ unsigned used_by_polymorphic_call : 1;
+ /* Set to true when in addition to being used in call statements, the
+ parameter has also been used for loads (but not for writes, does not
+ escape, etc.). This allows us to identify parameters p which are only
+ used as *p, and so when we propagate a constant to them, we can generate a
+ LOAD and not ADDR reference to them. */
+ unsigned load_dereferenced : 1;
+};
+
+/* ipa_node_params stores information related to formal parameters of functions
+ and some other information for interprocedural passes that operate on
+ parameters (such as ipa-cp). */
+
+class GTY((for_user)) ipa_node_params
+{
+public:
+ /* Default constructor. */
+ ipa_node_params ();
+
+ /* Default destructor. */
+ ~ipa_node_params ();
+
+ /* Information about individual formal parameters that are gathered when
+ summaries are generated. */
+ vec<ipa_param_descriptor, va_gc> *descriptors;
+ /* Pointer to an array of structures describing individual formal
+ parameters. */
+ class ipcp_param_lattices * GTY((skip)) lattices;
+ /* This field is non-NULL only for versioned nodes; it points to the node
+ that IPA-CP cloned this one from. */
+ struct cgraph_node * GTY((skip)) ipcp_orig_node;
+ /* If this node is an ipa-cp clone, these are the known constants that
+ describe what it has been specialized for. */
+ vec<tree> GTY((skip)) known_csts;
+ /* If this node is an ipa-cp clone, these are the known polymorphic contexts
+ that describe what it has been specialized for. */
+ vec<ipa_polymorphic_call_context> GTY((skip)) known_contexts;
+ /* Whether the param uses analysis and jump function computation has already
+ been performed. */
+ unsigned analysis_done : 1;
+ /* Whether the function is enqueued in ipa-cp propagation stack. */
+ unsigned node_enqueued : 1;
+ /* Whether we should create a specialized version based on values that are
+ known to be constant in all contexts. */
+ unsigned do_clone_for_all_contexts : 1;
+ /* Set if this is an IPA-CP clone for all contexts. */
+ unsigned is_all_contexts_clone : 1;
+ /* Node has been completely replaced by clones and will be removed after
+ ipa-cp is finished. */
+ unsigned node_dead : 1;
+ /* Node is involved in a recursion, potentially indirect. */
+ unsigned node_within_scc : 1;
+ /* Node contains only direct recursion. */
+ unsigned node_is_self_scc : 1;
+ /* Node is calling a private function called only once. */
+ unsigned node_calling_single_call : 1;
+ /* False when something makes versioning impossible. */
+ unsigned versionable : 1;
+};
+
+inline
+ipa_node_params::ipa_node_params ()
+: descriptors (NULL), lattices (NULL), ipcp_orig_node (NULL),
+ known_csts (vNULL), known_contexts (vNULL), analysis_done (0),
+ node_enqueued (0), do_clone_for_all_contexts (0), is_all_contexts_clone (0),
+ node_dead (0), node_within_scc (0), node_is_self_scc (0),
+ node_calling_single_call (0), versionable (0)
+{
+}
+
+inline
+ipa_node_params::~ipa_node_params ()
+{
+ free (lattices);
+ vec_free (descriptors);
+ known_csts.release ();
+ known_contexts.release ();
+}
+
+/* Intermediate information that we get from alias analysis about a particular
+ parameter in a particular basic_block. When a parameter or the memory it
+ references is marked modified, we use that information in all dominated
+ blocks without consulting the alias analysis oracle. */
+
+struct ipa_param_aa_status
+{
+ /* Set when this structure contains meaningful information. If not, the
+ structure describing a dominating BB should be used instead. */
+ bool valid;
+
+ /* Whether we have seen something which might have modified the data in
+ question. PARM is for the parameter itself, REF is for data it points to
+ but using the alias type of individual accesses and PT is the same thing
+ but for computing aggregate pass-through functions using a very inclusive
+ ao_ref. */
+ bool parm_modified, ref_modified, pt_modified;
+};
+
+/* Information related to a given BB that is used only when looking at the
+ function body. */
+
+struct ipa_bb_info
+{
+ /* Call graph edges going out of this BB. */
+ vec<cgraph_edge *> cg_edges;
+ /* Alias analysis statuses of each formal parameter at this bb. */
+ vec<ipa_param_aa_status> param_aa_statuses;
+};
+
+/* Structure with global information that is only used when looking at function
+ body. */
+
+struct ipa_func_body_info
+{
+ /* The node that is being analyzed. */
+ cgraph_node *node;
+
+ /* Its info. */
+ class ipa_node_params *info;
+
+ /* Information about individual BBs. */
+ vec<ipa_bb_info> bb_infos;
+
+ /* Number of parameters. */
+ int param_count;
+
+ /* Number of statements we are still allowed to walk when analyzing this
+ function. */
+ unsigned int aa_walk_budget;
+};
+
+/* ipa_node_params access functions. Please use these to access fields that
+ are or will be shared among various passes. */
+
+/* Return the number of formal parameters. */
+
+inline int
+ipa_get_param_count (class ipa_node_params *info)
+{
+ return vec_safe_length (info->descriptors);
+}
+
+/* Return the parameter declaration in DESCRIPTORS at index I and assert it is
+ indeed a PARM_DECL. */
+
+inline tree
+ipa_get_param (const vec<ipa_param_descriptor, va_gc> &descriptors, int i)
+{
+ tree t = descriptors[i].decl_or_type;
+ gcc_checking_assert (TREE_CODE (t) == PARM_DECL);
+ return t;
+}
+
+/* Return the declaration of the Ith formal parameter of the function corresponding
+ to INFO. Note there is no setter function as this array is built just once
+ using ipa_initialize_node_params. This function should not be called in
+ WPA. */
+
+inline tree
+ipa_get_param (class ipa_node_params *info, int i)
+{
+ gcc_checking_assert (info->descriptors);
+ return ipa_get_param (*info->descriptors, i);
+}
+
+/* Return the type of the Ith formal parameter of the function corresponding
+ to INFO if it is known or NULL if not. */
+
+inline tree
+ipa_get_type (class ipa_node_params *info, int i)
+{
+ if (vec_safe_length (info->descriptors) <= (unsigned) i)
+ return NULL;
+ tree t = (*info->descriptors)[i].decl_or_type;
+ if (!t)
+ return NULL;
+ if (TYPE_P (t))
+ return t;
+ gcc_checking_assert (TREE_CODE (t) == PARM_DECL);
+ return TREE_TYPE (t);
+}
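+
+/* Usage sketch (illustrative only): iterating over all formal parameters of
+ a function whose summary INFO has been computed:
+
+ for (int i = 0; i < ipa_get_param_count (info); i++)
+ if (tree type = ipa_get_type (info, i))
+ {
+ ... TYPE is the type of the Ith parameter, when it is known ...
+ }
+*/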
+
+/* Return the move cost of the Ith formal parameter of the function corresponding
+ to INFO. */
+
+inline int
+ipa_get_param_move_cost (class ipa_node_params *info, int i)
+{
+ gcc_checking_assert (info->descriptors);
+ return (*info->descriptors)[i].move_cost;
+}
+
+/* Set the used flag corresponding to the Ith formal parameter of the function
+ associated with INFO to VAL. */
+
+inline void
+ipa_set_param_used (class ipa_node_params *info, int i, bool val)
+{
+ gcc_checking_assert (info->descriptors);
+ (*info->descriptors)[i].used = val;
+}
+
+/* Set the used_by_ipa_predicates flag corresponding to the Ith formal
+ parameter of the function associated with INFO to VAL. */
+
+inline void
+ipa_set_param_used_by_ipa_predicates (class ipa_node_params *info, int i, bool val)
+{
+ gcc_checking_assert (info->descriptors);
+ (*info->descriptors)[i].used_by_ipa_predicates = val;
+}
+
+/* Set the used_by_indirect_call flag corresponding to the Ith formal
+ parameter of the function associated with INFO to VAL. */
+
+inline void
+ipa_set_param_used_by_indirect_call (class ipa_node_params *info, int i, bool val)
+{
+ gcc_checking_assert (info->descriptors);
+ (*info->descriptors)[i].used_by_indirect_call = val;
+}
+
+/* Set the .used_by_polymorphic_call flag corresponding to the Ith formal
+ parameter of the function associated with INFO to VAL. */
+
+inline void
+ipa_set_param_used_by_polymorphic_call (class ipa_node_params *info, int i, bool val)
+{
+ gcc_checking_assert (info->descriptors);
+ (*info->descriptors)[i].used_by_polymorphic_call = val;
+}
+
+/* Return how many uses described by ipa-prop a parameter has or
+ IPA_UNDESCRIBED_USE if there is a use that is not described by these
+ structures. */
+inline int
+ipa_get_controlled_uses (class ipa_node_params *info, int i)
+{
+ /* FIXME: introducing speculation causes out of bounds access here. */
+ if (vec_safe_length (info->descriptors) > (unsigned)i)
+ return (*info->descriptors)[i].controlled_uses;
+ return IPA_UNDESCRIBED_USE;
+}
+
+/* Set the controlled counter of a given parameter. */
+
+inline void
+ipa_set_controlled_uses (class ipa_node_params *info, int i, int val)
+{
+ gcc_checking_assert (info->descriptors);
+ (*info->descriptors)[i].controlled_uses = val;
+}
+
+/* Assuming a parameter does not have IPA_UNDESCRIBED_USE controlled uses,
+ return the flag which indicates that it has been dereferenced, but only in
+ a load. */
+inline int
+ipa_get_param_load_dereferenced (class ipa_node_params *info, int i)
+{
+ gcc_assert (ipa_get_controlled_uses (info, i) != IPA_UNDESCRIBED_USE);
+ return (*info->descriptors)[i].load_dereferenced;
+}
+
+/* Set the load_dereferenced flag of a given parameter. */
+
+inline void
+ipa_set_param_load_dereferenced (class ipa_node_params *info, int i, bool val)
+{
+ gcc_checking_assert (info->descriptors);
+ (*info->descriptors)[i].load_dereferenced = val;
+}
+
+/* Return the used flag corresponding to the Ith formal parameter of the
+ function associated with INFO. */
+
+inline bool
+ipa_is_param_used (class ipa_node_params *info, int i)
+{
+ gcc_checking_assert (info->descriptors);
+ return (*info->descriptors)[i].used;
+}
+
+/* Return the used_by_ipa_predicates flag corresponding to the Ith formal
+ parameter of the function associated with INFO. */
+
+inline bool
+ipa_is_param_used_by_ipa_predicates (class ipa_node_params *info, int i)
+{
+ gcc_checking_assert (info->descriptors);
+ return (*info->descriptors)[i].used_by_ipa_predicates;
+}
+
+/* Return the used_by_indirect_call flag corresponding to the Ith formal
+ parameter of the function associated with INFO. */
+
+inline bool
+ipa_is_param_used_by_indirect_call (class ipa_node_params *info, int i)
+{
+ gcc_checking_assert (info->descriptors);
+ return (*info->descriptors)[i].used_by_indirect_call;
+}
+
+/* Return the used_by_polymorphic_call flag corresponding to the Ith formal
+ parameter of the function associated with INFO. */
+
+inline bool
+ipa_is_param_used_by_polymorphic_call (class ipa_node_params *info, int i)
+{
+ gcc_checking_assert (info->descriptors);
+ return (*info->descriptors)[i].used_by_polymorphic_call;
+}
+
+/* Structure holding information for the transformation phase of IPA-CP. */
+
+struct GTY(()) ipcp_transformation
+{
+ /* Known aggregate values. */
+ vec<ipa_argagg_value, va_gc> *m_agg_values;
+ /* Known bits information. */
+ vec<ipa_bits *, va_gc> *bits;
+ /* Value range information. */
+ vec<ipa_vr, va_gc> *m_vr;
+
+ /* Default constructor. */
+ ipcp_transformation ()
+ : m_agg_values (NULL), bits (NULL), m_vr (NULL)
+ { }
+
+ /* Default destructor. */
+ ~ipcp_transformation ()
+ {
+ vec_free (m_agg_values);
+ vec_free (bits);
+ vec_free (m_vr);
+ }
+};
+
+inline
+ipa_argagg_value_list::ipa_argagg_value_list (const ipcp_transformation *tinfo)
+ : m_elts (tinfo->m_agg_values)
+{}
+
+void ipa_set_node_agg_value_chain (struct cgraph_node *node,
+ vec<ipa_argagg_value, va_gc> *aggs);
+void ipcp_transformation_initialize (void);
+void ipcp_free_transformation_sum (void);
+
+/* ipa_edge_args stores information related to a callsite and particularly its
+ arguments. It can be accessed by the IPA_EDGE_REF macro. */
+
+class GTY((for_user)) ipa_edge_args
+{
+ public:
+
+ /* Default constructor. */
+ ipa_edge_args () : jump_functions (NULL), polymorphic_call_contexts (NULL)
+ {}
+
+ /* Destructor. */
+ ~ipa_edge_args ()
+ {
+ unsigned int i;
+ ipa_jump_func *jf;
+ FOR_EACH_VEC_SAFE_ELT (jump_functions, i, jf)
+ vec_free (jf->agg.items);
+ vec_free (jump_functions);
+ vec_free (polymorphic_call_contexts);
+ }
+
+ /* Vectors of the callsite's jump function and polymorphic context
+ information of each parameter. */
+ vec<ipa_jump_func, va_gc> *jump_functions;
+ vec<ipa_polymorphic_call_context, va_gc> *polymorphic_call_contexts;
+};
+
+/* ipa_edge_args access functions. Please use these to access fields that
+ are or will be shared among various passes. */
+
+/* Return the number of actual arguments. */
+
+inline int
+ipa_get_cs_argument_count (class ipa_edge_args *args)
+{
+ return vec_safe_length (args->jump_functions);
+}
+
+/* Returns a pointer to the jump function for the ith argument. Please note
+ there is no setter function as jump functions are all set up in
+ ipa_compute_jump_functions. */
+
+inline struct ipa_jump_func *
+ipa_get_ith_jump_func (class ipa_edge_args *args, int i)
+{
+ return &(*args->jump_functions)[i];
+}
+
+/* Returns a pointer to the polymorphic call context for the ith argument.
+ NULL if contexts are not computed. */
+inline class ipa_polymorphic_call_context *
+ipa_get_ith_polymorhic_call_context (class ipa_edge_args *args, int i)
+{
+ if (!args->polymorphic_call_contexts)
+ return NULL;
+ return &(*args->polymorphic_call_contexts)[i];
+}
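+
+/* Usage sketch (illustrative only): given a call-graph edge CS for which
+ jump functions have been computed, the arguments can be examined as
+
+ ipa_edge_args *args = ipa_edge_args_sum->get (cs);
+ for (int i = 0; i < ipa_get_cs_argument_count (args); i++)
+ {
+ ipa_jump_func *jf = ipa_get_ith_jump_func (args, i);
+ ...
+ }
+
+ where ipa_edge_args_sum is the call summary declared below. */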
+
+/* Function summary for ipa_node_params. */
+class GTY((user)) ipa_node_params_t: public function_summary <ipa_node_params *>
+{
+public:
+ ipa_node_params_t (symbol_table *table, bool ggc):
+ function_summary<ipa_node_params *> (table, ggc)
+ {
+ disable_insertion_hook ();
+ }
+
+ /* Hook that is called by summary when a node is duplicated. */
+ void duplicate (cgraph_node *node,
+ cgraph_node *node2,
+ ipa_node_params *data,
+ ipa_node_params *data2) final override;
+};
+
+/* Summary to manage ipa_edge_args structures. */
+
+class GTY((user)) ipa_edge_args_sum_t : public call_summary <ipa_edge_args *>
+{
+ public:
+ ipa_edge_args_sum_t (symbol_table *table, bool ggc)
+ : call_summary<ipa_edge_args *> (table, ggc) { }
+
+ void remove (cgraph_edge *edge)
+ {
+ call_summary <ipa_edge_args *>::remove (edge);
+ }
+
+ /* Hook that is called by summary when an edge is removed. */
+ void remove (cgraph_edge *cs, ipa_edge_args *args) final override;
+ /* Hook that is called by summary when an edge is duplicated. */
+ void duplicate (cgraph_edge *src,
+ cgraph_edge *dst,
+ ipa_edge_args *old_args,
+ ipa_edge_args *new_args) final override;
+};
+
+/* Function summary where the parameter infos are actually stored. */
+extern GTY(()) ipa_node_params_t * ipa_node_params_sum;
+/* Call summary to store information about edges such as jump functions. */
+extern GTY(()) ipa_edge_args_sum_t *ipa_edge_args_sum;
+
+/* Function summary for IPA-CP transformation. */
+class ipcp_transformation_t
+: public function_summary<ipcp_transformation *>
+{
+public:
+ ipcp_transformation_t (symbol_table *table, bool ggc):
+ function_summary<ipcp_transformation *> (table, ggc) {}
+
+ ~ipcp_transformation_t () {}
+
+ static ipcp_transformation_t *create_ggc (symbol_table *symtab)
+ {
+ ipcp_transformation_t *summary
+ = new (ggc_alloc_no_dtor <ipcp_transformation_t> ())
+ ipcp_transformation_t (symtab, true);
+ return summary;
+ }
+ /* Hook that is called by summary when a node is duplicated. */
+ void duplicate (cgraph_node *node,
+ cgraph_node *node2,
+ ipcp_transformation *data,
+ ipcp_transformation *data2) final override;
+};
+
+/* Function summary where the IPA CP transformations are actually stored. */
+extern GTY(()) function_summary <ipcp_transformation *> *ipcp_transformation_sum;
+
+/* Creating and freeing ipa_node_params and ipa_edge_args. */
+void ipa_create_all_node_params (void);
+void ipa_create_all_edge_args (void);
+void ipa_check_create_edge_args (void);
+void ipa_free_all_node_params (void);
+void ipa_free_all_edge_args (void);
+void ipa_free_all_structures_after_ipa_cp (void);
+void ipa_free_all_structures_after_iinln (void);
+
+void ipa_register_cgraph_hooks (void);
+int count_formal_params (tree fndecl);
+
+/* This function ensures the array of node param infos is big enough to
+ accommodate a structure for all nodes and reallocates it if not. */
+
+inline void
+ipa_check_create_node_params (void)
+{
+ if (!ipa_node_params_sum)
+ ipa_node_params_sum
+ = (new (ggc_alloc_no_dtor <ipa_node_params_t> ())
+ ipa_node_params_t (symtab, true));
+}
+
+/* Returns true if the edge summary contains a record for EDGE. The main
+ purpose of this function is that debug dumping functions can check info
+ availability without causing allocations. */
+
+inline bool
+ipa_edge_args_info_available_for_edge_p (struct cgraph_edge *edge)
+{
+ return ipa_edge_args_sum->exists (edge);
+}
+
+inline ipcp_transformation *
+ipcp_get_transformation_summary (cgraph_node *node)
+{
+ if (ipcp_transformation_sum == NULL)
+ return NULL;
+
+ return ipcp_transformation_sum->get (node);
+}
+
+/* Function formal parameters related computations. */
+void ipa_initialize_node_params (struct cgraph_node *node);
+bool ipa_propagate_indirect_call_infos (struct cgraph_edge *cs,
+ vec<cgraph_edge *> *new_edges);
+
+/* Indirect edge processing and target discovery. */
+tree ipa_get_indirect_edge_target (struct cgraph_edge *ie,
+ ipa_call_arg_values *avals,
+ bool *speculative);
+struct cgraph_edge *ipa_make_edge_direct_to_target (struct cgraph_edge *, tree,
+ bool speculative = false);
+tree ipa_impossible_devirt_target (struct cgraph_edge *, tree);
+ipa_bits *ipa_get_ipa_bits_for_value (const widest_int &value,
+ const widest_int &mask);
+
+
+/* Functions related to both. */
+void ipa_analyze_node (struct cgraph_node *);
+
+/* Aggregate jump function related functions. */
+tree ipa_find_agg_cst_from_init (tree scalar, HOST_WIDE_INT offset,
+ bool by_ref);
+bool ipa_load_from_parm_agg (struct ipa_func_body_info *fbi,
+ vec<ipa_param_descriptor, va_gc> *descriptors,
+ gimple *stmt, tree op, int *index_p,
+ HOST_WIDE_INT *offset_p, poly_int64 *size_p,
+ bool *by_ref, bool *guaranteed_unmodified = NULL);
+
+/* Debugging interface. */
+void ipa_print_node_params (FILE *, struct cgraph_node *node);
+void ipa_print_all_params (FILE *);
+void ipa_print_node_jump_functions (FILE *f, struct cgraph_node *node);
+void ipa_print_all_jump_functions (FILE * f);
+void ipcp_verify_propagated_values (void);
+
+template <typename value>
+class ipcp_value;
+
+extern object_allocator<ipcp_value<tree> > ipcp_cst_values_pool;
+extern object_allocator<ipcp_value<ipa_polymorphic_call_context> >
+ ipcp_poly_ctx_values_pool;
+
+template <typename valtype>
+struct ipcp_value_source;
+
+extern object_allocator<ipcp_value_source<tree> > ipcp_sources_pool;
+
+struct ipcp_agg_lattice;
+
+extern object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool;
+
+void ipa_prop_write_jump_functions (void);
+void ipa_prop_read_jump_functions (void);
+void ipcp_write_transformation_summaries (void);
+void ipcp_read_transformation_summaries (void);
+int ipa_get_param_decl_index (class ipa_node_params *, tree);
+tree ipa_value_from_jfunc (class ipa_node_params *info,
+ struct ipa_jump_func *jfunc, tree type);
+tree ipa_agg_value_from_jfunc (ipa_node_params *info, cgraph_node *node,
+ const ipa_agg_jf_item *item);
+unsigned int ipcp_transform_function (struct cgraph_node *node);
+ipa_polymorphic_call_context ipa_context_from_jfunc (ipa_node_params *,
+ cgraph_edge *,
+ int,
+ ipa_jump_func *);
+value_range ipa_value_range_from_jfunc (ipa_node_params *, cgraph_edge *,
+ ipa_jump_func *, tree);
+void ipa_push_agg_values_from_jfunc (ipa_node_params *info, cgraph_node *node,
+ ipa_agg_jump_function *agg_jfunc,
+ unsigned dst_index,
+ vec<ipa_argagg_value> *res);
+void ipa_dump_param (FILE *, class ipa_node_params *info, int i);
+void ipa_release_body_info (struct ipa_func_body_info *);
+tree ipa_get_callee_param_type (struct cgraph_edge *e, int i);
+bool ipcp_get_parm_bits (tree, tree *, widest_int *);
+bool unadjusted_ptr_and_unit_offset (tree op, tree *ret,
+ poly_int64 *offset_ret);
+
+/* From tree-sra.cc: */
+tree build_ref_for_offset (location_t, tree, poly_int64, bool, tree,
+ gimple_stmt_iterator *, bool);
+
+/* In ipa-cp.cc */
+void ipa_cp_cc_finalize (void);
+
+#endif /* IPA_PROP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-ref.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-ref.h
new file mode 100644
index 0000000..c071010
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-ref.h
@@ -0,0 +1,139 @@
+/* IPA reference lists.
+ Copyright (C) 2010-2023 Free Software Foundation, Inc.
+ Contributed by Jan Hubicka
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_IPA_REF_H
+#define GCC_IPA_REF_H
+
+struct cgraph_node;
+struct varpool_node;
+struct symtab_node;
+
+
+/* How the reference is done. */
+enum ipa_ref_use
+{
+ IPA_REF_LOAD,
+ IPA_REF_STORE,
+ IPA_REF_ADDR,
+ IPA_REF_ALIAS
+};
+
+/* Record of reference in callgraph or varpool. */
+struct ipa_ref
+{
+public:
+ /* Remove reference. */
+ void remove_reference ();
+
+ /* Return true when execution of reference can lead to return from
+ function. */
+ bool cannot_lead_to_return ();
+
+ /* Return true if reference may be used in address compare. */
+ bool address_matters_p ();
+
+ /* Return reference list this reference is in. */
+ struct ipa_ref_list * referring_ref_list (void);
+
+ /* Return reference list this reference is in. */
+ struct ipa_ref_list * referred_ref_list (void);
+
+ symtab_node *referring;
+ symtab_node *referred;
+ gimple *stmt;
+ unsigned int lto_stmt_uid;
+ unsigned int referred_index;
+ /* Speculative id is used to link direct calls with their corresponding
+ IPA_REF_ADDR references when representing speculative calls. */
+ unsigned int speculative_id : 16;
+ ENUM_BITFIELD (ipa_ref_use) use:3;
+ unsigned int speculative:1;
+};
+
+typedef struct ipa_ref ipa_ref_t;
+
+
+/* List of references. This is stored in both callgraph and varpool nodes. */
+struct ipa_ref_list
+{
+public:
+ /* Return first reference in list or NULL if empty. */
+ struct ipa_ref *first_reference (void)
+ {
+ if (!references.length ())
+ return NULL;
+ return &references[0];
+ }
+
+ /* Return first referring ref in list or NULL if empty. */
+ struct ipa_ref *first_referring (void)
+ {
+ if (!referring.length ())
+ return NULL;
+ return referring[0];
+ }
+
+ /* Return first referring alias. */
+ struct ipa_ref *first_alias (void)
+ {
+ struct ipa_ref *r = first_referring ();
+
+ return r && r->use == IPA_REF_ALIAS ? r : NULL;
+ }
+
+ /* Return last referring alias. */
+ struct ipa_ref *last_alias (void)
+ {
+ unsigned int i = 0;
+
+ for (i = 0; i < referring.length (); i++)
+ if (referring[i]->use != IPA_REF_ALIAS)
+ break;
+
+ return i == 0 ? NULL : referring[i - 1];
+ }
+
+ /* Return true if the symbol has an alias. */
+ bool inline has_aliases_p (void)
+ {
+ return first_alias ();
+ }
+
+ /* Clear reference list. */
+ void clear (void)
+ {
+ referring.create (0);
+ references.create (0);
+ }
+
+ /* Return number of references. */
+ unsigned int nreferences (void)
+ {
+ return references.length ();
+ }
+
+ /* Store actual references in references vector. */
+ vec<ipa_ref_t, va_heap, vl_ptr> references;
+ /* Referring is a vector of pointers to references. It must not live in GGC
+ space or GGC would try to mark the middle of the references vectors. */
+ vec<ipa_ref_t *, va_heap, vl_ptr> referring;
+};
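+
+/* Usage sketch (illustrative only): walking all references recorded in a
+ list pointed to by LIST, e.g. counting address-taken references:
+
+ unsigned naddrs = 0;
+ for (unsigned i = 0; i < list->nreferences (); i++)
+ if (list->references[i].use == IPA_REF_ADDR)
+ naddrs++;
+*/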
+
+#endif /* GCC_IPA_REF_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-reference.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-reference.h
new file mode 100644
index 0000000..8d87245
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-reference.h
@@ -0,0 +1,31 @@
+/* IPA handling of references.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+ Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_IPA_REFERENCE_H
+#define GCC_IPA_REFERENCE_H
+
+/* In ipa-reference.cc */
+bitmap ipa_reference_get_read_global (struct cgraph_node *fn);
+bitmap ipa_reference_get_written_global (struct cgraph_node *fn);
+void ipa_reference_cc_finalize (void);
+int ipa_reference_var_uid (tree t);
+
+#endif /* GCC_IPA_REFERENCE_H */
+
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-utils.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-utils.h
new file mode 100644
index 0000000..0eefcf4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ipa-utils.h
@@ -0,0 +1,286 @@
+/* Utilities for ipa analysis.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+ Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_IPA_UTILS_H
+#define GCC_IPA_UTILS_H
+
+struct ipa_dfs_info {
+ int dfn_number;
+ int low_link;
+ /* This field will have the same value for any two nodes in the same strongly
+ connected component. */
+ int scc_no;
+ bool new_node;
+ bool on_stack;
+ struct cgraph_node* next_cycle;
+ void *aux;
+};
+
+
+/* In ipa-utils.cc */
+void ipa_print_order (FILE*, const char *, struct cgraph_node**, int);
+int ipa_reduced_postorder (struct cgraph_node **, bool,
+ bool (*ignore_edge) (struct cgraph_edge *));
+void ipa_free_postorder_info (void);
+vec<cgraph_node *> ipa_get_nodes_in_cycle (struct cgraph_node *);
+bool ipa_edge_within_scc (struct cgraph_edge *);
+int ipa_reverse_postorder (struct cgraph_node **);
+tree get_base_var (tree);
+void ipa_merge_profiles (struct cgraph_node *dst,
+ struct cgraph_node *src, bool preserve_body = false);
+bool recursive_call_p (tree, tree);
+bool stmt_may_terminate_function_p (function *fun, gimple *stmt, bool assume_return_or_eh);
+bitmap find_always_executed_bbs (function *fun, bool assume_return_or_eh);
+
+/* In ipa-pure-const.cc */
+bool finite_function_p ();
+bool builtin_safe_for_const_function_p (bool *, tree);
+bool ipa_make_function_const (cgraph_node *, bool, bool);
+bool ipa_make_function_pure (cgraph_node *, bool, bool);
+
+/* In ipa-profile.cc */
+bool ipa_propagate_frequency (struct cgraph_node *node);
+
+/* In ipa-devirt.cc */
+
+struct odr_type_d;
+typedef odr_type_d *odr_type;
+extern bool thunk_expansion;
+void build_type_inheritance_graph (void);
+void rebuild_type_inheritance_graph (void);
+void update_type_inheritance_graph (void);
+vec <cgraph_node *>
+possible_polymorphic_call_targets (tree, HOST_WIDE_INT,
+ ipa_polymorphic_call_context,
+ bool *completep = NULL,
+ void **cache_token = NULL,
+ bool speculative = false);
+odr_type get_odr_type (tree, bool insert = false);
+bool odr_type_p (const_tree);
+bool possible_polymorphic_call_target_p (tree ref, gimple *stmt, struct cgraph_node *n);
+void dump_possible_polymorphic_call_targets (FILE *, tree, HOST_WIDE_INT,
+ const ipa_polymorphic_call_context &,
+ bool verbose = true);
+bool possible_polymorphic_call_target_p (tree, HOST_WIDE_INT,
+ const ipa_polymorphic_call_context &,
+ struct cgraph_node *);
+tree polymorphic_ctor_dtor_p (tree, bool);
+tree inlined_polymorphic_ctor_dtor_block_p (tree, bool);
+bool decl_maybe_in_construction_p (tree, tree, gimple *, tree);
+tree vtable_pointer_value_to_binfo (const_tree);
+bool vtable_pointer_value_to_vtable (const_tree, tree *, unsigned HOST_WIDE_INT *);
+tree subbinfo_with_vtable_at_offset (tree, unsigned HOST_WIDE_INT, tree);
+void compare_virtual_tables (varpool_node *, varpool_node *);
+bool type_all_derivations_known_p (const_tree);
+bool type_known_to_have_no_derivations_p (tree);
+bool contains_polymorphic_type_p (const_tree);
+void register_odr_type (tree);
+bool types_must_be_same_for_odr (tree, tree);
+bool types_odr_comparable (tree, tree);
+cgraph_node *try_speculative_devirtualization (tree, HOST_WIDE_INT,
+ ipa_polymorphic_call_context);
+void warn_types_mismatch (tree t1, tree t2, location_t loc1 = UNKNOWN_LOCATION,
+ location_t loc2 = UNKNOWN_LOCATION);
+bool odr_or_derived_type_p (const_tree t);
+bool odr_types_equivalent_p (tree type1, tree type2);
+bool odr_type_violation_reported_p (tree type);
+tree prevailing_odr_type (tree type);
+void enable_odr_based_tbaa (tree type);
+bool odr_based_tbaa_p (const_tree type);
+void set_type_canonical_for_odr_type (tree type, tree canonical);
+
+void register_odr_enum (tree type);
+
+/* Return vector containing possible targets of polymorphic call E.
+ If COMPLETEP is non-NULL, store true if the list is complete.
+ CACHE_TOKEN (if non-NULL) will get stored to a unique ID of the entry
+ in the target cache. If the user needs to visit every target list
+ just once, it can memoize them.
+
+ The returned vector is placed into the cache. It is NOT the caller's
+ responsibility to free it. The vector can be freed on a cgraph_remove_node
+ call if the particular node is a virtual function present in the cache. */
+
+inline vec <cgraph_node *>
+possible_polymorphic_call_targets (struct cgraph_edge *e,
+ bool *completep = NULL,
+ void **cache_token = NULL,
+ bool speculative = false)
+{
+ ipa_polymorphic_call_context context(e);
+
+ return possible_polymorphic_call_targets (e->indirect_info->otr_type,
+ e->indirect_info->otr_token,
+ context,
+ completep, cache_token,
+ speculative);
+}
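+
+/* Usage sketch (illustrative only): a pass looking at an indirect edge E can
+ devirtualize when the target list is known to be complete and only one
+ target remains:
+
+ bool final;
+ vec <cgraph_node *> targets
+ = possible_polymorphic_call_targets (e, &final);
+ if (final && targets.length () == 1)
+ ... E can be turned into a direct call to targets[0] ...
+*/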
+
+/* Same as above but taking an OBJ_TYPE_REF as a parameter. */
+
+inline vec <cgraph_node *>
+possible_polymorphic_call_targets (tree ref,
+ gimple *call,
+ bool *completep = NULL,
+ void **cache_token = NULL)
+{
+ ipa_polymorphic_call_context context (current_function_decl, ref, call);
+
+ return possible_polymorphic_call_targets (obj_type_ref_class (ref),
+ tree_to_uhwi
+ (OBJ_TYPE_REF_TOKEN (ref)),
+ context,
+ completep, cache_token);
+}
+
+/* Dump possible targets of a polymorphic call E into F. */
+
+inline void
+dump_possible_polymorphic_call_targets (FILE *f, struct cgraph_edge *e,
+ bool verbose = true)
+{
+ ipa_polymorphic_call_context context(e);
+
+ dump_possible_polymorphic_call_targets (f, e->indirect_info->otr_type,
+ e->indirect_info->otr_token,
+ context, verbose);
+}
+
+/* Return true if N can be possibly target of a polymorphic call of
+ E. */
+
+inline bool
+possible_polymorphic_call_target_p (struct cgraph_edge *e,
+ struct cgraph_node *n)
+{
+ ipa_polymorphic_call_context context(e);
+
+ return possible_polymorphic_call_target_p (e->indirect_info->otr_type,
+ e->indirect_info->otr_token,
+ context, n);
+}
+
+/* Return true if BINFO corresponds to a type with virtual methods.
+
+ Every type has several BINFOs. One is the BINFO associated with the type
+ itself, while the others represent its bases within derived types. The
+ BINFOs representing bases do not have the BINFO_VTABLE pointer set in the
+ single-inheritance case (because vtables are shared). Look up the BINFO
+ of the type and check the presence of its vtable. */
+
+inline bool
+polymorphic_type_binfo_p (const_tree binfo)
+{
+ return (BINFO_TYPE (binfo) && TYPE_BINFO (BINFO_TYPE (binfo))
+ && BINFO_VTABLE (TYPE_BINFO (BINFO_TYPE (binfo))));
+}
+
+/* Return true if T is a type with linkage defined. */
+
+inline bool
+type_with_linkage_p (const_tree t)
+{
+ gcc_checking_assert (TYPE_MAIN_VARIANT (t) == t);
+ if (!TYPE_NAME (t) || TREE_CODE (TYPE_NAME (t)) != TYPE_DECL)
+ return false;
+
+ /* After free_lang_data was run we can recognize
+ types with linkage by presence of mangled name. */
+ if (DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t)))
+ return true;
+
+ if (in_lto_p)
+ return false;
+
+ /* We used to check for TYPE_STUB_DECL but that is set to NULL for forward
+ declarations. */
+
+ if (!RECORD_OR_UNION_TYPE_P (t) && TREE_CODE (t) != ENUMERAL_TYPE)
+ return false;
+
+ /* Builtin types do not define linkage, their TYPE_CONTEXT is NULL. */
+ if (!TYPE_CONTEXT (t))
+ return false;
+
+ return true;
+}
+
+/* Return true if T is in an anonymous namespace.
+ This works only on those C++ types with linkage defined. */
+
+inline bool
+type_in_anonymous_namespace_p (const_tree t)
+{
+ gcc_checking_assert (type_with_linkage_p (t));
+
+ /* free_lang_data clears TYPE_STUB_DECL but sets assembler name to
+ "<anon>" */
+ if (DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t)))
+ return !strcmp ("<anon>",
+ IDENTIFIER_POINTER
+ (DECL_ASSEMBLER_NAME (TYPE_NAME (t))));
+ else if (!TYPE_STUB_DECL (t))
+ return false;
+ else
+ return !TREE_PUBLIC (TYPE_STUB_DECL (t));
+}
+
+/* Return true if T is a type with One Definition Rule info attached.
+ It means that either it is an anonymous type or it has its assembler
+ name set. */
+
+inline bool
+odr_type_p (const_tree t)
+{
+ /* We do not have this information when not in LTO, but we do not need
+ to care, since it is used only for type merging. */
+ gcc_checking_assert (in_lto_p || flag_lto || flag_generate_offload);
+ return TYPE_NAME (t) && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL
+ && DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t));
+}
+
+/* If TYPE has mangled ODR name, return it. Otherwise return NULL.
+ The function works only when free_lang_data is run. */
+
+inline const char *
+get_odr_name_for_type (tree type)
+{
+ tree type_name = TYPE_NAME (type);
+ if (type_name == NULL_TREE
+ || TREE_CODE (type_name) != TYPE_DECL
+ || !DECL_ASSEMBLER_NAME_SET_P (type_name))
+ return NULL;
+
+ return IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (type_name));
+}
+
+/* Return true if we are going to do LTO streaming. */
+
+inline bool
+lto_streaming_expected_p ()
+{
+ /* Compilation before LTO streaming. */
+ if (flag_lto && !in_lto_p && symtab->state < IPA_SSA_AFTER_INLINING)
+ return true;
+ /* WPA or incremental link. */
+ return (flag_wpa || flag_incremental_link == INCREMENTAL_LINK_LTO);
+}
+
+#endif /* GCC_IPA_UTILS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ira-int.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ira-int.h
new file mode 100644
index 0000000..e2de472
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ira-int.h
@@ -0,0 +1,1711 @@
+/* Integrated Register Allocator (IRA) intercommunication header file.
+ Copyright (C) 2006-2023 Free Software Foundation, Inc.
+ Contributed by Vladimir Makarov <vmakarov@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_IRA_INT_H
+#define GCC_IRA_INT_H
+
+#include "recog.h"
+#include "function-abi.h"
+
+/* To provide consistency in naming, all IRA external variables,
+ functions, common typedefs start with prefix ira_. */
+
+#if CHECKING_P
+#define ENABLE_IRA_CHECKING
+#endif
+
+#ifdef ENABLE_IRA_CHECKING
+#define ira_assert(c) gcc_assert (c)
+#else
+/* Always define and include C, so that warnings for empty body in an
+ 'if' statement and unused variable do not occur. */
+#define ira_assert(c) ((void)(0 && (c)))
+#endif
+
+/* Compute register frequency from edge frequency FREQ. It is
+ analogous to REG_FREQ_FROM_BB. When optimizing for size, or when
+ profile-driven feedback is available and the function is never
+ executed, the frequency is always equivalent. Otherwise rescale the
+ edge frequency. */
+#define REG_FREQ_FROM_EDGE_FREQ(freq) \
+ (optimize_function_for_size_p (cfun) \
+ ? REG_FREQ_MAX : (freq * REG_FREQ_MAX / BB_FREQ_MAX) \
+ ? (freq * REG_FREQ_MAX / BB_FREQ_MAX) : 1)
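+
+/* Worked example (illustrative only, assuming REG_FREQ_MAX == 1000 and
+ BB_FREQ_MAX == 10000): an edge frequency of 500 is rescaled to
+ 500 * 1000 / 10000 == 50, while a tiny but nonzero frequency whose scaled
+ value truncates to 0 is rounded up to 1 so it is never treated as dead. */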
+
+/* A modified value of flag `-fira-verbose' used internally. */
+extern int internal_flag_ira_verbose;
+
+/* Dump file of the allocator if it is not NULL. */
+extern FILE *ira_dump_file;
+
+/* Typedefs for pointers to allocno live range, allocno, and copy of
+ allocnos. */
+typedef struct live_range *live_range_t;
+typedef struct ira_allocno *ira_allocno_t;
+typedef struct ira_allocno_pref *ira_pref_t;
+typedef struct ira_allocno_copy *ira_copy_t;
+typedef struct ira_object *ira_object_t;
+
+/* Definition of vector of allocnos and copies. */
+
+/* Typedef for pointer to the subsequent structure. */
+typedef struct ira_loop_tree_node *ira_loop_tree_node_t;
+
+typedef unsigned short move_table[N_REG_CLASSES];
+
+/* In the general case, IRA is a regional allocator. The regions are
+ nested and form a tree. Currently regions are natural loops. The
+ following structure describes a loop tree node (representing a basic
+ block or a loop). We need such a tree because the loop tree from
+ cfgloop.h is not convenient for the optimization: basic blocks are
+ not a part of the tree from cfgloop.h. We also use the nodes for
+ storing additional information about basic blocks/loops for the
+ register allocation purposes. */
+struct ira_loop_tree_node
+{
+ /* The node represents a basic block if children == NULL. */
+ basic_block bb; /* NULL for loop. */
+ /* NULL for BB or for loop tree root if we did not build CFG loop tree. */
+ class loop *loop;
+ /* NEXT/SUBLOOP_NEXT is the next node/loop-node of the same parent.
+ SUBLOOP_NEXT is always NULL for BBs. */
+ ira_loop_tree_node_t subloop_next, next;
+ /* CHILDREN/SUBLOOPS is the first node/loop-node immediately inside
+ the node. They are NULL for BBs. */
+ ira_loop_tree_node_t subloops, children;
+ /* The node immediately containing given node. */
+ ira_loop_tree_node_t parent;
+
+ /* Loop level in range [0, ira_loop_tree_height). */
+ int level;
+
+ /* All the following members are defined only for nodes representing
+ loops. */
+
+ /* The loop number from CFG loop tree. The root number is 0. */
+ int loop_num;
+
+ /* True if the loop was marked for removal from the register
+ allocation. */
+ bool to_remove_p;
+
+ /* Allocnos in the loop corresponding to their regnos. If it is
+ NULL the loop does not form a separate register allocation region
+ (e.g. because it has abnormal enter/exit edges and we cannot put
+ code for register shuffling on the edges if a different
+ allocation is used for a pseudo-register on different sides of
+ the edges). Caps are not in the map (remember we can have more
+ than one cap with the same regno in a region). */
+ ira_allocno_t *regno_allocno_map;
+
+ /* True if there is an entry to the given loop not from its parent (or
+ grandparent) basic block. For example, this is possible for two
+ adjacent loops inside another loop. */
+ bool entered_from_non_parent_p;
+
+ /* Maximal register pressure inside loop for given register class
+ (defined only for the pressure classes). */
+ int reg_pressure[N_REG_CLASSES];
+
+ /* Numbers of allocnos referred or living in the loop node (except
+ for its subloops). */
+ bitmap all_allocnos;
+
+ /* Numbers of allocnos living at the loop borders. */
+ bitmap border_allocnos;
+
+ /* Regnos of pseudos modified in the loop node (including its
+ subloops). */
+ bitmap modified_regnos;
+
+ /* Numbers of copies referred in the corresponding loop. */
+ bitmap local_copies;
+};
+
+/* The root of the loop tree corresponding to the whole function. */
+extern ira_loop_tree_node_t ira_loop_tree_root;
+
+/* Height of the loop tree. */
+extern int ira_loop_tree_height;
+
+/* All nodes representing basic blocks are referred to through the
+ following array. We cannot use basic block member `aux' for this
+ because it is used for insertion of insns on edges. */
+extern ira_loop_tree_node_t ira_bb_nodes;
+
+/* Two access macros to the nodes representing basic blocks. */
+#if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
+#define IRA_BB_NODE_BY_INDEX(index) __extension__ \
+(({ ira_loop_tree_node_t _node = (&ira_bb_nodes[index]); \
+ if (_node->children != NULL || _node->loop != NULL || _node->bb == NULL)\
+ { \
+ fprintf (stderr, \
+ "\n%s: %d: error in %s: it is not a block node\n", \
+ __FILE__, __LINE__, __FUNCTION__); \
+ gcc_unreachable (); \
+ } \
+ _node; }))
+#else
+#define IRA_BB_NODE_BY_INDEX(index) (&ira_bb_nodes[index])
+#endif
+
+#define IRA_BB_NODE(bb) IRA_BB_NODE_BY_INDEX ((bb)->index)
+
+/* All nodes representing loops are referred to through the following
+ array. */
+extern ira_loop_tree_node_t ira_loop_nodes;
+
+/* Two access macros to the nodes representing loops. */
+#if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
+#define IRA_LOOP_NODE_BY_INDEX(index) __extension__ \
+(({ ira_loop_tree_node_t const _node = (&ira_loop_nodes[index]); \
+ if (_node->children == NULL || _node->bb != NULL \
+ || (_node->loop == NULL && current_loops != NULL)) \
+ { \
+ fprintf (stderr, \
+ "\n%s: %d: error in %s: it is not a loop node\n", \
+ __FILE__, __LINE__, __FUNCTION__); \
+ gcc_unreachable (); \
+ } \
+ _node; }))
+#else
+#define IRA_LOOP_NODE_BY_INDEX(index) (&ira_loop_nodes[index])
+#endif
+
+#define IRA_LOOP_NODE(loop) IRA_LOOP_NODE_BY_INDEX ((loop)->num)
+
+
+/* The structure describes program points where a given allocno
+ lives. If the live ranges of two allocnos intersect, the
+ allocnos are in conflict. */
+struct live_range
+{
+ /* Object whose live range is described by given structure. */
+ ira_object_t object;
+ /* Program point range. */
+ int start, finish;
+ /* Next structure describing program points where the allocno
+ lives. */
+ live_range_t next;
+ /* Pointer to structures with the same start/finish. */
+ live_range_t start_next, finish_next;
+};
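+
+/* For illustration (an editor's sketch, not part of the original
+ header): a pseudo live in two disjoint regions might be described
+ by the list [35,40] -> [10,20], ordered by decreasing program
+ points as required for ira_object live ranges below. Two range
+ lists can be tested for conflict with the helper declared later
+ in this file:
+
+ bool conflict_p = ira_live_ranges_intersect_p (r1, r2);
+
+ where R1 and R2 are hypothetical heads of the two lists. */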
+
+/* Program points are enumerated by numbers from the range
+ 0..IRA_MAX_POINT-1. There are approximately two times more
+ program points than insns. Program points are places in the
+ program where liveness info can be changed. In the most general
+ case (there are more complicated cases too) some program points
+ correspond to places where an input operand dies and other ones
+ correspond to places where output operands are born. */
+extern int ira_max_point;
+
+/* Arrays of size IRA_MAX_POINT mapping a program point to the allocno
+ live ranges with given start/finish point. */
+extern live_range_t *ira_start_point_ranges, *ira_finish_point_ranges;
+
+/* A structure representing conflict information for an allocno
+ (or one of its subwords). */
+struct ira_object
+{
+ /* The allocno associated with this record. */
+ ira_allocno_t allocno;
+ /* Vector of accumulated conflicting conflict records with a NULL
+ end marker (if OBJECT_CONFLICT_VEC_P is true), or a conflict bit
+ vector otherwise. */
+ void *conflicts_array;
+ /* Pointer to structures describing at what program points the
+ object lives. We always maintain the list so that *the ranges
+ in the list do not intersect and are ordered by decreasing
+ program points*. */
+ live_range_t live_ranges;
+ /* The subword within ALLOCNO which is represented by this object.
+ Zero means the lowest-order subword (or the entire allocno in case
+ it is not being tracked in subwords). */
+ int subword;
+ /* Allocated size of the conflicts array. */
+ unsigned int conflicts_array_size;
+ /* A unique number for every instance of this structure, which is used
+ to represent it in conflict bit vectors. */
+ int id;
+ /* Before building conflicts, MIN and MAX are initialized to
+ correspondingly minimal and maximal points of the accumulated
+ live ranges. Afterwards, they hold the minimal and maximal ids
+ of other ira_objects that this one can conflict with. */
+ int min, max;
+ /* Initial and accumulated hard registers conflicting with this
+ object, which as a consequence cannot be assigned to the allocno.
+ All non-allocatable hard regs and hard regs of register classes
+ different from the allocno's class are included in the sets. */
+ HARD_REG_SET conflict_hard_regs, total_conflict_hard_regs;
+ /* Number of accumulated conflicts in the vector of conflicting
+ objects. */
+ int num_accumulated_conflicts;
+ /* TRUE if conflicts are represented by a vector of pointers to
+ ira_object structures. Otherwise, we use a bit vector indexed
+ by conflict ID numbers. */
+ unsigned int conflict_vec_p : 1;
+};
+
+/* A structure representing an allocno (allocation entity). An
+ allocno represents a pseudo-register in an allocation region. If
+ a pseudo-register does not live in a region itself but lives in
+ nested regions, it is represented in the region by a special
+ allocno called a *cap*. There may be more than one cap
+ representing the same pseudo-register in a region; it means that
+ the corresponding pseudo-register lives in more than one
+ non-intersecting subregion. */
+struct ira_allocno
+{
+ /* The allocno order number starting with 0. Each allocno has a
+ unique number and the number is never changed for the
+ allocno. */
+ int num;
+ /* Regno for allocno or cap. */
+ int regno;
+ /* Mode of the allocno which is the mode of the corresponding
+ pseudo-register. */
+ ENUM_BITFIELD (machine_mode) mode : 8;
+ /* Widest mode of the allocno; in at least one case it can differ
+ from MODE, namely for paradoxical subregs where wmode > mode. */
+ ENUM_BITFIELD (machine_mode) wmode : 8;
+ /* Register class which should be used for allocation for given
+ allocno. NO_REGS means that we should use memory. */
+ ENUM_BITFIELD (reg_class) aclass : 16;
+ /* A bitmask of the ABIs used by calls that occur while the allocno
+ is live. */
+ unsigned int crossed_calls_abis : NUM_ABI_IDS;
+ /* During the reload, value TRUE means that we should not reassign
+ a hard register to an allocno that got memory earlier. It is set
+ up when we remove memory-memory move insns before each iteration
+ of the reload. */
+ unsigned int dont_reassign_p : 1;
+#ifdef STACK_REGS
+ /* Set to TRUE if the allocno can't be assigned to a stack hard
+ register, correspondingly in this region alone and in the area
+ including the region and all its subregions recursively. */
+ unsigned int no_stack_reg_p : 1, total_no_stack_reg_p : 1;
+#endif
+ /* TRUE means that it makes no sense to spill the allocno during
+ coloring because the spill will result in additional reloads in
+ the reload pass. */
+ unsigned int bad_spill_p : 1;
+ /* TRUE if a hard register or memory has been assigned to the
+ allocno. */
+ unsigned int assigned_p : 1;
+ /* TRUE if conflicts for the given allocno are represented by a
+ vector of pointers to the conflicting allocnos. Otherwise, we
+ use a bit vector where a bit with a given index represents the
+ allocno with the same number. */
+ unsigned int conflict_vec_p : 1;
+ /* True if the parent loop has an allocno for the same register and
+ if the parent allocno's assignment might not be valid in this loop.
+ This means that we cannot merge this allocno and the parent allocno
+ together.
+
+ This is only ever true for non-cap allocnos. */
+ unsigned int might_conflict_with_parent_p : 1;
+ /* Hard register assigned to the given allocno. A negative value
+ means that memory was allocated to the allocno. During the
+ reload, a spilled allocno has a value equal to the corresponding
+ stack slot number (0, ...) - 2. Value -1 is used for allocnos
+ spilled by the reload (at this point the pseudo-register has only
+ one allocno) which did not get a stack slot yet. */
+ signed int hard_regno : 16;
+ /* Allocnos with the same regno are linked by the following member.
+ Allocnos corresponding to inner loops are first in the list (it
+ corresponds to a depth-first traversal of the loops). */
+ ira_allocno_t next_regno_allocno;
+ /* There may be different allocnos with the same regno in different
+ regions. Allocnos are bound to the corresponding loop tree node.
+ Pseudo-register may have only one regular allocno with given loop
+ tree node but more than one cap (see comments above). */
+ ira_loop_tree_node_t loop_tree_node;
+ /* Accumulated usage references of the allocno. Here and below,
+ the word 'accumulated' means info for the given region and all
+ nested subregions. In this case, it means the sum of the
+ references of the corresponding pseudo-register in this region
+ and in all nested subregions recursively. */
+ int nrefs;
+ /* Accumulated frequency of usage of the allocno. */
+ int freq;
+ /* Minimal accumulated and updated costs of using a register of
+ the allocno class. */
+ int class_cost, updated_class_cost;
+ /* Minimal accumulated and updated costs of memory for the allocno.
+ At the allocation start, the original and updated costs are
+ equal. The updated cost may be changed after finishing
+ allocation in a region and starting allocation in a subregion.
+ The change reflects the cost of spill/restore code on the
+ subregion border if we assign memory to the pseudo in the
+ subregion. */
+ int memory_cost, updated_memory_cost;
+ /* Accumulated number of points where the allocno lives and there
+ is excess pressure for its class. Excess pressure for a register
+ class at some point means that there are more allocnos of the
+ given register class living at the point than the number of hard
+ registers of the class available for the allocation. */
+ int excess_pressure_points_num;
+ /* Allocno hard reg preferences. */
+ ira_pref_t allocno_prefs;
+ /* Copies to other non-conflicting allocnos. The copies can
+ represent move insns or potential move insns, usually arising
+ from two operand insn constraints. */
+ ira_copy_t allocno_copies;
+ /* An allocno (cap) representing the given allocno at the upper
+ loop tree level. */
+ ira_allocno_t cap;
+ /* A link to the allocno (cap) at the lower loop level represented
+ by the given cap. Null if the given allocno is not a cap. */
+ ira_allocno_t cap_member;
+ /* The number of objects tracked in the following array. */
+ int num_objects;
+ /* An array of structures describing conflict information and live
+ ranges for each object associated with the allocno. There may be
+ more than one such object in cases where the allocno represents a
+ multi-word register. */
+ ira_object_t objects[2];
+ /* Accumulated frequency of calls which given allocno
+ intersects. */
+ int call_freq;
+ /* Accumulated number of the intersected calls. */
+ int calls_crossed_num;
+ /* The number of calls across which it is live, but which should not
+ affect register preferences. */
+ int cheap_calls_crossed_num;
+ /* Registers clobbered by intersected calls. */
+ HARD_REG_SET crossed_calls_clobbered_regs;
+ /* Array of usage costs (accumulated and the one updated during
+ coloring) for each hard register of the allocno class. The
+ member value can be NULL if all costs are the same and equal to
+ CLASS_COST. For example, the costs of two different hard
+ registers can be different if one hard register is callee-saved
+ and another one is callee-used and the allocno lives through
+ calls. Another example is the case when for some insn the
+ corresponding pseudo-register value should be put in a specific
+ register class (e.g. AREG for x86) which is a strict subset of
+ the allocno class (GENERAL_REGS for x86). We have updated costs
+ to reflect the situation when the usage cost of a hard register
+ is decreased because the allocno is connected to another allocno
+ by a copy and that other allocno has been assigned the hard
+ register. */
+ int *hard_reg_costs, *updated_hard_reg_costs;
+ /* Array of decreasing costs (accumulated and the one updated
+ during coloring) for allocnos conflicting with the given allocno
+ for each hard regno of the allocno class. The member value can
+ be NULL if all costs are the same. These costs are used to
+ reflect the preferences of other, not yet assigned allocnos while
+ assigning to the given allocno. */
+ int *conflict_hard_reg_costs, *updated_conflict_hard_reg_costs;
+ /* Different additional data. It is used to decrease the size of
+ the allocno data footprint. */
+ void *add_data;
+};
+
+
+/* All members of the allocno structures should be accessed only
+ through the following macros. */
+#define ALLOCNO_NUM(A) ((A)->num)
+#define ALLOCNO_REGNO(A) ((A)->regno)
+#define ALLOCNO_REG(A) ((A)->reg)
+#define ALLOCNO_NEXT_REGNO_ALLOCNO(A) ((A)->next_regno_allocno)
+#define ALLOCNO_LOOP_TREE_NODE(A) ((A)->loop_tree_node)
+#define ALLOCNO_CAP(A) ((A)->cap)
+#define ALLOCNO_CAP_MEMBER(A) ((A)->cap_member)
+#define ALLOCNO_NREFS(A) ((A)->nrefs)
+#define ALLOCNO_FREQ(A) ((A)->freq)
+#define ALLOCNO_MIGHT_CONFLICT_WITH_PARENT_P(A) \
+ ((A)->might_conflict_with_parent_p)
+#define ALLOCNO_HARD_REGNO(A) ((A)->hard_regno)
+#define ALLOCNO_CALL_FREQ(A) ((A)->call_freq)
+#define ALLOCNO_CALLS_CROSSED_NUM(A) ((A)->calls_crossed_num)
+#define ALLOCNO_CHEAP_CALLS_CROSSED_NUM(A) ((A)->cheap_calls_crossed_num)
+#define ALLOCNO_CROSSED_CALLS_ABIS(A) ((A)->crossed_calls_abis)
+#define ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS(A) \
+ ((A)->crossed_calls_clobbered_regs)
+#define ALLOCNO_MEM_OPTIMIZED_DEST(A) ((A)->mem_optimized_dest)
+#define ALLOCNO_MEM_OPTIMIZED_DEST_P(A) ((A)->mem_optimized_dest_p)
+#define ALLOCNO_SOMEWHERE_RENAMED_P(A) ((A)->somewhere_renamed_p)
+#define ALLOCNO_CHILD_RENAMED_P(A) ((A)->child_renamed_p)
+#define ALLOCNO_DONT_REASSIGN_P(A) ((A)->dont_reassign_p)
+#ifdef STACK_REGS
+#define ALLOCNO_NO_STACK_REG_P(A) ((A)->no_stack_reg_p)
+#define ALLOCNO_TOTAL_NO_STACK_REG_P(A) ((A)->total_no_stack_reg_p)
+#endif
+#define ALLOCNO_BAD_SPILL_P(A) ((A)->bad_spill_p)
+#define ALLOCNO_ASSIGNED_P(A) ((A)->assigned_p)
+#define ALLOCNO_MODE(A) ((A)->mode)
+#define ALLOCNO_WMODE(A) ((A)->wmode)
+#define ALLOCNO_PREFS(A) ((A)->allocno_prefs)
+#define ALLOCNO_COPIES(A) ((A)->allocno_copies)
+#define ALLOCNO_HARD_REG_COSTS(A) ((A)->hard_reg_costs)
+#define ALLOCNO_UPDATED_HARD_REG_COSTS(A) ((A)->updated_hard_reg_costs)
+#define ALLOCNO_CONFLICT_HARD_REG_COSTS(A) \
+ ((A)->conflict_hard_reg_costs)
+#define ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS(A) \
+ ((A)->updated_conflict_hard_reg_costs)
+#define ALLOCNO_CLASS(A) ((A)->aclass)
+#define ALLOCNO_CLASS_COST(A) ((A)->class_cost)
+#define ALLOCNO_UPDATED_CLASS_COST(A) ((A)->updated_class_cost)
+#define ALLOCNO_MEMORY_COST(A) ((A)->memory_cost)
+#define ALLOCNO_UPDATED_MEMORY_COST(A) ((A)->updated_memory_cost)
+#define ALLOCNO_EXCESS_PRESSURE_POINTS_NUM(A) \
+ ((A)->excess_pressure_points_num)
+#define ALLOCNO_OBJECT(A,N) ((A)->objects[N])
+#define ALLOCNO_NUM_OBJECTS(A) ((A)->num_objects)
+#define ALLOCNO_ADD_DATA(A) ((A)->add_data)
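+
+/* Usage sketch (illustrative only): all field accesses go through
+ the macros above. For example, reading an allocno's basic
+ properties, assuming A is a valid ira_allocno_t:
+
+ int regno = ALLOCNO_REGNO (a);
+ bool in_memory_p = ALLOCNO_HARD_REGNO (a) < 0;
+ int mem_cost = ALLOCNO_MEMORY_COST (a);
+*/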
+
+/* Typedef for pointer to the subsequent structure. */
+typedef struct ira_emit_data *ira_emit_data_t;
+
+/* Allocno-bound data used to emit pseudo live range split insns
+ and to flatten the IR. */
+struct ira_emit_data
+{
+ /* TRUE if the allocno assigned to memory was a destination of
+ removed move (see ira-emit.cc) at loop exit because the value of
+ the corresponding pseudo-register is not changed inside the
+ loop. */
+ unsigned int mem_optimized_dest_p : 1;
+ /* TRUE if the corresponding pseudo-register has disjoint live
+ ranges and the other allocnos of the pseudo-register (except
+ this one) changed REG. */
+ unsigned int somewhere_renamed_p : 1;
+ /* TRUE if an allocno with the same REGNO in a subregion has been
+ renamed, in other words, got a new pseudo-register. */
+ unsigned int child_renamed_p : 1;
+ /* Final rtx representation of the allocno. */
+ rtx reg;
+ /* Non-NULL if we removed restoring the value of the given allocno
+ to MEM_OPTIMIZED_DEST at loop exit (see ira-emit.cc) because the
+ allocno value is not changed inside the loop. */
+ ira_allocno_t mem_optimized_dest;
+};
+
+#define ALLOCNO_EMIT_DATA(a) ((ira_emit_data_t) ALLOCNO_ADD_DATA (a))
+
+/* Data used to emit live range split insns and to flatten the IR. */
+extern ira_emit_data_t ira_allocno_emit_data;
+
+/* Abbreviation for frequent emit data access. */
+inline rtx
+allocno_emit_reg (ira_allocno_t a)
+{
+ return ALLOCNO_EMIT_DATA (a)->reg;
+}
+
+#define OBJECT_ALLOCNO(O) ((O)->allocno)
+#define OBJECT_SUBWORD(O) ((O)->subword)
+#define OBJECT_CONFLICT_ARRAY(O) ((O)->conflicts_array)
+#define OBJECT_CONFLICT_VEC(O) ((ira_object_t *)(O)->conflicts_array)
+#define OBJECT_CONFLICT_BITVEC(O) ((IRA_INT_TYPE *)(O)->conflicts_array)
+#define OBJECT_CONFLICT_ARRAY_SIZE(O) ((O)->conflicts_array_size)
+#define OBJECT_CONFLICT_VEC_P(O) ((O)->conflict_vec_p)
+#define OBJECT_NUM_CONFLICTS(O) ((O)->num_accumulated_conflicts)
+#define OBJECT_CONFLICT_HARD_REGS(O) ((O)->conflict_hard_regs)
+#define OBJECT_TOTAL_CONFLICT_HARD_REGS(O) ((O)->total_conflict_hard_regs)
+#define OBJECT_MIN(O) ((O)->min)
+#define OBJECT_MAX(O) ((O)->max)
+#define OBJECT_CONFLICT_ID(O) ((O)->id)
+#define OBJECT_LIVE_RANGES(O) ((O)->live_ranges)
+
+/* Map regno -> allocnos with given regno (see comments for
+ allocno member `next_regno_allocno'). */
+extern ira_allocno_t *ira_regno_allocno_map;
+
+/* Array of references to all allocnos. The order number of the
+ allocno corresponds to the index in the array. Removed allocnos
+ have NULL element value. */
+extern ira_allocno_t *ira_allocnos;
+
+/* The size of the previous array. */
+extern int ira_allocnos_num;
+
+/* Map a conflict id to its corresponding ira_object structure. */
+extern ira_object_t *ira_object_id_map;
+
+/* The size of the previous array. */
+extern int ira_objects_num;
+
+/* The following structure represents a hard register preference of
+ an allocno. Preferences represent move insns or potential move
+ insns, usually arising from two operand insn constraints, where
+ one move operand is a hard register. */
+struct ira_allocno_pref
+{
+ /* The unique order number of the preference node starting with 0. */
+ int num;
+ /* Preferred hard register. */
+ int hard_regno;
+ /* Accumulated execution frequency of the insns from which the
+ preference was created. */
+ int freq;
+ /* Given allocno. */
+ ira_allocno_t allocno;
+ /* All preferences with the same allocno are linked by the following
+ member. */
+ ira_pref_t next_pref;
+};
+
+/* Array of references to all allocno preferences. The order number
+ of the preference corresponds to the index in the array. */
+extern ira_pref_t *ira_prefs;
+
+/* Size of the previous array. */
+extern int ira_prefs_num;
+
+/* The following structure represents a copy involving two allocnos.
+ Copies represent move insns or potential move insns, usually
+ arising from two operand insn constraints. To remove register
+ shuffling, we also create copies between the allocno which is the
+ output of an insn and an allocno becoming dead in the insn. */
+struct ira_allocno_copy
+{
+ /* The unique order number of the copy node starting with 0. */
+ int num;
+ /* Allocnos connected by the copy. The first allocno should have
+ smaller order number than the second one. */
+ ira_allocno_t first, second;
+ /* Execution frequency of the copy. */
+ int freq;
+ /* True if the copy was created because of two operand insn
+ constraints (see comments above) rather than from a move insn. */
+ bool constraint_p;
+ /* The move insn which is the origin of the copy. The member value
+ is NULL for a copy representing two operand insn constraints or
+ for a copy created to remove register shuffling. In the last
+ case the copy frequency is smaller than the corresponding insn
+ execution frequency. */
+ rtx_insn *insn;
+ /* All copies with the same allocno as FIRST are linked by the two
+ following members. */
+ ira_copy_t prev_first_allocno_copy, next_first_allocno_copy;
+ /* All copies with the same allocno as SECOND are linked by the two
+ following members. */
+ ira_copy_t prev_second_allocno_copy, next_second_allocno_copy;
+ /* Region from which the given copy originated. */
+ ira_loop_tree_node_t loop_tree_node;
+};
+
+/* Array of references to all copies. The order number of the copy
+ corresponds to the index in the array. Removed copies have NULL
+ element value. */
+extern ira_copy_t *ira_copies;
+
+/* Size of the previous array. */
+extern int ira_copies_num;
+
+/* The following structure describes a stack slot used for spilled
+ pseudo-registers. */
+class ira_spilled_reg_stack_slot
+{
+public:
+ /* Pseudo-registers assigned to the stack slot. */
+ bitmap_head spilled_regs;
+ /* RTL representation of the stack slot. */
+ rtx mem;
+ /* Size of the stack slot. */
+ poly_uint64_pod width;
+};
+
+/* The number of elements in the following array. */
+extern int ira_spilled_reg_stack_slots_num;
+
+/* The following array contains info about the stack slots used for
+ spilled pseudo-registers in the current function so far. */
+extern class ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
+
+/* Correspondingly: the overall cost of the allocation, the cost of
+ the allocnos assigned to hard registers, the cost of the allocnos
+ assigned to memory, and the costs of loads, stores and register
+ move insns generated for pseudo-register live range splitting
+ (see ira-emit.cc). */
+extern int64_t ira_overall_cost;
+extern int64_t ira_reg_cost, ira_mem_cost;
+extern int64_t ira_load_cost, ira_store_cost, ira_shuffle_cost;
+extern int ira_move_loops_num, ira_additional_jumps_num;
+
+
+/* This page contains a bitset implementation called 'min/max sets'
+ used to record conflicts in IRA.
+ They are named min/max sets since we keep track of a minimum and
+ a maximum bit number for each set, representing the bounds of
+ valid elements. Otherwise, the implementation resembles sbitmaps
+ in that we store an array of integers whose bits directly
+ represent the members of the set. */
+
+/* The type used as elements in the array, and the number of bits in
+ this type. */
+
+#define IRA_INT_BITS HOST_BITS_PER_WIDE_INT
+#define IRA_INT_TYPE HOST_WIDE_INT
+
+/* Set, clear or test bit number I in R, a bit vector of elements
+ whose minimal and maximal indices are MIN and MAX
+ respectively. */
+#if defined ENABLE_IRA_CHECKING && (GCC_VERSION >= 2007)
+
+#define SET_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__ \
+ (({ int _min = (MIN), _max = (MAX), _i = (I); \
+ if (_i < _min || _i > _max) \
+ { \
+ fprintf (stderr, \
+ "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
+ __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
+ gcc_unreachable (); \
+ } \
+ ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
+ |= ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
+
+
+#define CLEAR_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__ \
+ (({ int _min = (MIN), _max = (MAX), _i = (I); \
+ if (_i < _min || _i > _max) \
+ { \
+ fprintf (stderr, \
+ "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
+ __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
+ gcc_unreachable (); \
+ } \
+ ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
+ &= ~((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
+
+#define TEST_MINMAX_SET_BIT(R, I, MIN, MAX) __extension__ \
+ (({ int _min = (MIN), _max = (MAX), _i = (I); \
+ if (_i < _min || _i > _max) \
+ { \
+ fprintf (stderr, \
+ "\n%s: %d: error in %s: %d not in range [%d,%d]\n", \
+ __FILE__, __LINE__, __FUNCTION__, _i, _min, _max); \
+ gcc_unreachable (); \
+ } \
+ ((R)[(unsigned) (_i - _min) / IRA_INT_BITS] \
+ & ((IRA_INT_TYPE) 1 << ((unsigned) (_i - _min) % IRA_INT_BITS))); }))
+
+#else
+
+#define SET_MINMAX_SET_BIT(R, I, MIN, MAX) \
+ ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
+ |= ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
+
+#define CLEAR_MINMAX_SET_BIT(R, I, MIN, MAX) \
+ ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
+ &= ~((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
+
+#define TEST_MINMAX_SET_BIT(R, I, MIN, MAX) \
+ ((R)[(unsigned) ((I) - (MIN)) / IRA_INT_BITS] \
+ & ((IRA_INT_TYPE) 1 << ((unsigned) ((I) - (MIN)) % IRA_INT_BITS)))
+
+#endif
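+
+/* Minimal usage sketch (illustrative): R must point to enough
+ IRA_INT_TYPE words to cover bits MIN..MAX, as in the conflict bit
+ vectors allocated by ira_allocate_object_conflicts. Assuming
+ hypothetical locals VEC, I, MIN and MAX:
+
+ SET_MINMAX_SET_BIT (vec, i, min, max);
+ if (TEST_MINMAX_SET_BIT (vec, i, min, max))
+ CLEAR_MINMAX_SET_BIT (vec, i, min, max);
+*/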
+
+/* The iterator for min/max sets. */
+struct minmax_set_iterator {
+
+ /* Array containing the bit vector. */
+ IRA_INT_TYPE *vec;
+
+ /* The number of the current element in the vector. */
+ unsigned int word_num;
+
+ /* The number of bits in the bit vector. */
+ unsigned int nel;
+
+ /* The current bit index of the bit vector. */
+ unsigned int bit_num;
+
+ /* Index corresponding to the 1st bit of the bit vector. */
+ int start_val;
+
+ /* The word of the bit vector currently visited. */
+ unsigned IRA_INT_TYPE word;
+};
+
+/* Initialize the iterator I for bit vector VEC containing minimal and
+ maximal values MIN and MAX. */
+inline void
+minmax_set_iter_init (minmax_set_iterator *i, IRA_INT_TYPE *vec, int min,
+ int max)
+{
+ i->vec = vec;
+ i->word_num = 0;
+ i->nel = max < min ? 0 : max - min + 1;
+ i->start_val = min;
+ i->bit_num = 0;
+ i->word = i->nel == 0 ? 0 : vec[0];
+}
+
+/* Return TRUE if we have more elements to visit, in which case *N
+ is set to the number of the element to be visited. Otherwise,
+ return FALSE. */
+inline bool
+minmax_set_iter_cond (minmax_set_iterator *i, int *n)
+{
+ /* Skip words that are zeros. */
+ for (; i->word == 0; i->word = i->vec[i->word_num])
+ {
+ i->word_num++;
+ i->bit_num = i->word_num * IRA_INT_BITS;
+
+ /* If we have reached the end, break. */
+ if (i->bit_num >= i->nel)
+ return false;
+ }
+
+ /* Skip bits that are zero. */
+ int off = ctz_hwi (i->word);
+ i->bit_num += off;
+ i->word >>= off;
+
+ *n = (int) i->bit_num + i->start_val;
+
+ return true;
+}
+
+/* Advance to the next element in the set. */
+inline void
+minmax_set_iter_next (minmax_set_iterator *i)
+{
+ i->word >>= 1;
+ i->bit_num++;
+}
+
+/* Loop over all elements of a min/max set given by bit vector VEC
+ and their minimal and maximal values MIN and MAX. In each
+ iteration, N is set to the number of the next element. ITER is
+ an instance of minmax_set_iterator used to iterate over the
+ set. */
+#define FOR_EACH_BIT_IN_MINMAX_SET(VEC, MIN, MAX, N, ITER) \
+ for (minmax_set_iter_init (&(ITER), (VEC), (MIN), (MAX)); \
+ minmax_set_iter_cond (&(ITER), &(N)); \
+ minmax_set_iter_next (&(ITER)))
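+
+/* For example (an illustrative sketch; HANDLE_ELEMENT is a
+ hypothetical callback, VEC/MIN/MAX as for the macros above):
+
+ minmax_set_iterator si;
+ int n;
+ FOR_EACH_BIT_IN_MINMAX_SET (vec, min, max, n, si)
+ handle_element (n);
+*/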
+
+class target_ira_int {
+public:
+ ~target_ira_int ();
+
+ void free_ira_costs ();
+ void free_register_move_costs ();
+
+ /* Initialized once. It is the maximal possible size of an
+ allocated struct costs. */
+ size_t x_max_struct_costs_size;
+
+ /* Allocated and initialized once, and used to initialize cost values
+ for each insn. */
+ struct costs *x_init_cost;
+
+ /* Allocated once, and used for temporary purposes. */
+ struct costs *x_temp_costs;
+
+ /* Allocated once, and used for the cost calculation. */
+ struct costs *x_op_costs[MAX_RECOG_OPERANDS];
+ struct costs *x_this_op_costs[MAX_RECOG_OPERANDS];
+
+ /* Hard registers that cannot be used by the register allocator in
+ any function of the current compilation unit. */
+ HARD_REG_SET x_no_unit_alloc_regs;
+
+ /* Map: hard regs X modes -> set of hard registers for storing value
+ of given mode starting with given hard register. */
+ HARD_REG_SET (x_ira_reg_mode_hard_regset
+ [FIRST_PSEUDO_REGISTER][NUM_MACHINE_MODES]);
+
+ /* Maximum cost of moving from a register in one class to a register
+ in another class. Based on TARGET_REGISTER_MOVE_COST. */
+ move_table *x_ira_register_move_cost[MAX_MACHINE_MODE];
+
+ /* Similar, but here we don't have to move if the first index is a
+ subset of the second so in that case the cost is zero. */
+ move_table *x_ira_may_move_in_cost[MAX_MACHINE_MODE];
+
+ /* Similar, but here we don't have to move if the first index is a
+ superset of the second so in that case the cost is zero. */
+ move_table *x_ira_may_move_out_cost[MAX_MACHINE_MODE];
+
+ /* Keep track of the last mode we initialized move costs for. */
+ int x_last_mode_for_init_move_cost;
+
+ /* Array analog of the macro MEMORY_MOVE_COST, but containing the
+ maximal cost rather than the minimal one. */
+ short int x_ira_max_memory_move_cost[MAX_MACHINE_MODE][N_REG_CLASSES][2];
+
+ /* Map class->true if class is a possible allocno class, false
+ otherwise. */
+ bool x_ira_reg_allocno_class_p[N_REG_CLASSES];
+
+ /* Map class->true if class is a pressure class, false otherwise. */
+ bool x_ira_reg_pressure_class_p[N_REG_CLASSES];
+
+ /* Array of the number of hard registers of given class which are
+ available for allocation. The order is defined by the hard
+ register numbers. */
+ short x_ira_non_ordered_class_hard_regs[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
+
+ /* Index (in ira_class_hard_regs) for a given register class and
+ hard register (in the general case a hard register can belong to
+ several register classes). The index is negative for hard
+ registers unavailable for the allocation. */
+ short x_ira_class_hard_reg_index[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
+
+ /* Index [CL][M] contains R if R appears somewhere in a register of the form:
+
+ (reg:M R'), R' not in x_ira_prohibited_class_mode_regs[CL][M]
+
+ For example, if:
+
+ - (reg:M 2) is valid and occupies two registers;
+ - register 2 belongs to CL; and
+ - register 3 belongs to the same pressure class as CL
+
+ then (reg:M 2) contributes to [CL][M] and registers 2 and 3 will be
+ in the set. */
+ HARD_REG_SET x_ira_useful_class_mode_regs[N_REG_CLASSES][NUM_MACHINE_MODES];
+
+ /* The value is the number of elements in the subsequent array. */
+ int x_ira_important_classes_num;
+
+ /* The array containing all non-empty classes. Such classes are
+ important for calculating hard register usage costs. */
+ enum reg_class x_ira_important_classes[N_REG_CLASSES];
+
+ /* The array containing indexes of important classes in the previous
+ array. The array elements are defined only for important
+ classes. */
+ int x_ira_important_class_nums[N_REG_CLASSES];
+
+ /* Map class->true if class is a uniform class, false otherwise. */
+ bool x_ira_uniform_class_p[N_REG_CLASSES];
+
+ /* The biggest important class inside the intersection of the two
+ classes (calculated taking only hard registers available for
+ allocation into account). If both classes contain no hard
+ registers available for allocation, the value is calculated
+ taking all hard registers, including fixed ones, into account. */
+ enum reg_class x_ira_reg_class_intersect[N_REG_CLASSES][N_REG_CLASSES];
+
+ /* Classes with end marker LIM_REG_CLASSES which are intersected with
+ given class (the first index). That includes given class itself.
+ This is calculated taking only hard registers available for
+ allocation into account. */
+ enum reg_class x_ira_reg_class_super_classes[N_REG_CLASSES][N_REG_CLASSES];
+
+ /* The biggest (smallest) important class inside the (covering)
+ union of the two classes (calculated taking only hard registers
+ available for allocation into account). If both classes contain
+ no hard registers available for allocation, the value is
+ calculated taking all hard registers, including fixed ones, into
+ account. In other words, the value is the corresponding
+ reg_class_subunion (reg_class_superunion) value. */
+ enum reg_class x_ira_reg_class_subunion[N_REG_CLASSES][N_REG_CLASSES];
+ enum reg_class x_ira_reg_class_superunion[N_REG_CLASSES][N_REG_CLASSES];
+
+ /* For each reg class, a table listing all the classes contained in
+ it (excluding the class itself; non-allocatable registers are
+ excluded from consideration). */
+ enum reg_class x_alloc_reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES];
+
+ /* Array whose values are hard regsets of the hard registers for
+ which moving the hard register in the given mode into itself is
+ prohibited. */
+ HARD_REG_SET x_ira_prohibited_mode_move_regs[NUM_MACHINE_MODES];
+
+ /* Flag indicating that the above array has been initialized. */
+ bool x_ira_prohibited_mode_move_regs_initialized_p;
+};
+
+extern class target_ira_int default_target_ira_int;
+#if SWITCHABLE_TARGET
+extern class target_ira_int *this_target_ira_int;
+#else
+#define this_target_ira_int (&default_target_ira_int)
+#endif
+
+#define ira_reg_mode_hard_regset \
+ (this_target_ira_int->x_ira_reg_mode_hard_regset)
+#define ira_register_move_cost \
+ (this_target_ira_int->x_ira_register_move_cost)
+#define ira_max_memory_move_cost \
+ (this_target_ira_int->x_ira_max_memory_move_cost)
+#define ira_may_move_in_cost \
+ (this_target_ira_int->x_ira_may_move_in_cost)
+#define ira_may_move_out_cost \
+ (this_target_ira_int->x_ira_may_move_out_cost)
+#define ira_reg_allocno_class_p \
+ (this_target_ira_int->x_ira_reg_allocno_class_p)
+#define ira_reg_pressure_class_p \
+ (this_target_ira_int->x_ira_reg_pressure_class_p)
+#define ira_non_ordered_class_hard_regs \
+ (this_target_ira_int->x_ira_non_ordered_class_hard_regs)
+#define ira_class_hard_reg_index \
+ (this_target_ira_int->x_ira_class_hard_reg_index)
+#define ira_useful_class_mode_regs \
+ (this_target_ira_int->x_ira_useful_class_mode_regs)
+#define ira_important_classes_num \
+ (this_target_ira_int->x_ira_important_classes_num)
+#define ira_important_classes \
+ (this_target_ira_int->x_ira_important_classes)
+#define ira_important_class_nums \
+ (this_target_ira_int->x_ira_important_class_nums)
+#define ira_uniform_class_p \
+ (this_target_ira_int->x_ira_uniform_class_p)
+#define ira_reg_class_intersect \
+ (this_target_ira_int->x_ira_reg_class_intersect)
+#define ira_reg_class_super_classes \
+ (this_target_ira_int->x_ira_reg_class_super_classes)
+#define ira_reg_class_subunion \
+ (this_target_ira_int->x_ira_reg_class_subunion)
+#define ira_reg_class_superunion \
+ (this_target_ira_int->x_ira_reg_class_superunion)
+#define ira_prohibited_mode_move_regs \
+ (this_target_ira_int->x_ira_prohibited_mode_move_regs)
+
+/* ira.cc: */
+
+extern void *ira_allocate (size_t);
+extern void ira_free (void *addr);
+extern bitmap ira_allocate_bitmap (void);
+extern void ira_free_bitmap (bitmap);
+extern void ira_print_disposition (FILE *);
+extern void ira_debug_disposition (void);
+extern void ira_debug_allocno_classes (void);
+extern void ira_init_register_move_cost (machine_mode);
+extern alternative_mask ira_setup_alts (rtx_insn *);
+extern int ira_get_dup_out_num (int, alternative_mask, bool &);
+
+/* ira-build.cc */
+
+/* The current loop tree node and its regno allocno map. */
+extern ira_loop_tree_node_t ira_curr_loop_tree_node;
+extern ira_allocno_t *ira_curr_regno_allocno_map;
+
+extern void ira_debug_pref (ira_pref_t);
+extern void ira_debug_prefs (void);
+extern void ira_debug_allocno_prefs (ira_allocno_t);
+
+extern void ira_debug_copy (ira_copy_t);
+extern void debug (ira_allocno_copy &ref);
+extern void debug (ira_allocno_copy *ptr);
+
+extern void ira_debug_copies (void);
+extern void ira_debug_allocno_copies (ira_allocno_t);
+extern void debug (ira_allocno &ref);
+extern void debug (ira_allocno *ptr);
+
+extern void ira_traverse_loop_tree (bool, ira_loop_tree_node_t,
+ void (*) (ira_loop_tree_node_t),
+ void (*) (ira_loop_tree_node_t));
+extern ira_allocno_t ira_parent_allocno (ira_allocno_t);
+extern ira_allocno_t ira_parent_or_cap_allocno (ira_allocno_t);
+extern ira_allocno_t ira_create_allocno (int, bool, ira_loop_tree_node_t);
+extern void ira_create_allocno_objects (ira_allocno_t);
+extern void ira_set_allocno_class (ira_allocno_t, enum reg_class);
+extern bool ira_conflict_vector_profitable_p (ira_object_t, int);
+extern void ira_allocate_conflict_vec (ira_object_t, int);
+extern void ira_allocate_object_conflicts (ira_object_t, int);
+extern void ior_hard_reg_conflicts (ira_allocno_t, const_hard_reg_set);
+extern void ira_print_expanded_allocno (ira_allocno_t);
+extern void ira_add_live_range_to_object (ira_object_t, int, int);
+extern live_range_t ira_create_live_range (ira_object_t, int, int,
+ live_range_t);
+extern live_range_t ira_copy_live_range_list (live_range_t);
+extern live_range_t ira_merge_live_ranges (live_range_t, live_range_t);
+extern bool ira_live_ranges_intersect_p (live_range_t, live_range_t);
+extern void ira_finish_live_range (live_range_t);
+extern void ira_finish_live_range_list (live_range_t);
+extern void ira_free_allocno_updated_costs (ira_allocno_t);
+extern ira_pref_t ira_create_pref (ira_allocno_t, int, int);
+extern void ira_add_allocno_pref (ira_allocno_t, int, int);
+extern void ira_remove_pref (ira_pref_t);
+extern void ira_remove_allocno_prefs (ira_allocno_t);
+extern ira_copy_t ira_create_copy (ira_allocno_t, ira_allocno_t,
+ int, bool, rtx_insn *,
+ ira_loop_tree_node_t);
+extern ira_copy_t ira_add_allocno_copy (ira_allocno_t, ira_allocno_t, int,
+ bool, rtx_insn *,
+ ira_loop_tree_node_t);
+
+extern int *ira_allocate_cost_vector (reg_class_t);
+extern void ira_free_cost_vector (int *, reg_class_t);
+
+extern void ira_flattening (int, int);
+extern bool ira_build (void);
+extern void ira_destroy (void);
+
+/* ira-costs.cc */
+extern void ira_init_costs_once (void);
+extern void ira_init_costs (void);
+extern void ira_costs (void);
+extern void ira_tune_allocno_costs (void);
+
+/* ira-lives.cc */
+
+extern void ira_rebuild_start_finish_chains (void);
+extern void ira_print_live_range_list (FILE *, live_range_t);
+extern void debug (live_range &ref);
+extern void debug (live_range *ptr);
+extern void ira_debug_live_range_list (live_range_t);
+extern void ira_debug_allocno_live_ranges (ira_allocno_t);
+extern void ira_debug_live_ranges (void);
+extern void ira_create_allocno_live_ranges (void);
+extern void ira_compress_allocno_live_ranges (void);
+extern void ira_finish_allocno_live_ranges (void);
+extern void ira_implicitly_set_insn_hard_regs (HARD_REG_SET *,
+ alternative_mask);
+
+/* ira-conflicts.cc */
+extern void ira_debug_conflicts (bool);
+extern void ira_build_conflicts (void);
+
+/* ira-color.cc */
+extern ira_allocno_t ira_soft_conflict (ira_allocno_t, ira_allocno_t);
+extern void ira_debug_hard_regs_forest (void);
+extern int ira_loop_edge_freq (ira_loop_tree_node_t, int, bool);
+extern void ira_reassign_conflict_allocnos (int);
+extern void ira_initiate_assign (void);
+extern void ira_finish_assign (void);
+extern void ira_color (void);
+
+/* ira-emit.cc */
+extern void ira_initiate_emit_data (void);
+extern void ira_finish_emit_data (void);
+extern void ira_emit (bool);
+
+
+
+/* Return true if the equivalence of pseudo REGNO is not an lvalue. */
+inline bool
+ira_equiv_no_lvalue_p (int regno)
+{
+ if (regno >= ira_reg_equiv_len)
+ return false;
+ return (ira_reg_equiv[regno].constant != NULL_RTX
+ || ira_reg_equiv[regno].invariant != NULL_RTX
+ || (ira_reg_equiv[regno].memory != NULL_RTX
+ && MEM_READONLY_P (ira_reg_equiv[regno].memory)));
+}
+
+
+
+/* Initialize register costs for MODE if necessary. */
+inline void
+ira_init_register_move_cost_if_necessary (machine_mode mode)
+{
+ if (ira_register_move_cost[mode] == NULL)
+ ira_init_register_move_cost (mode);
+}
+
+
+
+/* The iterator for all allocnos. */
+struct ira_allocno_iterator {
+ /* The number of the current element in IRA_ALLOCNOS. */
+ int n;
+};
+
+/* Initialize the iterator I. */
+inline void
+ira_allocno_iter_init (ira_allocno_iterator *i)
+{
+ i->n = 0;
+}
+
+/* Return TRUE if we have more allocnos to visit, in which case *A is
+ set to the allocno to be visited. Otherwise, return FALSE. */
+inline bool
+ira_allocno_iter_cond (ira_allocno_iterator *i, ira_allocno_t *a)
+{
+ int n;
+
+ for (n = i->n; n < ira_allocnos_num; n++)
+ if (ira_allocnos[n] != NULL)
+ {
+ *a = ira_allocnos[n];
+ i->n = n + 1;
+ return true;
+ }
+ return false;
+}
+
+/* Loop over all allocnos. In each iteration, A is set to the next
+ allocno. ITER is an instance of ira_allocno_iterator used to iterate
+ the allocnos. */
+#define FOR_EACH_ALLOCNO(A, ITER) \
+ for (ira_allocno_iter_init (&(ITER)); \
+ ira_allocno_iter_cond (&(ITER), &(A));)
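+
+/* For example (illustrative), counting the allocnos that currently
+ live in memory, using the macro above together with
+ ALLOCNO_HARD_REGNO (negative means memory):
+
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
+ int spilled = 0;
+ FOR_EACH_ALLOCNO (a, ai)
+ if (ALLOCNO_HARD_REGNO (a) < 0)
+ spilled++;
+*/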
+
+/* The iterator for all objects. */
+struct ira_object_iterator {
+ /* The number of the current element in ira_object_id_map. */
+ int n;
+};
+
+/* Initialize the iterator I. */
+inline void
+ira_object_iter_init (ira_object_iterator *i)
+{
+ i->n = 0;
+}
+
+/* Return TRUE if we have more objects to visit, in which case *OBJ is
+ set to the object to be visited. Otherwise, return FALSE. */
+inline bool
+ira_object_iter_cond (ira_object_iterator *i, ira_object_t *obj)
+{
+ int n;
+
+ for (n = i->n; n < ira_objects_num; n++)
+ if (ira_object_id_map[n] != NULL)
+ {
+ *obj = ira_object_id_map[n];
+ i->n = n + 1;
+ return true;
+ }
+ return false;
+}
+
+/* Loop over all objects. In each iteration, OBJ is set to the next
+ object. ITER is an instance of ira_object_iterator used to iterate
+ the objects. */
+#define FOR_EACH_OBJECT(OBJ, ITER) \
+ for (ira_object_iter_init (&(ITER)); \
+ ira_object_iter_cond (&(ITER), &(OBJ));)
+
+/* The iterator for objects associated with an allocno. */
+struct ira_allocno_object_iterator {
+ /* The number of the current element in the allocno's object array. */
+ int n;
+};
+
+/* Initialize the iterator I. */
+inline void
+ira_allocno_object_iter_init (ira_allocno_object_iterator *i)
+{
+ i->n = 0;
+}
+
+/* Return TRUE if we have more objects to visit in allocno A, in which
+ case *O is set to the object to be visited. Otherwise, return
+ FALSE. */
+inline bool
+ira_allocno_object_iter_cond (ira_allocno_object_iterator *i, ira_allocno_t a,
+ ira_object_t *o)
+{
+ int n = i->n++;
+ if (n < ALLOCNO_NUM_OBJECTS (a))
+ {
+ *o = ALLOCNO_OBJECT (a, n);
+ return true;
+ }
+ return false;
+}
+
+/* Loop over all objects associated with allocno A. In each
+ iteration, O is set to the next object. ITER is an instance of
+ ira_allocno_object_iterator used to iterate over the objects. */
+#define FOR_EACH_ALLOCNO_OBJECT(A, O, ITER) \
+ for (ira_allocno_object_iter_init (&(ITER)); \
+ ira_allocno_object_iter_cond (&(ITER), (A), &(O));)
+
+
+/* The iterator for prefs. */
+struct ira_pref_iterator {
+ /* The number of the current element in IRA_PREFS. */
+ int n;
+};
+
+/* Initialize the iterator I. */
+inline void
+ira_pref_iter_init (ira_pref_iterator *i)
+{
+ i->n = 0;
+}
+
+/* Return TRUE if we have more prefs to visit, in which case *PREF is
+ set to the pref to be visited. Otherwise, return FALSE. */
+inline bool
+ira_pref_iter_cond (ira_pref_iterator *i, ira_pref_t *pref)
+{
+ int n;
+
+ for (n = i->n; n < ira_prefs_num; n++)
+ if (ira_prefs[n] != NULL)
+ {
+ *pref = ira_prefs[n];
+ i->n = n + 1;
+ return true;
+ }
+ return false;
+}
+
+/* Loop over all prefs. In each iteration, P is set to the next
+ pref. ITER is an instance of ira_pref_iterator used to iterate
+ the prefs. */
+#define FOR_EACH_PREF(P, ITER) \
+ for (ira_pref_iter_init (&(ITER)); \
+ ira_pref_iter_cond (&(ITER), &(P));)
+
+
+/* The iterator for copies. */
+struct ira_copy_iterator {
+ /* The number of the current element in IRA_COPIES. */
+ int n;
+};
+
+/* Initialize the iterator I. */
+inline void
+ira_copy_iter_init (ira_copy_iterator *i)
+{
+ i->n = 0;
+}
+
+/* Return TRUE if we have more copies to visit, in which case *CP is
+ set to the copy to be visited. Otherwise, return FALSE. */
+inline bool
+ira_copy_iter_cond (ira_copy_iterator *i, ira_copy_t *cp)
+{
+ int n;
+
+ for (n = i->n; n < ira_copies_num; n++)
+ if (ira_copies[n] != NULL)
+ {
+ *cp = ira_copies[n];
+ i->n = n + 1;
+ return true;
+ }
+ return false;
+}
+
+/* Loop over all copies. In each iteration, C is set to the next
+ copy. ITER is an instance of ira_copy_iterator used to iterate
+ the copies. */
+#define FOR_EACH_COPY(C, ITER) \
+ for (ira_copy_iter_init (&(ITER)); \
+ ira_copy_iter_cond (&(ITER), &(C));)
+
+/* The iterator for object conflicts. */
+struct ira_object_conflict_iterator {
+
+ /* TRUE if the conflicts are represented by a vector of objects. */
+ bool conflict_vec_p;
+
+ /* The conflict vector or conflict bit vector. */
+ void *vec;
+
+ /* The number of the current element in the vector (of type
+ ira_object_t or IRA_INT_TYPE). */
+ unsigned int word_num;
+
+ /* The bit vector size. It is defined only if
+ OBJECT_CONFLICT_VEC_P is FALSE. */
+ unsigned int size;
+
+ /* The current bit index of bit vector. It is defined only if
+ OBJECT_CONFLICT_VEC_P is FALSE. */
+ unsigned int bit_num;
+
+ /* The object id corresponding to the 1st bit of the bit vector. It
+ is defined only if OBJECT_CONFLICT_VEC_P is FALSE. */
+ int base_conflict_id;
+
+ /* The word of bit vector currently visited. It is defined only if
+ OBJECT_CONFLICT_VEC_P is FALSE. */
+ unsigned IRA_INT_TYPE word;
+};
+
+/* Initialize the iterator I with the conflicts of object OBJ. */
+inline void
+ira_object_conflict_iter_init (ira_object_conflict_iterator *i,
+ ira_object_t obj)
+{
+ i->conflict_vec_p = OBJECT_CONFLICT_VEC_P (obj);
+ i->vec = OBJECT_CONFLICT_ARRAY (obj);
+ i->word_num = 0;
+ if (i->conflict_vec_p)
+ i->size = i->bit_num = i->base_conflict_id = i->word = 0;
+ else
+ {
+ if (OBJECT_MIN (obj) > OBJECT_MAX (obj))
+ i->size = 0;
+ else
+ i->size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj)
+ + IRA_INT_BITS)
+ / IRA_INT_BITS) * sizeof (IRA_INT_TYPE);
+ i->bit_num = 0;
+ i->base_conflict_id = OBJECT_MIN (obj);
+ i->word = (i->size == 0 ? 0 : ((IRA_INT_TYPE *) i->vec)[0]);
+ }
+}
+
+/* Return TRUE if we have more conflicting objects to visit, in which
+ case *POBJ is set to the object to be visited. Otherwise, return
+ FALSE. */
+inline bool
+ira_object_conflict_iter_cond (ira_object_conflict_iterator *i,
+ ira_object_t *pobj)
+{
+ ira_object_t obj;
+
+ if (i->conflict_vec_p)
+ {
+ obj = ((ira_object_t *) i->vec)[i->word_num++];
+ if (obj == NULL)
+ return false;
+ }
+ else
+ {
+ unsigned IRA_INT_TYPE word = i->word;
+ unsigned int bit_num = i->bit_num;
+
+ /* Skip words that are zeros. */
+ for (; word == 0; word = ((IRA_INT_TYPE *) i->vec)[i->word_num])
+ {
+ i->word_num++;
+
+ /* If we have reached the end, break. */
+ if (i->word_num * sizeof (IRA_INT_TYPE) >= i->size)
+ return false;
+
+ bit_num = i->word_num * IRA_INT_BITS;
+ }
+
+ /* Skip bits that are zero. */
+ int off = ctz_hwi (word);
+ bit_num += off;
+ word >>= off;
+
+ obj = ira_object_id_map[bit_num + i->base_conflict_id];
+ i->bit_num = bit_num + 1;
+ i->word = word >> 1;
+ }
+
+ *pobj = obj;
+ return true;
+}
+
+/* Loop over all objects conflicting with OBJ. In each iteration,
+ CONF is set to the next conflicting object. ITER is an instance
+ of ira_object_conflict_iterator used to iterate the conflicts. */
+#define FOR_EACH_OBJECT_CONFLICT(OBJ, CONF, ITER) \
+ for (ira_object_conflict_iter_init (&(ITER), (OBJ)); \
+ ira_object_conflict_iter_cond (&(ITER), &(CONF));)
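+
+/* A typical pattern (illustrative; A is an allocno and NOTE_CONFLICT
+ a hypothetical callback) walks each object of an allocno and then
+ each object conflicting with it:
+
+ ira_object_t obj, conf_obj;
+ ira_allocno_object_iterator oi;
+ ira_object_conflict_iterator oci;
+ FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
+ FOR_EACH_OBJECT_CONFLICT (obj, conf_obj, oci)
+ note_conflict (OBJECT_ALLOCNO (conf_obj));
+*/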
+
+
+
+/* Return TRUE if at least one of the hard registers starting with
+ HARD_REGNO and needed to contain a value of MODE is in the set
+ HARD_REGSET. */
+inline bool
+ira_hard_reg_set_intersection_p (int hard_regno, machine_mode mode,
+ HARD_REG_SET hard_regset)
+{
+ int i;
+
+ gcc_assert (hard_regno >= 0);
+ for (i = hard_regno_nregs (hard_regno, mode) - 1; i >= 0; i--)
+ if (TEST_HARD_REG_BIT (hard_regset, hard_regno + i))
+ return true;
+ return false;
+}
+
+/* Return number of hard registers in hard register SET. */
+inline int
+hard_reg_set_size (HARD_REG_SET set)
+{
+ int i, size;
+
+ for (size = i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (set, i))
+ size++;
+ return size;
+}
+
+/* Return TRUE if the hard registers starting with HARD_REGNO and
+ needed to contain a value of MODE are all in the set
+ HARD_REGSET. */
+inline bool
+ira_hard_reg_in_set_p (int hard_regno, machine_mode mode,
+ HARD_REG_SET hard_regset)
+{
+ int i;
+
+ ira_assert (hard_regno >= 0);
+ for (i = hard_regno_nregs (hard_regno, mode) - 1; i >= 0; i--)
+ if (!TEST_HARD_REG_BIT (hard_regset, hard_regno + i))
+ return false;
+ return true;
+}
+
+
+
+/* To save memory we use a lazy approach for allocation and
+ initialization of the cost vectors. We do this only when it is
+ really necessary. */
+
+/* Allocate cost vector *VEC for hard registers of ACLASS and
+ initialize the elements with VAL, if necessary. */
+inline void
+ira_allocate_and_set_costs (int **vec, reg_class_t aclass, int val)
+{
+ int i, *reg_costs;
+ int len;
+
+ if (*vec != NULL)
+ return;
+ *vec = reg_costs = ira_allocate_cost_vector (aclass);
+ len = ira_class_hard_regs_num[(int) aclass];
+ for (i = 0; i < len; i++)
+ reg_costs[i] = val;
+}
+
+/* Allocate cost vector *VEC for hard registers of ACLASS and copy
+ the values of vector SRC into it, if necessary. */
+inline void
+ira_allocate_and_copy_costs (int **vec, enum reg_class aclass, int *src)
+{
+ int len;
+
+ if (*vec != NULL || src == NULL)
+ return;
+ *vec = ira_allocate_cost_vector (aclass);
+ len = ira_class_hard_regs_num[aclass];
+ memcpy (*vec, src, sizeof (int) * len);
+}
+
+/* Allocate cost vector *VEC for hard registers of ACLASS and add
+ the values of vector SRC into it, if necessary. */
+inline void
+ira_allocate_and_accumulate_costs (int **vec, enum reg_class aclass, int *src)
+{
+ int i, len;
+
+ if (src == NULL)
+ return;
+ len = ira_class_hard_regs_num[aclass];
+ if (*vec == NULL)
+ {
+ *vec = ira_allocate_cost_vector (aclass);
+ memset (*vec, 0, sizeof (int) * len);
+ }
+ for (i = 0; i < len; i++)
+ (*vec)[i] += src[i];
+}
+
+/* Allocate cost vector *VEC for hard registers of ACLASS and copy
+ the values of vector SRC into it, or initialize it with VAL (if
+ SRC is null). */
+inline void
+ira_allocate_and_set_or_copy_costs (int **vec, enum reg_class aclass,
+ int val, int *src)
+{
+ int i, *reg_costs;
+ int len;
+
+ if (*vec != NULL)
+ return;
+ *vec = reg_costs = ira_allocate_cost_vector (aclass);
+ len = ira_class_hard_regs_num[aclass];
+ if (src != NULL)
+ memcpy (reg_costs, src, sizeof (int) * len);
+ else
+ {
+ for (i = 0; i < len; i++)
+ reg_costs[i] = val;
+ }
+}
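+
+/* For example (illustrative), the usual lazy-materialization pattern
+ before adjusting a single updated cost entry of allocno A, where I
+ is a hypothetical index into the class's hard regs and DELTA a
+ hypothetical cost adjustment:
+
+ ira_allocate_and_set_or_copy_costs
+ (&ALLOCNO_UPDATED_HARD_REG_COSTS (a), ALLOCNO_CLASS (a),
+ ALLOCNO_UPDATED_CLASS_COST (a), ALLOCNO_HARD_REG_COSTS (a));
+ ALLOCNO_UPDATED_HARD_REG_COSTS (a)[i] += delta;
+*/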
+
+extern rtx ira_create_new_reg (rtx);
+extern int first_moveable_pseudo, last_moveable_pseudo;
+
+/* Return the set of registers that would need a caller save if allocno A
+ overlapped them. */
+
+inline HARD_REG_SET
+ira_need_caller_save_regs (ira_allocno_t a)
+{
+ return call_clobbers_in_region (ALLOCNO_CROSSED_CALLS_ABIS (a),
+ ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
+ ALLOCNO_MODE (a));
+}
+
+/* Return true if we would need to save allocno A around a call if we
+ assigned hard register REGNO. */
+
+inline bool
+ira_need_caller_save_p (ira_allocno_t a, unsigned int regno)
+{
+ if (ALLOCNO_CALLS_CROSSED_NUM (a) == 0)
+ return false;
+ return call_clobbered_in_region_p (ALLOCNO_CROSSED_CALLS_ABIS (a),
+ ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
+ ALLOCNO_MODE (a), regno);
+}
+
+/* Represents the boundary between an allocno in one loop and its parent
+ allocno in the enclosing loop. It is usually possible to change a
+ register's allocation on this boundary; the class provides routines
+ for calculating the cost of such changes. */
+class ira_loop_border_costs
+{
+public:
+ ira_loop_border_costs (ira_allocno_t);
+
+ int move_between_loops_cost () const;
+ int spill_outside_loop_cost () const;
+ int spill_inside_loop_cost () const;
+
+private:
+ /* The mode and class of the child allocno. */
+ machine_mode m_mode;
+ reg_class m_class;
+
+ /* The summed frequencies of the loop's entry edges and its exit
+ edges, respectively. */
+ int m_entry_freq, m_exit_freq;
+};
+
+/* Return the cost of storing the register on entry to the loop and
+ loading it back on exit from the loop. This is the cost to use if
+ the register is spilled within the loop but is successfully allocated
+ in the parent loop. */
+inline int
+ira_loop_border_costs::spill_inside_loop_cost () const
+{
+ return (m_entry_freq * ira_memory_move_cost[m_mode][m_class][0]
+ + m_exit_freq * ira_memory_move_cost[m_mode][m_class][1]);
+}
+
+/* Return the cost of loading the register on entry to the loop and
+ storing it back on exit from the loop. This is the cost to use if
+ the register is successfully allocated within the loop but is spilled
+ in the parent loop. */
+inline int
+ira_loop_border_costs::spill_outside_loop_cost () const
+{
+ return (m_entry_freq * ira_memory_move_cost[m_mode][m_class][1]
+ + m_exit_freq * ira_memory_move_cost[m_mode][m_class][0]);
+}
+
+/* Return the cost of moving the pseudo register between different hard
+ registers on entry and exit from the loop. This is the cost to use
+ if the register is successfully allocated within both this loop and
+ the parent loop, but the allocations for the loops differ. */
+inline int
+ira_loop_border_costs::move_between_loops_cost () const
+{
+ ira_init_register_move_cost_if_necessary (m_mode);
+ auto move_cost = ira_register_move_cost[m_mode][m_class][m_class];
+ return move_cost * (m_entry_freq + m_exit_freq);
+}
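+
+/* Putting the three costs together (an illustrative sketch;
+ SUBLOOP_A is the subloop allocno):
+
+ ira_loop_border_costs border_costs (subloop_a);
+ int spill_in = border_costs.spill_inside_loop_cost ();
+ int spill_out = border_costs.spill_outside_loop_cost ();
+ int move = border_costs.move_between_loops_cost ();
+
+ The allocator can then compare these against each other and
+ against in-loop costs to pick the cheapest treatment of the
+ border. */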
+
+/* Return true if subloops that contain allocnos for A's register can
+ use a different assignment from A. ALLOCATED_P is true for the case
+ in which allocation succeeded for A. EXCLUDE_OLD_RELOAD is true if
+ we should always return false for non-LRA targets. (This is a hack
+ and should be removed along with old reload.) */
+inline bool
+ira_subloop_allocnos_can_differ_p (ira_allocno_t a, bool allocated_p = true,
+ bool exclude_old_reload = true)
+{
+ if (exclude_old_reload && !ira_use_lra_p)
+ return false;
+
+ auto regno = ALLOCNO_REGNO (a);
+
+ if (pic_offset_table_rtx != NULL
+ && regno == (int) REGNO (pic_offset_table_rtx))
+ return false;
+
+ ira_assert (regno < ira_reg_equiv_len);
+ if (ira_equiv_no_lvalue_p (regno))
+ return false;
+
+ /* Avoid overlapping multi-registers. Moves between them might result
+ in wrong code generation. */
+ if (allocated_p)
+ {
+ auto pclass = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
+ if (ira_reg_class_max_nregs[pclass][ALLOCNO_MODE (a)] > 1)
+ return false;
+ }
+
+ return true;
+}
+
+/* Return true if we should treat A and SUBLOOP_A as belonging to a
+ single region. */
+inline bool
+ira_single_region_allocno_p (ira_allocno_t a, ira_allocno_t subloop_a)
+{
+ if (flag_ira_region != IRA_REGION_MIXED)
+ return false;
+
+ if (ALLOCNO_MIGHT_CONFLICT_WITH_PARENT_P (subloop_a))
+ return false;
+
+ auto rclass = ALLOCNO_CLASS (a);
+ auto pclass = ira_pressure_class_translate[rclass];
+ auto loop_used_regs = ALLOCNO_LOOP_TREE_NODE (a)->reg_pressure[pclass];
+ return loop_used_regs <= ira_class_hard_regs_num[pclass];
+}
+
+/* Return the set of all hard registers that conflict with A. */
+inline HARD_REG_SET
+ira_total_conflict_hard_regs (ira_allocno_t a)
+{
+ auto obj_0 = ALLOCNO_OBJECT (a, 0);
+ HARD_REG_SET conflicts = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj_0);
+ for (int i = 1; i < ALLOCNO_NUM_OBJECTS (a); i++)
+ conflicts |= OBJECT_TOTAL_CONFLICT_HARD_REGS (ALLOCNO_OBJECT (a, i));
+ return conflicts;
+}
+
+/* Return the cost of saving a caller-saved register before each call
+ in A's live range and restoring the same register after each call. */
+inline int
+ira_caller_save_cost (ira_allocno_t a)
+{
+ auto mode = ALLOCNO_MODE (a);
+ auto rclass = ALLOCNO_CLASS (a);
+ return (ALLOCNO_CALL_FREQ (a)
+ * (ira_memory_move_cost[mode][rclass][0]
+ + ira_memory_move_cost[mode][rclass][1]));
+}
+
+/* A and SUBLOOP_A are allocnos for the same pseudo register, with A's
+ loop immediately enclosing SUBLOOP_A's loop. If we allocate to A a
+ hard register R that is clobbered by a call in SUBLOOP_A, decide
+ which of the following approaches should be used for handling the
+ conflict:
+
+ (1) Spill R on entry to SUBLOOP_A's loop, assign memory to SUBLOOP_A,
+ and restore R on exit from SUBLOOP_A's loop.
+
+ (2) Spill R before each necessary call in SUBLOOP_A's live range and
+ restore R after each such call.
+
+ Return true if (1) is better than (2). SPILL_COST is the cost of
+ doing (1). */
+inline bool
+ira_caller_save_loop_spill_p (ira_allocno_t a, ira_allocno_t subloop_a,
+ int spill_cost)
+{
+ if (!ira_subloop_allocnos_can_differ_p (a))
+ return false;
+
+ /* Calculate the cost of saving a call-clobbered register
+ before each call and restoring it afterwards. */
+ int call_cost = ira_caller_save_cost (subloop_a);
+ return call_cost && call_cost >= spill_cost;
+}
+
+#endif /* GCC_IRA_INT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ira.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ira.h
new file mode 100644
index 0000000..9c48f94
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ira.h
@@ -0,0 +1,245 @@
+/* Communication between the Integrated Register Allocator (IRA) and
+ the rest of the compiler.
+ Copyright (C) 2006-2023 Free Software Foundation, Inc.
+ Contributed by Vladimir Makarov <vmakarov@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_IRA_H
+#define GCC_IRA_H
+
+#include "emit-rtl.h"
+
+/* True when we use LRA instead of the reload pass for the current
+ function. */
+extern bool ira_use_lra_p;
+
+/* True if we have allocno conflicts. It is false for non-optimized
+ mode or when the conflict table is too big. */
+extern bool ira_conflicts_p;
+
+struct target_ira
+{
+ /* Map: hard register number -> allocno class it belongs to. If the
+ corresponding class is NO_REGS, the hard register is not available
+ for allocation. */
+ enum reg_class x_ira_hard_regno_allocno_class[FIRST_PSEUDO_REGISTER];
+
+ /* Number of allocno classes. Allocno classes are register classes
+ which can be used for allocations of allocnos. */
+ int x_ira_allocno_classes_num;
+
+ /* The array containing the allocno classes. Only the first
+ IRA_ALLOCNO_CLASSES_NUM elements are used for this. */
+
+ /* Map of all register classes to the corresponding allocno classes
+ containing the given class. If the given class is not a subset
+ of an allocno class, we translate it into the cheapest allocno
+ class. */
+
+ /* Number of pressure classes. Pressure classes are register
+ classes for which we calculate register pressure. */
+ int x_ira_pressure_classes_num;
+
+ /* The array containing the pressure classes. Only the first
+ IRA_PRESSURE_CLASSES_NUM elements are used for this. */
+
+ /* Map of all register classes to the corresponding pressure
+ classes containing the given class. If the given class is not a
+ subset of a pressure class, we translate it into the cheapest
+ pressure class. */
+
+ /* Biggest pressure register class containing stack registers.
+ NO_REGS if there are no stack registers. */
+ enum reg_class x_ira_stack_reg_pressure_class;
+
+ /* Maps: register class x machine mode -> maximal/minimal number of
+ hard registers of given class needed to store value of given
+ mode. */
+ unsigned char x_ira_reg_class_max_nregs[N_REG_CLASSES][MAX_MACHINE_MODE];
+ unsigned char x_ira_reg_class_min_nregs[N_REG_CLASSES][MAX_MACHINE_MODE];
+
+ /* Array analogous to target hook TARGET_MEMORY_MOVE_COST. */
+ short x_ira_memory_move_cost[MAX_MACHINE_MODE][N_REG_CLASSES][2];
+
+ /* Array of number of hard registers of given class which are
+ available for the allocation. The order is defined by the
+ allocation order. */
+ short x_ira_class_hard_regs[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
+
+ /* The number of elements of the above array for given register
+ class. */
+ int x_ira_class_hard_regs_num[N_REG_CLASSES];
+
+ /* Register class subset relation: TRUE if the first class is a subset
+ of the second one considering only hard registers available for the
+ allocation. */
+ int x_ira_class_subset_p[N_REG_CLASSES][N_REG_CLASSES];
+
+ /* The biggest class inside the intersection of the two classes
+ (calculated taking only hard registers available for allocation
+ into account). If both classes contain no hard registers available
+ for allocation, the value is calculated taking all hard registers,
+ including fixed ones, into account. */
+ enum reg_class x_ira_reg_class_subset[N_REG_CLASSES][N_REG_CLASSES];
+
+ /* True if the two classes (calculated taking only hard registers
+ available for allocation into account) intersect. */
+ bool x_ira_reg_classes_intersect_p[N_REG_CLASSES][N_REG_CLASSES];
+
+ /* If class CL has a single allocatable register of mode M,
+ index [CL][M] gives the number of that register, otherwise it is -1. */
+ short x_ira_class_singleton[N_REG_CLASSES][MAX_MACHINE_MODE];
+
+ /* Function specific hard registers cannot be used for the register
+ allocation. */
+ HARD_REG_SET x_ira_no_alloc_regs;
+
+ /* Array whose values are the sets of hard registers available for
+ allocation to the given register class but whose
+ targetm.hard_regno_mode_ok values for the given mode are false. */
+ HARD_REG_SET x_ira_prohibited_class_mode_regs[N_REG_CLASSES][NUM_MACHINE_MODES];
+
+ /* When an allocatable hard register in the given mode cannot be placed
+ in the given register class, it is in the set of the following array
+ element. This can happen only when the given mode requires more than
+ one hard register. */
+ HARD_REG_SET x_ira_exclude_class_mode_regs[N_REG_CLASSES][NUM_MACHINE_MODES];
+};
+
+extern struct target_ira default_target_ira;
+#if SWITCHABLE_TARGET
+extern struct target_ira *this_target_ira;
+#else
+#define this_target_ira (&default_target_ira)
+#endif
+
+#define ira_hard_regno_allocno_class \
+ (this_target_ira->x_ira_hard_regno_allocno_class)
+#define ira_allocno_classes_num \
+ (this_target_ira->x_ira_allocno_classes_num)
+#define ira_allocno_classes \
+ (this_target_ira->x_ira_allocno_classes)
+#define ira_allocno_class_translate \
+ (this_target_ira->x_ira_allocno_class_translate)
+#define ira_pressure_classes_num \
+ (this_target_ira->x_ira_pressure_classes_num)
+#define ira_pressure_classes \
+ (this_target_ira->x_ira_pressure_classes)
+#define ira_pressure_class_translate \
+ (this_target_ira->x_ira_pressure_class_translate)
+#define ira_stack_reg_pressure_class \
+ (this_target_ira->x_ira_stack_reg_pressure_class)
+#define ira_reg_class_max_nregs \
+ (this_target_ira->x_ira_reg_class_max_nregs)
+#define ira_reg_class_min_nregs \
+ (this_target_ira->x_ira_reg_class_min_nregs)
+#define ira_memory_move_cost \
+ (this_target_ira->x_ira_memory_move_cost)
+#define ira_class_hard_regs \
+ (this_target_ira->x_ira_class_hard_regs)
+#define ira_class_hard_regs_num \
+ (this_target_ira->x_ira_class_hard_regs_num)
+#define ira_class_subset_p \
+ (this_target_ira->x_ira_class_subset_p)
+#define ira_reg_class_subset \
+ (this_target_ira->x_ira_reg_class_subset)
+#define ira_reg_classes_intersect_p \
+ (this_target_ira->x_ira_reg_classes_intersect_p)
+#define ira_class_singleton \
+ (this_target_ira->x_ira_class_singleton)
+#define ira_no_alloc_regs \
+ (this_target_ira->x_ira_no_alloc_regs)
+#define ira_prohibited_class_mode_regs \
+ (this_target_ira->x_ira_prohibited_class_mode_regs)
+#define ira_exclude_class_mode_regs \
+ (this_target_ira->x_ira_exclude_class_mode_regs)
+
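+/* A hedged usage sketch (editorial, not part of the upstream header):
+   target or plugin code can walk the allocno classes and their
+   allocatable hard registers through the accessors above:
+
+     for (int i = 0; i < ira_allocno_classes_num; i++)
+       {
+         enum reg_class cl = ira_allocno_classes[i];
+         for (int j = 0; j < ira_class_hard_regs_num[cl]; j++)
+           use_hard_reg (ira_class_hard_regs[cl][j]);  // allocation order
+       }
+
+   where use_hard_reg is a hypothetical consumer of hard register
+   numbers.  */
+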
+/* Major structure describing equivalence info for a pseudo. */
+struct ira_reg_equiv_s
+{
+ /* True if we can use this as a general equivalence. */
+ bool defined_p;
+ /* True if we can use this equivalence only for caller save/restore
+ location. */
+ bool caller_save_p;
+ /* True if the usage of the equivalence is profitable. */
+ bool profitable_p;
+ /* Equiv. memory, constant, invariant, and initializing insns of
+ given pseudo-register or NULL_RTX. */
+ rtx memory;
+ rtx constant;
+ rtx invariant;
+ /* Always NULL_RTX if defined_p is false. */
+ rtx_insn_list *init_insns;
+};
+
+/* The length of the following array. */
+extern int ira_reg_equiv_len;
+
+/* Equivalence info for each pseudo register. */
+extern struct ira_reg_equiv_s *ira_reg_equiv;
+
+extern void ira_init_once (void);
+extern void ira_init (void);
+extern void ira_setup_eliminable_regset (void);
+extern rtx ira_eliminate_regs (rtx, machine_mode);
+extern void ira_set_pseudo_classes (bool, FILE *);
+extern void ira_expand_reg_equiv (void);
+extern void ira_update_equiv_info_by_shuffle_insn (int, int, rtx_insn *);
+
+extern void ira_sort_regnos_for_alter_reg (int *, int, machine_mode *);
+extern void ira_mark_allocation_change (int);
+extern void ira_mark_memory_move_deletion (int, int);
+extern bool ira_reassign_pseudos (int *, int, HARD_REG_SET, HARD_REG_SET *,
+ HARD_REG_SET *, bitmap);
+extern rtx ira_reuse_stack_slot (int, poly_uint64, poly_uint64);
+extern void ira_mark_new_stack_slot (rtx, int, poly_uint64);
+extern bool ira_better_spill_reload_regno_p (int *, int *, rtx, rtx, rtx_insn *);
+extern bool ira_bad_reload_regno (int, rtx, rtx);
+
+extern void ira_adjust_equiv_reg_cost (unsigned, int);
+
+extern bool ira_former_scratch_p (int regno);
+extern bool ira_former_scratch_operand_p (rtx_insn *insn, int nop);
+extern void ira_register_new_scratch_op (rtx_insn *insn, int nop, int icode);
+extern bool ira_remove_insn_scratches (rtx_insn *insn, bool all_p, FILE *dump_file,
+ rtx (*get_reg) (rtx original));
+extern void ira_restore_scratches (FILE *dump_file);
+extern void ira_nullify_asm_goto (rtx_insn *insn);
+
+/* ira-costs.cc */
+extern void ira_costs_cc_finalize (void);
+
+/* ira-lives.cc */
+extern rtx non_conflicting_reg_copy_p (rtx_insn *);
+
+/* Spilling the static chain pseudo may result in generating wrong
+ non-local goto code that uses the frame pointer to address the saved
+ stack pointer value after the old frame pointer value has been
+ restored. This function returns TRUE if REGNO is such a static chain
+ pseudo. */
+inline bool
+non_spilled_static_chain_regno_p (int regno)
+{
+ return (cfun->static_chain_decl && crtl->has_nonlocal_goto
+ && REG_EXPR (regno_reg_rtx[regno]) == cfun->static_chain_decl);
+}
+
+#endif /* GCC_IRA_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/is-a.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/is-a.h
new file mode 100644
index 0000000..b535524
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/is-a.h
@@ -0,0 +1,284 @@
+/* Dynamic testing for abstract is-a relationships.
+ Copyright (C) 2012-2023 Free Software Foundation, Inc.
+ Contributed by Lawrence Crowl.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* This header provides generic type query and conversion functions.
+
+
+USING THE GENERIC TYPE FACILITY
+
+
+The user functions are:
+
+bool is_a <TYPE> (pointer)
+
+ Tests whether the pointer actually points to a more derived TYPE.
+
+ Suppose you have a symtab_node *ptr. You can test whether it points
+ to a 'derived' cgraph_node as follows.
+
+ if (is_a <cgraph_node *> (ptr))
+ ....
+
+
+TYPE as_a <TYPE> (pointer)
+
+ Converts pointer to a TYPE.
+
+ You can just assume that it is such a node.
+
+ do_something_with (as_a <cgraph_node *> (ptr));
+
+TYPE safe_as_a <TYPE> (pointer)
+
+ Like as_a <TYPE> (pointer), but where pointer could be NULL. This
+ adds a check against NULL where the regular is_a_helper hook for TYPE
+ assumes non-NULL.
+
+ do_something_with (safe_as_a <cgraph_node *> (ptr));
+
+TYPE dyn_cast <TYPE> (pointer)
+
+ Converts pointer to TYPE if and only if "is_a <TYPE> (pointer)". Otherwise,
+ returns NULL. This function is essentially a checked down cast.
+
+ These functions reduce compile time and increase type safety when treating a
+ generic item as a more specific item.
+
+ You can test and obtain a pointer to the 'derived' type in one indivisible
+ operation.
+
+ if (cgraph_node *cptr = dyn_cast <cgraph_node *> (ptr))
+ ....
+
+ As an example, the code change is from
+
+ if (symtab_function_p (node))
+ {
+ struct cgraph_node *cnode = cgraph (node);
+ ....
+ }
+
+ to
+
+ if (cgraph_node *cnode = dyn_cast <cgraph_node *> (node))
+ {
+ ....
+ }
+
+ The necessary conditional test defines a variable that holds a known good
+ pointer to the specific item and avoids subsequent conversion calls and
+ the assertion checks that may come with them.
+
+ When the property test is embedded within a larger condition, the
+ variable declaration gets pulled out of the condition. (This approach
+ leaves some room for using the variable inappropriately.)
+
+ if (symtab_variable_p (node) && varpool (node)->finalized)
+ varpool_analyze_node (varpool (node));
+
+ becomes
+
+ varpool_node *vnode = dyn_cast <varpool_node *> (node);
+ if (vnode && vnode->finalized)
+ varpool_analyze_node (vnode);
+
+ Note that we have converted two sets of assertions in the calls to varpool
+ into safe and efficient use of a variable.
+
+TYPE safe_dyn_cast <TYPE> (pointer)
+
+ Like dyn_cast <TYPE> (pointer), except that it accepts null pointers
+ and returns null results for them.
+
+
+If you use these functions and get an 'inline function not defined' or a
+'missing symbol' error message for 'is_a_helper<....>::test', it means that
+the connection between the types has not been made. See below.
+
+
+EXTENDING THE GENERIC TYPE FACILITY
+
+Method 1
+--------
+
+If DERIVED is derived from BASE, and if BASE contains enough information
+to determine whether an object is actually an instance of DERIVED,
+then you can make the above routines work for DERIVED by defining
+a specialization of is_a_helper such as:
+
+ template<>
+ struct is_a_helper<DERIVED *> : static_is_a_helper<DERIVED *>
+ {
+ static inline bool test (const BASE *p) { return ...; }
+ };
+
+This test function should return true if P is an instance of DERIVED.
+This on its own is enough; the comments below for method 2 do not apply.
+
+Method 2
+--------
+
+Alternatively, if two types are connected in ways other than C++
+inheritance, each connection between them must be made by defining a
+specialization of the template member function 'test' of the template
+class 'is_a_helper'. For example,
+
+ template <>
+ template <>
+ inline bool
+ is_a_helper <cgraph_node *>::test (symtab_node *p)
+ {
+ return p->type == SYMTAB_FUNCTION;
+ }
+
+If a simple reinterpret_cast between the pointer types is incorrect, then you
+must also specialize the template member function 'cast'. Failure to do so
+when needed may result in a crash. For example,
+
+ template <>
+ template <>
+ inline cgraph_node *
+ is_a_helper <cgraph_node *>::cast (symtab_node *p)
+ {
+ return &p->x_function;
+ }
+
+*/
+
+#ifndef GCC_IS_A_H
+#define GCC_IS_A_H
+
+/* A base class that specializations of is_a_helper can use if casting
+ U * to T is simply a reinterpret_cast. */
+
+template <typename T>
+struct reinterpret_is_a_helper
+{
+ template <typename U>
+ static inline T cast (U *p) { return reinterpret_cast <T> (p); }
+};
+
+/* A base class that specializations of is_a_helper can use if casting
+ U * to T is simply a static_cast. This is more type-safe than
+ reinterpret_is_a_helper. */
+
+template <typename T>
+struct static_is_a_helper
+{
+ template <typename U>
+ static inline T cast (U *p) { return static_cast <T> (p); }
+};
+
+/* A generic type conversion internal helper class. */
+
+template <typename T>
+struct is_a_helper : reinterpret_is_a_helper<T>
+{
+ template <typename U>
+ static inline bool test (U *p);
+};
+
+/* Reuse the definition of is_a_helper<T *> to implement
+ is_a_helper<const T *>. */
+
+template <typename T>
+struct is_a_helper<const T *>
+{
+ template <typename U>
+ static inline const T *cast (const U *p)
+ {
+ return is_a_helper<T *>::cast (const_cast <U *> (p));
+ }
+ template <typename U>
+ static inline bool test (const U *p)
+ {
+ return is_a_helper<T *>::test (p);
+ }
+};
+
+/* Note that we deliberately do not define the 'test' member template. Not
+ doing so will result in a build-time error for type relationships that have
+ not been defined, rather than a run-time error. See the discussion above
+ for when to define this member. */
+
+/* The public interface. */
+
+/* A generic test for a type relationship. See the discussion above for when
+ to use this function. The question answered is "Is type T a derived type of
+ type U?". */
+
+template <typename T, typename U>
+inline bool
+is_a (U *p)
+{
+ return is_a_helper<T>::test (p);
+}
+
+/* A generic conversion from a base type U to a derived type T. See the
+ discussion above for when to use this function. */
+
+template <typename T, typename U>
+inline T
+as_a (U *p)
+{
+ gcc_checking_assert (is_a <T> (p));
+ return is_a_helper <T>::cast (p);
+}
+
+/* Similar to as_a<>, but where the pointer can be NULL, even if
+ is_a_helper<T> doesn't check for NULL. */
+
+template <typename T, typename U>
+inline T
+safe_as_a (U *p)
+{
+ if (p)
+ {
+ gcc_checking_assert (is_a <T> (p));
+ return is_a_helper <T>::cast (p);
+ }
+ else
+ return NULL;
+}
+
+/* A generic checked conversion from a base type U to a derived type T. See
+ the discussion above for when to use this function. */
+
+template <typename T, typename U>
+inline T
+dyn_cast (U *p)
+{
+ if (is_a <T> (p))
+ return is_a_helper <T>::cast (p);
+ else
+ return static_cast <T> (0);
+}
+
+/* Similar to dyn_cast, except that the pointer may be null. */
+
+template <typename T, typename U>
+inline T
+safe_dyn_cast (U *p)
+{
+ return p ? dyn_cast <T> (p) : 0;
+}
+
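+/* A hedged end-to-end sketch (editorial, not part of the upstream
+   header), using a hypothetical hierarchy in which derived_node
+   inherits from base_node and base_node carries a discriminating
+   'kind' field:
+
+     struct base_node { int kind; };
+     struct derived_node : base_node { int extra; };
+
+     template <>
+     struct is_a_helper <derived_node *>
+       : static_is_a_helper <derived_node *>
+     {
+       static inline bool test (const base_node *p) { return p->kind == 1; }
+     };
+
+     int
+     get_extra_or_zero (base_node *p)
+     {
+       if (derived_node *d = dyn_cast <derived_node *> (p))
+         return d->extra;
+       return 0;
+     }
+*/
+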
+#endif /* GCC_IS_A_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/iterator-utils.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/iterator-utils.h
new file mode 100644
index 0000000..2c409bd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/iterator-utils.h
@@ -0,0 +1,203 @@
+// Iterator-related utilities.
+// Copyright (C) 2002-2023 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef GCC_ITERATOR_UTILS_H
+#define GCC_ITERATOR_UTILS_H 1
+
+// A half-open [begin, end) range of iterators.
+template<typename T>
+struct iterator_range
+{
+public:
+ using const_iterator = T;
+
+ iterator_range () = default;
+ iterator_range (const T &begin, const T &end)
+ : m_begin (begin), m_end (end) {}
+
+ T begin () const { return m_begin; }
+ T end () const { return m_end; }
+
+ explicit operator bool () const { return m_begin != m_end; }
+
+private:
+ T m_begin;
+ T m_end;
+};
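+
+// A hedged usage sketch (editorial, not part of the upstream header):
+// wrapping a pair of plain pointers so the range can drive a
+// range-based for loop; consume is a hypothetical sink.
+//
+//   int vals[] = { 1, 2, 3 };
+//   iterator_range<int *> r (vals, vals + 3);
+//   if (r)  // non-empty
+//     for (int v : r)
+//       consume (v);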
+
+// Provide an iterator like BaseIT, except that it yields values of type T,
+// which is derived from the type that BaseIT normally yields.
+//
+// The class doesn't inherit from BaseIT for two reasons:
+// - using inheritance would stop the class working with plain pointers
+// - not using inheritance increases type-safety for writable iterators
+//
+// Constructing this class from a BaseIT involves an assertion that all
+// contents really do have type T. The constructor is therefore explicit.
+template<typename T, typename BaseIT>
+class derived_iterator
+{
+public:
+ using value_type = T;
+
+ derived_iterator () = default;
+
+ template<typename... Ts>
+ explicit derived_iterator (Ts... args)
+ : m_base (std::forward<Ts> (args)...) {}
+
+ derived_iterator &operator++ () { ++m_base; return *this; }
+ derived_iterator operator++ (int);
+
+ T operator* () const { return static_cast<T> (*m_base); }
+ T *operator-> () const { return static_cast<T *> (m_base.operator-> ()); }
+
+ bool operator== (const derived_iterator &other) const;
+ bool operator!= (const derived_iterator &other) const;
+
+protected:
+ BaseIT m_base;
+};
+
+template<typename T, typename BaseIT>
+inline derived_iterator<T, BaseIT>
+derived_iterator<T, BaseIT>::operator++ (int)
+{
+ derived_iterator ret = *this;
+ ++m_base;
+ return ret;
+}
+
+template<typename T, typename BaseIT>
+inline bool
+derived_iterator<T, BaseIT>::operator== (const derived_iterator &other) const
+{
+ return m_base == other.m_base;
+}
+
+template<typename T, typename BaseIT>
+inline bool
+derived_iterator<T, BaseIT>::operator!= (const derived_iterator &other) const
+{
+ return m_base != other.m_base;
+}
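+
+// A hedged sketch (editorial, not part of the upstream header): viewing
+// storage of base pointers as an iterator over derived pointers, for
+// hypothetical types B and D where D derives from B:
+//
+//   struct B {};
+//   struct D : B {};
+//
+//   B *storage[2] = { new D, new D };
+//   derived_iterator<D *, B **> it (&storage[0]);
+//   D *first = *it;  // static_cast<D *> (storage[0])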
+
+// Provide a constant view of a BaseCT in which every value is known to
+// have type T, which is derived from the type that BaseCT normally presents.
+//
+// Constructing this class from a BaseCT involves an assertion that all
+// contents really do have type T. The constructor is therefore explicit.
+template<typename T, typename BaseCT>
+class const_derived_container : public BaseCT
+{
+ using base_const_iterator = typename BaseCT::const_iterator;
+
+public:
+ using value_type = T;
+ using const_iterator = derived_iterator<T, base_const_iterator>;
+
+ const_derived_container () = default;
+
+ template<typename... Ts>
+ explicit const_derived_container (Ts... args)
+ : BaseCT (std::forward<Ts> (args)...) {}
+
+ const_iterator begin () const { return const_iterator (BaseCT::begin ()); }
+ const_iterator end () const { return const_iterator (BaseCT::end ()); }
+
+ T front () const { return static_cast<T> (BaseCT::front ()); }
+ T back () const { return static_cast<T> (BaseCT::back ()); }
+ T operator[] (unsigned int i) const;
+};
+
+template<typename T, typename BaseCT>
+inline T
+const_derived_container<T, BaseCT>::operator[] (unsigned int i) const
+{
+ return static_cast<T> (BaseCT::operator[] (i));
+}
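+
+// A hedged sketch (editorial, not part of the upstream header):
+// presenting a std::vector<B *> whose elements are all known to be
+// D * (reusing the hypothetical B/D pair above) as a container of D *:
+//
+//   const_derived_container<D *, std::vector<B *>> c (2, new D);
+//   D *front = c.front ();  // static_cast<D *> applied per access
+//   for (D *d : c)
+//     inspect (d);  // inspect is a hypothetical consumer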
+
+// A base class for iterators whose contents consist of a StoredT and that
+// when dereferenced yield those StoredT contents as a T. Derived classes
+// should implement at least operator++ or operator--.
+template<typename T, typename StoredT = T>
+class wrapper_iterator
+{
+public:
+ using value_type = T;
+
+ wrapper_iterator () = default;
+
+ template<typename... Ts>
+ wrapper_iterator (Ts... args) : m_contents (std::forward<Ts> (args)...) {}
+
+ T operator* () const { return static_cast<T> (m_contents); }
+ bool operator== (const wrapper_iterator &) const;
+ bool operator!= (const wrapper_iterator &) const;
+
+protected:
+ StoredT m_contents;
+};
+
+template<typename T, typename StoredT>
+inline bool
+wrapper_iterator<T, StoredT>::operator== (const wrapper_iterator &other) const
+{
+ return m_contents == other.m_contents;
+}
+
+template<typename T, typename StoredT>
+inline bool
+wrapper_iterator<T, StoredT>::operator!= (const wrapper_iterator &other) const
+{
+ return m_contents != other.m_contents;
+}
+
+// A forward iterator for a linked list whose nodes are referenced using
+// type T. Given a node "T N", the next element is given by (N->*Next) ().
+template<typename T, T *(T::*Next) () const>
+class list_iterator : public wrapper_iterator<T *>
+{
+private:
+ using parent = wrapper_iterator<T *>;
+
+public:
+ using parent::parent;
+ list_iterator &operator++ ();
+ list_iterator operator++ (int);
+};
+
+template<typename T, T *(T::*Next) () const>
+inline list_iterator<T, Next> &
+list_iterator<T, Next>::operator++ ()
+{
+ this->m_contents = (this->m_contents->*Next) ();
+ return *this;
+}
+
+template<typename T, T *(T::*Next) () const>
+inline list_iterator<T, Next>
+list_iterator<T, Next>::operator++ (int)
+{
+ list_iterator ret = *this;
+ this->m_contents = (this->m_contents->*Next) ();
+ return ret;
+}
+
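+// A hedged sketch (editorial, not part of the upstream header):
+// iterating a singly linked list whose hypothetical node type exposes
+// a next () accessor, starting from a hypothetical head pointer:
+//
+//   struct node
+//   {
+//     node *m_next;
+//     node *next () const { return m_next; }
+//   };
+//
+//   using node_iterator = list_iterator<node, &node::next>;
+//   for (node_iterator it (head); it != node_iterator (nullptr); ++it)
+//     visit (*it);  // *it yields a node *
+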
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/json.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/json.h
new file mode 100644
index 0000000..057119d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/json.h
@@ -0,0 +1,200 @@
+/* JSON trees
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_JSON_H
+#define GCC_JSON_H
+
+/* Implementation of JSON, a lightweight data-interchange format.
+
+ See http://www.json.org/
+ and http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf
+ and https://tools.ietf.org/html/rfc7159
+
+ Supports creating a DOM-like tree of json::value *, and then dumping
+ json::value * to text. */
+
+namespace json
+{
+
+/* Forward decls of json::value and its subclasses (using indentation
+ to denote inheritance). */
+
+class value;
+ class object;
+ class array;
+ class float_number;
+ class integer_number;
+ class string;
+ class literal;
+
+/* An enum for discriminating the subclasses of json::value. */
+
+enum kind
+{
+ /* class json::object. */
+ JSON_OBJECT,
+
+ /* class json::array. */
+ JSON_ARRAY,
+
+ /* class json::integer_number. */
+ JSON_INTEGER,
+
+ /* class json::float_number. */
+ JSON_FLOAT,
+
+ /* class json::string. */
+ JSON_STRING,
+
+ /* class json::literal uses these three values to identify the
+ particular literal. */
+ JSON_TRUE,
+ JSON_FALSE,
+ JSON_NULL
+};
+
+/* Base class of JSON value. */
+
+class value
+{
+ public:
+ virtual ~value () {}
+ virtual enum kind get_kind () const = 0;
+ virtual void print (pretty_printer *pp) const = 0;
+
+ void dump (FILE *) const;
+};
+
+/* Subclass of value for objects: a collection of key/value pairs
+ preserving the ordering in which keys were inserted.
+
+ Preserving the order eliminates non-determinism in the output,
+ making it easier for the user to compare repeated invocations. */
+
+class object : public value
+{
+ public:
+ ~object ();
+
+ enum kind get_kind () const final override { return JSON_OBJECT; }
+ void print (pretty_printer *pp) const final override;
+
+ void set (const char *key, value *v);
+ value *get (const char *key) const;
+
+ private:
+ typedef hash_map <char *, value *,
+ simple_hashmap_traits<nofree_string_hash, value *> > map_t;
+ map_t m_map;
+
+ /* Keep track of order in which keys were inserted. */
+ auto_vec <const char *> m_keys;
+};
+
+/* Subclass of value for arrays. */
+
+class array : public value
+{
+ public:
+ ~array ();
+
+ enum kind get_kind () const final override { return JSON_ARRAY; }
+ void print (pretty_printer *pp) const final override;
+
+ void append (value *v);
+
+ private:
+ auto_vec<value *> m_elements;
+};
+
+/* Subclass of value for floating-point numbers. */
+
+class float_number : public value
+{
+ public:
+ float_number (double value) : m_value (value) {}
+
+ enum kind get_kind () const final override { return JSON_FLOAT; }
+ void print (pretty_printer *pp) const final override;
+
+ double get () const { return m_value; }
+
+ private:
+ double m_value;
+};
+
+/* Subclass of value for integer-valued numbers. */
+
+class integer_number : public value
+{
+ public:
+ integer_number (long value) : m_value (value) {}
+
+ enum kind get_kind () const final override { return JSON_INTEGER; }
+ void print (pretty_printer *pp) const final override;
+
+ long get () const { return m_value; }
+
+ private:
+ long m_value;
+};
+
+
+/* Subclass of value for strings. */
+
+class string : public value
+{
+ public:
+ explicit string (const char *utf8);
+ string (const char *utf8, size_t len);
+ ~string () { free (m_utf8); }
+
+ enum kind get_kind () const final override { return JSON_STRING; }
+ void print (pretty_printer *pp) const final override;
+
+ const char *get_string () const { return m_utf8; }
+ size_t get_length () const { return m_len; }
+
+ private:
+ char *m_utf8;
+ size_t m_len;
+};
+
+/* Subclass of value for the three JSON literals "true", "false",
+ and "null". */
+
+class literal : public value
+{
+ public:
+ literal (enum kind kind) : m_kind (kind) {}
+
+ /* Construct literal for a boolean value. */
+ literal (bool value): m_kind (value ? JSON_TRUE : JSON_FALSE) {}
+
+ enum kind get_kind () const final override { return m_kind; }
+ void print (pretty_printer *pp) const final override;
+
+ private:
+ enum kind m_kind;
+};
+
+} // namespace json
+
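+/* A hedged usage sketch (editorial, not part of the upstream header):
+   building a small tree and dumping it to stderr.  In GCC's
+   implementation the containing object and array take ownership of
+   the values inserted into them.
+
+     json::object *obj = new json::object ();
+     obj->set ("name", new json::string ("gcc"));
+     obj->set ("version", new json::integer_number (13));
+     json::array *langs = new json::array ();
+     langs->append (new json::string ("c"));
+     langs->append (new json::string ("c++"));
+     obj->set ("languages", langs);
+     obj->dump (stderr);
+     delete obj;
+*/
+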
+#endif /* GCC_JSON_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/langhooks-def.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/langhooks-def.h
new file mode 100644
index 0000000..c6d1852
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/langhooks-def.h
@@ -0,0 +1,400 @@
+/* Default macros to initialize the lang_hooks data structure.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+ Contributed by Alexandre Oliva <aoliva@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_LANG_HOOKS_DEF_H
+#define GCC_LANG_HOOKS_DEF_H
+
+#include "hooks.h"
+
+struct diagnostic_info;
+class substring_loc;
+
+/* Note to creators of new hooks:
+
+ The macros in this file should NOT be surrounded by a
+ #ifdef...#endif pair, since this file declares the defaults. Each
+ front end overrides any hooks it wishes to, in the file containing
+ its struct lang_hooks, AFTER including this file. */
+
+/* See langhooks.h for the definition and documentation of each hook. */
+
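+/* A hedged sketch (editorial, not part of the upstream header) of the
+   override pattern the note above describes, for a hypothetical front
+   end with its own foo_lang_init function:
+
+     #include "langhooks.h"
+     #include "langhooks-def.h"
+
+     #undef LANG_HOOKS_NAME
+     #define LANG_HOOKS_NAME "GNU Foo"
+     #undef LANG_HOOKS_INIT
+     #define LANG_HOOKS_INIT foo_lang_init
+
+     struct lang_hooks lang_hooks = LANG_HOOKS_INITIALIZER;
+
+   Only the redefined macros change; every other hook keeps the default
+   from this file.  */
+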
+extern void lhd_do_nothing (void);
+extern void lhd_do_nothing_t (tree);
+extern void lhd_do_nothing_f (struct function *);
+extern tree lhd_pass_through_t (tree);
+extern void lhd_register_dumps (gcc::dump_manager *);
+extern bool lhd_post_options (const char **);
+extern alias_set_type lhd_get_alias_set (tree);
+extern tree lhd_return_null_tree (tree);
+extern tree lhd_return_null_const_tree (const_tree);
+extern tree lhd_do_nothing_iii_return_null_tree (int, int, int);
+extern void lhd_print_tree_nothing (FILE *, tree, int);
+extern const char *lhd_decl_printable_name (tree, int);
+extern const char *lhd_dwarf_name (tree, int);
+extern int lhd_types_compatible_p (tree, tree);
+extern void lhd_print_error_function (diagnostic_context *,
+ const char *, struct diagnostic_info *);
+extern void lhd_set_decl_assembler_name (tree decl);
+extern void lhd_overwrite_decl_assembler_name (tree decl, tree name);
+extern bool lhd_warn_unused_global_decl (const_tree);
+extern tree lhd_simulate_enum_decl (location_t, const char *,
+ vec<string_int_pair> *);
+extern tree lhd_simulate_record_decl (location_t, const char *,
+ array_slice<const tree>);
+extern tree lhd_type_for_size (unsigned precision, int unsignedp);
+extern void lhd_incomplete_type_error (location_t, const_tree, const_tree);
+extern tree lhd_type_promotes_to (tree);
+extern void lhd_register_builtin_type (tree, const char *);
+extern bool lhd_decl_ok_for_sibcall (const_tree);
+extern size_t lhd_tree_size (enum tree_code);
+extern HOST_WIDE_INT lhd_to_target_charset (HOST_WIDE_INT);
+extern tree lhd_expr_to_decl (tree, bool *, bool *);
+extern tree lhd_builtin_function (tree);
+extern tree lhd_enum_underlying_base_type (const_tree);
+
+/* Declarations of default tree inlining hooks. */
+extern void lhd_initialize_diagnostics (diagnostic_context *);
+extern void lhd_init_options (unsigned int,
+ struct cl_decoded_option *);
+extern bool lhd_complain_wrong_lang_p (const struct cl_option *);
+extern bool lhd_handle_option (size_t, const char *, HOST_WIDE_INT, int,
+ location_t, const struct cl_option_handlers *);
+
+
+/* Declarations for tree gimplification hooks. */
+extern int lhd_gimplify_expr (tree *, gimple_seq *, gimple_seq *);
+extern enum omp_clause_default_kind lhd_omp_predetermined_sharing (tree);
+extern enum omp_clause_defaultmap_kind lhd_omp_predetermined_mapping (tree);
+extern tree lhd_omp_assignment (tree, tree, tree);
+extern void lhd_omp_finish_clause (tree, gimple_seq *, bool);
+extern tree lhd_omp_array_size (tree, gimple_seq *);
+struct gimplify_omp_ctx;
+extern void lhd_omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *,
+ tree);
+extern bool lhd_omp_scalar_p (tree, bool);
+extern tree *lhd_omp_get_decl_init (tree);
+extern void lhd_omp_finish_decl_inits ();
+
+extern const char *lhd_get_substring_location (const substring_loc &,
+ location_t *out_loc);
+extern int lhd_decl_dwarf_attribute (const_tree, int);
+extern int lhd_type_dwarf_attribute (const_tree, int);
+extern void lhd_finalize_early_debug (void);
+extern const char *lhd_get_sarif_source_language (const char *);
+
+#define LANG_HOOKS_NAME "GNU unknown"
+#define LANG_HOOKS_IDENTIFIER_SIZE sizeof (struct lang_identifier)
+#define LANG_HOOKS_INIT hook_bool_void_false
+#define LANG_HOOKS_FINISH lhd_do_nothing
+#define LANG_HOOKS_PARSE_FILE lhd_do_nothing
+#define LANG_HOOKS_OPTION_LANG_MASK hook_uint_void_0
+#define LANG_HOOKS_INIT_OPTIONS_STRUCT hook_void_gcc_optionsp
+#define LANG_HOOKS_INIT_OPTIONS lhd_init_options
+#define LANG_HOOKS_INITIALIZE_DIAGNOSTICS lhd_initialize_diagnostics
+#define LANG_HOOKS_PREPROCESS_MAIN_FILE NULL
+#define LANG_HOOKS_PREPROCESS_OPTIONS NULL
+#define LANG_HOOKS_PREPROCESS_UNDEF NULL
+#define LANG_HOOKS_PREPROCESS_TOKEN NULL
+#define LANG_HOOKS_REGISTER_DUMPS lhd_register_dumps
+#define LANG_HOOKS_COMPLAIN_WRONG_LANG_P lhd_complain_wrong_lang_p
+#define LANG_HOOKS_HANDLE_OPTION lhd_handle_option
+#define LANG_HOOKS_POST_OPTIONS lhd_post_options
+#define LANG_HOOKS_MISSING_NORETURN_OK_P hook_bool_tree_true
+#define LANG_HOOKS_GET_ALIAS_SET lhd_get_alias_set
+#define LANG_HOOKS_FINISH_INCOMPLETE_DECL lhd_do_nothing_t
+#define LANG_HOOKS_DUP_LANG_SPECIFIC_DECL lhd_do_nothing_t
+#define LANG_HOOKS_SET_DECL_ASSEMBLER_NAME lhd_set_decl_assembler_name
+#define LANG_HOOKS_OVERWRITE_DECL_ASSEMBLER_NAME lhd_overwrite_decl_assembler_name
+#define LANG_HOOKS_PRINT_STATISTICS lhd_do_nothing
+#define LANG_HOOKS_PRINT_XNODE lhd_print_tree_nothing
+#define LANG_HOOKS_PRINT_DECL lhd_print_tree_nothing
+#define LANG_HOOKS_PRINT_TYPE lhd_print_tree_nothing
+#define LANG_HOOKS_PRINT_IDENTIFIER lhd_print_tree_nothing
+#define LANG_HOOKS_PRINT_ERROR_FUNCTION lhd_print_error_function
+#define LANG_HOOKS_DECL_PRINTABLE_NAME lhd_decl_printable_name
+#define LANG_HOOKS_DWARF_NAME lhd_dwarf_name
+#define LANG_HOOKS_FREE_LANG_DATA lhd_do_nothing_t
+#define LANG_HOOKS_TREE_SIZE lhd_tree_size
+#define LANG_HOOKS_TYPES_COMPATIBLE_P lhd_types_compatible_p
+#define LANG_HOOKS_BUILTIN_FUNCTION lhd_builtin_function
+#define LANG_HOOKS_BUILTIN_FUNCTION_EXT_SCOPE LANG_HOOKS_BUILTIN_FUNCTION
+#define LANG_HOOKS_SIMULATE_BUILTIN_FUNCTION_DECL LANG_HOOKS_BUILTIN_FUNCTION
+#define LANG_HOOKS_EXPR_TO_DECL lhd_expr_to_decl
+#define LANG_HOOKS_TO_TARGET_CHARSET lhd_to_target_charset
+#define LANG_HOOKS_INIT_TS lhd_do_nothing
+#define LANG_HOOKS_EH_PERSONALITY lhd_gcc_personality
+#define LANG_HOOKS_EH_RUNTIME_TYPE lhd_pass_through_t
+#define LANG_HOOKS_EH_PROTECT_CLEANUP_ACTIONS NULL
+#define LANG_HOOKS_BLOCK_MAY_FALLTHRU hook_bool_const_tree_true
+#define LANG_HOOKS_EH_USE_CXA_END_CLEANUP false
+#define LANG_HOOKS_DEEP_UNSHARING false
+#define LANG_HOOKS_CUSTOM_FUNCTION_DESCRIPTORS false
+#define LANG_HOOKS_EMITS_BEGIN_STMT false
+#define LANG_HOOKS_RUN_LANG_SELFTESTS lhd_do_nothing
+#define LANG_HOOKS_GET_SUBSTRING_LOCATION lhd_get_substring_location
+#define LANG_HOOKS_FINALIZE_EARLY_DEBUG lhd_finalize_early_debug
+#define LANG_HOOKS_GET_SARIF_SOURCE_LANGUAGE lhd_get_sarif_source_language
+
+/* Attribute hooks. */
+#define LANG_HOOKS_ATTRIBUTE_TABLE NULL
+#define LANG_HOOKS_COMMON_ATTRIBUTE_TABLE NULL
+#define LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE NULL
+
+/* Tree inlining hooks. */
+#define LANG_HOOKS_TREE_INLINING_VAR_MOD_TYPE_P \
+ hook_bool_tree_tree_false
+
+#define LANG_HOOKS_TREE_INLINING_INITIALIZER { \
+ LANG_HOOKS_TREE_INLINING_VAR_MOD_TYPE_P, \
+}
+
+/* Hooks for tree gimplification. */
+#define LANG_HOOKS_GIMPLIFY_EXPR lhd_gimplify_expr
+
+/* Tree dump hooks. */
+extern bool lhd_tree_dump_dump_tree (void *, tree);
+extern int lhd_tree_dump_type_quals (const_tree);
+extern tree lhd_make_node (enum tree_code);
+
+#define LANG_HOOKS_TREE_DUMP_DUMP_TREE_FN lhd_tree_dump_dump_tree
+#define LANG_HOOKS_TREE_DUMP_TYPE_QUALS_FN lhd_tree_dump_type_quals
+
+#define LANG_HOOKS_TREE_DUMP_INITIALIZER { \
+ LANG_HOOKS_TREE_DUMP_DUMP_TREE_FN, \
+ LANG_HOOKS_TREE_DUMP_TYPE_QUALS_FN \
+}
+
+/* Types hooks. There are no reasonable defaults for most of them,
+ so we create a compile-time error instead. */
+extern tree lhd_unit_size_without_reusable_padding (tree);
+
+#define LANG_HOOKS_MAKE_TYPE lhd_make_node
+#define LANG_HOOKS_SIMULATE_ENUM_DECL lhd_simulate_enum_decl
+#define LANG_HOOKS_SIMULATE_RECORD_DECL lhd_simulate_record_decl
+#define LANG_HOOKS_CLASSIFY_RECORD NULL
+#define LANG_HOOKS_TYPE_FOR_SIZE lhd_type_for_size
+#define LANG_HOOKS_INCOMPLETE_TYPE_ERROR lhd_incomplete_type_error
+#define LANG_HOOKS_GENERIC_TYPE_P hook_bool_const_tree_false
+#define LANG_HOOKS_GET_INNERMOST_GENERIC_PARMS hook_tree_const_tree_null
+#define LANG_HOOKS_GET_INNERMOST_GENERIC_ARGS hook_tree_const_tree_null
+#define LANG_HOOKS_FUNCTION_PARAMETER_PACK_P hook_bool_const_tree_false
+#define LANG_HOOKS_GET_ARGUMENT_PACK_ELEMS hook_tree_const_tree_null
+#define LANG_HOOKS_GENERIC_GENERIC_PARAMETER_DECL_P hook_bool_const_tree_false
+#define LANG_HOOKS_FUNCTION_PARM_EXPANDED_FROM_PACK_P \
+ hook_bool_tree_tree_false
+#define LANG_HOOKS_GET_GENERIC_FUNCTION_DECL hook_tree_const_tree_null
+#define LANG_HOOKS_TYPE_PROMOTES_TO lhd_type_promotes_to
+#define LANG_HOOKS_REGISTER_BUILTIN_TYPE lhd_register_builtin_type
+#define LANG_HOOKS_TYPE_MAX_SIZE lhd_return_null_const_tree
+#define LANG_HOOKS_OMP_FIRSTPRIVATIZE_TYPE_SIZES \
+ lhd_omp_firstprivatize_type_sizes
+#define LANG_HOOKS_TYPE_HASH_EQ NULL
+#define LANG_HOOKS_COPY_LANG_QUALIFIERS NULL
+#define LANG_HOOKS_GET_ARRAY_DESCR_INFO NULL
+#define LANG_HOOKS_GET_SUBRANGE_BOUNDS NULL
+#define LANG_HOOKS_GET_TYPE_BIAS NULL
+#define LANG_HOOKS_DESCRIPTIVE_TYPE NULL
+#define LANG_HOOKS_RECONSTRUCT_COMPLEX_TYPE reconstruct_complex_type
+#define LANG_HOOKS_ENUM_UNDERLYING_BASE_TYPE lhd_enum_underlying_base_type
+#define LANG_HOOKS_GET_DEBUG_TYPE NULL
+#define LANG_HOOKS_GET_FIXED_POINT_TYPE_INFO NULL
+#define LANG_HOOKS_TYPE_DWARF_ATTRIBUTE lhd_type_dwarf_attribute
+#define LANG_HOOKS_UNIT_SIZE_WITHOUT_REUSABLE_PADDING lhd_unit_size_without_reusable_padding
+#define LANG_HOOKS_CLASSTYPE_AS_BASE hook_tree_const_tree_null
+
+#define LANG_HOOKS_FOR_TYPES_INITIALIZER { \
+ LANG_HOOKS_MAKE_TYPE, \
+ LANG_HOOKS_SIMULATE_ENUM_DECL, \
+ LANG_HOOKS_SIMULATE_RECORD_DECL, \
+ LANG_HOOKS_CLASSIFY_RECORD, \
+ LANG_HOOKS_TYPE_FOR_MODE, \
+ LANG_HOOKS_TYPE_FOR_SIZE, \
+ LANG_HOOKS_GENERIC_TYPE_P, \
+ LANG_HOOKS_GET_ARGUMENT_PACK_ELEMS, \
+ LANG_HOOKS_TYPE_PROMOTES_TO, \
+ LANG_HOOKS_REGISTER_BUILTIN_TYPE, \
+ LANG_HOOKS_INCOMPLETE_TYPE_ERROR, \
+ LANG_HOOKS_TYPE_MAX_SIZE, \
+ LANG_HOOKS_OMP_FIRSTPRIVATIZE_TYPE_SIZES, \
+ LANG_HOOKS_TYPE_HASH_EQ, \
+ LANG_HOOKS_COPY_LANG_QUALIFIERS, \
+ LANG_HOOKS_GET_ARRAY_DESCR_INFO, \
+ LANG_HOOKS_GET_SUBRANGE_BOUNDS, \
+ LANG_HOOKS_GET_TYPE_BIAS, \
+ LANG_HOOKS_DESCRIPTIVE_TYPE, \
+ LANG_HOOKS_RECONSTRUCT_COMPLEX_TYPE, \
+ LANG_HOOKS_ENUM_UNDERLYING_BASE_TYPE, \
+ LANG_HOOKS_GET_DEBUG_TYPE, \
+ LANG_HOOKS_GET_FIXED_POINT_TYPE_INFO, \
+ LANG_HOOKS_TYPE_DWARF_ATTRIBUTE, \
+ LANG_HOOKS_UNIT_SIZE_WITHOUT_REUSABLE_PADDING, \
+ LANG_HOOKS_CLASSTYPE_AS_BASE \
+}
+
+/* Declaration hooks. */
+#define LANG_HOOKS_GLOBAL_BINDINGS_P global_bindings_p
+#define LANG_HOOKS_PUSHDECL pushdecl
+#define LANG_HOOKS_GETDECLS getdecls
+#define LANG_HOOKS_DECL_DWARF_ATTRIBUTE lhd_decl_dwarf_attribute
+#define LANG_HOOKS_WARN_UNUSED_GLOBAL_DECL lhd_warn_unused_global_decl
+#define LANG_HOOKS_POST_COMPILATION_PARSING_CLEANUPS NULL
+#define LANG_HOOKS_DECL_OK_FOR_SIBCALL lhd_decl_ok_for_sibcall
+#define LANG_HOOKS_OMP_ARRAY_DATA hook_tree_tree_bool_null
+#define LANG_HOOKS_OMP_ARRAY_SIZE lhd_omp_array_size
+#define LANG_HOOKS_OMP_IS_ALLOCATABLE_OR_PTR hook_bool_const_tree_false
+#define LANG_HOOKS_OMP_CHECK_OPTIONAL_ARGUMENT hook_tree_tree_bool_null
+#define LANG_HOOKS_OMP_PRIVATIZE_BY_REFERENCE hook_bool_const_tree_false
+#define LANG_HOOKS_OMP_PREDETERMINED_SHARING lhd_omp_predetermined_sharing
+#define LANG_HOOKS_OMP_PREDETERMINED_MAPPING lhd_omp_predetermined_mapping
+#define LANG_HOOKS_OMP_REPORT_DECL lhd_pass_through_t
+#define LANG_HOOKS_OMP_DISREGARD_VALUE_EXPR hook_bool_tree_bool_false
+#define LANG_HOOKS_OMP_PRIVATE_DEBUG_CLAUSE hook_bool_tree_bool_false
+#define LANG_HOOKS_OMP_PRIVATE_OUTER_REF hook_bool_tree_false
+#define LANG_HOOKS_OMP_CLAUSE_DEFAULT_CTOR hook_tree_tree_tree_tree_null
+#define LANG_HOOKS_OMP_CLAUSE_COPY_CTOR lhd_omp_assignment
+#define LANG_HOOKS_OMP_CLAUSE_ASSIGN_OP lhd_omp_assignment
+#define LANG_HOOKS_OMP_CLAUSE_LINEAR_CTOR NULL
+#define LANG_HOOKS_OMP_CLAUSE_DTOR hook_tree_tree_tree_null
+#define LANG_HOOKS_OMP_FINISH_CLAUSE lhd_omp_finish_clause
+#define LANG_HOOKS_OMP_ALLOCATABLE_P hook_bool_tree_false
+#define LANG_HOOKS_OMP_SCALAR_P lhd_omp_scalar_p
+#define LANG_HOOKS_OMP_SCALAR_TARGET_P hook_bool_tree_false
+#define LANG_HOOKS_OMP_GET_DECL_INIT lhd_omp_get_decl_init
+#define LANG_HOOKS_OMP_FINISH_DECL_INITS lhd_omp_finish_decl_inits
+
+#define LANG_HOOKS_DECLS { \
+ LANG_HOOKS_GLOBAL_BINDINGS_P, \
+ LANG_HOOKS_PUSHDECL, \
+ LANG_HOOKS_GETDECLS, \
+ LANG_HOOKS_DECL_DWARF_ATTRIBUTE, \
+ LANG_HOOKS_GENERIC_GENERIC_PARAMETER_DECL_P, \
+ LANG_HOOKS_FUNCTION_PARM_EXPANDED_FROM_PACK_P, \
+ LANG_HOOKS_GET_GENERIC_FUNCTION_DECL, \
+ LANG_HOOKS_WARN_UNUSED_GLOBAL_DECL, \
+ LANG_HOOKS_POST_COMPILATION_PARSING_CLEANUPS, \
+ LANG_HOOKS_DECL_OK_FOR_SIBCALL, \
+ LANG_HOOKS_OMP_ARRAY_DATA, \
+ LANG_HOOKS_OMP_ARRAY_SIZE, \
+ LANG_HOOKS_OMP_IS_ALLOCATABLE_OR_PTR, \
+ LANG_HOOKS_OMP_CHECK_OPTIONAL_ARGUMENT, \
+ LANG_HOOKS_OMP_PRIVATIZE_BY_REFERENCE, \
+ LANG_HOOKS_OMP_PREDETERMINED_SHARING, \
+ LANG_HOOKS_OMP_PREDETERMINED_MAPPING, \
+ LANG_HOOKS_OMP_REPORT_DECL, \
+ LANG_HOOKS_OMP_DISREGARD_VALUE_EXPR, \
+ LANG_HOOKS_OMP_PRIVATE_DEBUG_CLAUSE, \
+ LANG_HOOKS_OMP_PRIVATE_OUTER_REF, \
+ LANG_HOOKS_OMP_CLAUSE_DEFAULT_CTOR, \
+ LANG_HOOKS_OMP_CLAUSE_COPY_CTOR, \
+ LANG_HOOKS_OMP_CLAUSE_ASSIGN_OP, \
+ LANG_HOOKS_OMP_CLAUSE_LINEAR_CTOR, \
+ LANG_HOOKS_OMP_CLAUSE_DTOR, \
+ LANG_HOOKS_OMP_FINISH_CLAUSE, \
+ LANG_HOOKS_OMP_ALLOCATABLE_P, \
+ LANG_HOOKS_OMP_SCALAR_P, \
+ LANG_HOOKS_OMP_SCALAR_TARGET_P, \
+ LANG_HOOKS_OMP_GET_DECL_INIT, \
+ LANG_HOOKS_OMP_FINISH_DECL_INITS \
+}
+
+/* LTO hooks. */
+extern void lhd_begin_section (const char *);
+extern void lhd_append_data (const void *, size_t, void *);
+extern void lhd_end_section (void);
+
+#define LANG_HOOKS_BEGIN_SECTION lhd_begin_section
+#define LANG_HOOKS_APPEND_DATA lhd_append_data
+#define LANG_HOOKS_END_SECTION lhd_end_section
+
+#define LANG_HOOKS_LTO { \
+ LANG_HOOKS_BEGIN_SECTION, \
+ LANG_HOOKS_APPEND_DATA, \
+ LANG_HOOKS_END_SECTION \
+}
+
+/* The whole thing. The structure is defined in langhooks.h. */
+#define LANG_HOOKS_INITIALIZER { \
+ LANG_HOOKS_NAME, \
+ LANG_HOOKS_IDENTIFIER_SIZE, \
+ LANG_HOOKS_FREE_LANG_DATA, \
+ LANG_HOOKS_TREE_SIZE, \
+ LANG_HOOKS_OPTION_LANG_MASK, \
+ LANG_HOOKS_INIT_OPTIONS_STRUCT, \
+ LANG_HOOKS_INIT_OPTIONS, \
+ LANG_HOOKS_INITIALIZE_DIAGNOSTICS, \
+ LANG_HOOKS_PREPROCESS_MAIN_FILE, \
+ LANG_HOOKS_PREPROCESS_OPTIONS, \
+ LANG_HOOKS_PREPROCESS_UNDEF, \
+ LANG_HOOKS_PREPROCESS_TOKEN, \
+ LANG_HOOKS_REGISTER_DUMPS, \
+ LANG_HOOKS_COMPLAIN_WRONG_LANG_P, \
+ LANG_HOOKS_HANDLE_OPTION, \
+ LANG_HOOKS_POST_OPTIONS, \
+ LANG_HOOKS_INIT, \
+ LANG_HOOKS_FINISH, \
+ LANG_HOOKS_PARSE_FILE, \
+ LANG_HOOKS_MISSING_NORETURN_OK_P, \
+ LANG_HOOKS_GET_ALIAS_SET, \
+ LANG_HOOKS_FINISH_INCOMPLETE_DECL, \
+ LANG_HOOKS_DUP_LANG_SPECIFIC_DECL, \
+ LANG_HOOKS_SET_DECL_ASSEMBLER_NAME, \
+ LANG_HOOKS_OVERWRITE_DECL_ASSEMBLER_NAME, \
+ LANG_HOOKS_PRINT_STATISTICS, \
+ LANG_HOOKS_PRINT_XNODE, \
+ LANG_HOOKS_PRINT_DECL, \
+ LANG_HOOKS_PRINT_TYPE, \
+ LANG_HOOKS_PRINT_IDENTIFIER, \
+ LANG_HOOKS_DECL_PRINTABLE_NAME, \
+ LANG_HOOKS_DWARF_NAME, \
+ LANG_HOOKS_TYPES_COMPATIBLE_P, \
+ LANG_HOOKS_PRINT_ERROR_FUNCTION, \
+ LANG_HOOKS_TO_TARGET_CHARSET, \
+ LANG_HOOKS_ATTRIBUTE_TABLE, \
+ LANG_HOOKS_COMMON_ATTRIBUTE_TABLE, \
+ LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE, \
+ LANG_HOOKS_TREE_INLINING_INITIALIZER, \
+ LANG_HOOKS_TREE_DUMP_INITIALIZER, \
+ LANG_HOOKS_DECLS, \
+ LANG_HOOKS_FOR_TYPES_INITIALIZER, \
+ LANG_HOOKS_LTO, \
+ LANG_HOOKS_GET_INNERMOST_GENERIC_PARMS, \
+ LANG_HOOKS_GET_INNERMOST_GENERIC_ARGS, \
+ LANG_HOOKS_FUNCTION_PARAMETER_PACK_P, \
+ LANG_HOOKS_GIMPLIFY_EXPR, \
+ LANG_HOOKS_BUILTIN_FUNCTION, \
+ LANG_HOOKS_BUILTIN_FUNCTION_EXT_SCOPE, \
+ LANG_HOOKS_SIMULATE_BUILTIN_FUNCTION_DECL, \
+ LANG_HOOKS_INIT_TS, \
+ LANG_HOOKS_EXPR_TO_DECL, \
+ LANG_HOOKS_EH_PERSONALITY, \
+ LANG_HOOKS_EH_RUNTIME_TYPE, \
+ LANG_HOOKS_EH_PROTECT_CLEANUP_ACTIONS, \
+ LANG_HOOKS_BLOCK_MAY_FALLTHRU, \
+ LANG_HOOKS_EH_USE_CXA_END_CLEANUP, \
+ LANG_HOOKS_DEEP_UNSHARING, \
+ LANG_HOOKS_CUSTOM_FUNCTION_DESCRIPTORS, \
+ LANG_HOOKS_EMITS_BEGIN_STMT, \
+ LANG_HOOKS_RUN_LANG_SELFTESTS, \
+ LANG_HOOKS_GET_SUBSTRING_LOCATION, \
+ LANG_HOOKS_FINALIZE_EARLY_DEBUG, \
+ LANG_HOOKS_GET_SARIF_SOURCE_LANGUAGE \
+}
+
+#endif /* GCC_LANG_HOOKS_DEF_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/langhooks.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/langhooks.h
new file mode 100644
index 0000000..cca7528
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/langhooks.h
@@ -0,0 +1,674 @@
+/* The lang_hooks data structure.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_LANG_HOOKS_H
+#define GCC_LANG_HOOKS_H
+
+/* FIXME: This file should be #include-d after tree.h (for enum tree_code). */
+
+struct diagnostic_info;
+
+struct gimplify_omp_ctx;
+
+struct array_descr_info;
+
+/* A print hook for print_tree (). */
+typedef void (*lang_print_tree_hook) (FILE *, tree, int indent);
+
+enum classify_record
+ { RECORD_IS_STRUCT, RECORD_IS_CLASS, RECORD_IS_INTERFACE };
+
+class substring_loc;
+
+/* The following hooks are documented in langhooks.cc. Must not be
+ NULL. */
+
+struct lang_hooks_for_tree_inlining
+{
+ bool (*var_mod_type_p) (tree, tree);
+};
+
+/* The following hooks are used by tree-dump.cc. */
+
+struct lang_hooks_for_tree_dump
+{
+ /* Dump language-specific parts of tree nodes. Returns nonzero if it
+ does not want the usual dumping of the second argument. */
+ bool (*dump_tree) (void *, tree);
+
+ /* Determine type qualifiers in a language-specific way. */
+ int (*type_quals) (const_tree);
+};
+
+/* Hooks related to types. */
+
+struct lang_hooks_for_types
+{
+ /* Return a new type (with the indicated CODE), doing whatever
+ language-specific processing is required. */
+ tree (*make_type) (enum tree_code);
+
+ /* Make an enum type with the given name and values, associating
+ them all with the given source location. */
+ tree (*simulate_enum_decl) (location_t, const char *, vec<string_int_pair> *);
+
+ /* Do the equivalent of:
+
+ typedef struct NAME { FIELDS; } NAME;
+
+ associating it with location LOC. Return the associated RECORD_TYPE.
+
+ FIELDS is a list of FIELD_DECLs, in layout order. */
+ tree (*simulate_record_decl) (location_t loc, const char *name,
+ array_slice<const tree> fields);
+
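+  /* A hedged sketch (editorial, not part of the upstream header):
+     target code can reach this hook through the global lang_hooks
+     instance, e.g. to materialize a two-field record.  The field types
+     T1/T2 and location LOC are assumed to be in scope:
+
+       tree f1 = build_decl (loc, FIELD_DECL, get_identifier ("a"), t1);
+       tree f2 = build_decl (loc, FIELD_DECL, get_identifier ("b"), t2);
+       tree fields[] = { f1, f2 };
+       tree rec
+         = lang_hooks.types.simulate_record_decl (loc, "pair", fields);
+  */
+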
+ /* Return what kind of RECORD_TYPE this is, mainly for purposes of
+ debug information. If not defined, record types are assumed to
+ be structures. */
+ enum classify_record (*classify_record) (tree);
+
+ /* Given MODE and UNSIGNEDP, return a suitable type-tree with that
+ mode. */
+ tree (*type_for_mode) (machine_mode, int);
+
+ /* Given PRECISION and UNSIGNEDP, return a suitable type-tree for an
+ integer type with at least that precision. */
+ tree (*type_for_size) (unsigned, int);
+
+ /* True if the type is an instantiation of a generic type,
+ e.g. C++ template implicit specializations. */
+ bool (*generic_p) (const_tree);
+
+ /* Returns the TREE_VEC of elements of a given generic argument pack. */
+ tree (*get_argument_pack_elems) (const_tree);
+
+ /* Given a type, apply default promotions to unnamed function
+ arguments and return the new type. Return the same type if no
+ change. Required by any language that supports variadic
+ arguments. The default hook dies. */
+ tree (*type_promotes_to) (tree);
+
+ /* Register TYPE as a builtin type with the indicated NAME. The
+ TYPE is placed in the outermost lexical scope. The semantics
+ should be analogous to:
+
+ typedef TYPE NAME;
+
+ in C. The default hook ignores the declaration. */
+ void (*register_builtin_type) (tree, const char *);
+
+ /* This routine is called in tree.cc to print an error message for
+ invalid use of an incomplete type. VALUE is the expression that
+ was used (or 0 if that isn't known) and TYPE is the type that was
+ invalid. LOC is the location of the use. */
+ void (*incomplete_type_error) (location_t loc, const_tree value,
+ const_tree type);
+
+ /* Called from assign_temp to return the maximum size, if there is one,
+ for a type. */
+ tree (*max_size) (const_tree);
+
+ /* Register language specific type size variables as potentially OpenMP
+ firstprivate variables. */
+ void (*omp_firstprivatize_type_sizes) (struct gimplify_omp_ctx *, tree);
+
+ /* Return TRUE if TYPE1 and TYPE2 are identical for type hashing purposes.
+ Called only after doing all language independent checks.
+ At present, this function is only called when both TYPE1 and TYPE2 are
+ FUNCTION_TYPE or METHOD_TYPE. */
+ bool (*type_hash_eq) (const_tree, const_tree);
+
+ /* If non-NULL, return TYPE1 with any language-specific modifiers copied from
+ TYPE2. */
+ tree (*copy_lang_qualifiers) (const_tree, const_tree);
+
+ /* Return TRUE if TYPE uses a hidden descriptor and fills in information
+ for the debugger about the array bounds, strides, etc. */
+ bool (*get_array_descr_info) (const_tree, struct array_descr_info *);
+
+ /* Fill in information for the debugger about the bounds of TYPE. */
+ void (*get_subrange_bounds) (const_tree, tree *, tree *);
+
+ /* Called on INTEGER_TYPEs. Return NULL_TREE for non-biased types. For
+ biased types, return as an INTEGER_CST node the value that is represented
+ by a physical zero. */
+ tree (*get_type_bias) (const_tree);
+
+ /* A type descriptive of TYPE's complex layout generated to help the
+ debugger to decode variable-length or self-referential constructs.
+ This is only used for the AT_GNAT_descriptive_type DWARF attribute. */
+ tree (*descriptive_type) (const_tree);
+
+ /* If we requested a pointer to a vector, build up the pointers that
+ we stripped off while looking for the inner type. Similarly for
+ return values from functions. The argument TYPE is the top of the
+ chain, and BOTTOM is the new type which we will point to. */
+ tree (*reconstruct_complex_type) (tree, tree);
+
+ /* Returns the tree that represents the underlying data type used to
+ implement the enumeration. The default implementation will just use
+ type_for_size. Used in dwarf2out.cc to add a DW_AT_type base type
+ reference to a DW_TAG_enumeration. */
+ tree (*enum_underlying_base_type) (const_tree);
+
+ /* Return a type to use in the debug info instead of TYPE, or NULL_TREE to
+ keep TYPE. This is useful to keep a single "source type" when the
+ middle-end uses specialized types, for instance constrained discriminated
+ types in Ada. */
+ tree (*get_debug_type) (const_tree);
+
+ /* Return TRUE if TYPE implements a fixed point type and fills in information
+ for the debugger about scale factor, etc. */
+ bool (*get_fixed_point_type_info) (const_tree,
+ struct fixed_point_type_info *);
+
+ /* Returns -1 if dwarf ATTR shouldn't be added for TYPE, or the attribute
+ value otherwise. */
+ int (*type_dwarf_attribute) (const_tree, int);
+
+ /* Returns a tree for the unit size of T excluding tail padding that
+ might be used by objects inheriting from T. */
+ tree (*unit_size_without_reusable_padding) (tree);
+
+ /* Returns type corresponding to FIELD's type when FIELD is a C++ base class
+ i.e., type without virtual base classes or tail padding. Returns
+ NULL_TREE otherwise. */
+ tree (*classtype_as_base) (const_tree);
+};
+
+/* Language hooks related to decls and the symbol table. */
+
+struct lang_hooks_for_decls
+{
+ /* Return true if we are in the global binding level. This hook is really
+ needed only if the language supports variable-sized types at the global
+ level, i.e. declared outside subprograms. */
+ bool (*global_bindings_p) (void);
+
+ /* Function to add a decl to the current scope level. Takes one
+ argument, a decl to add. Returns that decl, or, if the same
+ symbol is already declared, may return a different decl for that
+ name. */
+ tree (*pushdecl) (tree);
+
+ /* Returns the chain of decls so far in the current scope level. */
+ tree (*getdecls) (void);
+
+ /* Returns -1 if dwarf ATTR shouldn't be added for DECL, or the attribute
+ value otherwise. */
+ int (*decl_dwarf_attribute) (const_tree, int);
+
+ /* Returns true if the parameter is a generic parameter decl
+ of a generic type, e.g. a template template parameter for the C++ FE. */
+ bool (*generic_generic_parameter_decl_p) (const_tree);
+
+ /* Determine if a function parameter got expanded from a
+ function parameter pack. */
+ bool (*function_parm_expanded_from_pack_p) (tree, tree);
+
+ /* Returns the generic declaration of a generic function instantiation. */
+ tree (*get_generic_function_decl) (const_tree);
+
+ /* Returns true when we should warn for an unused global DECL.
+ We will already have checked that it has static binding. */
+ bool (*warn_unused_global) (const_tree);
+
+ /* Perform any post compilation-proper parser cleanups and
+ processing. This is currently only needed for the C++ parser,
+ which hopefully can be cleaned up so this hook is no longer
+ necessary. */
+ void (*post_compilation_parsing_cleanups) (void);
+
+ /* True if this decl may be called via a sibcall. */
+ bool (*ok_for_sibcall) (const_tree);
+
+ /* Return a tree for the actual data of an array descriptor - or NULL_TREE
+ if original tree is not an array descriptor. If the second argument
+ is true, only the TREE_TYPE is returned without generating a new tree. */
+ tree (*omp_array_data) (tree, bool);
+
+ /* Return a tree for the size of the array described by the original
+ tree - or NULL_TREE if it cannot be determined. Any statements needed
+ to compute the size are appended to PRE_P. */
+ tree (*omp_array_size) (tree, gimple_seq *pre_p);
+
+ /* True if OpenMP should regard this DECL as being a scalar which has Fortran's
+ allocatable or pointer attribute. */
+ bool (*omp_is_allocatable_or_ptr) (const_tree);
+
+ /* Check whether this DECL belongs to a Fortran optional argument.
+ With 'for_present_check' set to false, decls which are optional parameters
+ themselves are returned as tree - or a NULL_TREE otherwise. Those decls are
+ always pointers. With 'for_present_check' set to true, the decl for
+ checking whether an argument is present is returned; for arguments with
+ value attribute this is the hidden argument and of BOOLEAN_TYPE. If the
+ decl is unrelated to optional arguments, NULL_TREE is returned. */
+ tree (*omp_check_optional_argument) (tree, bool);
+
+ /* True if OpenMP should privatize what this DECL points to rather
+ than the DECL itself. */
+ bool (*omp_privatize_by_reference) (const_tree);
+
+ /* Return sharing kind if OpenMP sharing attribute of DECL is
+ predetermined, OMP_CLAUSE_DEFAULT_UNSPECIFIED otherwise. */
+ enum omp_clause_default_kind (*omp_predetermined_sharing) (tree);
+
+ /* Return mapping kind if OpenMP mapping attribute of DECL is
+ predetermined, OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED otherwise. */
+ enum omp_clause_defaultmap_kind (*omp_predetermined_mapping) (tree);
+
+ /* Return decl that should be reported for DEFAULT(NONE) failure
+ diagnostics. Usually the DECL passed in. */
+ tree (*omp_report_decl) (tree);
+
+ /* Return true if DECL's DECL_VALUE_EXPR (if any) should be
+ disregarded in OpenMP construct, because it is going to be
+ remapped during OpenMP lowering. SHARED is true if DECL
+ is going to be shared, false if it is going to be privatized. */
+ bool (*omp_disregard_value_expr) (tree, bool);
+
+ /* Return true if DECL that is shared iff SHARED is true should
+ be put into OMP_CLAUSE_PRIVATE_DEBUG. */
+ bool (*omp_private_debug_clause) (tree, bool);
+
+ /* Return true if DECL in private clause needs
+ OMP_CLAUSE_PRIVATE_OUTER_REF on the private clause. */
+ bool (*omp_private_outer_ref) (tree);
+
+ /* Build and return code for a default constructor for DECL in
+ response to CLAUSE. OUTER is corresponding outer region's
+ variable if needed. Return NULL if nothing to be done. */
+ tree (*omp_clause_default_ctor) (tree clause, tree decl, tree outer);
+
+ /* Build and return code for a copy constructor from SRC to DST. */
+ tree (*omp_clause_copy_ctor) (tree clause, tree dst, tree src);
+
+ /* Similarly, except use an assignment operator instead. */
+ tree (*omp_clause_assign_op) (tree clause, tree dst, tree src);
+
+ /* Build and return code for a constructor of DST that sets it to
+ SRC + ADD. */
+ tree (*omp_clause_linear_ctor) (tree clause, tree dst, tree src, tree add);
+
+ /* Build and return code destructing DECL. Return NULL if nothing
+ to be done. */
+ tree (*omp_clause_dtor) (tree clause, tree decl);
+
+ /* Do language specific checking on an implicitly determined clause. */
+ void (*omp_finish_clause) (tree clause, gimple_seq *pre_p, bool);
+
+ /* Return true if DECL is an allocatable variable (for the purpose of
+ implicit mapping). */
+ bool (*omp_allocatable_p) (tree decl);
+
+ /* Return true if DECL is a scalar variable (for the purpose of
+ implicit firstprivatization). If 'ptr_ok', pointers and
+ allocatables are also permitted. */
+ bool (*omp_scalar_p) (tree decl, bool ptr_ok);
+
+ /* Return true if DECL is a scalar variable with Fortran target but not
+ allocatable or pointer attribute (for the purpose of implicit mapping). */
+ bool (*omp_scalar_target_p) (tree decl);
+
+ /* Return a pointer to the tree representing the initializer
+ expression for the non-local variable DECL. Return NULL if
+ DECL is not initialized. */
+ tree *(*omp_get_decl_init) (tree decl);
+
+ /* Free any extra memory used to hold initializer information for
+ variable declarations. omp_get_decl_init must not be called
+ after calling this. */
+ void (*omp_finish_decl_inits) (void);
+};
+
+/* Language hooks related to LTO serialization. */
+
+struct lang_hooks_for_lto
+{
+ /* Begin a new LTO section named NAME. */
+ void (*begin_section) (const char *name);
+
+ /* Write DATA of length LEN to the currently open LTO section. BLOCK is a
+ pointer to the dynamically allocated memory containing DATA. The
+ append_data function is responsible for freeing it when it is no longer
+ needed. */
+ void (*append_data) (const void *data, size_t len, void *block);
+
+ /* End the previously begun LTO section. */
+ void (*end_section) (void);
+};
+
+/* Language-specific hooks. See langhooks-def.h for defaults. */
+
+struct lang_hooks
+{
+ /* String identifying the front end and optionally language standard
+ version, e.g. "GNU C++98". */
+ const char *name;
+
+ /* sizeof (struct lang_identifier), so make_node () creates
+ identifier nodes long enough for the language-specific slots. */
+ size_t identifier_size;
+
+ /* Remove any parts of the tree that are used only by the FE. */
+ void (*free_lang_data) (tree);
+
+ /* Determines the size of any language-specific tcc_constant,
+ tcc_exceptional or tcc_type nodes. Since it is called from
+ make_node, the only information available is the tree code.
+ Expected to die on unrecognized codes. */
+ size_t (*tree_size) (enum tree_code);
+
+ /* Return the language mask used for converting argv into a sequence
+ of options. */
+ unsigned int (*option_lang_mask) (void);
+
+ /* Initialize variables in an options structure. */
+ void (*init_options_struct) (struct gcc_options *opts);
+
+ /* After the initialize_diagnostics hook is called, do any simple
+ initialization needed before any calls to handle_option, other
+ than that done by the init_options_struct hook. */
+ void (*init_options) (unsigned int decoded_options_count,
+ struct cl_decoded_option *decoded_options);
+
+ /* Callback used to perform language-specific initialization for the
+ global diagnostic context structure. */
+ void (*initialize_diagnostics) (diagnostic_context *);
+
+ /* Beginning the main source file. */
+ void (*preprocess_main_file) (cpp_reader *, line_maps *,
+ const line_map_ordinary *);
+
+ /* Adjust libcpp options and callbacks. */
+ void (*preprocess_options) (cpp_reader *);
+
+ /* Undefining a macro. */
+ void (*preprocess_undef) (cpp_reader *, location_t, cpp_hashnode *);
+
+ /* Observer for preprocessing stream. */
+ uintptr_t (*preprocess_token) (cpp_reader *, const cpp_token *, uintptr_t);
+ /* Various flags it can return about the token. */
+ enum PT_flags
+ {
+ PT_begin_pragma = 1 << 0
+ };
+
+ /* Register language-specific dumps. */
+ void (*register_dumps) (gcc::dump_manager *);
+
+ /* Return true if a warning should be given about option OPTION,
+ which is for the wrong language, false if it should be quietly
+ ignored. */
+ bool (*complain_wrong_lang_p) (const struct cl_option *option);
+
+ /* Handle the switch CODE, which has real type enum opt_code from
+ options.h. If the switch takes an argument, it is passed in ARG
+ which points to permanent storage. The handler is responsible for
+ checking whether ARG is NULL, which indicates that no argument
+ was in fact supplied. For -f and -W switches, VALUE is 1 or 0
+ for the positive and negative forms respectively. HANDLERS should
+ be passed to any recursive handle_option calls. LOC is the
+ location of the option.
+
+ Return true if the switch is valid, false if invalid. */
+ bool (*handle_option) (size_t code, const char *arg, HOST_WIDE_INT value,
+ int kind, location_t loc,
+ const struct cl_option_handlers *handlers);
+
+ /* Called when all command line options have been parsed to allow
+ further processing and initialization.
+
+ Should return true to indicate that a compiler back-end is
+ not required, such as with the -E option.
+
+ If errorcount is nonzero after this call the compiler exits
+ immediately and the finish hook is not called. */
+ bool (*post_options) (const char **);
+
+ /* Called after post_options to initialize the front end. Return
+ false to indicate that no further compilation should be performed, in
+ which case the finish hook is called immediately. */
+ bool (*init) (void);
+
+ /* Called at the end of compilation, as a finalizer. */
+ void (*finish) (void);
+
+ /* Parses the entire file. */
+ void (*parse_file) (void);
+
+ /* Determines if it's ok for a function to have no noreturn attribute. */
+ bool (*missing_noreturn_ok_p) (tree);
+
+ /* Called to obtain the alias set to be used for an expression or type.
+ Returns -1 if the language does nothing special for it. */
+ alias_set_type (*get_alias_set) (tree);
+
+ /* Function to finish handling an incomplete decl at the end of
+ compilation. The default hook does nothing. */
+ void (*finish_incomplete_decl) (tree);
+
+ /* Replace the DECL_LANG_SPECIFIC data, which may be NULL, of the
+ DECL_NODE with a newly GC-allocated copy. */
+ void (*dup_lang_specific_decl) (tree);
+
+ /* Set the DECL_ASSEMBLER_NAME for a node. If it is the sort of
+ thing that the assembler should talk about, set
+ DECL_ASSEMBLER_NAME to an appropriate IDENTIFIER_NODE.
+ Otherwise, set it to the ERROR_MARK_NODE to ensure that the
+ assembler does not talk about it. */
+ void (*set_decl_assembler_name) (tree);
+
+ /* Overwrite the DECL_ASSEMBLER_NAME for a node. The name is being
+ changed (including to or from NULL_TREE). */
+ void (*overwrite_decl_assembler_name) (tree, tree);
+
+ /* The front end can add its own statistics to -fmem-report with
+ this hook. It should output to stderr. */
+ void (*print_statistics) (void);
+
+ /* Called by print_tree when there is a tree of class tcc_exceptional
+ or tcc_constant that it doesn't know how to display. */
+ lang_print_tree_hook print_xnode;
+
+ /* Called to print language-dependent parts of tcc_decl, tcc_type,
+ and IDENTIFIER_NODE nodes. */
+ lang_print_tree_hook print_decl;
+ lang_print_tree_hook print_type;
+ lang_print_tree_hook print_identifier;
+
+ /* Computes the name to use to print a declaration. DECL is the
+ non-NULL declaration in question. VERBOSITY determines what
+ information will be printed: 0: DECL_NAME, demangled as
+ necessary. 1: and scope information. 2: and any other
+ information that might be interesting, such as function parameter
+ types in C++. The name is in the internal character set and
+ needs to be converted to the locale character set of diagnostics,
+ or to the execution character set for strings such as
+ __PRETTY_FUNCTION__. */
+ const char *(*decl_printable_name) (tree decl, int verbosity);
+
+ /* Computes the dwarf-2/3 name for a tree. VERBOSITY determines what
+ information will be printed: 0: DECL_NAME, demangled as
+ necessary. 1: and scope information. */
+ const char *(*dwarf_name) (tree, int verbosity);
+
+ /* This compares two types for equivalence ("compatible" in C-based languages).
+ This routine should only return 1 if it is sure. It should not be used
+ in contexts where erroneously returning 0 causes problems. */
+ int (*types_compatible_p) (tree x, tree y);
+
+ /* Called by report_error_function to print out function name. */
+ void (*print_error_function) (diagnostic_context *, const char *,
+ struct diagnostic_info *);
+
+ /* Convert a character from the host's to the target's character
+ set. The character should be in what C calls the "basic source
+ character set" (roughly, the set of characters defined by plain
+ old ASCII). The default is to return the character unchanged,
+ which is correct in most circumstances. Note that both argument
+ and result should be sign-extended under -fsigned-char,
+ zero-extended under -fno-signed-char. */
+ HOST_WIDE_INT (*to_target_charset) (HOST_WIDE_INT);
+
+ /* Pointers to machine-independent attribute tables, for front ends
+ using attribs.cc. If one is NULL, it is ignored. Respectively, a
+ table of attributes specific to the language, a table of
+ attributes common to two or more languages (to allow easy
+ sharing), and a table of attributes for checking formats. */
+ const struct attribute_spec *attribute_table;
+ const struct attribute_spec *common_attribute_table;
+ const struct attribute_spec *format_attribute_table;
+
+ struct lang_hooks_for_tree_inlining tree_inlining;
+
+ struct lang_hooks_for_tree_dump tree_dump;
+
+ struct lang_hooks_for_decls decls;
+
+ struct lang_hooks_for_types types;
+
+ struct lang_hooks_for_lto lto;
+
+ /* Returns a TREE_VEC of the generic parameters of an instantiation of
+ a generic type or decl, e.g. C++ template instantiation. If
+ TREE_CHAIN of the return value is set, it is an INTEGER_CST
+ indicating how many of the elements are non-default. */
+ tree (*get_innermost_generic_parms) (const_tree);
+
+ /* Returns the TREE_VEC of arguments of an instantiation
+ of a generic type or decl, e.g. C++ template instantiation. */
+ tree (*get_innermost_generic_args) (const_tree);
+
+ /* Determine if a tree is a function parameter pack. */
+ bool (*function_parameter_pack_p) (const_tree);
+
+ /* Perform language-specific gimplification on the argument. Returns an
+ enum gimplify_status, though we can't see that type here. */
+ int (*gimplify_expr) (tree *, gimple_seq *, gimple_seq *);
+
+ /* Do language-specific processing in the builtin function DECL. */
+ tree (*builtin_function) (tree decl);
+
+ /* Like builtin_function, but make sure the scope is the external scope.
+ This is used to delay putting in back end builtin functions until the ISA
+ that defines the builtin is declared via function specific target options,
+ which can save memory for machines like the x86_64 that have multiple
+ ISAs. If this points to the same function as builtin_function, the
+ backend must add all of the builtins at program initialization time. */
+ tree (*builtin_function_ext_scope) (tree decl);
+
+ /* Do language-specific processing for target-specific built-in
+ function DECL, so that it is defined in the global scope (only)
+ and is available without needing to be explicitly declared.
+
+ This is intended for targets that want to inject declarations of
+ built-in functions into the source language (such as in response
+ to a pragma) rather than providing them in the source language itself. */
+ tree (*simulate_builtin_function_decl) (tree decl);
+
+ /* Used to set up the tree_contains_structure array for a frontend. */
+ void (*init_ts) (void);
+
+ /* Called by recompute_tree_invariant_for_addr_expr to go from EXPR
+ to a contained expression or DECL, possibly updating *TC or *SE
+ if in the process TREE_CONSTANT or TREE_SIDE_EFFECTS need updating. */
+ tree (*expr_to_decl) (tree expr, bool *tc, bool *se);
+
+ /* The EH personality function decl. */
+ tree (*eh_personality) (void);
+
+ /* Map a type to a runtime object to match type. */
+ tree (*eh_runtime_type) (tree);
+
+ /* If non-NULL, this is a function that returns a function decl to be
+ executed if an unhandled exception is propagated out of a cleanup
+ region. For example, in C++, an exception thrown by a destructor
+ during stack unwinding is required to result in a call to
+ `std::terminate', so the C++ version of this function returns a
+ FUNCTION_DECL for `std::terminate'. */
+ tree (*eh_protect_cleanup_actions) (void);
+
+ /* Return true if a stmt can fallthru. Used by block_may_fallthru
+ to possibly handle language trees. */
+ bool (*block_may_fallthru) (const_tree);
+
+ /* True if this language uses __cxa_end_cleanup when the ARM EABI
+ is enabled. */
+ bool eh_use_cxa_end_cleanup;
+
+ /* True if this language requires deep unsharing of tree nodes prior to
+ gimplification. */
+ bool deep_unsharing;
+
+ /* True if this language may use custom descriptors for nested functions
+ instead of trampolines. */
+ bool custom_function_descriptors;
+
+ /* True if this language emits begin stmt notes. */
+ bool emits_begin_stmt;
+
+ /* Run all lang-specific selftests. */
+ void (*run_lang_selftests) (void);
+
+ /* Attempt to determine the source location of the substring.
+ If successful, return NULL and write the source location to *OUT_LOC.
+ Otherwise return an error message. Error messages are intended
+ for GCC developers (to help debugging) rather than for end-users. */
+ const char *(*get_substring_location) (const substring_loc &,
+ location_t *out_loc);
+
+ /* Invoked before the early_finish debug hook is invoked. */
+ void (*finalize_early_debug) (void);
+
+ /* Get a value for the SARIF v2.1.0 "artifact.sourceLanguage" property
+ for FILENAME, or return NULL.
+ See SARIF v2.1.0 Appendix J for suggested values for common programming
+ languages. */
+ const char *(*get_sarif_source_language) (const char *filename);
+
+ /* Whenever you add entries here, make sure you adjust langhooks-def.h
+ and langhooks.cc accordingly. */
+};
+
+/* Each front end provides its own. */
+extern struct lang_hooks lang_hooks;
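+
+/* Illustrative sketch (editor's note, not part of the original header):
+   a front end typically overrides selected LANG_HOOKS_* defaults from
+   langhooks-def.h and then instantiates the table; 'mylang_init' is a
+   hypothetical function:
+
+     #undef LANG_HOOKS_NAME
+     #define LANG_HOOKS_NAME "GNU MyLang"
+     #undef LANG_HOOKS_INIT
+     #define LANG_HOOKS_INIT mylang_init
+
+     struct lang_hooks lang_hooks = LANG_HOOKS_INITIALIZER;  */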
+
+extern tree add_builtin_function (const char *name, tree type,
+ int function_code, enum built_in_class cl,
+ const char *library_name,
+ tree attrs);
+
+extern tree add_builtin_function_ext_scope (const char *name, tree type,
+ int function_code,
+ enum built_in_class cl,
+ const char *library_name,
+ tree attrs);
+extern tree simulate_builtin_function_decl (location_t, const char *, tree,
+ int, const char *, tree);
+extern tree add_builtin_type (const char *name, tree type);
+
+/* Language helper functions. */
+
+extern bool lang_GNU_C (void);
+extern bool lang_GNU_CXX (void);
+extern bool lang_GNU_Fortran (void);
+extern bool lang_GNU_OBJC (void);
+
+#endif /* GCC_LANG_HOOKS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lcm.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lcm.h
new file mode 100644
index 0000000..e083393
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lcm.h
@@ -0,0 +1,34 @@
+/* Generic partial redundancy elimination with lazy code motion header file.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_LCM_H
+#define GCC_LCM_H
+
+extern struct edge_list *pre_edge_lcm_avs (int, sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap *,
+ sbitmap *, sbitmap **, sbitmap **);
+extern struct edge_list *pre_edge_lcm (int, sbitmap *, sbitmap *,
+ sbitmap *, sbitmap *, sbitmap **,
+ sbitmap **);
+extern void compute_available (sbitmap *, sbitmap *, sbitmap *, sbitmap *);
+extern struct edge_list *pre_edge_rev_lcm (int, sbitmap *,
+ sbitmap *, sbitmap *,
+ sbitmap *, sbitmap **,
+ sbitmap **);
+#endif /* GCC_LCM_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/libfuncs.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/libfuncs.h
new file mode 100644
index 0000000..6f1bc70
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/libfuncs.h
@@ -0,0 +1,84 @@
+/* Definitions for code generation pass of GNU compiler.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_LIBFUNCS_H
+#define GCC_LIBFUNCS_H
+
+
+/* Enumeration of indexes into libfunc_table. */
+enum libfunc_index
+{
+ LTI_unwind_sjlj_register,
+ LTI_unwind_sjlj_unregister,
+ LTI_synchronize,
+ LTI_MAX
+};
+
+/* Information about an optab-related libfunc. The op field is logically
+ an enum optab_d, and the mode fields are logically machine_mode.
+ However, in the absence of forward-declared enums, there's no practical
+ benefit of pulling in the defining headers.
+
+ We use the same hashtable for normal optabs and conversion optabs. In
+ the first case mode2 is forced to VOIDmode. */
+
+struct GTY((for_user)) libfunc_entry {
+ int op, mode1, mode2;
+ rtx libfunc;
+};
+
+/* Descriptor for libfunc_entry. */
+
+struct libfunc_hasher : ggc_ptr_hash<libfunc_entry>
+{
+ static hashval_t hash (libfunc_entry *);
+ static bool equal (libfunc_entry *, libfunc_entry *);
+};
+
+/* Target-dependent globals. */
+struct GTY(()) target_libfuncs {
+ /* SYMBOL_REF rtx's for the library functions that are called
+ implicitly and not via optabs. */
+ rtx x_libfunc_table[LTI_MAX];
+
+ /* Hash table used to convert declarations into nodes. */
+ hash_table<libfunc_hasher> *GTY(()) x_libfunc_hash;
+};
+
+extern GTY(()) struct target_libfuncs default_target_libfuncs;
+#if SWITCHABLE_TARGET
+extern struct target_libfuncs *this_target_libfuncs;
+#else
+#define this_target_libfuncs (&default_target_libfuncs)
+#endif
+
+#define libfunc_table \
+ (this_target_libfuncs->x_libfunc_table)
+
+/* Accessor macros for libfunc_table. */
+
+#define unwind_sjlj_register_libfunc (libfunc_table[LTI_unwind_sjlj_register])
+#define unwind_sjlj_unregister_libfunc \
+ (libfunc_table[LTI_unwind_sjlj_unregister])
+#define synchronize_libfunc (libfunc_table[LTI_synchronize])
+
+/* In explow.cc */
+extern void set_stack_check_libfunc (const char *);
+
+#endif /* GCC_LIBFUNCS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/libiberty.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/libiberty.h
new file mode 100644
index 0000000..1d5c779
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/libiberty.h
@@ -0,0 +1,761 @@
+/* Function declarations for libiberty.
+
+ Copyright (C) 1997-2023 Free Software Foundation, Inc.
+
+ Note - certain prototypes declared in this header file are for
+ functions whose implementation copyright does not belong to the
+ FSF. Those prototypes are present in this file for reference
+ purposes only and their presence in this file should not be construed
+ as an indication of ownership by the FSF of the implementation of
+ those functions in any way or form whatsoever.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor,
+ Boston, MA 02110-1301, USA.
+
+ Written by Cygnus Support, 1994.
+
+ The libiberty library provides a number of functions which are
+ missing on some operating systems. We do not declare those here,
+ to avoid conflicts with the system header files on operating
+ systems that do support those functions. In this file we only
+ declare those functions which are specific to libiberty. */
+
+#ifndef LIBIBERTY_H
+#define LIBIBERTY_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "ansidecl.h"
+
+/* Get a definition for size_t. */
+#include <stddef.h>
+/* Get a definition for va_list. */
+#include <stdarg.h>
+
+#include <stdio.h>
+
+ /* If the OS supports it, ensure that the supplied stream is set up to
+ avoid any multi-threaded locking. Otherwise leave the FILE pointer
+ unchanged. If the stream is NULL do nothing. */
+
+extern void unlock_stream (FILE *);
+
+/* If the OS supports it, ensure that the standard I/O streams, stdin,
+ stdout and stderr are set up to avoid any multi-threaded locking.
+ Otherwise do nothing. */
+
+extern void unlock_std_streams (void);
+
+/* Open and return a FILE pointer. If the OS supports it, ensure that
+ the stream is set up to avoid any multi-threaded locking. Otherwise
+ return the FILE pointer unchanged. */
+
+extern FILE *fopen_unlocked (const char *, const char *);
+extern FILE *fdopen_unlocked (int, const char *);
+extern FILE *freopen_unlocked (const char *, const char *, FILE *);
+
+/* Build an argument vector from a string. Allocates memory using
+ malloc. Use freeargv to free the vector. */
+
+extern char **buildargv (const char *) ATTRIBUTE_MALLOC;
+
+/* Free a vector returned by buildargv. */
+
+extern void freeargv (char **);
+
+/* Duplicate an argument vector. Allocates memory using malloc. Use
+ freeargv to free the vector. */
+
+extern char **dupargv (char * const *) ATTRIBUTE_MALLOC;
+
+/* Expand "@file" arguments in argv. */
+
+extern void expandargv (int *, char ***);
+
+/* Write argv to an @-file, inserting necessary quoting. */
+
+extern int writeargv (char * const *, FILE *);
+
+/* Return the number of elements in argv. */
+
+extern int countargv (char * const *);
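+
+/* Usage sketch (editor's illustration, assuming a hosted environment):
+
+     char **av = buildargv ("gcc -O2 -c foo.c");
+     if (av != NULL)
+       {
+         int n = countargv (av);   // 4
+         freeargv (av);            // frees the vector and its strings
+       }
+*/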
+
+/* Return the last component of a path name. Note that we can't use a
+ prototype here because the parameter is declared inconsistently
+ across different systems, sometimes as "char *" and sometimes as
+ "const char *" */
+
+/* HAVE_DECL_* is a three-state macro: undefined, 0 or 1. If it is
+ undefined, we haven't run the autoconf check so provide the
+ declaration without arguments. If it is 0, we checked and failed
+ to find the declaration so provide a fully prototyped one. If it
+ is 1, we found it so don't provide any declaration at all. */
+#if !HAVE_DECL_BASENAME
+#if defined (__GNU_LIBRARY__ ) || defined (__linux__) \
+ || defined (__FreeBSD__) || defined (__OpenBSD__) || defined (__NetBSD__) \
+ || defined (__CYGWIN__) || defined (__CYGWIN32__) || defined (__MINGW32__) \
+ || defined (__DragonFly__) || defined (HAVE_DECL_BASENAME)
+extern char *basename (const char *) ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_NONNULL(1);
+#else
+/* Do not allow basename to be used if there is no prototype seen. We
+ either need to use the above prototype or have one from
+ autoconf which would result in HAVE_DECL_BASENAME being set. */
+#define basename basename_cannot_be_used_without_a_prototype
+#endif
+#endif
+
+/* A well-defined basename () that is always compiled in. */
+
+extern const char *lbasename (const char *) ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_NONNULL(1);
+
+/* Same, but assumes DOS semantics (drive name, backslash is also a
+ dir separator) regardless of host. */
+
+extern const char *dos_lbasename (const char *) ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_NONNULL(1);
+
+/* Same, but assumes Unix semantics (absolute paths always start with
+ a slash, only forward slash is accepted as dir separator)
+ regardless of host. */
+
+extern const char *unix_lbasename (const char *) ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_NONNULL(1);
+
+/* A well-defined realpath () that is always compiled in. */
+
+extern char *lrealpath (const char *);
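+
+/* Usage sketch (editor's illustration):
+
+     const char *base = lbasename ("/usr/lib/gcc/libgcc.a");  // "libgcc.a"
+     char *real = lrealpath ("../foo.c");   // malloc'd; caller must free
+     free (real);
+*/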
+
+/* Return true when FD file descriptor exists. */
+
+extern int is_valid_fd (int fd);
+
+/* Concatenate an arbitrary number of strings. You must pass NULL as
+ the last argument of this function, to terminate the list of
+ strings. Allocates memory using xmalloc. */
+
+extern char *concat (const char *, ...) ATTRIBUTE_MALLOC ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_SENTINEL;
+
+/* Concatenate an arbitrary number of strings. You must pass NULL as
+ the last argument of this function, to terminate the list of
+ strings. Allocates memory using xmalloc. The first argument is
+ not one of the strings to be concatenated, but if not NULL is a
+ pointer to be freed after the new string is created, similar to the
+ way xrealloc works. */
+
+extern char *reconcat (char *, const char *, ...) ATTRIBUTE_MALLOC ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_SENTINEL;
+
+/* Determine the length of concatenating an arbitrary number of
+ strings. You must pass NULL as the last argument of this function,
+ to terminate the list of strings. */
+
+extern unsigned long concat_length (const char *, ...) ATTRIBUTE_SENTINEL;
+
+/* Concatenate an arbitrary number of strings into a SUPPLIED area of
+ memory. You must pass NULL as the last argument of this function,
+ to terminate the list of strings. The supplied memory is assumed
+ to be large enough. */
+
+extern char *concat_copy (char *, const char *, ...) ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_NONNULL(1) ATTRIBUTE_SENTINEL;
+
+/* Concatenate an arbitrary number of strings into a GLOBAL area of
+ memory. You must pass NULL as the last argument of this function,
+ to terminate the list of strings. The supplied memory is assumed
+ to be large enough. */
+
+extern char *concat_copy2 (const char *, ...) ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_SENTINEL;
+
+/* This is the global area used by concat_copy2. */
+
+extern char *libiberty_concat_ptr;
+
+/* Concatenate an arbitrary number of strings. You must pass NULL as
+ the last argument of this function, to terminate the list of
+ strings. Allocates memory using alloca. The arguments are
+ evaluated twice! */
+#define ACONCAT(ACONCAT_PARAMS) \
+ (libiberty_concat_ptr = (char *) alloca (concat_length ACONCAT_PARAMS + 1), \
+ concat_copy2 ACONCAT_PARAMS)
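+
+/* Usage sketch (editor's illustration; 'name' is a hypothetical variable):
+
+     char *heap = concat ("arm-none-eabi", "-", "gcc", NULL);  // xmalloc'd
+     free (heap);
+     char *stack = ACONCAT (("lib", name, ".a", NULL));
+     // ACONCAT allocates on the stack via alloca, so nothing is freed;
+     // note the arguments are evaluated twice.
+*/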
+
+/* Check whether two file descriptors refer to the same file. */
+
+extern int fdmatch (int fd1, int fd2);
+
+/* Return the position of the first bit set in the argument. */
+/* Prototypes vary from system to system, so we only provide a
+ prototype on systems where we know that we need it. */
+#if defined (HAVE_DECL_FFS) && !HAVE_DECL_FFS
+extern int ffs(int);
+#endif
+
+/* Get the working directory. The result is cached, so don't call
+ chdir() between calls to getpwd(). */
+
+extern char * getpwd (void);
+
+/* Get the current time. */
+/* Prototypes vary from system to system, so we only provide a
+ prototype on systems where we know that we need it. */
+#ifdef __MINGW32__
+/* Forward declaration to avoid #include <sys/time.h>. */
+struct timeval;
+extern int gettimeofday (struct timeval *, void *);
+#endif
+
+/* Get the amount of time the process has run, in microseconds. */
+
+extern long get_run_time (void);
+
+/* Generate a relocated path to some installation directory. Allocates
+ return value using malloc. */
+
+extern char *make_relative_prefix (const char *, const char *,
+ const char *) ATTRIBUTE_MALLOC;
+
+/* Generate a relocated path to some installation directory without
+ attempting to follow any soft links. Allocates
+ return value using malloc. */
+
+extern char *make_relative_prefix_ignore_links (const char *, const char *,
+ const char *) ATTRIBUTE_MALLOC;
+
+/* Returns a pointer to a directory path suitable for creating temporary
+ files in. */
+
+extern const char *choose_tmpdir (void) ATTRIBUTE_RETURNS_NONNULL;
+
+/* Choose a temporary directory to use for scratch files. */
+
+extern char *choose_temp_base (void) ATTRIBUTE_MALLOC ATTRIBUTE_RETURNS_NONNULL;
+
+/* Return a temporary file name or NULL if unable to create one. */
+
+extern char *make_temp_file (const char *) ATTRIBUTE_MALLOC;
+
+/* Return a temporary file name with given PREFIX and SUFFIX
+ or NULL if unable to create one. */
+
+extern char *make_temp_file_with_prefix (const char *, const char *) ATTRIBUTE_MALLOC;
+
+/* Remove a link to a file unless it is special. */
+
+extern int unlink_if_ordinary (const char *);
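+
+/* Usage sketch (editor's illustration):
+
+     char *path = make_temp_file (".o");   // NULL if creation failed
+     if (path != NULL)
+       {
+         // ... write the scratch object file to 'path' ...
+         unlink_if_ordinary (path);
+         free (path);
+       }
+*/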
+
+/* Allocate memory filled with spaces. Allocates using malloc. */
+
+extern const char *spaces (int count);
+
+/* Return the maximum error number for which strerror will return a
+ string. */
+
+extern int errno_max (void);
+
+/* Return the name of an errno value (e.g., strerrno (EINVAL) returns
+ "EINVAL"). */
+
+extern const char *strerrno (int);
+
+/* Given the name of an errno value, return the value. */
+
+extern int strtoerrno (const char *);
+
+/* ANSI's strerror(), but more robust. */
+
+extern char *xstrerror (int) ATTRIBUTE_RETURNS_NONNULL;
+
+/* Return the maximum signal number for which strsignal will return a
+ string. */
+
+extern int signo_max (void);
+
+/* Return a signal message string for a signal number
+ (e.g., strsignal (SIGHUP) returns something like "Hangup"). */
+/* This is commented out as it can conflict with one in system headers.
+ We still document its existence though. */
+
+/*extern const char *strsignal (int);*/
+
+/* Return the name of a signal number (e.g., strsigno (SIGHUP) returns
+ "SIGHUP"). */
+
+extern const char *strsigno (int);
+
+/* Given the name of a signal, return its number. */
+
+extern int strtosigno (const char *);
+
+/* Register a function to be run by xexit. Returns 0 on success. */
+
+extern int xatexit (void (*fn) (void));
+
+/* Exit, calling all the functions registered with xatexit. */
+
+extern void xexit (int status) ATTRIBUTE_NORETURN;
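+
+/* Usage sketch (editor's illustration; 'remove_temps' is hypothetical):
+
+     static void remove_temps (void) { ... }
+     ...
+     if (xatexit (remove_temps) != 0)
+       fprintf (stderr, "could not register exit handler\n");
+     xexit (1);   // runs remove_temps, then exits with status 1
+*/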
+
+/* Set the program name used by xmalloc. */
+
+extern void xmalloc_set_program_name (const char *);
+
+/* Report an allocation failure. */
+extern void xmalloc_failed (size_t) ATTRIBUTE_NORETURN;
+
+/* Allocate memory without fail. If malloc fails, this will print a
+ message to stderr (using the name set by xmalloc_set_program_name,
+ if any) and then call xexit. */
+
+extern void *xmalloc (size_t) ATTRIBUTE_MALLOC ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_RESULT_SIZE_1 ATTRIBUTE_WARN_UNUSED_RESULT;
+
+/* Reallocate memory without fail. This works like xmalloc. Note,
+ realloc type functions are not suitable for attribute malloc since
+ they may return the same address across multiple calls. */
+
+extern void *xrealloc (void *, size_t) ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_RESULT_SIZE_2 ATTRIBUTE_WARN_UNUSED_RESULT;
+
+/* Allocate memory without fail and set it to zero. This works like
+ xmalloc. */
+
+extern void *xcalloc (size_t, size_t) ATTRIBUTE_MALLOC ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_RESULT_SIZE_1_2 ATTRIBUTE_WARN_UNUSED_RESULT;
+
+/* Copy a string into a memory buffer without fail. */
+
+extern char *xstrdup (const char *) ATTRIBUTE_MALLOC ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_WARN_UNUSED_RESULT;
+
+/* Copy at most N characters from string into a buffer without fail. */
+
+extern char *xstrndup (const char *, size_t) ATTRIBUTE_MALLOC ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_WARN_UNUSED_RESULT;
+
+/* Copy an existing memory buffer to a new memory buffer without fail. */
+
+extern void *xmemdup (const void *, size_t, size_t) ATTRIBUTE_MALLOC ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_WARN_UNUSED_RESULT;
+
+/* Physical memory routines. Return values are in BYTES. */
+extern double physmem_total (void);
+extern double physmem_available (void);
+
+/* Compute the 32-bit CRC of a block of memory. */
+extern unsigned int xcrc32 (const unsigned char *, int, unsigned int);
+
+/* These macros provide a K&R/C89/C++-friendly way of allocating structures
+ with nice encapsulation. The XDELETE*() macros are technically
+ superfluous, but provided here for symmetry. Using them consistently
+ makes it easier to update client code to use different allocators such
+ as new/delete and new[]/delete[]. */
+
+/* Scalar allocators. */
+
+#define XALLOCA(T) ((T *) alloca (sizeof (T)))
+#define XNEW(T) ((T *) xmalloc (sizeof (T)))
+#define XCNEW(T) ((T *) xcalloc (1, sizeof (T)))
+#define XDUP(T, P) ((T *) xmemdup ((P), sizeof (T), sizeof (T)))
+#define XDELETE(P) free ((void*) (P))
+
+/* Array allocators. */
+
+#define XALLOCAVEC(T, N) ((T *) alloca (sizeof (T) * (N)))
+#define XNEWVEC(T, N) ((T *) xmalloc (sizeof (T) * (N)))
+#define XCNEWVEC(T, N) ((T *) xcalloc ((N), sizeof (T)))
+#define XDUPVEC(T, P, N) ((T *) xmemdup ((P), sizeof (T) * (N), sizeof (T) * (N)))
+#define XRESIZEVEC(T, P, N) ((T *) xrealloc ((void *) (P), sizeof (T) * (N)))
+#define XDELETEVEC(P) free ((void*) (P))
+
+/* Allocators for variable-sized structures and raw buffers. */
+
+#define XALLOCAVAR(T, S) ((T *) alloca ((S)))
+#define XNEWVAR(T, S) ((T *) xmalloc ((S)))
+#define XCNEWVAR(T, S) ((T *) xcalloc (1, (S)))
+#define XDUPVAR(T, P, S1, S2) ((T *) xmemdup ((P), (S1), (S2)))
+#define XRESIZEVAR(T, P, S) ((T *) xrealloc ((P), (S)))
+
+/* Type-safe obstack allocator. */
+
+#define XOBNEW(O, T) ((T *) obstack_alloc ((O), sizeof (T)))
+#define XOBNEWVEC(O, T, N) ((T *) obstack_alloc ((O), sizeof (T) * (N)))
+#define XOBNEWVAR(O, T, S) ((T *) obstack_alloc ((O), (S)))
+#define XOBFINISH(O, T) ((T) obstack_finish ((O)))
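+
+/* Usage sketch for the scalar/array allocators (editor's illustration):
+
+     int *v = XCNEWVEC (int, 16);    // zero-filled array of 16 ints
+     v = XRESIZEVEC (int, v, 32);    // grow to 32; old contents kept
+     XDELETEVEC (v);
+     char *c = XNEW (char);          // single uninitialized object
+     XDELETE (c);
+*/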
+
+/* hex character manipulation routines */
+
+#define _hex_array_size 256
+#define _hex_bad 99
+extern const unsigned char _hex_value[_hex_array_size];
+extern void hex_init (void);
+#define hex_p(c) (hex_value (c) != _hex_bad)
+/* If you change this, note well: Some code relies on side effects in
+ the argument being performed exactly once. */
+#define hex_value(c) ((unsigned int) _hex_value[(unsigned char) (c)])
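+
+/* Usage sketch (editor's illustration): parse a hex string after one
+   call to hex_init ():
+
+     hex_init ();
+     unsigned int v = 0;
+     for (const char *p = "1f40"; *p != '\0' && hex_p (*p); p++)
+       v = (v << 4) + hex_value (*p);   // v == 0x1f40
+*/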
+
+/* Flags for pex_init. These are bits to be or'ed together. */
+
+/* Record subprocess times, if possible. */
+#define PEX_RECORD_TIMES 0x1
+
+/* Use pipes for communication between processes, if possible. */
+#define PEX_USE_PIPES 0x2
+
+/* Save files used for communication between processes. */
+#define PEX_SAVE_TEMPS 0x4
+
+/* Max number of alloca bytes per call before we must switch to malloc.
+
+ ?? Swiped from gnulib's regex_internal.h header. Is this actually
+ the case? This number seems arbitrary, though sane.
+
+ The OS usually guarantees only one guard page at the bottom of the stack,
+ and a page size can be as small as 4096 bytes. So we cannot safely
+ allocate anything larger than 4096 bytes. Also care for the possibility
+ of a few compiler-allocated temporary stack slots. */
+#define MAX_ALLOCA_SIZE 4032
+
+/* Prepare to execute one or more programs, with standard output of
+ each program fed to standard input of the next.
+ FLAGS As above.
+ PNAME The name of the program to report in error messages.
+ TEMPBASE A base name to use for temporary files; may be NULL to
+ use a random name.
+ Returns NULL on error. */
+
+extern struct pex_obj *pex_init (int flags, const char *pname,
+ const char *tempbase) ATTRIBUTE_RETURNS_NONNULL;
+
+/* Flags for pex_run. These are bits to be or'ed together. */
+
+/* Last program in pipeline. Standard output of program goes to
+ OUTNAME, or, if OUTNAME is NULL, to standard output of caller. Do
+ not set this if you want to call pex_read_output. After this is
+ set, pex_run may no longer be called with the same struct
+ pex_obj. */
+#define PEX_LAST 0x1
+
+/* Search for program in executable search path. */
+#define PEX_SEARCH 0x2
+
+/* OUTNAME is a suffix. */
+#define PEX_SUFFIX 0x4
+
+/* Send program's standard error to standard output. */
+#define PEX_STDERR_TO_STDOUT 0x8
+
+/* Input file should be opened in binary mode. This flag is ignored
+ on Unix. */
+#define PEX_BINARY_INPUT 0x10
+
+/* Output file should be opened in binary mode. This flag is ignored
+ on Unix. For proper behaviour PEX_BINARY_INPUT and
+ PEX_BINARY_OUTPUT have to match appropriately--i.e., a call using
+ PEX_BINARY_OUTPUT should be followed by a call using
+ PEX_BINARY_INPUT. */
+#define PEX_BINARY_OUTPUT 0x20
+
+/* Capture stderr to a pipe. The output can be read by
+ calling pex_read_err and reading from the returned
+ FILE object. This flag may be specified only for
+ the last program in a pipeline.
+
+ This flag is supported only on Unix and Windows. */
+#define PEX_STDERR_TO_PIPE 0x40
+
+/* Capture stderr in binary mode. This flag is ignored
+ on Unix. */
+#define PEX_BINARY_ERROR 0x80
+
+/* Append stdout to existing file instead of truncating it. */
+#define PEX_STDOUT_APPEND 0x100
+
+ /* The same as PEX_STDOUT_APPEND, but for STDERR. */
+#define PEX_STDERR_APPEND 0x200
+
+/* Execute one program. Returns NULL on success. On error returns an
+ error string (typically just the name of a system call); the error
+ string is statically allocated.
+
+ OBJ Returned by pex_init.
+
+ FLAGS As above.
+
+ EXECUTABLE The program to execute.
+
+ ARGV NULL terminated array of arguments to pass to the program.
+
+ OUTNAME Sets the output file name as follows:
+
+ PEX_SUFFIX set (OUTNAME may not be NULL):
+ TEMPBASE parameter to pex_init not NULL:
+ Output file name is the concatenation of TEMPBASE
+ and OUTNAME.
+ TEMPBASE is NULL:
+ Output file name is a random file name ending in
+ OUTNAME.
+ PEX_SUFFIX not set:
+ OUTNAME not NULL:
+ Output file name is OUTNAME.
+ OUTNAME NULL, TEMPBASE not NULL:
+ Output file name is randomly chosen using
+ TEMPBASE.
+ OUTNAME NULL, TEMPBASE NULL:
+ Output file name is randomly chosen.
+
+ If PEX_LAST is not set, the output file name is the
+ name to use for a temporary file holding stdout, if
+ any (there will not be a file if PEX_USE_PIPES is set
+ and the system supports pipes). If a file is used, it
+ will be removed when no longer needed unless
+ PEX_SAVE_TEMPS is set.
+
+ If PEX_LAST is set, and OUTNAME is not NULL, standard
+ output is written to the output file name. The file
+ will not be removed. If PEX_LAST and PEX_SUFFIX are
+ both set, TEMPBASE may not be NULL.
+
+ ERRNAME If not NULL, this is the name of a file to which
+ standard error is written. If NULL, standard error of
+ the program is standard error of the caller.
+
+ ERR On an error return, *ERR is set to an errno value, or
+ to 0 if there is no relevant errno.
+*/
+
+extern const char *pex_run (struct pex_obj *obj, int flags,
+ const char *executable, char * const *argv,
+ const char *outname, const char *errname,
+ int *err);
+
+/* As for pex_run (), but takes an extra parameter to enable the
+ environment for the child process to be specified.
+
+ ENV The environment for the child process, specified as
+ an array of character pointers. Each element of the
+ array should point to a string of the form VAR=VALUE,
+ with the exception of the last element which must be
+ a null pointer.
+*/
+
+extern const char *pex_run_in_environment (struct pex_obj *obj, int flags,
+ const char *executable,
+ char * const *argv,
+ char * const *env,
+ const char *outname,
+ const char *errname, int *err);
+
+/* Return a stream for a temporary file to pass to the first program
+ in the pipeline as input. The file name is chosen as for pex_run.
+ pex_run closes the file automatically; don't close it yourself. */
+
+extern FILE *pex_input_file (struct pex_obj *obj, int flags,
+ const char *in_name);
+
+/* Return a stream for a pipe connected to the standard input of the
+ first program in the pipeline. You must have passed
+ `PEX_USE_PIPES' to `pex_init'. Close the returned stream
+ yourself. */
+
+extern FILE *pex_input_pipe (struct pex_obj *obj, int binary);
+
+/* Read the standard output of the last program to be executed.
+ pex_run cannot be called after this. BINARY should be non-zero if
+ the file should be opened in binary mode; this is ignored on Unix.
+ Returns NULL on error. Don't call fclose on the returned FILE; it
+ will be closed by pex_free. */
+
+extern FILE *pex_read_output (struct pex_obj *, int binary);
+
+/* Read the standard error of the last program to be executed.
+ pex_run cannot be called after this. BINARY should be non-zero if
+ the file should be opened in binary mode; this is ignored on Unix.
+ Returns NULL on error. Don't call fclose on the returned FILE; it
+ will be closed by pex_free. */
+
+extern FILE *pex_read_err (struct pex_obj *, int binary);
+
+/* Return exit status of all programs in VECTOR. COUNT indicates the
+ size of VECTOR. The status codes in the vector are in the order of
+ the calls to pex_run. Returns 0 on error, 1 on success. */
+
+extern int pex_get_status (struct pex_obj *, int count, int *vector);
+
+/* Return times of all programs in VECTOR. COUNT indicates the size
+ of VECTOR. struct pex_time is really just struct timeval, but that
+ is not portable to all systems. Returns 0 on error, 1 on
+ success. */
+
+struct pex_time
+{
+ unsigned long user_seconds;
+ unsigned long user_microseconds;
+ unsigned long system_seconds;
+ unsigned long system_microseconds;
+};
+
+extern int pex_get_times (struct pex_obj *, int count,
+ struct pex_time *vector);
+
+/* Clean up a pex_obj. If you have not called pex_get_times or
+ pex_get_status, this will try to kill the subprocesses. */
+
+extern void pex_free (struct pex_obj *);
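+
+/* Pipeline sketch (editor's illustration; the argument vectors are
+   hypothetical):
+
+     char *cc1_argv[] = { "cc1", "foo.i", NULL };
+     char *as_argv[] = { "as", "-o", "foo.o", NULL };
+     int err, status[2];
+     struct pex_obj *obj = pex_init (PEX_USE_PIPES, "driver", NULL);
+     const char *errmsg = pex_run (obj, PEX_SEARCH, "cc1", cc1_argv,
+                                   NULL, NULL, &err);
+     if (errmsg == NULL)
+       errmsg = pex_run (obj, PEX_LAST | PEX_SEARCH, "as", as_argv,
+                         NULL, NULL, &err);
+     if (errmsg == NULL && pex_get_status (obj, 2, status))
+       ;   // status[0] and status[1] now hold the exit statuses
+     pex_free (obj);
+*/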
+
+/* Just execute one program. Return value is as for pex_run.
+ FLAGS Combination of PEX_SEARCH and PEX_STDERR_TO_STDOUT.
+ EXECUTABLE As for pex_run.
+ ARGV As for pex_run.
+ PNAME As for pex_init.
+ OUTNAME As for pex_run when PEX_LAST is set.
+ ERRNAME As for pex_run.
+ STATUS Set to exit status on success.
+ ERR As for pex_run.
+*/
+
+extern const char *pex_one (int flags, const char *executable,
+ char * const *argv, const char *pname,
+ const char *outname, const char *errname,
+ int *status, int *err);
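+
+/* Single-program sketch (editor's illustration):
+
+     char *argv[] = { "gcc", "--version", NULL };
+     int status, err;
+     const char *errmsg = pex_one (PEX_SEARCH, "gcc", argv, "driver",
+                                   NULL, NULL, &status, &err);
+     if (errmsg != NULL)
+       fprintf (stderr, "%s: %s\n", errmsg, xstrerror (err));
+*/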
+
+/* pexecute and pwait are the old pexecute interface, still here for
+ backward compatibility. Don't use these for new code. Instead,
+ use pex_init/pex_run/pex_get_status/pex_free, or pex_one. */
+
+/* Definitions used by the pexecute routine. */
+
+#define PEXECUTE_FIRST 1
+#define PEXECUTE_LAST 2
+#define PEXECUTE_ONE (PEXECUTE_FIRST + PEXECUTE_LAST)
+#define PEXECUTE_SEARCH 4
+#define PEXECUTE_VERBOSE 8
+
+/* Execute a program. */
+
+extern int pexecute (const char *, char * const *, const char *,
+ const char *, char **, char **, int);
+
+/* Wait for pexecute to finish. */
+
+extern int pwait (int, int *, int);
+
+/* Like bsearch, but takes and passes on an argument like qsort_r. */
+
+extern void *bsearch_r (const void *, const void *,
+ size_t, size_t,
+ int (*)(const void *, const void *, void *),
+ void *);
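+
+/* Usage sketch (editor's illustration; 'cmp_int' is hypothetical):
+
+     static int cmp_int (const void *k, const void *e, void *arg)
+     {
+       (void) arg;   // closure argument, unused here
+       return *(const int *) k - *(const int *) e;
+     }
+     ...
+     int key = 7, table[] = { 1, 3, 7, 9 };
+     int *hit = (int *) bsearch_r (&key, table, 4, sizeof (int),
+                                   cmp_int, NULL);
+*/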
+
+#if defined(HAVE_DECL_ASPRINTF) && !HAVE_DECL_ASPRINTF
+/* Like sprintf but provides a pointer to malloc'd storage, which must
+ be freed by the caller. */
+
+extern int asprintf (char **, const char *, ...) ATTRIBUTE_PRINTF_2;
+#endif
+
+/* Like asprintf but allocates memory without fail. This works like
+ xmalloc. */
+
+extern char *xasprintf (const char *, ...) ATTRIBUTE_MALLOC ATTRIBUTE_PRINTF_1;
+
+#if defined(HAVE_DECL_VASPRINTF) && !HAVE_DECL_VASPRINTF
+/* Like vsprintf but provides a pointer to malloc'd storage, which
+ must be freed by the caller. */
+
+extern int vasprintf (char **, const char *, va_list) ATTRIBUTE_PRINTF(2,0);
+#endif
+
+/* Like vasprintf but allocates memory without fail. This works like
+ xmalloc. */
+
+extern char *xvasprintf (const char *, va_list) ATTRIBUTE_MALLOC ATTRIBUTE_PRINTF(1,0);
+
+#if defined(HAVE_DECL_SNPRINTF) && !HAVE_DECL_SNPRINTF
+/* Like sprintf but prints at most N characters. */
+extern int snprintf (char *, size_t, const char *, ...) ATTRIBUTE_PRINTF_3;
+#endif
+
+#if defined(HAVE_DECL_VSNPRINTF) && !HAVE_DECL_VSNPRINTF
+/* Like vsprintf but prints at most N characters. */
+extern int vsnprintf (char *, size_t, const char *, va_list) ATTRIBUTE_PRINTF(3,0);
+#endif
+
+#if defined (HAVE_DECL_STRNLEN) && !HAVE_DECL_STRNLEN
+extern size_t strnlen (const char *, size_t);
+#endif
+
+#if defined(HAVE_DECL_STRVERSCMP) && !HAVE_DECL_STRVERSCMP
+/* Compare version strings. */
+extern int strverscmp (const char *, const char *);
+#endif
+
+#if defined(HAVE_DECL_STRTOL) && !HAVE_DECL_STRTOL
+extern long int strtol (const char *nptr,
+ char **endptr, int base);
+#endif
+
+#if defined(HAVE_DECL_STRTOUL) && !HAVE_DECL_STRTOUL
+extern unsigned long int strtoul (const char *nptr,
+ char **endptr, int base);
+#endif
+
+#if defined(HAVE_LONG_LONG) && defined(HAVE_DECL_STRTOLL) && !HAVE_DECL_STRTOLL
+__extension__
+extern long long int strtoll (const char *nptr,
+ char **endptr, int base);
+#endif
+
+#if defined(HAVE_LONG_LONG) && defined(HAVE_DECL_STRTOULL) && !HAVE_DECL_STRTOULL
+__extension__
+extern unsigned long long int strtoull (const char *nptr,
+ char **endptr, int base);
+#endif
+
+ /* Set the title of a process. */
+extern void setproctitle (const char *name, ...);
+
+/* Increase stack limit if possible. */
+extern void stack_limit_increase (unsigned long);
+
+#define ARRAY_SIZE(a) (sizeof (a) / sizeof ((a)[0]))
+
+/* Drastically simplified alloca configurator. If we're using GCC,
+ we use __builtin_alloca; otherwise we use the C alloca. The C
+ alloca is always available. You can override GCC by defining
+ USE_C_ALLOCA yourself. The canonical autoconf macro C_ALLOCA is
+ also set/unset as it is often used to indicate whether code needs
+ to call alloca(0). */
+extern void *C_alloca (size_t) ATTRIBUTE_MALLOC;
+#undef alloca
+#if GCC_VERSION >= 2000 && !defined USE_C_ALLOCA
+# define alloca(x) __builtin_alloca(x)
+# undef C_ALLOCA
+# define ASTRDUP(X) \
+ (__extension__ ({ const char *const libiberty_optr = (X); \
+ const unsigned long libiberty_len = strlen (libiberty_optr) + 1; \
+ char *const libiberty_nptr = (char *) alloca (libiberty_len); \
+ (char *) memcpy (libiberty_nptr, libiberty_optr, libiberty_len); }))
+#else
+# define alloca(x) C_alloca(x)
+# undef USE_C_ALLOCA
+# define USE_C_ALLOCA 1
+# undef C_ALLOCA
+# define C_ALLOCA 1
+extern const char *libiberty_optr;
+extern char *libiberty_nptr;
+extern unsigned long libiberty_len;
+# define ASTRDUP(X) \
+ (libiberty_optr = (X), \
+ libiberty_len = strlen (libiberty_optr) + 1, \
+ libiberty_nptr = (char *) alloca (libiberty_len), \
+ (char *) memcpy (libiberty_nptr, libiberty_optr, libiberty_len))
+#endif
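+
+/* Usage sketch (editor's illustration): ASTRDUP makes a stack-lifetime
+   copy, so nothing needs to be freed:
+
+     void announce (const char *name)
+     {
+       char *copy = ASTRDUP (name);   // released when announce returns
+       // ... mutate 'copy' freely ...
+     }
+*/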
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* ! defined (LIBIBERTY_H) */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/limitx.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/limitx.h
new file mode 100644
index 0000000..a897a4d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/limitx.h
@@ -0,0 +1,35 @@
+/* Copyright (C) 1992-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* This administrivia gets added to the beginning of limits.h
+ if the system has its own version of limits.h. */
+
+/* We use _GCC_LIMITS_H_ because we want this not to match
+ any macros that the system's limits.h uses for its own purposes. */
+#ifndef _GCC_LIMITS_H_ /* Terminated in limity.h. */
+#define _GCC_LIMITS_H_
+
+#ifndef _LIBC_LIMITS_H_
+/* Use "..." so that we find syslimits.h only in this same directory. */
+#include "syslimits.h"
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/limity.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/limity.h
new file mode 100644
index 0000000..8bb398f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/limity.h
@@ -0,0 +1,10 @@
+/* This administrivia gets added to the end of limits.h
+ if the system has its own version of limits.h. */
+
+#else /* not _GCC_LIMITS_H_ */
+
+#ifdef _GCC_NEXT_LIMITS_H
+#include_next <limits.h> /* recurse down to the real one */
+#endif
+
+#endif /* not _GCC_LIMITS_H_ */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/line-map.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/line-map.h
new file mode 100644
index 0000000..44fea0e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/line-map.h
@@ -0,0 +1,2152 @@
+/* Map (unsigned int) keys to (source file, line, column) triples.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding! */
+
+#ifndef LIBCPP_LINE_MAP_H
+#define LIBCPP_LINE_MAP_H
+
+#include <utility>
+
+#ifndef GTY
+#define GTY(x) /* nothing */
+#endif
+
+/* Both gcc and emacs number source *lines* starting at 1, but
+ they have differing conventions for *columns*.
+
+ GCC uses a 1-based convention for source columns,
+ whereas Emacs's M-x column-number-mode uses a 0-based convention.
+
+ For example, an error in the initial, left-hand
+ column of source line 3 is reported by GCC as:
+
+ some-file.c:3:1: error: ...etc...
+
+ On navigating to the location of that error in Emacs
+ (e.g. via "next-error"),
+ the locus is reported in the Mode Line
+ (assuming M-x column-number-mode) as:
+
+ some-file.c 10% (3, 0)
+
+ i.e. "3:1:" in GCC corresponds to "(3, 0)" in Emacs. */
+
+/* The type of line numbers. */
+typedef unsigned int linenum_type;
+
+/* A type for doing arithmetic on line numbers. */
+typedef long long linenum_arith_t;
+
+ /* A function for use by qsort to compare line numbers. */
+
+inline int compare (linenum_type lhs, linenum_type rhs)
+{
+ /* Avoid truncation issues by using linenum_arith_t for the comparison,
+ and only consider the sign of the result. */
+ linenum_arith_t diff = (linenum_arith_t)lhs - (linenum_arith_t)rhs;
+ if (diff)
+ return diff > 0 ? 1 : -1;
+ return 0;
+}
+
+/* Reason for creating a new line map with linemap_add. */
+enum lc_reason
+{
+ LC_ENTER = 0, /* Begin #include. */
+ LC_LEAVE, /* Return to including file. */
+ LC_RENAME, /* Other reason for name change. */
+ LC_RENAME_VERBATIM, /* Likewise, but "" != stdin. */
+ LC_ENTER_MACRO, /* Begin macro expansion. */
+ LC_MODULE, /* A (C++) Module. */
+ /* FIXME: add support for stringize and paste. */
+ LC_HWM /* High Water Mark. */
+};
+
+/* The typedef "location_t" is a key within the location database,
+ identifying a source location or macro expansion, along with range
+ information, and (optionally) a pointer for use by gcc.
+
+ This key only has meaning in relation to a line_maps instance. Within
+ gcc there is a single line_maps instance: "line_table", declared in
+ gcc/input.h and defined in gcc/input.cc.
+
+ The values of the keys are intended to be internal to libcpp,
+ but for ease-of-understanding the implementation, they are currently
+ assigned as follows:
+
+ Actual | Value | Meaning
+ -----------+-------------------------------+-------------------------------
+ 0x00000000 | UNKNOWN_LOCATION (gcc/input.h)| Unknown/invalid location.
+ -----------+-------------------------------+-------------------------------
+ 0x00000001 | BUILTINS_LOCATION | The location for declarations
+ | (gcc/input.h) | in "<built-in>"
+ -----------+-------------------------------+-------------------------------
+ 0x00000002 | RESERVED_LOCATION_COUNT | The first location to be
+ | (also | handed out, and the
+ | ordmap[0]->start_location) | first line in ordmap 0
+ -----------+-------------------------------+-------------------------------
+ | ordmap[1]->start_location | First line in ordmap 1
+ | ordmap[1]->start_location+32 | First column in that line
+ | (assuming range_bits == 5) |
+ | ordmap[1]->start_location+64 | 2nd column in that line
+ | ordmap[1]->start_location+4096| Second line in ordmap 1
+ | (assuming column_bits == 12)
+ |
+ | Subsequent lines are offset by (1 << column_bits),
+ | e.g. 4096 for 12 bits, with a column value of 0 representing
+ | "the whole line".
+ |
+ | Within a line, the low "range_bits" (typically 5) are used for
+ | storing short ranges, so that there's an offset of
+ | (1 << range_bits) between individual columns within a line,
+ | typically 32.
+ | The low range_bits store the offset of the end point from the
+ | start point, and the start point is found by masking away
+ | the range bits.
+ |
+ | For example:
+ | ordmap[1]->start_location+64 "2nd column in that line"
+ | above means a caret at that location, with a range
+ | starting and finishing at the same place (the range bits
+ | are 0), a range of length 1.
+ |
+ | By contrast:
+ | ordmap[1]->start_location+68
+ | has range bits 0x4, meaning a caret with a range starting at
+ | that location, but with endpoint 4 columns further on: a range
+ | of length 5.
+ |
+ | Ranges that have caret != start, or have an endpoint too
+ | far away to fit in range_bits are instead stored as ad-hoc
+ | locations. Hence for range_bits == 5 we can compactly store
+ | tokens of length <= 32 without needing to use the ad-hoc
+ | table.
+ |
+ | This packing scheme means we effectively have
+ | (column_bits - range_bits)
+ | of bits for the columns, typically (12 - 5) = 7, for 128
+ | columns; longer line widths are accommodated by starting a
+ | new ordmap with a higher column_bits.
+ |
+ | ordmap[2]->start_location-1 | Final location in ordmap 1
+ -----------+-------------------------------+-------------------------------
+ | ordmap[2]->start_location | First line in ordmap 2
+ | ordmap[3]->start_location-1 | Final location in ordmap 2
+ -----------+-------------------------------+-------------------------------
+ | | (etc)
+ -----------+-------------------------------+-------------------------------
+ | ordmap[n-1]->start_location | First line in final ord map
+ | | (etc)
+ | set->highest_location - 1 | Final location in that ordmap
+ -----------+-------------------------------+-------------------------------
+ | set->highest_location | Location of the where the next
+ | | ordinary linemap would start
+ -----------+-------------------------------+-------------------------------
+ | |
+ | VVVVVVVVVVVVVVVVVVVVVVVVVVV
+ | Ordinary maps grow this way
+ |
+ | (unallocated integers)
+ |
+ 0x60000000 | LINE_MAP_MAX_LOCATION_WITH_COLS
+ | Beyond this point, ordinary linemaps have 0 bits per column:
+ | each increment of the value corresponds to a new source line.
+ |
+ 0x70000000 | LINE_MAP_MAX_LOCATION
+ | Beyond this point, we give up on ordinary maps; attempts to
+ | create locations in them lead to UNKNOWN_LOCATION (0).
+ |
+ | (unallocated integers)
+ |
+ | Macro maps grow this way
+ | ^^^^^^^^^^^^^^^^^^^^^^^^
+ | |
+ -----------+-------------------------------+-------------------------------
+ | LINEMAPS_MACRO_LOWEST_LOCATION| Locations within macro maps
+ | macromap[m-1]->start_location | Start of last macro map
+ | |
+ -----------+-------------------------------+-------------------------------
+ | macromap[m-2]->start_location | Start of penultimate macro map
+ -----------+-------------------------------+-------------------------------
+ | macromap[1]->start_location | Start of macro map 1
+ -----------+-------------------------------+-------------------------------
+ | macromap[0]->start_location | Start of macro map 0
+ 0x7fffffff | MAX_LOCATION_T | Also used as a mask for
+ | | accessing the ad-hoc data table
+ -----------+-------------------------------+-------------------------------
+ 0x80000000 | Start of ad-hoc values; the lower 31 bits are used as an index
+ ... | into the line_table->location_adhoc_data_map.data array.
+ 0xffffffff | UINT_MAX |
+ -----------+-------------------------------+-------------------------------
+
+ Examples of location encoding.
+
+ Packed ranges
+ =============
+
+ Consider encoding the location of a token "foo", seen underlined here
+ on line 523, within an ordinary line_map that starts at line 500:
+
+ 11111111112
+ 12345678901234567890
+ 522
+ 523 return foo + bar;
+ ^~~
+ 524
+
+ The location's caret and start are both at line 523, column 11; the
+ location's finish is on the same line, at column 13 (an offset of 2
+ columns, for length 3).
+
+ Line 523 is offset 23 from the starting line of the ordinary line_map.
+
+ caret == start, and the offset of the finish fits within 5 bits, so
+ this can be stored as a packed range.
+
+ This is encoded as:
+ ordmap->start
+ + (line_offset << ordmap->m_column_and_range_bits)
+ + (column << ordmap->m_range_bits)
+ + (range_offset);
+ i.e. (for line offset 23, column 11, range offset 2):
+ ordmap->start
+ + (23 << 12)
+ + (11 << 5)
+ + 2;
+ i.e.:
+ ordmap->start + 0x17162
+ assuming that the line_map uses the default of 7 bits for columns and
+ 5 bits for packed range (giving 12 bits for m_column_and_range_bits).
+
+
+ "Pure" locations
+ ================
+
+ These are a special case of the above, where
+ caret == start == finish
+ They are stored as packed ranges with offset == 0.
+ For example, the location of the "f" of "foo" could be stored
+ as above, but with range offset 0, giving:
+ ordmap->start
+ + (23 << 12)
+ + (11 << 5)
+ + 0;
+ i.e.:
+ ordmap->start + 0x17160
+
+
+ Unoptimized ranges
+ ==================
+
+ Consider encoding the location of the binary expression
+ below:
+
+ 11111111112
+ 12345678901234567890
+ 522
+ 523 return foo + bar;
+ ~~~~^~~~~
+ 524
+
+ The location's caret is at the "+", line 523 column 15, but starts
+ earlier, at the "f" of "foo" at column 11. The finish is at the "r"
+ of "bar" at column 19.
+
+ This can't be stored as a packed range since start != caret.
+ Hence it is stored as an ad-hoc location e.g. 0x80000003.
+
+ Stripping off the top bit gives us an index into the ad-hoc
+ lookaside table:
+
+ line_table->location_adhoc_data_map.data[0x3]
+
+ from which the caret, start and finish can be looked up,
+ encoded as "pure" locations:
+
+ start == ordmap->start + (23 << 12) + (11 << 5)
+ == ordmap->start + 0x17160 (as above; the "f" of "foo")
+
+ caret == ordmap->start + (23 << 12) + (15 << 5)
+ == ordmap->start + 0x171e0
+
+ finish == ordmap->start + (23 << 12) + (19 << 5)
+ == ordmap->start + 0x17260
+
+ To further see how location_t works in practice, see the
+ worked example in libcpp/location-example.txt. */
+typedef unsigned int location_t;
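+
+/* An illustrative sketch (hypothetical helper, not part of this
+   header) of the packed-range encoding described above, assuming
+   the default split of 12 column-and-range bits and 5 range bits.
+   For the worked example (line offset 23, column 11, range offset 2)
+   this yields ordmap_start + 0x17162.  */
+static inline location_t
+sketch_encode_packed_range (location_t ordmap_start,
+                            unsigned int line_offset,
+                            unsigned int column,
+                            unsigned int range_offset)
+{
+  const unsigned int column_and_range_bits = 12;  /* assumed default */
+  const unsigned int range_bits = 5;              /* assumed default */
+  return (ordmap_start
+          + (line_offset << column_and_range_bits)
+          + (column << range_bits)
+          + range_offset);
+}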
+
+/* Do not track column numbers higher than this one. As a result, the
+ range of column_bits is [12, 18] (or 0 if column numbers are
+ disabled). */
+const unsigned int LINE_MAP_MAX_COLUMN_NUMBER = (1U << 12);
+
+/* Do not pack ranges if locations get higher than this.
+ If you change this, update:
+ gcc.dg/plugin/location-overflow-test-*.c. */
+const location_t LINE_MAP_MAX_LOCATION_WITH_PACKED_RANGES = 0x50000000;
+
+/* Do not track column numbers if locations get higher than this.
+ If you change this, update:
+ gcc.dg/plugin/location-overflow-test-*.c. */
+const location_t LINE_MAP_MAX_LOCATION_WITH_COLS = 0x60000000;
+
+/* Highest possible source location encoded within an ordinary map. */
+const location_t LINE_MAP_MAX_LOCATION = 0x70000000;
+
+/* A range of source locations.
+
+ Ranges are closed:
+ m_start is the first location within the range,
+ m_finish is the last location within the range.
+
+ We may need a more compact way to store these, but for now,
+ let's do it the simple way, as a pair. */
+struct GTY(()) source_range
+{
+ location_t m_start;
+ location_t m_finish;
+
+ /* We avoid using constructors, since various structs that
+ don't yet have constructors will embed instances of
+ source_range. */
+
+ /* Make a source_range from a location_t. */
+ static source_range from_location (location_t loc)
+ {
+ source_range result;
+ result.m_start = loc;
+ result.m_finish = loc;
+ return result;
+ }
+
+ /* Make a source_range from a pair of location_t. */
+ static source_range from_locations (location_t start,
+ location_t finish)
+ {
+ source_range result;
+ result.m_start = start;
+ result.m_finish = finish;
+ return result;
+ }
+};
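+
+/* Illustrative only (hypothetical helper, not part of this header):
+   how the factory functions above might be used.  */
+static inline source_range
+sketch_token_range (location_t start, location_t finish)
+{
+  /* A degenerate range where start == finish...  */
+  source_range pure = source_range::from_location (start);
+  (void) pure;
+  /* ...and a closed range covering [start, finish].  */
+  return source_range::from_locations (start, finish);
+}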
+
+/* Memory allocation function typedef. Works like xrealloc. */
+typedef void *(*line_map_realloc) (void *, size_t);
+
+/* Memory allocator function that returns the actual allocated size,
+ for a given requested allocation. */
+typedef size_t (*line_map_round_alloc_size_func) (size_t);
+
+/* A line_map encodes a sequence of locations.
+ There are two kinds of maps: ordinary maps and macro expansion
+ maps, a.k.a. macro maps.
+
+ A macro map encodes source locations of tokens that are part of a
+ macro replacement-list, at a macro expansion point. E.g., in:
+
+ #define PLUS(A,B) A + B
+
+ No macro map is going to be created there, because we are not at a
+ macro expansion point. We are at a macro /definition/ point. So the
+ locations of the tokens of the macro replacement-list (i.e., A + B)
+ will be locations in an ordinary map, not a macro map.
+
+ On the other hand, if we later do:
+
+ int a = PLUS (1,2);
+
+ The invocation of PLUS here is a macro expansion. So we are at a
+ macro expansion point. The preprocessor expands PLUS (1,2) and
+ replaces it with the tokens of its replacement-list: 1 + 2. A macro
+ map is going to be created to hold (or rather to map, haha ...) the
+ locations of the tokens 1, + and 2. The macro map also records the
+ location of the expansion point of PLUS. That location is mapped in
+ the map that is active right before the location of the invocation
+ of PLUS. */
+
+/* This contains GTY mark-up to support precompiled headers.
+ line_map is an abstract class, only derived objects exist. */
+struct GTY((tag ("0"), desc ("MAP_ORDINARY_P (&%h) ? 1 : 2"))) line_map {
+ location_t start_location;
+
+ /* Size and alignment is (usually) 4 bytes. */
+};
+
+/* An ordinary line map encodes physical source locations. Those
+ physical source locations are called "spelling locations".
+
+ Physical source file TO_FILE at line TO_LINE at column 0 is represented
+ by the logical START_LOCATION. TO_LINE+L at column C is represented by
+ START_LOCATION+(L*(1<<m_column_and_range_bits))+(C*(1<<m_range_bits)), as
+ long as C<(1<<effective column bits), and the resulting location is less
+ than the next line_map's start_location.
+ (The top line is line 1 and the leftmost column is column 1; line/column 0
+ means "entire file/line" or "unknown line/column" or "not applicable".)
+
+ The highest possible source location is MAX_LOCATION_T. */
+struct GTY((tag ("1"))) line_map_ordinary : public line_map {
+ /* Base class is 4 bytes. */
+
+ /* 4 bytes of integers, each 1 byte for easy extraction/insertion. */
+
+ /* The reason for creation of this line map. */
+ ENUM_BITFIELD (lc_reason) reason : 8;
+
+ /* SYSP is one for a system header, two for a C system header file
+ that therefore needs to be extern "C" protected in C++, and zero
+ otherwise. This field isn't really needed now that it's in
+ cpp_buffer. */
+ unsigned char sysp;
+
+ /* Number of the low-order location_t bits used for column numbers
+ and ranges. */
+ unsigned int m_column_and_range_bits : 8;
+
+ /* Number of the low-order "column" bits used for storing short ranges
+ inline, rather than in the ad-hoc table.
+ MSB LSB
+ 31 0
+ +-------------------------+-------------------------------------------+
+ | |<---map->column_and_range_bits (e.g. 12)-->|
+ +-------------------------+-----------------------+-------------------+
+ | | column_and_range_bits | map->range_bits |
+ | | - range_bits | |
+ +-------------------------+-----------------------+-------------------+
+ | row bits | effective column bits | short range bits |
+ | | (e.g. 7) | (e.g. 5) |
+ +-------------------------+-----------------------+-------------------+ */
+ unsigned int m_range_bits : 8;
+
+ /* Pointer alignment boundary on both 32 and 64-bit systems. */
+
+ const char *to_file;
+ linenum_type to_line;
+
+ /* Location from which this line map was included. For regular
+ #includes, this location will be the last location of a map. For
+ the outermost file, this is 0. For modules it could be anywhere
+ within a map. */
+ location_t included_from;
+
+ /* Size is 20 or 24 bytes, no padding */
+};
+
+/* This is the highest possible source location encoded within an
+ ordinary or macro map. */
+const location_t MAX_LOCATION_T = 0x7FFFFFFF;
+
+struct cpp_hashnode;
+
+/* A macro line map encodes location of tokens coming from a macro
+ expansion.
+
+ The offset from START_LOCATION is used to index into
+ MACRO_LOCATIONS; this holds the original location of the token. */
+struct GTY((tag ("2"))) line_map_macro : public line_map {
+ /* Base is 4 bytes. */
+
+ /* The number of tokens inside the replacement-list of MACRO. */
+ unsigned int n_tokens;
+
+ /* Pointer alignment boundary. */
+
+ /* The cpp macro whose expansion gave birth to this macro map. */
+ struct cpp_hashnode *
+ GTY ((nested_ptr (union tree_node,
+ "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL",
+ "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL")))
+ macro;
+
+ /* This array of locations is actually an array of pairs of
+ locations. The elements inside it thus look like:
+
+ x0,y0, x1,y1, x2,y2, ...., xn,yn.
+
+ where n == n_tokens;
+
+ Remember that these xI,yI are collected when libcpp is about to
+ expand a given macro.
+
+ yI is the location in the macro definition, either of the token
+ itself or of a macro parameter that it replaces.
+
+ Imagine this:
+
+ #define PLUS(A, B) A + B <--- #1
+
+ int a = PLUS (1,2); <--- #2
+
+ There is a macro map for the expansion of PLUS in #2. PLUS is
+ expanded into its expansion-list. The expansion-list is the
+ replacement-list of PLUS where the macro parameters are replaced
+ with their arguments. So the replacement-list of PLUS is made of
+ the tokens:
+
+ A, +, B
+
+ and the expansion-list is made of the tokens:
+
+ 1, +, 2
+
+ Let's consider the case of token "+". Its y1 [yI for I == 1] is
+ its spelling location in #1.
+
+ y0 (thus for token "1") is the spelling location of A in #1.
+
+ And y2 (of token "2") is the spelling location of B in #1.
+
+ When the token is /not/ an argument for a macro, xI is the same
+ location as yI. Otherwise, xI is the location of the token
+ outside this macro expansion. If this macro was expanded from
+ another macro expansion, xI is a virtual location representing
+ the token in that macro expansion; otherwise, it is the spelling
+ location of the token.
+
+ Note that a virtual location is a location returned by
+ linemap_add_macro_token. It encodes the relevant locations (x,y
+ pairs) of that token across the macro expansions from which it
+ (the token) might come.
+
+ In the example above x1 (for token "+") is going to be the same
+ as y1. x0 is the spelling location for the argument token "1",
+ and x2 is the spelling location for the argument token "2". */
+ location_t * GTY((atomic)) macro_locations;
+
+ /* This is the location of the expansion point of the current macro
+ map. It's the location of the macro name. That location is held
+ by the map that was current right before the current one. It
+ could have been either a macro or an ordinary map, depending on
+ whether we are in a nested expansion context or not. */
+ location_t expansion;
+
+ /* Size is 20 or 32 (4 bytes padding on 64-bit). */
+};
+
+#if CHECKING_P && (GCC_VERSION >= 2007)
+
+/* Assertion macro to be used in line-map code. */
+#define linemap_assert(EXPR) \
+ do { \
+ if (! (EXPR)) \
+ abort (); \
+ } while (0)
+
+/* Assert that becomes a conditional expression when checking is disabled at
+ compilation time. Use this for conditions that should not happen but if
+ they happen, it is better to handle them gracefully rather than crash
+ randomly later.
+ Usage:
+
+ if (linemap_assert_fails(EXPR)) handle_error(); */
+#define linemap_assert_fails(EXPR) __extension__ \
+ ({linemap_assert (EXPR); false;})
+
+#else
+/* Include EXPR, so that unused variable warnings do not occur. */
+#define linemap_assert(EXPR) ((void)(0 && (EXPR)))
+#define linemap_assert_fails(EXPR) (! (EXPR))
+#endif
+
+/* Get whether location LOC is an ordinary location. */
+
+inline bool
+IS_ORDINARY_LOC (location_t loc)
+{
+ return loc < LINE_MAP_MAX_LOCATION;
+}
+
+/* Get whether location LOC is an ad-hoc location. */
+
+inline bool
+IS_ADHOC_LOC (location_t loc)
+{
+ return loc > MAX_LOCATION_T;
+}
+
+/* Categorize line map kinds. */
+
+inline bool
+MAP_ORDINARY_P (const line_map *map)
+{
+ return IS_ORDINARY_LOC (map->start_location);
+}
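+
+/* Illustrative sketch (hypothetical, not part of this header):
+   classifying a location_t with the predicates above.  A location
+   that is neither ordinary nor ad-hoc is treated as belonging to
+   the macro-map range (LINE_MAP_MAX_LOCATION..MAX_LOCATION_T].  */
+static inline const char *
+sketch_classify_location (location_t loc)
+{
+  if (IS_ADHOC_LOC (loc))
+    return "ad-hoc";
+  if (IS_ORDINARY_LOC (loc))
+    return "ordinary";
+  return "macro";
+}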
+
+/* Return TRUE if MAP encodes locations coming from a macro
+ replacement-list at a macro expansion point. */
+bool
+linemap_macro_expansion_map_p (const line_map *);
+
+/* Assert that MAP encodes locations of tokens that are not part of
+ the replacement-list of a macro expansion, downcasting from
+ line_map * to line_map_ordinary *. */
+
+inline line_map_ordinary *
+linemap_check_ordinary (line_map *map)
+{
+ linemap_assert (MAP_ORDINARY_P (map));
+ return (line_map_ordinary *)map;
+}
+
+/* Assert that MAP encodes locations of tokens that are not part of
+ the replacement-list of a macro expansion, downcasting from
+ const line_map * to const line_map_ordinary *. */
+
+inline const line_map_ordinary *
+linemap_check_ordinary (const line_map *map)
+{
+ linemap_assert (MAP_ORDINARY_P (map));
+ return (const line_map_ordinary *)map;
+}
+
+/* Assert that MAP is a macro expansion and downcast to the appropriate
+ subclass. */
+
+inline line_map_macro *linemap_check_macro (line_map *map)
+{
+ linemap_assert (!MAP_ORDINARY_P (map));
+ return (line_map_macro *)map;
+}
+
+/* Assert that MAP is a macro expansion and downcast to the appropriate
+ subclass. */
+
+inline const line_map_macro *
+linemap_check_macro (const line_map *map)
+{
+ linemap_assert (!MAP_ORDINARY_P (map));
+ return (const line_map_macro *)map;
+}
+
+/* Read the start location of MAP. */
+
+inline location_t
+MAP_START_LOCATION (const line_map *map)
+{
+ return map->start_location;
+}
+
+/* Get the starting line number of ordinary map MAP. */
+
+inline linenum_type
+ORDINARY_MAP_STARTING_LINE_NUMBER (const line_map_ordinary *ord_map)
+{
+ return ord_map->to_line;
+}
+
+/* Return a positive value if map encodes locations from a system
+ header, 0 otherwise. Returns 1 if ordinary map MAP encodes locations
+ in a system header and 2 if it encodes locations in a C system header
+ that therefore needs to be extern "C" protected in C++. */
+
+inline unsigned char
+ORDINARY_MAP_IN_SYSTEM_HEADER_P (const line_map_ordinary *ord_map)
+{
+ return ord_map->sysp;
+}
+
+/* TRUE if this line map is for a module (not a source file). */
+
+inline bool
+MAP_MODULE_P (const line_map *map)
+{
+ return (MAP_ORDINARY_P (map)
+ && linemap_check_ordinary (map)->reason == LC_MODULE);
+}
+
+/* Get the filename of ordinary map MAP. */
+
+inline const char *
+ORDINARY_MAP_FILE_NAME (const line_map_ordinary *ord_map)
+{
+ return ord_map->to_file;
+}
+
+/* Get the cpp macro whose expansion gave birth to macro map MAP. */
+
+inline cpp_hashnode *
+MACRO_MAP_MACRO (const line_map_macro *macro_map)
+{
+ return macro_map->macro;
+}
+
+/* Get the number of tokens inside the replacement-list of the macro
+ that led to macro map MAP. */
+
+inline unsigned int
+MACRO_MAP_NUM_MACRO_TOKENS (const line_map_macro *macro_map)
+{
+ return macro_map->n_tokens;
+}
+
+/* Get the array of pairs of locations within macro map MAP.
+ See the declaration of line_map_macro for more information. */
+
+inline location_t *
+MACRO_MAP_LOCATIONS (const line_map_macro *macro_map)
+{
+ return macro_map->macro_locations;
+}
+
+/* Get the location of the expansion point of the macro map MAP. */
+
+inline location_t
+MACRO_MAP_EXPANSION_POINT_LOCATION (const line_map_macro *macro_map)
+{
+ return macro_map->expansion;
+}
+
+/* The abstraction of a set of location maps. There can be several
+ types of location maps. This abstraction contains the attributes
+ that are independent from the type of the map.
+
+ Essentially this is just a vector of T_linemap_subclass,
+ which can only ever grow in size. */
+
+struct GTY(()) maps_info_ordinary {
+ /* This array contains the "ordinary" line maps, for all
+ events other than macro expansion
+ (e.g. when a new preprocessing unit starts or ends). */
+ line_map_ordinary * GTY ((length ("%h.used"))) maps;
+
+ /* The total number of allocated maps. */
+ unsigned int allocated;
+
+ /* The number of elements used in maps. This number is smaller
+ or equal to ALLOCATED. */
+ unsigned int used;
+
+ mutable unsigned int cache;
+};
+
+struct GTY(()) maps_info_macro {
+ /* This array contains the macro line maps.
+ A macro line map is created whenever a macro expansion occurs. */
+ line_map_macro * GTY ((length ("%h.used"))) maps;
+
+ /* The total number of allocated maps. */
+ unsigned int allocated;
+
+ /* The number of elements used in maps. This number is smaller
+ or equal to ALLOCATED. */
+ unsigned int used;
+
+ mutable unsigned int cache;
+};
+
+/* Data structure to associate a source_range and an arbitrary
+ data pointer with a source location. */
+struct GTY(()) location_adhoc_data {
+ location_t locus;
+ source_range src_range;
+ void * GTY((skip)) data;
+ unsigned discriminator;
+};
+
+struct htab;
+
+/* The following data structure encodes a location with some adhoc data
+ and maps it to a new unsigned integer (called an adhoc location)
+ that replaces the original location to represent the mapping.
+
+ The new adhoc_loc uses the highest bit as the enabling bit, i.e. if the
+ highest bit is 1, then the number is an adhoc_loc. Otherwise, it serves as
+ the original location. Once identified as an adhoc_loc, the lower 31
+ bits of the integer are used to index the location_adhoc_data array,
+ in which the locus and associated data are stored. */
+
+struct GTY(()) location_adhoc_data_map {
+ struct htab * GTY((skip)) htab;
+ location_t curr_loc;
+ unsigned int allocated;
+ struct location_adhoc_data GTY((length ("%h.allocated"))) *data;
+};
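+
+/* Illustrative sketch (hypothetical, not part of this header) of the
+   encoding described above: an ad-hoc location has its top bit set,
+   and the remaining 31 bits index the data array.  MAX_LOCATION_T
+   (0x7FFFFFFF) doubles as the index mask.  */
+static inline unsigned int
+sketch_adhoc_index (location_t adhoc_loc)
+{
+  return adhoc_loc & MAX_LOCATION_T;
+}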
+
+/* A set of chronological line_map structures. */
+class GTY(()) line_maps {
+public:
+
+ ~line_maps ();
+
+ maps_info_ordinary info_ordinary;
+
+ maps_info_macro info_macro;
+
+ /* Depth of the include stack, including the current file. */
+ unsigned int depth;
+
+ /* If true, prints an include trace a la -H. */
+ bool trace_includes;
+
+ /* True if we've seen a #line or # 44 "file" directive. */
+ bool seen_line_directive;
+
+ /* Highest location_t "given out". */
+ location_t highest_location;
+
+ /* Start of line of highest location_t "given out". */
+ location_t highest_line;
+
+ /* The maximum column number we can quickly allocate. Higher numbers
+ may require allocating a new line_map. */
+ unsigned int max_column_hint;
+
+ /* The allocator to use when resizing 'maps', defaults to xrealloc. */
+ line_map_realloc GTY((callback)) reallocator;
+
+ /* The allocator's function used to report the actual size
+ allocated for a given requested allocation size. */
+ line_map_round_alloc_size_func GTY((callback)) round_alloc_size;
+
+ struct location_adhoc_data_map location_adhoc_data_map;
+
+ /* The special location value that is used as spelling location for
+ built-in tokens. */
+ location_t builtin_location;
+
+ /* The default value of range_bits in ordinary line maps. */
+ unsigned int default_range_bits;
+
+ unsigned int num_optimized_ranges;
+ unsigned int num_unoptimized_ranges;
+};
+
+/* Returns the number of allocated maps so far. MAP_KIND shall be TRUE
+ if we are interested in macro maps, FALSE otherwise. */
+inline unsigned int
+LINEMAPS_ALLOCATED (const line_maps *set, bool map_kind)
+{
+ if (map_kind)
+ return set->info_macro.allocated;
+ else
+ return set->info_ordinary.allocated;
+}
+
+/* As above, but by reference (e.g. as an lvalue). */
+
+inline unsigned int &
+LINEMAPS_ALLOCATED (line_maps *set, bool map_kind)
+{
+ if (map_kind)
+ return set->info_macro.allocated;
+ else
+ return set->info_ordinary.allocated;
+}
+
+/* Returns the number of used maps so far. MAP_KIND shall be TRUE if
+ we are interested in macro maps, FALSE otherwise. */
+inline unsigned int
+LINEMAPS_USED (const line_maps *set, bool map_kind)
+{
+ if (map_kind)
+ return set->info_macro.used;
+ else
+ return set->info_ordinary.used;
+}
+
+/* As above, but by reference (e.g. as an lvalue). */
+
+inline unsigned int &
+LINEMAPS_USED (line_maps *set, bool map_kind)
+{
+ if (map_kind)
+ return set->info_macro.used;
+ else
+ return set->info_ordinary.used;
+}
+
+/* Returns the index of the last map that was looked up with
+ linemap_lookup. MAP_KIND shall be TRUE if we are interested in
+ macro maps, FALSE otherwise. */
+inline unsigned int &
+LINEMAPS_CACHE (const line_maps *set, bool map_kind)
+{
+ if (map_kind)
+ return set->info_macro.cache;
+ else
+ return set->info_ordinary.cache;
+}
+
+/* Return the map at a given index. */
+inline line_map *
+LINEMAPS_MAP_AT (const line_maps *set, bool map_kind, int index)
+{
+ if (map_kind)
+ return &set->info_macro.maps[index];
+ else
+ return &set->info_ordinary.maps[index];
+}
+
+/* Returns the last map used in the line table SET. MAP_KIND
+ shall be TRUE if we are interested in macro maps, FALSE
+ otherwise. */
+inline line_map *
+LINEMAPS_LAST_MAP (const line_maps *set, bool map_kind)
+{
+ return LINEMAPS_MAP_AT (set, map_kind,
+ LINEMAPS_USED (set, map_kind) - 1);
+}
+
+/* Returns the last map that was allocated in the line table SET.
+ MAP_KIND shall be TRUE if we are interested in macro maps, FALSE
+ otherwise. */
+inline line_map *
+LINEMAPS_LAST_ALLOCATED_MAP (const line_maps *set, bool map_kind)
+{
+ return LINEMAPS_MAP_AT (set, map_kind,
+ LINEMAPS_ALLOCATED (set, map_kind) - 1);
+}
+
+/* Returns a pointer to the memory region where ordinary maps are
+ allocated in the line table SET. */
+inline line_map_ordinary *
+LINEMAPS_ORDINARY_MAPS (const line_maps *set)
+{
+ return set->info_ordinary.maps;
+}
+
+/* Returns the INDEXth ordinary map. */
+inline line_map_ordinary *
+LINEMAPS_ORDINARY_MAP_AT (const line_maps *set, int index)
+{
+ linemap_assert (index >= 0
+ && (unsigned int)index < LINEMAPS_USED (set, false));
+ return (line_map_ordinary *)LINEMAPS_MAP_AT (set, false, index);
+}
+
+/* Return the number of ordinary maps allocated in the line table
+ SET. */
+inline unsigned int
+LINEMAPS_ORDINARY_ALLOCATED (const line_maps *set)
+{
+ return LINEMAPS_ALLOCATED (set, false);
+}
+
+/* Return the number of ordinary maps used in the line table SET. */
+inline unsigned int
+LINEMAPS_ORDINARY_USED (const line_maps *set)
+{
+ return LINEMAPS_USED (set, false);
+}
+
+/* Return the index of the last ordinary map that was looked up with
+ linemap_lookup. */
+inline unsigned int &
+LINEMAPS_ORDINARY_CACHE (const line_maps *set)
+{
+ return LINEMAPS_CACHE (set, false);
+}
+
+/* Returns a pointer to the last ordinary map used in the line table
+ SET. */
+inline line_map_ordinary *
+LINEMAPS_LAST_ORDINARY_MAP (const line_maps *set)
+{
+ return (line_map_ordinary *)LINEMAPS_LAST_MAP (set, false);
+}
+
+/* Returns a pointer to the last ordinary map allocated in the line table
+ SET. */
+inline line_map_ordinary *
+LINEMAPS_LAST_ALLOCATED_ORDINARY_MAP (const line_maps *set)
+{
+ return (line_map_ordinary *)LINEMAPS_LAST_ALLOCATED_MAP (set, false);
+}
+
+/* Returns a pointer to the beginning of the region where macro maps
+ are allocated. */
+inline line_map_macro *
+LINEMAPS_MACRO_MAPS (const line_maps *set)
+{
+ return set->info_macro.maps;
+}
+
+/* Returns the INDEXth macro map. */
+inline line_map_macro *
+LINEMAPS_MACRO_MAP_AT (const line_maps *set, int index)
+{
+ linemap_assert (index >= 0
+ && (unsigned int)index < LINEMAPS_USED (set, true));
+ return (line_map_macro *)LINEMAPS_MAP_AT (set, true, index);
+}
+
+/* Returns the number of macro maps that were allocated in the line
+ table SET. */
+inline unsigned int
+LINEMAPS_MACRO_ALLOCATED (const line_maps *set)
+{
+ return LINEMAPS_ALLOCATED (set, true);
+}
+
+/* Returns the number of macro maps used in the line table SET. */
+inline unsigned int
+LINEMAPS_MACRO_USED (const line_maps *set)
+{
+ return LINEMAPS_USED (set, true);
+}
+
+/* Return the index of the last macro map that was looked up with
+ linemap_lookup. */
+inline unsigned int &
+LINEMAPS_MACRO_CACHE (const line_maps *set)
+{
+ return LINEMAPS_CACHE (set, true);
+}
+
+/* Returns the last macro map used in the line table SET. */
+inline line_map_macro *
+LINEMAPS_LAST_MACRO_MAP (const line_maps *set)
+{
+ return (line_map_macro *)LINEMAPS_LAST_MAP (set, true);
+}
+
+/* Returns the lowest location [of a token resulting from macro
+ expansion] encoded in this line table. */
+inline location_t
+LINEMAPS_MACRO_LOWEST_LOCATION (const line_maps *set)
+{
+ return LINEMAPS_MACRO_USED (set)
+ ? MAP_START_LOCATION (LINEMAPS_LAST_MACRO_MAP (set))
+ : MAX_LOCATION_T + 1;
+}
+
+/* Returns the last macro map allocated in the line table SET. */
+inline line_map_macro *
+LINEMAPS_LAST_ALLOCATED_MACRO_MAP (const line_maps *set)
+{
+ return (line_map_macro *)LINEMAPS_LAST_ALLOCATED_MAP (set, true);
+}
+
+extern location_t get_combined_adhoc_loc (line_maps *, location_t,
+ source_range, void *, unsigned);
+extern void *get_data_from_adhoc_loc (const line_maps *, location_t);
+extern unsigned get_discriminator_from_adhoc_loc (const line_maps *, location_t);
+extern location_t get_location_from_adhoc_loc (const line_maps *,
+ location_t);
+
+extern source_range get_range_from_loc (line_maps *set, location_t loc);
+extern unsigned get_discriminator_from_loc (line_maps *set, location_t loc);
+
+/* Get whether location LOC is a "pure" location, or
+ whether it is an ad-hoc location, or embeds range information. */
+
+bool
+pure_location_p (line_maps *set, location_t loc);
+
+/* Given location LOC within SET, strip away any packed range information
+ or ad-hoc information. */
+
+extern location_t get_pure_location (line_maps *set, location_t loc);
+
+/* Combine LOC and BLOCK, giving a combined adhoc location. */
+
+inline location_t
+COMBINE_LOCATION_DATA (class line_maps *set,
+ location_t loc,
+ source_range src_range,
+ void *block,
+ unsigned discriminator)
+{
+ return get_combined_adhoc_loc (set, loc, src_range, block, discriminator);
+}
+
+extern void rebuild_location_adhoc_htab (class line_maps *);
+
+/* Initialize a line map set. SET is the line map set to initialize
+ and BUILTIN_LOCATION is the special location value to be used as
+ spelling location for built-in tokens. This BUILTIN_LOCATION has
+ to be strictly less than RESERVED_LOCATION_COUNT. */
+extern void linemap_init (class line_maps *set,
+ location_t builtin_location);
+
+/* Check for and warn about line_maps entered but not exited. */
+
+extern void linemap_check_files_exited (class line_maps *);
+
+/* Return a location_t for the start (i.e. column==0) of
+ (physical) line TO_LINE in the current source file (as in the
+ most recent linemap_add). MAX_COLUMN_HINT is the highest column
+ number we expect to use in this line (but it does not change
+ the highest_location). */
+
+extern location_t linemap_line_start
+(class line_maps *set, linenum_type to_line, unsigned int max_column_hint);
+
+/* Allocate a raw block of line maps, zero initialized. */
+extern line_map *line_map_new_raw (line_maps *, bool, unsigned);
+
+/* Add a mapping of logical source line to physical source file and
+ line number. This function creates an "ordinary map", which is a
+ map that records locations of tokens that are not part of macro
+ replacement-lists present at a macro expansion point.
+
+ The text pointed to by TO_FILE must have a lifetime
+ at least as long as the lifetime of SET. An empty
+ TO_FILE means standard input. If reason is LC_LEAVE, and
+ TO_FILE is NULL, then TO_FILE, TO_LINE and SYSP are given their
+ natural values considering the file we are returning to.
+
+ A call to this function can relocate the previous set of
+ maps, so any stored line_map pointers should not be used. */
+extern const line_map *linemap_add
+ (class line_maps *, enum lc_reason, unsigned int sysp,
+ const char *to_file, linenum_type to_line);
+
+/* Create a macro map. A macro map encodes source locations of tokens
+ that are part of a macro replacement-list, at a macro expansion
+ point. See the extensive comments of struct line_map and struct
+ line_map_macro, in line-map.h.
+
+ This map shall be created when the macro is expanded. The map
+ encodes the source location of the expansion point of the macro as
+ well as the "original" source location of each token that is part
+ of the macro replacement-list. If a macro is defined but never
+ expanded, it has no macro map. SET is the set of maps the macro
+ map should be part of. MACRO_NODE is the macro which the new macro
+ map should encode source locations for. EXPANSION is the location
+ of the expansion point of MACRO. For function-like macro
+ invocations, it's best to make it point to the closing parenthesis
+ of the macro, rather than the location of the first character
+ of the macro. NUM_TOKENS is the number of tokens that are part of
+ the replacement-list of MACRO. */
+const line_map_macro *linemap_enter_macro (line_maps *, cpp_hashnode *,
+ location_t, unsigned int);
+
+/* Create a source location for a module. The creator must either do
+ this after the TU is tokenized, or deal with saving and restoring
+ map state. */
+
+extern location_t linemap_module_loc
+ (line_maps *, location_t from, const char *name);
+extern void linemap_module_reparent
+ (line_maps *, location_t loc, location_t new_parent);
+
+/* Restore the linemap state such that the map at LWM-1 continues.
+ Return start location of the new map. */
+extern unsigned linemap_module_restore
+ (line_maps *, unsigned lwm);
+
+/* Given a logical source location, returns the map which the
+ corresponding (source file, line, column) triplet can be deduced
+ from. Since the set is built chronologically, the logical lines are
+ monotonically increasing, and so the list is sorted and we can use a
+ binary search. If no line map has been allocated yet, this
+ function returns NULL. */
+extern const line_map *linemap_lookup
+ (const line_maps *, location_t);
+
+unsigned linemap_lookup_macro_index (const line_maps *, location_t);
+
+/* Returns TRUE if the line table set tracks token locations across
+ macro expansion, FALSE otherwise. */
+bool linemap_tracks_macro_expansion_locs_p (class line_maps *);
+
+/* Return the name of the macro associated with MACRO_MAP. */
+const char* linemap_map_get_macro_name (const line_map_macro *);
+
+/* Return a positive value if LOCATION is the locus of a token that is
+ located in a system header, 0 otherwise. It returns 1 if LOCATION
+ is the locus of a token that is located in a system header, and 2
+ if LOCATION is the locus of a token located in a C system header
+ that therefore needs to be extern "C" protected in C++.
+
+ Note that this function returns 1 if LOCATION belongs to a token
+ that is part of a macro replacement-list defined in a system
+ header, but expanded in a non-system file. */
+int linemap_location_in_system_header_p (class line_maps *,
+ location_t);
+
+/* Return TRUE if LOCATION is a source code location of a token that is part of
+ a macro expansion, FALSE otherwise. */
+bool linemap_location_from_macro_expansion_p (const line_maps *,
+ location_t);
+
+/* TRUE if LOCATION is a source code location of a token that is part of the
+ definition of a macro, FALSE otherwise. */
+bool linemap_location_from_macro_definition_p (class line_maps *,
+ location_t);
+
+/* With the precondition that LOCATION is the locus of a token that is
+ an argument of a function-like macro MACRO_MAP and appears in the
+ expansion of MACRO_MAP, return the locus of that argument in the
+ context of the caller of MACRO_MAP. */
+
+extern location_t linemap_macro_map_loc_unwind_toward_spelling
+ (line_maps *set, const line_map_macro *macro_map, location_t location);
+
+/* location_t values from 0 to RESERVED_LOCATION_COUNT-1 are
+ reserved for the libcpp user as special values; no token from libcpp
+ will contain any of those locations. */
+const location_t RESERVED_LOCATION_COUNT = 2;
+
+/* Converts a map and a location_t to source line. */
+inline linenum_type
+SOURCE_LINE (const line_map_ordinary *ord_map, location_t loc)
+{
+ return ((loc - ord_map->start_location)
+ >> ord_map->m_column_and_range_bits) + ord_map->to_line;
+}
+
+/* Convert a map and location_t to source column number. */
+inline linenum_type
+SOURCE_COLUMN (const line_map_ordinary *ord_map, location_t loc)
+{
+ return ((loc - ord_map->start_location)
+ & ((1 << ord_map->m_column_and_range_bits) - 1)) >> ord_map->m_range_bits;
+}
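+
+/* Illustrative sketch (hypothetical, not part of this header):
+   decoding the worked "foo + bar" example with the accessors above.
+   For loc == ord_map->start_location + 0x17162 in a map starting at
+   line 500 with the default 12/5 bit split, this recovers line 523
+   and column 11.  */
+static inline void
+sketch_decode_location (const line_map_ordinary *ord_map,
+                        location_t loc)
+{
+  linenum_type line = SOURCE_LINE (ord_map, loc);     /* 523 */
+  linenum_type column = SOURCE_COLUMN (ord_map, loc); /* 11 */
+  (void) line;
+  (void) column;
+}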
+
+
+inline location_t
+linemap_included_from (const line_map_ordinary *ord_map)
+{
+ return ord_map->included_from;
+}
+
+/* The linemap containing the included-from location of MAP. */
+const line_map_ordinary *linemap_included_from_linemap
+ (line_maps *set, const line_map_ordinary *map);
+
+/* True if the map is at the bottom of the include stack. */
+
+inline bool
+MAIN_FILE_P (const line_map_ordinary *ord_map)
+{
+ return ord_map->included_from == 0;
+}
+
+/* Encode and return a location_t from a column number. The
+ source line considered is the last source line used to call
+ linemap_line_start, i.e., the last source line which a location was
+ encoded from. */
+extern location_t
+linemap_position_for_column (class line_maps *, unsigned int);
+
+/* Encode and return a source location from a given line and
+ column. */
+location_t
+linemap_position_for_line_and_column (line_maps *set,
+ const line_map_ordinary *,
+ linenum_type, unsigned int);
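+
+/* An illustrative sketch of a typical call sequence (hypothetical,
+   not part of this header; LC_ENTER is assumed to be the enum
+   lc_reason value used when entering a file).  */
+static inline location_t
+sketch_locate_token (class line_maps *set)
+{
+  /* Enter a file, start a line, then encode a column on it.  */
+  linemap_add (set, LC_ENTER, 0 /* not a system header */,
+               "test.c", 1);
+  linemap_line_start (set, 523 /* to_line */, 80 /* column hint */);
+  return linemap_position_for_column (set, 11);
+}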
+
+/* Encode and return a location_t starting from location LOC and
+ shifting it by OFFSET columns. This function does not support
+ virtual locations. */
+location_t
+linemap_position_for_loc_and_offset (class line_maps *set,
+ location_t loc,
+ unsigned int offset);
+
+/* Return the file this map is for. */
+inline const char *
+LINEMAP_FILE (const line_map_ordinary *ord_map)
+{
+ return ord_map->to_file;
+}
+
+/* Return the line number this map started encoding location from. */
+inline linenum_type
+LINEMAP_LINE (const line_map_ordinary *ord_map)
+{
+ return ord_map->to_line;
+}
+
+/* Return a positive value if map encodes locations from a system
+ header, 0 otherwise. Returns 1 if MAP encodes locations in a
+ system header and 2 if it encodes locations in a C system header
+ that therefore needs to be extern "C" protected in C++. */
+inline unsigned char
+LINEMAP_SYSP (const line_map_ordinary *ord_map)
+{
+ return ord_map->sysp;
+}
+
+const struct line_map *first_map_in_common (line_maps *set,
+ location_t loc0,
+ location_t loc1,
+ location_t *res_loc0,
+ location_t *res_loc1);
+
+/* Return a positive value if PRE denotes the location of a token that
+ comes before the token of POST, 0 if PRE denotes the location of
+ the same token as the token for POST, and a negative value
+ otherwise. */
+int linemap_compare_locations (class line_maps *set,
+ location_t pre,
+ location_t post);
+
+/* Return TRUE if LOC_A denotes the location of a token that comes
+ topologically before the token denoted by location LOC_B, or if they
+ are equal. */
+inline bool
+linemap_location_before_p (class line_maps *set,
+ location_t loc_a,
+ location_t loc_b)
+{
+ return linemap_compare_locations (set, loc_a, loc_b) >= 0;
+}
+
+typedef struct
+{
+ /* The name of the source file involved. */
+ const char *file;
+
+ /* The line-location in the source file. */
+ int line;
+
+ int column;
+
+ void *data;
+
+ /* In a system header? */
+ bool sysp;
+} expanded_location;
+
+class range_label;
+
+/* A hint to diagnostic_show_locus on how to print a source range within a
+ rich_location.
+
+ Typically this is SHOW_RANGE_WITH_CARET for the 0th range, and
+ SHOW_RANGE_WITHOUT_CARET for subsequent ranges,
+ but the Fortran frontend uses SHOW_RANGE_WITH_CARET repeatedly for
+ printing things like:
+
+ x = x + y
+ 1 2
+ Error: Shapes for operands at (1) and (2) are not conformable
+
+ where "1" and "2" are notionally carets. */
+
+enum range_display_kind
+{
+ /* Show the pertinent source line(s), the caret, and underline(s). */
+ SHOW_RANGE_WITH_CARET,
+
+ /* Show the pertinent source line(s) and underline(s), but don't
+ show the caret (just an underline). */
+ SHOW_RANGE_WITHOUT_CARET,
+
+ /* Just show the source lines; don't show the range itself.
+ This is for use when displaying some line-insertion fix-it hints (for
+ showing the user context on the change, for when it doesn't make sense
+ to highlight the first column on the next line). */
+ SHOW_LINES_WITHOUT_RANGE
+};
+
+/* A location within a rich_location: a caret&range, with
+ the caret potentially flagged for display, and an optional
+ label. */
+
+struct location_range
+{
+ location_t m_loc;
+
+ enum range_display_kind m_range_display_kind;
+
+ /* If non-NULL, the label for this range. */
+ const range_label *m_label;
+};
+
+/* A partially-embedded vec for use within rich_location for storing
+ ranges and fix-it hints.
+
+ Elements [0..NUM_EMBEDDED) are allocated within m_embedded; after
+ that they are within the dynamically-allocated m_extra.
+
+ This allows for static allocation in the common case, whilst
+ supporting the rarer case of an arbitrary number of elements.
+
+ Dynamic allocation is not performed unless it's needed. */
+
+template <typename T, int NUM_EMBEDDED>
+class semi_embedded_vec
+{
+ public:
+ semi_embedded_vec ();
+ ~semi_embedded_vec ();
+
+ unsigned int count () const { return m_num; }
+ T& operator[] (int idx);
+ const T& operator[] (int idx) const;
+
+ void push (const T&);
+ void truncate (int len);
+
+ private:
+ int m_num;
+ T m_embedded[NUM_EMBEDDED];
+ int m_alloc;
+ T *m_extra;
+};
+
+/* Constructor for semi_embedded_vec. In particular, no dynamic allocation
+ is done. */
+
+template <typename T, int NUM_EMBEDDED>
+semi_embedded_vec<T, NUM_EMBEDDED>::semi_embedded_vec ()
+: m_num (0), m_alloc (0), m_extra (NULL)
+{
+}
+
+/* semi_embedded_vec's dtor. Release any dynamically-allocated memory. */
+
+template <typename T, int NUM_EMBEDDED>
+semi_embedded_vec<T, NUM_EMBEDDED>::~semi_embedded_vec ()
+{
+ XDELETEVEC (m_extra);
+}
+
+/* Look up element IDX, mutably. */
+
+template <typename T, int NUM_EMBEDDED>
+T&
+semi_embedded_vec<T, NUM_EMBEDDED>::operator[] (int idx)
+{
+ linemap_assert (idx < m_num);
+ if (idx < NUM_EMBEDDED)
+ return m_embedded[idx];
+ else
+ {
+ linemap_assert (m_extra != NULL);
+ return m_extra[idx - NUM_EMBEDDED];
+ }
+}
+
+/* Look up element IDX (const). */
+
+template <typename T, int NUM_EMBEDDED>
+const T&
+semi_embedded_vec<T, NUM_EMBEDDED>::operator[] (int idx) const
+{
+ linemap_assert (idx < m_num);
+ if (idx < NUM_EMBEDDED)
+ return m_embedded[idx];
+ else
+ {
+ linemap_assert (m_extra != NULL);
+ return m_extra[idx - NUM_EMBEDDED];
+ }
+}
+
+/* Append VALUE to the end of the semi_embedded_vec. */
+
+template <typename T, int NUM_EMBEDDED>
+void
+semi_embedded_vec<T, NUM_EMBEDDED>::push (const T& value)
+{
+ int idx = m_num++;
+ if (idx < NUM_EMBEDDED)
+ m_embedded[idx] = value;
+ else
+ {
+ /* Offset "idx" to be an index within m_extra. */
+ idx -= NUM_EMBEDDED;
+ if (NULL == m_extra)
+ {
+ linemap_assert (m_alloc == 0);
+ m_alloc = 16;
+ m_extra = XNEWVEC (T, m_alloc);
+ }
+ else if (idx >= m_alloc)
+ {
+ linemap_assert (m_alloc > 0);
+ m_alloc *= 2;
+ m_extra = XRESIZEVEC (T, m_extra, m_alloc);
+ }
+ linemap_assert (m_extra);
+ linemap_assert (idx < m_alloc);
+ m_extra[idx] = value;
+ }
+}
+
+/* Truncate to length LEN. No deallocation is performed. */
+
+template <typename T, int NUM_EMBEDDED>
+void
+semi_embedded_vec<T, NUM_EMBEDDED>::truncate (int len)
+{
+ linemap_assert (len <= m_num);
+ m_num = len;
+}
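+
+/* Illustrative usage sketch (hypothetical, not part of this header):
+   the first NUM_EMBEDDED pushes stay in m_embedded; later pushes
+   spill into the heap-allocated m_extra.  */
+static inline int
+sketch_semi_embedded_vec ()
+{
+  semi_embedded_vec<int, 2> v;
+  v.push (10);       /* stored in m_embedded[0] */
+  v.push (20);       /* stored in m_embedded[1] */
+  v.push (30);       /* spills into m_extra[0] */
+  int sum = v[0] + v[1] + v[2];
+  v.truncate (1);    /* keeps v[0]; no deallocation */
+  return sum;        /* 60 */
+}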
+
+class fixit_hint;
+class diagnostic_path;
+
+/* A "rich" source code location, for use when printing diagnostics.
+ A rich_location has one or more carets&ranges, where the carets
+ are optional. These are referred to as "ranges" from here.
+ Typically the zeroth range has a caret; other ranges sometimes
+ have carets.
+
+ The "primary" location of a rich_location is the caret of range 0,
+ used for determining the line/column when printing diagnostic
+ text, such as:
+
+ some-file.c:3:1: error: ...etc...
+
+ Additional ranges may be added to help the user identify other
+ pertinent clauses in a diagnostic.
+
+ Ranges can (optionally) be given labels via class range_label.
+
+ rich_location instances are intended to be allocated on the stack
+ when generating diagnostics, and to be short-lived.
+
+ Examples of rich locations
+ --------------------------
+
+ Example A
+ *********
+ int i = "foo";
+ ^
+ This "rich" location is simply a single range (range 0), with
+ caret = start = finish at the given point.
+
+ Example B
+ *********
+ a = (foo && bar)
+ ~~~~~^~~~~~~
+ This rich location has a single range (range 0), with the caret
+ at the first "&", and the start/finish at the parentheses.
+ Compare with example C below.
+
+ Example C
+ *********
+ a = (foo && bar)
+ ~~~ ^~ ~~~
+ This rich location has three ranges:
+ - Range 0 has its caret and start location at the first "&" and
+ end at the second "&".
+ - Range 1 has its start and finish at the "f" and "o" of "foo";
+ the caret is not flagged for display, but is perhaps at the "f"
+ of "foo".
+ - Similarly, range 2 has its start and finish at the "b" and "r" of
+ "bar"; the caret is not flagged for display, but is perhaps at the
+ "b" of "bar".
+ Compare with example B above.
+
+ Example D (Fortran frontend)
+ ****************************
+ x = x + y
+ 1 2
+ This rich location has range 0 at "1", and range 1 at "2".
+ Both are flagged for caret display. Both ranges have start/finish
+ equal to their caret point. The frontend overrides the diagnostic
+ context's default caret character for these ranges.
+
+ Example E (range labels)
+ ************************
+ printf ("arg0: %i arg1: %s arg2: %i",
+ ^~
+ |
+ const char *
+ 100, 101, 102);
+ ~~~
+ |
+ int
+ This rich location has two ranges:
+ - range 0 is at the "%s" with start = caret = "%" and finish at
+ the "s". It has a range_label ("const char *").
+ - range 1 has start/finish covering the "101" and is not flagged for
+ caret printing. The caret is at the start of "101", where its
+ range_label is printed ("int").
+
+ Fix-it hints
+ ------------
+
+ Rich locations can also contain "fix-it hints", giving suggestions
+ for the user on how to edit their code to fix a problem. These
+ can be expressed as insertions, replacements, and removals of text.
+ The edits by default are relative to the zeroth range within the
+ rich_location, but optionally they can be expressed relative to
+ other locations (using various overloaded methods of the form
+ rich_location::add_fixit_*).
+
+ For example:
+
+ Example F: fix-it hint: insert_before
+ *************************************
+ ptr = arr[0];
+ ^~~~~~
+ &
+ This rich location has a single range (range 0) covering "arr[0]",
+ with the caret at the start. The rich location has a single
+ insertion fix-it hint, inserted before range 0, added via
+ richloc.add_fixit_insert_before ("&");
+
+ Example G: multiple fix-it hints: insert_before and insert_after
+ ****************************************************************
+ #define FN(ARG0, ARG1, ARG2) fn(ARG0, ARG1, ARG2)
+ ^~~~ ^~~~ ^~~~
+ ( ) ( ) ( )
+ This rich location has three ranges, covering "arg0", "arg1",
+ and "arg2", all with caret-printing enabled.
+ The rich location has 6 insertion fix-it hints: each arg
+ has a pair of insertion fix-it hints, suggesting wrapping
+ them with parentheses: one a '(' inserted before,
+ the other a ')' inserted after, added via
+ richloc.add_fixit_insert_before (LOC, "(");
+ and
+ richloc.add_fixit_insert_after (LOC, ")");
+
+ Example H: fix-it hint: removal
+ *******************************
+ struct s {int i};;
+ ^
+ -
+ This rich location has a single range at the stray trailing
+ semicolon, along with a single removal fix-it hint, covering
+ the same range, added via:
+ richloc.add_fixit_remove ();
+
+ Example I: fix-it hint: replace
+ *******************************
+ c = s.colour;
+ ^~~~~~
+ color
+ This rich location has a single range (range 0) covering "colour",
+ and a single "replace" fix-it hint, covering the same range,
+ added via
+ richloc.add_fixit_replace ("color");
+
+ Example J: fix-it hint: line insertion
+ **************************************
+
+ 3 | #include <stddef.h>
+ + |+#include <stdio.h>
+ 4 | int the_next_line;
+
+ This rich location has a single range at line 4 column 1, marked
+ with SHOW_LINES_WITHOUT_RANGE (to avoid printing a meaningless caret
+ on the "i" of int). It has a insertion fix-it hint of the string
+ "#include <stdio.h>\n".
+
+ Adding a fix-it hint can fail: for example, attempts to insert content
+ at the transition between two line maps may fail due to there being no
+ location_t value to express the new location.
+
+ Attempts to add a fix-it hint within a macro expansion will fail.
+
+ There is only limited support for newline characters in fix-it hints:
+ only hints with newlines which insert an entire new line are permitted,
+ inserting at the start of a line, and finishing with a newline
+ (with no interior newline characters). Other attempts to add
+ fix-it hints containing newline characters will fail.
+ Similarly, attempts to delete or replace a range *affecting* multiple
+ lines will fail.
+
+ The rich_location API handles these failures gracefully, so that
+ diagnostics can attempt to add fix-it hints without each needing
+ extensive checking.
+
+ Fix-it hints within a rich_location are "atomic": if any hints can't
+ be applied, none of them will be (tracked by the m_seen_impossible_fixit
+ flag), and no fix-it hints will be displayed for that rich_location.
+ This implies that diagnostic messages need to be worded in such a way
+ that they make sense whether or not the fix-it hints are displayed,
+ or that richloc.seen_impossible_fixit_p () should be checked before
+ issuing the diagnostics. */
+
+class rich_location
+{
+ public:
+ /* Constructors. */
+
+ /* Constructing from a location. */
+ rich_location (line_maps *set, location_t loc,
+ const range_label *label = NULL);
+
+ /* Destructor. */
+ ~rich_location ();
+
+ /* The class manages the memory pointed to by the elements of
+ the M_FIXIT_HINTS vector and is not meant to be copied or
+ assigned. */
+ rich_location (const rich_location &) = delete;
+ void operator= (const rich_location &) = delete;
+
+ /* Accessors. */
+ location_t get_loc () const { return get_loc (0); }
+ location_t get_loc (unsigned int idx) const;
+
+ void
+ add_range (location_t loc,
+ enum range_display_kind range_display_kind
+ = SHOW_RANGE_WITHOUT_CARET,
+ const range_label *label = NULL);
+
+ void
+ set_range (unsigned int idx, location_t loc,
+ enum range_display_kind range_display_kind);
+
+ unsigned int get_num_locations () const { return m_ranges.count (); }
+
+ const location_range *get_range (unsigned int idx) const;
+ location_range *get_range (unsigned int idx);
+
+ expanded_location get_expanded_location (unsigned int idx);
+
+ void
+ override_column (int column);
+
+ /* Fix-it hints. */
+
+ /* Methods for adding insertion fix-it hints. */
+
+ /* Suggest inserting NEW_CONTENT immediately before the primary
+ range's start. */
+ void
+ add_fixit_insert_before (const char *new_content);
+
+ /* Suggest inserting NEW_CONTENT immediately before the start of WHERE. */
+ void
+ add_fixit_insert_before (location_t where,
+ const char *new_content);
+
+ /* Suggest inserting NEW_CONTENT immediately after the end of the primary
+ range. */
+ void
+ add_fixit_insert_after (const char *new_content);
+
+ /* Suggest inserting NEW_CONTENT immediately after the end of WHERE. */
+ void
+ add_fixit_insert_after (location_t where,
+ const char *new_content);
+
+ /* Methods for adding removal fix-it hints. */
+
+ /* Suggest removing the content covered by range 0. */
+ void
+ add_fixit_remove ();
+
+ /* Suggest removing the content covered between the start and finish
+ of WHERE. */
+ void
+ add_fixit_remove (location_t where);
+
+ /* Suggest removing the content covered by SRC_RANGE. */
+ void
+ add_fixit_remove (source_range src_range);
+
+ /* Methods for adding "replace" fix-it hints. */
+
+ /* Suggest replacing the content covered by range 0 with NEW_CONTENT. */
+ void
+ add_fixit_replace (const char *new_content);
+
+ /* Suggest replacing the content between the start and finish of
+ WHERE with NEW_CONTENT. */
+ void
+ add_fixit_replace (location_t where,
+ const char *new_content);
+
+ /* Suggest replacing the content covered by SRC_RANGE with
+ NEW_CONTENT. */
+ void
+ add_fixit_replace (source_range src_range,
+ const char *new_content);
+
+ unsigned int get_num_fixit_hints () const { return m_fixit_hints.count (); }
+ fixit_hint *get_fixit_hint (int idx) const { return m_fixit_hints[idx]; }
+ fixit_hint *get_last_fixit_hint () const;
+ bool seen_impossible_fixit_p () const { return m_seen_impossible_fixit; }
+
+ /* Set this if the fix-it hints are not suitable to be
+ automatically applied.
+
+ For example, if you are suggesting more than one
+ mutually exclusive solution to a problem, then
+ it doesn't make sense to apply all of the solutions;
+ manual intervention is required.
+
+ If set, then the fix-it hints in the rich_location will
+ be printed, but will not be added to generated patches,
+ or affect the modified version of the file. */
+ void fixits_cannot_be_auto_applied ()
+ {
+ m_fixits_cannot_be_auto_applied = true;
+ }
+
+ bool fixits_can_be_auto_applied_p () const
+ {
+ return !m_fixits_cannot_be_auto_applied;
+ }
+
+ /* An optional path through the code. */
+ const diagnostic_path *get_path () const { return m_path; }
+ void set_path (const diagnostic_path *path) { m_path = path; }
+
+ /* A flag for hinting that the diagnostic involves character encoding
+ issues, and thus that it will be helpful to the user if we show some
+ representation of how the characters in the pertinent source lines
+ are encoded.
+ The default is false (i.e. do not escape).
+ When set to true, non-ASCII bytes in the pertinent source lines will
+ be escaped in a manner controlled by the user-supplied option
+ -fdiagnostics-escape-format=, so that the user can better understand
+ what's going on with the encoding in their source file. */
+ bool escape_on_output_p () const { return m_escape_on_output; }
+ void set_escape_on_output (bool flag) { m_escape_on_output = flag; }
+
+private:
+ bool reject_impossible_fixit (location_t where);
+ void stop_supporting_fixits ();
+ void maybe_add_fixit (location_t start,
+ location_t next_loc,
+ const char *new_content);
+
+public:
+ static const int STATICALLY_ALLOCATED_RANGES = 3;
+
+protected:
+ line_maps *m_line_table;
+ semi_embedded_vec <location_range, STATICALLY_ALLOCATED_RANGES> m_ranges;
+
+ int m_column_override;
+
+ bool m_have_expanded_location;
+ bool m_seen_impossible_fixit;
+ bool m_fixits_cannot_be_auto_applied;
+ bool m_escape_on_output;
+
+ expanded_location m_expanded_location;
+
+ static const int MAX_STATIC_FIXIT_HINTS = 2;
+ semi_embedded_vec <fixit_hint *, MAX_STATIC_FIXIT_HINTS> m_fixit_hints;
+
+ const diagnostic_path *m_path;
+};
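+
+/* Illustrative sketch (hypothetical, not part of this header) of
+   building a rich_location like Example I above: a single range at
+   LOC, with a "replace" fix-it hint covering the same range.  */
+static inline void
+sketch_rich_location (line_maps *set, location_t loc)
+{
+  rich_location richloc (set, loc);
+  richloc.add_fixit_replace ("color");
+  /* If LOC was within a macro expansion, the hint was rejected
+     and no hints will be displayed.  */
+  if (richloc.seen_impossible_fixit_p ())
+    return;
+}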
+
+/* A struct for the result of range_label::get_text: a NUL-terminated buffer
+ of localized text, and a flag to determine if the caller should "free" the
+ buffer. */
+
+class label_text
+{
+public:
+ label_text ()
+ : m_buffer (NULL), m_owned (false)
+ {}
+
+ ~label_text ()
+ {
+ if (m_owned)
+ free (m_buffer);
+ }
+
+ /* Move ctor. */
+ label_text (label_text &&other)
+ : m_buffer (other.m_buffer), m_owned (other.m_owned)
+ {
+ other.release ();
+ }
+
+ /* Move assignment. */
+ label_text & operator= (label_text &&other)
+ {
+ if (m_owned)
+ free (m_buffer);
+ m_buffer = other.m_buffer;
+ m_owned = other.m_owned;
+ other.release ();
+ return *this;
+ }
+
+ /* Delete the copy ctor and copy-assignment operator. */
+ label_text (const label_text &) = delete;
+ label_text & operator= (const label_text &) = delete;
+
+ /* Create a label_text instance that borrows BUFFER from a
+ longer-lived owner. */
+ static label_text borrow (const char *buffer)
+ {
+ return label_text (const_cast <char *> (buffer), false);
+ }
+
+ /* Create a label_text instance that takes ownership of BUFFER. */
+ static label_text take (char *buffer)
+ {
+ return label_text (buffer, true);
+ }
+
+ void release ()
+ {
+ m_buffer = NULL;
+ m_owned = false;
+ }
+
+ const char *get () const
+ {
+ return m_buffer;
+ }
+
+ bool is_owner () const
+ {
+ return m_owned;
+ }
+
+private:
+ char *m_buffer;
+ bool m_owned;
+
+ label_text (char *buffer, bool owned)
+ : m_buffer (buffer), m_owned (owned)
+ {}
+};
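+
+/* Illustrative sketch (hypothetical, not part of this header) of the
+   two ownership modes above, assuming libiberty's xstrdup.  */
+static inline label_text
+sketch_make_label (bool dynamic)
+{
+  if (dynamic)
+    /* The label_text frees the copied buffer in its dtor.  */
+    return label_text::take (xstrdup ("const char *"));
+  /* The string literal outlives the label_text, so borrowing
+     is safe.  */
+  return label_text::borrow ("const char *");
+}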
+
+/* Abstract base class for labelling a range within a rich_location
+ (e.g. for labelling expressions with their type).
+
+ Generating the text could require non-trivial work, so this work
+ is delayed (via the "get_text" virtual function) until the diagnostic
+ printing code "knows" it needs it, thus avoiding doing it e.g. for
+ warnings that are filtered by command-line flags. This virtual
+ function also isolates libcpp and the diagnostics subsystem from
+ the front-end and middle-end-specific code for generating the text
+ for the labels.
+
+ Like the rich_location instances they annotate, range_label instances
+ are intended to be allocated on the stack when generating diagnostics,
+ and to be short-lived. */
+
+class range_label
+{
+ public:
+ virtual ~range_label () {}
+
+ /* Get localized text for the label.
+ The RANGE_IDX is provided, allowing for range_label instances to be
+ shared by multiple ranges if need be (the "flyweight" design pattern). */
+ virtual label_text get_text (unsigned range_idx) const = 0;
+};
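+
+/* Illustrative sketch (hypothetical, not part of this header): a
+   concrete range_label returning a fixed, statically-allocated
+   string.  */
+class sketch_static_label : public range_label
+{
+ public:
+  sketch_static_label (const char *text) : m_text (text) {}
+
+  label_text get_text (unsigned /* range_idx */) const final override
+  {
+    /* The string outlives the label, so borrowing is safe.  */
+    return label_text::borrow (m_text);
+  }
+
+ private:
+  const char *m_text;
+};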
+
+/* A fix-it hint: a suggested insertion, replacement, or deletion of text.
+ We handle these three types of edit with one class, by representing
+ them as replacement of a half-open range:
+ [start, next_loc)
+ Insertions have start == next_loc: "replace" the empty string at the
+ start location with the new string.
+ Deletions are replacement with the empty string.
+
+ There is only limited support for newline characters in fix-it hints
+ as noted above in the comment for class rich_location.
+ A fixit_hint instance can have at most one newline character; if
+ present, the newline character must be the final character of
+ the content (preventing e.g. fix-its that split a pre-existing line). */
+
+class fixit_hint
+{
+ public:
+ fixit_hint (location_t start,
+ location_t next_loc,
+ const char *new_content);
+ ~fixit_hint () { free (m_bytes); }
+
+ bool affects_line_p (const char *file, int line) const;
+ location_t get_start_loc () const { return m_start; }
+ location_t get_next_loc () const { return m_next_loc; }
+ bool maybe_append (location_t start,
+ location_t next_loc,
+ const char *new_content);
+
+ const char *get_string () const { return m_bytes; }
+ size_t get_length () const { return m_len; }
+
+ bool insertion_p () const { return m_start == m_next_loc; }
+
+ bool ends_with_newline_p () const;
+
+ private:
+ /* We don't use source_range here since, unlike most places,
+ this is a half-open/half-closed range:
+ [start, next_loc)
+ so that we can support insertion via start == next_loc. */
+ location_t m_start;
+ location_t m_next_loc;
+ char *m_bytes;
+ size_t m_len;
+};
+
+
+/* This enum is used by the function linemap_resolve_location
+ below. The meaning of the values is explained in the comment of
+ that function. */
+enum location_resolution_kind
+{
+ LRK_MACRO_EXPANSION_POINT,
+ LRK_SPELLING_LOCATION,
+ LRK_MACRO_DEFINITION_LOCATION
+};
+
+/* Resolve a virtual location into either a spelling location, an
+ expansion point location or a token argument replacement point
+ location. Return the map that encodes the virtual location as well
+ as the resolved location.
+
+ If LOC is *NOT* the location of a token resulting from the
+ expansion of a macro, then the parameter LRK (which stands for
+ Location Resolution Kind) is ignored and the resulting location
+ just equals the one passed as an argument.
+
+ Now if LOC *IS* the location of a token resulting from the
+ expansion of a macro, this is what happens.
+
+ * If LRK is set to LRK_MACRO_EXPANSION_POINT
+ -------------------------------
+
+ The virtual location is resolved to the first macro expansion point
+ that led to this macro expansion.
+
+ * If LRK is set to LRK_SPELLING_LOCATION
+ -------------------------------------
+
+ The virtual location is resolved to the locus where the token has
+ been spelled in the source. This can follow through all the macro
+ expansions that led to the token.
+
+ * If LRK is set to LRK_MACRO_DEFINITION_LOCATION
+ --------------------------------------
+
+ The virtual location is resolved to the locus of the token in the
+ context of the macro definition.
+
+ If LOC is the locus of a token that is an argument of a
+ function-like macro [replacing a parameter in the replacement list
+ of the macro] the virtual location is resolved to the locus of the
+ parameter that is replaced, in the context of the definition of the
+ macro.
+
+ If LOC is the locus of a token that is not an argument of a
+ function-like macro, then the function behaves as if LRK was set to
+ LRK_SPELLING_LOCATION.
+
+   If LOC_MAP is not NULL, *LOC_MAP is set to the map encoding the
+   returned location.  Note that if the returned location wasn't
+   originally encoded by a map, *LOC_MAP is set to NULL.  This can
+   happen if LOC resolves to a location reserved for the client code,
+   like UNKNOWN_LOCATION or BUILTINS_LOCATION in GCC.  */
+
+location_t linemap_resolve_location (class line_maps *,
+ location_t loc,
+ enum location_resolution_kind lrk,
+ const line_map_ordinary **loc_map);
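+
+/* Illustrative sketch, not part of the original header: resolving a
+   possibly-virtual location down to where the token was spelled
+   (LINE_TABLE and LOC stand for the client's line table and location).
+
+     const line_map_ordinary *map = NULL;
+     location_t spelling
+       = linemap_resolve_location (line_table, loc,
+                                   LRK_SPELLING_LOCATION, &map);
+*/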
+
+/* Suppose that LOC is the virtual location of a token coming from the
+ expansion of a macro M. This function then steps up to get the
+ location L of the point where M got expanded. If L is a spelling
+ location inside a macro expansion M', then this function returns
+ the point where M' was expanded. LOC_MAP is an output parameter.
+ When non-NULL, *LOC_MAP is set to the map of the returned
+ location. */
+location_t linemap_unwind_toward_expansion (class line_maps *,
+ location_t loc,
+ const line_map **loc_map);
+
+/* If LOC is the virtual location of a token coming from the expansion
+ of a macro M and if its spelling location is reserved (e.g, a
+ location for a built-in token), then this function unwinds (using
+ linemap_unwind_toward_expansion) the location until a location that
+ is not reserved and is not in a system header is reached. In other
+ words, this unwinds the reserved location until a location that is
+ in real source code is reached.
+
+ Otherwise, if the spelling location for LOC is not reserved or if
+ LOC doesn't come from the expansion of a macro, the function
+ returns LOC as is and *MAP is not touched.
+
+   *MAP is set to the map of the returned location if the latter is
+   different from LOC.  */
+location_t linemap_unwind_to_first_non_reserved_loc (class line_maps *,
+ location_t loc,
+ const line_map **map);
+
+/* Expand source code location LOC and return a user readable source
+   code location.  LOC must be a spelling (non-virtual) location.  If
+   it's a location < RESERVED_LOCATION_COUNT, a zeroed expanded source
+   location is returned.  */
+expanded_location linemap_expand_location (class line_maps *,
+ const line_map *,
+ location_t loc);
+
+/* Statistics about maps allocation and usage as returned by
+ linemap_get_statistics. */
+struct linemap_stats
+{
+ long num_ordinary_maps_allocated;
+ long num_ordinary_maps_used;
+ long ordinary_maps_allocated_size;
+ long ordinary_maps_used_size;
+ long num_expanded_macros;
+ long num_macro_tokens;
+ long num_macro_maps_used;
+ long macro_maps_allocated_size;
+ long macro_maps_used_size;
+ long macro_maps_locations_size;
+ long duplicated_macro_maps_locations_size;
+ long adhoc_table_size;
+ long adhoc_table_entries_used;
+};
+
+/* Return the highest location emitted for a given file for which
+ there is a line map in SET. FILE_NAME is the file name to
+ consider. If the function returns TRUE, *LOC is set to the highest
+ location emitted for that file. */
+bool linemap_get_file_highest_location (class line_maps * set,
+ const char *file_name,
+ location_t *loc);
+
+/* Compute and return statistics about the memory consumption of some
+ parts of the line table SET. */
+void linemap_get_statistics (line_maps *, struct linemap_stats *);
+
+/* Dump debugging information about source location LOC into the file
+ stream STREAM. SET is the line map set LOC comes from. */
+void linemap_dump_location (line_maps *, location_t, FILE *);
+
+/* Dump line map at index IX in line table SET to STREAM. If STREAM
+ is NULL, use stderr. IS_MACRO is true if the caller wants to
+ dump a macro map, false otherwise. */
+void linemap_dump (FILE *, line_maps *, unsigned, bool);
+
+/* Dump line table SET to STREAM. If STREAM is NULL, stderr is used.
+ NUM_ORDINARY specifies how many ordinary maps to dump. NUM_MACRO
+ specifies how many macro maps to dump. */
+void line_table_dump (FILE *, line_maps *, unsigned int, unsigned int);
+
+/* An enum for distinguishing the various parts within a location_t. */
+
+enum location_aspect
+{
+ LOCATION_ASPECT_CARET,
+ LOCATION_ASPECT_START,
+ LOCATION_ASPECT_FINISH
+};
+
+/* The rich_location class requires a way to expand location_t instances.
+ We would directly use expand_location_to_spelling_point, which is
+ implemented in gcc/input.cc, but we also need to use it for rich_location
+ within genmatch.cc.
+ Hence we require client code of libcpp to implement the following
+ symbol. */
+extern expanded_location
+linemap_client_expand_location_to_spelling_point (location_t,
+ enum location_aspect);
+
+#endif /* !LIBCPP_LINE_MAP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/logical-location.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/logical-location.h
new file mode 100644
index 0000000..d6b5c51
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/logical-location.h
@@ -0,0 +1,72 @@
+/* Logical location support, without knowledge of "tree".
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_LOGICAL_LOCATION_H
+#define GCC_LOGICAL_LOCATION_H
+
+/* An enum for discriminating between different kinds of logical location
+ for a diagnostic.
+
+ Roughly corresponds to logicalLocation's "kind" property in SARIF v2.1.0
+ (section 3.33.7). */
+
+enum logical_location_kind
+{
+ LOGICAL_LOCATION_KIND_UNKNOWN,
+
+ LOGICAL_LOCATION_KIND_FUNCTION,
+ LOGICAL_LOCATION_KIND_MEMBER,
+ LOGICAL_LOCATION_KIND_MODULE,
+ LOGICAL_LOCATION_KIND_NAMESPACE,
+ LOGICAL_LOCATION_KIND_TYPE,
+ LOGICAL_LOCATION_KIND_RETURN_TYPE,
+ LOGICAL_LOCATION_KIND_PARAMETER,
+ LOGICAL_LOCATION_KIND_VARIABLE
+};
+
+/* Abstract base class for passing around logical locations in the
+ diagnostics subsystem, such as:
+ - "within function 'foo'", or
+ - "within method 'bar'",
+ but *without* requiring knowledge of trees
+ (see tree-logical-location.h for subclasses relating to trees). */
+
+class logical_location
+{
+public:
+ virtual ~logical_location () {}
+
+ /* Get a string (or NULL) suitable for use by the SARIF logicalLocation
+ "name" property (SARIF v2.1.0 section 3.33.4). */
+ virtual const char *get_short_name () const = 0;
+
+ /* Get a string (or NULL) suitable for use by the SARIF logicalLocation
+ "fullyQualifiedName" property (SARIF v2.1.0 section 3.33.5). */
+ virtual const char *get_name_with_scope () const = 0;
+
+ /* Get a string (or NULL) suitable for use by the SARIF logicalLocation
+ "decoratedName" property (SARIF v2.1.0 section 3.33.6). */
+ virtual const char *get_internal_name () const = 0;
+
+ /* Get what kind of SARIF logicalLocation this is (if any). */
+ virtual enum logical_location_kind get_kind () const = 0;
+};
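+
+/* Illustrative sketch, not part of the original header: a hypothetical
+   subclass wrapping a plain function name (the real tree-based
+   subclasses live in tree-logical-location.h).
+
+     class function_name_location : public logical_location
+     {
+     public:
+       function_name_location (const char *name) : m_name (name) {}
+       const char *get_short_name () const final override { return m_name; }
+       const char *get_name_with_scope () const final override { return m_name; }
+       const char *get_internal_name () const final override { return NULL; }
+       enum logical_location_kind get_kind () const final override
+       { return LOGICAL_LOCATION_KIND_FUNCTION; }
+     private:
+       const char *m_name;
+     };
+*/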
+
+#endif /* GCC_LOGICAL_LOCATION_H. */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/loop-unroll.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/loop-unroll.h
new file mode 100644
index 0000000..9d5446a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/loop-unroll.h
@@ -0,0 +1,27 @@
+/* Loop unrolling header file.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_LOOP_UNROLL_H
+#define GCC_LOOP_UNROLL_H
+
+extern void unroll_loops (int);
+extern basic_block split_edge_and_insert (edge, rtx_insn *);
+
+
+#endif /* GCC_LOOP_UNROLL_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lower-subreg.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lower-subreg.h
new file mode 100644
index 0000000..75b9a7d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lower-subreg.h
@@ -0,0 +1,60 @@
+/* Target-dependent costs for lower-subreg.cc.
+ Copyright (C) 2012-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef LOWER_SUBREG_H
+#define LOWER_SUBREG_H 1
+
+/* Information about whether, and where, lower-subreg should be applied. */
+struct lower_subreg_choices {
+ /* A boolean vector for move splitting that is indexed by mode and is
+ true for each mode that is to have its copies split. */
+ bool move_modes_to_split[MAX_MACHINE_MODE];
+
+ /* True if zero-extensions from word_mode to twice_word_mode should
+ be split. */
+ bool splitting_zext;
+
+ /* Index X is true if twice_word_mode shifts by X + BITS_PER_WORD
+ should be split. */
+ bool splitting_ashift[MAX_BITS_PER_WORD];
+ bool splitting_lshiftrt[MAX_BITS_PER_WORD];
+ bool splitting_ashiftrt[MAX_BITS_PER_WORD];
+
+ /* True if there is at least one mode that is worth splitting. */
+ bool something_to_do;
+};
+
+/* Target-specific information for the subreg lowering pass. */
+struct target_lower_subreg {
+ /* An integer mode that is twice as wide as word_mode. */
+ scalar_int_mode_pod x_twice_word_mode;
+
+ /* What we have decided to do when optimizing for size (index 0)
+ and speed (index 1). */
+ struct lower_subreg_choices x_choices[2];
+};
+
+extern struct target_lower_subreg default_target_lower_subreg;
+#if SWITCHABLE_TARGET
+extern struct target_lower_subreg *this_target_lower_subreg;
+#else
+#define this_target_lower_subreg (&default_target_lower_subreg)
+#endif
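+
+/* Illustrative sketch, not part of the original header: pass code would
+   read the precomputed decisions via this_target_lower_subreg, indexing
+   x_choices by 0 when optimizing for size and 1 for speed (MODE and the
+   speed predicate below are hypothetical context).
+
+     const struct lower_subreg_choices &choices
+       = this_target_lower_subreg->x_choices[for_speed_p ? 1 : 0];
+     if (choices.something_to_do
+         && choices.move_modes_to_split[(int) mode])
+       split_this_copy ();  // hypothetical action
+*/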
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lra-int.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lra-int.h
new file mode 100644
index 0000000..a400a0f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lra-int.h
@@ -0,0 +1,528 @@
+/* Local Register Allocator (LRA) intercommunication header file.
+ Copyright (C) 2010-2023 Free Software Foundation, Inc.
+ Contributed by Vladimir Makarov <vmakarov@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_LRA_INT_H
+#define GCC_LRA_INT_H
+
+#define lra_assert(c) gcc_checking_assert (c)
+
+/* The parameter used to prevent infinite reloading for an insn.  Each
+   insn operand might require a reload and, if it is a memory operand,
+   its base and index registers might require a reload too.  */
+#define LRA_MAX_INSN_RELOADS (MAX_RECOG_OPERANDS * 3)
+
+typedef struct lra_live_range *lra_live_range_t;
+
+/* The structure describes program points where a given pseudo lives.
+ The live ranges can be used to find conflicts with other pseudos.
+ If the live ranges of two pseudos are intersected, the pseudos are
+ in conflict. */
+struct lra_live_range
+{
+ /* Pseudo regno whose live range is described by given
+ structure. */
+ int regno;
+ /* Program point range. */
+ int start, finish;
+ /* Next structure describing program points where the pseudo
+ lives. */
+ lra_live_range_t next;
+ /* Pointer to structures with the same start. */
+ lra_live_range_t start_next;
+};
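+
+/* Illustrative sketch, not part of the original header: two individual
+   ranges overlap exactly when their program-point intervals do
+   (lra_intersected_live_ranges_p, declared below, walks whole lists
+   this way).
+
+     static bool
+     ranges_overlap_p (const lra_live_range *r1, const lra_live_range *r2)
+     {
+       return r1->start <= r2->finish && r2->start <= r1->finish;
+     }
+*/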
+
+typedef struct lra_copy *lra_copy_t;
+
+/* Copy between pseudos which affects assigning hard registers. */
+struct lra_copy
+{
+ /* True if regno1 is the destination of the copy. */
+ bool regno1_dest_p;
+ /* Execution frequency of the copy. */
+ int freq;
+ /* Pseudos connected by the copy. REGNO1 < REGNO2. */
+ int regno1, regno2;
+  /* Next copies involving REGNO1 and REGNO2, respectively.  */
+ lra_copy_t regno1_next, regno2_next;
+};
+
+/* Common info about a register (pseudo or hard register). */
+class lra_reg
+{
+public:
+ /* Bitmap of UIDs of insns (including debug insns) referring the
+ reg. */
+ bitmap_head insn_bitmap;
+ /* The following fields are defined only for pseudos. */
+ /* Hard registers with which the pseudo conflicts. */
+ HARD_REG_SET conflict_hard_regs;
+ /* Pseudo allocno class hard registers which cannot be a start hard register
+ of the pseudo. */
+ HARD_REG_SET exclude_start_hard_regs;
+  /* We assign hard registers to reload pseudos, which can occur in
+     only a few places, so two hard register preferences are enough
+     for them.  The following fields define the preferred hard
+     registers.  If there are no such hard registers, the first field
+     value is negative.  If there is only one preferred hard register,
+     the 2nd field is negative.  */
+ int preferred_hard_regno1, preferred_hard_regno2;
+  /* Profits of using the corresponding preferred hard registers.  If
+     both hard registers are defined, the first hard register has no
+     less profit than the second one.  */
+ int preferred_hard_regno_profit1, preferred_hard_regno_profit2;
+#ifdef STACK_REGS
+ /* True if the pseudo should not be assigned to a stack register. */
+ bool no_stack_p;
+#endif
+ /* Number of references and execution frequencies of the register in
+ *non-debug* insns. */
+ int nrefs, freq;
+ int last_reload;
+ /* rtx used to undo the inheritance. It can be non-null only
+ between subsequent inheritance and undo inheritance passes. */
+ rtx restore_rtx;
+  /* Value held by the register.  If two pseudos have the same value,
+     they do not conflict.  */
+ int val;
+  /* Offset from the related eliminable register to the pseudo reg.  */
+ poly_int64 offset;
+ /* These members are set up in lra-lives.cc and updated in
+ lra-coalesce.cc. */
+ /* The biggest size mode in which each pseudo reg is referred in
+ whole function (possibly via subreg). */
+ machine_mode biggest_mode;
+ /* Live ranges of the pseudo. */
+ lra_live_range_t live_ranges;
+ /* This member is set up in lra-lives.cc for subsequent
+ assignments. */
+ lra_copy_t copies;
+};
+
+/* References to the common info about each register. */
+extern class lra_reg *lra_reg_info;
+
+extern HARD_REG_SET hard_regs_spilled_into;
+
+/* Static info about each insn operand (common for all insns with the
+ same ICODE). Warning: if the structure definition is changed, the
+ initializer for debug_operand_data in lra.cc should be changed
+ too. */
+struct lra_operand_data
+{
+ /* The machine description constraint string of the operand. */
+ const char *constraint;
+ /* Alternatives for which early_clobber can be true. */
+ alternative_mask early_clobber_alts;
+ /* It is taken only from machine description (which is different
+ from recog_data.operand_mode) and can be of VOIDmode. */
+ ENUM_BITFIELD(machine_mode) mode : 16;
+ /* The type of the operand (in/out/inout). */
+ ENUM_BITFIELD (op_type) type : 8;
+  /* True if the operand is accessed through a STRICT_LOW_PART.  */
+ unsigned int strict_low : 1;
+ /* True if the operand is an operator. */
+ unsigned int is_operator : 1;
+ /* True if the operand is an address. */
+ unsigned int is_address : 1;
+};
+
+/* Info about register occurrence in an insn. */
+struct lra_insn_reg
+{
+ /* Alternatives for which early_clobber can be true. */
+ alternative_mask early_clobber_alts;
+ /* The biggest mode through which the insn refers to the register
+ occurrence (remember the register can be accessed through a
+ subreg in the insn). */
+ ENUM_BITFIELD(machine_mode) biggest_mode : 16;
+ /* The type of the corresponding operand which is the register. */
+ ENUM_BITFIELD (op_type) type : 8;
+ /* True if the reg is accessed through a subreg and the subreg is
+ just a part of the register. */
+ unsigned int subreg_p : 1;
+ /* The corresponding regno of the register. */
+ int regno;
+ /* Next reg info of the same insn. */
+ struct lra_insn_reg *next;
+};
+
+/* Static part (common info for insns with the same ICODE) of LRA
+   internal insn info.  It exists in at most one instance for each
+   non-negative ICODE.  The only exception is asm insns: each asm insn
+   has its own structure.  Warning: if the structure definition is
+   changed, the initializer for debug_insn_static_data in lra.cc should
+   be changed too.  */
+struct lra_static_insn_data
+{
+ /* Static info about each insn operand. */
+ struct lra_operand_data *operand;
+ /* Each duplication refers to the number of the corresponding
+ operand which is duplicated. */
+ int *dup_num;
+ /* The number of an operand marked as commutative, -1 otherwise. */
+ int commutative;
+ /* Number of operands, duplications, and alternatives of the
+ insn. */
+ char n_operands;
+ char n_dups;
+ char n_alternatives;
+ /* Insns in machine description (or clobbers in asm) may contain
+ explicit hard regs which are not operands. The following list
+ describes such hard registers. */
+ struct lra_insn_reg *hard_regs;
+ /* Array [n_alternatives][n_operand] of static constraint info for
+ given operand in given alternative. This info can be changed if
+ the target reg info is changed. */
+ const struct operand_alternative *operand_alternative;
+};
+
+/* Negative insn alternative numbers used for special cases. */
+#define LRA_UNKNOWN_ALT -1
+#define LRA_NON_CLOBBERED_ALT -2
+
+/* LRA internal info about an insn (LRA internal insn
+ representation). */
+class lra_insn_recog_data
+{
+public:
+ /* The insn code. */
+ int icode;
+  /* The alternative that should be used for the insn; LRA_UNKNOWN_ALT
+     if it is unknown, if we may assume any alternative, or if the insn
+     is a debug insn.  LRA_NON_CLOBBERED_ALT means ignoring any earlier
+     clobbers for the insn.  */
+ int used_insn_alternative;
+  /* SP offset before the insn relative to the one at the function
+     start.  */
+ poly_int64 sp_offset;
+ /* The insn itself. */
+ rtx_insn *insn;
+ /* Common data for insns with the same ICODE. Asm insns (their
+ ICODE is negative) do not share such structures. */
+ struct lra_static_insn_data *insn_static_data;
+  /* Two arrays whose sizes equal the number of operands and of
+     duplications, respectively: */
+ rtx **operand_loc; /* The operand locations, NULL if no operands. */
+ rtx **dup_loc; /* The dup locations, NULL if no dups. */
+  /* Hard registers implicitly used/clobbered in a given call insn.
+     The value can be NULL or point to an array of hard register
+     numbers ending with a negative value.  To distinguish clobbered
+     from used hard regs, clobbered hard regs are incremented by
+     FIRST_PSEUDO_REGISTER.  */
+ int *arg_hard_regs;
+ /* Cached value of get_preferred_alternatives. */
+ alternative_mask preferred_alternatives;
+ /* The following member value is always NULL for a debug insn. */
+ struct lra_insn_reg *regs;
+};
+
+typedef class lra_insn_recog_data *lra_insn_recog_data_t;
+
+/* Whether the clobber is used temporarily in LRA.  */
+#define LRA_TEMP_CLOBBER_P(x) \
+ (RTL_FLAG_CHECK1 ("TEMP_CLOBBER_P", (x), CLOBBER)->unchanging)
+
+/* Cost factor for each additional reload and maximal cost reject for
+   insn reloads.  One might ask about such strange numbers: their
+   values come historically from the former reload pass.  */
+#define LRA_LOSER_COST_FACTOR 6
+#define LRA_MAX_REJECT 600
+
+/* Maximum allowed number of assignment pass iterations after the
+   latest spill pass when any former reload pseudo was spilled.  It
+   prevents LRA from cycling in buggy cases.  */
+#define LRA_MAX_ASSIGNMENT_ITERATION_NUMBER 30
+
+/* The maximal number of inheritance/split passes in LRA.  It should
+   be more than 1 to perform caller-save transformations, and much
+   less than MAX_CONSTRAINT_ITERATION_NUMBER to prevent LRA from doing
+   as many constraint passes as permitted in some complicated cases.
+   The first inheritance/split pass has the biggest impact on the
+   generated code quality; each subsequent pass affects the generated
+   code to a lesser degree.  For example, the 3rd pass does not change
+   generated SPEC2000 code at all on x86-64.  */
+#define LRA_MAX_INHERITANCE_PASSES 2
+
+#if LRA_MAX_INHERITANCE_PASSES <= 0 \
+ || LRA_MAX_INHERITANCE_PASSES >= LRA_MAX_ASSIGNMENT_ITERATION_NUMBER - 8
+#error wrong LRA_MAX_INHERITANCE_PASSES value
+#endif
+
+/* Analogous macro to the above one but for rematerialization. */
+#define LRA_MAX_REMATERIALIZATION_PASSES 2
+
+#if LRA_MAX_REMATERIALIZATION_PASSES <= 0 \
+ || LRA_MAX_REMATERIALIZATION_PASSES >= LRA_MAX_ASSIGNMENT_ITERATION_NUMBER - 8
+#error wrong LRA_MAX_REMATERIALIZATION_PASSES value
+#endif
+
+/* lra.cc: */
+
+extern FILE *lra_dump_file;
+
+extern bool lra_hard_reg_split_p;
+extern bool lra_asm_error_p;
+extern bool lra_reg_spill_p;
+
+extern HARD_REG_SET lra_no_alloc_regs;
+
+extern int lra_insn_recog_data_len;
+extern lra_insn_recog_data_t *lra_insn_recog_data;
+
+extern int lra_curr_reload_num;
+
+extern void lra_dump_bitmap_with_title (const char *, bitmap, int);
+extern hashval_t lra_rtx_hash (rtx x);
+extern void lra_push_insn (rtx_insn *);
+extern void lra_push_insn_by_uid (unsigned int);
+extern void lra_push_insn_and_update_insn_regno_info (rtx_insn *);
+extern rtx_insn *lra_pop_insn (void);
+extern unsigned int lra_insn_stack_length (void);
+
+extern rtx lra_create_new_reg (machine_mode, rtx, enum reg_class, HARD_REG_SET *,
+ const char *);
+extern rtx lra_create_new_reg_with_unique_value (machine_mode, rtx,
+ enum reg_class, HARD_REG_SET *,
+ const char *);
+extern void lra_set_regno_unique_value (int);
+extern void lra_invalidate_insn_data (rtx_insn *);
+extern void lra_set_insn_deleted (rtx_insn *);
+extern void lra_delete_dead_insn (rtx_insn *);
+extern void lra_emit_add (rtx, rtx, rtx);
+extern void lra_emit_move (rtx, rtx);
+extern void lra_update_dups (lra_insn_recog_data_t, signed char *);
+
+extern void lra_process_new_insns (rtx_insn *, rtx_insn *, rtx_insn *,
+ const char *);
+
+extern bool lra_substitute_pseudo (rtx *, int, rtx, bool, bool);
+extern bool lra_substitute_pseudo_within_insn (rtx_insn *, int, rtx, bool);
+
+extern lra_insn_recog_data_t lra_set_insn_recog_data (rtx_insn *);
+extern lra_insn_recog_data_t lra_update_insn_recog_data (rtx_insn *);
+extern void lra_set_used_insn_alternative (rtx_insn *, int);
+extern void lra_set_used_insn_alternative_by_uid (int, int);
+
+extern void lra_invalidate_insn_regno_info (rtx_insn *);
+extern void lra_update_insn_regno_info (rtx_insn *);
+extern struct lra_insn_reg *lra_get_insn_regs (int);
+
+extern void lra_free_copies (void);
+extern void lra_create_copy (int, int, int);
+extern lra_copy_t lra_get_copy (int);
+
+extern int lra_new_regno_start;
+extern int lra_constraint_new_regno_start;
+extern int lra_bad_spill_regno_start;
+extern rtx lra_pmode_pseudo;
+extern bitmap_head lra_inheritance_pseudos;
+extern bitmap_head lra_split_regs;
+extern bitmap_head lra_subreg_reload_pseudos;
+extern bitmap_head lra_optional_reload_pseudos;
+
+/* lra-constraints.cc: */
+
+extern void lra_init_equiv (void);
+extern int lra_constraint_offset (int, machine_mode);
+
+extern int lra_constraint_iter;
+extern bool check_and_force_assignment_correctness_p;
+extern int lra_inheritance_iter;
+extern int lra_undo_inheritance_iter;
+extern bool lra_constrain_insn (rtx_insn *);
+extern bool lra_constraints (bool);
+extern void lra_constraints_init (void);
+extern void lra_constraints_finish (void);
+extern bool spill_hard_reg_in_range (int, enum reg_class, rtx_insn *, rtx_insn *);
+extern void lra_inheritance (void);
+extern bool lra_undo_inheritance (void);
+
+/* lra-lives.cc: */
+
+extern int lra_live_max_point;
+extern int *lra_point_freq;
+
+extern int lra_hard_reg_usage[FIRST_PSEUDO_REGISTER];
+
+extern int lra_live_range_iter;
+extern void lra_create_live_ranges (bool, bool);
+extern lra_live_range_t lra_copy_live_range_list (lra_live_range_t);
+extern lra_live_range_t lra_merge_live_ranges (lra_live_range_t,
+ lra_live_range_t);
+extern bool lra_intersected_live_ranges_p (lra_live_range_t,
+ lra_live_range_t);
+extern void lra_print_live_range_list (FILE *, lra_live_range_t);
+extern void debug (lra_live_range &ref);
+extern void debug (lra_live_range *ptr);
+extern void lra_debug_live_range_list (lra_live_range_t);
+extern void lra_debug_pseudo_live_ranges (int);
+extern void lra_debug_live_ranges (void);
+extern void lra_clear_live_ranges (void);
+extern void lra_live_ranges_init (void);
+extern void lra_live_ranges_finish (void);
+extern void lra_setup_reload_pseudo_preferenced_hard_reg (int, int, int);
+
+/* lra-assigns.cc: */
+
+extern int lra_assignment_iter;
+extern int lra_assignment_iter_after_spill;
+extern void lra_setup_reg_renumber (int, int, bool);
+extern bool lra_assign (bool &);
+extern bool lra_split_hard_reg_for (void);
+
+/* lra-coalesce.cc: */
+
+extern int lra_coalesce_iter;
+extern bool lra_coalesce (void);
+
+/* lra-spills.cc: */
+
+extern bool lra_need_for_scratch_reg_p (void);
+extern bool lra_need_for_spills_p (void);
+extern void lra_spill (void);
+extern void lra_final_code_change (void);
+
+/* lra-remat.cc: */
+
+extern int lra_rematerialization_iter;
+extern bool lra_remat (void);
+
+/* lra-eliminations.cc: */
+
+extern void lra_debug_elim_table (void);
+extern int lra_get_elimination_hard_regno (int);
+extern rtx lra_eliminate_regs_1 (rtx_insn *, rtx, machine_mode,
+ bool, bool, poly_int64, bool);
+extern void eliminate_regs_in_insn (rtx_insn *insn, bool, bool, poly_int64);
+extern void lra_eliminate (bool, bool);
+
+extern void lra_eliminate_reg_if_possible (rtx *);
+
+
+
+/* Return the hard register to which the given pseudo REGNO is
+   assigned.  A negative value means that the pseudo got memory or
+   that its allocation is not known yet.  */
+inline int
+lra_get_regno_hard_regno (int regno)
+{
+ resize_reg_info ();
+ return reg_renumber[regno];
+}
+
+/* Change class of pseudo REGNO to NEW_CLASS. Print info about it
+ using TITLE. Output a new line if NL_P. */
+inline void
+lra_change_class (int regno, enum reg_class new_class,
+ const char *title, bool nl_p)
+{
+ lra_assert (regno >= FIRST_PSEUDO_REGISTER);
+ if (lra_dump_file != NULL)
+ fprintf (lra_dump_file, "%s class %s for r%d",
+ title, reg_class_names[new_class], regno);
+ setup_reg_classes (regno, new_class, NO_REGS, new_class);
+ if (lra_dump_file != NULL && nl_p)
+ fprintf (lra_dump_file, "\n");
+}
+
+/* Update insn operands which are duplications of operand NOP.  The
+   insn is represented by its LRA internal representation ID.  */
+inline void
+lra_update_dup (lra_insn_recog_data_t id, int nop)
+{
+ int i;
+ struct lra_static_insn_data *static_id = id->insn_static_data;
+
+ for (i = 0; i < static_id->n_dups; i++)
+ if (static_id->dup_num[i] == nop)
+ *id->dup_loc[i] = *id->operand_loc[nop];
+}
+
+/* Process operator duplications in insn with ID.  We do it after
+   processing the operands.  Generally speaking, we could probably do
+   this simultaneously with operand processing, because a common
+   practice is to enumerate the operators after their operands.  */
+inline void
+lra_update_operator_dups (lra_insn_recog_data_t id)
+{
+ int i;
+ struct lra_static_insn_data *static_id = id->insn_static_data;
+
+ for (i = 0; i < static_id->n_dups; i++)
+ {
+ int ndup = static_id->dup_num[i];
+
+ if (static_id->operand[ndup].is_operator)
+ *id->dup_loc[i] = *id->operand_loc[ndup];
+ }
+}
+
+/* Return info about INSN. Set up the info if it is not done yet. */
+inline lra_insn_recog_data_t
+lra_get_insn_recog_data (rtx_insn *insn)
+{
+ lra_insn_recog_data_t data;
+ unsigned int uid = INSN_UID (insn);
+
+ if (lra_insn_recog_data_len > (int) uid
+ && (data = lra_insn_recog_data[uid]) != NULL)
+ {
+ /* Check that we did not change insn without updating the insn
+ info. */
+ lra_assert (data->insn == insn
+ && (INSN_CODE (insn) < 0
+ || data->icode == INSN_CODE (insn)));
+ return data;
+ }
+ return lra_set_insn_recog_data (insn);
+}
+
+/* Update the offset of all pseudos whose value is VAL by INCR.  */
+inline void
+lra_update_reg_val_offset (int val, poly_int64 incr)
+{
+ int i;
+
+ for (i = FIRST_PSEUDO_REGISTER; i < max_reg_num (); i++)
+ {
+ if (lra_reg_info[i].val == val)
+ lra_reg_info[i].offset += incr;
+ }
+}
+
+/* Return true if register content is equal to VAL with OFFSET. */
+inline bool
+lra_reg_val_equal_p (int regno, int val, poly_int64 offset)
+{
+ if (lra_reg_info[regno].val == val
+ && known_eq (lra_reg_info[regno].offset, offset))
+ return true;
+
+ return false;
+}
+
+/* Assign value of register FROM to TO. */
+inline void
+lra_assign_reg_val (int from, int to)
+{
+ lra_reg_info[to].val = lra_reg_info[from].val;
+ lra_reg_info[to].offset = lra_reg_info[from].offset;
+}
+
+#endif /* GCC_LRA_INT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lra.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lra.h
new file mode 100644
index 0000000..85dbf92
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lra.h
@@ -0,0 +1,42 @@
+/* Communication between the Local Register Allocator (LRA) and
+ the rest of the compiler.
+ Copyright (C) 2010-2023 Free Software Foundation, Inc.
+ Contributed by Vladimir Makarov <vmakarov@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_LRA_H
+#define GCC_LRA_H
+
+extern bool lra_simple_p;
+
+/* Return the allocno reg class of REGNO.  If it is a reload pseudo,
+   the pseudo should finally get a hard register of the allocno
+   class.  */
+inline enum reg_class
+lra_get_allocno_class (int regno)
+{
+ resize_reg_info ();
+ return reg_allocno_class (regno);
+}
+
+extern rtx lra_eliminate_regs (rtx, machine_mode, rtx);
+extern void lra (FILE *);
+extern void lra_init_once (void);
+extern void lra_finish_once (void);
+
+#endif /* GCC_LRA_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lto-compress.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lto-compress.h
new file mode 100644
index 0000000..6866117
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lto-compress.h
@@ -0,0 +1,43 @@
+/* LTO IL compression streams.
+
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+ Contributed by Simon Baldwin <simonb@google.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_LTO_COMPRESS_H
+#define GCC_LTO_COMPRESS_H
+
+struct lto_compression_stream;
+
+/* In lto-compress.cc. */
+extern struct lto_compression_stream
+ *lto_start_compression (void (*callback) (const char *, unsigned, void *),
+ void *opaque);
+extern void lto_compress_block (struct lto_compression_stream *stream,
+ const char *base, size_t num_chars);
+extern void lto_end_compression (struct lto_compression_stream *stream);
+
+extern struct lto_compression_stream
+ *lto_start_uncompression (void (*callback) (const char *, unsigned, void *),
+ void *opaque);
+extern void lto_uncompress_block (struct lto_compression_stream *stream,
+ const char *base, size_t num_chars);
+extern void lto_end_uncompression (struct lto_compression_stream *stream,
+ lto_compression compression);
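+
+/* Illustrative sketch, not part of the original header: the streams are
+   callback-driven; each block of (un)compressed bytes is handed to the
+   callback together with the opaque cookie (WRITE_CB, OUT, BUF and
+   BUF_LEN below are hypothetical).
+
+     static void
+     write_cb (const char *data, unsigned len, void *opaque)
+     {
+       fwrite (data, 1, len, (FILE *) opaque);
+     }
+
+     struct lto_compression_stream *s = lto_start_compression (write_cb, out);
+     lto_compress_block (s, buf, buf_len);
+     lto_end_compression (s);
+*/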
+
+#endif /* GCC_LTO_COMPRESS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lto-section-names.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lto-section-names.h
new file mode 100644
index 0000000..aa1b2f2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lto-section-names.h
@@ -0,0 +1,41 @@
+/* Definitions for LTO section names.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_LTO_SECTION_NAMES_H
+#define GCC_LTO_SECTION_NAMES_H
+
+/* The string that is the prefix on the section names we make for lto.
+ For decls the DECL_ASSEMBLER_NAME is appended to make the section
+ name for the functions and static_initializers. For other types of
+ sections a '.' and the section type are appended. */
+#define LTO_SECTION_NAME_PREFIX ".gnu.lto_"
+#define OFFLOAD_SECTION_NAME_PREFIX ".gnu.offload_lto_"
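+
+/* For example (with a hypothetical object id), a symbol-table section
+   would be named ".gnu.lto_.symtab.<id>", while a function-body section
+   uses the decl's assembler name, e.g. ".gnu.lto_foo.<id>".  */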
+
+/* Can be either OFFLOAD_SECTION_NAME_PREFIX when we stream IR for offload
+ compiler, or LTO_SECTION_NAME_PREFIX for LTO case. */
+extern const char *section_name_prefix;
+
+/* Segment name for LTO sections. This is only used for Mach-O. */
+
+#define LTO_SEGMENT_NAME "__GNU_LTO"
+
+#define OFFLOAD_VAR_TABLE_SECTION_NAME ".gnu.offload_vars"
+#define OFFLOAD_FUNC_TABLE_SECTION_NAME ".gnu.offload_funcs"
+
+#endif /* GCC_LTO_SECTION_NAMES_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lto-streamer.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lto-streamer.h
new file mode 100644
index 0000000..fc7133d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/lto-streamer.h
@@ -0,0 +1,1248 @@
+/* Data structures and declarations used for reading and writing
+ GIMPLE to a file stream.
+
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+ Contributed by Doug Kwan <dougkwan@google.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_LTO_STREAMER_H
+#define GCC_LTO_STREAMER_H
+
+#include "plugin-api.h"
+#include "gcov-io.h"
+#include "diagnostic.h"
+#include "version.h"
+
+/* The encoding for a function consists of the following sections:
+
+ 1) The header.
+ 2) FIELD_DECLS.
+ 3) FUNCTION_DECLS.
+ 4) global VAR_DECLS.
+ 5) type_decls
+ 6) types.
+ 7) Names for the labels that have names
+ 8) The SSA names.
+ 9) The control flow graph.
+   10-11) Gimple for local decls.
+ 12) Gimple for the function.
+ 13) Strings.
+
+ 1) THE HEADER.
+ 2-6) THE GLOBAL DECLS AND TYPES.
+
+     The global decls and types are encoded in the same way.  For each
+     entry, there is a word with the offset within the section to the
+     entry.
+
+ 7) THE LABEL NAMES.
+
+     Since most labels do not have names, this section may be of zero
+ length. It consists of an array of string table references, one
+ per label. In the lto code, the labels are given either
+     positive or negative indexes.  The positive ones have names and
+ the negative ones do not. The positive index can be used to
+ find the name in this array.
+
+ 9) THE CFG.
+
+ 10) Index into the local decls. Since local decls can have local
+ decls inside them, they must be read in randomly in order to
+ properly restore them.
+
+ 11-12) GIMPLE FOR THE LOCAL DECLS AND THE FUNCTION BODY.
+
+ The gimple consists of a set of records.
+
+ THE FUNCTION
+
+     At the top level of (12) is the function.  It consists of five
+     pieces:
+
+ LTO_function - The tag.
+ eh tree - This is all of the exception handling regions
+                       put out in a post order traversal of the
+ tree. Siblings are output as lists terminated
+ by a 0. The set of fields matches the fields
+ defined in except.cc.
+
+ last_basic_block - in uleb128 form.
+
+ basic blocks - This is the set of basic blocks.
+
+ zero - The termination of the basic blocks.
+
+ BASIC BLOCKS
+
+     There are two forms of basic blocks, depending on whether they
+     are empty or not.
+
+ The basic block consists of:
+
+ LTO_bb1 or LTO_bb0 - The tag.
+
+ bb->index - the index in uleb128 form.
+
+     #succs          - The number of successors in uleb128 form.
+
+ the successors - For each edge, a pair. The first of the
+ pair is the index of the successor in
+ uleb128 form and the second are the flags in
+ uleb128 form.
+
+ the statements - A gimple tree, as described above.
+ These are only present for LTO_BB1.
+ Following each statement is an optional
+ exception handling record LTO_eh_region
+ which contains the region number (for
+ regions >= 0).
+
+ zero - This is only present for LTO_BB1 and is used
+ to terminate the statements and exception
+ regions within this block.
+
+ 12) STRINGS
+
+     Strings are represented in the table as pairs, a length in ULEB128
+ form followed by the data for the string. */
+
+#define LTO_major_version GCC_major_version
+#define LTO_minor_version 0
+
+typedef unsigned char lto_decl_flags_t;
+
+/* Stream additional data to LTO object files to make it easier to debug
+ streaming code. This changes object files. */
+static const bool streamer_debugging = false;
+
+/* Tags representing the various IL objects written to the bytecode file
+ (GIMPLE statements, basic blocks, EH regions, tree nodes, etc).
+
+ NOTE, when adding new LTO tags, also update lto_tag_name. */
+enum LTO_tags
+{
+ LTO_null = 0,
+
+ /* Reference to previously-streamed node. */
+ LTO_tree_pickle_reference,
+
+ /* References to indexable tree nodes. These objects are stored in
+ tables that are written separately from the function bodies
+ and variable constructors that reference them. This way they can be
+ instantiated even when the referencing functions aren't (e.g., during WPA)
+ and it also allows functions to be copied from one file to another without
+ having to unpickle the body first (the references are location
+ independent). */
+ LTO_global_stream_ref,
+
+ LTO_ssa_name_ref,
+
+ /* Special for global streamer. A blob of unnamed tree nodes. */
+ LTO_tree_scc,
+
+ /* Sequence of trees. */
+ LTO_trees,
+
+ /* Shared INTEGER_CST node. */
+ LTO_integer_cst,
+
+ /* Tags of trees are encoded as
+ LTO_first_tree_tag + TREE_CODE. */
+ LTO_first_tree_tag,
+  /* Tags of gimple tuples are encoded as
+ LTO_first_gimple_tag + gimple_code. */
+ LTO_first_gimple_tag = LTO_first_tree_tag + MAX_TREE_CODES,
+
+ /* Entry and exit basic blocks. */
+ LTO_bb0 = LTO_first_gimple_tag + LAST_AND_UNUSED_GIMPLE_CODE,
+ LTO_bb1,
+
+ /* EH region holding the previous statement. */
+ LTO_eh_region,
+
+ /* Function body. */
+ LTO_function,
+
+ /* EH table. */
+ LTO_eh_table,
+
+ /* EH region types. These mirror enum eh_region_type. */
+ LTO_ert_cleanup,
+ LTO_ert_try,
+ LTO_ert_allowed_exceptions,
+ LTO_ert_must_not_throw,
+
+ /* EH landing pad. */
+ LTO_eh_landing_pad,
+
+ /* EH try/catch node. */
+ LTO_eh_catch,
+
+ /* This tag must always be last. */
+ LTO_NUM_TAGS
+};
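+
+/* Illustrative sketch, not part of the original header: tree and gimple
+   tags are formed by offsetting into this enum, e.g.
+
+     static enum LTO_tags
+     tree_code_to_tag (enum tree_code code)
+     {
+       return (enum LTO_tags) ((int) LTO_first_tree_tag + (int) code);
+     }
+*/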
+
+
+/* Set of section types that are in an LTO file. This list will grow
+ as the number of IPA passes grows since each IPA pass will need its
+ own section type to store its summary information.
+
+ When adding a new section type, you must also extend the
+ LTO_SECTION_NAME array in lto-section-in.cc. */
+enum lto_section_type
+{
+ LTO_section_decls = 0,
+ LTO_section_function_body,
+ LTO_section_static_initializer,
+ LTO_section_symtab,
+ LTO_section_symtab_extension,
+ LTO_section_refs,
+ LTO_section_asm,
+ LTO_section_jump_functions,
+ LTO_section_ipa_pure_const,
+ LTO_section_ipa_reference,
+ LTO_section_ipa_profile,
+ LTO_section_symtab_nodes,
+ LTO_section_opts,
+ LTO_section_cgraph_opt_sum,
+ LTO_section_ipa_fn_summary,
+ LTO_section_ipcp_transform,
+ LTO_section_ipa_icf,
+ LTO_section_offload_table,
+ LTO_section_mode_table,
+ LTO_section_lto,
+ LTO_section_ipa_sra,
+ LTO_section_odr_types,
+ LTO_section_ipa_modref,
+ LTO_N_SECTION_TYPES /* Must be last. */
+};
+
+/* Indices to the various function, type and symbol streams. */
+enum lto_decl_stream_e_t
+{
+ LTO_DECL_STREAM = 0, /* Must be first. */
+ LTO_N_DECL_STREAMS
+};
+
+typedef enum ld_plugin_symbol_resolution ld_plugin_symbol_resolution_t;
+
+/* Return a char pointer to the start of a data stream for an lto pass
+   or function.  The first parameter is the file data that contains
+   the information.  The second parameter is the type of information
+   to be obtained.  The third parameter is the name of the function
+   and is only used when finding a function body; otherwise it is
+   NULL.  The last parameter is the length of the data returned.  */
+typedef const char* (lto_get_section_data_f) (struct lto_file_decl_data *,
+ enum lto_section_type,
+ const char *,
+ int,
+ size_t *);
+
+/* Release the data obtained from the above call.  The first three
+   parameters are the same as above.  The fourth parameter is the data
+   itself and the fifth is the length of the data.  */
+typedef void (lto_free_section_data_f) (struct lto_file_decl_data *,
+ enum lto_section_type,
+ const char *,
+ const char *,
+ size_t);
+
+/* The location cache holds expanded locations for streamed-in trees.
+   This is done to reduce the memory usage of the libcpp linemap, which
+   strongly prefers locations to be inserted in source order.  */
+
+class lto_location_cache
+{
+public:
+ /* Apply all changes in location cache. Add locations into linemap and patch
+ trees. */
+ bool apply_location_cache ();
+  /* Tree merging did not succeed; mark all changes in the cache as accepted.  */
+ void accept_location_cache ();
+  /* Tree merging did succeed; throw away recent changes.  */
+ void revert_location_cache ();
+ void input_location (location_t *loc, struct bitpack_d *bp,
+ class data_in *data_in);
+ void input_location_and_block (location_t *loc, struct bitpack_d *bp,
+ class lto_input_block *ib,
+ class data_in *data_in);
+ lto_location_cache ()
+ : loc_cache (), accepted_length (0), current_file (NULL), current_line (0),
+ current_col (0), current_sysp (false), current_loc (UNKNOWN_LOCATION),
+ current_block (NULL_TREE)
+ {
+ gcc_assert (!current_cache);
+ current_cache = this;
+ }
+ ~lto_location_cache ()
+ {
+ apply_location_cache ();
+ gcc_assert (current_cache == this);
+ current_cache = NULL;
+ }
+
+  /* There can be at most one instance of the location cache (combining
+     multiple would bring it out of sync with the libcpp linemap); this
+     points to the current one.  */
+ static lto_location_cache *current_cache;
+
+private:
+ static int cmp_loc (const void *pa, const void *pb);
+
+ struct cached_location
+ {
+ const char *file;
+ location_t *loc;
+ int line, col;
+ bool sysp;
+ tree block;
+ unsigned discr;
+ };
+
+ /* The location cache. */
+
+ auto_vec<cached_location> loc_cache;
+
+ /* Accepted entries are ones used by trees that are known to be not unified
+ by tree merging. */
+
+ int accepted_length;
+
+  /* Bookkeeping to remember state in between calls to
+     lto_apply_location_cache.  When streaming gimple, the location
+     cache is not used and thus lto_apply_location_cache happens on a
+     per-location basis.  It is then useful to avoid redundant calls
+     of the linemap API.  */
+
+ const char *current_file;
+ int current_line;
+ int current_col;
+ bool current_sysp;
+ location_t current_loc;
+ tree current_block;
+ unsigned current_discr;
+};
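+
+/* Illustrative sketch, not part of the original header: the cache is
+   used in an RAII fashion; the constructor registers the instance as
+   current_cache and the destructor flushes pending locations.
+
+     {
+       lto_location_cache cache;  // becomes lto_location_cache::current_cache
+       // ... stream in trees, recording locations into the cache ...
+     }                            // dtor runs apply_location_cache ()
+*/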
+
+/* Structure used as buffer for reading an LTO file. */
+class lto_input_block
+{
+public:
+  /* Special constructor for the string table: it abuses this to do
+     random access but uses the uhwi decoder.  */
+ lto_input_block (const char *data_, unsigned int p_, unsigned int len_,
+ const unsigned char *mode_table_)
+ : data (data_), mode_table (mode_table_), p (p_), len (len_) {}
+ lto_input_block (const char *data_, unsigned int len_,
+ const unsigned char *mode_table_)
+ : data (data_), mode_table (mode_table_), p (0), len (len_) {}
+
+ const char *data;
+ const unsigned char *mode_table;
+ unsigned int p;
+ unsigned int len;
+};
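+
+/* Illustrative sketch, not part of the original header: a reader wraps
+   a section buffer (BUF, LEN and MODE_TABLE are hypothetical) and the
+   streamer decoders then advance the cursor P toward LEN.
+
+     lto_input_block ib (buf, len, mode_table);
+     while (ib.p < ib.len)
+       process_byte (ib.data[ib.p++]);  // hypothetical consumer
+*/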
+
+/* Compression algorithm used for compression of LTO bytecode. */
+
+enum lto_compression
+{
+ ZLIB,
+ ZSTD
+};
+
+/* Structure that represents LTO ELF section with information
+ about the format. */
+
+struct lto_section
+{
+ int16_t major_version;
+ int16_t minor_version;
+ unsigned char slim_object;
+ unsigned char _padding;
+
+ /* Flags is a private field that is not defined publicly. */
+ uint16_t flags;
+
+  /* Record compression algorithm C in FLAGS.  */
+ inline void set_compression (lto_compression c)
+ {
+ flags = c;
+ }
+
+ /* Get compression from FLAGS. */
+ inline lto_compression get_compression ()
+ {
+ return (lto_compression) flags;
+ }
+};
+
+STATIC_ASSERT (sizeof (lto_section) == 8);
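+
+/* Illustrative sketch, not part of the original header: writers record
+   the compression algorithm in the section header and readers query it
+   back.
+
+     lto_section s = {};
+     s.set_compression (ZSTD);
+     gcc_assert (s.get_compression () == ZSTD);
+*/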
+
+/* This is the first part of the record in an LTO file for many of the
+   IPA passes.  */
+struct lto_simple_header
+{
+ /* Size of main gimple body of function. */
+ int32_t main_size;
+};
+
+struct lto_simple_header_with_strings : lto_simple_header
+{
+ /* Size of the string table. */
+ int32_t string_size;
+};
+
+/* The header for a function body. */
+struct lto_function_header : lto_simple_header_with_strings
+{
+ /* Size of the cfg. */
+ int32_t cfg_size;
+};
+
+
+/* Structure describing a symbol section. */
+struct lto_decl_header : lto_simple_header_with_strings
+{
+ /* Size of region for decl state. */
+ int32_t decl_state_size;
+
+ /* Number of nodes in globals stream. */
+ int32_t num_nodes;
+};
+
+
+/* Statistics gathered during LTO, WPA and LTRANS. */
+struct lto_stats_d
+{
+ unsigned HOST_WIDE_INT num_input_cgraph_nodes;
+ unsigned HOST_WIDE_INT num_output_symtab_nodes;
+ unsigned HOST_WIDE_INT num_input_files;
+ unsigned HOST_WIDE_INT num_output_files;
+ unsigned HOST_WIDE_INT num_cgraph_partitions;
+ unsigned HOST_WIDE_INT section_size[LTO_N_SECTION_TYPES];
+ unsigned HOST_WIDE_INT num_function_bodies;
+ unsigned HOST_WIDE_INT num_trees[NUM_TREE_CODES];
+ unsigned HOST_WIDE_INT num_output_il_bytes;
+ unsigned HOST_WIDE_INT num_compressed_il_bytes;
+ unsigned HOST_WIDE_INT num_input_il_bytes;
+ unsigned HOST_WIDE_INT num_uncompressed_il_bytes;
+ unsigned HOST_WIDE_INT num_tree_bodies_output;
+ unsigned HOST_WIDE_INT num_pickle_refs_output;
+};
+
+/* Entry of LTO symtab encoder. */
+struct lto_encoder_entry
+{
+ symtab_node *node;
+ /* Is the node in this partition (i.e. ltrans of this partition will
+ be responsible for outputting it)? */
+ unsigned int in_partition:1;
+ /* Do we encode body in this partition? */
+ unsigned int body:1;
+ /* Do we encode initializer in this partition?
+ For example the readonly variable initializers are encoded to aid
+ constant folding even if they are not in the partition. */
+ unsigned int initializer:1;
+};
+
+
+/* Encoder data structure used to stream callgraph nodes. */
+struct lto_symtab_encoder_d
+{
+ vec<lto_encoder_entry> nodes;
+ hash_map<symtab_node *, size_t> *map;
+};
+
+typedef struct lto_symtab_encoder_d *lto_symtab_encoder_t;
+
+/* Iterator structure for cgraph node sets. */
+struct lto_symtab_encoder_iterator
+{
+ lto_symtab_encoder_t encoder;
+ unsigned index;
+};
+
+
+
+/* The lto_tree_ref_encoder struct is used to encode trees into indices. */
+
+struct lto_tree_ref_encoder
+{
+ hash_map<tree, unsigned> *tree_hash_table; /* Maps pointers to indices. */
+ vec<tree> trees; /* Maps indices to pointers. */
+};
+
+
+/* Structure to hold states of input scope. */
+struct GTY((for_user)) lto_in_decl_state
+{
+ /* Array of lto_in_decl_buffers to store type and decls streams. */
+ vec<tree, va_gc> *streams[LTO_N_DECL_STREAMS];
+
+  /* If this in-decl state is associated with a function, FN_DECL
+     points to the FUNCTION_DECL.  */
+ tree fn_decl;
+
+ /* True if decl state is compressed. */
+ bool compressed;
+};
+
+typedef struct lto_in_decl_state *lto_in_decl_state_ptr;
+
+struct decl_state_hasher : ggc_ptr_hash<lto_in_decl_state>
+{
+ static hashval_t
+ hash (lto_in_decl_state *s)
+ {
+ return htab_hash_pointer (s->fn_decl);
+ }
+
+ static bool
+ equal (lto_in_decl_state *a, lto_in_decl_state *b)
+ {
+ return a->fn_decl == b->fn_decl;
+ }
+};
+
+/* The structure that holds all of the vectors of global types,
+ decls and cgraph nodes used in the serialization of this file. */
+struct lto_out_decl_state
+{
+ /* The buffers contain the sets of decls of various kinds and types we have
+ seen so far and the indexes assigned to them. */
+ struct lto_tree_ref_encoder streams[LTO_N_DECL_STREAMS];
+
+ /* Encoder for cgraph nodes. */
+ lto_symtab_encoder_t symtab_node_encoder;
+
+ /* If this out-decl state belongs to a function, fn_decl points to that
+ function. Otherwise, it is NULL. */
+ tree fn_decl;
+
+ /* True if decl state is compressed. */
+ bool compressed;
+};
+
+typedef struct lto_out_decl_state *lto_out_decl_state_ptr;
+
+
+/* Compact representation of an index <-> resolution pair.  Unpacked
+   to a vector later.  */
+struct res_pair
+{
+ ld_plugin_symbol_resolution_t res;
+ unsigned index;
+};
+
+
+/* One of these is allocated for each object file that is being
+   compiled by LTO.  This structure contains the tables that are
+   needed by the serialized functions and ipa passes to connect
+   themselves to the global types and decls as they are
+   reconstituted.  */
+struct GTY(()) lto_file_decl_data
+{
+ /* Decl state currently used. */
+ struct lto_in_decl_state *current_decl_state;
+
+ /* Decl state corresponding to regions outside of any functions
+ in the compilation unit. */
+ struct lto_in_decl_state *global_decl_state;
+
+ /* Table of cgraph nodes present in this file. */
+ lto_symtab_encoder_t GTY((skip)) symtab_node_encoder;
+
+  /* Hash table mapping each function decl to its in-decl state.  */
+ hash_table<decl_state_hasher> *function_decl_states;
+
+ /* The .o file that these offsets relate to. */
+ const char *GTY((skip)) file_name;
+
+ /* Hash table maps lto-related section names to location in file. */
+ htab_t GTY((skip)) section_hash_table;
+
+ /* Hash new name of renamed global declaration to its original name. */
+ htab_t GTY((skip)) renaming_hash_table;
+
+  /* Linked list used temporarily in the reader.  */
+ struct lto_file_decl_data *next;
+
+ /* Order in which the file appears on the command line. */
+ int order;
+
+ /* Sub ID for merged objects. */
+ unsigned HOST_WIDE_INT id;
+
+  /* Symbol resolutions for this file.  */
+ vec<res_pair> GTY((skip)) respairs;
+ unsigned max_index;
+
+ gcov_summary GTY((skip)) profile_info;
+
+ /* Map assigning declarations their resolutions. */
+ hash_map<tree, ld_plugin_symbol_resolution> * GTY((skip)) resolution_map;
+
+ /* Mode translation table. */
+ const unsigned char *mode_table;
+
+ /* Read LTO section. */
+ lto_section lto_section_header;
+
+ int order_base;
+
+ int unit_base;
+};
+
+typedef struct lto_file_decl_data *lto_file_decl_data_ptr;
+
+struct lto_char_ptr_base
+{
+ char *ptr;
+};
+
+/* An incore byte stream to buffer the various parts of the function.
+ The entire structure should be zeroed when created. The record
+ consists of a set of blocks. The first sizeof (ptr) bytes are used
+ as a chain, and the rest store the bytes to be written. */
+struct lto_output_stream
+{
+ /* The pointer to the first block in the stream. */
+ struct lto_char_ptr_base * first_block;
+
+ /* The pointer to the last and current block in the stream. */
+ struct lto_char_ptr_base * current_block;
+
+ /* The pointer to where the next char should be written. */
+ char * current_pointer;
+
+ /* The number of characters left in the current block. */
+ unsigned int left_in_block;
+
+ /* The block size of the last block allocated. */
+ unsigned int block_size;
+
+ /* The total number of characters written. */
+ unsigned int total_size;
+};
+
+/* A simple output block. This can be used for simple IPA passes that
+ do not need more than one stream. */
+struct lto_simple_output_block
+{
+ enum lto_section_type section_type;
+ struct lto_out_decl_state *decl_state;
+
+ /* The stream that the main tree codes are written to. */
+ struct lto_output_stream *main_stream;
+};
+
+/* String hashing. */
+
+struct string_slot
+{
+ const char *s;
+ int len;
+ unsigned int slot_num;
+};
+
+/* Hashtable helpers. */
+
+struct string_slot_hasher : nofree_ptr_hash <string_slot>
+{
+ static inline hashval_t hash (const string_slot *);
+ static inline bool equal (const string_slot *, const string_slot *);
+};
+
+/* Returns a hash code for DS. Adapted from libiberty's htab_hash_string
+ to support strings that may not end in '\0'. */
+
+inline hashval_t
+string_slot_hasher::hash (const string_slot *ds)
+{
+ hashval_t r = ds->len;
+ int i;
+
+ for (i = 0; i < ds->len; i++)
+ r = r * 67 + (unsigned)ds->s[i] - 113;
+ return r;
+}
+
+/* Returns true if DS1 and DS2 are equal.  */
+
+inline bool
+string_slot_hasher::equal (const string_slot *ds1, const string_slot *ds2)
+{
+ if (ds1->len == ds2->len)
+ return memcmp (ds1->s, ds2->s, ds1->len) == 0;
+
+ return 0;
+}
+
+/* Data structure holding all the data and descriptors used when writing
+ an LTO file. */
+struct output_block
+{
+ enum lto_section_type section_type;
+ struct lto_out_decl_state *decl_state;
+
+ /* The stream that the main tree codes are written to. */
+ struct lto_output_stream *main_stream;
+
+ /* The stream that contains the string table. */
+ struct lto_output_stream *string_stream;
+
+ /* The stream that contains the cfg. */
+ struct lto_output_stream *cfg_stream;
+
+ /* The hash table that contains the set of strings we have seen so
+ far and the indexes assigned to them. */
+ hash_table<string_slot_hasher> *string_hash_table;
+
+  /* The symbol we are currently serializing.  NULL if we are
+     serializing something else.  */
+ symtab_node *symbol;
+
+ /* These are the last file and line that were seen in the stream.
+ If the current node differs from these, it needs to insert
+ something into the stream and fix these up. */
+ const char *current_file;
+ int current_line;
+ int current_col;
+ bool current_sysp;
+ bool reset_locus;
+ bool emit_pwd;
+ tree current_block;
+ unsigned current_discr;
+
+ /* Cache of nodes written in this section. */
+ struct streamer_tree_cache_d *writer_cache;
+
+ /* All trees identified as local to the unit streamed. */
+ hash_set<tree> *local_trees;
+
+  /* All data that must persist for the whole duration of the output
+     block can go here.  */
+ struct obstack obstack;
+};
+
+
+/* Data and descriptors used when reading from an LTO file. */
+class data_in
+{
+public:
+ /* The global decls and types. */
+ struct lto_file_decl_data *file_data;
+
+ /* The string table. */
+ const char *strings;
+
+ /* The length of the string table. */
+ unsigned int strings_len;
+
+ /* Maps each reference number to the resolution done by the linker. */
+ vec<ld_plugin_symbol_resolution_t> globals_resolution;
+
+ /* Cache of pickled nodes. */
+ struct streamer_tree_cache_d *reader_cache;
+
+  /* Cache of source code locations.  */
+ lto_location_cache location_cache;
+};
+
+
+/* In lto-section-in.cc */
+extern class lto_input_block * lto_create_simple_input_block (
+ struct lto_file_decl_data *,
+ enum lto_section_type, const char **, size_t *);
+extern void
+lto_destroy_simple_input_block (struct lto_file_decl_data *,
+ enum lto_section_type,
+ class lto_input_block *, const char *, size_t);
+extern void lto_set_in_hooks (struct lto_file_decl_data **,
+ lto_get_section_data_f *,
+ lto_free_section_data_f *);
+extern struct lto_file_decl_data **lto_get_file_decl_data (void);
+extern const char *lto_get_section_data (struct lto_file_decl_data *,
+ enum lto_section_type,
+ const char *, int, size_t *,
+ bool decompress = false);
+extern const char *lto_get_summary_section_data (struct lto_file_decl_data *,
+ enum lto_section_type,
+ size_t *);
+extern const char *lto_get_raw_section_data (struct lto_file_decl_data *,
+ enum lto_section_type,
+ const char *, int, size_t *);
+extern void lto_free_section_data (struct lto_file_decl_data *,
+ enum lto_section_type,
+ const char *, const char *, size_t,
+ bool decompress = false);
+extern void lto_free_raw_section_data (struct lto_file_decl_data *,
+ enum lto_section_type,
+ const char *, const char *, size_t);
+extern htab_t lto_create_renaming_table (void);
+extern void lto_record_renamed_decl (struct lto_file_decl_data *,
+ const char *, const char *);
+extern const char *lto_get_decl_name_mapping (struct lto_file_decl_data *,
+ const char *);
+extern struct lto_in_decl_state *lto_new_in_decl_state (void);
+extern void lto_delete_in_decl_state (struct lto_in_decl_state *);
+extern struct lto_in_decl_state *lto_get_function_in_decl_state (
+ struct lto_file_decl_data *, tree);
+extern void lto_free_function_in_decl_state (struct lto_in_decl_state *);
+extern void lto_free_function_in_decl_state_for_node (symtab_node *);
+extern void lto_section_overrun (class lto_input_block *) ATTRIBUTE_NORETURN;
+extern void lto_value_range_error (const char *,
+ HOST_WIDE_INT, HOST_WIDE_INT,
+ HOST_WIDE_INT) ATTRIBUTE_NORETURN;
+
+/* In lto-section-out.cc */
+extern void lto_begin_section (const char *, bool);
+extern void lto_end_section (void);
+extern void lto_write_data (const void *, unsigned int);
+extern void lto_write_raw_data (const void *, unsigned int);
+extern void lto_write_stream (struct lto_output_stream *);
+extern struct lto_simple_output_block *lto_create_simple_output_block (
+ enum lto_section_type);
+extern void lto_destroy_simple_output_block (struct lto_simple_output_block *);
+extern struct lto_out_decl_state *lto_new_out_decl_state (void);
+extern void lto_delete_out_decl_state (struct lto_out_decl_state *);
+extern struct lto_out_decl_state *lto_get_out_decl_state (void);
+extern void lto_push_out_decl_state (struct lto_out_decl_state *);
+extern struct lto_out_decl_state *lto_pop_out_decl_state (void);
+extern void lto_record_function_out_decl_state (tree,
+ struct lto_out_decl_state *);
+extern void lto_append_block (struct lto_output_stream *);
+
+
+/* In lto-streamer.cc. */
+
+/* Set when streaming LTO for offloading compiler. */
+extern bool lto_stream_offload_p;
+
+extern const char *lto_tag_name (enum LTO_tags);
+extern char *lto_get_section_name (int, const char *, int,
+ struct lto_file_decl_data *);
+extern void print_lto_report (const char *);
+extern void lto_streamer_init (void);
+extern bool gate_lto_out (void);
+extern void lto_check_version (int, int, const char *);
+extern void lto_streamer_hooks_init (void);
+
+/* In lto-streamer-in.cc */
+extern void lto_input_cgraph (struct lto_file_decl_data *, const char *);
+extern void lto_reader_init (void);
+extern void lto_free_file_name_hash (void);
+extern void lto_input_function_body (struct lto_file_decl_data *,
+ struct cgraph_node *,
+ const char *);
+extern void lto_input_variable_constructor (struct lto_file_decl_data *,
+ struct varpool_node *,
+ const char *);
+extern void lto_input_constructors_and_inits (struct lto_file_decl_data *,
+ const char *);
+extern void lto_input_toplevel_asms (struct lto_file_decl_data *, int);
+extern void lto_input_mode_table (struct lto_file_decl_data *);
+extern class data_in *lto_data_in_create (struct lto_file_decl_data *,
+ const char *, unsigned,
+ vec<ld_plugin_symbol_resolution_t> );
+extern void lto_data_in_delete (class data_in *);
+extern void lto_input_data_block (class lto_input_block *, void *, size_t);
+void lto_input_location (location_t *, struct bitpack_d *, class data_in *);
+tree lto_input_tree_ref (class lto_input_block *, class data_in *,
+ struct function *, enum LTO_tags);
+void lto_tag_check_set (enum LTO_tags, int, ...);
+void lto_init_eh (void);
+hashval_t lto_input_scc (class lto_input_block *, class data_in *,
+ unsigned *, unsigned *, bool);
+tree lto_input_tree_1 (class lto_input_block *, class data_in *,
+ enum LTO_tags, hashval_t hash);
+tree lto_input_tree (class lto_input_block *, class data_in *);
+tree stream_read_tree_ref (class lto_input_block *, class data_in *);
+
+
+/* In lto-streamer-out.cc */
+extern void lto_register_decl_definition (tree, struct lto_file_decl_data *);
+extern struct output_block *create_output_block (enum lto_section_type);
+extern void destroy_output_block (struct output_block *);
+extern void lto_output_tree (struct output_block *, tree, bool, bool);
+extern void stream_write_tree_ref (struct output_block *, tree);
+extern void lto_output_var_decl_ref (struct lto_out_decl_state *,
+ struct lto_output_stream *, tree);
+extern void lto_output_fn_decl_ref (struct lto_out_decl_state *,
+ struct lto_output_stream *, tree);
+extern tree lto_input_var_decl_ref (lto_input_block *, lto_file_decl_data *);
+extern tree lto_input_fn_decl_ref (lto_input_block *, lto_file_decl_data *);
+extern void lto_output_toplevel_asms (void);
+extern void produce_asm (struct output_block *ob, tree fn);
+extern void lto_output ();
+extern void produce_asm_for_decls ();
+void lto_output_decl_state_streams (struct output_block *,
+ struct lto_out_decl_state *);
+void lto_output_decl_state_refs (struct output_block *,
+ struct lto_output_stream *,
+ struct lto_out_decl_state *);
+void lto_output_location (struct output_block *, struct bitpack_d *,
+ location_t);
+void lto_output_location_and_block (struct output_block *, struct bitpack_d *,
+ location_t);
+void lto_output_init_mode_table (void);
+void lto_prepare_function_for_streaming (cgraph_node *);
+
+
+/* In lto-cgraph.cc */
+extern bool asm_nodes_output;
+lto_symtab_encoder_t lto_symtab_encoder_new (bool);
+int lto_symtab_encoder_encode (lto_symtab_encoder_t, symtab_node *);
+void lto_symtab_encoder_delete (lto_symtab_encoder_t);
+bool lto_symtab_encoder_delete_node (lto_symtab_encoder_t, symtab_node *);
+bool lto_symtab_encoder_encode_body_p (lto_symtab_encoder_t,
+ struct cgraph_node *);
+bool lto_symtab_encoder_in_partition_p (lto_symtab_encoder_t,
+ symtab_node *);
+void lto_set_symtab_encoder_in_partition (lto_symtab_encoder_t,
+ symtab_node *);
+
+bool lto_symtab_encoder_encode_initializer_p (lto_symtab_encoder_t,
+ varpool_node *);
+void output_symtab (void);
+void input_symtab (void);
+void output_offload_tables (void);
+void input_offload_tables (bool);
+bool referenced_from_other_partition_p (struct ipa_ref_list *,
+ lto_symtab_encoder_t);
+bool reachable_from_other_partition_p (struct cgraph_node *,
+ lto_symtab_encoder_t);
+bool referenced_from_this_partition_p (symtab_node *,
+ lto_symtab_encoder_t);
+bool reachable_from_this_partition_p (struct cgraph_node *,
+ lto_symtab_encoder_t);
+lto_symtab_encoder_t compute_ltrans_boundary (lto_symtab_encoder_t encoder);
+void select_what_to_stream (void);
+
+/* In omp-general.cc. */
+void omp_lto_output_declare_variant_alt (lto_simple_output_block *,
+ cgraph_node *, lto_symtab_encoder_t);
+void omp_lto_input_declare_variant_alt (lto_input_block *, cgraph_node *,
+ vec<symtab_node *>);
+
+/* In options-save.cc. */
+void cl_target_option_stream_out (struct output_block *, struct bitpack_d *,
+ struct cl_target_option *);
+
+void cl_target_option_stream_in (class data_in *,
+ struct bitpack_d *,
+ struct cl_target_option *);
+
+void cl_optimization_stream_out (struct output_block *,
+ struct bitpack_d *, struct cl_optimization *);
+
+void cl_optimization_stream_in (class data_in *,
+ struct bitpack_d *, struct cl_optimization *);
+
+
+
+/* In lto-opts.cc. */
+extern void lto_write_options (void);
+
+
+/* Statistics gathered during LTO, WPA and LTRANS. */
+extern struct lto_stats_d lto_stats;
+
+/* Section names corresponding to the values of enum lto_section_type. */
+extern const char *lto_section_name[];
+
+/* Holds all the out decl states of functions output so far in the
+ current output file. */
+extern vec<lto_out_decl_state_ptr> lto_function_decl_states;
+
+/* Return true if LTO tag TAG corresponds to a tree code. */
+inline bool
+lto_tag_is_tree_code_p (enum LTO_tags tag)
+{
+ return tag > LTO_first_tree_tag && (unsigned) tag <= MAX_TREE_CODES;
+}
+
+
+/* Return true if LTO tag TAG corresponds to a gimple code. */
+inline bool
+lto_tag_is_gimple_code_p (enum LTO_tags tag)
+{
+ return (unsigned) tag >= LTO_first_gimple_tag
+ && (unsigned) tag
+ < LTO_first_gimple_tag + LAST_AND_UNUSED_GIMPLE_CODE;
+}
+
+
+/* Return the LTO tag corresponding to gimple code CODE. See enum
+ LTO_tags for details on the conversion. */
+inline enum LTO_tags
+lto_gimple_code_to_tag (enum gimple_code code)
+{
+ return (enum LTO_tags) ((unsigned) code + LTO_first_gimple_tag);
+}
+
+
+/* Return the GIMPLE code corresponding to TAG. See enum LTO_tags for
+ details on the conversion. */
+inline enum gimple_code
+lto_tag_to_gimple_code (enum LTO_tags tag)
+{
+ gcc_assert (lto_tag_is_gimple_code_p (tag));
+ return (enum gimple_code) ((unsigned) tag - LTO_first_gimple_tag);
+}
+
+
+/* Return the LTO tag corresponding to tree code CODE. See enum
+ LTO_tags for details on the conversion. */
+inline enum LTO_tags
+lto_tree_code_to_tag (enum tree_code code)
+{
+ return (enum LTO_tags) ((unsigned) code + LTO_first_tree_tag);
+}
+
+
+/* Return the tree code corresponding to TAG. See enum LTO_tags for
+ details on the conversion. */
+inline enum tree_code
+lto_tag_to_tree_code (enum LTO_tags tag)
+{
+ gcc_assert (lto_tag_is_tree_code_p (tag));
+ return (enum tree_code) ((unsigned) tag - LTO_first_tree_tag);
+}
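+
+/* A worked round-trip example (illustrative): tree-code tags are the
+   tree code biased by LTO_first_tree_tag, so each conversion is a
+   plain offset and the pair composes to the identity.
+
+     enum LTO_tags tag = lto_tree_code_to_tag (PLUS_EXPR);
+     gcc_assert (lto_tag_is_tree_code_p (tag));
+     gcc_assert (lto_tag_to_tree_code (tag) == PLUS_EXPR);  */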
+
+/* Check that tag ACTUAL == EXPECTED. */
+inline void
+lto_tag_check (enum LTO_tags actual, enum LTO_tags expected)
+{
+ if (actual != expected)
+ internal_error ("bytecode stream: expected tag %s instead of %s",
+ lto_tag_name (expected), lto_tag_name (actual));
+}
+
+/* Check that tag ACTUAL is in the range [TAG1, TAG2]. */
+inline void
+lto_tag_check_range (enum LTO_tags actual, enum LTO_tags tag1,
+ enum LTO_tags tag2)
+{
+ if (actual < tag1 || actual > tag2)
+ internal_error ("bytecode stream: tag %s is not in the expected range "
+ "[%s, %s]",
+ lto_tag_name (actual),
+ lto_tag_name (tag1),
+ lto_tag_name (tag2));
+}
+
+/* Initialize an lto_tree_ref_encoder ENCODER.  */
+inline void
+lto_init_tree_ref_encoder (struct lto_tree_ref_encoder *encoder)
+{
+ encoder->tree_hash_table = new hash_map<tree, unsigned> (251);
+ encoder->trees.create (0);
+}
+
+
+/* Destroy an lto_tree_ref_encoder ENCODER by freeing its contents. The
+ memory used by ENCODER is not freed by this function. */
+inline void
+lto_destroy_tree_ref_encoder (struct lto_tree_ref_encoder *encoder)
+{
+  /* The hash table may have been deleted already.  */
+ delete encoder->tree_hash_table;
+ encoder->tree_hash_table = NULL;
+ encoder->trees.release ();
+}
+
+/* Return the number of trees encoded in ENCODER. */
+inline unsigned int
+lto_tree_ref_encoder_size (struct lto_tree_ref_encoder *encoder)
+{
+ return encoder->trees.length ();
+}
+
+/* Return the IDX-th tree in ENCODER. */
+inline tree
+lto_tree_ref_encoder_get_tree (struct lto_tree_ref_encoder *encoder,
+ unsigned int idx)
+{
+ return encoder->trees[idx];
+}
+
+/* Return number of encoded nodes in ENCODER. */
+inline int
+lto_symtab_encoder_size (lto_symtab_encoder_t encoder)
+{
+ return encoder->nodes.length ();
+}
+
+/* Value used to represent failure of lto_symtab_encoder_lookup. */
+#define LCC_NOT_FOUND (-1)
+
+/* Look up NODE in ENCODER.  Return NODE's reference if it has been
+   encoded or LCC_NOT_FOUND if it is not there.  */
+
+inline int
+lto_symtab_encoder_lookup (lto_symtab_encoder_t encoder,
+ symtab_node *node)
+{
+ size_t *slot = encoder->map->get (node);
+ return (slot && *slot ? *(slot) - 1 : LCC_NOT_FOUND);
+}
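+
+/* Note the off-by-one convention above: the map stores REF + 1 so that
+   an empty hash slot (0) is distinguishable from a node encoded at
+   index 0.  A sketch of the resulting invariant (ENCODER and NODE are
+   hypothetical):
+
+     int ref = lto_symtab_encoder_lookup (encoder, node);
+     if (ref != LCC_NOT_FOUND)
+       gcc_assert (lto_symtab_encoder_deref (encoder, ref) == node);  */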
+
+/* Return true if iterator LSEI points to nothing.  */
+inline bool
+lsei_end_p (lto_symtab_encoder_iterator lsei)
+{
+ return lsei.index >= (unsigned)lto_symtab_encoder_size (lsei.encoder);
+}
+
+/* Advance iterator LSEI.  */
+inline void
+lsei_next (lto_symtab_encoder_iterator *lsei)
+{
+ lsei->index++;
+}
+
+/* Return the symtab node pointed to by LSEI.  */
+inline symtab_node *
+lsei_node (lto_symtab_encoder_iterator lsei)
+{
+ return lsei.encoder->nodes[lsei.index].node;
+}
+
+/* Return the cgraph node pointed to by LSEI.  */
+inline struct cgraph_node *
+lsei_cgraph_node (lto_symtab_encoder_iterator lsei)
+{
+ return dyn_cast<cgraph_node *> (lsei.encoder->nodes[lsei.index].node);
+}
+
+/* Return the varpool node pointed to by LSEI.  */
+inline varpool_node *
+lsei_varpool_node (lto_symtab_encoder_iterator lsei)
+{
+ return dyn_cast<varpool_node *> (lsei.encoder->nodes[lsei.index].node);
+}
+
+/* Return the symbol table node corresponding to REF using ENCODER.  */
+
+inline symtab_node *
+lto_symtab_encoder_deref (lto_symtab_encoder_t encoder, int ref)
+{
+ if (ref == LCC_NOT_FOUND)
+ return NULL;
+
+ return encoder->nodes[ref].node;
+}
+
+/* Return an iterator to the first node in ENCODER.  */
+inline lto_symtab_encoder_iterator
+lsei_start (lto_symtab_encoder_t encoder)
+{
+ lto_symtab_encoder_iterator lsei;
+
+ lsei.encoder = encoder;
+ lsei.index = 0;
+ return lsei;
+}
+
+/* Advance iterator LSEI to the next node in the partition.  */
+inline void
+lsei_next_in_partition (lto_symtab_encoder_iterator *lsei)
+{
+ lsei_next (lsei);
+ while (!lsei_end_p (*lsei)
+ && !lto_symtab_encoder_in_partition_p (lsei->encoder, lsei_node (*lsei)))
+ lsei_next (lsei);
+}
+
+/* Return an iterator to the first node in ENCODER's partition.  */
+inline lto_symtab_encoder_iterator
+lsei_start_in_partition (lto_symtab_encoder_t encoder)
+{
+ lto_symtab_encoder_iterator lsei = lsei_start (encoder);
+
+ if (lsei_end_p (lsei))
+ return lsei;
+ if (!lto_symtab_encoder_in_partition_p (encoder, lsei_node (lsei)))
+ lsei_next_in_partition (&lsei);
+
+ return lsei;
+}
+
+/* Advance iterator LSEI to the next function in the partition.  */
+inline void
+lsei_next_function_in_partition (lto_symtab_encoder_iterator *lsei)
+{
+ lsei_next (lsei);
+ while (!lsei_end_p (*lsei)
+ && (!is_a <cgraph_node *> (lsei_node (*lsei))
+ || !lto_symtab_encoder_in_partition_p (lsei->encoder, lsei_node (*lsei))))
+ lsei_next (lsei);
+}
+
+/* Return an iterator to the first function in ENCODER's partition.  */
+inline lto_symtab_encoder_iterator
+lsei_start_function_in_partition (lto_symtab_encoder_t encoder)
+{
+ lto_symtab_encoder_iterator lsei = lsei_start (encoder);
+
+ if (lsei_end_p (lsei))
+ return lsei;
+ if (!is_a <cgraph_node *> (lsei_node (lsei))
+ || !lto_symtab_encoder_in_partition_p (encoder, lsei_node (lsei)))
+ lsei_next_function_in_partition (&lsei);
+
+ return lsei;
+}
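+
+/* A typical iteration idiom built from the functions above
+   (illustrative; ENCODER is a hypothetical local): visit every
+   function in the current partition.
+
+     lto_symtab_encoder_iterator lsei;
+     for (lsei = lsei_start_function_in_partition (encoder);
+	  !lsei_end_p (lsei);
+	  lsei_next_function_in_partition (&lsei))
+       {
+	 cgraph_node *node = lsei_cgraph_node (lsei);
+	 ...
+       }  */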
+
+/* Advance iterator LSEI to the next variable in the partition.  */
+inline void
+lsei_next_variable_in_partition (lto_symtab_encoder_iterator *lsei)
+{
+ lsei_next (lsei);
+ while (!lsei_end_p (*lsei)
+ && (!is_a <varpool_node *> (lsei_node (*lsei))
+ || !lto_symtab_encoder_in_partition_p (lsei->encoder, lsei_node (*lsei))))
+ lsei_next (lsei);
+}
+
+/* Return an iterator to the first variable in ENCODER's partition.  */
+inline lto_symtab_encoder_iterator
+lsei_start_variable_in_partition (lto_symtab_encoder_t encoder)
+{
+ lto_symtab_encoder_iterator lsei = lsei_start (encoder);
+
+ if (lsei_end_p (lsei))
+ return lsei;
+ if (!is_a <varpool_node *> (lsei_node (lsei))
+ || !lto_symtab_encoder_in_partition_p (encoder, lsei_node (lsei)))
+ lsei_next_variable_in_partition (&lsei);
+
+ return lsei;
+}
+
+/* Entry for the delayed registering of decl -> DIE references. */
+struct dref_entry {
+ tree decl;
+ const char *sym;
+ unsigned HOST_WIDE_INT off;
+};
+
+extern vec<dref_entry> dref_queue;
+
+extern FILE *streamer_dump_file;
+
+#endif /* GCC_LTO_STREAMER_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/m2/m2-tree.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/m2/m2-tree.def
new file mode 100644
index 0000000..be7c5b7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/m2/m2-tree.def
@@ -0,0 +1,24 @@
+/* gm2-tree.def, a component of a C header file used to define a SET type.
+
+Copyright (C) 2006-2023 Free Software Foundation, Inc.
+Contributed by Gaius Mulley <gaius@glam.ac.uk>.
+
+This file is part of GNU Modula-2.
+
+GNU Modula-2 is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GNU Modula-2 is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU Modula-2; see the file COPYING. If not, write to the
+Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+/* A SET_TYPE type. */
+DEFTREECODE (SET_TYPE, "set_type", tcc_type, 0)
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/machmode.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/machmode.def
new file mode 100644
index 0000000..62e2ba1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/machmode.def
@@ -0,0 +1,284 @@
+/* This file contains the definitions and documentation for the
+ machine modes used in the GNU compiler.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* This file defines all the MACHINE MODES used by GCC.
+
+ A machine mode specifies a size and format of data
+ at the machine level.
+
+ Each RTL expression has a machine mode.
+
+ At the syntax tree level, each ..._TYPE and each ..._DECL node
+ has a machine mode which describes data of that type or the
+ data of the variable declared. */
+
+/* This file is included by the genmodes program. Its text is the
+ body of a function. Do not rely on this, it will change in the
+ future.
+
+ The following statements can be used in this file -- all have
+ the form of a C macro call. In their arguments:
+
+ A CLASS argument must be one of the constants defined in
+ mode-classes.def, less the leading MODE_ prefix; some statements
+ that take CLASS arguments have restrictions on which classes are
+ acceptable. For instance, INT.
+
+ A MODE argument must be the printable name of a machine mode,
+ without quotation marks or trailing "mode". For instance, SI.
+
+ A PRECISION, BYTESIZE, or COUNT argument must be a positive integer
+ constant.
+
+ A FORMAT argument must be one of the real_mode_format structures
+ declared in real.h, or else a literal 0. Do not put a leading &
+ on the argument.
+
+ An EXPR argument must be a syntactically valid C expression.
+ If an EXPR contains commas, you may need to write an extra pair of
+ parentheses around it, so it appears to be a single argument to the
+ statement.
+
+ This file defines only those modes which are of use on almost all
+ machines. Other modes can be defined in the target-specific
+ mode definition file, config/ARCH/ARCH-modes.def.
+
+ Order matters in this file in so far as statements which refer to
+ other modes must appear after the modes they refer to. However,
+ statements which do not refer to other modes may appear in any
+ order.
+
+ RANDOM_MODE (MODE);
+ declares MODE to be of class RANDOM.
+
+ CC_MODE (MODE);
+ declares MODE to be of class CC.
+
+ INT_MODE (MODE, BYTESIZE);
+ declares MODE to be of class INT and BYTESIZE bytes wide.
+ All of the bits of its representation are significant.
+
+ FRACTIONAL_INT_MODE (MODE, PRECISION, BYTESIZE);
+ declares MODE to be of class INT, BYTESIZE bytes wide in
+ storage, but with only PRECISION significant bits.
+
+ FLOAT_MODE (MODE, BYTESIZE, FORMAT);
+ declares MODE to be of class FLOAT and BYTESIZE bytes wide,
+ using floating point format FORMAT.
+ All of the bits of its representation are significant.
+
+ FRACTIONAL_FLOAT_MODE (MODE, PRECISION, BYTESIZE, FORMAT);
+ declares MODE to be of class FLOAT, BYTESIZE bytes wide in
+ storage, but with only PRECISION significant bits, using
+ floating point format FORMAT.
+
+ DECIMAL_FLOAT_MODE (MODE, BYTESIZE, FORMAT);
+ declares MODE to be of class DECIMAL_FLOAT and BYTESIZE bytes
+ wide. All of the bits of its representation are significant.
+
+ FRACTIONAL_DECIMAL_FLOAT_MODE (MODE, BYTESIZE, FORMAT);
+ declares MODE to be of class DECIMAL_FLOAT and BYTESIZE bytes
+ wide. All of the bits of its representation are significant.
+
+ FRACT_MODE (MODE, BYTESIZE, FBIT);
+ declares MODE to be of class FRACT and BYTESIZE bytes wide
+ with FBIT fractional bits. There may be padding bits.
+
+ UFRACT_MODE (MODE, BYTESIZE, FBIT);
+ declares MODE to be of class UFRACT and BYTESIZE bytes wide
+ with FBIT fractional bits. There may be padding bits.
+
+ ACCUM_MODE (MODE, BYTESIZE, IBIT, FBIT);
+ declares MODE to be of class ACCUM and BYTESIZE bytes wide
+ with IBIT integral bits and FBIT fractional bits.
+ There may be padding bits.
+
+ UACCUM_MODE (MODE, BYTESIZE, IBIT, FBIT);
+ declares MODE to be of class UACCUM and BYTESIZE bytes wide
+ with IBIT integral bits and FBIT fractional bits.
+ There may be padding bits.
+
+ RESET_FLOAT_FORMAT (MODE, FORMAT);
+ changes the format of MODE, which must be class FLOAT,
+ to FORMAT. Use in an ARCH-modes.def to reset the format
+ of one of the float modes defined in this file.
+
+   PARTIAL_INT_MODE (MODE, PRECISION, NAME);
+	declares a mode of class PARTIAL_INT with the same size as
+	MODE (which must be an INT mode) and precision PRECISION.
+	NAME is the name of the new mode.
+
+ VECTOR_MODE (CLASS, MODE, COUNT);
+ Declare a vector mode whose component mode is MODE (of class
+ CLASS) with COUNT components. CLASS must be INT or FLOAT.
+ The name of the vector mode takes the form VnX where n is
+ COUNT in decimal and X is MODE.
+
+ VECTOR_MODES (CLASS, WIDTH);
+ For all modes presently declared in class CLASS, construct
+ corresponding vector modes having width WIDTH. Modes whose
+ byte sizes do not evenly divide WIDTH are ignored, as are
+ modes that would produce vector modes with only one component,
+ and modes smaller than one byte (if CLASS is INT) or smaller
+ than two bytes (if CLASS is FLOAT). CLASS must be INT or
+ FLOAT. The names follow the same rule as VECTOR_MODE uses.
+
+ VECTOR_MODES_WITH_PREFIX (PREFIX, CLASS, WIDTH, ORDER);
+ Like VECTOR_MODES, but start the mode names with PREFIX instead
+ of the usual "V". ORDER is the top-level sorting order of the
+ mode, with smaller numbers indicating a higher priority.
+
+ VECTOR_BOOL_MODE (NAME, COUNT, COMPONENT, BYTESIZE)
+ Create a vector mode called NAME that contains COUNT boolean
+ elements and occupies BYTESIZE bytes in total. Each boolean
+	element is of COMPONENT type and occupies (BYTESIZE * BITS_PER_UNIT) /
+	COUNT bits, with the element at index 0 occupying the lsb of the
+	first byte in memory.  Only the lowest bit of each element is
+ significant.
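+
+	For example, AArch64 SVE predicates are declared along the
+	lines of VECTOR_BOOL_MODE (VNx16BI, 16, BI, 2): 16 boolean
+	elements packed into 2 bytes, so each element occupies
+	(2 * 8) / 16 = 1 bit.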
+
+ OPAQUE_MODE (NAME, BYTESIZE)
+ Create an opaque mode called NAME that is BYTESIZE bytes wide.
+
+ COMPLEX_MODES (CLASS);
+ For all modes presently declared in class CLASS, construct
+ corresponding complex modes. Modes smaller than one byte
+ are ignored. For FLOAT modes, the names are derived by
+	replacing the 'F' in the mode name with a 'C'.  (It is an
+	error if there is no 'F'.)  For INT modes, the names are
+	derived by prefixing a 'C' to the name.
+
+ ADJUST_BYTESIZE (MODE, EXPR);
+ ADJUST_ALIGNMENT (MODE, EXPR);
+ ADJUST_FLOAT_FORMAT (MODE, EXPR);
+ ADJUST_IBIT (MODE, EXPR);
+ ADJUST_FBIT (MODE, EXPR);
+ Arrange for the byte size, alignment, floating point format, ibit,
+ or fbit of MODE to be adjustable at run time. EXPR will be executed
+ once after processing all command line options, and should
+ evaluate to the desired byte size, alignment, format, ibit or fbit.
+
+ Unlike a FORMAT argument, if you are adjusting a float format
+ you must put an & in front of the name of each format structure.
+
+ ADJUST_NUNITS (MODE, EXPR);
+	Like the above, but set the number of units (nunits) of MODE to EXPR.
+ This changes the size and precision of the mode in proportion
+ to the change in the number of units; for example, doubling
+ the number of units doubles the size and precision as well.
+
+ Note: If a mode is ever made which is more than 255 bytes wide,
+ machmode.h and genmodes.cc will have to be changed to allocate
+ more space for the mode_size and mode_alignment arrays. */
+
+/* VOIDmode is used when no mode needs to be specified,
+ as for example on CONST_INT RTL expressions. */
+RANDOM_MODE (VOID);
+
+/* BLKmode is used for structures, arrays, etc.
+ that fit no more specific mode. */
+RANDOM_MODE (BLK);
+
+/* Single bit mode used for booleans. */
+BOOL_MODE (BI, 1, 1);
+
+/* Basic integer modes.  We go up to TI in generic code (128 bits).
+   TImode is needed here because some front ends now generically
+   support __int128.  If the front ends decide to generically support
+   larger types, then corresponding modes must be added here.  The
+   name OI is reserved for a 256-bit type (needed by some back ends).
+   */
+INT_MODE (QI, 1);
+INT_MODE (HI, 2);
+INT_MODE (SI, 4);
+INT_MODE (DI, 8);
+INT_MODE (TI, 16);
+
+/* No partial integer modes are defined by default. */
+
+/* The target normally defines any target-specific __intN types and
+ their modes, but __int128 for TImode is fairly common so define it
+ here. The type will not be created unless the target supports
+ TImode. */
+
+INT_N (TI, 128);
+
+/* Basic floating point modes. SF and DF are the only modes provided
+ by default. The names QF, HF, XF, and TF are reserved for targets
+ that need 1-word, 2-word, 80-bit, or 128-bit float types respectively.
+
+ These are the IEEE mappings. They can be overridden with
+ RESET_FLOAT_FORMAT or at runtime (in TARGET_OPTION_OVERRIDE). */
+
+FLOAT_MODE (SF, 4, ieee_single_format);
+FLOAT_MODE (DF, 8, ieee_double_format);
+
+/* Basic CC modes.
+ FIXME define this only for targets that need it. */
+CC_MODE (CC);
+
+/* Fixed-point modes. */
+FRACT_MODE (QQ, 1, 7); /* s.7 */
+FRACT_MODE (HQ, 2, 15); /* s.15 */
+FRACT_MODE (SQ, 4, 31); /* s.31 */
+FRACT_MODE (DQ, 8, 63); /* s.63 */
+FRACT_MODE (TQ, 16, 127); /* s.127 */
+
+UFRACT_MODE (UQQ, 1, 8); /* .8 */
+UFRACT_MODE (UHQ, 2, 16); /* .16 */
+UFRACT_MODE (USQ, 4, 32); /* .32 */
+UFRACT_MODE (UDQ, 8, 64); /* .64 */
+UFRACT_MODE (UTQ, 16, 128); /* .128 */
+
+ACCUM_MODE (HA, 2, 8, 7); /* s8.7 */
+ACCUM_MODE (SA, 4, 16, 15); /* s16.15 */
+ACCUM_MODE (DA, 8, 32, 31); /* s32.31 */
+ACCUM_MODE (TA, 16, 64, 63); /* s64.63 */
+
+UACCUM_MODE (UHA, 2, 8, 8); /* 8.8 */
+UACCUM_MODE (USA, 4, 16, 16); /* 16.16 */
+UACCUM_MODE (UDA, 8, 32, 32); /* 32.32 */
+UACCUM_MODE (UTA, 16, 64, 64); /* 64.64 */
+
+/* Allow the target to specify additional modes of various kinds. */
+#if HAVE_EXTRA_MODES
+# include EXTRA_MODES_FILE
+#endif
+
+/* Complex modes. */
+COMPLEX_MODES (INT);
+COMPLEX_MODES (PARTIAL_INT);
+COMPLEX_MODES (FLOAT);
+
+/* Decimal floating point modes. */
+DECIMAL_FLOAT_MODE (SD, 4, decimal_single_format);
+DECIMAL_FLOAT_MODE (DD, 8, decimal_double_format);
+DECIMAL_FLOAT_MODE (TD, 16, decimal_quad_format);
+
+/* The symbol Pmode stands for one of the above machine modes (usually SImode).
+ The tm.h file specifies which one. It is not a distinct mode. */
+
+/*
+Local variables:
+mode:c
+version-control: t
+End:
+*/
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/machmode.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/machmode.h
new file mode 100644
index 0000000..f1865c1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/machmode.h
@@ -0,0 +1,1264 @@
+/* Machine mode definitions for GCC; included by rtl.h and tree.h.
+ Copyright (C) 1991-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef HAVE_MACHINE_MODES
+#define HAVE_MACHINE_MODES
+
+typedef opt_mode<machine_mode> opt_machine_mode;
+
+extern CONST_MODE_SIZE poly_uint16_pod mode_size[NUM_MACHINE_MODES];
+extern CONST_MODE_PRECISION poly_uint16_pod mode_precision[NUM_MACHINE_MODES];
+extern const unsigned char mode_inner[NUM_MACHINE_MODES];
+extern CONST_MODE_NUNITS poly_uint16_pod mode_nunits[NUM_MACHINE_MODES];
+extern CONST_MODE_UNIT_SIZE unsigned char mode_unit_size[NUM_MACHINE_MODES];
+extern const unsigned short mode_unit_precision[NUM_MACHINE_MODES];
+extern const unsigned char mode_next[NUM_MACHINE_MODES];
+extern const unsigned char mode_wider[NUM_MACHINE_MODES];
+extern const unsigned char mode_2xwider[NUM_MACHINE_MODES];
+
+template<typename T>
+struct mode_traits
+{
+ /* For use by the machmode support code only.
+
+ There are cases in which the machmode support code needs to forcibly
+ convert a machine_mode to a specific mode class T, and in which the
+ context guarantees that this is valid without the need for an assert.
+ This can be done using:
+
+ return typename mode_traits<T>::from_int (mode);
+
+ when returning a T and:
+
+ res = T (typename mode_traits<T>::from_int (mode));
+
+ when assigning to a value RES that must be assignment-compatible
+ with (but possibly not the same as) T. */
+#ifdef USE_ENUM_MODES
+ /* Allow direct conversion of enums to specific mode classes only
+ when USE_ENUM_MODES is defined. This is only intended for use
+ by gencondmd, so that it can tell more easily when .md conditions
+ are always false. */
+ typedef machine_mode from_int;
+#else
+ /* Here we use an enum type distinct from machine_mode but with the
+ same range as machine_mode. T should have a constructor that
+ accepts this enum type; it should not have a constructor that
+ accepts machine_mode.
+
+ We use this somewhat indirect approach to avoid too many constructor
+ calls when the compiler is built with -O0. For example, even in
+ unoptimized code, the return statement above would construct the
+ returned T directly from the numerical value of MODE. */
+ enum from_int { dummy = MAX_MACHINE_MODE };
+#endif
+};
+
+template<>
+struct mode_traits<machine_mode>
+{
+ /* machine_mode itself needs no conversion. */
+ typedef machine_mode from_int;
+};
+
+/* Always treat machine modes as fixed-size while compiling code specific
+ to targets that have no variable-size modes. */
+#if defined (IN_TARGET_CODE) && NUM_POLY_INT_COEFFS == 1
+#define ONLY_FIXED_SIZE_MODES 1
+#else
+#define ONLY_FIXED_SIZE_MODES 0
+#endif
+
+/* Get the name of mode MODE as a string. */
+
+extern const char * const mode_name[NUM_MACHINE_MODES];
+#define GET_MODE_NAME(MODE) mode_name[MODE]
+
+/* Mode classes. */
+
+#include "mode-classes.def"
+#define DEF_MODE_CLASS(M) M
+enum mode_class { MODE_CLASSES, MAX_MODE_CLASS };
+#undef DEF_MODE_CLASS
+#undef MODE_CLASSES
+
+/* Get the general kind of object that mode MODE represents
+ (integer, floating, complex, etc.) */
+
+extern const unsigned char mode_class[NUM_MACHINE_MODES];
+#define GET_MODE_CLASS(MODE) ((enum mode_class) mode_class[MODE])
+
+/* Nonzero if MODE is an integral mode. */
+#define INTEGRAL_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_INT \
+ || GET_MODE_CLASS (MODE) == MODE_PARTIAL_INT \
+ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_INT \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_BOOL \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_INT)
+
+/* Nonzero if MODE is a floating-point mode. */
+#define FLOAT_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE) == MODE_DECIMAL_FLOAT \
+ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT)
+
+/* Nonzero if MODE is a complex mode. */
+#define COMPLEX_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_COMPLEX_INT \
+ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT)
+
+/* Nonzero if MODE is a vector mode. */
+#define VECTOR_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_VECTOR_BOOL \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_INT \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_FRACT \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_UFRACT \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_ACCUM \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_UACCUM)
+
+/* Nonzero if MODE is a scalar integral mode. */
+#define SCALAR_INT_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_INT \
+ || GET_MODE_CLASS (MODE) == MODE_PARTIAL_INT)
+
+/* Nonzero if MODE is a scalar floating point mode. */
+#define SCALAR_FLOAT_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE) == MODE_DECIMAL_FLOAT)
+
+/* Nonzero if MODE is a decimal floating point mode. */
+#define DECIMAL_FLOAT_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_DECIMAL_FLOAT)
+
+/* Nonzero if MODE is a scalar fract mode. */
+#define SCALAR_FRACT_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FRACT)
+
+/* Nonzero if MODE is a scalar ufract mode. */
+#define SCALAR_UFRACT_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_UFRACT)
+
+/* Nonzero if MODE is a scalar fract or ufract mode. */
+#define ALL_SCALAR_FRACT_MODE_P(MODE) \
+ (SCALAR_FRACT_MODE_P (MODE) || SCALAR_UFRACT_MODE_P (MODE))
+
+/* Nonzero if MODE is a scalar accum mode. */
+#define SCALAR_ACCUM_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_ACCUM)
+
+/* Nonzero if MODE is a scalar uaccum mode. */
+#define SCALAR_UACCUM_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_UACCUM)
+
+/* Nonzero if MODE is a scalar accum or uaccum mode. */
+#define ALL_SCALAR_ACCUM_MODE_P(MODE) \
+ (SCALAR_ACCUM_MODE_P (MODE) || SCALAR_UACCUM_MODE_P (MODE))
+
+/* Nonzero if MODE is a scalar fract or accum mode. */
+#define SIGNED_SCALAR_FIXED_POINT_MODE_P(MODE) \
+ (SCALAR_FRACT_MODE_P (MODE) || SCALAR_ACCUM_MODE_P (MODE))
+
+/* Nonzero if MODE is a scalar ufract or uaccum mode. */
+#define UNSIGNED_SCALAR_FIXED_POINT_MODE_P(MODE) \
+ (SCALAR_UFRACT_MODE_P (MODE) || SCALAR_UACCUM_MODE_P (MODE))
+
+/* Nonzero if MODE is a scalar fract, ufract, accum or uaccum mode. */
+#define ALL_SCALAR_FIXED_POINT_MODE_P(MODE) \
+ (SIGNED_SCALAR_FIXED_POINT_MODE_P (MODE) \
+ || UNSIGNED_SCALAR_FIXED_POINT_MODE_P (MODE))
+
+/* Nonzero if MODE is a scalar/vector fract mode. */
+#define FRACT_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FRACT \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_FRACT)
+
+/* Nonzero if MODE is a scalar/vector ufract mode. */
+#define UFRACT_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_UFRACT \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_UFRACT)
+
+/* Nonzero if MODE is a scalar/vector fract or ufract mode. */
+#define ALL_FRACT_MODE_P(MODE) \
+ (FRACT_MODE_P (MODE) || UFRACT_MODE_P (MODE))
+
+/* Nonzero if MODE is a scalar/vector accum mode. */
+#define ACCUM_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_ACCUM \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_ACCUM)
+
+/* Nonzero if MODE is a scalar/vector uaccum mode. */
+#define UACCUM_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_UACCUM \
+ || GET_MODE_CLASS (MODE) == MODE_VECTOR_UACCUM)
+
+/* Nonzero if MODE is a scalar/vector accum or uaccum mode. */
+#define ALL_ACCUM_MODE_P(MODE) \
+ (ACCUM_MODE_P (MODE) || UACCUM_MODE_P (MODE))
+
+/* Nonzero if MODE is a scalar/vector fract or accum mode. */
+#define SIGNED_FIXED_POINT_MODE_P(MODE) \
+ (FRACT_MODE_P (MODE) || ACCUM_MODE_P (MODE))
+
+/* Nonzero if MODE is a scalar/vector ufract or uaccum mode. */
+#define UNSIGNED_FIXED_POINT_MODE_P(MODE) \
+ (UFRACT_MODE_P (MODE) || UACCUM_MODE_P (MODE))
+
+/* Nonzero if MODE is a scalar/vector fract, ufract, accum or uaccum mode. */
+#define ALL_FIXED_POINT_MODE_P(MODE) \
+ (SIGNED_FIXED_POINT_MODE_P (MODE) \
+ || UNSIGNED_FIXED_POINT_MODE_P (MODE))
+
+/* Nonzero if MODE is opaque. */
+#define OPAQUE_MODE_P(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_OPAQUE)
+
+/* Nonzero if CLASS modes can be widened. */
+#define CLASS_HAS_WIDER_MODES_P(CLASS) \
+ (CLASS == MODE_INT \
+ || CLASS == MODE_PARTIAL_INT \
+ || CLASS == MODE_FLOAT \
+ || CLASS == MODE_DECIMAL_FLOAT \
+ || CLASS == MODE_COMPLEX_FLOAT \
+ || CLASS == MODE_FRACT \
+ || CLASS == MODE_UFRACT \
+ || CLASS == MODE_ACCUM \
+ || CLASS == MODE_UACCUM)
+
+/* An optional T (i.e. a T or nothing), where T is some form of mode class. */
+template<typename T>
+class opt_mode
+{
+public:
+ enum from_int { dummy = MAX_MACHINE_MODE };
+
+ ALWAYS_INLINE CONSTEXPR opt_mode () : m_mode (E_VOIDmode) {}
+ ALWAYS_INLINE CONSTEXPR opt_mode (const T &m) : m_mode (m) {}
+ template<typename U>
+ ALWAYS_INLINE CONSTEXPR opt_mode (const U &m) : m_mode (T (m)) {}
+ ALWAYS_INLINE CONSTEXPR opt_mode (from_int m) : m_mode (machine_mode (m)) {}
+
+ machine_mode else_void () const;
+ machine_mode else_blk () const { return else_mode (BLKmode); }
+ machine_mode else_mode (machine_mode) const;
+ T require () const;
+
+ bool exists () const;
+ template<typename U> bool exists (U *) const;
+
+ bool operator== (const T &m) const { return m_mode == m; }
+ bool operator!= (const T &m) const { return m_mode != m; }
+
+private:
+ machine_mode m_mode;
+};
+
+/* If the object contains a T, return its enum value, otherwise return
+ E_VOIDmode. */
+
+template<typename T>
+ALWAYS_INLINE machine_mode
+opt_mode<T>::else_void () const
+{
+ return m_mode;
+}
+
+/* If the T exists, return its enum value, otherwise return FALLBACK. */
+
+template<typename T>
+inline machine_mode
+opt_mode<T>::else_mode (machine_mode fallback) const
+{
+ return m_mode == E_VOIDmode ? fallback : m_mode;
+}
+
+/* Assert that the object contains a T and return it. */
+
+template<typename T>
+inline T
+opt_mode<T>::require () const
+{
+ gcc_checking_assert (m_mode != E_VOIDmode);
+ return typename mode_traits<T>::from_int (m_mode);
+}
+
+/* Return true if the object contains a T rather than nothing. */
+
+template<typename T>
+ALWAYS_INLINE bool
+opt_mode<T>::exists () const
+{
+ return m_mode != E_VOIDmode;
+}
+
+/* Return true if the object contains a T, storing it in *MODE if so. */
+
+template<typename T>
+template<typename U>
+inline bool
+opt_mode<T>::exists (U *mode) const
+{
+ if (m_mode != E_VOIDmode)
+ {
+ *mode = T (typename mode_traits<T>::from_int (m_mode));
+ return true;
+ }
+ return false;
+}
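+
+/* Usage sketch (illustrative): the two common ways of consuming an
+   opt_mode, shown here with int_mode_for_size (declared later in
+   this file).
+
+     scalar_int_mode m;
+     if (int_mode_for_size (32, 0).exists (&m))
+       ... use m ...
+
+   or, when the caller can prove the mode must exist:
+
+     scalar_int_mode m = int_mode_for_size (32, 0).require ();  */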
+
+/* A POD version of mode class T. */
+
+template<typename T>
+struct pod_mode
+{
+ typedef typename mode_traits<T>::from_int from_int;
+ typedef typename T::measurement_type measurement_type;
+
+ machine_mode m_mode;
+ ALWAYS_INLINE CONSTEXPR
+ operator machine_mode () const { return m_mode; }
+
+ ALWAYS_INLINE CONSTEXPR
+ operator T () const { return from_int (m_mode); }
+
+ ALWAYS_INLINE pod_mode &operator = (const T &m) { m_mode = m; return *this; }
+};
+
+/* Return true if mode M has type T. */
+
+template<typename T>
+inline bool
+is_a (machine_mode m)
+{
+ return T::includes_p (m);
+}
+
+template<typename T, typename U>
+inline bool
+is_a (const opt_mode<U> &m)
+{
+ return T::includes_p (m.else_void ());
+}
+
+/* Assert that mode M has type T, and return it in that form. */
+
+template<typename T>
+inline T
+as_a (machine_mode m)
+{
+ gcc_checking_assert (T::includes_p (m));
+ return typename mode_traits<T>::from_int (m);
+}
+
+template<typename T, typename U>
+inline T
+as_a (const opt_mode<U> &m)
+{
+ return as_a <T> (m.else_void ());
+}
+
+/* Convert M to an opt_mode<T>. */
+
+template<typename T>
+inline opt_mode<T>
+dyn_cast (machine_mode m)
+{
+ if (T::includes_p (m))
+ return T (typename mode_traits<T>::from_int (m));
+ return opt_mode<T> ();
+}
+
+template<typename T, typename U>
+inline opt_mode<T>
+dyn_cast (const opt_mode<U> &m)
+{
+ return dyn_cast <T> (m.else_void ());
+}
+
+/* Return true if mode M has type T, storing it as a T in *RESULT
+ if so. */
+
+template<typename T, typename U>
+inline bool
+is_a (machine_mode m, U *result)
+{
+ if (T::includes_p (m))
+ {
+ *result = T (typename mode_traits<T>::from_int (m));
+ return true;
+ }
+ return false;
+}
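+
+/* Usage sketch (illustrative) of the query styles above:
+
+     if (is_a <scalar_int_mode> (mode))        -- test only
+
+     scalar_int_mode int_mode;
+     if (is_a <scalar_int_mode> (mode, &int_mode))  -- test and capture
+
+     as_a <scalar_int_mode> (mode)             -- convert, assert on failure
+
+   dyn_cast <T> combines test and capture into a returned opt_mode<T>.  */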
+
+/* Represents a machine mode that is known to be a SCALAR_INT_MODE_P. */
+class scalar_int_mode
+{
+public:
+ typedef mode_traits<scalar_int_mode>::from_int from_int;
+ typedef unsigned short measurement_type;
+
+ ALWAYS_INLINE scalar_int_mode () {}
+
+ ALWAYS_INLINE CONSTEXPR
+ scalar_int_mode (from_int m) : m_mode (machine_mode (m)) {}
+
+ ALWAYS_INLINE CONSTEXPR operator machine_mode () const { return m_mode; }
+
+ static bool includes_p (machine_mode);
+
+protected:
+ machine_mode m_mode;
+};
+
+/* Return true if M is a scalar_int_mode. */
+
+inline bool
+scalar_int_mode::includes_p (machine_mode m)
+{
+ return SCALAR_INT_MODE_P (m);
+}
+
+/* Represents a machine mode that is known to be a SCALAR_FLOAT_MODE_P. */
+class scalar_float_mode
+{
+public:
+ typedef mode_traits<scalar_float_mode>::from_int from_int;
+ typedef unsigned short measurement_type;
+
+ ALWAYS_INLINE scalar_float_mode () {}
+
+ ALWAYS_INLINE CONSTEXPR
+ scalar_float_mode (from_int m) : m_mode (machine_mode (m)) {}
+
+ ALWAYS_INLINE CONSTEXPR operator machine_mode () const { return m_mode; }
+
+ static bool includes_p (machine_mode);
+
+protected:
+ machine_mode m_mode;
+};
+
+/* Return true if M is a scalar_float_mode. */
+
+inline bool
+scalar_float_mode::includes_p (machine_mode m)
+{
+ return SCALAR_FLOAT_MODE_P (m);
+}
+
+/* Represents a machine mode that is known to be scalar. */
+class scalar_mode
+{
+public:
+ typedef mode_traits<scalar_mode>::from_int from_int;
+ typedef unsigned short measurement_type;
+
+ ALWAYS_INLINE scalar_mode () {}
+
+ ALWAYS_INLINE CONSTEXPR
+ scalar_mode (from_int m) : m_mode (machine_mode (m)) {}
+
+ ALWAYS_INLINE CONSTEXPR
+ scalar_mode (const scalar_int_mode &m) : m_mode (m) {}
+
+ ALWAYS_INLINE CONSTEXPR
+ scalar_mode (const scalar_float_mode &m) : m_mode (m) {}
+
+ ALWAYS_INLINE CONSTEXPR
+ scalar_mode (const scalar_int_mode_pod &m) : m_mode (m) {}
+
+ ALWAYS_INLINE CONSTEXPR operator machine_mode () const { return m_mode; }
+
+ static bool includes_p (machine_mode);
+
+protected:
+ machine_mode m_mode;
+};
+
+/* Return true if M represents some kind of scalar value. */
+
+inline bool
+scalar_mode::includes_p (machine_mode m)
+{
+ switch (GET_MODE_CLASS (m))
+ {
+ case MODE_INT:
+ case MODE_PARTIAL_INT:
+ case MODE_FRACT:
+ case MODE_UFRACT:
+ case MODE_ACCUM:
+ case MODE_UACCUM:
+ case MODE_FLOAT:
+ case MODE_DECIMAL_FLOAT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Represents a machine mode that is known to be a COMPLEX_MODE_P. */
+class complex_mode
+{
+public:
+ typedef mode_traits<complex_mode>::from_int from_int;
+ typedef unsigned short measurement_type;
+
+ ALWAYS_INLINE complex_mode () {}
+
+ ALWAYS_INLINE CONSTEXPR
+ complex_mode (from_int m) : m_mode (machine_mode (m)) {}
+
+ ALWAYS_INLINE CONSTEXPR operator machine_mode () const { return m_mode; }
+
+ static bool includes_p (machine_mode);
+
+protected:
+ machine_mode m_mode;
+};
+
+/* Return true if M is a complex_mode. */
+
+inline bool
+complex_mode::includes_p (machine_mode m)
+{
+ return COMPLEX_MODE_P (m);
+}
+
+/* Return the base GET_MODE_SIZE value for MODE. */
+
+ALWAYS_INLINE poly_uint16
+mode_to_bytes (machine_mode mode)
+{
+#if GCC_VERSION >= 4001
+ return (__builtin_constant_p (mode)
+ ? mode_size_inline (mode) : mode_size[mode]);
+#else
+ return mode_size[mode];
+#endif
+}
+
+/* Return the base GET_MODE_BITSIZE value for MODE. */
+
+ALWAYS_INLINE poly_uint16
+mode_to_bits (machine_mode mode)
+{
+ return mode_to_bytes (mode) * BITS_PER_UNIT;
+}
+
+/* Return the base GET_MODE_PRECISION value for MODE. */
+
+ALWAYS_INLINE poly_uint16
+mode_to_precision (machine_mode mode)
+{
+ return mode_precision[mode];
+}
+
+/* Return the base GET_MODE_INNER value for MODE. */
+
+ALWAYS_INLINE scalar_mode
+mode_to_inner (machine_mode mode)
+{
+#if GCC_VERSION >= 4001
+ return scalar_mode::from_int (__builtin_constant_p (mode)
+ ? mode_inner_inline (mode)
+ : mode_inner[mode]);
+#else
+ return scalar_mode::from_int (mode_inner[mode]);
+#endif
+}
+
+/* Return the base GET_MODE_UNIT_SIZE value for MODE. */
+
+ALWAYS_INLINE unsigned char
+mode_to_unit_size (machine_mode mode)
+{
+#if GCC_VERSION >= 4001
+ return (__builtin_constant_p (mode)
+ ? mode_unit_size_inline (mode) : mode_unit_size[mode]);
+#else
+ return mode_unit_size[mode];
+#endif
+}
+
+/* Return the base GET_MODE_UNIT_PRECISION value for MODE. */
+
+ALWAYS_INLINE unsigned short
+mode_to_unit_precision (machine_mode mode)
+{
+#if GCC_VERSION >= 4001
+ return (__builtin_constant_p (mode)
+ ? mode_unit_precision_inline (mode) : mode_unit_precision[mode]);
+#else
+ return mode_unit_precision[mode];
+#endif
+}
+
+/* Return the base GET_MODE_NUNITS value for MODE. */
+
+ALWAYS_INLINE poly_uint16
+mode_to_nunits (machine_mode mode)
+{
+#if GCC_VERSION >= 4001
+ return (__builtin_constant_p (mode)
+ ? mode_nunits_inline (mode) : mode_nunits[mode]);
+#else
+ return mode_nunits[mode];
+#endif
+}
+
+/* Get the size in bytes of an object of mode MODE. */
+
+#if ONLY_FIXED_SIZE_MODES
+#define GET_MODE_SIZE(MODE) ((unsigned short) mode_to_bytes (MODE).coeffs[0])
+#else
+ALWAYS_INLINE poly_uint16
+GET_MODE_SIZE (machine_mode mode)
+{
+ return mode_to_bytes (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
+GET_MODE_SIZE (const T &mode)
+{
+ return mode_to_bytes (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
+GET_MODE_SIZE (const T &mode)
+{
+ return mode_to_bytes (mode).coeffs[0];
+}
+#endif
+
+/* Get the size in bits of an object of mode MODE. */
+
+#if ONLY_FIXED_SIZE_MODES
+#define GET_MODE_BITSIZE(MODE) ((unsigned short) mode_to_bits (MODE).coeffs[0])
+#else
+ALWAYS_INLINE poly_uint16
+GET_MODE_BITSIZE (machine_mode mode)
+{
+ return mode_to_bits (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
+GET_MODE_BITSIZE (const T &mode)
+{
+ return mode_to_bits (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
+GET_MODE_BITSIZE (const T &mode)
+{
+ return mode_to_bits (mode).coeffs[0];
+}
+#endif
+
+/* Get the number of value bits of an object of mode MODE. */
+
+#if ONLY_FIXED_SIZE_MODES
+#define GET_MODE_PRECISION(MODE) \
+ ((unsigned short) mode_to_precision (MODE).coeffs[0])
+#else
+ALWAYS_INLINE poly_uint16
+GET_MODE_PRECISION (machine_mode mode)
+{
+ return mode_to_precision (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
+GET_MODE_PRECISION (const T &mode)
+{
+ return mode_to_precision (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
+GET_MODE_PRECISION (const T &mode)
+{
+ return mode_to_precision (mode).coeffs[0];
+}
+#endif
+
+/* Get the number of integral bits of an object of mode MODE. */
+extern CONST_MODE_IBIT unsigned char mode_ibit[NUM_MACHINE_MODES];
+#define GET_MODE_IBIT(MODE) mode_ibit[MODE]
+
+/* Get the number of fractional bits of an object of mode MODE. */
+extern CONST_MODE_FBIT unsigned char mode_fbit[NUM_MACHINE_MODES];
+#define GET_MODE_FBIT(MODE) mode_fbit[MODE]
+
+/* Get a bitmask containing 1 for all bits in a word
+ that fit within mode MODE. */
+
+extern CONST_MODE_MASK unsigned HOST_WIDE_INT
+ mode_mask_array[NUM_MACHINE_MODES];
+
+#define GET_MODE_MASK(MODE) mode_mask_array[MODE]
+
+/* Return the mode of the basic parts of MODE. For vector modes this is the
+ mode of the vector elements. For complex modes it is the mode of the real
+ and imaginary parts. For other modes it is MODE itself. */
+
+#define GET_MODE_INNER(MODE) (mode_to_inner (MODE))
+
+/* Get the size in bytes or bits of the basic parts of an
+ object of mode MODE. */
+
+#define GET_MODE_UNIT_SIZE(MODE) mode_to_unit_size (MODE)
+
+#define GET_MODE_UNIT_BITSIZE(MODE) \
+ ((unsigned short) (GET_MODE_UNIT_SIZE (MODE) * BITS_PER_UNIT))
+
+#define GET_MODE_UNIT_PRECISION(MODE) (mode_to_unit_precision (MODE))
+
+/* Get the number of units in an object of mode MODE. This is 2 for
+ complex modes and the number of elements for vector modes. */
+
+#if ONLY_FIXED_SIZE_MODES
+#define GET_MODE_NUNITS(MODE) (mode_to_nunits (MODE).coeffs[0])
+#else
+ALWAYS_INLINE poly_uint16
+GET_MODE_NUNITS (machine_mode mode)
+{
+ return mode_to_nunits (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
+GET_MODE_NUNITS (const T &mode)
+{
+ return mode_to_nunits (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
+GET_MODE_NUNITS (const T &mode)
+{
+ return mode_to_nunits (mode).coeffs[0];
+}
+#endif
+
+/* Get the next natural mode (not narrower, e.g., QI -> HI -> SI -> DI -> TI
+   or HF -> BF -> SF -> DF -> XF -> TF).  */
+
+template<typename T>
+ALWAYS_INLINE opt_mode<T>
+GET_MODE_NEXT_MODE (const T &m)
+{
+ return typename opt_mode<T>::from_int (mode_next[m]);
+}
+
+/* Get the next wider mode (e.g., QI -> HI -> SI -> DI -> TI
+   or { HF, BF } -> SF -> DF -> XF -> TF).
+   This is similar to GET_MODE_NEXT_MODE, but while GET_MODE_NEXT_MODE
+   can include modes that have the same precision (e.g.
+   GET_MODE_NEXT_MODE (HFmode) can be BFmode even when both have the same
+   precision), this one skips those.  It always returns VOIDmode for
+   modes whose class fails CLASS_HAS_WIDER_MODES_P.  */
+
+template<typename T>
+ALWAYS_INLINE opt_mode<T>
+GET_MODE_WIDER_MODE (const T &m)
+{
+ return typename opt_mode<T>::from_int (mode_wider[m]);
+}
+
+/* For scalars, this is a mode with twice the precision. For vectors,
+ this is a mode with the same inner mode but with twice the elements. */
+
+template<typename T>
+ALWAYS_INLINE opt_mode<T>
+GET_MODE_2XWIDER_MODE (const T &m)
+{
+ return typename opt_mode<T>::from_int (mode_2xwider[m]);
+}
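+
+/* A small worked example (illustrative; the exact chain is
+   target-dependent): starting from SImode,
+
+     opt_scalar_int_mode wider = GET_MODE_WIDER_MODE (SImode);
+
+   yields DImode on typical targets, and repeated calls walk
+   QI -> HI -> SI -> DI -> TI until exists () is false.  */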
+
+/* Get the complex mode from the component mode. */
+extern const unsigned char mode_complex[NUM_MACHINE_MODES];
+#define GET_MODE_COMPLEX_MODE(MODE) ((machine_mode) mode_complex[MODE])
+
+/* Represents a machine mode that must have a fixed size. The main
+ use of this class is to represent the modes of objects that always
+ have static storage duration, such as constant pool entries.
+ (No current target supports the concept of variable-size static data.) */
+class fixed_size_mode
+{
+public:
+ typedef mode_traits<fixed_size_mode>::from_int from_int;
+ typedef unsigned short measurement_type;
+
+ ALWAYS_INLINE fixed_size_mode () {}
+
+ ALWAYS_INLINE CONSTEXPR
+ fixed_size_mode (from_int m) : m_mode (machine_mode (m)) {}
+
+ ALWAYS_INLINE CONSTEXPR
+ fixed_size_mode (const scalar_mode &m) : m_mode (m) {}
+
+ ALWAYS_INLINE CONSTEXPR
+ fixed_size_mode (const scalar_int_mode &m) : m_mode (m) {}
+
+ ALWAYS_INLINE CONSTEXPR
+ fixed_size_mode (const scalar_float_mode &m) : m_mode (m) {}
+
+ ALWAYS_INLINE CONSTEXPR
+ fixed_size_mode (const scalar_mode_pod &m) : m_mode (m) {}
+
+ ALWAYS_INLINE CONSTEXPR
+ fixed_size_mode (const scalar_int_mode_pod &m) : m_mode (m) {}
+
+ ALWAYS_INLINE CONSTEXPR
+ fixed_size_mode (const complex_mode &m) : m_mode (m) {}
+
+ ALWAYS_INLINE CONSTEXPR operator machine_mode () const { return m_mode; }
+
+ static bool includes_p (machine_mode);
+
+protected:
+ machine_mode m_mode;
+};
+
+/* Return true if MODE has a fixed size. */
+
+inline bool
+fixed_size_mode::includes_p (machine_mode mode)
+{
+ return mode_to_bytes (mode).is_constant ();
+}
+
+/* Wrapper for mode arguments to target macros, so that if a target
+ doesn't need polynomial-sized modes, its header file can continue
+ to treat everything as fixed_size_mode. This should go away once
+ macros are moved to target hooks. It shouldn't be used in other
+ contexts. */
+#if NUM_POLY_INT_COEFFS == 1
+#define MACRO_MODE(MODE) (as_a <fixed_size_mode> (MODE))
+#else
+#define MACRO_MODE(MODE) (MODE)
+#endif
+
+extern opt_machine_mode mode_for_size (poly_uint64, enum mode_class, int);
+
+/* Return the machine mode to use for a MODE_INT of SIZE bits, if one
+ exists. If LIMIT is nonzero, modes wider than MAX_FIXED_MODE_SIZE
+ will not be used. */
+
+inline opt_scalar_int_mode
+int_mode_for_size (poly_uint64 size, int limit)
+{
+ return dyn_cast <scalar_int_mode> (mode_for_size (size, MODE_INT, limit));
+}
+
+/* Return the machine mode to use for a MODE_FLOAT of SIZE bits, if one
+ exists. */
+
+inline opt_scalar_float_mode
+float_mode_for_size (poly_uint64 size)
+{
+ return dyn_cast <scalar_float_mode> (mode_for_size (size, MODE_FLOAT, 0));
+}
+
+/* Likewise for MODE_DECIMAL_FLOAT. */
+
+inline opt_scalar_float_mode
+decimal_float_mode_for_size (unsigned int size)
+{
+ return dyn_cast <scalar_float_mode>
+ (mode_for_size (size, MODE_DECIMAL_FLOAT, 0));
+}
+
+extern machine_mode smallest_mode_for_size (poly_uint64, enum mode_class);
+
+/* Find the narrowest integer mode that contains at least SIZE bits.
+ Such a mode must exist. */
+
+inline scalar_int_mode
+smallest_int_mode_for_size (poly_uint64 size)
+{
+ return as_a <scalar_int_mode> (smallest_mode_for_size (size, MODE_INT));
+}
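+
+/* Worked examples (illustrative, assuming 8-bit units and the default
+   integer modes): int_mode_for_size (32, 0) yields SImode, and
+   smallest_int_mode_for_size (17) also yields SImode, because HImode
+   (16 bits) cannot hold 17 bits.  */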
+
+extern opt_scalar_int_mode int_mode_for_mode (machine_mode);
+extern opt_machine_mode bitwise_mode_for_mode (machine_mode);
+extern opt_machine_mode mode_for_vector (scalar_mode, poly_uint64);
+extern opt_machine_mode related_vector_mode (machine_mode, scalar_mode,
+ poly_uint64 = 0);
+extern opt_machine_mode related_int_vector_mode (machine_mode);
+
+/* A class for iterating through possible bitfield modes. */
+class bit_field_mode_iterator
+{
+public:
+ bit_field_mode_iterator (HOST_WIDE_INT, HOST_WIDE_INT,
+ poly_int64, poly_int64,
+ unsigned int, bool);
+ bool next_mode (scalar_int_mode *);
+ bool prefer_smaller_modes ();
+
+private:
+ opt_scalar_int_mode m_mode;
+ /* We use signed values here because the bit position can be negative
+ for invalid input such as gcc.dg/pr48335-8.c. */
+ HOST_WIDE_INT m_bitsize;
+ HOST_WIDE_INT m_bitpos;
+ poly_int64 m_bitregion_start;
+ poly_int64 m_bitregion_end;
+ unsigned int m_align;
+ bool m_volatilep;
+ int m_count;
+};
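+
+/* Usage sketch (illustrative; the locals mirror the constructor
+   arguments):
+
+     bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start,
+				   bitregion_end, align, volatilep);
+     scalar_int_mode mode;
+     while (iter.next_mode (&mode))
+       ... consider MODE, honoring iter.prefer_smaller_modes ()
+	   when choosing among candidates ...  */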
+
+/* Find the best mode to use to access a bit field. */
+
+extern bool get_best_mode (int, int, poly_uint64, poly_uint64, unsigned int,
+ unsigned HOST_WIDE_INT, bool, scalar_int_mode *);
+
+/* Determine alignment, 1<=result<=BIGGEST_ALIGNMENT. */
+
+extern CONST_MODE_BASE_ALIGN unsigned short mode_base_align[NUM_MACHINE_MODES];
+
+extern unsigned get_mode_alignment (machine_mode);
+
+#define GET_MODE_ALIGNMENT(MODE) get_mode_alignment (MODE)
+
+/* For each class, get the narrowest mode in that class. */
+
+extern const unsigned char class_narrowest_mode[MAX_MODE_CLASS];
+#define GET_CLASS_NARROWEST_MODE(CLASS) \
+ ((machine_mode) class_narrowest_mode[CLASS])
+
+/* The narrowest full integer mode available on the target. */
+
+#define NARROWEST_INT_MODE \
+ (scalar_int_mode \
+ (scalar_int_mode::from_int (class_narrowest_mode[MODE_INT])))
+
+/* Return the narrowest mode in T's class. */
+
+template<typename T>
+inline T
+get_narrowest_mode (T mode)
+{
+ return typename mode_traits<T>::from_int
+ (class_narrowest_mode[GET_MODE_CLASS (mode)]);
+}
+
+/* Define the integer modes whose sizes are BITS_PER_UNIT and BITS_PER_WORD
+ and the mode whose class is Pmode and whose size is POINTER_SIZE. */
+
+extern scalar_int_mode byte_mode;
+extern scalar_int_mode word_mode;
+extern scalar_int_mode ptr_mode;
+
+/* Target-dependent machine mode initialization - in insn-modes.cc. */
+extern void init_adjust_machine_modes (void);
+
+#define TRULY_NOOP_TRUNCATION_MODES_P(MODE1, MODE2) \
+ (targetm.truly_noop_truncation (GET_MODE_PRECISION (MODE1), \
+ GET_MODE_PRECISION (MODE2)))
+
+/* Return true if MODE is a scalar integer mode that fits in a
+ HOST_WIDE_INT. */
+
+inline bool
+HWI_COMPUTABLE_MODE_P (machine_mode mode)
+{
+ machine_mode mme = mode;
+ return (SCALAR_INT_MODE_P (mme)
+ && mode_to_precision (mme).coeffs[0] <= HOST_BITS_PER_WIDE_INT);
+}
+
+inline bool
+HWI_COMPUTABLE_MODE_P (scalar_int_mode mode)
+{
+ return GET_MODE_PRECISION (mode) <= HOST_BITS_PER_WIDE_INT;
+}
+
+struct int_n_data_t {
+  /* These parts are initialized by genmodes output.  */
+ unsigned int bitsize;
+ scalar_int_mode_pod m;
+ /* RID_* is RID_INTN_BASE + index into this array */
+};
+
+/* This is also in tree.h.  genmodes.cc guarantees they're sorted from
+   smallest bitsize to largest bitsize.  */
+extern bool int_n_enabled_p[NUM_INT_N_ENTS];
+extern const int_n_data_t int_n_data[NUM_INT_N_ENTS];
+
+/* Return true if MODE has class MODE_INT, storing it as a scalar_int_mode
+ in *INT_MODE if so. */
+
+template<typename T>
+inline bool
+is_int_mode (machine_mode mode, T *int_mode)
+{
+ if (GET_MODE_CLASS (mode) == MODE_INT)
+ {
+ *int_mode = scalar_int_mode (scalar_int_mode::from_int (mode));
+ return true;
+ }
+ return false;
+}
+
+/* Return true if MODE has class MODE_FLOAT, storing it as a
+ scalar_float_mode in *FLOAT_MODE if so. */
+
+template<typename T>
+inline bool
+is_float_mode (machine_mode mode, T *float_mode)
+{
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ *float_mode = scalar_float_mode (scalar_float_mode::from_int (mode));
+ return true;
+ }
+ return false;
+}
+
+/* Return true if MODE has class MODE_COMPLEX_INT, storing it as
+ a complex_mode in *CMODE if so. */
+
+template<typename T>
+inline bool
+is_complex_int_mode (machine_mode mode, T *cmode)
+{
+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
+ {
+ *cmode = complex_mode (complex_mode::from_int (mode));
+ return true;
+ }
+ return false;
+}
+
+/* Return true if MODE has class MODE_COMPLEX_FLOAT, storing it as
+ a complex_mode in *CMODE if so. */
+
+template<typename T>
+inline bool
+is_complex_float_mode (machine_mode mode, T *cmode)
+{
+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ {
+ *cmode = complex_mode (complex_mode::from_int (mode));
+ return true;
+ }
+ return false;
+}
+
+/* Return true if MODE is a scalar integer mode with a precision
+ smaller than LIMIT's precision. */
+
+inline bool
+is_narrower_int_mode (machine_mode mode, scalar_int_mode limit)
+{
+ scalar_int_mode int_mode;
+ return (is_a <scalar_int_mode> (mode, &int_mode)
+ && GET_MODE_PRECISION (int_mode) < GET_MODE_PRECISION (limit));
+}
+
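+/* Illustrative sketch (not part of the header): the is_*_mode
+   predicates above let callers test a mode's class and obtain the
+   more specific mode type in one step:
+
+     scalar_int_mode int_mode;
+     scalar_float_mode float_mode;
+     if (is_int_mode (mode, &int_mode))
+       ... use INT_MODE ...
+     else if (is_float_mode (mode, &float_mode))
+       ... use FLOAT_MODE ...  */
+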
+namespace mode_iterator
+{
+ /* Start mode iterator *ITER at the first mode in class MCLASS, if any. */
+
+ template<typename T>
+ inline void
+ start (opt_mode<T> *iter, enum mode_class mclass)
+ {
+ if (GET_CLASS_NARROWEST_MODE (mclass) == E_VOIDmode)
+ *iter = opt_mode<T> ();
+ else
+ *iter = as_a<T> (GET_CLASS_NARROWEST_MODE (mclass));
+ }
+
+ inline void
+ start (machine_mode *iter, enum mode_class mclass)
+ {
+ *iter = GET_CLASS_NARROWEST_MODE (mclass);
+ }
+
+ /* Return true if mode iterator *ITER has not reached the end. */
+
+ template<typename T>
+ inline bool
+ iterate_p (opt_mode<T> *iter)
+ {
+ return iter->exists ();
+ }
+
+ inline bool
+ iterate_p (machine_mode *iter)
+ {
+ return *iter != E_VOIDmode;
+ }
+
+ /* Set mode iterator *ITER to the next mode in the same class,
+ if any. */
+
+ template<typename T>
+ inline void
+ get_next (opt_mode<T> *iter)
+ {
+ *iter = GET_MODE_NEXT_MODE (iter->require ());
+ }
+
+ inline void
+ get_next (machine_mode *iter)
+ {
+ *iter = GET_MODE_NEXT_MODE (*iter).else_void ();
+ }
+
+ /* Set mode iterator *ITER to the next mode in the same class.
+ Such a mode is known to exist. */
+
+ template<typename T>
+ inline void
+ get_known_next (T *iter)
+ {
+ *iter = GET_MODE_NEXT_MODE (*iter).require ();
+ }
+
+ /* Set mode iterator *ITER to the next wider mode in the same class,
+ if any. */
+
+ template<typename T>
+ inline void
+ get_wider (opt_mode<T> *iter)
+ {
+ *iter = GET_MODE_WIDER_MODE (iter->require ());
+ }
+
+ inline void
+ get_wider (machine_mode *iter)
+ {
+ *iter = GET_MODE_WIDER_MODE (*iter).else_void ();
+ }
+
+ /* Set mode iterator *ITER to the next wider mode in the same class.
+ Such a mode is known to exist. */
+
+ template<typename T>
+ inline void
+ get_known_wider (T *iter)
+ {
+ *iter = GET_MODE_WIDER_MODE (*iter).require ();
+ }
+
+ /* Set mode iterator *ITER to the mode that is two times wider than the
+ current one, if such a mode exists. */
+
+ template<typename T>
+ inline void
+ get_2xwider (opt_mode<T> *iter)
+ {
+ *iter = GET_MODE_2XWIDER_MODE (iter->require ());
+ }
+
+ inline void
+ get_2xwider (machine_mode *iter)
+ {
+ *iter = GET_MODE_2XWIDER_MODE (*iter).else_void ();
+ }
+}
+
+/* Make ITERATOR iterate over all the modes in mode class CLASS,
+ from narrowest to widest. */
+#define FOR_EACH_MODE_IN_CLASS(ITERATOR, CLASS) \
+ for (mode_iterator::start (&(ITERATOR), CLASS); \
+ mode_iterator::iterate_p (&(ITERATOR)); \
+ mode_iterator::get_next (&(ITERATOR)))
+
+/* Make ITERATOR iterate over all the modes in the range [START, END),
+ in order of increasing width. */
+#define FOR_EACH_MODE(ITERATOR, START, END) \
+ for ((ITERATOR) = (START); \
+ (ITERATOR) != (END); \
+ mode_iterator::get_known_next (&(ITERATOR)))
+
+/* Make ITERATOR iterate over START and all non-narrower modes in the same
+ class, in order of increasing width. */
+#define FOR_EACH_MODE_FROM(ITERATOR, START) \
+ for ((ITERATOR) = (START); \
+ mode_iterator::iterate_p (&(ITERATOR)); \
+ mode_iterator::get_next (&(ITERATOR)))
+
+/* Make ITERATOR iterate over START and all wider modes in the same
+ class, in order of strictly increasing width. */
+#define FOR_EACH_WIDER_MODE_FROM(ITERATOR, START) \
+ for ((ITERATOR) = (START); \
+ mode_iterator::iterate_p (&(ITERATOR)); \
+ mode_iterator::get_wider (&(ITERATOR)))
+
+/* Make ITERATOR iterate over modes in the range [NARROWEST, END)
+ in order of increasing width, where NARROWEST is the narrowest mode
+ in END's class. */
+#define FOR_EACH_MODE_UNTIL(ITERATOR, END) \
+ FOR_EACH_MODE (ITERATOR, get_narrowest_mode (END), END)
+
+/* Make ITERATOR iterate over modes in the same class as MODE, in order
+ of non-decreasing width. Start at next such mode after START,
+ or don't iterate at all if there is no such mode. */
+#define FOR_EACH_NEXT_MODE(ITERATOR, START) \
+ for ((ITERATOR) = (START), mode_iterator::get_next (&(ITERATOR)); \
+ mode_iterator::iterate_p (&(ITERATOR)); \
+ mode_iterator::get_next (&(ITERATOR)))
+
+/* Make ITERATOR iterate over modes in the same class as MODE, in order
+ of increasing width. Start at the first mode wider than START,
+ or don't iterate at all if there is no wider mode. */
+#define FOR_EACH_WIDER_MODE(ITERATOR, START) \
+ for ((ITERATOR) = (START), mode_iterator::get_wider (&(ITERATOR)); \
+ mode_iterator::iterate_p (&(ITERATOR)); \
+ mode_iterator::get_wider (&(ITERATOR)))
+
+/* Make ITERATOR iterate over modes in the same class as MODE, in order
+ of increasing width, and with each mode being twice the width of the
+ previous mode. Start at the mode that is two times wider than START,
+ or don't iterate at all if there is no such mode. */
+#define FOR_EACH_2XWIDER_MODE(ITERATOR, START) \
+ for ((ITERATOR) = (START), mode_iterator::get_2xwider (&(ITERATOR)); \
+ mode_iterator::iterate_p (&(ITERATOR)); \
+ mode_iterator::get_2xwider (&(ITERATOR)))
+
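+/* Illustrative sketch (not part of the header): walk every integer
+   mode from narrowest to widest, using an opt_* iterator so that
+   reaching the end of the class is signalled by the empty state:
+
+     opt_scalar_int_mode iter;
+     FOR_EACH_MODE_IN_CLASS (iter, MODE_INT)
+       {
+         scalar_int_mode mode = iter.require ();
+         if (GET_MODE_PRECISION (mode) <= HOST_BITS_PER_WIDE_INT)
+           ... MODE fits in a HOST_WIDE_INT ...
+       }  */
+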
+template<typename T>
+void
+gt_ggc_mx (pod_mode<T> *)
+{
+}
+
+template<typename T>
+void
+gt_pch_nx (pod_mode<T> *)
+{
+}
+
+template<typename T>
+void
+gt_pch_nx (pod_mode<T> *, gt_pointer_operator, void *)
+{
+}
+
+#endif /* not HAVE_MACHINE_MODES */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/make-unique.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/make-unique.h
new file mode 100644
index 0000000..cd4dca0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/make-unique.h
@@ -0,0 +1,44 @@
+/* Minimal implementation of make_unique for C++11 compatibility.
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_MAKE_UNIQUE
+#define GCC_MAKE_UNIQUE
+
+/* This header uses std::unique_ptr, but <memory> can't be directly
+ included due to issues with macros. Hence <memory> must be included
+ from system.h by defining INCLUDE_MEMORY in any source file using
+ make-unique.h. */
+
+#ifndef INCLUDE_MEMORY
+# error "You must define INCLUDE_MEMORY before including system.h to use make-unique.h"
+#endif
+
+#include <type_traits>
+
+/* Minimal implementation of make_unique for C++11 compatibility
+ (std::make_unique is C++14). */
+
+template<typename T, typename... Args>
+inline typename std::enable_if<!std::is_array<T>::value, std::unique_ptr<T>>::type
+make_unique(Args&&... args)
+{
+ return std::unique_ptr<T> (new T (std::forward<Args> (args)...));
+}
+
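+/* Illustrative sketch (not part of the header), assuming a
+   hypothetical type foo with a two-argument constructor:
+
+     std::unique_ptr<foo> p = ::make_unique<foo> (1, 2);
+
+   Unlike a bare `new foo (1, 2)', the result is owned immediately,
+   so nothing leaks if a later initialization step throws.  */
+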
+#endif /* ! GCC_MAKE_UNIQUE */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/md5.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/md5.h
new file mode 100644
index 0000000..96607c3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/md5.h
@@ -0,0 +1,160 @@
+/* md5.h - Declaration of functions and data types used for MD5 sum
+ computing library functions.
+ Copyright (C) 1995-2023 Free Software Foundation, Inc.
+ NOTE: The canonical source of this file is maintained with the GNU C
+ Library. Bugs can be reported to bug-glibc@prep.ai.mit.edu.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef _MD5_H
+#define _MD5_H 1
+
+#ifdef USE_SYSTEM_MD5
+#include_next <md5.h>
+#else
+
+#include <stdio.h>
+
+#if defined HAVE_LIMITS_H || _LIBC
+# include <limits.h>
+#endif
+
+#include "ansidecl.h"
+
+/* The following contortions are an attempt to use the C preprocessor
+ to determine an unsigned integral type that is 32 bits wide. An
+ alternative approach is to use autoconf's AC_CHECK_SIZEOF macro, but
+ doing that would require that the configure script compile and *run*
+ the resulting executable. Locally running cross-compiled executables
+ is usually not possible. */
+
+#ifdef _LIBC
+# include <sys/types.h>
+typedef u_int32_t md5_uint32;
+typedef uintptr_t md5_uintptr;
+#elif defined (HAVE_SYS_TYPES_H) && defined (HAVE_STDINT_H)
+#include <stdint.h>
+#include <sys/types.h>
+typedef uint32_t md5_uint32;
+typedef uintptr_t md5_uintptr;
+#else
+# define INT_MAX_32_BITS 2147483647
+
+/* If UINT_MAX isn't defined, assume it's a 32-bit type.
+ This should be valid for all systems GNU cares about because
+ that doesn't include 16-bit systems, and only modern systems
+ (that certainly have <limits.h>) have 64+-bit integral types. */
+
+# ifndef INT_MAX
+# define INT_MAX INT_MAX_32_BITS
+# endif
+
+# if INT_MAX == INT_MAX_32_BITS
+ typedef unsigned int md5_uint32;
+# else
+# if SHRT_MAX == INT_MAX_32_BITS
+ typedef unsigned short md5_uint32;
+# else
+# if LONG_MAX == INT_MAX_32_BITS
+ typedef unsigned long md5_uint32;
+# else
+ /* The following line is intended to evoke an error.
+ Using #error is not portable enough. */
+ "Cannot determine unsigned 32-bit data type."
+# endif
+# endif
+# endif
+/* We have to make a guess about the integer type equivalent in size
+   to pointers, which should always be correct.  */
+typedef unsigned long int md5_uintptr;
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Structure to save state of computation between the single steps. */
+struct md5_ctx
+{
+ md5_uint32 A;
+ md5_uint32 B;
+ md5_uint32 C;
+ md5_uint32 D;
+
+ md5_uint32 total[2];
+ md5_uint32 buflen;
+ char buffer[128] ATTRIBUTE_ALIGNED_ALIGNOF(md5_uint32);
+};
+
+/* The following three functions build up the low-level operations used
+   by the functions `md5_stream' and `md5_buffer'.  */
+
+/* Initialize structure containing state of computation.
+ (RFC 1321, 3.3: Step 3) */
+extern void md5_init_ctx (struct md5_ctx *ctx);
+
+/* Starting with the result of former calls of this function (or the
+   initialization function), update the context for the next LEN bytes
+   starting at BUFFER.
+   LEN must be a multiple of 64.  */
+extern void md5_process_block (const void *buffer, size_t len,
+ struct md5_ctx *ctx);
+
+/* Starting with the result of former calls of this function (or the
+   initialization function), update the context for the next LEN bytes
+   starting at BUFFER.
+   LEN need NOT be a multiple of 64.  */
+extern void md5_process_bytes (const void *buffer, size_t len,
+ struct md5_ctx *ctx);
+
+/* Process the remaining bytes in the buffer and put the result from CTX
+   in the first 16 bytes following RESBUF.  The result is always in little
+   endian byte order, so that a byte-wise output yields the desired
+   ASCII representation of the message digest.
+
+ IMPORTANT: On some systems it is required that RESBUF is correctly
+ aligned for a 32 bits value. */
+extern void *md5_finish_ctx (struct md5_ctx *ctx, void *resbuf);
+
+
+/* Put the result from CTX in the first 16 bytes following RESBUF.  The
+   result is always in little endian byte order, so that a byte-wise
+   output yields the desired ASCII representation of the message digest.
+
+ IMPORTANT: On some systems it is required that RESBUF is correctly
+ aligned for a 32 bits value. */
+extern void *md5_read_ctx (const struct md5_ctx *ctx, void *resbuf);
+
+
+/* Compute MD5 message digest for bytes read from STREAM. The
+ resulting message digest number will be written into the 16 bytes
+ beginning at RESBLOCK. */
+extern int md5_stream (FILE *stream, void *resblock);
+
+/* Compute MD5 message digest for LEN bytes beginning at BUFFER.  The
+   result is always in little endian byte order, so that a byte-wise
+   output yields the desired ASCII representation of the message
+   digest.  */
+extern void *md5_buffer (const char *buffer, size_t len, void *resblock);
+
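+/* Illustrative sketch (not part of the header): compute the digest of
+   an in-memory string either incrementally or in one call:
+
+     struct md5_ctx ctx;
+     unsigned char digest[16];
+
+     md5_init_ctx (&ctx);
+     md5_process_bytes ("abc", 3, &ctx);
+     md5_finish_ctx (&ctx, digest);
+
+   or, equivalently:
+
+     md5_buffer ("abc", 3, digest);  */
+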
+#ifdef __cplusplus
+}
+#endif
+
+#endif // USE_SYSTEM_MD5
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/mem-stats-traits.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/mem-stats-traits.h
new file mode 100644
index 0000000..0ffa05e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/mem-stats-traits.h
@@ -0,0 +1,41 @@
+/* Memory statistics traits.
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+ Contributed by Martin Liska <mliska@suse.cz>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_MEM_STATS_TRAITS_H
+#define GCC_MEM_STATS_TRAITS_H
+
+/* Memory allocation origin. */
+enum mem_alloc_origin
+{
+ HASH_TABLE_ORIGIN,
+ HASH_MAP_ORIGIN,
+ HASH_SET_ORIGIN,
+ VEC_ORIGIN,
+ BITMAP_ORIGIN,
+ GGC_ORIGIN,
+ ALLOC_POOL_ORIGIN,
+ MEM_ALLOC_ORIGIN_LENGTH
+};
+
+/* Verbose names of the memory allocation origin. */
+static const char * mem_alloc_origin_names[] = { "Hash tables", "Hash maps",
+ "Hash sets", "Heap vectors", "Bitmaps", "GGC memory", "Allocation pool" };
+
+#endif // GCC_MEM_STATS_TRAITS_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/mem-stats.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/mem-stats.h
new file mode 100644
index 0000000..1ddc256
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/mem-stats.h
@@ -0,0 +1,658 @@
+/* A memory statistics tracking infrastructure.
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+ Contributed by Martin Liska <mliska@suse.cz>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_MEM_STATS_H
+#define GCC_MEM_STATS_H
+
+/* Forward declaration. */
+template<typename Key, typename Value,
+ typename Traits = simple_hashmap_traits<default_hash_traits<Key>,
+ Value> >
+class hash_map;
+
+#define LOCATION_LINE_EXTRA_SPACE 30
+#define LOCATION_LINE_WIDTH 48
+
+/* Memory allocation location. */
+class mem_location
+{
+public:
+ /* Default constructor. */
+ inline
+ mem_location () {}
+
+ /* Constructor. */
+ inline
+ mem_location (mem_alloc_origin origin, bool ggc,
+ const char *filename = NULL, int line = 0,
+ const char *function = NULL):
+ m_filename (filename), m_function (function), m_line (line), m_origin
+ (origin), m_ggc (ggc) {}
+
+ /* Copy constructor. */
+ inline
+ mem_location (mem_location &other): m_filename (other.m_filename),
+ m_function (other.m_function), m_line (other.m_line),
+ m_origin (other.m_origin), m_ggc (other.m_ggc) {}
+
+ /* Compute hash value based on file name, function name and line in
+ source code. As there is just a single pointer registered for every
+ constant that points to e.g. the same file name, we can use hash
+ of the pointer. */
+ hashval_t
+ hash ()
+ {
+ inchash::hash hash;
+
+ hash.add_ptr (m_filename);
+ hash.add_ptr (m_function);
+ hash.add_int (m_line);
+
+ return hash.end ();
+ }
+
+ /* Return true if the memory location is equal to OTHER. */
+ int
+ equal (const mem_location &other)
+ {
+ return m_filename == other.m_filename && m_function == other.m_function
+ && m_line == other.m_line;
+ }
+
+ /* Return trimmed filename for the location. */
+ inline const char *
+ get_trimmed_filename ()
+ {
+ const char *s1 = m_filename;
+ const char *s2;
+
+ while ((s2 = strstr (s1, "gcc/")))
+ s1 = s2 + 4;
+
+ return s1;
+ }
+
+ inline char *
+ to_string ()
+ {
+ unsigned l = strlen (get_trimmed_filename ()) + strlen (m_function)
+ + LOCATION_LINE_EXTRA_SPACE;
+
+ char *s = XNEWVEC (char, l);
+ sprintf (s, "%s:%i (%s)", get_trimmed_filename (),
+ m_line, m_function);
+
+ s[MIN (LOCATION_LINE_WIDTH, l - 1)] = '\0';
+
+ return s;
+ }
+
+ /* Return display name associated to ORIGIN type. */
+ static const char *
+ get_origin_name (mem_alloc_origin origin)
+ {
+ return mem_alloc_origin_names[(unsigned) origin];
+ }
+
+ /* File name of source code. */
+ const char *m_filename;
+  /* Function name.  */
+ const char *m_function;
+ /* Line number in source code. */
+ int m_line;
+ /* Origin type. */
+ mem_alloc_origin m_origin;
+ /* Flag if used by GGC allocation. */
+ bool m_ggc;
+};
+
+/* Memory usage register to a memory location. */
+class mem_usage
+{
+public:
+ /* Default constructor. */
+ mem_usage (): m_allocated (0), m_times (0), m_peak (0), m_instances (1) {}
+
+ /* Constructor. */
+ mem_usage (size_t allocated, size_t times, size_t peak, size_t instances = 0):
+ m_allocated (allocated), m_times (times), m_peak (peak),
+ m_instances (instances) {}
+
+ /* Register overhead of SIZE bytes. */
+ inline void
+ register_overhead (size_t size)
+ {
+ m_allocated += size;
+ m_times++;
+
+ if (m_peak < m_allocated)
+ m_peak = m_allocated;
+ }
+
+ /* Release overhead of SIZE bytes. */
+ inline void
+ release_overhead (size_t size)
+ {
+ gcc_assert (size <= m_allocated);
+
+ m_allocated -= size;
+ }
+
+ /* Sum the usage with SECOND usage. */
+ mem_usage
+ operator+ (const mem_usage &second)
+ {
+ return mem_usage (m_allocated + second.m_allocated,
+ m_times + second.m_times,
+ m_peak + second.m_peak,
+ m_instances + second.m_instances);
+ }
+
+ /* Equality operator. */
+ inline bool
+ operator== (const mem_usage &second) const
+ {
+ return (m_allocated == second.m_allocated
+ && m_peak == second.m_peak
+ && m_times == second.m_times);
+ }
+
+ /* Comparison operator. */
+ inline bool
+ operator< (const mem_usage &second) const
+ {
+ if (*this == second)
+ return false;
+
+ return (m_allocated == second.m_allocated ?
+ (m_peak == second.m_peak ? m_times < second.m_times
+ : m_peak < second.m_peak) : m_allocated < second.m_allocated);
+ }
+
+ /* Compare wrapper used by qsort method. */
+ static int
+ compare (const void *first, const void *second)
+ {
+ typedef std::pair<mem_location *, mem_usage *> mem_pair_t;
+
+ const mem_pair_t f = *(const mem_pair_t *)first;
+ const mem_pair_t s = *(const mem_pair_t *)second;
+
+ if (*f.second == *s.second)
+ return 0;
+
+ return *f.second < *s.second ? 1 : -1;
+ }
+
+ /* Dump usage coupled to LOC location, where TOTAL is sum of all rows. */
+ inline void
+ dump (mem_location *loc, const mem_usage &total) const
+ {
+ char *location_string = loc->to_string ();
+
+ fprintf (stderr, "%-48s " PRsa (9) ":%5.1f%%"
+ PRsa (9) PRsa (9) ":%5.1f%%%10s\n",
+ location_string, SIZE_AMOUNT (m_allocated),
+ get_percent (m_allocated, total.m_allocated),
+ SIZE_AMOUNT (m_peak), SIZE_AMOUNT (m_times),
+ get_percent (m_times, total.m_times), loc->m_ggc ? "ggc" : "heap");
+
+ free (location_string);
+ }
+
+ /* Dump footer. */
+ inline void
+ dump_footer () const
+ {
+ fprintf (stderr, "%s" PRsa (53) PRsa (26) "\n", "Total",
+ SIZE_AMOUNT (m_allocated), SIZE_AMOUNT (m_times));
+ }
+
+  /* Return NOMINATOR as a percentage of DENOMINATOR.  */
+ static inline float
+ get_percent (size_t nominator, size_t denominator)
+ {
+ return denominator == 0 ? 0.0f : nominator * 100.0 / denominator;
+ }
+
+ /* Print line made of dashes. */
+ static inline void
+ print_dash_line (size_t count = 140)
+ {
+ while (count--)
+ fputc ('-', stderr);
+ fputc ('\n', stderr);
+ }
+
+ /* Dump header with NAME. */
+ static inline void
+ dump_header (const char *name)
+ {
+ fprintf (stderr, "%-48s %11s%16s%10s%17s\n", name, "Leak", "Peak",
+ "Times", "Type");
+ }
+
+ /* Current number of allocated bytes. */
+ size_t m_allocated;
+ /* Number of allocations. */
+ size_t m_times;
+ /* Peak allocation in bytes. */
+ size_t m_peak;
+ /* Number of container instances. */
+ size_t m_instances;
+};
+
+/* Memory usage pair that connects a memory usage descriptor with the
+   number of allocated bytes.  */
+template <class T>
+class mem_usage_pair
+{
+public:
+ mem_usage_pair (T *usage_, size_t allocated_): usage (usage_),
+ allocated (allocated_) {}
+
+ T *usage;
+ size_t allocated;
+};
+
+/* Memory allocation description. */
+template <class T>
+class mem_alloc_description
+{
+public:
+ struct mem_location_hash : nofree_ptr_hash <mem_location>
+ {
+ static hashval_t
+ hash (value_type l)
+ {
+ inchash::hash hstate;
+
+ hstate.add_ptr ((const void *)l->m_filename);
+ hstate.add_ptr (l->m_function);
+ hstate.add_int (l->m_line);
+
+ return hstate.end ();
+ }
+
+ static bool
+ equal (value_type l1, value_type l2)
+ {
+ return (l1->m_filename == l2->m_filename
+ && l1->m_function == l2->m_function
+ && l1->m_line == l2->m_line);
+ }
+ };
+
+ /* Internal class type definitions. */
+ typedef hash_map <mem_location_hash, T *> mem_map_t;
+ typedef hash_map <const void *, mem_usage_pair<T> > reverse_mem_map_t;
+ typedef hash_map <const void *, std::pair<T *, size_t> > reverse_object_map_t;
+ typedef std::pair <mem_location *, T *> mem_list_t;
+
+  /* Default constructor.  */
+ mem_alloc_description ();
+
+ /* Default destructor. */
+ ~mem_alloc_description ();
+
+ /* Returns true if instance PTR is registered by the memory description. */
+ bool contains_descriptor_for_instance (const void *ptr);
+
+ /* Return descriptor for instance PTR. */
+ T *get_descriptor_for_instance (const void *ptr);
+
+ /* Register memory allocation descriptor for container PTR which is
+ described by a memory LOCATION. */
+ T *register_descriptor (const void *ptr, mem_location *location);
+
+  /* Register memory allocation descriptor for container PTR.  ORIGIN
+     identifies the type of container and GGC identifies whether the
+     allocation is handled in GGC memory.  Each location is identified
+     by file NAME, LINE in source code and FUNCTION name.  */
+ T *register_descriptor (const void *ptr, mem_alloc_origin origin,
+ bool ggc, const char *name, int line,
+ const char *function);
+
+ /* Register instance overhead identified by PTR pointer. Allocation takes
+ SIZE bytes. */
+ T *register_instance_overhead (size_t size, const void *ptr);
+
+ /* For containers (and GGC) where we want to track every instance object,
+ we register allocation of SIZE bytes, identified by PTR pointer, belonging
+ to USAGE descriptor. */
+ void register_object_overhead (T *usage, size_t size, const void *ptr);
+
+ /* Release PTR pointer of SIZE bytes. If REMOVE_FROM_MAP is set to true,
+ remove the instance from reverse map. Return memory usage that belongs
+ to this memory description. */
+ T *release_instance_overhead (void *ptr, size_t size,
+ bool remove_from_map = false);
+
+ /* Release instance object identified by PTR pointer. */
+ void release_object_overhead (void *ptr);
+
+ /* Unregister a memory allocation descriptor registered with
+ register_descriptor (remove from reverse map), unless it is
+ unregistered through release_instance_overhead with
+ REMOVE_FROM_MAP = true. */
+ void unregister_descriptor (void *ptr);
+
+ /* Get sum value for ORIGIN type of allocation for the descriptor. */
+ T get_sum (mem_alloc_origin origin);
+
+  /* Get all tracked instances registered by the description.  Items
+     are filtered by ORIGIN type; the number of elements in the list
+     is returned in *LENGTH.  */
+ mem_list_t *get_list (mem_alloc_origin origin, unsigned *length);
+
+  /* Dump all tracked instances of type ORIGIN.  */
+ void dump (mem_alloc_origin origin);
+
+ /* Reverse object map used for every object allocation mapping. */
+ reverse_object_map_t *m_reverse_object_map;
+
+private:
+ /* Register overhead of SIZE bytes of ORIGIN type. PTR pointer is allocated
+ in NAME source file, at LINE in source code, in FUNCTION. */
+ T *register_overhead (size_t size, mem_alloc_origin origin, const char *name,
+ int line, const char *function, const void *ptr);
+
+ /* Allocation location coupled to the description. */
+ mem_location m_location;
+
+ /* Location to usage mapping. */
+ mem_map_t *m_map;
+
+ /* Reverse pointer to usage mapping. */
+ reverse_mem_map_t *m_reverse_map;
+};
+
+/* Returns true if instance PTR is registered by the memory description. */
+
+template <class T>
+inline bool
+mem_alloc_description<T>::contains_descriptor_for_instance (const void *ptr)
+{
+ return m_reverse_map->get (ptr);
+}
+
+/* Return descriptor for instance PTR. */
+
+template <class T>
+inline T*
+mem_alloc_description<T>::get_descriptor_for_instance (const void *ptr)
+{
+ return m_reverse_map->get (ptr) ? (*m_reverse_map->get (ptr)).usage : NULL;
+}
+
+/* Register memory allocation descriptor for container PTR which is
+ described by a memory LOCATION. */
+
+template <class T>
+inline T*
+mem_alloc_description<T>::register_descriptor (const void *ptr,
+ mem_location *location)
+{
+ T *usage = NULL;
+
+ T **slot = m_map->get (location);
+ if (slot)
+ {
+ delete location;
+ usage = *slot;
+ usage->m_instances++;
+ }
+ else
+ {
+ usage = new T ();
+ m_map->put (location, usage);
+ }
+
+ if (!m_reverse_map->get (ptr))
+ m_reverse_map->put (ptr, mem_usage_pair<T> (usage, 0));
+
+ return usage;
+}
+
+/* Register memory allocation descriptor for container PTR.  ORIGIN
+   identifies the type of container and GGC identifies whether the
+   allocation is handled in GGC memory.  Each location is identified
+   by file NAME, LINE in source code and FUNCTION name.  */
+
+template <class T>
+inline T*
+mem_alloc_description<T>::register_descriptor (const void *ptr,
+ mem_alloc_origin origin,
+ bool ggc,
+ const char *filename,
+ int line,
+ const char *function)
+{
+ mem_location *l = new mem_location (origin, ggc, filename, line, function);
+ return register_descriptor (ptr, l);
+}
+
+/* Register instance overhead identified by PTR pointer. Allocation takes
+ SIZE bytes. */
+
+template <class T>
+inline T*
+mem_alloc_description<T>::register_instance_overhead (size_t size,
+ const void *ptr)
+{
+ mem_usage_pair <T> *slot = m_reverse_map->get (ptr);
+ if (!slot)
+ {
+      /* This can legitimately happen because of PCH.  */
+ return NULL;
+ }
+
+ T *usage = (*slot).usage;
+ usage->register_overhead (size);
+
+ return usage;
+}
+
+/* For containers (and GGC) where we want to track every instance object,
+ we register allocation of SIZE bytes, identified by PTR pointer, belonging
+ to USAGE descriptor. */
+
+template <class T>
+void
+mem_alloc_description<T>::register_object_overhead (T *usage, size_t size,
+ const void *ptr)
+{
+  /* With GGC it is possible that the memory location has already been
+     occupied.  */
+ m_reverse_object_map->put (ptr, std::pair<T *, size_t> (usage, size));
+}
+
+/* Register overhead of SIZE bytes of ORIGIN type. PTR pointer is allocated
+ in NAME source file, at LINE in source code, in FUNCTION. */
+
+template <class T>
+inline T*
+mem_alloc_description<T>::register_overhead (size_t size,
+ mem_alloc_origin origin,
+ const char *filename,
+ int line,
+ const char *function,
+ const void *ptr)
+{
+ T *usage = register_descriptor (ptr, origin, filename, line, function);
+ usage->register_overhead (size);
+
+ return usage;
+}
+
+/* Release PTR pointer of SIZE bytes. */
+
+template <class T>
+inline T *
+mem_alloc_description<T>::release_instance_overhead (void *ptr, size_t size,
+ bool remove_from_map)
+{
+ mem_usage_pair<T> *slot = m_reverse_map->get (ptr);
+
+ if (!slot)
+ {
+      /* This can legitimately happen because of PCH.  */
+ return NULL;
+ }
+
+ T *usage = (*slot).usage;
+ usage->release_overhead (size);
+
+ if (remove_from_map)
+ m_reverse_map->remove (ptr);
+
+ return usage;
+}
+
+/* Release instance object identified by PTR pointer. */
+
+template <class T>
+inline void
+mem_alloc_description<T>::release_object_overhead (void *ptr)
+{
+ std::pair <T *, size_t> *entry = m_reverse_object_map->get (ptr);
+ entry->first->release_overhead (entry->second);
+ m_reverse_object_map->remove (ptr);
+}
+
+/* Unregister a memory allocation descriptor registered with
+ register_descriptor (remove from reverse map), unless it is
+ unregistered through release_instance_overhead with
+ REMOVE_FROM_MAP = true. */
+template <class T>
+inline void
+mem_alloc_description<T>::unregister_descriptor (void *ptr)
+{
+ m_reverse_map->remove (ptr);
+}
+
+/* Default constructor.  */
+
+template <class T>
+inline
+mem_alloc_description<T>::mem_alloc_description ()
+{
+ m_map = new mem_map_t (13, false, false, false);
+ m_reverse_map = new reverse_mem_map_t (13, false, false, false);
+ m_reverse_object_map = new reverse_object_map_t (13, false, false, false);
+}
+
+/* Default destructor. */
+
+template <class T>
+inline
+mem_alloc_description<T>::~mem_alloc_description ()
+{
+ for (typename mem_map_t::iterator it = m_map->begin (); it != m_map->end ();
+ ++it)
+ {
+ delete (*it).first;
+ delete (*it).second;
+ }
+
+ delete m_map;
+ delete m_reverse_map;
+ delete m_reverse_object_map;
+}
+
+/* Get all tracked instances registered by the description.  Items are
+   filtered by ORIGIN type; the number of elements in the list is
+   returned in *LENGTH.  */
+
+template <class T>
+inline
+typename mem_alloc_description<T>::mem_list_t *
+mem_alloc_description<T>::get_list (mem_alloc_origin origin, unsigned *length)
+{
+  /* The vec data structure is not used because all vectors generate
+     memory allocation info themselves and that would create a cycle.  */
+ size_t element_size = sizeof (mem_list_t);
+ mem_list_t *list = XCNEWVEC (mem_list_t, m_map->elements ());
+ unsigned i = 0;
+
+ for (typename mem_map_t::iterator it = m_map->begin (); it != m_map->end ();
+ ++it)
+ if ((*it).first->m_origin == origin)
+ list[i++] = std::pair<mem_location*, T*> (*it);
+
+ qsort (list, i, element_size, T::compare);
+ *length = i;
+
+ return list;
+}
+
+/* Get sum value for ORIGIN type of allocation for the descriptor. */
+
+template <class T>
+inline T
+mem_alloc_description<T>::get_sum (mem_alloc_origin origin)
+{
+ unsigned length;
+ mem_list_t *list = get_list (origin, &length);
+ T sum;
+
+ for (unsigned i = 0; i < length; i++)
+ sum = sum + *list[i].second;
+
+ XDELETEVEC (list);
+
+ return sum;
+}
+
+/* Dump all tracked instances of type ORIGIN.  */
+
+template <class T>
+inline void
+mem_alloc_description<T>::dump (mem_alloc_origin origin)
+{
+ unsigned length;
+
+ fprintf (stderr, "\n");
+
+ mem_list_t *list = get_list (origin, &length);
+ T total = get_sum (origin);
+
+ T::print_dash_line ();
+ T::dump_header (mem_location::get_origin_name (origin));
+ T::print_dash_line ();
+ for (int i = length - 1; i >= 0; i--)
+ list[i].second->dump (list[i].first, total);
+ T::print_dash_line ();
+
+ T::dump_header (mem_location::get_origin_name (origin));
+ T::print_dash_line ();
+ total.dump_footer ();
+ T::print_dash_line ();
+
+ XDELETEVEC (list);
+
+ fprintf (stderr, "\n");
+}
+
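+/* Illustrative sketch (not part of the header): how an allocator might
+   hook into this infrastructure, assuming a hypothetical file-static
+   descriptor MY_DESC and wrapper MY_ALLOC:
+
+     static mem_alloc_description<mem_usage> my_desc;
+
+     void *
+     my_alloc (size_t size)
+     {
+       void *p = xmalloc (size);
+       my_desc.register_descriptor (p, ALLOC_POOL_ORIGIN, false,
+                                    __FILE__, __LINE__, __FUNCTION__);
+       my_desc.register_instance_overhead (size, p);
+       return p;
+     }
+
+   A later call to my_desc.dump (ALLOC_POOL_ORIGIN) then prints a
+   per-location report to stderr.  */
+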
+#endif // GCC_MEM_STATS_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/memmodel.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/memmodel.h
new file mode 100644
index 0000000..7dfad2f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/memmodel.h
@@ -0,0 +1,116 @@
+/* Prototypes of memory model helper functions.
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_MEMMODEL_H
+#define GCC_MEMMODEL_H
+
+/* Suppose that higher bits are target dependent. */
+#define MEMMODEL_MASK ((1<<16)-1)
+
+/* Legacy sync operations set this upper flag in the memory model. This allows
+ targets that need to do something stronger for sync operations to
+ differentiate with their target patterns and issue a more appropriate insn
+ sequence. See bugzilla 65697 for background. */
+#define MEMMODEL_SYNC (1<<15)
+
+/* Memory model without SYNC bit for targets/operations that do not care. */
+#define MEMMODEL_BASE_MASK (MEMMODEL_SYNC-1)
+
+/* Memory model types for the __atomic* builtins.
+ This must match the order in libstdc++-v3/include/bits/atomic_base.h. */
+enum memmodel
+{
+ MEMMODEL_RELAXED = 0,
+ MEMMODEL_CONSUME = 1,
+ MEMMODEL_ACQUIRE = 2,
+ MEMMODEL_RELEASE = 3,
+ MEMMODEL_ACQ_REL = 4,
+ MEMMODEL_SEQ_CST = 5,
+ MEMMODEL_LAST = 6,
+ MEMMODEL_SYNC_ACQUIRE = MEMMODEL_ACQUIRE | MEMMODEL_SYNC,
+ MEMMODEL_SYNC_RELEASE = MEMMODEL_RELEASE | MEMMODEL_SYNC,
+ MEMMODEL_SYNC_SEQ_CST = MEMMODEL_SEQ_CST | MEMMODEL_SYNC,
+ /* Say that all the higher bits are valid target extensions. */
+ MEMMODEL_MAX = INTTYPE_MAXIMUM (int)
+};
+
+/* Return the memory model from a host integer. */
+inline enum memmodel
+memmodel_from_int (unsigned HOST_WIDE_INT val)
+{
+ return (enum memmodel) (val & MEMMODEL_MASK);
+}
+
+/* Return the base memory model from a host integer. */
+inline enum memmodel
+memmodel_base (unsigned HOST_WIDE_INT val)
+{
+ return (enum memmodel) (val & MEMMODEL_BASE_MASK);
+}
+
+/* Return TRUE if the memory model is RELAXED. */
+inline bool
+is_mm_relaxed (enum memmodel model)
+{
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_RELAXED;
+}
+
+/* Return TRUE if the memory model is CONSUME. */
+inline bool
+is_mm_consume (enum memmodel model)
+{
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_CONSUME;
+}
+
+/* Return TRUE if the memory model is ACQUIRE. */
+inline bool
+is_mm_acquire (enum memmodel model)
+{
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_ACQUIRE;
+}
+
+/* Return TRUE if the memory model is RELEASE. */
+inline bool
+is_mm_release (enum memmodel model)
+{
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_RELEASE;
+}
+
+/* Return TRUE if the memory model is ACQ_REL. */
+inline bool
+is_mm_acq_rel (enum memmodel model)
+{
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_ACQ_REL;
+}
+
+/* Return TRUE if the memory model is SEQ_CST. */
+inline bool
+is_mm_seq_cst (enum memmodel model)
+{
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_SEQ_CST;
+}
+
+/* Return TRUE if the memory model is a SYNC variant. */
+inline bool
+is_mm_sync (enum memmodel model)
+{
+ return (model & MEMMODEL_SYNC);
+}
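+/* Illustrative sketch (not part of the header): a target expander
+   handed the raw integer value VAL of a __atomic_* memory-model
+   argument might dispatch as follows:
+
+     enum memmodel model = memmodel_from_int (val);
+     if (is_mm_seq_cst (model))
+       ... emit a full barrier ...
+     else if (is_mm_release (model) || is_mm_acq_rel (model))
+       ... emit a release barrier ...  */
+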
+
+#endif /* GCC_MEMMODEL_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/memory-block.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/memory-block.h
new file mode 100644
index 0000000..16d5d25
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/memory-block.h
@@ -0,0 +1,84 @@
+/* Shared pool of memory blocks for pool allocators.
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef MEMORY_BLOCK_H
+#define MEMORY_BLOCK_H
+
+/* Shared pool which allows other memory pools to reuse each other's
+   allocated memory blocks instead of calling free/malloc again.  */
+class memory_block_pool
+{
+public:
+ /* Blocks have fixed size. This is necessary for sharing. */
+ static const size_t block_size = 64 * 1024;
+ /* Number of blocks we keep in the freelists. */
+ static const size_t freelist_size = 1024 * 1024 / block_size;
+
+ memory_block_pool ();
+
+ static inline void *allocate () ATTRIBUTE_MALLOC;
+ static inline void release (void *);
+ static void trim (int nblocks = freelist_size);
+ void reduce_free_list (int);
+
+private:
+ /* memory_block_pool singleton instance, defined in memory-block.cc. */
+ static memory_block_pool instance;
+
+ struct block_list
+ {
+ block_list *m_next;
+ };
+
+ /* Free list. */
+ block_list *m_blocks;
+};
+
+/* Allocate a single block. Reuse a previously returned block, if possible. */
+inline void *
+memory_block_pool::allocate ()
+{
+ if (instance.m_blocks == NULL)
+ return XNEWVEC (char, block_size);
+
+ void *result = instance.m_blocks;
+ instance.m_blocks = instance.m_blocks->m_next;
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, block_size));
+ return result;
+}
+
+/* Return UNCAST_BLOCK to the pool. */
+inline void
+memory_block_pool::release (void *uncast_block)
+{
+ block_list *block = new (uncast_block) block_list;
+ block->m_next = instance.m_blocks;
+ instance.m_blocks = block;
+
+ VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)uncast_block
+ + sizeof (block_list),
+ block_size
+ - sizeof (block_list)));
+}
+
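+/* Illustrative sketch (not part of the header): a pool allocator pairs
+   the two static calls to recycle its backing storage:
+
+     void *block = memory_block_pool::allocate ();
+     ... carve the block_size bytes of BLOCK into pool entries ...
+     memory_block_pool::release (block);
+
+   release () keeps the block on the free list, so the next call to
+   allocate () can reuse it without another malloc.  */
+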
+extern void *mempool_obstack_chunk_alloc (size_t) ATTRIBUTE_MALLOC;
+extern void mempool_obstack_chunk_free (void *);
+
+#endif /* MEMORY_BLOCK_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/mode-classes.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/mode-classes.def
new file mode 100644
index 0000000..de42d7e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/mode-classes.def
@@ -0,0 +1,40 @@
+/* Machine mode class definitions for GCC.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#define MODE_CLASSES \
+ DEF_MODE_CLASS (MODE_RANDOM), /* other */ \
+ DEF_MODE_CLASS (MODE_CC), /* condition code in a register */ \
+ DEF_MODE_CLASS (MODE_INT), /* integer */ \
+ DEF_MODE_CLASS (MODE_PARTIAL_INT), /* integer with padding bits */ \
+ DEF_MODE_CLASS (MODE_FRACT), /* signed fractional number */ \
+ DEF_MODE_CLASS (MODE_UFRACT), /* unsigned fractional number */ \
+ DEF_MODE_CLASS (MODE_ACCUM), /* signed accumulator */ \
+ DEF_MODE_CLASS (MODE_UACCUM), /* unsigned accumulator */ \
+ DEF_MODE_CLASS (MODE_FLOAT), /* floating point */ \
+ DEF_MODE_CLASS (MODE_DECIMAL_FLOAT), /* decimal floating point */ \
+ DEF_MODE_CLASS (MODE_COMPLEX_INT), /* complex numbers */ \
+ DEF_MODE_CLASS (MODE_COMPLEX_FLOAT), \
+ DEF_MODE_CLASS (MODE_VECTOR_BOOL), /* vectors of single bits */ \
+ DEF_MODE_CLASS (MODE_VECTOR_INT), /* SIMD vectors */ \
+ DEF_MODE_CLASS (MODE_VECTOR_FRACT), /* SIMD vectors */ \
+ DEF_MODE_CLASS (MODE_VECTOR_UFRACT), /* SIMD vectors */ \
+ DEF_MODE_CLASS (MODE_VECTOR_ACCUM), /* SIMD vectors */ \
+ DEF_MODE_CLASS (MODE_VECTOR_UACCUM), /* SIMD vectors */ \
+ DEF_MODE_CLASS (MODE_VECTOR_FLOAT), \
+ DEF_MODE_CLASS (MODE_OPAQUE) /* opaque modes */
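+
+/* Illustrative sketch (not part of this file): consumers define
+   DEF_MODE_CLASS and expand the list above into whatever construct
+   they need, e.g. an enum:
+
+     #define DEF_MODE_CLASS(M) M
+     enum mode_class { MODE_CLASSES, MAX_MODE_CLASS };
+     #undef DEF_MODE_CLASS  */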
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/mux-utils.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/mux-utils.h
new file mode 100644
index 0000000..1023540
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/mux-utils.h
@@ -0,0 +1,251 @@
+// Multiplexer utilities
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef GCC_MUX_UTILS_H
+#define GCC_MUX_UTILS_H 1
+
+// A class that stores a choice "A or B", where A has type T1 * and B has
+// type T2 *. Both T1 and T2 must have an alignment greater than 1, since
+// the low bit is used to identify B over A. T1 and T2 can be the same.
+//
+// A can be a null pointer but B cannot.
+//
+// Barring the requirement that B must be nonnull, using the class is
+// equivalent to using:
+//
+// union { T1 *A; T2 *B; };
+//
+// and having a separate tag bit to indicate which alternative is active.
+// However, using this class can have two advantages over a union:
+//
+//  - It avoids the need to find somewhere to store the tag bit.
+//
+// - The compiler is aware that B cannot be null, which can make checks
+// of the form:
+//
+// if (auto *B = mux.dyn_cast<T2 *> ())
+//
+// more efficient. With a union-based representation, the dyn_cast
+// check could fail either because MUX is an A or because MUX is a
+// null B, both of which require a run-time test. With a pointer_mux,
+// only a check for MUX being A is needed.
+template<typename T1, typename T2 = T1>
+class pointer_mux
+{
+public:
+ // Return an A pointer with the given value.
+ static pointer_mux first (T1 *);
+
+ // Return a B pointer with the given (nonnull) value.
+ static pointer_mux second (T2 *);
+
+ pointer_mux () = default;
+
+ // Create a null A pointer.
+ pointer_mux (std::nullptr_t) : m_ptr (nullptr) {}
+
+ // Create an A or B pointer with the given value. This is only valid
+ // if T1 and T2 are distinct and if T can be resolved to exactly one
+ // of them.
+ template<typename T,
+ typename Enable = typename
+ std::enable_if<std::is_convertible<T *, T1 *>::value
+ != std::is_convertible<T *, T2 *>::value>::type>
+ pointer_mux (T *ptr);
+
+ // Return true unless the pointer is a null A pointer.
+ explicit operator bool () const { return m_ptr; }
+
+ // Assign A and B pointers respectively.
+ void set_first (T1 *ptr) { *this = first (ptr); }
+ void set_second (T2 *ptr) { *this = second (ptr); }
+
+ // Return true if the pointer is an A pointer.
+ bool is_first () const { return !(uintptr_t (m_ptr) & 1); }
+
+ // Return true if the pointer is a B pointer.
+ bool is_second () const { return uintptr_t (m_ptr) & 1; }
+
+ // Return the contents of the pointer, given that it is known to be
+ // an A pointer.
+ T1 *known_first () const { return reinterpret_cast<T1 *> (m_ptr); }
+
+ // Return the contents of the pointer, given that it is known to be
+ // a B pointer.
+ T2 *known_second () const { return reinterpret_cast<T2 *> (m_ptr - 1); }
+
+ // If the pointer is an A pointer, return its contents, otherwise
+ // return null. Thus a null return can mean that the pointer is
+ // either a null A pointer or a B pointer.
+ //
+ // If all A pointers are nonnull, it is more efficient to use:
+ //
+ // if (ptr.is_first ())
+ // ...use ptr.known_first ()...
+ //
+ // over:
+ //
+ // if (T1 *a = ptr.first_or_null ())
+ // ...use a...
+ T1 *first_or_null () const;
+
+ // If the pointer is a B pointer, return its contents, otherwise
+ // return null. Using:
+ //
+ // if (T1 *b = ptr.second_or_null ())
+ // ...use b...
+ //
+ // should be at least as efficient as:
+ //
+ // if (ptr.is_second ())
+ // ...use ptr.known_second ()...
+ T2 *second_or_null () const;
+
+ // Return true if the pointer is a T.
+ //
+ // This is only valid if T1 and T2 are distinct and if T can be
+ // resolved to exactly one of them. The condition is checked using
+ // a static assertion rather than SFINAE because it gives a clearer
+ // error message.
+ template<typename T>
+ bool is_a () const;
+
+ // Assert that the pointer is a T and return it as such. See is_a
+ // for the restrictions on T.
+ template<typename T>
+ T as_a () const;
+
+ // If the pointer is a T, return it as such, otherwise return null.
+ // See is_a for the restrictions on T.
+ template<typename T>
+ T dyn_cast () const;
+
+private:
+ pointer_mux (char *ptr) : m_ptr (ptr) {}
+
+ // Points to the first byte of an object for A pointers or the second
+ // byte of an object for B pointers. Using a pointer rather than a
+ // uintptr_t tells the compiler that second () can never return null,
+ // and that second_or_null () is only null if is_first ().
+ char *m_ptr;
+};
+
+template<typename T1, typename T2>
+inline pointer_mux<T1, T2>
+pointer_mux<T1, T2>::first (T1 *ptr)
+{
+ gcc_checking_assert (!(uintptr_t (ptr) & 1));
+ return reinterpret_cast<char *> (ptr);
+}
+
+template<typename T1, typename T2>
+inline pointer_mux<T1, T2>
+pointer_mux<T1, T2>::second (T2 *ptr)
+{
+ gcc_checking_assert (ptr && !(uintptr_t (ptr) & 1));
+ return reinterpret_cast<char *> (ptr) + 1;
+}
+
+template<typename T1, typename T2>
+template<typename T, typename Enable>
+inline pointer_mux<T1, T2>::pointer_mux (T *ptr)
+ : m_ptr (reinterpret_cast<char *> (ptr))
+{
+ if (std::is_convertible<T *, T2 *>::value)
+ {
+ gcc_checking_assert (m_ptr);
+ m_ptr += 1;
+ }
+}
+
+template<typename T1, typename T2>
+inline T1 *
+pointer_mux<T1, T2>::first_or_null () const
+{
+ return is_first () ? known_first () : nullptr;
+}
+
+template<typename T1, typename T2>
+inline T2 *
+pointer_mux<T1, T2>::second_or_null () const
+{
+ // Micro optimization that's effective as of GCC 11: compute the value
+ // of the second pointer as an integer and test that, so that the integer
+ // result can be reused as the pointer and so that all computation can
+ // happen before a branch on null. This reduces the number of branches
+ // needed for loops.
+ return (uintptr_t (m_ptr) - 1) & 1 ? nullptr : known_second ();
+}
+
+template<typename T1, typename T2>
+template<typename T>
+inline bool
+pointer_mux<T1, T2>::is_a () const
+{
+ static_assert (std::is_convertible<T1 *, T>::value
+ != std::is_convertible<T2 *, T>::value,
+ "Ambiguous pointer type");
+ if (std::is_convertible<T2 *, T>::value)
+ return is_second ();
+ else
+ return is_first ();
+}
+
+template<typename T1, typename T2>
+template<typename T>
+inline T
+pointer_mux<T1, T2>::as_a () const
+{
+ static_assert (std::is_convertible<T1 *, T>::value
+ != std::is_convertible<T2 *, T>::value,
+ "Ambiguous pointer type");
+ if (std::is_convertible<T2 *, T>::value)
+ {
+ gcc_checking_assert (is_second ());
+ return reinterpret_cast<T> (m_ptr - 1);
+ }
+ else
+ {
+ gcc_checking_assert (is_first ());
+ return reinterpret_cast<T> (m_ptr);
+ }
+}
+
+template<typename T1, typename T2>
+template<typename T>
+inline T
+pointer_mux<T1, T2>::dyn_cast () const
+{
+ static_assert (std::is_convertible<T1 *, T>::value
+ != std::is_convertible<T2 *, T>::value,
+ "Ambiguous pointer type");
+ if (std::is_convertible<T2 *, T>::value)
+ {
+ if (is_second ())
+ return reinterpret_cast<T> (m_ptr - 1);
+ }
+ else
+ {
+ if (is_first ())
+ return reinterpret_cast<T> (m_ptr);
+ }
+ return nullptr;
+}
+
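+// Illustrative sketch (not part of the header), assuming hypothetical
+// node types int_node and str_node, both with alignment greater than 1:
+//
+//   pointer_mux<int_node, str_node> mux = some_int_node;
+//   if (int_node *i = mux.dyn_cast<int_node *> ())
+//     ... MUX holds an int_node ...
+//   else if (str_node *s = mux.dyn_cast<str_node *> ())
+//     ... MUX holds a nonnull str_node ...
+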
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/objc/objc-tree.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/objc/objc-tree.def
new file mode 100644
index 0000000..2c6a135
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/objc/objc-tree.def
@@ -0,0 +1,76 @@
+/* This file contains the definitions and documentation for the
+ additional tree codes used in the Objective C front end (see tree.def
+ for the standard codes).
+ Copyright (C) 1990-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* Objective-C types. */
+DEFTREECODE (CLASS_INTERFACE_TYPE, "class_interface_type", tcc_type, 0)
+DEFTREECODE (CLASS_IMPLEMENTATION_TYPE, "class_implementation_type",
+ tcc_type, 0)
+DEFTREECODE (CATEGORY_INTERFACE_TYPE, "category_interface_type", tcc_type, 0)
+DEFTREECODE (CATEGORY_IMPLEMENTATION_TYPE,"category_implementation_type",
+ tcc_type, 0)
+DEFTREECODE (PROTOCOL_INTERFACE_TYPE, "protocol_interface_type", tcc_type, 0)
+
+/* Objective-C decls. */
+DEFTREECODE (KEYWORD_DECL, "keyword_decl", tcc_declaration, 0)
+DEFTREECODE (INSTANCE_METHOD_DECL, "instance_method_decl", tcc_declaration, 0)
+DEFTREECODE (CLASS_METHOD_DECL, "class_method_decl", tcc_declaration, 0)
+DEFTREECODE (PROPERTY_DECL, "property_decl", tcc_declaration, 0)
+
+/* Objective-C expressions. */
+DEFTREECODE (MESSAGE_SEND_EXPR, "message_send_expr", tcc_expression, 3)
+DEFTREECODE (CLASS_REFERENCE_EXPR, "class_reference_expr", tcc_expression, 1)
+
+/* This tree is used to represent the expression 'object.property',
+ where 'object' is an Objective-C object and 'property' is an
+ Objective-C property. Operand 0 is the object (the tree
+ representing the expression), and Operand 1 is the property (the
+ PROPERTY_DECL). Operand 2 is the 'getter' call, ready to be used;
+ we pregenerate it because it is hard to generate it properly later
+ on. Operand 3 records whether using the 'getter' call should
+ generate a deprecation warning or not.
+
+ A PROPERTY_REF tree needs to be transformed into 'setter' and
+ 'getter' calls at some point; at the moment this happens in two
+ places:
+
+ * if we detect that a modify expression is being applied to a
+ PROPERTY_REF, then we transform that into a 'getter' call (this
+ happens in build_modify_expr() or cp_build_modify_expr()).
+
+ * else, it will remain as a PROPERTY_REF until we get to
+ gimplification; at that point, we convert each PROPERTY_REF into
+ a 'getter' call during ObjC/ObjC++ gimplify. At that point, it
+ is quite hard to build a 'getter' call, but we have already built
+ it and we just need to swap Operand 2 in, and emit the deprecation
+ warnings from Operand 3 if needed.
+
+ Please note that when the Objective-C 2.0 "dot-syntax" 'object.component'
+ is encountered, where 'component' is not a property but there are valid
+ setter/getter methods for it, an artificial PROPERTY_DECL is generated
+ and used in the PROPERTY_REF. */
+DEFTREECODE (PROPERTY_REF, "property_ref", tcc_expression, 4)
+
+/*
+Local variables:
+mode:c
+End:
+*/
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/obstack-utils.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/obstack-utils.h
new file mode 100644
index 0000000..fde2211
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/obstack-utils.h
@@ -0,0 +1,86 @@
+// Obstack-related utilities.
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef GCC_OBSTACK_UTILS_H
+#define GCC_OBSTACK_UTILS_H
+
+// This RAII class automatically frees memory allocated on an obstack,
+// unless told not to via keep (). It automatically converts to an
+// obstack, so it can (optionally) be used in place of the obstack
+// to make the scoping clearer. For example:
+//
+// obstack_watermark watermark (ob);
+// auto *ptr1 = XOBNEW (watermark, struct1);
+// if (...)
+// // Frees ptr1.
+// return false;
+//
+// auto *ptr2 = XOBNEW (watermark, struct2);
+// if (...)
+// // Frees ptr1 and ptr2.
+// return false;
+//
+// // Retains ptr1 and ptr2.
+// watermark.keep ();
+//
+// auto *ptr3 = XOBNEW (watermark, struct3);
+// if (...)
+// // Frees ptr3.
+// return false;
+//
+// // Retains ptr3 (in addition to ptr1 and ptr2 above).
+// watermark.keep ();
+// return true;
+//
+// The move constructor makes it possible to transfer ownership to a caller:
+//
+// obstack_watermark
+// foo ()
+// {
+// obstack_watermark watermark (ob);
+// ...
+// return watermark;
+// }
+//
+// void
+// bar ()
+// {
+// // Inherit ownership of everything that foo allocated.
+// obstack_watermark watermark = foo ();
+// ...
+// }
+class obstack_watermark
+{
+public:
+ obstack_watermark (obstack *ob) : m_obstack (ob) { keep (); }
+ constexpr obstack_watermark (obstack_watermark &&) = default;
+ ~obstack_watermark () { obstack_free (m_obstack, m_start); }
+
+ operator obstack *() const { return m_obstack; }
+ void keep () { m_start = XOBNEWVAR (m_obstack, char, 0); }
+
+private:
+ DISABLE_COPY_AND_ASSIGN (obstack_watermark);
+
+protected:
+ obstack *m_obstack;
+ char *m_start;
+};
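+
+// A further usage sketch (illustrative; 'ob' is a hypothetical
+// obstack *): thanks to the conversion operator above, a watermark can
+// be passed directly wherever an obstack * is expected, including to
+// the obstack macros themselves:
+//
+// obstack_watermark watermark (ob);
+// obstack_grow (watermark, "abc", 3);
+// watermark.keep (); // retain the grown bytes past scope exit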
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/obstack.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/obstack.h
new file mode 100644
index 0000000..c05718f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/obstack.h
@@ -0,0 +1,535 @@
+/* obstack.h - object stack macros
+ Copyright (C) 1988-2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Summary:
+
+ All the apparent functions defined here are macros. The idea
+ is that you would use these pre-tested macros to solve a
+ very specific set of problems, and they would run fast.
+ Caution: no side-effects in arguments please!! They may be
+ evaluated MANY times!!
+
+ These macros operate a stack of objects. Each object starts life
+ small, and may grow to maturity. (Consider building a word syllable
+ by syllable.) An object can move while it is growing. Once it has
+ been "finished" it never changes address again. So the "top of the
+ stack" is typically an immature growing object, while the rest of the
+ stack is of mature, fixed size and fixed address objects.
+
+ These routines grab large chunks of memory, using a function you
+ supply, called 'obstack_chunk_alloc'. On occasion, they free chunks,
+ by calling 'obstack_chunk_free'. You must define them and declare
+ them before using any obstack macros.
+
+ Each independent stack is represented by a 'struct obstack'.
+ Each of the obstack macros expects a pointer to such a structure
+ as the first argument.
+
+ One motivation for this package is the problem of growing char strings
+ in symbol tables. Unless you are "fascist pig with a read-only mind"
+ --Gosper's immortal quote from HAKMEM item 154, out of context--you
+ would not like to put any arbitrary upper limit on the length of your
+ symbols.
+
+ In practice this often means you will build many short symbols and a
+ few long symbols. At the time you are reading a symbol you don't know
+ how long it is. One traditional method is to read a symbol into a
+ buffer, realloc()ating the buffer every time you try to read a symbol
+ that is longer than the buffer. This is beaut, but you still will
+ want to copy the symbol from the buffer to a more permanent
+ symbol-table entry, say, about half the time.
+
+ With obstacks, you can work differently. Use one obstack for all symbol
+ names. As you read a symbol, grow the name in the obstack gradually.
+ When the name is complete, finalize it. Then, if the symbol exists already,
+ free the newly read name.
+
+ The way we do this is to take a large chunk, allocating memory from
+ low addresses. When you want to build a symbol in the chunk you just
+ add chars above the current "high water mark" in the chunk. When you
+ have finished adding chars, because you got to the end of the symbol,
+ you know how long the chars are, and you can create a new object.
+ Mostly the chars will not burst over the highest address of the chunk,
+ because you would typically expect a chunk to be (say) 100 times as
+ long as an average object.
+
+ In case that isn't clear, when we have enough chars to make up
+ the object, THEY ARE ALREADY CONTIGUOUS IN THE CHUNK (guaranteed)
+ so we just point to it where it lies. No moving of chars is
+ needed and this is the second win: potentially long strings need
+ never be explicitly shuffled. Once an object is formed, it does not
+ change its address during its lifetime.
+
+ When the chars burst over a chunk boundary, we allocate a larger
+ chunk, and then copy the partly formed object from the end of the old
+ chunk to the beginning of the new larger chunk. We then carry on
+ accreting characters to the end of the object as we normally would.
+
+ A special macro is provided to add a single char at a time to a
+ growing object. This allows the use of register variables, which
+ break the ordinary 'growth' macro.
+
+ Summary:
+ We allocate large chunks.
+ We carve out one object at a time from the current chunk.
+ Once carved, an object never moves.
+ We are free to append data of any size to the currently
+ growing object.
+ Exactly one object is growing in an obstack at any one time.
+ You can run one obstack per control block.
+ You may have as many control blocks as you dare.
+ Because of the way we do it, you can "unwind" an obstack
+ back to a previous state. (You may remove objects much
+ as you would with a stack.)
+ */
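+
+/* A minimal usage sketch (illustrative, not part of the original
+ header). The caller must supply the two chunk hooks; here they are
+ simply mapped to malloc/free:
+
+ #include <stdlib.h>
+ #define obstack_chunk_alloc malloc
+ #define obstack_chunk_free free
+ #include "obstack.h"
+
+ struct obstack ob;
+ obstack_init (&ob);
+ obstack_grow (&ob, "sym", 3); // grow the current object
+ obstack_1grow (&ob, '\0'); // terminate it
+ char *name = (char *) obstack_finish (&ob); // object is now fixed
+ obstack_free (&ob, NULL); // free all objects and chunks
+ */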
+
+
+/* Don't do the contents of this file more than once. */
+
+#ifndef _OBSTACK_H
+#define _OBSTACK_H 1
+
+#ifndef _OBSTACK_INTERFACE_VERSION
+# define _OBSTACK_INTERFACE_VERSION 2
+#endif
+
+#include <stddef.h> /* For size_t and ptrdiff_t. */
+#include <string.h> /* For __GNU_LIBRARY__, and memcpy. */
+
+#if _OBSTACK_INTERFACE_VERSION == 1
+/* For binary compatibility with obstack version 1, which used "int"
+ and "long" for these two types. */
+# define _OBSTACK_SIZE_T unsigned int
+# define _CHUNK_SIZE_T unsigned long
+# define _OBSTACK_CAST(type, expr) ((type) (expr))
+#else
+/* Version 2 with sane types, especially for 64-bit hosts. */
+# define _OBSTACK_SIZE_T size_t
+# define _CHUNK_SIZE_T size_t
+# define _OBSTACK_CAST(type, expr) (expr)
+#endif
+
+/* If B is the base of an object addressed by P, return the result of
+ aligning P to the next multiple of A + 1. B and P must be of type
+ char *. A + 1 must be a power of 2. */
+
+#define __BPTR_ALIGN(B, P, A) ((B) + (((P) - (B) + (A)) & ~(A)))
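+
+/* Worked example (hypothetical addresses): with A == 7 (8-byte
+ alignment), B == 0x1000 and P == 0x1003, this yields
+ 0x1000 + ((0x3 + 7) & ~7) == 0x1008, the next multiple of 8
+ relative to B. */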
+
+/* Similar to __BPTR_ALIGN (B, P, A), except optimize the common case
+ where pointers can be converted to integers, aligned as integers,
+ and converted back again. If ptrdiff_t is narrower than a
+ pointer (e.g., the AS/400), play it safe and compute the alignment
+ relative to B. Otherwise, use the faster strategy of computing the
+ alignment relative to 0. */
+
+#define __PTR_ALIGN(B, P, A) \
+ (sizeof (ptrdiff_t) < sizeof (void *) ? __BPTR_ALIGN (B, P, A) \
+ : (char *) (((ptrdiff_t) (P) + (A)) & ~(A)))
+
+#ifndef __attribute_pure__
+# if defined __GNUC_MINOR__ && __GNUC__ * 1000 + __GNUC_MINOR__ >= 2096
+# define __attribute_pure__ __attribute__ ((__pure__))
+# else
+# define __attribute_pure__
+# endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct _obstack_chunk /* Lives at front of each chunk. */
+{
+ char *limit; /* 1 past end of this chunk */
+ struct _obstack_chunk *prev; /* address of prior chunk or NULL */
+ char contents[4]; /* objects begin here */
+};
+
+struct obstack /* control current object in current chunk */
+{
+ _CHUNK_SIZE_T chunk_size; /* preferred size to allocate chunks in */
+ struct _obstack_chunk *chunk; /* address of current struct obstack_chunk */
+ char *object_base; /* address of object we are building */
+ char *next_free; /* where to add next char to current object */
+ char *chunk_limit; /* address of char after current chunk */
+ union
+ {
+ _OBSTACK_SIZE_T i;
+ void *p;
+ } temp; /* Temporary for some macros. */
+ _OBSTACK_SIZE_T alignment_mask; /* Mask of alignment for each object. */
+
+ /* These prototypes vary based on 'use_extra_arg'. */
+ union
+ {
+ void *(*plain) (size_t);
+ void *(*extra) (void *, size_t);
+ } chunkfun;
+ union
+ {
+ void (*plain) (void *);
+ void (*extra) (void *, void *);
+ } freefun;
+
+ void *extra_arg; /* first arg for chunk alloc/dealloc funcs */
+ unsigned use_extra_arg : 1; /* chunk alloc/dealloc funcs take extra arg */
+ unsigned maybe_empty_object : 1; /* There is a possibility that the current
+ chunk contains a zero-length object. This
+ prevents freeing the chunk if we allocate
+ a bigger chunk to replace it. */
+ unsigned alloc_failed : 1; /* No longer used, as we now call the failed
+ handler on error, but retained for binary
+ compatibility. */
+};
+
+/* Declare the external functions we use; they are in obstack.c. */
+
+extern void _obstack_newchunk (struct obstack *, _OBSTACK_SIZE_T);
+extern void _obstack_free (struct obstack *, void *);
+extern int _obstack_begin (struct obstack *,
+ _OBSTACK_SIZE_T, _OBSTACK_SIZE_T,
+ void *(*) (size_t), void (*) (void *));
+extern int _obstack_begin_1 (struct obstack *,
+ _OBSTACK_SIZE_T, _OBSTACK_SIZE_T,
+ void *(*) (void *, size_t),
+ void (*) (void *, void *), void *);
+extern _OBSTACK_SIZE_T _obstack_memory_used (struct obstack *)
+ __attribute_pure__;
+
+
+/* Error handler called when 'obstack_chunk_alloc' failed to allocate
+ more memory. This can be set to a user defined function which
+ should either abort gracefully or use longjmp - but shouldn't
+ return. The default action is to print a message and abort. */
+extern void (*obstack_alloc_failed_handler) (void);
+
+/* Exit value used when 'print_and_abort' is used. */
+extern int obstack_exit_failure;
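+
+/* For example (an illustrative sketch, not part of the original
+ header), a program could install its own handler:
+
+ static void
+ my_obstack_failed (void)
+ {
+ fputs ("out of memory\n", stderr);
+ exit (obstack_exit_failure);
+ }
+
+ obstack_alloc_failed_handler = my_obstack_failed;
+ */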
+
+/* Pointer to beginning of object being allocated or to be allocated next.
+ Note that this might not be the final address of the object
+ because a new chunk might be needed to hold the final size. */
+
+#define obstack_base(h) ((void *) (h)->object_base)
+
+/* Size for allocating ordinary chunks. */
+
+#define obstack_chunk_size(h) ((h)->chunk_size)
+
+/* Pointer to next byte not yet allocated in current chunk. */
+
+#define obstack_next_free(h) ((void *) (h)->next_free)
+
+/* Mask specifying low bits that should be clear in address of an object. */
+
+#define obstack_alignment_mask(h) ((h)->alignment_mask)
+
+/* To prevent prototype warnings provide complete argument list. */
+#define obstack_init(h) \
+ _obstack_begin ((h), 0, 0, \
+ _OBSTACK_CAST (void *(*) (size_t), obstack_chunk_alloc), \
+ _OBSTACK_CAST (void (*) (void *), obstack_chunk_free))
+
+#define obstack_begin(h, size) \
+ _obstack_begin ((h), (size), 0, \
+ _OBSTACK_CAST (void *(*) (size_t), obstack_chunk_alloc), \
+ _OBSTACK_CAST (void (*) (void *), obstack_chunk_free))
+
+#define obstack_specify_allocation(h, size, alignment, chunkfun, freefun) \
+ _obstack_begin ((h), (size), (alignment), \
+ _OBSTACK_CAST (void *(*) (size_t), chunkfun), \
+ _OBSTACK_CAST (void (*) (void *), freefun))
+
+#define obstack_specify_allocation_with_arg(h, size, alignment, chunkfun, freefun, arg) \
+ _obstack_begin_1 ((h), (size), (alignment), \
+ _OBSTACK_CAST (void *(*) (void *, size_t), chunkfun), \
+ _OBSTACK_CAST (void (*) (void *, void *), freefun), arg)
+
+#define obstack_chunkfun(h, newchunkfun) \
+ ((void) ((h)->chunkfun.extra = (void *(*) (void *, size_t)) (newchunkfun)))
+
+#define obstack_freefun(h, newfreefun) \
+ ((void) ((h)->freefun.extra = (void (*) (void *, void *)) (newfreefun)))
+
+#define obstack_1grow_fast(h, achar) ((void) (*((h)->next_free)++ = (achar)))
+
+#define obstack_blank_fast(h, n) ((void) ((h)->next_free += (n)))
+
+#define obstack_memory_used(h) _obstack_memory_used (h)
+
+#if defined __GNUC__
+# if !defined __GNUC_MINOR__ || __GNUC__ * 1000 + __GNUC_MINOR__ < 2008
+# define __extension__
+# endif
+
+/* For GNU C, if not -traditional,
+ we can define these macros to compute all args only once
+ without using a global variable.
+ Also, we can avoid using the 'temp' slot, to make faster code. */
+
+# define obstack_object_size(OBSTACK) \
+ __extension__ \
+ ({ struct obstack const *__o = (OBSTACK); \
+ (_OBSTACK_SIZE_T) (__o->next_free - __o->object_base); })
+
+/* The local variable is named __o1 to avoid a shadowed variable
+ warning when invoked from other obstack macros. */
+# define obstack_room(OBSTACK) \
+ __extension__ \
+ ({ struct obstack const *__o1 = (OBSTACK); \
+ (_OBSTACK_SIZE_T) (__o1->chunk_limit - __o1->next_free); })
+
+# define obstack_make_room(OBSTACK, length) \
+ __extension__ \
+ ({ struct obstack *__o = (OBSTACK); \
+ _OBSTACK_SIZE_T __len = (length); \
+ if (obstack_room (__o) < __len) \
+ _obstack_newchunk (__o, __len); \
+ (void) 0; })
+
+# define obstack_empty_p(OBSTACK) \
+ __extension__ \
+ ({ struct obstack const *__o = (OBSTACK); \
+ (__o->chunk->prev == 0 \
+ && __o->next_free == __PTR_ALIGN ((char *) __o->chunk, \
+ __o->chunk->contents, \
+ __o->alignment_mask)); })
+
+# define obstack_grow(OBSTACK, where, length) \
+ __extension__ \
+ ({ struct obstack *__o = (OBSTACK); \
+ _OBSTACK_SIZE_T __len = (length); \
+ if (obstack_room (__o) < __len) \
+ _obstack_newchunk (__o, __len); \
+ memcpy (__o->next_free, where, __len); \
+ __o->next_free += __len; \
+ (void) 0; })
+
+# define obstack_grow0(OBSTACK, where, length) \
+ __extension__ \
+ ({ struct obstack *__o = (OBSTACK); \
+ _OBSTACK_SIZE_T __len = (length); \
+ if (obstack_room (__o) < __len + 1) \
+ _obstack_newchunk (__o, __len + 1); \
+ memcpy (__o->next_free, where, __len); \
+ __o->next_free += __len; \
+ *(__o->next_free)++ = 0; \
+ (void) 0; })
+
+# define obstack_1grow(OBSTACK, datum) \
+ __extension__ \
+ ({ struct obstack *__o = (OBSTACK); \
+ if (obstack_room (__o) < 1) \
+ _obstack_newchunk (__o, 1); \
+ obstack_1grow_fast (__o, datum); })
+
+/* These assume that the obstack alignment is good enough for pointers
+ or ints, and that the data added so far to the current object
+ shares that much alignment. */
+
+# define obstack_ptr_grow(OBSTACK, datum) \
+ __extension__ \
+ ({ struct obstack *__o = (OBSTACK); \
+ if (obstack_room (__o) < sizeof (void *)) \
+ _obstack_newchunk (__o, sizeof (void *)); \
+ obstack_ptr_grow_fast (__o, datum); })
+
+# define obstack_int_grow(OBSTACK, datum) \
+ __extension__ \
+ ({ struct obstack *__o = (OBSTACK); \
+ if (obstack_room (__o) < sizeof (int)) \
+ _obstack_newchunk (__o, sizeof (int)); \
+ obstack_int_grow_fast (__o, datum); })
+
+# define obstack_ptr_grow_fast(OBSTACK, aptr) \
+ __extension__ \
+ ({ struct obstack *__o1 = (OBSTACK); \
+ void *__p1 = __o1->next_free; \
+ *(const void **) __p1 = (aptr); \
+ __o1->next_free += sizeof (const void *); \
+ (void) 0; })
+
+# define obstack_int_grow_fast(OBSTACK, aint) \
+ __extension__ \
+ ({ struct obstack *__o1 = (OBSTACK); \
+ void *__p1 = __o1->next_free; \
+ *(int *) __p1 = (aint); \
+ __o1->next_free += sizeof (int); \
+ (void) 0; })
+
+# define obstack_blank(OBSTACK, length) \
+ __extension__ \
+ ({ struct obstack *__o = (OBSTACK); \
+ _OBSTACK_SIZE_T __len = (length); \
+ if (obstack_room (__o) < __len) \
+ _obstack_newchunk (__o, __len); \
+ obstack_blank_fast (__o, __len); })
+
+# define obstack_alloc(OBSTACK, length) \
+ __extension__ \
+ ({ struct obstack *__h = (OBSTACK); \
+ obstack_blank (__h, (length)); \
+ obstack_finish (__h); })
+
+# define obstack_copy(OBSTACK, where, length) \
+ __extension__ \
+ ({ struct obstack *__h = (OBSTACK); \
+ obstack_grow (__h, (where), (length)); \
+ obstack_finish (__h); })
+
+# define obstack_copy0(OBSTACK, where, length) \
+ __extension__ \
+ ({ struct obstack *__h = (OBSTACK); \
+ obstack_grow0 (__h, (where), (length)); \
+ obstack_finish (__h); })
+
+/* The local variable is named __o1 to avoid a shadowed variable
+ warning when invoked from other obstack macros, typically obstack_free. */
+# define obstack_finish(OBSTACK) \
+ __extension__ \
+ ({ struct obstack *__o1 = (OBSTACK); \
+ void *__value = (void *) __o1->object_base; \
+ if (__o1->next_free == __value) \
+ __o1->maybe_empty_object = 1; \
+ __o1->next_free \
+ = __PTR_ALIGN (__o1->object_base, __o1->next_free, \
+ __o1->alignment_mask); \
+ if ((size_t) (__o1->next_free - (char *) __o1->chunk) \
+ > (size_t) (__o1->chunk_limit - (char *) __o1->chunk)) \
+ __o1->next_free = __o1->chunk_limit; \
+ __o1->object_base = __o1->next_free; \
+ __value; })
+
+# define obstack_free(OBSTACK, OBJ) \
+ __extension__ \
+ ({ struct obstack *__o = (OBSTACK); \
+ void *__obj = (void *) (OBJ); \
+ if (__obj > (void *) __o->chunk && __obj < (void *) __o->chunk_limit) \
+ __o->next_free = __o->object_base = (char *) __obj; \
+ else \
+ _obstack_free (__o, __obj); })
+
+#else /* not __GNUC__ */
+
+# define obstack_object_size(h) \
+ ((_OBSTACK_SIZE_T) ((h)->next_free - (h)->object_base))
+
+# define obstack_room(h) \
+ ((_OBSTACK_SIZE_T) ((h)->chunk_limit - (h)->next_free))
+
+# define obstack_empty_p(h) \
+ ((h)->chunk->prev == 0 \
+ && (h)->next_free == __PTR_ALIGN ((char *) (h)->chunk, \
+ (h)->chunk->contents, \
+ (h)->alignment_mask))
+
+/* Note that the call to _obstack_newchunk is enclosed in (..., 0)
+ so that we can avoid having void expressions
+ in the arms of the conditional expression.
+ Casting the third operand to void was tried before,
+ but some compilers won't accept it. */
+
+# define obstack_make_room(h, length) \
+ ((h)->temp.i = (length), \
+ ((obstack_room (h) < (h)->temp.i) \
+ ? (_obstack_newchunk (h, (h)->temp.i), 0) : 0), \
+ (void) 0)
+
+# define obstack_grow(h, where, length) \
+ ((h)->temp.i = (length), \
+ ((obstack_room (h) < (h)->temp.i) \
+ ? (_obstack_newchunk ((h), (h)->temp.i), 0) : 0), \
+ memcpy ((h)->next_free, where, (h)->temp.i), \
+ (h)->next_free += (h)->temp.i, \
+ (void) 0)
+
+# define obstack_grow0(h, where, length) \
+ ((h)->temp.i = (length), \
+ ((obstack_room (h) < (h)->temp.i + 1) \
+ ? (_obstack_newchunk ((h), (h)->temp.i + 1), 0) : 0), \
+ memcpy ((h)->next_free, where, (h)->temp.i), \
+ (h)->next_free += (h)->temp.i, \
+ *((h)->next_free)++ = 0, \
+ (void) 0)
+
+# define obstack_1grow(h, datum) \
+ (((obstack_room (h) < 1) \
+ ? (_obstack_newchunk ((h), 1), 0) : 0), \
+ obstack_1grow_fast (h, datum))
+
+# define obstack_ptr_grow(h, datum) \
+ (((obstack_room (h) < sizeof (char *)) \
+ ? (_obstack_newchunk ((h), sizeof (char *)), 0) : 0), \
+ obstack_ptr_grow_fast (h, datum))
+
+# define obstack_int_grow(h, datum) \
+ (((obstack_room (h) < sizeof (int)) \
+ ? (_obstack_newchunk ((h), sizeof (int)), 0) : 0), \
+ obstack_int_grow_fast (h, datum))
+
+# define obstack_ptr_grow_fast(h, aptr) \
+ (((const void **) ((h)->next_free += sizeof (void *)))[-1] = (aptr), \
+ (void) 0)
+
+# define obstack_int_grow_fast(h, aint) \
+ (((int *) ((h)->next_free += sizeof (int)))[-1] = (aint), \
+ (void) 0)
+
+# define obstack_blank(h, length) \
+ ((h)->temp.i = (length), \
+ ((obstack_room (h) < (h)->temp.i) \
+ ? (_obstack_newchunk ((h), (h)->temp.i), 0) : 0), \
+ obstack_blank_fast (h, (h)->temp.i))
+
+# define obstack_alloc(h, length) \
+ (obstack_blank ((h), (length)), obstack_finish ((h)))
+
+# define obstack_copy(h, where, length) \
+ (obstack_grow ((h), (where), (length)), obstack_finish ((h)))
+
+# define obstack_copy0(h, where, length) \
+ (obstack_grow0 ((h), (where), (length)), obstack_finish ((h)))
+
+# define obstack_finish(h) \
+ (((h)->next_free == (h)->object_base \
+ ? (((h)->maybe_empty_object = 1), 0) \
+ : 0), \
+ (h)->temp.p = (h)->object_base, \
+ (h)->next_free \
+ = __PTR_ALIGN ((h)->object_base, (h)->next_free, \
+ (h)->alignment_mask), \
+ (((size_t) ((h)->next_free - (char *) (h)->chunk) \
+ > (size_t) ((h)->chunk_limit - (char *) (h)->chunk)) \
+ ? ((h)->next_free = (h)->chunk_limit) : 0), \
+ (h)->object_base = (h)->next_free, \
+ (h)->temp.p)
+
+# define obstack_free(h, obj) \
+ ((h)->temp.p = (void *) (obj), \
+ (((h)->temp.p > (void *) (h)->chunk \
+ && (h)->temp.p < (void *) (h)->chunk_limit) \
+ ? (void) ((h)->next_free = (h)->object_base = (char *) (h)->temp.p) \
+ : _obstack_free ((h), (h)->temp.p)))
+
+#endif /* not __GNUC__ */
+
+#ifdef __cplusplus
+} /* C++ */
+#endif
+
+#endif /* _OBSTACK_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-builtins.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-builtins.def
new file mode 100644
index 0000000..e0f0326
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-builtins.def
@@ -0,0 +1,472 @@
+/* This file contains the definitions and documentation for the
+ Offloading and Multi Processing builtins used in the GNU compiler.
+ Copyright (C) 2005-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Before including this file, you should define the following macros:
+
+ DEF_GOACC_BUILTIN (ENUM, NAME, TYPE, ATTRS)
+ DEF_GOACC_BUILTIN_COMPILER (ENUM, NAME, TYPE, ATTRS)
+ DEF_GOACC_BUILTIN_ONLY (ENUM, NAME, TYPE, ATTRS)
+ DEF_GOMP_BUILTIN (ENUM, NAME, TYPE, ATTRS)
+
+ See builtins.def for details. */
+
+/* The reason why they aren't in gcc/builtins.def is that the Fortran front end
+ doesn't source those. */
+
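+/* For illustration (not part of this file): consumers expand these
+ entries X-macro style. A sketch that builds an enum of the builtin
+ codes ('omp_builtin_code' is a hypothetical name):
+
+ #define DEF_GOACC_BUILTIN(ENUM, NAME, TYPE, ATTRS) ENUM,
+ #define DEF_GOACC_BUILTIN_COMPILER(ENUM, NAME, TYPE, ATTRS) ENUM,
+ #define DEF_GOACC_BUILTIN_ONLY(ENUM, NAME, TYPE, ATTRS) ENUM,
+ #define DEF_GOMP_BUILTIN(ENUM, NAME, TYPE, ATTRS) ENUM,
+ enum omp_builtin_code {
+ #include "omp-builtins.def"
+ OMP_BUILTIN_CODE_LAST
+ };
+ #undef DEF_GOACC_BUILTIN
+ #undef DEF_GOACC_BUILTIN_COMPILER
+ #undef DEF_GOACC_BUILTIN_ONLY
+ #undef DEF_GOMP_BUILTIN
+ */
+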
+DEF_GOACC_BUILTIN (BUILT_IN_GOACC_DATA_START, "GOACC_data_start",
+ BT_FN_VOID_INT_SIZE_PTR_PTR_PTR, ATTR_NOTHROW_LIST)
+DEF_GOACC_BUILTIN (BUILT_IN_GOACC_DATA_END, "GOACC_data_end",
+ BT_FN_VOID, ATTR_NOTHROW_LIST)
+DEF_GOACC_BUILTIN (BUILT_IN_GOACC_ENTER_DATA, "GOACC_enter_data",
+ BT_FN_VOID_INT_SIZE_PTR_PTR_PTR_INT_INT_VAR,
+ ATTR_NOTHROW_LIST)
+DEF_GOACC_BUILTIN (BUILT_IN_GOACC_EXIT_DATA, "GOACC_exit_data",
+ BT_FN_VOID_INT_SIZE_PTR_PTR_PTR_INT_INT_VAR,
+ ATTR_NOTHROW_LIST)
+DEF_GOACC_BUILTIN (BUILT_IN_GOACC_PARALLEL, "GOACC_parallel_keyed",
+ BT_FN_VOID_INT_OMPFN_SIZE_PTR_PTR_PTR_VAR,
+ ATTR_NOTHROW_LIST)
+DEF_GOACC_BUILTIN (BUILT_IN_GOACC_UPDATE, "GOACC_update",
+ BT_FN_VOID_INT_SIZE_PTR_PTR_PTR_INT_INT_VAR,
+ ATTR_NOTHROW_LIST)
+DEF_GOACC_BUILTIN (BUILT_IN_GOACC_WAIT, "GOACC_wait",
+ BT_FN_VOID_INT_INT_VAR,
+ ATTR_NOTHROW_LIST)
+DEF_GOACC_BUILTIN (BUILT_IN_GOACC_DECLARE, "GOACC_declare",
+ BT_FN_VOID_INT_SIZE_PTR_PTR_PTR, ATTR_NOTHROW_LIST)
+
+DEF_GOACC_BUILTIN_COMPILER (BUILT_IN_ACC_ON_DEVICE, "acc_on_device",
+ BT_FN_INT_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
+
+DEF_GOACC_BUILTIN_ONLY (BUILT_IN_GOACC_PARLEVEL_ID, "goacc_parlevel_id",
+ BT_FN_INT_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOACC_BUILTIN_ONLY (BUILT_IN_GOACC_PARLEVEL_SIZE, "goacc_parlevel_size",
+ BT_FN_INT_INT, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_GOACC_BUILTIN_ONLY (BUILT_IN_GOACC_BARRIER, "GOACC_barrier",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOACC_BUILTIN_ONLY (BUILT_IN_GOACC_SINGLE_START, "GOACC_single_start",
+ BT_FN_BOOL, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOACC_BUILTIN_ONLY (BUILT_IN_GOACC_SINGLE_COPY_START, "GOACC_single_copy_start",
+ BT_FN_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOACC_BUILTIN_ONLY (BUILT_IN_GOACC_SINGLE_COPY_END, "GOACC_single_copy_end",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_GOMP_BUILTIN (BUILT_IN_OMP_GET_THREAD_NUM, "omp_get_thread_num",
+ BT_FN_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_OMP_GET_NUM_THREADS, "omp_get_num_threads",
+ BT_FN_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_OMP_GET_TEAM_NUM, "omp_get_team_num",
+ BT_FN_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_OMP_GET_NUM_TEAMS, "omp_get_num_teams",
+ BT_FN_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
+
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_ATOMIC_START, "GOMP_atomic_start",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_ATOMIC_END, "GOMP_atomic_end",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_BARRIER, "GOMP_barrier",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_BARRIER_CANCEL, "GOMP_barrier_cancel",
+ BT_FN_BOOL, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASKWAIT, "GOMP_taskwait",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASKWAIT_DEPEND, "GOMP_taskwait_depend",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASKWAIT_DEPEND_NOWAIT,
+ "GOMP_taskwait_depend_nowait",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASKYIELD, "GOMP_taskyield",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASKGROUP_START, "GOMP_taskgroup_start",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASKGROUP_END, "GOMP_taskgroup_end",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_CANCEL, "GOMP_cancel",
+ BT_FN_BOOL_INT_BOOL, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_CANCELLATION_POINT, "GOMP_cancellation_point",
+ BT_FN_BOOL_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_CRITICAL_START, "GOMP_critical_start",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_CRITICAL_END, "GOMP_critical_end",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_CRITICAL_NAME_START,
+ "GOMP_critical_name_start",
+ BT_FN_VOID_PTRPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_CRITICAL_NAME_END,
+ "GOMP_critical_name_end",
+ BT_FN_VOID_PTRPTR, ATTR_NOTHROW_LEAF_LIST)
+/* NOTE: Do not change the order of BUILT_IN_GOMP_LOOP_*_START. They
+ are used in index arithmetic with enum omp_clause_schedule_kind
+ in omp-low.cc. */
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_STATIC_START,
+ "GOMP_loop_static_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_DYNAMIC_START,
+ "GOMP_loop_dynamic_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_GUIDED_START,
+ "GOMP_loop_guided_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_RUNTIME_START,
+ "GOMP_loop_runtime_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START,
+ "GOMP_loop_nonmonotonic_dynamic_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_NONMONOTONIC_GUIDED_START,
+ "GOMP_loop_nonmonotonic_guided_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_NONMONOTONIC_RUNTIME_START,
+ "GOMP_loop_nonmonotonic_runtime_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START,
+ "GOMP_loop_maybe_nonmonotonic_runtime_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ORDERED_STATIC_START,
+ "GOMP_loop_ordered_static_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ORDERED_DYNAMIC_START,
+ "GOMP_loop_ordered_dynamic_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ORDERED_GUIDED_START,
+ "GOMP_loop_ordered_guided_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ORDERED_RUNTIME_START,
+ "GOMP_loop_ordered_runtime_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_DOACROSS_STATIC_START,
+ "GOMP_loop_doacross_static_start",
+ BT_FN_BOOL_UINT_LONGPTR_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_DOACROSS_DYNAMIC_START,
+ "GOMP_loop_doacross_dynamic_start",
+ BT_FN_BOOL_UINT_LONGPTR_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_DOACROSS_GUIDED_START,
+ "GOMP_loop_doacross_guided_start",
+ BT_FN_BOOL_UINT_LONGPTR_LONG_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_DOACROSS_RUNTIME_START,
+ "GOMP_loop_doacross_runtime_start",
+ BT_FN_BOOL_UINT_LONGPTR_LONGPTR_LONGPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_START,
+ "GOMP_loop_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ORDERED_START,
+ "GOMP_loop_ordered_start",
+ BT_FN_BOOL_LONG_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_DOACROSS_START,
+ "GOMP_loop_doacross_start",
+ BT_FN_BOOL_UINT_LONGPTR_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_STATIC_NEXT, "GOMP_loop_static_next",
+ BT_FN_BOOL_LONGPTR_LONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_DYNAMIC_NEXT, "GOMP_loop_dynamic_next",
+ BT_FN_BOOL_LONGPTR_LONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_GUIDED_NEXT, "GOMP_loop_guided_next",
+ BT_FN_BOOL_LONGPTR_LONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_RUNTIME_NEXT, "GOMP_loop_runtime_next",
+ BT_FN_BOOL_LONGPTR_LONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_NONMONOTONIC_DYNAMIC_NEXT,
+ "GOMP_loop_nonmonotonic_dynamic_next",
+ BT_FN_BOOL_LONGPTR_LONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_NONMONOTONIC_GUIDED_NEXT,
+ "GOMP_loop_nonmonotonic_guided_next",
+ BT_FN_BOOL_LONGPTR_LONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_NONMONOTONIC_RUNTIME_NEXT,
+ "GOMP_loop_nonmonotonic_runtime_next",
+ BT_FN_BOOL_LONGPTR_LONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_NEXT,
+ "GOMP_loop_maybe_nonmonotonic_runtime_next",
+ BT_FN_BOOL_LONGPTR_LONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ORDERED_STATIC_NEXT,
+ "GOMP_loop_ordered_static_next",
+ BT_FN_BOOL_LONGPTR_LONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ORDERED_DYNAMIC_NEXT,
+ "GOMP_loop_ordered_dynamic_next",
+ BT_FN_BOOL_LONGPTR_LONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ORDERED_GUIDED_NEXT,
+ "GOMP_loop_ordered_guided_next",
+ BT_FN_BOOL_LONGPTR_LONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ORDERED_RUNTIME_NEXT,
+ "GOMP_loop_ordered_runtime_next",
+ BT_FN_BOOL_LONGPTR_LONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_STATIC_START,
+ "GOMP_loop_ull_static_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_DYNAMIC_START,
+ "GOMP_loop_ull_dynamic_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_GUIDED_START,
+ "GOMP_loop_ull_guided_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_RUNTIME_START,
+ "GOMP_loop_ull_runtime_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START,
+ "GOMP_loop_ull_nonmonotonic_dynamic_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START,
+ "GOMP_loop_ull_nonmonotonic_guided_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START,
+ "GOMP_loop_ull_nonmonotonic_runtime_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START,
+ "GOMP_loop_ull_maybe_nonmonotonic_runtime_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_STATIC_START,
+ "GOMP_loop_ull_ordered_static_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START,
+ "GOMP_loop_ull_ordered_dynamic_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_GUIDED_START,
+ "GOMP_loop_ull_ordered_guided_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_RUNTIME_START,
+ "GOMP_loop_ull_ordered_runtime_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_DOACROSS_STATIC_START,
+ "GOMP_loop_ull_doacross_static_start",
+ BT_FN_BOOL_UINT_ULLPTR_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START,
+ "GOMP_loop_ull_doacross_dynamic_start",
+ BT_FN_BOOL_UINT_ULLPTR_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_DOACROSS_GUIDED_START,
+ "GOMP_loop_ull_doacross_guided_start",
+ BT_FN_BOOL_UINT_ULLPTR_ULL_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START,
+ "GOMP_loop_ull_doacross_runtime_start",
+ BT_FN_BOOL_UINT_ULLPTR_ULLPTR_ULLPTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_START,
+ "GOMP_loop_ull_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_START,
+ "GOMP_loop_ull_ordered_start",
+ BT_FN_BOOL_BOOL_ULL_ULL_ULL_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_DOACROSS_START,
+ "GOMP_loop_ull_doacross_start",
+ BT_FN_BOOL_UINT_ULLPTR_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT,
+ "GOMP_loop_ull_static_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_DYNAMIC_NEXT,
+ "GOMP_loop_ull_dynamic_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_GUIDED_NEXT,
+ "GOMP_loop_ull_guided_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_RUNTIME_NEXT,
+ "GOMP_loop_ull_runtime_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_NEXT,
+ "GOMP_loop_ull_nonmonotonic_dynamic_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_NEXT,
+ "GOMP_loop_ull_nonmonotonic_guided_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_NEXT,
+ "GOMP_loop_ull_nonmonotonic_runtime_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_NEXT,
+ "GOMP_loop_ull_maybe_nonmonotonic_runtime_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT,
+ "GOMP_loop_ull_ordered_static_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT,
+ "GOMP_loop_ull_ordered_dynamic_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT,
+ "GOMP_loop_ull_ordered_guided_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT,
+ "GOMP_loop_ull_ordered_runtime_next",
+ BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, ATTR_NOTHROW_LEAF_LIST)
+/* NOTE: Do not change the order of BUILT_IN_GOMP_PARALLEL_LOOP_*.
+ They are used in index arithmetic with enum omp_clause_schedule_kind
+ in omp-low.cc. */
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_PARALLEL_LOOP_STATIC,
+ "GOMP_parallel_loop_static",
+ BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_PARALLEL_LOOP_DYNAMIC,
+ "GOMP_parallel_loop_dynamic",
+ BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_PARALLEL_LOOP_GUIDED,
+ "GOMP_parallel_loop_guided",
+ BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_PARALLEL_LOOP_RUNTIME,
+ "GOMP_parallel_loop_runtime",
+ BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_PARALLEL_LOOP_NONMONOTONIC_DYNAMIC,
+ "GOMP_parallel_loop_nonmonotonic_dynamic",
+ BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_PARALLEL_LOOP_NONMONOTONIC_GUIDED,
+ "GOMP_parallel_loop_nonmonotonic_guided",
+ BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_PARALLEL_LOOP_NONMONOTONIC_RUNTIME,
+ "GOMP_parallel_loop_nonmonotonic_runtime",
+ BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_PARALLEL_LOOP_MAYBE_NONMONOTONIC_RUNTIME,
+ "GOMP_parallel_loop_maybe_nonmonotonic_runtime",
+ BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_END, "GOMP_loop_end",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_END_CANCEL, "GOMP_loop_end_cancel",
+ BT_FN_BOOL, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_LOOP_END_NOWAIT, "GOMP_loop_end_nowait",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_ORDERED_START, "GOMP_ordered_start",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_ORDERED_END, "GOMP_ordered_end",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_DOACROSS_POST, "GOMP_doacross_post",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_DOACROSS_WAIT, "GOMP_doacross_wait",
+ BT_FN_VOID_LONG_VAR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_DOACROSS_ULL_POST, "GOMP_doacross_ull_post",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_DOACROSS_ULL_WAIT, "GOMP_doacross_ull_wait",
+ BT_FN_VOID_ULL_VAR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_PARALLEL, "GOMP_parallel",
+ BT_FN_VOID_OMPFN_PTR_UINT_UINT, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_PARALLEL_REDUCTIONS,
+ "GOMP_parallel_reductions",
+ BT_FN_UINT_OMPFN_PTR_UINT_UINT, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASK, "GOMP_task",
+ BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR_INT_PTR,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASKLOOP, "GOMP_taskloop",
+ BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_UINT_LONG_INT_LONG_LONG_LONG,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASKLOOP_ULL, "GOMP_taskloop_ull",
+ BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_UINT_LONG_INT_ULL_ULL_ULL,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SECTIONS_START, "GOMP_sections_start",
+ BT_FN_UINT_UINT, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SECTIONS2_START, "GOMP_sections2_start",
+ BT_FN_UINT_UINT_PTR_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SECTIONS_NEXT, "GOMP_sections_next",
+ BT_FN_UINT, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_PARALLEL_SECTIONS,
+ "GOMP_parallel_sections",
+ BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SECTIONS_END, "GOMP_sections_end",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SECTIONS_END_CANCEL,
+ "GOMP_sections_end_cancel",
+ BT_FN_BOOL, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SECTIONS_END_NOWAIT,
+ "GOMP_sections_end_nowait",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SINGLE_START, "GOMP_single_start",
+ BT_FN_BOOL, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SINGLE_COPY_START, "GOMP_single_copy_start",
+ BT_FN_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SINGLE_COPY_END, "GOMP_single_copy_end",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SCOPE_START, "GOMP_scope_start",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_OFFLOAD_REGISTER, "GOMP_offload_register_ver",
+ BT_FN_VOID_UINT_PTR_INT_PTR, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_OFFLOAD_UNREGISTER,
+ "GOMP_offload_unregister_ver",
+ BT_FN_VOID_UINT_PTR_INT_PTR, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TARGET, "GOMP_target_ext",
+ BT_FN_VOID_INT_OMPFN_SIZE_PTR_PTR_PTR_UINT_PTR_PTR,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TARGET_DATA, "GOMP_target_data_ext",
+ BT_FN_VOID_INT_SIZE_PTR_PTR_PTR, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TARGET_END_DATA, "GOMP_target_end_data",
+ BT_FN_VOID, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TARGET_UPDATE, "GOMP_target_update_ext",
+ BT_FN_VOID_INT_SIZE_PTR_PTR_PTR_UINT_PTR,
+ ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA,
+ "GOMP_target_enter_exit_data",
+ BT_FN_VOID_INT_SIZE_PTR_PTR_PTR_UINT_PTR, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TEAMS4, "GOMP_teams4",
+ BT_FN_BOOL_UINT_UINT_UINT_BOOL, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TEAMS_REG, "GOMP_teams_reg",
+ BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT, ATTR_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASKGROUP_REDUCTION_REGISTER,
+ "GOMP_taskgroup_reduction_register",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASKGROUP_REDUCTION_UNREGISTER,
+ "GOMP_taskgroup_reduction_unregister",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_TASK_REDUCTION_REMAP,
+ "GOMP_task_reduction_remap",
+ BT_FN_VOID_SIZE_SIZE_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_WORKSHARE_TASK_REDUCTION_UNREGISTER,
+ "GOMP_workshare_task_reduction_unregister",
+ BT_FN_VOID_BOOL, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_ALLOC,
+ "GOMP_alloc", BT_FN_PTR_SIZE_SIZE_PTRMODE,
+ ATTR_ALLOC_WARN_UNUSED_RESULT_SIZE_2_NOTHROW_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_FREE,
+ "GOMP_free", BT_FN_VOID_PTR_PTRMODE, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_WARNING, "GOMP_warning",
+ BT_FN_VOID_CONST_PTR_SIZE, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_ERROR, "GOMP_error",
+ BT_FN_VOID_CONST_PTR_SIZE, ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-expand.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-expand.h
new file mode 100644
index 0000000..d95d19a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-expand.h
@@ -0,0 +1,32 @@
+/* Expansion pass for OMP directives. Outlines regions of certain OMP
+ directives to separate functions, converts others into explicit calls to the
+ runtime library (libgomp) and so forth.
+
+Copyright (C) 2005-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OMP_EXPAND_H
+#define GCC_OMP_EXPAND_H
+
+struct omp_region;
+extern void omp_expand_local (basic_block head);
+extern void omp_free_regions (void);
+extern bool omp_make_gimple_edges (basic_block bb, struct omp_region **region,
+ int *region_idx);
+
+#endif /* GCC_OMP_EXPAND_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-general.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-general.h
new file mode 100644
index 0000000..92717db
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-general.h
@@ -0,0 +1,155 @@
+/* General types and functions that are useful for processing of OpenMP,
+ OpenACC and similar directives at various stages of compilation.
+
+ Copyright (C) 2005-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OMP_GENERAL_H
+#define GCC_OMP_GENERAL_H
+
+#include "gomp-constants.h"
+
+/* Flags for an OpenACC loop. */
+
+enum oacc_loop_flags {
+ OLF_SEQ = 1u << 0, /* Explicitly sequential */
+ OLF_AUTO = 1u << 1, /* Compiler chooses axes. */
+ OLF_INDEPENDENT = 1u << 2, /* Iterations are known independent. */
+ OLF_GANG_STATIC = 1u << 3, /* Gang partitioning is static (has op). */
+ OLF_TILE = 1u << 4, /* Tiled loop. */
+ OLF_REDUCTION = 1u << 5, /* Reduction loop. */
+
+ /* Explicitly specified loop axes. */
+ OLF_DIM_BASE = 6,
+ OLF_DIM_GANG = 1u << (OLF_DIM_BASE + GOMP_DIM_GANG),
+ OLF_DIM_WORKER = 1u << (OLF_DIM_BASE + GOMP_DIM_WORKER),
+ OLF_DIM_VECTOR = 1u << (OLF_DIM_BASE + GOMP_DIM_VECTOR),
+
+ OLF_MAX = OLF_DIM_BASE + GOMP_DIM_MAX
+};
+
+/* A structure holding the elements of:
+ for (V = N1; V cond N2; V += STEP) [...]
+ or for non-rectangular loops:
+ for (V = M1 * W + N1; V cond M2 * W + N2; V += STEP)
+ where W is V of the OUTER-th loop (e.g. for OUTER 1 it is the index
+ variable of the immediately surrounding loop).
+ NON_RECT_REFERENCED is true for loops referenced by loops
+ with non-NULL M1 or M2. */
+
+struct omp_for_data_loop
+{
+ tree v, n1, n2, step, m1, m2;
+ enum tree_code cond_code;
+ int outer;
+ bool non_rect_referenced;
+};
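+
+/* Worked example (illustrative): in the non-rectangular nest
+
+ for (i = 0; i < n; i++)
+ for (j = 0; j < i; j++)
+
+ the inner loop has V = j, N1 = 0, M1 = NULL, N2 = 0 and M2 = 1
+ (its bound is 1 * i + 0), with cond_code LT_EXPR and OUTER = 1,
+ while the outer loop gets NON_RECT_REFERENCED set. */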
+
+/* A structure describing the main elements of a parallel loop. */
+
+struct omp_for_data
+{
+ struct omp_for_data_loop loop;
+ tree chunk_size;
+ gomp_for *for_stmt;
+ tree pre, iter_type;
+ tree tiling; /* Tiling values (if non null). */
+ int collapse; /* Collapsed loops, 1 for a non-collapsed loop. */
+ int ordered;
+ int first_nonrect, last_nonrect;
+ bool have_nowait, have_ordered, simd_schedule, have_reductemp;
+ bool have_pointer_condtemp, have_scantemp, have_nonctrl_scantemp;
+ bool non_rect;
+ int lastprivate_conditional;
+ unsigned char sched_modifiers;
+ enum omp_clause_schedule_kind sched_kind;
+ struct omp_for_data_loop *loops;
+ /* The following are relevant only for non-rectangular loops
+ where only a single loop depends on an outer loop iterator. */
+ tree first_inner_iterations; /* Number of iterations of the inner
+ loop with the first outer iterator
+ (or adjn1, if that is non-NULL). */
+ tree factor; /* (m2 - m1) * outer_step / inner_step. */
+ /* Adjusted n1 of the outer loop in such loop nests (if needed). */
+ tree adjn1;
+};
+
+#define OACC_FN_ATTRIB "oacc function"
+
+extern tree omp_find_clause (tree clauses, enum omp_clause_code kind);
+extern bool omp_is_allocatable_or_ptr (tree decl);
+extern tree omp_check_optional_argument (tree decl, bool for_present_check);
+extern bool omp_mappable_type (tree type);
+extern bool omp_privatize_by_reference (tree decl);
+extern void omp_adjust_for_condition (location_t loc, enum tree_code *cond_code,
+ tree *n2, tree v, tree step);
+extern tree omp_get_for_step_from_incr (location_t loc, tree incr);
+extern void omp_extract_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
+ struct omp_for_data_loop *loops);
+extern gimple *omp_build_barrier (tree lhs);
+extern tree find_combined_omp_for (tree *, int *, void *);
+extern poly_uint64 omp_max_vf (void);
+extern int omp_max_simt_vf (void);
+extern int omp_constructor_traits_to_codes (tree, enum tree_code *);
+extern tree omp_check_context_selector (location_t loc, tree ctx);
+extern void omp_mark_declare_variant (location_t loc, tree variant,
+ tree construct);
+extern int omp_context_selector_matches (tree);
+extern int omp_context_selector_set_compare (const char *, tree, tree);
+extern tree omp_get_context_selector (tree, const char *, const char *);
+extern tree omp_resolve_declare_variant (tree);
+extern tree oacc_launch_pack (unsigned code, tree device, unsigned op);
+extern tree oacc_replace_fn_attrib_attr (tree attribs, tree dims);
+extern void oacc_replace_fn_attrib (tree fn, tree dims);
+extern void oacc_set_fn_attrib (tree fn, tree clauses, vec<tree> *args);
+extern int oacc_verify_routine_clauses (tree, tree *, location_t,
+ const char *);
+extern tree oacc_build_routine_dims (tree clauses);
+extern tree oacc_get_fn_attrib (tree fn);
+extern bool offloading_function_p (tree fn);
+extern int oacc_get_fn_dim_size (tree fn, int axis);
+extern int oacc_get_ifn_dim_arg (const gimple *stmt);
+
+enum omp_requires {
+ OMP_REQUIRES_ATOMIC_DEFAULT_MEM_ORDER = 0xf,
+ OMP_REQUIRES_UNIFIED_ADDRESS = GOMP_REQUIRES_UNIFIED_ADDRESS,
+ OMP_REQUIRES_UNIFIED_SHARED_MEMORY = GOMP_REQUIRES_UNIFIED_SHARED_MEMORY,
+ OMP_REQUIRES_DYNAMIC_ALLOCATORS = 0x40,
+ OMP_REQUIRES_REVERSE_OFFLOAD = GOMP_REQUIRES_REVERSE_OFFLOAD,
+ OMP_REQUIRES_ATOMIC_DEFAULT_MEM_ORDER_USED = 0x100,
+ OMP_REQUIRES_TARGET_USED = GOMP_REQUIRES_TARGET_USED,
+};
+
+extern GTY(()) enum omp_requires omp_requires_mask;
+
+inline dump_flags_t
+get_openacc_privatization_dump_flags ()
+{
+ dump_flags_t l_dump_flags = MSG_NOTE;
+
+ /* For '--param=openacc-privatization=quiet', diagnostics only go to dump
+ files. */
+ if (param_openacc_privatization == OPENACC_PRIVATIZATION_QUIET)
+ l_dump_flags |= MSG_PRIORITY_INTERNALS;
+
+ return l_dump_flags;
+}
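+
+/* Illustrative usage (hypothetical 'loc', 'decl' and message): callers
+ pass the returned flags straight to the dump machinery, e.g.
+
+ if (dump_enabled_p ())
+ dump_printf_loc (get_openacc_privatization_dump_flags (), loc,
+ "privatizing %T\n", decl);
+ */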
+
+extern tree omp_build_component_ref (tree obj, tree field);
+
+#endif /* GCC_OMP_GENERAL_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-low.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-low.h
new file mode 100644
index 0000000..6783cd8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-low.h
@@ -0,0 +1,31 @@
+/* Header file for OpenMP directive lowering.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OMP_LOW_H
+#define GCC_OMP_LOW_H
+
+extern tree omp_reduction_init_op (location_t, enum tree_code, tree);
+extern tree omp_reduction_init (tree, tree);
+extern tree omp_member_access_dummy_var (tree);
+extern tree omp_find_combined_for (gimple_stmt_iterator *gsi_p,
+ bool *handled_ops_p,
+ struct walk_stmt_info *wi);
+
+
+#endif /* GCC_OMP_LOW_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-offload.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-offload.h
new file mode 100644
index 0000000..73711e7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-offload.h
@@ -0,0 +1,35 @@
+/* Bits of OpenMP and OpenACC handling that are specific to device offloading
+ and a lowering pass for OpenACC device directives.
+
+ Copyright (C) 2005-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OMP_DEVICE_H
+#define GCC_OMP_DEVICE_H
+
+extern int oacc_get_default_dim (int dim);
+extern int oacc_get_min_dim (int dim);
+extern int oacc_fn_attrib_level (tree attr);
+
+extern GTY(()) vec<tree, va_gc> *offload_funcs;
+extern GTY(()) vec<tree, va_gc> *offload_vars;
+
+extern void omp_finish_file (void);
+extern void omp_discover_implicit_declare_target (void);
+
+#endif /* GCC_OMP_DEVICE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-simd-clone.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-simd-clone.h
new file mode 100644
index 0000000..ce27caa
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/omp-simd-clone.h
@@ -0,0 +1,26 @@
+/* OMP constructs' SIMD clone supporting code.
+
+ Copyright (C) 2005-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OMP_SIMD_CLONE_H
+#define GCC_OMP_SIMD_CLONE_H
+
+extern void expand_simd_clones (struct cgraph_node *);
+
+#endif /* GCC_OMP_SIMD_CLONE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opt-problem.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opt-problem.h
new file mode 100644
index 0000000..e0c9cb3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opt-problem.h
@@ -0,0 +1,289 @@
+/* Rich information on why an optimization wasn't possible.
+ Copyright (C) 2018-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OPT_PROBLEM_H
+#define GCC_OPT_PROBLEM_H
+
+#include "diagnostic-core.h" /* for ATTRIBUTE_GCC_DIAG. */
+#include "optinfo.h" /* for optinfo. */
+
+/* This header declares a family of wrapper classes for tracking a
+ success/failure value, while optionally supporting propagating an
+ opt_problem * describing any failure back up the call stack.
+
+ For instance, at the deepest point of the callstack where the failure
+ happens, rather than:
+
+ if (!check_something ())
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "foo is unsupported.\n");
+ return false;
+ }
+ // [...more checks...]
+
+ // All checks passed:
+ return true;
+
+ we can capture the cause of the failure via:
+
+ if (!check_something ())
+ return opt_result::failure_at (stmt, "foo is unsupported");
+ // [...more checks...]
+
+ // All checks passed:
+ return opt_result::success ();
+
+ which effectively returns true or false, whilst recording any problem.
+
+ opt_result::success and opt_result::failure_at return opt_result values
+ which "look like" true/false respectively, via operator bool().
+ If dump_enabled_p, then opt_result::failure_at also creates an
+ opt_problem *, capturing the pertinent data (here, "foo is unsupported"
+ and "stmt").
+ If dumps are disabled, then opt_problem instances aren't
+ created, and it's equivalent to just returning a bool.
+
+ The opt_problem can be propagated via opt_result values back up
+ the call stack to where it makes most sense to the user.
+ For instance, rather than:
+
+ bool ok = try_something_that_might_fail ();
+ if (!ok)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "some message.\n");
+ return false;
+ }
+
+ we can replace the bool with an opt_result, so that if dump_enabled_p
+ and try_something_that_might_fail fails, an opt_problem * will have
+ been created, and we can propagate it up the call chain:
+
+ opt_result ok = try_something_that_might_fail ();
+ if (!ok)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "some message.\n");
+ return ok; // propagating the opt_result
+ }
+
+ opt_result is an opt_wrapper<bool>, where opt_wrapper<T> is a base
+ class for wrapping a T, optionally propagating an opt_problem in
+ case of failure (when dumps are enabled). Similarly,
+ opt_pointer_wrapper<T> can be used to wrap pointer types (where non-NULL
+ signifies success, NULL signifies failure).
+
+ In all cases, opt_wrapper<T> acts as if the opt_problem were one of its
+ fields, but the opt_problem is actually stored in a global, so that when
+ compiled, an opt_wrapper<T> is effectively just a T, and we're
+ still just passing e.g. a bool around; the opt_wrapper<T> classes
+ simply provide type-checking and an API to ensure that we provide
+ error-messages deep in the callstack at the places where problems
+ occur, and that we propagate them. This also avoids having
+ to manage the ownership of the opt_problem instances.
+
+ Using opt_result and opt_wrapper<T> documents the intent of the code
+ at the places where we represent success values, and allows the C++ type
+ system to track the deepest points in the callstack from which we
+ need to emit the failure messages. */
+
+/* A bundle of information about why an optimization failed (e.g.
+ vectorization), and the location in both the user's code and
+ in GCC itself where the problem occurred.
+
+ Instances are created by static member functions in opt_wrapper
+ subclasses, such as opt_result::failure_at.
+
+ Instances are only created when dump_enabled_p (). */
+
+class opt_problem
+{
+ public:
+ static opt_problem *get_singleton () { return s_the_problem; }
+
+ opt_problem (const dump_location_t &loc,
+ const char *fmt, va_list *ap)
+ ATTRIBUTE_GCC_DUMP_PRINTF (3, 0);
+
+ const dump_location_t &
+ get_dump_location () const { return m_optinfo.get_dump_location (); }
+
+ const optinfo & get_optinfo () const { return m_optinfo; }
+
+ void emit_and_clear ();
+
+ private:
+ optinfo m_optinfo;
+
+ static opt_problem *s_the_problem;
+};
+
+/* A base class for wrapper classes that track a success/failure value, while
+ optionally supporting propagating an opt_problem * describing any
+ failure back up the call stack. */
+
+template <typename T>
+class opt_wrapper
+{
+ public:
+ typedef T wrapped_t;
+
+ /* Be accessible as the wrapped type. */
+ operator wrapped_t () const { return m_result; }
+
+ /* No public ctor. */
+
+ wrapped_t get_result () const { return m_result; }
+ opt_problem *get_problem () const { return opt_problem::get_singleton (); }
+
+ protected:
+ opt_wrapper (wrapped_t result, opt_problem */*problem*/)
+ : m_result (result)
+ {
+ /* "problem" is ignored: although it looks like a field, we
+ actually just use the opt_problem singleton, so that
+ opt_wrapper<T> in memory is just a T. */
+ }
+
+ private:
+ wrapped_t m_result;
+};
+
+/* Subclass of opt_wrapper<T> for bool, where
+ - true signifies "success", and
+ - false signifies "failure"
+ whilst effectively propagating an opt_problem * describing any failure
+ back up the call stack. */
+
+class opt_result : public opt_wrapper <bool>
+{
+ public:
+ /* Generate a "success" value: a wrapper around "true". */
+
+ static opt_result success () { return opt_result (true, NULL); }
+
+ /* Generate a "failure" value: a wrapper around "false", and,
+ if dump_enabled_p, an opt_problem. */
+
+ static opt_result failure_at (const dump_location_t &loc,
+ const char *fmt, ...)
+ ATTRIBUTE_GCC_DUMP_PRINTF (2, 3)
+ {
+ opt_problem *problem = NULL;
+ if (dump_enabled_p ())
+ {
+ va_list ap;
+ va_start (ap, fmt);
+ problem = new opt_problem (loc, fmt, &ap);
+ va_end (ap);
+ }
+ return opt_result (false, problem);
+ }
+
+ /* Given a failure wrapper of some other kind, make an opt_result failure
+ object, for propagating the opt_problem up the call stack. */
+
+ template <typename S>
+ static opt_result
+ propagate_failure (opt_wrapper <S> other)
+ {
+ return opt_result (false, other.get_problem ());
+ }
+
+ private:
+ /* Private ctor. Instances should be created by the success and
+ failure_at static member functions. */
+ opt_result (wrapped_t result, opt_problem *problem)
+ : opt_wrapper <bool> (result, problem)
+ {}
+};
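+
+/* An illustrative usage sketch (not part of the upstream header); the
+   check and its location argument are hypothetical:
+
+     opt_result
+     check_stmt (dump_location_t loc)
+     {
+       if (!stmt_ok_p ())  // "stmt_ok_p" is a made-up predicate
+         return opt_result::failure_at (loc, "stmt not supported\n");
+       return opt_result::success ();
+     }
+
+   A caller can then test the value like a bool and propagate it:
+
+     opt_result res = check_stmt (loc);
+     if (!res)
+       return res;  // carries the opt_problem when dumps are enabled
+*/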
+
+/* Subclass of opt_wrapper<T> where T is a pointer type, for tracking
+ success/failure, where:
+ - a non-NULL value signifies "success", and
+ - a NULL value signifies "failure",
+ whilst effectively propagating an opt_problem * describing any failure
+ back up the call stack. */
+
+template <typename PtrType_t>
+class opt_pointer_wrapper : public opt_wrapper <PtrType_t>
+{
+ public:
+ typedef PtrType_t wrapped_pointer_t;
+
+ /* Given a non-NULL pointer, make a success object wrapping it. */
+
+ static opt_pointer_wrapper <wrapped_pointer_t>
+ success (wrapped_pointer_t ptr)
+ {
+ return opt_pointer_wrapper <wrapped_pointer_t> (ptr, NULL);
+ }
+
+ /* Make a NULL pointer failure object, with the given message
+ (if dump_enabled_p). */
+
+ static opt_pointer_wrapper <wrapped_pointer_t>
+ failure_at (const dump_location_t &loc,
+ const char *fmt, ...)
+ ATTRIBUTE_GCC_DUMP_PRINTF (2, 3)
+ {
+ opt_problem *problem = NULL;
+ if (dump_enabled_p ())
+ {
+ va_list ap;
+ va_start (ap, fmt);
+ problem = new opt_problem (loc, fmt, &ap);
+ va_end (ap);
+ }
+ return opt_pointer_wrapper <wrapped_pointer_t> (NULL, problem);
+ }
+
+ /* Given a failure wrapper of some other kind, make a NULL pointer
+ failure object, propagating the problem. */
+
+ template <typename S>
+ static opt_pointer_wrapper <wrapped_pointer_t>
+ propagate_failure (opt_wrapper <S> other)
+ {
+ return opt_pointer_wrapper <wrapped_pointer_t> (NULL,
+ other.get_problem ());
+ }
+
+ /* Support accessing the underlying pointer via ->. */
+
+ wrapped_pointer_t operator-> () const { return this->get_result (); }
+
+ private:
+ /* Private ctor. Instances should be built using the static member
+ functions "success" and "failure". */
+ opt_pointer_wrapper (wrapped_pointer_t result, opt_problem *problem)
+ : opt_wrapper<PtrType_t> (result, problem)
+ {}
+};
+
+/* A typedef for wrapping "tree" so that NULL_TREE can carry an
+ opt_problem describing the failure (if dump_enabled_p). */
+
+typedef opt_pointer_wrapper<tree> opt_tree;
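+
+/* For example (an illustrative sketch, not part of the upstream header),
+   a lookup that returns a tree can report why it produced NULL_TREE;
+   "find_decl" and "lookup_ok_p" are hypothetical names:
+
+     opt_tree
+     find_decl (tree t, dump_location_t loc)
+     {
+       if (!lookup_ok_p (t))
+         return opt_tree::failure_at (loc, "lookup failed\n");
+       return opt_tree::success (t);
+     }
+*/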
+
+#endif /* #ifndef GCC_OPT_PROBLEM_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opt-suggestions.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opt-suggestions.h
new file mode 100644
index 0000000..89dad01
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opt-suggestions.h
@@ -0,0 +1,71 @@
+/* Provide suggestions to handle misspelled options, and implement the
+ --complete option for auto-completing options from a prefix.
+ Copyright (C) 2016-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OPT_PROPOSER_H
+#define GCC_OPT_PROPOSER_H
+
+/* The option proposer is a class used by the driver to provide hints
+ for misspelled options. It also implements the --complete option,
+ which is intended to be invoked by Bash to provide better option
+ completion support. */
+
+class option_proposer
+{
+public:
+ /* Default constructor. */
+ option_proposer (): m_option_suggestions (NULL)
+ {}
+
+ /* Default destructor. */
+ ~option_proposer ();
+
+ /* Helper function for driver::handle_unrecognized_options.
+
+ Given an unrecognized option BAD_OPT (without the leading dash),
+ locate the closest reasonable matching option (again, without the
+ leading dash), or NULL.
+
+ The returned string is owned by the option_proposer instance. */
+ const char *suggest_option (const char *bad_opt);
+
+ /* Print on stdout a list of valid options that begin with OPTION_PREFIX,
+ one per line, suitable for use by Bash completion.
+
+ Implementation of the "--completion=" option. */
+ void suggest_completion (const char *option_prefix);
+
+ /* Populate RESULTS with valid completions of options that begin
+ with OPTION_PREFIX. */
+ void get_completions (const char *option_prefix, auto_string_vec &results);
+
+private:
+ /* Helper function for option_proposer::suggest_option. Populate
+ m_option_suggestions with candidate strings for misspelled options.
+ The strings will be freed by the option_proposer's dtor.
+ PREFIX is non-NULL when building Bash completion suggestions;
+ otherwise it is NULL. */
+ void build_option_suggestions (const char *prefix);
+
+private:
+ /* Cache with all suggestions. */
+ auto_string_vec *m_option_suggestions;
+};
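+
+/* A minimal usage sketch (illustrative, not part of the upstream header):
+
+     option_proposer proposer;
+     const char *hint = proposer.suggest_option ("fsanitise");
+     if (hint)
+       ;  // e.g. report: did you mean "-fsanitize="?
+
+   The returned string is owned by PROPOSER and freed with it. */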
+
+#endif /* GCC_OPT_PROPOSER_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs-libfuncs.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs-libfuncs.h
new file mode 100644
index 0000000..88438c4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs-libfuncs.h
@@ -0,0 +1,79 @@
+/* Mapping from optabs to underlying library functions
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OPTABS_LIBFUNCS_H
+#define GCC_OPTABS_LIBFUNCS_H
+
+#include "insn-opinit.h"
+
+rtx convert_optab_libfunc (convert_optab, machine_mode, machine_mode);
+rtx optab_libfunc (optab, machine_mode);
+
+void gen_int_libfunc (optab, const char *, char, machine_mode);
+void gen_fp_libfunc (optab, const char *, char, machine_mode);
+void gen_fixed_libfunc (optab, const char *, char, machine_mode);
+void gen_signed_fixed_libfunc (optab, const char *, char, machine_mode);
+void gen_unsigned_fixed_libfunc (optab, const char *, char, machine_mode);
+void gen_int_fp_libfunc (optab, const char *, char, machine_mode);
+void gen_intv_fp_libfunc (optab, const char *, char, machine_mode);
+void gen_int_fp_fixed_libfunc (optab, const char *, char, machine_mode);
+void gen_int_fp_signed_fixed_libfunc (optab, const char *, char, machine_mode);
+void gen_int_fixed_libfunc (optab, const char *, char, machine_mode);
+void gen_int_signed_fixed_libfunc (optab, const char *, char, machine_mode);
+void gen_int_unsigned_fixed_libfunc (optab, const char *, char, machine_mode);
+
+void gen_interclass_conv_libfunc (convert_optab, const char *,
+ machine_mode, machine_mode);
+void gen_int_to_fp_conv_libfunc (convert_optab, const char *,
+ machine_mode, machine_mode);
+void gen_ufloat_conv_libfunc (convert_optab, const char *,
+ machine_mode, machine_mode);
+void gen_int_to_fp_nondecimal_conv_libfunc (convert_optab, const char *,
+ machine_mode, machine_mode);
+void gen_fp_to_int_conv_libfunc (convert_optab, const char *,
+ machine_mode, machine_mode);
+void gen_intraclass_conv_libfunc (convert_optab, const char *,
+ machine_mode, machine_mode);
+void gen_trunc_conv_libfunc (convert_optab, const char *,
+ machine_mode, machine_mode);
+void gen_extend_conv_libfunc (convert_optab, const char *,
+ machine_mode, machine_mode);
+void gen_fract_conv_libfunc (convert_optab, const char *,
+ machine_mode, machine_mode);
+void gen_fractuns_conv_libfunc (convert_optab, const char *,
+ machine_mode, machine_mode);
+void gen_satfract_conv_libfunc (convert_optab, const char *,
+ machine_mode, machine_mode);
+void gen_satfractuns_conv_libfunc (convert_optab, const char *,
+ machine_mode, machine_mode);
+
+tree build_libfunc_function_visibility (const char *, symbol_visibility);
+tree build_libfunc_function (const char *);
+rtx init_one_libfunc_visibility (const char *, symbol_visibility);
+rtx init_one_libfunc (const char *);
+rtx set_user_assembler_libfunc (const char *, const char *);
+
+void set_optab_libfunc (optab, machine_mode, const char *);
+void set_conv_libfunc (convert_optab, machine_mode,
+ machine_mode, const char *);
+
+void init_optabs (void);
+void init_sync_libfuncs (int max);
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs-query.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs-query.h
new file mode 100644
index 0000000..b266d2f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs-query.h
@@ -0,0 +1,216 @@
+/* IR-agnostic target query functions relating to optabs
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OPTABS_QUERY_H
+#define GCC_OPTABS_QUERY_H
+
+#include "insn-opinit.h"
+#include "target.h"
+
+/* Return true if OP is a conversion optab. */
+
+inline bool
+convert_optab_p (optab op)
+{
+ return op > unknown_optab && op <= LAST_CONV_OPTAB;
+}
+
+/* Return the insn used to implement mode MODE of OP, or CODE_FOR_nothing
+ if the target does not have such an insn. */
+
+inline enum insn_code
+optab_handler (optab op, machine_mode mode)
+{
+ unsigned scode = (op << 16) | mode;
+ gcc_assert (op > LAST_CONV_OPTAB);
+ return raw_optab_handler (scode);
+}
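+
+/* For instance (an illustrative sketch, not part of the upstream header),
+   a caller can ask whether the target open-codes SImode addition:
+
+     if (optab_handler (add_optab, SImode) != CODE_FOR_nothing)
+       ;  // the target provides an addsi3 pattern
+*/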
+
+/* Return the insn used to perform conversion OP from mode FROM_MODE
+ to mode TO_MODE; return CODE_FOR_nothing if the target does not have
+ such an insn. */
+
+inline enum insn_code
+convert_optab_handler (convert_optab op, machine_mode to_mode,
+ machine_mode from_mode)
+{
+ unsigned scode = (op << 16) | (from_mode << 8) | to_mode;
+ gcc_assert (convert_optab_p (op));
+ return raw_optab_handler (scode);
+}
+
+enum insn_code convert_optab_handler (convert_optab, machine_mode,
+ machine_mode, optimization_type);
+
+/* Return the insn used to implement mode MODE of OP, or CODE_FOR_nothing
+ if the target does not have such an insn. */
+
+inline enum insn_code
+direct_optab_handler (direct_optab op, machine_mode mode)
+{
+ return optab_handler (op, mode);
+}
+
+enum insn_code direct_optab_handler (convert_optab, machine_mode,
+ optimization_type);
+
+/* Return true if UNOPTAB is for a trapping-on-overflow operation. */
+
+inline bool
+trapv_unoptab_p (optab unoptab)
+{
+ return (unoptab == negv_optab
+ || unoptab == absv_optab);
+}
+
+/* Return true if BINOPTAB is for a trapping-on-overflow operation. */
+
+inline bool
+trapv_binoptab_p (optab binoptab)
+{
+ return (binoptab == addv_optab
+ || binoptab == subv_optab
+ || binoptab == smulv_optab);
+}
+
+/* Return insn code for a comparison operator with VMODE
+ resulting in MASK_MODE, unsigned if UNS is true. */
+
+inline enum insn_code
+get_vec_cmp_icode (machine_mode vmode, machine_mode mask_mode, bool uns)
+{
+ optab tab = uns ? vec_cmpu_optab : vec_cmp_optab;
+ return convert_optab_handler (tab, vmode, mask_mode);
+}
+
+/* Return insn code for a comparison operator with VMODE
+ resulting in MASK_MODE (only for EQ/NE). */
+
+inline enum insn_code
+get_vec_cmp_eq_icode (machine_mode vmode, machine_mode mask_mode)
+{
+ return convert_optab_handler (vec_cmpeq_optab, vmode, mask_mode);
+}
+
+/* Return insn code for a conditional operator with a comparison in
+ mode CMODE, unsigned if UNS is true, resulting in a value of mode VMODE. */
+
+inline enum insn_code
+get_vcond_icode (machine_mode vmode, machine_mode cmode, bool uns)
+{
+ enum insn_code icode = CODE_FOR_nothing;
+ if (uns)
+ icode = convert_optab_handler (vcondu_optab, vmode, cmode);
+ else
+ icode = convert_optab_handler (vcond_optab, vmode, cmode);
+ return icode;
+}
+
+/* Return insn code for a conditional operator with a mask mode
+ MMODE resulting in a value of mode VMODE. */
+
+inline enum insn_code
+get_vcond_mask_icode (machine_mode vmode, machine_mode mmode)
+{
+ return convert_optab_handler (vcond_mask_optab, vmode, mmode);
+}
+
+/* Return insn code for a conditional operator with a comparison in
+ mode CMODE (only EQ/NE), resulting in a value of mode VMODE. */
+
+inline enum insn_code
+get_vcond_eq_icode (machine_mode vmode, machine_mode cmode)
+{
+ return convert_optab_handler (vcondeq_optab, vmode, cmode);
+}
+
+/* Enumerates the possible extraction_insn operations. */
+enum extraction_pattern { EP_insv, EP_extv, EP_extzv };
+
+/* Describes an instruction that inserts or extracts a bitfield. */
+class extraction_insn
+{
+public:
+ /* The code of the instruction. */
+ enum insn_code icode;
+
+ /* The mode that the structure operand should have. This is byte_mode
+ when using the legacy insv, extv and extzv patterns to access memory.
+ If no mode is given, the structure is a BLKmode memory. */
+ opt_scalar_int_mode struct_mode;
+
+ /* The mode of the field to be inserted or extracted, and by extension
+ the mode of the insertion or extraction itself. */
+ scalar_int_mode field_mode;
+
+ /* The mode of the field's bit position. This is only important
+ when the position is variable rather than constant. */
+ scalar_int_mode pos_mode;
+};
+
+bool get_best_reg_extraction_insn (extraction_insn *,
+ enum extraction_pattern,
+ unsigned HOST_WIDE_INT, machine_mode);
+bool get_best_mem_extraction_insn (extraction_insn *,
+ enum extraction_pattern,
+ HOST_WIDE_INT, HOST_WIDE_INT, machine_mode);
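+
+/* For instance (an illustrative sketch, not part of the upstream header;
+   BITSIZE is a hypothetical variable):
+
+     extraction_insn ex;
+     if (get_best_reg_extraction_insn (&ex, EP_extzv, bitsize, word_mode))
+       ;  // ex.icode zero-extends a BITSIZE-bit field from a register
+*/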
+
+enum insn_code can_extend_p (machine_mode, machine_mode, int);
+enum insn_code can_float_p (machine_mode, machine_mode, int);
+enum insn_code can_fix_p (machine_mode, machine_mode, int, bool *);
+bool can_conditionally_move_p (machine_mode mode);
+opt_machine_mode qimode_for_vec_perm (machine_mode);
+bool selector_fits_mode_p (machine_mode, const vec_perm_indices &);
+bool can_vec_perm_var_p (machine_mode);
+bool can_vec_perm_const_p (machine_mode, machine_mode,
+ const vec_perm_indices &, bool = true);
+/* Find a widening optab even if it doesn't widen as much as we want. */
+#define find_widening_optab_handler(A, B, C) \
+ find_widening_optab_handler_and_mode (A, B, C, NULL)
+enum insn_code find_widening_optab_handler_and_mode (optab, machine_mode,
+ machine_mode,
+ machine_mode *);
+int can_mult_highpart_p (machine_mode, bool);
+bool can_vec_mask_load_store_p (machine_mode, machine_mode, bool);
+opt_machine_mode get_len_load_store_mode (machine_mode, bool);
+bool can_compare_and_swap_p (machine_mode, bool);
+bool can_atomic_exchange_p (machine_mode, bool);
+bool can_atomic_load_p (machine_mode);
+bool lshift_cheap_p (bool);
+bool supports_vec_gather_load_p (machine_mode = E_VOIDmode);
+bool supports_vec_scatter_store_p (machine_mode = E_VOIDmode);
+bool can_vec_extract (machine_mode, machine_mode);
+
+/* Version of find_widening_optab_handler_and_mode that operates on
+ specific mode types. */
+
+template<typename T>
+inline enum insn_code
+find_widening_optab_handler_and_mode (optab op, const T &to_mode,
+ const T &from_mode, T *found_mode)
+{
+ machine_mode tmp;
+ enum insn_code icode = find_widening_optab_handler_and_mode
+ (op, machine_mode (to_mode), machine_mode (from_mode), &tmp);
+ if (icode != CODE_FOR_nothing && found_mode)
+ *found_mode = as_a <T> (tmp);
+ return icode;
+}
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs-tree.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs-tree.h
new file mode 100644
index 0000000..526bfe8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs-tree.h
@@ -0,0 +1,51 @@
+/* Tree-based target query functions relating to optabs
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OPTABS_TREE_H
+#define GCC_OPTABS_TREE_H
+
+#include "optabs-query.h"
+
+/* An extra flag to control optab_for_tree_code's behavior. This is needed to
+ distinguish between machines with a vector shift that takes a scalar for the
+ shift amount vs. machines that take a vector for the shift amount. */
+enum optab_subtype
+{
+ optab_default,
+ optab_scalar,
+ optab_vector,
+ optab_vector_mixed_sign
+};
+
+/* Return the optab used for computing the given operation on the type given by
+ the second argument. The third argument distinguishes between the types of
+ vector shifts and rotates. */
+optab optab_for_tree_code (enum tree_code, const_tree, enum optab_subtype);
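+
+/* For instance (an illustrative sketch, not part of the upstream header),
+   choosing between the shift-amount forms for a vector left shift:
+
+     optab tab = optab_for_tree_code (LSHIFT_EXPR, vectype, optab_vector);
+     // optab_vector selects the variant whose shift amount is a vector
+     // (vashl_optab); optab_scalar would select ashl_optab instead.
+*/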
+bool
+supportable_half_widening_operation (enum tree_code, tree, tree,
+ enum tree_code *);
+bool supportable_convert_operation (enum tree_code, tree, tree,
+ enum tree_code *);
+bool expand_vec_cmp_expr_p (tree, tree, enum tree_code);
+bool expand_vec_cond_expr_p (tree, tree, enum tree_code);
+void init_tree_optimization_optabs (tree);
+bool target_supports_op_p (tree, enum tree_code,
+ enum optab_subtype = optab_default);
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs.def
new file mode 100644
index 0000000..695f591
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs.def
@@ -0,0 +1,478 @@
+/* Definitions for operation tables, or "optabs".
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* The entries in optabs.def are categorized:
+ C: A "conversion" optab, which uses two modes; has libcall data.
+ N: A "normal" optab, which uses one mode; has libcall data.
+ D: A "direct" optab, which uses one mode; does not have libcall data.
+ V: An "oVerflow" optab. Like N, but does not record its code in
+ code_to_optab.
+
+ CX, NX, VX: An extra pattern entry for a conversion or normal optab.
+
+ These patterns may be present in the MD file with names that contain
+ the mode(s) used and the name of the operation. This array contains
+ a list of optabs that need to be initialized. Within each name,
+ $a and $b are used to match a short mode name (the part of the mode
+ name not including `mode' and converted to lower-case).
+
+ $I means that only full integer modes should be considered for the
+ next mode, and $F means that only float modes should be considered.
+ $P means that both full and partial integer modes should be considered.
+ $Q means that only fixed-point modes should be considered.
+
+ The pattern may be NULL if the optab exists only for the libcalls
+ that we plan to attach to it, and there are no named patterns in
+ the md files. */
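+
+/* For example, under the scheme above the pattern name "add$P$a3" can
+ match md patterns such as addsi3 or adddi3 (one per integer mode),
+ and the conversion name "extend$b$a2" can match extendsfdf2, with $b
+ naming the source mode and $a the destination mode. (Illustrative
+ instances only, not an exhaustive list.) */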
+
+/* The extension libcalls are used for float extension. */
+OPTAB_CL(sext_optab, "extend$b$a2", SIGN_EXTEND, "extend", gen_extend_conv_libfunc)
+OPTAB_CL(trunc_optab, "trunc$b$a2", TRUNCATE, "trunc", gen_trunc_conv_libfunc)
+OPTAB_CL(zext_optab, "zero_extend$b$a2", ZERO_EXTEND, NULL, NULL)
+
+OPTAB_CL(sfix_optab, "fix$F$b$I$a2", FIX, "fix", gen_fp_to_int_conv_libfunc)
+OPTAB_CL(ufix_optab, "fixuns$F$b$a2", UNSIGNED_FIX, "fixuns", gen_fp_to_int_conv_libfunc)
+OPTAB_CL(sfloat_optab, "float$I$b$F$a2", FLOAT, "float", gen_int_to_fp_conv_libfunc)
+OPTAB_CL(ufloat_optab, "floatuns$I$b$F$a2", UNSIGNED_FLOAT, NULL, gen_ufloat_conv_libfunc)
+
+OPTAB_CL(lrint_optab, "lrint$F$b$I$a2", UNKNOWN, "lrint", gen_int_to_fp_nondecimal_conv_libfunc)
+OPTAB_CL(lround_optab, "lround$F$b$I$a2", UNKNOWN, "lround", gen_int_to_fp_nondecimal_conv_libfunc)
+OPTAB_CL(lfloor_optab, "lfloor$F$b$I$a2", UNKNOWN, "lfloor", gen_int_to_fp_nondecimal_conv_libfunc)
+OPTAB_CL(lceil_optab, "lceil$F$b$I$a2", UNKNOWN, "lceil", gen_int_to_fp_nondecimal_conv_libfunc)
+
+/* Conversions for fixed-point modes and other modes. */
+OPTAB_CL(fract_optab, "fract$b$a2", FRACT_CONVERT, "fract", gen_fract_conv_libfunc)
+OPTAB_CL(fractuns_optab, "fractuns$I$b$Q$a2", UNSIGNED_FRACT_CONVERT, "fractuns", gen_fractuns_conv_libfunc)
+OPTAB_CX(fractuns_optab, "fractuns$Q$b$I$a2")
+OPTAB_CL(satfract_optab, "satfract$b$Q$a2", SAT_FRACT, "satfract", gen_satfract_conv_libfunc)
+OPTAB_CL(satfractuns_optab, "satfractuns$I$b$Q$a2", UNSIGNED_SAT_FRACT, "satfractuns", gen_satfractuns_conv_libfunc)
+
+OPTAB_CD(sfixtrunc_optab, "fix_trunc$F$b$I$a2")
+OPTAB_CD(ufixtrunc_optab, "fixuns_trunc$F$b$I$a2")
+
+/* Misc optabs that use two modes; model them as "conversions". */
+OPTAB_CD(smul_widen_optab, "mul$b$a3")
+OPTAB_CD(umul_widen_optab, "umul$b$a3")
+OPTAB_CD(usmul_widen_optab, "usmul$b$a3")
+OPTAB_CD(smadd_widen_optab, "madd$b$a4")
+OPTAB_CD(umadd_widen_optab, "umadd$b$a4")
+OPTAB_CD(ssmadd_widen_optab, "ssmadd$b$a4")
+OPTAB_CD(usmadd_widen_optab, "usmadd$b$a4")
+OPTAB_CD(smsub_widen_optab, "msub$b$a4")
+OPTAB_CD(umsub_widen_optab, "umsub$b$a4")
+OPTAB_CD(ssmsub_widen_optab, "ssmsub$b$a4")
+OPTAB_CD(usmsub_widen_optab, "usmsub$a$b4")
+OPTAB_CD(vec_load_lanes_optab, "vec_load_lanes$a$b")
+OPTAB_CD(vec_store_lanes_optab, "vec_store_lanes$a$b")
+OPTAB_CD(vec_mask_load_lanes_optab, "vec_mask_load_lanes$a$b")
+OPTAB_CD(vec_mask_store_lanes_optab, "vec_mask_store_lanes$a$b")
+OPTAB_CD(vcond_optab, "vcond$a$b")
+OPTAB_CD(vcondu_optab, "vcondu$a$b")
+OPTAB_CD(vcondeq_optab, "vcondeq$a$b")
+OPTAB_CD(vcond_mask_optab, "vcond_mask_$a$b")
+OPTAB_CD(vec_cmp_optab, "vec_cmp$a$b")
+OPTAB_CD(vec_cmpu_optab, "vec_cmpu$a$b")
+OPTAB_CD(vec_cmpeq_optab, "vec_cmpeq$a$b")
+OPTAB_CD(maskload_optab, "maskload$a$b")
+OPTAB_CD(maskstore_optab, "maskstore$a$b")
+OPTAB_CD(gather_load_optab, "gather_load$a$b")
+OPTAB_CD(mask_gather_load_optab, "mask_gather_load$a$b")
+OPTAB_CD(scatter_store_optab, "scatter_store$a$b")
+OPTAB_CD(mask_scatter_store_optab, "mask_scatter_store$a$b")
+OPTAB_CD(vec_extract_optab, "vec_extract$a$b")
+OPTAB_CD(vec_init_optab, "vec_init$a$b")
+
+OPTAB_CD (while_ult_optab, "while_ult$a$b")
+
+OPTAB_NL(add_optab, "add$P$a3", PLUS, "add", '3', gen_int_fp_fixed_libfunc)
+OPTAB_NX(add_optab, "add$F$a3")
+OPTAB_NX(add_optab, "add$Q$a3")
+OPTAB_VL(addv_optab, "addv$I$a3", PLUS, "add", '3', gen_intv_fp_libfunc)
+OPTAB_VX(addv_optab, "add$F$a3")
+OPTAB_NL(ssadd_optab, "ssadd$Q$a3", SS_PLUS, "ssadd", '3', gen_signed_fixed_libfunc)
+OPTAB_NL(usadd_optab, "usadd$Q$a3", US_PLUS, "usadd", '3', gen_unsigned_fixed_libfunc)
+OPTAB_NL(sub_optab, "sub$P$a3", MINUS, "sub", '3', gen_int_fp_fixed_libfunc)
+OPTAB_NX(sub_optab, "sub$F$a3")
+OPTAB_NX(sub_optab, "sub$Q$a3")
+OPTAB_VL(subv_optab, "subv$I$a3", MINUS, "sub", '3', gen_intv_fp_libfunc)
+OPTAB_VX(subv_optab, "sub$F$a3")
+OPTAB_NL(sssub_optab, "sssub$Q$a3", SS_MINUS, "sssub", '3', gen_signed_fixed_libfunc)
+OPTAB_NL(ussub_optab, "ussub$Q$a3", US_MINUS, "ussub", '3', gen_unsigned_fixed_libfunc)
+OPTAB_NL(smul_optab, "mul$Q$a3", MULT, "mul", '3', gen_int_fp_fixed_libfunc)
+OPTAB_NX(smul_optab, "mul$P$a3")
+OPTAB_NX(smul_optab, "mul$F$a3")
+OPTAB_VL(smulv_optab, "mulv$I$a3", MULT, "mul", '3', gen_intv_fp_libfunc)
+OPTAB_VX(smulv_optab, "mul$F$a3")
+OPTAB_NL(ssmul_optab, "ssmul$Q$a3", SS_MULT, "ssmul", '3', gen_signed_fixed_libfunc)
+OPTAB_NL(usmul_optab, "usmul$Q$a3", US_MULT, "usmul", '3', gen_unsigned_fixed_libfunc)
+OPTAB_NL(sdiv_optab, "div$a3", DIV, "div", '3', gen_int_fp_signed_fixed_libfunc)
+OPTAB_VL(sdivv_optab, "divv$I$a3", DIV, "divv", '3', gen_int_libfunc)
+OPTAB_VX(sdivv_optab, "div$F$a3")
+OPTAB_NL(ssdiv_optab, "ssdiv$Q$a3", SS_DIV, "ssdiv", '3', gen_signed_fixed_libfunc)
+OPTAB_NL(udiv_optab, "udiv$I$a3", UDIV, "udiv", '3', gen_int_unsigned_fixed_libfunc)
+OPTAB_NX(udiv_optab, "udiv$Q$a3")
+OPTAB_NL(usdiv_optab, "usdiv$Q$a3", US_DIV, "usdiv", '3', gen_unsigned_fixed_libfunc)
+OPTAB_NC(sdivmod_optab, "divmod$a4", UNKNOWN)
+OPTAB_NC(udivmod_optab, "udivmod$a4", UNKNOWN)
+OPTAB_NL(smod_optab, "mod$a3", MOD, "mod", '3', gen_int_libfunc)
+OPTAB_NL(umod_optab, "umod$a3", UMOD, "umod", '3', gen_int_libfunc)
+OPTAB_NL(ftrunc_optab, "ftrunc$F$a2", UNKNOWN, "ftrunc", '2', gen_fp_libfunc)
+OPTAB_NL(and_optab, "and$a3", AND, "and", '3', gen_int_libfunc)
+OPTAB_NL(ior_optab, "ior$a3", IOR, "ior", '3', gen_int_libfunc)
+OPTAB_NL(xor_optab, "xor$a3", XOR, "xor", '3', gen_int_libfunc)
+OPTAB_NL(ashl_optab, "ashl$a3", ASHIFT, "ashl", '3', gen_int_fixed_libfunc)
+OPTAB_NL(ssashl_optab, "ssashl$Q$a3", SS_ASHIFT, "ssashl", '3', gen_signed_fixed_libfunc)
+OPTAB_NL(usashl_optab, "usashl$Q$a3", US_ASHIFT, "usashl", '3', gen_unsigned_fixed_libfunc)
+OPTAB_NL(ashr_optab, "ashr$a3", ASHIFTRT, "ashr", '3', gen_int_signed_fixed_libfunc)
+OPTAB_NL(lshr_optab, "lshr$a3", LSHIFTRT, "lshr", '3', gen_int_unsigned_fixed_libfunc)
+OPTAB_NC(rotl_optab, "rotl$a3", ROTATE)
+OPTAB_NC(rotr_optab, "rotr$a3", ROTATERT)
+OPTAB_VC(vashl_optab, "vashl$a3", ASHIFT)
+OPTAB_VC(vashr_optab, "vashr$a3", ASHIFTRT)
+OPTAB_VC(vlshr_optab, "vlshr$a3", LSHIFTRT)
+OPTAB_VC(vrotl_optab, "vrotl$a3", ROTATE)
+OPTAB_VC(vrotr_optab, "vrotr$a3", ROTATERT)
+OPTAB_NL(smin_optab, "smin$a3", SMIN, "min", '3', gen_int_fp_libfunc)
+OPTAB_NL(smax_optab, "smax$a3", SMAX, "max", '3', gen_int_fp_libfunc)
+OPTAB_NL(umin_optab, "umin$I$a3", UMIN, "umin", '3', gen_int_libfunc)
+OPTAB_NL(umax_optab, "umax$I$a3", UMAX, "umax", '3', gen_int_libfunc)
+
+OPTAB_NL(neg_optab, "neg$P$a2", NEG, "neg", '2', gen_int_fp_fixed_libfunc)
+OPTAB_NX(neg_optab, "neg$F$a2")
+OPTAB_NX(neg_optab, "neg$Q$a2")
+OPTAB_VL(negv_optab, "negv$I$a2", NEG, "neg", '2', gen_intv_fp_libfunc)
+OPTAB_VX(negv_optab, "neg$F$a2")
+OPTAB_NL(ssneg_optab, "ssneg$Q$a2", SS_NEG, "ssneg", '2', gen_signed_fixed_libfunc)
+OPTAB_NL(usneg_optab, "usneg$Q$a2", US_NEG, "usneg", '2', gen_unsigned_fixed_libfunc)
+OPTAB_NC(abs_optab, "abs$P$a2", ABS)
+OPTAB_NX(abs_optab, "abs$F$a2")
+OPTAB_VC(absv_optab, "absv$I$a2", ABS)
+OPTAB_VX(absv_optab, "abs$F$a2")
+OPTAB_NL(one_cmpl_optab, "one_cmpl$a2", NOT, "one_cmpl", '2', gen_int_libfunc)
+OPTAB_NC(bswap_optab, "bswap$a2", BSWAP)
+OPTAB_NL(ffs_optab, "ffs$a2", FFS, "ffs", '2', gen_int_libfunc)
+OPTAB_NL(clz_optab, "clz$a2", CLZ, "clz", '2', gen_int_libfunc)
+OPTAB_NL(ctz_optab, "ctz$a2", CTZ, "ctz", '2', gen_int_libfunc)
+OPTAB_NL(clrsb_optab, "clrsb$a2", CLRSB, "clrsb", '2', gen_int_libfunc)
+OPTAB_NL(popcount_optab, "popcount$a2", POPCOUNT, "popcount", '2', gen_int_libfunc)
+OPTAB_NL(parity_optab, "parity$a2", PARITY, "parity", '2', gen_int_libfunc)
+
+/* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
+OPTAB_NL(cmp_optab, NULL, UNKNOWN, "cmp", '2', gen_int_fp_fixed_libfunc)
+OPTAB_NL(ucmp_optab, NULL, UNKNOWN, "ucmp", '2', gen_int_libfunc)
+
+/* EQ etc are floating point comparisons. */
+OPTAB_NL(eq_optab, NULL, EQ, "eq", '2', gen_fp_libfunc)
+OPTAB_NL(ne_optab, NULL, NE, "ne", '2', gen_fp_libfunc)
+OPTAB_NL(gt_optab, NULL, GT, "gt", '2', gen_fp_libfunc)
+OPTAB_NL(ge_optab, NULL, GE, "ge", '2', gen_fp_libfunc)
+OPTAB_NL(lt_optab, NULL, LT, "lt", '2', gen_fp_libfunc)
+OPTAB_NL(le_optab, NULL, LE, "le", '2', gen_fp_libfunc)
+OPTAB_NL(unord_optab, NULL, UNORDERED, "unord", '2', gen_fp_libfunc)
+
+OPTAB_NL(powi_optab, NULL, UNKNOWN, "powi", '2', gen_fp_libfunc)
+
+/* These are all initialized individually, on a per-host basis. */
+OPTAB_NC(sqrt_optab, "sqrt$a2", SQRT)
+OPTAB_NC(sync_old_add_optab, "sync_old_add$I$a", UNKNOWN)
+OPTAB_NC(sync_old_sub_optab, "sync_old_sub$I$a", UNKNOWN)
+OPTAB_NC(sync_old_ior_optab, "sync_old_ior$I$a", UNKNOWN)
+OPTAB_NC(sync_old_and_optab, "sync_old_and$I$a", UNKNOWN)
+OPTAB_NC(sync_old_xor_optab, "sync_old_xor$I$a", UNKNOWN)
+OPTAB_NC(sync_old_nand_optab, "sync_old_nand$I$a", UNKNOWN)
+OPTAB_NC(sync_new_add_optab, "sync_new_add$I$a", UNKNOWN)
+OPTAB_NC(sync_new_sub_optab, "sync_new_sub$I$a", UNKNOWN)
+OPTAB_NC(sync_new_ior_optab, "sync_new_ior$I$a", UNKNOWN)
+OPTAB_NC(sync_new_and_optab, "sync_new_and$I$a", UNKNOWN)
+OPTAB_NC(sync_new_xor_optab, "sync_new_xor$I$a", UNKNOWN)
+OPTAB_NC(sync_new_nand_optab, "sync_new_nand$I$a", UNKNOWN)
+OPTAB_NC(sync_compare_and_swap_optab, "sync_compare_and_swap$I$a", UNKNOWN)
+OPTAB_NC(sync_lock_test_and_set_optab, "sync_lock_test_and_set$I$a", UNKNOWN)
+
+OPTAB_DC(mov_optab, "mov$a", SET)
+OPTAB_DC(movstrict_optab, "movstrict$a", STRICT_LOW_PART)
+OPTAB_D (movmisalign_optab, "movmisalign$a")
+OPTAB_D (storent_optab, "storent$a")
+OPTAB_D (insv_optab, "insv$a")
+OPTAB_D (extv_optab, "extv$a")
+OPTAB_D (extzv_optab, "extzv$a")
+OPTAB_D (insvmisalign_optab, "insvmisalign$a")
+OPTAB_D (extvmisalign_optab, "extvmisalign$a")
+OPTAB_D (extzvmisalign_optab, "extzvmisalign$a")
+OPTAB_D (push_optab, "push$a1")
+OPTAB_D (reload_in_optab, "reload_in$a")
+OPTAB_D (reload_out_optab, "reload_out$a")
+
+OPTAB_DC(cbranch_optab, "cbranch$a4", COMPARE)
+OPTAB_D (tbranch_eq_optab, "tbranch_eq$a3")
+OPTAB_D (tbranch_ne_optab, "tbranch_ne$a3")
+OPTAB_D (addcc_optab, "add$acc")
+OPTAB_D (negcc_optab, "neg$acc")
+OPTAB_D (notcc_optab, "not$acc")
+OPTAB_D (movcc_optab, "mov$acc")
+OPTAB_D (cond_add_optab, "cond_add$a")
+OPTAB_D (cond_sub_optab, "cond_sub$a")
+OPTAB_D (cond_smul_optab, "cond_mul$a")
+OPTAB_D (cond_sdiv_optab, "cond_div$a")
+OPTAB_D (cond_smod_optab, "cond_mod$a")
+OPTAB_D (cond_udiv_optab, "cond_udiv$a")
+OPTAB_D (cond_umod_optab, "cond_umod$a")
+OPTAB_D (cond_and_optab, "cond_and$a")
+OPTAB_D (cond_ior_optab, "cond_ior$a")
+OPTAB_D (cond_xor_optab, "cond_xor$a")
+OPTAB_D (cond_ashl_optab, "cond_ashl$a")
+OPTAB_D (cond_ashr_optab, "cond_ashr$a")
+OPTAB_D (cond_lshr_optab, "cond_lshr$a")
+OPTAB_D (cond_smin_optab, "cond_smin$a")
+OPTAB_D (cond_smax_optab, "cond_smax$a")
+OPTAB_D (cond_umin_optab, "cond_umin$a")
+OPTAB_D (cond_umax_optab, "cond_umax$a")
+OPTAB_D (cond_fmin_optab, "cond_fmin$a")
+OPTAB_D (cond_fmax_optab, "cond_fmax$a")
+OPTAB_D (cond_fma_optab, "cond_fma$a")
+OPTAB_D (cond_fms_optab, "cond_fms$a")
+OPTAB_D (cond_fnma_optab, "cond_fnma$a")
+OPTAB_D (cond_fnms_optab, "cond_fnms$a")
+OPTAB_D (cond_neg_optab, "cond_neg$a")
+OPTAB_D (cmov_optab, "cmov$a6")
+OPTAB_D (cstore_optab, "cstore$a4")
+OPTAB_D (ctrap_optab, "ctrap$a4")
+OPTAB_D (addv4_optab, "addv$I$a4")
+OPTAB_D (subv4_optab, "subv$I$a4")
+OPTAB_D (mulv4_optab, "mulv$I$a4")
+OPTAB_D (uaddv4_optab, "uaddv$I$a4")
+OPTAB_D (usubv4_optab, "usubv$I$a4")
+OPTAB_D (umulv4_optab, "umulv$I$a4")
+OPTAB_D (negv3_optab, "negv$I$a3")
+OPTAB_D (addptr3_optab, "addptr$a3")
+OPTAB_D (spaceship_optab, "spaceship$a3")
+
+OPTAB_D (smul_highpart_optab, "smul$a3_highpart")
+OPTAB_D (umul_highpart_optab, "umul$a3_highpart")
+
+OPTAB_D (cmpmem_optab, "cmpmem$a")
+OPTAB_D (cmpstr_optab, "cmpstr$a")
+OPTAB_D (cmpstrn_optab, "cmpstrn$a")
+OPTAB_D (cpymem_optab, "cpymem$a")
+OPTAB_D (movmem_optab, "movmem$a")
+OPTAB_D (setmem_optab, "setmem$a")
+OPTAB_D (strlen_optab, "strlen$a")
+OPTAB_D (rawmemchr_optab, "rawmemchr$a")
+
+OPTAB_DC(fma_optab, "fma$a4", FMA)
+OPTAB_D (fms_optab, "fms$a4")
+OPTAB_D (fnma_optab, "fnma$a4")
+OPTAB_D (fnms_optab, "fnms$a4")
+
+OPTAB_D (rint_optab, "rint$a2")
+OPTAB_D (round_optab, "round$a2")
+OPTAB_D (roundeven_optab, "roundeven$a2")
+OPTAB_D (floor_optab, "floor$a2")
+OPTAB_D (ceil_optab, "ceil$a2")
+OPTAB_D (btrunc_optab, "btrunc$a2")
+OPTAB_D (nearbyint_optab, "nearbyint$a2")
+
+OPTAB_D (acos_optab, "acos$a2")
+OPTAB_D (acosh_optab, "acosh$a2")
+OPTAB_D (asin_optab, "asin$a2")
+OPTAB_D (asinh_optab, "asinh$a2")
+OPTAB_D (atan2_optab, "atan2$a3")
+OPTAB_D (atan_optab, "atan$a2")
+OPTAB_D (atanh_optab, "atanh$a2")
+OPTAB_D (copysign_optab, "copysign$F$a3")
+OPTAB_D (xorsign_optab, "xorsign$F$a3")
+OPTAB_D (cadd90_optab, "cadd90$a3")
+OPTAB_D (cadd270_optab, "cadd270$a3")
+OPTAB_D (cmul_optab, "cmul$a3")
+OPTAB_D (cmul_conj_optab, "cmul_conj$a3")
+OPTAB_D (cmla_optab, "cmla$a4")
+OPTAB_D (cmla_conj_optab, "cmla_conj$a4")
+OPTAB_D (cmls_optab, "cmls$a4")
+OPTAB_D (cmls_conj_optab, "cmls_conj$a4")
+OPTAB_D (cos_optab, "cos$a2")
+OPTAB_D (cosh_optab, "cosh$a2")
+OPTAB_D (exp10_optab, "exp10$a2")
+OPTAB_D (exp2_optab, "exp2$a2")
+OPTAB_D (exp_optab, "exp$a2")
+OPTAB_D (expm1_optab, "expm1$a2")
+OPTAB_D (fmod_optab, "fmod$a3")
+OPTAB_D (hypot_optab, "hypot$a3")
+OPTAB_D (ilogb_optab, "ilogb$a2")
+OPTAB_D (isinf_optab, "isinf$a2")
+OPTAB_D (issignaling_optab, "issignaling$a2")
+OPTAB_D (ldexp_optab, "ldexp$a3")
+OPTAB_D (log10_optab, "log10$a2")
+OPTAB_D (log1p_optab, "log1p$a2")
+OPTAB_D (log2_optab, "log2$a2")
+OPTAB_D (log_optab, "log$a2")
+OPTAB_D (logb_optab, "logb$a2")
+OPTAB_D (pow_optab, "pow$a3")
+OPTAB_D (remainder_optab, "remainder$a3")
+OPTAB_D (rsqrt_optab, "rsqrt$a2")
+OPTAB_D (scalb_optab, "scalb$a3")
+OPTAB_D (signbit_optab, "signbit$F$a2")
+OPTAB_D (significand_optab, "significand$a2")
+OPTAB_D (sin_optab, "sin$a2")
+OPTAB_D (sincos_optab, "sincos$a3")
+OPTAB_D (sinh_optab, "sinh$a2")
+OPTAB_D (tan_optab, "tan$a2")
+OPTAB_D (tanh_optab, "tanh$a2")
+
+OPTAB_D (fegetround_optab, "fegetround$a")
+OPTAB_D (feclearexcept_optab, "feclearexcept$a")
+OPTAB_D (feraiseexcept_optab, "feraiseexcept$a")
+
+/* C99 implementations of fmax/fmin. */
+OPTAB_D (fmax_optab, "fmax$a3")
+OPTAB_D (fmin_optab, "fmin$a3")
+
+/* Vector reduction to a scalar. */
+OPTAB_D (reduc_fmax_scal_optab, "reduc_fmax_scal_$a")
+OPTAB_D (reduc_fmin_scal_optab, "reduc_fmin_scal_$a")
+OPTAB_D (reduc_smax_scal_optab, "reduc_smax_scal_$a")
+OPTAB_D (reduc_smin_scal_optab, "reduc_smin_scal_$a")
+OPTAB_D (reduc_plus_scal_optab, "reduc_plus_scal_$a")
+OPTAB_D (reduc_umax_scal_optab, "reduc_umax_scal_$a")
+OPTAB_D (reduc_umin_scal_optab, "reduc_umin_scal_$a")
+OPTAB_D (reduc_and_scal_optab, "reduc_and_scal_$a")
+OPTAB_D (reduc_ior_scal_optab, "reduc_ior_scal_$a")
+OPTAB_D (reduc_xor_scal_optab, "reduc_xor_scal_$a")
+OPTAB_D (fold_left_plus_optab, "fold_left_plus_$a")
+OPTAB_D (mask_fold_left_plus_optab, "mask_fold_left_plus_$a")
+
+OPTAB_D (extract_last_optab, "extract_last_$a")
+OPTAB_D (fold_extract_last_optab, "fold_extract_last_$a")
+
+OPTAB_D (savg_floor_optab, "avg$a3_floor")
+OPTAB_D (uavg_floor_optab, "uavg$a3_floor")
+OPTAB_D (savg_ceil_optab, "avg$a3_ceil")
+OPTAB_D (uavg_ceil_optab, "uavg$a3_ceil")
+OPTAB_D (sdot_prod_optab, "sdot_prod$I$a")
+OPTAB_D (ssum_widen_optab, "widen_ssum$I$a3")
+OPTAB_D (udot_prod_optab, "udot_prod$I$a")
+OPTAB_D (usdot_prod_optab, "usdot_prod$I$a")
+OPTAB_D (usum_widen_optab, "widen_usum$I$a3")
+OPTAB_D (usad_optab, "usad$I$a")
+OPTAB_D (ssad_optab, "ssad$I$a")
+OPTAB_D (smulhs_optab, "smulhs$a3")
+OPTAB_D (smulhrs_optab, "smulhrs$a3")
+OPTAB_D (umulhs_optab, "umulhs$a3")
+OPTAB_D (umulhrs_optab, "umulhrs$a3")
+OPTAB_D (sdiv_pow2_optab, "sdiv_pow2$a3")
+OPTAB_D (vec_pack_sfix_trunc_optab, "vec_pack_sfix_trunc_$a")
+OPTAB_D (vec_pack_ssat_optab, "vec_pack_ssat_$a")
+OPTAB_D (vec_pack_trunc_optab, "vec_pack_trunc_$a")
+OPTAB_D (vec_pack_ufix_trunc_optab, "vec_pack_ufix_trunc_$a")
+OPTAB_D (vec_pack_sbool_trunc_optab, "vec_pack_sbool_trunc_$a")
+OPTAB_D (vec_pack_usat_optab, "vec_pack_usat_$a")
+OPTAB_D (vec_packs_float_optab, "vec_packs_float_$a")
+OPTAB_D (vec_packu_float_optab, "vec_packu_float_$a")
+OPTAB_D (vec_perm_optab, "vec_perm$a")
+OPTAB_D (vec_realign_load_optab, "vec_realign_load_$a")
+OPTAB_D (vec_set_optab, "vec_set$a")
+OPTAB_D (vec_shl_optab, "vec_shl_$a")
+OPTAB_D (vec_shr_optab, "vec_shr_$a")
+OPTAB_D (vec_unpack_sfix_trunc_hi_optab, "vec_unpack_sfix_trunc_hi_$a")
+OPTAB_D (vec_unpack_sfix_trunc_lo_optab, "vec_unpack_sfix_trunc_lo_$a")
+OPTAB_D (vec_unpack_ufix_trunc_hi_optab, "vec_unpack_ufix_trunc_hi_$a")
+OPTAB_D (vec_unpack_ufix_trunc_lo_optab, "vec_unpack_ufix_trunc_lo_$a")
+OPTAB_D (vec_unpacks_float_hi_optab, "vec_unpacks_float_hi_$a")
+OPTAB_D (vec_unpacks_float_lo_optab, "vec_unpacks_float_lo_$a")
+OPTAB_D (vec_unpacks_hi_optab, "vec_unpacks_hi_$a")
+OPTAB_D (vec_unpacks_lo_optab, "vec_unpacks_lo_$a")
+OPTAB_D (vec_unpacks_sbool_hi_optab, "vec_unpacks_sbool_hi_$a")
+OPTAB_D (vec_unpacks_sbool_lo_optab, "vec_unpacks_sbool_lo_$a")
+OPTAB_D (vec_unpacku_float_hi_optab, "vec_unpacku_float_hi_$a")
+OPTAB_D (vec_unpacku_float_lo_optab, "vec_unpacku_float_lo_$a")
+OPTAB_D (vec_unpacku_hi_optab, "vec_unpacku_hi_$a")
+OPTAB_D (vec_unpacku_lo_optab, "vec_unpacku_lo_$a")
+OPTAB_D (vec_widen_smult_even_optab, "vec_widen_smult_even_$a")
+OPTAB_D (vec_widen_smult_hi_optab, "vec_widen_smult_hi_$a")
+OPTAB_D (vec_widen_smult_lo_optab, "vec_widen_smult_lo_$a")
+OPTAB_D (vec_widen_smult_odd_optab, "vec_widen_smult_odd_$a")
+OPTAB_D (vec_widen_ssubl_hi_optab, "vec_widen_ssubl_hi_$a")
+OPTAB_D (vec_widen_ssubl_lo_optab, "vec_widen_ssubl_lo_$a")
+OPTAB_D (vec_widen_saddl_hi_optab, "vec_widen_saddl_hi_$a")
+OPTAB_D (vec_widen_saddl_lo_optab, "vec_widen_saddl_lo_$a")
+OPTAB_D (vec_widen_sshiftl_hi_optab, "vec_widen_sshiftl_hi_$a")
+OPTAB_D (vec_widen_sshiftl_lo_optab, "vec_widen_sshiftl_lo_$a")
+OPTAB_D (vec_widen_umult_even_optab, "vec_widen_umult_even_$a")
+OPTAB_D (vec_widen_umult_hi_optab, "vec_widen_umult_hi_$a")
+OPTAB_D (vec_widen_umult_lo_optab, "vec_widen_umult_lo_$a")
+OPTAB_D (vec_widen_umult_odd_optab, "vec_widen_umult_odd_$a")
+OPTAB_D (vec_widen_ushiftl_hi_optab, "vec_widen_ushiftl_hi_$a")
+OPTAB_D (vec_widen_ushiftl_lo_optab, "vec_widen_ushiftl_lo_$a")
+OPTAB_D (vec_widen_usubl_hi_optab, "vec_widen_usubl_hi_$a")
+OPTAB_D (vec_widen_usubl_lo_optab, "vec_widen_usubl_lo_$a")
+OPTAB_D (vec_widen_uaddl_hi_optab, "vec_widen_uaddl_hi_$a")
+OPTAB_D (vec_widen_uaddl_lo_optab, "vec_widen_uaddl_lo_$a")
+OPTAB_D (vec_addsub_optab, "vec_addsub$a3")
+OPTAB_D (vec_fmaddsub_optab, "vec_fmaddsub$a4")
+OPTAB_D (vec_fmsubadd_optab, "vec_fmsubadd$a4")
+
+OPTAB_D (sync_add_optab, "sync_add$I$a")
+OPTAB_D (sync_and_optab, "sync_and$I$a")
+OPTAB_D (sync_ior_optab, "sync_ior$I$a")
+OPTAB_D (sync_lock_release_optab, "sync_lock_release$I$a")
+OPTAB_D (sync_nand_optab, "sync_nand$I$a")
+OPTAB_D (sync_sub_optab, "sync_sub$I$a")
+OPTAB_D (sync_xor_optab, "sync_xor$I$a")
+
+OPTAB_D (atomic_add_fetch_optab, "atomic_add_fetch$I$a")
+OPTAB_D (atomic_add_optab, "atomic_add$I$a")
+OPTAB_D (atomic_and_fetch_optab, "atomic_and_fetch$I$a")
+OPTAB_D (atomic_and_optab, "atomic_and$I$a")
+OPTAB_D (atomic_bit_test_and_set_optab, "atomic_bit_test_and_set$I$a")
+OPTAB_D (atomic_bit_test_and_complement_optab, "atomic_bit_test_and_complement$I$a")
+OPTAB_D (atomic_bit_test_and_reset_optab, "atomic_bit_test_and_reset$I$a")
+OPTAB_D (atomic_compare_and_swap_optab, "atomic_compare_and_swap$I$a")
+OPTAB_D (atomic_exchange_optab, "atomic_exchange$I$a")
+OPTAB_D (atomic_fetch_add_optab, "atomic_fetch_add$I$a")
+OPTAB_D (atomic_fetch_and_optab, "atomic_fetch_and$I$a")
+OPTAB_D (atomic_fetch_nand_optab, "atomic_fetch_nand$I$a")
+OPTAB_D (atomic_fetch_or_optab, "atomic_fetch_or$I$a")
+OPTAB_D (atomic_fetch_sub_optab, "atomic_fetch_sub$I$a")
+OPTAB_D (atomic_fetch_xor_optab, "atomic_fetch_xor$I$a")
+OPTAB_D (atomic_load_optab, "atomic_load$I$a")
+OPTAB_D (atomic_nand_fetch_optab, "atomic_nand_fetch$I$a")
+OPTAB_D (atomic_nand_optab, "atomic_nand$I$a")
+OPTAB_D (atomic_or_fetch_optab, "atomic_or_fetch$I$a")
+OPTAB_D (atomic_or_optab, "atomic_or$I$a")
+OPTAB_D (atomic_store_optab, "atomic_store$I$a")
+OPTAB_D (atomic_sub_fetch_optab, "atomic_sub_fetch$I$a")
+OPTAB_D (atomic_sub_optab, "atomic_sub$I$a")
+OPTAB_D (atomic_xor_fetch_optab, "atomic_xor_fetch$I$a")
+OPTAB_D (atomic_xor_optab, "atomic_xor$I$a")
+OPTAB_D (atomic_add_fetch_cmp_0_optab, "atomic_add_fetch_cmp_0$I$a")
+OPTAB_D (atomic_sub_fetch_cmp_0_optab, "atomic_sub_fetch_cmp_0$I$a")
+OPTAB_D (atomic_and_fetch_cmp_0_optab, "atomic_and_fetch_cmp_0$I$a")
+OPTAB_D (atomic_or_fetch_cmp_0_optab, "atomic_or_fetch_cmp_0$I$a")
+OPTAB_D (atomic_xor_fetch_cmp_0_optab, "atomic_xor_fetch_cmp_0$I$a")
+
+OPTAB_D (get_thread_pointer_optab, "get_thread_pointer$I$a")
+OPTAB_D (set_thread_pointer_optab, "set_thread_pointer$I$a")
+
+OPTAB_D (check_raw_ptrs_optab, "check_raw_ptrs$a")
+OPTAB_D (check_war_ptrs_optab, "check_war_ptrs$a")
+
+OPTAB_DC (vec_duplicate_optab, "vec_duplicate$a", VEC_DUPLICATE)
+OPTAB_DC (vec_series_optab, "vec_series$a", VEC_SERIES)
+OPTAB_D (vec_shl_insert_optab, "vec_shl_insert_$a")
+OPTAB_D (len_load_optab, "len_load_$a")
+OPTAB_D (len_store_optab, "len_store_$a")
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs.h
new file mode 100644
index 0000000..29ccbe9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optabs.h
@@ -0,0 +1,387 @@
+/* Definitions for code generation pass of GNU compiler.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OPTABS_H
+#define GCC_OPTABS_H
+
+#include "optabs-query.h"
+#include "optabs-libfuncs.h"
+#include "vec-perm-indices.h"
+
+/* Generate code for a widening multiply. */
+extern rtx expand_widening_mult (machine_mode, rtx, rtx, rtx, int, optab);
+
+/* Describes the type of an expand_operand. Each value is associated
+ with a create_*_operand function; see the comments above those
+ functions for details. */
+enum expand_operand_type {
+ EXPAND_FIXED,
+ EXPAND_OUTPUT,
+ EXPAND_INPUT,
+ EXPAND_CONVERT_TO,
+ EXPAND_CONVERT_FROM,
+ EXPAND_ADDRESS,
+ EXPAND_INTEGER
+};
+
+/* Information about an operand for instruction expansion. */
+class expand_operand {
+public:
+ /* The type of operand. */
+ ENUM_BITFIELD (expand_operand_type) type : 8;
+
+ /* True if any conversion should treat VALUE as being unsigned
+ rather than signed. Only meaningful for certain types. */
+ unsigned int unsigned_p : 1;
+
+ /* True if this is the target operand. */
+ unsigned int target : 1;
+
+ /* Unused; available for future use. */
+ unsigned int unused : 6;
+
+ /* The mode passed to the convert_*_operand function. It has a
+ type-dependent meaning. */
+ ENUM_BITFIELD (machine_mode) mode : 16;
+
+ /* The value of the operand. */
+ rtx value;
+
+ /* The value of an EXPAND_INTEGER operand. */
+ poly_int64 int_value;
+};
+
+/* Initialize OP with the given fields. Initialize the other fields
+ to their default values. */
+
+inline void
+create_expand_operand (class expand_operand *op,
+ enum expand_operand_type type,
+ rtx value, machine_mode mode,
+ bool unsigned_p, poly_int64 int_value = 0)
+{
+ op->type = type;
+ op->unsigned_p = unsigned_p;
+ op->target = 0;
+ op->unused = 0;
+ op->mode = mode;
+ op->value = value;
+ op->int_value = int_value;
+}
+
+/* Make OP describe an operand that must use rtx X, even if X is volatile. */
+
+inline void
+create_fixed_operand (class expand_operand *op, rtx x)
+{
+ create_expand_operand (op, EXPAND_FIXED, x, VOIDmode, false);
+}
+
+/* Make OP describe an output operand that must have mode MODE.
+ X, if nonnull, is a suggestion for where the output should be stored.
+ It is OK for X to be inconsistent with MODE, although it will just
+ be ignored in that case. */
+
+inline void
+create_output_operand (class expand_operand *op, rtx x,
+ machine_mode mode)
+{
+ create_expand_operand (op, EXPAND_OUTPUT, x, mode, false);
+}
+
+/* Make OP describe an input operand that must have mode MODE and
+ value VALUE; MODE cannot be VOIDmode. The backend may request that
+ VALUE be copied into a different kind of rtx before being passed
+ as an operand. */
+
+inline void
+create_input_operand (class expand_operand *op, rtx value,
+ machine_mode mode)
+{
+ create_expand_operand (op, EXPAND_INPUT, value, mode, false);
+}
+
+/* Like create_input_operand, except that VALUE must first be converted
+ to mode MODE. UNSIGNED_P says whether VALUE is unsigned. */
+
+inline void
+create_convert_operand_to (class expand_operand *op, rtx value,
+ machine_mode mode, bool unsigned_p)
+{
+ create_expand_operand (op, EXPAND_CONVERT_TO, value, mode, unsigned_p);
+}
+
+/* Make OP describe an input operand that should have the same value
+ as VALUE, after any mode conversion that the backend might request.
+ If VALUE is a CONST_INT, it should be treated as having mode MODE.
+ UNSIGNED_P says whether VALUE is unsigned.
+
+ The conversion of VALUE can include a combination of numerical
+ conversion (as for convert_modes) and duplicating a scalar to fill
+ a vector (if VALUE is a scalar but the operand is a vector). */
+
+inline void
+create_convert_operand_from (class expand_operand *op, rtx value,
+ machine_mode mode, bool unsigned_p)
+{
+ create_expand_operand (op, EXPAND_CONVERT_FROM, value, mode, unsigned_p);
+}
+
+
+/* Make OP describe an input Pmode address operand. VALUE is the value
+ of the address, but it may need to be converted to Pmode first. */
+
+inline void
+create_address_operand (class expand_operand *op, rtx value)
+{
+ create_expand_operand (op, EXPAND_ADDRESS, value, Pmode, false);
+}
+
+extern void create_integer_operand (class expand_operand *, poly_int64);
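+
+/* An illustrative sketch (not part of the upstream header) of the usual
+   idiom for these helpers; expand_insn is declared elsewhere in upstream
+   optabs.h and is assumed here:
+
+     class expand_operand ops[2];
+     create_output_operand (&ops[0], target, mode);
+     create_input_operand (&ops[1], src, mode);
+     expand_insn (icode, 2, ops);  // emit the instruction for ICODE
+*/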
+
+/* Passed to expand_simple_binop and expand_binop to say which options
+ to try to use if the requested operation can't be open-coded on the
+ requisite mode. Either OPTAB_LIB or OPTAB_LIB_WIDEN says try using
+ a library call. Either OPTAB_WIDEN or OPTAB_LIB_WIDEN says try
+ using a wider mode. OPTAB_MUST_WIDEN says try widening and don't
+ try anything else. */
+
+enum optab_methods
+{
+ OPTAB_DIRECT,
+ OPTAB_LIB,
+ OPTAB_WIDEN,
+ OPTAB_LIB_WIDEN,
+ OPTAB_MUST_WIDEN
+};
+
+extern rtx expand_widen_pattern_expr (struct separate_ops *, rtx , rtx , rtx,
+ rtx, int);
+extern rtx expand_ternary_op (machine_mode mode, optab ternary_optab,
+ rtx op0, rtx op1, rtx op2, rtx target,
+ int unsignedp);
+extern rtx simplify_expand_binop (machine_mode mode, optab binoptab,
+ rtx op0, rtx op1, rtx target, int unsignedp,
+ enum optab_methods methods);
+extern bool force_expand_binop (machine_mode, optab, rtx, rtx, rtx, int,
+ enum optab_methods);
+extern rtx expand_vector_broadcast (machine_mode, rtx);
+
+extern rtx expand_doubleword_divmod (machine_mode, rtx, rtx, rtx *, bool);
+
+/* Generate code for a simple binary or unary operation. "Simple" in
+ this case means "can be unambiguously described by a (mode, code)
+ pair and mapped to a single optab." */
+extern rtx expand_simple_binop (machine_mode, enum rtx_code, rtx,
+ rtx, rtx, int, enum optab_methods);
+
+/* Expand a binary operation given optab and rtx operands. */
+extern rtx expand_binop (machine_mode, optab, rtx, rtx, rtx, int,
+ enum optab_methods);
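+
+/* For instance (an illustrative sketch, not part of the upstream header),
+   emitting X + Y in MODE while allowing a libcall or a wider mode:
+
+     rtx sum = expand_simple_binop (mode, PLUS, x, y,
+                                    NULL_RTX, 0, OPTAB_LIB_WIDEN);
+*/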
+
+/* Expand a binary operation with both signed and unsigned forms. */
+extern rtx sign_expand_binop (machine_mode, optab, optab, rtx, rtx,
+ rtx, int, enum optab_methods);
+
+/* Generate code to perform an operation on one operand with two results. */
+extern int expand_twoval_unop (optab, rtx, rtx, rtx, int);
+
+/* Generate code to perform an operation on two operands with two results. */
+extern int expand_twoval_binop (optab, rtx, rtx, rtx, rtx, int);
+
+/* Generate code to perform an operation on two operands with two
+ results, using a library function. */
+extern bool expand_twoval_binop_libfunc (optab, rtx, rtx, rtx, rtx,
+ enum rtx_code);
+extern rtx expand_simple_unop (machine_mode, enum rtx_code, rtx, rtx,
+ int);
+
+/* Expand a unary arithmetic operation given an optab and an rtx operand. */
+extern rtx expand_unop (machine_mode, optab, rtx, rtx, int);
+
+/* Expand the absolute value operation. */
+extern rtx expand_abs_nojump (machine_mode, rtx, rtx, int);
+extern rtx expand_abs (machine_mode, rtx, rtx, int, int);
+
+/* Expand the one's complement absolute value operation. */
+extern rtx expand_one_cmpl_abs_nojump (machine_mode, rtx, rtx);
+
+/* Expand the copysign operation. */
+extern rtx expand_copysign (rtx, rtx, rtx);
+/* Generate an instruction with a given INSN_CODE with an output and
+ an input. */
+extern bool maybe_emit_unop_insn (enum insn_code, rtx, rtx, enum rtx_code);
+extern void emit_unop_insn (enum insn_code, rtx, rtx, enum rtx_code);
+
+/* Emit code to make a call to a constant function or a library call. */
+extern void emit_libcall_block (rtx_insn *, rtx, rtx, rtx);
+
+/* The various uses that a comparison can have; used by can_compare_p:
+ jumps, conditional moves, store flag operations. */
+enum can_compare_purpose
+{
+ ccp_jump,
+ ccp_cmov,
+ ccp_store_flag
+};
+
+/* Nonzero if a compare of mode MODE can be done straightforwardly
+ (without splitting it into pieces). */
+extern int can_compare_p (enum rtx_code, machine_mode,
+ enum can_compare_purpose);
+
+/* Return whether the backend can emit a vector comparison (vec_cmp/vec_cmpu)
+ for code CODE, comparing operands of mode VALUE_MODE and producing a result
+ with MASK_MODE. */
+extern bool can_vec_cmp_compare_p (enum rtx_code, machine_mode, machine_mode);
+
+/* Return whether the backend can emit a vector comparison (vcond/vcondu) for
+ code CODE, comparing operands of mode CMP_OP_MODE and producing a result
+ with VALUE_MODE. */
+extern bool can_vcond_compare_p (enum rtx_code, machine_mode, machine_mode);
+
+/* Return whether the backend can emit vector set instructions for inserting
+ an element into a vector at a variable index position. */
+extern bool can_vec_set_var_idx_p (machine_mode);
+
+extern rtx prepare_operand (enum insn_code, rtx, int, machine_mode,
+ machine_mode, int);
+/* Emit a pair of rtl insns to compare two rtx's and to jump
+ to a label if the comparison is true. */
+extern void emit_cmp_and_jump_insns (rtx, rtx, enum rtx_code, rtx,
+ machine_mode, int, rtx,
+ profile_probability prob
+ = profile_probability::uninitialized ());
+extern void emit_cmp_and_jump_insns (rtx, rtx, enum rtx_code, rtx,
+ machine_mode, int, tree, rtx,
+ profile_probability prob
+ = profile_probability::uninitialized ());
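+
+/* Illustrative sketch, not part of the original header: to branch to a
+ placeholder LABEL when OP0 < OP1 as signed SImode values, a caller
+ would write
+
+ emit_cmp_and_jump_insns (op0, op1, LT, NULL_RTX, SImode, 0, label);
+
+ where the 0 is UNSIGNEDP and NULL_RTX is the size operand, which only
+ matters for block compares. */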
+
+/* Generate code to indirectly jump to a location given in the rtx LOC. */
+extern void emit_indirect_jump (rtx);
+
+#include "insn-config.h"
+
+#ifndef GCC_INSN_CONFIG_H
+#error "insn-config.h must be included before optabs.h"
+#endif
+
+/* Emit a conditional move operation. */
+rtx emit_conditional_move (rtx, rtx_comparison, rtx, rtx, machine_mode, int);
+rtx emit_conditional_move (rtx, rtx, rtx, rtx, rtx, machine_mode);
+
+/* Emit a conditional negate or bitwise complement operation. */
+rtx emit_conditional_neg_or_complement (rtx, rtx_code, machine_mode, rtx,
+ rtx, rtx);
+
+rtx emit_conditional_add (rtx, enum rtx_code, rtx, rtx, machine_mode,
+ rtx, rtx, machine_mode, int);
+
+/* Create but don't emit one rtl instruction to add one rtx into another.
+ Modes must match; operands must meet the operation's predicates.
+ Likewise for subtraction and for just copying. */
+extern rtx_insn *gen_add2_insn (rtx, rtx);
+extern rtx_insn *gen_add3_insn (rtx, rtx, rtx);
+extern int have_add2_insn (rtx, rtx);
+extern rtx_insn *gen_addptr3_insn (rtx, rtx, rtx);
+extern int have_addptr3_insn (rtx, rtx, rtx);
+extern rtx_insn *gen_sub2_insn (rtx, rtx);
+extern rtx_insn *gen_sub3_insn (rtx, rtx, rtx);
+extern int have_sub2_insn (rtx, rtx);
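+
+/* Illustrative sketch, not part of the original header: gen_add3_insn
+ returns NULL when no suitable pattern exists, so a caller typically
+ guards the emission and falls back to the generic expander (DEST, SRC0
+ and SRC1 are placeholders):
+
+ if (rtx_insn *insn = gen_add3_insn (dest, src0, src1))
+ emit_insn (insn);
+ else
+ expand_binop (GET_MODE (dest), add_optab, src0, src1,
+ dest, 1, OPTAB_LIB_WIDEN); */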
+
+/* Generate the body of an insn to extend Y (with mode MFROM)
+ into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
+extern rtx_insn *gen_extend_insn (rtx, rtx, machine_mode, machine_mode, int);
+
+/* Generate code for a FLOAT_EXPR. */
+extern void expand_float (rtx, rtx, int);
+
+/* Generate code for a FIX_EXPR. */
+extern void expand_fix (rtx, rtx, int);
+
+/* Generate code for a FIXED_CONVERT_EXPR. */
+extern void expand_fixed_convert (rtx, rtx, int, int);
+
+/* Generate code for float to integral conversion. */
+extern bool expand_sfix_optab (rtx, rtx, convert_optab);
+
+/* Report whether the machine description contains an insn which can
+ perform the operation described by CODE and MODE. */
+extern int have_insn_for (enum rtx_code, machine_mode);
+
+/* Generate a conditional trap instruction. */
+extern rtx_insn *gen_cond_trap (enum rtx_code, rtx, rtx, rtx);
+
+/* Generate code for VEC_PERM_EXPR. */
+extern rtx expand_vec_perm_var (machine_mode, rtx, rtx, rtx, rtx);
+extern rtx expand_vec_perm_const (machine_mode, rtx, rtx,
+ const vec_perm_builder &, machine_mode, rtx);
+
+/* Generate code for vector comparison. */
+extern rtx expand_vec_cmp_expr (tree, tree, rtx);
+
+/* Generate code for VEC_SERIES_EXPR. */
+extern rtx expand_vec_series_expr (machine_mode, rtx, rtx, rtx);
+
+/* Generate code for MULT_HIGHPART_EXPR. */
+extern rtx expand_mult_highpart (machine_mode, rtx, rtx, rtx, bool);
+
+extern rtx expand_sync_lock_test_and_set (rtx, rtx, rtx);
+extern rtx expand_atomic_test_and_set (rtx, rtx, enum memmodel);
+extern rtx expand_atomic_exchange (rtx, rtx, rtx, enum memmodel);
+extern bool expand_atomic_compare_and_swap (rtx *, rtx *, rtx, rtx, rtx, bool,
+ enum memmodel, enum memmodel);
+/* Generate memory barriers. */
+extern void expand_mem_thread_fence (enum memmodel);
+extern void expand_mem_signal_fence (enum memmodel);
+
+rtx expand_atomic_load (rtx, rtx, enum memmodel);
+rtx expand_atomic_store (rtx, rtx, enum memmodel, bool);
+rtx expand_atomic_fetch_op (rtx, rtx, rtx, enum rtx_code, enum memmodel,
+ bool);
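+
+/* Illustrative sketch, not part of the original header: an acquire load
+ followed by a release store through the same placeholder MEM could be
+ expanded as
+
+ rtx val = expand_atomic_load (NULL_RTX, mem, MEMMODEL_ACQUIRE);
+ expand_atomic_store (mem, val, MEMMODEL_RELEASE, false);
+
+ where the final bool says whether __sync_lock_release may be used as a
+ fallback. */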
+
+extern void expand_asm_reg_clobber_mem_blockage (HARD_REG_SET);
+
+extern bool insn_operand_matches (enum insn_code icode, unsigned int opno,
+ rtx operand);
+extern bool valid_multiword_target_p (rtx);
+extern void create_convert_operand_from_type (class expand_operand *op,
+ rtx value, tree type);
+extern bool maybe_legitimize_operands (enum insn_code icode,
+ unsigned int opno, unsigned int nops,
+ class expand_operand *ops);
+extern rtx_insn *maybe_gen_insn (enum insn_code icode, unsigned int nops,
+ class expand_operand *ops);
+extern bool maybe_expand_insn (enum insn_code icode, unsigned int nops,
+ class expand_operand *ops);
+extern bool maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
+ class expand_operand *ops);
+extern void expand_insn (enum insn_code icode, unsigned int nops,
+ class expand_operand *ops);
+extern void expand_jump_insn (enum insn_code icode, unsigned int nops,
+ class expand_operand *ops);
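+
+/* Illustrative sketch, not part of the original header: the usual pattern
+ fills an array of expand_operands with the create_*_operand helpers
+ found earlier in this header and hands it to expand_insn, or to
+ maybe_expand_insn when failure is tolerable (TARGET, SRC, MODE and
+ ICODE are placeholders):
+
+ class expand_operand ops[2];
+ create_output_operand (&ops[0], target, mode);
+ create_input_operand (&ops[1], src, mode);
+ expand_insn (icode, 2, ops); */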
+
+extern enum rtx_code get_rtx_code_1 (enum tree_code tcode, bool unsignedp);
+extern enum rtx_code get_rtx_code (enum tree_code tcode, bool unsignedp);
+extern rtx vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
+ tree t_op0, tree t_op1, bool unsignedp,
+ enum insn_code icode, unsigned int opno);
+
+
+#endif /* GCC_OPTABS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optinfo-emit-json.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optinfo-emit-json.h
new file mode 100644
index 0000000..62b0619
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optinfo-emit-json.h
@@ -0,0 +1,60 @@
+/* Emit optimization information as JSON files.
+ Copyright (C) 2018-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OPTINFO_EMIT_JSON_H
+#define GCC_OPTINFO_EMIT_JSON_H
+
+#include "json.h"
+
+class optinfo;
+
+/* A class for writing out optimization records in JSON format. */
+
+class optrecord_json_writer
+{
+public:
+ optrecord_json_writer ();
+ ~optrecord_json_writer ();
+ void write () const;
+ void add_record (const optinfo *optinfo);
+ void pop_scope ();
+
+ void add_record (json::object *obj);
+ json::object *impl_location_to_json (dump_impl_location_t loc);
+ json::object *location_to_json (location_t loc);
+ json::object *profile_count_to_json (profile_count count);
+ json::string *get_id_value_for_pass (opt_pass *pass);
+ json::object *pass_to_json (opt_pass *pass);
+ json::value *inlining_chain_to_json (location_t loc);
+ json::object *optinfo_to_json (const optinfo *optinfo);
+ void add_pass_list (json::array *arr, opt_pass *pass);
+
+ private:
+ /* The root value for the JSON file.
+ Currently the JSON values are stored in memory, and flushed when the
+ compiler exits. It would probably be better to simply write out
+ the JSON as we go. */
+ json::array *m_root_tuple;
+
+ /* The currently open scopes, for expressing nested optimization records. */
+ auto_vec<json::array *> m_scopes;
+};
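+
+/* Illustrative sketch, not part of the original header: the dump machinery
+ owns a single writer, feeds it records as optinfo instances are emitted,
+ and flushes everything on exit (INFO is a placeholder):
+
+ optrecord_json_writer writer;
+ writer.add_record (info);
+ writer.write (); */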
+
+#endif /* #ifndef GCC_OPTINFO_EMIT_JSON_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optinfo.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optinfo.h
new file mode 100644
index 0000000..185c316
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/optinfo.h
@@ -0,0 +1,170 @@
+/* Optimization information.
+ Copyright (C) 2018-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OPTINFO_H
+#define GCC_OPTINFO_H
+
+/* An "optinfo" is a bundle of information describing part of an
+ optimization, which can be emitted to zero or more of several
+ destinations, such as:
+
+ * saved to a file as an "optimization record"
+
+ They are generated in response to calls to the "dump_*" API in
+ dumpfile.h; repeated calls to the "dump_*" API are consolidated
+ into a pending optinfo instance, with a "dump_*_loc" starting a new
+ optinfo instance.
+
+ The data sent to the dump calls are captured within the pending optinfo
+ instance as a sequence of optinfo_items. For example, given:
+
+ if (dump_enabled_p ())
+ {
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "not vectorized: live stmt not supported: ");
+ dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+ }
+
+ the "dump_printf_loc" call begins a new optinfo containing two items:
+ (1) a text item containing "not vectorized: live stmt not supported: "
+ (2) a gimple item for "stmt"
+
+ Dump destinations are thus able to access rich metadata about the
+ items when the optinfo is emitted to them, rather than just having plain
+ text. For example, when saving the above optinfo to a file as an
+ "optimization record", the record could capture the source location of
+ "stmt" above, rather than just its textual form.
+
+ The currently pending optinfo is emitted and deleted:
+ * each time a "dump_*_loc" call occurs (which starts the next optinfo), or
+ * when the dump files are changed (at the end of a pass)
+
+ Dumping to an optinfo instance is non-trivial (due to building optinfo_item
+ instances), so all usage should be guarded by
+
+ if (optinfo_enabled_p ())
+
+ which is off by default. */
+
+
+/* Forward decls. */
+class opt_pass;
+class optinfo_item;
+
+/* Return true if any of the active optinfo destinations make use
+ of inlining information.
+ (If true, then the information is preserved.) */
+
+extern bool optinfo_wants_inlining_info_p ();
+
+/* The various kinds of optinfo. */
+
+enum optinfo_kind
+{
+ OPTINFO_KIND_SUCCESS,
+ OPTINFO_KIND_FAILURE,
+ OPTINFO_KIND_NOTE,
+ OPTINFO_KIND_SCOPE
+};
+
+extern const char *optinfo_kind_to_string (enum optinfo_kind kind);
+
+class dump_context;
+
+/* A bundle of information describing part of an optimization. */
+
+class optinfo
+{
+ friend class dump_context;
+
+ public:
+ optinfo (const dump_location_t &loc,
+ enum optinfo_kind kind,
+ opt_pass *pass)
+ : m_loc (loc), m_kind (kind), m_pass (pass), m_items ()
+ {}
+ ~optinfo ();
+
+ const dump_location_t &
+ get_dump_location () const { return m_loc; }
+
+ const dump_user_location_t &
+ get_user_location () const { return m_loc.get_user_location (); }
+
+ const dump_impl_location_t &
+ get_impl_location () const { return m_loc.get_impl_location (); }
+
+ enum optinfo_kind get_kind () const { return m_kind; }
+ opt_pass *get_pass () const { return m_pass; }
+ unsigned int num_items () const { return m_items.length (); }
+ const optinfo_item *get_item (unsigned int i) const { return m_items[i]; }
+
+ location_t get_location_t () const { return m_loc.get_location_t (); }
+ profile_count get_count () const { return m_loc.get_count (); }
+
+ void add_item (optinfo_item *item);
+
+ void emit_for_opt_problem () const;
+
+ private:
+ /* Pre-canned ways of manipulating the optinfo, for use by friend class
+ dump_context. */
+ void handle_dump_file_kind (dump_flags_t);
+
+ private:
+ dump_location_t m_loc;
+ enum optinfo_kind m_kind;
+ opt_pass *m_pass;
+ auto_vec <optinfo_item *> m_items;
+};
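+
+/* Illustrative sketch, not part of the original header: a destination can
+ walk the items of an emitted optinfo like so (INFO is a placeholder):
+
+ for (unsigned i = 0; i < info->num_items (); i++)
+ fputs (info->get_item (i)->get_text (), stderr); */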
+
+/* An enum for discriminating between different kinds of optinfo_item. */
+
+enum optinfo_item_kind
+{
+ OPTINFO_ITEM_KIND_TEXT,
+ OPTINFO_ITEM_KIND_TREE,
+ OPTINFO_ITEM_KIND_GIMPLE,
+ OPTINFO_ITEM_KIND_SYMTAB_NODE
+};
+
+/* An item within an optinfo. */
+
+class optinfo_item
+{
+ public:
+ optinfo_item (enum optinfo_item_kind kind, location_t location,
+ char *text);
+ ~optinfo_item ();
+
+ enum optinfo_item_kind get_kind () const { return m_kind; }
+ location_t get_location () const { return m_location; }
+ const char *get_text () const { return m_text; }
+
+ private:
+ /* Metadata (e.g. for optimization records). */
+ enum optinfo_item_kind m_kind;
+ location_t m_location;
+
+ /* The textual form of the item, owned by the item. */
+ char *m_text;
+};
+
+#endif /* #ifndef GCC_OPTINFO_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/options.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/options.h
new file mode 100644
index 0000000..7abe4cd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/options.h
@@ -0,0 +1,11279 @@
+/* This file is auto-generated by opth-gen.awk. */
+
+#ifndef OPTIONS_H
+#define OPTIONS_H
+
+#include "flag-types.h"
+
+#include "config/arm/aarch-common.h"
+#include "config/arm/arm-opts.h"
+
+#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+#ifndef GENERATOR_FILE
+#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS)
+struct GTY(()) gcc_options
+#else
+struct gcc_options
+#endif
+{
+#endif
+#ifdef GENERATOR_FILE
+extern enum aarch_function_type aarch_ra_sign_scope;
+#else
+ enum aarch_function_type x_aarch_ra_sign_scope;
+#define aarch_ra_sign_scope global_options.x_aarch_ra_sign_scope
+#endif
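+/* Illustrative note, not emitted by opth-gen.awk: every entry below
+ repeats the pattern above.  When GCC itself is built, each option
+ becomes an x_-prefixed field of struct gcc_options plus a #define that
+ maps the traditional name onto global_options, so e.g. `optimize'
+ expands to `global_options.x_optimize'.  Generator programs see a plain
+ extern declaration instead. */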
+#ifdef GENERATOR_FILE
+extern enum aarch_key_type aarch_ra_sign_key;
+#else
+ enum aarch_key_type x_aarch_ra_sign_key;
+#define aarch_ra_sign_key global_options.x_aarch_ra_sign_key
+#endif
+#ifdef GENERATOR_FILE
+extern long arm_stack_protector_guard_offset;
+#else
+ long x_arm_stack_protector_guard_offset;
+#define arm_stack_protector_guard_offset global_options.x_arm_stack_protector_guard_offset
+#endif
+#ifdef GENERATOR_FILE
+extern unsigned aarch_enable_bti;
+#else
+ unsigned x_aarch_enable_bti;
+#define aarch_enable_bti global_options.x_aarch_enable_bti
+#endif
+#ifdef GENERATOR_FILE
+extern bool dump_base_name_prefixed;
+#else
+ bool x_dump_base_name_prefixed;
+#define dump_base_name_prefixed global_options.x_dump_base_name_prefixed
+#endif
+#ifdef GENERATOR_FILE
+extern bool exit_after_options;
+#else
+ bool x_exit_after_options;
+#define exit_after_options global_options.x_exit_after_options
+#endif
+#ifdef GENERATOR_FILE
+extern bool flag_dump_all_passed;
+#else
+ bool x_flag_dump_all_passed;
+#define flag_dump_all_passed global_options.x_flag_dump_all_passed
+#endif
+#ifdef GENERATOR_FILE
+extern bool flag_opts_finished;
+#else
+ bool x_flag_opts_finished;
+#define flag_opts_finished global_options.x_flag_opts_finished
+#endif
+#ifdef GENERATOR_FILE
+extern bool flag_stack_usage_info;
+#else
+ bool x_flag_stack_usage_info;
+#define flag_stack_usage_info global_options.x_flag_stack_usage_info
+#endif
+#ifdef GENERATOR_FILE
+extern bool flag_warn_unused_result;
+#else
+ bool x_flag_warn_unused_result;
+#define flag_warn_unused_result global_options.x_flag_warn_unused_result
+#endif
+#ifdef GENERATOR_FILE
+extern bool in_lto_p;
+#else
+ bool x_in_lto_p;
+#define in_lto_p global_options.x_in_lto_p
+#endif
+#ifdef GENERATOR_FILE
+extern char *help_enum_printed;
+#else
+ char * x_help_enum_printed;
+#define help_enum_printed global_options.x_help_enum_printed
+#endif
+#ifdef GENERATOR_FILE
+extern char *help_printed;
+#else
+ char * x_help_printed;
+#define help_printed global_options.x_help_printed
+#endif
+#ifdef GENERATOR_FILE
+extern const char *aux_base_name;
+#else
+ const char * x_aux_base_name;
+#define aux_base_name global_options.x_aux_base_name
+#endif
+#ifdef GENERATOR_FILE
+extern const char *main_input_basename;
+#else
+ const char * x_main_input_basename;
+#define main_input_basename global_options.x_main_input_basename
+#endif
+#ifdef GENERATOR_FILE
+extern const char *main_input_filename;
+#else
+ const char * x_main_input_filename;
+#define main_input_filename global_options.x_main_input_filename
+#endif
+#ifdef GENERATOR_FILE
+extern enum ctf_debug_info_levels ctf_debug_info_level;
+#else
+ enum ctf_debug_info_levels x_ctf_debug_info_level;
+#define ctf_debug_info_level global_options.x_ctf_debug_info_level
+#endif
+#ifdef GENERATOR_FILE
+extern enum debug_info_levels debug_info_level;
+#else
+ enum debug_info_levels x_debug_info_level;
+#define debug_info_level global_options.x_debug_info_level
+#endif
+#ifdef GENERATOR_FILE
+extern enum debug_struct_file debug_struct_generic[DINFO_USAGE_NUM_ENUMS];
+#else
+ enum debug_struct_file x_debug_struct_generic[DINFO_USAGE_NUM_ENUMS];
+#define debug_struct_generic global_options.x_debug_struct_generic
+#endif
+#ifdef GENERATOR_FILE
+extern enum debug_struct_file debug_struct_ordinary[DINFO_USAGE_NUM_ENUMS];
+#else
+ enum debug_struct_file x_debug_struct_ordinary[DINFO_USAGE_NUM_ENUMS];
+#define debug_struct_ordinary global_options.x_debug_struct_ordinary
+#endif
+#ifdef GENERATOR_FILE
+extern enum dwarf_gnat_encodings gnat_encodings;
+#else
+ enum dwarf_gnat_encodings x_gnat_encodings;
+#define gnat_encodings global_options.x_gnat_encodings
+#endif
+#ifdef GENERATOR_FILE
+extern enum incremental_link flag_incremental_link;
+#else
+ enum incremental_link x_flag_incremental_link;
+#define flag_incremental_link global_options.x_flag_incremental_link
+#endif
+#ifdef GENERATOR_FILE
+extern enum stack_check_type flag_stack_check;
+#else
+ enum stack_check_type x_flag_stack_check;
+#define flag_stack_check global_options.x_flag_stack_check
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_complex_method;
+#else
+ int x_flag_complex_method;
+#define flag_complex_method global_options.x_flag_complex_method
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_debug_asm;
+#else
+ int x_flag_debug_asm;
+#define flag_debug_asm global_options.x_flag_debug_asm
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_default_complex_method;
+#else
+ int x_flag_default_complex_method;
+#define flag_default_complex_method global_options.x_flag_default_complex_method
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_rtl_in_asm;
+#else
+ int x_flag_dump_rtl_in_asm;
+#define flag_dump_rtl_in_asm global_options.x_flag_dump_rtl_in_asm
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_gen_aux_info;
+#else
+ int x_flag_gen_aux_info;
+#define flag_gen_aux_info global_options.x_flag_gen_aux_info
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_generate_lto;
+#else
+ int x_flag_generate_lto;
+#define flag_generate_lto global_options.x_flag_generate_lto
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_generate_offload;
+#else
+ int x_flag_generate_offload;
+#define flag_generate_offload global_options.x_flag_generate_offload
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_print_asm_name;
+#else
+ int x_flag_print_asm_name;
+#define flag_print_asm_name global_options.x_flag_print_asm_name
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_shlib;
+#else
+ int x_flag_shlib;
+#define flag_shlib global_options.x_flag_shlib
+#endif
+#ifdef GENERATOR_FILE
+extern int main_input_baselength;
+#else
+ int x_main_input_baselength;
+#define main_input_baselength global_options.x_main_input_baselength
+#endif
+#ifdef GENERATOR_FILE
+extern int optimize;
+#else
+ int x_optimize;
+#define optimize global_options.x_optimize
+#endif
+#ifdef GENERATOR_FILE
+extern int optimize_debug;
+#else
+ int x_optimize_debug;
+#define optimize_debug global_options.x_optimize_debug
+#endif
+#ifdef GENERATOR_FILE
+extern int optimize_fast;
+#else
+ int x_optimize_fast;
+#define optimize_fast global_options.x_optimize_fast
+#endif
+#ifdef GENERATOR_FILE
+extern int optimize_size;
+#else
+ int x_optimize_size;
+#define optimize_size global_options.x_optimize_size
+#endif
+#ifdef GENERATOR_FILE
+extern int rtl_dump_and_exit;
+#else
+ int x_rtl_dump_and_exit;
+#define rtl_dump_and_exit global_options.x_rtl_dump_and_exit
+#endif
+#ifdef GENERATOR_FILE
+extern int target_flags;
+#else
+ int x_target_flags;
+#define target_flags global_options.x_target_flags
+#endif
+#ifdef GENERATOR_FILE
+extern uint32_t write_symbols;
+#else
+ uint32_t x_write_symbols;
+#define write_symbols global_options.x_write_symbols
+#endif
+#ifdef GENERATOR_FILE
+extern unsigned int flag_sanitize;
+#else
+ unsigned int x_flag_sanitize;
+#define flag_sanitize global_options.x_flag_sanitize
+#endif
+#ifdef GENERATOR_FILE
+extern unsigned int flag_sanitize_recover;
+#else
+ unsigned int x_flag_sanitize_recover;
+#define flag_sanitize_recover global_options.x_flag_sanitize_recover
+#endif
+#ifdef GENERATOR_FILE
+extern unsigned int flag_sanitize_trap;
+#else
+ unsigned int x_flag_sanitize_trap;
+#define flag_sanitize_trap global_options.x_flag_sanitize_trap
+#endif
+#ifdef GENERATOR_FILE
+extern unsigned int flag_zero_call_used_regs;
+#else
+ unsigned int x_flag_zero_call_used_regs;
+#define flag_zero_call_used_regs global_options.x_flag_zero_call_used_regs
+#endif
+#ifdef GENERATOR_FILE
+extern unsigned int help_columns;
+#else
+ unsigned int x_help_columns;
+#define help_columns global_options.x_help_columns
+#endif
+#ifdef GENERATOR_FILE
+extern unsigned int initial_max_fld_align;
+#else
+ unsigned int x_initial_max_fld_align;
+#define initial_max_fld_align global_options.x_initial_max_fld_align
+#endif
+#ifdef GENERATOR_FILE
+extern void *flag_ignored_attributes;
+#else
+ void * x_flag_ignored_attributes;
+#define flag_ignored_attributes global_options.x_flag_ignored_attributes
+#endif
+#ifdef GENERATOR_FILE
+extern void *flag_instrument_functions_exclude_files;
+#else
+ void * x_flag_instrument_functions_exclude_files;
+#define flag_instrument_functions_exclude_files global_options.x_flag_instrument_functions_exclude_files
+#endif
+#ifdef GENERATOR_FILE
+extern void *flag_instrument_functions_exclude_functions;
+#else
+ void * x_flag_instrument_functions_exclude_functions;
+#define flag_instrument_functions_exclude_functions global_options.x_flag_instrument_functions_exclude_functions
+#endif
+#ifdef GENERATOR_FILE
+extern int help_flag;
+#else
+ int x_help_flag;
+#define help_flag global_options.x_help_flag
+#endif
+#ifdef GENERATOR_FILE
+extern int no_sysroot_suffix;
+#else
+ int x_no_sysroot_suffix;
+#define no_sysroot_suffix global_options.x_no_sysroot_suffix
+#endif
+#ifdef GENERATOR_FILE
+extern int param_align_loop_iterations;
+#else
+ int x_param_align_loop_iterations;
+#define param_align_loop_iterations global_options.x_param_align_loop_iterations
+#endif
+#ifdef GENERATOR_FILE
+extern int param_align_threshold;
+#else
+ int x_param_align_threshold;
+#define param_align_threshold global_options.x_param_align_threshold
+#endif
+#ifdef GENERATOR_FILE
+extern int param_analyzer_bb_explosion_factor;
+#else
+ int x_param_analyzer_bb_explosion_factor;
+#define param_analyzer_bb_explosion_factor global_options.x_param_analyzer_bb_explosion_factor
+#endif
+#ifdef GENERATOR_FILE
+extern int param_analyzer_max_constraints;
+#else
+ int x_param_analyzer_max_constraints;
+#define param_analyzer_max_constraints global_options.x_param_analyzer_max_constraints
+#endif
+#ifdef GENERATOR_FILE
+extern int param_analyzer_max_enodes_for_full_dump;
+#else
+ int x_param_analyzer_max_enodes_for_full_dump;
+#define param_analyzer_max_enodes_for_full_dump global_options.x_param_analyzer_max_enodes_for_full_dump
+#endif
+#ifdef GENERATOR_FILE
+extern int param_analyzer_max_enodes_per_program_point;
+#else
+ int x_param_analyzer_max_enodes_per_program_point;
+#define param_analyzer_max_enodes_per_program_point global_options.x_param_analyzer_max_enodes_per_program_point
+#endif
+#ifdef GENERATOR_FILE
+extern int param_analyzer_max_infeasible_edges;
+#else
+ int x_param_analyzer_max_infeasible_edges;
+#define param_analyzer_max_infeasible_edges global_options.x_param_analyzer_max_infeasible_edges
+#endif
+#ifdef GENERATOR_FILE
+extern int param_analyzer_max_recursion_depth;
+#else
+ int x_param_analyzer_max_recursion_depth;
+#define param_analyzer_max_recursion_depth global_options.x_param_analyzer_max_recursion_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_analyzer_max_svalue_depth;
+#else
+ int x_param_analyzer_max_svalue_depth;
+#define param_analyzer_max_svalue_depth global_options.x_param_analyzer_max_svalue_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_analyzer_min_snodes_for_call_summary;
+#else
+ int x_param_analyzer_min_snodes_for_call_summary;
+#define param_analyzer_min_snodes_for_call_summary global_options.x_param_analyzer_min_snodes_for_call_summary
+#endif
+#ifdef GENERATOR_FILE
+extern int param_asan_globals;
+#else
+ int x_param_asan_globals;
+#define param_asan_globals global_options.x_param_asan_globals
+#endif
+#ifdef GENERATOR_FILE
+extern int param_asan_protect_allocas;
+#else
+ int x_param_asan_protect_allocas;
+#define param_asan_protect_allocas global_options.x_param_asan_protect_allocas
+#endif
+#ifdef GENERATOR_FILE
+extern int param_asan_instrument_reads;
+#else
+ int x_param_asan_instrument_reads;
+#define param_asan_instrument_reads global_options.x_param_asan_instrument_reads
+#endif
+#ifdef GENERATOR_FILE
+extern int param_asan_instrument_writes;
+#else
+ int x_param_asan_instrument_writes;
+#define param_asan_instrument_writes global_options.x_param_asan_instrument_writes
+#endif
+#ifdef GENERATOR_FILE
+extern int param_asan_instrumentation_with_call_threshold;
+#else
+ int x_param_asan_instrumentation_with_call_threshold;
+#define param_asan_instrumentation_with_call_threshold global_options.x_param_asan_instrumentation_with_call_threshold
+#endif
+#ifdef GENERATOR_FILE
+extern int param_asan_kernel_mem_intrinsic_prefix;
+#else
+ int x_param_asan_kernel_mem_intrinsic_prefix;
+#define param_asan_kernel_mem_intrinsic_prefix global_options.x_param_asan_kernel_mem_intrinsic_prefix
+#endif
+#ifdef GENERATOR_FILE
+extern int param_asan_memintrin;
+#else
+ int x_param_asan_memintrin;
+#define param_asan_memintrin global_options.x_param_asan_memintrin
+#endif
+#ifdef GENERATOR_FILE
+extern int param_asan_stack;
+#else
+ int x_param_asan_stack;
+#define param_asan_stack global_options.x_param_asan_stack
+#endif
+#ifdef GENERATOR_FILE
+extern int param_asan_use_after_return;
+#else
+ int x_param_asan_use_after_return;
+#define param_asan_use_after_return global_options.x_param_asan_use_after_return
+#endif
+#ifdef GENERATOR_FILE
+extern int param_avg_loop_niter;
+#else
+ int x_param_avg_loop_niter;
+#define param_avg_loop_niter global_options.x_param_avg_loop_niter
+#endif
+#ifdef GENERATOR_FILE
+extern int param_avoid_fma_max_bits;
+#else
+ int x_param_avoid_fma_max_bits;
+#define param_avoid_fma_max_bits global_options.x_param_avoid_fma_max_bits
+#endif
+#ifdef GENERATOR_FILE
+extern int param_builtin_expect_probability;
+#else
+ int x_param_builtin_expect_probability;
+#define param_builtin_expect_probability global_options.x_param_builtin_expect_probability
+#endif
+#ifdef GENERATOR_FILE
+extern int param_builtin_string_cmp_inline_length;
+#else
+ int x_param_builtin_string_cmp_inline_length;
+#define param_builtin_string_cmp_inline_length global_options.x_param_builtin_string_cmp_inline_length
+#endif
+#ifdef GENERATOR_FILE
+extern int param_case_values_threshold;
+#else
+ int x_param_case_values_threshold;
+#define param_case_values_threshold global_options.x_param_case_values_threshold
+#endif
+#ifdef GENERATOR_FILE
+extern int param_comdat_sharing_probability;
+#else
+ int x_param_comdat_sharing_probability;
+#define param_comdat_sharing_probability global_options.x_param_comdat_sharing_probability
+#endif
+#ifdef GENERATOR_FILE
+extern int param_construct_interfere_size;
+#else
+ int x_param_construct_interfere_size;
+#define param_construct_interfere_size global_options.x_param_construct_interfere_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_cxx_max_namespaces_for_diagnostic_help;
+#else
+ int x_param_cxx_max_namespaces_for_diagnostic_help;
+#define param_cxx_max_namespaces_for_diagnostic_help global_options.x_param_cxx_max_namespaces_for_diagnostic_help
+#endif
+#ifdef GENERATOR_FILE
+extern int param_destruct_interfere_size;
+#else
+ int x_param_destruct_interfere_size;
+#define param_destruct_interfere_size global_options.x_param_destruct_interfere_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_dse_max_alias_queries_per_store;
+#else
+ int x_param_dse_max_alias_queries_per_store;
+#define param_dse_max_alias_queries_per_store global_options.x_param_dse_max_alias_queries_per_store
+#endif
+#ifdef GENERATOR_FILE
+extern int param_dse_max_object_size;
+#else
+ int x_param_dse_max_object_size;
+#define param_dse_max_object_size global_options.x_param_dse_max_object_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_early_inlining_insns;
+#else
+ int x_param_early_inlining_insns;
+#define param_early_inlining_insns global_options.x_param_early_inlining_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_evrp_sparse_threshold;
+#else
+ int x_param_evrp_sparse_threshold;
+#define param_evrp_sparse_threshold global_options.x_param_evrp_sparse_threshold
+#endif
+#ifdef GENERATOR_FILE
+extern int param_evrp_switch_limit;
+#else
+ int x_param_evrp_switch_limit;
+#define param_evrp_switch_limit global_options.x_param_evrp_switch_limit
+#endif
+#ifdef GENERATOR_FILE
+extern int param_fsm_scale_path_stmts;
+#else
+ int x_param_fsm_scale_path_stmts;
+#define param_fsm_scale_path_stmts global_options.x_param_fsm_scale_path_stmts
+#endif
+#ifdef GENERATOR_FILE
+extern int param_gcse_after_reload_critical_fraction;
+#else
+ int x_param_gcse_after_reload_critical_fraction;
+#define param_gcse_after_reload_critical_fraction global_options.x_param_gcse_after_reload_critical_fraction
+#endif
+#ifdef GENERATOR_FILE
+extern int param_gcse_after_reload_partial_fraction;
+#else
+ int x_param_gcse_after_reload_partial_fraction;
+#define param_gcse_after_reload_partial_fraction global_options.x_param_gcse_after_reload_partial_fraction
+#endif
+#ifdef GENERATOR_FILE
+extern int param_gcse_cost_distance_ratio;
+#else
+ int x_param_gcse_cost_distance_ratio;
+#define param_gcse_cost_distance_ratio global_options.x_param_gcse_cost_distance_ratio
+#endif
+#ifdef GENERATOR_FILE
+extern int param_gcse_unrestricted_cost;
+#else
+ int x_param_gcse_unrestricted_cost;
+#define param_gcse_unrestricted_cost global_options.x_param_gcse_unrestricted_cost
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ggc_min_expand;
+#else
+ int x_param_ggc_min_expand;
+#define param_ggc_min_expand global_options.x_param_ggc_min_expand
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ggc_min_heapsize;
+#else
+ int x_param_ggc_min_heapsize;
+#define param_ggc_min_heapsize global_options.x_param_ggc_min_heapsize
+#endif
+#ifdef GENERATOR_FILE
+extern int param_gimple_fe_computed_hot_bb_threshold;
+#else
+ int x_param_gimple_fe_computed_hot_bb_threshold;
+#define param_gimple_fe_computed_hot_bb_threshold global_options.x_param_gimple_fe_computed_hot_bb_threshold
+#endif
+#ifdef GENERATOR_FILE
+extern int param_graphite_allow_codegen_errors;
+#else
+ int x_param_graphite_allow_codegen_errors;
+#define param_graphite_allow_codegen_errors global_options.x_param_graphite_allow_codegen_errors
+#endif
+#ifdef GENERATOR_FILE
+extern int param_graphite_max_arrays_per_scop;
+#else
+ int x_param_graphite_max_arrays_per_scop;
+#define param_graphite_max_arrays_per_scop global_options.x_param_graphite_max_arrays_per_scop
+#endif
+#ifdef GENERATOR_FILE
+extern int param_graphite_max_nb_scop_params;
+#else
+ int x_param_graphite_max_nb_scop_params;
+#define param_graphite_max_nb_scop_params global_options.x_param_graphite_max_nb_scop_params
+#endif
+#ifdef GENERATOR_FILE
+extern int param_hash_table_verification_limit;
+#else
+ int x_param_hash_table_verification_limit;
+#define param_hash_table_verification_limit global_options.x_param_hash_table_verification_limit
+#endif
+#ifdef GENERATOR_FILE
+extern int param_hot_bb_count_fraction;
+#else
+ int x_param_hot_bb_count_fraction;
+#define param_hot_bb_count_fraction global_options.x_param_hot_bb_count_fraction
+#endif
+#ifdef GENERATOR_FILE
+extern int param_hot_bb_count_ws_permille;
+#else
+ int x_param_hot_bb_count_ws_permille;
+#define param_hot_bb_count_ws_permille global_options.x_param_hot_bb_count_ws_permille
+#endif
+#ifdef GENERATOR_FILE
+extern int param_hot_bb_frequency_fraction;
+#else
+ int x_param_hot_bb_frequency_fraction;
+#define param_hot_bb_frequency_fraction global_options.x_param_hot_bb_frequency_fraction
+#endif
+#ifdef GENERATOR_FILE
+extern int param_hwasan_instrument_allocas;
+#else
+ int x_param_hwasan_instrument_allocas;
+#define param_hwasan_instrument_allocas global_options.x_param_hwasan_instrument_allocas
+#endif
+#ifdef GENERATOR_FILE
+extern int param_hwasan_instrument_mem_intrinsics;
+#else
+ int x_param_hwasan_instrument_mem_intrinsics;
+#define param_hwasan_instrument_mem_intrinsics global_options.x_param_hwasan_instrument_mem_intrinsics
+#endif
+#ifdef GENERATOR_FILE
+extern int param_hwasan_instrument_reads;
+#else
+ int x_param_hwasan_instrument_reads;
+#define param_hwasan_instrument_reads global_options.x_param_hwasan_instrument_reads
+#endif
+#ifdef GENERATOR_FILE
+extern int param_hwasan_instrument_stack;
+#else
+ int x_param_hwasan_instrument_stack;
+#define param_hwasan_instrument_stack global_options.x_param_hwasan_instrument_stack
+#endif
+#ifdef GENERATOR_FILE
+extern int param_hwasan_instrument_writes;
+#else
+ int x_param_hwasan_instrument_writes;
+#define param_hwasan_instrument_writes global_options.x_param_hwasan_instrument_writes
+#endif
+#ifdef GENERATOR_FILE
+extern int param_hwasan_random_frame_tag;
+#else
+ int x_param_hwasan_random_frame_tag;
+#define param_hwasan_random_frame_tag global_options.x_param_hwasan_random_frame_tag
+#endif
+#ifdef GENERATOR_FILE
+extern int param_inline_heuristics_hint_percent;
+#else
+ int x_param_inline_heuristics_hint_percent;
+#define param_inline_heuristics_hint_percent global_options.x_param_inline_heuristics_hint_percent
+#endif
+#ifdef GENERATOR_FILE
+extern int param_inline_min_speedup;
+#else
+ int x_param_inline_min_speedup;
+#define param_inline_min_speedup global_options.x_param_inline_min_speedup
+#endif
+#ifdef GENERATOR_FILE
+extern int param_inline_unit_growth;
+#else
+ int x_param_inline_unit_growth;
+#define param_inline_unit_growth global_options.x_param_inline_unit_growth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_integer_share_limit;
+#else
+ int x_param_integer_share_limit;
+#define param_integer_share_limit global_options.x_param_integer_share_limit
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_cp_eval_threshold;
+#else
+ int x_param_ipa_cp_eval_threshold;
+#define param_ipa_cp_eval_threshold global_options.x_param_ipa_cp_eval_threshold
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_cp_large_unit_insns;
+#else
+ int x_param_ipa_cp_large_unit_insns;
+#define param_ipa_cp_large_unit_insns global_options.x_param_ipa_cp_large_unit_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_cp_loop_hint_bonus;
+#else
+ int x_param_ipa_cp_loop_hint_bonus;
+#define param_ipa_cp_loop_hint_bonus global_options.x_param_ipa_cp_loop_hint_bonus
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_cp_max_recursive_depth;
+#else
+ int x_param_ipa_cp_max_recursive_depth;
+#define param_ipa_cp_max_recursive_depth global_options.x_param_ipa_cp_max_recursive_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_cp_min_recursive_probability;
+#else
+ int x_param_ipa_cp_min_recursive_probability;
+#define param_ipa_cp_min_recursive_probability global_options.x_param_ipa_cp_min_recursive_probability
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_cp_profile_count_base;
+#else
+ int x_param_ipa_cp_profile_count_base;
+#define param_ipa_cp_profile_count_base global_options.x_param_ipa_cp_profile_count_base
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_cp_recursion_penalty;
+#else
+ int x_param_ipa_cp_recursion_penalty;
+#define param_ipa_cp_recursion_penalty global_options.x_param_ipa_cp_recursion_penalty
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_cp_recursive_freq_factor;
+#else
+ int x_param_ipa_cp_recursive_freq_factor;
+#define param_ipa_cp_recursive_freq_factor global_options.x_param_ipa_cp_recursive_freq_factor
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_cp_single_call_penalty;
+#else
+ int x_param_ipa_cp_single_call_penalty;
+#define param_ipa_cp_single_call_penalty global_options.x_param_ipa_cp_single_call_penalty
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_cp_unit_growth;
+#else
+ int x_param_ipa_cp_unit_growth;
+#define param_ipa_cp_unit_growth global_options.x_param_ipa_cp_unit_growth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_cp_value_list_size;
+#else
+ int x_param_ipa_cp_value_list_size;
+#define param_ipa_cp_value_list_size global_options.x_param_ipa_cp_value_list_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_jump_function_lookups;
+#else
+ int x_param_ipa_jump_function_lookups;
+#define param_ipa_jump_function_lookups global_options.x_param_ipa_jump_function_lookups
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_max_aa_steps;
+#else
+ int x_param_ipa_max_aa_steps;
+#define param_ipa_max_aa_steps global_options.x_param_ipa_max_aa_steps
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_max_agg_items;
+#else
+ int x_param_ipa_max_agg_items;
+#define param_ipa_max_agg_items global_options.x_param_ipa_max_agg_items
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_max_loop_predicates;
+#else
+ int x_param_ipa_max_loop_predicates;
+#define param_ipa_max_loop_predicates global_options.x_param_ipa_max_loop_predicates
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_max_param_expr_ops;
+#else
+ int x_param_ipa_max_param_expr_ops;
+#define param_ipa_max_param_expr_ops global_options.x_param_ipa_max_param_expr_ops
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_max_switch_predicate_bounds;
+#else
+ int x_param_ipa_max_switch_predicate_bounds;
+#define param_ipa_max_switch_predicate_bounds global_options.x_param_ipa_max_switch_predicate_bounds
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_sra_deref_prob_threshold;
+#else
+ int x_param_ipa_sra_deref_prob_threshold;
+#define param_ipa_sra_deref_prob_threshold global_options.x_param_ipa_sra_deref_prob_threshold
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_sra_max_replacements;
+#else
+ int x_param_ipa_sra_max_replacements;
+#define param_ipa_sra_max_replacements global_options.x_param_ipa_sra_max_replacements
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_sra_ptr_growth_factor;
+#else
+ int x_param_ipa_sra_ptr_growth_factor;
+#define param_ipa_sra_ptr_growth_factor global_options.x_param_ipa_sra_ptr_growth_factor
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ipa_sra_ptrwrap_growth_factor;
+#else
+ int x_param_ipa_sra_ptrwrap_growth_factor;
+#define param_ipa_sra_ptrwrap_growth_factor global_options.x_param_ipa_sra_ptrwrap_growth_factor
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ira_consider_dup_in_all_alts;
+#else
+ int x_param_ira_consider_dup_in_all_alts;
+#define param_ira_consider_dup_in_all_alts global_options.x_param_ira_consider_dup_in_all_alts
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ira_loop_reserved_regs;
+#else
+ int x_param_ira_loop_reserved_regs;
+#define param_ira_loop_reserved_regs global_options.x_param_ira_loop_reserved_regs
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ira_max_conflict_table_size;
+#else
+ int x_param_ira_max_conflict_table_size;
+#define param_ira_max_conflict_table_size global_options.x_param_ira_max_conflict_table_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ira_max_loops_num;
+#else
+ int x_param_ira_max_loops_num;
+#define param_ira_max_loops_num global_options.x_param_ira_max_loops_num
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ira_simple_lra_insn_threshold;
+#else
+ int x_param_ira_simple_lra_insn_threshold;
+#define param_ira_simple_lra_insn_threshold global_options.x_param_ira_simple_lra_insn_threshold
+#endif
+#ifdef GENERATOR_FILE
+extern int param_iv_always_prune_cand_set_bound;
+#else
+ int x_param_iv_always_prune_cand_set_bound;
+#define param_iv_always_prune_cand_set_bound global_options.x_param_iv_always_prune_cand_set_bound
+#endif
+#ifdef GENERATOR_FILE
+extern int param_iv_consider_all_candidates_bound;
+#else
+ int x_param_iv_consider_all_candidates_bound;
+#define param_iv_consider_all_candidates_bound global_options.x_param_iv_consider_all_candidates_bound
+#endif
+#ifdef GENERATOR_FILE
+extern int param_iv_max_considered_uses;
+#else
+ int x_param_iv_max_considered_uses;
+#define param_iv_max_considered_uses global_options.x_param_iv_max_considered_uses
+#endif
+#ifdef GENERATOR_FILE
+extern int param_jump_table_max_growth_ratio_for_size;
+#else
+ int x_param_jump_table_max_growth_ratio_for_size;
+#define param_jump_table_max_growth_ratio_for_size global_options.x_param_jump_table_max_growth_ratio_for_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_jump_table_max_growth_ratio_for_speed;
+#else
+ int x_param_jump_table_max_growth_ratio_for_speed;
+#define param_jump_table_max_growth_ratio_for_speed global_options.x_param_jump_table_max_growth_ratio_for_speed
+#endif
+#ifdef GENERATOR_FILE
+extern int param_l1_cache_line_size;
+#else
+ int x_param_l1_cache_line_size;
+#define param_l1_cache_line_size global_options.x_param_l1_cache_line_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_l1_cache_size;
+#else
+ int x_param_l1_cache_size;
+#define param_l1_cache_size global_options.x_param_l1_cache_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_l2_cache_size;
+#else
+ int x_param_l2_cache_size;
+#define param_l2_cache_size global_options.x_param_l2_cache_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_large_function_growth;
+#else
+ int x_param_large_function_growth;
+#define param_large_function_growth global_options.x_param_large_function_growth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_large_function_insns;
+#else
+ int x_param_large_function_insns;
+#define param_large_function_insns global_options.x_param_large_function_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_stack_frame_growth;
+#else
+ int x_param_stack_frame_growth;
+#define param_stack_frame_growth global_options.x_param_stack_frame_growth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_large_stack_frame;
+#else
+ int x_param_large_stack_frame;
+#define param_large_stack_frame global_options.x_param_large_stack_frame
+#endif
+#ifdef GENERATOR_FILE
+extern int param_large_unit_insns;
+#else
+ int x_param_large_unit_insns;
+#define param_large_unit_insns global_options.x_param_large_unit_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_lazy_modules;
+#else
+ int x_param_lazy_modules;
+#define param_lazy_modules global_options.x_param_lazy_modules
+#endif
+#ifdef GENERATOR_FILE
+extern int param_lim_expensive;
+#else
+ int x_param_lim_expensive;
+#define param_lim_expensive global_options.x_param_lim_expensive
+#endif
+#ifdef GENERATOR_FILE
+extern int param_logical_op_non_short_circuit;
+#else
+ int x_param_logical_op_non_short_circuit;
+#define param_logical_op_non_short_circuit global_options.x_param_logical_op_non_short_circuit
+#endif
+#ifdef GENERATOR_FILE
+extern int param_loop_block_tile_size;
+#else
+ int x_param_loop_block_tile_size;
+#define param_loop_block_tile_size global_options.x_param_loop_block_tile_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_loop_interchange_max_num_stmts;
+#else
+ int x_param_loop_interchange_max_num_stmts;
+#define param_loop_interchange_max_num_stmts global_options.x_param_loop_interchange_max_num_stmts
+#endif
+#ifdef GENERATOR_FILE
+extern int param_loop_interchange_stride_ratio;
+#else
+ int x_param_loop_interchange_stride_ratio;
+#define param_loop_interchange_stride_ratio global_options.x_param_loop_interchange_stride_ratio
+#endif
+#ifdef GENERATOR_FILE
+extern int param_loop_invariant_max_bbs_in_loop;
+#else
+ int x_param_loop_invariant_max_bbs_in_loop;
+#define param_loop_invariant_max_bbs_in_loop global_options.x_param_loop_invariant_max_bbs_in_loop
+#endif
+#ifdef GENERATOR_FILE
+extern int param_loop_max_datarefs_for_datadeps;
+#else
+ int x_param_loop_max_datarefs_for_datadeps;
+#define param_loop_max_datarefs_for_datadeps global_options.x_param_loop_max_datarefs_for_datadeps
+#endif
+#ifdef GENERATOR_FILE
+extern int param_loop_versioning_max_inner_insns;
+#else
+ int x_param_loop_versioning_max_inner_insns;
+#define param_loop_versioning_max_inner_insns global_options.x_param_loop_versioning_max_inner_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_loop_versioning_max_outer_insns;
+#else
+ int x_param_loop_versioning_max_outer_insns;
+#define param_loop_versioning_max_outer_insns global_options.x_param_loop_versioning_max_outer_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_lra_inheritance_ebb_probability_cutoff;
+#else
+ int x_param_lra_inheritance_ebb_probability_cutoff;
+#define param_lra_inheritance_ebb_probability_cutoff global_options.x_param_lra_inheritance_ebb_probability_cutoff
+#endif
+#ifdef GENERATOR_FILE
+extern int param_lra_max_considered_reload_pseudos;
+#else
+ int x_param_lra_max_considered_reload_pseudos;
+#define param_lra_max_considered_reload_pseudos global_options.x_param_lra_max_considered_reload_pseudos
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_partition_size;
+#else
+ int x_param_max_partition_size;
+#define param_max_partition_size global_options.x_param_max_partition_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_lto_streaming_parallelism;
+#else
+ int x_param_max_lto_streaming_parallelism;
+#define param_max_lto_streaming_parallelism global_options.x_param_max_lto_streaming_parallelism
+#endif
+#ifdef GENERATOR_FILE
+extern int param_min_partition_size;
+#else
+ int x_param_min_partition_size;
+#define param_min_partition_size global_options.x_param_min_partition_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_lto_partitions;
+#else
+ int x_param_lto_partitions;
+#define param_lto_partitions global_options.x_param_lto_partitions
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_average_unrolled_insns;
+#else
+ int x_param_max_average_unrolled_insns;
+#define param_max_average_unrolled_insns global_options.x_param_max_average_unrolled_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_combine_insns;
+#else
+ int x_param_max_combine_insns;
+#define param_max_combine_insns global_options.x_param_max_combine_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_unroll_iterations;
+#else
+ int x_param_max_unroll_iterations;
+#define param_max_unroll_iterations global_options.x_param_max_unroll_iterations
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_completely_peel_times;
+#else
+ int x_param_max_completely_peel_times;
+#define param_max_completely_peel_times global_options.x_param_max_completely_peel_times
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_completely_peeled_insns;
+#else
+ int x_param_max_completely_peeled_insns;
+#define param_max_completely_peeled_insns global_options.x_param_max_completely_peeled_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_crossjump_edges;
+#else
+ int x_param_max_crossjump_edges;
+#define param_max_crossjump_edges global_options.x_param_max_crossjump_edges
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_cse_insns;
+#else
+ int x_param_max_cse_insns;
+#define param_max_cse_insns global_options.x_param_max_cse_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_cse_path_length;
+#else
+ int x_param_max_cse_path_length;
+#define param_max_cse_path_length global_options.x_param_max_cse_path_length
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_cselib_memory_locations;
+#else
+ int x_param_max_cselib_memory_locations;
+#define param_max_cselib_memory_locations global_options.x_param_max_cselib_memory_locations
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_debug_marker_count;
+#else
+ int x_param_max_debug_marker_count;
+#define param_max_debug_marker_count global_options.x_param_max_debug_marker_count
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_delay_slot_insn_search;
+#else
+ int x_param_max_delay_slot_insn_search;
+#define param_max_delay_slot_insn_search global_options.x_param_max_delay_slot_insn_search
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_delay_slot_live_search;
+#else
+ int x_param_max_delay_slot_live_search;
+#define param_max_delay_slot_live_search global_options.x_param_max_delay_slot_live_search
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_dse_active_local_stores;
+#else
+ int x_param_max_dse_active_local_stores;
+#define param_max_dse_active_local_stores global_options.x_param_max_dse_active_local_stores
+#endif
+#ifdef GENERATOR_FILE
+extern int param_early_inliner_max_iterations;
+#else
+ int x_param_early_inliner_max_iterations;
+#define param_early_inliner_max_iterations global_options.x_param_early_inliner_max_iterations
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_fields_for_field_sensitive;
+#else
+ int x_param_max_fields_for_field_sensitive;
+#define param_max_fields_for_field_sensitive global_options.x_param_max_fields_for_field_sensitive
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_find_base_term_values;
+#else
+ int x_param_max_find_base_term_values;
+#define param_max_find_base_term_values global_options.x_param_max_find_base_term_values
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_fsm_thread_path_insns;
+#else
+ int x_param_max_fsm_thread_path_insns;
+#define param_max_fsm_thread_path_insns global_options.x_param_max_fsm_thread_path_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_gcse_insertion_ratio;
+#else
+ int x_param_max_gcse_insertion_ratio;
+#define param_max_gcse_insertion_ratio global_options.x_param_max_gcse_insertion_ratio
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_gcse_memory;
+#else
+ int x_param_max_gcse_memory;
+#define param_max_gcse_memory global_options.x_param_max_gcse_memory
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_goto_duplication_insns;
+#else
+ int x_param_max_goto_duplication_insns;
+#define param_max_goto_duplication_insns global_options.x_param_max_goto_duplication_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_grow_copy_bb_insns;
+#else
+ int x_param_max_grow_copy_bb_insns;
+#define param_max_grow_copy_bb_insns global_options.x_param_max_grow_copy_bb_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_hoist_depth;
+#else
+ int x_param_max_hoist_depth;
+#define param_max_hoist_depth global_options.x_param_max_hoist_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_inline_functions_called_once_insns;
+#else
+ int x_param_inline_functions_called_once_insns;
+#define param_inline_functions_called_once_insns global_options.x_param_inline_functions_called_once_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_inline_functions_called_once_loop_depth;
+#else
+ int x_param_inline_functions_called_once_loop_depth;
+#define param_inline_functions_called_once_loop_depth global_options.x_param_inline_functions_called_once_loop_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_inline_insns_auto;
+#else
+ int x_param_max_inline_insns_auto;
+#define param_max_inline_insns_auto global_options.x_param_max_inline_insns_auto
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_inline_insns_recursive_auto;
+#else
+ int x_param_max_inline_insns_recursive_auto;
+#define param_max_inline_insns_recursive_auto global_options.x_param_max_inline_insns_recursive_auto
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_inline_insns_recursive;
+#else
+ int x_param_max_inline_insns_recursive;
+#define param_max_inline_insns_recursive global_options.x_param_max_inline_insns_recursive
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_inline_insns_single;
+#else
+ int x_param_max_inline_insns_single;
+#define param_max_inline_insns_single global_options.x_param_max_inline_insns_single
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_inline_insns_size;
+#else
+ int x_param_max_inline_insns_size;
+#define param_max_inline_insns_size global_options.x_param_max_inline_insns_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_inline_insns_small;
+#else
+ int x_param_max_inline_insns_small;
+#define param_max_inline_insns_small global_options.x_param_max_inline_insns_small
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_inline_recursive_depth_auto;
+#else
+ int x_param_max_inline_recursive_depth_auto;
+#define param_max_inline_recursive_depth_auto global_options.x_param_max_inline_recursive_depth_auto
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_inline_recursive_depth;
+#else
+ int x_param_max_inline_recursive_depth;
+#define param_max_inline_recursive_depth global_options.x_param_max_inline_recursive_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_isl_operations;
+#else
+ int x_param_max_isl_operations;
+#define param_max_isl_operations global_options.x_param_max_isl_operations
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_iterations_computation_cost;
+#else
+ int x_param_max_iterations_computation_cost;
+#define param_max_iterations_computation_cost global_options.x_param_max_iterations_computation_cost
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_iterations_to_track;
+#else
+ int x_param_max_iterations_to_track;
+#define param_max_iterations_to_track global_options.x_param_max_iterations_to_track
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_jump_thread_duplication_stmts;
+#else
+ int x_param_max_jump_thread_duplication_stmts;
+#define param_max_jump_thread_duplication_stmts global_options.x_param_max_jump_thread_duplication_stmts
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_jump_thread_paths;
+#else
+ int x_param_max_jump_thread_paths;
+#define param_max_jump_thread_paths global_options.x_param_max_jump_thread_paths
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_last_value_rtl;
+#else
+ int x_param_max_last_value_rtl;
+#define param_max_last_value_rtl global_options.x_param_max_last_value_rtl
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_loop_header_insns;
+#else
+ int x_param_max_loop_header_insns;
+#define param_max_loop_header_insns global_options.x_param_max_loop_header_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_modulo_backtrack_attempts;
+#else
+ int x_param_max_modulo_backtrack_attempts;
+#define param_max_modulo_backtrack_attempts global_options.x_param_max_modulo_backtrack_attempts
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_partial_antic_length;
+#else
+ int x_param_max_partial_antic_length;
+#define param_max_partial_antic_length global_options.x_param_max_partial_antic_length
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_peel_branches;
+#else
+ int x_param_max_peel_branches;
+#define param_max_peel_branches global_options.x_param_max_peel_branches
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_peel_times;
+#else
+ int x_param_max_peel_times;
+#define param_max_peel_times global_options.x_param_max_peel_times
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_peeled_insns;
+#else
+ int x_param_max_peeled_insns;
+#define param_max_peeled_insns global_options.x_param_max_peeled_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_pending_list_length;
+#else
+ int x_param_max_pending_list_length;
+#define param_max_pending_list_length global_options.x_param_max_pending_list_length
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_pipeline_region_blocks;
+#else
+ int x_param_max_pipeline_region_blocks;
+#define param_max_pipeline_region_blocks global_options.x_param_max_pipeline_region_blocks
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_pipeline_region_insns;
+#else
+ int x_param_max_pipeline_region_insns;
+#define param_max_pipeline_region_insns global_options.x_param_max_pipeline_region_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_pow_sqrt_depth;
+#else
+ int x_param_max_pow_sqrt_depth;
+#define param_max_pow_sqrt_depth global_options.x_param_max_pow_sqrt_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_predicted_iterations;
+#else
+ int x_param_max_predicted_iterations;
+#define param_max_predicted_iterations global_options.x_param_max_predicted_iterations
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_reload_search_insns;
+#else
+ int x_param_max_reload_search_insns;
+#define param_max_reload_search_insns global_options.x_param_max_reload_search_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_rtl_if_conversion_insns;
+#else
+ int x_param_max_rtl_if_conversion_insns;
+#define param_max_rtl_if_conversion_insns global_options.x_param_max_rtl_if_conversion_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_rtl_if_conversion_predictable_cost;
+#else
+ int x_param_max_rtl_if_conversion_predictable_cost;
+#define param_max_rtl_if_conversion_predictable_cost global_options.x_param_max_rtl_if_conversion_predictable_cost
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_rtl_if_conversion_unpredictable_cost;
+#else
+ int x_param_max_rtl_if_conversion_unpredictable_cost;
+#define param_max_rtl_if_conversion_unpredictable_cost global_options.x_param_max_rtl_if_conversion_unpredictable_cost
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_sched_extend_regions_iters;
+#else
+ int x_param_max_sched_extend_regions_iters;
+#define param_max_sched_extend_regions_iters global_options.x_param_max_sched_extend_regions_iters
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_sched_insn_conflict_delay;
+#else
+ int x_param_max_sched_insn_conflict_delay;
+#define param_max_sched_insn_conflict_delay global_options.x_param_max_sched_insn_conflict_delay
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_sched_ready_insns;
+#else
+ int x_param_max_sched_ready_insns;
+#define param_max_sched_ready_insns global_options.x_param_max_sched_ready_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_sched_region_blocks;
+#else
+ int x_param_max_sched_region_blocks;
+#define param_max_sched_region_blocks global_options.x_param_max_sched_region_blocks
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_sched_region_insns;
+#else
+ int x_param_max_sched_region_insns;
+#define param_max_sched_region_insns global_options.x_param_max_sched_region_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_slsr_candidate_scan;
+#else
+ int x_param_max_slsr_candidate_scan;
+#define param_max_slsr_candidate_scan global_options.x_param_max_slsr_candidate_scan
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_speculative_devirt_maydefs;
+#else
+ int x_param_max_speculative_devirt_maydefs;
+#define param_max_speculative_devirt_maydefs global_options.x_param_max_speculative_devirt_maydefs
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_ssa_name_query_depth;
+#else
+ int x_param_max_ssa_name_query_depth;
+#define param_max_ssa_name_query_depth global_options.x_param_max_ssa_name_query_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_store_chains_to_track;
+#else
+ int x_param_max_store_chains_to_track;
+#define param_max_store_chains_to_track global_options.x_param_max_store_chains_to_track
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_stores_to_merge;
+#else
+ int x_param_max_stores_to_merge;
+#define param_max_stores_to_merge global_options.x_param_max_stores_to_merge
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_stores_to_sink;
+#else
+ int x_param_max_stores_to_sink;
+#define param_max_stores_to_sink global_options.x_param_max_stores_to_sink
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_stores_to_track;
+#else
+ int x_param_max_stores_to_track;
+#define param_max_stores_to_track global_options.x_param_max_stores_to_track
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_tail_merge_comparisons;
+#else
+ int x_param_max_tail_merge_comparisons;
+#define param_max_tail_merge_comparisons global_options.x_param_max_tail_merge_comparisons
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_tail_merge_iterations;
+#else
+ int x_param_max_tail_merge_iterations;
+#define param_max_tail_merge_iterations global_options.x_param_max_tail_merge_iterations
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_tracked_strlens;
+#else
+ int x_param_max_tracked_strlens;
+#define param_max_tracked_strlens global_options.x_param_max_tracked_strlens
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_tree_if_conversion_phi_args;
+#else
+ int x_param_max_tree_if_conversion_phi_args;
+#define param_max_tree_if_conversion_phi_args global_options.x_param_max_tree_if_conversion_phi_args
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_unroll_times;
+#else
+ int x_param_max_unroll_times;
+#define param_max_unroll_times global_options.x_param_max_unroll_times
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_unrolled_insns;
+#else
+ int x_param_max_unrolled_insns;
+#define param_max_unrolled_insns global_options.x_param_max_unrolled_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_unswitch_depth;
+#else
+ int x_param_max_unswitch_depth;
+#define param_max_unswitch_depth global_options.x_param_max_unswitch_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_unswitch_insns;
+#else
+ int x_param_max_unswitch_insns;
+#define param_max_unswitch_insns global_options.x_param_max_unswitch_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_variable_expansions;
+#else
+ int x_param_max_variable_expansions;
+#define param_max_variable_expansions global_options.x_param_max_variable_expansions
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_vartrack_expr_depth;
+#else
+ int x_param_max_vartrack_expr_depth;
+#define param_max_vartrack_expr_depth global_options.x_param_max_vartrack_expr_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_vartrack_reverse_op_size;
+#else
+ int x_param_max_vartrack_reverse_op_size;
+#define param_max_vartrack_reverse_op_size global_options.x_param_max_vartrack_reverse_op_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_max_vartrack_size;
+#else
+ int x_param_max_vartrack_size;
+#define param_max_vartrack_size global_options.x_param_max_vartrack_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_min_crossjump_insns;
+#else
+ int x_param_min_crossjump_insns;
+#define param_min_crossjump_insns global_options.x_param_min_crossjump_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_min_inline_recursive_probability;
+#else
+ int x_param_min_inline_recursive_probability;
+#define param_min_inline_recursive_probability global_options.x_param_min_inline_recursive_probability
+#endif
+#ifdef GENERATOR_FILE
+extern int param_min_insn_to_prefetch_ratio;
+#else
+ int x_param_min_insn_to_prefetch_ratio;
+#define param_min_insn_to_prefetch_ratio global_options.x_param_min_insn_to_prefetch_ratio
+#endif
+#ifdef GENERATOR_FILE
+extern int param_min_loop_cond_split_prob;
+#else
+ int x_param_min_loop_cond_split_prob;
+#define param_min_loop_cond_split_prob global_options.x_param_min_loop_cond_split_prob
+#endif
+#ifdef GENERATOR_FILE
+extern int param_min_nondebug_insn_uid;
+#else
+ int x_param_min_nondebug_insn_uid;
+#define param_min_nondebug_insn_uid global_options.x_param_min_nondebug_insn_uid
+#endif
+#ifdef GENERATOR_FILE
+extern int param_min_pagesize;
+#else
+ int x_param_min_pagesize;
+#define param_min_pagesize global_options.x_param_min_pagesize
+#endif
+#ifdef GENERATOR_FILE
+extern int param_min_size_for_stack_sharing;
+#else
+ int x_param_min_size_for_stack_sharing;
+#define param_min_size_for_stack_sharing global_options.x_param_min_size_for_stack_sharing
+#endif
+#ifdef GENERATOR_FILE
+extern int param_min_spec_prob;
+#else
+ int x_param_min_spec_prob;
+#define param_min_spec_prob global_options.x_param_min_spec_prob
+#endif
+#ifdef GENERATOR_FILE
+extern int param_min_vect_loop_bound;
+#else
+ int x_param_min_vect_loop_bound;
+#define param_min_vect_loop_bound global_options.x_param_min_vect_loop_bound
+#endif
+#ifdef GENERATOR_FILE
+extern int param_modref_max_accesses;
+#else
+ int x_param_modref_max_accesses;
+#define param_modref_max_accesses global_options.x_param_modref_max_accesses
+#endif
+#ifdef GENERATOR_FILE
+extern int param_modref_max_adjustments;
+#else
+ int x_param_modref_max_adjustments;
+#define param_modref_max_adjustments global_options.x_param_modref_max_adjustments
+#endif
+#ifdef GENERATOR_FILE
+extern int param_modref_max_bases;
+#else
+ int x_param_modref_max_bases;
+#define param_modref_max_bases global_options.x_param_modref_max_bases
+#endif
+#ifdef GENERATOR_FILE
+extern int param_modref_max_depth;
+#else
+ int x_param_modref_max_depth;
+#define param_modref_max_depth global_options.x_param_modref_max_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_modref_max_escape_points;
+#else
+ int x_param_modref_max_escape_points;
+#define param_modref_max_escape_points global_options.x_param_modref_max_escape_points
+#endif
+#ifdef GENERATOR_FILE
+extern int param_modref_max_refs;
+#else
+ int x_param_modref_max_refs;
+#define param_modref_max_refs global_options.x_param_modref_max_refs
+#endif
+#ifdef GENERATOR_FILE
+extern int param_modref_max_tests;
+#else
+ int x_param_modref_max_tests;
+#define param_modref_max_tests global_options.x_param_modref_max_tests
+#endif
+#ifdef GENERATOR_FILE
+extern enum openacc_kernels param_openacc_kernels;
+#else
+ enum openacc_kernels x_param_openacc_kernels;
+#define param_openacc_kernels global_options.x_param_openacc_kernels
+#endif
+#ifdef GENERATOR_FILE
+extern enum openacc_privatization param_openacc_privatization;
+#else
+ enum openacc_privatization x_param_openacc_privatization;
+#define param_openacc_privatization global_options.x_param_openacc_privatization
+#endif
+#ifdef GENERATOR_FILE
+extern int param_parloops_chunk_size;
+#else
+ int x_param_parloops_chunk_size;
+#define param_parloops_chunk_size global_options.x_param_parloops_chunk_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_parloops_min_per_thread;
+#else
+ int x_param_parloops_min_per_thread;
+#define param_parloops_min_per_thread global_options.x_param_parloops_min_per_thread
+#endif
+#ifdef GENERATOR_FILE
+extern int param_parloops_schedule;
+#else
+ int x_param_parloops_schedule;
+#define param_parloops_schedule global_options.x_param_parloops_schedule
+#endif
+#ifdef GENERATOR_FILE
+extern int param_partial_inlining_entry_probability;
+#else
+ int x_param_partial_inlining_entry_probability;
+#define param_partial_inlining_entry_probability global_options.x_param_partial_inlining_entry_probability
+#endif
+#ifdef GENERATOR_FILE
+extern int param_predictable_branch_outcome;
+#else
+ int x_param_predictable_branch_outcome;
+#define param_predictable_branch_outcome global_options.x_param_predictable_branch_outcome
+#endif
+#ifdef GENERATOR_FILE
+extern int param_prefetch_dynamic_strides;
+#else
+ int x_param_prefetch_dynamic_strides;
+#define param_prefetch_dynamic_strides global_options.x_param_prefetch_dynamic_strides
+#endif
+#ifdef GENERATOR_FILE
+extern int param_prefetch_latency;
+#else
+ int x_param_prefetch_latency;
+#define param_prefetch_latency global_options.x_param_prefetch_latency
+#endif
+#ifdef GENERATOR_FILE
+extern int param_prefetch_min_insn_to_mem_ratio;
+#else
+ int x_param_prefetch_min_insn_to_mem_ratio;
+#define param_prefetch_min_insn_to_mem_ratio global_options.x_param_prefetch_min_insn_to_mem_ratio
+#endif
+#ifdef GENERATOR_FILE
+extern int param_prefetch_minimum_stride;
+#else
+ int x_param_prefetch_minimum_stride;
+#define param_prefetch_minimum_stride global_options.x_param_prefetch_minimum_stride
+#endif
+#ifdef GENERATOR_FILE
+extern int param_profile_func_internal_id;
+#else
+ int x_param_profile_func_internal_id;
+#define param_profile_func_internal_id global_options.x_param_profile_func_internal_id
+#endif
+#ifdef GENERATOR_FILE
+extern enum ranger_debug param_ranger_debug;
+#else
+ enum ranger_debug x_param_ranger_debug;
+#define param_ranger_debug global_options.x_param_ranger_debug
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ranger_logical_depth;
+#else
+ int x_param_ranger_logical_depth;
+#define param_ranger_logical_depth global_options.x_param_ranger_logical_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ranger_recompute_depth;
+#else
+ int x_param_ranger_recompute_depth;
+#define param_ranger_recompute_depth global_options.x_param_ranger_recompute_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_relation_block_limit;
+#else
+ int x_param_relation_block_limit;
+#define param_relation_block_limit global_options.x_param_relation_block_limit
+#endif
+#ifdef GENERATOR_FILE
+extern int param_rpo_vn_max_loop_depth;
+#else
+ int x_param_rpo_vn_max_loop_depth;
+#define param_rpo_vn_max_loop_depth global_options.x_param_rpo_vn_max_loop_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sccvn_max_alias_queries_per_access;
+#else
+ int x_param_sccvn_max_alias_queries_per_access;
+#define param_sccvn_max_alias_queries_per_access global_options.x_param_sccvn_max_alias_queries_per_access
+#endif
+#ifdef GENERATOR_FILE
+extern int param_scev_max_expr_complexity;
+#else
+ int x_param_scev_max_expr_complexity;
+#define param_scev_max_expr_complexity global_options.x_param_scev_max_expr_complexity
+#endif
+#ifdef GENERATOR_FILE
+extern int param_scev_max_expr_size;
+#else
+ int x_param_scev_max_expr_size;
+#define param_scev_max_expr_size global_options.x_param_scev_max_expr_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sched_autopref_queue_depth;
+#else
+ int x_param_sched_autopref_queue_depth;
+#define param_sched_autopref_queue_depth global_options.x_param_sched_autopref_queue_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sched_mem_true_dep_cost;
+#else
+ int x_param_sched_mem_true_dep_cost;
+#define param_sched_mem_true_dep_cost global_options.x_param_sched_mem_true_dep_cost
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sched_pressure_algorithm;
+#else
+ int x_param_sched_pressure_algorithm;
+#define param_sched_pressure_algorithm global_options.x_param_sched_pressure_algorithm
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sched_spec_prob_cutoff;
+#else
+ int x_param_sched_spec_prob_cutoff;
+#define param_sched_spec_prob_cutoff global_options.x_param_sched_spec_prob_cutoff
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sched_state_edge_prob_cutoff;
+#else
+ int x_param_sched_state_edge_prob_cutoff;
+#define param_sched_state_edge_prob_cutoff global_options.x_param_sched_state_edge_prob_cutoff
+#endif
+#ifdef GENERATOR_FILE
+extern int param_selsched_insns_to_rename;
+#else
+ int x_param_selsched_insns_to_rename;
+#define param_selsched_insns_to_rename global_options.x_param_selsched_insns_to_rename
+#endif
+#ifdef GENERATOR_FILE
+extern int param_selsched_max_lookahead;
+#else
+ int x_param_selsched_max_lookahead;
+#define param_selsched_max_lookahead global_options.x_param_selsched_max_lookahead
+#endif
+#ifdef GENERATOR_FILE
+extern int param_selsched_max_sched_times;
+#else
+ int x_param_selsched_max_sched_times;
+#define param_selsched_max_sched_times global_options.x_param_selsched_max_sched_times
+#endif
+#ifdef GENERATOR_FILE
+extern int param_simultaneous_prefetches;
+#else
+ int x_param_simultaneous_prefetches;
+#define param_simultaneous_prefetches global_options.x_param_simultaneous_prefetches
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sink_frequency_threshold;
+#else
+ int x_param_sink_frequency_threshold;
+#define param_sink_frequency_threshold global_options.x_param_sink_frequency_threshold
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sms_dfa_history;
+#else
+ int x_param_sms_dfa_history;
+#define param_sms_dfa_history global_options.x_param_sms_dfa_history
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sms_loop_average_count_threshold;
+#else
+ int x_param_sms_loop_average_count_threshold;
+#define param_sms_loop_average_count_threshold global_options.x_param_sms_loop_average_count_threshold
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sms_max_ii_factor;
+#else
+ int x_param_sms_max_ii_factor;
+#define param_sms_max_ii_factor global_options.x_param_sms_max_ii_factor
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sms_min_sc;
+#else
+ int x_param_sms_min_sc;
+#define param_sms_min_sc global_options.x_param_sms_min_sc
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sra_max_propagations;
+#else
+ int x_param_sra_max_propagations;
+#define param_sra_max_propagations global_options.x_param_sra_max_propagations
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sra_max_scalarization_size_size;
+#else
+ int x_param_sra_max_scalarization_size_size;
+#define param_sra_max_scalarization_size_size global_options.x_param_sra_max_scalarization_size_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_sra_max_scalarization_size_speed;
+#else
+ int x_param_sra_max_scalarization_size_speed;
+#define param_sra_max_scalarization_size_speed global_options.x_param_sra_max_scalarization_size_speed
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ssa_name_def_chain_limit;
+#else
+ int x_param_ssa_name_def_chain_limit;
+#define param_ssa_name_def_chain_limit global_options.x_param_ssa_name_def_chain_limit
+#endif
+#ifdef GENERATOR_FILE
+extern int param_ssp_buffer_size;
+#else
+ int x_param_ssp_buffer_size;
+#define param_ssp_buffer_size global_options.x_param_ssp_buffer_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_stack_clash_protection_guard_size;
+#else
+ int x_param_stack_clash_protection_guard_size;
+#define param_stack_clash_protection_guard_size global_options.x_param_stack_clash_protection_guard_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_stack_clash_protection_probe_interval;
+#else
+ int x_param_stack_clash_protection_probe_interval;
+#define param_stack_clash_protection_probe_interval global_options.x_param_stack_clash_protection_probe_interval
+#endif
+#ifdef GENERATOR_FILE
+extern int param_store_merging_allow_unaligned;
+#else
+ int x_param_store_merging_allow_unaligned;
+#define param_store_merging_allow_unaligned global_options.x_param_store_merging_allow_unaligned
+#endif
+#ifdef GENERATOR_FILE
+extern int param_store_merging_max_size;
+#else
+ int x_param_store_merging_max_size;
+#define param_store_merging_max_size global_options.x_param_store_merging_max_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_switch_conversion_branch_ratio;
+#else
+ int x_param_switch_conversion_branch_ratio;
+#define param_switch_conversion_branch_ratio global_options.x_param_switch_conversion_branch_ratio
+#endif
+#ifdef GENERATOR_FILE
+extern enum threader_debug param_threader_debug;
+#else
+ enum threader_debug x_param_threader_debug;
+#define param_threader_debug global_options.x_param_threader_debug
+#endif
+#ifdef GENERATOR_FILE
+extern int param_tm_max_aggregate_size;
+#else
+ int x_param_tm_max_aggregate_size;
+#define param_tm_max_aggregate_size global_options.x_param_tm_max_aggregate_size
+#endif
+#ifdef GENERATOR_FILE
+extern int param_tracer_dynamic_coverage_feedback;
+#else
+ int x_param_tracer_dynamic_coverage_feedback;
+#define param_tracer_dynamic_coverage_feedback global_options.x_param_tracer_dynamic_coverage_feedback
+#endif
+#ifdef GENERATOR_FILE
+extern int param_tracer_dynamic_coverage;
+#else
+ int x_param_tracer_dynamic_coverage;
+#define param_tracer_dynamic_coverage global_options.x_param_tracer_dynamic_coverage
+#endif
+#ifdef GENERATOR_FILE
+extern int param_tracer_max_code_growth;
+#else
+ int x_param_tracer_max_code_growth;
+#define param_tracer_max_code_growth global_options.x_param_tracer_max_code_growth
+#endif
+#ifdef GENERATOR_FILE
+extern int param_tracer_min_branch_probability_feedback;
+#else
+ int x_param_tracer_min_branch_probability_feedback;
+#define param_tracer_min_branch_probability_feedback global_options.x_param_tracer_min_branch_probability_feedback
+#endif
+#ifdef GENERATOR_FILE
+extern int param_tracer_min_branch_probability;
+#else
+ int x_param_tracer_min_branch_probability;
+#define param_tracer_min_branch_probability global_options.x_param_tracer_min_branch_probability
+#endif
+#ifdef GENERATOR_FILE
+extern int param_tracer_min_branch_ratio;
+#else
+ int x_param_tracer_min_branch_ratio;
+#define param_tracer_min_branch_ratio global_options.x_param_tracer_min_branch_ratio
+#endif
+#ifdef GENERATOR_FILE
+extern int param_tree_reassoc_width;
+#else
+ int x_param_tree_reassoc_width;
+#define param_tree_reassoc_width global_options.x_param_tree_reassoc_width
+#endif
+#ifdef GENERATOR_FILE
+extern int param_tsan_distinguish_volatile;
+#else
+ int x_param_tsan_distinguish_volatile;
+#define param_tsan_distinguish_volatile global_options.x_param_tsan_distinguish_volatile
+#endif
+#ifdef GENERATOR_FILE
+extern int param_tsan_instrument_func_entry_exit;
+#else
+ int x_param_tsan_instrument_func_entry_exit;
+#define param_tsan_instrument_func_entry_exit global_options.x_param_tsan_instrument_func_entry_exit
+#endif
+#ifdef GENERATOR_FILE
+extern int param_uninit_control_dep_attempts;
+#else
+ int x_param_uninit_control_dep_attempts;
+#define param_uninit_control_dep_attempts global_options.x_param_uninit_control_dep_attempts
+#endif
+#ifdef GENERATOR_FILE
+extern int param_uninlined_function_insns;
+#else
+ int x_param_uninlined_function_insns;
+#define param_uninlined_function_insns global_options.x_param_uninlined_function_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_uninlined_function_time;
+#else
+ int x_param_uninlined_function_time;
+#define param_uninlined_function_time global_options.x_param_uninlined_function_time
+#endif
+#ifdef GENERATOR_FILE
+extern int param_uninlined_function_thunk_insns;
+#else
+ int x_param_uninlined_function_thunk_insns;
+#define param_uninlined_function_thunk_insns global_options.x_param_uninlined_function_thunk_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int param_uninlined_function_thunk_time;
+#else
+ int x_param_uninlined_function_thunk_time;
+#define param_uninlined_function_thunk_time global_options.x_param_uninlined_function_thunk_time
+#endif
+#ifdef GENERATOR_FILE
+extern int param_unlikely_bb_count_fraction;
+#else
+ int x_param_unlikely_bb_count_fraction;
+#define param_unlikely_bb_count_fraction global_options.x_param_unlikely_bb_count_fraction
+#endif
+#ifdef GENERATOR_FILE
+extern int param_unroll_jam_max_unroll;
+#else
+ int x_param_unroll_jam_max_unroll;
+#define param_unroll_jam_max_unroll global_options.x_param_unroll_jam_max_unroll
+#endif
+#ifdef GENERATOR_FILE
+extern int param_unroll_jam_min_percent;
+#else
+ int x_param_unroll_jam_min_percent;
+#define param_unroll_jam_min_percent global_options.x_param_unroll_jam_min_percent
+#endif
+#ifdef GENERATOR_FILE
+extern int param_use_after_scope_direct_emission_threshold;
+#else
+ int x_param_use_after_scope_direct_emission_threshold;
+#define param_use_after_scope_direct_emission_threshold global_options.x_param_use_after_scope_direct_emission_threshold
+#endif
+#ifdef GENERATOR_FILE
+extern int param_use_canonical_types;
+#else
+ int x_param_use_canonical_types;
+#define param_use_canonical_types global_options.x_param_use_canonical_types
+#endif
+#ifdef GENERATOR_FILE
+extern int param_vect_epilogues_nomask;
+#else
+ int x_param_vect_epilogues_nomask;
+#define param_vect_epilogues_nomask global_options.x_param_vect_epilogues_nomask
+#endif
+#ifdef GENERATOR_FILE
+extern int param_vect_induction_float;
+#else
+ int x_param_vect_induction_float;
+#define param_vect_induction_float global_options.x_param_vect_induction_float
+#endif
+#ifdef GENERATOR_FILE
+extern int param_vect_inner_loop_cost_factor;
+#else
+ int x_param_vect_inner_loop_cost_factor;
+#define param_vect_inner_loop_cost_factor global_options.x_param_vect_inner_loop_cost_factor
+#endif
+#ifdef GENERATOR_FILE
+extern int param_vect_max_layout_candidates;
+#else
+ int x_param_vect_max_layout_candidates;
+#define param_vect_max_layout_candidates global_options.x_param_vect_max_layout_candidates
+#endif
+#ifdef GENERATOR_FILE
+extern int param_vect_max_peeling_for_alignment;
+#else
+ int x_param_vect_max_peeling_for_alignment;
+#define param_vect_max_peeling_for_alignment global_options.x_param_vect_max_peeling_for_alignment
+#endif
+#ifdef GENERATOR_FILE
+extern int param_vect_max_version_for_alias_checks;
+#else
+ int x_param_vect_max_version_for_alias_checks;
+#define param_vect_max_version_for_alias_checks global_options.x_param_vect_max_version_for_alias_checks
+#endif
+#ifdef GENERATOR_FILE
+extern int param_vect_max_version_for_alignment_checks;
+#else
+ int x_param_vect_max_version_for_alignment_checks;
+#define param_vect_max_version_for_alignment_checks global_options.x_param_vect_max_version_for_alignment_checks
+#endif
+#ifdef GENERATOR_FILE
+extern int param_vect_partial_vector_usage;
+#else
+ int x_param_vect_partial_vector_usage;
+#define param_vect_partial_vector_usage global_options.x_param_vect_partial_vector_usage
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_preprocess_only;
+#else
+ int x_flag_preprocess_only;
+#define flag_preprocess_only global_options.x_flag_preprocess_only
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_nsobject_attribute;
+#else
+ int x_warn_nsobject_attribute;
+#define warn_nsobject_attribute global_options.x_warn_nsobject_attribute
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_abi;
+#else
+ int x_warn_abi;
+#define warn_abi global_options.x_warn_abi
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_abi_tag;
+#else
+ int x_warn_abi_tag;
+#define warn_abi_tag global_options.x_warn_abi_tag
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_absolute_value;
+#else
+ int x_warn_absolute_value;
+#define warn_absolute_value global_options.x_warn_absolute_value
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_address;
+#else
+ int x_warn_address;
+#define warn_address global_options.x_warn_address
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_address_of_packed_member;
+#else
+ int x_warn_address_of_packed_member;
+#define warn_address_of_packed_member global_options.x_warn_address_of_packed_member
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_aggregate_return;
+#else
+ int x_warn_aggregate_return;
+#define warn_aggregate_return global_options.x_warn_aggregate_return
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_aggressive_loop_optimizations;
+#else
+ int x_warn_aggressive_loop_optimizations;
+#define warn_aggressive_loop_optimizations global_options.x_warn_aggressive_loop_optimizations
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_aliasing;
+#else
+ int x_warn_aliasing;
+#define warn_aliasing global_options.x_warn_aliasing
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_align_commons;
+#else
+ int x_warn_align_commons;
+#define warn_align_commons global_options.x_warn_align_commons
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_aligned_new;
+#else
+ int x_warn_aligned_new;
+#define warn_aligned_new global_options.x_warn_aligned_new
+#endif
+#ifdef GENERATOR_FILE
+extern HOST_WIDE_INT warn_alloc_size_limit;
+#else
+ HOST_WIDE_INT x_warn_alloc_size_limit;
+#define warn_alloc_size_limit global_options.x_warn_alloc_size_limit
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_alloc_zero;
+#else
+ int x_warn_alloc_zero;
+#define warn_alloc_zero global_options.x_warn_alloc_zero
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_alloca;
+#else
+ int x_warn_alloca;
+#define warn_alloca global_options.x_warn_alloca
+#endif
+#ifdef GENERATOR_FILE
+extern HOST_WIDE_INT warn_alloca_limit;
+#else
+ HOST_WIDE_INT x_warn_alloca_limit;
+#define warn_alloca_limit global_options.x_warn_alloca_limit
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_ampersand;
+#else
+ int x_warn_ampersand;
+#define warn_ampersand global_options.x_warn_ampersand
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_allocation_size;
+#else
+ int x_warn_analyzer_allocation_size;
+#define warn_analyzer_allocation_size global_options.x_warn_analyzer_allocation_size
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_deref_before_check;
+#else
+ int x_warn_analyzer_deref_before_check;
+#define warn_analyzer_deref_before_check global_options.x_warn_analyzer_deref_before_check
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_double_fclose;
+#else
+ int x_warn_analyzer_double_fclose;
+#define warn_analyzer_double_fclose global_options.x_warn_analyzer_double_fclose
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_double_free;
+#else
+ int x_warn_analyzer_double_free;
+#define warn_analyzer_double_free global_options.x_warn_analyzer_double_free
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_exposure_through_output_file;
+#else
+ int x_warn_analyzer_exposure_through_output_file;
+#define warn_analyzer_exposure_through_output_file global_options.x_warn_analyzer_exposure_through_output_file
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_exposure_through_uninit_copy;
+#else
+ int x_warn_analyzer_exposure_through_uninit_copy;
+#define warn_analyzer_exposure_through_uninit_copy global_options.x_warn_analyzer_exposure_through_uninit_copy
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_fd_mode_mismatch;
+#else
+ int x_warn_analyzer_fd_mode_mismatch;
+#define warn_analyzer_fd_mode_mismatch global_options.x_warn_analyzer_fd_mode_mismatch
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_fd_double_close;
+#else
+ int x_warn_analyzer_fd_double_close;
+#define warn_analyzer_fd_double_close global_options.x_warn_analyzer_fd_double_close
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_fd_leak;
+#else
+ int x_warn_analyzer_fd_leak;
+#define warn_analyzer_fd_leak global_options.x_warn_analyzer_fd_leak
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_fd_phase_mismatch;
+#else
+ int x_warn_analyzer_fd_phase_mismatch;
+#define warn_analyzer_fd_phase_mismatch global_options.x_warn_analyzer_fd_phase_mismatch
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_fd_type_mismatch;
+#else
+ int x_warn_analyzer_fd_type_mismatch;
+#define warn_analyzer_fd_type_mismatch global_options.x_warn_analyzer_fd_type_mismatch
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_fd_use_after_close;
+#else
+ int x_warn_analyzer_fd_use_after_close;
+#define warn_analyzer_fd_use_after_close global_options.x_warn_analyzer_fd_use_after_close
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_fd_use_without_check;
+#else
+ int x_warn_analyzer_fd_use_without_check;
+#define warn_analyzer_fd_use_without_check global_options.x_warn_analyzer_fd_use_without_check
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_file_leak;
+#else
+ int x_warn_analyzer_file_leak;
+#define warn_analyzer_file_leak global_options.x_warn_analyzer_file_leak
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_free_of_non_heap;
+#else
+ int x_warn_analyzer_free_of_non_heap;
+#define warn_analyzer_free_of_non_heap global_options.x_warn_analyzer_free_of_non_heap
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_imprecise_fp_arithmetic;
+#else
+ int x_warn_analyzer_imprecise_fp_arithmetic;
+#define warn_analyzer_imprecise_fp_arithmetic global_options.x_warn_analyzer_imprecise_fp_arithmetic
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_infinite_recursion;
+#else
+ int x_warn_analyzer_infinite_recursion;
+#define warn_analyzer_infinite_recursion global_options.x_warn_analyzer_infinite_recursion
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_jump_through_null;
+#else
+ int x_warn_analyzer_jump_through_null;
+#define warn_analyzer_jump_through_null global_options.x_warn_analyzer_jump_through_null
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_malloc_leak;
+#else
+ int x_warn_analyzer_malloc_leak;
+#define warn_analyzer_malloc_leak global_options.x_warn_analyzer_malloc_leak
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_mismatching_deallocation;
+#else
+ int x_warn_analyzer_mismatching_deallocation;
+#define warn_analyzer_mismatching_deallocation global_options.x_warn_analyzer_mismatching_deallocation
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_null_argument;
+#else
+ int x_warn_analyzer_null_argument;
+#define warn_analyzer_null_argument global_options.x_warn_analyzer_null_argument
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_null_dereference;
+#else
+ int x_warn_analyzer_null_dereference;
+#define warn_analyzer_null_dereference global_options.x_warn_analyzer_null_dereference
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_out_of_bounds;
+#else
+ int x_warn_analyzer_out_of_bounds;
+#define warn_analyzer_out_of_bounds global_options.x_warn_analyzer_out_of_bounds
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_possible_null_argument;
+#else
+ int x_warn_analyzer_possible_null_argument;
+#define warn_analyzer_possible_null_argument global_options.x_warn_analyzer_possible_null_argument
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_possible_null_dereference;
+#else
+ int x_warn_analyzer_possible_null_dereference;
+#define warn_analyzer_possible_null_dereference global_options.x_warn_analyzer_possible_null_dereference
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_putenv_of_auto_var;
+#else
+ int x_warn_analyzer_putenv_of_auto_var;
+#define warn_analyzer_putenv_of_auto_var global_options.x_warn_analyzer_putenv_of_auto_var
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_shift_count_negative;
+#else
+ int x_warn_analyzer_shift_count_negative;
+#define warn_analyzer_shift_count_negative global_options.x_warn_analyzer_shift_count_negative
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_shift_count_overflow;
+#else
+ int x_warn_analyzer_shift_count_overflow;
+#define warn_analyzer_shift_count_overflow global_options.x_warn_analyzer_shift_count_overflow
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_stale_setjmp_buffer;
+#else
+ int x_warn_analyzer_stale_setjmp_buffer;
+#define warn_analyzer_stale_setjmp_buffer global_options.x_warn_analyzer_stale_setjmp_buffer
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_tainted_allocation_size;
+#else
+ int x_warn_analyzer_tainted_allocation_size;
+#define warn_analyzer_tainted_allocation_size global_options.x_warn_analyzer_tainted_allocation_size
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_tainted_array_index;
+#else
+ int x_warn_analyzer_tainted_array_index;
+#define warn_analyzer_tainted_array_index global_options.x_warn_analyzer_tainted_array_index
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_tainted_assertion;
+#else
+ int x_warn_analyzer_tainted_assertion;
+#define warn_analyzer_tainted_assertion global_options.x_warn_analyzer_tainted_assertion
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_tainted_divisor;
+#else
+ int x_warn_analyzer_tainted_divisor;
+#define warn_analyzer_tainted_divisor global_options.x_warn_analyzer_tainted_divisor
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_tainted_offset;
+#else
+ int x_warn_analyzer_tainted_offset;
+#define warn_analyzer_tainted_offset global_options.x_warn_analyzer_tainted_offset
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_tainted_size;
+#else
+ int x_warn_analyzer_tainted_size;
+#define warn_analyzer_tainted_size global_options.x_warn_analyzer_tainted_size
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_too_complex;
+#else
+ int x_warn_analyzer_too_complex;
+#define warn_analyzer_too_complex global_options.x_warn_analyzer_too_complex
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_unsafe_call_within_signal_handler;
+#else
+ int x_warn_analyzer_unsafe_call_within_signal_handler;
+#define warn_analyzer_unsafe_call_within_signal_handler global_options.x_warn_analyzer_unsafe_call_within_signal_handler
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_use_after_free;
+#else
+ int x_warn_analyzer_use_after_free;
+#define warn_analyzer_use_after_free global_options.x_warn_analyzer_use_after_free
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_use_of_pointer_in_stale_stack_frame;
+#else
+ int x_warn_analyzer_use_of_pointer_in_stale_stack_frame;
+#define warn_analyzer_use_of_pointer_in_stale_stack_frame global_options.x_warn_analyzer_use_of_pointer_in_stale_stack_frame
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_use_of_uninitialized_value;
+#else
+ int x_warn_analyzer_use_of_uninitialized_value;
+#define warn_analyzer_use_of_uninitialized_value global_options.x_warn_analyzer_use_of_uninitialized_value
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_va_arg_type_mismatch;
+#else
+ int x_warn_analyzer_va_arg_type_mismatch;
+#define warn_analyzer_va_arg_type_mismatch global_options.x_warn_analyzer_va_arg_type_mismatch
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_va_list_exhausted;
+#else
+ int x_warn_analyzer_va_list_exhausted;
+#define warn_analyzer_va_list_exhausted global_options.x_warn_analyzer_va_list_exhausted
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_va_list_leak;
+#else
+ int x_warn_analyzer_va_list_leak;
+#define warn_analyzer_va_list_leak global_options.x_warn_analyzer_va_list_leak
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_va_list_use_after_va_end;
+#else
+ int x_warn_analyzer_va_list_use_after_va_end;
+#define warn_analyzer_va_list_use_after_va_end global_options.x_warn_analyzer_va_list_use_after_va_end
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_write_to_const;
+#else
+ int x_warn_analyzer_write_to_const;
+#define warn_analyzer_write_to_const global_options.x_warn_analyzer_write_to_const
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_analyzer_write_to_string_literal;
+#else
+ int x_warn_analyzer_write_to_string_literal;
+#define warn_analyzer_write_to_string_literal global_options.x_warn_analyzer_write_to_string_literal
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_arith_conv;
+#else
+ int x_warn_arith_conv;
+#define warn_arith_conv global_options.x_warn_arith_conv
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_array_bounds;
+#else
+ int x_warn_array_bounds;
+#define warn_array_bounds global_options.x_warn_array_bounds
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_array_compare;
+#else
+ int x_warn_array_compare;
+#define warn_array_compare global_options.x_warn_array_compare
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_array_parameter;
+#else
+ int x_warn_array_parameter;
+#define warn_array_parameter global_options.x_warn_array_parameter
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_array_temporaries;
+#else
+ int x_warn_array_temporaries;
+#define warn_array_temporaries global_options.x_warn_array_temporaries
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_assign_intercept;
+#else
+ int x_warn_assign_intercept;
+#define warn_assign_intercept global_options.x_warn_assign_intercept
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_attribute_alias;
+#else
+ int x_warn_attribute_alias;
+#define warn_attribute_alias global_options.x_warn_attribute_alias
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_attribute_warning;
+#else
+ int x_warn_attribute_warning;
+#define warn_attribute_warning global_options.x_warn_attribute_warning
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_attributes;
+#else
+ int x_warn_attributes;
+#define warn_attributes global_options.x_warn_attributes
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_bad_function_cast;
+#else
+ int x_warn_bad_function_cast;
+#define warn_bad_function_cast global_options.x_warn_bad_function_cast
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_bidirectional;
+#else
+ int x_warn_bidirectional;
+#define warn_bidirectional global_options.x_warn_bidirectional
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_bool_compare;
+#else
+ int x_warn_bool_compare;
+#define warn_bool_compare global_options.x_warn_bool_compare
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_bool_op;
+#else
+ int x_warn_bool_op;
+#define warn_bool_op global_options.x_warn_bool_op
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_builtin_declaration_mismatch;
+#else
+ int x_warn_builtin_declaration_mismatch;
+#define warn_builtin_declaration_mismatch global_options.x_warn_builtin_declaration_mismatch
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_builtin_macro_redefined;
+#else
+ int x_cpp_warn_builtin_macro_redefined;
+#define cpp_warn_builtin_macro_redefined global_options.x_cpp_warn_builtin_macro_redefined
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cxx_compat;
+#else
+ int x_warn_cxx_compat;
+#define warn_cxx_compat global_options.x_warn_cxx_compat
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cxx11_compat;
+#else
+ int x_warn_cxx11_compat;
+#define warn_cxx11_compat global_options.x_warn_cxx11_compat
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cxx11_extensions;
+#else
+ int x_warn_cxx11_extensions;
+#define warn_cxx11_extensions global_options.x_warn_cxx11_extensions
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cxx14_compat;
+#else
+ int x_warn_cxx14_compat;
+#define warn_cxx14_compat global_options.x_warn_cxx14_compat
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cxx14_extensions;
+#else
+ int x_warn_cxx14_extensions;
+#define warn_cxx14_extensions global_options.x_warn_cxx14_extensions
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cxx17_compat;
+#else
+ int x_warn_cxx17_compat;
+#define warn_cxx17_compat global_options.x_warn_cxx17_compat
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cxx17_extensions;
+#else
+ int x_warn_cxx17_extensions;
+#define warn_cxx17_extensions global_options.x_warn_cxx17_extensions
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cxx20_compat;
+#else
+ int x_warn_cxx20_compat;
+#define warn_cxx20_compat global_options.x_warn_cxx20_compat
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cxx20_extensions;
+#else
+ int x_warn_cxx20_extensions;
+#define warn_cxx20_extensions global_options.x_warn_cxx20_extensions
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cxx23_extensions;
+#else
+ int x_warn_cxx23_extensions;
+#define warn_cxx23_extensions global_options.x_warn_cxx23_extensions
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_c_binding_type;
+#else
+ int x_warn_c_binding_type;
+#define warn_c_binding_type global_options.x_warn_c_binding_type
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_c11_c2x_compat;
+#else
+ int x_warn_c11_c2x_compat;
+#define warn_c11_c2x_compat global_options.x_warn_c11_c2x_compat
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_c90_c99_compat;
+#else
+ int x_warn_c90_c99_compat;
+#define warn_c90_c99_compat global_options.x_warn_c90_c99_compat
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_c99_c11_compat;
+#else
+ int x_warn_c99_c11_compat;
+#define warn_c99_c11_compat global_options.x_warn_c99_c11_compat
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cannot_profile;
+#else
+ int x_warn_cannot_profile;
+#define warn_cannot_profile global_options.x_warn_cannot_profile
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cast_align;
+#else
+ int x_warn_cast_align;
+#define warn_cast_align global_options.x_warn_cast_align
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cast_function_type;
+#else
+ int x_warn_cast_function_type;
+#define warn_cast_function_type global_options.x_warn_cast_function_type
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cast_qual;
+#else
+ int x_warn_cast_qual;
+#define warn_cast_qual global_options.x_warn_cast_qual
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cast_result;
+#else
+ int x_warn_cast_result;
+#define warn_cast_result global_options.x_warn_cast_result
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_catch_value;
+#else
+ int x_warn_catch_value;
+#define warn_catch_value global_options.x_warn_catch_value
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_changes_meaning;
+#else
+ int x_warn_changes_meaning;
+#define warn_changes_meaning global_options.x_warn_changes_meaning
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_char_subscripts;
+#else
+ int x_warn_char_subscripts;
+#define warn_char_subscripts global_options.x_warn_char_subscripts
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_character_truncation;
+#else
+ int x_warn_character_truncation;
+#define warn_character_truncation global_options.x_warn_character_truncation
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_class_conversion;
+#else
+ int x_warn_class_conversion;
+#define warn_class_conversion global_options.x_warn_class_conversion
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_class_memaccess;
+#else
+ int x_warn_class_memaccess;
+#define warn_class_memaccess global_options.x_warn_class_memaccess
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_clobbered;
+#else
+ int x_warn_clobbered;
+#define warn_clobbered global_options.x_warn_clobbered
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_comma_subscript;
+#else
+ int x_warn_comma_subscript;
+#define warn_comma_subscript global_options.x_warn_comma_subscript
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_comment;
+#else
+ int x_cpp_warn_comment;
+#define cpp_warn_comment global_options.x_cpp_warn_comment
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_compare_reals;
+#else
+ int x_warn_compare_reals;
+#define warn_compare_reals global_options.x_warn_compare_reals
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_complain_wrong_lang;
+#else
+ int x_warn_complain_wrong_lang;
+#define warn_complain_wrong_lang global_options.x_warn_complain_wrong_lang
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_conditionally_supported;
+#else
+ int x_warn_conditionally_supported;
+#define warn_conditionally_supported global_options.x_warn_conditionally_supported
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_conversion;
+#else
+ int x_warn_conversion;
+#define warn_conversion global_options.x_warn_conversion
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_conversion_extra;
+#else
+ int x_warn_conversion_extra;
+#define warn_conversion_extra global_options.x_warn_conversion_extra
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_conversion_null;
+#else
+ int x_warn_conversion_null;
+#define warn_conversion_null global_options.x_warn_conversion_null
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_coverage_invalid_linenum;
+#else
+ int x_warn_coverage_invalid_linenum;
+#define warn_coverage_invalid_linenum global_options.x_warn_coverage_invalid_linenum
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_coverage_mismatch;
+#else
+ int x_warn_coverage_mismatch;
+#define warn_coverage_mismatch global_options.x_warn_coverage_mismatch
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_cpp;
+#else
+ int x_warn_cpp;
+#define warn_cpp global_options.x_warn_cpp
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_ctad_maybe_unsupported;
+#else
+ int x_warn_ctad_maybe_unsupported;
+#define warn_ctad_maybe_unsupported global_options.x_warn_ctad_maybe_unsupported
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_ctor_dtor_privacy;
+#else
+ int x_warn_ctor_dtor_privacy;
+#define warn_ctor_dtor_privacy global_options.x_warn_ctor_dtor_privacy
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_dangling_else;
+#else
+ int x_warn_dangling_else;
+#define warn_dangling_else global_options.x_warn_dangling_else
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_dangling_pointer;
+#else
+ int x_warn_dangling_pointer;
+#define warn_dangling_pointer global_options.x_warn_dangling_pointer
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_dangling_reference;
+#else
+ int x_warn_dangling_reference;
+#define warn_dangling_reference global_options.x_warn_dangling_reference
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_date_time;
+#else
+ int x_cpp_warn_date_time;
+#define cpp_warn_date_time global_options.x_cpp_warn_date_time
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_declaration_after_statement;
+#else
+ int x_warn_declaration_after_statement;
+#define warn_declaration_after_statement global_options.x_warn_declaration_after_statement
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_delete_incomplete;
+#else
+ int x_warn_delete_incomplete;
+#define warn_delete_incomplete global_options.x_warn_delete_incomplete
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_delnonvdtor;
+#else
+ int x_warn_delnonvdtor;
+#define warn_delnonvdtor global_options.x_warn_delnonvdtor
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_deprecated;
+#else
+ int x_warn_deprecated;
+#define warn_deprecated global_options.x_warn_deprecated
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_deprecated_copy;
+#else
+ int x_warn_deprecated_copy;
+#define warn_deprecated_copy global_options.x_warn_deprecated_copy
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_deprecated_decl;
+#else
+ int x_warn_deprecated_decl;
+#define warn_deprecated_decl global_options.x_warn_deprecated_decl
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_deprecated_enum_enum_conv;
+#else
+ int x_warn_deprecated_enum_enum_conv;
+#define warn_deprecated_enum_enum_conv global_options.x_warn_deprecated_enum_enum_conv
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_deprecated_enum_float_conv;
+#else
+ int x_warn_deprecated_enum_float_conv;
+#define warn_deprecated_enum_float_conv global_options.x_warn_deprecated_enum_float_conv
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_designated_init;
+#else
+ int x_warn_designated_init;
+#define warn_designated_init global_options.x_warn_designated_init
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_disabled_optimization;
+#else
+ int x_warn_disabled_optimization;
+#define warn_disabled_optimization global_options.x_warn_disabled_optimization
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_discarded_array_qualifiers;
+#else
+ int x_warn_discarded_array_qualifiers;
+#define warn_discarded_array_qualifiers global_options.x_warn_discarded_array_qualifiers
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_discarded_qualifiers;
+#else
+ int x_warn_discarded_qualifiers;
+#define warn_discarded_qualifiers global_options.x_warn_discarded_qualifiers
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_div_by_zero;
+#else
+ int x_warn_div_by_zero;
+#define warn_div_by_zero global_options.x_warn_div_by_zero
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_do_subscript;
+#else
+ int x_warn_do_subscript;
+#define warn_do_subscript global_options.x_warn_do_subscript
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_double_promotion;
+#else
+ int x_warn_double_promotion;
+#define warn_double_promotion global_options.x_warn_double_promotion
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_duplicate_decl_specifier;
+#else
+ int x_warn_duplicate_decl_specifier;
+#define warn_duplicate_decl_specifier global_options.x_warn_duplicate_decl_specifier
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_duplicated_branches;
+#else
+ int x_warn_duplicated_branches;
+#define warn_duplicated_branches global_options.x_warn_duplicated_branches
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_duplicated_cond;
+#else
+ int x_warn_duplicated_cond;
+#define warn_duplicated_cond global_options.x_warn_duplicated_cond
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_ecpp;
+#else
+ int x_warn_ecpp;
+#define warn_ecpp global_options.x_warn_ecpp
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_empty_body;
+#else
+ int x_warn_empty_body;
+#define warn_empty_body global_options.x_warn_empty_body
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_endif_labels;
+#else
+ int x_cpp_warn_endif_labels;
+#define cpp_warn_endif_labels global_options.x_cpp_warn_endif_labels
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_enum_compare;
+#else
+ int x_warn_enum_compare;
+#define warn_enum_compare global_options.x_warn_enum_compare
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_enum_conversion;
+#else
+ int x_warn_enum_conversion;
+#define warn_enum_conversion global_options.x_warn_enum_conversion
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_enum_int_mismatch;
+#else
+ int x_warn_enum_int_mismatch;
+#define warn_enum_int_mismatch global_options.x_warn_enum_int_mismatch
+#endif
+#ifdef GENERATOR_FILE
+extern int warnings_are_errors;
+#else
+ int x_warnings_are_errors;
+#define warnings_are_errors global_options.x_warnings_are_errors
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_exceptions;
+#else
+ int x_warn_exceptions;
+#define warn_exceptions global_options.x_warn_exceptions
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_expansion_to_defined;
+#else
+ int x_cpp_warn_expansion_to_defined;
+#define cpp_warn_expansion_to_defined global_options.x_cpp_warn_expansion_to_defined
+#endif
+#ifdef GENERATOR_FILE
+extern int extra_warnings;
+#else
+ int x_extra_warnings;
+#define extra_warnings global_options.x_extra_warnings
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_extra_semi;
+#else
+ int x_warn_extra_semi;
+#define warn_extra_semi global_options.x_warn_extra_semi
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_fatal_errors;
+#else
+ int x_flag_fatal_errors;
+#define flag_fatal_errors global_options.x_flag_fatal_errors
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_float_conversion;
+#else
+ int x_warn_float_conversion;
+#define warn_float_conversion global_options.x_warn_float_conversion
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_float_equal;
+#else
+ int x_warn_float_equal;
+#define warn_float_equal global_options.x_warn_float_equal
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_format_contains_nul;
+#else
+ int x_warn_format_contains_nul;
+#define warn_format_contains_nul global_options.x_warn_format_contains_nul
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_format_diag;
+#else
+ int x_warn_format_diag;
+#define warn_format_diag global_options.x_warn_format_diag
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_format_extra_args;
+#else
+ int x_warn_format_extra_args;
+#define warn_format_extra_args global_options.x_warn_format_extra_args
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_format_nonliteral;
+#else
+ int x_warn_format_nonliteral;
+#define warn_format_nonliteral global_options.x_warn_format_nonliteral
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_format_overflow;
+#else
+ int x_warn_format_overflow;
+#define warn_format_overflow global_options.x_warn_format_overflow
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_format_security;
+#else
+ int x_warn_format_security;
+#define warn_format_security global_options.x_warn_format_security
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_format_signedness;
+#else
+ int x_warn_format_signedness;
+#define warn_format_signedness global_options.x_warn_format_signedness
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_format_trunc;
+#else
+ int x_warn_format_trunc;
+#define warn_format_trunc global_options.x_warn_format_trunc
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_format_y2k;
+#else
+ int x_warn_format_y2k;
+#define warn_format_y2k global_options.x_warn_format_y2k
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_format_zero_length;
+#else
+ int x_warn_format_zero_length;
+#define warn_format_zero_length global_options.x_warn_format_zero_length
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_format;
+#else
+ int x_warn_format;
+#define warn_format global_options.x_warn_format
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_frame_address;
+#else
+ int x_warn_frame_address;
+#define warn_frame_address global_options.x_warn_frame_address
+#endif
+#ifdef GENERATOR_FILE
+extern HOST_WIDE_INT warn_frame_larger_than_size;
+#else
+ HOST_WIDE_INT x_warn_frame_larger_than_size;
+#define warn_frame_larger_than_size global_options.x_warn_frame_larger_than_size
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_free_nonheap_object;
+#else
+ int x_warn_free_nonheap_object;
+#define warn_free_nonheap_object global_options.x_warn_free_nonheap_object
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_warn_frontend_loop_interchange;
+#else
+ int x_flag_warn_frontend_loop_interchange;
+#define flag_warn_frontend_loop_interchange global_options.x_flag_warn_frontend_loop_interchange
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_function_elimination;
+#else
+ int x_warn_function_elimination;
+#define warn_function_elimination global_options.x_warn_function_elimination
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_if_not_aligned;
+#else
+ int x_warn_if_not_aligned;
+#define warn_if_not_aligned global_options.x_warn_if_not_aligned
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_ignored_attributes;
+#else
+ int x_warn_ignored_attributes;
+#define warn_ignored_attributes global_options.x_warn_ignored_attributes
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_ignored_qualifiers;
+#else
+ int x_warn_ignored_qualifiers;
+#define warn_ignored_qualifiers global_options.x_warn_ignored_qualifiers
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_implicit;
+#else
+ int x_warn_implicit;
+#define warn_implicit global_options.x_warn_implicit
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_implicit_fallthrough;
+#else
+ int x_warn_implicit_fallthrough;
+#define warn_implicit_fallthrough global_options.x_warn_implicit_fallthrough
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_implicit_function_declaration;
+#else
+ int x_warn_implicit_function_declaration;
+#define warn_implicit_function_declaration global_options.x_warn_implicit_function_declaration
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_implicit_int;
+#else
+ int x_warn_implicit_int;
+#define warn_implicit_int global_options.x_warn_implicit_int
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_implicit_interface;
+#else
+ int x_warn_implicit_interface;
+#define warn_implicit_interface global_options.x_warn_implicit_interface
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_implicit_procedure;
+#else
+ int x_warn_implicit_procedure;
+#define warn_implicit_procedure global_options.x_warn_implicit_procedure
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_inaccessible_base;
+#else
+ int x_warn_inaccessible_base;
+#define warn_inaccessible_base global_options.x_warn_inaccessible_base
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_incompatible_pointer_types;
+#else
+ int x_warn_incompatible_pointer_types;
+#define warn_incompatible_pointer_types global_options.x_warn_incompatible_pointer_types
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_infinite_recursion;
+#else
+ int x_warn_infinite_recursion;
+#define warn_infinite_recursion global_options.x_warn_infinite_recursion
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_inh_var_ctor;
+#else
+ int x_warn_inh_var_ctor;
+#define warn_inh_var_ctor global_options.x_warn_inh_var_ctor
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_init_list;
+#else
+ int x_warn_init_list;
+#define warn_init_list global_options.x_warn_init_list
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_init_self;
+#else
+ int x_warn_init_self;
+#define warn_init_self global_options.x_warn_init_self
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_inline;
+#else
+ int x_warn_inline;
+#define warn_inline global_options.x_warn_inline
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_int_conversion;
+#else
+ int x_warn_int_conversion;
+#define warn_int_conversion global_options.x_warn_int_conversion
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_int_in_bool_context;
+#else
+ int x_warn_int_in_bool_context;
+#define warn_int_in_bool_context global_options.x_warn_int_in_bool_context
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_int_to_pointer_cast;
+#else
+ int x_warn_int_to_pointer_cast;
+#define warn_int_to_pointer_cast global_options.x_warn_int_to_pointer_cast
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_integer_division;
+#else
+ int x_warn_integer_division;
+#define warn_integer_division global_options.x_warn_integer_division
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_interference_size;
+#else
+ int x_warn_interference_size;
+#define warn_interference_size global_options.x_warn_interference_size
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_intrinsic_shadow;
+#else
+ int x_warn_intrinsic_shadow;
+#define warn_intrinsic_shadow global_options.x_warn_intrinsic_shadow
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_intrinsics_std;
+#else
+ int x_warn_intrinsics_std;
+#define warn_intrinsics_std global_options.x_warn_intrinsics_std
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_invalid_constexpr;
+#else
+ int x_warn_invalid_constexpr;
+#define warn_invalid_constexpr global_options.x_warn_invalid_constexpr
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_imported_macros;
+#else
+ int x_warn_imported_macros;
+#define warn_imported_macros global_options.x_warn_imported_macros
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_invalid_memory_model;
+#else
+ int x_warn_invalid_memory_model;
+#define warn_invalid_memory_model global_options.x_warn_invalid_memory_model
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_invalid_offsetof;
+#else
+ int x_warn_invalid_offsetof;
+#define warn_invalid_offsetof global_options.x_warn_invalid_offsetof
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_invalid_pch;
+#else
+ int x_cpp_warn_invalid_pch;
+#define cpp_warn_invalid_pch global_options.x_cpp_warn_invalid_pch
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_invalid_utf8;
+#else
+ int x_warn_invalid_utf8;
+#define warn_invalid_utf8 global_options.x_warn_invalid_utf8
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_jump_misses_init;
+#else
+ int x_warn_jump_misses_init;
+#define warn_jump_misses_init global_options.x_warn_jump_misses_init
+#endif
+#ifdef GENERATOR_FILE
+extern HOST_WIDE_INT warn_larger_than_size;
+#else
+ HOST_WIDE_INT x_warn_larger_than_size;
+#define warn_larger_than_size global_options.x_warn_larger_than_size
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_line_truncation;
+#else
+ int x_warn_line_truncation;
+#define warn_line_truncation global_options.x_warn_line_truncation
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_literal_suffix;
+#else
+ int x_cpp_warn_literal_suffix;
+#define cpp_warn_literal_suffix global_options.x_cpp_warn_literal_suffix
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_logical_not_paren;
+#else
+ int x_warn_logical_not_paren;
+#define warn_logical_not_paren global_options.x_warn_logical_not_paren
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_logical_op;
+#else
+ int x_warn_logical_op;
+#define warn_logical_op global_options.x_warn_logical_op
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_long_long;
+#else
+ int x_warn_long_long;
+#define warn_long_long global_options.x_warn_long_long
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_lto_type_mismatch;
+#else
+ int x_warn_lto_type_mismatch;
+#define warn_lto_type_mismatch global_options.x_warn_lto_type_mismatch
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_main;
+#else
+ int x_warn_main;
+#define warn_main global_options.x_warn_main
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_maybe_uninitialized;
+#else
+ int x_warn_maybe_uninitialized;
+#define warn_maybe_uninitialized global_options.x_warn_maybe_uninitialized
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_memset_elt_size;
+#else
+ int x_warn_memset_elt_size;
+#define warn_memset_elt_size global_options.x_warn_memset_elt_size
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_memset_transposed_args;
+#else
+ int x_warn_memset_transposed_args;
+#define warn_memset_transposed_args global_options.x_warn_memset_transposed_args
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_misleading_indentation;
+#else
+ int x_warn_misleading_indentation;
+#define warn_misleading_indentation global_options.x_warn_misleading_indentation
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_mismatched_alloc;
+#else
+ int x_warn_mismatched_alloc;
+#define warn_mismatched_alloc global_options.x_warn_mismatched_alloc
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_mismatched_new_delete;
+#else
+ int x_warn_mismatched_new_delete;
+#define warn_mismatched_new_delete global_options.x_warn_mismatched_new_delete
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_mismatched_special_enum;
+#else
+ int x_warn_mismatched_special_enum;
+#define warn_mismatched_special_enum global_options.x_warn_mismatched_special_enum
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_mismatched_tags;
+#else
+ int x_warn_mismatched_tags;
+#define warn_mismatched_tags global_options.x_warn_mismatched_tags
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_missing_attributes;
+#else
+ int x_warn_missing_attributes;
+#define warn_missing_attributes global_options.x_warn_missing_attributes
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_missing_braces;
+#else
+ int x_warn_missing_braces;
+#define warn_missing_braces global_options.x_warn_missing_braces
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_missing_declarations;
+#else
+ int x_warn_missing_declarations;
+#define warn_missing_declarations global_options.x_warn_missing_declarations
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_missing_field_initializers;
+#else
+ int x_warn_missing_field_initializers;
+#define warn_missing_field_initializers global_options.x_warn_missing_field_initializers
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_missing_include_dirs;
+#else
+ int x_cpp_warn_missing_include_dirs;
+#define cpp_warn_missing_include_dirs global_options.x_cpp_warn_missing_include_dirs
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_missing_parameter_type;
+#else
+ int x_warn_missing_parameter_type;
+#define warn_missing_parameter_type global_options.x_warn_missing_parameter_type
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_missing_profile;
+#else
+ int x_warn_missing_profile;
+#define warn_missing_profile global_options.x_warn_missing_profile
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_missing_prototypes;
+#else
+ int x_warn_missing_prototypes;
+#define warn_missing_prototypes global_options.x_warn_missing_prototypes
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_missing_requires;
+#else
+ int x_warn_missing_requires;
+#define warn_missing_requires global_options.x_warn_missing_requires
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_missing_template_keyword;
+#else
+ int x_warn_missing_template_keyword;
+#define warn_missing_template_keyword global_options.x_warn_missing_template_keyword
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_multichar;
+#else
+ int x_cpp_warn_multichar;
+#define cpp_warn_multichar global_options.x_cpp_warn_multichar
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_multiple_inheritance;
+#else
+ int x_warn_multiple_inheritance;
+#define warn_multiple_inheritance global_options.x_warn_multiple_inheritance
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_multistatement_macros;
+#else
+ int x_warn_multistatement_macros;
+#define warn_multistatement_macros global_options.x_warn_multistatement_macros
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_namespaces;
+#else
+ int x_warn_namespaces;
+#define warn_namespaces global_options.x_warn_namespaces
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_narrowing;
+#else
+ int x_warn_narrowing;
+#define warn_narrowing global_options.x_warn_narrowing
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_nested_externs;
+#else
+ int x_warn_nested_externs;
+#define warn_nested_externs global_options.x_warn_nested_externs
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_noexcept;
+#else
+ int x_warn_noexcept;
+#define warn_noexcept global_options.x_warn_noexcept
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_noexcept_type;
+#else
+ int x_warn_noexcept_type;
+#define warn_noexcept_type global_options.x_warn_noexcept_type
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_nontemplate_friend;
+#else
+ int x_warn_nontemplate_friend;
+#define warn_nontemplate_friend global_options.x_warn_nontemplate_friend
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_nonvdtor;
+#else
+ int x_warn_nonvdtor;
+#define warn_nonvdtor global_options.x_warn_nonvdtor
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_nonnull;
+#else
+ int x_warn_nonnull;
+#define warn_nonnull global_options.x_warn_nonnull
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_nonnull_compare;
+#else
+ int x_warn_nonnull_compare;
+#define warn_nonnull_compare global_options.x_warn_nonnull_compare
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_normalize;
+#else
+ int x_cpp_warn_normalize;
+#define cpp_warn_normalize global_options.x_cpp_warn_normalize
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_null_dereference;
+#else
+ int x_warn_null_dereference;
+#define warn_null_dereference global_options.x_warn_null_dereference
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_objc_root_class;
+#else
+ int x_warn_objc_root_class;
+#define warn_objc_root_class global_options.x_warn_objc_root_class
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_odr_violations;
+#else
+ int x_warn_odr_violations;
+#define warn_odr_violations global_options.x_warn_odr_violations
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_old_style_cast;
+#else
+ int x_warn_old_style_cast;
+#define warn_old_style_cast global_options.x_warn_old_style_cast
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_old_style_declaration;
+#else
+ int x_warn_old_style_declaration;
+#define warn_old_style_declaration global_options.x_warn_old_style_declaration
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_old_style_definition;
+#else
+ int x_warn_old_style_definition;
+#define warn_old_style_definition global_options.x_warn_old_style_definition
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_openacc_parallelism;
+#else
+ int x_warn_openacc_parallelism;
+#define warn_openacc_parallelism global_options.x_warn_openacc_parallelism
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_openmp_simd;
+#else
+ int x_warn_openmp_simd;
+#define warn_openmp_simd global_options.x_warn_openmp_simd
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_overflow;
+#else
+ int x_warn_overflow;
+#define warn_overflow global_options.x_warn_overflow
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_overlength_strings;
+#else
+ int x_warn_overlength_strings;
+#define warn_overlength_strings global_options.x_warn_overlength_strings
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_overloaded_virtual;
+#else
+ int x_warn_overloaded_virtual;
+#define warn_overloaded_virtual global_options.x_warn_overloaded_virtual
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_override_init;
+#else
+ int x_warn_override_init;
+#define warn_override_init global_options.x_warn_override_init
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_override_init_side_effects;
+#else
+ int x_warn_override_init_side_effects;
+#define warn_override_init_side_effects global_options.x_warn_override_init_side_effects
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_overwrite_recursive;
+#else
+ int x_warn_overwrite_recursive;
+#define warn_overwrite_recursive global_options.x_warn_overwrite_recursive
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_packed;
+#else
+ int x_warn_packed;
+#define warn_packed global_options.x_warn_packed
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_packed_bitfield_compat;
+#else
+ int x_warn_packed_bitfield_compat;
+#define warn_packed_bitfield_compat global_options.x_warn_packed_bitfield_compat
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_packed_not_aligned;
+#else
+ int x_warn_packed_not_aligned;
+#define warn_packed_not_aligned global_options.x_warn_packed_not_aligned
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_padded;
+#else
+ int x_warn_padded;
+#define warn_padded global_options.x_warn_padded
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_parentheses;
+#else
+ int x_warn_parentheses;
+#define warn_parentheses global_options.x_warn_parentheses
+#endif
+#ifdef GENERATOR_FILE
+extern int pedantic;
+#else
+ int x_pedantic;
+#define pedantic global_options.x_pedantic
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_pessimizing_move;
+#else
+ int x_warn_pessimizing_move;
+#define warn_pessimizing_move global_options.x_warn_pessimizing_move
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_placement_new;
+#else
+ int x_warn_placement_new;
+#define warn_placement_new global_options.x_warn_placement_new
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_pmf2ptr;
+#else
+ int x_warn_pmf2ptr;
+#define warn_pmf2ptr global_options.x_warn_pmf2ptr
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_pointer_arith;
+#else
+ int x_warn_pointer_arith;
+#define warn_pointer_arith global_options.x_warn_pointer_arith
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_pointer_compare;
+#else
+ int x_warn_pointer_compare;
+#define warn_pointer_compare global_options.x_warn_pointer_compare
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_pointer_sign;
+#else
+ int x_warn_pointer_sign;
+#define warn_pointer_sign global_options.x_warn_pointer_sign
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_pointer_to_int_cast;
+#else
+ int x_warn_pointer_to_int_cast;
+#define warn_pointer_to_int_cast global_options.x_warn_pointer_to_int_cast
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_pragmas;
+#else
+ int x_warn_pragmas;
+#define warn_pragmas global_options.x_warn_pragmas
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_prio_ctor_dtor;
+#else
+ int x_warn_prio_ctor_dtor;
+#define warn_prio_ctor_dtor global_options.x_warn_prio_ctor_dtor
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_property_assign_default;
+#else
+ int x_warn_property_assign_default;
+#define warn_property_assign_default global_options.x_warn_property_assign_default
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_protocol;
+#else
+ int x_warn_protocol;
+#define warn_protocol global_options.x_warn_protocol
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_psabi;
+#else
+ int x_warn_psabi;
+#define warn_psabi global_options.x_warn_psabi
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_range_loop_construct;
+#else
+ int x_warn_range_loop_construct;
+#define warn_range_loop_construct global_options.x_warn_range_loop_construct
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_real_q_constant;
+#else
+ int x_warn_real_q_constant;
+#define warn_real_q_constant global_options.x_warn_real_q_constant
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_realloc_lhs;
+#else
+ int x_warn_realloc_lhs;
+#define warn_realloc_lhs global_options.x_warn_realloc_lhs
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_realloc_lhs_all;
+#else
+ int x_warn_realloc_lhs_all;
+#define warn_realloc_lhs_all global_options.x_warn_realloc_lhs_all
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_redundant_decls;
+#else
+ int x_warn_redundant_decls;
+#define warn_redundant_decls global_options.x_warn_redundant_decls
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_redundant_move;
+#else
+ int x_warn_redundant_move;
+#define warn_redundant_move global_options.x_warn_redundant_move
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_redundant_tags;
+#else
+ int x_warn_redundant_tags;
+#define warn_redundant_tags global_options.x_warn_redundant_tags
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_register;
+#else
+ int x_warn_register;
+#define warn_register global_options.x_warn_register
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_reorder;
+#else
+ int x_warn_reorder;
+#define warn_reorder global_options.x_warn_reorder
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_restrict;
+#else
+ int x_warn_restrict;
+#define warn_restrict global_options.x_warn_restrict
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_return_local_addr;
+#else
+ int x_warn_return_local_addr;
+#define warn_return_local_addr global_options.x_warn_return_local_addr
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_return_type;
+#else
+ int x_warn_return_type;
+#define warn_return_type global_options.x_warn_return_type
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_scalar_storage_order;
+#else
+ int x_warn_scalar_storage_order;
+#define warn_scalar_storage_order global_options.x_warn_scalar_storage_order
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_selector;
+#else
+ int x_warn_selector;
+#define warn_selector global_options.x_warn_selector
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_self_move;
+#else
+ int x_warn_self_move;
+#define warn_self_move global_options.x_warn_self_move
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_sequence_point;
+#else
+ int x_warn_sequence_point;
+#define warn_sequence_point global_options.x_warn_sequence_point
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_shadow;
+#else
+ int x_warn_shadow;
+#define warn_shadow global_options.x_warn_shadow
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_shadow_ivar;
+#else
+ int x_warn_shadow_ivar;
+#define warn_shadow_ivar global_options.x_warn_shadow_ivar
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_shadow_compatible_local;
+#else
+ int x_warn_shadow_compatible_local;
+#define warn_shadow_compatible_local global_options.x_warn_shadow_compatible_local
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_shadow_local;
+#else
+ int x_warn_shadow_local;
+#define warn_shadow_local global_options.x_warn_shadow_local
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_shift_count_negative;
+#else
+ int x_warn_shift_count_negative;
+#define warn_shift_count_negative global_options.x_warn_shift_count_negative
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_shift_count_overflow;
+#else
+ int x_warn_shift_count_overflow;
+#define warn_shift_count_overflow global_options.x_warn_shift_count_overflow
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_shift_negative_value;
+#else
+ int x_warn_shift_negative_value;
+#define warn_shift_negative_value global_options.x_warn_shift_negative_value
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_shift_overflow;
+#else
+ int x_warn_shift_overflow;
+#define warn_shift_overflow global_options.x_warn_shift_overflow
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_sign_compare;
+#else
+ int x_warn_sign_compare;
+#define warn_sign_compare global_options.x_warn_sign_compare
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_sign_conversion;
+#else
+ int x_warn_sign_conversion;
+#define warn_sign_conversion global_options.x_warn_sign_conversion
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_sign_promo;
+#else
+ int x_warn_sign_promo;
+#define warn_sign_promo global_options.x_warn_sign_promo
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_sized_deallocation;
+#else
+ int x_warn_sized_deallocation;
+#define warn_sized_deallocation global_options.x_warn_sized_deallocation
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_sizeof_array_argument;
+#else
+ int x_warn_sizeof_array_argument;
+#define warn_sizeof_array_argument global_options.x_warn_sizeof_array_argument
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_sizeof_array_div;
+#else
+ int x_warn_sizeof_array_div;
+#define warn_sizeof_array_div global_options.x_warn_sizeof_array_div
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_sizeof_pointer_div;
+#else
+ int x_warn_sizeof_pointer_div;
+#define warn_sizeof_pointer_div global_options.x_warn_sizeof_pointer_div
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_sizeof_pointer_memaccess;
+#else
+ int x_warn_sizeof_pointer_memaccess;
+#define warn_sizeof_pointer_memaccess global_options.x_warn_sizeof_pointer_memaccess
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_stack_protect;
+#else
+ int x_warn_stack_protect;
+#define warn_stack_protect global_options.x_warn_stack_protect
+#endif
+#ifdef GENERATOR_FILE
+extern HOST_WIDE_INT warn_stack_usage;
+#else
+ HOST_WIDE_INT x_warn_stack_usage;
+#define warn_stack_usage global_options.x_warn_stack_usage
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_strict_aliasing;
+#else
+ int x_warn_strict_aliasing;
+#define warn_strict_aliasing global_options.x_warn_strict_aliasing
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_strict_flex_arrays;
+#else
+ int x_warn_strict_flex_arrays;
+#define warn_strict_flex_arrays global_options.x_warn_strict_flex_arrays
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_strict_null_sentinel;
+#else
+ int x_warn_strict_null_sentinel;
+#define warn_strict_null_sentinel global_options.x_warn_strict_null_sentinel
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_strict_overflow;
+#else
+ int x_warn_strict_overflow;
+#define warn_strict_overflow global_options.x_warn_strict_overflow
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_strict_prototypes;
+#else
+ int x_warn_strict_prototypes;
+#define warn_strict_prototypes global_options.x_warn_strict_prototypes
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_strict_selector_match;
+#else
+ int x_warn_strict_selector_match;
+#define warn_strict_selector_match global_options.x_warn_strict_selector_match
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_string_compare;
+#else
+ int x_warn_string_compare;
+#define warn_string_compare global_options.x_warn_string_compare
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_stringop_overflow;
+#else
+ int x_warn_stringop_overflow;
+#define warn_stringop_overflow global_options.x_warn_stringop_overflow
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_stringop_overread;
+#else
+ int x_warn_stringop_overread;
+#define warn_stringop_overread global_options.x_warn_stringop_overread
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_stringop_truncation;
+#else
+ int x_warn_stringop_truncation;
+#define warn_stringop_truncation global_options.x_warn_stringop_truncation
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_subobject_linkage;
+#else
+ int x_warn_subobject_linkage;
+#define warn_subobject_linkage global_options.x_warn_subobject_linkage
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_suggest_attribute_cold;
+#else
+ int x_warn_suggest_attribute_cold;
+#define warn_suggest_attribute_cold global_options.x_warn_suggest_attribute_cold
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_suggest_attribute_const;
+#else
+ int x_warn_suggest_attribute_const;
+#define warn_suggest_attribute_const global_options.x_warn_suggest_attribute_const
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_suggest_attribute_format;
+#else
+ int x_warn_suggest_attribute_format;
+#define warn_suggest_attribute_format global_options.x_warn_suggest_attribute_format
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_suggest_attribute_malloc;
+#else
+ int x_warn_suggest_attribute_malloc;
+#define warn_suggest_attribute_malloc global_options.x_warn_suggest_attribute_malloc
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_suggest_attribute_noreturn;
+#else
+ int x_warn_suggest_attribute_noreturn;
+#define warn_suggest_attribute_noreturn global_options.x_warn_suggest_attribute_noreturn
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_suggest_attribute_pure;
+#else
+ int x_warn_suggest_attribute_pure;
+#define warn_suggest_attribute_pure global_options.x_warn_suggest_attribute_pure
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_suggest_final_methods;
+#else
+ int x_warn_suggest_final_methods;
+#define warn_suggest_final_methods global_options.x_warn_suggest_final_methods
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_suggest_final_types;
+#else
+ int x_warn_suggest_final_types;
+#define warn_suggest_final_types global_options.x_warn_suggest_final_types
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_override;
+#else
+ int x_warn_override;
+#define warn_override global_options.x_warn_override
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_surprising;
+#else
+ int x_warn_surprising;
+#define warn_surprising global_options.x_warn_surprising
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_switch;
+#else
+ int x_warn_switch;
+#define warn_switch global_options.x_warn_switch
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_switch_bool;
+#else
+ int x_warn_switch_bool;
+#define warn_switch_bool global_options.x_warn_switch_bool
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_switch_default;
+#else
+ int x_warn_switch_default;
+#define warn_switch_default global_options.x_warn_switch_default
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_switch_enum;
+#else
+ int x_warn_switch_enum;
+#define warn_switch_enum global_options.x_warn_switch_enum
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_switch_outside_range;
+#else
+ int x_warn_switch_outside_range;
+#define warn_switch_outside_range global_options.x_warn_switch_outside_range
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_switch_unreachable;
+#else
+ int x_warn_switch_unreachable;
+#define warn_switch_unreachable global_options.x_warn_switch_unreachable
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_sync_nand;
+#else
+ int x_warn_sync_nand;
+#define warn_sync_nand global_options.x_warn_sync_nand
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_synth;
+#else
+ int x_warn_synth;
+#define warn_synth global_options.x_warn_synth
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_system_headers;
+#else
+ int x_warn_system_headers;
+#define warn_system_headers global_options.x_warn_system_headers
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_tabs;
+#else
+ int x_warn_tabs;
+#define warn_tabs global_options.x_warn_tabs
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_target_lifetime;
+#else
+ int x_warn_target_lifetime;
+#define warn_target_lifetime global_options.x_warn_target_lifetime
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_tautological_compare;
+#else
+ int x_warn_tautological_compare;
+#define warn_tautological_compare global_options.x_warn_tautological_compare
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_templates;
+#else
+ int x_warn_templates;
+#define warn_templates global_options.x_warn_templates
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_terminate;
+#else
+ int x_warn_terminate;
+#define warn_terminate global_options.x_warn_terminate
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_traditional;
+#else
+ int x_warn_traditional;
+#define warn_traditional global_options.x_warn_traditional
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_traditional_conversion;
+#else
+ int x_warn_traditional_conversion;
+#define warn_traditional_conversion global_options.x_warn_traditional_conversion
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_trampolines;
+#else
+ int x_warn_trampolines;
+#define warn_trampolines global_options.x_warn_trampolines
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_trigraphs;
+#else
+ int x_cpp_warn_trigraphs;
+#define cpp_warn_trigraphs global_options.x_cpp_warn_trigraphs
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_trivial_auto_var_init;
+#else
+ int x_warn_trivial_auto_var_init;
+#define warn_trivial_auto_var_init global_options.x_warn_trivial_auto_var_init
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_tsan;
+#else
+ int x_warn_tsan;
+#define warn_tsan global_options.x_warn_tsan
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_type_limits;
+#else
+ int x_warn_type_limits;
+#define warn_type_limits global_options.x_warn_type_limits
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_undeclared_selector;
+#else
+ int x_warn_undeclared_selector;
+#define warn_undeclared_selector global_options.x_warn_undeclared_selector
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_undef;
+#else
+ int x_cpp_warn_undef;
+#define cpp_warn_undef global_options.x_cpp_warn_undef
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_undefined_do_loop;
+#else
+ int x_warn_undefined_do_loop;
+#define warn_undefined_do_loop global_options.x_warn_undefined_do_loop
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_underflow;
+#else
+ int x_warn_underflow;
+#define warn_underflow global_options.x_warn_underflow
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unicode;
+#else
+ int x_warn_unicode;
+#define warn_unicode global_options.x_warn_unicode
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_uninitialized;
+#else
+ int x_warn_uninitialized;
+#define warn_uninitialized global_options.x_warn_uninitialized
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unknown_pragmas;
+#else
+ int x_warn_unknown_pragmas;
+#define warn_unknown_pragmas global_options.x_warn_unknown_pragmas
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unsuffixed_float_constants;
+#else
+ int x_warn_unsuffixed_float_constants;
+#define warn_unsuffixed_float_constants global_options.x_warn_unsuffixed_float_constants
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unused;
+#else
+ int x_warn_unused;
+#define warn_unused global_options.x_warn_unused
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unused_but_set_parameter;
+#else
+ int x_warn_unused_but_set_parameter;
+#define warn_unused_but_set_parameter global_options.x_warn_unused_but_set_parameter
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unused_but_set_variable;
+#else
+ int x_warn_unused_but_set_variable;
+#define warn_unused_but_set_variable global_options.x_warn_unused_but_set_variable
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unused_const_variable;
+#else
+ int x_warn_unused_const_variable;
+#define warn_unused_const_variable global_options.x_warn_unused_const_variable
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unused_dummy_argument;
+#else
+ int x_warn_unused_dummy_argument;
+#define warn_unused_dummy_argument global_options.x_warn_unused_dummy_argument
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unused_function;
+#else
+ int x_warn_unused_function;
+#define warn_unused_function global_options.x_warn_unused_function
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unused_label;
+#else
+ int x_warn_unused_label;
+#define warn_unused_label global_options.x_warn_unused_label
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unused_local_typedefs;
+#else
+ int x_warn_unused_local_typedefs;
+#define warn_unused_local_typedefs global_options.x_warn_unused_local_typedefs
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_unused_macros;
+#else
+ int x_cpp_warn_unused_macros;
+#define cpp_warn_unused_macros global_options.x_cpp_warn_unused_macros
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unused_parameter;
+#else
+ int x_warn_unused_parameter;
+#define warn_unused_parameter global_options.x_warn_unused_parameter
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unused_result;
+#else
+ int x_warn_unused_result;
+#define warn_unused_result global_options.x_warn_unused_result
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unused_value;
+#else
+ int x_warn_unused_value;
+#define warn_unused_value global_options.x_warn_unused_value
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_unused_variable;
+#else
+ int x_warn_unused_variable;
+#define warn_unused_variable global_options.x_warn_unused_variable
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_use_after_free;
+#else
+ int x_warn_use_after_free;
+#define warn_use_after_free global_options.x_warn_use_after_free
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_use_without_only;
+#else
+ int x_warn_use_without_only;
+#define warn_use_without_only global_options.x_warn_use_without_only
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_useless_cast;
+#else
+ int x_warn_useless_cast;
+#define warn_useless_cast global_options.x_warn_useless_cast
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_varargs;
+#else
+ int x_warn_varargs;
+#define warn_varargs global_options.x_warn_varargs
+#endif
+#ifdef GENERATOR_FILE
+extern int cpp_warn_variadic_macros;
+#else
+ int x_cpp_warn_variadic_macros;
+#define cpp_warn_variadic_macros global_options.x_cpp_warn_variadic_macros
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_vector_operation_performance;
+#else
+ int x_warn_vector_operation_performance;
+#define warn_vector_operation_performance global_options.x_warn_vector_operation_performance
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_vexing_parse;
+#else
+ int x_warn_vexing_parse;
+#define warn_vexing_parse global_options.x_warn_vexing_parse
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_virtual_inheritance;
+#else
+ int x_warn_virtual_inheritance;
+#define warn_virtual_inheritance global_options.x_warn_virtual_inheritance
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_virtual_move_assign;
+#else
+ int x_warn_virtual_move_assign;
+#define warn_virtual_move_assign global_options.x_warn_virtual_move_assign
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_vla;
+#else
+ int x_warn_vla;
+#define warn_vla global_options.x_warn_vla
+#endif
+#ifdef GENERATOR_FILE
+extern HOST_WIDE_INT warn_vla_limit;
+#else
+ HOST_WIDE_INT x_warn_vla_limit;
+#define warn_vla_limit global_options.x_warn_vla_limit
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_vla_parameter;
+#else
+ int x_warn_vla_parameter;
+#define warn_vla_parameter global_options.x_warn_vla_parameter
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_volatile;
+#else
+ int x_warn_volatile;
+#define warn_volatile global_options.x_warn_volatile
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_volatile_register_var;
+#else
+ int x_warn_volatile_register_var;
+#define warn_volatile_register_var global_options.x_warn_volatile_register_var
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_write_strings;
+#else
+ int x_warn_write_strings;
+#define warn_write_strings global_options.x_warn_write_strings
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_xor_used_as_pow;
+#else
+ int x_warn_xor_used_as_pow;
+#define warn_xor_used_as_pow global_options.x_warn_xor_used_as_pow
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_zero_as_null_pointer_constant;
+#else
+ int x_warn_zero_as_null_pointer_constant;
+#define warn_zero_as_null_pointer_constant global_options.x_warn_zero_as_null_pointer_constant
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_zero_length_bounds;
+#else
+ int x_warn_zero_length_bounds;
+#define warn_zero_length_bounds global_options.x_warn_zero_length_bounds
+#endif
+#ifdef GENERATOR_FILE
+extern int warn_zerotrip;
+#else
+ int x_warn_zerotrip;
+#define warn_zerotrip global_options.x_warn_zerotrip
+#endif
+#ifdef GENERATOR_FILE
+extern const char *aux_info_file_name;
+#else
+ const char *x_aux_info_file_name;
+#define aux_info_file_name global_options.x_aux_info_file_name
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_callgraph;
+#else
+ int x_flag_dump_callgraph;
+#define flag_dump_callgraph global_options.x_flag_dump_callgraph
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_dump_defined;
+#else
+ int x_flag_lto_dump_defined;
+#define flag_lto_dump_defined global_options.x_flag_lto_dump_defined
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_dump_demangle;
+#else
+ int x_flag_lto_dump_demangle;
+#define flag_lto_dump_demangle global_options.x_flag_lto_dump_demangle
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_dump_body;
+#else
+ const char *x_flag_dump_body;
+#define flag_dump_body global_options.x_flag_dump_body
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_dump_level;
+#else
+ const char *x_flag_dump_level;
+#define flag_dump_level global_options.x_flag_dump_level
+#endif
+#ifdef GENERATOR_FILE
+extern const char *dump_base_name;
+#else
+ const char *x_dump_base_name;
+#define dump_base_name global_options.x_dump_base_name
+#endif
+#ifdef GENERATOR_FILE
+extern const char *dump_base_ext;
+#else
+ const char *x_dump_base_ext;
+#define dump_base_ext global_options.x_dump_base_ext
+#endif
+#ifdef GENERATOR_FILE
+extern const char *dump_dir_name;
+#else
+ const char *x_dump_dir_name;
+#define dump_dir_name global_options.x_dump_dir_name
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_pic;
+#else
+ int x_flag_pic;
+#define flag_pic global_options.x_flag_pic
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_pie;
+#else
+ int x_flag_pie;
+#define flag_pie global_options.x_flag_pie
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_abi_compat_version;
+#else
+ int x_flag_abi_compat_version;
+#define flag_abi_compat_version global_options.x_flag_abi_compat_version
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_abi_version;
+#else
+ int x_flag_abi_version;
+#define flag_abi_version global_options.x_flag_abi_version
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_access_control;
+#else
+ int x_flag_access_control;
+#define flag_access_control global_options.x_flag_access_control
+#endif
+#ifdef GENERATOR_FILE
+extern const char *ada_specs_parent;
+#else
+ const char *x_ada_specs_parent;
+#define ada_specs_parent global_options.x_ada_specs_parent
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_aggressive_function_elimination;
+#else
+ int x_flag_aggressive_function_elimination;
+#define flag_aggressive_function_elimination global_options.x_flag_aggressive_function_elimination
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_aggressive_loop_optimizations;
+#else
+ int x_flag_aggressive_loop_optimizations;
+#define flag_aggressive_loop_optimizations global_options.x_flag_aggressive_loop_optimizations
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_align_commons;
+#else
+ int x_flag_align_commons;
+#define flag_align_commons global_options.x_flag_align_commons
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_align_functions;
+#else
+ int x_flag_align_functions;
+#define flag_align_functions global_options.x_flag_align_functions
+#endif
+#ifdef GENERATOR_FILE
+extern const char *str_align_functions;
+#else
+ const char *x_str_align_functions;
+#define str_align_functions global_options.x_str_align_functions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_align_jumps;
+#else
+ int x_flag_align_jumps;
+#define flag_align_jumps global_options.x_flag_align_jumps
+#endif
+#ifdef GENERATOR_FILE
+extern const char *str_align_jumps;
+#else
+ const char *x_str_align_jumps;
+#define str_align_jumps global_options.x_str_align_jumps
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_align_labels;
+#else
+ int x_flag_align_labels;
+#define flag_align_labels global_options.x_flag_align_labels
+#endif
+#ifdef GENERATOR_FILE
+extern const char *str_align_labels;
+#else
+ const char *x_str_align_labels;
+#define str_align_labels global_options.x_str_align_labels
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_align_loops;
+#else
+ int x_flag_align_loops;
+#define flag_align_loops global_options.x_flag_align_loops
+#endif
+#ifdef GENERATOR_FILE
+extern const char *str_align_loops;
+#else
+ const char *x_str_align_loops;
+#define str_align_loops global_options.x_str_align_loops
+#endif
+#ifdef GENERATOR_FILE
+extern int aligned_new_threshold;
+#else
+ int x_aligned_new_threshold;
+#define aligned_new_threshold global_options.x_aligned_new_threshold
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_all_intrinsics;
+#else
+ int x_flag_all_intrinsics;
+#define flag_all_intrinsics global_options.x_flag_all_intrinsics
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_allocation_dce;
+#else
+ int x_flag_allocation_dce;
+#define flag_allocation_dce global_options.x_flag_allocation_dce
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_allow_argument_mismatch;
+#else
+ int x_flag_allow_argument_mismatch;
+#define flag_allow_argument_mismatch global_options.x_flag_allow_argument_mismatch
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_allow_invalid_boz;
+#else
+ int x_flag_allow_invalid_boz;
+#define flag_allow_invalid_boz global_options.x_flag_allow_invalid_boz
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_allow_leading_underscore;
+#else
+ int x_flag_allow_leading_underscore;
+#define flag_allow_leading_underscore global_options.x_flag_allow_leading_underscore
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_store_data_races;
+#else
+ int x_flag_store_data_races;
+#define flag_store_data_races global_options.x_flag_store_data_races
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_analyzer;
+#else
+ int x_flag_analyzer;
+#define flag_analyzer global_options.x_flag_analyzer
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_analyzer_call_summaries;
+#else
+ int x_flag_analyzer_call_summaries;
+#define flag_analyzer_call_summaries global_options.x_flag_analyzer_call_summaries
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_analyzer_checker;
+#else
+ const char *x_flag_analyzer_checker;
+#define flag_analyzer_checker global_options.x_flag_analyzer_checker
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_analyzer_feasibility;
+#else
+ int x_flag_analyzer_feasibility;
+#define flag_analyzer_feasibility global_options.x_flag_analyzer_feasibility
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_analyzer_fine_grained;
+#else
+ int x_flag_analyzer_fine_grained;
+#define flag_analyzer_fine_grained global_options.x_flag_analyzer_fine_grained
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_analyzer_show_duplicate_count;
+#else
+ int x_flag_analyzer_show_duplicate_count;
+#define flag_analyzer_show_duplicate_count global_options.x_flag_analyzer_show_duplicate_count
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_analyzer_state_merge;
+#else
+ int x_flag_analyzer_state_merge;
+#define flag_analyzer_state_merge global_options.x_flag_analyzer_state_merge
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_analyzer_state_purge;
+#else
+ int x_flag_analyzer_state_purge;
+#define flag_analyzer_state_purge global_options.x_flag_analyzer_state_purge
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_analyzer_suppress_followups;
+#else
+ int x_flag_analyzer_suppress_followups;
+#define flag_analyzer_suppress_followups global_options.x_flag_analyzer_suppress_followups
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_analyzer_transitivity;
+#else
+ int x_flag_analyzer_transitivity;
+#define flag_analyzer_transitivity global_options.x_flag_analyzer_transitivity
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_analyzer_undo_inlining;
+#else
+ int x_flag_analyzer_undo_inlining;
+#define flag_analyzer_undo_inlining global_options.x_flag_analyzer_undo_inlining
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_analyzer_verbose_edges;
+#else
+ int x_flag_analyzer_verbose_edges;
+#define flag_analyzer_verbose_edges global_options.x_flag_analyzer_verbose_edges
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_analyzer_verbose_state_changes;
+#else
+ int x_flag_analyzer_verbose_state_changes;
+#define flag_analyzer_verbose_state_changes global_options.x_flag_analyzer_verbose_state_changes
+#endif
+#ifdef GENERATOR_FILE
+extern int analyzer_verbosity;
+#else
+ int x_analyzer_verbosity;
+#define analyzer_verbosity global_options.x_analyzer_verbosity
+#endif
+#ifdef GENERATOR_FILE
+extern void *common_deferred_options;
+#else
+ void *x_common_deferred_options;
+#define common_deferred_options global_options.x_common_deferred_options
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_no_asm;
+#else
+ int x_flag_no_asm;
+#define flag_no_asm global_options.x_flag_no_asm
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_assert;
+#else
+ int x_flag_assert;
+#define flag_assert global_options.x_flag_assert
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_associative_math;
+#else
+ int x_flag_associative_math;
+#define flag_associative_math global_options.x_flag_associative_math
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_asynchronous_unwind_tables;
+#else
+ int x_flag_asynchronous_unwind_tables;
+#define flag_asynchronous_unwind_tables global_options.x_flag_asynchronous_unwind_tables
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_auto_inc_dec;
+#else
+ int x_flag_auto_inc_dec;
+#define flag_auto_inc_dec global_options.x_flag_auto_inc_dec
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_auto_profile;
+#else
+ int x_flag_auto_profile;
+#define flag_auto_profile global_options.x_flag_auto_profile
+#endif
+#ifdef GENERATOR_FILE
+extern const char *auto_profile_file;
+#else
+ const char *x_auto_profile_file;
+#define auto_profile_file global_options.x_auto_profile_file
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_automatic;
+#else
+ int x_flag_automatic;
+#define flag_automatic global_options.x_flag_automatic
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_backslash;
+#else
+ int x_flag_backslash;
+#define flag_backslash global_options.x_flag_backslash
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_backtrace;
+#else
+ int x_flag_backtrace;
+#define flag_backtrace global_options.x_flag_backtrace
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_bit_tests;
+#else
+ int x_flag_bit_tests;
+#define flag_bit_tests global_options.x_flag_bit_tests
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_blas_matmul_limit;
+#else
+ int x_flag_blas_matmul_limit;
+#define flag_blas_matmul_limit global_options.x_flag_blas_matmul_limit
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_bounds_check;
+#else
+ int x_flag_bounds_check;
+#define flag_bounds_check global_options.x_flag_bounds_check
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_branch_on_count_reg;
+#else
+ int x_flag_branch_on_count_reg;
+#define flag_branch_on_count_reg global_options.x_flag_branch_on_count_reg
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_branch_probabilities;
+#else
+ int x_flag_branch_probabilities;
+#define flag_branch_probabilities global_options.x_flag_branch_probabilities
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_building_libgcc;
+#else
+ int x_flag_building_libgcc;
+#define flag_building_libgcc global_options.x_flag_building_libgcc
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_building_libgfortran;
+#else
+ int x_flag_building_libgfortran;
+#define flag_building_libgfortran global_options.x_flag_building_libgfortran
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_building_libphobos_tests;
+#else
+ int x_flag_building_libphobos_tests;
+#define flag_building_libphobos_tests global_options.x_flag_building_libphobos_tests
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_no_builtin;
+#else
+ int x_flag_no_builtin;
+#define flag_no_builtin global_options.x_flag_no_builtin
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_c_prototypes;
+#else
+ int x_flag_c_prototypes;
+#define flag_c_prototypes global_options.x_flag_c_prototypes
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_c_prototypes_external;
+#else
+ int x_flag_c_prototypes_external;
+#define flag_c_prototypes_external global_options.x_flag_c_prototypes_external
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_caller_saves;
+#else
+ int x_flag_caller_saves;
+#define flag_caller_saves global_options.x_flag_caller_saves
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_callgraph_info;
+#else
+ int x_flag_callgraph_info;
+#define flag_callgraph_info global_options.x_flag_callgraph_info
+#endif
+#ifdef GENERATOR_FILE
+extern enum cf_protection_level flag_cf_protection;
+#else
+ enum cf_protection_level x_flag_cf_protection;
+#define flag_cf_protection global_options.x_flag_cf_protection
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_char8_t;
+#else
+ int x_flag_char8_t;
+#define flag_char8_t global_options.x_flag_char8_t
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_check_new;
+#else
+ int x_flag_check_new;
+#define flag_check_new global_options.x_flag_check_new
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_check_action;
+#else
+ int x_flag_check_action;
+#define flag_check_action global_options.x_flag_check_action
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_checking;
+#else
+ int x_flag_checking;
+#define flag_checking global_options.x_flag_checking
+#endif
+#ifdef GENERATOR_FILE
+extern enum gfc_fcoarray flag_coarray;
+#else
+ enum gfc_fcoarray x_flag_coarray;
+#define flag_coarray global_options.x_flag_coarray
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_code_hoisting;
+#else
+ int x_flag_code_hoisting;
+#define flag_code_hoisting global_options.x_flag_code_hoisting
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_combine_stack_adjustments;
+#else
+ int x_flag_combine_stack_adjustments;
+#define flag_combine_stack_adjustments global_options.x_flag_combine_stack_adjustments
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_no_common;
+#else
+ int x_flag_no_common;
+#define flag_no_common global_options.x_flag_no_common
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_compare_debug;
+#else
+ int x_flag_compare_debug;
+#define flag_compare_debug global_options.x_flag_compare_debug
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_compare_debug_opt;
+#else
+ const char *x_flag_compare_debug_opt;
+#define flag_compare_debug_opt global_options.x_flag_compare_debug_opt
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_compare_elim_after_reload;
+#else
+ int x_flag_compare_elim_after_reload;
+#define flag_compare_elim_after_reload global_options.x_flag_compare_elim_after_reload
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_concepts;
+#else
+ int x_flag_concepts;
+#define flag_concepts global_options.x_flag_concepts
+#endif
+#ifdef GENERATOR_FILE
+extern int concepts_diagnostics_max_depth;
+#else
+ int x_concepts_diagnostics_max_depth;
+#define concepts_diagnostics_max_depth global_options.x_concepts_diagnostics_max_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_concepts_ts;
+#else
+ int x_flag_concepts_ts;
+#define flag_concepts_ts global_options.x_flag_concepts_ts
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_conserve_stack;
+#else
+ int x_flag_conserve_stack;
+#define flag_conserve_stack global_options.x_flag_conserve_stack
+#endif
+#ifdef GENERATOR_FILE
+extern int constexpr_cache_depth;
+#else
+ int x_constexpr_cache_depth;
+#define constexpr_cache_depth global_options.x_constexpr_cache_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int max_constexpr_depth;
+#else
+ int x_max_constexpr_depth;
+#define max_constexpr_depth global_options.x_max_constexpr_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_constexpr_fp_except;
+#else
+ int x_flag_constexpr_fp_except;
+#define flag_constexpr_fp_except global_options.x_flag_constexpr_fp_except
+#endif
+#ifdef GENERATOR_FILE
+extern int constexpr_loop_limit;
+#else
+ int x_constexpr_loop_limit;
+#define constexpr_loop_limit global_options.x_constexpr_loop_limit
+#endif
+#ifdef GENERATOR_FILE
+extern HOST_WIDE_INT constexpr_ops_limit;
+#else
+ HOST_WIDE_INT x_constexpr_ops_limit;
+#define constexpr_ops_limit global_options.x_constexpr_ops_limit
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_contract_mode;
+#else
+ int x_flag_contract_mode;
+#define flag_contract_mode global_options.x_flag_contract_mode
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_contract_strict_declarations;
+#else
+ int x_flag_contract_strict_declarations;
+#define flag_contract_strict_declarations global_options.x_flag_contract_strict_declarations
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_contracts;
+#else
+ int x_flag_contracts;
+#define flag_contracts global_options.x_flag_contracts
+#endif
+#ifdef GENERATOR_FILE
+extern enum gfc_convert flag_convert;
+#else
+ enum gfc_convert x_flag_convert;
+#define flag_convert global_options.x_flag_convert
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_coroutines;
+#else
+ int x_flag_coroutines;
+#define flag_coroutines global_options.x_flag_coroutines
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_cprop_registers;
+#else
+ int x_flag_cprop_registers;
+#define flag_cprop_registers global_options.x_flag_cprop_registers
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_cray_pointer;
+#else
+ int x_flag_cray_pointer;
+#define flag_cray_pointer global_options.x_flag_cray_pointer
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_crossjumping;
+#else
+ int x_flag_crossjumping;
+#define flag_crossjumping global_options.x_flag_crossjumping
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_cse_follow_jumps;
+#else
+ int x_flag_cse_follow_jumps;
+#define flag_cse_follow_jumps global_options.x_flag_cse_follow_jumps
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_cx_fortran_rules;
+#else
+ int x_flag_cx_fortran_rules;
+#define flag_cx_fortran_rules global_options.x_flag_cx_fortran_rules
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_cx_limited_range;
+#else
+ int x_flag_cx_limited_range;
+#define flag_cx_limited_range global_options.x_flag_cx_limited_range
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_data_sections;
+#else
+ int x_flag_data_sections;
+#define flag_data_sections global_options.x_flag_data_sections
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dbg_cnt_list;
+#else
+ int x_flag_dbg_cnt_list;
+#define flag_dbg_cnt_list global_options.x_flag_dbg_cnt_list
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dce;
+#else
+ int x_flag_dce;
+#define flag_dce global_options.x_flag_dce
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_debug_aux_vars;
+#else
+ int x_flag_debug_aux_vars;
+#define flag_debug_aux_vars global_options.x_flag_debug_aux_vars
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_debug_types_section;
+#else
+ int x_flag_debug_types_section;
+#define flag_debug_types_section global_options.x_flag_debug_types_section
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dec;
+#else
+ int x_flag_dec;
+#define flag_dec global_options.x_flag_dec
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dec_blank_format_item;
+#else
+ int x_flag_dec_blank_format_item;
+#define flag_dec_blank_format_item global_options.x_flag_dec_blank_format_item
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dec_char_conversions;
+#else
+ int x_flag_dec_char_conversions;
+#define flag_dec_char_conversions global_options.x_flag_dec_char_conversions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dec_format_defaults;
+#else
+ int x_flag_dec_format_defaults;
+#define flag_dec_format_defaults global_options.x_flag_dec_format_defaults
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dec_include;
+#else
+ int x_flag_dec_include;
+#define flag_dec_include global_options.x_flag_dec_include
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dec_intrinsic_ints;
+#else
+ int x_flag_dec_intrinsic_ints;
+#define flag_dec_intrinsic_ints global_options.x_flag_dec_intrinsic_ints
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dec_math;
+#else
+ int x_flag_dec_math;
+#define flag_dec_math global_options.x_flag_dec_math
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dec_static;
+#else
+ int x_flag_dec_static;
+#define flag_dec_static global_options.x_flag_dec_static
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dec_structure;
+#else
+ int x_flag_dec_structure;
+#define flag_dec_structure global_options.x_flag_dec_structure
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_declone_ctor_dtor;
+#else
+ int x_flag_declone_ctor_dtor;
+#define flag_declone_ctor_dtor global_options.x_flag_declone_ctor_dtor
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_default_double;
+#else
+ int x_flag_default_double;
+#define flag_default_double global_options.x_flag_default_double
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_default_integer;
+#else
+ int x_flag_default_integer;
+#define flag_default_integer global_options.x_flag_default_integer
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_default_real_10;
+#else
+ int x_flag_default_real_10;
+#define flag_default_real_10 global_options.x_flag_default_real_10
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_default_real_16;
+#else
+ int x_flag_default_real_16;
+#define flag_default_real_16 global_options.x_flag_default_real_16
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_default_real_8;
+#else
+ int x_flag_default_real_8;
+#define flag_default_real_8 global_options.x_flag_default_real_8
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_defer_pop;
+#else
+ int x_flag_defer_pop;
+#define flag_defer_pop global_options.x_flag_defer_pop
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_delayed_branch;
+#else
+ int x_flag_delayed_branch;
+#define flag_delayed_branch global_options.x_flag_delayed_branch
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_delete_dead_exceptions;
+#else
+ int x_flag_delete_dead_exceptions;
+#define flag_delete_dead_exceptions global_options.x_flag_delete_dead_exceptions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_delete_null_pointer_checks;
+#else
+ int x_flag_delete_null_pointer_checks;
+#define flag_delete_null_pointer_checks global_options.x_flag_delete_null_pointer_checks
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_devirtualize;
+#else
+ int x_flag_devirtualize;
+#define flag_devirtualize global_options.x_flag_devirtualize
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ltrans_devirtualize;
+#else
+ int x_flag_ltrans_devirtualize;
+#define flag_ltrans_devirtualize global_options.x_flag_ltrans_devirtualize
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_devirtualize_speculatively;
+#else
+ int x_flag_devirtualize_speculatively;
+#define flag_devirtualize_speculatively global_options.x_flag_devirtualize_speculatively
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_diagnostics_show_color;
+#else
+ int x_flag_diagnostics_show_color;
+#define flag_diagnostics_show_color global_options.x_flag_diagnostics_show_color
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_diagnostics_generate_patch;
+#else
+ int x_flag_diagnostics_generate_patch;
+#define flag_diagnostics_generate_patch global_options.x_flag_diagnostics_generate_patch
+#endif
+#ifdef GENERATOR_FILE
+extern int diagnostics_minimum_margin_width;
+#else
+ int x_diagnostics_minimum_margin_width;
+#define diagnostics_minimum_margin_width global_options.x_diagnostics_minimum_margin_width
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_diagnostics_parseable_fixits;
+#else
+ int x_flag_diagnostics_parseable_fixits;
+#define flag_diagnostics_parseable_fixits global_options.x_flag_diagnostics_parseable_fixits
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_diagnostics_path_format;
+#else
+ int x_flag_diagnostics_path_format;
+#define flag_diagnostics_path_format global_options.x_flag_diagnostics_path_format
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_diagnostics_show_caret;
+#else
+ int x_flag_diagnostics_show_caret;
+#define flag_diagnostics_show_caret global_options.x_flag_diagnostics_show_caret
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_diagnostics_show_cwe;
+#else
+ int x_flag_diagnostics_show_cwe;
+#define flag_diagnostics_show_cwe global_options.x_flag_diagnostics_show_cwe
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_diagnostics_show_labels;
+#else
+ int x_flag_diagnostics_show_labels;
+#define flag_diagnostics_show_labels global_options.x_flag_diagnostics_show_labels
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_diagnostics_show_line_numbers;
+#else
+ int x_flag_diagnostics_show_line_numbers;
+#define flag_diagnostics_show_line_numbers global_options.x_flag_diagnostics_show_line_numbers
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_diagnostics_show_option;
+#else
+ int x_flag_diagnostics_show_option;
+#define flag_diagnostics_show_option global_options.x_flag_diagnostics_show_option
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_diagnostics_show_path_depths;
+#else
+ int x_flag_diagnostics_show_path_depths;
+#define flag_diagnostics_show_path_depths global_options.x_flag_diagnostics_show_path_depths
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_diagnostics_show_rules;
+#else
+ int x_flag_diagnostics_show_rules;
+#define flag_diagnostics_show_rules global_options.x_flag_diagnostics_show_rules
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_diagnostics_show_template_tree;
+#else
+ int x_flag_diagnostics_show_template_tree;
+#define flag_diagnostics_show_template_tree global_options.x_flag_diagnostics_show_template_tree
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_diagnostics_show_urls;
+#else
+ int x_flag_diagnostics_show_urls;
+#define flag_diagnostics_show_urls global_options.x_flag_diagnostics_show_urls
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dollar_ok;
+#else
+ int x_flag_dollar_ok;
+#define flag_dollar_ok global_options.x_flag_dollar_ok
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dse;
+#else
+ int x_flag_dse;
+#define flag_dse global_options.x_flag_dse
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_ada_spec;
+#else
+ int x_flag_dump_ada_spec;
+#define flag_dump_ada_spec global_options.x_flag_dump_ada_spec
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_ada_spec_slim;
+#else
+ int x_flag_dump_ada_spec_slim;
+#define flag_dump_ada_spec_slim global_options.x_flag_dump_ada_spec_slim
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_analyzer;
+#else
+ int x_flag_dump_analyzer;
+#define flag_dump_analyzer global_options.x_flag_dump_analyzer
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_analyzer_callgraph;
+#else
+ int x_flag_dump_analyzer_callgraph;
+#define flag_dump_analyzer_callgraph global_options.x_flag_dump_analyzer_callgraph
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_analyzer_exploded_graph;
+#else
+ int x_flag_dump_analyzer_exploded_graph;
+#define flag_dump_analyzer_exploded_graph global_options.x_flag_dump_analyzer_exploded_graph
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_analyzer_exploded_nodes;
+#else
+ int x_flag_dump_analyzer_exploded_nodes;
+#define flag_dump_analyzer_exploded_nodes global_options.x_flag_dump_analyzer_exploded_nodes
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_analyzer_exploded_nodes_2;
+#else
+ int x_flag_dump_analyzer_exploded_nodes_2;
+#define flag_dump_analyzer_exploded_nodes_2 global_options.x_flag_dump_analyzer_exploded_nodes_2
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_analyzer_exploded_nodes_3;
+#else
+ int x_flag_dump_analyzer_exploded_nodes_3;
+#define flag_dump_analyzer_exploded_nodes_3 global_options.x_flag_dump_analyzer_exploded_nodes_3
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_analyzer_exploded_paths;
+#else
+ int x_flag_dump_analyzer_exploded_paths;
+#define flag_dump_analyzer_exploded_paths global_options.x_flag_dump_analyzer_exploded_paths
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_analyzer_feasibility;
+#else
+ int x_flag_dump_analyzer_feasibility;
+#define flag_dump_analyzer_feasibility global_options.x_flag_dump_analyzer_feasibility
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_analyzer_json;
+#else
+ int x_flag_dump_analyzer_json;
+#define flag_dump_analyzer_json global_options.x_flag_dump_analyzer_json
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_analyzer_state_purge;
+#else
+ int x_flag_dump_analyzer_state_purge;
+#define flag_dump_analyzer_state_purge global_options.x_flag_dump_analyzer_state_purge
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_analyzer_stderr;
+#else
+ int x_flag_dump_analyzer_stderr;
+#define flag_dump_analyzer_stderr global_options.x_flag_dump_analyzer_stderr
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_analyzer_supergraph;
+#else
+ int x_flag_dump_analyzer_supergraph;
+#define flag_dump_analyzer_supergraph global_options.x_flag_dump_analyzer_supergraph
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_analyzer_untracked;
+#else
+ int x_flag_dump_analyzer_untracked;
+#define flag_dump_analyzer_untracked global_options.x_flag_dump_analyzer_untracked
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_dump_final_insns;
+#else
+ const char *x_flag_dump_final_insns;
+#define flag_dump_final_insns global_options.x_flag_dump_final_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_fortran_global;
+#else
+ int x_flag_dump_fortran_global;
+#define flag_dump_fortran_global global_options.x_flag_dump_fortran_global
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_fortran_optimized;
+#else
+ int x_flag_dump_fortran_optimized;
+#define flag_dump_fortran_optimized global_options.x_flag_dump_fortran_optimized
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_fortran_original;
+#else
+ int x_flag_dump_fortran_original;
+#define flag_dump_fortran_original global_options.x_flag_dump_fortran_original
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_dump_go_spec;
+#else
+ const char *x_flag_dump_go_spec;
+#define flag_dump_go_spec global_options.x_flag_dump_go_spec
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_locations;
+#else
+ int x_flag_dump_locations;
+#define flag_dump_locations global_options.x_flag_dump_locations
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_noaddr;
+#else
+ int x_flag_dump_noaddr;
+#define flag_dump_noaddr global_options.x_flag_dump_noaddr
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_passes;
+#else
+ int x_flag_dump_passes;
+#define flag_dump_passes global_options.x_flag_dump_passes
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_scos;
+#else
+ int x_flag_dump_scos;
+#define flag_dump_scos global_options.x_flag_dump_scos
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_unnumbered;
+#else
+ int x_flag_dump_unnumbered;
+#define flag_dump_unnumbered global_options.x_flag_dump_unnumbered
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dump_unnumbered_links;
+#else
+ int x_flag_dump_unnumbered_links;
+#define flag_dump_unnumbered_links global_options.x_flag_dump_unnumbered_links
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_dwarf2_cfi_asm;
+#else
+ int x_flag_dwarf2_cfi_asm;
+#define flag_dwarf2_cfi_asm global_options.x_flag_dwarf2_cfi_asm
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_early_inlining;
+#else
+ int x_flag_early_inlining;
+#define flag_early_inlining global_options.x_flag_early_inlining
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_elide_constructors;
+#else
+ int x_flag_elide_constructors;
+#define flag_elide_constructors global_options.x_flag_elide_constructors
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_elide_type;
+#else
+ int x_flag_elide_type;
+#define flag_elide_type global_options.x_flag_elide_type
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_debug_only_used_symbols;
+#else
+ int x_flag_debug_only_used_symbols;
+#define flag_debug_only_used_symbols global_options.x_flag_debug_only_used_symbols
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_eliminate_unused_debug_types;
+#else
+ int x_flag_eliminate_unused_debug_types;
+#define flag_eliminate_unused_debug_types global_options.x_flag_eliminate_unused_debug_types
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_emit_class_debug_always;
+#else
+ int x_flag_emit_class_debug_always;
+#define flag_emit_class_debug_always global_options.x_flag_emit_class_debug_always
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_enforce_eh_specs;
+#else
+ int x_flag_enforce_eh_specs;
+#define flag_enforce_eh_specs global_options.x_flag_enforce_eh_specs
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_exceptions;
+#else
+ int x_flag_exceptions;
+#define flag_exceptions global_options.x_flag_exceptions
+#endif
+#ifdef GENERATOR_FILE
+extern enum excess_precision flag_excess_precision;
+#else
+ enum excess_precision x_flag_excess_precision;
+#define flag_excess_precision global_options.x_flag_excess_precision
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_expensive_optimizations;
+#else
+ int x_flag_expensive_optimizations;
+#define flag_expensive_optimizations global_options.x_flag_expensive_optimizations
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ext_numeric_literals;
+#else
+ int x_flag_ext_numeric_literals;
+#define flag_ext_numeric_literals global_options.x_flag_ext_numeric_literals
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_extern_stdcpp;
+#else
+ int x_flag_extern_stdcpp;
+#define flag_extern_stdcpp global_options.x_flag_extern_stdcpp
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_extern_tls_init;
+#else
+ int x_flag_extern_tls_init;
+#define flag_extern_tls_init global_options.x_flag_extern_tls_init
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_external_blas;
+#else
+ int x_flag_external_blas;
+#define flag_external_blas global_options.x_flag_external_blas
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_f2c;
+#else
+ int x_flag_f2c;
+#define flag_f2c global_options.x_flag_f2c
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_fat_lto_objects;
+#else
+ int x_flag_fat_lto_objects;
+#define flag_fat_lto_objects global_options.x_flag_fat_lto_objects
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_finite_loops;
+#else
+ int x_flag_finite_loops;
+#define flag_finite_loops global_options.x_flag_finite_loops
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_finite_math_only;
+#else
+ int x_flag_finite_math_only;
+#define flag_finite_math_only global_options.x_flag_finite_math_only
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_fixed_line_length;
+#else
+ int x_flag_fixed_line_length;
+#define flag_fixed_line_length global_options.x_flag_fixed_line_length
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_float_store;
+#else
+ int x_flag_float_store;
+#define flag_float_store global_options.x_flag_float_store
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_fold_simple_inlines;
+#else
+ int x_flag_fold_simple_inlines;
+#define flag_fold_simple_inlines global_options.x_flag_fold_simple_inlines
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_forward_propagate;
+#else
+ int x_flag_forward_propagate;
+#define flag_forward_propagate global_options.x_flag_forward_propagate
+#endif
+#ifdef GENERATOR_FILE
+extern enum fp_contract_mode flag_fp_contract_mode;
+#else
+ enum fp_contract_mode x_flag_fp_contract_mode;
+#define flag_fp_contract_mode global_options.x_flag_fp_contract_mode
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_fp_int_builtin_inexact;
+#else
+ int x_flag_fp_int_builtin_inexact;
+#define flag_fp_int_builtin_inexact global_options.x_flag_fp_int_builtin_inexact
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_free_line_length;
+#else
+ int x_flag_free_line_length;
+#define flag_free_line_length global_options.x_flag_free_line_length
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_frontend_loop_interchange;
+#else
+ int x_flag_frontend_loop_interchange;
+#define flag_frontend_loop_interchange global_options.x_flag_frontend_loop_interchange
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_frontend_optimize;
+#else
+ int x_flag_frontend_optimize;
+#define flag_frontend_optimize global_options.x_flag_frontend_optimize
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_no_function_cse;
+#else
+ int x_flag_no_function_cse;
+#define flag_no_function_cse global_options.x_flag_no_function_cse
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_function_sections;
+#else
+ int x_flag_function_sections;
+#define flag_function_sections global_options.x_flag_function_sections
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_gcse;
+#else
+ int x_flag_gcse;
+#define flag_gcse global_options.x_flag_gcse
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_gcse_after_reload;
+#else
+ int x_flag_gcse_after_reload;
+#define flag_gcse_after_reload global_options.x_flag_gcse_after_reload
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_gcse_las;
+#else
+ int x_flag_gcse_las;
+#define flag_gcse_las global_options.x_flag_gcse_las
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_gcse_lm;
+#else
+ int x_flag_gcse_lm;
+#define flag_gcse_lm global_options.x_flag_gcse_lm
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_gcse_sm;
+#else
+ int x_flag_gcse_sm;
+#define flag_gcse_sm global_options.x_flag_gcse_sm
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_gimple;
+#else
+ int x_flag_gimple;
+#define flag_gimple global_options.x_flag_gimple
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_no_gnu_keywords;
+#else
+ int x_flag_no_gnu_keywords;
+#define flag_no_gnu_keywords global_options.x_flag_no_gnu_keywords
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_next_runtime;
+#else
+ int x_flag_next_runtime;
+#define flag_next_runtime global_options.x_flag_next_runtime
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tm;
+#else
+ int x_flag_tm;
+#define flag_tm global_options.x_flag_tm
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_gnu_unique;
+#else
+ int x_flag_gnu_unique;
+#define flag_gnu_unique global_options.x_flag_gnu_unique
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_gnu89_inline;
+#else
+ int x_flag_gnu89_inline;
+#define flag_gnu89_inline global_options.x_flag_gnu89_inline
+#endif
+#ifdef GENERATOR_FILE
+extern int go_check_divide_overflow;
+#else
+ int x_go_check_divide_overflow;
+#define go_check_divide_overflow global_options.x_go_check_divide_overflow
+#endif
+#ifdef GENERATOR_FILE
+extern int go_check_divide_zero;
+#else
+ int x_go_check_divide_zero;
+#define go_check_divide_zero global_options.x_go_check_divide_zero
+#endif
+#ifdef GENERATOR_FILE
+extern int go_compiling_runtime;
+#else
+ int x_go_compiling_runtime;
+#define go_compiling_runtime global_options.x_go_compiling_runtime
+#endif
+#ifdef GENERATOR_FILE
+extern int go_debug_escape_level;
+#else
+ int x_go_debug_escape_level;
+#define go_debug_escape_level global_options.x_go_debug_escape_level
+#endif
+#ifdef GENERATOR_FILE
+extern const char *go_debug_escape_hash;
+#else
+ const char *x_go_debug_escape_hash;
+#define go_debug_escape_hash global_options.x_go_debug_escape_hash
+#endif
+#ifdef GENERATOR_FILE
+extern int go_debug_optimization;
+#else
+ int x_go_debug_optimization;
+#define go_debug_optimization global_options.x_go_debug_optimization
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_graphite;
+#else
+ int x_flag_graphite;
+#define flag_graphite global_options.x_flag_graphite
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_graphite_identity;
+#else
+ int x_flag_graphite_identity;
+#define flag_graphite_identity global_options.x_flag_graphite_identity
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_guess_branch_prob;
+#else
+ int x_flag_guess_branch_prob;
+#define flag_guess_branch_prob global_options.x_flag_guess_branch_prob
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_harden_compares;
+#else
+ int x_flag_harden_compares;
+#define flag_harden_compares global_options.x_flag_harden_compares
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_harden_conditional_branches;
+#else
+ int x_flag_harden_conditional_branches;
+#define flag_harden_conditional_branches global_options.x_flag_harden_conditional_branches
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_hoist_adjacent_loads;
+#else
+ int x_flag_hoist_adjacent_loads;
+#define flag_hoist_adjacent_loads global_options.x_flag_hoist_adjacent_loads
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_no_ident;
+#else
+ int x_flag_no_ident;
+#define flag_no_ident global_options.x_flag_no_ident
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_if_conversion;
+#else
+ int x_flag_if_conversion;
+#define flag_if_conversion global_options.x_flag_if_conversion
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_if_conversion2;
+#else
+ int x_flag_if_conversion2;
+#define flag_if_conversion2 global_options.x_flag_if_conversion2
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_implement_inlines;
+#else
+ int x_flag_implement_inlines;
+#define flag_implement_inlines global_options.x_flag_implement_inlines
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_implicit_constexpr;
+#else
+ int x_flag_implicit_constexpr;
+#define flag_implicit_constexpr global_options.x_flag_implicit_constexpr
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_implicit_inline_templates;
+#else
+ int x_flag_implicit_inline_templates;
+#define flag_implicit_inline_templates global_options.x_flag_implicit_inline_templates
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_implicit_none;
+#else
+ int x_flag_implicit_none;
+#define flag_implicit_none global_options.x_flag_implicit_none
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_implicit_templates;
+#else
+ int x_flag_implicit_templates;
+#define flag_implicit_templates global_options.x_flag_implicit_templates
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_indirect_inlining;
+#else
+ int x_flag_indirect_inlining;
+#define flag_indirect_inlining global_options.x_flag_indirect_inlining
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_inhibit_size_directive;
+#else
+ int x_flag_inhibit_size_directive;
+#define flag_inhibit_size_directive global_options.x_flag_inhibit_size_directive
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_init_derived;
+#else
+ int x_flag_init_derived;
+#define flag_init_derived global_options.x_flag_init_derived
+#endif
+#ifdef GENERATOR_FILE
+extern enum gfc_init_local_real flag_init_real;
+#else
+ enum gfc_init_local_real x_flag_init_real;
+#define flag_init_real global_options.x_flag_init_real
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_no_inline;
+#else
+ int x_flag_no_inline;
+#define flag_no_inline global_options.x_flag_no_inline
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_inline_arg_packing;
+#else
+ int x_flag_inline_arg_packing;
+#define flag_inline_arg_packing global_options.x_flag_inline_arg_packing
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_inline_atomics;
+#else
+ int x_flag_inline_atomics;
+#define flag_inline_atomics global_options.x_flag_inline_atomics
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_inline_functions;
+#else
+ int x_flag_inline_functions;
+#define flag_inline_functions global_options.x_flag_inline_functions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_inline_functions_called_once;
+#else
+ int x_flag_inline_functions_called_once;
+#define flag_inline_functions_called_once global_options.x_flag_inline_functions_called_once
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_inline_matmul_limit;
+#else
+ int x_flag_inline_matmul_limit;
+#define flag_inline_matmul_limit global_options.x_flag_inline_matmul_limit
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_inline_small_functions;
+#else
+ int x_flag_inline_small_functions;
+#define flag_inline_small_functions global_options.x_flag_inline_small_functions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_instrument_function_entry_exit;
+#else
+ int x_flag_instrument_function_entry_exit;
+#define flag_instrument_function_entry_exit global_options.x_flag_instrument_function_entry_exit
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_integer4_kind;
+#else
+ int x_flag_integer4_kind;
+#define flag_integer4_kind global_options.x_flag_integer4_kind
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_invariants;
+#else
+ int x_flag_invariants;
+#define flag_invariants global_options.x_flag_invariants
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_bit_cp;
+#else
+ int x_flag_ipa_bit_cp;
+#define flag_ipa_bit_cp global_options.x_flag_ipa_bit_cp
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_cp;
+#else
+ int x_flag_ipa_cp;
+#define flag_ipa_cp global_options.x_flag_ipa_cp
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_cp_clone;
+#else
+ int x_flag_ipa_cp_clone;
+#define flag_ipa_cp_clone global_options.x_flag_ipa_cp_clone
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_icf;
+#else
+ int x_flag_ipa_icf;
+#define flag_ipa_icf global_options.x_flag_ipa_icf
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_icf_functions;
+#else
+ int x_flag_ipa_icf_functions;
+#define flag_ipa_icf_functions global_options.x_flag_ipa_icf_functions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_icf_variables;
+#else
+ int x_flag_ipa_icf_variables;
+#define flag_ipa_icf_variables global_options.x_flag_ipa_icf_variables
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_modref;
+#else
+ int x_flag_ipa_modref;
+#define flag_ipa_modref global_options.x_flag_ipa_modref
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_profile;
+#else
+ int x_flag_ipa_profile;
+#define flag_ipa_profile global_options.x_flag_ipa_profile
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_pta;
+#else
+ int x_flag_ipa_pta;
+#define flag_ipa_pta global_options.x_flag_ipa_pta
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_pure_const;
+#else
+ int x_flag_ipa_pure_const;
+#define flag_ipa_pure_const global_options.x_flag_ipa_pure_const
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_ra;
+#else
+ int x_flag_ipa_ra;
+#define flag_ipa_ra global_options.x_flag_ipa_ra
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_reference;
+#else
+ int x_flag_ipa_reference;
+#define flag_ipa_reference global_options.x_flag_ipa_reference
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_reference_addressable;
+#else
+ int x_flag_ipa_reference_addressable;
+#define flag_ipa_reference_addressable global_options.x_flag_ipa_reference_addressable
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_sra;
+#else
+ int x_flag_ipa_sra;
+#define flag_ipa_sra global_options.x_flag_ipa_sra
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_stack_alignment;
+#else
+ int x_flag_ipa_stack_alignment;
+#define flag_ipa_stack_alignment global_options.x_flag_ipa_stack_alignment
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_strict_aliasing;
+#else
+ int x_flag_ipa_strict_aliasing;
+#define flag_ipa_strict_aliasing global_options.x_flag_ipa_strict_aliasing
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ipa_vrp;
+#else
+ int x_flag_ipa_vrp;
+#define flag_ipa_vrp global_options.x_flag_ipa_vrp
+#endif
+#ifdef GENERATOR_FILE
+extern enum ira_algorithm flag_ira_algorithm;
+#else
+ enum ira_algorithm x_flag_ira_algorithm;
+#define flag_ira_algorithm global_options.x_flag_ira_algorithm
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ira_hoist_pressure;
+#else
+ int x_flag_ira_hoist_pressure;
+#define flag_ira_hoist_pressure global_options.x_flag_ira_hoist_pressure
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ira_loop_pressure;
+#else
+ int x_flag_ira_loop_pressure;
+#define flag_ira_loop_pressure global_options.x_flag_ira_loop_pressure
+#endif
+#ifdef GENERATOR_FILE
+extern enum ira_region flag_ira_region;
+#else
+ enum ira_region x_flag_ira_region;
+#define flag_ira_region global_options.x_flag_ira_region
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ira_share_save_slots;
+#else
+ int x_flag_ira_share_save_slots;
+#define flag_ira_share_save_slots global_options.x_flag_ira_share_save_slots
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ira_share_spill_slots;
+#else
+ int x_flag_ira_share_spill_slots;
+#define flag_ira_share_spill_slots global_options.x_flag_ira_share_spill_slots
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ira_verbose;
+#else
+ int x_flag_ira_verbose;
+#define flag_ira_verbose global_options.x_flag_ira_verbose
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_isolate_erroneous_paths_attribute;
+#else
+ int x_flag_isolate_erroneous_paths_attribute;
+#define flag_isolate_erroneous_paths_attribute global_options.x_flag_isolate_erroneous_paths_attribute
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_isolate_erroneous_paths_dereference;
+#else
+ int x_flag_isolate_erroneous_paths_dereference;
+#define flag_isolate_erroneous_paths_dereference global_options.x_flag_isolate_erroneous_paths_dereference
+#endif
+#ifdef GENERATOR_FILE
+extern enum ivar_visibility default_ivar_visibility;
+#else
+ enum ivar_visibility x_default_ivar_visibility;
+#define default_ivar_visibility global_options.x_default_ivar_visibility
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ivopts;
+#else
+ int x_flag_ivopts;
+#define flag_ivopts global_options.x_flag_ivopts
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_jump_tables;
+#else
+ int x_flag_jump_tables;
+#define flag_jump_tables global_options.x_flag_jump_tables
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_keep_gc_roots_live;
+#else
+ int x_flag_keep_gc_roots_live;
+#define flag_keep_gc_roots_live global_options.x_flag_keep_gc_roots_live
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_keep_inline_dllexport;
+#else
+ int x_flag_keep_inline_dllexport;
+#define flag_keep_inline_dllexport global_options.x_flag_keep_inline_dllexport
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_keep_inline_functions;
+#else
+ int x_flag_keep_inline_functions;
+#define flag_keep_inline_functions global_options.x_flag_keep_inline_functions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_keep_static_consts;
+#else
+ int x_flag_keep_static_consts;
+#define flag_keep_static_consts global_options.x_flag_keep_static_consts
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_keep_static_functions;
+#else
+ int x_flag_keep_static_functions;
+#define flag_keep_static_functions global_options.x_flag_keep_static_functions
+#endif
+#ifdef GENERATOR_FILE
+extern int note_include_translate_yes;
+#else
+ int x_note_include_translate_yes;
+#define note_include_translate_yes global_options.x_note_include_translate_yes
+#endif
+#ifdef GENERATOR_FILE
+extern int note_include_translate_no;
+#else
+ int x_note_include_translate_no;
+#define note_include_translate_no global_options.x_note_include_translate_no
+#endif
+#ifdef GENERATOR_FILE
+extern int note_module_cmi_yes;
+#else
+ int x_note_module_cmi_yes;
+#define note_module_cmi_yes global_options.x_note_module_cmi_yes
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_large_source_files;
+#else
+ int x_flag_large_source_files;
+#define flag_large_source_files global_options.x_flag_large_source_files
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lax_vector_conversions;
+#else
+ int x_flag_lax_vector_conversions;
+#define flag_lax_vector_conversions global_options.x_flag_lax_vector_conversions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_leading_underscore;
+#else
+ int x_flag_leading_underscore;
+#define flag_leading_underscore global_options.x_flag_leading_underscore
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lifetime_dse;
+#else
+ int x_flag_lifetime_dse;
+#define flag_lifetime_dse global_options.x_flag_lifetime_dse
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_limit_function_alignment;
+#else
+ int x_flag_limit_function_alignment;
+#define flag_limit_function_alignment global_options.x_flag_limit_function_alignment
+#endif
+#ifdef GENERATOR_FILE
+extern enum lto_linker_output flag_lto_linker_output;
+#else
+ enum lto_linker_output x_flag_lto_linker_output;
+#define flag_lto_linker_output global_options.x_flag_lto_linker_output
+#endif
+#ifdef GENERATOR_FILE
+extern enum live_patching_level flag_live_patching;
+#else
+ enum live_patching_level x_flag_live_patching;
+#define flag_live_patching global_options.x_flag_live_patching
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_live_range_shrinkage;
+#else
+ int x_flag_live_range_shrinkage;
+#define flag_live_range_shrinkage global_options.x_flag_live_range_shrinkage
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_local_ivars;
+#else
+ int x_flag_local_ivars;
+#define flag_local_ivars global_options.x_flag_local_ivars
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_loop_interchange;
+#else
+ int x_flag_loop_interchange;
+#define flag_loop_interchange global_options.x_flag_loop_interchange
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_loop_nest_optimize;
+#else
+ int x_flag_loop_nest_optimize;
+#define flag_loop_nest_optimize global_options.x_flag_loop_nest_optimize
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_loop_parallelize_all;
+#else
+ int x_flag_loop_parallelize_all;
+#define flag_loop_parallelize_all global_options.x_flag_loop_parallelize_all
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_unroll_jam;
+#else
+ int x_flag_unroll_jam;
+#define flag_unroll_jam global_options.x_flag_unroll_jam
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lra_remat;
+#else
+ int x_flag_lra_remat;
+#define flag_lra_remat global_options.x_flag_lra_remat
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_compression_level;
+#else
+ int x_flag_lto_compression_level;
+#define flag_lto_compression_level global_options.x_flag_lto_compression_level
+#endif
+#ifdef GENERATOR_FILE
+extern enum lto_partition_model flag_lto_partition;
+#else
+ enum lto_partition_model x_flag_lto_partition;
+#define flag_lto_partition global_options.x_flag_lto_partition
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_report;
+#else
+ int x_flag_lto_report;
+#define flag_lto_report global_options.x_flag_lto_report
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_report_wpa;
+#else
+ int x_flag_lto_report_wpa;
+#define flag_lto_report_wpa global_options.x_flag_lto_report_wpa
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_lto;
+#else
+ const char *x_flag_lto;
+#define flag_lto global_options.x_flag_lto
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ltrans;
+#else
+ int x_flag_ltrans;
+#define flag_ltrans global_options.x_flag_ltrans
+#endif
+#ifdef GENERATOR_FILE
+extern const char *ltrans_output_list;
+#else
+ const char *x_ltrans_output_list;
+#define ltrans_output_list global_options.x_ltrans_output_list
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_errno_math;
+#else
+ int x_flag_errno_math;
+#define flag_errno_math global_options.x_flag_errno_math
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_max_array_constructor;
+#else
+ int x_flag_max_array_constructor;
+#define flag_max_array_constructor global_options.x_flag_max_array_constructor
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_max_errors;
+#else
+ int x_flag_max_errors;
+#define flag_max_errors global_options.x_flag_max_errors
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_max_stack_var_size;
+#else
+ int x_flag_max_stack_var_size;
+#define flag_max_stack_var_size global_options.x_flag_max_stack_var_size
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_max_subrecord_length;
+#else
+ int x_flag_max_subrecord_length;
+#define flag_max_subrecord_length global_options.x_flag_max_subrecord_length
+#endif
+#ifdef GENERATOR_FILE
+extern int mem_report;
+#else
+ int x_mem_report;
+#define mem_report global_options.x_mem_report
+#endif
+#ifdef GENERATOR_FILE
+extern int mem_report_wpa;
+#else
+ int x_mem_report_wpa;
+#define mem_report_wpa global_options.x_mem_report_wpa
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_merge_constants;
+#else
+ int x_flag_merge_constants;
+#define flag_merge_constants global_options.x_flag_merge_constants
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_merge_debug_strings;
+#else
+ int x_flag_merge_debug_strings;
+#define flag_merge_debug_strings global_options.x_flag_merge_debug_strings
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_header_unit;
+#else
+ int x_flag_header_unit;
+#define flag_header_unit global_options.x_flag_header_unit
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_module_implicit_inline;
+#else
+ int x_flag_module_implicit_inline;
+#define flag_module_implicit_inline global_options.x_flag_module_implicit_inline
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_module_lazy;
+#else
+ int x_flag_module_lazy;
+#define flag_module_lazy global_options.x_flag_module_lazy
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_module_only;
+#else
+ int x_flag_module_only;
+#define flag_module_only global_options.x_flag_module_only
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_module_private;
+#else
+ int x_flag_module_private;
+#define flag_module_private global_options.x_flag_module_private
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_module_version_ignore;
+#else
+ int x_flag_module_version_ignore;
+#define flag_module_version_ignore global_options.x_flag_module_version_ignore
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_moduleinfo;
+#else
+ int x_flag_moduleinfo;
+#define flag_moduleinfo global_options.x_flag_moduleinfo
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_modules;
+#else
+ int x_flag_modules;
+#define flag_modules global_options.x_flag_modules
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_modulo_sched;
+#else
+ int x_flag_modulo_sched;
+#define flag_modulo_sched global_options.x_flag_modulo_sched
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_modulo_sched_allow_regmoves;
+#else
+ int x_flag_modulo_sched_allow_regmoves;
+#define flag_modulo_sched_allow_regmoves global_options.x_flag_modulo_sched_allow_regmoves
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_move_loop_invariants;
+#else
+ int x_flag_move_loop_invariants;
+#define flag_move_loop_invariants global_options.x_flag_move_loop_invariants
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_move_loop_stores;
+#else
+ int x_flag_move_loop_stores;
+#define flag_move_loop_stores global_options.x_flag_move_loop_stores
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ms_extensions;
+#else
+ int x_flag_ms_extensions;
+#define flag_ms_extensions global_options.x_flag_ms_extensions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_new_inheriting_ctors;
+#else
+ int x_flag_new_inheriting_ctors;
+#define flag_new_inheriting_ctors global_options.x_flag_new_inheriting_ctors
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_new_ttp;
+#else
+ int x_flag_new_ttp;
+#define flag_new_ttp global_options.x_flag_new_ttp
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_nil_receivers;
+#else
+ int x_flag_nil_receivers;
+#define flag_nil_receivers global_options.x_flag_nil_receivers
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_non_call_exceptions;
+#else
+ int x_flag_non_call_exceptions;
+#define flag_non_call_exceptions global_options.x_flag_non_call_exceptions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_no_nonansi_builtin;
+#else
+ int x_flag_no_nonansi_builtin;
+#define flag_no_nonansi_builtin global_options.x_flag_no_nonansi_builtin
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_nothrow_opt;
+#else
+ int x_flag_nothrow_opt;
+#define flag_nothrow_opt global_options.x_flag_nothrow_opt
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_objc_abi;
+#else
+ int x_flag_objc_abi;
+#define flag_objc_abi global_options.x_flag_objc_abi
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_objc_call_cxx_cdtors;
+#else
+ int x_flag_objc_call_cxx_cdtors;
+#define flag_objc_call_cxx_cdtors global_options.x_flag_objc_call_cxx_cdtors
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_objc_direct_dispatch;
+#else
+ int x_flag_objc_direct_dispatch;
+#define flag_objc_direct_dispatch global_options.x_flag_objc_direct_dispatch
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_objc_exceptions;
+#else
+ int x_flag_objc_exceptions;
+#define flag_objc_exceptions global_options.x_flag_objc_exceptions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_objc_gc;
+#else
+ int x_flag_objc_gc;
+#define flag_objc_gc global_options.x_flag_objc_gc
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_objc_nilcheck;
+#else
+ int x_flag_objc_nilcheck;
+#define flag_objc_nilcheck global_options.x_flag_objc_nilcheck
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_objc_sjlj_exceptions;
+#else
+ int x_flag_objc_sjlj_exceptions;
+#define flag_objc_sjlj_exceptions global_options.x_flag_objc_sjlj_exceptions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_objc1_only;
+#else
+ int x_flag_objc1_only;
+#define flag_objc1_only global_options.x_flag_objc1_only
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_omit_frame_pointer;
+#else
+ int x_flag_omit_frame_pointer;
+#define flag_omit_frame_pointer global_options.x_flag_omit_frame_pointer
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_openacc;
+#else
+ int x_flag_openacc;
+#define flag_openacc global_options.x_flag_openacc
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_openacc_dims;
+#else
+ const char *x_flag_openacc_dims;
+#define flag_openacc_dims global_options.x_flag_openacc_dims
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_openmp;
+#else
+ int x_flag_openmp;
+#define flag_openmp global_options.x_flag_openmp
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_openmp_simd;
+#else
+ int x_flag_openmp_simd;
+#define flag_openmp_simd global_options.x_flag_openmp_simd
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_openmp_target_simd_clone;
+#else
+ int x_flag_openmp_target_simd_clone;
+#define flag_openmp_target_simd_clone global_options.x_flag_openmp_target_simd_clone
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_opt_info;
+#else
+ int x_flag_opt_info;
+#define flag_opt_info global_options.x_flag_opt_info
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_optimize_sibling_calls;
+#else
+ int x_flag_optimize_sibling_calls;
+#define flag_optimize_sibling_calls global_options.x_flag_optimize_sibling_calls
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_optimize_strlen;
+#else
+ int x_flag_optimize_strlen;
+#define flag_optimize_strlen global_options.x_flag_optimize_strlen
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_pack_derived;
+#else
+ int x_flag_pack_derived;
+#define flag_pack_derived global_options.x_flag_pack_derived
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_pack_struct;
+#else
+ int x_flag_pack_struct;
+#define flag_pack_struct global_options.x_flag_pack_struct
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_pad_source;
+#else
+ int x_flag_pad_source;
+#define flag_pad_source global_options.x_flag_pad_source
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_partial_inlining;
+#else
+ int x_flag_partial_inlining;
+#define flag_partial_inlining global_options.x_flag_partial_inlining
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_patchable_function_entry;
+#else
+ const char *x_flag_patchable_function_entry;
+#define flag_patchable_function_entry global_options.x_flag_patchable_function_entry
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_pcc_struct_return;
+#else
+ int x_flag_pcc_struct_return;
+#define flag_pcc_struct_return global_options.x_flag_pcc_struct_return
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_peel_loops;
+#else
+ int x_flag_peel_loops;
+#define flag_peel_loops global_options.x_flag_peel_loops
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_no_peephole;
+#else
+ int x_flag_no_peephole;
+#define flag_no_peephole global_options.x_flag_no_peephole
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_peephole2;
+#else
+ int x_flag_peephole2;
+#define flag_peephole2 global_options.x_flag_peephole2
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_permissive;
+#else
+ int x_flag_permissive;
+#define flag_permissive global_options.x_flag_permissive
+#endif
+#ifdef GENERATOR_FILE
+extern enum permitted_flt_eval_methods flag_permitted_flt_eval_methods;
+#else
+ enum permitted_flt_eval_methods x_flag_permitted_flt_eval_methods;
+#define flag_permitted_flt_eval_methods global_options.x_flag_permitted_flt_eval_methods
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_plan9_extensions;
+#else
+ int x_flag_plan9_extensions;
+#define flag_plan9_extensions global_options.x_flag_plan9_extensions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_plt;
+#else
+ int x_flag_plt;
+#define flag_plt global_options.x_flag_plt
+#endif
+#ifdef GENERATOR_FILE
+extern int post_ipa_mem_report;
+#else
+ int x_post_ipa_mem_report;
+#define post_ipa_mem_report global_options.x_post_ipa_mem_report
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_postconditions;
+#else
+ int x_flag_postconditions;
+#define flag_postconditions global_options.x_flag_postconditions
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_pre_include;
+#else
+ const char *x_flag_pre_include;
+#define flag_pre_include global_options.x_flag_pre_include
+#endif
+#ifdef GENERATOR_FILE
+extern int pre_ipa_mem_report;
+#else
+ int x_pre_ipa_mem_report;
+#define pre_ipa_mem_report global_options.x_pre_ipa_mem_report
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_preconditions;
+#else
+ int x_flag_preconditions;
+#define flag_preconditions global_options.x_flag_preconditions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_predictive_commoning;
+#else
+ int x_flag_predictive_commoning;
+#define flag_predictive_commoning global_options.x_flag_predictive_commoning
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_prefetch_loop_arrays;
+#else
+ int x_flag_prefetch_loop_arrays;
+#define flag_prefetch_loop_arrays global_options.x_flag_prefetch_loop_arrays
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_pretty_templates;
+#else
+ int x_flag_pretty_templates;
+#define flag_pretty_templates global_options.x_flag_pretty_templates
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_printf_return_value;
+#else
+ int x_flag_printf_return_value;
+#define flag_printf_return_value global_options.x_flag_printf_return_value
+#endif
+#ifdef GENERATOR_FILE
+extern int profile_flag;
+#else
+ int x_profile_flag;
+#define profile_flag global_options.x_profile_flag
+#endif
+#ifdef GENERATOR_FILE
+extern int profile_abs_path_flag;
+#else
+ int x_profile_abs_path_flag;
+#define profile_abs_path_flag global_options.x_profile_abs_path_flag
+#endif
+#ifdef GENERATOR_FILE
+extern int profile_arc_flag;
+#else
+ int x_profile_arc_flag;
+#define profile_arc_flag global_options.x_profile_arc_flag
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_profile_correction;
+#else
+ int x_flag_profile_correction;
+#define flag_profile_correction global_options.x_flag_profile_correction
+#endif
+#ifdef GENERATOR_FILE
+extern const char *profile_data_prefix;
+#else
+ const char *x_profile_data_prefix;
+#define profile_data_prefix global_options.x_profile_data_prefix
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_profile_exclude_files;
+#else
+ const char *x_flag_profile_exclude_files;
+#define flag_profile_exclude_files global_options.x_flag_profile_exclude_files
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_profile_filter_files;
+#else
+ const char *x_flag_profile_filter_files;
+#define flag_profile_filter_files global_options.x_flag_profile_filter_files
+#endif
+#ifdef GENERATOR_FILE
+extern const char *profile_info_section;
+#else
+ const char *x_profile_info_section;
+#define profile_info_section global_options.x_profile_info_section
+#endif
+#ifdef GENERATOR_FILE
+extern const char *profile_note_location;
+#else
+ const char *x_profile_note_location;
+#define profile_note_location global_options.x_profile_note_location
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_profile_partial_training;
+#else
+ int x_flag_profile_partial_training;
+#define flag_profile_partial_training global_options.x_flag_profile_partial_training
+#endif
+#ifdef GENERATOR_FILE
+extern const char *profile_prefix_path;
+#else
+ const char *x_profile_prefix_path;
+#define profile_prefix_path global_options.x_profile_prefix_path
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_profile_reorder_functions;
+#else
+ int x_flag_profile_reorder_functions;
+#define flag_profile_reorder_functions global_options.x_flag_profile_reorder_functions
+#endif
+#ifdef GENERATOR_FILE
+extern int profile_report;
+#else
+ int x_profile_report;
+#define profile_report global_options.x_profile_report
+#endif
+#ifdef GENERATOR_FILE
+extern enum profile_reproducibility flag_profile_reproducible;
+#else
+ enum profile_reproducibility x_flag_profile_reproducible;
+#define flag_profile_reproducible global_options.x_flag_profile_reproducible
+#endif
+#ifdef GENERATOR_FILE
+extern enum profile_update flag_profile_update;
+#else
+ enum profile_update x_flag_profile_update;
+#define flag_profile_update global_options.x_flag_profile_update
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_profile_use;
+#else
+ int x_flag_profile_use;
+#define flag_profile_use global_options.x_flag_profile_use
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_profile_values;
+#else
+ int x_flag_profile_values;
+#define flag_profile_values global_options.x_flag_profile_values
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_protect_parens;
+#else
+ int x_flag_protect_parens;
+#define flag_protect_parens global_options.x_flag_protect_parens
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_range_check;
+#else
+ int x_flag_range_check;
+#define flag_range_check global_options.x_flag_range_check
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_real4_kind;
+#else
+ int x_flag_real4_kind;
+#define flag_real4_kind global_options.x_flag_real4_kind
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_real8_kind;
+#else
+ int x_flag_real8_kind;
+#define flag_real8_kind global_options.x_flag_real8_kind
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_realloc_lhs;
+#else
+ int x_flag_realloc_lhs;
+#define flag_realloc_lhs global_options.x_flag_realloc_lhs
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_reciprocal_math;
+#else
+ int x_flag_reciprocal_math;
+#define flag_reciprocal_math global_options.x_flag_reciprocal_math
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_record_gcc_switches;
+#else
+ int x_flag_record_gcc_switches;
+#define flag_record_gcc_switches global_options.x_flag_record_gcc_switches
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_record_marker;
+#else
+ int x_flag_record_marker;
+#define flag_record_marker global_options.x_flag_record_marker
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_recursive;
+#else
+ int x_flag_recursive;
+#define flag_recursive global_options.x_flag_recursive
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ree;
+#else
+ int x_flag_ree;
+#define flag_ree global_options.x_flag_ree
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_rename_registers;
+#else
+ int x_flag_rename_registers;
+#define flag_rename_registers global_options.x_flag_rename_registers
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_reorder_blocks;
+#else
+ int x_flag_reorder_blocks;
+#define flag_reorder_blocks global_options.x_flag_reorder_blocks
+#endif
+#ifdef GENERATOR_FILE
+extern enum reorder_blocks_algorithm flag_reorder_blocks_algorithm;
+#else
+ enum reorder_blocks_algorithm x_flag_reorder_blocks_algorithm;
+#define flag_reorder_blocks_algorithm global_options.x_flag_reorder_blocks_algorithm
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_reorder_blocks_and_partition;
+#else
+ int x_flag_reorder_blocks_and_partition;
+#define flag_reorder_blocks_and_partition global_options.x_flag_reorder_blocks_and_partition
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_reorder_functions;
+#else
+ int x_flag_reorder_functions;
+#define flag_reorder_functions global_options.x_flag_reorder_functions
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_repack_arrays;
+#else
+ int x_flag_repack_arrays;
+#define flag_repack_arrays global_options.x_flag_repack_arrays
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_replace_objc_classes;
+#else
+ int x_flag_replace_objc_classes;
+#define flag_replace_objc_classes global_options.x_flag_replace_objc_classes
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_report_bug;
+#else
+ int x_flag_report_bug;
+#define flag_report_bug global_options.x_flag_report_bug
+#endif
+#ifdef GENERATOR_FILE
+extern int go_require_return_statement;
+#else
+ int x_go_require_return_statement;
+#define go_require_return_statement global_options.x_go_require_return_statement
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_rerun_cse_after_loop;
+#else
+ int x_flag_rerun_cse_after_loop;
+#define flag_rerun_cse_after_loop global_options.x_flag_rerun_cse_after_loop
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_resched_modulo_sched;
+#else
+ int x_flag_resched_modulo_sched;
+#define flag_resched_modulo_sched global_options.x_flag_resched_modulo_sched
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_rounding_math;
+#else
+ int x_flag_rounding_math;
+#define flag_rounding_math global_options.x_flag_rounding_math
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_rtti;
+#else
+ int x_flag_rtti;
+#define flag_rtti global_options.x_flag_rtti
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_rust_compile_until;
+#else
+ int x_flag_rust_compile_until;
+#define flag_rust_compile_until global_options.x_flag_rust_compile_until
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_rust_debug;
+#else
+ int x_flag_rust_debug;
+#define flag_rust_debug global_options.x_flag_rust_debug
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_rust_edition;
+#else
+ int x_flag_rust_edition;
+#define flag_rust_edition global_options.x_flag_rust_edition
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_rust_embed_metadata;
+#else
+ int x_flag_rust_embed_metadata;
+#define flag_rust_embed_metadata global_options.x_flag_rust_embed_metadata
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_rust_experimental;
+#else
+ int x_flag_rust_experimental;
+#define flag_rust_experimental global_options.x_flag_rust_experimental
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_rust_mangling;
+#else
+ int x_flag_rust_mangling;
+#define flag_rust_mangling global_options.x_flag_rust_mangling
+#endif
+#ifdef GENERATOR_FILE
+extern int rust_max_recursion_depth;
+#else
+ int x_rust_max_recursion_depth;
+#define rust_max_recursion_depth global_options.x_rust_max_recursion_depth
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sanitize_address_use_after_scope;
+#else
+ int x_flag_sanitize_address_use_after_scope;
+#define flag_sanitize_address_use_after_scope global_options.x_flag_sanitize_address_use_after_scope
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sanitize_coverage;
+#else
+ int x_flag_sanitize_coverage;
+#define flag_sanitize_coverage global_options.x_flag_sanitize_coverage
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_save_optimization_record;
+#else
+ int x_flag_save_optimization_record;
+#define flag_save_optimization_record global_options.x_flag_save_optimization_record
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sched_critical_path_heuristic;
+#else
+ int x_flag_sched_critical_path_heuristic;
+#define flag_sched_critical_path_heuristic global_options.x_flag_sched_critical_path_heuristic
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sched_dep_count_heuristic;
+#else
+ int x_flag_sched_dep_count_heuristic;
+#define flag_sched_dep_count_heuristic global_options.x_flag_sched_dep_count_heuristic
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sched_group_heuristic;
+#else
+ int x_flag_sched_group_heuristic;
+#define flag_sched_group_heuristic global_options.x_flag_sched_group_heuristic
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_schedule_interblock;
+#else
+ int x_flag_schedule_interblock;
+#define flag_schedule_interblock global_options.x_flag_schedule_interblock
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sched_last_insn_heuristic;
+#else
+ int x_flag_sched_last_insn_heuristic;
+#define flag_sched_last_insn_heuristic global_options.x_flag_sched_last_insn_heuristic
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sched_pressure;
+#else
+ int x_flag_sched_pressure;
+#define flag_sched_pressure global_options.x_flag_sched_pressure
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sched_rank_heuristic;
+#else
+ int x_flag_sched_rank_heuristic;
+#define flag_sched_rank_heuristic global_options.x_flag_sched_rank_heuristic
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_schedule_speculative;
+#else
+ int x_flag_schedule_speculative;
+#define flag_schedule_speculative global_options.x_flag_schedule_speculative
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sched_spec_insn_heuristic;
+#else
+ int x_flag_sched_spec_insn_heuristic;
+#define flag_sched_spec_insn_heuristic global_options.x_flag_sched_spec_insn_heuristic
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_schedule_speculative_load;
+#else
+ int x_flag_schedule_speculative_load;
+#define flag_schedule_speculative_load global_options.x_flag_schedule_speculative_load
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_schedule_speculative_load_dangerous;
+#else
+ int x_flag_schedule_speculative_load_dangerous;
+#define flag_schedule_speculative_load_dangerous global_options.x_flag_schedule_speculative_load_dangerous
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sched_stalled_insns;
+#else
+ int x_flag_sched_stalled_insns;
+#define flag_sched_stalled_insns global_options.x_flag_sched_stalled_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sched_stalled_insns_dep;
+#else
+ int x_flag_sched_stalled_insns_dep;
+#define flag_sched_stalled_insns_dep global_options.x_flag_sched_stalled_insns_dep
+#endif
+#ifdef GENERATOR_FILE
+extern int sched_verbose_param;
+#else
+ int x_sched_verbose_param;
+#define sched_verbose_param global_options.x_sched_verbose_param
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sched2_use_superblocks;
+#else
+ int x_flag_sched2_use_superblocks;
+#define flag_sched2_use_superblocks global_options.x_flag_sched2_use_superblocks
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_schedule_fusion;
+#else
+ int x_flag_schedule_fusion;
+#define flag_schedule_fusion global_options.x_flag_schedule_fusion
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_schedule_insns;
+#else
+ int x_flag_schedule_insns;
+#define flag_schedule_insns global_options.x_flag_schedule_insns
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_schedule_insns_after_reload;
+#else
+ int x_flag_schedule_insns_after_reload;
+#define flag_schedule_insns_after_reload global_options.x_flag_schedule_insns_after_reload
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_second_underscore;
+#else
+ int x_flag_second_underscore;
+#define flag_second_underscore global_options.x_flag_second_underscore
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_section_anchors;
+#else
+ int x_flag_section_anchors;
+#define flag_section_anchors global_options.x_flag_section_anchors
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sel_sched_pipelining;
+#else
+ int x_flag_sel_sched_pipelining;
+#define flag_sel_sched_pipelining global_options.x_flag_sel_sched_pipelining
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sel_sched_pipelining_outer_loops;
+#else
+ int x_flag_sel_sched_pipelining_outer_loops;
+#define flag_sel_sched_pipelining_outer_loops global_options.x_flag_sel_sched_pipelining_outer_loops
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sel_sched_reschedule_pipelined;
+#else
+ int x_flag_sel_sched_reschedule_pipelined;
+#define flag_sel_sched_reschedule_pipelined global_options.x_flag_sel_sched_reschedule_pipelined
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_selective_scheduling;
+#else
+ int x_flag_selective_scheduling;
+#define flag_selective_scheduling global_options.x_flag_selective_scheduling
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_selective_scheduling2;
+#else
+ int x_flag_selective_scheduling2;
+#define flag_selective_scheduling2 global_options.x_flag_selective_scheduling2
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_self_test;
+#else
+ const char *x_flag_self_test;
+#define flag_self_test global_options.x_flag_self_test
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_semantic_interposition;
+#else
+ int x_flag_semantic_interposition;
+#define flag_semantic_interposition global_options.x_flag_semantic_interposition
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_short_enums;
+#else
+ int x_flag_short_enums;
+#define flag_short_enums global_options.x_flag_short_enums
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_short_wchar;
+#else
+ int x_flag_short_wchar;
+#define flag_short_wchar global_options.x_flag_short_wchar
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_show_column;
+#else
+ int x_flag_show_column;
+#define flag_show_column global_options.x_flag_show_column
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_shrink_wrap;
+#else
+ int x_flag_shrink_wrap;
+#define flag_shrink_wrap global_options.x_flag_shrink_wrap
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_shrink_wrap_separate;
+#else
+ int x_flag_shrink_wrap_separate;
+#define flag_shrink_wrap_separate global_options.x_flag_shrink_wrap_separate
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sign_zero;
+#else
+ int x_flag_sign_zero;
+#define flag_sign_zero global_options.x_flag_sign_zero
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_signaling_nans;
+#else
+ int x_flag_signaling_nans;
+#define flag_signaling_nans global_options.x_flag_signaling_nans
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_signed_bitfields;
+#else
+ int x_flag_signed_bitfields;
+#define flag_signed_bitfields global_options.x_flag_signed_bitfields
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_signed_char;
+#else
+ int x_flag_signed_char;
+#define flag_signed_char global_options.x_flag_signed_char
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_signed_zeros;
+#else
+ int x_flag_signed_zeros;
+#define flag_signed_zeros global_options.x_flag_signed_zeros
+#endif
+#ifdef GENERATOR_FILE
+extern enum vect_cost_model flag_simd_cost_model;
+#else
+ enum vect_cost_model x_flag_simd_cost_model;
+#define flag_simd_cost_model global_options.x_flag_simd_cost_model
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_single_precision_constant;
+#else
+ int x_flag_single_precision_constant;
+#define flag_single_precision_constant global_options.x_flag_single_precision_constant
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sized_deallocation;
+#else
+ int x_flag_sized_deallocation;
+#define flag_sized_deallocation global_options.x_flag_sized_deallocation
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_split_ivs_in_unroller;
+#else
+ int x_flag_split_ivs_in_unroller;
+#define flag_split_ivs_in_unroller global_options.x_flag_split_ivs_in_unroller
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_split_loops;
+#else
+ int x_flag_split_loops;
+#define flag_split_loops global_options.x_flag_split_loops
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_split_paths;
+#else
+ int x_flag_split_paths;
+#define flag_split_paths global_options.x_flag_split_paths
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_split_stack;
+#else
+ int x_flag_split_stack;
+#define flag_split_stack global_options.x_flag_split_stack
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_split_wide_types;
+#else
+ int x_flag_split_wide_types;
+#define flag_split_wide_types global_options.x_flag_split_wide_types
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_split_wide_types_early;
+#else
+ int x_flag_split_wide_types_early;
+#define flag_split_wide_types_early global_options.x_flag_split_wide_types_early
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ssa_backprop;
+#else
+ int x_flag_ssa_backprop;
+#define flag_ssa_backprop global_options.x_flag_ssa_backprop
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_ssa_phiopt;
+#else
+ int x_flag_ssa_phiopt;
+#define flag_ssa_phiopt global_options.x_flag_ssa_phiopt
+#endif
+#ifdef GENERATOR_FILE
+extern enum scalar_storage_order_kind default_sso;
+#else
+ enum scalar_storage_order_kind x_default_sso;
+#define default_sso global_options.x_default_sso
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_stack_arrays;
+#else
+ int x_flag_stack_arrays;
+#define flag_stack_arrays global_options.x_flag_stack_arrays
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_stack_clash_protection;
+#else
+ int x_flag_stack_clash_protection;
+#define flag_stack_clash_protection global_options.x_flag_stack_clash_protection
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_stack_protect;
+#else
+ int x_flag_stack_protect;
+#define flag_stack_protect global_options.x_flag_stack_protect
+#endif
+#ifdef GENERATOR_FILE
+extern enum stack_reuse_level flag_stack_reuse;
+#else
+ enum stack_reuse_level x_flag_stack_reuse;
+#define flag_stack_reuse global_options.x_flag_stack_reuse
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_stack_usage;
+#else
+ int x_flag_stack_usage;
+#define flag_stack_usage global_options.x_flag_stack_usage
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_detailed_statistics;
+#else
+ int x_flag_detailed_statistics;
+#define flag_detailed_statistics global_options.x_flag_detailed_statistics
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_stdarg_opt;
+#else
+ int x_flag_stdarg_opt;
+#define flag_stdarg_opt global_options.x_flag_stdarg_opt
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_store_merging;
+#else
+ int x_flag_store_merging;
+#define flag_store_merging global_options.x_flag_store_merging
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_strict_aliasing;
+#else
+ int x_flag_strict_aliasing;
+#define flag_strict_aliasing global_options.x_flag_strict_aliasing
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_strict_enums;
+#else
+ int x_flag_strict_enums;
+#define flag_strict_enums global_options.x_flag_strict_enums
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_strict_flex_arrays;
+#else
+ int x_flag_strict_flex_arrays;
+#define flag_strict_flex_arrays global_options.x_flag_strict_flex_arrays
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_strict_volatile_bitfields;
+#else
+ int x_flag_strict_volatile_bitfields;
+#define flag_strict_volatile_bitfields global_options.x_flag_strict_volatile_bitfields
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_strong_eval_order;
+#else
+ int x_flag_strong_eval_order;
+#define flag_strong_eval_order global_options.x_flag_strong_eval_order
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_switch_errors;
+#else
+ int x_flag_switch_errors;
+#define flag_switch_errors global_options.x_flag_switch_errors
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_sync_libcalls;
+#else
+ int x_flag_sync_libcalls;
+#define flag_sync_libcalls global_options.x_flag_sync_libcalls
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_syntax_only;
+#else
+ int x_flag_syntax_only;
+#define flag_syntax_only global_options.x_flag_syntax_only
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tail_call_workaround;
+#else
+ int x_flag_tail_call_workaround;
+#define flag_tail_call_workaround global_options.x_flag_tail_call_workaround
+#endif
+#ifdef GENERATOR_FILE
+extern int template_backtrace_limit;
+#else
+ int x_template_backtrace_limit;
+#define template_backtrace_limit global_options.x_template_backtrace_limit
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_test_coverage;
+#else
+ int x_flag_test_coverage;
+#define flag_test_coverage global_options.x_flag_test_coverage
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_test_forall_temp;
+#else
+ int x_flag_test_forall_temp;
+#define flag_test_forall_temp global_options.x_flag_test_forall_temp
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_thread_jumps;
+#else
+ int x_flag_thread_jumps;
+#define flag_thread_jumps global_options.x_flag_thread_jumps
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_threadsafe_statics;
+#else
+ int x_flag_threadsafe_statics;
+#define flag_threadsafe_statics global_options.x_flag_threadsafe_statics
+#endif
+#ifdef GENERATOR_FILE
+extern int time_report;
+#else
+ int x_time_report;
+#define time_report global_options.x_time_report
+#endif
+#ifdef GENERATOR_FILE
+extern int time_report_details;
+#else
+ int x_time_report_details;
+#define time_report_details global_options.x_time_report_details
+#endif
+#ifdef GENERATOR_FILE
+extern enum tls_model flag_tls_default;
+#else
+ enum tls_model x_flag_tls_default;
+#define flag_tls_default global_options.x_flag_tls_default
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_toplevel_reorder;
+#else
+ int x_flag_toplevel_reorder;
+#define flag_toplevel_reorder global_options.x_flag_toplevel_reorder
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tracer;
+#else
+ int x_flag_tracer;
+#define flag_tracer global_options.x_flag_tracer
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_trampolines;
+#else
+ int x_flag_trampolines;
+#define flag_trampolines global_options.x_flag_trampolines
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_trapping_math;
+#else
+ int x_flag_trapping_math;
+#define flag_trapping_math global_options.x_flag_trapping_math
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_trapv;
+#else
+ int x_flag_trapv;
+#define flag_trapv global_options.x_flag_trapv
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_bit_ccp;
+#else
+ int x_flag_tree_bit_ccp;
+#define flag_tree_bit_ccp global_options.x_flag_tree_bit_ccp
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_builtin_call_dce;
+#else
+ int x_flag_tree_builtin_call_dce;
+#define flag_tree_builtin_call_dce global_options.x_flag_tree_builtin_call_dce
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_ccp;
+#else
+ int x_flag_tree_ccp;
+#define flag_tree_ccp global_options.x_flag_tree_ccp
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_ch;
+#else
+ int x_flag_tree_ch;
+#define flag_tree_ch global_options.x_flag_tree_ch
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_coalesce_vars;
+#else
+ int x_flag_tree_coalesce_vars;
+#define flag_tree_coalesce_vars global_options.x_flag_tree_coalesce_vars
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_copy_prop;
+#else
+ int x_flag_tree_copy_prop;
+#define flag_tree_copy_prop global_options.x_flag_tree_copy_prop
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_cselim;
+#else
+ int x_flag_tree_cselim;
+#define flag_tree_cselim global_options.x_flag_tree_cselim
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_dce;
+#else
+ int x_flag_tree_dce;
+#define flag_tree_dce global_options.x_flag_tree_dce
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_dom;
+#else
+ int x_flag_tree_dom;
+#define flag_tree_dom global_options.x_flag_tree_dom
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_dse;
+#else
+ int x_flag_tree_dse;
+#define flag_tree_dse global_options.x_flag_tree_dse
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_forwprop;
+#else
+ int x_flag_tree_forwprop;
+#define flag_tree_forwprop global_options.x_flag_tree_forwprop
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_fre;
+#else
+ int x_flag_tree_fre;
+#define flag_tree_fre global_options.x_flag_tree_fre
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_loop_distribute_patterns;
+#else
+ int x_flag_tree_loop_distribute_patterns;
+#define flag_tree_loop_distribute_patterns global_options.x_flag_tree_loop_distribute_patterns
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_loop_distribution;
+#else
+ int x_flag_tree_loop_distribution;
+#define flag_tree_loop_distribution global_options.x_flag_tree_loop_distribution
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_loop_if_convert;
+#else
+ int x_flag_tree_loop_if_convert;
+#define flag_tree_loop_if_convert global_options.x_flag_tree_loop_if_convert
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_loop_im;
+#else
+ int x_flag_tree_loop_im;
+#define flag_tree_loop_im global_options.x_flag_tree_loop_im
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_loop_ivcanon;
+#else
+ int x_flag_tree_loop_ivcanon;
+#define flag_tree_loop_ivcanon global_options.x_flag_tree_loop_ivcanon
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_loop_optimize;
+#else
+ int x_flag_tree_loop_optimize;
+#define flag_tree_loop_optimize global_options.x_flag_tree_loop_optimize
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_loop_vectorize;
+#else
+ int x_flag_tree_loop_vectorize;
+#define flag_tree_loop_vectorize global_options.x_flag_tree_loop_vectorize
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_live_range_split;
+#else
+ int x_flag_tree_live_range_split;
+#define flag_tree_live_range_split global_options.x_flag_tree_live_range_split
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_parallelize_loops;
+#else
+ int x_flag_tree_parallelize_loops;
+#define flag_tree_parallelize_loops global_options.x_flag_tree_parallelize_loops
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_partial_pre;
+#else
+ int x_flag_tree_partial_pre;
+#define flag_tree_partial_pre global_options.x_flag_tree_partial_pre
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_phiprop;
+#else
+ int x_flag_tree_phiprop;
+#define flag_tree_phiprop global_options.x_flag_tree_phiprop
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_pre;
+#else
+ int x_flag_tree_pre;
+#define flag_tree_pre global_options.x_flag_tree_pre
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_pta;
+#else
+ int x_flag_tree_pta;
+#define flag_tree_pta global_options.x_flag_tree_pta
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_reassoc;
+#else
+ int x_flag_tree_reassoc;
+#define flag_tree_reassoc global_options.x_flag_tree_reassoc
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_scev_cprop;
+#else
+ int x_flag_tree_scev_cprop;
+#define flag_tree_scev_cprop global_options.x_flag_tree_scev_cprop
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_sink;
+#else
+ int x_flag_tree_sink;
+#define flag_tree_sink global_options.x_flag_tree_sink
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_slp_vectorize;
+#else
+ int x_flag_tree_slp_vectorize;
+#define flag_tree_slp_vectorize global_options.x_flag_tree_slp_vectorize
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_slsr;
+#else
+ int x_flag_tree_slsr;
+#define flag_tree_slsr global_options.x_flag_tree_slsr
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_sra;
+#else
+ int x_flag_tree_sra;
+#define flag_tree_sra global_options.x_flag_tree_sra
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_switch_conversion;
+#else
+ int x_flag_tree_switch_conversion;
+#define flag_tree_switch_conversion global_options.x_flag_tree_switch_conversion
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_tail_merge;
+#else
+ int x_flag_tree_tail_merge;
+#define flag_tree_tail_merge global_options.x_flag_tree_tail_merge
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_ter;
+#else
+ int x_flag_tree_ter;
+#define flag_tree_ter global_options.x_flag_tree_ter
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_vectorize;
+#else
+ int x_flag_tree_vectorize;
+#define flag_tree_vectorize global_options.x_flag_tree_vectorize
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_tree_vrp;
+#else
+ int x_flag_tree_vrp;
+#define flag_tree_vrp global_options.x_flag_tree_vrp
+#endif
+#ifdef GENERATOR_FILE
+extern enum auto_init_type flag_auto_var_init;
+#else
+ enum auto_init_type x_flag_auto_var_init;
+#define flag_auto_var_init global_options.x_flag_auto_var_init
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_unconstrained_commons;
+#else
+ int x_flag_unconstrained_commons;
+#define flag_unconstrained_commons global_options.x_flag_unconstrained_commons
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_underscoring;
+#else
+ int x_flag_underscoring;
+#define flag_underscoring global_options.x_flag_underscoring
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_unit_at_a_time;
+#else
+ int x_flag_unit_at_a_time;
+#define flag_unit_at_a_time global_options.x_flag_unit_at_a_time
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_unreachable_traps;
+#else
+ int x_flag_unreachable_traps;
+#define flag_unreachable_traps global_options.x_flag_unreachable_traps
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_unroll_all_loops;
+#else
+ int x_flag_unroll_all_loops;
+#define flag_unroll_all_loops global_options.x_flag_unroll_all_loops
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_cunroll_grow_size;
+#else
+ int x_flag_cunroll_grow_size;
+#define flag_cunroll_grow_size global_options.x_flag_cunroll_grow_size
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_unroll_loops;
+#else
+ int x_flag_unroll_loops;
+#define flag_unroll_loops global_options.x_flag_unroll_loops
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_unsafe_math_optimizations;
+#else
+ int x_flag_unsafe_math_optimizations;
+#define flag_unsafe_math_optimizations global_options.x_flag_unsafe_math_optimizations
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_unswitch_loops;
+#else
+ int x_flag_unswitch_loops;
+#define flag_unswitch_loops global_options.x_flag_unswitch_loops
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_unwind_tables;
+#else
+ int x_flag_unwind_tables;
+#define flag_unwind_tables global_options.x_flag_unwind_tables
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_use_cxa_atexit;
+#else
+ int x_flag_use_cxa_atexit;
+#define flag_use_cxa_atexit global_options.x_flag_use_cxa_atexit
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_use_cxa_get_exception_ptr;
+#else
+ int x_flag_use_cxa_get_exception_ptr;
+#define flag_use_cxa_get_exception_ptr global_options.x_flag_use_cxa_get_exception_ptr
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_use_linker_plugin;
+#else
+ int x_flag_use_linker_plugin;
+#define flag_use_linker_plugin global_options.x_flag_use_linker_plugin
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_var_tracking;
+#else
+ int x_flag_var_tracking;
+#define flag_var_tracking global_options.x_flag_var_tracking
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_var_tracking_assignments;
+#else
+ int x_flag_var_tracking_assignments;
+#define flag_var_tracking_assignments global_options.x_flag_var_tracking_assignments
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_var_tracking_assignments_toggle;
+#else
+ int x_flag_var_tracking_assignments_toggle;
+#define flag_var_tracking_assignments_toggle global_options.x_flag_var_tracking_assignments_toggle
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_var_tracking_uninit;
+#else
+ int x_flag_var_tracking_uninit;
+#define flag_var_tracking_uninit global_options.x_flag_var_tracking_uninit
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_variable_expansion_in_unroller;
+#else
+ int x_flag_variable_expansion_in_unroller;
+#define flag_variable_expansion_in_unroller global_options.x_flag_variable_expansion_in_unroller
+#endif
+#ifdef GENERATOR_FILE
+extern enum vect_cost_model flag_vect_cost_model;
+#else
+ enum vect_cost_model x_flag_vect_cost_model;
+#define flag_vect_cost_model global_options.x_flag_vect_cost_model
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_verbose_asm;
+#else
+ int x_flag_verbose_asm;
+#define flag_verbose_asm global_options.x_flag_verbose_asm
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_version_loops_for_strides;
+#else
+ int x_flag_version_loops_for_strides;
+#define flag_version_loops_for_strides global_options.x_flag_version_loops_for_strides
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_visibility_ms_compat;
+#else
+ int x_flag_visibility_ms_compat;
+#define flag_visibility_ms_compat global_options.x_flag_visibility_ms_compat
+#endif
+#ifdef GENERATOR_FILE
+extern enum symbol_visibility default_visibility;
+#else
+ enum symbol_visibility x_default_visibility;
+#define default_visibility global_options.x_default_visibility
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_value_profile_transformations;
+#else
+ int x_flag_value_profile_transformations;
+#define flag_value_profile_transformations global_options.x_flag_value_profile_transformations
+#endif
+#ifdef GENERATOR_FILE
+extern enum vtv_priority flag_vtable_verify;
+#else
+ enum vtv_priority x_flag_vtable_verify;
+#define flag_vtable_verify global_options.x_flag_vtable_verify
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_vtv_counts;
+#else
+ int x_flag_vtv_counts;
+#define flag_vtv_counts global_options.x_flag_vtv_counts
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_vtv_debug;
+#else
+ int x_flag_vtv_debug;
+#define flag_vtv_debug global_options.x_flag_vtv_debug
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_weak;
+#else
+ int x_flag_weak;
+#define flag_weak global_options.x_flag_weak
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_weak_templates;
+#else
+ int x_flag_weak_templates;
+#define flag_weak_templates global_options.x_flag_weak_templates
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_web;
+#else
+ int x_flag_web;
+#define flag_web global_options.x_flag_web
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_whole_program;
+#else
+ int x_flag_whole_program;
+#define flag_whole_program global_options.x_flag_whole_program
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_working_directory;
+#else
+ int x_flag_working_directory;
+#define flag_working_directory global_options.x_flag_working_directory
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_wpa;
+#else
+ const char *x_flag_wpa;
+#define flag_wpa global_options.x_flag_wpa
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_wrapv;
+#else
+ int x_flag_wrapv;
+#define flag_wrapv global_options.x_flag_wrapv
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_wrapv_pointer;
+#else
+ int x_flag_wrapv_pointer;
+#define flag_wrapv_pointer global_options.x_flag_wrapv_pointer
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_zero_initialized_in_bss;
+#else
+ int x_flag_zero_initialized_in_bss;
+#define flag_zero_initialized_in_bss global_options.x_flag_zero_initialized_in_bss
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_zero_link;
+#else
+ int x_flag_zero_link;
+#define flag_zero_link global_options.x_flag_zero_link
+#endif
+#ifdef GENERATOR_FILE
+extern int dwarf2out_as_loc_support;
+#else
+ int x_dwarf2out_as_loc_support;
+#define dwarf2out_as_loc_support global_options.x_dwarf2out_as_loc_support
+#endif
+#ifdef GENERATOR_FILE
+extern int dwarf2out_as_locview_support;
+#else
+ int x_dwarf2out_as_locview_support;
+#define dwarf2out_as_locview_support global_options.x_dwarf2out_as_locview_support
+#endif
+#ifdef GENERATOR_FILE
+extern int debug_column_info;
+#else
+ int x_debug_column_info;
+#define debug_column_info global_options.x_debug_column_info
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_describe_dies;
+#else
+ int x_flag_describe_dies;
+#define flag_describe_dies global_options.x_flag_describe_dies
+#endif
+#ifdef GENERATOR_FILE
+extern int dwarf_version;
+#else
+ int x_dwarf_version;
+#define dwarf_version global_options.x_dwarf_version
+#endif
+#ifdef GENERATOR_FILE
+extern int dwarf_offset_size;
+#else
+ int x_dwarf_offset_size;
+#define dwarf_offset_size global_options.x_dwarf_offset_size
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_gen_declaration;
+#else
+ int x_flag_gen_declaration;
+#define flag_gen_declaration global_options.x_flag_gen_declaration
+#endif
+#ifdef GENERATOR_FILE
+extern int debug_generate_pub_sections;
+#else
+ int x_debug_generate_pub_sections;
+#define debug_generate_pub_sections global_options.x_debug_generate_pub_sections
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_gimple_stats;
+#else
+ int x_flag_lto_gimple_stats;
+#define flag_lto_gimple_stats global_options.x_flag_lto_gimple_stats
+#endif
+#ifdef GENERATOR_FILE
+extern int debug_inline_points;
+#else
+ int x_debug_inline_points;
+#define debug_inline_points global_options.x_debug_inline_points
+#endif
+#ifdef GENERATOR_FILE
+extern int debug_internal_reset_location_views;
+#else
+ int x_debug_internal_reset_location_views;
+#define debug_internal_reset_location_views global_options.x_debug_internal_reset_location_views
+#endif
+#ifdef GENERATOR_FILE
+extern int dwarf_record_gcc_switches;
+#else
+ int x_dwarf_record_gcc_switches;
+#define dwarf_record_gcc_switches global_options.x_dwarf_record_gcc_switches
+#endif
+#ifdef GENERATOR_FILE
+extern int dwarf_split_debug_info;
+#else
+ int x_dwarf_split_debug_info;
+#define dwarf_split_debug_info global_options.x_dwarf_split_debug_info
+#endif
+#ifdef GENERATOR_FILE
+extern int debug_nonbind_markers_p;
+#else
+ int x_debug_nonbind_markers_p;
+#define debug_nonbind_markers_p global_options.x_debug_nonbind_markers_p
+#endif
+#ifdef GENERATOR_FILE
+extern int dwarf_strict;
+#else
+ int x_dwarf_strict;
+#define dwarf_strict global_options.x_dwarf_strict
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_gtoggle;
+#else
+ int x_flag_gtoggle;
+#define flag_gtoggle global_options.x_flag_gtoggle
+#endif
+#ifdef GENERATOR_FILE
+extern int debug_variable_location_views;
+#else
+ int x_debug_variable_location_views;
+#define debug_variable_location_views global_options.x_debug_variable_location_views
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_dump_tool_help;
+#else
+ int x_flag_lto_dump_tool_help;
+#define flag_lto_dump_tool_help global_options.x_flag_lto_dump_tool_help
+#endif
+#ifdef GENERATOR_FILE
+extern const char *imultiarch;
+#else
+ const char *x_imultiarch;
+#define imultiarch global_options.x_imultiarch
+#endif
+#ifdef GENERATOR_FILE
+extern const char *plugindir_string;
+#else
+ const char *x_plugindir_string;
+#define plugindir_string global_options.x_plugindir_string
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_dump_list;
+#else
+ int x_flag_lto_dump_list;
+#define flag_lto_dump_list global_options.x_flag_lto_dump_list
+#endif
+#ifdef GENERATOR_FILE
+extern enum arm_abi_type arm_abi;
+#else
+ enum arm_abi_type x_arm_abi;
+#define arm_abi global_options.x_arm_abi
+#endif
+#ifdef GENERATOR_FILE
+extern const char *arm_arch_string;
+#else
+ const char *x_arm_arch_string;
+#define arm_arch_string global_options.x_arm_arch_string
+#endif
+#ifdef GENERATOR_FILE
+extern int inline_asm_unified;
+#else
+ int x_inline_asm_unified;
+#define inline_asm_unified global_options.x_inline_asm_unified
+#endif
+#ifdef GENERATOR_FILE
+extern int arm_branch_cost;
+#else
+ int x_arm_branch_cost;
+#define arm_branch_cost global_options.x_arm_branch_cost
+#endif
+#ifdef GENERATOR_FILE
+extern const char *arm_branch_protection_string;
+#else
+ const char *x_arm_branch_protection_string;
+#define arm_branch_protection_string global_options.x_arm_branch_protection_string
+#endif
+#ifdef GENERATOR_FILE
+extern int use_cmse;
+#else
+ int x_use_cmse;
+#define use_cmse global_options.x_use_cmse
+#endif
+#ifdef GENERATOR_FILE
+extern const char *arm_cpu_string;
+#else
+ const char *x_arm_cpu_string;
+#define arm_cpu_string global_options.x_arm_cpu_string
+#endif
+#ifdef GENERATOR_FILE
+extern int fix_vlldm;
+#else
+ int x_fix_vlldm;
+#define fix_vlldm global_options.x_fix_vlldm
+#endif
+#ifdef GENERATOR_FILE
+extern int fix_aes_erratum_1742098;
+#else
+ int x_fix_aes_erratum_1742098;
+#define fix_aes_erratum_1742098 global_options.x_fix_aes_erratum_1742098
+#endif
+#ifdef GENERATOR_FILE
+extern int fix_cm3_ldrd;
+#else
+ int x_fix_cm3_ldrd;
+#define fix_cm3_ldrd global_options.x_fix_cm3_ldrd
+#endif
+#ifdef GENERATOR_FILE
+extern int TARGET_FLIP_THUMB;
+#else
+ int x_TARGET_FLIP_THUMB;
+#define TARGET_FLIP_THUMB global_options.x_TARGET_FLIP_THUMB
+#endif
+#ifdef GENERATOR_FILE
+extern enum float_abi_type arm_float_abi;
+#else
+ enum float_abi_type x_arm_float_abi;
+#define arm_float_abi global_options.x_arm_float_abi
+#endif
+#ifdef GENERATOR_FILE
+extern enum arm_fp16_format_type arm_fp16_format;
+#else
+ enum arm_fp16_format_type x_arm_fp16_format;
+#define arm_fp16_format global_options.x_arm_fp16_format
+#endif
+#ifdef GENERATOR_FILE
+extern enum fpu_type arm_fpu_index;
+#else
+ enum fpu_type x_arm_fpu_index;
+#define arm_fpu_index global_options.x_arm_fpu_index
+#endif
+#ifdef GENERATOR_FILE
+extern int arm_pic_data_is_text_relative;
+#else
+ int x_arm_pic_data_is_text_relative;
+#define arm_pic_data_is_text_relative global_options.x_arm_pic_data_is_text_relative
+#endif
+#ifdef GENERATOR_FILE
+extern const char *arm_pic_register_string;
+#else
+ const char *x_arm_pic_register_string;
+#define arm_pic_register_string global_options.x_arm_pic_register_string
+#endif
+#ifdef GENERATOR_FILE
+extern int print_tune_info;
+#else
+ int x_print_tune_info;
+#define print_tune_info global_options.x_print_tune_info
+#endif
+#ifdef GENERATOR_FILE
+extern int target_pure_code;
+#else
+ int x_target_pure_code;
+#define target_pure_code global_options.x_target_pure_code
+#endif
+#ifdef GENERATOR_FILE
+extern int arm_restrict_it;
+#else
+ int x_arm_restrict_it;
+#define arm_restrict_it global_options.x_arm_restrict_it
+#endif
+#ifdef GENERATOR_FILE
+extern int target_slow_flash_data;
+#else
+ int x_target_slow_flash_data;
+#define target_slow_flash_data global_options.x_target_slow_flash_data
+#endif
+#ifdef GENERATOR_FILE
+extern const char *arm_stack_protector_guard_offset_str;
+#else
+ const char *x_arm_stack_protector_guard_offset_str;
+#define arm_stack_protector_guard_offset_str global_options.x_arm_stack_protector_guard_offset_str
+#endif
+#ifdef GENERATOR_FILE
+extern enum stack_protector_guard arm_stack_protector_guard;
+#else
+ enum stack_protector_guard x_arm_stack_protector_guard;
+#define arm_stack_protector_guard global_options.x_arm_stack_protector_guard
+#endif
+#ifdef GENERATOR_FILE
+extern int arm_structure_size_boundary;
+#else
+ int x_arm_structure_size_boundary;
+#define arm_structure_size_boundary global_options.x_arm_structure_size_boundary
+#endif
+#ifdef GENERATOR_FILE
+extern enum arm_tls_type target_tls_dialect;
+#else
+ enum arm_tls_type x_target_tls_dialect;
+#define target_tls_dialect global_options.x_target_tls_dialect
+#endif
+#ifdef GENERATOR_FILE
+extern enum arm_tp_type target_thread_pointer;
+#else
+ enum arm_tp_type x_target_thread_pointer;
+#define target_thread_pointer global_options.x_target_thread_pointer
+#endif
+#ifdef GENERATOR_FILE
+extern const char *arm_tune_string;
+#else
+ const char *x_arm_tune_string;
+#define arm_tune_string global_options.x_arm_tune_string
+#endif
+#ifdef GENERATOR_FILE
+extern int unaligned_access;
+#else
+ int x_unaligned_access;
+#define unaligned_access global_options.x_unaligned_access
+#endif
+#ifdef GENERATOR_FILE
+extern int arm_verbose_cost;
+#else
+ int x_arm_verbose_cost;
+#define arm_verbose_cost global_options.x_arm_verbose_cost
+#endif
+#ifdef GENERATOR_FILE
+extern int target_word_relocations;
+#else
+ int x_target_word_relocations;
+#define target_word_relocations global_options.x_target_word_relocations
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_name_sort;
+#else
+ int x_flag_lto_name_sort;
+#define flag_lto_name_sort global_options.x_flag_lto_name_sort
+#endif
+#ifdef GENERATOR_FILE
+extern const char *asm_file_name;
+#else
+ const char *x_asm_file_name;
+#define asm_file_name global_options.x_asm_file_name
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_dump_objects;
+#else
+ int x_flag_lto_dump_objects;
+#define flag_lto_dump_objects global_options.x_flag_lto_dump_objects
+#endif
+#ifdef GENERATOR_FILE
+extern int pass_exit_codes;
+#else
+ int x_pass_exit_codes;
+#define pass_exit_codes global_options.x_pass_exit_codes
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_pedantic_errors;
+#else
+ int x_flag_pedantic_errors;
+#define flag_pedantic_errors global_options.x_flag_pedantic_errors
+#endif
+#ifdef GENERATOR_FILE
+extern int use_pipes;
+#else
+ int x_use_pipes;
+#define use_pipes global_options.x_use_pipes
+#endif
+#ifdef GENERATOR_FILE
+extern const char *print_file_name;
+#else
+ const char *x_print_file_name;
+#define print_file_name global_options.x_print_file_name
+#endif
+#ifdef GENERATOR_FILE
+extern int print_multi_directory;
+#else
+ int x_print_multi_directory;
+#define print_multi_directory global_options.x_print_multi_directory
+#endif
+#ifdef GENERATOR_FILE
+extern int print_multi_lib;
+#else
+ int x_print_multi_lib;
+#define print_multi_lib global_options.x_print_multi_lib
+#endif
+#ifdef GENERATOR_FILE
+extern int print_multi_os_directory;
+#else
+ int x_print_multi_os_directory;
+#define print_multi_os_directory global_options.x_print_multi_os_directory
+#endif
+#ifdef GENERATOR_FILE
+extern int print_multiarch;
+#else
+ int x_print_multiarch;
+#define print_multiarch global_options.x_print_multiarch
+#endif
+#ifdef GENERATOR_FILE
+extern const char *print_prog_name;
+#else
+ const char *x_print_prog_name;
+#define print_prog_name global_options.x_print_prog_name
+#endif
+#ifdef GENERATOR_FILE
+extern int print_search_dirs;
+#else
+ int x_print_search_dirs;
+#define print_search_dirs global_options.x_print_search_dirs
+#endif
+#ifdef GENERATOR_FILE
+extern int print_sysroot;
+#else
+ int x_print_sysroot;
+#define print_sysroot global_options.x_print_sysroot
+#endif
+#ifdef GENERATOR_FILE
+extern int print_sysroot_headers_suffix;
+#else
+ int x_print_sysroot_headers_suffix;
+#define print_sysroot_headers_suffix global_options.x_print_sysroot_headers_suffix
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_print_value;
+#else
+ int x_flag_lto_print_value;
+#define flag_lto_print_value global_options.x_flag_lto_print_value
+#endif
+#ifdef GENERATOR_FILE
+extern int quiet_flag;
+#else
+ int x_quiet_flag;
+#define quiet_flag global_options.x_quiet_flag
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_reverse_sort;
+#else
+ int x_flag_lto_reverse_sort;
+#define flag_lto_reverse_sort global_options.x_flag_lto_reverse_sort
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_size_sort;
+#else
+ int x_flag_lto_size_sort;
+#define flag_lto_size_sort global_options.x_flag_lto_size_sort
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_stdlib_kind;
+#else
+ int x_flag_stdlib_kind;
+#define flag_stdlib_kind global_options.x_flag_stdlib_kind
+#endif
+#ifdef GENERATOR_FILE
+extern const char *flag_lto_dump_symbol;
+#else
+ const char *x_flag_lto_dump_symbol;
+#define flag_lto_dump_symbol global_options.x_flag_lto_dump_symbol
+#endif
+#ifdef GENERATOR_FILE
+extern int report_times;
+#else
+ int x_report_times;
+#define report_times global_options.x_report_times
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_tree_stats;
+#else
+ int x_flag_lto_tree_stats;
+#define flag_lto_tree_stats global_options.x_flag_lto_tree_stats
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_lto_dump_type_stats;
+#else
+ int x_flag_lto_dump_type_stats;
+#define flag_lto_dump_type_stats global_options.x_flag_lto_dump_type_stats
+#endif
+#ifdef GENERATOR_FILE
+extern int flag_undef;
+#else
+ int x_flag_undef;
+#define flag_undef global_options.x_flag_undef
+#endif
+#ifdef GENERATOR_FILE
+extern int verbose_flag;
+#else
+ int x_verbose_flag;
+#define verbose_flag global_options.x_verbose_flag
+#endif
+#ifdef GENERATOR_FILE
+extern int version_flag;
+#else
+ int x_version_flag;
+#define version_flag global_options.x_version_flag
+#endif
+#ifdef GENERATOR_FILE
+extern int inhibit_warnings;
+#else
+ int x_inhibit_warnings;
+#define inhibit_warnings global_options.x_inhibit_warnings
+#endif
+#ifdef GENERATOR_FILE
+extern const char *wrapper_string;
+#else
+ const char *x_wrapper_string;
+#define wrapper_string global_options.x_wrapper_string
+#endif
+#ifndef GENERATOR_FILE
+ const char *x_VAR_mlibarch_;
+#define x_VAR_mlibarch_ do_not_use
+#endif
+#ifndef GENERATOR_FILE
+ int x_VAR_mneon_for_64bits;
+#define x_VAR_mneon_for_64bits do_not_use
+#endif
+#ifndef GENERATOR_FILE
+ bool frontend_set_flag_associative_math;
+#endif
+#ifndef GENERATOR_FILE
+ bool frontend_set_flag_cx_limited_range;
+#endif
+#ifndef GENERATOR_FILE
+ bool frontend_set_flag_excess_precision;
+#endif
+#ifndef GENERATOR_FILE
+ bool frontend_set_flag_finite_math_only;
+#endif
+#ifndef GENERATOR_FILE
+ bool frontend_set_flag_errno_math;
+#endif
+#ifndef GENERATOR_FILE
+ bool frontend_set_flag_reciprocal_math;
+#endif
+#ifndef GENERATOR_FILE
+ bool frontend_set_flag_rounding_math;
+#endif
+#ifndef GENERATOR_FILE
+ bool frontend_set_flag_signaling_nans;
+#endif
+#ifndef GENERATOR_FILE
+ bool frontend_set_flag_signed_zeros;
+#endif
+#ifndef GENERATOR_FILE
+ bool frontend_set_flag_trapping_math;
+#endif
+#ifndef GENERATOR_FILE
+ bool frontend_set_flag_unsafe_math_optimizations;
+#endif
+#ifndef GENERATOR_FILE
+};
+extern struct gcc_options global_options;
+extern const struct gcc_options global_options_init;
+extern struct gcc_options global_options_set;
+#define target_flags_explicit global_options_set.x_target_flags
+#endif
+#endif
+
+#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+
+/* Structure to save/restore optimization and target specific options. */
+struct GTY(()) cl_optimization
+{
+ const char *x_str_align_functions;
+ const char *x_str_align_jumps;
+ const char *x_str_align_labels;
+ const char *x_str_align_loops;
+ const char *x_flag_patchable_function_entry;
+ int x_param_align_loop_iterations;
+ int x_param_align_threshold;
+ int x_param_asan_protect_allocas;
+ int x_param_asan_instrument_reads;
+ int x_param_asan_instrument_writes;
+ int x_param_asan_instrumentation_with_call_threshold;
+ int x_param_asan_kernel_mem_intrinsic_prefix;
+ int x_param_asan_memintrin;
+ int x_param_asan_stack;
+ int x_param_asan_use_after_return;
+ int x_param_avg_loop_niter;
+ int x_param_avoid_fma_max_bits;
+ int x_param_builtin_expect_probability;
+ int x_param_builtin_string_cmp_inline_length;
+ int x_param_case_values_threshold;
+ int x_param_comdat_sharing_probability;
+ int x_param_construct_interfere_size;
+ int x_param_destruct_interfere_size;
+ int x_param_dse_max_alias_queries_per_store;
+ int x_param_dse_max_object_size;
+ int x_param_early_inlining_insns;
+ int x_param_evrp_sparse_threshold;
+ int x_param_evrp_switch_limit;
+ int x_param_fsm_scale_path_stmts;
+ int x_param_gcse_after_reload_critical_fraction;
+ int x_param_gcse_after_reload_partial_fraction;
+ int x_param_gcse_cost_distance_ratio;
+ int x_param_gcse_unrestricted_cost;
+ int x_param_graphite_max_arrays_per_scop;
+ int x_param_graphite_max_nb_scop_params;
+ int x_param_hwasan_instrument_allocas;
+ int x_param_hwasan_instrument_mem_intrinsics;
+ int x_param_hwasan_instrument_reads;
+ int x_param_hwasan_instrument_stack;
+ int x_param_hwasan_instrument_writes;
+ int x_param_hwasan_random_frame_tag;
+ int x_param_inline_heuristics_hint_percent;
+ int x_param_inline_min_speedup;
+ int x_param_inline_unit_growth;
+ int x_param_ipa_cp_eval_threshold;
+ int x_param_ipa_cp_large_unit_insns;
+ int x_param_ipa_cp_loop_hint_bonus;
+ int x_param_ipa_cp_max_recursive_depth;
+ int x_param_ipa_cp_min_recursive_probability;
+ int x_param_ipa_cp_profile_count_base;
+ int x_param_ipa_cp_recursion_penalty;
+ int x_param_ipa_cp_recursive_freq_factor;
+ int x_param_ipa_cp_single_call_penalty;
+ int x_param_ipa_cp_unit_growth;
+ int x_param_ipa_cp_value_list_size;
+ int x_param_ipa_jump_function_lookups;
+ int x_param_ipa_max_aa_steps;
+ int x_param_ipa_max_agg_items;
+ int x_param_ipa_max_loop_predicates;
+ int x_param_ipa_max_param_expr_ops;
+ int x_param_ipa_max_switch_predicate_bounds;
+ int x_param_ipa_sra_deref_prob_threshold;
+ int x_param_ipa_sra_max_replacements;
+ int x_param_ipa_sra_ptr_growth_factor;
+ int x_param_ipa_sra_ptrwrap_growth_factor;
+ int x_param_ira_consider_dup_in_all_alts;
+ int x_param_ira_loop_reserved_regs;
+ int x_param_ira_max_conflict_table_size;
+ int x_param_ira_max_loops_num;
+ int x_param_ira_simple_lra_insn_threshold;
+ int x_param_iv_always_prune_cand_set_bound;
+ int x_param_iv_consider_all_candidates_bound;
+ int x_param_iv_max_considered_uses;
+ int x_param_jump_table_max_growth_ratio_for_size;
+ int x_param_jump_table_max_growth_ratio_for_speed;
+ int x_param_l1_cache_line_size;
+ int x_param_l1_cache_size;
+ int x_param_l2_cache_size;
+ int x_param_large_function_growth;
+ int x_param_large_function_insns;
+ int x_param_stack_frame_growth;
+ int x_param_large_stack_frame;
+ int x_param_large_unit_insns;
+ int x_param_lim_expensive;
+ int x_param_loop_block_tile_size;
+ int x_param_loop_interchange_max_num_stmts;
+ int x_param_loop_interchange_stride_ratio;
+ int x_param_loop_invariant_max_bbs_in_loop;
+ int x_param_loop_max_datarefs_for_datadeps;
+ int x_param_loop_versioning_max_inner_insns;
+ int x_param_loop_versioning_max_outer_insns;
+ int x_param_lra_inheritance_ebb_probability_cutoff;
+ int x_param_lra_max_considered_reload_pseudos;
+ int x_param_max_average_unrolled_insns;
+ int x_param_max_combine_insns;
+ int x_param_max_unroll_iterations;
+ int x_param_max_completely_peel_times;
+ int x_param_max_completely_peeled_insns;
+ int x_param_max_crossjump_edges;
+ int x_param_max_cse_insns;
+ int x_param_max_cse_path_length;
+ int x_param_max_cselib_memory_locations;
+ int x_param_max_debug_marker_count;
+ int x_param_max_delay_slot_insn_search;
+ int x_param_max_delay_slot_live_search;
+ int x_param_max_dse_active_local_stores;
+ int x_param_early_inliner_max_iterations;
+ int x_param_max_find_base_term_values;
+ int x_param_max_fsm_thread_path_insns;
+ int x_param_max_gcse_insertion_ratio;
+ int x_param_max_gcse_memory;
+ int x_param_max_goto_duplication_insns;
+ int x_param_max_grow_copy_bb_insns;
+ int x_param_max_hoist_depth;
+ int x_param_inline_functions_called_once_insns;
+ int x_param_inline_functions_called_once_loop_depth;
+ int x_param_max_inline_insns_auto;
+ int x_param_max_inline_insns_recursive_auto;
+ int x_param_max_inline_insns_recursive;
+ int x_param_max_inline_insns_single;
+ int x_param_max_inline_insns_size;
+ int x_param_max_inline_insns_small;
+ int x_param_max_inline_recursive_depth_auto;
+ int x_param_max_inline_recursive_depth;
+ int x_param_max_isl_operations;
+ int x_param_max_iterations_computation_cost;
+ int x_param_max_iterations_to_track;
+ int x_param_max_jump_thread_duplication_stmts;
+ int x_param_max_jump_thread_paths;
+ int x_param_max_last_value_rtl;
+ int x_param_max_loop_header_insns;
+ int x_param_max_modulo_backtrack_attempts;
+ int x_param_max_partial_antic_length;
+ int x_param_max_peel_branches;
+ int x_param_max_peel_times;
+ int x_param_max_peeled_insns;
+ int x_param_max_pending_list_length;
+ int x_param_max_pipeline_region_blocks;
+ int x_param_max_pipeline_region_insns;
+ int x_param_max_pow_sqrt_depth;
+ int x_param_max_predicted_iterations;
+ int x_param_max_reload_search_insns;
+ int x_param_max_rtl_if_conversion_insns;
+ int x_param_max_rtl_if_conversion_predictable_cost;
+ int x_param_max_rtl_if_conversion_unpredictable_cost;
+ int x_param_max_sched_extend_regions_iters;
+ int x_param_max_sched_insn_conflict_delay;
+ int x_param_max_sched_ready_insns;
+ int x_param_max_sched_region_blocks;
+ int x_param_max_sched_region_insns;
+ int x_param_max_slsr_candidate_scan;
+ int x_param_max_speculative_devirt_maydefs;
+ int x_param_max_stores_to_merge;
+ int x_param_max_stores_to_sink;
+ int x_param_max_tail_merge_comparisons;
+ int x_param_max_tail_merge_iterations;
+ int x_param_max_tracked_strlens;
+ int x_param_max_tree_if_conversion_phi_args;
+ int x_param_max_unroll_times;
+ int x_param_max_unrolled_insns;
+ int x_param_max_unswitch_depth;
+ int x_param_max_unswitch_insns;
+ int x_param_max_variable_expansions;
+ int x_param_max_vartrack_expr_depth;
+ int x_param_max_vartrack_reverse_op_size;
+ int x_param_max_vartrack_size;
+ int x_param_min_crossjump_insns;
+ int x_param_min_inline_recursive_probability;
+ int x_param_min_insn_to_prefetch_ratio;
+ int x_param_min_loop_cond_split_prob;
+ int x_param_min_pagesize;
+ int x_param_min_size_for_stack_sharing;
+ int x_param_min_spec_prob;
+ int x_param_min_vect_loop_bound;
+ int x_param_modref_max_accesses;
+ int x_param_modref_max_adjustments;
+ int x_param_modref_max_bases;
+ int x_param_modref_max_depth;
+ int x_param_modref_max_escape_points;
+ int x_param_modref_max_refs;
+ int x_param_modref_max_tests;
+ int x_param_parloops_chunk_size;
+ int x_param_parloops_min_per_thread;
+ int x_param_parloops_schedule;
+ int x_param_partial_inlining_entry_probability;
+ int x_param_predictable_branch_outcome;
+ int x_param_prefetch_dynamic_strides;
+ int x_param_prefetch_latency;
+ int x_param_prefetch_min_insn_to_mem_ratio;
+ int x_param_prefetch_minimum_stride;
+ int x_param_ranger_logical_depth;
+ int x_param_ranger_recompute_depth;
+ int x_param_relation_block_limit;
+ int x_param_rpo_vn_max_loop_depth;
+ int x_param_sccvn_max_alias_queries_per_access;
+ int x_param_scev_max_expr_complexity;
+ int x_param_scev_max_expr_size;
+ int x_param_sched_autopref_queue_depth;
+ int x_param_sched_mem_true_dep_cost;
+ int x_param_sched_pressure_algorithm;
+ int x_param_sched_spec_prob_cutoff;
+ int x_param_sched_state_edge_prob_cutoff;
+ int x_param_selsched_insns_to_rename;
+ int x_param_selsched_max_lookahead;
+ int x_param_selsched_max_sched_times;
+ int x_param_simultaneous_prefetches;
+ int x_param_sink_frequency_threshold;
+ int x_param_sms_dfa_history;
+ int x_param_sms_loop_average_count_threshold;
+ int x_param_sms_max_ii_factor;
+ int x_param_sms_min_sc;
+ int x_param_sra_max_propagations;
+ int x_param_sra_max_scalarization_size_size;
+ int x_param_sra_max_scalarization_size_speed;
+ int x_param_ssa_name_def_chain_limit;
+ int x_param_ssp_buffer_size;
+ int x_param_stack_clash_protection_guard_size;
+ int x_param_stack_clash_protection_probe_interval;
+ int x_param_store_merging_allow_unaligned;
+ int x_param_store_merging_max_size;
+ int x_param_switch_conversion_branch_ratio;
+ int x_param_tm_max_aggregate_size;
+ int x_param_tracer_dynamic_coverage_feedback;
+ int x_param_tracer_dynamic_coverage;
+ int x_param_tracer_max_code_growth;
+ int x_param_tracer_min_branch_probability_feedback;
+ int x_param_tracer_min_branch_probability;
+ int x_param_tracer_min_branch_ratio;
+ int x_param_tree_reassoc_width;
+ int x_param_uninit_control_dep_attempts;
+ int x_param_uninlined_function_insns;
+ int x_param_uninlined_function_time;
+ int x_param_uninlined_function_thunk_insns;
+ int x_param_uninlined_function_thunk_time;
+ int x_param_unlikely_bb_count_fraction;
+ int x_param_unroll_jam_max_unroll;
+ int x_param_unroll_jam_min_percent;
+ int x_param_use_after_scope_direct_emission_threshold;
+ int x_param_vect_epilogues_nomask;
+ int x_param_vect_induction_float;
+ int x_param_vect_inner_loop_cost_factor;
+ int x_param_vect_max_layout_candidates;
+ int x_param_vect_max_peeling_for_alignment;
+ int x_param_vect_max_version_for_alias_checks;
+ int x_param_vect_max_version_for_alignment_checks;
+ int x_param_vect_partial_vector_usage;
+ int x_flag_openmp_target_simd_clone;
+ int x_flag_sched_stalled_insns;
+ int x_flag_sched_stalled_insns_dep;
+ int x_flag_tree_parallelize_loops;
+ enum ranger_debug x_param_ranger_debug;
+ enum threader_debug x_param_threader_debug;
+ enum excess_precision x_flag_excess_precision;
+ enum fp_contract_mode x_flag_fp_contract_mode;
+ enum ira_algorithm x_flag_ira_algorithm;
+ enum ira_region x_flag_ira_region;
+ enum live_patching_level x_flag_live_patching;
+ enum reorder_blocks_algorithm x_flag_reorder_blocks_algorithm;
+ enum vect_cost_model x_flag_simd_cost_model;
+ enum stack_reuse_level x_flag_stack_reuse;
+ enum auto_init_type x_flag_auto_var_init;
+ enum vect_cost_model x_flag_vect_cost_model;
+ unsigned char x_optimize;
+ unsigned char x_optimize_size;
+ unsigned char x_optimize_debug;
+ unsigned char x_optimize_fast;
+ signed char x_warn_inline;
+ signed char x_flag_aggressive_loop_optimizations;
+ signed char x_flag_align_functions;
+ signed char x_flag_align_jumps;
+ signed char x_flag_align_labels;
+ signed char x_flag_align_loops;
+ signed char x_flag_allocation_dce;
+ signed char x_flag_store_data_races;
+ signed char x_flag_associative_math;
+ signed char x_flag_asynchronous_unwind_tables;
+ signed char x_flag_auto_inc_dec;
+ signed char x_flag_bit_tests;
+ signed char x_flag_branch_on_count_reg;
+ signed char x_flag_branch_probabilities;
+ signed char x_flag_caller_saves;
+ signed char x_flag_code_hoisting;
+ signed char x_flag_combine_stack_adjustments;
+ signed char x_flag_compare_elim_after_reload;
+ signed char x_flag_conserve_stack;
+ signed char x_flag_cprop_registers;
+ signed char x_flag_crossjumping;
+ signed char x_flag_cse_follow_jumps;
+ signed char x_flag_cx_fortran_rules;
+ signed char x_flag_cx_limited_range;
+ signed char x_flag_dce;
+ signed char x_flag_defer_pop;
+ signed char x_flag_delayed_branch;
+ signed char x_flag_delete_dead_exceptions;
+ signed char x_flag_delete_null_pointer_checks;
+ signed char x_flag_devirtualize;
+ signed char x_flag_devirtualize_speculatively;
+ signed char x_flag_dse;
+ signed char x_flag_early_inlining;
+ signed char x_flag_exceptions;
+ signed char x_flag_expensive_optimizations;
+ signed char x_flag_finite_loops;
+ signed char x_flag_finite_math_only;
+ signed char x_flag_float_store;
+ signed char x_flag_fold_simple_inlines;
+ signed char x_flag_forward_propagate;
+ signed char x_flag_fp_int_builtin_inexact;
+ signed char x_flag_no_function_cse;
+ signed char x_flag_gcse;
+ signed char x_flag_gcse_after_reload;
+ signed char x_flag_gcse_las;
+ signed char x_flag_gcse_lm;
+ signed char x_flag_gcse_sm;
+ signed char x_flag_graphite;
+ signed char x_flag_graphite_identity;
+ signed char x_flag_guess_branch_prob;
+ signed char x_flag_harden_compares;
+ signed char x_flag_harden_conditional_branches;
+ signed char x_flag_hoist_adjacent_loads;
+ signed char x_flag_if_conversion;
+ signed char x_flag_if_conversion2;
+ signed char x_flag_indirect_inlining;
+ signed char x_flag_no_inline;
+ signed char x_flag_inline_atomics;
+ signed char x_flag_inline_functions;
+ signed char x_flag_inline_functions_called_once;
+ signed char x_flag_inline_small_functions;
+ signed char x_flag_ipa_bit_cp;
+ signed char x_flag_ipa_cp;
+ signed char x_flag_ipa_cp_clone;
+ signed char x_flag_ipa_icf;
+ signed char x_flag_ipa_icf_functions;
+ signed char x_flag_ipa_icf_variables;
+ signed char x_flag_ipa_modref;
+ signed char x_flag_ipa_profile;
+ signed char x_flag_ipa_pta;
+ signed char x_flag_ipa_pure_const;
+ signed char x_flag_ipa_ra;
+ signed char x_flag_ipa_reference;
+ signed char x_flag_ipa_reference_addressable;
+ signed char x_flag_ipa_sra;
+ signed char x_flag_ipa_stack_alignment;
+ signed char x_flag_ipa_strict_aliasing;
+ signed char x_flag_ipa_vrp;
+ signed char x_flag_ira_hoist_pressure;
+ signed char x_flag_ira_loop_pressure;
+ signed char x_flag_ira_share_save_slots;
+ signed char x_flag_ira_share_spill_slots;
+ signed char x_flag_isolate_erroneous_paths_attribute;
+ signed char x_flag_isolate_erroneous_paths_dereference;
+ signed char x_flag_ivopts;
+ signed char x_flag_jump_tables;
+ signed char x_flag_keep_gc_roots_live;
+ signed char x_flag_lifetime_dse;
+ signed char x_flag_limit_function_alignment;
+ signed char x_flag_live_range_shrinkage;
+ signed char x_flag_loop_interchange;
+ signed char x_flag_loop_nest_optimize;
+ signed char x_flag_loop_parallelize_all;
+ signed char x_flag_unroll_jam;
+ signed char x_flag_lra_remat;
+ signed char x_flag_errno_math;
+ signed char x_flag_modulo_sched;
+ signed char x_flag_modulo_sched_allow_regmoves;
+ signed char x_flag_move_loop_invariants;
+ signed char x_flag_move_loop_stores;
+ signed char x_flag_non_call_exceptions;
+ signed char x_flag_nothrow_opt;
+ signed char x_flag_omit_frame_pointer;
+ signed char x_flag_opt_info;
+ signed char x_flag_optimize_sibling_calls;
+ signed char x_flag_optimize_strlen;
+ signed char x_flag_pack_struct;
+ signed char x_flag_partial_inlining;
+ signed char x_flag_peel_loops;
+ signed char x_flag_no_peephole;
+ signed char x_flag_peephole2;
+ signed char x_flag_plt;
+ signed char x_flag_predictive_commoning;
+ signed char x_flag_prefetch_loop_arrays;
+ signed char x_flag_printf_return_value;
+ signed char x_flag_profile_partial_training;
+ signed char x_flag_profile_reorder_functions;
+ signed char x_flag_reciprocal_math;
+ signed char x_flag_ree;
+ signed char x_flag_pcc_struct_return;
+ signed char x_flag_rename_registers;
+ signed char x_flag_reorder_blocks;
+ signed char x_flag_reorder_blocks_and_partition;
+ signed char x_flag_reorder_functions;
+ signed char x_flag_rerun_cse_after_loop;
+ signed char x_flag_resched_modulo_sched;
+ signed char x_flag_rounding_math;
+ signed char x_flag_rtti;
+ signed char x_flag_save_optimization_record;
+ signed char x_flag_sched_critical_path_heuristic;
+ signed char x_flag_sched_dep_count_heuristic;
+ signed char x_flag_sched_group_heuristic;
+ signed char x_flag_schedule_interblock;
+ signed char x_flag_sched_last_insn_heuristic;
+ signed char x_flag_sched_pressure;
+ signed char x_flag_sched_rank_heuristic;
+ signed char x_flag_schedule_speculative;
+ signed char x_flag_sched_spec_insn_heuristic;
+ signed char x_flag_schedule_speculative_load;
+ signed char x_flag_schedule_speculative_load_dangerous;
+ signed char x_flag_sched2_use_superblocks;
+ signed char x_flag_schedule_fusion;
+ signed char x_flag_schedule_insns;
+ signed char x_flag_schedule_insns_after_reload;
+ signed char x_flag_section_anchors;
+ signed char x_flag_sel_sched_pipelining;
+ signed char x_flag_sel_sched_pipelining_outer_loops;
+ signed char x_flag_sel_sched_reschedule_pipelined;
+ signed char x_flag_selective_scheduling;
+ signed char x_flag_selective_scheduling2;
+ signed char x_flag_semantic_interposition;
+ signed char x_flag_short_enums;
+ signed char x_flag_short_wchar;
+ signed char x_flag_shrink_wrap;
+ signed char x_flag_shrink_wrap_separate;
+ signed char x_flag_signaling_nans;
+ signed char x_flag_signed_zeros;
+ signed char x_flag_single_precision_constant;
+ signed char x_flag_split_ivs_in_unroller;
+ signed char x_flag_split_loops;
+ signed char x_flag_split_paths;
+ signed char x_flag_split_wide_types;
+ signed char x_flag_split_wide_types_early;
+ signed char x_flag_ssa_backprop;
+ signed char x_flag_ssa_phiopt;
+ signed char x_flag_stack_clash_protection;
+ signed char x_flag_stack_protect;
+ signed char x_flag_stdarg_opt;
+ signed char x_flag_store_merging;
+ signed char x_flag_strict_aliasing;
+ signed char x_flag_strict_enums;
+ signed char x_flag_strict_volatile_bitfields;
+ signed char x_flag_thread_jumps;
+ signed char x_flag_threadsafe_statics;
+ signed char x_flag_toplevel_reorder;
+ signed char x_flag_tracer;
+ signed char x_flag_trapping_math;
+ signed char x_flag_trapv;
+ signed char x_flag_tree_bit_ccp;
+ signed char x_flag_tree_builtin_call_dce;
+ signed char x_flag_tree_ccp;
+ signed char x_flag_tree_ch;
+ signed char x_flag_tree_coalesce_vars;
+ signed char x_flag_tree_copy_prop;
+ signed char x_flag_tree_cselim;
+ signed char x_flag_tree_dce;
+ signed char x_flag_tree_dom;
+ signed char x_flag_tree_dse;
+ signed char x_flag_tree_forwprop;
+ signed char x_flag_tree_fre;
+ signed char x_flag_tree_loop_distribute_patterns;
+ signed char x_flag_tree_loop_distribution;
+ signed char x_flag_tree_loop_if_convert;
+ signed char x_flag_tree_loop_im;
+ signed char x_flag_tree_loop_ivcanon;
+ signed char x_flag_tree_loop_optimize;
+ signed char x_flag_tree_loop_vectorize;
+ signed char x_flag_tree_live_range_split;
+ signed char x_flag_tree_partial_pre;
+ signed char x_flag_tree_phiprop;
+ signed char x_flag_tree_pre;
+ signed char x_flag_tree_pta;
+ signed char x_flag_tree_reassoc;
+ signed char x_flag_tree_scev_cprop;
+ signed char x_flag_tree_sink;
+ signed char x_flag_tree_slp_vectorize;
+ signed char x_flag_tree_slsr;
+ signed char x_flag_tree_sra;
+ signed char x_flag_tree_switch_conversion;
+ signed char x_flag_tree_tail_merge;
+ signed char x_flag_tree_ter;
+ signed char x_flag_tree_vectorize;
+ signed char x_flag_tree_vrp;
+ signed char x_flag_unconstrained_commons;
+ signed char x_flag_unreachable_traps;
+ signed char x_flag_unroll_all_loops;
+ signed char x_flag_cunroll_grow_size;
+ signed char x_flag_unroll_loops;
+ signed char x_flag_unsafe_math_optimizations;
+ signed char x_flag_unswitch_loops;
+ signed char x_flag_unwind_tables;
+ signed char x_flag_var_tracking;
+ signed char x_flag_var_tracking_assignments;
+ signed char x_flag_var_tracking_assignments_toggle;
+ signed char x_flag_var_tracking_uninit;
+ signed char x_flag_variable_expansion_in_unroller;
+ signed char x_flag_version_loops_for_strides;
+ signed char x_flag_value_profile_transformations;
+ signed char x_flag_web;
+ signed char x_flag_wrapv;
+ signed char x_flag_wrapv_pointer;
+ signed char x_debug_nonbind_markers_p;
+ /* 498 members */
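+ /* One bit per member above, recording whether the corresponding
+    option was set explicitly rather than left at its default (an
+    editor's gloss, inferred from how the generated save/restore
+    code mirrors the opts_set copy of the options).  */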
+ unsigned HOST_WIDE_INT explicit_mask[8];
+};
+
+/* Structure to save/restore selected target-specific options. */
+struct GTY(()) cl_target_option
+{
+ long x_arm_stack_protector_guard_offset;
+ unsigned x_aarch_enable_bti;
+ const char *x_arm_arch_string;
+ const char *x_arm_branch_protection_string;
+ const char *x_arm_cpu_string;
+ enum fpu_type x_arm_fpu_index;
+ /* - */ int x_target_flags;
+ const char *x_arm_tune_string;
+ enum aarch_function_type x_aarch_ra_sign_scope;
+ enum aarch_key_type x_aarch_ra_sign_key;
+ signed char x_inline_asm_unified;
+ signed char x_fix_aes_erratum_1742098;
+ signed char x_arm_restrict_it;
+ signed char x_unaligned_access;
+ /* 13 members */
+ unsigned HOST_WIDE_INT explicit_mask[1];
+ /* - */ int explicit_mask_target_flags;
+};
+
+
+/* Save optimization variables into a structure. */
+extern void cl_optimization_save (struct cl_optimization *, struct gcc_options *, struct gcc_options *);
+
+/* Restore optimization variables from a structure. */
+extern void cl_optimization_restore (struct gcc_options *, struct gcc_options *, struct cl_optimization *);
+
+/* Print optimization variables from a structure. */
+extern void cl_optimization_print (FILE *, int, struct cl_optimization *);
+
+/* Print the optimization variables that differ between the two structures provided as arguments. */
+extern void cl_optimization_print_diff (FILE *, int, cl_optimization *ptr1, cl_optimization *ptr2);
+
+/* Save selected option variables into a structure. */
+extern void cl_target_option_save (struct cl_target_option *, struct gcc_options *, struct gcc_options *);
+
+/* Restore selected option variables from a structure. */
+extern void cl_target_option_restore (struct gcc_options *, struct gcc_options *, struct cl_target_option *);
+
+/* Print target option variables from a structure. */
+extern void cl_target_option_print (FILE *, int, struct cl_target_option *);
+
+/* Print the target option variables that differ between the two structures provided as arguments. */
+extern void cl_target_option_print_diff (FILE *, int, cl_target_option *ptr1, cl_target_option *ptr2);
+
+/* Compare two sets of saved target option variables. */
+extern bool cl_target_option_eq (const struct cl_target_option *, const struct cl_target_option *);
+
+/* Free heap memory used by target option variables. */
+extern void cl_target_option_free (struct cl_target_option *);
+
+/* Hash option variables from a structure. */
+extern hashval_t cl_target_option_hash (const struct cl_target_option *);
+
+/* Hash optimization variables from a structure. */
+extern hashval_t cl_optimization_hash (const struct cl_optimization *);
+
+/* Compare two sets of saved optimization options. */
+extern bool cl_optimization_option_eq (cl_optimization const *ptr1, cl_optimization const *ptr2);
+
+/* Free heap memory used by optimization options. */
+extern void cl_optimization_option_free (cl_optimization *ptr1);
+
+/* Compare the cl_optimization parts of two option sets and report any differences. */
+extern void cl_optimization_compare (gcc_options *ptr1, gcc_options *ptr2);
+
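+/* Editor's note: an illustrative sketch, not part of the generated
+   header.  The save/restore entry points above are meant to be used
+   as a pair: snapshot the option state, mutate it temporarily (for
+   example while handling an optimize attribute), then restore.
+   global_options and global_options_set are the usual gcc_options
+   instances; the cl_target_option entry points pair the same way.  */
+#if 0
+static void
+example_with_saved_options (void)
+{
+  struct cl_optimization snapshot;
+
+  /* Capture the optimization-related fields of the current state.  */
+  cl_optimization_save (&snapshot, &global_options, &global_options_set);
+
+  /* ... temporarily adjust optimization options here ...  */
+
+  /* Put every saved field (and its explicit bit) back.  */
+  cl_optimization_restore (&global_options, &global_options_set, &snapshot);
+}
+#endif
+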
+/* Generator files may not have access to location_t, and don't need these. */
+#if defined(UNKNOWN_LOCATION)
+bool
+common_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ const struct cl_decoded_option *decoded,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+Ada_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+AdaSCIL_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+AdaWhy_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+C_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+CXX_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+D_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+Fortran_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+Go_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+LTO_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+LTODump_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+ModulaX2_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+ObjC_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+ObjCXX_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+bool
+Rust_handle_option_auto (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t scode, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+void cpp_handle_option_auto (const struct gcc_options * opts, size_t scode,
+ struct cpp_options * cpp_opts);
+void init_global_opts_from_cpp(struct gcc_options * opts,
+ const struct cpp_options * cpp_opts);
+#endif
+#endif
+
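+/* Editor's note: an illustrative sketch, not part of the generated
+   header.  Every language-specific *_handle_option_auto declaration
+   above shares one signature (only common_handle_option_auto, which
+   takes a cl_decoded_option, differs), so a handler can be selected
+   through a plain function pointer.  */
+#if 0
+typedef bool (*lang_option_handler) (struct gcc_options *,
+                                     struct gcc_options *,
+                                     size_t, const char *,
+                                     HOST_WIDE_INT,
+                                     unsigned int, int,
+                                     location_t,
+                                     const struct cl_option_handlers *,
+                                     diagnostic_context *);
+
+/* For the C front end, dispatch goes through the C handler.  */
+static const lang_option_handler example_handler = C_handle_option_auto;
+#endif
+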
+#define MASK_ABORT_NORETURN (1U << 0)
+#define MASK_APCS_FRAME (1U << 1)
+#define MASK_APCS_REENT (1U << 2)
+#define MASK_APCS_STACK (1U << 3)
+#define MASK_THUMB (1U << 4)
+#define MASK_BE8 (1U << 5)
+#define MASK_BIG_END (1U << 6)
+#define MASK_CALLEE_INTERWORKING (1U << 7)
+#define MASK_CALLER_INTERWORKING (1U << 8)
+#define MASK_FDPIC (1U << 9)
+#define MASK_GENERAL_REGS_ONLY (1U << 10)
+#define MASK_LONG_CALLS (1U << 11)
+#define MASK_POKE_FUNCTION_NAME (1U << 12)
+#define MASK_SCHED_PROLOG (1U << 13)
+#define MASK_SINGLE_PIC_BASE (1U << 14)
+#define MASK_INTERWORK (1U << 15)
+#define MASK_TPCS_FRAME (1U << 16)
+#define MASK_TPCS_LEAF_FRAME (1U << 17)
+#define MASK_NEON_VECTORIZE_DOUBLE (1U << 18)
+
+/* ABORT_NORETURN mask */
+#define TARGET_ABORT_NORETURN ((target_flags & MASK_ABORT_NORETURN) != 0)
+#define TARGET_ABORT_NORETURN_P(target_flags) (((target_flags) & MASK_ABORT_NORETURN) != 0)
+#define TARGET_EXPLICIT_ABORT_NORETURN_P(opts) ((opts->x_target_flags_explicit & MASK_ABORT_NORETURN) != 0)
+#define SET_TARGET_ABORT_NORETURN(opts) opts->x_target_flags |= MASK_ABORT_NORETURN
+/* APCS_FRAME mask */
+#define TARGET_APCS_FRAME ((target_flags & MASK_APCS_FRAME) != 0)
+#define TARGET_APCS_FRAME_P(target_flags) (((target_flags) & MASK_APCS_FRAME) != 0)
+#define TARGET_EXPLICIT_APCS_FRAME_P(opts) ((opts->x_target_flags_explicit & MASK_APCS_FRAME) != 0)
+#define SET_TARGET_APCS_FRAME(opts) opts->x_target_flags |= MASK_APCS_FRAME
+/* APCS_REENT mask */
+#define TARGET_APCS_REENT ((target_flags & MASK_APCS_REENT) != 0)
+#define TARGET_APCS_REENT_P(target_flags) (((target_flags) & MASK_APCS_REENT) != 0)
+#define TARGET_EXPLICIT_APCS_REENT_P(opts) ((opts->x_target_flags_explicit & MASK_APCS_REENT) != 0)
+#define SET_TARGET_APCS_REENT(opts) opts->x_target_flags |= MASK_APCS_REENT
+/* APCS_STACK mask */
+#define TARGET_APCS_STACK ((target_flags & MASK_APCS_STACK) != 0)
+#define TARGET_APCS_STACK_P(target_flags) (((target_flags) & MASK_APCS_STACK) != 0)
+#define TARGET_EXPLICIT_APCS_STACK_P(opts) ((opts->x_target_flags_explicit & MASK_APCS_STACK) != 0)
+#define SET_TARGET_APCS_STACK(opts) opts->x_target_flags |= MASK_APCS_STACK
+/* THUMB mask */
+#define TARGET_THUMB ((target_flags & MASK_THUMB) != 0)
+#define TARGET_THUMB_P(target_flags) (((target_flags) & MASK_THUMB) != 0)
+#define TARGET_EXPLICIT_THUMB_P(opts) ((opts->x_target_flags_explicit & MASK_THUMB) != 0)
+#define SET_TARGET_THUMB(opts) opts->x_target_flags |= MASK_THUMB
+/* BE8 mask */
+#define TARGET_BE8 ((target_flags & MASK_BE8) != 0)
+#define TARGET_BE8_P(target_flags) (((target_flags) & MASK_BE8) != 0)
+#define TARGET_EXPLICIT_BE8_P(opts) ((opts->x_target_flags_explicit & MASK_BE8) != 0)
+#define SET_TARGET_BE8(opts) opts->x_target_flags |= MASK_BE8
+/* BIG_END mask */
+#define TARGET_BIG_END ((target_flags & MASK_BIG_END) != 0)
+#define TARGET_BIG_END_P(target_flags) (((target_flags) & MASK_BIG_END) != 0)
+#define TARGET_EXPLICIT_BIG_END_P(opts) ((opts->x_target_flags_explicit & MASK_BIG_END) != 0)
+#define SET_TARGET_BIG_END(opts) opts->x_target_flags |= MASK_BIG_END
+/* CALLEE_INTERWORKING mask */
+#define TARGET_CALLEE_INTERWORKING ((target_flags & MASK_CALLEE_INTERWORKING) != 0)
+#define TARGET_CALLEE_INTERWORKING_P(target_flags) (((target_flags) & MASK_CALLEE_INTERWORKING) != 0)
+#define TARGET_EXPLICIT_CALLEE_INTERWORKING_P(opts) ((opts->x_target_flags_explicit & MASK_CALLEE_INTERWORKING) != 0)
+#define SET_TARGET_CALLEE_INTERWORKING(opts) opts->x_target_flags |= MASK_CALLEE_INTERWORKING
+/* CALLER_INTERWORKING mask */
+#define TARGET_CALLER_INTERWORKING ((target_flags & MASK_CALLER_INTERWORKING) != 0)
+#define TARGET_CALLER_INTERWORKING_P(target_flags) (((target_flags) & MASK_CALLER_INTERWORKING) != 0)
+#define TARGET_EXPLICIT_CALLER_INTERWORKING_P(opts) ((opts->x_target_flags_explicit & MASK_CALLER_INTERWORKING) != 0)
+#define SET_TARGET_CALLER_INTERWORKING(opts) opts->x_target_flags |= MASK_CALLER_INTERWORKING
+/* FDPIC mask */
+#define TARGET_FDPIC ((target_flags & MASK_FDPIC) != 0)
+#define TARGET_FDPIC_P(target_flags) (((target_flags) & MASK_FDPIC) != 0)
+#define TARGET_EXPLICIT_FDPIC_P(opts) ((opts->x_target_flags_explicit & MASK_FDPIC) != 0)
+#define SET_TARGET_FDPIC(opts) opts->x_target_flags |= MASK_FDPIC
+/* GENERAL_REGS_ONLY mask */
+#define TARGET_GENERAL_REGS_ONLY ((target_flags & MASK_GENERAL_REGS_ONLY) != 0)
+#define TARGET_GENERAL_REGS_ONLY_P(target_flags) (((target_flags) & MASK_GENERAL_REGS_ONLY) != 0)
+#define TARGET_EXPLICIT_GENERAL_REGS_ONLY_P(opts) ((opts->x_target_flags_explicit & MASK_GENERAL_REGS_ONLY) != 0)
+#define SET_TARGET_GENERAL_REGS_ONLY(opts) opts->x_target_flags |= MASK_GENERAL_REGS_ONLY
+/* LONG_CALLS mask */
+#define TARGET_LONG_CALLS ((target_flags & MASK_LONG_CALLS) != 0)
+#define TARGET_LONG_CALLS_P(target_flags) (((target_flags) & MASK_LONG_CALLS) != 0)
+#define TARGET_EXPLICIT_LONG_CALLS_P(opts) ((opts->x_target_flags_explicit & MASK_LONG_CALLS) != 0)
+#define SET_TARGET_LONG_CALLS(opts) opts->x_target_flags |= MASK_LONG_CALLS
+/* POKE_FUNCTION_NAME mask */
+#define TARGET_POKE_FUNCTION_NAME ((target_flags & MASK_POKE_FUNCTION_NAME) != 0)
+#define TARGET_POKE_FUNCTION_NAME_P(target_flags) (((target_flags) & MASK_POKE_FUNCTION_NAME) != 0)
+#define TARGET_EXPLICIT_POKE_FUNCTION_NAME_P(opts) ((opts->x_target_flags_explicit & MASK_POKE_FUNCTION_NAME) != 0)
+#define SET_TARGET_POKE_FUNCTION_NAME(opts) opts->x_target_flags |= MASK_POKE_FUNCTION_NAME
+/* SCHED_PROLOG mask */
+#define TARGET_SCHED_PROLOG ((target_flags & MASK_SCHED_PROLOG) != 0)
+#define TARGET_SCHED_PROLOG_P(target_flags) (((target_flags) & MASK_SCHED_PROLOG) != 0)
+#define TARGET_EXPLICIT_SCHED_PROLOG_P(opts) ((opts->x_target_flags_explicit & MASK_SCHED_PROLOG) != 0)
+#define SET_TARGET_SCHED_PROLOG(opts) opts->x_target_flags |= MASK_SCHED_PROLOG
+/* SINGLE_PIC_BASE mask */
+#define TARGET_SINGLE_PIC_BASE ((target_flags & MASK_SINGLE_PIC_BASE) != 0)
+#define TARGET_SINGLE_PIC_BASE_P(target_flags) (((target_flags) & MASK_SINGLE_PIC_BASE) != 0)
+#define TARGET_EXPLICIT_SINGLE_PIC_BASE_P(opts) ((opts->x_target_flags_explicit & MASK_SINGLE_PIC_BASE) != 0)
+#define SET_TARGET_SINGLE_PIC_BASE(opts) opts->x_target_flags |= MASK_SINGLE_PIC_BASE
+/* INTERWORK mask */
+#define TARGET_INTERWORK ((target_flags & MASK_INTERWORK) != 0)
+#define TARGET_INTERWORK_P(target_flags) (((target_flags) & MASK_INTERWORK) != 0)
+#define TARGET_EXPLICIT_INTERWORK_P(opts) ((opts->x_target_flags_explicit & MASK_INTERWORK) != 0)
+#define SET_TARGET_INTERWORK(opts) opts->x_target_flags |= MASK_INTERWORK
+/* TPCS_FRAME mask */
+#define TARGET_TPCS_FRAME ((target_flags & MASK_TPCS_FRAME) != 0)
+#define TARGET_TPCS_FRAME_P(target_flags) (((target_flags) & MASK_TPCS_FRAME) != 0)
+#define TARGET_EXPLICIT_TPCS_FRAME_P(opts) ((opts->x_target_flags_explicit & MASK_TPCS_FRAME) != 0)
+#define SET_TARGET_TPCS_FRAME(opts) opts->x_target_flags |= MASK_TPCS_FRAME
+/* TPCS_LEAF_FRAME mask */
+#define TARGET_TPCS_LEAF_FRAME ((target_flags & MASK_TPCS_LEAF_FRAME) != 0)
+#define TARGET_TPCS_LEAF_FRAME_P(target_flags) (((target_flags) & MASK_TPCS_LEAF_FRAME) != 0)
+#define TARGET_EXPLICIT_TPCS_LEAF_FRAME_P(opts) ((opts->x_target_flags_explicit & MASK_TPCS_LEAF_FRAME) != 0)
+#define SET_TARGET_TPCS_LEAF_FRAME(opts) opts->x_target_flags |= MASK_TPCS_LEAF_FRAME
+/* NEON_VECTORIZE_DOUBLE mask */
+#define TARGET_NEON_VECTORIZE_DOUBLE ((target_flags & MASK_NEON_VECTORIZE_DOUBLE) != 0)
+#define TARGET_NEON_VECTORIZE_DOUBLE_P(target_flags) (((target_flags) & MASK_NEON_VECTORIZE_DOUBLE) != 0)
+#define TARGET_EXPLICIT_NEON_VECTORIZE_DOUBLE_P(opts) ((opts->x_target_flags_explicit & MASK_NEON_VECTORIZE_DOUBLE) != 0)
+#define SET_TARGET_NEON_VECTORIZE_DOUBLE(opts) opts->x_target_flags |= MASK_NEON_VECTORIZE_DOUBLE
+
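+/* Editor's note: an illustrative sketch, not part of the generated
+   header.  Each MASK_* bit above comes with the same four macros:
+   TARGET_<NAME> tests the bit in the global target_flags,
+   TARGET_<NAME>_P tests it in a caller-supplied flags word,
+   TARGET_EXPLICIT_<NAME>_P asks whether the user set the option
+   explicitly, and SET_TARGET_<NAME> turns the bit on.  */
+#if 0
+static bool
+example_force_thumb (struct gcc_options *opts)
+{
+  /* Enable Thumb mode unless the user made an explicit choice.  */
+  if (!TARGET_EXPLICIT_THUMB_P (opts))
+    SET_TARGET_THUMB (opts);
+  return TARGET_THUMB_P (opts->x_target_flags);
+}
+#endif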
+
+#define CL_Ada (1U << 0)
+#define CL_AdaSCIL (1U << 1)
+#define CL_AdaWhy (1U << 2)
+#define CL_C (1U << 3)
+#define CL_CXX (1U << 4)
+#define CL_D (1U << 5)
+#define CL_Fortran (1U << 6)
+#define CL_Go (1U << 7)
+#define CL_LTO (1U << 8)
+#define CL_LTODump (1U << 9)
+#define CL_ModulaX2 (1U << 10)
+#define CL_ObjC (1U << 11)
+#define CL_ObjCXX (1U << 12)
+#define CL_Rust (1U << 13)
+#define CL_LANG_ALL ((1U << 14) - 1)
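+
+/* Editor's note: an illustrative sketch, not part of the generated
+   header.  These bits tag each option with the front ends it applies
+   to; a front end passes its own bit (or a union of bits) as the
+   lang_mask argument of the handlers above.  */
+#if 0
+static bool
+example_option_is_for_c_family (unsigned int option_flags)
+{
+  /* The C-family front ends share most of their options.  */
+  const unsigned int c_family_mask = CL_C | CL_CXX | CL_ObjC | CL_ObjCXX;
+  return (option_flags & c_family_mask) != 0;
+}
+#endif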
+
+enum opt_code
+{
+ OPT____ = 0, /* -### */
+ /* OPT__all_warnings = 1, */ /* --all-warnings */
+ /* OPT__ansi = 2, */ /* --ansi */
+ /* OPT__assemble = 3, */ /* --assemble */
+ /* OPT__assert = 4, */ /* --assert */
+ /* OPT__assert_ = 5, */ /* --assert= */
+ /* OPT__comments = 6, */ /* --comments */
+ /* OPT__comments_in_macros = 7, */ /* --comments-in-macros */
+ /* OPT__compile = 8, */ /* --compile */
+ OPT__completion_ = 9, /* --completion= */
+ /* OPT__coverage = 10, */ /* --coverage */
+ /* OPT__debug = 11, */ /* --debug */
+ /* OPT__define_macro = 12, */ /* --define-macro */
+ /* OPT__define_macro_ = 13, */ /* --define-macro= */
+ /* OPT__dependencies = 14, */ /* --dependencies */
+ /* OPT__dump = 15, */ /* --dump */
+ /* OPT__dump_ = 16, */ /* --dump= */
+ /* OPT__dumpbase = 17, */ /* --dumpbase */
+ /* OPT__dumpbase_ext = 18, */ /* --dumpbase-ext */
+ /* OPT__dumpdir = 19, */ /* --dumpdir */
+ /* OPT__entry = 20, */ /* --entry */
+ /* OPT__entry_ = 21, */ /* --entry= */
+ /* OPT__extra_warnings = 22, */ /* --extra-warnings */
+ /* OPT__for_assembler = 23, */ /* --for-assembler */
+ /* OPT__for_assembler_ = 24, */ /* --for-assembler= */
+ /* OPT__for_linker = 25, */ /* --for-linker */
+ /* OPT__for_linker_ = 26, */ /* --for-linker= */
+ /* OPT__force_link = 27, */ /* --force-link */
+ /* OPT__force_link_ = 28, */ /* --force-link= */
+ OPT__help = 29, /* --help */
+ OPT__help_ = 30, /* --help= */
+ /* OPT__imacros = 31, */ /* --imacros */
+ /* OPT__imacros_ = 32, */ /* --imacros= */
+ /* OPT__include = 33, */ /* --include */
+ /* OPT__include_barrier = 34, */ /* --include-barrier */
+ /* OPT__include_directory = 35, */ /* --include-directory */
+ /* OPT__include_directory_after = 36, */ /* --include-directory-after */
+ /* OPT__include_directory_after_ = 37, */ /* --include-directory-after= */
+ /* OPT__include_directory_ = 38, */ /* --include-directory= */
+ /* OPT__include_prefix = 39, */ /* --include-prefix */
+ /* OPT__include_prefix_ = 40, */ /* --include-prefix= */
+ /* OPT__include_with_prefix = 41, */ /* --include-with-prefix */
+ /* OPT__include_with_prefix_after = 42, */ /* --include-with-prefix-after */
+ /* OPT__include_with_prefix_after_ = 43, */ /* --include-with-prefix-after= */
+ /* OPT__include_with_prefix_before = 44, */ /* --include-with-prefix-before */
+ /* OPT__include_with_prefix_before_ = 45, */ /* --include-with-prefix-before= */
+ /* OPT__include_with_prefix_ = 46, */ /* --include-with-prefix= */
+ /* OPT__include_ = 47, */ /* --include= */
+ /* OPT__language = 48, */ /* --language */
+ /* OPT__language_ = 49, */ /* --language= */
+ /* OPT__library_directory = 50, */ /* --library-directory */
+ /* OPT__library_directory_ = 51, */ /* --library-directory= */
+ /* OPT__no_canonical_prefixes = 52, */ /* --no-canonical-prefixes */
+ /* OPT__no_integrated_cpp = 53, */ /* --no-integrated-cpp */
+ /* OPT__no_line_commands = 54, */ /* --no-line-commands */
+ /* OPT__no_standard_includes = 55, */ /* --no-standard-includes */
+ /* OPT__no_standard_libraries = 56, */ /* --no-standard-libraries */
+ OPT__no_sysroot_suffix = 57, /* --no-sysroot-suffix */
+ /* OPT__no_warnings = 58, */ /* --no-warnings */
+ /* OPT__optimize = 59, */ /* --optimize */
+ /* OPT__output = 60, */ /* --output */
+ OPT__output_pch = 61, /* --output-pch */
+ /* OPT__output_ = 62, */ /* --output= */
+ OPT__param_align_loop_iterations_ = 63, /* --param=align-loop-iterations= */
+ OPT__param_align_threshold_ = 64, /* --param=align-threshold= */
+ OPT__param_analyzer_bb_explosion_factor_ = 65,/* --param=analyzer-bb-explosion-factor= */
+ OPT__param_analyzer_max_constraints_ = 66, /* --param=analyzer-max-constraints= */
+ OPT__param_analyzer_max_enodes_for_full_dump_ = 67,/* --param=analyzer-max-enodes-for-full-dump= */
+ OPT__param_analyzer_max_enodes_per_program_point_ = 68,/* --param=analyzer-max-enodes-per-program-point= */
+ OPT__param_analyzer_max_infeasible_edges_ = 69,/* --param=analyzer-max-infeasible-edges= */
+ OPT__param_analyzer_max_recursion_depth_ = 70,/* --param=analyzer-max-recursion-depth= */
+ OPT__param_analyzer_max_svalue_depth_ = 71,/* --param=analyzer-max-svalue-depth= */
+ OPT__param_analyzer_min_snodes_for_call_summary_ = 72,/* --param=analyzer-min-snodes-for-call-summary= */
+ OPT__param_asan_globals_ = 73, /* --param=asan-globals= */
+ OPT__param_asan_instrument_allocas_ = 74, /* --param=asan-instrument-allocas= */
+ OPT__param_asan_instrument_reads_ = 75, /* --param=asan-instrument-reads= */
+ OPT__param_asan_instrument_writes_ = 76, /* --param=asan-instrument-writes= */
+ OPT__param_asan_instrumentation_with_call_threshold_ = 77,/* --param=asan-instrumentation-with-call-threshold= */
+ OPT__param_asan_kernel_mem_intrinsic_prefix_ = 78,/* --param=asan-kernel-mem-intrinsic-prefix= */
+ OPT__param_asan_memintrin_ = 79, /* --param=asan-memintrin= */
+ OPT__param_asan_stack_ = 80, /* --param=asan-stack= */
+ OPT__param_asan_use_after_return_ = 81, /* --param=asan-use-after-return= */
+ OPT__param_avg_loop_niter_ = 82, /* --param=avg-loop-niter= */
+ OPT__param_avoid_fma_max_bits_ = 83, /* --param=avoid-fma-max-bits= */
+ OPT__param_builtin_expect_probability_ = 84,/* --param=builtin-expect-probability= */
+ OPT__param_builtin_string_cmp_inline_length_ = 85,/* --param=builtin-string-cmp-inline-length= */
+ OPT__param_case_values_threshold_ = 86, /* --param=case-values-threshold= */
+ OPT__param_comdat_sharing_probability_ = 87,/* --param=comdat-sharing-probability= */
+ OPT__param_constructive_interference_size_ = 88,/* --param=constructive-interference-size= */
+ OPT__param_cxx_max_namespaces_for_diagnostic_help_ = 89,/* --param=cxx-max-namespaces-for-diagnostic-help= */
+ OPT__param_destructive_interference_size_ = 90,/* --param=destructive-interference-size= */
+ OPT__param_dse_max_alias_queries_per_store_ = 91,/* --param=dse-max-alias-queries-per-store= */
+ OPT__param_dse_max_object_size_ = 92, /* --param=dse-max-object-size= */
+ OPT__param_early_inlining_insns_ = 93, /* --param=early-inlining-insns= */
+ OPT__param_evrp_sparse_threshold_ = 94, /* --param=evrp-sparse-threshold= */
+ OPT__param_evrp_switch_limit_ = 95, /* --param=evrp-switch-limit= */
+ OPT__param_fsm_scale_path_stmts_ = 96, /* --param=fsm-scale-path-stmts= */
+ OPT__param_gcse_after_reload_critical_fraction_ = 97,/* --param=gcse-after-reload-critical-fraction= */
+ OPT__param_gcse_after_reload_partial_fraction_ = 98,/* --param=gcse-after-reload-partial-fraction= */
+ OPT__param_gcse_cost_distance_ratio_ = 99, /* --param=gcse-cost-distance-ratio= */
+ OPT__param_gcse_unrestricted_cost_ = 100, /* --param=gcse-unrestricted-cost= */
+ OPT__param_ggc_min_expand_ = 101, /* --param=ggc-min-expand= */
+ OPT__param_ggc_min_heapsize_ = 102, /* --param=ggc-min-heapsize= */
+ OPT__param_gimple_fe_computed_hot_bb_threshold_ = 103,/* --param=gimple-fe-computed-hot-bb-threshold= */
+ OPT__param_graphite_allow_codegen_errors_ = 104,/* --param=graphite-allow-codegen-errors= */
+ OPT__param_graphite_max_arrays_per_scop_ = 105,/* --param=graphite-max-arrays-per-scop= */
+ OPT__param_graphite_max_nb_scop_params_ = 106,/* --param=graphite-max-nb-scop-params= */
+ OPT__param_hash_table_verification_limit_ = 107,/* --param=hash-table-verification-limit= */
+ OPT__param_hot_bb_count_fraction_ = 108, /* --param=hot-bb-count-fraction= */
+ OPT__param_hot_bb_count_ws_permille_ = 109,/* --param=hot-bb-count-ws-permille= */
+ OPT__param_hot_bb_frequency_fraction_ = 110,/* --param=hot-bb-frequency-fraction= */
+ OPT__param_hwasan_instrument_allocas_ = 111,/* --param=hwasan-instrument-allocas= */
+ OPT__param_hwasan_instrument_mem_intrinsics_ = 112,/* --param=hwasan-instrument-mem-intrinsics= */
+ OPT__param_hwasan_instrument_reads_ = 113, /* --param=hwasan-instrument-reads= */
+ OPT__param_hwasan_instrument_stack_ = 114, /* --param=hwasan-instrument-stack= */
+ OPT__param_hwasan_instrument_writes_ = 115,/* --param=hwasan-instrument-writes= */
+ OPT__param_hwasan_random_frame_tag_ = 116, /* --param=hwasan-random-frame-tag= */
+ OPT__param_inline_heuristics_hint_percent_ = 117,/* --param=inline-heuristics-hint-percent= */
+ OPT__param_inline_min_speedup_ = 118, /* --param=inline-min-speedup= */
+ OPT__param_inline_unit_growth_ = 119, /* --param=inline-unit-growth= */
+ OPT__param_integer_share_limit_ = 120, /* --param=integer-share-limit= */
+ OPT__param_ipa_cp_eval_threshold_ = 121, /* --param=ipa-cp-eval-threshold= */
+ OPT__param_ipa_cp_large_unit_insns_ = 122, /* --param=ipa-cp-large-unit-insns= */
+ OPT__param_ipa_cp_loop_hint_bonus_ = 123, /* --param=ipa-cp-loop-hint-bonus= */
+ OPT__param_ipa_cp_max_recursive_depth_ = 124,/* --param=ipa-cp-max-recursive-depth= */
+ OPT__param_ipa_cp_min_recursive_probability_ = 125,/* --param=ipa-cp-min-recursive-probability= */
+ OPT__param_ipa_cp_profile_count_base_ = 126,/* --param=ipa-cp-profile-count-base= */
+ OPT__param_ipa_cp_recursion_penalty_ = 127,/* --param=ipa-cp-recursion-penalty= */
+ OPT__param_ipa_cp_recursive_freq_factor_ = 128,/* --param=ipa-cp-recursive-freq-factor= */
+ OPT__param_ipa_cp_single_call_penalty_ = 129,/* --param=ipa-cp-single-call-penalty= */
+ OPT__param_ipa_cp_unit_growth_ = 130, /* --param=ipa-cp-unit-growth= */
+ OPT__param_ipa_cp_value_list_size_ = 131, /* --param=ipa-cp-value-list-size= */
+ OPT__param_ipa_jump_function_lookups_ = 132,/* --param=ipa-jump-function-lookups= */
+ OPT__param_ipa_max_aa_steps_ = 133, /* --param=ipa-max-aa-steps= */
+ OPT__param_ipa_max_agg_items_ = 134, /* --param=ipa-max-agg-items= */
+ OPT__param_ipa_max_loop_predicates_ = 135, /* --param=ipa-max-loop-predicates= */
+ OPT__param_ipa_max_param_expr_ops_ = 136, /* --param=ipa-max-param-expr-ops= */
+ OPT__param_ipa_max_switch_predicate_bounds_ = 137,/* --param=ipa-max-switch-predicate-bounds= */
+ OPT__param_ipa_sra_deref_prob_threshold_ = 138,/* --param=ipa-sra-deref-prob-threshold= */
+ OPT__param_ipa_sra_max_replacements_ = 139,/* --param=ipa-sra-max-replacements= */
+ OPT__param_ipa_sra_ptr_growth_factor_ = 140,/* --param=ipa-sra-ptr-growth-factor= */
+ OPT__param_ipa_sra_ptrwrap_growth_factor_ = 141,/* --param=ipa-sra-ptrwrap-growth-factor= */
+ OPT__param_ira_consider_dup_in_all_alts_ = 142,/* --param=ira-consider-dup-in-all-alts= */
+ OPT__param_ira_loop_reserved_regs_ = 143, /* --param=ira-loop-reserved-regs= */
+ OPT__param_ira_max_conflict_table_size_ = 144,/* --param=ira-max-conflict-table-size= */
+ OPT__param_ira_max_loops_num_ = 145, /* --param=ira-max-loops-num= */
+ OPT__param_ira_simple_lra_insn_threshold_ = 146,/* --param=ira-simple-lra-insn-threshold= */
+ OPT__param_iv_always_prune_cand_set_bound_ = 147,/* --param=iv-always-prune-cand-set-bound= */
+ OPT__param_iv_consider_all_candidates_bound_ = 148,/* --param=iv-consider-all-candidates-bound= */
+ OPT__param_iv_max_considered_uses_ = 149, /* --param=iv-max-considered-uses= */
+ OPT__param_jump_table_max_growth_ratio_for_size_ = 150,/* --param=jump-table-max-growth-ratio-for-size= */
+ OPT__param_jump_table_max_growth_ratio_for_speed_ = 151,/* --param=jump-table-max-growth-ratio-for-speed= */
+ OPT__param_l1_cache_line_size_ = 152, /* --param=l1-cache-line-size= */
+ OPT__param_l1_cache_size_ = 153, /* --param=l1-cache-size= */
+ OPT__param_l2_cache_size_ = 154, /* --param=l2-cache-size= */
+ OPT__param_large_function_growth_ = 155, /* --param=large-function-growth= */
+ OPT__param_large_function_insns_ = 156, /* --param=large-function-insns= */
+ OPT__param_large_stack_frame_growth_ = 157,/* --param=large-stack-frame-growth= */
+ OPT__param_large_stack_frame_ = 158, /* --param=large-stack-frame= */
+ OPT__param_large_unit_insns_ = 159, /* --param=large-unit-insns= */
+ OPT__param_lazy_modules_ = 160, /* --param=lazy-modules= */
+ OPT__param_lim_expensive_ = 161, /* --param=lim-expensive= */
+ OPT__param_logical_op_non_short_circuit_ = 162,/* --param=logical-op-non-short-circuit= */
+ OPT__param_loop_block_tile_size_ = 163, /* --param=loop-block-tile-size= */
+ OPT__param_loop_interchange_max_num_stmts_ = 164,/* --param=loop-interchange-max-num-stmts= */
+ OPT__param_loop_interchange_stride_ratio_ = 165,/* --param=loop-interchange-stride-ratio= */
+ OPT__param_loop_invariant_max_bbs_in_loop_ = 166,/* --param=loop-invariant-max-bbs-in-loop= */
+ OPT__param_loop_max_datarefs_for_datadeps_ = 167,/* --param=loop-max-datarefs-for-datadeps= */
+ OPT__param_loop_versioning_max_inner_insns_ = 168,/* --param=loop-versioning-max-inner-insns= */
+ OPT__param_loop_versioning_max_outer_insns_ = 169,/* --param=loop-versioning-max-outer-insns= */
+ OPT__param_lra_inheritance_ebb_probability_cutoff_ = 170,/* --param=lra-inheritance-ebb-probability-cutoff= */
+ OPT__param_lra_max_considered_reload_pseudos_ = 171,/* --param=lra-max-considered-reload-pseudos= */
+ OPT__param_lto_max_partition_ = 172, /* --param=lto-max-partition= */
+ OPT__param_lto_max_streaming_parallelism_ = 173,/* --param=lto-max-streaming-parallelism= */
+ OPT__param_lto_min_partition_ = 174, /* --param=lto-min-partition= */
+ OPT__param_lto_partitions_ = 175, /* --param=lto-partitions= */
+ OPT__param_max_average_unrolled_insns_ = 176,/* --param=max-average-unrolled-insns= */
+ OPT__param_max_combine_insns_ = 177, /* --param=max-combine-insns= */
+ OPT__param_max_completely_peel_loop_nest_depth_ = 178,/* --param=max-completely-peel-loop-nest-depth= */
+ OPT__param_max_completely_peel_times_ = 179,/* --param=max-completely-peel-times= */
+ OPT__param_max_completely_peeled_insns_ = 180,/* --param=max-completely-peeled-insns= */
+ OPT__param_max_crossjump_edges_ = 181, /* --param=max-crossjump-edges= */
+ OPT__param_max_cse_insns_ = 182, /* --param=max-cse-insns= */
+ OPT__param_max_cse_path_length_ = 183, /* --param=max-cse-path-length= */
+ OPT__param_max_cselib_memory_locations_ = 184,/* --param=max-cselib-memory-locations= */
+ OPT__param_max_debug_marker_count_ = 185, /* --param=max-debug-marker-count= */
+ OPT__param_max_delay_slot_insn_search_ = 186,/* --param=max-delay-slot-insn-search= */
+ OPT__param_max_delay_slot_live_search_ = 187,/* --param=max-delay-slot-live-search= */
+ OPT__param_max_dse_active_local_stores_ = 188,/* --param=max-dse-active-local-stores= */
+ OPT__param_max_early_inliner_iterations_ = 189,/* --param=max-early-inliner-iterations= */
+ OPT__param_max_fields_for_field_sensitive_ = 190,/* --param=max-fields-for-field-sensitive= */
+ OPT__param_max_find_base_term_values_ = 191,/* --param=max-find-base-term-values= */
+ OPT__param_max_fsm_thread_path_insns_ = 192,/* --param=max-fsm-thread-path-insns= */
+ OPT__param_max_gcse_insertion_ratio_ = 193,/* --param=max-gcse-insertion-ratio= */
+ OPT__param_max_gcse_memory_ = 194, /* --param=max-gcse-memory= */
+ OPT__param_max_goto_duplication_insns_ = 195,/* --param=max-goto-duplication-insns= */
+ OPT__param_max_grow_copy_bb_insns_ = 196, /* --param=max-grow-copy-bb-insns= */
+ OPT__param_max_hoist_depth_ = 197, /* --param=max-hoist-depth= */
+ OPT__param_max_inline_functions_called_once_insns_ = 198,/* --param=max-inline-functions-called-once-insns= */
+ OPT__param_max_inline_functions_called_once_loop_depth_ = 199,/* --param=max-inline-functions-called-once-loop-depth= */
+ OPT__param_max_inline_insns_auto_ = 200, /* --param=max-inline-insns-auto= */
+ OPT__param_max_inline_insns_recursive_auto_ = 201,/* --param=max-inline-insns-recursive-auto= */
+ OPT__param_max_inline_insns_recursive_ = 202,/* --param=max-inline-insns-recursive= */
+ OPT__param_max_inline_insns_single_ = 203, /* --param=max-inline-insns-single= */
+ OPT__param_max_inline_insns_size_ = 204, /* --param=max-inline-insns-size= */
+ OPT__param_max_inline_insns_small_ = 205, /* --param=max-inline-insns-small= */
+ OPT__param_max_inline_recursive_depth_auto_ = 206,/* --param=max-inline-recursive-depth-auto= */
+ OPT__param_max_inline_recursive_depth_ = 207,/* --param=max-inline-recursive-depth= */
+ OPT__param_max_isl_operations_ = 208, /* --param=max-isl-operations= */
+ OPT__param_max_iterations_computation_cost_ = 209,/* --param=max-iterations-computation-cost= */
+ OPT__param_max_iterations_to_track_ = 210, /* --param=max-iterations-to-track= */
+ OPT__param_max_jump_thread_duplication_stmts_ = 211,/* --param=max-jump-thread-duplication-stmts= */
+ OPT__param_max_jump_thread_paths_ = 212, /* --param=max-jump-thread-paths= */
+ OPT__param_max_last_value_rtl_ = 213, /* --param=max-last-value-rtl= */
+ OPT__param_max_loop_header_insns_ = 214, /* --param=max-loop-header-insns= */
+ OPT__param_max_modulo_backtrack_attempts_ = 215,/* --param=max-modulo-backtrack-attempts= */
+ OPT__param_max_partial_antic_length_ = 216,/* --param=max-partial-antic-length= */
+ OPT__param_max_peel_branches_ = 217, /* --param=max-peel-branches= */
+ OPT__param_max_peel_times_ = 218, /* --param=max-peel-times= */
+ OPT__param_max_peeled_insns_ = 219, /* --param=max-peeled-insns= */
+ OPT__param_max_pending_list_length_ = 220, /* --param=max-pending-list-length= */
+ OPT__param_max_pipeline_region_blocks_ = 221,/* --param=max-pipeline-region-blocks= */
+ OPT__param_max_pipeline_region_insns_ = 222,/* --param=max-pipeline-region-insns= */
+ OPT__param_max_pow_sqrt_depth_ = 223, /* --param=max-pow-sqrt-depth= */
+ OPT__param_max_predicted_iterations_ = 224,/* --param=max-predicted-iterations= */
+ OPT__param_max_reload_search_insns_ = 225, /* --param=max-reload-search-insns= */
+ OPT__param_max_rtl_if_conversion_insns_ = 226,/* --param=max-rtl-if-conversion-insns= */
+ OPT__param_max_rtl_if_conversion_predictable_cost_ = 227,/* --param=max-rtl-if-conversion-predictable-cost= */
+ OPT__param_max_rtl_if_conversion_unpredictable_cost_ = 228,/* --param=max-rtl-if-conversion-unpredictable-cost= */
+ OPT__param_max_sched_extend_regions_iters_ = 229,/* --param=max-sched-extend-regions-iters= */
+ OPT__param_max_sched_insn_conflict_delay_ = 230,/* --param=max-sched-insn-conflict-delay= */
+ OPT__param_max_sched_ready_insns_ = 231, /* --param=max-sched-ready-insns= */
+ OPT__param_max_sched_region_blocks_ = 232, /* --param=max-sched-region-blocks= */
+ OPT__param_max_sched_region_insns_ = 233, /* --param=max-sched-region-insns= */
+ OPT__param_max_slsr_cand_scan_ = 234, /* --param=max-slsr-cand-scan= */
+ OPT__param_max_speculative_devirt_maydefs_ = 235,/* --param=max-speculative-devirt-maydefs= */
+ OPT__param_max_ssa_name_query_depth_ = 236,/* --param=max-ssa-name-query-depth= */
+ OPT__param_max_store_chains_to_track_ = 237,/* --param=max-store-chains-to-track= */
+ OPT__param_max_stores_to_merge_ = 238, /* --param=max-stores-to-merge= */
+ OPT__param_max_stores_to_sink_ = 239, /* --param=max-stores-to-sink= */
+ OPT__param_max_stores_to_track_ = 240, /* --param=max-stores-to-track= */
+ OPT__param_max_tail_merge_comparisons_ = 241,/* --param=max-tail-merge-comparisons= */
+ OPT__param_max_tail_merge_iterations_ = 242,/* --param=max-tail-merge-iterations= */
+ OPT__param_max_tracked_strlens_ = 243, /* --param=max-tracked-strlens= */
+ OPT__param_max_tree_if_conversion_phi_args_ = 244,/* --param=max-tree-if-conversion-phi-args= */
+ OPT__param_max_unroll_times_ = 245, /* --param=max-unroll-times= */
+ OPT__param_max_unrolled_insns_ = 246, /* --param=max-unrolled-insns= */
+ OPT__param_max_unswitch_depth_ = 247, /* --param=max-unswitch-depth= */
+ OPT__param_max_unswitch_insns_ = 248, /* --param=max-unswitch-insns= */
+ OPT__param_max_variable_expansions_in_unroller_ = 249,/* --param=max-variable-expansions-in-unroller= */
+ OPT__param_max_vartrack_expr_depth_ = 250, /* --param=max-vartrack-expr-depth= */
+ OPT__param_max_vartrack_reverse_op_size_ = 251,/* --param=max-vartrack-reverse-op-size= */
+ OPT__param_max_vartrack_size_ = 252, /* --param=max-vartrack-size= */
+ OPT__param_min_crossjump_insns_ = 253, /* --param=min-crossjump-insns= */
+ OPT__param_min_inline_recursive_probability_ = 254,/* --param=min-inline-recursive-probability= */
+ OPT__param_min_insn_to_prefetch_ratio_ = 255,/* --param=min-insn-to-prefetch-ratio= */
+ OPT__param_min_loop_cond_split_prob_ = 256,/* --param=min-loop-cond-split-prob= */
+ OPT__param_min_nondebug_insn_uid_ = 257, /* --param=min-nondebug-insn-uid= */
+ OPT__param_min_pagesize_ = 258, /* --param=min-pagesize= */
+ OPT__param_min_size_for_stack_sharing_ = 259,/* --param=min-size-for-stack-sharing= */
+ OPT__param_min_spec_prob_ = 260, /* --param=min-spec-prob= */
+ OPT__param_min_vect_loop_bound_ = 261, /* --param=min-vect-loop-bound= */
+ OPT__param_modref_max_accesses_ = 262, /* --param=modref-max-accesses= */
+ OPT__param_modref_max_adjustments_ = 263, /* --param=modref-max-adjustments= */
+ OPT__param_modref_max_bases_ = 264, /* --param=modref-max-bases= */
+ OPT__param_modref_max_depth_ = 265, /* --param=modref-max-depth= */
+ OPT__param_modref_max_escape_points_ = 266,/* --param=modref-max-escape-points= */
+ OPT__param_modref_max_refs_ = 267, /* --param=modref-max-refs= */
+ OPT__param_modref_max_tests_ = 268, /* --param=modref-max-tests= */
+ OPT__param_openacc_kernels_ = 269, /* --param=openacc-kernels= */
+ OPT__param_openacc_privatization_ = 270, /* --param=openacc-privatization= */
+ OPT__param_parloops_chunk_size_ = 271, /* --param=parloops-chunk-size= */
+ OPT__param_parloops_min_per_thread_ = 272, /* --param=parloops-min-per-thread= */
+ OPT__param_parloops_schedule_ = 273, /* --param=parloops-schedule= */
+ OPT__param_partial_inlining_entry_probability_ = 274,/* --param=partial-inlining-entry-probability= */
+ OPT__param_predictable_branch_outcome_ = 275,/* --param=predictable-branch-outcome= */
+ OPT__param_prefetch_dynamic_strides_ = 276,/* --param=prefetch-dynamic-strides= */
+ OPT__param_prefetch_latency_ = 277, /* --param=prefetch-latency= */
+ OPT__param_prefetch_min_insn_to_mem_ratio_ = 278,/* --param=prefetch-min-insn-to-mem-ratio= */
+ OPT__param_prefetch_minimum_stride_ = 279, /* --param=prefetch-minimum-stride= */
+ OPT__param_profile_func_internal_id_ = 280,/* --param=profile-func-internal-id= */
+ OPT__param_ranger_debug_ = 281, /* --param=ranger-debug= */
+ OPT__param_ranger_logical_depth_ = 282, /* --param=ranger-logical-depth= */
+ OPT__param_ranger_recompute_depth_ = 283, /* --param=ranger-recompute-depth= */
+ OPT__param_relation_block_limit_ = 284, /* --param=relation-block-limit= */
+ OPT__param_rpo_vn_max_loop_depth_ = 285, /* --param=rpo-vn-max-loop-depth= */
+ OPT__param_sccvn_max_alias_queries_per_access_ = 286,/* --param=sccvn-max-alias-queries-per-access= */
+ OPT__param_scev_max_expr_complexity_ = 287,/* --param=scev-max-expr-complexity= */
+ OPT__param_scev_max_expr_size_ = 288, /* --param=scev-max-expr-size= */
+ OPT__param_sched_autopref_queue_depth_ = 289,/* --param=sched-autopref-queue-depth= */
+ OPT__param_sched_mem_true_dep_cost_ = 290, /* --param=sched-mem-true-dep-cost= */
+ OPT__param_sched_pressure_algorithm_ = 291,/* --param=sched-pressure-algorithm= */
+ OPT__param_sched_spec_prob_cutoff_ = 292, /* --param=sched-spec-prob-cutoff= */
+ OPT__param_sched_state_edge_prob_cutoff_ = 293,/* --param=sched-state-edge-prob-cutoff= */
+ OPT__param_selsched_insns_to_rename_ = 294,/* --param=selsched-insns-to-rename= */
+ OPT__param_selsched_max_lookahead_ = 295, /* --param=selsched-max-lookahead= */
+ OPT__param_selsched_max_sched_times_ = 296,/* --param=selsched-max-sched-times= */
+ OPT__param_simultaneous_prefetches_ = 297, /* --param=simultaneous-prefetches= */
+ OPT__param_sink_frequency_threshold_ = 298,/* --param=sink-frequency-threshold= */
+ OPT__param_sms_dfa_history_ = 299, /* --param=sms-dfa-history= */
+ OPT__param_sms_loop_average_count_threshold_ = 300,/* --param=sms-loop-average-count-threshold= */
+ OPT__param_sms_max_ii_factor_ = 301, /* --param=sms-max-ii-factor= */
+ OPT__param_sms_min_sc_ = 302, /* --param=sms-min-sc= */
+ OPT__param_sra_max_propagations_ = 303, /* --param=sra-max-propagations= */
+ OPT__param_sra_max_scalarization_size_Osize_ = 304,/* --param=sra-max-scalarization-size-Osize= */
+ OPT__param_sra_max_scalarization_size_Ospeed_ = 305,/* --param=sra-max-scalarization-size-Ospeed= */
+ OPT__param_ssa_name_def_chain_limit_ = 306,/* --param=ssa-name-def-chain-limit= */
+ OPT__param_ssp_buffer_size_ = 307, /* --param=ssp-buffer-size= */
+ OPT__param_stack_clash_protection_guard_size_ = 308,/* --param=stack-clash-protection-guard-size= */
+ OPT__param_stack_clash_protection_probe_interval_ = 309,/* --param=stack-clash-protection-probe-interval= */
+ OPT__param_store_merging_allow_unaligned_ = 310,/* --param=store-merging-allow-unaligned= */
+ OPT__param_store_merging_max_size_ = 311, /* --param=store-merging-max-size= */
+ OPT__param_switch_conversion_max_branch_ratio_ = 312,/* --param=switch-conversion-max-branch-ratio= */
+ OPT__param_threader_debug_ = 313, /* --param=threader-debug= */
+ OPT__param_tm_max_aggregate_size_ = 314, /* --param=tm-max-aggregate-size= */
+ OPT__param_tracer_dynamic_coverage_feedback_ = 315,/* --param=tracer-dynamic-coverage-feedback= */
+ OPT__param_tracer_dynamic_coverage_ = 316, /* --param=tracer-dynamic-coverage= */
+ OPT__param_tracer_max_code_growth_ = 317, /* --param=tracer-max-code-growth= */
+ OPT__param_tracer_min_branch_probability_feedback_ = 318,/* --param=tracer-min-branch-probability-feedback= */
+ OPT__param_tracer_min_branch_probability_ = 319,/* --param=tracer-min-branch-probability= */
+ OPT__param_tracer_min_branch_ratio_ = 320, /* --param=tracer-min-branch-ratio= */
+ OPT__param_tree_reassoc_width_ = 321, /* --param=tree-reassoc-width= */
+ OPT__param_tsan_distinguish_volatile_ = 322,/* --param=tsan-distinguish-volatile= */
+ OPT__param_tsan_instrument_func_entry_exit_ = 323,/* --param=tsan-instrument-func-entry-exit= */
+ OPT__param_uninit_control_dep_attempts_ = 324,/* --param=uninit-control-dep-attempts= */
+ OPT__param_uninlined_function_insns_ = 325,/* --param=uninlined-function-insns= */
+ OPT__param_uninlined_function_time_ = 326, /* --param=uninlined-function-time= */
+ OPT__param_uninlined_thunk_insns_ = 327, /* --param=uninlined-thunk-insns= */
+ OPT__param_uninlined_thunk_time_ = 328, /* --param=uninlined-thunk-time= */
+ OPT__param_unlikely_bb_count_fraction_ = 329,/* --param=unlikely-bb-count-fraction= */
+ OPT__param_unroll_jam_max_unroll_ = 330, /* --param=unroll-jam-max-unroll= */
+ OPT__param_unroll_jam_min_percent_ = 331, /* --param=unroll-jam-min-percent= */
+ OPT__param_use_after_scope_direct_emission_threshold_ = 332,/* --param=use-after-scope-direct-emission-threshold= */
+ OPT__param_use_canonical_types_ = 333, /* --param=use-canonical-types= */
+ OPT__param_vect_epilogues_nomask_ = 334, /* --param=vect-epilogues-nomask= */
+ OPT__param_vect_induction_float_ = 335, /* --param=vect-induction-float= */
+ OPT__param_vect_inner_loop_cost_factor_ = 336,/* --param=vect-inner-loop-cost-factor= */
+ OPT__param_vect_max_layout_candidates_ = 337,/* --param=vect-max-layout-candidates= */
+ OPT__param_vect_max_peeling_for_alignment_ = 338,/* --param=vect-max-peeling-for-alignment= */
+ OPT__param_vect_max_version_for_alias_checks_ = 339,/* --param=vect-max-version-for-alias-checks= */
+ OPT__param_vect_max_version_for_alignment_checks_ = 340,/* --param=vect-max-version-for-alignment-checks= */
+ OPT__param_vect_partial_vector_usage_ = 341,/* --param=vect-partial-vector-usage= */
+ /* OPT__pass_exit_codes = 342, */ /* --pass-exit-codes */
+ /* OPT__pedantic = 343, */ /* --pedantic */
+ /* OPT__pedantic_errors = 344, */ /* --pedantic-errors */
+ /* OPT__pie = 345, */ /* --pie */
+ /* OPT__pipe = 346, */ /* --pipe */
+ /* OPT__prefix = 347, */ /* --prefix */
+ /* OPT__prefix_ = 348, */ /* --prefix= */
+ /* OPT__preprocess = 349, */ /* --preprocess */
+ /* OPT__print_file_name = 350, */ /* --print-file-name */
+ /* OPT__print_file_name_ = 351, */ /* --print-file-name= */
+ /* OPT__print_libgcc_file_name = 352, */ /* --print-libgcc-file-name */
+ /* OPT__print_missing_file_dependencies = 353, */ /* --print-missing-file-dependencies */
+ /* OPT__print_multi_directory = 354, */ /* --print-multi-directory */
+ /* OPT__print_multi_lib = 355, */ /* --print-multi-lib */
+ /* OPT__print_multi_os_directory = 356, */ /* --print-multi-os-directory */
+ /* OPT__print_multiarch = 357, */ /* --print-multiarch */
+ /* OPT__print_prog_name = 358, */ /* --print-prog-name */
+ /* OPT__print_prog_name_ = 359, */ /* --print-prog-name= */
+ /* OPT__print_search_dirs = 360, */ /* --print-search-dirs */
+ /* OPT__print_sysroot = 361, */ /* --print-sysroot */
+ /* OPT__print_sysroot_headers_suffix = 362, */ /* --print-sysroot-headers-suffix */
+ /* OPT__profile = 363, */ /* --profile */
+ /* OPT__save_temps = 364, */ /* --save-temps */
+ /* OPT__shared = 365, */ /* --shared */
+ /* OPT__specs = 366, */ /* --specs */
+ /* OPT__specs_ = 367, */ /* --specs= */
+ /* OPT__static = 368, */ /* --static */
+ /* OPT__static_pie = 369, */ /* --static-pie */
+ /* OPT__symbolic = 370, */ /* --symbolic */
+ /* OPT__sysroot = 371, */ /* --sysroot */
+ OPT__sysroot_ = 372, /* --sysroot= */
+ OPT__target_help = 373, /* --target-help */
+ /* OPT__time = 374, */ /* --time */
+ /* OPT__trace_includes = 375, */ /* --trace-includes */
+ /* OPT__traditional = 376, */ /* --traditional */
+ /* OPT__traditional_cpp = 377, */ /* --traditional-cpp */
+ /* OPT__trigraphs = 378, */ /* --trigraphs */
+ /* OPT__undefine_macro = 379, */ /* --undefine-macro */
+ /* OPT__undefine_macro_ = 380, */ /* --undefine-macro= */
+ /* OPT__user_dependencies = 381, */ /* --user-dependencies */
+ /* OPT__verbose = 382, */ /* --verbose */
+ OPT__version = 383, /* --version */
+ /* OPT__write_dependencies = 384, */ /* --write-dependencies */
+ /* OPT__write_user_dependencies = 385, */ /* --write-user-dependencies */
+ OPT_A = 386, /* -A */
+ OPT_B = 387, /* -B */
+ OPT_C = 388, /* -C */
+ OPT_CC = 389, /* -CC */
+ OPT_D = 390, /* -D */
+ OPT_E = 391, /* -E */
+ OPT_F = 392, /* -F */
+ OPT_H = 393, /* -H */
+ OPT_Hd = 394, /* -Hd */
+ OPT_Hf = 395, /* -Hf */
+ OPT_I = 396, /* -I */
+ OPT_J = 397, /* -J */
+ OPT_L = 398, /* -L */
+ OPT_M = 399, /* -M */
+ OPT_MD = 400, /* -MD */
+ OPT_MF = 401, /* -MF */
+ OPT_MG = 402, /* -MG */
+ OPT_MM = 403, /* -MM */
+ OPT_MMD = 404, /* -MMD */
+ OPT_MP = 405, /* -MP */
+ OPT_MQ = 406, /* -MQ */
+ OPT_MT = 407, /* -MT */
+ OPT_Mmodules = 408, /* -Mmodules */
+ OPT_Mno_modules = 409, /* -Mno-modules */
+ OPT_N = 410, /* -N */
+ OPT_O = 411, /* -O */
+ OPT_Ofast = 412, /* -Ofast */
+ OPT_Og = 413, /* -Og */
+ OPT_Os = 414, /* -Os */
+ OPT_Oz = 415, /* -Oz */
+ OPT_P = 416, /* -P */
+ OPT_Q = 417, /* -Q */
+ OPT_Qn = 418, /* -Qn */
+ OPT_Qy = 419, /* -Qy */
+ OPT_R = 420, /* -R */
+ OPT_S = 421, /* -S */
+ OPT_T = 422, /* -T */
+ OPT_Tbss = 423, /* -Tbss */
+ OPT_Tbss_ = 424, /* -Tbss= */
+ OPT_Tdata = 425, /* -Tdata */
+ OPT_Tdata_ = 426, /* -Tdata= */
+ OPT_Ttext = 427, /* -Ttext */
+ OPT_Ttext_ = 428, /* -Ttext= */
+ OPT_U = 429, /* -U */
+ /* OPT_W = 430, */ /* -W */
+ OPT_WNSObject_attribute = 431, /* -WNSObject-attribute */
+ OPT_Wa_ = 432, /* -Wa, */
+ OPT_Wabi = 433, /* -Wabi */
+ OPT_Wabi_tag = 434, /* -Wabi-tag */
+ OPT_Wabi_ = 435, /* -Wabi= */
+ OPT_Wabsolute_value = 436, /* -Wabsolute-value */
+ OPT_Waddress = 437, /* -Waddress */
+ OPT_Waddress_of_packed_member = 438, /* -Waddress-of-packed-member */
+ OPT_Waggregate_return = 439, /* -Waggregate-return */
+ OPT_Waggressive_loop_optimizations = 440, /* -Waggressive-loop-optimizations */
+ OPT_Waliasing = 441, /* -Waliasing */
+ OPT_Walign_commons = 442, /* -Walign-commons */
+ /* OPT_Waligned_new = 443, */ /* -Waligned-new */
+ OPT_Waligned_new_ = 444, /* -Waligned-new= */
+ OPT_Wall = 445, /* -Wall */
+ OPT_Walloc_size_larger_than_ = 446, /* -Walloc-size-larger-than= */
+ OPT_Walloc_zero = 447, /* -Walloc-zero */
+ OPT_Walloca = 448, /* -Walloca */
+ OPT_Walloca_larger_than_ = 449, /* -Walloca-larger-than= */
+ OPT_Wampersand = 450, /* -Wampersand */
+ OPT_Wanalyzer_allocation_size = 451, /* -Wanalyzer-allocation-size */
+ OPT_Wanalyzer_deref_before_check = 452, /* -Wanalyzer-deref-before-check */
+ OPT_Wanalyzer_double_fclose = 453, /* -Wanalyzer-double-fclose */
+ OPT_Wanalyzer_double_free = 454, /* -Wanalyzer-double-free */
+ OPT_Wanalyzer_exposure_through_output_file = 455,/* -Wanalyzer-exposure-through-output-file */
+ OPT_Wanalyzer_exposure_through_uninit_copy = 456,/* -Wanalyzer-exposure-through-uninit-copy */
+ OPT_Wanalyzer_fd_access_mode_mismatch = 457,/* -Wanalyzer-fd-access-mode-mismatch */
+ OPT_Wanalyzer_fd_double_close = 458, /* -Wanalyzer-fd-double-close */
+ OPT_Wanalyzer_fd_leak = 459, /* -Wanalyzer-fd-leak */
+ OPT_Wanalyzer_fd_phase_mismatch = 460, /* -Wanalyzer-fd-phase-mismatch */
+ OPT_Wanalyzer_fd_type_mismatch = 461, /* -Wanalyzer-fd-type-mismatch */
+ OPT_Wanalyzer_fd_use_after_close = 462, /* -Wanalyzer-fd-use-after-close */
+ OPT_Wanalyzer_fd_use_without_check = 463, /* -Wanalyzer-fd-use-without-check */
+ OPT_Wanalyzer_file_leak = 464, /* -Wanalyzer-file-leak */
+ OPT_Wanalyzer_free_of_non_heap = 465, /* -Wanalyzer-free-of-non-heap */
+ OPT_Wanalyzer_imprecise_fp_arithmetic = 466,/* -Wanalyzer-imprecise-fp-arithmetic */
+ OPT_Wanalyzer_infinite_recursion = 467, /* -Wanalyzer-infinite-recursion */
+ OPT_Wanalyzer_jump_through_null = 468, /* -Wanalyzer-jump-through-null */
+ OPT_Wanalyzer_malloc_leak = 469, /* -Wanalyzer-malloc-leak */
+ OPT_Wanalyzer_mismatching_deallocation = 470,/* -Wanalyzer-mismatching-deallocation */
+ OPT_Wanalyzer_null_argument = 471, /* -Wanalyzer-null-argument */
+ OPT_Wanalyzer_null_dereference = 472, /* -Wanalyzer-null-dereference */
+ OPT_Wanalyzer_out_of_bounds = 473, /* -Wanalyzer-out-of-bounds */
+ OPT_Wanalyzer_possible_null_argument = 474,/* -Wanalyzer-possible-null-argument */
+ OPT_Wanalyzer_possible_null_dereference = 475,/* -Wanalyzer-possible-null-dereference */
+ OPT_Wanalyzer_putenv_of_auto_var = 476, /* -Wanalyzer-putenv-of-auto-var */
+ OPT_Wanalyzer_shift_count_negative = 477, /* -Wanalyzer-shift-count-negative */
+ OPT_Wanalyzer_shift_count_overflow = 478, /* -Wanalyzer-shift-count-overflow */
+ OPT_Wanalyzer_stale_setjmp_buffer = 479, /* -Wanalyzer-stale-setjmp-buffer */
+ OPT_Wanalyzer_tainted_allocation_size = 480,/* -Wanalyzer-tainted-allocation-size */
+ OPT_Wanalyzer_tainted_array_index = 481, /* -Wanalyzer-tainted-array-index */
+ OPT_Wanalyzer_tainted_assertion = 482, /* -Wanalyzer-tainted-assertion */
+ OPT_Wanalyzer_tainted_divisor = 483, /* -Wanalyzer-tainted-divisor */
+ OPT_Wanalyzer_tainted_offset = 484, /* -Wanalyzer-tainted-offset */
+ OPT_Wanalyzer_tainted_size = 485, /* -Wanalyzer-tainted-size */
+ OPT_Wanalyzer_too_complex = 486, /* -Wanalyzer-too-complex */
+ OPT_Wanalyzer_unsafe_call_within_signal_handler = 487,/* -Wanalyzer-unsafe-call-within-signal-handler */
+ OPT_Wanalyzer_use_after_free = 488, /* -Wanalyzer-use-after-free */
+ OPT_Wanalyzer_use_of_pointer_in_stale_stack_frame = 489,/* -Wanalyzer-use-of-pointer-in-stale-stack-frame */
+ OPT_Wanalyzer_use_of_uninitialized_value = 490,/* -Wanalyzer-use-of-uninitialized-value */
+ OPT_Wanalyzer_va_arg_type_mismatch = 491, /* -Wanalyzer-va-arg-type-mismatch */
+ OPT_Wanalyzer_va_list_exhausted = 492, /* -Wanalyzer-va-list-exhausted */
+ OPT_Wanalyzer_va_list_leak = 493, /* -Wanalyzer-va-list-leak */
+ OPT_Wanalyzer_va_list_use_after_va_end = 494,/* -Wanalyzer-va-list-use-after-va-end */
+ OPT_Wanalyzer_write_to_const = 495, /* -Wanalyzer-write-to-const */
+ OPT_Wanalyzer_write_to_string_literal = 496,/* -Wanalyzer-write-to-string-literal */
+ OPT_Wargument_mismatch = 497, /* -Wargument-mismatch */
+ OPT_Warith_conversion = 498, /* -Warith-conversion */
+ /* OPT_Warray_bounds = 499, */ /* -Warray-bounds */
+ OPT_Warray_bounds_ = 500, /* -Warray-bounds= */
+ OPT_Warray_compare = 501, /* -Warray-compare */
+ /* OPT_Warray_parameter = 502, */ /* -Warray-parameter */
+ OPT_Warray_parameter_ = 503, /* -Warray-parameter= */
+ OPT_Warray_temporaries = 504, /* -Warray-temporaries */
+ OPT_Wassign_intercept = 505, /* -Wassign-intercept */
+ /* OPT_Wattribute_alias = 506, */ /* -Wattribute-alias */
+ OPT_Wattribute_alias_ = 507, /* -Wattribute-alias= */
+ OPT_Wattribute_warning = 508, /* -Wattribute-warning */
+ OPT_Wattributes = 509, /* -Wattributes */
+ OPT_Wattributes_ = 510, /* -Wattributes= */
+ OPT_Wbad_function_cast = 511, /* -Wbad-function-cast */
+ /* OPT_Wbidi_chars = 512, */ /* -Wbidi-chars */
+ OPT_Wbidi_chars_ = 513, /* -Wbidi-chars= */
+ OPT_Wbool_compare = 514, /* -Wbool-compare */
+ OPT_Wbool_operation = 515, /* -Wbool-operation */
+ OPT_Wbuiltin_declaration_mismatch = 516, /* -Wbuiltin-declaration-mismatch */
+ OPT_Wbuiltin_macro_redefined = 517, /* -Wbuiltin-macro-redefined */
+ OPT_Wc___compat = 518, /* -Wc++-compat */
+ /* OPT_Wc__0x_compat = 519, */ /* -Wc++0x-compat */
+ OPT_Wc__11_compat = 520, /* -Wc++11-compat */
+ OPT_Wc__11_extensions = 521, /* -Wc++11-extensions */
+ OPT_Wc__14_compat = 522, /* -Wc++14-compat */
+ OPT_Wc__14_extensions = 523, /* -Wc++14-extensions */
+ OPT_Wc__17_compat = 524, /* -Wc++17-compat */
+ OPT_Wc__17_extensions = 525, /* -Wc++17-extensions */
+ /* OPT_Wc__1z_compat = 526, */ /* -Wc++1z-compat */
+ OPT_Wc__20_compat = 527, /* -Wc++20-compat */
+ OPT_Wc__20_extensions = 528, /* -Wc++20-extensions */
+ OPT_Wc__23_extensions = 529, /* -Wc++23-extensions */
+ /* OPT_Wc__2a_compat = 530, */ /* -Wc++2a-compat */
+ OPT_Wc_binding_type = 531, /* -Wc-binding-type */
+ OPT_Wc11_c2x_compat = 532, /* -Wc11-c2x-compat */
+ OPT_Wc90_c99_compat = 533, /* -Wc90-c99-compat */
+ OPT_Wc99_c11_compat = 534, /* -Wc99-c11-compat */
+ OPT_Wcannot_profile = 535, /* -Wcannot-profile */
+ OPT_Wcase_enum = 536, /* -Wcase-enum */
+ OPT_Wcast_align = 537, /* -Wcast-align */
+ OPT_Wcast_align_strict = 538, /* -Wcast-align=strict */
+ OPT_Wcast_function_type = 539, /* -Wcast-function-type */
+ OPT_Wcast_qual = 540, /* -Wcast-qual */
+ OPT_Wcast_result = 541, /* -Wcast-result */
+ /* OPT_Wcatch_value = 542, */ /* -Wcatch-value */
+ OPT_Wcatch_value_ = 543, /* -Wcatch-value= */
+ OPT_Wchanges_meaning = 544, /* -Wchanges-meaning */
+ OPT_Wchar_subscripts = 545, /* -Wchar-subscripts */
+ OPT_Wcharacter_truncation = 546, /* -Wcharacter-truncation */
+ OPT_Wchkp = 547, /* -Wchkp */
+ OPT_Wclass_conversion = 548, /* -Wclass-conversion */
+ OPT_Wclass_memaccess = 549, /* -Wclass-memaccess */
+ OPT_Wclobbered = 550, /* -Wclobbered */
+ OPT_Wcomma_subscript = 551, /* -Wcomma-subscript */
+ OPT_Wcomment = 552, /* -Wcomment */
+ /* OPT_Wcomments = 553, */ /* -Wcomments */
+ OPT_Wcompare_reals = 554, /* -Wcompare-reals */
+ OPT_Wcomplain_wrong_lang = 555, /* -Wcomplain-wrong-lang */
+ OPT_Wconditionally_supported = 556, /* -Wconditionally-supported */
+ OPT_Wconversion = 557, /* -Wconversion */
+ OPT_Wconversion_extra = 558, /* -Wconversion-extra */
+ OPT_Wconversion_null = 559, /* -Wconversion-null */
+ OPT_Wcoverage_invalid_line_number = 560, /* -Wcoverage-invalid-line-number */
+ OPT_Wcoverage_mismatch = 561, /* -Wcoverage-mismatch */
+ OPT_Wcpp = 562, /* -Wcpp */
+ OPT_Wctad_maybe_unsupported = 563, /* -Wctad-maybe-unsupported */
+ OPT_Wctor_dtor_privacy = 564, /* -Wctor-dtor-privacy */
+ OPT_Wdangling_else = 565, /* -Wdangling-else */
+ /* OPT_Wdangling_pointer = 566, */ /* -Wdangling-pointer */
+ OPT_Wdangling_pointer_ = 567, /* -Wdangling-pointer= */
+ OPT_Wdangling_reference = 568, /* -Wdangling-reference */
+ OPT_Wdate_time = 569, /* -Wdate-time */
+ OPT_Wdeclaration_after_statement = 570, /* -Wdeclaration-after-statement */
+ OPT_Wdelete_incomplete = 571, /* -Wdelete-incomplete */
+ OPT_Wdelete_non_virtual_dtor = 572, /* -Wdelete-non-virtual-dtor */
+ OPT_Wdeprecated = 573, /* -Wdeprecated */
+ OPT_Wdeprecated_copy = 574, /* -Wdeprecated-copy */
+ OPT_Wdeprecated_copy_dtor = 575, /* -Wdeprecated-copy-dtor */
+ OPT_Wdeprecated_declarations = 576, /* -Wdeprecated-declarations */
+ OPT_Wdeprecated_enum_enum_conversion = 577,/* -Wdeprecated-enum-enum-conversion */
+ OPT_Wdeprecated_enum_float_conversion = 578,/* -Wdeprecated-enum-float-conversion */
+ OPT_Wdesignated_init = 579, /* -Wdesignated-init */
+ OPT_Wdisabled_optimization = 580, /* -Wdisabled-optimization */
+ OPT_Wdiscarded_array_qualifiers = 581, /* -Wdiscarded-array-qualifiers */
+ OPT_Wdiscarded_qualifiers = 582, /* -Wdiscarded-qualifiers */
+ OPT_Wdiv_by_zero = 583, /* -Wdiv-by-zero */
+ OPT_Wdo_subscript = 584, /* -Wdo-subscript */
+ OPT_Wdouble_promotion = 585, /* -Wdouble-promotion */
+ OPT_Wduplicate_decl_specifier = 586, /* -Wduplicate-decl-specifier */
+ OPT_Wduplicated_branches = 587, /* -Wduplicated-branches */
+ OPT_Wduplicated_cond = 588, /* -Wduplicated-cond */
+ OPT_Weffc__ = 589, /* -Weffc++ */
+ OPT_Wempty_body = 590, /* -Wempty-body */
+ OPT_Wendif_labels = 591, /* -Wendif-labels */
+ OPT_Wenum_compare = 592, /* -Wenum-compare */
+ OPT_Wenum_conversion = 593, /* -Wenum-conversion */
+ OPT_Wenum_int_mismatch = 594, /* -Wenum-int-mismatch */
+ OPT_Werror = 595, /* -Werror */
+ /* OPT_Werror_implicit_function_declaration = 596, */ /* -Werror-implicit-function-declaration */
+ OPT_Werror_ = 597, /* -Werror= */
+ OPT_Wexceptions = 598, /* -Wexceptions */
+ OPT_Wexpansion_to_defined = 599, /* -Wexpansion-to-defined */
+ OPT_Wextra = 600, /* -Wextra */
+ OPT_Wextra_semi = 601, /* -Wextra-semi */
+ OPT_Wfatal_errors = 602, /* -Wfatal-errors */
+ OPT_Wfloat_conversion = 603, /* -Wfloat-conversion */
+ OPT_Wfloat_equal = 604, /* -Wfloat-equal */
+ /* OPT_Wformat = 605, */ /* -Wformat */
+ OPT_Wformat_contains_nul = 606, /* -Wformat-contains-nul */
+ OPT_Wformat_diag = 607, /* -Wformat-diag */
+ OPT_Wformat_extra_args = 608, /* -Wformat-extra-args */
+ OPT_Wformat_nonliteral = 609, /* -Wformat-nonliteral */
+ /* OPT_Wformat_overflow = 610, */ /* -Wformat-overflow */
+ OPT_Wformat_overflow_ = 611, /* -Wformat-overflow= */
+ OPT_Wformat_security = 612, /* -Wformat-security */
+ OPT_Wformat_signedness = 613, /* -Wformat-signedness */
+ /* OPT_Wformat_truncation = 614, */ /* -Wformat-truncation */
+ OPT_Wformat_truncation_ = 615, /* -Wformat-truncation= */
+ OPT_Wformat_y2k = 616, /* -Wformat-y2k */
+ OPT_Wformat_zero_length = 617, /* -Wformat-zero-length */
+ OPT_Wformat_ = 618, /* -Wformat= */
+ OPT_Wframe_address = 619, /* -Wframe-address */
+ OPT_Wframe_larger_than_ = 620, /* -Wframe-larger-than= */
+ OPT_Wfree_nonheap_object = 621, /* -Wfree-nonheap-object */
+ OPT_Wfrontend_loop_interchange = 622, /* -Wfrontend-loop-interchange */
+ OPT_Wfunction_elimination = 623, /* -Wfunction-elimination */
+ /* OPT_Whsa = 624, */ /* -Whsa */
+ OPT_Wif_not_aligned = 625, /* -Wif-not-aligned */
+ OPT_Wignored_attributes = 626, /* -Wignored-attributes */
+ OPT_Wignored_qualifiers = 627, /* -Wignored-qualifiers */
+ OPT_Wimplicit = 628, /* -Wimplicit */
+ /* OPT_Wimplicit_fallthrough = 629, */ /* -Wimplicit-fallthrough */
+ OPT_Wimplicit_fallthrough_ = 630, /* -Wimplicit-fallthrough= */
+ OPT_Wimplicit_function_declaration = 631, /* -Wimplicit-function-declaration */
+ OPT_Wimplicit_int = 632, /* -Wimplicit-int */
+ OPT_Wimplicit_interface = 633, /* -Wimplicit-interface */
+ OPT_Wimplicit_procedure = 634, /* -Wimplicit-procedure */
+ /* OPT_Wimport = 635, */ /* -Wimport */
+ OPT_Winaccessible_base = 636, /* -Winaccessible-base */
+ OPT_Wincompatible_pointer_types = 637, /* -Wincompatible-pointer-types */
+ OPT_Winfinite_recursion = 638, /* -Winfinite-recursion */
+ OPT_Winherited_variadic_ctor = 639, /* -Winherited-variadic-ctor */
+ OPT_Winit_list_lifetime = 640, /* -Winit-list-lifetime */
+ OPT_Winit_self = 641, /* -Winit-self */
+ OPT_Winline = 642, /* -Winline */
+ OPT_Wint_conversion = 643, /* -Wint-conversion */
+ OPT_Wint_in_bool_context = 644, /* -Wint-in-bool-context */
+ OPT_Wint_to_pointer_cast = 645, /* -Wint-to-pointer-cast */
+ OPT_Winteger_division = 646, /* -Winteger-division */
+ OPT_Winterference_size = 647, /* -Winterference-size */
+ OPT_Wintrinsic_shadow = 648, /* -Wintrinsic-shadow */
+ OPT_Wintrinsics_std = 649, /* -Wintrinsics-std */
+ OPT_Winvalid_constexpr = 650, /* -Winvalid-constexpr */
+ OPT_Winvalid_imported_macros = 651, /* -Winvalid-imported-macros */
+ OPT_Winvalid_memory_model = 652, /* -Winvalid-memory-model */
+ OPT_Winvalid_offsetof = 653, /* -Winvalid-offsetof */
+ OPT_Winvalid_pch = 654, /* -Winvalid-pch */
+ OPT_Winvalid_utf8 = 655, /* -Winvalid-utf8 */
+ OPT_Wjump_misses_init = 656, /* -Wjump-misses-init */
+ OPT_Wl_ = 657, /* -Wl, */
+ /* OPT_Wlarger_than_ = 658, */ /* -Wlarger-than- */
+ OPT_Wlarger_than_ = 659, /* -Wlarger-than= */
+ OPT_Wline_truncation = 660, /* -Wline-truncation */
+ OPT_Wliteral_suffix = 661, /* -Wliteral-suffix */
+ OPT_Wlogical_not_parentheses = 662, /* -Wlogical-not-parentheses */
+ OPT_Wlogical_op = 663, /* -Wlogical-op */
+ OPT_Wlong_long = 664, /* -Wlong-long */
+ OPT_Wlto_type_mismatch = 665, /* -Wlto-type-mismatch */
+ OPT_Wmain = 666, /* -Wmain */
+ OPT_Wmaybe_uninitialized = 667, /* -Wmaybe-uninitialized */
+ OPT_Wmemset_elt_size = 668, /* -Wmemset-elt-size */
+ OPT_Wmemset_transposed_args = 669, /* -Wmemset-transposed-args */
+ OPT_Wmisleading_indentation = 670, /* -Wmisleading-indentation */
+ OPT_Wmismatched_dealloc = 671, /* -Wmismatched-dealloc */
+ OPT_Wmismatched_new_delete = 672, /* -Wmismatched-new-delete */
+ OPT_Wmismatched_special_enum = 673, /* -Wmismatched-special-enum */
+ OPT_Wmismatched_tags = 674, /* -Wmismatched-tags */
+ OPT_Wmissing_attributes = 675, /* -Wmissing-attributes */
+ OPT_Wmissing_braces = 676, /* -Wmissing-braces */
+ OPT_Wmissing_declarations = 677, /* -Wmissing-declarations */
+ OPT_Wmissing_field_initializers = 678, /* -Wmissing-field-initializers */
+ /* OPT_Wmissing_format_attribute = 679, */ /* -Wmissing-format-attribute */
+ OPT_Wmissing_include_dirs = 680, /* -Wmissing-include-dirs */
+ /* OPT_Wmissing_noreturn = 681, */ /* -Wmissing-noreturn */
+ OPT_Wmissing_parameter_type = 682, /* -Wmissing-parameter-type */
+ OPT_Wmissing_profile = 683, /* -Wmissing-profile */
+ OPT_Wmissing_prototypes = 684, /* -Wmissing-prototypes */
+ OPT_Wmissing_requires = 685, /* -Wmissing-requires */
+ OPT_Wmissing_template_keyword = 686, /* -Wmissing-template-keyword */
+ OPT_Wmudflap = 687, /* -Wmudflap */
+ OPT_Wmultichar = 688, /* -Wmultichar */
+ OPT_Wmultiple_inheritance = 689, /* -Wmultiple-inheritance */
+ OPT_Wmultistatement_macros = 690, /* -Wmultistatement-macros */
+ OPT_Wnamespaces = 691, /* -Wnamespaces */
+ OPT_Wnarrowing = 692, /* -Wnarrowing */
+ OPT_Wnested_externs = 693, /* -Wnested-externs */
+ /* OPT_Wno_alloc_size_larger_than = 694, *//* -Wno-alloc-size-larger-than */
+ /* OPT_Wno_alloca_larger_than = 695, */ /* -Wno-alloca-larger-than */
+ /* OPT_Wno_frame_larger_than = 696, */ /* -Wno-frame-larger-than */
+ /* OPT_Wno_larger_than = 697, */ /* -Wno-larger-than */
+ /* OPT_Wno_stack_usage = 698, */ /* -Wno-stack-usage */
+ /* OPT_Wno_vla_larger_than = 699, */ /* -Wno-vla-larger-than */
+ OPT_Wnoexcept = 700, /* -Wnoexcept */
+ OPT_Wnoexcept_type = 701, /* -Wnoexcept-type */
+ OPT_Wnon_template_friend = 702, /* -Wnon-template-friend */
+ OPT_Wnon_virtual_dtor = 703, /* -Wnon-virtual-dtor */
+ OPT_Wnonnull = 704, /* -Wnonnull */
+ OPT_Wnonnull_compare = 705, /* -Wnonnull-compare */
+ /* OPT_Wnormalized = 706, */ /* -Wnormalized */
+ OPT_Wnormalized_ = 707, /* -Wnormalized= */
+ OPT_Wnull_dereference = 708, /* -Wnull-dereference */
+ OPT_Wobjc_root_class = 709, /* -Wobjc-root-class */
+ OPT_Wodr = 710, /* -Wodr */
+ OPT_Wold_style_cast = 711, /* -Wold-style-cast */
+ OPT_Wold_style_declaration = 712, /* -Wold-style-declaration */
+ OPT_Wold_style_definition = 713, /* -Wold-style-definition */
+ OPT_Wopenacc_parallelism = 714, /* -Wopenacc-parallelism */
+ OPT_Wopenmp_simd = 715, /* -Wopenmp-simd */
+ OPT_Woverflow = 716, /* -Woverflow */
+ OPT_Woverlength_strings = 717, /* -Woverlength-strings */
+ /* OPT_Woverloaded_virtual = 718, */ /* -Woverloaded-virtual */
+ OPT_Woverloaded_virtual_ = 719, /* -Woverloaded-virtual= */
+ OPT_Woverride_init = 720, /* -Woverride-init */
+ OPT_Woverride_init_side_effects = 721, /* -Woverride-init-side-effects */
+ OPT_Woverwrite_recursive = 722, /* -Woverwrite-recursive */
+ OPT_Wp_ = 723, /* -Wp, */
+ OPT_Wpacked = 724, /* -Wpacked */
+ OPT_Wpacked_bitfield_compat = 725, /* -Wpacked-bitfield-compat */
+ OPT_Wpacked_not_aligned = 726, /* -Wpacked-not-aligned */
+ OPT_Wpadded = 727, /* -Wpadded */
+ OPT_Wparentheses = 728, /* -Wparentheses */
+ OPT_Wpedantic = 729, /* -Wpedantic */
+ OPT_Wpedantic_cast = 730, /* -Wpedantic-cast */
+ OPT_Wpedantic_param_names = 731, /* -Wpedantic-param-names */
+ OPT_Wpessimizing_move = 732, /* -Wpessimizing-move */
+ /* OPT_Wplacement_new = 733, */ /* -Wplacement-new */
+ OPT_Wplacement_new_ = 734, /* -Wplacement-new= */
+ OPT_Wpmf_conversions = 735, /* -Wpmf-conversions */
+ OPT_Wpointer_arith = 736, /* -Wpointer-arith */
+ OPT_Wpointer_compare = 737, /* -Wpointer-compare */
+ OPT_Wpointer_sign = 738, /* -Wpointer-sign */
+ OPT_Wpointer_to_int_cast = 739, /* -Wpointer-to-int-cast */
+ OPT_Wpragmas = 740, /* -Wpragmas */
+ OPT_Wprio_ctor_dtor = 741, /* -Wprio-ctor-dtor */
+ OPT_Wproperty_assign_default = 742, /* -Wproperty-assign-default */
+ OPT_Wprotocol = 743, /* -Wprotocol */
+ OPT_Wpsabi = 744, /* -Wpsabi */
+ OPT_Wrange_loop_construct = 745, /* -Wrange-loop-construct */
+ OPT_Wreal_q_constant = 746, /* -Wreal-q-constant */
+ OPT_Wrealloc_lhs = 747, /* -Wrealloc-lhs */
+ OPT_Wrealloc_lhs_all = 748, /* -Wrealloc-lhs-all */
+ OPT_Wredundant_decls = 749, /* -Wredundant-decls */
+ OPT_Wredundant_move = 750, /* -Wredundant-move */
+ OPT_Wredundant_tags = 751, /* -Wredundant-tags */
+ OPT_Wregister = 752, /* -Wregister */
+ OPT_Wreorder = 753, /* -Wreorder */
+ OPT_Wrestrict = 754, /* -Wrestrict */
+ OPT_Wreturn_local_addr = 755, /* -Wreturn-local-addr */
+ OPT_Wreturn_type = 756, /* -Wreturn-type */
+ OPT_Wscalar_storage_order = 757, /* -Wscalar-storage-order */
+ OPT_Wselector = 758, /* -Wselector */
+ OPT_Wself_move = 759, /* -Wself-move */
+ OPT_Wsequence_point = 760, /* -Wsequence-point */
+ OPT_Wshadow = 761, /* -Wshadow */
+ /* OPT_Wshadow_compatible_local = 762, */ /* -Wshadow-compatible-local */
+ OPT_Wshadow_ivar = 763, /* -Wshadow-ivar */
+ /* OPT_Wshadow_local = 764, */ /* -Wshadow-local */
+ OPT_Wshadow_compatible_local = 765, /* -Wshadow=compatible-local */
+ /* OPT_Wshadow_global = 766, */ /* -Wshadow=global */
+ OPT_Wshadow_local = 767, /* -Wshadow=local */
+ OPT_Wshift_count_negative = 768, /* -Wshift-count-negative */
+ OPT_Wshift_count_overflow = 769, /* -Wshift-count-overflow */
+ OPT_Wshift_negative_value = 770, /* -Wshift-negative-value */
+ /* OPT_Wshift_overflow = 771, */ /* -Wshift-overflow */
+ OPT_Wshift_overflow_ = 772, /* -Wshift-overflow= */
+ OPT_Wsign_compare = 773, /* -Wsign-compare */
+ OPT_Wsign_conversion = 774, /* -Wsign-conversion */
+ OPT_Wsign_promo = 775, /* -Wsign-promo */
+ OPT_Wsized_deallocation = 776, /* -Wsized-deallocation */
+ OPT_Wsizeof_array_argument = 777, /* -Wsizeof-array-argument */
+ OPT_Wsizeof_array_div = 778, /* -Wsizeof-array-div */
+ OPT_Wsizeof_pointer_div = 779, /* -Wsizeof-pointer-div */
+ OPT_Wsizeof_pointer_memaccess = 780, /* -Wsizeof-pointer-memaccess */
+ OPT_Wspeculative = 781, /* -Wspeculative */
+ OPT_Wstack_protector = 782, /* -Wstack-protector */
+ OPT_Wstack_usage_ = 783, /* -Wstack-usage= */
+ OPT_Wstrict_aliasing = 784, /* -Wstrict-aliasing */
+ OPT_Wstrict_aliasing_ = 785, /* -Wstrict-aliasing= */
+ OPT_Wstrict_flex_arrays = 786, /* -Wstrict-flex-arrays */
+ OPT_Wstrict_null_sentinel = 787, /* -Wstrict-null-sentinel */
+ OPT_Wstrict_overflow = 788, /* -Wstrict-overflow */
+ OPT_Wstrict_overflow_ = 789, /* -Wstrict-overflow= */
+ OPT_Wstrict_prototypes = 790, /* -Wstrict-prototypes */
+ OPT_Wstrict_selector_match = 791, /* -Wstrict-selector-match */
+ OPT_Wstring_compare = 792, /* -Wstring-compare */
+ /* OPT_Wstringop_overflow = 793, */ /* -Wstringop-overflow */
+ OPT_Wstringop_overflow_ = 794, /* -Wstringop-overflow= */
+ OPT_Wstringop_overread = 795, /* -Wstringop-overread */
+ OPT_Wstringop_truncation = 796, /* -Wstringop-truncation */
+ OPT_Wstyle = 797, /* -Wstyle */
+ OPT_Wsubobject_linkage = 798, /* -Wsubobject-linkage */
+ OPT_Wsuggest_attribute_cold = 799, /* -Wsuggest-attribute=cold */
+ OPT_Wsuggest_attribute_const = 800, /* -Wsuggest-attribute=const */
+ OPT_Wsuggest_attribute_format = 801, /* -Wsuggest-attribute=format */
+ OPT_Wsuggest_attribute_malloc = 802, /* -Wsuggest-attribute=malloc */
+ OPT_Wsuggest_attribute_noreturn = 803, /* -Wsuggest-attribute=noreturn */
+ OPT_Wsuggest_attribute_pure = 804, /* -Wsuggest-attribute=pure */
+ OPT_Wsuggest_final_methods = 805, /* -Wsuggest-final-methods */
+ OPT_Wsuggest_final_types = 806, /* -Wsuggest-final-types */
+ OPT_Wsuggest_override = 807, /* -Wsuggest-override */
+ OPT_Wsurprising = 808, /* -Wsurprising */
+ OPT_Wswitch = 809, /* -Wswitch */
+ OPT_Wswitch_bool = 810, /* -Wswitch-bool */
+ OPT_Wswitch_default = 811, /* -Wswitch-default */
+ OPT_Wswitch_enum = 812, /* -Wswitch-enum */
+ OPT_Wswitch_outside_range = 813, /* -Wswitch-outside-range */
+ OPT_Wswitch_unreachable = 814, /* -Wswitch-unreachable */
+ OPT_Wsync_nand = 815, /* -Wsync-nand */
+ OPT_Wsynth = 816, /* -Wsynth */
+ OPT_Wsystem_headers = 817, /* -Wsystem-headers */
+ OPT_Wtabs = 818, /* -Wtabs */
+ OPT_Wtarget_lifetime = 819, /* -Wtarget-lifetime */
+ OPT_Wtautological_compare = 820, /* -Wtautological-compare */
+ OPT_Wtemplates = 821, /* -Wtemplates */
+ OPT_Wterminate = 822, /* -Wterminate */
+ OPT_Wtraditional = 823, /* -Wtraditional */
+ OPT_Wtraditional_conversion = 824, /* -Wtraditional-conversion */
+ OPT_Wtrampolines = 825, /* -Wtrampolines */
+ OPT_Wtrigraphs = 826, /* -Wtrigraphs */
+ OPT_Wtrivial_auto_var_init = 827, /* -Wtrivial-auto-var-init */
+ OPT_Wtsan = 828, /* -Wtsan */
+ OPT_Wtype_limits = 829, /* -Wtype-limits */
+ OPT_Wundeclared_selector = 830, /* -Wundeclared-selector */
+ OPT_Wundef = 831, /* -Wundef */
+ OPT_Wundefined_do_loop = 832, /* -Wundefined-do-loop */
+ OPT_Wunderflow = 833, /* -Wunderflow */
+ OPT_Wunicode = 834, /* -Wunicode */
+ OPT_Wuninit_variable_checking = 835, /* -Wuninit-variable-checking */
+ OPT_Wuninit_variable_checking_ = 836, /* -Wuninit-variable-checking= */
+ OPT_Wuninitialized = 837, /* -Wuninitialized */
+ OPT_Wunknown_pragmas = 838, /* -Wunknown-pragmas */
+ /* OPT_Wunreachable_code = 839, */ /* -Wunreachable-code */
+ /* OPT_Wunsafe_loop_optimizations = 840, *//* -Wunsafe-loop-optimizations */
+ OPT_Wunsuffixed_float_constants = 841, /* -Wunsuffixed-float-constants */
+ OPT_Wunused = 842, /* -Wunused */
+ OPT_Wunused_but_set_parameter = 843, /* -Wunused-but-set-parameter */
+ OPT_Wunused_but_set_variable = 844, /* -Wunused-but-set-variable */
+ /* OPT_Wunused_const_variable = 845, */ /* -Wunused-const-variable */
+ OPT_Wunused_const_variable_ = 846, /* -Wunused-const-variable= */
+ OPT_Wunused_dummy_argument = 847, /* -Wunused-dummy-argument */
+ OPT_Wunused_function = 848, /* -Wunused-function */
+ OPT_Wunused_label = 849, /* -Wunused-label */
+ OPT_Wunused_local_typedefs = 850, /* -Wunused-local-typedefs */
+ OPT_Wunused_macros = 851, /* -Wunused-macros */
+ OPT_Wunused_parameter = 852, /* -Wunused-parameter */
+ OPT_Wunused_result = 853, /* -Wunused-result */
+ OPT_Wunused_value = 854, /* -Wunused-value */
+ OPT_Wunused_variable = 855, /* -Wunused-variable */
+ OPT_Wuse_after_free = 856, /* -Wuse-after-free */
+ OPT_Wuse_after_free_ = 857, /* -Wuse-after-free= */
+ OPT_Wuse_without_only = 858, /* -Wuse-without-only */
+ OPT_Wuseless_cast = 859, /* -Wuseless-cast */
+ OPT_Wvarargs = 860, /* -Wvarargs */
+ OPT_Wvariadic_macros = 861, /* -Wvariadic-macros */
+ OPT_Wvector_operation_performance = 862, /* -Wvector-operation-performance */
+ OPT_Wverbose_unbounded = 863, /* -Wverbose-unbounded */
+ OPT_Wvexing_parse = 864, /* -Wvexing-parse */
+ OPT_Wvirtual_inheritance = 865, /* -Wvirtual-inheritance */
+ OPT_Wvirtual_move_assign = 866, /* -Wvirtual-move-assign */
+ OPT_Wvla = 867, /* -Wvla */
+ OPT_Wvla_larger_than_ = 868, /* -Wvla-larger-than= */
+ OPT_Wvla_parameter = 869, /* -Wvla-parameter */
+ OPT_Wvolatile = 870, /* -Wvolatile */
+ OPT_Wvolatile_register_var = 871, /* -Wvolatile-register-var */
+ OPT_Wwrite_strings = 872, /* -Wwrite-strings */
+ OPT_Wxor_used_as_pow = 873, /* -Wxor-used-as-pow */
+ OPT_Wzero_as_null_pointer_constant = 874, /* -Wzero-as-null-pointer-constant */
+ OPT_Wzero_length_bounds = 875, /* -Wzero-length-bounds */
+ OPT_Wzerotrip = 876, /* -Wzerotrip */
+ OPT_X = 877, /* -X */
+ OPT_Xassembler = 878, /* -Xassembler */
+ OPT_Xf = 879, /* -Xf */
+ OPT_Xlinker = 880, /* -Xlinker */
+ OPT_Xpreprocessor = 881, /* -Xpreprocessor */
+ OPT_Z = 882, /* -Z */
+ OPT_ansi = 883, /* -ansi */
+ OPT_aux_info = 884, /* -aux-info */
+ /* OPT_aux_info_ = 885, */ /* -aux-info= */
+ OPT_c = 886, /* -c */
+ OPT_callgraph = 887, /* -callgraph */
+ OPT_coverage = 888, /* -coverage */
+ OPT_cpp = 889, /* -cpp */
+ OPT_cpp_ = 890, /* -cpp= */
+ OPT_d = 891, /* -d */
+ OPT_debuglib_ = 892, /* -debuglib= */
+ OPT_defaultlib_ = 893, /* -defaultlib= */
+ OPT_defined_only = 894, /* -defined-only */
+ OPT_demangle = 895, /* -demangle */
+ OPT_dstartfiles = 896, /* -dstartfiles */
+ OPT_dump_body_ = 897, /* -dump-body= */
+ OPT_dump_level_ = 898, /* -dump-level= */
+ OPT_dumpbase = 899, /* -dumpbase */
+ OPT_dumpbase_ext = 900, /* -dumpbase-ext */
+ OPT_dumpdir = 901, /* -dumpdir */
+ OPT_dumpfullversion = 902, /* -dumpfullversion */
+ OPT_dumpmachine = 903, /* -dumpmachine */
+ OPT_dumpspecs = 904, /* -dumpspecs */
+ OPT_dumpversion = 905, /* -dumpversion */
+ OPT_e = 906, /* -e */
+ OPT_export_dynamic = 907, /* -export-dynamic */
+ OPT_fPIC = 908, /* -fPIC */
+ OPT_fPIE = 909, /* -fPIE */
+ OPT_fRTS_ = 910, /* -fRTS= */
+ OPT_fabi_compat_version_ = 911, /* -fabi-compat-version= */
+ OPT_fabi_version_ = 912, /* -fabi-version= */
+ OPT_faccess_control = 913, /* -faccess-control */
+ OPT_fada_spec_parent_ = 914, /* -fada-spec-parent= */
+ OPT_faggressive_function_elimination = 915,/* -faggressive-function-elimination */
+ OPT_faggressive_loop_optimizations = 916, /* -faggressive-loop-optimizations */
+ OPT_falign_commons = 917, /* -falign-commons */
+ OPT_falign_functions = 918, /* -falign-functions */
+ OPT_falign_functions_ = 919, /* -falign-functions= */
+ OPT_falign_jumps = 920, /* -falign-jumps */
+ OPT_falign_jumps_ = 921, /* -falign-jumps= */
+ OPT_falign_labels = 922, /* -falign-labels */
+ OPT_falign_labels_ = 923, /* -falign-labels= */
+ OPT_falign_loops = 924, /* -falign-loops */
+ OPT_falign_loops_ = 925, /* -falign-loops= */
+ /* OPT_faligned_new = 926, */ /* -faligned-new */
+ OPT_faligned_new_ = 927, /* -faligned-new= */
+ OPT_fall_instantiations = 928, /* -fall-instantiations */
+ OPT_fall_intrinsics = 929, /* -fall-intrinsics */
+ OPT_fall_virtual = 930, /* -fall-virtual */
+ OPT_fallocation_dce = 931, /* -fallocation-dce */
+ OPT_fallow_argument_mismatch = 932, /* -fallow-argument-mismatch */
+ OPT_fallow_invalid_boz = 933, /* -fallow-invalid-boz */
+ OPT_fallow_leading_underscore = 934, /* -fallow-leading-underscore */
+ /* OPT_fallow_parameterless_variadic_functions = 935, *//* -fallow-parameterless-variadic-functions */
+ OPT_fallow_store_data_races = 936, /* -fallow-store-data-races */
+ OPT_falt_external_templates = 937, /* -falt-external-templates */
+ OPT_fanalyzer = 938, /* -fanalyzer */
+ OPT_fanalyzer_call_summaries = 939, /* -fanalyzer-call-summaries */
+ OPT_fanalyzer_checker_ = 940, /* -fanalyzer-checker= */
+ OPT_fanalyzer_feasibility = 941, /* -fanalyzer-feasibility */
+ OPT_fanalyzer_fine_grained = 942, /* -fanalyzer-fine-grained */
+ OPT_fanalyzer_show_duplicate_count = 943, /* -fanalyzer-show-duplicate-count */
+ OPT_fanalyzer_state_merge = 944, /* -fanalyzer-state-merge */
+ OPT_fanalyzer_state_purge = 945, /* -fanalyzer-state-purge */
+ OPT_fanalyzer_suppress_followups = 946, /* -fanalyzer-suppress-followups */
+ OPT_fanalyzer_transitivity = 947, /* -fanalyzer-transitivity */
+ OPT_fanalyzer_undo_inlining = 948, /* -fanalyzer-undo-inlining */
+ OPT_fanalyzer_verbose_edges = 949, /* -fanalyzer-verbose-edges */
+ OPT_fanalyzer_verbose_state_changes = 950, /* -fanalyzer-verbose-state-changes */
+ OPT_fanalyzer_verbosity_ = 951, /* -fanalyzer-verbosity= */
+ /* OPT_fargument_alias = 952, */ /* -fargument-alias */
+ /* OPT_fargument_noalias = 953, */ /* -fargument-noalias */
+ /* OPT_fargument_noalias_anything = 954, *//* -fargument-noalias-anything */
+ /* OPT_fargument_noalias_global = 955, */ /* -fargument-noalias-global */
+ OPT_fasan_shadow_offset_ = 956, /* -fasan-shadow-offset= */
+ OPT_fasm = 957, /* -fasm */
+ OPT_fassert = 958, /* -fassert */
+ OPT_fassociative_math = 959, /* -fassociative-math */
+ OPT_fasynchronous_unwind_tables = 960, /* -fasynchronous-unwind-tables */
+ OPT_fauto_inc_dec = 961, /* -fauto-inc-dec */
+ OPT_fauto_init = 962, /* -fauto-init */
+ OPT_fauto_profile = 963, /* -fauto-profile */
+ OPT_fauto_profile_ = 964, /* -fauto-profile= */
+ OPT_fautomatic = 965, /* -fautomatic */
+ OPT_fbackslash = 966, /* -fbackslash */
+ OPT_fbacktrace = 967, /* -fbacktrace */
+ OPT_fbit_tests = 968, /* -fbit-tests */
+ OPT_fblas_matmul_limit_ = 969, /* -fblas-matmul-limit= */
+ OPT_fbounds = 970, /* -fbounds */
+ OPT_fbounds_check = 971, /* -fbounds-check */
+ OPT_fbounds_check_ = 972, /* -fbounds-check= */
+ OPT_fbranch_count_reg = 973, /* -fbranch-count-reg */
+ OPT_fbranch_probabilities = 974, /* -fbranch-probabilities */
+ /* OPT_fbranch_target_load_optimize = 975, *//* -fbranch-target-load-optimize */
+ /* OPT_fbranch_target_load_optimize2 = 976, *//* -fbranch-target-load-optimize2 */
+ /* OPT_fbtr_bb_exclusive = 977, */ /* -fbtr-bb-exclusive */
+ OPT_fbuilding_libgcc = 978, /* -fbuilding-libgcc */
+ OPT_fbuilding_libgfortran = 979, /* -fbuilding-libgfortran */
+ OPT_fbuilding_libphobos_tests = 980, /* -fbuilding-libphobos-tests */
+ OPT_fbuiltin = 981, /* -fbuiltin */
+ OPT_fbuiltin_ = 982, /* -fbuiltin- */
+ OPT_fbuiltin_printf = 983, /* -fbuiltin-printf */
+ OPT_fc_prototypes = 984, /* -fc-prototypes */
+ OPT_fc_prototypes_external = 985, /* -fc-prototypes-external */
+ OPT_fcall_saved_ = 986, /* -fcall-saved- */
+ OPT_fcall_used_ = 987, /* -fcall-used- */
+ OPT_fcaller_saves = 988, /* -fcaller-saves */
+ OPT_fcallgraph_info = 989, /* -fcallgraph-info */
+ OPT_fcallgraph_info_ = 990, /* -fcallgraph-info= */
+ OPT_fcanon_prefix_map = 991, /* -fcanon-prefix-map */
+ OPT_fcanonical_system_headers = 992, /* -fcanonical-system-headers */
+ OPT_fcase = 993, /* -fcase */
+ /* OPT_fcf_protection = 994, */ /* -fcf-protection */
+ OPT_fcf_protection_ = 995, /* -fcf-protection= */
+ OPT_fchar8_t = 996, /* -fchar8_t */
+ OPT_fcheck_array_temporaries = 997, /* -fcheck-array-temporaries */
+ /* OPT_fcheck_data_deps = 998, */ /* -fcheck-data-deps */
+ OPT_fcheck_new = 999, /* -fcheck-new */
+ OPT_fcheck_pointer_bounds = 1000, /* -fcheck-pointer-bounds */
+ OPT_fcheck_ = 1001, /* -fcheck= */
+ /* OPT_fcheck_assert = 1002, */ /* -fcheck=assert */
+ /* OPT_fcheck_bounds = 1003, */ /* -fcheck=bounds */
+ /* OPT_fcheck_in = 1004, */ /* -fcheck=in */
+ /* OPT_fcheck_invariant = 1005, */ /* -fcheck=invariant */
+ /* OPT_fcheck_out = 1006, */ /* -fcheck=out */
+ /* OPT_fcheck_switch = 1007, */ /* -fcheck=switch */
+ OPT_fcheckaction_ = 1008, /* -fcheckaction= */
+ OPT_fchecking = 1009, /* -fchecking */
+ OPT_fchecking_ = 1010, /* -fchecking= */
+ OPT_fchkp_check_incomplete_type = 1011, /* -fchkp-check-incomplete-type */
+ OPT_fchkp_check_read = 1012, /* -fchkp-check-read */
+ OPT_fchkp_check_write = 1013, /* -fchkp-check-write */
+ OPT_fchkp_first_field_has_own_bounds = 1014,/* -fchkp-first-field-has-own-bounds */
+ OPT_fchkp_flexible_struct_trailing_arrays = 1015,/* -fchkp-flexible-struct-trailing-arrays */
+ OPT_fchkp_instrument_calls = 1016, /* -fchkp-instrument-calls */
+ OPT_fchkp_instrument_marked_only = 1017, /* -fchkp-instrument-marked-only */
+ OPT_fchkp_narrow_bounds = 1018, /* -fchkp-narrow-bounds */
+ OPT_fchkp_narrow_to_innermost_array = 1019,/* -fchkp-narrow-to-innermost-array */
+ OPT_fchkp_optimize = 1020, /* -fchkp-optimize */
+ OPT_fchkp_store_bounds = 1021, /* -fchkp-store-bounds */
+ OPT_fchkp_treat_zero_dynamic_size_as_infinite = 1022,/* -fchkp-treat-zero-dynamic-size-as-infinite */
+ OPT_fchkp_use_fast_string_functions = 1023,/* -fchkp-use-fast-string-functions */
+ OPT_fchkp_use_nochk_string_functions = 1024,/* -fchkp-use-nochk-string-functions */
+ OPT_fchkp_use_static_bounds = 1025, /* -fchkp-use-static-bounds */
+ OPT_fchkp_use_static_const_bounds = 1026, /* -fchkp-use-static-const-bounds */
+ OPT_fchkp_use_wrappers = 1027, /* -fchkp-use-wrappers */
+ OPT_fchkp_zero_input_bounds_for_main = 1028,/* -fchkp-zero-input-bounds-for-main */
+ /* OPT_fcilkplus = 1029, */ /* -fcilkplus */
+ OPT_fcoarray_ = 1030, /* -fcoarray= */
+ OPT_fcode_hoisting = 1031, /* -fcode-hoisting */
+ OPT_fcombine_stack_adjustments = 1032, /* -fcombine-stack-adjustments */
+ OPT_fcommon = 1033, /* -fcommon */
+ OPT_fcompare_debug = 1034, /* -fcompare-debug */
+ OPT_fcompare_debug_second = 1035, /* -fcompare-debug-second */
+ OPT_fcompare_debug_ = 1036, /* -fcompare-debug= */
+ OPT_fcompare_elim = 1037, /* -fcompare-elim */
+ OPT_fconcepts = 1038, /* -fconcepts */
+ OPT_fconcepts_diagnostics_depth_ = 1039, /* -fconcepts-diagnostics-depth= */
+ OPT_fconcepts_ts = 1040, /* -fconcepts-ts */
+ OPT_fcond_mismatch = 1041, /* -fcond-mismatch */
+ /* OPT_fconserve_space = 1042, */ /* -fconserve-space */
+ OPT_fconserve_stack = 1043, /* -fconserve-stack */
+ OPT_fconstant_string_class_ = 1044, /* -fconstant-string-class= */
+ OPT_fconstexpr_cache_depth_ = 1045, /* -fconstexpr-cache-depth= */
+ OPT_fconstexpr_depth_ = 1046, /* -fconstexpr-depth= */
+ OPT_fconstexpr_fp_except = 1047, /* -fconstexpr-fp-except */
+ OPT_fconstexpr_loop_limit_ = 1048, /* -fconstexpr-loop-limit= */
+ OPT_fconstexpr_ops_limit_ = 1049, /* -fconstexpr-ops-limit= */
+ OPT_fcontract_assumption_mode_ = 1050, /* -fcontract-assumption-mode= */
+ OPT_fcontract_build_level_ = 1051, /* -fcontract-build-level= */
+ OPT_fcontract_continuation_mode_ = 1052, /* -fcontract-continuation-mode= */
+ OPT_fcontract_mode_ = 1053, /* -fcontract-mode= */
+ OPT_fcontract_role_ = 1054, /* -fcontract-role= */
+ OPT_fcontract_semantic_ = 1055, /* -fcontract-semantic= */
+ OPT_fcontract_strict_declarations_ = 1056, /* -fcontract-strict-declarations= */
+ OPT_fcontracts = 1057, /* -fcontracts */
+ OPT_fconvert_ = 1058, /* -fconvert= */
+ OPT_fcoroutines = 1059, /* -fcoroutines */
+ OPT_fcpp = 1060, /* -fcpp */
+ OPT_fcpp_begin = 1061, /* -fcpp-begin */
+ OPT_fcpp_end = 1062, /* -fcpp-end */
+ OPT_fcprop_registers = 1063, /* -fcprop-registers */
+ OPT_fcray_pointer = 1064, /* -fcray-pointer */
+ OPT_fcrossjumping = 1065, /* -fcrossjumping */
+ OPT_fcse_follow_jumps = 1066, /* -fcse-follow-jumps */
+ /* OPT_fcse_skip_blocks = 1067, */ /* -fcse-skip-blocks */
+ OPT_fcx_fortran_rules = 1068, /* -fcx-fortran-rules */
+ OPT_fcx_limited_range = 1069, /* -fcx-limited-range */
+ OPT_fd = 1070, /* -fd */
+ OPT_fd_lines_as_code = 1071, /* -fd-lines-as-code */
+ OPT_fd_lines_as_comments = 1072, /* -fd-lines-as-comments */
+ OPT_fdata_sections = 1073, /* -fdata-sections */
+ OPT_fdbg_cnt_list = 1074, /* -fdbg-cnt-list */
+ OPT_fdbg_cnt_ = 1075, /* -fdbg-cnt= */
+ OPT_fdce = 1076, /* -fdce */
+ OPT_fdebug = 1077, /* -fdebug */
+ OPT_fdebug_aux_vars = 1078, /* -fdebug-aux-vars */
+ OPT_fdebug_builtins = 1079, /* -fdebug-builtins */
+ OPT_fdebug_cpp = 1080, /* -fdebug-cpp */
+ OPT_fdebug_function_line_numbers = 1081, /* -fdebug-function-line-numbers */
+ OPT_fdebug_prefix_map_ = 1082, /* -fdebug-prefix-map= */
+ OPT_fdebug_trace_api = 1083, /* -fdebug-trace-api */
+ OPT_fdebug_trace_quad = 1084, /* -fdebug-trace-quad */
+ OPT_fdebug_types_section = 1085, /* -fdebug-types-section */
+ OPT_fdebug_ = 1086, /* -fdebug= */
+ OPT_fdec = 1087, /* -fdec */
+ OPT_fdec_blank_format_item = 1088, /* -fdec-blank-format-item */
+ OPT_fdec_char_conversions = 1089, /* -fdec-char-conversions */
+ OPT_fdec_format_defaults = 1090, /* -fdec-format-defaults */
+ OPT_fdec_include = 1091, /* -fdec-include */
+ OPT_fdec_intrinsic_ints = 1092, /* -fdec-intrinsic-ints */
+ OPT_fdec_math = 1093, /* -fdec-math */
+ OPT_fdec_static = 1094, /* -fdec-static */
+ OPT_fdec_structure = 1095, /* -fdec-structure */
+ OPT_fdeclone_ctor_dtor = 1096, /* -fdeclone-ctor-dtor */
+ /* OPT_fdeduce_init_list = 1097, */ /* -fdeduce-init-list */
+ OPT_fdef_ = 1098, /* -fdef= */
+ OPT_fdefault_double_8 = 1099, /* -fdefault-double-8 */
+ /* OPT_fdefault_inline = 1100, */ /* -fdefault-inline */
+ OPT_fdefault_integer_8 = 1101, /* -fdefault-integer-8 */
+ OPT_fdefault_real_10 = 1102, /* -fdefault-real-10 */
+ OPT_fdefault_real_16 = 1103, /* -fdefault-real-16 */
+ OPT_fdefault_real_8 = 1104, /* -fdefault-real-8 */
+ OPT_fdefer_pop = 1105, /* -fdefer-pop */
+ OPT_fdelayed_branch = 1106, /* -fdelayed-branch */
+ OPT_fdelete_dead_exceptions = 1107, /* -fdelete-dead-exceptions */
+ OPT_fdelete_null_pointer_checks = 1108, /* -fdelete-null-pointer-checks */
+ OPT_fdevirtualize = 1109, /* -fdevirtualize */
+ OPT_fdevirtualize_at_ltrans = 1110, /* -fdevirtualize-at-ltrans */
+ OPT_fdevirtualize_speculatively = 1111, /* -fdevirtualize-speculatively */
+ /* OPT_fdiagnostics_color = 1112, */ /* -fdiagnostics-color */
+ OPT_fdiagnostics_color_ = 1113, /* -fdiagnostics-color= */
+ OPT_fdiagnostics_column_origin_ = 1114, /* -fdiagnostics-column-origin= */
+ OPT_fdiagnostics_column_unit_ = 1115, /* -fdiagnostics-column-unit= */
+ OPT_fdiagnostics_escape_format_ = 1116, /* -fdiagnostics-escape-format= */
+ OPT_fdiagnostics_format_ = 1117, /* -fdiagnostics-format= */
+ OPT_fdiagnostics_generate_patch = 1118, /* -fdiagnostics-generate-patch */
+ OPT_fdiagnostics_minimum_margin_width_ = 1119,/* -fdiagnostics-minimum-margin-width= */
+ OPT_fdiagnostics_parseable_fixits = 1120, /* -fdiagnostics-parseable-fixits */
+ OPT_fdiagnostics_path_format_ = 1121, /* -fdiagnostics-path-format= */
+ OPT_fdiagnostics_plain_output = 1122, /* -fdiagnostics-plain-output */
+ OPT_fdiagnostics_show_caret = 1123, /* -fdiagnostics-show-caret */
+ OPT_fdiagnostics_show_cwe = 1124, /* -fdiagnostics-show-cwe */
+ OPT_fdiagnostics_show_labels = 1125, /* -fdiagnostics-show-labels */
+ OPT_fdiagnostics_show_line_numbers = 1126, /* -fdiagnostics-show-line-numbers */
+ OPT_fdiagnostics_show_location_ = 1127, /* -fdiagnostics-show-location= */
+ OPT_fdiagnostics_show_option = 1128, /* -fdiagnostics-show-option */
+ OPT_fdiagnostics_show_path_depths = 1129, /* -fdiagnostics-show-path-depths */
+ OPT_fdiagnostics_show_rules = 1130, /* -fdiagnostics-show-rules */
+ OPT_fdiagnostics_show_template_tree = 1131,/* -fdiagnostics-show-template-tree */
+ OPT_fdiagnostics_urls_ = 1132, /* -fdiagnostics-urls= */
+ OPT_fdirectives_only = 1133, /* -fdirectives-only */
+ OPT_fdisable_ = 1134, /* -fdisable- */
+ OPT_fdoc = 1135, /* -fdoc */
+ OPT_fdoc_dir_ = 1136, /* -fdoc-dir= */
+ OPT_fdoc_file_ = 1137, /* -fdoc-file= */
+ OPT_fdoc_inc_ = 1138, /* -fdoc-inc= */
+ OPT_fdollar_ok = 1139, /* -fdollar-ok */
+ OPT_fdollars_in_identifiers = 1140, /* -fdollars-in-identifiers */
+ OPT_fdruntime = 1141, /* -fdruntime */
+ OPT_fdse = 1142, /* -fdse */
+ OPT_fdump_ = 1143, /* -fdump- */
+ OPT_fdump_ada_spec = 1144, /* -fdump-ada-spec */
+ OPT_fdump_ada_spec_slim = 1145, /* -fdump-ada-spec-slim */
+ OPT_fdump_analyzer = 1146, /* -fdump-analyzer */
+ OPT_fdump_analyzer_callgraph = 1147, /* -fdump-analyzer-callgraph */
+ OPT_fdump_analyzer_exploded_graph = 1148, /* -fdump-analyzer-exploded-graph */
+ OPT_fdump_analyzer_exploded_nodes = 1149, /* -fdump-analyzer-exploded-nodes */
+ OPT_fdump_analyzer_exploded_nodes_2 = 1150,/* -fdump-analyzer-exploded-nodes-2 */
+ OPT_fdump_analyzer_exploded_nodes_3 = 1151,/* -fdump-analyzer-exploded-nodes-3 */
+ OPT_fdump_analyzer_exploded_paths = 1152, /* -fdump-analyzer-exploded-paths */
+ OPT_fdump_analyzer_feasibility = 1153, /* -fdump-analyzer-feasibility */
+ OPT_fdump_analyzer_json = 1154, /* -fdump-analyzer-json */
+ OPT_fdump_analyzer_state_purge = 1155, /* -fdump-analyzer-state-purge */
+ OPT_fdump_analyzer_stderr = 1156, /* -fdump-analyzer-stderr */
+ OPT_fdump_analyzer_supergraph = 1157, /* -fdump-analyzer-supergraph */
+ OPT_fdump_analyzer_untracked = 1158, /* -fdump-analyzer-untracked */
+ OPT_fdump_c___spec_verbose = 1159, /* -fdump-c++-spec-verbose */
+ OPT_fdump_c___spec_ = 1160, /* -fdump-c++-spec= */
+ /* OPT_fdump_core = 1161, */ /* -fdump-core */
+ OPT_fdump_d_original = 1162, /* -fdump-d-original */
+ OPT_fdump_final_insns = 1163, /* -fdump-final-insns */
+ OPT_fdump_final_insns_ = 1164, /* -fdump-final-insns= */
+ OPT_fdump_fortran_global = 1165, /* -fdump-fortran-global */
+ OPT_fdump_fortran_optimized = 1166, /* -fdump-fortran-optimized */
+ OPT_fdump_fortran_original = 1167, /* -fdump-fortran-original */
+ OPT_fdump_go_spec_ = 1168, /* -fdump-go-spec= */
+ OPT_fdump_internal_locations = 1169, /* -fdump-internal-locations */
+ OPT_fdump_noaddr = 1170, /* -fdump-noaddr */
+ /* OPT_fdump_parse_tree = 1171, */ /* -fdump-parse-tree */
+ OPT_fdump_passes = 1172, /* -fdump-passes */
+ OPT_fdump_scos = 1173, /* -fdump-scos */
+ OPT_fdump_system_exports = 1174, /* -fdump-system-exports */
+ OPT_fdump_unnumbered = 1175, /* -fdump-unnumbered */
+ OPT_fdump_unnumbered_links = 1176, /* -fdump-unnumbered-links */
+ OPT_fdwarf2_cfi_asm = 1177, /* -fdwarf2-cfi-asm */
+ OPT_fearly_inlining = 1178, /* -fearly-inlining */
+ OPT_felide_constructors = 1179, /* -felide-constructors */
+ OPT_felide_type = 1180, /* -felide-type */
+ /* OPT_feliminate_dwarf2_dups = 1181, */ /* -feliminate-dwarf2-dups */
+ OPT_feliminate_unused_debug_symbols = 1182,/* -feliminate-unused-debug-symbols */
+ OPT_feliminate_unused_debug_types = 1183, /* -feliminate-unused-debug-types */
+ OPT_femit_class_debug_always = 1184, /* -femit-class-debug-always */
+ OPT_femit_struct_debug_baseonly = 1185, /* -femit-struct-debug-baseonly */
+ OPT_femit_struct_debug_detailed_ = 1186, /* -femit-struct-debug-detailed= */
+ OPT_femit_struct_debug_reduced = 1187, /* -femit-struct-debug-reduced */
+ OPT_fenable_ = 1188, /* -fenable- */
+ OPT_fenforce_eh_specs = 1189, /* -fenforce-eh-specs */
+ OPT_fenum_int_equiv = 1190, /* -fenum-int-equiv */
+ OPT_fexceptions = 1191, /* -fexceptions */
+ OPT_fexcess_precision_ = 1192, /* -fexcess-precision= */
+ OPT_fexec_charset_ = 1193, /* -fexec-charset= */
+ OPT_fexpensive_optimizations = 1194, /* -fexpensive-optimizations */
+ OPT_fext_numeric_literals = 1195, /* -fext-numeric-literals */
+ OPT_fextended_identifiers = 1196, /* -fextended-identifiers */
+ OPT_fextended_opaque = 1197, /* -fextended-opaque */
+ OPT_fextern_std_ = 1198, /* -fextern-std= */
+ OPT_fextern_tls_init = 1199, /* -fextern-tls-init */
+ OPT_fexternal_blas = 1200, /* -fexternal-blas */
+ OPT_fexternal_templates = 1201, /* -fexternal-templates */
+ OPT_ff2c = 1202, /* -ff2c */
+ OPT_ffast_math = 1203, /* -ffast-math */
+ OPT_ffat_lto_objects = 1204, /* -ffat-lto-objects */
+ OPT_ffile_prefix_map_ = 1205, /* -ffile-prefix-map= */
+ OPT_ffinite_loops = 1206, /* -ffinite-loops */
+ OPT_ffinite_math_only = 1207, /* -ffinite-math-only */
+ OPT_ffixed_ = 1208, /* -ffixed- */
+ OPT_ffixed_form = 1209, /* -ffixed-form */
+ OPT_ffixed_line_length_ = 1210, /* -ffixed-line-length- */
+ OPT_ffixed_line_length_none = 1211, /* -ffixed-line-length-none */
+ OPT_ffloat_store = 1212, /* -ffloat-store */
+ OPT_ffloatvalue = 1213, /* -ffloatvalue */
+ OPT_ffold_simple_inlines = 1214, /* -ffold-simple-inlines */
+ OPT_ffor_scope = 1215, /* -ffor-scope */
+ /* OPT_fforce_addr = 1216, */ /* -fforce-addr */
+ OPT_fforward_propagate = 1217, /* -fforward-propagate */
+ OPT_ffp_contract_ = 1218, /* -ffp-contract= */
+ OPT_ffp_int_builtin_inexact = 1219, /* -ffp-int-builtin-inexact */
+ OPT_ffpe_summary_ = 1220, /* -ffpe-summary= */
+ OPT_ffpe_trap_ = 1221, /* -ffpe-trap= */
+ OPT_ffree_form = 1222, /* -ffree-form */
+ OPT_ffree_line_length_ = 1223, /* -ffree-line-length- */
+ OPT_ffree_line_length_none = 1224, /* -ffree-line-length-none */
+ OPT_ffreestanding = 1225, /* -ffreestanding */
+ OPT_ffriend_injection = 1226, /* -ffriend-injection */
+ OPT_ffrontend_loop_interchange = 1227, /* -ffrontend-loop-interchange */
+ OPT_ffrontend_optimize = 1228, /* -ffrontend-optimize */
+ OPT_ffunction_cse = 1229, /* -ffunction-cse */
+ OPT_ffunction_sections = 1230, /* -ffunction-sections */
+ OPT_fgcse = 1231, /* -fgcse */
+ OPT_fgcse_after_reload = 1232, /* -fgcse-after-reload */
+ OPT_fgcse_las = 1233, /* -fgcse-las */
+ OPT_fgcse_lm = 1234, /* -fgcse-lm */
+ OPT_fgcse_sm = 1235, /* -fgcse-sm */
+ OPT_fgen_module_list_ = 1236, /* -fgen-module-list= */
+ OPT_fgimple = 1237, /* -fgimple */
+ OPT_fgnat_encodings_ = 1238, /* -fgnat-encodings= */
+ OPT_fgnu_keywords = 1239, /* -fgnu-keywords */
+ OPT_fgnu_runtime = 1240, /* -fgnu-runtime */
+ OPT_fgnu_tm = 1241, /* -fgnu-tm */
+ OPT_fgnu_unique = 1242, /* -fgnu-unique */
+ OPT_fgnu89_inline = 1243, /* -fgnu89-inline */
+ OPT_fgo_c_header_ = 1244, /* -fgo-c-header= */
+ OPT_fgo_check_divide_overflow = 1245, /* -fgo-check-divide-overflow */
+ OPT_fgo_check_divide_zero = 1246, /* -fgo-check-divide-zero */
+ OPT_fgo_compiling_runtime = 1247, /* -fgo-compiling-runtime */
+ OPT_fgo_debug_escape = 1248, /* -fgo-debug-escape */
+ OPT_fgo_debug_escape_hash_ = 1249, /* -fgo-debug-escape-hash= */
+ OPT_fgo_debug_optimization = 1250, /* -fgo-debug-optimization */
+ OPT_fgo_dump_ = 1251, /* -fgo-dump- */
+ OPT_fgo_embedcfg_ = 1252, /* -fgo-embedcfg= */
+ OPT_fgo_optimize_ = 1253, /* -fgo-optimize- */
+ OPT_fgo_pkgpath_ = 1254, /* -fgo-pkgpath= */
+ OPT_fgo_prefix_ = 1255, /* -fgo-prefix= */
+ OPT_fgo_relative_import_path_ = 1256, /* -fgo-relative-import-path= */
+ OPT_fgraphite = 1257, /* -fgraphite */
+ OPT_fgraphite_identity = 1258, /* -fgraphite-identity */
+ OPT_fguess_branch_probability = 1259, /* -fguess-branch-probability */
+ OPT_fguiding_decls = 1260, /* -fguiding-decls */
+ /* OPT_fhandle_exceptions = 1261, */ /* -fhandle-exceptions */
+ OPT_fharden_compares = 1262, /* -fharden-compares */
+ OPT_fharden_conditional_branches = 1263, /* -fharden-conditional-branches */
+ /* OPT_fhelp = 1264, */ /* -fhelp */
+ /* OPT_fhelp_ = 1265, */ /* -fhelp= */
+ OPT_fhoist_adjacent_loads = 1266, /* -fhoist-adjacent-loads */
+ OPT_fhonor_std = 1267, /* -fhonor-std */
+ OPT_fhosted = 1268, /* -fhosted */
+ OPT_fhuge_objects = 1269, /* -fhuge-objects */
+ OPT_fident = 1270, /* -fident */
+ OPT_fif_conversion = 1271, /* -fif-conversion */
+ OPT_fif_conversion2 = 1272, /* -fif-conversion2 */
+ OPT_fignore_unknown_pragmas = 1273, /* -fignore-unknown-pragmas */
+ OPT_fimplement_inlines = 1274, /* -fimplement-inlines */
+ OPT_fimplicit_constexpr = 1275, /* -fimplicit-constexpr */
+ OPT_fimplicit_inline_templates = 1276, /* -fimplicit-inline-templates */
+ OPT_fimplicit_none = 1277, /* -fimplicit-none */
+ OPT_fimplicit_templates = 1278, /* -fimplicit-templates */
+ OPT_findex = 1279, /* -findex */
+ OPT_findirect_inlining = 1280, /* -findirect-inlining */
+ OPT_finhibit_size_directive = 1281, /* -finhibit-size-directive */
+ OPT_finit_character_ = 1282, /* -finit-character= */
+ OPT_finit_derived = 1283, /* -finit-derived */
+ OPT_finit_integer_ = 1284, /* -finit-integer= */
+ OPT_finit_local_zero = 1285, /* -finit-local-zero */
+ OPT_finit_logical_ = 1286, /* -finit-logical= */
+ OPT_finit_real_ = 1287, /* -finit-real= */
+ OPT_finline = 1288, /* -finline */
+ OPT_finline_arg_packing = 1289, /* -finline-arg-packing */
+ OPT_finline_atomics = 1290, /* -finline-atomics */
+ OPT_finline_functions = 1291, /* -finline-functions */
+ OPT_finline_functions_called_once = 1292, /* -finline-functions-called-once */
+ /* OPT_finline_limit_ = 1293, */ /* -finline-limit- */
+ OPT_finline_limit_ = 1294, /* -finline-limit= */
+ OPT_finline_matmul_limit_ = 1295, /* -finline-matmul-limit= */
+ OPT_finline_small_functions = 1296, /* -finline-small-functions */
+ OPT_finput_charset_ = 1297, /* -finput-charset= */
+ OPT_finstrument_functions = 1298, /* -finstrument-functions */
+ OPT_finstrument_functions_exclude_file_list_ = 1299,/* -finstrument-functions-exclude-file-list= */
+ OPT_finstrument_functions_exclude_function_list_ = 1300,/* -finstrument-functions-exclude-function-list= */
+ OPT_finstrument_functions_once = 1301, /* -finstrument-functions-once */
+ OPT_finteger_4_integer_8 = 1302, /* -finteger-4-integer-8 */
+ OPT_fintrinsic_modules_path = 1303, /* -fintrinsic-modules-path */
+ OPT_fintrinsic_modules_path_ = 1304, /* -fintrinsic-modules-path= */
+ OPT_finvariants = 1305, /* -finvariants */
+ OPT_fipa_bit_cp = 1306, /* -fipa-bit-cp */
+ OPT_fipa_cp = 1307, /* -fipa-cp */
+ /* OPT_fipa_cp_alignment = 1308, */ /* -fipa-cp-alignment */
+ OPT_fipa_cp_clone = 1309, /* -fipa-cp-clone */
+ OPT_fipa_icf = 1310, /* -fipa-icf */
+ OPT_fipa_icf_functions = 1311, /* -fipa-icf-functions */
+ OPT_fipa_icf_variables = 1312, /* -fipa-icf-variables */
+ /* OPT_fipa_matrix_reorg = 1313, */ /* -fipa-matrix-reorg */
+ OPT_fipa_modref = 1314, /* -fipa-modref */
+ OPT_fipa_profile = 1315, /* -fipa-profile */
+ OPT_fipa_pta = 1316, /* -fipa-pta */
+ OPT_fipa_pure_const = 1317, /* -fipa-pure-const */
+ OPT_fipa_ra = 1318, /* -fipa-ra */
+ OPT_fipa_reference = 1319, /* -fipa-reference */
+ OPT_fipa_reference_addressable = 1320, /* -fipa-reference-addressable */
+ OPT_fipa_sra = 1321, /* -fipa-sra */
+ OPT_fipa_stack_alignment = 1322, /* -fipa-stack-alignment */
+ OPT_fipa_strict_aliasing = 1323, /* -fipa-strict-aliasing */
+ /* OPT_fipa_struct_reorg = 1324, */ /* -fipa-struct-reorg */
+ OPT_fipa_vrp = 1325, /* -fipa-vrp */
+ OPT_fira_algorithm_ = 1326, /* -fira-algorithm= */
+ OPT_fira_hoist_pressure = 1327, /* -fira-hoist-pressure */
+ OPT_fira_loop_pressure = 1328, /* -fira-loop-pressure */
+ OPT_fira_region_ = 1329, /* -fira-region= */
+ OPT_fira_share_save_slots = 1330, /* -fira-share-save-slots */
+ OPT_fira_share_spill_slots = 1331, /* -fira-share-spill-slots */
+ OPT_fira_verbose_ = 1332, /* -fira-verbose= */
+ OPT_fiso = 1333, /* -fiso */
+ OPT_fisolate_erroneous_paths_attribute = 1334,/* -fisolate-erroneous-paths-attribute */
+ OPT_fisolate_erroneous_paths_dereference = 1335,/* -fisolate-erroneous-paths-dereference */
+ OPT_fivar_visibility_ = 1336, /* -fivar-visibility= */
+ OPT_fivopts = 1337, /* -fivopts */
+ OPT_fjump_tables = 1338, /* -fjump-tables */
+ OPT_fkeep_gc_roots_live = 1339, /* -fkeep-gc-roots-live */
+ OPT_fkeep_inline_dllexport = 1340, /* -fkeep-inline-dllexport */
+ OPT_fkeep_inline_functions = 1341, /* -fkeep-inline-functions */
+ OPT_fkeep_static_consts = 1342, /* -fkeep-static-consts */
+ OPT_fkeep_static_functions = 1343, /* -fkeep-static-functions */
+ OPT_flabels_ok = 1344, /* -flabels-ok */
+ OPT_flang_info_include_translate = 1345, /* -flang-info-include-translate */
+ OPT_flang_info_include_translate_not = 1346,/* -flang-info-include-translate-not */
+ OPT_flang_info_include_translate_ = 1347, /* -flang-info-include-translate= */
+ OPT_flang_info_module_cmi = 1348, /* -flang-info-module-cmi */
+ OPT_flang_info_module_cmi_ = 1349, /* -flang-info-module-cmi= */
+ OPT_flarge_source_files = 1350, /* -flarge-source-files */
+ OPT_flax_vector_conversions = 1351, /* -flax-vector-conversions */
+ OPT_fleading_underscore = 1352, /* -fleading-underscore */
+ OPT_flibs_ = 1353, /* -flibs= */
+ OPT_flifetime_dse = 1354, /* -flifetime-dse */
+ OPT_flifetime_dse_ = 1355, /* -flifetime-dse= */
+ OPT_flimit_function_alignment = 1356, /* -flimit-function-alignment */
+ OPT_flinker_output_ = 1357, /* -flinker-output= */
+ /* OPT_flive_patching = 1358, */ /* -flive-patching */
+ OPT_flive_patching_ = 1359, /* -flive-patching= */
+ OPT_flive_range_shrinkage = 1360, /* -flive-range-shrinkage */
+ OPT_flocal_ivars = 1361, /* -flocal-ivars */
+ OPT_flocation_ = 1362, /* -flocation= */
+ /* OPT_floop_block = 1363, */ /* -floop-block */
+ /* OPT_floop_flatten = 1364, */ /* -floop-flatten */
+ OPT_floop_interchange = 1365, /* -floop-interchange */
+ OPT_floop_nest_optimize = 1366, /* -floop-nest-optimize */
+ /* OPT_floop_optimize = 1367, */ /* -floop-optimize */
+ OPT_floop_parallelize_all = 1368, /* -floop-parallelize-all */
+ /* OPT_floop_strip_mine = 1369, */ /* -floop-strip-mine */
+ OPT_floop_unroll_and_jam = 1370, /* -floop-unroll-and-jam */
+ OPT_flra_remat = 1371, /* -flra-remat */
+ OPT_flto = 1372, /* -flto */
+ OPT_flto_compression_level_ = 1373, /* -flto-compression-level= */
+ /* OPT_flto_odr_type_merging = 1374, */ /* -flto-odr-type-merging */
+ OPT_flto_partition_ = 1375, /* -flto-partition= */
+ OPT_flto_report = 1376, /* -flto-report */
+ OPT_flto_report_wpa = 1377, /* -flto-report-wpa */
+ OPT_flto_ = 1378, /* -flto= */
+ OPT_fltrans = 1379, /* -fltrans */
+ OPT_fltrans_output_list_ = 1380, /* -fltrans-output-list= */
+ OPT_fm2_g = 1381, /* -fm2-g */
+ OPT_fm2_lower_case = 1382, /* -fm2-lower-case */
+ OPT_fm2_pathname_ = 1383, /* -fm2-pathname= */
+ OPT_fm2_pathnameI = 1384, /* -fm2-pathnameI */
+ OPT_fm2_plugin = 1385, /* -fm2-plugin */
+ OPT_fm2_prefix_ = 1386, /* -fm2-prefix= */
+ OPT_fm2_statistics = 1387, /* -fm2-statistics */
+ OPT_fm2_strict_type = 1388, /* -fm2-strict-type */
+ OPT_fm2_whole_program = 1389, /* -fm2-whole-program */
+ OPT_fmacro_prefix_map_ = 1390, /* -fmacro-prefix-map= */
+ OPT_fmain = 1391, /* -fmain */
+ OPT_fmath_errno = 1392, /* -fmath-errno */
+ OPT_fmax_array_constructor_ = 1393, /* -fmax-array-constructor= */
+ OPT_fmax_errors_ = 1394, /* -fmax-errors= */
+ OPT_fmax_identifier_length_ = 1395, /* -fmax-identifier-length= */
+ OPT_fmax_include_depth_ = 1396, /* -fmax-include-depth= */
+ OPT_fmax_stack_var_size_ = 1397, /* -fmax-stack-var-size= */
+ OPT_fmax_subrecord_length_ = 1398, /* -fmax-subrecord-length= */
+ OPT_fmem_report = 1399, /* -fmem-report */
+ OPT_fmem_report_wpa = 1400, /* -fmem-report-wpa */
+ OPT_fmerge_all_constants = 1401, /* -fmerge-all-constants */
+ OPT_fmerge_constants = 1402, /* -fmerge-constants */
+ OPT_fmerge_debug_strings = 1403, /* -fmerge-debug-strings */
+ OPT_fmessage_length_ = 1404, /* -fmessage-length= */
+ OPT_fmod_ = 1405, /* -fmod= */
+ OPT_fmodule_file_ = 1406, /* -fmodule-file= */
+ OPT_fmodule_header = 1407, /* -fmodule-header */
+ OPT_fmodule_header_ = 1408, /* -fmodule-header= */
+ OPT_fmodule_implicit_inline = 1409, /* -fmodule-implicit-inline */
+ OPT_fmodule_lazy = 1410, /* -fmodule-lazy */
+ OPT_fmodule_mapper_ = 1411, /* -fmodule-mapper= */
+ OPT_fmodule_only = 1412, /* -fmodule-only */
+ OPT_fmodule_private = 1413, /* -fmodule-private */
+ OPT_fmodule_version_ignore = 1414, /* -fmodule-version-ignore */
+ OPT_fmoduleinfo = 1415, /* -fmoduleinfo */
+ OPT_fmodules_ts = 1416, /* -fmodules-ts */
+ OPT_fmodulo_sched = 1417, /* -fmodulo-sched */
+ OPT_fmodulo_sched_allow_regmoves = 1418, /* -fmodulo-sched-allow-regmoves */
+ OPT_fmove_loop_invariants = 1419, /* -fmove-loop-invariants */
+ OPT_fmove_loop_stores = 1420, /* -fmove-loop-stores */
+ OPT_fms_extensions = 1421, /* -fms-extensions */
+ OPT_fmudflap = 1422, /* -fmudflap */
+ OPT_fmudflapir = 1423, /* -fmudflapir */
+ OPT_fmudflapth = 1424, /* -fmudflapth */
+ OPT_fmultiflags = 1425, /* -fmultiflags */
+ OPT_fname_mangling_version_ = 1426, /* -fname-mangling-version- */
+ OPT_fnew_abi = 1427, /* -fnew-abi */
+ OPT_fnew_inheriting_ctors = 1428, /* -fnew-inheriting-ctors */
+ OPT_fnew_ttp_matching = 1429, /* -fnew-ttp-matching */
+ OPT_fnext_runtime = 1430, /* -fnext-runtime */
+ OPT_fnil = 1431, /* -fnil */
+ OPT_fnil_receivers = 1432, /* -fnil-receivers */
+ OPT_fno_modules = 1433, /* -fno-modules */
+ OPT_fnon_call_exceptions = 1434, /* -fnon-call-exceptions */
+ OPT_fnonansi_builtins = 1435, /* -fnonansi-builtins */
+ OPT_fnonnull_objects = 1436, /* -fnonnull-objects */
+ OPT_fnothrow_opt = 1437, /* -fnothrow-opt */
+ OPT_fobjc_abi_version_ = 1438, /* -fobjc-abi-version= */
+ OPT_fobjc_call_cxx_cdtors = 1439, /* -fobjc-call-cxx-cdtors */
+ OPT_fobjc_direct_dispatch = 1440, /* -fobjc-direct-dispatch */
+ OPT_fobjc_exceptions = 1441, /* -fobjc-exceptions */
+ OPT_fobjc_gc = 1442, /* -fobjc-gc */
+ OPT_fobjc_nilcheck = 1443, /* -fobjc-nilcheck */
+ OPT_fobjc_sjlj_exceptions = 1444, /* -fobjc-sjlj-exceptions */
+ OPT_fobjc_std_objc1 = 1445, /* -fobjc-std=objc1 */
+ OPT_foffload_abi_ = 1446, /* -foffload-abi= */
+ OPT_foffload_options_ = 1447, /* -foffload-options= */
+ OPT_foffload_ = 1448, /* -foffload= */
+ OPT_fomit_frame_pointer = 1449, /* -fomit-frame-pointer */
+ OPT_fonly_ = 1450, /* -fonly= */
+ OPT_fopenacc = 1451, /* -fopenacc */
+ OPT_fopenacc_dim_ = 1452, /* -fopenacc-dim= */
+ OPT_fopenmp = 1453, /* -fopenmp */
+ OPT_fopenmp_simd = 1454, /* -fopenmp-simd */
+ /* OPT_fopenmp_target_simd_clone = 1455, *//* -fopenmp-target-simd-clone */
+ OPT_fopenmp_target_simd_clone_ = 1456, /* -fopenmp-target-simd-clone= */
+ OPT_foperator_names = 1457, /* -foperator-names */
+ OPT_fopt_info = 1458, /* -fopt-info */
+ OPT_fopt_info_ = 1459, /* -fopt-info- */
+ /* OPT_foptimize_register_move = 1460, */ /* -foptimize-register-move */
+ OPT_foptimize_sibling_calls = 1461, /* -foptimize-sibling-calls */
+ OPT_foptimize_strlen = 1462, /* -foptimize-strlen */
+ /* OPT_foptional_diags = 1463, */ /* -foptional-diags */
+ OPT_fpack_derived = 1464, /* -fpack-derived */
+ OPT_fpack_struct = 1465, /* -fpack-struct */
+ OPT_fpack_struct_ = 1466, /* -fpack-struct= */
+ OPT_fpad_source = 1467, /* -fpad-source */
+ OPT_fpartial_inlining = 1468, /* -fpartial-inlining */
+ OPT_fpatchable_function_entry_ = 1469, /* -fpatchable-function-entry= */
+ OPT_fpcc_struct_return = 1470, /* -fpcc-struct-return */
+ OPT_fpch_deps = 1471, /* -fpch-deps */
+ OPT_fpch_preprocess = 1472, /* -fpch-preprocess */
+ OPT_fpeel_loops = 1473, /* -fpeel-loops */
+ OPT_fpeephole = 1474, /* -fpeephole */
+ OPT_fpeephole2 = 1475, /* -fpeephole2 */
+ OPT_fpermissive = 1476, /* -fpermissive */
+ OPT_fpermitted_flt_eval_methods_ = 1477, /* -fpermitted-flt-eval-methods= */
+ OPT_fpic = 1478, /* -fpic */
+ OPT_fpie = 1479, /* -fpie */
+ OPT_fpim = 1480, /* -fpim */
+ OPT_fpim2 = 1481, /* -fpim2 */
+ OPT_fpim3 = 1482, /* -fpim3 */
+ OPT_fpim4 = 1483, /* -fpim4 */
+ OPT_fplan9_extensions = 1484, /* -fplan9-extensions */
+ OPT_fplt = 1485, /* -fplt */
+ OPT_fplugin_arg_ = 1486, /* -fplugin-arg- */
+ OPT_fplugin_ = 1487, /* -fplugin= */
+ OPT_fpositive_mod_floor_div = 1488, /* -fpositive-mod-floor-div */
+ OPT_fpost_ipa_mem_report = 1489, /* -fpost-ipa-mem-report */
+ OPT_fpostconditions = 1490, /* -fpostconditions */
+ OPT_fpre_include_ = 1491, /* -fpre-include= */
+ OPT_fpre_ipa_mem_report = 1492, /* -fpre-ipa-mem-report */
+ OPT_fpreconditions = 1493, /* -fpreconditions */
+ OPT_fpredictive_commoning = 1494, /* -fpredictive-commoning */
+ OPT_fprefetch_loop_arrays = 1495, /* -fprefetch-loop-arrays */
+ OPT_fpreprocessed = 1496, /* -fpreprocessed */
+ OPT_fpretty_templates = 1497, /* -fpretty-templates */
+ OPT_fpreview_all = 1498, /* -fpreview=all */
+ OPT_fpreview_bitfields = 1499, /* -fpreview=bitfields */
+ OPT_fpreview_dip1000 = 1500, /* -fpreview=dip1000 */
+ OPT_fpreview_dip1008 = 1501, /* -fpreview=dip1008 */
+ OPT_fpreview_dip1021 = 1502, /* -fpreview=dip1021 */
+ OPT_fpreview_dtorfields = 1503, /* -fpreview=dtorfields */
+ OPT_fpreview_fieldwise = 1504, /* -fpreview=fieldwise */
+ OPT_fpreview_fixaliasthis = 1505, /* -fpreview=fixaliasthis */
+ OPT_fpreview_fiximmutableconv = 1506, /* -fpreview=fiximmutableconv */
+ OPT_fpreview_in = 1507, /* -fpreview=in */
+ OPT_fpreview_inclusiveincontracts = 1508, /* -fpreview=inclusiveincontracts */
+ OPT_fpreview_nosharedaccess = 1509, /* -fpreview=nosharedaccess */
+ OPT_fpreview_rvaluerefparam = 1510, /* -fpreview=rvaluerefparam */
+ OPT_fpreview_systemvariables = 1511, /* -fpreview=systemvariables */
+ OPT_fprintf_return_value = 1512, /* -fprintf-return-value */
+ OPT_fprofile = 1513, /* -fprofile */
+ OPT_fprofile_abs_path = 1514, /* -fprofile-abs-path */
+ OPT_fprofile_arcs = 1515, /* -fprofile-arcs */
+ OPT_fprofile_correction = 1516, /* -fprofile-correction */
+ OPT_fprofile_dir_ = 1517, /* -fprofile-dir= */
+ OPT_fprofile_exclude_files_ = 1518, /* -fprofile-exclude-files= */
+ OPT_fprofile_filter_files_ = 1519, /* -fprofile-filter-files= */
+ OPT_fprofile_generate = 1520, /* -fprofile-generate */
+ OPT_fprofile_generate_ = 1521, /* -fprofile-generate= */
+ OPT_fprofile_info_section = 1522, /* -fprofile-info-section */
+ OPT_fprofile_info_section_ = 1523, /* -fprofile-info-section= */
+ OPT_fprofile_note_ = 1524, /* -fprofile-note= */
+ OPT_fprofile_partial_training = 1525, /* -fprofile-partial-training */
+ OPT_fprofile_prefix_map_ = 1526, /* -fprofile-prefix-map= */
+ OPT_fprofile_prefix_path_ = 1527, /* -fprofile-prefix-path= */
+ OPT_fprofile_reorder_functions = 1528, /* -fprofile-reorder-functions */
+ OPT_fprofile_report = 1529, /* -fprofile-report */
+ OPT_fprofile_reproducible_ = 1530, /* -fprofile-reproducible= */
+ OPT_fprofile_update_ = 1531, /* -fprofile-update= */
+ OPT_fprofile_use = 1532, /* -fprofile-use */
+ OPT_fprofile_use_ = 1533, /* -fprofile-use= */
+ OPT_fprofile_values = 1534, /* -fprofile-values */
+ OPT_fprotect_parens = 1535, /* -fprotect-parens */
+ OPT_fpthread = 1536, /* -fpthread */
+ OPT_fq = 1537, /* -fq */
+ OPT_frandom_seed = 1538, /* -frandom-seed */
+ OPT_frandom_seed_ = 1539, /* -frandom-seed= */
+ OPT_frange = 1540, /* -frange */
+ OPT_frange_check = 1541, /* -frange-check */
+ OPT_freal_4_real_10 = 1542, /* -freal-4-real-10 */
+ OPT_freal_4_real_16 = 1543, /* -freal-4-real-16 */
+ OPT_freal_4_real_8 = 1544, /* -freal-4-real-8 */
+ OPT_freal_8_real_10 = 1545, /* -freal-8-real-10 */
+ OPT_freal_8_real_16 = 1546, /* -freal-8-real-16 */
+ OPT_freal_8_real_4 = 1547, /* -freal-8-real-4 */
+ OPT_frealloc_lhs = 1548, /* -frealloc-lhs */
+ OPT_freciprocal_math = 1549, /* -freciprocal-math */
+ OPT_frecord_gcc_switches = 1550, /* -frecord-gcc-switches */
+ OPT_frecord_marker_4 = 1551, /* -frecord-marker=4 */
+ OPT_frecord_marker_8 = 1552, /* -frecord-marker=8 */
+ OPT_frecursive = 1553, /* -frecursive */
+ OPT_free = 1554, /* -free */
+ OPT_freg_struct_return = 1555, /* -freg-struct-return */
+ /* OPT_fregmove = 1556, */ /* -fregmove */
+ OPT_frelease = 1557, /* -frelease */
+ OPT_frename_registers = 1558, /* -frename-registers */
+ OPT_freorder_blocks = 1559, /* -freorder-blocks */
+ OPT_freorder_blocks_algorithm_ = 1560, /* -freorder-blocks-algorithm= */
+ OPT_freorder_blocks_and_partition = 1561, /* -freorder-blocks-and-partition */
+ OPT_freorder_functions = 1562, /* -freorder-functions */
+ OPT_frepack_arrays = 1563, /* -frepack-arrays */
+ OPT_freplace_objc_classes = 1564, /* -freplace-objc-classes */
+ OPT_frepo = 1565, /* -frepo */
+ OPT_freport_bug = 1566, /* -freport-bug */
+ OPT_frequire_return_statement = 1567, /* -frequire-return-statement */
+ OPT_frerun_cse_after_loop = 1568, /* -frerun-cse-after-loop */
+ /* OPT_frerun_loop_opt = 1569, */ /* -frerun-loop-opt */
+ OPT_freschedule_modulo_scheduled_loops = 1570,/* -freschedule-modulo-scheduled-loops */
+ OPT_fresolution_ = 1571, /* -fresolution= */
+ OPT_freturn = 1572, /* -freturn */
+ OPT_frevert_all = 1573, /* -frevert=all */
+ OPT_frevert_dip1000 = 1574, /* -frevert=dip1000 */
+ OPT_frevert_dtorfields = 1575, /* -frevert=dtorfields */
+ OPT_frevert_intpromote = 1576, /* -frevert=intpromote */
+ OPT_frounding_math = 1577, /* -frounding-math */
+ OPT_frtti = 1578, /* -frtti */
+ OPT_fruntime_modules_ = 1579, /* -fruntime-modules= */
+ OPT_frust_cfg_ = 1580, /* -frust-cfg= */
+ OPT_frust_compile_until_ = 1581, /* -frust-compile-until= */
+ OPT_frust_crate_ = 1582, /* -frust-crate= */
+ OPT_frust_debug = 1583, /* -frust-debug */
+ OPT_frust_dump_ = 1584, /* -frust-dump- */
+ OPT_frust_edition_ = 1585, /* -frust-edition= */
+ OPT_frust_embed_metadata = 1586, /* -frust-embed-metadata */
+ OPT_frust_incomplete_and_experimental_compiler_do_not_use = 1587,/* -frust-incomplete-and-experimental-compiler-do-not-use */
+ OPT_frust_mangling_ = 1588, /* -frust-mangling= */
+ OPT_frust_max_recursion_depth_ = 1589, /* -frust-max-recursion-depth= */
+ OPT_frust_metadata_output_ = 1590, /* -frust-metadata-output= */
+ OPT_fsanitize_address_use_after_scope = 1591,/* -fsanitize-address-use-after-scope */
+ OPT_fsanitize_coverage_ = 1592, /* -fsanitize-coverage= */
+ OPT_fsanitize_recover = 1593, /* -fsanitize-recover */
+ OPT_fsanitize_recover_ = 1594, /* -fsanitize-recover= */
+ OPT_fsanitize_sections_ = 1595, /* -fsanitize-sections= */
+ OPT_fsanitize_trap = 1596, /* -fsanitize-trap */
+ OPT_fsanitize_trap_ = 1597, /* -fsanitize-trap= */
+ /* OPT_fsanitize_undefined_trap_on_error = 1598, *//* -fsanitize-undefined-trap-on-error */
+ OPT_fsanitize_ = 1599, /* -fsanitize= */
+ OPT_fsave_mixins_ = 1600, /* -fsave-mixins= */
+ OPT_fsave_optimization_record = 1601, /* -fsave-optimization-record */
+ OPT_fscaffold_c = 1602, /* -fscaffold-c */
+ OPT_fscaffold_c__ = 1603, /* -fscaffold-c++ */
+ OPT_fscaffold_dynamic = 1604, /* -fscaffold-dynamic */
+ OPT_fscaffold_main = 1605, /* -fscaffold-main */
+ OPT_fscaffold_static = 1606, /* -fscaffold-static */
+ OPT_fsched_critical_path_heuristic = 1607, /* -fsched-critical-path-heuristic */
+ OPT_fsched_dep_count_heuristic = 1608, /* -fsched-dep-count-heuristic */
+ OPT_fsched_group_heuristic = 1609, /* -fsched-group-heuristic */
+ OPT_fsched_interblock = 1610, /* -fsched-interblock */
+ OPT_fsched_last_insn_heuristic = 1611, /* -fsched-last-insn-heuristic */
+ OPT_fsched_pressure = 1612, /* -fsched-pressure */
+ OPT_fsched_rank_heuristic = 1613, /* -fsched-rank-heuristic */
+ OPT_fsched_spec = 1614, /* -fsched-spec */
+ OPT_fsched_spec_insn_heuristic = 1615, /* -fsched-spec-insn-heuristic */
+ OPT_fsched_spec_load = 1616, /* -fsched-spec-load */
+ OPT_fsched_spec_load_dangerous = 1617, /* -fsched-spec-load-dangerous */
+ OPT_fsched_stalled_insns = 1618, /* -fsched-stalled-insns */
+ OPT_fsched_stalled_insns_dep = 1619, /* -fsched-stalled-insns-dep */
+ OPT_fsched_stalled_insns_dep_ = 1620, /* -fsched-stalled-insns-dep= */
+ OPT_fsched_stalled_insns_ = 1621, /* -fsched-stalled-insns= */
+ OPT_fsched_verbose_ = 1622, /* -fsched-verbose= */
+ OPT_fsched2_use_superblocks = 1623, /* -fsched2-use-superblocks */
+ /* OPT_fsched2_use_traces = 1624, */ /* -fsched2-use-traces */
+ OPT_fschedule_fusion = 1625, /* -fschedule-fusion */
+ OPT_fschedule_insns = 1626, /* -fschedule-insns */
+ OPT_fschedule_insns2 = 1627, /* -fschedule-insns2 */
+ OPT_fsecond_underscore = 1628, /* -fsecond-underscore */
+ OPT_fsection_anchors = 1629, /* -fsection-anchors */
+ /* OPT_fsee = 1630, */ /* -fsee */
+ OPT_fsel_sched_pipelining = 1631, /* -fsel-sched-pipelining */
+ OPT_fsel_sched_pipelining_outer_loops = 1632,/* -fsel-sched-pipelining-outer-loops */
+ OPT_fsel_sched_reschedule_pipelined = 1633,/* -fsel-sched-reschedule-pipelined */
+ OPT_fselective_scheduling = 1634, /* -fselective-scheduling */
+ OPT_fselective_scheduling2 = 1635, /* -fselective-scheduling2 */
+ OPT_fself_test_ = 1636, /* -fself-test= */
+ OPT_fsemantic_interposition = 1637, /* -fsemantic-interposition */
+ OPT_fshared = 1638, /* -fshared */
+ OPT_fshort_enums = 1639, /* -fshort-enums */
+ OPT_fshort_wchar = 1640, /* -fshort-wchar */
+ OPT_fshow_column = 1641, /* -fshow-column */
+ OPT_fshrink_wrap = 1642, /* -fshrink-wrap */
+ OPT_fshrink_wrap_separate = 1643, /* -fshrink-wrap-separate */
+ OPT_fsign_zero = 1644, /* -fsign-zero */
+ OPT_fsignaling_nans = 1645, /* -fsignaling-nans */
+ OPT_fsigned_bitfields = 1646, /* -fsigned-bitfields */
+ OPT_fsigned_char = 1647, /* -fsigned-char */
+ OPT_fsigned_zeros = 1648, /* -fsigned-zeros */
+ OPT_fsimd_cost_model_ = 1649, /* -fsimd-cost-model= */
+ OPT_fsingle_precision_constant = 1650, /* -fsingle-precision-constant */
+ OPT_fsized_deallocation = 1651, /* -fsized-deallocation */
+ OPT_fsoft_check_all = 1652, /* -fsoft-check-all */
+ OPT_fsources = 1653, /* -fsources */
+ OPT_fsplit_ivs_in_unroller = 1654, /* -fsplit-ivs-in-unroller */
+ OPT_fsplit_loops = 1655, /* -fsplit-loops */
+ OPT_fsplit_paths = 1656, /* -fsplit-paths */
+ OPT_fsplit_stack = 1657, /* -fsplit-stack */
+ OPT_fsplit_wide_types = 1658, /* -fsplit-wide-types */
+ OPT_fsplit_wide_types_early = 1659, /* -fsplit-wide-types-early */
+ OPT_fsquangle = 1660, /* -fsquangle */
+ OPT_fssa_backprop = 1661, /* -fssa-backprop */
+ OPT_fssa_phiopt = 1662, /* -fssa-phiopt */
+ OPT_fsso_struct_ = 1663, /* -fsso-struct= */
+ OPT_fstack_arrays = 1664, /* -fstack-arrays */
+ /* OPT_fstack_check = 1665, */ /* -fstack-check */
+ OPT_fstack_check_ = 1666, /* -fstack-check= */
+ OPT_fstack_clash_protection = 1667, /* -fstack-clash-protection */
+ OPT_fstack_limit = 1668, /* -fstack-limit */
+ OPT_fstack_limit_register_ = 1669, /* -fstack-limit-register= */
+ OPT_fstack_limit_symbol_ = 1670, /* -fstack-limit-symbol= */
+ OPT_fstack_protector = 1671, /* -fstack-protector */
+ OPT_fstack_protector_all = 1672, /* -fstack-protector-all */
+ OPT_fstack_protector_explicit = 1673, /* -fstack-protector-explicit */
+ OPT_fstack_protector_strong = 1674, /* -fstack-protector-strong */
+ OPT_fstack_reuse_ = 1675, /* -fstack-reuse= */
+ OPT_fstack_usage = 1676, /* -fstack-usage */
+ OPT_fstats = 1677, /* -fstats */
+ OPT_fstdarg_opt = 1678, /* -fstdarg-opt */
+ OPT_fstore_merging = 1679, /* -fstore-merging */
+ /* OPT_fstrength_reduce = 1680, */ /* -fstrength-reduce */
+ OPT_fstrict_aliasing = 1681, /* -fstrict-aliasing */
+ OPT_fstrict_enums = 1682, /* -fstrict-enums */
+ /* OPT_fstrict_flex_arrays = 1683, */ /* -fstrict-flex-arrays */
+ OPT_fstrict_flex_arrays_ = 1684, /* -fstrict-flex-arrays= */
+ OPT_fstrict_overflow = 1685, /* -fstrict-overflow */
+ OPT_fstrict_prototype = 1686, /* -fstrict-prototype */
+ OPT_fstrict_volatile_bitfields = 1687, /* -fstrict-volatile-bitfields */
+ /* OPT_fstrong_eval_order = 1688, */ /* -fstrong-eval-order */
+ OPT_fstrong_eval_order_ = 1689, /* -fstrong-eval-order= */
+ OPT_fswig = 1690, /* -fswig */
+ OPT_fswitch_errors = 1691, /* -fswitch-errors */
+ OPT_fsync_libcalls = 1692, /* -fsync-libcalls */
+ OPT_fsyntax_only = 1693, /* -fsyntax-only */
+ OPT_ftabstop_ = 1694, /* -ftabstop= */
+ /* OPT_ftail_call_workaround = 1695, */ /* -ftail-call-workaround */
+ OPT_ftail_call_workaround_ = 1696, /* -ftail-call-workaround= */
+ /* OPT_ftarget_help = 1697, */ /* -ftarget-help */
+ OPT_ftemplate_backtrace_limit_ = 1698, /* -ftemplate-backtrace-limit= */
+ /* OPT_ftemplate_depth_ = 1699, */ /* -ftemplate-depth- */
+ OPT_ftemplate_depth_ = 1700, /* -ftemplate-depth= */
+ OPT_ftest_coverage = 1701, /* -ftest-coverage */
+ OPT_ftest_forall_temp = 1702, /* -ftest-forall-temp */
+ OPT_fthis_is_variable = 1703, /* -fthis-is-variable */
+ OPT_fthread_jumps = 1704, /* -fthread-jumps */
+ OPT_fthreadsafe_statics = 1705, /* -fthreadsafe-statics */
+ OPT_ftime_report = 1706, /* -ftime-report */
+ OPT_ftime_report_details = 1707, /* -ftime-report-details */
+ OPT_ftls_model_ = 1708, /* -ftls-model= */
+ OPT_ftoplevel_reorder = 1709, /* -ftoplevel-reorder */
+ OPT_ftracer = 1710, /* -ftracer */
+ OPT_ftrack_macro_expansion = 1711, /* -ftrack-macro-expansion */
+ OPT_ftrack_macro_expansion_ = 1712, /* -ftrack-macro-expansion= */
+ OPT_ftrampolines = 1713, /* -ftrampolines */
+ OPT_ftransition_all = 1714, /* -ftransition=all */
+ OPT_ftransition_field = 1715, /* -ftransition=field */
+ OPT_ftransition_in = 1716, /* -ftransition=in */
+ OPT_ftransition_nogc = 1717, /* -ftransition=nogc */
+ OPT_ftransition_templates = 1718, /* -ftransition=templates */
+ OPT_ftransition_tls = 1719, /* -ftransition=tls */
+ OPT_ftrapping_math = 1720, /* -ftrapping-math */
+ OPT_ftrapv = 1721, /* -ftrapv */
+ OPT_ftree_bit_ccp = 1722, /* -ftree-bit-ccp */
+ OPT_ftree_builtin_call_dce = 1723, /* -ftree-builtin-call-dce */
+ OPT_ftree_ccp = 1724, /* -ftree-ccp */
+ OPT_ftree_ch = 1725, /* -ftree-ch */
+ /* OPT_ftree_coalesce_inlined_vars = 1726, *//* -ftree-coalesce-inlined-vars */
+ OPT_ftree_coalesce_vars = 1727, /* -ftree-coalesce-vars */
+ OPT_ftree_copy_prop = 1728, /* -ftree-copy-prop */
+ /* OPT_ftree_copyrename = 1729, */ /* -ftree-copyrename */
+ OPT_ftree_cselim = 1730, /* -ftree-cselim */
+ OPT_ftree_dce = 1731, /* -ftree-dce */
+ OPT_ftree_dominator_opts = 1732, /* -ftree-dominator-opts */
+ OPT_ftree_dse = 1733, /* -ftree-dse */
+ OPT_ftree_forwprop = 1734, /* -ftree-forwprop */
+ OPT_ftree_fre = 1735, /* -ftree-fre */
+ OPT_ftree_loop_distribute_patterns = 1736, /* -ftree-loop-distribute-patterns */
+ OPT_ftree_loop_distribution = 1737, /* -ftree-loop-distribution */
+ OPT_ftree_loop_if_convert = 1738, /* -ftree-loop-if-convert */
+ /* OPT_ftree_loop_if_convert_stores = 1739, *//* -ftree-loop-if-convert-stores */
+ OPT_ftree_loop_im = 1740, /* -ftree-loop-im */
+ OPT_ftree_loop_ivcanon = 1741, /* -ftree-loop-ivcanon */
+ /* OPT_ftree_loop_linear = 1742, */ /* -ftree-loop-linear */
+ OPT_ftree_loop_optimize = 1743, /* -ftree-loop-optimize */
+ OPT_ftree_loop_vectorize = 1744, /* -ftree-loop-vectorize */
+ OPT_ftree_lrs = 1745, /* -ftree-lrs */
+ OPT_ftree_parallelize_loops_ = 1746, /* -ftree-parallelize-loops= */
+ OPT_ftree_partial_pre = 1747, /* -ftree-partial-pre */
+ OPT_ftree_phiprop = 1748, /* -ftree-phiprop */
+ OPT_ftree_pre = 1749, /* -ftree-pre */
+ OPT_ftree_pta = 1750, /* -ftree-pta */
+ OPT_ftree_reassoc = 1751, /* -ftree-reassoc */
+ /* OPT_ftree_salias = 1752, */ /* -ftree-salias */
+ OPT_ftree_scev_cprop = 1753, /* -ftree-scev-cprop */
+ OPT_ftree_sink = 1754, /* -ftree-sink */
+ OPT_ftree_slp_vectorize = 1755, /* -ftree-slp-vectorize */
+ OPT_ftree_slsr = 1756, /* -ftree-slsr */
+ OPT_ftree_sra = 1757, /* -ftree-sra */
+ /* OPT_ftree_store_ccp = 1758, */ /* -ftree-store-ccp */
+ /* OPT_ftree_store_copy_prop = 1759, */ /* -ftree-store-copy-prop */
+ OPT_ftree_switch_conversion = 1760, /* -ftree-switch-conversion */
+ OPT_ftree_tail_merge = 1761, /* -ftree-tail-merge */
+ OPT_ftree_ter = 1762, /* -ftree-ter */
+ /* OPT_ftree_vect_loop_version = 1763, */ /* -ftree-vect-loop-version */
+ OPT_ftree_vectorize = 1764, /* -ftree-vectorize */
+ /* OPT_ftree_vectorizer_verbose_ = 1765, *//* -ftree-vectorizer-verbose= */
+ OPT_ftree_vrp = 1766, /* -ftree-vrp */
+ OPT_ftrivial_auto_var_init_ = 1767, /* -ftrivial-auto-var-init= */
+ OPT_funbounded_by_reference = 1768, /* -funbounded-by-reference */
+ OPT_funconstrained_commons = 1769, /* -funconstrained-commons */
+ OPT_funderscoring = 1770, /* -funderscoring */
+ OPT_funit_at_a_time = 1771, /* -funit-at-a-time */
+ OPT_funittest = 1772, /* -funittest */
+ OPT_funreachable_traps = 1773, /* -funreachable-traps */
+ OPT_funroll_all_loops = 1774, /* -funroll-all-loops */
+ OPT_funroll_completely_grow_size = 1775, /* -funroll-completely-grow-size */
+ OPT_funroll_loops = 1776, /* -funroll-loops */
+ /* OPT_funsafe_loop_optimizations = 1777, *//* -funsafe-loop-optimizations */
+ OPT_funsafe_math_optimizations = 1778, /* -funsafe-math-optimizations */
+ OPT_funsigned_bitfields = 1779, /* -funsigned-bitfields */
+ OPT_funsigned_char = 1780, /* -funsigned-char */
+ OPT_funswitch_loops = 1781, /* -funswitch-loops */
+ OPT_funwind_tables = 1782, /* -funwind-tables */
+ OPT_fuse_cxa_atexit = 1783, /* -fuse-cxa-atexit */
+ OPT_fuse_cxa_get_exception_ptr = 1784, /* -fuse-cxa-get-exception-ptr */
+ OPT_fuse_ld_bfd = 1785, /* -fuse-ld=bfd */
+ OPT_fuse_ld_gold = 1786, /* -fuse-ld=gold */
+ OPT_fuse_ld_lld = 1787, /* -fuse-ld=lld */
+ OPT_fuse_ld_mold = 1788, /* -fuse-ld=mold */
+ OPT_fuse_linker_plugin = 1789, /* -fuse-linker-plugin */
+ OPT_fuse_list_ = 1790, /* -fuse-list= */
+ OPT_fvar_tracking = 1791, /* -fvar-tracking */
+ OPT_fvar_tracking_assignments = 1792, /* -fvar-tracking-assignments */
+ OPT_fvar_tracking_assignments_toggle = 1793,/* -fvar-tracking-assignments-toggle */
+ OPT_fvar_tracking_uninit = 1794, /* -fvar-tracking-uninit */
+ OPT_fvariable_expansion_in_unroller = 1795,/* -fvariable-expansion-in-unroller */
+ /* OPT_fvect_cost_model = 1796, */ /* -fvect-cost-model */
+ OPT_fvect_cost_model_ = 1797, /* -fvect-cost-model= */
+ OPT_fverbose_asm = 1798, /* -fverbose-asm */
+ /* OPT_fversion = 1799, */ /* -fversion */
+ OPT_fversion_loops_for_strides = 1800, /* -fversion-loops-for-strides */
+ OPT_fversion_ = 1801, /* -fversion= */
+ OPT_fvisibility_inlines_hidden = 1802, /* -fvisibility-inlines-hidden */
+ OPT_fvisibility_ms_compat = 1803, /* -fvisibility-ms-compat */
+ OPT_fvisibility_ = 1804, /* -fvisibility= */
+ OPT_fvpt = 1805, /* -fvpt */
+ OPT_fvtable_gc = 1806, /* -fvtable-gc */
+ OPT_fvtable_thunks = 1807, /* -fvtable-thunks */
+ OPT_fvtable_verify_ = 1808, /* -fvtable-verify= */
+ OPT_fvtv_counts = 1809, /* -fvtv-counts */
+ OPT_fvtv_debug = 1810, /* -fvtv-debug */
+ OPT_fweak = 1811, /* -fweak */
+ OPT_fweak_templates = 1812, /* -fweak-templates */
+ OPT_fweb = 1813, /* -fweb */
+ /* OPT_fwhole_file = 1814, */ /* -fwhole-file */
+ OPT_fwhole_program = 1815, /* -fwhole-program */
+ OPT_fwholediv = 1816, /* -fwholediv */
+ OPT_fwholevalue = 1817, /* -fwholevalue */
+ OPT_fwide_exec_charset_ = 1818, /* -fwide-exec-charset= */
+ OPT_fworking_directory = 1819, /* -fworking-directory */
+ OPT_fwpa = 1820, /* -fwpa */
+ OPT_fwpa_ = 1821, /* -fwpa= */
+ OPT_fwrapv = 1822, /* -fwrapv */
+ OPT_fwrapv_pointer = 1823, /* -fwrapv-pointer */
+ OPT_fxref = 1824, /* -fxref */
+ /* OPT_fzee = 1825, */ /* -fzee */
+ OPT_fzero_call_used_regs_ = 1826, /* -fzero-call-used-regs= */
+ OPT_fzero_initialized_in_bss = 1827, /* -fzero-initialized-in-bss */
+ OPT_fzero_link = 1828, /* -fzero-link */
+ OPT_g = 1829, /* -g */
+ OPT_gant = 1830, /* -gant */
+ OPT_gas_loc_support = 1831, /* -gas-loc-support */
+ OPT_gas_locview_support = 1832, /* -gas-locview-support */
+ OPT_gbtf = 1833, /* -gbtf */
+ OPT_gcoff = 1834, /* -gcoff */
+ OPT_gcoff1 = 1835, /* -gcoff1 */
+ OPT_gcoff2 = 1836, /* -gcoff2 */
+ OPT_gcoff3 = 1837, /* -gcoff3 */
+ OPT_gcolumn_info = 1838, /* -gcolumn-info */
+ OPT_gctf = 1839, /* -gctf */
+ OPT_gdescribe_dies = 1840, /* -gdescribe-dies */
+ OPT_gdwarf = 1841, /* -gdwarf */
+ OPT_gdwarf_ = 1842, /* -gdwarf- */
+ OPT_gdwarf32 = 1843, /* -gdwarf32 */
+ OPT_gdwarf64 = 1844, /* -gdwarf64 */
+ OPT_gen_decls = 1845, /* -gen-decls */
+ OPT_ggdb = 1846, /* -ggdb */
+ OPT_ggnu_pubnames = 1847, /* -ggnu-pubnames */
+ OPT_gimple_stats = 1848, /* -gimple-stats */
+ OPT_ginline_points = 1849, /* -ginline-points */
+ OPT_ginternal_reset_location_views = 1850, /* -ginternal-reset-location-views */
+ OPT_gnat = 1851, /* -gnat */
+ OPT_gnatO = 1852, /* -gnatO */
+ OPT_gno_ = 1853, /* -gno- */
+ OPT_gno_pubnames = 1854, /* -gno-pubnames */
+ OPT_gpubnames = 1855, /* -gpubnames */
+ OPT_grecord_gcc_switches = 1856, /* -grecord-gcc-switches */
+ OPT_gsplit_dwarf = 1857, /* -gsplit-dwarf */
+ OPT_gstabs = 1858, /* -gstabs */
+ OPT_gstabs_ = 1859, /* -gstabs+ */
+ OPT_gstatement_frontiers = 1860, /* -gstatement-frontiers */
+ OPT_gstrict_dwarf = 1861, /* -gstrict-dwarf */
+ OPT_gtoggle = 1862, /* -gtoggle */
+ OPT_gvariable_location_views = 1863, /* -gvariable-location-views */
+ OPT_gvariable_location_views_incompat5 = 1864,/* -gvariable-location-views=incompat5 */
+ OPT_gvms = 1865, /* -gvms */
+ OPT_gxcoff = 1866, /* -gxcoff */
+ OPT_gxcoff_ = 1867, /* -gxcoff+ */
+ OPT_gz = 1868, /* -gz */
+ OPT_gz_ = 1869, /* -gz= */
+ OPT_h = 1870, /* -h */
+ OPT_help = 1871, /* -help */
+ OPT_idirafter = 1872, /* -idirafter */
+ OPT_imacros = 1873, /* -imacros */
+ OPT_imultiarch = 1874, /* -imultiarch */
+ OPT_imultilib = 1875, /* -imultilib */
+ OPT_include = 1876, /* -include */
+ OPT_iplugindir_ = 1877, /* -iplugindir= */
+ OPT_iprefix = 1878, /* -iprefix */
+ OPT_iquote = 1879, /* -iquote */
+ OPT_isysroot = 1880, /* -isysroot */
+ OPT_isystem = 1881, /* -isystem */
+ OPT_iwithprefix = 1882, /* -iwithprefix */
+ OPT_iwithprefixbefore = 1883, /* -iwithprefixbefore */
+ OPT_k8 = 1884, /* -k8 */
+ OPT_l = 1885, /* -l */
+ OPT_lang_asm = 1886, /* -lang-asm */
+ OPT_list = 1887, /* -list */
+ OPT_mabi_ = 1888, /* -mabi= */
+ OPT_mabort_on_noreturn = 1889, /* -mabort-on-noreturn */
+ OPT_mapcs = 1890, /* -mapcs */
+ OPT_mapcs_frame = 1891, /* -mapcs-frame */
+ OPT_mapcs_reentrant = 1892, /* -mapcs-reentrant */
+ OPT_mapcs_stack_check = 1893, /* -mapcs-stack-check */
+ OPT_march_ = 1894, /* -march= */
+ OPT_marm = 1895, /* -marm */
+ OPT_masm_syntax_unified = 1896, /* -masm-syntax-unified */
+ OPT_mbe32 = 1897, /* -mbe32 */
+ OPT_mbe8 = 1898, /* -mbe8 */
+ OPT_mbig_endian = 1899, /* -mbig-endian */
+ OPT_mbranch_cost_ = 1900, /* -mbranch-cost= */
+ OPT_mbranch_protection_ = 1901, /* -mbranch-protection= */
+ OPT_mcallee_super_interworking = 1902, /* -mcallee-super-interworking */
+ OPT_mcaller_super_interworking = 1903, /* -mcaller-super-interworking */
+ OPT_mcmse = 1904, /* -mcmse */
+ OPT_mcpu_ = 1905, /* -mcpu= */
+ OPT_mfdpic = 1906, /* -mfdpic */
+ OPT_mfix_cmse_cve_2021_35465 = 1907, /* -mfix-cmse-cve-2021-35465 */
+ OPT_mfix_cortex_a57_aes_1742098 = 1908, /* -mfix-cortex-a57-aes-1742098 */
+ /* OPT_mfix_cortex_a72_aes_1655431 = 1909, *//* -mfix-cortex-a72-aes-1655431 */
+ OPT_mfix_cortex_m3_ldrd = 1910, /* -mfix-cortex-m3-ldrd */
+ OPT_mflip_thumb = 1911, /* -mflip-thumb */
+ OPT_mfloat_abi_ = 1912, /* -mfloat-abi= */
+ OPT_mfp16_format_ = 1913, /* -mfp16-format= */
+ OPT_mfpu_ = 1914, /* -mfpu= */
+ OPT_mgeneral_regs_only = 1915, /* -mgeneral-regs-only */
+ /* OPT_mhard_float = 1916, */ /* -mhard-float */
+ OPT_mlibarch_ = 1917, /* -mlibarch= */
+ OPT_mlittle_endian = 1918, /* -mlittle-endian */
+ OPT_mlong_calls = 1919, /* -mlong-calls */
+ OPT_mneon_for_64bits = 1920, /* -mneon-for-64bits */
+ OPT_mpic_data_is_text_relative = 1921, /* -mpic-data-is-text-relative */
+ OPT_mpic_register_ = 1922, /* -mpic-register= */
+ OPT_mpoke_function_name = 1923, /* -mpoke-function-name */
+ OPT_mprint_tune_info = 1924, /* -mprint-tune-info */
+ OPT_mpure_code = 1925, /* -mpure-code */
+ OPT_mrestrict_it = 1926, /* -mrestrict-it */
+ OPT_msched_prolog = 1927, /* -msched-prolog */
+ OPT_msingle_pic_base = 1928, /* -msingle-pic-base */
+ OPT_mslow_flash_data = 1929, /* -mslow-flash-data */
+ /* OPT_msoft_float = 1930, */ /* -msoft-float */
+ OPT_mstack_protector_guard_offset_ = 1931, /* -mstack-protector-guard-offset= */
+ OPT_mstack_protector_guard_ = 1932, /* -mstack-protector-guard= */
+ OPT_mstructure_size_boundary_ = 1933, /* -mstructure-size-boundary= */
+ OPT_mthumb = 1934, /* -mthumb */
+ OPT_mthumb_interwork = 1935, /* -mthumb-interwork */
+ OPT_mtls_dialect_ = 1936, /* -mtls-dialect= */
+ OPT_mtp_ = 1937, /* -mtp= */
+ OPT_mtpcs_frame = 1938, /* -mtpcs-frame */
+ OPT_mtpcs_leaf_frame = 1939, /* -mtpcs-leaf-frame */
+ OPT_mtune_ = 1940, /* -mtune= */
+ OPT_munaligned_access = 1941, /* -munaligned-access */
+ OPT_mvectorize_with_neon_double = 1942, /* -mvectorize-with-neon-double */
+ OPT_mvectorize_with_neon_quad = 1943, /* -mvectorize-with-neon-quad */
+ OPT_mverbose_cost_dump = 1944, /* -mverbose-cost-dump */
+ OPT_mword_relocations = 1945, /* -mword-relocations */
+ OPT_n = 1946, /* -n */
+ OPT_name_sort = 1947, /* -name-sort */
+ OPT_no_canonical_prefixes = 1948, /* -no-canonical-prefixes */
+ OPT_no_integrated_cpp = 1949, /* -no-integrated-cpp */
+ OPT_no_pie = 1950, /* -no-pie */
+ OPT_nocpp = 1951, /* -nocpp */
+ OPT_nodefaultlibs = 1952, /* -nodefaultlibs */
+ OPT_nolibc = 1953, /* -nolibc */
+ OPT_nophoboslib = 1954, /* -nophoboslib */
+ OPT_nostartfiles = 1955, /* -nostartfiles */
+ OPT_nostdinc = 1956, /* -nostdinc */
+ OPT_nostdinc__ = 1957, /* -nostdinc++ */
+ OPT_nostdlib = 1958, /* -nostdlib */
+ OPT_nostdlib__ = 1959, /* -nostdlib++ */
+ OPT_o = 1960, /* -o */
+ OPT_objects = 1961, /* -objects */
+ OPT_p = 1962, /* -p */
+ OPT_pass_exit_codes = 1963, /* -pass-exit-codes */
+ /* OPT_pedantic = 1964, */ /* -pedantic */
+ OPT_pedantic_errors = 1965, /* -pedantic-errors */
+ OPT_pg = 1966, /* -pg */
+ OPT_pie = 1967, /* -pie */
+ OPT_pipe = 1968, /* -pipe */
+ OPT_print_file_name_ = 1969, /* -print-file-name= */
+ OPT_print_libgcc_file_name = 1970, /* -print-libgcc-file-name */
+ OPT_print_multi_directory = 1971, /* -print-multi-directory */
+ OPT_print_multi_lib = 1972, /* -print-multi-lib */
+ OPT_print_multi_os_directory = 1973, /* -print-multi-os-directory */
+ OPT_print_multiarch = 1974, /* -print-multiarch */
+ OPT_print_objc_runtime_info = 1975, /* -print-objc-runtime-info */
+ OPT_print_prog_name_ = 1976, /* -print-prog-name= */
+ OPT_print_search_dirs = 1977, /* -print-search-dirs */
+ OPT_print_sysroot = 1978, /* -print-sysroot */
+ OPT_print_sysroot_headers_suffix = 1979, /* -print-sysroot-headers-suffix */
+ OPT_print_value = 1980, /* -print-value */
+ OPT_quiet = 1981, /* -quiet */
+ OPT_r = 1982, /* -r */
+ OPT_remap = 1983, /* -remap */
+ OPT_reverse_sort = 1984, /* -reverse-sort */
+ OPT_s = 1985, /* -s */
+ OPT_save_temps = 1986, /* -save-temps */
+ OPT_save_temps_ = 1987, /* -save-temps= */
+ OPT_shared = 1988, /* -shared */
+ OPT_shared_libgcc = 1989, /* -shared-libgcc */
+ OPT_shared_libphobos = 1990, /* -shared-libphobos */
+ OPT_size_sort = 1991, /* -size-sort */
+ /* OPT_specs = 1992, */ /* -specs */
+ OPT_specs_ = 1993, /* -specs= */
+ OPT_static = 1994, /* -static */
+ OPT_static_libasan = 1995, /* -static-libasan */
+ OPT_static_libgcc = 1996, /* -static-libgcc */
+ OPT_static_libgfortran = 1997, /* -static-libgfortran */
+ OPT_static_libgm2 = 1998, /* -static-libgm2 */
+ OPT_static_libgo = 1999, /* -static-libgo */
+ OPT_static_libhwasan = 2000, /* -static-libhwasan */
+ OPT_static_liblsan = 2001, /* -static-liblsan */
+ OPT_static_libmpx = 2002, /* -static-libmpx */
+ OPT_static_libmpxwrappers = 2003, /* -static-libmpxwrappers */
+ OPT_static_libphobos = 2004, /* -static-libphobos */
+ OPT_static_libquadmath = 2005, /* -static-libquadmath */
+ OPT_static_libstdc__ = 2006, /* -static-libstdc++ */
+ OPT_static_libtsan = 2007, /* -static-libtsan */
+ OPT_static_libubsan = 2008, /* -static-libubsan */
+ OPT_static_pie = 2009, /* -static-pie */
+ /* OPT_std_c__03 = 2010, */ /* -std=c++03 */
+ /* OPT_std_c__0x = 2011, */ /* -std=c++0x */
+ OPT_std_c__11 = 2012, /* -std=c++11 */
+ OPT_std_c__14 = 2013, /* -std=c++14 */
+ OPT_std_c__17 = 2014, /* -std=c++17 */
+ /* OPT_std_c__1y = 2015, */ /* -std=c++1y */
+ /* OPT_std_c__1z = 2016, */ /* -std=c++1z */
+ OPT_std_c__20 = 2017, /* -std=c++20 */
+ OPT_std_c__23 = 2018, /* -std=c++23 */
+ /* OPT_std_c__2a = 2019, */ /* -std=c++2a */
+ /* OPT_std_c__2b = 2020, */ /* -std=c++2b */
+ OPT_std_c__98 = 2021, /* -std=c++98 */
+ OPT_std_c11 = 2022, /* -std=c11 */
+ OPT_std_c17 = 2023, /* -std=c17 */
+ /* OPT_std_c18 = 2024, */ /* -std=c18 */
+ /* OPT_std_c1x = 2025, */ /* -std=c1x */
+ OPT_std_c2x = 2026, /* -std=c2x */
+ /* OPT_std_c89 = 2027, */ /* -std=c89 */
+ OPT_std_c90 = 2028, /* -std=c90 */
+ OPT_std_c99 = 2029, /* -std=c99 */
+ /* OPT_std_c9x = 2030, */ /* -std=c9x */
+ OPT_std_f2003 = 2031, /* -std=f2003 */
+ OPT_std_f2008 = 2032, /* -std=f2008 */
+ OPT_std_f2008ts = 2033, /* -std=f2008ts */
+ OPT_std_f2018 = 2034, /* -std=f2018 */
+ OPT_std_f95 = 2035, /* -std=f95 */
+ OPT_std_gnu = 2036, /* -std=gnu */
+ /* OPT_std_gnu__03 = 2037, */ /* -std=gnu++03 */
+ /* OPT_std_gnu__0x = 2038, */ /* -std=gnu++0x */
+ OPT_std_gnu__11 = 2039, /* -std=gnu++11 */
+ OPT_std_gnu__14 = 2040, /* -std=gnu++14 */
+ OPT_std_gnu__17 = 2041, /* -std=gnu++17 */
+ /* OPT_std_gnu__1y = 2042, */ /* -std=gnu++1y */
+ /* OPT_std_gnu__1z = 2043, */ /* -std=gnu++1z */
+ OPT_std_gnu__20 = 2044, /* -std=gnu++20 */
+ OPT_std_gnu__23 = 2045, /* -std=gnu++23 */
+ /* OPT_std_gnu__2a = 2046, */ /* -std=gnu++2a */
+ /* OPT_std_gnu__2b = 2047, */ /* -std=gnu++2b */
+ OPT_std_gnu__98 = 2048, /* -std=gnu++98 */
+ OPT_std_gnu11 = 2049, /* -std=gnu11 */
+ OPT_std_gnu17 = 2050, /* -std=gnu17 */
+ /* OPT_std_gnu18 = 2051, */ /* -std=gnu18 */
+ /* OPT_std_gnu1x = 2052, */ /* -std=gnu1x */
+ OPT_std_gnu2x = 2053, /* -std=gnu2x */
+ /* OPT_std_gnu89 = 2054, */ /* -std=gnu89 */
+ OPT_std_gnu90 = 2055, /* -std=gnu90 */
+ OPT_std_gnu99 = 2056, /* -std=gnu99 */
+ /* OPT_std_gnu9x = 2057, */ /* -std=gnu9x */
+ /* OPT_std_iso9899_1990 = 2058, */ /* -std=iso9899:1990 */
+ OPT_std_iso9899_199409 = 2059, /* -std=iso9899:199409 */
+ /* OPT_std_iso9899_1999 = 2060, */ /* -std=iso9899:1999 */
+ /* OPT_std_iso9899_199x = 2061, */ /* -std=iso9899:199x */
+ /* OPT_std_iso9899_2011 = 2062, */ /* -std=iso9899:2011 */
+ /* OPT_std_iso9899_2017 = 2063, */ /* -std=iso9899:2017 */
+ /* OPT_std_iso9899_2018 = 2064, */ /* -std=iso9899:2018 */
+ OPT_std_legacy = 2065, /* -std=legacy */
+ OPT_stdlib_ = 2066, /* -stdlib= */
+ OPT_symbol_ = 2067, /* -symbol= */
+ OPT_symbolic = 2068, /* -symbolic */
+ OPT_t = 2069, /* -t */
+ OPT_time = 2070, /* -time */
+ OPT_time_ = 2071, /* -time= */
+ OPT_traditional = 2072, /* -traditional */
+ OPT_traditional_cpp = 2073, /* -traditional-cpp */
+ OPT_tree_stats = 2074, /* -tree-stats */
+ OPT_trigraphs = 2075, /* -trigraphs */
+ OPT_type_stats = 2076, /* -type-stats */
+ OPT_u = 2077, /* -u */
+ OPT_undef = 2078, /* -undef */
+ OPT_v = 2079, /* -v */
+ OPT_version = 2080, /* -version */
+ OPT_w = 2081, /* -w */
+ OPT_wrapper = 2082, /* -wrapper */
+ OPT_x = 2083, /* -x */
+ OPT_z = 2084, /* -z */
+ N_OPTS,
+ OPT_SPECIAL_unknown,
+ OPT_SPECIAL_ignore,
+ OPT_SPECIAL_warn_removed,
+ OPT_SPECIAL_program_name,
+ OPT_SPECIAL_input_file
+};
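
Each OPT_ enumerator above is the stable handle by which the rest of the compiler refers to a command-line switch, with N_OPTS and the OPT_SPECIAL_* values closing the enumeration. A minimal sketch of how a consumer (for instance a plugin that includes this generated options.h) might dispatch on these values; the function and its categories are hypothetical:

#include "options.h"  /* the generated header containing the enum above */

/* Sketch: map an option index to a coarse, made-up category. */
static const char *
classify_option (int opt_index)
{
  switch (opt_index)
    {
    case OPT_fstrict_aliasing:  /* -fstrict-aliasing */
      return "aliasing";
    case OPT_funroll_loops:     /* -funroll-loops */
      return "loop optimization";
    case OPT_g:                 /* -g */
      return "debug info";
    default:
      return opt_index < N_OPTS ? "other" : "special or unknown";
    }
}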
+
+#ifdef GCC_C_COMMON_C
+/* Mapping from cpp message reasons to the options that enable them. */
+#include <cpplib.h>
+struct cpp_reason_option_codes_t
+{
+ /* cpplib message reason. */
+ const enum cpp_warning_reason reason;
+ /* gcc option that controls this message. */
+ const int option_code;
+};
+
+static const struct cpp_reason_option_codes_t cpp_reason_option_codes[] = {
+ {CPP_W_BIDIRECTIONAL, OPT_Wbidi_chars_},
+ {CPP_W_BUILTIN_MACRO_REDEFINED, OPT_Wbuiltin_macro_redefined},
+ {CPP_W_CXX_OPERATOR_NAMES, OPT_Wc___compat},
+ {CPP_W_CXX11_COMPAT, OPT_Wc__11_compat},
+ {CPP_W_CXX20_COMPAT, OPT_Wc__20_compat},
+ {CPP_W_C11_C2X_COMPAT, OPT_Wc11_c2x_compat},
+ {CPP_W_C90_C99_COMPAT, OPT_Wc90_c99_compat},
+ {CPP_W_COMMENTS, OPT_Wcomment},
+ {CPP_W_WARNING_DIRECTIVE, OPT_Wcpp},
+ {CPP_W_DATE_TIME, OPT_Wdate_time},
+ {CPP_W_DEPRECATED, OPT_Wdeprecated},
+ {CPP_W_ENDIF_LABELS, OPT_Wendif_labels},
+ {CPP_W_EXPANSION_TO_DEFINED, OPT_Wexpansion_to_defined},
+ {CPP_W_INVALID_PCH, OPT_Winvalid_pch},
+ {CPP_W_INVALID_UTF8, OPT_Winvalid_utf8},
+ {CPP_W_LITERAL_SUFFIX, OPT_Wliteral_suffix},
+ {CPP_W_LONG_LONG, OPT_Wlong_long},
+ {CPP_W_MISSING_INCLUDE_DIRS, OPT_Wmissing_include_dirs},
+ {CPP_W_MULTICHAR, OPT_Wmultichar},
+ {CPP_W_NORMALIZE, OPT_Wnormalized_},
+ {CPP_W_PEDANTIC, OPT_Wpedantic},
+ {CPP_W_TRADITIONAL, OPT_Wtraditional},
+ {CPP_W_TRIGRAPHS, OPT_Wtrigraphs},
+ {CPP_W_UNDEF, OPT_Wundef},
+ {CPP_W_UNICODE, OPT_Wunicode},
+ {CPP_W_UNUSED_MACROS, OPT_Wunused_macros},
+ {CPP_W_VARIADIC_MACROS, OPT_Wvariadic_macros},
+ {CPP_W_NONE, 0},
+};
+#endif
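
The CPP_W_NONE entry acts as a sentinel, so the table can be scanned linearly. A hedged sketch of the lookup the C-family front ends perform over this table (the function name here is hypothetical):

/* Sketch: return the option code controlling a libcpp warning reason,
   or 0 if no dedicated option controls it. */
static int
option_for_cpp_reason (enum cpp_warning_reason reason)
{
  for (int i = 0; cpp_reason_option_codes[i].reason != CPP_W_NONE; i++)
    if (cpp_reason_option_codes[i].reason == reason)
      return cpp_reason_option_codes[i].option_code;
  return 0;
}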
+
+#endif /* OPTIONS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opts-diagnostic.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opts-diagnostic.h
new file mode 100644
index 0000000..b36fb46
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opts-diagnostic.h
@@ -0,0 +1,28 @@
+/* Command line option handling. Interactions with diagnostics code.
+ Copyright (C) 2010-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OPTS_DIAGNOSTIC_H
+#define GCC_OPTS_DIAGNOSTIC_H
+
+extern char *option_name (diagnostic_context *context, int option_index,
+ diagnostic_t orig_diag_kind, diagnostic_t diag_kind);
+
+extern char *get_option_url (diagnostic_context *context, int option_index);
+
+#endif
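
Both helpers return freshly allocated strings (an assumption here: the caller is expected to free them). A hypothetical sketch of their use from diagnostic finishing code:

/* Sketch: note which option controls a diagnostic, if any. */
static void
note_controlling_option (diagnostic_context *dc, int option_index,
                         diagnostic_t kind)
{
  char *name = option_name (dc, option_index, kind, kind);
  if (name)
    {
      fprintf (stderr, "  [controlled by %s]\n", name);
      free (name);
    }
}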
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opts-jobserver.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opts-jobserver.h
new file mode 100644
index 0000000..4756be8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opts-jobserver.h
@@ -0,0 +1,62 @@
+/* GNU make's jobserver related functionality.
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_JOBSERVER_H
+#define GCC_JOBSERVER_H
+
+using namespace std;
+
+struct jobserver_info
+{
+ /* Default constructor. */
+ jobserver_info ();
+
+ /* Connect to the server. */
+ void connect ();
+
+ /* Disconnect from the server. */
+ void disconnect ();
+
+ /* Get token from the server. */
+ bool get_token ();
+
+ /* Return token to the server. */
+ void return_token ();
+
+ /* Error message if there is a problem. */
+ string error_msg = "";
+  /* MAKEFLAGS value with the --jobserver-auth option removed. */
+  string skipped_makeflags = "";
+ /* File descriptor for reading used for jobserver communication. */
+ int rfd = -1;
+ /* File descriptor for writing used for jobserver communication. */
+ int wfd = -1;
+ /* Named pipe path. */
+ string pipe_path = "";
+ /* Pipe file descriptor. */
+ int pipefd = -1;
+  /* True if the jobserver is active. */
+  bool is_active = false;
+  /* True if communication with the jobserver is working. */
+  bool is_connected = false;
+};
+
+#endif /* GCC_JOBSERVER_H */
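
A minimal usage sketch, under the assumption that the default constructor inspects MAKEFLAGS for --jobserver-auth and fills in is_active:

jobserver_info jinfo;        /* detects the jobserver from MAKEFLAGS */
if (jinfo.is_active)
  {
    jinfo.connect ();
    if (jinfo.get_token ())
      {
        /* ... perform one unit of parallel work ... */
        jinfo.return_token ();
      }
    jinfo.disconnect ();
  }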
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opts.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opts.h
new file mode 100644
index 0000000..9959a44
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/opts.h
@@ -0,0 +1,566 @@
+/* Command line option handling.
+ Copyright (C) 2002-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OPTS_H
+#define GCC_OPTS_H
+
+#include "obstack.h"
+
+/* Specifies how a switch's VAR_VALUE relates to its FLAG_VAR. */
+enum cl_var_type {
+ /* The switch is an integer value. */
+ CLVC_INTEGER,
+
+ /* The switch is enabled when FLAG_VAR == VAR_VALUE. */
+ CLVC_EQUAL,
+
+ /* The switch is enabled when VAR_VALUE is not set in FLAG_VAR. */
+ CLVC_BIT_CLEAR,
+
+ /* The switch is enabled when VAR_VALUE is set in FLAG_VAR. */
+ CLVC_BIT_SET,
+
+ /* The switch is enabled when FLAG_VAR is less than HOST_WIDE_INT_M1U. */
+ CLVC_SIZE,
+
+ /* The switch takes a string argument and FLAG_VAR points to that
+ argument. */
+ CLVC_STRING,
+
+ /* The switch takes an enumerated argument (VAR_ENUM says what
+ enumeration) and FLAG_VAR points to that argument. */
+ CLVC_ENUM,
+
+ /* The switch should be stored in the VEC pointed to by FLAG_VAR for
+ later processing. */
+ CLVC_DEFER
+};
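
The enum only names the relationship; the actual updates happen in the option machinery (opts-common.cc). As an illustration, the three flag-style variants can be modelled on a plain integer variable like this (a sketch, not the real implementation):

/* Sketch: how VAR_VALUE updates FLAG_VAR for three of the variants. */
static void
apply_switch (enum cl_var_type type, long var_value, long *flag_var,
              bool enabled)
{
  switch (type)
    {
    case CLVC_EQUAL:      /* enabled when *flag_var == var_value */
      if (enabled)
        *flag_var = var_value;
      break;
    case CLVC_BIT_SET:    /* enabled when the bit is set */
      *flag_var = enabled ? *flag_var | var_value
                          : *flag_var & ~var_value;
      break;
    case CLVC_BIT_CLEAR:  /* enabled when the bit is clear */
      *flag_var = enabled ? *flag_var & ~var_value
                          : *flag_var | var_value;
      break;
    default:
      break;
    }
}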
+
+/* Values for var_value member of CLVC_ENUM. */
+enum cl_enum_var_value {
+ /* Enum without EnumSet or EnumBitSet. */
+ CLEV_NORMAL,
+
+ /* EnumSet. */
+ CLEV_SET,
+
+ /* EnumBitSet. */
+ CLEV_BITSET
+};
+
+struct cl_option
+{
+ /* Text of the option, including initial '-'. */
+ const char *opt_text;
+ /* Help text for --help, or NULL. */
+ const char *help;
+ /* Error message for missing argument, or NULL. */
+ const char *missing_argument_error;
+ /* Warning to give when this option is used, or NULL. */
+ const char *warn_message;
+ /* Argument of alias target when positive option given, or NULL. */
+ const char *alias_arg;
+ /* Argument of alias target when negative option given, or NULL. */
+ const char *neg_alias_arg;
+ /* Alias target, or N_OPTS if not an alias. */
+ unsigned short alias_target;
+ /* Previous option that is an initial substring of this one, or
+ N_OPTS if none. */
+ unsigned short back_chain;
+ /* Option length, not including initial '-'. */
+ unsigned char opt_len;
+ /* Next option in a sequence marked with Negative, or -1 if none.
+     For a single option with both a negative and a positive form
+ (such as -Wall and -Wno-all), NEG_IDX is equal to the option's
+ own index (i.e., cl_options[IDX].neg_idx == IDX holds). */
+ int neg_index;
+ /* CL_* flags for this option. */
+ unsigned int flags;
+ /* Disabled in this configuration. */
+ BOOL_BITFIELD cl_disabled : 1;
+ /* Options marked with CL_SEPARATE take a number of separate
+ arguments (1 to 4) that is one more than the number in this
+ bit-field. */
+ unsigned int cl_separate_nargs : 2;
+ /* Option is an alias when used with separate argument. */
+ BOOL_BITFIELD cl_separate_alias : 1;
+ /* Alias to negative form of option. */
+ BOOL_BITFIELD cl_negative_alias : 1;
+ /* Option takes no argument in the driver. */
+ BOOL_BITFIELD cl_no_driver_arg : 1;
+ /* Reject this option in the driver. */
+ BOOL_BITFIELD cl_reject_driver : 1;
+ /* Reject no- form. */
+ BOOL_BITFIELD cl_reject_negative : 1;
+ /* Missing argument OK (joined). */
+ BOOL_BITFIELD cl_missing_ok : 1;
+ /* Argument is an integer >=0. */
+ BOOL_BITFIELD cl_uinteger : 1;
+ /* Argument is a HOST_WIDE_INT. */
+ BOOL_BITFIELD cl_host_wide_int : 1;
+ /* Argument should be converted to lowercase. */
+ BOOL_BITFIELD cl_tolower : 1;
+ /* Argument is an unsigned integer with an optional byte suffix. */
+ BOOL_BITFIELD cl_byte_size: 1;
+ /* Offset of field for this option in struct gcc_options, or
+ (unsigned short) -1 if none. */
+ unsigned short flag_var_offset;
+ /* Index in cl_enums of enum used for this option's arguments, for
+ CLVC_ENUM options. */
+ unsigned short var_enum;
+  /* How this option's value is determined and how it sets a field. */
+  enum cl_var_type var_type;
+ /* Value or bit-mask with which to set a field. */
+ HOST_WIDE_INT var_value;
+ /* Range info minimum, or -1. */
+ int range_min;
+ /* Range info maximum, or -1. */
+ int range_max;
+};
+
+struct cl_var
+{
+ /* Name of the variable. */
+ const char *var_name;
+ /* Offset of field for this var in struct gcc_options. */
+ unsigned short var_offset;
+};
+
+/* Records that the state of an option consists of SIZE bytes starting
+ at DATA. DATA might point to CH in some cases. */
+struct cl_option_state {
+ const void *data;
+ size_t size;
+ char ch;
+};
+
+extern const struct cl_option cl_options[];
+extern const unsigned int cl_options_count;
+#ifdef ENABLE_PLUGIN
+extern const struct cl_var cl_vars[];
+#endif
+extern const char *const lang_names[];
+extern const unsigned int cl_lang_count;
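
cl_options holds one cl_option record per switch and cl_options_count gives its length, so simple table scans are straightforward. A hypothetical sketch that prints every language-independent option (using the CL_COMMON class bit defined just below) whose text starts with a prefix:

#include <stdio.h>
#include <string.h>

/* Sketch: list common options beginning with PREFIX. */
static void
list_common_options (const char *prefix)
{
  size_t len = strlen (prefix);
  for (unsigned int i = 0; i < cl_options_count; i++)
    if ((cl_options[i].flags & CL_COMMON)
        && strncmp (cl_options[i].opt_text, prefix, len) == 0)
      printf ("%s\n", cl_options[i].opt_text);
}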
+
+#define CL_PARAMS (1U << 16) /* Fake entry. Used to display --param info with --help. */
+#define CL_WARNING (1U << 17) /* Enables an (optional) warning message. */
+#define CL_OPTIMIZATION (1U << 18) /* Enables an (optional) optimization. */
+#define CL_DRIVER (1U << 19) /* Driver option. */
+#define CL_TARGET (1U << 20) /* Target-specific option. */
+#define CL_COMMON (1U << 21) /* Language-independent. */
+
+#define CL_MIN_OPTION_CLASS CL_PARAMS
+#define CL_MAX_OPTION_CLASS CL_COMMON
+
+/* From here on the bits describe attributes of the options.
+ Before this point the bits have described the class of the option.
+ This distinction is important because --help will not list options
+ which only have these higher bits set. */
+
+#define CL_JOINED (1U << 22) /* If takes joined argument. */
+#define CL_SEPARATE (1U << 23) /* If takes a separate argument. */
+#define CL_UNDOCUMENTED (1U << 24) /* Do not output with --help. */
+#define CL_NO_DWARF_RECORD (1U << 25) /* Do not add to producer string. */
+#define CL_PCH_IGNORE (1U << 26) /* Do not compare state for pch. */
+
+/* Flags for an enumerated option argument. */
+#define CL_ENUM_CANONICAL (1 << 0) /* Canonical for this value. */
+#define CL_ENUM_DRIVER_ONLY (1 << 1) /* Only accepted in the driver. */
+#define CL_ENUM_SET_SHIFT 2 /* Shift for enum set. */
+
+/* Structure describing an enumerated option argument. */
+
+struct cl_enum_arg
+{
+ /* The argument text, or NULL at the end of the array. */
+ const char *arg;
+
+ /* The corresponding integer value. */
+ int value;
+
+ /* Flags associated with this argument. */
+ unsigned int flags;
+};
+
+/* Structure describing an enumerated set of option arguments. */
+
+struct cl_enum
+{
+ /* Help text, or NULL if the values should not be listed in --help
+ output. */
+ const char *help;
+
+ /* Error message for unknown arguments, or NULL to use a generic
+ error. */
+ const char *unknown_error;
+
+ /* Array of possible values. */
+ const struct cl_enum_arg *values;
+
+ /* The size of the type used to store a value. */
+ size_t var_size;
+
+ /* Function to set a variable of this type. */
+ void (*set) (void *var, int value);
+
+ /* Function to get the value of a variable of this type. */
+ int (*get) (const void *var);
+};
+
+extern const struct cl_enum cl_enums[];
+extern const unsigned int cl_enums_count;
+
+/* Possible ways in which a command-line option may be erroneous.
+ These do not include not being known at all; an option index of
+ OPT_SPECIAL_unknown is used for that. */
+
+#define CL_ERR_DISABLED (1 << 0) /* Disabled in this configuration. */
+#define CL_ERR_MISSING_ARG (1 << 1) /* Argument required but missing. */
+#define CL_ERR_WRONG_LANG (1 << 2) /* Option for wrong language. */
+#define CL_ERR_UINT_ARG (1 << 3) /* Bad unsigned integer argument. */
+#define CL_ERR_INT_RANGE_ARG (1 << 4) /* Integer argument out of range. */
+#define CL_ERR_ENUM_ARG (1 << 5) /* Bad enumerated argument. */
+#define CL_ERR_NEGATIVE (1 << 6) /* Negative form of option
+ not permitted (together
+ with OPT_SPECIAL_unknown). */
+#define CL_ERR_ENUM_SET_ARG (1 << 7) /* Bad argument of enumerated set. */
+
+/* Structure describing the result of decoding an option. */
+
+struct cl_decoded_option
+{
+ /* The index of this option, or an OPT_SPECIAL_* value for
+ non-options and unknown options. */
+ size_t opt_index;
+
+ /* Any warning to give for use of this option, or NULL if none. */
+ const char *warn_message;
+
+ /* The string argument, or NULL if none. For OPT_SPECIAL_* cases,
+ the option or non-option command-line argument. */
+ const char *arg;
+
+ /* The original text of option plus arguments, with separate argv
+ elements concatenated into one string with spaces separating
+ them. This is for such uses as diagnostics and
+ -frecord-gcc-switches. */
+ const char *orig_option_with_args_text;
+
+ /* The canonical form of the option and its argument, for when it is
+ necessary to reconstruct argv elements (in particular, for
+ processing specs and passing options to subprocesses from the
+ driver). */
+ const char *canonical_option[4];
+
+ /* The number of elements in the canonical form of the option and
+ arguments; always at least 1. */
+ size_t canonical_option_num_elements;
+
+  /* For a boolean option, 1 for the true case and 0 for the "no-"
+     case. For an unsigned integer option, the value of the
+     argument. For an enumerated option, the value of the enumerator
+     corresponding to the argument string. 1 in all other cases. */
+ HOST_WIDE_INT value;
+
+ /* For EnumSet the value mask. Variable should be changed to
+ value | (prev_value & ~mask). */
+ HOST_WIDE_INT mask;
+
+ /* Any flags describing errors detected in this option. */
+ int errors;
+};
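
A short sketch of how a consumer might walk an array of these records, reporting anything the decoder flagged as unknown (OPT_SPECIAL_unknown comes from the generated options.h):

/* Sketch: report options the decoder could not match. */
static void
report_unknown_options (struct cl_decoded_option *decoded,
                        unsigned int count)
{
  for (unsigned int i = 0; i < count; i++)
    if (decoded[i].opt_index == OPT_SPECIAL_unknown)
      fprintf (stderr, "unknown option: %s\n",
               decoded[i].orig_option_with_args_text);
}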
+
+/* Structure describing an option deferred for handling after the main
+ option handlers. */
+
+struct cl_deferred_option
+{
+ /* Elements from struct cl_decoded_option used for deferred
+ options. */
+ size_t opt_index;
+ const char *arg;
+ int value;
+};
+
+/* Structure describing a single option-handling callback. */
+
+struct cl_option_handler_func
+{
+ /* The function called to handle the option. */
+ bool (*handler) (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ const struct cl_decoded_option *decoded,
+ unsigned int lang_mask, int kind, location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc,
+ void (*target_option_override_hook) (void));
+
+ /* The mask that must have some bit in common with the flags for the
+ option for this particular handler to be used. */
+ unsigned int mask;
+};
+
+/* Structure describing the callbacks used in handling options. */
+
+struct cl_option_handlers
+{
+ /* Callback for an unknown option to determine whether to give an
+ error for it, and possibly store information to diagnose the
+ option at a later point. Return true if an error should be
+ given, false otherwise. */
+ bool (*unknown_option_callback) (const struct cl_decoded_option *decoded);
+
+ /* Callback to handle, and possibly diagnose, an option for another
+ language. */
+ void (*wrong_lang_callback) (const struct cl_decoded_option *decoded,
+ unsigned int lang_mask);
+
+ /* Target option override hook. */
+ void (*target_option_override_hook) (void);
+
+ /* The number of individual handlers. */
+ size_t num_handlers;
+
+ /* The handlers themselves. */
+ struct cl_option_handler_func handlers[3];
+};
+
+/* Hold command-line options associated with stack limitation. */
+extern const char *opt_fstack_limit_symbol_arg;
+extern int opt_fstack_limit_register_no;
+
+/* Input file names. */
+
+extern const char **in_fnames;
+
+/* The count of input filenames. */
+
+extern unsigned num_in_fnames;
+
+extern char *opts_concat (const char *first, ...);
+
+/* Obstack for option strings. */
+
+extern struct obstack opts_obstack;
+
+size_t find_opt (const char *input, unsigned int lang_mask);
+extern HOST_WIDE_INT integral_argument (const char *arg, int * = NULL, bool = false);
+extern bool enum_value_to_arg (const struct cl_enum_arg *enum_args,
+ const char **argp, int value,
+ unsigned int lang_mask);
+extern void decode_cmdline_options_to_array (unsigned int argc,
+ const char **argv,
+ unsigned int lang_mask,
+ struct cl_decoded_option **decoded_options,
+ unsigned int *decoded_options_count);
+extern void init_options_once (void);
+extern void init_options_struct (struct gcc_options *opts,
+ struct gcc_options *opts_set);
+extern void init_opts_obstack (void);
+extern void decode_cmdline_options_to_array_default_mask (unsigned int argc,
+ const char **argv,
+ struct cl_decoded_option **decoded_options,
+ unsigned int *decoded_options_count);
+extern void set_default_handlers (struct cl_option_handlers *handlers,
+ void (*target_option_override_hook) (void));
+extern void decode_options (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ struct cl_decoded_option *decoded_options,
+ unsigned int decoded_options_count,
+ location_t loc,
+ diagnostic_context *dc,
+ void (*target_option_override_hook) (void));
+extern int option_enabled (int opt_idx, unsigned lang_mask, void *opts);
+
+extern bool get_option_state (struct gcc_options *, int,
+ struct cl_option_state *);
+extern void set_option (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ int opt_index, HOST_WIDE_INT value, const char *arg,
+ int kind, location_t loc, diagnostic_context *dc,
+ HOST_WIDE_INT = 0);
+extern void *option_flag_var (int opt_index, struct gcc_options *opts);
+bool handle_generated_option (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ size_t opt_index, const char *arg,
+ HOST_WIDE_INT value,
+ unsigned int lang_mask, int kind, location_t loc,
+ const struct cl_option_handlers *handlers,
+ bool generated_p, diagnostic_context *dc);
+void generate_option (size_t opt_index, const char *arg, HOST_WIDE_INT value,
+ unsigned int lang_mask,
+ struct cl_decoded_option *decoded);
+void generate_option_input_file (const char *file,
+ struct cl_decoded_option *decoded);
+extern void read_cmdline_option (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ struct cl_decoded_option *decoded,
+ location_t loc,
+ unsigned int lang_mask,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+extern void control_warning_option (unsigned int opt_index, int kind,
+ const char *arg, bool imply, location_t loc,
+ unsigned int lang_mask,
+ const struct cl_option_handlers *handlers,
+ struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ diagnostic_context *dc);
+extern char *write_langs (unsigned int mask);
+extern void print_ignored_options (void);
+extern void handle_common_deferred_options (void);
+unsigned int parse_sanitizer_options (const char *, location_t, int,
+ unsigned int, int, bool);
+
+unsigned int parse_no_sanitize_attribute (char *value);
+extern bool common_handle_option (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ const struct cl_decoded_option *decoded,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc,
+ void (*target_option_override_hook) (void));
+extern bool target_handle_option (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ const struct cl_decoded_option *decoded,
+ unsigned int lang_mask, int kind,
+ location_t loc,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc,
+ void (*target_option_override_hook) (void));
+extern void finish_options (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ location_t loc);
+extern void diagnose_options (gcc_options *opts, gcc_options *opts_set,
+ location_t loc);
+extern void print_help (struct gcc_options *opts, unsigned int lang_mask,
+                        const char *help_option_argument);
+extern void default_options_optimization (struct gcc_options *opts,
+ struct gcc_options *opts_set,
+ struct cl_decoded_option *decoded_options,
+ unsigned int decoded_options_count,
+ location_t loc,
+ unsigned int lang_mask,
+ const struct cl_option_handlers *handlers,
+ diagnostic_context *dc);
+extern void set_struct_debug_option (struct gcc_options *opts,
+ location_t loc,
+ const char *value);
+extern bool opt_enum_arg_to_value (size_t opt_index, const char *arg,
+ int *value,
+ unsigned int lang_mask);
+
+extern const struct sanitizer_opts_s
+{
+ const char *const name;
+ unsigned int flag;
+ size_t len;
+ bool can_recover;
+ bool can_trap;
+} sanitizer_opts[];
+
+extern const struct zero_call_used_regs_opts_s
+{
+ const char *const name;
+ unsigned int flag;
+} zero_call_used_regs_opts[];
+
+extern vec<const char *> help_option_arguments;
+
+extern void add_misspelling_candidates (auto_vec<char *> *candidates,
+ const struct cl_option *option,
+ const char *base_option);
+extern const char *candidates_list_and_hint (const char *arg, char *&str,
+ const auto_vec <const char *> &
+ candidates);
+
+
+extern bool parse_and_check_align_values (const char *flag,
+ const char *name,
+ auto_vec<unsigned> &result_values,
+ bool report_error,
+ location_t loc);
+
+extern void parse_and_check_patch_area (const char *arg, bool report_error,
+ HOST_WIDE_INT *patch_area_size,
+ HOST_WIDE_INT *patch_area_start);
+
+extern void parse_options_from_collect_gcc_options (const char *, obstack *,
+ int *);
+
+extern void prepend_xassembler_to_collect_as_options (const char *, obstack *);
+
+extern char *gen_command_line_string (cl_decoded_option *options,
+ unsigned int options_count);
+extern char *gen_producer_string (const char *language_string,
+ cl_decoded_option *options,
+ unsigned int options_count);
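
Putting several of the declarations above together, the usual flow is: run the one-time initializers, decode argv into cl_decoded_option records, then hand the array on for full processing. A hedged sketch (error handling omitted, and the exact initialization sequence required by a given host is an assumption):

/* Sketch: decode a raw command line into option records. */
static void
decode_args (unsigned int argc, const char **argv)
{
  struct cl_decoded_option *decoded;
  unsigned int decoded_count;

  init_options_once ();
  init_opts_obstack ();
  decode_cmdline_options_to_array_default_mask (argc, argv,
                                                &decoded, &decoded_count);
  /* decoded[0] describes argv[0] itself (OPT_SPECIAL_program_name);
     real callers then pass the array to decode_options together with
     a gcc_options pair and a diagnostic context. */
}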
+
+/* Set OPTION in OPTS to VALUE if the option is not set in OPTS_SET. */
+
+#define SET_OPTION_IF_UNSET(OPTS, OPTS_SET, OPTION, VALUE) \
+ do \
+ { \
+ if (!(OPTS_SET)->x_ ## OPTION) \
+ (OPTS)->x_ ## OPTION = VALUE; \
+ } \
+ while (false)
+
+/* Return true if OPTION is set by user in global options. */
+
+#define OPTION_SET_P(OPTION) global_options_set.x_ ## OPTION
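
For example (a sketch; flag_unroll_loops stands in for any field of struct gcc_options): give an option a default only where the user stayed silent. Note that SET_OPTION_IF_UNSET writes OPTS but deliberately leaves OPTS_SET untouched, so OPTION_SET_P keeps reporting only explicit user choices.

/* Sketch: default -funroll-loops on unless the user decided. */
SET_OPTION_IF_UNSET (opts, opts_set, flag_unroll_loops, 1);

/* Later, against the global option state: */
if (OPTION_SET_P (flag_unroll_loops))
  {
    /* The user chose explicitly on the command line. */
  }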
+
+/* Find all the switches given to us
+ and make a vector describing them.
+ The elements of the vector are strings, one per switch given.
+ If a switch uses following arguments, then the `part1' field
+ is the switch itself and the `args' field
+ is a null-terminated vector containing the following arguments.
+ Bits in the `live_cond' field are:
+ SWITCH_LIVE to indicate this switch is true in a conditional spec.
+ SWITCH_FALSE to indicate this switch is overridden by a later switch.
+ SWITCH_IGNORE to indicate this switch should be ignored (used in %<S).
+   SWITCH_IGNORE_PERMANENTLY to indicate this switch should be ignored
+     in all do_spec calls afterwards. Used for %<S from self specs.
+   SWITCH_KEEP_FOR_GCC to indicate that this switch, otherwise ignored,
+     should be included in COLLECT_GCC_OPTIONS.
+ The `known' field describes whether this is an internal switch.
+ The `validated' field describes whether any spec has looked at this switch;
+ if it remains false at the end of the run, the switch must be meaningless.
+ The `ordering' field is used to temporarily mark switches that have to be
+ kept in a specific order. */
+
+#define SWITCH_LIVE (1 << 0)
+#define SWITCH_FALSE (1 << 1)
+#define SWITCH_IGNORE (1 << 2)
+#define SWITCH_IGNORE_PERMANENTLY (1 << 3)
+#define SWITCH_KEEP_FOR_GCC (1 << 4)
+
+struct switchstr
+{
+ const char *part1;
+ const char **args;
+ unsigned int live_cond;
+ bool known;
+ bool validated;
+ bool ordering;
+};
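
A sketch of reading the live_cond bits, illustrating the semantics described in the comment above (this is not the driver's actual logic):

/* Sketch: should a recorded switch reach COLLECT_GCC_OPTIONS? */
static bool
switch_visible_to_gcc (const struct switchstr *sw)
{
  if (sw->live_cond & SWITCH_KEEP_FOR_GCC)
    return true;
  if (sw->live_cond & (SWITCH_IGNORE | SWITCH_IGNORE_PERMANENTLY))
    return false;
  return (sw->live_cond & SWITCH_LIVE) != 0;
}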
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ordered-hash-map.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ordered-hash-map.h
new file mode 100644
index 0000000..6b68cc9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ordered-hash-map.h
@@ -0,0 +1,188 @@
+/* A type-safe hash map that retains the insertion order of keys.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_ORDERED_HASH_MAP_H
+#define GCC_ORDERED_HASH_MAP_H
+
+/* Notes:
+ - The keys must be PODs, since vec<> uses assignment to populate slots
+ without properly initializing them.
+ - doesn't have GTY support.
+ - supports removal, but retains order of original insertion.
+ (Removal might be better handled by using a doubly-linked list
+ of nodes, holding the values). */
+
+template<typename KeyId, typename Value,
+ typename Traits>
+class ordered_hash_map
+{
+ typedef typename Traits::key_type Key;
+
+public:
+ ordered_hash_map () {}
+
+ ordered_hash_map (const ordered_hash_map &other)
+ : m_map (other.m_map),
+ m_keys (other.m_keys.length ()),
+ m_key_index (other.m_key_index)
+ {
+ unsigned i;
+ Key key;
+ FOR_EACH_VEC_ELT (other.m_keys, i, key)
+ m_keys.quick_push (key);
+ }
+
+ /* If key K isn't already in the map add key K with value V to the map, and
+ return false. Otherwise set the value of the entry for key K to be V and
+ return true. */
+
+ bool put (const Key &k, const Value &v)
+ {
+ bool existed = m_map.put (k, v);
+ if (!existed)
+ {
+ bool key_present;
+ int &slot = m_key_index.get_or_insert (k, &key_present);
+ if (!key_present)
+ {
+ slot = m_keys.length ();
+ m_keys.safe_push (k);
+ }
+ }
+ return existed;
+ }
+
+  /* If the passed-in key is in the map, return a pointer to its value;
+     otherwise NULL. */
+
+ Value *get (const Key &k)
+ {
+ return m_map.get (k);
+ }
+
+  /* Remove key K from the map; the insertion order of the remaining
+     keys is unaffected. */
+
+ void remove (const Key &k)
+ {
+ m_map.remove (k);
+ }
+
+ size_t elements () const { return m_map.elements (); }
+
+ class iterator
+ {
+ public:
+ explicit iterator (const ordered_hash_map &map, unsigned idx) :
+ m_ordered_hash_map (map), m_idx (idx) {}
+
+ iterator &operator++ ()
+ {
+ /* Increment m_idx until we find a non-deleted element, or go beyond
+ the end. */
+ while (1)
+ {
+ ++m_idx;
+ if (valid_index_p ())
+ break;
+ }
+ return *this;
+ }
+
+    /* Can't use std::pair here, because GCC releases before 4.3 don't
+       handle std::pair well when the template parameters are references.
+       See PR86739. */
+ struct reference_pair {
+ const Key &first;
+ Value &second;
+
+ reference_pair (const Key &key, Value &value)
+ : first (key), second (value) {}
+
+ template <typename K, typename V>
+ operator std::pair<K, V> () const { return std::pair<K, V> (first, second); }
+ };
+
+ reference_pair operator* ()
+ {
+ const Key &k = m_ordered_hash_map.m_keys[m_idx];
+ Value *slot
+ = const_cast<ordered_hash_map &> (m_ordered_hash_map).get (k);
+ gcc_assert (slot);
+ return reference_pair (k, *slot);
+ }
+
+ bool
+ operator != (const iterator &other) const
+ {
+ return m_idx != other.m_idx;
+ }
+
+ /* Treat one-beyond-the-end as valid, for handling the "end" case. */
+
+ bool valid_index_p () const
+ {
+ if (m_idx > m_ordered_hash_map.m_keys.length ())
+ return false;
+ if (m_idx == m_ordered_hash_map.m_keys.length ())
+ return true;
+ const Key &k = m_ordered_hash_map.m_keys[m_idx];
+ Value *slot
+ = const_cast<ordered_hash_map &> (m_ordered_hash_map).get (k);
+ return slot != NULL;
+ }
+
+ const ordered_hash_map &m_ordered_hash_map;
+ unsigned m_idx;
+ };
+
+ /* Standard iterator retrieval methods. */
+
+ iterator begin () const
+ {
+ iterator i = iterator (*this, 0);
+ while (!i.valid_index_p () && i != end ())
+ ++i;
+ return i;
+ }
+ iterator end () const { return iterator (*this, m_keys.length ()); }
+
+private:
+ /* The assignment operator is not yet implemented; prevent erroneous
+ usage of unsafe compiler-generated one. */
+ void operator= (const ordered_hash_map &);
+
+ /* The underlying map. */
+ hash_map<KeyId, Value, Traits> m_map;
+
+ /* The ordering of the keys. */
+ auto_vec<Key> m_keys;
+
+ /* For each key that's ever been in the map, its index within m_keys. */
+ hash_map<KeyId, int> m_key_index;
+};
+
+/* Two-argument form. */
+
+template<typename Key, typename Value,
+ typename Traits = simple_hashmap_traits<default_hash_traits<Key>,
+ Value> >
+class ordered_hash_map;
+
+#endif /* GCC_ORDERED_HASH_MAP_H */
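
A usage sketch (assuming a context where hash_map and default hash traits for plain pointer keys are available): values update in place, but iteration replays the original insertion order.

/* Sketch: first-insertion order survives updates. */
ordered_hash_map<const char *, int> first_seen;
first_seen.put ("alpha", 1);
first_seen.put ("beta", 2);
first_seen.put ("alpha", 3);      /* updates the value, keeps position */

for (auto kv : first_seen)        /* yields "alpha" then "beta" */
  printf ("%s -> %d\n", kv.first, kv.second);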
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/output.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/output.h
new file mode 100644
index 0000000..877d2af
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/output.h
@@ -0,0 +1,631 @@
+/* Declarations for insn-output.cc and other code to write to asm_out_file.
+ These functions are defined in final.cc, and varasm.cc.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_OUTPUT_H
+#define GCC_OUTPUT_H
+
+/* Initialize data in final at the beginning of a compilation. */
+extern void init_final (const char *);
+
+/* Enable APP processing of subsequent output.
+ Used before the output from an `asm' statement. */
+extern void app_enable (void);
+
+/* Disable APP processing of subsequent output.
+ Called from varasm.cc before most kinds of output. */
+extern void app_disable (void);
+
+/* Return the number of slots filled in the current
+ delayed branch sequence (we don't count the insn needing the
+ delay slot). Zero if not in a delayed branch sequence. */
+extern int dbr_sequence_length (void);
+
+/* Indicate that branch shortening hasn't yet been done. */
+extern void init_insn_lengths (void);
+
+/* Obtain the current length of an insn. If branch shortening has been done,
+ get its actual length. Otherwise, get its maximum length. */
+extern int get_attr_length (rtx_insn *);
+
+/* Obtain the current length of an insn. If branch shortening has been done,
+ get its actual length. Otherwise, get its minimum length. */
+extern int get_attr_min_length (rtx_insn *);
+
+/* Make a pass over all insns and compute their actual lengths by shortening
+ any branches of variable length if possible. */
+extern void shorten_branches (rtx_insn *);
+
+const char *get_some_local_dynamic_name ();
+
+/* Output assembler code for the start of a function,
+ and initialize some of the variables in this file
+ for the new function. The label for the function and associated
+ assembler pseudo-ops have already been output in
+ `assemble_start_function'. */
+extern void final_start_function (rtx_insn *, FILE *, int);
+
+/* Output assembler code for the end of a function.
+ For clarity, args are same as those of `final_start_function'
+ even though not all of them are needed. */
+extern void final_end_function (void);
+
+/* Output assembler code for some insns: all or part of a function. */
+extern void final (rtx_insn *, FILE *, int);
+
+/* The final scan for one insn, INSN. Args are same as in `final', except
+ that INSN is the insn being scanned. Value returned is the next insn to
+ be scanned. */
+extern rtx_insn *final_scan_insn (rtx_insn *, FILE *, int, int, int *);
+
+/* Replace a SUBREG with a REG or a MEM, based on the thing it is a
+ subreg of. */
+extern rtx alter_subreg (rtx *, bool);
+
+/* Print an operand using machine-dependent assembler syntax. */
+extern void output_operand (rtx, int);
+
+/* Report inconsistency between the assembler template and the operands.
+ In an `asm', it's the user's fault; otherwise, the compiler's fault. */
+extern void output_operand_lossage (const char *, ...) ATTRIBUTE_PRINTF_1;
+
+/* Output a string of assembler code, substituting insn operands.
+ Defined in final.cc. */
+extern void output_asm_insn (const char *, rtx *);
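+
+/* Illustrative editor's note, not part of the upstream header: a
+   target's insn output function typically pairs the two declarations
+   above, e.g.
+
+     if (!CONST_INT_P (operands[2]))
+       output_operand_lossage ("operand 2 must be a constant integer");
+     output_asm_insn ("add\t%0, %1, %2", operands);
+
+   where `operands' is the operand array of the insn being output and
+   the "add" template is a made-up example.  */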
+
+/* Compute a worst-case reference address of a branch so that it
+ can be safely used in the presence of aligned labels.
+ Defined in final.cc. */
+extern int insn_current_reference_address (rtx_insn *);
+
+/* Find the alignment associated with a CODE_LABEL.
+ Defined in final.cc. */
+extern align_flags label_to_alignment (rtx);
+
+/* Output a LABEL_REF, or a bare CODE_LABEL, as an assembler symbol. */
+extern void output_asm_label (rtx);
+
+/* Mark SYMBOL_REFs in X as referenced through use of assemble_external.  */
+extern void mark_symbol_refs_as_used (rtx);
+
+/* Print a memory reference operand for address X with access mode MODE
+ using machine-dependent assembler syntax. */
+extern void output_address (machine_mode, rtx);
+
+/* Print an integer constant expression in assembler syntax.
+ Addition and subtraction are the only arithmetic
+ that may appear in these expressions. */
+extern void output_addr_const (FILE *, rtx);
+
+/* Output a string of assembler code, substituting numbers, strings
+ and fixed syntactic prefixes. */
+#if GCC_VERSION >= 3004
+#define ATTRIBUTE_ASM_FPRINTF(m, n) __attribute__ ((__format__ (__asm_fprintf__, m, n))) ATTRIBUTE_NONNULL(m)
+#else
+#define ATTRIBUTE_ASM_FPRINTF(m, n) ATTRIBUTE_NONNULL(m)
+#endif
+
+extern void fprint_whex (FILE *, unsigned HOST_WIDE_INT);
+extern void fprint_ul (FILE *, unsigned long);
+extern int sprint_ul (char *, unsigned long);
+
+extern void asm_fprintf (FILE *file, const char *p, ...)
+ ATTRIBUTE_ASM_FPRINTF(2, 3);
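+
+/* Illustrative editor's note, not part of the upstream header: besides
+   the standard conversions, asm_fprintf supports GCC extensions such
+   as %U (the user label prefix) and %wd (a HOST_WIDE_INT in decimal),
+   handled in final.cc, e.g.
+
+     asm_fprintf (asm_out_file, "\t.globl\t%U%s\n", name);
+     asm_fprintf (asm_out_file, "\t.word\t%wd\n", offset);
+
+   where `name' (const char *) and `offset' (HOST_WIDE_INT) are
+   hypothetical locals.  */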
+
+/* Return nonzero if this function has no function calls. */
+extern int leaf_function_p (void);
+
+/* Return 1 if branch is a forward branch.
+ Uses insn_shuid array, so it works only in the final pass. May be used by
+ output templates to add branch prediction hints, for example. */
+extern int final_forward_branch_p (rtx_insn *);
+
+/* Return 1 if this function uses only the registers that can be
+ safely renumbered. */
+extern int only_leaf_regs_used (void);
+
+/* Scan IN_RTX and its subexpressions, and renumber all regs into those
+ available in leaf functions. */
+extern void leaf_renumber_regs_insn (rtx);
+
+/* Locate the proper template for the given insn-code. */
+extern const char *get_insn_template (int, rtx_insn *);
+
+/* Functions in varasm.cc. */
+
+/* Emit any pending weak declarations. */
+extern void weak_finish (void);
+
+/* Decode an `asm' spec for a declaration as a register name.
+ Return the register number, or -1 if nothing specified,
+ or -2 if the ASMSPEC is not `cc' or `memory' and is not recognized,
+ or -3 if ASMSPEC is `cc' and is not recognized,
+ or -4 if ASMSPEC is `memory' and is not recognized.
+ Accept an exact spelling or a decimal number.
+ Prefixes such as % are optional. */
+extern int decode_reg_name (const char *);
+
+/* Similar to decode_reg_name, but takes an extra parameter that is a
+ pointer to the number of (internal) registers described by the
+ external name. */
+extern int decode_reg_name_and_count (const char *, int *);
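+
+/* Illustrative editor's note, not part of the upstream header: for an
+   explicit register variable such as
+
+     register int counter asm ("r0");
+
+   the compiler ends up calling decode_reg_name ("r0") and uses the
+   returned hard register number, or diagnoses one of the negative
+   codes documented above.  */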
+
+extern void do_assemble_alias (tree, tree);
+extern void do_assemble_symver (tree, tree);
+
+extern void default_assemble_visibility (tree, int);
+
+/* Output a string of literal assembler code
+ for an `asm' keyword used between functions. */
+extern void assemble_asm (tree);
+
+/* Get the function's name from a decl, as described by its RTL. */
+extern const char *get_fnname_from_decl (tree);
+
+/* Output assembler code for the constant pool of a function and associated
+ with defining the name of the function. DECL describes the function.
+ NAME is the function's name. For the constant pool, we use the current
+ constant pool data. */
+extern void assemble_start_function (tree, const char *);
+
+/* Output assembler code associated with defining the size of the
+ function. DECL describes the function. NAME is the function's name. */
+extern void assemble_end_function (tree, const char *);
+
+/* Assemble everything that is needed for a variable or function declaration.
+ Not used for automatic variables, and not used for function definitions.
+ Should not be called for variables of incomplete structure type.
+
+ TOP_LEVEL is nonzero if this variable has file scope.
+ AT_END is nonzero if this is the special handling, at end of compilation,
+ to define things that have had only tentative definitions.
+ DONT_OUTPUT_DATA if nonzero means don't actually output the
+ initial value (that will be done by the caller). */
+extern void assemble_variable (tree, int, int, int);
+
+/* Assemble everything that is needed for a variable declaration that has
+ no definition in the current translation unit. */
+extern void assemble_undefined_decl (tree);
+
+/* Compute the alignment of variable specified by DECL.
+ DONT_OUTPUT_DATA is from assemble_variable. */
+extern void align_variable (tree decl, bool dont_output_data);
+
+/* Queue for outputting something to declare an external symbol to the
+ assembler. (Most assemblers don't need this, so we normally output
+ nothing.) Do nothing if DECL is not external. */
+extern void assemble_external (tree);
+
+/* Assemble code to leave SIZE bytes of zeros. */
+extern void assemble_zeros (unsigned HOST_WIDE_INT);
+
+/* Assemble an alignment pseudo op for an ALIGN-bit boundary. */
+extern void assemble_align (unsigned int);
+
+/* Assemble a string constant with the specified C string as contents. */
+extern void assemble_string (const char *, int);
+
+/* Similar to assemble_external, but for a library function FUN
+   (given as a SYMBOL_REF rtx).  */
+extern void assemble_external_libcall (rtx);
+
+/* Assemble a label named NAME. */
+extern void assemble_label (FILE *, const char *);
+
+/* Output to FILE (an assembly file) a reference to NAME. If NAME
+ starts with a *, the rest of NAME is output verbatim. Otherwise
+ NAME is transformed in a target-specific way (usually by the
+ addition of an underscore). */
+extern void assemble_name_raw (FILE *, const char *);
+
+/* Return NAME that should actually be emitted, looking through
+ transparent aliases. If NAME refers to an entity that is also
+ represented as a tree (like a function or variable), mark the entity
+ as referenced. */
+extern const char *assemble_name_resolve (const char *);
+
+/* Like assemble_name_raw, but should be used when NAME might refer to
+ an entity that is also represented as a tree (like a function or
+ variable). If NAME does refer to such an entity, that entity will
+ be marked as referenced. */
+extern void assemble_name (FILE *, const char *);
+
+/* Return the assembler directive for creating a given kind of integer
+ object. SIZE is the number of bytes in the object and ALIGNED_P
+ indicates whether it is known to be aligned. Return NULL if the
+ assembly dialect has no such directive.
+
+ The returned string should be printed at the start of a new line and
+ be followed immediately by the object's initial value. */
+extern const char *integer_asm_op (int, int);
+
+/* Use directive OP to assemble an integer object X. Print OP at the
+ start of the line, followed immediately by the value of X. */
+extern void assemble_integer_with_op (const char *, rtx);
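+
+/* Illustrative editor's note, not part of the upstream header: the
+   usual pairing, and essentially what default_assemble_integer does,
+   is
+
+     const char *op = integer_asm_op (size, aligned_p);
+     if (op != NULL)
+       assemble_integer_with_op (op, x);
+
+   with assemble_integer falling back to emitting smaller pieces when
+   no suitable directive exists.  */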
+
+/* The default implementation of the asm_out.integer target hook. */
+extern bool default_assemble_integer (rtx, unsigned int, int);
+
+/* Assemble the integer constant X into an object of SIZE bytes. ALIGN is
+ the alignment of the integer in bits. Return 1 if we were able to output
+ the constant, otherwise 0. If FORCE is nonzero the constant must
+ be outputable. */
+extern bool assemble_integer (rtx, unsigned, unsigned, int);
+
+/* Return section for TEXT_SECTION_NAME if DECL or DECL_SECTION_NAME (DECL)
+   is NULL.  */
+extern section *get_named_text_section (tree, const char *, const char *);
+
+/* An interface to assemble_integer for the common case in which a value is
+ fully aligned and must be printed. VALUE is the value of the integer
+ object and SIZE is the number of bytes it contains. */
+#define assemble_aligned_integer(SIZE, VALUE) \
+ assemble_integer (VALUE, SIZE, (SIZE) * BITS_PER_UNIT, 1)
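+
+/* Illustrative editor's note, not part of the upstream header: e.g.
+   emitting an aligned zero word into the current section:
+
+     assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);  */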
+
+/* Assemble the floating-point constant D into an object of size MODE. ALIGN
+ is the alignment of the constant in bits. If REVERSE is true, D is output
+ in reverse storage order. */
+extern void assemble_real (REAL_VALUE_TYPE, scalar_float_mode, unsigned,
+ bool = false);
+
+/* Write the address of the entity given by SYMBOL to SEC. */
+extern void assemble_addr_to_section (rtx, section *);
+
+/* Return TRUE if and only if the constant pool has no entries. Note
+ that even entries we might end up choosing not to emit are counted
+ here, so there is the potential for missed optimizations. */
+extern bool constant_pool_empty_p (void);
+
+extern rtx_insn *peephole (rtx_insn *);
+
+extern void output_shared_constant_pool (void);
+
+extern void output_object_blocks (void);
+
+extern void output_quoted_string (FILE *, const char *);
+
+/* When outputting delayed branch sequences, this rtx holds the
+ sequence being output. It is null when no delayed branch
+ sequence is being output, so it can be used as a test in the
+ insn output code.
+
+ This variable is defined in final.cc. */
+extern rtx_sequence *final_sequence;
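+
+/* Illustrative editor's note, not part of the upstream header: insn
+   output code on delay-slot targets tests this, e.g.
+
+     if (final_sequence)
+       {
+         rtx_insn *delay = final_sequence->insn (1);
+         ...
+       }
+
+   where element 0 of the SEQUENCE is the branch itself and element 1
+   is the delay-slot insn.  */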
+
+/* File in which assembler code is being written. */
+
+#ifdef BUFSIZ
+extern FILE *asm_out_file;
+#endif
+
+/* The first global object in the file. */
+extern const char *first_global_object_name;
+
+/* The first weak object in the file. */
+extern const char *weak_global_object_name;
+
+/* Nonnull if the insn currently being emitted was a COND_EXEC pattern. */
+extern rtx current_insn_predicate;
+
+/* Last insn processed by final_scan_insn. */
+extern rtx_insn *current_output_insn;
+
+/* Nonzero while outputting an `asm' with operands.
+ This means that inconsistencies are the user's fault, so don't die.
+ The precise value is the insn being output, to pass to error_for_asm. */
+extern const rtx_insn *this_is_asm_operands;
+
+/* Carry information from ASM_DECLARE_OBJECT_NAME
+ to ASM_FINISH_DECLARE_OBJECT. */
+extern int size_directive_output;
+extern tree last_assemble_variable_decl;
+
+extern bool first_function_block_is_cold;
+
+/* Decide whether DECL needs to be in a writable section.
+ RELOC is the same as for SELECT_SECTION. */
+extern bool decl_readonly_section (const_tree, int);
+
+/* This can be used to compute RELOC for the function above, when
+ given a constant expression. */
+extern int compute_reloc_for_constant (tree);
+
+/* This can be used to compute RELOC for get_variable_section. */
+extern int compute_reloc_for_var (tree);
+
+/* User label prefix in effect for this compilation. */
+extern const char *user_label_prefix;
+
+/* Default target function prologue and epilogue assembler output. */
+extern void default_function_pro_epilogue (FILE *);
+
+/* Default target function switched text sections. */
+extern void default_function_switched_text_sections (FILE *, tree, bool);
+
+/* Default target hook that outputs nothing to a stream. */
+extern void no_asm_to_stream (FILE *);
+
+/* Flags controlling properties of a section. */
+enum section_flag
+{
+ /* This SECTION_STYLE is used for unnamed sections that we can switch
+ to using a special assembler directive. */
+ SECTION_UNNAMED = 0,
+
+ SECTION_ENTSIZE = (1UL << 8) - 1, /* entity size in section */
+ SECTION_CODE = 1UL << 8, /* contains code */
+ SECTION_WRITE = 1UL << 9, /* data is writable */
+
+ SECTION_DEBUG = 1UL << 10, /* contains debug data */
+ SECTION_LINKONCE = 1UL << 11, /* is linkonce */
+ SECTION_SMALL = 1UL << 12, /* contains "small data" */
+ SECTION_BSS = 1UL << 13, /* contains zeros only */
+ SECTION_MERGE = 1UL << 14, /* contains mergeable data */
+ SECTION_STRINGS = 1UL << 15, /* contains zero terminated strings
+ without embedded zeros */
+ SECTION_OVERRIDE = 1UL << 16, /* allow override of default flags */
+ SECTION_TLS = 1UL << 17, /* contains thread-local storage */
+ SECTION_NOTYPE = 1UL << 18, /* don't output @progbits */
+ SECTION_DECLARED = 1UL << 19, /* section has been used */
+
+ /* This SECTION_STYLE is used for named sections that we can switch
+ to using a general section directive. */
+ SECTION_NAMED = 1UL << 20,
+
+ /* This SECTION_STYLE is used for sections that we cannot switch to at
+ all. The choice of section is implied by the directive that we use
+ to declare the object. */
+ SECTION_NOSWITCH = 1UL << 21,
+
+ /* bits used for SECTION_STYLE */
+ SECTION_STYLE_MASK = SECTION_NAMED | SECTION_NOSWITCH,
+
+ SECTION_COMMON = 1UL << 22, /* contains common data */
+ SECTION_RELRO = 1UL << 23, /* data is readonly after
+ relocation processing */
+ SECTION_EXCLUDE = 1UL << 24, /* discarded by the linker */
+ SECTION_RETAIN = 1UL << 25, /* retained by the linker. */
+ SECTION_LINK_ORDER = 1UL << 26, /* section needs link-order. */
+
+ /* NB: The maximum SECTION_MACH_DEP is (1UL << 28) since AVR needs 4 bits
+ in SECTION_MACH_DEP. */
+ SECTION_MACH_DEP = 1UL << 27,
+
+ /* subsequent bits reserved for target */
+};
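+
+/* Illustrative editor's note, not part of the upstream header: the
+   flags are OR'd together; e.g. a writable, read-only-after-relocation
+   section could be requested as
+
+     section *s = get_section (".data.rel.ro",
+                               SECTION_WRITE | SECTION_RELRO, decl);
+
+   where `decl' is the associated declaration (or NULL_TREE), and
+   get_section is declared further down in this header.  */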
+
+/* The categories returned by categorize_decl_for_section, used by
+   default_elf_select_section and default_elf_unique_section to
+   classify a DECL.  */
+
+enum section_category
+{
+ SECCAT_TEXT,
+
+ SECCAT_RODATA,
+ SECCAT_RODATA_MERGE_STR,
+ SECCAT_RODATA_MERGE_STR_INIT,
+ SECCAT_RODATA_MERGE_CONST,
+ SECCAT_SRODATA,
+
+ SECCAT_DATA,
+
+  /* To optimize loading of shared programs, define the following
+     subsections of the data section:
+	_REL	Contains data that has relocations, so they get grouped
+		together and the dynamic linker will visit fewer pages
+		in memory.
+ _RO Contains data that is otherwise read-only. This is useful
+ with prelinking as most relocations won't be dynamically
+ linked and thus stay read only.
+ _LOCAL Marks data containing relocations only to local objects.
+ These relocations will get fully resolved by prelinking. */
+ SECCAT_DATA_REL,
+ SECCAT_DATA_REL_LOCAL,
+ SECCAT_DATA_REL_RO,
+ SECCAT_DATA_REL_RO_LOCAL,
+
+ SECCAT_SDATA,
+ SECCAT_TDATA,
+
+ SECCAT_BSS,
+ SECCAT_SBSS,
+ SECCAT_TBSS
+};
+
+/* Information that is provided by all instances of the section type. */
+struct GTY(()) section_common {
+ /* The set of SECTION_* flags that apply to this section. */
+ unsigned int flags;
+};
+
+/* Information about a SECTION_NAMED section. */
+struct GTY(()) named_section {
+ struct section_common common;
+
+ /* The name of the section. */
+ const char *name;
+
+ /* If nonnull, the VAR_DECL or FUNCTION_DECL with which the
+ section is associated. */
+ tree decl;
+};
+
+/* A callback that writes the assembly code for switching to an unnamed
+ section. The argument provides callback-specific data. */
+typedef void (*unnamed_section_callback) (const char *);
+
+/* Information about a SECTION_UNNAMED section. */
+struct GTY(()) unnamed_section {
+ struct section_common common;
+
+ /* The callback used to switch to the section, and the data that
+ should be passed to the callback. */
+ unnamed_section_callback GTY ((callback)) callback;
+ const char *data;
+
+ /* The next entry in the chain of unnamed sections. */
+ section *next;
+};
+
+/* A callback that writes the assembly code for a decl in a
+ SECTION_NOSWITCH section. DECL is the decl that should be assembled
+ and NAME is the name of its SYMBOL_REF. SIZE is the size of the decl
+ in bytes and ROUNDED is that size rounded up to the next
+ BIGGEST_ALIGNMENT / BITS_PER_UNIT boundary.
+
+ Return true if the callback used DECL_ALIGN to set the object's
+ alignment. A false return value implies that we are relying
+ on the rounded size to align the decl. */
+typedef bool (*noswitch_section_callback) (tree decl, const char *name,
+ unsigned HOST_WIDE_INT size,
+ unsigned HOST_WIDE_INT rounded);
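+
+/* Illustrative editor's note, not part of the upstream header: a
+   minimal callback in the spirit of varasm.cc's emit_common might look
+   like
+
+     static bool
+     emit_common_sketch (tree decl ATTRIBUTE_UNUSED, const char *name,
+                         unsigned HOST_WIDE_INT size ATTRIBUTE_UNUSED,
+                         unsigned HOST_WIDE_INT rounded)
+     {
+       fprintf (asm_out_file, "\t.comm\t");
+       assemble_name (asm_out_file, name);
+       fprintf (asm_out_file, "," HOST_WIDE_INT_PRINT_UNSIGNED "\n",
+                rounded);
+       return false;  /* Alignment comes from the rounded size.  */
+     }
+
+   This is a sketch, not the actual implementation.  */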
+
+/* Information about a SECTION_NOSWITCH section. */
+struct GTY(()) noswitch_section {
+ struct section_common common;
+
+ /* The callback used to assemble decls in this section. */
+ noswitch_section_callback GTY ((callback)) callback;
+};
+
+/* Information about a section, which may be named or unnamed. */
+union GTY ((desc ("SECTION_STYLE (&(%h))"), for_user)) section {
+ struct section_common GTY ((skip)) common;
+ struct named_section GTY ((tag ("SECTION_NAMED"))) named;
+ struct unnamed_section GTY ((tag ("SECTION_UNNAMED"))) unnamed;
+ struct noswitch_section GTY ((tag ("SECTION_NOSWITCH"))) noswitch;
+};
+
+/* Return the style of section SECT. */
+#define SECTION_STYLE(SECT) ((SECT)->common.flags & SECTION_STYLE_MASK)
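+
+/* Illustrative editor's note, not part of the upstream header: callers
+   dispatch on the style, roughly as switch_to_section does in
+   varasm.cc:
+
+     switch (SECTION_STYLE (sect))
+       {
+       case SECTION_NAMED:
+         targetm.asm_out.named_section (sect->named.name,
+                                        sect->named.common.flags,
+                                        sect->named.decl);
+         break;
+       case SECTION_UNNAMED:
+         sect->unnamed.callback (sect->unnamed.data);
+         break;
+       case SECTION_NOSWITCH:
+         gcc_unreachable ();
+       }  */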
+
+struct object_block;
+
+/* Special well-known sections. */
+extern GTY(()) section *text_section;
+extern GTY(()) section *data_section;
+extern GTY(()) section *readonly_data_section;
+extern GTY(()) section *sdata_section;
+extern GTY(()) section *ctors_section;
+extern GTY(()) section *dtors_section;
+extern GTY(()) section *bss_section;
+extern GTY(()) section *sbss_section;
+extern GTY(()) section *exception_section;
+extern GTY(()) section *eh_frame_section;
+extern GTY(()) section *tls_comm_section;
+extern GTY(()) section *comm_section;
+extern GTY(()) section *lcomm_section;
+extern GTY(()) section *bss_noswitch_section;
+
+extern GTY(()) section *in_section;
+extern GTY(()) bool in_cold_section_p;
+
+extern section *get_unnamed_section (unsigned int, void (*) (const char *),
+ const char *);
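+
+/* Illustrative editor's note, not part of the upstream header: the
+   well-known sections above are created with get_unnamed_section; e.g.
+   text_section is initialized in varasm.cc roughly as
+
+     text_section = get_unnamed_section (SECTION_CODE,
+                                         output_section_asm_op,
+                                         "\t.text");
+
+   with output_section_asm_op (declared below) printing its argument to
+   asm_out_file.  */
+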
+extern section *get_section (const char *, unsigned int, tree,
+ bool not_existing = false);
+extern section *get_named_section (tree, const char *, int);
+extern section *get_variable_section (tree, bool);
+extern void place_block_symbol (rtx);
+extern rtx get_section_anchor (struct object_block *, HOST_WIDE_INT,
+ enum tls_model);
+extern section *mergeable_constant_section (machine_mode,
+ unsigned HOST_WIDE_INT,
+ unsigned int);
+extern section *function_section (tree);
+extern section *unlikely_text_section (void);
+extern section *current_function_section (void);
+extern void switch_to_other_text_partition (void);
+
+/* Return the numbered .ctors.N (if CONSTRUCTOR_P) or .dtors.N (if
+ not) section for PRIORITY. */
+extern section *get_cdtor_priority_section (int, bool);
+
+extern bool unlikely_text_section_p (section *);
+extern void switch_to_section (section *, tree = nullptr);
+extern void output_section_asm_op (const char *);
+
+extern void record_tm_clone_pair (tree, tree);
+extern void finish_tm_clone_pairs (void);
+extern tree get_tm_clone_pair (tree);
+
+extern void default_asm_output_source_filename (FILE *, const char *);
+extern void output_file_directive (FILE *, const char *);
+
+extern unsigned int default_section_type_flags (tree, const char *, int);
+
+extern bool have_global_bss_p (void);
+extern bool bss_initializer_p (const_tree, bool = false);
+
+extern void default_no_named_section (const char *, unsigned int, tree);
+extern void default_elf_asm_named_section (const char *, unsigned int, tree);
+extern enum section_category categorize_decl_for_section (const_tree, int);
+extern void default_coff_asm_named_section (const char *, unsigned int, tree);
+extern void default_pe_asm_named_section (const char *, unsigned int, tree);
+
+extern void default_named_section_asm_out_destructor (rtx, int);
+extern void default_dtor_section_asm_out_destructor (rtx, int);
+extern void default_named_section_asm_out_constructor (rtx, int);
+extern void default_ctor_section_asm_out_constructor (rtx, int);
+
+extern section *default_select_section (tree, int, unsigned HOST_WIDE_INT);
+extern section *default_elf_select_section (tree, int, unsigned HOST_WIDE_INT);
+extern void default_unique_section (tree, int);
+extern section *default_function_rodata_section (tree, bool);
+extern section *default_no_function_rodata_section (tree, bool);
+extern section *default_clone_table_section (void);
+extern section *default_select_rtx_section (machine_mode, rtx,
+ unsigned HOST_WIDE_INT);
+extern section *default_elf_select_rtx_section (machine_mode, rtx,
+ unsigned HOST_WIDE_INT);
+extern void default_encode_section_info (tree, rtx, int);
+extern const char *default_strip_name_encoding (const char *);
+extern void default_asm_output_anchor (rtx);
+extern bool default_use_anchors_for_symbol_p (const_rtx);
+extern bool default_binds_local_p (const_tree);
+extern bool default_binds_local_p_1 (const_tree, int);
+extern bool default_binds_local_p_2 (const_tree);
+extern bool default_binds_local_p_3 (const_tree, bool, bool, bool, bool);
+extern void default_globalize_label (FILE *, const char *);
+extern void default_globalize_decl_name (FILE *, tree);
+extern void default_emit_unwind_label (FILE *, tree, int, int);
+extern void default_emit_except_table_label (FILE *);
+extern void default_generate_internal_label (char *, const char *,
+ unsigned long);
+extern void default_internal_label (FILE *, const char *, unsigned long);
+extern void default_asm_declare_constant_name (FILE *, const char *,
+ const_tree, HOST_WIDE_INT);
+extern void default_file_start (void);
+extern void file_end_indicate_exec_stack (void);
+extern void file_end_indicate_split_stack (void);
+
+extern void default_elf_asm_output_external (FILE *file, tree,
+ const char *);
+extern void default_elf_asm_output_limited_string (FILE *, const char *);
+extern void default_elf_asm_output_ascii (FILE *, const char *, unsigned int);
+extern void default_elf_internal_label (FILE *, const char *, unsigned long);
+
+extern void default_elf_init_array_asm_out_constructor (rtx, int);
+extern void default_elf_fini_array_asm_out_destructor (rtx, int);
+extern int maybe_assemble_visibility (tree);
+
+extern int default_address_cost (rtx, machine_mode, addr_space_t, bool);
+
+/* Stack usage. */
+extern void output_stack_usage (void);
+
+#endif /* ! GCC_OUTPUT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/pass-instances.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/pass-instances.def
new file mode 100644
index 0000000..f0a0ebf
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/pass-instances.def
@@ -0,0 +1,563 @@
+/* This file is auto-generated by gen-pass-instances.awk
+ from passes.def. */
+/* Description of pass structure
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ Macros that should be defined when using this file:
+ INSERT_PASSES_AFTER (PASS)
+ PUSH_INSERT_PASSES_WITHIN (PASS)
+ POP_INSERT_PASSES ()
+ NEXT_PASS (PASS, 1)
+ TERMINATE_PASS_LIST (PASS)
+ */
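+
+/* Illustrative editor's note, not part of the upstream file: a
+   consumer defines the macros and then includes this file.  For
+   example, pass_manager.h (also in this import) declares one field per
+   pass instance with
+
+     #define NEXT_PASS(PASS, NUM) opt_pass *PASS ## _ ## NUM
+     #include "pass-instances.def"
+
+   so NEXT_PASS (pass_build_cfg, 1) below becomes the field
+   `opt_pass *pass_build_cfg_1;'.  */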
+
+ /* All passes needed to lower the function into a shape the optimizers
+    can operate on.  These passes are always run first on the function,
+    but the backend might produce already-lowered functions that are not
+    processed by these passes.  */
+ INSERT_PASSES_AFTER (all_lowering_passes)
+ NEXT_PASS (pass_warn_unused_result, 1);
+ NEXT_PASS (pass_diagnose_omp_blocks, 1);
+ NEXT_PASS (pass_diagnose_tm_blocks, 1);
+ NEXT_PASS (pass_omp_oacc_kernels_decompose, 1);
+ NEXT_PASS (pass_lower_omp, 1);
+ NEXT_PASS (pass_lower_cf, 1);
+ NEXT_PASS (pass_lower_tm, 1);
+ NEXT_PASS (pass_refactor_eh, 1);
+ NEXT_PASS (pass_lower_eh, 1);
+ NEXT_PASS (pass_coroutine_lower_builtins, 1);
+ NEXT_PASS (pass_build_cfg, 1);
+ NEXT_PASS (pass_warn_function_return, 1);
+ NEXT_PASS (pass_coroutine_early_expand_ifns, 1);
+ NEXT_PASS (pass_expand_omp, 1);
+ NEXT_PASS (pass_build_cgraph_edges, 1);
+ TERMINATE_PASS_LIST (all_lowering_passes)
+
+ /* Interprocedural optimization passes. */
+ INSERT_PASSES_AFTER (all_small_ipa_passes)
+ NEXT_PASS (pass_ipa_free_lang_data, 1);
+ NEXT_PASS (pass_ipa_function_and_variable_visibility, 1);
+ NEXT_PASS (pass_build_ssa_passes, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_build_ssa_passes)
+ NEXT_PASS (pass_fixup_cfg, 1);
+ NEXT_PASS (pass_build_ssa, 1);
+ NEXT_PASS_WITH_ARG (pass_walloca, 1, /*strict_mode_p=*/true);
+ NEXT_PASS (pass_warn_printf, 1);
+ NEXT_PASS (pass_warn_nonnull_compare, 1);
+ NEXT_PASS (pass_early_warn_uninitialized, 1);
+ NEXT_PASS_WITH_ARG (pass_warn_access, 1, /*early=*/true);
+ NEXT_PASS (pass_ubsan, 1);
+ NEXT_PASS (pass_nothrow, 1);
+ NEXT_PASS (pass_rebuild_cgraph_edges, 1);
+ POP_INSERT_PASSES ()
+
+ NEXT_PASS (pass_local_optimization_passes, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_local_optimization_passes)
+ NEXT_PASS (pass_fixup_cfg, 2);
+ NEXT_PASS (pass_rebuild_cgraph_edges, 2);
+ NEXT_PASS (pass_local_fn_summary, 1);
+ NEXT_PASS (pass_early_inline, 1);
+ NEXT_PASS (pass_warn_recursion, 1);
+ NEXT_PASS (pass_all_early_optimizations, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_all_early_optimizations)
+ NEXT_PASS (pass_remove_cgraph_callee_edges, 1);
+ NEXT_PASS (pass_early_object_sizes, 1);
+ /* Don't record nonzero bits before IPA to avoid
+ using too much memory. */
+ NEXT_PASS_WITH_ARG (pass_ccp, 1, false /* nonzero_p */);
+ /* After CCP we rewrite no longer addressed locals into SSA
+ form if possible. */
+ NEXT_PASS (pass_forwprop, 1);
+ NEXT_PASS_WITH_ARG (pass_early_thread_jumps, 1, /*first=*/true);
+ NEXT_PASS (pass_sra_early, 1);
+ /* pass_build_ealias is a dummy pass that ensures that we
+ execute TODO_rebuild_alias at this point. */
+ NEXT_PASS (pass_build_ealias, 1);
+ NEXT_PASS_WITH_ARG (pass_fre, 1, true /* may_iterate */);
+ NEXT_PASS (pass_early_vrp, 1);
+ NEXT_PASS (pass_merge_phi, 1);
+ NEXT_PASS (pass_dse, 1);
+ NEXT_PASS_WITH_ARG (pass_cd_dce, 1, false /* update_address_taken_p */);
+ NEXT_PASS_WITH_ARG (pass_phiopt, 1, true /* early_p */);
+ NEXT_PASS (pass_tail_recursion, 1);
+ NEXT_PASS (pass_if_to_switch, 1);
+ NEXT_PASS (pass_convert_switch, 1);
+ NEXT_PASS (pass_cleanup_eh, 1);
+ NEXT_PASS (pass_profile, 1);
+ NEXT_PASS (pass_local_pure_const, 1);
+ NEXT_PASS (pass_modref, 1);
+      /* Splitting functions creates parts that are not run through
+         early optimizations again.  It is thus a good idea to do this
+         late.  */
+ NEXT_PASS (pass_split_functions, 1);
+ NEXT_PASS_WITH_ARG (pass_strip_predict_hints, 1, true /* early_p */);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_release_ssa_names, 1);
+ NEXT_PASS (pass_rebuild_cgraph_edges, 3);
+ NEXT_PASS (pass_local_fn_summary, 2);
+ POP_INSERT_PASSES ()
+
+ NEXT_PASS (pass_ipa_remove_symbols, 1);
+ NEXT_PASS (pass_ipa_oacc, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_ipa_oacc)
+ NEXT_PASS (pass_ipa_pta, 1);
+ /* Pass group that runs when the function is an offloaded function
+ containing oacc kernels loops. */
+ NEXT_PASS (pass_ipa_oacc_kernels, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_ipa_oacc_kernels)
+ NEXT_PASS (pass_oacc_kernels, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_oacc_kernels)
+ NEXT_PASS (pass_ch, 1);
+ NEXT_PASS_WITH_ARG (pass_fre, 2, true /* may_iterate */);
+ /* We use pass_lim to rewrite in-memory iteration and reduction
+ variable accesses in loops into local variables accesses. */
+ NEXT_PASS (pass_lim, 1);
+ NEXT_PASS_WITH_ARG (pass_dominator, 1, false /* may_peel_loop_headers_p */);
+ NEXT_PASS (pass_dce, 1);
+ NEXT_PASS_WITH_ARG (pass_parallelize_loops, 1, true /* oacc_kernels_p */);
+ NEXT_PASS (pass_expand_omp_ssa, 1);
+ NEXT_PASS (pass_rebuild_cgraph_edges, 4);
+ POP_INSERT_PASSES ()
+ POP_INSERT_PASSES ()
+ POP_INSERT_PASSES ()
+
+ NEXT_PASS (pass_target_clone, 1);
+ NEXT_PASS (pass_ipa_auto_profile, 1);
+ NEXT_PASS (pass_ipa_tree_profile, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_ipa_tree_profile)
+ NEXT_PASS (pass_feedback_split_functions, 1);
+ POP_INSERT_PASSES ()
+ NEXT_PASS_WITH_ARG (pass_ipa_free_fn_summary, 1, true /* small_p */);
+ NEXT_PASS (pass_ipa_increase_alignment, 1);
+ NEXT_PASS (pass_ipa_tm, 1);
+ NEXT_PASS (pass_ipa_lower_emutls, 1);
+ TERMINATE_PASS_LIST (all_small_ipa_passes)
+
+ INSERT_PASSES_AFTER (all_regular_ipa_passes)
+ NEXT_PASS (pass_analyzer, 1);
+ NEXT_PASS (pass_ipa_odr, 1);
+ NEXT_PASS (pass_ipa_whole_program_visibility, 1);
+ NEXT_PASS (pass_ipa_profile, 1);
+ NEXT_PASS (pass_ipa_icf, 1);
+ NEXT_PASS (pass_ipa_devirt, 1);
+ NEXT_PASS (pass_ipa_cp, 1);
+ NEXT_PASS (pass_ipa_sra, 1);
+ NEXT_PASS (pass_ipa_cdtor_merge, 1);
+ NEXT_PASS (pass_ipa_fn_summary, 1);
+ NEXT_PASS (pass_ipa_inline, 1);
+ NEXT_PASS (pass_ipa_pure_const, 1);
+ NEXT_PASS (pass_ipa_modref, 1);
+ NEXT_PASS_WITH_ARG (pass_ipa_free_fn_summary, 2, false /* small_p */);
+ NEXT_PASS (pass_ipa_reference, 1);
+ /* This pass needs to be scheduled after any IP code duplication. */
+ NEXT_PASS (pass_ipa_single_use, 1);
+  /* Comdat privatization comes last, as direct references to comdat local
+ symbols are not allowed outside of the comdat group. Privatizing early
+ would result in missed optimizations due to this restriction. */
+ NEXT_PASS (pass_ipa_comdats, 1);
+ TERMINATE_PASS_LIST (all_regular_ipa_passes)
+
+ /* Simple IPA passes executed after the regular passes. In WHOPR mode the
+ passes are executed after partitioning and thus see just parts of the
+ compiled unit. */
+ INSERT_PASSES_AFTER (all_late_ipa_passes)
+ NEXT_PASS (pass_ipa_pta, 2);
+ NEXT_PASS (pass_omp_simd_clone, 1);
+ TERMINATE_PASS_LIST (all_late_ipa_passes)
+
+ /* These passes are run after IPA passes on every function that is being
+ output to the assembler file. */
+ INSERT_PASSES_AFTER (all_passes)
+ NEXT_PASS (pass_fixup_cfg, 3);
+ NEXT_PASS (pass_lower_eh_dispatch, 1);
+ NEXT_PASS (pass_oacc_loop_designation, 1);
+ NEXT_PASS (pass_omp_oacc_neuter_broadcast, 1);
+ NEXT_PASS (pass_oacc_device_lower, 1);
+ NEXT_PASS (pass_omp_device_lower, 1);
+ NEXT_PASS (pass_omp_target_link, 1);
+ NEXT_PASS (pass_adjust_alignment, 1);
+ NEXT_PASS (pass_all_optimizations, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_all_optimizations)
+ NEXT_PASS (pass_remove_cgraph_callee_edges, 2);
+ /* Initial scalar cleanups before alias computation.
+ They ensure memory accesses are not indirect wherever possible. */
+ NEXT_PASS_WITH_ARG (pass_strip_predict_hints, 2, false /* early_p */);
+ NEXT_PASS_WITH_ARG (pass_ccp, 2, true /* nonzero_p */);
+ /* After CCP we rewrite no longer addressed locals into SSA
+ form if possible. */
+ NEXT_PASS (pass_object_sizes, 1);
+ NEXT_PASS (pass_post_ipa_warn, 1);
+ /* Must run before loop unrolling. */
+ NEXT_PASS_WITH_ARG (pass_warn_access, 2, /*early=*/true);
+ NEXT_PASS (pass_complete_unrolli, 1);
+ NEXT_PASS (pass_backprop, 1);
+ NEXT_PASS (pass_phiprop, 1);
+ NEXT_PASS (pass_forwprop, 2);
+ /* pass_build_alias is a dummy pass that ensures that we
+ execute TODO_rebuild_alias at this point. */
+ NEXT_PASS (pass_build_alias, 1);
+ NEXT_PASS (pass_return_slot, 1);
+ NEXT_PASS_WITH_ARG (pass_fre, 3, true /* may_iterate */);
+ NEXT_PASS (pass_merge_phi, 2);
+ NEXT_PASS_WITH_ARG (pass_thread_jumps_full, 1, /*first=*/true);
+ NEXT_PASS_WITH_ARG (pass_vrp, 1, true /* warn_array_bounds_p */);
+ NEXT_PASS (pass_dse, 2);
+ NEXT_PASS (pass_dce, 2);
+      /* pass_stdarg is always run and at this point we execute
+         TODO_remove_unused_locals to prune CLOBBERs of dead
+         variables, which otherwise add churn to alias walks. */
+ NEXT_PASS (pass_stdarg, 1);
+ NEXT_PASS (pass_call_cdce, 1);
+ NEXT_PASS (pass_cselim, 1);
+ NEXT_PASS (pass_copy_prop, 1);
+ NEXT_PASS (pass_tree_ifcombine, 1);
+ NEXT_PASS (pass_merge_phi, 3);
+ NEXT_PASS_WITH_ARG (pass_phiopt, 2, false /* early_p */);
+ NEXT_PASS (pass_tail_recursion, 2);
+ NEXT_PASS (pass_ch, 2);
+ NEXT_PASS (pass_lower_complex, 1);
+ NEXT_PASS (pass_sra, 1);
+ /* The dom pass will also resolve all __builtin_constant_p calls
+ that are still there to 0. This has to be done after some
+ propagations have already run, but before some more dead code
+ is removed, and this place fits nicely. Remember this when
+ trying to move or duplicate pass_dominator somewhere earlier. */
+ NEXT_PASS_WITH_ARG (pass_thread_jumps, 1, /*first=*/true);
+ NEXT_PASS_WITH_ARG (pass_dominator, 2, true /* may_peel_loop_headers_p */);
+      /* Threading can leave many const/copy propagations in the IL.
+         Clean them up.  Failure to do so thoroughly can lead to false
+         positives from warnings for erroneous code. */
+ NEXT_PASS (pass_copy_prop, 2);
+ /* Identify paths that should never be executed in a conforming
+ program and isolate those paths. */
+ NEXT_PASS (pass_isolate_erroneous_paths, 1);
+ NEXT_PASS_WITH_ARG (pass_reassoc, 1, true /* early_p */);
+ NEXT_PASS (pass_dce, 3);
+ NEXT_PASS (pass_forwprop, 3);
+ NEXT_PASS_WITH_ARG (pass_phiopt, 3, false /* early_p */);
+ NEXT_PASS_WITH_ARG (pass_ccp, 3, true /* nonzero_p */);
+ /* After CCP we rewrite no longer addressed locals into SSA
+ form if possible. */
+ NEXT_PASS (pass_expand_powcabs, 1);
+ NEXT_PASS (pass_optimize_bswap, 1);
+ NEXT_PASS (pass_laddress, 1);
+ NEXT_PASS (pass_lim, 2);
+ NEXT_PASS_WITH_ARG (pass_walloca, 2, false);
+ NEXT_PASS (pass_pre, 1);
+ NEXT_PASS_WITH_ARG (pass_sink_code, 1, false /* unsplit edges */);
+ NEXT_PASS (pass_sancov, 1);
+ NEXT_PASS (pass_asan, 1);
+ NEXT_PASS (pass_tsan, 1);
+ NEXT_PASS_WITH_ARG (pass_dse, 3, true /* use DR analysis */);
+ NEXT_PASS (pass_dce, 4);
+      /* Pass group that runs when 1) it is enabled and 2) there are
+         loops in the function.  Make sure pass_fix_loops runs first,
+         so loops are discovered/removed before the gate function of
+         pass_tree_loop runs. */
+ NEXT_PASS (pass_fix_loops, 1);
+ NEXT_PASS (pass_tree_loop, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_tree_loop)
+ /* Before loop_init we rewrite no longer addressed locals into SSA
+ form if possible. */
+ NEXT_PASS (pass_tree_loop_init, 1);
+ NEXT_PASS (pass_tree_unswitch, 1);
+ NEXT_PASS (pass_scev_cprop, 1);
+ NEXT_PASS (pass_loop_split, 1);
+ NEXT_PASS (pass_loop_versioning, 1);
+ NEXT_PASS (pass_loop_jam, 1);
+ /* All unswitching, final value replacement and splitting can expose
+ empty loops. Remove them now. */
+ NEXT_PASS_WITH_ARG (pass_cd_dce, 2, false /* update_address_taken_p */);
+ NEXT_PASS (pass_iv_canon, 1);
+ NEXT_PASS (pass_loop_distribution, 1);
+ NEXT_PASS (pass_linterchange, 1);
+ NEXT_PASS (pass_copy_prop, 3);
+ NEXT_PASS (pass_graphite, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_graphite)
+ NEXT_PASS (pass_graphite_transforms, 1);
+ NEXT_PASS (pass_lim, 3);
+ NEXT_PASS (pass_copy_prop, 4);
+ NEXT_PASS (pass_dce, 5);
+ POP_INSERT_PASSES ()
+ NEXT_PASS_WITH_ARG (pass_parallelize_loops, 2, false /* oacc_kernels_p */);
+ NEXT_PASS (pass_expand_omp_ssa, 2);
+ NEXT_PASS (pass_ch_vect, 1);
+ NEXT_PASS (pass_if_conversion, 1);
+ /* pass_vectorize must immediately follow pass_if_conversion.
+ Please do not add any other passes in between. */
+ NEXT_PASS (pass_vectorize, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_vectorize)
+ NEXT_PASS (pass_dce, 6);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_predcom, 1);
+ NEXT_PASS (pass_complete_unroll, 1);
+ NEXT_PASS (pass_pre_slp_scalar_cleanup, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_pre_slp_scalar_cleanup)
+ NEXT_PASS_WITH_ARG (pass_fre, 4, false /* may_iterate */);
+ NEXT_PASS (pass_dse, 4);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_slp_vectorize, 1);
+ NEXT_PASS (pass_loop_prefetch, 1);
+ /* Run IVOPTs after the last pass that uses data-reference analysis
+ as that doesn't handle TARGET_MEM_REFs. */
+ NEXT_PASS (pass_iv_optimize, 1);
+ NEXT_PASS (pass_lim, 4);
+ NEXT_PASS (pass_tree_loop_done, 1);
+ POP_INSERT_PASSES ()
+ /* Pass group that runs when pass_tree_loop is disabled or there
+ are no loops in the function. */
+ NEXT_PASS (pass_tree_no_loop, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_tree_no_loop)
+ NEXT_PASS (pass_slp_vectorize, 2);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_simduid_cleanup, 1);
+ NEXT_PASS (pass_lower_vector_ssa, 1);
+ NEXT_PASS (pass_lower_switch, 1);
+ NEXT_PASS (pass_cse_sincos, 1);
+ NEXT_PASS (pass_cse_reciprocals, 1);
+ NEXT_PASS_WITH_ARG (pass_reassoc, 2, false /* early_p */);
+ NEXT_PASS (pass_strength_reduction, 1);
+ NEXT_PASS (pass_split_paths, 1);
+ NEXT_PASS (pass_tracer, 1);
+ NEXT_PASS_WITH_ARG (pass_fre, 5, false /* may_iterate */);
+ /* After late FRE we rewrite no longer addressed locals into SSA
+ form if possible. */
+ NEXT_PASS_WITH_ARG (pass_thread_jumps, 2, /*first=*/false);
+ NEXT_PASS_WITH_ARG (pass_dominator, 3, false /* may_peel_loop_headers_p */);
+ NEXT_PASS (pass_strlen, 1);
+ NEXT_PASS_WITH_ARG (pass_thread_jumps_full, 2, /*first=*/false);
+ NEXT_PASS_WITH_ARG (pass_vrp, 2, false /* warn_array_bounds_p */);
+ /* Run CCP to compute alignment and nonzero bits. */
+ NEXT_PASS_WITH_ARG (pass_ccp, 4, true /* nonzero_p */);
+ NEXT_PASS (pass_warn_restrict, 1);
+ NEXT_PASS (pass_dse, 5);
+ NEXT_PASS_WITH_ARG (pass_dce, 7, true /* update_address_taken_p */);
+ /* After late DCE we rewrite no longer addressed locals into SSA
+ form if possible. */
+ NEXT_PASS (pass_forwprop, 4);
+ NEXT_PASS_WITH_ARG (pass_sink_code, 2, true /* unsplit edges */);
+ NEXT_PASS_WITH_ARG (pass_phiopt, 4, false /* early_p */);
+ NEXT_PASS (pass_fold_builtins, 1);
+ NEXT_PASS (pass_optimize_widening_mul, 1);
+ NEXT_PASS (pass_store_merging, 1);
+ /* If DCE is not run before checking for uninitialized uses,
+ we may get false warnings (e.g., testsuite/gcc.dg/uninit-5.c).
+ However, this also causes us to misdiagnose cases that should be
+ real warnings (e.g., testsuite/gcc.dg/pr18501.c). */
+ NEXT_PASS_WITH_ARG (pass_cd_dce, 3, false /* update_address_taken_p */);
+ NEXT_PASS (pass_tail_calls, 1);
+ /* Split critical edges before late uninit warning to reduce the
+ number of false positives from it. */
+ NEXT_PASS (pass_split_crit_edges, 1);
+ NEXT_PASS (pass_late_warn_uninitialized, 1);
+ NEXT_PASS (pass_local_pure_const, 2);
+ NEXT_PASS (pass_modref, 2);
+ /* uncprop replaces constants by SSA names. This makes analysis harder
+ and thus it should be run last. */
+ NEXT_PASS (pass_uncprop, 1);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_all_optimizations_g, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_all_optimizations_g)
+ /* The idea is that with -Og we do not perform any IPA optimization
+ so post-IPA work should be restricted to semantically required
+ passes and all optimization work is done early. */
+ NEXT_PASS (pass_remove_cgraph_callee_edges, 3);
+ NEXT_PASS_WITH_ARG (pass_strip_predict_hints, 3, false /* early_p */);
+ /* Lower remaining pieces of GIMPLE. */
+ NEXT_PASS (pass_lower_complex, 2);
+ NEXT_PASS (pass_lower_vector_ssa, 2);
+ NEXT_PASS (pass_lower_switch, 2);
+ /* Perform simple scalar cleanup which is constant/copy propagation. */
+ NEXT_PASS_WITH_ARG (pass_ccp, 5, true /* nonzero_p */);
+ NEXT_PASS (pass_post_ipa_warn, 2);
+ NEXT_PASS (pass_object_sizes, 2);
+ /* Fold remaining builtins. */
+ NEXT_PASS (pass_fold_builtins, 2);
+ NEXT_PASS (pass_strlen, 2);
+      /* Copy propagation also copy-propagates constants; this is
+         necessary to forward object-size and builtin folding results
+         properly. */
+ NEXT_PASS (pass_copy_prop, 5);
+ NEXT_PASS (pass_dce, 8);
+ NEXT_PASS (pass_sancov, 2);
+ NEXT_PASS (pass_asan, 2);
+ NEXT_PASS (pass_tsan, 2);
+ /* ??? We do want some kind of loop invariant motion, but we possibly
+ need to adjust LIM to be more friendly towards preserving accurate
+ debug information here. */
+ /* Split critical edges before late uninit warning to reduce the
+ number of false positives from it. */
+ NEXT_PASS (pass_split_crit_edges, 2);
+ NEXT_PASS (pass_late_warn_uninitialized, 2);
+ /* uncprop replaces constants by SSA names. This makes analysis harder
+ and thus it should be run last. */
+ NEXT_PASS (pass_uncprop, 2);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_assumptions, 1);
+ NEXT_PASS (pass_tm_init, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_tm_init)
+ NEXT_PASS (pass_tm_mark, 1);
+ NEXT_PASS (pass_tm_memopt, 1);
+ NEXT_PASS (pass_tm_edges, 1);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_simduid_cleanup, 2);
+ NEXT_PASS (pass_vtable_verify, 1);
+ NEXT_PASS (pass_lower_vaarg, 1);
+ NEXT_PASS (pass_lower_vector, 1);
+ NEXT_PASS (pass_lower_complex_O0, 1);
+ NEXT_PASS (pass_sancov_O0, 1);
+ NEXT_PASS (pass_lower_switch_O0, 1);
+ NEXT_PASS (pass_asan_O0, 1);
+ NEXT_PASS (pass_tsan_O0, 1);
+ NEXT_PASS (pass_sanopt, 1);
+ NEXT_PASS (pass_cleanup_eh, 2);
+ NEXT_PASS (pass_lower_resx, 1);
+ NEXT_PASS (pass_nrv, 1);
+ NEXT_PASS (pass_gimple_isel, 1);
+ NEXT_PASS (pass_harden_conditional_branches, 1);
+ NEXT_PASS (pass_harden_compares, 1);
+ NEXT_PASS_WITH_ARG (pass_warn_access, 3, /*early=*/false);
+ NEXT_PASS (pass_cleanup_cfg_post_optimizing, 1);
+ NEXT_PASS (pass_warn_function_noreturn, 1);
+
+ NEXT_PASS (pass_expand, 1);
+
+ NEXT_PASS (pass_rest_of_compilation, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_rest_of_compilation)
+ NEXT_PASS (pass_instantiate_virtual_regs, 1);
+ NEXT_PASS (pass_into_cfg_layout_mode, 1);
+ NEXT_PASS (pass_jump, 1);
+ NEXT_PASS (pass_lower_subreg, 1);
+ NEXT_PASS (pass_df_initialize_opt, 1);
+ NEXT_PASS (pass_cse, 1);
+ NEXT_PASS (pass_rtl_fwprop, 1);
+ NEXT_PASS (pass_rtl_cprop, 1);
+ NEXT_PASS (pass_rtl_pre, 1);
+ NEXT_PASS (pass_rtl_hoist, 1);
+ NEXT_PASS (pass_rtl_cprop, 2);
+ NEXT_PASS (pass_rtl_store_motion, 1);
+ NEXT_PASS (pass_cse_after_global_opts, 1);
+ NEXT_PASS (pass_rtl_ifcvt, 1);
+ NEXT_PASS (pass_reginfo_init, 1);
+ /* Perform loop optimizations. It might be better to do them a bit
+ sooner, but we want the profile feedback to work more
+ efficiently. */
+ NEXT_PASS (pass_loop2, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_loop2)
+ NEXT_PASS (pass_rtl_loop_init, 1);
+ NEXT_PASS (pass_rtl_move_loop_invariants, 1);
+ NEXT_PASS (pass_rtl_unroll_loops, 1);
+ NEXT_PASS (pass_rtl_doloop, 1);
+ NEXT_PASS (pass_rtl_loop_done, 1);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_lower_subreg2, 1);
+ NEXT_PASS (pass_web, 1);
+ NEXT_PASS (pass_rtl_cprop, 3);
+ NEXT_PASS (pass_cse2, 1);
+ NEXT_PASS (pass_rtl_dse1, 1);
+ NEXT_PASS (pass_rtl_fwprop_addr, 1);
+ NEXT_PASS (pass_inc_dec, 1);
+ NEXT_PASS (pass_initialize_regs, 1);
+ NEXT_PASS (pass_ud_rtl_dce, 1);
+ NEXT_PASS (pass_combine, 1);
+ NEXT_PASS (pass_if_after_combine, 1);
+ NEXT_PASS (pass_jump_after_combine, 1);
+ NEXT_PASS (pass_partition_blocks, 1);
+ NEXT_PASS (pass_outof_cfg_layout_mode, 1);
+ NEXT_PASS (pass_split_all_insns, 1);
+ NEXT_PASS (pass_lower_subreg3, 1);
+ NEXT_PASS (pass_df_initialize_no_opt, 1);
+ NEXT_PASS (pass_stack_ptr_mod, 1);
+ NEXT_PASS (pass_mode_switching, 1);
+ NEXT_PASS (pass_match_asm_constraints, 1);
+ NEXT_PASS (pass_sms, 1);
+ NEXT_PASS (pass_live_range_shrinkage, 1);
+ NEXT_PASS (pass_sched, 1);
+ NEXT_PASS (pass_early_remat, 1);
+ NEXT_PASS (pass_ira, 1);
+ NEXT_PASS (pass_reload, 1);
+ NEXT_PASS (pass_postreload, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_postreload)
+ NEXT_PASS (pass_postreload_cse, 1);
+ NEXT_PASS (pass_gcse2, 1);
+ NEXT_PASS (pass_split_after_reload, 1);
+ NEXT_PASS (pass_ree, 1);
+ NEXT_PASS (pass_compare_elim_after_reload, 1);
+ NEXT_PASS (pass_thread_prologue_and_epilogue, 1);
+ NEXT_PASS (pass_rtl_dse2, 1);
+ NEXT_PASS (pass_stack_adjustments, 1);
+ NEXT_PASS (pass_jump2, 1);
+ NEXT_PASS (pass_duplicate_computed_gotos, 1);
+ NEXT_PASS (pass_sched_fusion, 1);
+ NEXT_PASS (pass_peephole2, 1);
+ NEXT_PASS (pass_if_after_reload, 1);
+ NEXT_PASS (pass_regrename, 1);
+ NEXT_PASS (pass_cprop_hardreg, 1);
+ NEXT_PASS (pass_fast_rtl_dce, 1);
+ NEXT_PASS (pass_reorder_blocks, 1);
+ NEXT_PASS (pass_leaf_regs, 1);
+ NEXT_PASS (pass_split_before_sched2, 1);
+ NEXT_PASS (pass_sched2, 1);
+ NEXT_PASS (pass_stack_regs, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_stack_regs)
+ NEXT_PASS (pass_split_before_regstack, 1);
+ NEXT_PASS (pass_stack_regs_run, 1);
+ POP_INSERT_PASSES ()
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_late_compilation, 1);
+ PUSH_INSERT_PASSES_WITHIN (pass_late_compilation)
+ NEXT_PASS (pass_zero_call_used_regs, 1);
+ NEXT_PASS (pass_compute_alignments, 1);
+ NEXT_PASS (pass_variable_tracking, 1);
+ NEXT_PASS (pass_free_cfg, 1);
+ NEXT_PASS (pass_machine_reorg, 1);
+ NEXT_PASS (pass_cleanup_barriers, 1);
+ NEXT_PASS (pass_delay_slots, 1);
+ NEXT_PASS (pass_split_for_shorten_branches, 1);
+ NEXT_PASS (pass_convert_to_eh_region_ranges, 1);
+      NEXT_PASS (pass_insert_bti, 1);
+ NEXT_PASS (pass_shorten_branches, 1);
+ NEXT_PASS (pass_set_nothrow_function_flags, 1);
+ NEXT_PASS (pass_dwarf2_frame, 1);
+ NEXT_PASS (pass_final, 1);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_df_finish, 1);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_clean_state, 1);
+ TERMINATE_PASS_LIST (all_passes)
+/* Arm-specific pass declarations.
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ Contributed by Arm Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/pass_manager.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/pass_manager.h
new file mode 100644
index 0000000..5b7f44a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/pass_manager.h
@@ -0,0 +1,148 @@
+/* pass_manager.h - The pipeline of optimization passes
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_PASS_MANAGER_H
+#define GCC_PASS_MANAGER_H
+
+class opt_pass;
+struct register_pass_info;
+
+/* Define a list of pass lists so that both passes.cc and plugins can easily
+ find all the pass lists. */
+#define GCC_PASS_LISTS \
+ DEF_PASS_LIST (all_lowering_passes) \
+ DEF_PASS_LIST (all_small_ipa_passes) \
+ DEF_PASS_LIST (all_regular_ipa_passes) \
+ DEF_PASS_LIST (all_late_ipa_passes) \
+ DEF_PASS_LIST (all_passes)
+
+#define DEF_PASS_LIST(LIST) PASS_LIST_NO_##LIST,
+enum pass_list
+{
+ GCC_PASS_LISTS
+ PASS_LIST_NUM
+};
+#undef DEF_PASS_LIST
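+
+/* Illustrative editor's note, not part of the upstream header: with
+   the five lists above, the X-macro expands the enum to
+
+     enum pass_list
+     {
+       PASS_LIST_NO_all_lowering_passes,
+       PASS_LIST_NO_all_small_ipa_passes,
+       PASS_LIST_NO_all_regular_ipa_passes,
+       PASS_LIST_NO_all_late_ipa_passes,
+       PASS_LIST_NO_all_passes,
+       PASS_LIST_NUM
+     };
+
+   so PASS_LIST_NUM counts the lists and sizes the pass_lists array
+   below.  */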
+
+namespace gcc {
+
+class context;
+
+class pass_manager
+{
+public:
+ pass_manager (context *ctxt);
+ ~pass_manager ();
+
+ void register_pass (struct register_pass_info *pass_info);
+ void register_one_dump_file (opt_pass *pass);
+
+ opt_pass *get_pass_for_id (int id) const;
+
+ void dump_passes () const;
+
+ void dump_profile_report () const;
+
+ void finish_optimization_passes ();
+
+ /* Access to specific passes, so that the majority can be private. */
+ void execute_early_local_passes ();
+ unsigned int execute_pass_mode_switching ();
+
+ /* Various passes are manually cloned by epiphany. */
+ opt_pass *get_pass_split_all_insns () const {
+ return pass_split_all_insns_1;
+ }
+ opt_pass *get_pass_mode_switching () const {
+ return pass_mode_switching_1;
+ }
+ opt_pass *get_pass_peephole2 () const { return pass_peephole2_1; }
+ opt_pass *get_pass_profile () const { return pass_profile_1; }
+
+ void register_pass_name (opt_pass *pass, const char *name);
+
+ opt_pass *get_pass_by_name (const char *name);
+
+ opt_pass *get_rest_of_compilation () const
+ {
+ return pass_rest_of_compilation_1;
+ }
+ opt_pass *get_clean_slate () const { return pass_clean_state_1; }
+
+public:
+ /* The root of the compilation pass tree, once constructed. */
+ opt_pass *all_passes;
+ opt_pass *all_small_ipa_passes;
+ opt_pass *all_lowering_passes;
+ opt_pass *all_regular_ipa_passes;
+ opt_pass *all_late_ipa_passes;
+
+ /* A map from static pass id to optimization pass. */
+ opt_pass **passes_by_id;
+ int passes_by_id_size;
+
+ opt_pass **pass_lists[PASS_LIST_NUM];
+
+private:
+ void set_pass_for_id (int id, opt_pass *pass);
+ void register_dump_files (opt_pass *pass);
+ void create_pass_tab () const;
+
+private:
+ context *m_ctxt;
+ hash_map<free_string_hash, opt_pass *> *m_name_to_pass_map;
+
+ /* References to all of the individual passes.
+ These fields are generated via macro expansion.
+
+ For example:
+ NEXT_PASS (pass_build_cfg, 1);
+ within pass-instances.def means that there is a field:
+ opt_pass *pass_build_cfg_1;
+
+ Similarly, the various:
+ NEXT_PASS (pass_copy_prop, 1);
+ ...
+ NEXT_PASS (pass_copy_prop, 8);
+ in pass-instances.def lead to fields:
+ opt_pass *pass_copy_prop_1;
+ ...
+ opt_pass *pass_copy_prop_8; */
+
+#define INSERT_PASSES_AFTER(PASS)
+#define PUSH_INSERT_PASSES_WITHIN(PASS)
+#define POP_INSERT_PASSES()
+#define NEXT_PASS(PASS, NUM) opt_pass *PASS ## _ ## NUM
+#define NEXT_PASS_WITH_ARG(PASS, NUM, ARG) NEXT_PASS (PASS, NUM)
+#define TERMINATE_PASS_LIST(PASS)
+
+#include "pass-instances.def"
+
+#undef INSERT_PASSES_AFTER
+#undef PUSH_INSERT_PASSES_WITHIN
+#undef POP_INSERT_PASSES
+#undef NEXT_PASS
+#undef NEXT_PASS_WITH_ARG
+#undef TERMINATE_PASS_LIST
+
+}; // class pass_manager
+
+} // namespace gcc
+
+#endif /* ! GCC_PASS_MANAGER_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/passes.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/passes.def
new file mode 100644
index 0000000..c9a8f19
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/passes.def
@@ -0,0 +1,540 @@
+/* Description of pass structure
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ Macros that should be defined when using this file:
+ INSERT_PASSES_AFTER (PASS)
+ PUSH_INSERT_PASSES_WITHIN (PASS)
+ POP_INSERT_PASSES ()
+ NEXT_PASS (PASS)
+ TERMINATE_PASS_LIST (PASS)
+ */
+
+ /* All passes needed to lower the function into a shape the optimizers
+    can operate on.  These passes are always run first on the function,
+    but the backend might produce already-lowered functions that are not
+    processed by these passes.  */
+ INSERT_PASSES_AFTER (all_lowering_passes)
+ NEXT_PASS (pass_warn_unused_result);
+ NEXT_PASS (pass_diagnose_omp_blocks);
+ NEXT_PASS (pass_diagnose_tm_blocks);
+ NEXT_PASS (pass_omp_oacc_kernels_decompose);
+ NEXT_PASS (pass_lower_omp);
+ NEXT_PASS (pass_lower_cf);
+ NEXT_PASS (pass_lower_tm);
+ NEXT_PASS (pass_refactor_eh);
+ NEXT_PASS (pass_lower_eh);
+ NEXT_PASS (pass_coroutine_lower_builtins);
+ NEXT_PASS (pass_build_cfg);
+ NEXT_PASS (pass_warn_function_return);
+ NEXT_PASS (pass_coroutine_early_expand_ifns);
+ NEXT_PASS (pass_expand_omp);
+ NEXT_PASS (pass_build_cgraph_edges);
+ TERMINATE_PASS_LIST (all_lowering_passes)
+
+ /* Interprocedural optimization passes. */
+ INSERT_PASSES_AFTER (all_small_ipa_passes)
+ NEXT_PASS (pass_ipa_free_lang_data);
+ NEXT_PASS (pass_ipa_function_and_variable_visibility);
+ NEXT_PASS (pass_build_ssa_passes);
+ PUSH_INSERT_PASSES_WITHIN (pass_build_ssa_passes)
+ NEXT_PASS (pass_fixup_cfg);
+ NEXT_PASS (pass_build_ssa);
+ NEXT_PASS (pass_walloca, /*strict_mode_p=*/true);
+ NEXT_PASS (pass_warn_printf);
+ NEXT_PASS (pass_warn_nonnull_compare);
+ NEXT_PASS (pass_early_warn_uninitialized);
+ NEXT_PASS (pass_warn_access, /*early=*/true);
+ NEXT_PASS (pass_ubsan);
+ NEXT_PASS (pass_nothrow);
+ NEXT_PASS (pass_rebuild_cgraph_edges);
+ POP_INSERT_PASSES ()
+
+ NEXT_PASS (pass_local_optimization_passes);
+ PUSH_INSERT_PASSES_WITHIN (pass_local_optimization_passes)
+ NEXT_PASS (pass_fixup_cfg);
+ NEXT_PASS (pass_rebuild_cgraph_edges);
+ NEXT_PASS (pass_local_fn_summary);
+ NEXT_PASS (pass_early_inline);
+ NEXT_PASS (pass_warn_recursion);
+ NEXT_PASS (pass_all_early_optimizations);
+ PUSH_INSERT_PASSES_WITHIN (pass_all_early_optimizations)
+ NEXT_PASS (pass_remove_cgraph_callee_edges);
+ NEXT_PASS (pass_early_object_sizes);
+ /* Don't record nonzero bits before IPA to avoid
+ using too much memory. */
+ NEXT_PASS (pass_ccp, false /* nonzero_p */);
+ /* After CCP we rewrite no longer addressed locals into SSA
+ form if possible. */
+ NEXT_PASS (pass_forwprop);
+ NEXT_PASS (pass_early_thread_jumps, /*first=*/true);
+ NEXT_PASS (pass_sra_early);
+ /* pass_build_ealias is a dummy pass that ensures that we
+ execute TODO_rebuild_alias at this point. */
+ NEXT_PASS (pass_build_ealias);
+ NEXT_PASS (pass_fre, true /* may_iterate */);
+ NEXT_PASS (pass_early_vrp);
+ NEXT_PASS (pass_merge_phi);
+ NEXT_PASS (pass_dse);
+ NEXT_PASS (pass_cd_dce, false /* update_address_taken_p */);
+ NEXT_PASS (pass_phiopt, true /* early_p */);
+ NEXT_PASS (pass_tail_recursion);
+ NEXT_PASS (pass_if_to_switch);
+ NEXT_PASS (pass_convert_switch);
+ NEXT_PASS (pass_cleanup_eh);
+ NEXT_PASS (pass_profile);
+ NEXT_PASS (pass_local_pure_const);
+ NEXT_PASS (pass_modref);
+      /* Splitting functions creates parts that are not run through
+         early optimizations again.  It is thus a good idea to do this
+         late.  */
+ NEXT_PASS (pass_split_functions);
+ NEXT_PASS (pass_strip_predict_hints, true /* early_p */);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_release_ssa_names);
+ NEXT_PASS (pass_rebuild_cgraph_edges);
+ NEXT_PASS (pass_local_fn_summary);
+ POP_INSERT_PASSES ()
+
+ NEXT_PASS (pass_ipa_remove_symbols);
+ NEXT_PASS (pass_ipa_oacc);
+ PUSH_INSERT_PASSES_WITHIN (pass_ipa_oacc)
+ NEXT_PASS (pass_ipa_pta);
+ /* Pass group that runs when the function is an offloaded function
+ containing oacc kernels loops. */
+ NEXT_PASS (pass_ipa_oacc_kernels);
+ PUSH_INSERT_PASSES_WITHIN (pass_ipa_oacc_kernels)
+ NEXT_PASS (pass_oacc_kernels);
+ PUSH_INSERT_PASSES_WITHIN (pass_oacc_kernels)
+ NEXT_PASS (pass_ch);
+ NEXT_PASS (pass_fre, true /* may_iterate */);
+ /* We use pass_lim to rewrite in-memory iteration and reduction
+ variable accesses in loops into local variables accesses. */
+ NEXT_PASS (pass_lim);
+ NEXT_PASS (pass_dominator, false /* may_peel_loop_headers_p */);
+ NEXT_PASS (pass_dce);
+ NEXT_PASS (pass_parallelize_loops, true /* oacc_kernels_p */);
+ NEXT_PASS (pass_expand_omp_ssa);
+ NEXT_PASS (pass_rebuild_cgraph_edges);
+ POP_INSERT_PASSES ()
+ POP_INSERT_PASSES ()
+ POP_INSERT_PASSES ()
+
+ NEXT_PASS (pass_target_clone);
+ NEXT_PASS (pass_ipa_auto_profile);
+ NEXT_PASS (pass_ipa_tree_profile);
+ PUSH_INSERT_PASSES_WITHIN (pass_ipa_tree_profile)
+ NEXT_PASS (pass_feedback_split_functions);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_ipa_free_fn_summary, true /* small_p */);
+ NEXT_PASS (pass_ipa_increase_alignment);
+ NEXT_PASS (pass_ipa_tm);
+ NEXT_PASS (pass_ipa_lower_emutls);
+ TERMINATE_PASS_LIST (all_small_ipa_passes)
+
+ INSERT_PASSES_AFTER (all_regular_ipa_passes)
+ NEXT_PASS (pass_analyzer);
+ NEXT_PASS (pass_ipa_odr);
+ NEXT_PASS (pass_ipa_whole_program_visibility);
+ NEXT_PASS (pass_ipa_profile);
+ NEXT_PASS (pass_ipa_icf);
+ NEXT_PASS (pass_ipa_devirt);
+ NEXT_PASS (pass_ipa_cp);
+ NEXT_PASS (pass_ipa_sra);
+ NEXT_PASS (pass_ipa_cdtor_merge);
+ NEXT_PASS (pass_ipa_fn_summary);
+ NEXT_PASS (pass_ipa_inline);
+ NEXT_PASS (pass_ipa_pure_const);
+ NEXT_PASS (pass_ipa_modref);
+ NEXT_PASS (pass_ipa_free_fn_summary, false /* small_p */);
+ NEXT_PASS (pass_ipa_reference);
+ /* This pass needs to be scheduled after any IP code duplication. */
+ NEXT_PASS (pass_ipa_single_use);
+  /* Comdat privatization comes last, as direct references to comdat local
+ symbols are not allowed outside of the comdat group. Privatizing early
+ would result in missed optimizations due to this restriction. */
+ NEXT_PASS (pass_ipa_comdats);
+ TERMINATE_PASS_LIST (all_regular_ipa_passes)
+
+ /* Simple IPA passes executed after the regular passes. In WHOPR mode the
+ passes are executed after partitioning and thus see just parts of the
+ compiled unit. */
+ INSERT_PASSES_AFTER (all_late_ipa_passes)
+ NEXT_PASS (pass_ipa_pta);
+ NEXT_PASS (pass_omp_simd_clone);
+ TERMINATE_PASS_LIST (all_late_ipa_passes)
+
+ /* These passes are run after IPA passes on every function that is being
+ output to the assembler file. */
+ INSERT_PASSES_AFTER (all_passes)
+ NEXT_PASS (pass_fixup_cfg);
+ NEXT_PASS (pass_lower_eh_dispatch);
+ NEXT_PASS (pass_oacc_loop_designation);
+ NEXT_PASS (pass_omp_oacc_neuter_broadcast);
+ NEXT_PASS (pass_oacc_device_lower);
+ NEXT_PASS (pass_omp_device_lower);
+ NEXT_PASS (pass_omp_target_link);
+ NEXT_PASS (pass_adjust_alignment);
+ NEXT_PASS (pass_all_optimizations);
+ PUSH_INSERT_PASSES_WITHIN (pass_all_optimizations)
+ NEXT_PASS (pass_remove_cgraph_callee_edges);
+ /* Initial scalar cleanups before alias computation.
+ They ensure memory accesses are not indirect wherever possible. */
+ NEXT_PASS (pass_strip_predict_hints, false /* early_p */);
+ NEXT_PASS (pass_ccp, true /* nonzero_p */);
+ /* After CCP we rewrite no longer addressed locals into SSA
+ form if possible. */
+ NEXT_PASS (pass_object_sizes);
+ NEXT_PASS (pass_post_ipa_warn);
+ /* Must run before loop unrolling. */
+ NEXT_PASS (pass_warn_access, /*early=*/true);
+ NEXT_PASS (pass_complete_unrolli);
+ NEXT_PASS (pass_backprop);
+ NEXT_PASS (pass_phiprop);
+ NEXT_PASS (pass_forwprop);
+ /* pass_build_alias is a dummy pass that ensures that we
+ execute TODO_rebuild_alias at this point. */
+ NEXT_PASS (pass_build_alias);
+ NEXT_PASS (pass_return_slot);
+ NEXT_PASS (pass_fre, true /* may_iterate */);
+ NEXT_PASS (pass_merge_phi);
+ NEXT_PASS (pass_thread_jumps_full, /*first=*/true);
+ NEXT_PASS (pass_vrp, true /* warn_array_bounds_p */);
+ NEXT_PASS (pass_dse);
+ NEXT_PASS (pass_dce);
+      /* pass_stdarg is always run and at this point we execute
+         TODO_remove_unused_locals to prune CLOBBERs of dead
+         variables, which otherwise add churn to alias walks. */
+ NEXT_PASS (pass_stdarg);
+ NEXT_PASS (pass_call_cdce);
+ NEXT_PASS (pass_cselim);
+ NEXT_PASS (pass_copy_prop);
+ NEXT_PASS (pass_tree_ifcombine);
+ NEXT_PASS (pass_merge_phi);
+ NEXT_PASS (pass_phiopt, false /* early_p */);
+ NEXT_PASS (pass_tail_recursion);
+ NEXT_PASS (pass_ch);
+ NEXT_PASS (pass_lower_complex);
+ NEXT_PASS (pass_sra);
+ /* The dom pass will also resolve all __builtin_constant_p calls
+ that are still there to 0. This has to be done after some
+ propagations have already run, but before some more dead code
+ is removed, and this place fits nicely. Remember this when
+ trying to move or duplicate pass_dominator somewhere earlier. */
+ NEXT_PASS (pass_thread_jumps, /*first=*/true);
+ NEXT_PASS (pass_dominator, true /* may_peel_loop_headers_p */);
+ /* Threading can leave many const/copy propagations in the IL.
+ Clean them up. Failure to do so well can lead to false
+ positives from warnings for erroneous code. */
+ NEXT_PASS (pass_copy_prop);
+ /* Identify paths that should never be executed in a conforming
+ program and isolate those paths. */
+ NEXT_PASS (pass_isolate_erroneous_paths);
+ NEXT_PASS (pass_reassoc, true /* early_p */);
+ NEXT_PASS (pass_dce);
+ NEXT_PASS (pass_forwprop);
+ NEXT_PASS (pass_phiopt, false /* early_p */);
+ NEXT_PASS (pass_ccp, true /* nonzero_p */);
+ /* After CCP we rewrite no longer addressed locals into SSA
+ form if possible. */
+ NEXT_PASS (pass_expand_powcabs);
+ NEXT_PASS (pass_optimize_bswap);
+ NEXT_PASS (pass_laddress);
+ NEXT_PASS (pass_lim);
+ NEXT_PASS (pass_walloca, false);
+ NEXT_PASS (pass_pre);
+ NEXT_PASS (pass_sink_code, false /* unsplit edges */);
+ NEXT_PASS (pass_sancov);
+ NEXT_PASS (pass_asan);
+ NEXT_PASS (pass_tsan);
+ NEXT_PASS (pass_dse, true /* use DR analysis */);
+ NEXT_PASS (pass_dce);
+      /* Pass group that runs when 1) enabled, 2) there are loops
+         in the function. Make sure to run pass_fix_loops first, so
+         loops are discovered/removed before running the gate function
+         of pass_tree_loop. */
+ NEXT_PASS (pass_fix_loops);
+ NEXT_PASS (pass_tree_loop);
+ PUSH_INSERT_PASSES_WITHIN (pass_tree_loop)
+ /* Before loop_init we rewrite no longer addressed locals into SSA
+ form if possible. */
+ NEXT_PASS (pass_tree_loop_init);
+ NEXT_PASS (pass_tree_unswitch);
+ NEXT_PASS (pass_scev_cprop);
+ NEXT_PASS (pass_loop_split);
+ NEXT_PASS (pass_loop_versioning);
+ NEXT_PASS (pass_loop_jam);
+ /* All unswitching, final value replacement and splitting can expose
+ empty loops. Remove them now. */
+ NEXT_PASS (pass_cd_dce, false /* update_address_taken_p */);
+ NEXT_PASS (pass_iv_canon);
+ NEXT_PASS (pass_loop_distribution);
+ NEXT_PASS (pass_linterchange);
+ NEXT_PASS (pass_copy_prop);
+ NEXT_PASS (pass_graphite);
+ PUSH_INSERT_PASSES_WITHIN (pass_graphite)
+ NEXT_PASS (pass_graphite_transforms);
+ NEXT_PASS (pass_lim);
+ NEXT_PASS (pass_copy_prop);
+ NEXT_PASS (pass_dce);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_parallelize_loops, false /* oacc_kernels_p */);
+ NEXT_PASS (pass_expand_omp_ssa);
+ NEXT_PASS (pass_ch_vect);
+ NEXT_PASS (pass_if_conversion);
+ /* pass_vectorize must immediately follow pass_if_conversion.
+ Please do not add any other passes in between. */
+ NEXT_PASS (pass_vectorize);
+ PUSH_INSERT_PASSES_WITHIN (pass_vectorize)
+ NEXT_PASS (pass_dce);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_predcom);
+ NEXT_PASS (pass_complete_unroll);
+ NEXT_PASS (pass_pre_slp_scalar_cleanup);
+ PUSH_INSERT_PASSES_WITHIN (pass_pre_slp_scalar_cleanup)
+ NEXT_PASS (pass_fre, false /* may_iterate */);
+ NEXT_PASS (pass_dse);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_slp_vectorize);
+ NEXT_PASS (pass_loop_prefetch);
+ /* Run IVOPTs after the last pass that uses data-reference analysis
+ as that doesn't handle TARGET_MEM_REFs. */
+ NEXT_PASS (pass_iv_optimize);
+ NEXT_PASS (pass_lim);
+ NEXT_PASS (pass_tree_loop_done);
+ POP_INSERT_PASSES ()
+ /* Pass group that runs when pass_tree_loop is disabled or there
+ are no loops in the function. */
+ NEXT_PASS (pass_tree_no_loop);
+ PUSH_INSERT_PASSES_WITHIN (pass_tree_no_loop)
+ NEXT_PASS (pass_slp_vectorize);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_simduid_cleanup);
+ NEXT_PASS (pass_lower_vector_ssa);
+ NEXT_PASS (pass_lower_switch);
+ NEXT_PASS (pass_cse_sincos);
+ NEXT_PASS (pass_cse_reciprocals);
+ NEXT_PASS (pass_reassoc, false /* early_p */);
+ NEXT_PASS (pass_strength_reduction);
+ NEXT_PASS (pass_split_paths);
+ NEXT_PASS (pass_tracer);
+ NEXT_PASS (pass_fre, false /* may_iterate */);
+ /* After late FRE we rewrite no longer addressed locals into SSA
+ form if possible. */
+ NEXT_PASS (pass_thread_jumps, /*first=*/false);
+ NEXT_PASS (pass_dominator, false /* may_peel_loop_headers_p */);
+ NEXT_PASS (pass_strlen);
+ NEXT_PASS (pass_thread_jumps_full, /*first=*/false);
+ NEXT_PASS (pass_vrp, false /* warn_array_bounds_p */);
+ /* Run CCP to compute alignment and nonzero bits. */
+ NEXT_PASS (pass_ccp, true /* nonzero_p */);
+ NEXT_PASS (pass_warn_restrict);
+ NEXT_PASS (pass_dse);
+ NEXT_PASS (pass_dce, true /* update_address_taken_p */);
+ /* After late DCE we rewrite no longer addressed locals into SSA
+ form if possible. */
+ NEXT_PASS (pass_forwprop);
+ NEXT_PASS (pass_sink_code, true /* unsplit edges */);
+ NEXT_PASS (pass_phiopt, false /* early_p */);
+ NEXT_PASS (pass_fold_builtins);
+ NEXT_PASS (pass_optimize_widening_mul);
+ NEXT_PASS (pass_store_merging);
+ /* If DCE is not run before checking for uninitialized uses,
+ we may get false warnings (e.g., testsuite/gcc.dg/uninit-5.c).
+ However, this also causes us to misdiagnose cases that should be
+ real warnings (e.g., testsuite/gcc.dg/pr18501.c). */
+ NEXT_PASS (pass_cd_dce, false /* update_address_taken_p */);
+ NEXT_PASS (pass_tail_calls);
+ /* Split critical edges before late uninit warning to reduce the
+ number of false positives from it. */
+ NEXT_PASS (pass_split_crit_edges);
+ NEXT_PASS (pass_late_warn_uninitialized);
+ NEXT_PASS (pass_local_pure_const);
+ NEXT_PASS (pass_modref);
+ /* uncprop replaces constants by SSA names. This makes analysis harder
+ and thus it should be run last. */
+ NEXT_PASS (pass_uncprop);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_all_optimizations_g);
+ PUSH_INSERT_PASSES_WITHIN (pass_all_optimizations_g)
+      /* The idea is that with -Og we do not perform any IPA optimization,
+         so post-IPA work should be restricted to semantically required
+         passes and all optimization work is done early. */
+ NEXT_PASS (pass_remove_cgraph_callee_edges);
+ NEXT_PASS (pass_strip_predict_hints, false /* early_p */);
+ /* Lower remaining pieces of GIMPLE. */
+ NEXT_PASS (pass_lower_complex);
+ NEXT_PASS (pass_lower_vector_ssa);
+ NEXT_PASS (pass_lower_switch);
+ /* Perform simple scalar cleanup which is constant/copy propagation. */
+ NEXT_PASS (pass_ccp, true /* nonzero_p */);
+ NEXT_PASS (pass_post_ipa_warn);
+ NEXT_PASS (pass_object_sizes);
+ /* Fold remaining builtins. */
+ NEXT_PASS (pass_fold_builtins);
+ NEXT_PASS (pass_strlen);
+      /* Copy propagation also copy-propagates constants; this is necessary
+         to forward object-size and builtin folding results properly. */
+ NEXT_PASS (pass_copy_prop);
+ NEXT_PASS (pass_dce);
+ NEXT_PASS (pass_sancov);
+ NEXT_PASS (pass_asan);
+ NEXT_PASS (pass_tsan);
+ /* ??? We do want some kind of loop invariant motion, but we possibly
+ need to adjust LIM to be more friendly towards preserving accurate
+ debug information here. */
+ /* Split critical edges before late uninit warning to reduce the
+ number of false positives from it. */
+ NEXT_PASS (pass_split_crit_edges);
+ NEXT_PASS (pass_late_warn_uninitialized);
+ /* uncprop replaces constants by SSA names. This makes analysis harder
+ and thus it should be run last. */
+ NEXT_PASS (pass_uncprop);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_assumptions);
+ NEXT_PASS (pass_tm_init);
+ PUSH_INSERT_PASSES_WITHIN (pass_tm_init)
+ NEXT_PASS (pass_tm_mark);
+ NEXT_PASS (pass_tm_memopt);
+ NEXT_PASS (pass_tm_edges);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_simduid_cleanup);
+ NEXT_PASS (pass_vtable_verify);
+ NEXT_PASS (pass_lower_vaarg);
+ NEXT_PASS (pass_lower_vector);
+ NEXT_PASS (pass_lower_complex_O0);
+ NEXT_PASS (pass_sancov_O0);
+ NEXT_PASS (pass_lower_switch_O0);
+ NEXT_PASS (pass_asan_O0);
+ NEXT_PASS (pass_tsan_O0);
+ NEXT_PASS (pass_sanopt);
+ NEXT_PASS (pass_cleanup_eh);
+ NEXT_PASS (pass_lower_resx);
+ NEXT_PASS (pass_nrv);
+ NEXT_PASS (pass_gimple_isel);
+ NEXT_PASS (pass_harden_conditional_branches);
+ NEXT_PASS (pass_harden_compares);
+ NEXT_PASS (pass_warn_access, /*early=*/false);
+ NEXT_PASS (pass_cleanup_cfg_post_optimizing);
+ NEXT_PASS (pass_warn_function_noreturn);
+
+ NEXT_PASS (pass_expand);
+
+ NEXT_PASS (pass_rest_of_compilation);
+ PUSH_INSERT_PASSES_WITHIN (pass_rest_of_compilation)
+ NEXT_PASS (pass_instantiate_virtual_regs);
+ NEXT_PASS (pass_into_cfg_layout_mode);
+ NEXT_PASS (pass_jump);
+ NEXT_PASS (pass_lower_subreg);
+ NEXT_PASS (pass_df_initialize_opt);
+ NEXT_PASS (pass_cse);
+ NEXT_PASS (pass_rtl_fwprop);
+ NEXT_PASS (pass_rtl_cprop);
+ NEXT_PASS (pass_rtl_pre);
+ NEXT_PASS (pass_rtl_hoist);
+ NEXT_PASS (pass_rtl_cprop);
+ NEXT_PASS (pass_rtl_store_motion);
+ NEXT_PASS (pass_cse_after_global_opts);
+ NEXT_PASS (pass_rtl_ifcvt);
+ NEXT_PASS (pass_reginfo_init);
+ /* Perform loop optimizations. It might be better to do them a bit
+ sooner, but we want the profile feedback to work more
+ efficiently. */
+ NEXT_PASS (pass_loop2);
+ PUSH_INSERT_PASSES_WITHIN (pass_loop2)
+ NEXT_PASS (pass_rtl_loop_init);
+ NEXT_PASS (pass_rtl_move_loop_invariants);
+ NEXT_PASS (pass_rtl_unroll_loops);
+ NEXT_PASS (pass_rtl_doloop);
+ NEXT_PASS (pass_rtl_loop_done);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_lower_subreg2);
+ NEXT_PASS (pass_web);
+ NEXT_PASS (pass_rtl_cprop);
+ NEXT_PASS (pass_cse2);
+ NEXT_PASS (pass_rtl_dse1);
+ NEXT_PASS (pass_rtl_fwprop_addr);
+ NEXT_PASS (pass_inc_dec);
+ NEXT_PASS (pass_initialize_regs);
+ NEXT_PASS (pass_ud_rtl_dce);
+ NEXT_PASS (pass_combine);
+ NEXT_PASS (pass_if_after_combine);
+ NEXT_PASS (pass_jump_after_combine);
+ NEXT_PASS (pass_partition_blocks);
+ NEXT_PASS (pass_outof_cfg_layout_mode);
+ NEXT_PASS (pass_split_all_insns);
+ NEXT_PASS (pass_lower_subreg3);
+ NEXT_PASS (pass_df_initialize_no_opt);
+ NEXT_PASS (pass_stack_ptr_mod);
+ NEXT_PASS (pass_mode_switching);
+ NEXT_PASS (pass_match_asm_constraints);
+ NEXT_PASS (pass_sms);
+ NEXT_PASS (pass_live_range_shrinkage);
+ NEXT_PASS (pass_sched);
+ NEXT_PASS (pass_early_remat);
+ NEXT_PASS (pass_ira);
+ NEXT_PASS (pass_reload);
+ NEXT_PASS (pass_postreload);
+ PUSH_INSERT_PASSES_WITHIN (pass_postreload)
+ NEXT_PASS (pass_postreload_cse);
+ NEXT_PASS (pass_gcse2);
+ NEXT_PASS (pass_split_after_reload);
+ NEXT_PASS (pass_ree);
+ NEXT_PASS (pass_compare_elim_after_reload);
+ NEXT_PASS (pass_thread_prologue_and_epilogue);
+ NEXT_PASS (pass_rtl_dse2);
+ NEXT_PASS (pass_stack_adjustments);
+ NEXT_PASS (pass_jump2);
+ NEXT_PASS (pass_duplicate_computed_gotos);
+ NEXT_PASS (pass_sched_fusion);
+ NEXT_PASS (pass_peephole2);
+ NEXT_PASS (pass_if_after_reload);
+ NEXT_PASS (pass_regrename);
+ NEXT_PASS (pass_cprop_hardreg);
+ NEXT_PASS (pass_fast_rtl_dce);
+ NEXT_PASS (pass_reorder_blocks);
+ NEXT_PASS (pass_leaf_regs);
+ NEXT_PASS (pass_split_before_sched2);
+ NEXT_PASS (pass_sched2);
+ NEXT_PASS (pass_stack_regs);
+ PUSH_INSERT_PASSES_WITHIN (pass_stack_regs)
+ NEXT_PASS (pass_split_before_regstack);
+ NEXT_PASS (pass_stack_regs_run);
+ POP_INSERT_PASSES ()
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_late_compilation);
+ PUSH_INSERT_PASSES_WITHIN (pass_late_compilation)
+ NEXT_PASS (pass_zero_call_used_regs);
+ NEXT_PASS (pass_compute_alignments);
+ NEXT_PASS (pass_variable_tracking);
+ NEXT_PASS (pass_free_cfg);
+ NEXT_PASS (pass_machine_reorg);
+ NEXT_PASS (pass_cleanup_barriers);
+ NEXT_PASS (pass_delay_slots);
+ NEXT_PASS (pass_split_for_shorten_branches);
+ NEXT_PASS (pass_convert_to_eh_region_ranges);
+ NEXT_PASS (pass_shorten_branches);
+ NEXT_PASS (pass_set_nothrow_function_flags);
+ NEXT_PASS (pass_dwarf2_frame);
+ NEXT_PASS (pass_final);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_df_finish);
+ POP_INSERT_PASSES ()
+ NEXT_PASS (pass_clean_state);
+ TERMINATE_PASS_LIST (all_passes)
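The list above is consumed as an X-macro file: the including source defines the structural macros and then #includes the pass definitions. Below is a minimal sketch of such a consumer, assuming a hypothetical register_one_pass() helper; the real pass manager instead expands NEXT_PASS into code that links opt_pass objects into a tree.

    /* Sketch: each macro is (re)defined immediately before including the
       pass list; register_one_pass() is a hypothetical stand-in.  */
    #define INSERT_PASSES_AFTER(PASS)         /* begin a top-level pass list */
    #define PUSH_INSERT_PASSES_WITHIN(PASS)   /* descend into PASS's sub-passes */
    #define POP_INSERT_PASSES()               /* ascend one nesting level */
    #define NEXT_PASS(PASS, ...) register_one_pass (#PASS);
    #define TERMINATE_PASS_LIST(PASS)         /* close the pass list */
    #include "passes.def"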
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin-api.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin-api.h
new file mode 100644
index 0000000..379828b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin-api.h
@@ -0,0 +1,605 @@
+/* plugin-api.h -- External linker plugin API. */
+
+/* Copyright (C) 2009-2023 Free Software Foundation, Inc.
+ Written by Cary Coutant <ccoutant@google.com>.
+
+ This file is part of binutils.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* This file defines the interface for writing a linker plugin, which is
+   described at <http://gcc.gnu.org/wiki/whopr/driver>. */
+
+#ifndef PLUGIN_API_H
+#define PLUGIN_API_H
+
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#elif defined(HAVE_INTTYPES_H)
+#include <inttypes.h>
+#endif
+#include <sys/types.h>
+#if !defined(HAVE_STDINT_H) && !defined(HAVE_INTTYPES_H) && \
+ !defined(UINT64_MAX) && !defined(uint64_t)
+#error cannot find uint64_t type
+#endif
+
+/* Detect endianness based on the __BYTE_ORDER__ macro. */
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+ defined(__ORDER_LITTLE_ENDIAN__) && defined(__ORDER_PDP_ENDIAN__)
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define PLUGIN_LITTLE_ENDIAN 1
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define PLUGIN_BIG_ENDIAN 1
+#elif __BYTE_ORDER__ == __ORDER_PDP_ENDIAN__
+#define PLUGIN_PDP_ENDIAN 1
+#endif
+#else
+/* Older GCC releases (<4.6.0) can detect endianness from glibc macros. */
+#if defined(__GLIBC__) || defined(__GNU_LIBRARY__) || defined(__ANDROID__)
+#include <endian.h>
+#ifdef __BYTE_ORDER
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define PLUGIN_LITTLE_ENDIAN 1
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define PLUGIN_BIG_ENDIAN 1
+#endif
+#endif
+#endif
+/* Include all necessary header files based on target. */
+#if defined(__SVR4) && defined(__sun)
+#include <sys/byteorder.h>
+#endif
+#if defined(__FreeBSD__) || defined(__NetBSD__) || \
+ defined(__DragonFly__) || defined(__minix)
+#include <sys/endian.h>
+#endif
+#if defined(__OpenBSD__)
+#include <machine/endian.h>
+#endif
+/* Detect endianness based on _BYTE_ORDER. */
+#ifdef _BYTE_ORDER
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+#define PLUGIN_LITTLE_ENDIAN 1
+#elif _BYTE_ORDER == _BIG_ENDIAN
+#define PLUGIN_BIG_ENDIAN 1
+#endif
+#endif
+/* Detect based on _WIN32. */
+#if defined(_WIN32)
+#define PLUGIN_LITTLE_ENDIAN 1
+#endif
+/* Detect based on __BIG_ENDIAN__ and __LITTLE_ENDIAN__. */
+#ifdef __LITTLE_ENDIAN__
+#define PLUGIN_LITTLE_ENDIAN 1
+#endif
+#ifdef __BIG_ENDIAN__
+#define PLUGIN_BIG_ENDIAN 1
+#endif
+#endif
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Status code returned by most API routines. */
+
+enum ld_plugin_status
+{
+ LDPS_OK = 0,
+ LDPS_NO_SYMS, /* Attempt to get symbols that haven't been added. */
+ LDPS_BAD_HANDLE, /* No claimed object associated with given handle. */
+ LDPS_ERR
+  /* Additional error codes TBD. */
+};
+
+/* The version of the API specification. */
+
+enum ld_plugin_api_version
+{
+ LD_PLUGIN_API_VERSION = 1
+};
+
+/* The type of output file being generated by the linker. */
+
+enum ld_plugin_output_file_type
+{
+ LDPO_REL,
+ LDPO_EXEC,
+ LDPO_DYN,
+ LDPO_PIE
+};
+
+/* An input file managed by the plugin library. */
+
+struct ld_plugin_input_file
+{
+ const char *name;
+ int fd;
+ off_t offset;
+ off_t filesize;
+ void *handle;
+};
+
+/* A symbol belonging to an input file managed by the plugin library. */
+
+struct ld_plugin_symbol
+{
+ char *name;
+ char *version;
+  /* This is for compatibility with older ABIs. The older ABI defined
+     only the 'def' field. */
+#if PLUGIN_BIG_ENDIAN == 1
+ char unused;
+ char section_kind;
+ char symbol_type;
+ char def;
+#elif PLUGIN_LITTLE_ENDIAN == 1
+ char def;
+ char symbol_type;
+ char section_kind;
+ char unused;
+#elif PLUGIN_PDP_ENDIAN == 1
+ char symbol_type;
+ char def;
+ char unused;
+ char section_kind;
+#else
+#error "Could not detect architecture endianness"
+#endif
+ int visibility;
+ uint64_t size;
+ char *comdat_key;
+ int resolution;
+};
+
+/* An object's section. */
+
+struct ld_plugin_section
+{
+ const void* handle;
+ unsigned int shndx;
+};
+
+/* Whether the symbol is a definition, a reference, or common, and
+   whether it is weak. */
+
+enum ld_plugin_symbol_kind
+{
+ LDPK_DEF,
+ LDPK_WEAKDEF,
+ LDPK_UNDEF,
+ LDPK_WEAKUNDEF,
+ LDPK_COMMON
+};
+
+/* The visibility of the symbol. */
+
+enum ld_plugin_symbol_visibility
+{
+ LDPV_DEFAULT,
+ LDPV_PROTECTED,
+ LDPV_INTERNAL,
+ LDPV_HIDDEN
+};
+
+/* The type of the symbol. */
+
+enum ld_plugin_symbol_type
+{
+ LDST_UNKNOWN,
+ LDST_FUNCTION,
+ LDST_VARIABLE
+};
+
+enum ld_plugin_symbol_section_kind
+{
+ LDSSK_DEFAULT,
+ LDSSK_BSS
+};
+
+/* How a symbol is resolved. */
+
+enum ld_plugin_symbol_resolution
+{
+ LDPR_UNKNOWN = 0,
+
+ /* Symbol is still undefined at this point. */
+ LDPR_UNDEF,
+
+ /* This is the prevailing definition of the symbol, with references from
+ regular object code. */
+ LDPR_PREVAILING_DEF,
+
+ /* This is the prevailing definition of the symbol, with no
+ references from regular objects. It is only referenced from IR
+ code. */
+ LDPR_PREVAILING_DEF_IRONLY,
+
+ /* This definition was pre-empted by a definition in a regular
+ object file. */
+ LDPR_PREEMPTED_REG,
+
+ /* This definition was pre-empted by a definition in another IR file. */
+ LDPR_PREEMPTED_IR,
+
+ /* This symbol was resolved by a definition in another IR file. */
+ LDPR_RESOLVED_IR,
+
+ /* This symbol was resolved by a definition in a regular object
+ linked into the main executable. */
+ LDPR_RESOLVED_EXEC,
+
+ /* This symbol was resolved by a definition in a shared object. */
+ LDPR_RESOLVED_DYN,
+
+ /* This is the prevailing definition of the symbol, with no
+ references from regular objects. It is only referenced from IR
+ code, but the symbol is exported and may be referenced from
+ a dynamic object (not seen at link time). */
+ LDPR_PREVAILING_DEF_IRONLY_EXP
+};
+
+/* The plugin library's "claim file" handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_claim_file_handler) (
+ const struct ld_plugin_input_file *file, int *claimed);
+
+/* The plugin library's "all symbols read" handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_all_symbols_read_handler) (void);
+
+/* The plugin library's cleanup handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_cleanup_handler) (void);
+
+/* The linker's interface for registering the "claim file" handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_register_claim_file) (ld_plugin_claim_file_handler handler);
+
+/* The linker's interface for registering the "all symbols read" handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_register_all_symbols_read) (
+ ld_plugin_all_symbols_read_handler handler);
+
+/* The linker's interface for registering the cleanup handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_register_cleanup) (ld_plugin_cleanup_handler handler);
+
+/* The linker's interface for adding symbols from a claimed input file. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_add_symbols) (void *handle, int nsyms,
+ const struct ld_plugin_symbol *syms);
+
+/* The linker's interface for getting the input file information with
+ an open (possibly re-opened) file descriptor. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_get_input_file) (const void *handle,
+ struct ld_plugin_input_file *file);
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_get_view) (const void *handle, const void **viewp);
+
+/* The linker's interface for releasing the input file. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_release_input_file) (const void *handle);
+
+/* The linker's interface for retrieving symbol resolution information. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_get_symbols) (const void *handle, int nsyms,
+ struct ld_plugin_symbol *syms);
+
+/* The linker's interface for adding a compiled input file. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_add_input_file) (const char *pathname);
+
+/* The linker's interface for adding a library that should be searched. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_add_input_library) (const char *libname);
+
+/* The linker's interface for adding a library path that should be searched. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_set_extra_library_path) (const char *path);
+
+/* The linker's interface for issuing a warning or error message. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_message) (int level, const char *format, ...);
+
+/* The linker's interface for retrieving the number of sections in an object.
+ The handle is obtained in the claim_file handler. This interface should
+ only be invoked in the claim_file handler. This function sets *COUNT to
+ the number of sections in the object. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_get_input_section_count) (const void* handle, unsigned int *count);
+
+/* The linker's interface for retrieving the section type of a specific
+ section in an object. This interface should only be invoked in the
+ claim_file handler. This function sets *TYPE to an ELF SHT_xxx value. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_get_input_section_type) (const struct ld_plugin_section section,
+ unsigned int *type);
+
+/* The linker's interface for retrieving the name of a specific section in
+ an object. This interface should only be invoked in the claim_file handler.
+ This function sets *SECTION_NAME_PTR to a null-terminated buffer allocated
+ by malloc. The plugin must free *SECTION_NAME_PTR. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_get_input_section_name) (const struct ld_plugin_section section,
+ char **section_name_ptr);
+
+/* The linker's interface for retrieving the contents of a specific section
+ in an object. This interface should only be invoked in the claim_file
+   handler. This function sets *SECTION_CONTENTS to point to a buffer that
+   is valid until the claim_file handler returns. It sets *LEN to the size
+   of the buffer. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_get_input_section_contents) (const struct ld_plugin_section section,
+ const unsigned char **section_contents,
+ size_t* len);
+
+/* The linker's interface for specifying the desired order of sections.
+   The sections should be specified using the array SECTION_LIST in the
+   order in which they should appear in the final layout. NUM_SECTIONS
+   specifies the number of entries in the array. This should be invoked
+ in the all_symbols_read handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_update_section_order) (const struct ld_plugin_section *section_list,
+ unsigned int num_sections);
+
+/* The linker's interface for specifying that reordering of sections is
+ desired so that the linker can prepare for it. This should be invoked
+ before update_section_order, preferably in the claim_file handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_allow_section_ordering) (void);
+
+/* The linker's interface for specifying that a subset of sections is
+ to be mapped to a unique segment. If the plugin wants to call
+ unique_segment_for_sections, it must call this function from a
+ claim_file_handler or when it is first loaded. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_allow_unique_segment_for_sections) (void);
+
+/* The linker's interface for specifying that a specific set of sections
+ must be mapped to a unique segment. ELF segments do not have names
+ and the NAME is used as the name of the newly created output section
+ that is then placed in the unique PT_LOAD segment. FLAGS is used to
+ specify if any additional segment flags need to be set. For instance,
+ a specific segment flag can be set to identify this segment. Unsetting
+   segment flags that would be set by default is not possible. The
+   parameter SEGMENT_ALIGNMENT, when non-zero, overrides the default. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_unique_segment_for_sections) (
+ const char* segment_name,
+ uint64_t segment_flags,
+ uint64_t segment_alignment,
+ const struct ld_plugin_section * section_list,
+ unsigned int num_sections);
+
+/* The linker's interface for retrieving the alignment requirement of a
+   specific section in an object. This interface should only be invoked
+   in the claim_file handler. This function sets *ADDRALIGN to the ELF
+   sh_addralign value of the input section. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_get_input_section_alignment) (const struct ld_plugin_section section,
+ unsigned int *addralign);
+
+/* The linker's interface for retrieving the size of a specific section
+   in an object. This interface should only be invoked in the claim_file
+   handler. This function sets *SECSIZE to the ELF sh_size value of the
+   input section. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_get_input_section_size) (const struct ld_plugin_section section,
+ uint64_t *secsize);
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_new_input_handler) (const struct ld_plugin_input_file *file);
+
+/* The linker's interface for registering the "new_input" handler. This handler
+ will be notified when a new input file has been added after the
+ all_symbols_read event, allowing the plugin to, for example, set a unique
+ segment for sections in plugin-generated input files. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_register_new_input) (ld_plugin_new_input_handler handler);
+
+/* The linker's interface for getting the list of symbols wrapped with the
+   --wrap option. This sets *NUM_SYMBOLS to the number of wrapped symbols
+   and *WRAP_SYMBOL_LIST to the list of wrapped symbols. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_get_wrap_symbols) (uint64_t *num_symbols,
+ const char ***wrap_symbol_list);
+
+enum ld_plugin_level
+{
+ LDPL_INFO,
+ LDPL_WARNING,
+ LDPL_ERROR,
+ LDPL_FATAL
+};
+
+/* Contract between a plug-in and a linker. */
+
+enum linker_api_version
+{
+  /* The linker/plugin do not implement any of the API levels below; the
+     API is determined solely via the transfer vector. */
+ LAPI_V0,
+
+  /* API level v1. The linker provides get_symbols_v3 and add_symbols_v2;
+     the plugin will use those and not any lower versions.
+     claim_file is thread-safe on the plugin side and
+     add_symbols on the linker side. */
+ LAPI_V1
+};
+
+/* The linker's interface for API version negotiation. A plug-in calls
+   the function with its IDENTIFIER and VERSION, together with the minimal
+   and maximal linker_api_version it supports. The linker then returns the
+   selected API version and provides its own IDENTIFIER and VERSION. The
+   value returned by the linker must be in the range
+   [MINIMAL_API_SUPPORTED, MAXIMAL_API_SUPPORTED]. Identifier pointers
+   remain valid as long as the plugin is loaded. */
+
+typedef
+int
+(*ld_plugin_get_api_version) (const char *plugin_identifier,
+ const char *plugin_version,
+ int minimal_api_supported,
+ int maximal_api_supported,
+ const char **linker_identifier,
+ const char **linker_version);
+
+/* Values for the tv_tag field of the transfer vector. */
+
+enum ld_plugin_tag
+{
+ LDPT_NULL,
+ LDPT_API_VERSION,
+ LDPT_GOLD_VERSION,
+ LDPT_LINKER_OUTPUT,
+ LDPT_OPTION,
+ LDPT_REGISTER_CLAIM_FILE_HOOK,
+ LDPT_REGISTER_ALL_SYMBOLS_READ_HOOK,
+ LDPT_REGISTER_CLEANUP_HOOK,
+ LDPT_ADD_SYMBOLS,
+ LDPT_GET_SYMBOLS,
+ LDPT_ADD_INPUT_FILE,
+ LDPT_MESSAGE,
+ LDPT_GET_INPUT_FILE,
+ LDPT_RELEASE_INPUT_FILE,
+ LDPT_ADD_INPUT_LIBRARY,
+ LDPT_OUTPUT_NAME,
+ LDPT_SET_EXTRA_LIBRARY_PATH,
+ LDPT_GNU_LD_VERSION,
+ LDPT_GET_VIEW,
+ LDPT_GET_INPUT_SECTION_COUNT,
+ LDPT_GET_INPUT_SECTION_TYPE,
+ LDPT_GET_INPUT_SECTION_NAME,
+ LDPT_GET_INPUT_SECTION_CONTENTS,
+ LDPT_UPDATE_SECTION_ORDER,
+ LDPT_ALLOW_SECTION_ORDERING,
+ LDPT_GET_SYMBOLS_V2,
+ LDPT_ALLOW_UNIQUE_SEGMENT_FOR_SECTIONS,
+ LDPT_UNIQUE_SEGMENT_FOR_SECTIONS,
+ LDPT_GET_SYMBOLS_V3,
+ LDPT_GET_INPUT_SECTION_ALIGNMENT,
+ LDPT_GET_INPUT_SECTION_SIZE,
+ LDPT_REGISTER_NEW_INPUT_HOOK,
+ LDPT_GET_WRAP_SYMBOLS,
+ LDPT_ADD_SYMBOLS_V2,
+ LDPT_GET_API_VERSION,
+};
+
+/* The plugin transfer vector. */
+
+struct ld_plugin_tv
+{
+ enum ld_plugin_tag tv_tag;
+ union
+ {
+ int tv_val;
+ const char *tv_string;
+ ld_plugin_register_claim_file tv_register_claim_file;
+ ld_plugin_register_all_symbols_read tv_register_all_symbols_read;
+ ld_plugin_register_cleanup tv_register_cleanup;
+ ld_plugin_add_symbols tv_add_symbols;
+ ld_plugin_get_symbols tv_get_symbols;
+ ld_plugin_add_input_file tv_add_input_file;
+ ld_plugin_message tv_message;
+ ld_plugin_get_input_file tv_get_input_file;
+ ld_plugin_get_view tv_get_view;
+ ld_plugin_release_input_file tv_release_input_file;
+ ld_plugin_add_input_library tv_add_input_library;
+ ld_plugin_set_extra_library_path tv_set_extra_library_path;
+ ld_plugin_get_input_section_count tv_get_input_section_count;
+ ld_plugin_get_input_section_type tv_get_input_section_type;
+ ld_plugin_get_input_section_name tv_get_input_section_name;
+ ld_plugin_get_input_section_contents tv_get_input_section_contents;
+ ld_plugin_update_section_order tv_update_section_order;
+ ld_plugin_allow_section_ordering tv_allow_section_ordering;
+ ld_plugin_allow_unique_segment_for_sections tv_allow_unique_segment_for_sections;
+ ld_plugin_unique_segment_for_sections tv_unique_segment_for_sections;
+ ld_plugin_get_input_section_alignment tv_get_input_section_alignment;
+ ld_plugin_get_input_section_size tv_get_input_section_size;
+ ld_plugin_register_new_input tv_register_new_input;
+ ld_plugin_get_wrap_symbols tv_get_wrap_symbols;
+ ld_plugin_get_api_version tv_get_api_version;
+ } tv_u;
+};
+
+/* The plugin library's "onload" entry point. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_onload) (struct ld_plugin_tv *tv);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !defined(PLUGIN_API_H) */
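As a usage sketch (not part of the shipped header), a minimal linker plugin against this API walks the transfer vector passed to onload until the LDPT_NULL terminator, installs a claim_file handler, and declines every input file; all identifiers below come from plugin-api.h itself.

    #include "plugin-api.h"

    static enum ld_plugin_status
    claim_file (const struct ld_plugin_input_file *file, int *claimed)
    {
      (void) file;
      *claimed = 0;                 /* Decline; let the linker handle it.  */
      return LDPS_OK;
    }

    enum ld_plugin_status
    onload (struct ld_plugin_tv *tv)
    {
      for (; tv->tv_tag != LDPT_NULL; tv++)
        if (tv->tv_tag == LDPT_REGISTER_CLAIM_FILE_HOOK)
          return tv->tv_u.tv_register_claim_file (claim_file);
      return LDPS_ERR;              /* The linker did not offer the hook.  */
    }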
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin-version.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin-version.h
new file mode 100644
index 0000000..44c935d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin-version.h
@@ -0,0 +1,18 @@
+#include "configargs.h"
+
+#define GCCPLUGIN_VERSION_MAJOR 13
+#define GCCPLUGIN_VERSION_MINOR 2
+#define GCCPLUGIN_VERSION_PATCHLEVEL 1
+#define GCCPLUGIN_VERSION (GCCPLUGIN_VERSION_MAJOR*1000 + GCCPLUGIN_VERSION_MINOR)
+
+static char basever[] = "13.2.1";
+static char datestamp[] = "20231009";
+static char devphase[] = "";
+static char revision[] = "";
+
+/* FIXME plugins: We should make the version information more precise.
+   One way to do that is to add a checksum. */
+
+static struct plugin_gcc_version gcc_version = {basever, datestamp,
+ devphase, revision,
+ configuration_arguments};
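By the formula above, GCCPLUGIN_VERSION for this toolchain evaluates to 13 * 1000 + 2 = 13002, so a plugin can use it as a coarse compile-time guard; a sketch:

    #if GCCPLUGIN_VERSION != 13002
    #error "this plugin was built against GCC 13.2 headers"
    #endif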
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin.def
new file mode 100644
index 0000000..5cf62ac
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin.def
@@ -0,0 +1,112 @@
+/* This file contains the definitions for plugin events in GCC.
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Called before parsing the body of a function. */
+DEFEVENT (PLUGIN_START_PARSE_FUNCTION)
+
+/* After finishing parsing a function. */
+DEFEVENT (PLUGIN_FINISH_PARSE_FUNCTION)
+
+/* To hook into pass manager. */
+DEFEVENT (PLUGIN_PASS_MANAGER_SETUP)
+
+/* After finishing parsing a type. */
+DEFEVENT (PLUGIN_FINISH_TYPE)
+
+/* After finishing parsing a declaration. */
+DEFEVENT (PLUGIN_FINISH_DECL)
+
+/* Useful for summary processing. */
+DEFEVENT (PLUGIN_FINISH_UNIT)
+
+/* Allows seeing the low-level AST in the C and C++ front ends. */
+DEFEVENT (PLUGIN_PRE_GENERICIZE)
+
+/* Called before GCC exits. */
+DEFEVENT (PLUGIN_FINISH)
+
+/* Information about the plugin. */
+DEFEVENT (PLUGIN_INFO)
+
+/* Called at start of GCC Garbage Collection. */
+DEFEVENT (PLUGIN_GGC_START)
+
+/* Extend the GGC marking. */
+DEFEVENT (PLUGIN_GGC_MARKING)
+
+/* Called at end of GGC. */
+DEFEVENT (PLUGIN_GGC_END)
+
+/* Register an extra GGC root table. */
+DEFEVENT (PLUGIN_REGISTER_GGC_ROOTS)
+
+/* Called during attribute registration. */
+DEFEVENT (PLUGIN_ATTRIBUTES)
+
+/* Called before processing a translation unit. */
+DEFEVENT (PLUGIN_START_UNIT)
+
+/* Called during pragma registration. */
+DEFEVENT (PLUGIN_PRAGMAS)
+
+/* Called before first pass from all_passes. */
+DEFEVENT (PLUGIN_ALL_PASSES_START)
+
+/* Called after last pass from all_passes. */
+DEFEVENT (PLUGIN_ALL_PASSES_END)
+
+/* Called before first ipa pass. */
+DEFEVENT (PLUGIN_ALL_IPA_PASSES_START)
+
+/* Called after last ipa pass. */
+DEFEVENT (PLUGIN_ALL_IPA_PASSES_END)
+
+/* Allows overriding the pass gate decision for current_pass. */
+DEFEVENT (PLUGIN_OVERRIDE_GATE)
+
+/* Called before executing a pass. */
+DEFEVENT (PLUGIN_PASS_EXECUTION)
+
+/* Called before executing subpasses of a GIMPLE_PASS in
+ execute_ipa_pass_list. */
+DEFEVENT (PLUGIN_EARLY_GIMPLE_PASSES_START)
+
+/* Called after executing subpasses of a GIMPLE_PASS in
+ execute_ipa_pass_list. */
+DEFEVENT (PLUGIN_EARLY_GIMPLE_PASSES_END)
+
+/* Called when a pass is first instantiated. */
+DEFEVENT (PLUGIN_NEW_PASS)
+
+/* Called when a file is #include-d or given via the #line directive.
+   This can happen many times. The event data is the included file path,
+   as a const char* pointer. */
+DEFEVENT (PLUGIN_INCLUDE_FILE)
+
+/* Called when -fanalyzer starts. The event data is an
+ ana::plugin_analyzer_init_iface *. */
+DEFEVENT (PLUGIN_ANALYZER_INIT)
+
+/* When adding a new hard-coded plugin event, don't forget to update the
+   functions register_callback and invoke_plugin_callbacks_full in
+   plugin.cc accordingly! */
+
+/* After the hard-coded events above, plugins can dynamically allocate
+   events at run time.
+   PLUGIN_EVENT_FIRST_DYNAMIC appears only as the last enum element. */
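plugin.def is an X-macro list: each consumer defines DEFEVENT, includes the file, and undefines it again, as plugin.h below does to build the plugin_event enum. A sketch of generating matching name strings the same way (the event_names table is illustrative, not part of the API):

    static const char *const event_names[] = {
    #define DEFEVENT(NAME) #NAME,
    #include "plugin.def"
    #undef DEFEVENT
    };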
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin.h
new file mode 100644
index 0000000..ee0a53e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/plugin.h
@@ -0,0 +1,208 @@
+/* Header file for internal GCC plugin mechanism.
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef PLUGIN_H
+#define PLUGIN_H
+
+#include "highlev-plugin-common.h"
+
+/* Event names. */
+enum plugin_event
+{
+# define DEFEVENT(NAME) NAME,
+# include "plugin.def"
+# undef DEFEVENT
+ PLUGIN_EVENT_FIRST_DYNAMIC
+};
+
+/* All globals declared here have C linkage to reduce link compatibility
+ issues with implementation language choice and mangling. */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern const char **plugin_event_name;
+
+struct plugin_argument
+{
+  char *key;    /* Key of the argument. */
+  char *value;  /* The value is optional and can be NULL. */
+};
+
+/* Additional information about the plugin. Used by --help and --version. */
+
+struct plugin_info
+{
+ const char *version;
+ const char *help;
+};
+
+/* Represents the gcc version. Used to avoid using an incompatible plugin. */
+
+struct plugin_gcc_version
+{
+ const char *basever;
+ const char *datestamp;
+ const char *devphase;
+ const char *revision;
+ const char *configuration_arguments;
+};
+
+/* Object that keeps track of the plugin name and its arguments. */
+struct plugin_name_args
+{
+ char *base_name; /* Short name of the plugin (filename without
+ .so suffix). */
+ const char *full_name; /* Path to the plugin as specified with
+ -fplugin=. */
+ int argc; /* Number of arguments specified with
+ -fplugin-arg-... */
+ struct plugin_argument *argv; /* Array of ARGC key-value pairs. */
+ const char *version; /* Version string provided by plugin. */
+ const char *help; /* Help string provided by plugin. */
+};
+
+/* The default version check. Compares every field in VERSION. */
+
+extern bool plugin_default_version_check (struct plugin_gcc_version *,
+ struct plugin_gcc_version *);
+
+/* Function type for the plugin initialization routine. Each plugin module
+ should define this as an externally-visible function with name
+ "plugin_init."
+
+ PLUGIN_INFO - plugin invocation information.
+ VERSION - the plugin_gcc_version symbol of GCC.
+
+ Returns 0 if initialization finishes successfully. */
+
+typedef int (*plugin_init_func) (struct plugin_name_args *plugin_info,
+ struct plugin_gcc_version *version);
+
+/* Declaration for "plugin_init" function so that it doesn't need to be
+ duplicated in every plugin. */
+extern int plugin_init (struct plugin_name_args *plugin_info,
+ struct plugin_gcc_version *version);
+
+/* Function type for a plugin callback routine.
+
+ GCC_DATA - event-specific data provided by GCC
+ USER_DATA - plugin-specific data provided by the plugin */
+
+typedef void (*plugin_callback_func) (void *gcc_data, void *user_data);
+
+/* Called from the plugin's initialization code. Register a single callback.
+ This function can be called multiple times.
+
+ PLUGIN_NAME - display name for this plugin
+ EVENT - which event the callback is for
+ CALLBACK - the callback to be called at the event
+ USER_DATA - plugin-provided data.
+*/
+
+/* Number of event ids / names registered so far. */
+
+extern int get_event_last (void);
+
+int get_named_event_id (const char *name, enum insert_option insert);
+
+/* This is also called without a callback routine for the
+ PLUGIN_PASS_MANAGER_SETUP, PLUGIN_INFO and PLUGIN_REGISTER_GGC_ROOTS
+ pseudo-events, with a specific user_data.
+ */
+
+extern void register_callback (const char *plugin_name,
+ int event,
+ plugin_callback_func callback,
+ void *user_data);
+
+extern int unregister_callback (const char *plugin_name, int event);
+
+
+/* Retrieve the plugin directory name, as returned by the
+ -fprint-file-name=plugin argument to the gcc program, which is the
+ -iplugindir program argument to cc1. */
+extern const char* default_plugin_dir_name (void);
+
+#ifdef __cplusplus
+}
+#endif
+
+/* In case the C++ compiler does name mangling for globals, declare
+ plugin_is_GPL_compatible extern "C" so that a later definition
+ in a plugin file will have this linkage. */
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern int plugin_is_GPL_compatible;
+#ifdef __cplusplus
+}
+#endif
+
+
+struct attribute_spec;
+struct scoped_attributes;
+
+extern void add_new_plugin (const char *);
+extern void parse_plugin_arg_opt (const char *);
+extern int invoke_plugin_callbacks_full (int, void *);
+extern void initialize_plugins (void);
+extern bool plugins_active_p (void);
+extern void dump_active_plugins (FILE *);
+extern void debug_active_plugins (void);
+extern void warn_if_plugins (void);
+extern void print_plugins_versions (FILE *file, const char *indent);
+extern void print_plugins_help (FILE *file, const char *indent);
+extern void finalize_plugins (void);
+extern void for_each_plugin (void (*cb) (const plugin_name_args *,
+ void *user_data),
+ void *user_data);
+
+extern bool flag_plugin_added;
+
+/* Called from inside GCC. Invoke all plugin callbacks registered with
+ the specified event.
+ Return PLUGEVT_SUCCESS if at least one callback was called,
+ PLUGEVT_NO_CALLBACK if there was no callback.
+
+ EVENT - the event identifier
+ GCC_DATA - event-specific data provided by the compiler */
+
+inline int
+invoke_plugin_callbacks (int event ATTRIBUTE_UNUSED,
+ void *gcc_data ATTRIBUTE_UNUSED)
+{
+#ifdef ENABLE_PLUGIN
+ /* True iff at least one plugin has been added. */
+ if (flag_plugin_added)
+ return invoke_plugin_callbacks_full (event, gcc_data);
+#endif
+
+ return PLUGEVT_NO_CALLBACK;
+}
+
+/* In attribs.cc. */
+
+extern void register_attribute (const struct attribute_spec *attr);
+/* The default argument for the third parameter is given in attribs.h. */
+extern struct scoped_attributes* register_scoped_attributes (const struct attribute_spec *,
+ const char *,
+ bool);
+
+#endif /* PLUGIN_H */
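A minimal plugin against this interface might look as follows; a sketch that assumes the usual umbrella header gcc-plugin.h and uses only plugin_init, plugin_default_version_check, register_callback, and the gcc_version object from plugin-version.h.

    #include "gcc-plugin.h"
    #include "plugin-version.h"

    int plugin_is_GPL_compatible;

    /* Runs once, just before GCC exits (PLUGIN_FINISH).  */
    static void
    on_finish (void *gcc_data, void *user_data)
    {
      (void) gcc_data;
      (void) user_data;
    }

    int
    plugin_init (struct plugin_name_args *info,
                 struct plugin_gcc_version *version)
    {
      /* Refuse to load into a compiler we were not built for.  */
      if (!plugin_default_version_check (version, &gcc_version))
        return 1;
      register_callback (info->base_name, PLUGIN_FINISH, on_finish, NULL);
      return 0;
    }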
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/pointer-query.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/pointer-query.h
new file mode 100644
index 0000000..2522ac9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/pointer-query.h
@@ -0,0 +1,297 @@
+/* Definitions of the pointer_query and related classes.
+
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_POINTER_QUERY_H
+#define GCC_POINTER_QUERY_H
+
+/* Describes recursion limits used by functions that follow use-def
+ chains of SSA_NAMEs. */
+
+class ssa_name_limit_t
+{
+ bitmap visited; /* Bitmap of visited SSA_NAMEs. */
+ unsigned ssa_def_max; /* Longest chain of SSA_NAMEs to follow. */
+
+ /* Not copyable or assignable. */
+ DISABLE_COPY_AND_ASSIGN (ssa_name_limit_t);
+
+public:
+
+ ssa_name_limit_t ()
+ : visited (),
+ ssa_def_max (param_ssa_name_def_chain_limit) { }
+
+ /* Set a bit for the PHI in VISITED and return true if it wasn't
+ already set. */
+ bool visit_phi (tree);
+ /* Clear a bit for the PHI in VISITED. */
+ void leave_phi (tree);
+ /* Return false if the SSA_NAME chain length counter has reached
+ the limit, otherwise increment the counter and return true. */
+ bool next ();
+
+ /* If the SSA_NAME has already been "seen" return a positive value.
+ Otherwise add it to VISITED. If the SSA_NAME limit has been
+ reached, return a negative value. Otherwise return zero. */
+ int next_phi (tree);
+
+ ~ssa_name_limit_t ();
+};
+
+class pointer_query;
+
+/* Describes a reference to an object used in an access. */
+struct access_ref
+{
+ /* Set the bounds of the reference. */
+ access_ref ();
+
+ /* Return the PHI node REF refers to or null if it doesn't. */
+ gphi *phi () const;
+
+ /* Merge the result for a pointer with *THIS. */
+ void merge_ref (vec<access_ref> *all_refs, tree, gimple *, int, bool,
+ ssa_name_limit_t &, pointer_query &);
+
+ /* Return the object to which REF refers. */
+ tree get_ref (vec<access_ref> *, access_ref * = nullptr, int = 1,
+ ssa_name_limit_t * = nullptr, pointer_query * = nullptr) const;
+
+ /* Return true if OFFRNG is the constant zero. */
+ bool offset_zero () const
+ {
+ return offrng[0] == 0 && offrng[1] == 0;
+ }
+
+ /* Return true if OFFRNG is bounded to a subrange of offset values
+ valid for the largest possible object. */
+ bool offset_bounded () const;
+
+  /* Return the maximum amount of space remaining and, if the argument
+     is non-null, set it to the minimum. */
+ offset_int size_remaining (offset_int * = nullptr) const;
+
+ /* Return true if the offset and object size are in range for SIZE. */
+ bool offset_in_range (const offset_int &) const;
+
+ /* Return true if *THIS is an access to a declared object. */
+ bool ref_declared () const
+ {
+ return DECL_P (ref) && base0 && deref < 1;
+ }
+
+ /* Set the size range to the maximum. */
+ void set_max_size_range ()
+ {
+ sizrng[0] = 0;
+ sizrng[1] = wi::to_offset (max_object_size ());
+ }
+
+ /* Add OFF to the offset range. */
+ void add_offset (const offset_int &off)
+ {
+ add_offset (off, off);
+ }
+
+ /* Add the range [MIN, MAX] to the offset range. */
+ void add_offset (const offset_int &, const offset_int &);
+
+ /* Add the maximum representable offset to the offset range. */
+ void add_max_offset ()
+ {
+ offset_int maxoff = wi::to_offset (TYPE_MAX_VALUE (ptrdiff_type_node));
+ add_offset (-maxoff - 1, maxoff);
+ }
+
+ /* Issue an informational message describing the target of an access
+ with the given mode. */
+ void inform_access (access_mode, int = 1) const;
+
+ /* Dump *THIS to a file. */
+ void dump (FILE *) const;
+
+ /* Reference to the accessed object(s). */
+ tree ref;
+
+ /* Range of byte offsets into and sizes of the object(s). */
+ offset_int offrng[2];
+ offset_int sizrng[2];
+ /* The minimum and maximum offset computed. */
+ offset_int offmax[2];
+
+ /* Used to fold integer expressions when called from front ends. */
+ tree (*eval)(tree);
+ /* Positive when REF is dereferenced, negative when its address is
+ taken. */
+ int deref;
+  /* Set if heuristics interpreted 'ref' as a (possibly offset) nullptr. */
+ bool ref_nullptr_p;
+ /* Set if trailing one-element arrays should be treated as flexible
+ array members. */
+ bool trail1special;
+ /* Set if valid offsets must start at zero (for declared and allocated
+ objects but not for others referenced by pointers). */
+ bool base0;
+ /* Set if REF refers to a function array parameter not declared
+ static. */
+ bool parmarray;
+};
+
+class range_query;
+
+/* Queries and caches compute_objsize results. */
+class pointer_query
+{
+ DISABLE_COPY_AND_ASSIGN (pointer_query);
+
+ /* Type of the two-level cache object defined by clients of the class
+ to have pointer SSA_NAMEs cached for speedy access. */
+ struct cache_type
+ {
+ /* 1-based indices into cache. */
+ auto_vec<unsigned> indices;
+ /* The cache itself. */
+ auto_vec<access_ref> access_refs;
+ };
+
+public:
+ /* Construct an object with the given Ranger instance. */
+ explicit pointer_query (range_query * = nullptr);
+
+ /* Retrieve the access_ref for a variable from cache if it's there. */
+ const access_ref* get_ref (tree, int = 1) const;
+
+ /* Retrieve the access_ref for a variable from cache or compute it. */
+ bool get_ref (tree, gimple *, access_ref*, int = 1);
+
+ /* Add an access_ref for the SSA_NAME to the cache. */
+ void put_ref (tree, const access_ref&, int = 1);
+
+ /* Flush the cache. */
+ void flush_cache ();
+
+ /* Dump statistics and optionally cache contents to DUMP_FILE. */
+ void dump (FILE *, bool = false);
+
+ /* A Ranger instance. May be null to use global ranges. */
+ range_query *rvals;
+
+ /* Cache performance counters. */
+ mutable unsigned hits;
+ mutable unsigned misses;
+ mutable unsigned failures;
+ mutable unsigned depth;
+ mutable unsigned max_depth;
+
+private:
+ /* Cache of SSA_NAMEs. May be null to disable caching. */
+ cache_type var_cache;
+};
+
+/* Describes a pair of references used in an access by built-in
+ functions like memcpy. */
+struct access_data
+{
+ /* Set the access to at most MAXWRITE and MAXREAD bytes, and
+ at least 1 when MINWRITE or MINREAD, respectively, is set. */
+ access_data (range_query *, gimple *, access_mode,
+ tree = NULL_TREE, bool = false,
+ tree = NULL_TREE, bool = false);
+
+ /* Set the access to at most MAXWRITE and MAXREAD bytes, and
+ at least 1 when MINWRITE or MINREAD, respectively, is set. */
+ access_data (range_query *, tree, access_mode,
+ tree = NULL_TREE, bool = false,
+ tree = NULL_TREE, bool = false);
+
+ /* Constructor helper. */
+ static void set_bound (offset_int[2], tree, bool, range_query *, gimple *);
+
+ /* Access statement. */
+ gimple *stmt;
+ /* Built-in function call. */
+ tree call;
+ /* Destination and source of the access. */
+ access_ref dst, src;
+
+ /* Range of the bound of the access: denotes that the access is at
+ least XXX_BNDRNG[0] bytes but no more than XXX_BNDRNG[1]. For
+ string functions the size of the actual access is further
+ constrained by the length of the string. */
+ offset_int dst_bndrng[2];
+ offset_int src_bndrng[2];
+
+ /* Read-only for functions like memcmp or strlen, write-only
+ for memset, read-write for memcpy or strcat. */
+ access_mode mode;
+ /* The object size type. */
+ int ostype;
+};
+
+enum size_range_flags
+ {
+ /* Set to consider zero a valid range. */
+ SR_ALLOW_ZERO = 1,
+ /* Set to use the largest subrange of a set of ranges as opposed
+ to the smallest. */
+ SR_USE_LARGEST = 2
+ };
+extern bool get_size_range (tree, tree[2], int = 0);
+extern bool get_size_range (range_query *, tree, gimple *, tree[2], int = 0);
+
+class range_query;
+extern tree gimple_call_alloc_size (gimple *, wide_int[2] = nullptr,
+ range_query * = nullptr);
+
+/* Compute the size of an object referenced by the first argument in
+   a statement given by the second argument, using the Object Size Type
+   given by the third argument. Store the result in an access_ref. */
+extern tree compute_objsize (tree, gimple *, int, access_ref *,
+ range_query * = nullptr);
+extern tree compute_objsize (tree, gimple *, int, access_ref *,
+ pointer_query *);
+inline tree compute_objsize (tree ptr, int ostype, access_ref *pref)
+{
+ return compute_objsize (ptr, nullptr, ostype, pref, (range_query *)nullptr);
+}
+
+/* Legacy/transitional API. Should not be used in new code. */
+extern tree compute_objsize (tree, gimple *, int, tree * = nullptr,
+ tree * = nullptr, range_query * = nullptr);
+inline tree compute_objsize (tree ptr, int ostype, tree *pdecl = nullptr,
+ tree *poff = nullptr, range_query *rvals = nullptr)
+{
+ return compute_objsize (ptr, nullptr, ostype, pdecl, poff, rvals);
+}
+
+/* Return the field at the constant offset. */
+extern tree field_at_offset (tree, tree, HOST_WIDE_INT,
+ HOST_WIDE_INT * = nullptr,
+ HOST_WIDE_INT * = nullptr);
+/* Return the array at the constant offset. */
+extern tree array_elt_at_offset (tree, HOST_WIDE_INT,
+ HOST_WIDE_INT * = nullptr,
+ HOST_WIDE_INT * = nullptr);
+
+/* Helper to build an array type that can be printed. */
+extern tree build_printable_array_type (tree, unsigned HOST_WIDE_INT);
+
+#endif // GCC_POINTER_QUERY_H
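As an illustration of the API above, a pass might bound a fixed-length access roughly as follows; a sketch only, with the hypothetical helper access_fits_p, using object-size type 1 (the closest surrounding subobject, as for __builtin_object_size).

    /* Return true unless LEN bytes at PTR provably overflow the
       pointed-to object.  */
    static bool
    access_fits_p (tree ptr, gimple *stmt, const offset_int &len)
    {
      access_ref aref;
      pointer_query ptr_qry;     /* Null Ranger: fall back to global ranges.  */
      if (!compute_objsize (ptr, stmt, 1, &aref, &ptr_qry))
        return true;             /* Unknown size: assume the access fits.  */
      return wi::les_p (len, aref.size_remaining ());
    }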
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/poly-int-types.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/poly-int-types.h
new file mode 100644
index 0000000..07e5da0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/poly-int-types.h
@@ -0,0 +1,103 @@
+/* Typedefs for polynomial integers used in GCC.
+ Copyright (C) 2016-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef HAVE_POLY_INT_TYPES_H
+#define HAVE_POLY_INT_TYPES_H
+
+typedef poly_int_pod<NUM_POLY_INT_COEFFS, unsigned short> poly_uint16_pod;
+typedef poly_int_pod<NUM_POLY_INT_COEFFS, HOST_WIDE_INT> poly_int64_pod;
+typedef poly_int_pod<NUM_POLY_INT_COEFFS,
+ unsigned HOST_WIDE_INT> poly_uint64_pod;
+typedef poly_int_pod<NUM_POLY_INT_COEFFS, offset_int> poly_offset_int_pod;
+typedef poly_int_pod<NUM_POLY_INT_COEFFS, wide_int> poly_wide_int_pod;
+typedef poly_int_pod<NUM_POLY_INT_COEFFS, widest_int> poly_widest_int_pod;
+
+typedef poly_int<NUM_POLY_INT_COEFFS, unsigned short> poly_uint16;
+typedef poly_int<NUM_POLY_INT_COEFFS, HOST_WIDE_INT> poly_int64;
+typedef poly_int<NUM_POLY_INT_COEFFS, unsigned HOST_WIDE_INT> poly_uint64;
+typedef poly_int<NUM_POLY_INT_COEFFS, offset_int> poly_offset_int;
+typedef poly_int<NUM_POLY_INT_COEFFS, wide_int> poly_wide_int;
+typedef poly_int<NUM_POLY_INT_COEFFS, wide_int_ref> poly_wide_int_ref;
+typedef poly_int<NUM_POLY_INT_COEFFS, widest_int> poly_widest_int;
+
+/* Divide bit quantity X by BITS_PER_UNIT and round down (towards -Inf).
+ If X is a bit size, this gives the number of whole bytes spanned by X.
+
+ This is safe because non-constant mode sizes must be a whole number
+ of bytes in size. */
+#define bits_to_bytes_round_down(X) force_align_down_and_div (X, BITS_PER_UNIT)
+
+/* Divide bit quantity X by BITS_PER_UNIT and round up (towards +Inf).
+ If X is a bit size, this gives the number of whole or partial bytes
+ spanned by X.
+
+ This is safe because non-constant mode sizes must be a whole number
+ of bytes in size. */
+#define bits_to_bytes_round_up(X) force_align_up_and_div (X, BITS_PER_UNIT)
+
+/* Return the number of bits in bit quantity X that do not belong to
+ whole bytes. This is equivalent to:
+
+ X - bits_to_bytes_round_down (X) * BITS_PER_UNIT
+
+ This is safe because non-constant mode sizes must be a whole number
+ of bytes in size. */
+#define num_trailing_bits(X) force_get_misalignment (X, BITS_PER_UNIT)
+
+/* Round bit quantity X down to the nearest byte boundary.
+
+ This is safe because non-constant mode sizes must be a whole number
+ of bytes in size. */
+#define round_down_to_byte_boundary(X) force_align_down (X, BITS_PER_UNIT)
+
+/* Round bit quantity X up to the nearest byte boundary.
+
+ This is safe because non-constant mode sizes must be a whole number
+ of bytes in size. */
+#define round_up_to_byte_boundary(X) force_align_up (X, BITS_PER_UNIT)
+
+/* Return the size of an element in a vector of size SIZE, given that
+ the vector has NELTS elements. The return value is in the same units
+ as SIZE (either bits or bytes).
+
+ to_constant () is safe in this situation because vector elements are
+ always constant-sized scalars. */
+#define vector_element_size(SIZE, NELTS) \
+ (exact_div (SIZE, NELTS).to_constant ())
+
+/* Return the number of unroll times when a vector that has NELTS1 elements
+ is unrolled to vectors that have NELTS2 elements.
+
+   to_constant () is safe in this situation because the ratio between the
+   NELTS of two vectors is always a compile-time constant. */
+#define vector_unroll_factor(NELTS1, NELTS2) \
+ (exact_div (NELTS1, NELTS2).to_constant ())
+
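+/* Illustrative sketch (editorial addition, not part of the upstream
+   header): for a 128-bit vector with 4 elements,
+   vector_element_size (128, 4) == 32, and unrolling a 16-element vector
+   to 4-element vectors gives vector_unroll_factor (16, 4) == 4. */
+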
+/* Wrapper for poly_int arguments to target macros, so that if a target
+ doesn't need polynomial-sized modes, its header file can continue to
+ treat the argument as a normal constant. This should go away once
+ macros are moved to target hooks. It shouldn't be used in other
+ contexts. */
+#if NUM_POLY_INT_COEFFS == 1
+#define MACRO_INT(X) ((X).to_constant ())
+#else
+#define MACRO_INT(X) (X)
+#endif
+
+#endif /* HAVE_POLY_INT_TYPES_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/poly-int.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/poly-int.h
new file mode 100644
index 0000000..1257145
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/poly-int.h
@@ -0,0 +1,2748 @@
+/* Polynomial integer classes.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This file provides a representation of sizes and offsets whose exact
+ values depend on certain runtime properties. The motivating example
+ is the Arm SVE ISA, in which the number of vector elements is only
+ known at runtime. See doc/poly-int.texi for more details.
+
+ Tests for poly-int.h are located in testsuite/gcc.dg/plugin,
+ since they are too expensive (in terms of binary size) to be
+ included as selftests. */
+
+#ifndef HAVE_POLY_INT_H
+#define HAVE_POLY_INT_H
+
+template<unsigned int N, typename T> struct poly_int_pod;
+template<unsigned int N, typename T> class poly_int;
+
+/* poly_coeff_traits<T> describes the properties of a poly_int
+ coefficient type T:
+
+ - poly_coeff_traits<T1>::rank is less than poly_coeff_traits<T2>::rank
+ if T1 can promote to T2. For C-like types the rank is:
+
+ (2 * number of bytes) + (unsigned ? 1 : 0)
+
+ wide_ints don't have a normal rank and so use a value of INT_MAX.
+ Any fixed-width integer should be promoted to wide_int if possible
+ and lead to an error otherwise.
+
+ - poly_coeff_traits<T>::int_type is the type to which an integer
+ literal should be cast before comparing it with T.
+
+ - poly_coeff_traits<T>::precision is the number of bits that T can hold.
+
+ - poly_coeff_traits<T>::signedness is:
+ 0 if T is unsigned
+ 1 if T is signed
+ -1 if T has no inherent sign (as for wide_int).
+
+ - poly_coeff_traits<T>::max_value, if defined, is the maximum value of T.
+
+ - poly_coeff_traits<T>::result is a type that can hold results of
+ operations on T. This is different from T itself in cases where T
+ is the result of an accessor like wi::to_offset. */
+template<typename T, wi::precision_type = wi::int_traits<T>::precision_type>
+struct poly_coeff_traits;
+
+template<typename T>
+struct poly_coeff_traits<T, wi::FLEXIBLE_PRECISION>
+{
+ typedef T result;
+ typedef T int_type;
+ static const int signedness = (T (0) >= T (-1));
+ static const int precision = sizeof (T) * CHAR_BIT;
+ static const T max_value = (signedness
+ ? ((T (1) << (precision - 2))
+ + ((T (1) << (precision - 2)) - 1))
+ : T (-1));
+ static const int rank = sizeof (T) * 2 + !signedness;
+};
+
+template<typename T>
+struct poly_coeff_traits<T, wi::VAR_PRECISION>
+{
+ typedef T result;
+ typedef int int_type;
+ static const int signedness = -1;
+ static const int precision = WIDE_INT_MAX_PRECISION;
+ static const int rank = INT_MAX;
+};
+
+template<typename T>
+struct poly_coeff_traits<T, wi::CONST_PRECISION>
+{
+ typedef WI_UNARY_RESULT (T) result;
+ typedef int int_type;
+ /* These types are always signed. */
+ static const int signedness = 1;
+ static const int precision = wi::int_traits<T>::precision;
+ static const int rank = precision * 2 / CHAR_BIT;
+};
+
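+/* Illustrative sketch (editorial addition, not part of the upstream
+   header): for C-like coefficient types the rank formula above gives,
+   on a typical LP64 host:
+
+     int32_t --> 2 * 4 + 0 == 8     uint32_t --> 2 * 4 + 1 == 9
+     int64_t --> 2 * 8 + 0 == 16    uint64_t --> 2 * 8 + 1 == 17
+
+   so HOST_WIDE_INT has rank 16 and unsigned HOST_WIDE_INT rank 17,
+   while wide_int-style types use INT_MAX and always dominate. */
+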
+/* Information about a pair of coefficient types. */
+template<typename T1, typename T2>
+struct poly_coeff_pair_traits
+{
+ /* True if T1 can represent all the values of T2.
+
+ Either:
+
+ - T1 should be a type with the same signedness as T2 and no less
+ precision. This allows things like int16_t -> int16_t and
+ uint32_t -> uint64_t.
+
+ - T1 should be signed, T2 should be unsigned, and T1 should be
+ wider than T2. This allows things like uint16_t -> int32_t.
+
+ This rules out cases in which T1 has less precision than T2 or where
+ the conversion would reinterpret the top bit. E.g. int16_t -> uint32_t
+ can be dangerous and should have an explicit cast if deliberate. */
+ static const bool lossless_p = (poly_coeff_traits<T1>::signedness
+ == poly_coeff_traits<T2>::signedness
+ ? (poly_coeff_traits<T1>::precision
+ >= poly_coeff_traits<T2>::precision)
+ : (poly_coeff_traits<T1>::signedness == 1
+ && poly_coeff_traits<T2>::signedness == 0
+ && (poly_coeff_traits<T1>::precision
+ > poly_coeff_traits<T2>::precision)));
+
+ /* 0 if T1 op T2 should promote to HOST_WIDE_INT,
+ 1 if T1 op T2 should promote to unsigned HOST_WIDE_INT,
+ 2 if T1 op T2 should use wide-int rules. */
+#define RANK(X) poly_coeff_traits<X>::rank
+ static const int result_kind
+ = ((RANK (T1) <= RANK (HOST_WIDE_INT)
+ && RANK (T2) <= RANK (HOST_WIDE_INT))
+ ? 0
+ : (RANK (T1) <= RANK (unsigned HOST_WIDE_INT)
+ && RANK (T2) <= RANK (unsigned HOST_WIDE_INT))
+ ? 1 : 2);
+#undef RANK
+};
+
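+/* Illustrative sketch (editorial addition, not part of the upstream
+   header): using the ranks above, result_kind resolves as follows on a
+   typical LP64 host:
+
+     int op long          --> 0 (promote to HOST_WIDE_INT)
+     int op uint64_t      --> 1 (promote to unsigned HOST_WIDE_INT)
+     offset_int op int    --> 2 (use wide-int rules)  */
+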
+/* SFINAE class that makes T3 available as "type" if T1 can represent
+   all the values in T2 (see poly_coeff_pair_traits::lossless_p). */
+template<typename T1, typename T2, typename T3,
+ bool lossless_p = poly_coeff_pair_traits<T1, T2>::lossless_p>
+struct if_lossless;
+template<typename T1, typename T2, typename T3>
+struct if_lossless<T1, T2, T3, true>
+{
+ typedef T3 type;
+};
+
+/* poly_int_traits<T> describes an integer type T that might be polynomial
+ or non-polynomial:
+
+ - poly_int_traits<T>::is_poly is true if T is a poly_int-based type
+ and false otherwise.
+
+ - poly_int_traits<T>::num_coeffs gives the number of coefficients in T
+ if T is a poly_int and 1 otherwise.
+
+   - poly_int_traits<T>::coeff_type gives the coefficient type of T if T
+     is a poly_int and T itself otherwise.
+
+ - poly_int_traits<T>::int_type is a shorthand for
+ typename poly_coeff_traits<coeff_type>::int_type. */
+template<typename T>
+struct poly_int_traits
+{
+ static const bool is_poly = false;
+ static const unsigned int num_coeffs = 1;
+ typedef T coeff_type;
+ typedef typename poly_coeff_traits<T>::int_type int_type;
+};
+template<unsigned int N, typename C>
+struct poly_int_traits<poly_int_pod<N, C> >
+{
+ static const bool is_poly = true;
+ static const unsigned int num_coeffs = N;
+ typedef C coeff_type;
+ typedef typename poly_coeff_traits<C>::int_type int_type;
+};
+template<unsigned int N, typename C>
+struct poly_int_traits<poly_int<N, C> > : poly_int_traits<poly_int_pod<N, C> >
+{
+};
+
+/* SFINAE class that makes T2 available as "type" if T1 is a non-polynomial
+ type. */
+template<typename T1, typename T2 = T1,
+ bool is_poly = poly_int_traits<T1>::is_poly>
+struct if_nonpoly {};
+template<typename T1, typename T2>
+struct if_nonpoly<T1, T2, false>
+{
+ typedef T2 type;
+};
+
+/* SFINAE class that makes T3 available as "type" if both T1 and T2 are
+ non-polynomial types. */
+template<typename T1, typename T2, typename T3,
+ bool is_poly1 = poly_int_traits<T1>::is_poly,
+ bool is_poly2 = poly_int_traits<T2>::is_poly>
+struct if_nonpoly2 {};
+template<typename T1, typename T2, typename T3>
+struct if_nonpoly2<T1, T2, T3, false, false>
+{
+ typedef T3 type;
+};
+
+/* SFINAE class that makes T2 available as "type" if T1 is a polynomial
+ type. */
+template<typename T1, typename T2 = T1,
+ bool is_poly = poly_int_traits<T1>::is_poly>
+struct if_poly {};
+template<typename T1, typename T2>
+struct if_poly<T1, T2, true>
+{
+ typedef T2 type;
+};
+
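+/* Illustrative sketch (editorial addition, not part of the upstream
+   header): these helpers gate overloads by SFINAE. For example,
+   if_nonpoly<int, bool>::type is bool, whereas if_nonpoly<poly_int64,
+   bool> has no "type" member, so a signature written as
+   typename if_nonpoly<T, bool>::type silently drops out of overload
+   resolution when T is a poly_int. */
+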
+/* poly_result<T1, T2> describes the result of an operation on two
+ types T1 and T2, where at least one of the types is polynomial:
+
+ - poly_result<T1, T2>::type gives the result type for the operation.
+ The intention is to provide normal C-like rules for integer ranks,
+ except that everything smaller than HOST_WIDE_INT promotes to
+ HOST_WIDE_INT.
+
+ - poly_result<T1, T2>::cast is the type to which an operand of type
+ T1 should be cast before doing the operation, to ensure that
+ the operation is done at the right precision. Casting to
+ poly_result<T1, T2>::type would also work, but casting to this
+ type is more efficient. */
+template<typename T1, typename T2 = T1,
+ int result_kind = poly_coeff_pair_traits<T1, T2>::result_kind>
+struct poly_result;
+
+/* Promote pair to HOST_WIDE_INT. */
+template<typename T1, typename T2>
+struct poly_result<T1, T2, 0>
+{
+ typedef HOST_WIDE_INT type;
+  /* T1 and T2 are primitive types, so cast values to the result type
+     before operating on them. */
+ typedef type cast;
+};
+
+/* Promote pair to unsigned HOST_WIDE_INT. */
+template<typename T1, typename T2>
+struct poly_result<T1, T2, 1>
+{
+ typedef unsigned HOST_WIDE_INT type;
+  /* T1 and T2 are primitive types, so cast values to the result type
+     before operating on them. */
+ typedef type cast;
+};
+
+/* Use normal wide-int rules. */
+template<typename T1, typename T2>
+struct poly_result<T1, T2, 2>
+{
+ typedef WI_BINARY_RESULT (T1, T2) type;
+ /* Don't cast values before operating on them; leave the wi:: routines
+ to handle promotion as necessary. */
+ typedef const T1 &cast;
+};
+
+/* The coefficient type for the result of a binary operation on two
+ poly_ints, the first of which has coefficients of type C1 and the
+ second of which has coefficients of type C2. */
+#define POLY_POLY_COEFF(C1, C2) typename poly_result<C1, C2>::type
+
+/* Enforce that T2 is non-polynomial and provide the coefficient type of
+ the result of a binary operation in which the first operand is a
+ poly_int with coefficients of type C1 and the second operand is
+ a constant of type T2. */
+#define POLY_CONST_COEFF(C1, T2) \
+ POLY_POLY_COEFF (C1, typename if_nonpoly<T2>::type)
+
+/* Likewise in reverse. */
+#define CONST_POLY_COEFF(T1, C2) \
+ POLY_POLY_COEFF (typename if_nonpoly<T1>::type, C2)
+
+/* The result type for a binary operation on poly_int<N, C1> and
+ poly_int<N, C2>. */
+#define POLY_POLY_RESULT(N, C1, C2) poly_int<N, POLY_POLY_COEFF (C1, C2)>
+
+/* Enforce that T2 is non-polynomial and provide the result type
+ for a binary operation on poly_int<N, C1> and T2. */
+#define POLY_CONST_RESULT(N, C1, T2) poly_int<N, POLY_CONST_COEFF (C1, T2)>
+
+/* Enforce that T1 is non-polynomial and provide the result type
+ for a binary operation on T1 and poly_int<N, C2>. */
+#define CONST_POLY_RESULT(N, T1, C2) poly_int<N, CONST_POLY_COEFF (T1, C2)>
+
+/* Enforce that T1 and T2 are non-polynomial and provide the result type
+ for a binary operation on T1 and T2. */
+#define CONST_CONST_RESULT(N, T1, T2) \
+ POLY_POLY_COEFF (typename if_nonpoly<T1>::type, \
+ typename if_nonpoly<T2>::type)
+
+/* The type to which a coefficient of type C1 should be cast before
+ using it in a binary operation with a coefficient of type C2. */
+#define POLY_CAST(C1, C2) typename poly_result<C1, C2>::cast
+
+/* Provide the coefficient type for the result of T1 op T2, where T1
+ and T2 can be polynomial or non-polynomial. */
+#define POLY_BINARY_COEFF(T1, T2) \
+ typename poly_result<typename poly_int_traits<T1>::coeff_type, \
+ typename poly_int_traits<T2>::coeff_type>::type
+
+/* The type to which an integer constant should be cast before
+ comparing it with T. */
+#define POLY_INT_TYPE(T) typename poly_int_traits<T>::int_type
+
+/* RES is a poly_int result that has coefficients of type C and that
+ is being built up a coefficient at a time. Set coefficient number I
+ to VALUE in the most efficient way possible.
+
+ For primitive C it is better to assign directly, since it avoids
+ any further calls and so is more efficient when the compiler is
+ built at -O0. But for wide-int based C it is better to construct
+ the value in-place. This means that calls out to a wide-int.cc
+ routine can take the address of RES rather than the address of
+ a temporary.
+
+ The dummy self-comparison against C * is just a way of checking
+ that C gives the right type. */
+#define POLY_SET_COEFF(C, RES, I, VALUE) \
+ ((void) (&(RES).coeffs[0] == (C *) (void *) &(RES).coeffs[0]), \
+ wi::int_traits<C>::precision_type == wi::FLEXIBLE_PRECISION \
+ ? (void) ((RES).coeffs[I] = VALUE) \
+ : (void) ((RES).coeffs[I].~C (), new (&(RES).coeffs[I]) C (VALUE)))
+
+/* A base POD class for polynomial integers. The polynomial has N
+ coefficients of type C. */
+template<unsigned int N, typename C>
+struct poly_int_pod
+{
+public:
+ template<typename Ca>
+ poly_int_pod &operator = (const poly_int_pod<N, Ca> &);
+ template<typename Ca>
+ typename if_nonpoly<Ca, poly_int_pod>::type &operator = (const Ca &);
+
+ template<typename Ca>
+ poly_int_pod &operator += (const poly_int_pod<N, Ca> &);
+ template<typename Ca>
+ typename if_nonpoly<Ca, poly_int_pod>::type &operator += (const Ca &);
+
+ template<typename Ca>
+ poly_int_pod &operator -= (const poly_int_pod<N, Ca> &);
+ template<typename Ca>
+ typename if_nonpoly<Ca, poly_int_pod>::type &operator -= (const Ca &);
+
+ template<typename Ca>
+ typename if_nonpoly<Ca, poly_int_pod>::type &operator *= (const Ca &);
+
+ poly_int_pod &operator <<= (unsigned int);
+
+ bool is_constant () const;
+
+ template<typename T>
+ typename if_lossless<T, C, bool>::type is_constant (T *) const;
+
+ C to_constant () const;
+
+ template<typename Ca>
+ static poly_int<N, C> from (const poly_int_pod<N, Ca> &, unsigned int,
+ signop);
+ template<typename Ca>
+ static poly_int<N, C> from (const poly_int_pod<N, Ca> &, signop);
+
+ bool to_shwi (poly_int_pod<N, HOST_WIDE_INT> *) const;
+ bool to_uhwi (poly_int_pod<N, unsigned HOST_WIDE_INT> *) const;
+ poly_int<N, HOST_WIDE_INT> force_shwi () const;
+ poly_int<N, unsigned HOST_WIDE_INT> force_uhwi () const;
+
+#if POLY_INT_CONVERSION
+ operator C () const;
+#endif
+
+ C coeffs[N];
+};
+
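+/* Illustrative sketch (editorial addition, not part of the upstream
+   header): with NUM_POLY_INT_COEFFS == 2, as for AArch64 SVE, the value
+   4 + 6x is written poly_int64 (4, 6), where x is the runtime
+   indeterminate:
+
+     poly_int64 p (4, 6);
+     p.is_constant ();    // false: coefficient 1 is nonzero
+     poly_int64 q (7);
+     q.to_constant ();    // 7; q.is_constant () is true
+     p += q;              // p is now 11 + 6x
+  */
+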
+template<unsigned int N, typename C>
+template<typename Ca>
+inline poly_int_pod<N, C>&
+poly_int_pod<N, C>::operator = (const poly_int_pod<N, Ca> &a)
+{
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, *this, i, a.coeffs[i]);
+ return *this;
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::type &
+poly_int_pod<N, C>::operator = (const Ca &a)
+{
+ POLY_SET_COEFF (C, *this, 0, a);
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (C, *this, i, wi::ints_for<C>::zero (this->coeffs[0]));
+ return *this;
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline poly_int_pod<N, C>&
+poly_int_pod<N, C>::operator += (const poly_int_pod<N, Ca> &a)
+{
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] += a.coeffs[i];
+ return *this;
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::type &
+poly_int_pod<N, C>::operator += (const Ca &a)
+{
+ this->coeffs[0] += a;
+ return *this;
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline poly_int_pod<N, C>&
+poly_int_pod<N, C>::operator -= (const poly_int_pod<N, Ca> &a)
+{
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] -= a.coeffs[i];
+ return *this;
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::type &
+poly_int_pod<N, C>::operator -= (const Ca &a)
+{
+ this->coeffs[0] -= a;
+ return *this;
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline typename if_nonpoly<Ca, poly_int_pod<N, C> >::type &
+poly_int_pod<N, C>::operator *= (const Ca &a)
+{
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] *= a;
+ return *this;
+}
+
+template<unsigned int N, typename C>
+inline poly_int_pod<N, C>&
+poly_int_pod<N, C>::operator <<= (unsigned int a)
+{
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] <<= a;
+ return *this;
+}
+
+/* Return true if the polynomial value is a compile-time constant. */
+
+template<unsigned int N, typename C>
+inline bool
+poly_int_pod<N, C>::is_constant () const
+{
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ if (this->coeffs[i] != 0)
+ return false;
+ return true;
+}
+
+/* Return true if the polynomial value is a compile-time constant,
+ storing its value in CONST_VALUE if so. */
+
+template<unsigned int N, typename C>
+template<typename T>
+inline typename if_lossless<T, C, bool>::type
+poly_int_pod<N, C>::is_constant (T *const_value) const
+{
+ if (is_constant ())
+ {
+ *const_value = this->coeffs[0];
+ return true;
+ }
+ return false;
+}
+
+/* Return the value of a polynomial that is already known to be a
+ compile-time constant.
+
+ NOTE: When using this function, please add a comment above the call
+ explaining why we know the value is constant in that context. */
+
+template<unsigned int N, typename C>
+inline C
+poly_int_pod<N, C>::to_constant () const
+{
+ gcc_checking_assert (is_constant ());
+ return this->coeffs[0];
+}
+
+/* Convert X to a wide_int-based polynomial in which each coefficient
+ has BITSIZE bits. If X's coefficients are smaller than BITSIZE,
+ extend them according to SGN. */
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline poly_int<N, C>
+poly_int_pod<N, C>::from (const poly_int_pod<N, Ca> &a,
+ unsigned int bitsize, signop sgn)
+{
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, C::from (a.coeffs[i], bitsize, sgn));
+ return r;
+}
+
+/* Convert X to a fixed_wide_int-based polynomial, extending according
+ to SGN. */
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline poly_int<N, C>
+poly_int_pod<N, C>::from (const poly_int_pod<N, Ca> &a, signop sgn)
+{
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, C::from (a.coeffs[i], sgn));
+ return r;
+}
+
+/* Return true if the coefficients of this generic_wide_int-based
+ polynomial can be represented as signed HOST_WIDE_INTs without loss
+ of precision. Store the HOST_WIDE_INT representation in *R if so. */
+
+template<unsigned int N, typename C>
+inline bool
+poly_int_pod<N, C>::to_shwi (poly_int_pod<N, HOST_WIDE_INT> *r) const
+{
+ for (unsigned int i = 0; i < N; i++)
+ if (!wi::fits_shwi_p (this->coeffs[i]))
+ return false;
+ for (unsigned int i = 0; i < N; i++)
+ r->coeffs[i] = this->coeffs[i].to_shwi ();
+ return true;
+}
+
+/* Return true if the coefficients of this generic_wide_int-based
+ polynomial can be represented as unsigned HOST_WIDE_INTs without
+ loss of precision. Store the unsigned HOST_WIDE_INT representation
+ in *R if so. */
+
+template<unsigned int N, typename C>
+inline bool
+poly_int_pod<N, C>::to_uhwi (poly_int_pod<N, unsigned HOST_WIDE_INT> *r) const
+{
+ for (unsigned int i = 0; i < N; i++)
+ if (!wi::fits_uhwi_p (this->coeffs[i]))
+ return false;
+ for (unsigned int i = 0; i < N; i++)
+ r->coeffs[i] = this->coeffs[i].to_uhwi ();
+ return true;
+}
+
+/* Force a generic_wide_int-based constant to HOST_WIDE_INT precision,
+ truncating if necessary. */
+
+template<unsigned int N, typename C>
+inline poly_int<N, HOST_WIDE_INT>
+poly_int_pod<N, C>::force_shwi () const
+{
+ poly_int_pod<N, HOST_WIDE_INT> r;
+ for (unsigned int i = 0; i < N; i++)
+ r.coeffs[i] = this->coeffs[i].to_shwi ();
+ return r;
+}
+
+/* Force a generic_wide_int-based constant to unsigned HOST_WIDE_INT precision,
+ truncating if necessary. */
+
+template<unsigned int N, typename C>
+inline poly_int<N, unsigned HOST_WIDE_INT>
+poly_int_pod<N, C>::force_uhwi () const
+{
+ poly_int_pod<N, unsigned HOST_WIDE_INT> r;
+ for (unsigned int i = 0; i < N; i++)
+ r.coeffs[i] = this->coeffs[i].to_uhwi ();
+ return r;
+}
+
+#if POLY_INT_CONVERSION
+/* Provide a conversion operator to constants. */
+
+template<unsigned int N, typename C>
+inline
+poly_int_pod<N, C>::operator C () const
+{
+ gcc_checking_assert (this->is_constant ());
+ return this->coeffs[0];
+}
+#endif
+
+/* The main class for polynomial integers. The class provides
+ constructors that are necessarily missing from the POD base. */
+template<unsigned int N, typename C>
+class poly_int : public poly_int_pod<N, C>
+{
+public:
+ poly_int () {}
+
+ template<typename Ca>
+ poly_int (const poly_int<N, Ca> &);
+ template<typename Ca>
+ poly_int (const poly_int_pod<N, Ca> &);
+ template<typename C0>
+ poly_int (const C0 &);
+ template<typename C0, typename C1>
+ poly_int (const C0 &, const C1 &);
+
+ template<typename Ca>
+ poly_int &operator = (const poly_int_pod<N, Ca> &);
+ template<typename Ca>
+ typename if_nonpoly<Ca, poly_int>::type &operator = (const Ca &);
+
+ template<typename Ca>
+ poly_int &operator += (const poly_int_pod<N, Ca> &);
+ template<typename Ca>
+ typename if_nonpoly<Ca, poly_int>::type &operator += (const Ca &);
+
+ template<typename Ca>
+ poly_int &operator -= (const poly_int_pod<N, Ca> &);
+ template<typename Ca>
+ typename if_nonpoly<Ca, poly_int>::type &operator -= (const Ca &);
+
+ template<typename Ca>
+ typename if_nonpoly<Ca, poly_int>::type &operator *= (const Ca &);
+
+ poly_int &operator <<= (unsigned int);
+};
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline
+poly_int<N, C>::poly_int (const poly_int<N, Ca> &a)
+{
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, *this, i, a.coeffs[i]);
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline
+poly_int<N, C>::poly_int (const poly_int_pod<N, Ca> &a)
+{
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, *this, i, a.coeffs[i]);
+}
+
+template<unsigned int N, typename C>
+template<typename C0>
+inline
+poly_int<N, C>::poly_int (const C0 &c0)
+{
+ POLY_SET_COEFF (C, *this, 0, c0);
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (C, *this, i, wi::ints_for<C>::zero (this->coeffs[0]));
+}
+
+template<unsigned int N, typename C>
+template<typename C0, typename C1>
+inline
+poly_int<N, C>::poly_int (const C0 &c0, const C1 &c1)
+{
+ STATIC_ASSERT (N >= 2);
+ POLY_SET_COEFF (C, *this, 0, c0);
+ POLY_SET_COEFF (C, *this, 1, c1);
+ for (unsigned int i = 2; i < N; i++)
+ POLY_SET_COEFF (C, *this, i, wi::ints_for<C>::zero (this->coeffs[0]));
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline poly_int<N, C>&
+poly_int<N, C>::operator = (const poly_int_pod<N, Ca> &a)
+{
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] = a.coeffs[i];
+ return *this;
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
+poly_int<N, C>::operator = (const Ca &a)
+{
+ this->coeffs[0] = a;
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ this->coeffs[i] = wi::ints_for<C>::zero (this->coeffs[0]);
+ return *this;
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline poly_int<N, C>&
+poly_int<N, C>::operator += (const poly_int_pod<N, Ca> &a)
+{
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] += a.coeffs[i];
+ return *this;
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
+poly_int<N, C>::operator += (const Ca &a)
+{
+ this->coeffs[0] += a;
+ return *this;
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline poly_int<N, C>&
+poly_int<N, C>::operator -= (const poly_int_pod<N, Ca> &a)
+{
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] -= a.coeffs[i];
+ return *this;
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
+poly_int<N, C>::operator -= (const Ca &a)
+{
+ this->coeffs[0] -= a;
+ return *this;
+}
+
+template<unsigned int N, typename C>
+template<typename Ca>
+inline typename if_nonpoly<Ca, poly_int<N, C> >::type &
+poly_int<N, C>::operator *= (const Ca &a)
+{
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] *= a;
+ return *this;
+}
+
+template<unsigned int N, typename C>
+inline poly_int<N, C>&
+poly_int<N, C>::operator <<= (unsigned int a)
+{
+ for (unsigned int i = 0; i < N; i++)
+ this->coeffs[i] <<= a;
+ return *this;
+}
+
+/* Return true if every coefficient of A is in the inclusive range [B, C]. */
+
+template<typename Ca, typename Cb, typename Cc>
+inline typename if_nonpoly<Ca, bool>::type
+coeffs_in_range_p (const Ca &a, const Cb &b, const Cc &c)
+{
+ return a >= b && a <= c;
+}
+
+template<unsigned int N, typename Ca, typename Cb, typename Cc>
+inline typename if_nonpoly<Ca, bool>::type
+coeffs_in_range_p (const poly_int_pod<N, Ca> &a, const Cb &b, const Cc &c)
+{
+ for (unsigned int i = 0; i < N; i++)
+ if (a.coeffs[i] < b || a.coeffs[i] > c)
+ return false;
+ return true;
+}
+
+namespace wi {
+/* Poly version of wi::shwi, with the same interface. */
+
+template<unsigned int N>
+inline poly_int<N, hwi_with_prec>
+shwi (const poly_int_pod<N, HOST_WIDE_INT> &a, unsigned int precision)
+{
+ poly_int<N, hwi_with_prec> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (hwi_with_prec, r, i, wi::shwi (a.coeffs[i], precision));
+ return r;
+}
+
+/* Poly version of wi::uhwi, with the same interface. */
+
+template<unsigned int N>
+inline poly_int<N, hwi_with_prec>
+uhwi (const poly_int_pod<N, unsigned HOST_WIDE_INT> &a, unsigned int precision)
+{
+ poly_int<N, hwi_with_prec> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (hwi_with_prec, r, i, wi::uhwi (a.coeffs[i], precision));
+ return r;
+}
+
+/* Poly version of wi::sext, with the same interface. */
+
+template<unsigned int N, typename Ca>
+inline POLY_POLY_RESULT (N, Ca, Ca)
+sext (const poly_int_pod<N, Ca> &a, unsigned int precision)
+{
+ typedef POLY_POLY_COEFF (Ca, Ca) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, wi::sext (a.coeffs[i], precision));
+ return r;
+}
+
+/* Poly version of wi::zext, with the same interface. */
+
+template<unsigned int N, typename Ca>
+inline POLY_POLY_RESULT (N, Ca, Ca)
+zext (const poly_int_pod<N, Ca> &a, unsigned int precision)
+{
+ typedef POLY_POLY_COEFF (Ca, Ca) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, wi::zext (a.coeffs[i], precision));
+ return r;
+}
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_POLY_RESULT (N, Ca, Cb)
+operator + (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_POLY_COEFF (Ca, Cb) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]) + b.coeffs[i]);
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_CONST_RESULT (N, Ca, Cb)
+operator + (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CONST_COEFF (Ca, Cb) C;
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, NCa (a.coeffs[0]) + b);
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]));
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline CONST_POLY_RESULT (N, Ca, Cb)
+operator + (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ typedef POLY_CAST (Cb, Ca) NCb;
+ typedef CONST_POLY_COEFF (Ca, Cb) C;
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, a + NCb (b.coeffs[0]));
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (C, r, i, NCb (b.coeffs[i]));
+ return r;
+}
+
+namespace wi {
+/* Poly versions of wi::add, with the same interface. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
+add (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ typedef WI_BINARY_RESULT (Ca, Cb) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, wi::add (a.coeffs[i], b.coeffs[i]));
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
+add (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ typedef WI_BINARY_RESULT (Ca, Cb) C;
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, wi::add (a.coeffs[0], b));
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (C, r, i, wi::add (a.coeffs[i],
+ wi::ints_for<Cb>::zero (b)));
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
+add (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ typedef WI_BINARY_RESULT (Ca, Cb) C;
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, wi::add (a, b.coeffs[0]));
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (C, r, i, wi::add (wi::ints_for<Ca>::zero (a),
+ b.coeffs[i]));
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
+add (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b,
+ signop sgn, wi::overflow_type *overflow)
+{
+ typedef WI_BINARY_RESULT (Ca, Cb) C;
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, wi::add (a.coeffs[0], b.coeffs[0], sgn, overflow));
+ for (unsigned int i = 1; i < N; i++)
+ {
+ wi::overflow_type suboverflow;
+ POLY_SET_COEFF (C, r, i, wi::add (a.coeffs[i], b.coeffs[i], sgn,
+ &suboverflow));
+ wi::accumulate_overflow (*overflow, suboverflow);
+ }
+ return r;
+}
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_POLY_RESULT (N, Ca, Cb)
+operator - (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_POLY_COEFF (Ca, Cb) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]) - b.coeffs[i]);
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_CONST_RESULT (N, Ca, Cb)
+operator - (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CONST_COEFF (Ca, Cb) C;
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, NCa (a.coeffs[0]) - b);
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]));
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline CONST_POLY_RESULT (N, Ca, Cb)
+operator - (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ typedef POLY_CAST (Cb, Ca) NCb;
+ typedef CONST_POLY_COEFF (Ca, Cb) C;
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, a - NCb (b.coeffs[0]));
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (C, r, i, -NCb (b.coeffs[i]));
+ return r;
+}
+
+namespace wi {
+/* Poly versions of wi::sub, with the same interface. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
+sub (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ typedef WI_BINARY_RESULT (Ca, Cb) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, wi::sub (a.coeffs[i], b.coeffs[i]));
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
+sub (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ typedef WI_BINARY_RESULT (Ca, Cb) C;
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, wi::sub (a.coeffs[0], b));
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (C, r, i, wi::sub (a.coeffs[i],
+ wi::ints_for<Cb>::zero (b)));
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
+sub (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ typedef WI_BINARY_RESULT (Ca, Cb) C;
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, wi::sub (a, b.coeffs[0]));
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (C, r, i, wi::sub (wi::ints_for<Ca>::zero (a),
+ b.coeffs[i]));
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
+sub (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b,
+ signop sgn, wi::overflow_type *overflow)
+{
+ typedef WI_BINARY_RESULT (Ca, Cb) C;
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, wi::sub (a.coeffs[0], b.coeffs[0], sgn, overflow));
+ for (unsigned int i = 1; i < N; i++)
+ {
+ wi::overflow_type suboverflow;
+ POLY_SET_COEFF (C, r, i, wi::sub (a.coeffs[i], b.coeffs[i], sgn,
+ &suboverflow));
+ wi::accumulate_overflow (*overflow, suboverflow);
+ }
+ return r;
+}
+}
+
+template<unsigned int N, typename Ca>
+inline POLY_POLY_RESULT (N, Ca, Ca)
+operator - (const poly_int_pod<N, Ca> &a)
+{
+ typedef POLY_CAST (Ca, Ca) NCa;
+ typedef POLY_POLY_COEFF (Ca, Ca) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, -NCa (a.coeffs[i]));
+ return r;
+}
+
+namespace wi {
+/* Poly version of wi::neg, with the same interface. */
+
+template<unsigned int N, typename Ca>
+inline poly_int<N, WI_UNARY_RESULT (Ca)>
+neg (const poly_int_pod<N, Ca> &a)
+{
+ typedef WI_UNARY_RESULT (Ca) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, wi::neg (a.coeffs[i]));
+ return r;
+}
+
+template<unsigned int N, typename Ca>
+inline poly_int<N, WI_UNARY_RESULT (Ca)>
+neg (const poly_int_pod<N, Ca> &a, wi::overflow_type *overflow)
+{
+ typedef WI_UNARY_RESULT (Ca) C;
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, wi::neg (a.coeffs[0], overflow));
+ for (unsigned int i = 1; i < N; i++)
+ {
+ wi::overflow_type suboverflow;
+ POLY_SET_COEFF (C, r, i, wi::neg (a.coeffs[i], &suboverflow));
+ wi::accumulate_overflow (*overflow, suboverflow);
+ }
+ return r;
+}
+}
+
+template<unsigned int N, typename Ca>
+inline POLY_POLY_RESULT (N, Ca, Ca)
+operator ~ (const poly_int_pod<N, Ca> &a)
+{
+ if (N >= 2)
+ return -1 - a;
+ return ~a.coeffs[0];
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_CONST_RESULT (N, Ca, Cb)
+operator * (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CONST_COEFF (Ca, Cb) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]) * b);
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline CONST_POLY_RESULT (N, Ca, Cb)
+operator * (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef CONST_POLY_COEFF (Ca, Cb) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, NCa (a) * b.coeffs[i]);
+ return r;
+}
+
+namespace wi {
+/* Poly versions of wi::mul, with the same interface. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
+mul (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ typedef WI_BINARY_RESULT (Ca, Cb) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, wi::mul (a.coeffs[i], b));
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
+mul (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ typedef WI_BINARY_RESULT (Ca, Cb) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, wi::mul (a, b.coeffs[i]));
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
+mul (const poly_int_pod<N, Ca> &a, const Cb &b,
+ signop sgn, wi::overflow_type *overflow)
+{
+ typedef WI_BINARY_RESULT (Ca, Cb) C;
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, wi::mul (a.coeffs[0], b, sgn, overflow));
+ for (unsigned int i = 1; i < N; i++)
+ {
+ wi::overflow_type suboverflow;
+ POLY_SET_COEFF (C, r, i, wi::mul (a.coeffs[i], b, sgn, &suboverflow));
+ wi::accumulate_overflow (*overflow, suboverflow);
+ }
+ return r;
+}
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_POLY_RESULT (N, Ca, Ca)
+operator << (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ typedef POLY_CAST (Ca, Ca) NCa;
+ typedef POLY_POLY_COEFF (Ca, Ca) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, NCa (a.coeffs[i]) << b);
+ return r;
+}
+
+namespace wi {
+/* Poly version of wi::lshift, with the same interface. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, WI_BINARY_RESULT (Ca, Ca)>
+lshift (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ typedef WI_BINARY_RESULT (Ca, Ca) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, wi::lshift (a.coeffs[i], b));
+ return r;
+}
+}
+
+/* Poly version of sext_hwi, with the same interface. */
+
+template<unsigned int N, typename C>
+inline poly_int<N, HOST_WIDE_INT>
+sext_hwi (const poly_int<N, C> &a, unsigned int precision)
+{
+ poly_int_pod<N, HOST_WIDE_INT> r;
+ for (unsigned int i = 0; i < N; i++)
+ r.coeffs[i] = sext_hwi (a.coeffs[i], precision);
+ return r;
+}
+
+/* Return true if a0 + a1 * x might equal b0 + b1 * x for some nonnegative
+ integer x. */
+
+template<typename Ca, typename Cb>
+inline bool
+maybe_eq_2 (const Ca &a0, const Ca &a1, const Cb &b0, const Cb &b1)
+{
+ if (a1 != b1)
+ /* a0 + a1 * x == b0 + b1 * x
+ ==> (a1 - b1) * x == b0 - a0
+ ==> x == (b0 - a0) / (a1 - b1)
+
+ We need to test whether that's a valid value of x.
+ (b0 - a0) and (a1 - b1) must not have opposite signs
+ and the result must be integral. */
+ return (a1 < b1
+ ? b0 <= a0 && (a0 - b0) % (b1 - a1) == 0
+ : b0 >= a0 && (b0 - a0) % (a1 - b1) == 0);
+ return a0 == b0;
+}
+
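+/* Illustrative sketch (editorial addition, not part of the upstream
+   header): for a = 1 + 4x and b = 7 + 2x, a1 > b1, so the test is
+   b0 >= a0 && (b0 - a0) % (a1 - b1) == 0, i.e. 7 >= 1 && 6 % 2 == 0,
+   which holds: the two values meet at x == 3, where both equal 13. */
+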
+/* Return true if a0 + a1 * x might equal b for some nonnegative
+ integer x. */
+
+template<typename Ca, typename Cb>
+inline bool
+maybe_eq_2 (const Ca &a0, const Ca &a1, const Cb &b)
+{
+ if (a1 != 0)
+ /* a0 + a1 * x == b
+ ==> x == (b - a0) / a1
+
+ We need to test whether that's a valid value of x.
+ (b - a0) and a1 must not have opposite signs and the
+ result must be integral. */
+ return (a1 < 0
+ ? b <= a0 && (a0 - b) % a1 == 0
+ : b >= a0 && (b - a0) % a1 == 0);
+ return a0 == b;
+}
+
+/* Return true if A might equal B for some indeterminate values. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline bool
+maybe_eq (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ STATIC_ASSERT (N <= 2);
+ if (N == 2)
+ return maybe_eq_2 (a.coeffs[0], a.coeffs[1], b.coeffs[0], b.coeffs[1]);
+ return a.coeffs[0] == b.coeffs[0];
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline typename if_nonpoly<Cb, bool>::type
+maybe_eq (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ STATIC_ASSERT (N <= 2);
+ if (N == 2)
+ return maybe_eq_2 (a.coeffs[0], a.coeffs[1], b);
+ return a.coeffs[0] == b;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline typename if_nonpoly<Ca, bool>::type
+maybe_eq (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ STATIC_ASSERT (N <= 2);
+ if (N == 2)
+ return maybe_eq_2 (b.coeffs[0], b.coeffs[1], a);
+ return a == b.coeffs[0];
+}
+
+template<typename Ca, typename Cb>
+inline typename if_nonpoly2<Ca, Cb, bool>::type
+maybe_eq (const Ca &a, const Cb &b)
+{
+ return a == b;
+}
+
+/* Return true if A might not equal B for some indeterminate values. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline bool
+maybe_ne (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ if (a.coeffs[i] != b.coeffs[i])
+ return true;
+ return a.coeffs[0] != b.coeffs[0];
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline typename if_nonpoly<Cb, bool>::type
+maybe_ne (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ if (a.coeffs[i] != 0)
+ return true;
+ return a.coeffs[0] != b;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline typename if_nonpoly<Ca, bool>::type
+maybe_ne (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ if (b.coeffs[i] != 0)
+ return true;
+ return a != b.coeffs[0];
+}
+
+template<typename Ca, typename Cb>
+inline typename if_nonpoly2<Ca, Cb, bool>::type
+maybe_ne (const Ca &a, const Cb &b)
+{
+ return a != b;
+}
+
+/* Return true if A is known to be equal to B. */
+#define known_eq(A, B) (!maybe_ne (A, B))
+
+/* Return true if A is known to be unequal to B. */
+#define known_ne(A, B) (!maybe_eq (A, B))
+
+/* Return true if A might be less than or equal to B for some
+ indeterminate values. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline bool
+maybe_le (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ if (a.coeffs[i] < b.coeffs[i])
+ return true;
+ return a.coeffs[0] <= b.coeffs[0];
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline typename if_nonpoly<Cb, bool>::type
+maybe_le (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ if (a.coeffs[i] < 0)
+ return true;
+ return a.coeffs[0] <= b;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline typename if_nonpoly<Ca, bool>::type
+maybe_le (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ if (b.coeffs[i] > 0)
+ return true;
+ return a <= b.coeffs[0];
+}
+
+template<typename Ca, typename Cb>
+inline typename if_nonpoly2<Ca, Cb, bool>::type
+maybe_le (const Ca &a, const Cb &b)
+{
+ return a <= b;
+}
+
+/* Return true if A might be less than B for some indeterminate values. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline bool
+maybe_lt (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ if (a.coeffs[i] < b.coeffs[i])
+ return true;
+ return a.coeffs[0] < b.coeffs[0];
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline typename if_nonpoly<Cb, bool>::type
+maybe_lt (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ if (a.coeffs[i] < 0)
+ return true;
+ return a.coeffs[0] < b;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline typename if_nonpoly<Ca, bool>::type
+maybe_lt (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ if (b.coeffs[i] > 0)
+ return true;
+ return a < b.coeffs[0];
+}
+
+template<typename Ca, typename Cb>
+inline typename if_nonpoly2<Ca, Cb, bool>::type
+maybe_lt (const Ca &a, const Cb &b)
+{
+ return a < b;
+}
+
+/* Return true if A may be greater than or equal to B. */
+#define maybe_ge(A, B) maybe_le (B, A)
+
+/* Return true if A may be greater than B. */
+#define maybe_gt(A, B) maybe_lt (B, A)
+
+/* Return true if A is known to be less than or equal to B. */
+#define known_le(A, B) (!maybe_gt (A, B))
+
+/* Return true if A is known to be less than B. */
+#define known_lt(A, B) (!maybe_ge (A, B))
+
+/* Return true if A is known to be greater than B. */
+#define known_gt(A, B) (!maybe_le (A, B))
+
+/* Return true if A is known to be greater than or equal to B. */
+#define known_ge(A, B) (!maybe_lt (A, B))
+
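+/* Illustrative sketch (editorial addition, not part of the upstream
+   header): with N == 2, a = 4 + 4x and b = 6 are unordered:
+   maybe_lt (a, b) holds (x == 0 gives 4 < 6) and maybe_gt (a, b) holds
+   (x == 1 gives 8 > 6), so neither known_le nor known_ge is true.
+   By contrast, known_ge (a, 4) holds for every nonnegative x. */
+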
+/* Return true if A and B are ordered by the partial ordering known_le. */
+
+template<typename T1, typename T2>
+inline bool
+ordered_p (const T1 &a, const T2 &b)
+{
+ return ((poly_int_traits<T1>::num_coeffs == 1
+ && poly_int_traits<T2>::num_coeffs == 1)
+ || known_le (a, b)
+ || known_le (b, a));
+}
+
+/* Assert that A and B are known to be ordered and return the minimum
+ of the two.
+
+ NOTE: When using this function, please add a comment above the call
+ explaining why we know the values are ordered in that context. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_POLY_RESULT (N, Ca, Cb)
+ordered_min (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ if (known_le (a, b))
+ return a;
+ else
+ {
+ if (N > 1)
+ gcc_checking_assert (known_le (b, a));
+ return b;
+ }
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline CONST_POLY_RESULT (N, Ca, Cb)
+ordered_min (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ if (known_le (a, b))
+ return a;
+ else
+ {
+ if (N > 1)
+ gcc_checking_assert (known_le (b, a));
+ return b;
+ }
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_CONST_RESULT (N, Ca, Cb)
+ordered_min (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ if (known_le (a, b))
+ return a;
+ else
+ {
+ if (N > 1)
+ gcc_checking_assert (known_le (b, a));
+ return b;
+ }
+}
+
+/* Assert that A and B are known to be ordered and return the maximum
+ of the two.
+
+ NOTE: When using this function, please add a comment above the call
+ explaining why we know the values are ordered in that context. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_POLY_RESULT (N, Ca, Cb)
+ordered_max (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ if (known_le (a, b))
+ return b;
+ else
+ {
+ if (N > 1)
+ gcc_checking_assert (known_le (b, a));
+ return a;
+ }
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline CONST_POLY_RESULT (N, Ca, Cb)
+ordered_max (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ if (known_le (a, b))
+ return b;
+ else
+ {
+ if (N > 1)
+ gcc_checking_assert (known_le (b, a));
+ return a;
+ }
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_CONST_RESULT (N, Ca, Cb)
+ordered_max (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ if (known_le (a, b))
+ return b;
+ else
+ {
+ if (N > 1)
+ gcc_checking_assert (known_le (b, a));
+ return a;
+ }
+}
+
+/* Return a constant lower bound on the value of A, which is known
+ to be nonnegative. */
+
+template<unsigned int N, typename Ca>
+inline Ca
+constant_lower_bound (const poly_int_pod<N, Ca> &a)
+{
+ gcc_checking_assert (known_ge (a, POLY_INT_TYPE (Ca) (0)));
+ return a.coeffs[0];
+}
+
+/* Return the constant lower bound of A, given that it is no less than B. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_CONST_COEFF (Ca, Cb)
+constant_lower_bound_with_limit (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ if (known_ge (a, b))
+ return a.coeffs[0];
+ return b;
+}
+
+/* Return the constant upper bound of A, given that it is no greater
+ than B. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_CONST_COEFF (Ca, Cb)
+constant_upper_bound_with_limit (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ if (known_le (a, b))
+ return a.coeffs[0];
+ return b;
+}
+
+/* Return a value that is known to be no greater than A and B. This
+ will be the greatest lower bound for some indeterminate values but
+ not necessarily for all. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_CONST_RESULT (N, Ca, Cb)
+lower_bound (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+ typedef POLY_INT_TYPE (Cb) ICb;
+ typedef POLY_CONST_COEFF (Ca, Cb) C;
+
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, MIN (NCa (a.coeffs[0]), NCb (b)));
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (C, r, i, MIN (NCa (a.coeffs[i]), ICb (0)));
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline CONST_POLY_RESULT (N, Ca, Cb)
+lower_bound (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ return lower_bound (b, a);
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_POLY_RESULT (N, Ca, Cb)
+lower_bound (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+ typedef POLY_POLY_COEFF (Ca, Cb) C;
+
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, MIN (NCa (a.coeffs[i]), NCb (b.coeffs[i])));
+ return r;
+}
+
+template<typename Ca, typename Cb>
+inline CONST_CONST_RESULT (N, Ca, Cb)
+lower_bound (const Ca &a, const Cb &b)
+{
+ return a < b ? a : b;
+}
+
+/* Return a value that is known to be no less than A and B. This will
+ be the least upper bound for some indeterminate values but not
+ necessarily for all. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_CONST_RESULT (N, Ca, Cb)
+upper_bound (const poly_int_pod<N, Ca> &a, const Cb &b)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+ typedef POLY_INT_TYPE (Cb) ICb;
+ typedef POLY_CONST_COEFF (Ca, Cb) C;
+
+ poly_int<N, C> r;
+ POLY_SET_COEFF (C, r, 0, MAX (NCa (a.coeffs[0]), NCb (b)));
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (C, r, i, MAX (NCa (a.coeffs[i]), ICb (0)));
+ return r;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline CONST_POLY_RESULT (N, Ca, Cb)
+upper_bound (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ return upper_bound (b, a);
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_POLY_RESULT (N, Ca, Cb)
+upper_bound (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+ typedef POLY_POLY_COEFF (Ca, Cb) C;
+
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (C, r, i, MAX (NCa (a.coeffs[i]), NCb (b.coeffs[i])));
+ return r;
+}
+
+/* Return the greatest common divisor of all nonzero coefficients, or zero
+ if all coefficients are zero. */
+
+template<unsigned int N, typename Ca>
+inline POLY_BINARY_COEFF (Ca, Ca)
+coeff_gcd (const poly_int_pod<N, Ca> &a)
+{
+  /* Find the highest nonzero coefficient, stopping at index 0
+     whether or not it is nonzero. */
+ unsigned int i;
+ for (i = N - 1; i > 0; --i)
+ if (a.coeffs[i] != 0)
+ break;
+ typedef POLY_BINARY_COEFF (Ca, Ca) C;
+ C r = a.coeffs[i];
+ for (unsigned int j = 0; j < i; ++j)
+ if (a.coeffs[j] != 0)
+ r = gcd (r, C (a.coeffs[j]));
+ return r;
+}
+
+/* Return a value that is a multiple of both A and B. This will be the
+   least common multiple for some indeterminate values but not
+   necessarily for all. */
+
+template<unsigned int N, typename Ca, typename Cb>
+POLY_CONST_RESULT (N, Ca, Cb)
+common_multiple (const poly_int_pod<N, Ca> &a, Cb b)
+{
+ POLY_BINARY_COEFF (Ca, Ca) xgcd = coeff_gcd (a);
+ return a * (least_common_multiple (xgcd, b) / xgcd);
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline CONST_POLY_RESULT (N, Ca, Cb)
+common_multiple (const Ca &a, const poly_int_pod<N, Cb> &b)
+{
+ return common_multiple (b, a);
+}
+
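+/* Illustrative sketch (editorial addition, not part of the upstream
+   header): common_multiple (4 + 6x, 10) computes coeff_gcd (4 + 6x) == 2
+   and scales by least_common_multiple (2, 10) / 2 == 5, giving 20 + 30x,
+   which is 10 * (2 + 3x) and hence a multiple of both arguments. */
+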
+/* Return a value that is a multiple of both A and B, asserting that
+   such a value exists. The result will be the least common multiple
+   for some indeterminate values but not necessarily for all.
+
+ NOTE: When using this function, please add a comment above the call
+ explaining why we know the values have a common multiple (which might
+ for example be because we know A / B is rational). */
+
+template<unsigned int N, typename Ca, typename Cb>
+POLY_POLY_RESULT (N, Ca, Cb)
+force_common_multiple (const poly_int_pod<N, Ca> &a,
+ const poly_int_pod<N, Cb> &b)
+{
+ if (b.is_constant ())
+ return common_multiple (a, b.coeffs[0]);
+ if (a.is_constant ())
+ return common_multiple (a.coeffs[0], b);
+
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+ typedef POLY_BINARY_COEFF (Ca, Cb) C;
+ typedef POLY_INT_TYPE (Ca) ICa;
+
+ for (unsigned int i = 1; i < N; ++i)
+ if (a.coeffs[i] != ICa (0))
+ {
+ C lcm = least_common_multiple (NCa (a.coeffs[i]), NCb (b.coeffs[i]));
+ C amul = lcm / a.coeffs[i];
+ C bmul = lcm / b.coeffs[i];
+ for (unsigned int j = 0; j < N; ++j)
+ gcc_checking_assert (a.coeffs[j] * amul == b.coeffs[j] * bmul);
+ return a * amul;
+ }
+ gcc_unreachable ();
+}
+
+/* Compare A and B for sorting purposes, returning -1 if A should come
+ before B, 0 if A and B are identical, and 1 if A should come after B.
+ This is a lexicographical compare of the coefficients in reverse order.
+
+ A consequence of this is that all constant sizes come before all
+ non-constant ones, regardless of magnitude (since a size is never
+ negative). This is what most callers want. For example, when laying
+ data out on the stack, it's better to keep all the constant-sized
+ data together so that it can be accessed as a constant offset from a
+ single base. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline int
+compare_sizes_for_sort (const poly_int_pod<N, Ca> &a,
+ const poly_int_pod<N, Cb> &b)
+{
+ for (unsigned int i = N; i-- > 0; )
+ if (a.coeffs[i] != b.coeffs[i])
+ return a.coeffs[i] < b.coeffs[i] ? -1 : 1;
+ return 0;
+}
+
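+/* Illustrative sketch (editorial addition, not part of the upstream
+   header): because the compare runs from the highest coefficient down,
+   a constant size such as 100 (coefficients {100, 0}) sorts before a
+   non-constant size such as 2 + 2x (coefficients {2, 2}). */
+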
+/* Return true if we can calculate VALUE & (ALIGN - 1) at compile time. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline bool
+can_align_p (const poly_int_pod<N, Ca> &value, Cb align)
+{
+ for (unsigned int i = 1; i < N; i++)
+ if ((value.coeffs[i] & (align - 1)) != 0)
+ return false;
+ return true;
+}
+
+/* Return true if we can align VALUE up to the smallest multiple of
+ ALIGN that is >= VALUE. Store the aligned value in *ALIGNED if so. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline bool
+can_align_up (const poly_int_pod<N, Ca> &value, Cb align,
+ poly_int_pod<N, Ca> *aligned)
+{
+ if (!can_align_p (value, align))
+ return false;
+ *aligned = value + (-value.coeffs[0] & (align - 1));
+ return true;
+}
+
+/* Return true if we can align VALUE down to the largest multiple of
+ ALIGN that is <= VALUE. Store the aligned value in *ALIGNED if so. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline bool
+can_align_down (const poly_int_pod<N, Ca> &value, Cb align,
+ poly_int_pod<N, Ca> *aligned)
+{
+ if (!can_align_p (value, align))
+ return false;
+ *aligned = value - (value.coeffs[0] & (align - 1));
+ return true;
+}
+
+/* Return true if we can align A and B up to the smallest multiples of
+ ALIGN that are >= A and B respectively, and if doing so gives the
+ same value. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cc>
+inline bool
+known_equal_after_align_up (const poly_int_pod<N, Ca> &a,
+ const poly_int_pod<N, Cb> &b,
+ Cc align)
+{
+ poly_int<N, Ca> aligned_a;
+ poly_int<N, Cb> aligned_b;
+ return (can_align_up (a, align, &aligned_a)
+ && can_align_up (b, align, &aligned_b)
+ && known_eq (aligned_a, aligned_b));
+}
+
+/* Return true if we can align A and B down to the largest multiples of
+ ALIGN that are <= A and B respectively, and if doing so gives the
+ same value. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cc>
+inline bool
+known_equal_after_align_down (const poly_int_pod<N, Ca> &a,
+ const poly_int_pod<N, Cb> &b,
+ Cc align)
+{
+ poly_int<N, Ca> aligned_a;
+ poly_int<N, Cb> aligned_b;
+ return (can_align_down (a, align, &aligned_a)
+ && can_align_down (b, align, &aligned_b)
+ && known_eq (aligned_a, aligned_b));
+}
+
+/* Assert that we can align VALUE to ALIGN at compile time and return
+ the smallest multiple of ALIGN that is >= VALUE.
+
+ NOTE: When using this function, please add a comment above the call
+ explaining why we know the non-constant coefficients must already
+ be a multiple of ALIGN. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, Ca>
+force_align_up (const poly_int_pod<N, Ca> &value, Cb align)
+{
+ gcc_checking_assert (can_align_p (value, align));
+ return value + (-value.coeffs[0] & (align - 1));
+}
+
+/* Assert that we can align VALUE to ALIGN at compile time and return
+ the largest multiple of ALIGN that is <= VALUE.
+
+ NOTE: When using this function, please add a comment above the call
+ explaining why we know the non-constant coefficients must already
+ be a multiple of ALIGN. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, Ca>
+force_align_down (const poly_int_pod<N, Ca> &value, Cb align)
+{
+ gcc_checking_assert (can_align_p (value, align));
+ return value - (value.coeffs[0] & (align - 1));
+}
+
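+/* Illustrative sketch (editorial addition, not part of the upstream
+   header): for value = 13 + 16x and align == 8, the runtime coefficient
+   16 is a multiple of 8, so can_align_p holds and:
+
+     force_align_up (value, 8)   --> 16 + 16x
+     force_align_down (value, 8) --> 8 + 16x
+
+   whereas align == 32 would fail can_align_p, since 16x can straddle a
+   32-byte boundary. */
+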
+/* Return a value <= VALUE that is a multiple of ALIGN. It will be the
+ greatest such value for some indeterminate values but not necessarily
+ for all. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, Ca>
+aligned_lower_bound (const poly_int_pod<N, Ca> &value, Cb align)
+{
+ poly_int<N, Ca> r;
+ for (unsigned int i = 0; i < N; i++)
+ /* This form copes correctly with more type combinations than
+ value.coeffs[i] & -align would. */
+ POLY_SET_COEFF (Ca, r, i, (value.coeffs[i]
+ - (value.coeffs[i] & (align - 1))));
+ return r;
+}
+
+/* Return a value >= VALUE that is a multiple of ALIGN. It will be the
+ least such value for some indeterminate values but not necessarily
+ for all. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, Ca>
+aligned_upper_bound (const poly_int_pod<N, Ca> &value, Cb align)
+{
+ poly_int<N, Ca> r;
+ for (unsigned int i = 0; i < N; i++)
+ POLY_SET_COEFF (Ca, r, i, (value.coeffs[i]
+ + (-value.coeffs[i] & (align - 1))));
+ return r;
+}
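+
+/* For example, aligned_lower_bound (5 + 6x, 4) is 4 + 4x: for x == 1 the
+ value is 11 and the bound 8 is the greatest multiple of 4 <= 11, but for
+ x == 2 the value is 17 and the bound 12 is below the greatest such
+ multiple, 16. Similarly, aligned_upper_bound (5 + 6x, 4) is 8 + 8x. */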
+
+/* Assert that we can align VALUE to ALIGN at compile time. Align VALUE
+ down to the largest multiple of ALIGN that is <= VALUE, then divide by
+ ALIGN.
+
+ NOTE: When using this function, please add a comment above the call
+ explaining why we know the non-constant coefficients must already
+ be a multiple of ALIGN. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, Ca>
+force_align_down_and_div (const poly_int_pod<N, Ca> &value, Cb align)
+{
+ gcc_checking_assert (can_align_p (value, align));
+
+ poly_int<N, Ca> r;
+ POLY_SET_COEFF (Ca, r, 0, ((value.coeffs[0]
+ - (value.coeffs[0] & (align - 1)))
+ / align));
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (Ca, r, i, value.coeffs[i] / align);
+ return r;
+}
+
+/* Assert that we can align VALUE to ALIGN at compile time. Align VALUE
+ up to the smallest multiple of ALIGN that is >= VALUE, then divide by
+ ALIGN.
+
+ NOTE: When using this function, please add a comment above the call
+ explaining why we know the non-constant coefficients must already
+ be a multiple of ALIGN. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline poly_int<N, Ca>
+force_align_up_and_div (const poly_int_pod<N, Ca> &value, Cb align)
+{
+ gcc_checking_assert (can_align_p (value, align));
+
+ poly_int<N, Ca> r;
+ POLY_SET_COEFF (Ca, r, 0, ((value.coeffs[0]
+ + (-value.coeffs[0] & (align - 1)))
+ / align));
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ POLY_SET_COEFF (Ca, r, i, value.coeffs[i] / align);
+ return r;
+}
+
+/* Return true if we know at compile time the difference between VALUE
+ and the equal or preceding multiple of ALIGN. Store the value in
+ *MISALIGN if so. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cm>
+inline bool
+known_misalignment (const poly_int_pod<N, Ca> &value, Cb align, Cm *misalign)
+{
+ gcc_checking_assert (align != 0);
+ if (!can_align_p (value, align))
+ return false;
+ *misalign = value.coeffs[0] & (align - 1);
+ return true;
+}
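+
+/* For example, known_misalignment (14 + 8x, 4, &m) is true and sets m
+ to 2, since 14 + 8x is 2 more than a multiple of 4 for every x. */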
+
+/* Return X & (Y - 1), asserting that this value is known. Please add
+ a comment above calls to this function to explain why the condition
+ is known to hold. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_BINARY_COEFF (Ca, Ca)
+force_get_misalignment (const poly_int_pod<N, Ca> &a, Cb align)
+{
+ gcc_checking_assert (can_align_p (a, align));
+ return a.coeffs[0] & (align - 1);
+}
+
+/* Return the maximum alignment that A is known to have. Return 0
+ if A is known to be zero. */
+
+template<unsigned int N, typename Ca>
+inline POLY_BINARY_COEFF (Ca, Ca)
+known_alignment (const poly_int_pod<N, Ca> &a)
+{
+ typedef POLY_BINARY_COEFF (Ca, Ca) C;
+ C r = a.coeffs[0];
+ for (unsigned int i = 1; i < N; ++i)
+ r |= a.coeffs[i];
+ return r & -r;
+}
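+
+/* For example, known_alignment (12 + 8x) is 4: ORing the coefficients
+ gives 12, whose lowest set bit is 4, and indeed 12, 20, 28, ... are all
+ multiples of 4 but not all multiples of 8. */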
+
+/* Return true if we can compute A | B at compile time, storing the
+ result in RES if so. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cr>
+inline typename if_nonpoly<Cb, bool>::type
+can_ior_p (const poly_int_pod<N, Ca> &a, Cb b, Cr *result)
+{
+ /* Coefficients 1 and above must be a multiple of something greater
+ than B. */
+ typedef POLY_INT_TYPE (Ca) int_type;
+ if (N >= 2)
+ for (unsigned int i = 1; i < N; i++)
+ if ((-(a.coeffs[i] & -a.coeffs[i]) & b) != int_type (0))
+ return false;
+ *result = a;
+ result->coeffs[0] |= b;
+ return true;
+}
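+
+/* For example, can_ior_p (4 + 16x, 3, &r) is true and sets r to 7 + 16x,
+ since the low bits covered by 3 are known to be clear for every x.
+ can_ior_p (4 + 2x, 3, &r) is false: the OR is no longer an affine
+ function of x. */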
+
+/* Return true if A is a constant multiple of B, storing the
+ multiple in *MULTIPLE if so. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cm>
+inline typename if_nonpoly<Cb, bool>::type
+constant_multiple_p (const poly_int_pod<N, Ca> &a, Cb b, Cm *multiple)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+
+ /* Do the modulus before the constant check, to catch divide by
+ zero errors. */
+ if (NCa (a.coeffs[0]) % NCb (b) != 0 || !a.is_constant ())
+ return false;
+ *multiple = NCa (a.coeffs[0]) / NCb (b);
+ return true;
+}
+
+template<unsigned int N, typename Ca, typename Cb, typename Cm>
+inline typename if_nonpoly<Ca, bool>::type
+constant_multiple_p (Ca a, const poly_int_pod<N, Cb> &b, Cm *multiple)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+ typedef POLY_INT_TYPE (Ca) int_type;
+
+ /* Do the modulus before the constant check, to catch divide by
+ zero errors. */
+ if (NCa (a) % NCb (b.coeffs[0]) != 0
+ || (a != int_type (0) && !b.is_constant ()))
+ return false;
+ *multiple = NCa (a) / NCb (b.coeffs[0]);
+ return true;
+}
+
+template<unsigned int N, typename Ca, typename Cb, typename Cm>
+inline bool
+constant_multiple_p (const poly_int_pod<N, Ca> &a,
+ const poly_int_pod<N, Cb> &b, Cm *multiple)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+ typedef POLY_INT_TYPE (Ca) ICa;
+ typedef POLY_INT_TYPE (Cb) ICb;
+ typedef POLY_BINARY_COEFF (Ca, Cb) C;
+
+ if (NCa (a.coeffs[0]) % NCb (b.coeffs[0]) != 0)
+ return false;
+
+ C r = NCa (a.coeffs[0]) / NCb (b.coeffs[0]);
+ for (unsigned int i = 1; i < N; ++i)
+ if (b.coeffs[i] == ICb (0)
+ ? a.coeffs[i] != ICa (0)
+ : (NCa (a.coeffs[i]) % NCb (b.coeffs[i]) != 0
+ || NCa (a.coeffs[i]) / NCb (b.coeffs[i]) != r))
+ return false;
+
+ *multiple = r;
+ return true;
+}
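+
+/* For example, constant_multiple_p (8 + 16x, 2 + 4x, &m) is true and
+ sets m to 4, since 8 + 16x == 4 * (2 + 4x) for every x, whereas
+ 8 + 12x is not a constant multiple of 2 + 4x (the per-coefficient
+ quotients 4 and 3 disagree). */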
+
+/* Return true if A is a constant multiple of B. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline typename if_nonpoly<Cb, bool>::type
+constant_multiple_p (const poly_int_pod<N, Ca> &a, Cb b)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+
+ /* Do the modulus before the constant check, to catch divide by
+ zero errors. */
+ if (NCa (a.coeffs[0]) % NCb (b) != 0 || !a.is_constant ())
+ return false;
+ return true;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline typename if_nonpoly<Ca, bool>::type
+constant_multiple_p (Ca a, const poly_int_pod<N, Cb> &b)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+ typedef POLY_INT_TYPE (Ca) int_type;
+
+ /* Do the modulus before the constant check, to catch divide by
+ zero errors. */
+ if (NCa (a) % NCb (b.coeffs[0]) != 0
+ || (a != int_type (0) && !b.is_constant ()))
+ return false;
+ return true;
+}
+
+template<unsigned int N, typename Ca, typename Cb>
+inline bool
+constant_multiple_p (const poly_int_pod<N, Ca> &a,
+ const poly_int_pod<N, Cb> &b)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+ typedef POLY_INT_TYPE (Ca) ICa;
+ typedef POLY_INT_TYPE (Cb) ICb;
+ typedef POLY_BINARY_COEFF (Ca, Cb) C;
+
+ if (NCa (a.coeffs[0]) % NCb (b.coeffs[0]) != 0)
+ return false;
+
+ C r = NCa (a.coeffs[0]) / NCb (b.coeffs[0]);
+ for (unsigned int i = 1; i < N; ++i)
+ if (b.coeffs[i] == ICb (0)
+ ? a.coeffs[i] != ICa (0)
+ : (NCa (a.coeffs[i]) % NCb (b.coeffs[i]) != 0
+ || NCa (a.coeffs[i]) / NCb (b.coeffs[i]) != r))
+ return false;
+ return true;
+}
+
+
+/* Return true if A is a multiple of B. */
+
+template<typename Ca, typename Cb>
+inline typename if_nonpoly2<Ca, Cb, bool>::type
+multiple_p (Ca a, Cb b)
+{
+ return a % b == 0;
+}
+
+/* Return true if A is a (polynomial) multiple of B. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline typename if_nonpoly<Cb, bool>::type
+multiple_p (const poly_int_pod<N, Ca> &a, Cb b)
+{
+ for (unsigned int i = 0; i < N; ++i)
+ if (a.coeffs[i] % b != 0)
+ return false;
+ return true;
+}
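+
+/* For example, multiple_p (8 + 12x, 4) is true, while multiple_p
+ (8 + 10x, 4) is false: even though the value is a multiple of 4
+ whenever x is even, that cannot be guaranteed for all x. */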
+
+/* Return true if A is a (constant) multiple of B. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline typename if_nonpoly<Ca, bool>::type
+multiple_p (Ca a, const poly_int_pod<N, Cb> &b)
+{
+ typedef POLY_INT_TYPE (Ca) int_type;
+
+ /* Do the modulus before the constant check, to catch divide by
+ potential zeros. */
+ return a % b.coeffs[0] == 0 && (a == int_type (0) || b.is_constant ());
+}
+
+/* Return true if A is a (polynomial) multiple of B. This handles cases
+ where either B is constant or the multiple is constant. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline bool
+multiple_p (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ if (b.is_constant ())
+ return multiple_p (a, b.coeffs[0]);
+ POLY_BINARY_COEFF (Ca, Ca) tmp;
+ return constant_multiple_p (a, b, &tmp);
+}
+
+/* Return true if A is a (constant) multiple of B, storing the
+ multiple in *MULTIPLE if so. */
+
+template<typename Ca, typename Cb, typename Cm>
+inline typename if_nonpoly2<Ca, Cb, bool>::type
+multiple_p (Ca a, Cb b, Cm *multiple)
+{
+ if (a % b != 0)
+ return false;
+ *multiple = a / b;
+ return true;
+}
+
+/* Return true if A is a (polynomial) multiple of B, storing the
+ multiple in *MULTIPLE if so. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cm>
+inline typename if_nonpoly<Cb, bool>::type
+multiple_p (const poly_int_pod<N, Ca> &a, Cb b, poly_int_pod<N, Cm> *multiple)
+{
+ if (!multiple_p (a, b))
+ return false;
+ for (unsigned int i = 0; i < N; ++i)
+ multiple->coeffs[i] = a.coeffs[i] / b;
+ return true;
+}
+
+/* Return true if B is a constant and A is a (constant) multiple of B,
+ storing the multiple in *MULTIPLE if so. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cm>
+inline typename if_nonpoly<Ca, bool>::type
+multiple_p (Ca a, const poly_int_pod<N, Cb> &b, Cm *multiple)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+
+ /* Do the modulus before the constant check, to catch divide by
+ potential zeros. */
+ if (a % b.coeffs[0] != 0 || (NCa (a) != 0 && !b.is_constant ()))
+ return false;
+ *multiple = a / b.coeffs[0];
+ return true;
+}
+
+/* Return true if A is a (polynomial) multiple of B, storing the
+ multiple in *MULTIPLE if so. This handles cases where either
+ B is constant or the multiple is constant. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cm>
+inline bool
+multiple_p (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b,
+ poly_int_pod<N, Cm> *multiple)
+{
+ if (b.is_constant ())
+ return multiple_p (a, b.coeffs[0], multiple);
+ return constant_multiple_p (a, b, multiple);
+}
+
+/* Return A / B, given that A is known to be a multiple of B. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_CONST_RESULT (N, Ca, Cb)
+exact_div (const poly_int_pod<N, Ca> &a, Cb b)
+{
+ typedef POLY_CONST_COEFF (Ca, Cb) C;
+ poly_int<N, C> r;
+ for (unsigned int i = 0; i < N; i++)
+ {
+ gcc_checking_assert (a.coeffs[i] % b == 0);
+ POLY_SET_COEFF (C, r, i, a.coeffs[i] / b);
+ }
+ return r;
+}
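+
+/* For example, exact_div (6 + 8x, 2) is 3 + 4x; in checking builds the
+ assertions verify that every coefficient is divisible by 2. */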
+
+/* Return A / B, given that A is known to be a multiple of B. */
+
+template<unsigned int N, typename Ca, typename Cb>
+inline POLY_POLY_RESULT (N, Ca, Cb)
+exact_div (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b)
+{
+ if (b.is_constant ())
+ return exact_div (a, b.coeffs[0]);
+
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+ typedef POLY_BINARY_COEFF (Ca, Cb) C;
+ typedef POLY_INT_TYPE (Cb) int_type;
+
+ gcc_checking_assert (a.coeffs[0] % b.coeffs[0] == 0);
+ C r = NCa (a.coeffs[0]) / NCb (b.coeffs[0]);
+ for (unsigned int i = 1; i < N; ++i)
+ gcc_checking_assert (b.coeffs[i] == int_type (0)
+ ? a.coeffs[i] == int_type (0)
+ : (a.coeffs[i] % b.coeffs[i] == 0
+ && NCa (a.coeffs[i]) / NCb (b.coeffs[i]) == r));
+
+ return r;
+}
+
+/* Return true if there is some constant Q and polynomial r such that:
+
+ (1) a = b * Q + r
+ (2) |b * Q| <= |a|
+ (3) |r| < |b|
+
+ Store the value Q in *QUOTIENT if so. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cq>
+inline typename if_nonpoly2<Cb, Cq, bool>::type
+can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b, Cq *quotient)
+{
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+
+ /* Do the division before the constant check, to catch divide by
+ zero errors. */
+ Cq q = NCa (a.coeffs[0]) / NCb (b);
+ if (!a.is_constant ())
+ return false;
+ *quotient = q;
+ return true;
+}
+
+template<unsigned int N, typename Ca, typename Cb, typename Cq>
+inline typename if_nonpoly<Cq, bool>::type
+can_div_trunc_p (const poly_int_pod<N, Ca> &a,
+ const poly_int_pod<N, Cb> &b,
+ Cq *quotient)
+{
+ /* We can calculate Q from the case in which the indeterminates
+ are zero. */
+ typedef POLY_CAST (Ca, Cb) NCa;
+ typedef POLY_CAST (Cb, Ca) NCb;
+ typedef POLY_INT_TYPE (Ca) ICa;
+ typedef POLY_INT_TYPE (Cb) ICb;
+ typedef POLY_BINARY_COEFF (Ca, Cb) C;
+ C q = NCa (a.coeffs[0]) / NCb (b.coeffs[0]);
+
+ /* Check the other coefficients and record whether the division is exact.
+ The only difficult case is when it isn't. If we require a and b to
+ be ordered wrt zero, there can be no two coefficients of the same value
+ that have opposite signs. This means that:
+
+ |a| = |a0| + |a1 * x1| + |a2 * x2| + ...
+ |b| = |b0| + |b1 * x1| + |b2 * x2| + ...
+
+ The Q we've just calculated guarantees:
+
+ |b0 * Q| <= |a0|
+ |a0 - b0 * Q| < |b0|
+
+ and so:
+
+ (2) |b * Q| <= |a|
+
+ is satisfied if:
+
+ |bi * xi * Q| <= |ai * xi|
+
+ for each i in [1, N]. This is trivially true when xi is zero.
+ When it isn't we need:
+
+ (2') |bi * Q| <= |ai|
+
+ r is calculated as:
+
+ r = r0 + r1 * x1 + r2 * x2 + ...
+ where ri = ai - bi * Q
+
+ Restricting to ordered a and b also guarantees that no two ris
+ have opposite signs, so we have:
+
+ |r| = |r0| + |r1 * x1| + |r2 * x2| + ...
+
+ We know from the calculation of Q that |r0| < |b0|, so:
+
+ (3) |r| < |b|
+
+ is satisfied if:
+
+ (3') |ai - bi * Q| <= |bi|
+
+ for each i in [1, N]. */
+ bool rem_p = NCa (a.coeffs[0]) % NCb (b.coeffs[0]) != 0;
+ for (unsigned int i = 1; i < N; ++i)
+ {
+ if (b.coeffs[i] == ICb (0))
+ {
+ /* For bi == 0 we simply need: (3') |ai| == 0. */
+ if (a.coeffs[i] != ICa (0))
+ return false;
+ }
+ else
+ {
+ if (q == 0)
+ {
+ /* For Q == 0 we simply need: (3') |ai| <= |bi|. */
+ if (a.coeffs[i] != ICa (0))
+ {
+ /* Use negative absolute to avoid overflow, i.e.
+ -|ai| >= -|bi|. */
+ C neg_abs_a = (a.coeffs[i] < 0 ? a.coeffs[i] : -a.coeffs[i]);
+ C neg_abs_b = (b.coeffs[i] < 0 ? b.coeffs[i] : -b.coeffs[i]);
+ if (neg_abs_a < neg_abs_b)
+ return false;
+ rem_p = true;
+ }
+ }
+ else
+ {
+ /* Otherwise just check for the case in which ai / bi == Q. */
+ if (NCa (a.coeffs[i]) / NCb (b.coeffs[i]) != q)
+ return false;
+ if (NCa (a.coeffs[i]) % NCb (b.coeffs[i]) != 0)
+ rem_p = true;
+ }
+ }
+ }
+
+ /* If the division isn't exact, require both values to be ordered wrt 0,
+ so that we can guarantee conditions (2) and (3) for all indeterminate
+ values. */
+ if (rem_p && (!ordered_p (a, ICa (0)) || !ordered_p (b, ICb (0))))
+ return false;
+
+ *quotient = q;
+ return true;
+}
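+
+/* For example, can_div_trunc_p (9 + 8x, 2 + 2x, &q) is true and sets q
+ to 4: b * 4 = 8 + 8x never exceeds a, and the remainder 1 is always
+ smaller in magnitude than 2 + 2x. can_div_trunc_p (9 + 4x, 2 + 2x, &q)
+ is false, since the coefficient quotients 4 and 2 disagree. */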
+
+/* Likewise, but also store r in *REMAINDER. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cq, typename Cr>
+inline typename if_nonpoly<Cq, bool>::type
+can_div_trunc_p (const poly_int_pod<N, Ca> &a,
+ const poly_int_pod<N, Cb> &b,
+ Cq *quotient, Cr *remainder)
+{
+ if (!can_div_trunc_p (a, b, quotient))
+ return false;
+ *remainder = a - *quotient * b;
+ return true;
+}
+
+/* Return true if there is some polynomial q and constant R such that:
+
+ (1) a = B * q + R
+ (2) |B * q| <= |a|
+ (3) |R| < |B|
+
+ Store the value q in *QUOTIENT if so. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cq>
+inline typename if_nonpoly<Cb, bool>::type
+can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b,
+ poly_int_pod<N, Cq> *quotient)
+{
+ /* The remainder must be constant. */
+ for (unsigned int i = 1; i < N; ++i)
+ if (a.coeffs[i] % b != 0)
+ return false;
+ for (unsigned int i = 0; i < N; ++i)
+ quotient->coeffs[i] = a.coeffs[i] / b;
+ return true;
+}
+
+/* Likewise, but also store R in *REMAINDER. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cq, typename Cr>
+inline typename if_nonpoly<Cb, bool>::type
+can_div_trunc_p (const poly_int_pod<N, Ca> &a, Cb b,
+ poly_int_pod<N, Cq> *quotient, Cr *remainder)
+{
+ if (!can_div_trunc_p (a, b, quotient))
+ return false;
+ *remainder = a.coeffs[0] % b;
+ return true;
+}
+
+/* Return true if we can compute A / B at compile time, rounding towards zero.
+ Store the result in *QUOTIENT if so.
+
+ This handles cases in which either B is constant or the result is
+ constant. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cq>
+inline bool
+can_div_trunc_p (const poly_int_pod<N, Ca> &a,
+ const poly_int_pod<N, Cb> &b,
+ poly_int_pod<N, Cq> *quotient)
+{
+ if (b.is_constant ())
+ return can_div_trunc_p (a, b.coeffs[0], quotient);
+ if (!can_div_trunc_p (a, b, &quotient->coeffs[0]))
+ return false;
+ for (unsigned int i = 1; i < N; ++i)
+ quotient->coeffs[i] = 0;
+ return true;
+}
+
+/* Return true if there is some constant Q and polynomial r such that:
+
+ (1) a = b * Q + r
+ (2) |a| <= |b * Q|
+ (3) |r| < |b|
+
+ Store the value Q in *QUOTIENT if so. */
+
+template<unsigned int N, typename Ca, typename Cb, typename Cq>
+inline typename if_nonpoly<Cq, bool>::type
+can_div_away_from_zero_p (const poly_int_pod<N, Ca> &a,
+ const poly_int_pod<N, Cb> &b,
+ Cq *quotient)
+{
+ if (!can_div_trunc_p (a, b, quotient))
+ return false;
+ if (maybe_ne (*quotient * b, a))
+ *quotient += (*quotient < 0 ? -1 : 1);
+ return true;
+}
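+
+/* For example, can_div_away_from_zero_p (9 + 8x, 2 + 2x, &q) sets q to 5:
+ truncating division gives 4 with a nonzero remainder, so the quotient
+ is bumped away from zero, and |5 * (2 + 2x)| = 10 + 10x >= |9 + 8x|. */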
+
+/* Use print_dec to print VALUE to FILE, where SGN is the sign
+ of the values. */
+
+template<unsigned int N, typename C>
+void
+print_dec (const poly_int_pod<N, C> &value, FILE *file, signop sgn)
+{
+ if (value.is_constant ())
+ print_dec (value.coeffs[0], file, sgn);
+ else
+ {
+ fprintf (file, "[");
+ for (unsigned int i = 0; i < N; ++i)
+ {
+ print_dec (value.coeffs[i], file, sgn);
+ fputc (i == N - 1 ? ']' : ',', file);
+ }
+ }
+}
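+
+/* For example, a constant value prints as a plain number, while a
+ two-coefficient value 2 + 3x prints as [2,3]. */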
+
+/* Likewise without the signop argument, for coefficients that have an
+ inherent signedness. */
+
+template<unsigned int N, typename C>
+void
+print_dec (const poly_int_pod<N, C> &value, FILE *file)
+{
+ STATIC_ASSERT (poly_coeff_traits<C>::signedness >= 0);
+ print_dec (value, file,
+ poly_coeff_traits<C>::signedness ? SIGNED : UNSIGNED);
+}
+
+/* Use print_hex to print VALUE to FILE. */
+
+template<unsigned int N, typename C>
+void
+print_hex (const poly_int_pod<N, C> &value, FILE *file)
+{
+ if (value.is_constant ())
+ print_hex (value.coeffs[0], file);
+ else
+ {
+ fprintf (file, "[");
+ for (unsigned int i = 0; i < N; ++i)
+ {
+ print_hex (value.coeffs[i], file);
+ fputc (i == N - 1 ? ']' : ',', file);
+ }
+ }
+}
+
+/* Helper for calculating the distance between two points P1 and P2,
+ in cases where known_le (P1, P2). T1 and T2 are the types of the
+ two positions, in either order. The coefficients of P2 - P1 have
+ type unsigned HOST_WIDE_INT if the coefficients of both T1 and T2
+ have C++ primitive type, otherwise P2 - P1 has its usual
+ wide-int-based type.
+
+ The actual subtraction should look something like this:
+
+ typedef poly_span_traits<T1, T2> span_traits;
+ span_traits::cast (P2) - span_traits::cast (P1)
+
+ Applying the cast before the subtraction avoids undefined overflow
+ for signed T1 and T2.
+
+ The implementation of the cast tries to avoid unnecessary arithmetic
+ or copying. */
+template<typename T1, typename T2,
+ typename Res = POLY_BINARY_COEFF (POLY_BINARY_COEFF (T1, T2),
+ unsigned HOST_WIDE_INT)>
+struct poly_span_traits
+{
+ template<typename T>
+ static const T &cast (const T &x) { return x; }
+};
+
+template<typename T1, typename T2>
+struct poly_span_traits<T1, T2, unsigned HOST_WIDE_INT>
+{
+ template<typename T>
+ static typename if_nonpoly<T, unsigned HOST_WIDE_INT>::type
+ cast (const T &x) { return x; }
+
+ template<unsigned int N, typename T>
+ static poly_int<N, unsigned HOST_WIDE_INT>
+ cast (const poly_int_pod<N, T> &x) { return x; }
+};
+
+/* Return true if SIZE represents a known size, assuming that all-ones
+ indicates an unknown size. */
+
+template<typename T>
+inline bool
+known_size_p (const T &a)
+{
+ return maybe_ne (a, POLY_INT_TYPE (T) (-1));
+}
+
+/* Return true if range [POS, POS + SIZE) might include VAL.
+ SIZE can be the special value -1, in which case the range is
+ open-ended. */
+
+template<typename T1, typename T2, typename T3>
+inline bool
+maybe_in_range_p (const T1 &val, const T2 &pos, const T3 &size)
+{
+ typedef poly_span_traits<T1, T2> start_span;
+ typedef poly_span_traits<T3, T3> size_span;
+ if (known_lt (val, pos))
+ return false;
+ if (!known_size_p (size))
+ return true;
+ if ((poly_int_traits<T1>::num_coeffs > 1
+ || poly_int_traits<T2>::num_coeffs > 1)
+ && maybe_lt (val, pos))
+ /* In this case we don't know whether VAL >= POS is true at compile
+ time, so we can't prove that VAL >= POS + SIZE. */
+ return true;
+ return maybe_lt (start_span::cast (val) - start_span::cast (pos),
+ size_span::cast (size));
+}
+
+/* Return true if range [POS, POS + SIZE) is known to include VAL.
+ SIZE can be the special value -1, in which case the range is
+ open-ended. */
+
+template<typename T1, typename T2, typename T3>
+inline bool
+known_in_range_p (const T1 &val, const T2 &pos, const T3 &size)
+{
+ typedef poly_span_traits<T1, T2> start_span;
+ typedef poly_span_traits<T3, T3> size_span;
+ return (known_size_p (size)
+ && known_ge (val, pos)
+ && known_lt (start_span::cast (val) - start_span::cast (pos),
+ size_span::cast (size)));
+}
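+
+/* For example, with val = 4 + 4x, pos = 0 and size = 8,
+ maybe_in_range_p returns true (for x == 0, 4 lies in [0, 8)) while
+ known_in_range_p returns false (for x == 1, 8 does not). */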
+
+/* Return true if the two ranges [POS1, POS1 + SIZE1) and [POS2, POS2 + SIZE2)
+ might overlap. SIZE1 and/or SIZE2 can be the special value -1, in which
+ case the range is open-ended. */
+
+template<typename T1, typename T2, typename T3, typename T4>
+inline bool
+ranges_maybe_overlap_p (const T1 &pos1, const T2 &size1,
+ const T3 &pos2, const T4 &size2)
+{
+ if (maybe_in_range_p (pos2, pos1, size1))
+ return maybe_ne (size2, POLY_INT_TYPE (T4) (0));
+ if (maybe_in_range_p (pos1, pos2, size2))
+ return maybe_ne (size1, POLY_INT_TYPE (T2) (0));
+ return false;
+}
+
+/* Return true if the two ranges [POS1, POS1 + SIZE1) and [POS2, POS2 + SIZE2)
+ are known to overlap. SIZE1 and/or SIZE2 can be the special value -1,
+ in which case the range is open-ended. */
+
+template<typename T1, typename T2, typename T3, typename T4>
+inline bool
+ranges_known_overlap_p (const T1 &pos1, const T2 &size1,
+ const T3 &pos2, const T4 &size2)
+{
+ typedef poly_span_traits<T1, T3> start_span;
+ typedef poly_span_traits<T2, T2> size1_span;
+ typedef poly_span_traits<T4, T4> size2_span;
+ /* known_gt (POS1 + SIZE1, POS2) [infinite precision]
+ --> known_gt (SIZE1, POS2 - POS1) [infinite precision]
+ --> known_gt (SIZE1, POS2 - lower_bound (POS1, POS2)) [infinite precision]
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ always nonnegative
+ --> known_gt (SIZE1, span1::cast (POS2 - lower_bound (POS1, POS2))).
+
+ Using the saturating subtraction enforces that SIZE1 must be
+ nonzero, since known_gt (0, x) is false for all nonnegative x.
+ If POS2.coeff[I] < POS1.coeff[I] for some I > 0, increasing
+ indeterminate number I makes the unsaturated condition easier to
+ satisfy, so using a saturated coefficient of zero tests the case in
+ which the indeterminate is zero (the minimum value). */
+ return (known_size_p (size1)
+ && known_size_p (size2)
+ && known_lt (start_span::cast (pos2)
+ - start_span::cast (lower_bound (pos1, pos2)),
+ size1_span::cast (size1))
+ && known_lt (start_span::cast (pos1)
+ - start_span::cast (lower_bound (pos1, pos2)),
+ size2_span::cast (size2)));
+}
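+
+/* For example, with pos1 = 0, size1 = 4, pos2 = 2 + 2x and size2 = 4,
+ ranges_maybe_overlap_p returns true (the ranges overlap when x == 0)
+ while ranges_known_overlap_p returns false (for x >= 1 the second
+ range starts at or beyond 4). */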
+
+/* Return true if range [POS1, POS1 + SIZE1) is known to be a subrange of
+ [POS2, POS2 + SIZE2). SIZE1 and/or SIZE2 can be the special value -1,
+ in which case the range is open-ended. */
+
+template<typename T1, typename T2, typename T3, typename T4>
+inline bool
+known_subrange_p (const T1 &pos1, const T2 &size1,
+ const T3 &pos2, const T4 &size2)
+{
+ typedef typename poly_int_traits<T2>::coeff_type C2;
+ typedef poly_span_traits<T1, T3> start_span;
+ typedef poly_span_traits<T2, T4> size_span;
+ return (known_gt (size1, POLY_INT_TYPE (T2) (0))
+ && (poly_coeff_traits<C2>::signedness > 0
+ || known_size_p (size1))
+ && known_size_p (size2)
+ && known_ge (pos1, pos2)
+ && known_le (size1, size2)
+ && known_le (start_span::cast (pos1) - start_span::cast (pos2),
+ size_span::cast (size2) - size_span::cast (size1)));
+}
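+
+/* For example, known_subrange_p (4, 4, 0, 8 + 8x) is true: the range
+ [4, 8) lies within [0, 8 + 8x) for every x >= 0. */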
+
+/* Return true if the endpoint of the range [POS, POS + SIZE) can be
+ stored in a T, or if SIZE is the special value -1, which makes the
+ range open-ended. */
+
+template<typename T>
+inline typename if_nonpoly<T, bool>::type
+endpoint_representable_p (const T &pos, const T &size)
+{
+ return (!known_size_p (size)
+ || pos <= poly_coeff_traits<T>::max_value - size);
+}
+
+template<unsigned int N, typename C>
+inline bool
+endpoint_representable_p (const poly_int_pod<N, C> &pos,
+ const poly_int_pod<N, C> &size)
+{
+ if (known_size_p (size))
+ for (unsigned int i = 0; i < N; ++i)
+ if (pos.coeffs[i] > poly_coeff_traits<C>::max_value - size.coeffs[i])
+ return false;
+ return true;
+}
+
+template<unsigned int N, typename C>
+void
+gt_ggc_mx (poly_int_pod<N, C> *)
+{
+}
+
+template<unsigned int N, typename C>
+void
+gt_pch_nx (poly_int_pod<N, C> *)
+{
+}
+
+template<unsigned int N, typename C>
+void
+gt_pch_nx (poly_int_pod<N, C> *, gt_pointer_operator, void *)
+{
+}
+
+#undef POLY_SET_COEFF
+#undef POLY_INT_TYPE
+#undef POLY_BINARY_COEFF
+#undef CONST_CONST_RESULT
+#undef POLY_CONST_RESULT
+#undef CONST_POLY_RESULT
+#undef POLY_POLY_RESULT
+#undef POLY_CONST_COEFF
+#undef CONST_POLY_COEFF
+#undef POLY_POLY_COEFF
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/predict.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/predict.def
new file mode 100644
index 0000000..1f391a0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/predict.def
@@ -0,0 +1,238 @@
+/* Definitions for the branch prediction routines in the GNU compiler.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Before including this file, you should define a macro:
+
+ DEF_PREDICTOR (ENUM, NAME, HITRATE, FLAGS)
+
+ This macro will be called once for each predictor. The ENUM will
+ be of type `enum br_predictor', and will enumerate all supported
+ predictors. The order of DEF_PREDICTOR calls is important, as
+ in the first match combining heuristics, the predictor appearing
+ first in this file will win.
+
+ NAME is used in the debugging output to determine predictor type.
+
+ HITRATE is the probability that an edge predicted as taken by the
+ predictor will really be taken (so it should always be above
+ REG_BR_PROB_BASE / 2).
+
+ FLAGS is a set of predictor flags, such as PRED_FLAG_FIRST_MATCH,
+ which makes the predictor participate in first-match combining. */
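+
+/* For example, a client can expand the table into an array of predictor
+ names (a minimal illustrative sketch; the array name is arbitrary):
+
+ #define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) NAME,
+ static const char *const predictor_names[] = {
+ #include "predict.def"
+ };
+ #undef DEF_PREDICTOR
+*/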
+
+
+/* A value used as final outcome of all heuristics. */
+DEF_PREDICTOR (PRED_COMBINED, "combined", PROB_ALWAYS, 0)
+
+/* An outcome estimated by Dempster-Shafer theory. */
+DEF_PREDICTOR (PRED_DS_THEORY, "DS theory", PROB_ALWAYS, 0)
+
+/* A combined heuristics using probability determined by first
+ matching heuristics from this list. */
+DEF_PREDICTOR (PRED_FIRST_MATCH, "first match", PROB_ALWAYS, 0)
+
+/* Heuristic applying when no heuristic below applies. */
+DEF_PREDICTOR (PRED_NO_PREDICTION, "no prediction", PROB_ALWAYS, 0)
+
+/* Mark unconditional jump as taken. */
+DEF_PREDICTOR (PRED_UNCONDITIONAL, "unconditional jump", PROB_ALWAYS,
+ PRED_FLAG_FIRST_MATCH)
+
+/* Return value of malloc function is almost always non-null. */
+DEF_PREDICTOR (PRED_MALLOC_NONNULL, "malloc returned non-NULL", \
+ PROB_VERY_LIKELY, PRED_FLAG_FIRST_MATCH)
+
+/* Use number of loop iterations determined by # of iterations
+ analysis to set probability. We don't want to use Dempster-Shafer
+ theory here, as the prediction is exact. */
+DEF_PREDICTOR (PRED_LOOP_ITERATIONS, "loop iterations", PROB_UNINITIALIZED,
+ PRED_FLAG_FIRST_MATCH)
+
+/* Assume that any given atomic operation has low contention,
+ and thus the compare-and-swap operation succeeds. */
+DEF_PREDICTOR (PRED_COMPARE_AND_SWAP, "compare and swap", PROB_VERY_LIKELY,
+ PRED_FLAG_FIRST_MATCH)
+
+/* Hints provided by the user via the __builtin_expect feature. Note: the
+ probability of PROB_VERY_LIKELY is now overwritten by the param
+ builtin_expect_probability with a default value of HITRATE(90).
+ Refer to param.def for details. */
+DEF_PREDICTOR (PRED_BUILTIN_EXPECT, "__builtin_expect", PROB_VERY_LIKELY,
+ PRED_FLAG_FIRST_MATCH)
+
+/* Hints provided by user via __builtin_expect_with_probability. */
+DEF_PREDICTOR (PRED_BUILTIN_EXPECT_WITH_PROBABILITY,
+ "__builtin_expect_with_probability", PROB_UNINITIALIZED,
+ PRED_FLAG_FIRST_MATCH)
+
+/* Branches to hot labels are likely. */
+DEF_PREDICTOR (PRED_HOT_LABEL, "hot label", HITRATE (90),
+ PRED_FLAG_FIRST_MATCH)
+
+/* Branches to cold labels are extremely unlikely. */
+DEF_PREDICTOR (PRED_COLD_LABEL, "cold label", HITRATE (90),
+ PRED_FLAG_FIRST_MATCH)
+
+/* Use number of loop iterations guessed by the contents of the loop. */
+DEF_PREDICTOR (PRED_LOOP_ITERATIONS_GUESSED, "guessed loop iterations",
+ PROB_UNINITIALIZED, PRED_FLAG_FIRST_MATCH)
+
+/* Use number of loop iterations guessed by the contents of the loop. */
+DEF_PREDICTOR (PRED_LOOP_ITERATIONS_MAX, "guessed loop iterations",
+ PROB_UNINITIALIZED, PRED_FLAG_FIRST_MATCH)
+
+/* Branch containing goto is probably not taken. */
+DEF_PREDICTOR (PRED_CONTINUE, "continue", HITRATE (67), 0)
+
+/* Branch to basic block containing call marked by noreturn attribute. */
+DEF_PREDICTOR (PRED_NORETURN, "noreturn call", PROB_VERY_LIKELY,
+ PRED_FLAG_FIRST_MATCH)
+
+/* Branch to basic block containing call marked by cold function attribute. */
+DEF_PREDICTOR (PRED_COLD_FUNCTION, "cold function call", PROB_VERY_LIKELY,
+ PRED_FLAG_FIRST_MATCH)
+
+/* Edge causing loop to terminate is probably not taken. */
+DEF_PREDICTOR (PRED_LOOP_EXIT, "loop exit", HITRATE (89),
+ PRED_FLAG_FIRST_MATCH)
+
+/* Same as LOOP_EXIT but for loops containing recursive call. */
+DEF_PREDICTOR (PRED_LOOP_EXIT_WITH_RECURSION, "loop exit with recursion",
+ HITRATE (78), PRED_FLAG_FIRST_MATCH)
+
+/* Edge causing loop to terminate by computing value used by later
+ conditional. */
+DEF_PREDICTOR (PRED_LOOP_EXTRA_EXIT, "extra loop exit", HITRATE (67),
+ PRED_FLAG_FIRST_MATCH)
+
+/* Pointers are usually not NULL. */
+DEF_PREDICTOR (PRED_POINTER, "pointer", HITRATE (70), 0)
+DEF_PREDICTOR (PRED_TREE_POINTER, "pointer (on trees)", HITRATE (70), 0)
+
+/* NE is probable, EQ not etc... */
+DEF_PREDICTOR (PRED_OPCODE_POSITIVE, "opcode values positive", HITRATE (59), 0)
+DEF_PREDICTOR (PRED_OPCODE_NONEQUAL, "opcode values nonequal", HITRATE (66), 0)
+DEF_PREDICTOR (PRED_FPOPCODE, "fp_opcode", HITRATE (90), 0)
+DEF_PREDICTOR (PRED_TREE_OPCODE_POSITIVE, "opcode values positive (on trees)",
+ HITRATE (59), 0)
+DEF_PREDICTOR (PRED_TREE_OPCODE_NONEQUAL, "opcode values nonequal (on trees)",
+ HITRATE (66), 0)
+DEF_PREDICTOR (PRED_TREE_FPOPCODE, "fp_opcode (on trees)", HITRATE (90), 0)
+
+/* Branch guarding call is probably taken. */
+DEF_PREDICTOR (PRED_CALL, "call", HITRATE (67), 0)
+
+/* Call predictors are ignored for now; let's keep the predictors
+ around so that their benefit can be measured. */
+DEF_PREDICTOR (PRED_INDIR_CALL, "indirect call", PROB_EVEN, 0)
+DEF_PREDICTOR (PRED_POLYMORPHIC_CALL, "polymorphic call", PROB_EVEN, 0)
+DEF_PREDICTOR (PRED_RECURSIVE_CALL, "recursive call", PROB_EVEN, 0)
+
+/* Branch causing function to terminate is probably not taken. */
+DEF_PREDICTOR (PRED_TREE_EARLY_RETURN, "early return (on trees)", HITRATE (66),
+ 0)
+
+/* Branch containing goto is probably not taken. */
+DEF_PREDICTOR (PRED_GOTO, "goto", HITRATE (66), 0)
+
+/* Branch ending with return constant is probably not taken. */
+DEF_PREDICTOR (PRED_CONST_RETURN, "const return", HITRATE (65), 0)
+
+/* Branch ending with return negative constant is probably not taken. */
+DEF_PREDICTOR (PRED_NEGATIVE_RETURN, "negative return", HITRATE (98), 0)
+
+/* Branch ending with a plain "return;" is probably not taken. */
+DEF_PREDICTOR (PRED_NULL_RETURN, "null return", HITRATE (71), 0)
+
+/* Branches that compare an induction variable to a loop bound are
+ extremely likely. */
+DEF_PREDICTOR (PRED_LOOP_IV_COMPARE_GUESS, "guess loop iv compare",
+ HITRATE (64), 0)
+
+/* Use number of loop iterations determined by # of iterations analysis
+ to set the probability of branches that compare the IV to the loop
+ bound variable. */
+DEF_PREDICTOR (PRED_LOOP_IV_COMPARE, "loop iv compare", PROB_UNINITIALIZED,
+ PRED_FLAG_FIRST_MATCH)
+
+/* In the following code
+ for (loop1)
+ if (cond)
+ for (loop2)
+ body;
+ guess that cond is unlikely. */
+DEF_PREDICTOR (PRED_LOOP_GUARD, "loop guard", HITRATE (73), 0)
+
+/* Same but for loops containing recursion. */
+DEF_PREDICTOR (PRED_LOOP_GUARD_WITH_RECURSION, "loop guard with recursion",
+ HITRATE (85), 0)
+
+/* The following predictors are used in Fortran. */
+
+/* Branches leading to an integer overflow are extremely unlikely. */
+DEF_PREDICTOR (PRED_FORTRAN_OVERFLOW, "Fortran overflow", PROB_ALWAYS,
+ PRED_FLAG_FIRST_MATCH)
+
+/* Branches leading to a failure status are unlikely. This can occur,
+ for example, on out-of-memory conditions. This predictor is only used
+ when the user explicitly asked for a return status. By default, the
+ code aborts, which is handled via PRED_NORETURN. */
+DEF_PREDICTOR (PRED_FORTRAN_FAIL_ALLOC, "Fortran fail alloc",
+ PROB_VERY_LIKELY, 0)
+
+/* This predictor is used for allocating already allocated memory or
+ deallocating an already deallocated allocatable. */
+DEF_PREDICTOR (PRED_FORTRAN_REALLOC, "Fortran repeated allocation/deallocation",
+ PROB_LIKELY, 0)
+
+/* Branches leading to an I/O failure status are unlikely. This predictor
+ is used for I/O failures such as invalid unit numbers. It only applies
+ when the user explicitly asked for a return status. By default, the
+ code aborts, which is handled via PRED_NORETURN. */
+DEF_PREDICTOR (PRED_FORTRAN_FAIL_IO, "Fortran fail IO", HITRATE (85), 0)
+
+/* Branches leading to a run-time warning message that is printed only once
+ are unlikely. The print-warning branch itself can be likely or unlikely. */
+DEF_PREDICTOR (PRED_FORTRAN_WARN_ONCE, "Fortran warn once", HITRATE (75), 0)
+
+/* Branch belonging to a zero-sized array. */
+DEF_PREDICTOR (PRED_FORTRAN_SIZE_ZERO, "Fortran zero-sized array", \
+ HITRATE (99), 0)
+
+/* Branch belonging to an invalid bound index, in a context where it is
+ standard-conforming and well defined but rather pointless and, hence, rather
+ unlikely to occur. */
+DEF_PREDICTOR (PRED_FORTRAN_INVALID_BOUND, "Fortran invalid bound", \
+ HITRATE (90), 0)
+
+/* Branch belonging to the handling of absent optional arguments. This
+ predictor is used when an optional dummy argument, associated with an
+ absent argument, is passed on as actual argument to another procedure,
+ which in turn has an optional argument. */
+DEF_PREDICTOR (PRED_FORTRAN_ABSENT_DUMMY, "Fortran absent dummy", \
+ HITRATE (60), 0)
+
+/* Fortran DO statement generates a pre-header guard:
+ empty = (step > 0 ? to < from : to > from), which can be predicted
+ to be very likely. */
+DEF_PREDICTOR (PRED_FORTRAN_LOOP_PREHEADER, "Fortran loop preheader", \
+ HITRATE (99), 0)
+
+/* Fortran assumed size arrays can be non-contiguous, so they need
+ to be repacked. */
+
+DEF_PREDICTOR (PRED_FORTRAN_CONTIGUOUS, "Fortran contiguous", \
+ HITRATE (75), 0)
+
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/predict.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/predict.h
new file mode 100644
index 0000000..d9a7fc3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/predict.h
@@ -0,0 +1,111 @@
+/* Definitions for branch prediction routines in the GNU compiler.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_PREDICT_H
+#define GCC_PREDICT_H
+
+#include "profile-count.h"
+
+/* Named values for otherwise random guesstimates.
+ PROB_VERY_UNLIKELY should be small enough that a basic block predicted
+ by it falls below HOT_BB_FREQUENCY_FRACTION. */
+#define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 2000 - 1)
+#define PROB_EVEN (REG_BR_PROB_BASE / 2)
+#define PROB_VERY_LIKELY (REG_BR_PROB_BASE - PROB_VERY_UNLIKELY)
+#define PROB_ALWAYS (REG_BR_PROB_BASE)
+#define PROB_UNLIKELY (REG_BR_PROB_BASE / 5 - 1)
+#define PROB_LIKELY (REG_BR_PROB_BASE - PROB_UNLIKELY)
+#define PROB_UNINITIALIZED (-1)
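+
+/* With REG_BR_PROB_BASE equal to 10000, these work out to:
+ PROB_VERY_UNLIKELY == 4, PROB_UNLIKELY == 1999, PROB_EVEN == 5000,
+ PROB_LIKELY == 8001 and PROB_VERY_LIKELY == 9996. */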
+
+#define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) ENUM,
+enum br_predictor
+{
+#include "predict.def"
+
+ /* Upper bound on predictors. */
+ END_PREDICTORS
+};
+#undef DEF_PREDICTOR
+enum prediction
+{
+ NOT_TAKEN,
+ TAKEN
+};
+
+/* In emit-rtl.cc. */
+extern profile_probability split_branch_probability;
+
+extern gcov_type get_hot_bb_threshold (void);
+extern void set_hot_bb_threshold (gcov_type);
+extern bool maybe_hot_count_p (struct function *, profile_count);
+extern bool maybe_hot_bb_p (struct function *, const_basic_block);
+extern bool maybe_hot_edge_p (edge);
+extern bool probably_never_executed_bb_p (struct function *, const_basic_block);
+extern bool probably_never_executed_edge_p (struct function *, edge);
+extern enum optimize_size_level optimize_function_for_size_p (struct function *);
+extern bool optimize_function_for_speed_p (struct function *);
+extern optimization_type function_optimization_type (struct function *);
+extern enum optimize_size_level optimize_bb_for_size_p (const_basic_block);
+extern bool optimize_bb_for_speed_p (const_basic_block);
+extern optimization_type bb_optimization_type (const_basic_block);
+extern enum optimize_size_level optimize_edge_for_size_p (edge);
+extern bool optimize_edge_for_speed_p (edge);
+extern enum optimize_size_level optimize_insn_for_size_p (void);
+extern bool optimize_insn_for_speed_p (void);
+extern optimization_type insn_optimization_type ();
+extern enum optimize_size_level optimize_loop_for_size_p (class loop *);
+extern bool optimize_loop_for_speed_p (class loop *);
+extern bool optimize_loop_nest_for_speed_p (class loop *);
+extern enum optimize_size_level optimize_loop_nest_for_size_p (class loop *);
+extern bool predictable_edge_p (edge);
+extern void rtl_profile_for_bb (basic_block);
+extern void rtl_profile_for_edge (edge);
+extern void default_rtl_profile (void);
+extern bool rtl_predicted_by_p (const_basic_block, enum br_predictor);
+extern bool gimple_predicted_by_p (const_basic_block, enum br_predictor);
+extern bool edge_probability_reliable_p (const_edge);
+extern bool br_prob_note_reliable_p (const_rtx);
+extern void predict_insn_def (rtx_insn *, enum br_predictor, enum prediction);
+extern void rtl_predict_edge (edge, enum br_predictor, int);
+extern void gimple_predict_edge (edge, enum br_predictor, int);
+extern void remove_predictions_associated_with_edge (edge);
+extern void predict_edge_def (edge, enum br_predictor, enum prediction);
+extern void invert_br_probabilities (rtx);
+extern void guess_outgoing_edge_probabilities (basic_block);
+extern void tree_guess_outgoing_edge_probabilities (basic_block);
+extern void tree_estimate_probability (bool);
+extern void handle_missing_profiles (void);
+extern bool update_max_bb_count (void);
+extern bool expensive_function_p (int);
+extern void estimate_bb_frequencies (bool);
+extern void compute_function_frequency (void);
+extern tree build_predict_expr (enum br_predictor, enum prediction);
+extern const char *predictor_name (enum br_predictor);
+extern void rebuild_frequencies (void);
+extern void report_predictor_hitrates (void);
+extern void force_edge_cold (edge, bool);
+extern void propagate_unlikely_bbs_forward (void);
+extern void change_edge_frequency (edge, profile_probability);
+
+extern void add_reg_br_prob_note (rtx_insn *, profile_probability);
+
+/* In ipa-pure-const.cc */
+extern void warn_function_cold (tree);
+
+#endif /* GCC_PREDICT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/prefix.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/prefix.h
new file mode 100644
index 0000000..73a4175
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/prefix.h
@@ -0,0 +1,40 @@
+/* Provide prototypes for functions exported from prefix.cc.
+ Copyright (C) 1999-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at
+your option) any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Library General Public
+License along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_PREFIX_H
+#define GCC_PREFIX_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* These functions are called by the Ada frontend with C convention. */
+
+/* Update PATH using KEY if PATH starts with PREFIX. The returned
+ string is always malloc-ed, and the caller is responsible for
+ freeing it. */
+extern char *update_path (const char *path, const char *key);
+extern void set_std_prefix (const char *, int);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ! GCC_PREFIX_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/pretty-print.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/pretty-print.h
new file mode 100644
index 0000000..0230a28
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/pretty-print.h
@@ -0,0 +1,443 @@
+/* Various declarations for language-independent pretty-print subroutines.
+ Copyright (C) 2002-2023 Free Software Foundation, Inc.
+ Contributed by Gabriel Dos Reis <gdr@integrable-solutions.net>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_PRETTY_PRINT_H
+#define GCC_PRETTY_PRINT_H
+
+#include "obstack.h"
+#include "diagnostic-url.h"
+
+/* Maximum number of format string arguments. */
+#define PP_NL_ARGMAX 30
+
+/* The type of a text to be formatted according to a format specification,
+ along with a list of arguments. */
+struct text_info
+{
+ const char *format_spec;
+ va_list *args_ptr;
+ int err_no; /* for %m */
+ void **x_data;
+ rich_location *m_richloc;
+
+ void set_location (unsigned int idx, location_t loc,
+ enum range_display_kind range_display_kind);
+ location_t get_location (unsigned int index_of_location) const;
+};
+
+/* How often diagnostics are prefixed by their locations:
+ o DIAGNOSTICS_SHOW_PREFIX_NEVER: never - not yet supported;
+ o DIAGNOSTICS_SHOW_PREFIX_ONCE: emit only once;
+ o DIAGNOSTICS_SHOW_PREFIX_EVERY_LINE: emit each time a physical
+ line is started. */
+enum diagnostic_prefixing_rule_t
+{
+ DIAGNOSTICS_SHOW_PREFIX_ONCE = 0x0,
+ DIAGNOSTICS_SHOW_PREFIX_NEVER = 0x1,
+ DIAGNOSTICS_SHOW_PREFIX_EVERY_LINE = 0x2
+};
+
+/* The chunk_info data structure forms a stack of the results from the
+ first phase of formatting (pp_format) which have not yet been
+ output (pp_output_formatted_text). A stack is necessary because
+ the diagnostic starter may decide to generate its own output by way
+ of the formatter. */
+struct chunk_info
+{
+ /* Pointer to previous chunk on the stack. */
+ struct chunk_info *prev;
+
+ /* Array of chunks to output. Each chunk is a NUL-terminated string.
+ In the first phase of formatting, even-numbered chunks are
+ to be output verbatim, odd-numbered chunks are format specifiers.
+ The second phase replaces all odd-numbered chunks with formatted
+ text, and the third phase simply emits all the chunks in sequence
+ with appropriate line-wrapping. */
+ const char *args[PP_NL_ARGMAX * 2];
+};
+
+/* The output buffer datatype. This is best seen as an abstract datatype
+ whose fields should not be accessed directly by clients. */
+class output_buffer
+{
+public:
+ output_buffer ();
+ ~output_buffer ();
+
+ /* Obstack where the text is built up. */
+ struct obstack formatted_obstack;
+
+ /* Obstack containing a chunked representation of the format
+ specification plus arguments. */
+ struct obstack chunk_obstack;
+
+ /* Currently active obstack: one of the above two. This is used so
+ that the text formatters don't need to know which phase we're in. */
+ struct obstack *obstack;
+
+ /* Stack of chunk arrays. These come from the chunk_obstack. */
+ struct chunk_info *cur_chunk_array;
+
+ /* Where to output formatted text. */
+ FILE *stream;
+
+ /* The number of characters output so far. */
+ int line_length;
+
+ /* This must be large enough to hold any printed integer or
+ floating-point value. */
+ char digit_buffer[128];
+
+ /* Nonzero means that text should be flushed when
+ appropriate. Otherwise, text is buffered until either
+ pp_really_flush or pp_clear_output_area is called. */
+ bool flush_p;
+};
+
+/* Finishes constructing a NULL-terminated character string representing
+ the buffered text. */
+inline const char *
+output_buffer_formatted_text (output_buffer *buff)
+{
+ obstack_1grow (buff->obstack, '\0');
+ return (const char *) obstack_base (buff->obstack);
+}
+
+/* Append to the output buffer a string specified by its
+ STARTing character and LENGTH. */
+inline void
+output_buffer_append_r (output_buffer *buff, const char *start, int length)
+{
+ gcc_checking_assert (start);
+ obstack_grow (buff->obstack, start, length);
+ for (int i = 0; i < length; i++)
+ if (start[i] == '\n')
+ buff->line_length = 0;
+ else
+ buff->line_length++;
+}
+
+/* Return a pointer to the last character emitted in the
+ output_buffer. A NULL pointer means no character available. */
+inline const char *
+output_buffer_last_position_in_text (const output_buffer *buff)
+{
+ const char *p = NULL;
+ struct obstack *text = buff->obstack;
+
+ if (obstack_base (text) != obstack_next_free (text))
+ p = ((const char *) obstack_next_free (text)) - 1;
+ return p;
+}
+
+
+/* The type of pretty-printer flags passed to clients. */
+typedef unsigned int pp_flags;
+
+enum pp_padding
+{
+ pp_none, pp_before, pp_after
+};
+
+/* Structure for switching in and out of verbatim mode in a convenient
+ manner. */
+struct pp_wrapping_mode_t
+{
+ /* Current prefixing rule. */
+ diagnostic_prefixing_rule_t rule;
+
+ /* The ideal upper bound on the number of characters per line, as
+ suggested by the front end. */
+ int line_cutoff;
+};
+
+/* Maximum characters per line in automatic line wrapping mode.
+ Zero means don't wrap lines. */
+#define pp_line_cutoff(PP) (PP)->wrapping.line_cutoff
+
+/* Prefixing rule used in formatting a diagnostic message. */
+#define pp_prefixing_rule(PP) (PP)->wrapping.rule
+
+/* Get or set the wrapping mode as a single entity. */
+#define pp_wrapping_mode(PP) (PP)->wrapping
+
+/* The type of a hook that formats client-specific data onto a pretty_printer.
+ A client-supplied formatter returns true if everything goes well,
+ otherwise it returns false. */
+typedef bool (*printer_fn) (pretty_printer *, text_info *, const char *,
+ int, bool, bool, bool, bool *, const char **);
+
+/* Client supplied function used to decode formats. */
+#define pp_format_decoder(PP) (PP)->format_decoder
+
+/* Base class for an optional client-supplied object for doing additional
+ processing between stages 2 and 3 of formatted printing. */
+class format_postprocessor
+{
+ public:
+ virtual ~format_postprocessor () {}
+ virtual format_postprocessor *clone() const = 0;
+ virtual void handle (pretty_printer *) = 0;
+};
+
+/* TRUE if a newline character needs to be added before further
+ formatting. */
+#define pp_needs_newline(PP) (PP)->need_newline
+
+/* True if PRETTY-PRINTER is in line-wrapping mode. */
+#define pp_is_wrapping_line(PP) (pp_line_cutoff (PP) > 0)
+
+/* The amount of whitespace to be emitted when starting a new line. */
+#define pp_indentation(PP) (PP)->indent_skip
+
+/* True if identifiers are translated to the locale character set on
+ output. */
+#define pp_translate_identifiers(PP) (PP)->translate_identifiers
+
+/* True if colors should be shown. */
+#define pp_show_color(PP) (PP)->show_color
+
+/* The data structure that contains the bare minimum required to do
+ proper pretty-printing. Clients may derive from this structure
+ and add additional fields they need. */
+class pretty_printer
+{
+public:
+ /* Default-construct a pretty printer with the specified maximum
+ line length cutoff. */
+ explicit pretty_printer (int = 0);
+ explicit pretty_printer (const pretty_printer &other);
+
+ virtual ~pretty_printer ();
+
+ virtual pretty_printer *clone () const;
+
+ /* Where we print external representation of ENTITY. */
+ output_buffer *buffer;
+
+ /* The prefix for each new line. If non-NULL, this is "owned" by the
+ pretty_printer, and will eventually be freed. */
+ char *prefix;
+
+ /* Where to put whitespace around the entity being formatted. */
+ pp_padding padding;
+
+ /* The real upper bound of number of characters per line, taking into
+ account the case of a very very looong prefix. */
+ int maximum_length;
+
+ /* Indentation count. */
+ int indent_skip;
+
+ /* Current wrapping mode. */
+ pp_wrapping_mode_t wrapping;
+
+ /* If non-NULL, this function formats a TEXT into the BUFFER. When called,
+ TEXT->format_spec points to a format code. FORMAT_DECODER should call
+ pp_string (and related functions) to add data to the BUFFER.
+ FORMAT_DECODER can read arguments from *TEXT->args_ptr using VA_ARG.
+ If the BUFFER needs additional characters from the format string, it
+ should advance the TEXT->format_spec as it goes. When FORMAT_DECODER
+ returns, TEXT->format_spec should point to the last character processed.
+ The QUOTE and BUFFER_PTR are passed in, to allow for deferred handling
+ of format codes (e.g. %H and %I in the C++ frontend). */
+ printer_fn format_decoder;
+
+ /* If non-NULL, this is called by pp_format once after all format codes
+ have been processed, to allow for client-specific postprocessing.
+ This is used by the C++ frontend for handling the %H and %I
+ format codes (which interact with each other). */
+ format_postprocessor *m_format_postprocessor;
+
+ /* Nonzero if current PREFIX was emitted at least once. */
+ bool emitted_prefix;
+
+ /* Nonzero means one should emit a newline before outputting anything. */
+ bool need_newline;
+
+ /* Nonzero means identifiers are translated to the locale character
+ set on output. */
+ bool translate_identifiers;
+
+ /* Nonzero means that text should be colorized. */
+ bool show_color;
+
+ /* Whether URLs should be emitted, and which terminator to use. */
+ diagnostic_url_format url_format;
+};
+
+inline const char *
+pp_get_prefix (const pretty_printer *pp) { return pp->prefix; }
+
+#define pp_space(PP) pp_character (PP, ' ')
+#define pp_left_paren(PP) pp_character (PP, '(')
+#define pp_right_paren(PP) pp_character (PP, ')')
+#define pp_left_bracket(PP) pp_character (PP, '[')
+#define pp_right_bracket(PP) pp_character (PP, ']')
+#define pp_left_brace(PP) pp_character (PP, '{')
+#define pp_right_brace(PP) pp_character (PP, '}')
+#define pp_semicolon(PP) pp_character (PP, ';')
+#define pp_comma(PP) pp_character (PP, ',')
+#define pp_dot(PP) pp_character (PP, '.')
+#define pp_colon(PP) pp_character (PP, ':')
+#define pp_colon_colon(PP) pp_string (PP, "::")
+#define pp_arrow(PP) pp_string (PP, "->")
+#define pp_equal(PP) pp_character (PP, '=')
+#define pp_question(PP) pp_character (PP, '?')
+#define pp_bar(PP) pp_character (PP, '|')
+#define pp_bar_bar(PP) pp_string (PP, "||")
+#define pp_carret(PP) pp_character (PP, '^')
+#define pp_ampersand(PP) pp_character (PP, '&')
+#define pp_ampersand_ampersand(PP) pp_string (PP, "&&")
+#define pp_less(PP) pp_character (PP, '<')
+#define pp_less_equal(PP) pp_string (PP, "<=")
+#define pp_greater(PP) pp_character (PP, '>')
+#define pp_greater_equal(PP) pp_string (PP, ">=")
+#define pp_plus(PP) pp_character (PP, '+')
+#define pp_minus(PP) pp_character (PP, '-')
+#define pp_star(PP) pp_character (PP, '*')
+#define pp_slash(PP) pp_character (PP, '/')
+#define pp_modulo(PP) pp_character (PP, '%')
+#define pp_exclamation(PP) pp_character (PP, '!')
+#define pp_complement(PP) pp_character (PP, '~')
+#define pp_quote(PP) pp_character (PP, '\'')
+#define pp_backquote(PP) pp_character (PP, '`')
+#define pp_doublequote(PP) pp_character (PP, '"')
+#define pp_underscore(PP) pp_character (PP, '_')
+#define pp_maybe_newline_and_indent(PP, N) \
+ if (pp_needs_newline (PP)) pp_newline_and_indent (PP, N)
+#define pp_scalar(PP, FORMAT, SCALAR) \
+ do \
+ { \
+ sprintf (pp_buffer (PP)->digit_buffer, FORMAT, SCALAR); \
+ pp_string (PP, pp_buffer (PP)->digit_buffer); \
+ } \
+ while (0)
+#define pp_decimal_int(PP, I) pp_scalar (PP, "%d", I)
+#define pp_unsigned_wide_integer(PP, I) \
+ pp_scalar (PP, HOST_WIDE_INT_PRINT_UNSIGNED, (unsigned HOST_WIDE_INT) I)
+#define pp_wide_int(PP, W, SGN) \
+ do \
+ { \
+ print_dec (W, pp_buffer (PP)->digit_buffer, SGN); \
+ pp_string (PP, pp_buffer (PP)->digit_buffer); \
+ } \
+ while (0)
+#define pp_vrange(PP, R) \
+ do \
+ { \
+ vrange_printer vrange_pp (PP); \
+ (R)->accept (vrange_pp); \
+ } \
+ while (0)
+#define pp_double(PP, F) pp_scalar (PP, "%f", F)
+#define pp_pointer(PP, P) pp_scalar (PP, "%p", P)
+
+#define pp_identifier(PP, ID) pp_string (PP, (pp_translate_identifiers (PP) \
+ ? identifier_to_locale (ID) \
+ : (ID)))
+
+
+#define pp_buffer(PP) (PP)->buffer
+
+extern void pp_set_line_maximum_length (pretty_printer *, int);
+extern void pp_set_prefix (pretty_printer *, char *);
+extern char *pp_take_prefix (pretty_printer *);
+extern void pp_destroy_prefix (pretty_printer *);
+extern int pp_remaining_character_count_for_line (pretty_printer *);
+extern void pp_clear_output_area (pretty_printer *);
+extern const char *pp_formatted_text (pretty_printer *);
+extern const char *pp_last_position_in_text (const pretty_printer *);
+extern void pp_emit_prefix (pretty_printer *);
+extern void pp_append_text (pretty_printer *, const char *, const char *);
+extern void pp_newline_and_flush (pretty_printer *);
+extern void pp_newline_and_indent (pretty_printer *, int);
+extern void pp_separate_with (pretty_printer *, char);
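+
+/* Illustrative usage (a minimal sketch): a default-constructed printer
+ buffers text in memory, without a stream attached:
+
+ pretty_printer pp;
+ pp_string (&pp, "offset ");
+ pp_decimal_int (&pp, 42);
+
+ after which pp_formatted_text (&pp) returns "offset 42". */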
+
+/* If we haven't already defined a front-end-specific diagnostics
+ style, use the generic one. */
+#ifdef GCC_DIAG_STYLE
+#define GCC_PPDIAG_STYLE GCC_DIAG_STYLE
+#else
+#define GCC_PPDIAG_STYLE __gcc_diag__
+#endif
+
+/* This header may be included before diagnostic-core.h, hence the duplicate
+ definitions to allow for GCC-specific formats. */
+#if GCC_VERSION >= 3005
+#define ATTRIBUTE_GCC_PPDIAG(m, n) __attribute__ ((__format__ (GCC_PPDIAG_STYLE, m ,n))) ATTRIBUTE_NONNULL(m)
+#else
+#define ATTRIBUTE_GCC_PPDIAG(m, n) ATTRIBUTE_NONNULL(m)
+#endif
+extern void pp_printf (pretty_printer *, const char *, ...)
+ ATTRIBUTE_GCC_PPDIAG(2,3);
+
+extern void pp_verbatim (pretty_printer *, const char *, ...)
+ ATTRIBUTE_GCC_PPDIAG(2,3);
+extern void pp_flush (pretty_printer *);
+extern void pp_really_flush (pretty_printer *);
+extern void pp_format (pretty_printer *, text_info *);
+extern void pp_output_formatted_text (pretty_printer *);
+extern void pp_format_verbatim (pretty_printer *, text_info *);
+
+extern void pp_indent (pretty_printer *);
+extern void pp_newline (pretty_printer *);
+extern void pp_character (pretty_printer *, int);
+extern void pp_string (pretty_printer *, const char *);
+
+extern void pp_write_text_to_stream (pretty_printer *);
+extern void pp_write_text_as_dot_label_to_stream (pretty_printer *, bool);
+extern void pp_write_text_as_html_like_dot_to_stream (pretty_printer *pp);
+
+extern void pp_maybe_space (pretty_printer *);
+
+extern void pp_begin_quote (pretty_printer *, bool);
+extern void pp_end_quote (pretty_printer *, bool);
+
+extern void pp_begin_url (pretty_printer *pp, const char *url);
+extern void pp_end_url (pretty_printer *pp);
+
+/* Switch into verbatim mode and return the old mode. */
+inline pp_wrapping_mode_t
+pp_set_verbatim_wrapping_ (pretty_printer *pp)
+{
+ pp_wrapping_mode_t oldmode = pp_wrapping_mode (pp);
+ pp_line_cutoff (pp) = 0;
+ pp_prefixing_rule (pp) = DIAGNOSTICS_SHOW_PREFIX_NEVER;
+ return oldmode;
+}
+#define pp_set_verbatim_wrapping(PP) pp_set_verbatim_wrapping_ (PP)
+
+extern const char *identifier_to_locale (const char *);
+extern void *(*identifier_to_locale_alloc) (size_t);
+extern void (*identifier_to_locale_free) (void *);
+
+/* Print I to PP in decimal. */
+
+inline void
+pp_wide_integer (pretty_printer *pp, HOST_WIDE_INT i)
+{
+ pp_scalar (pp, HOST_WIDE_INT_PRINT_DEC, i);
+}
+
+template<unsigned int N, typename T>
+void pp_wide_integer (pretty_printer *pp, const poly_int_pod<N, T> &);
+
+#endif /* GCC_PRETTY_PRINT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/print-rtl.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/print-rtl.h
new file mode 100644
index 0000000..ddaec10
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/print-rtl.h
@@ -0,0 +1,165 @@
+/* Print RTL for GCC.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_PRINT_RTL_H
+#define GCC_PRINT_RTL_H
+
+#ifndef GENERATOR_FILE
+#include "bitmap.h"
+#endif /* #ifndef GENERATOR_FILE */
+
+class rtx_reuse_manager;
+
+/* A class for writing rtx to a FILE *. */
+
+class rtx_writer
+{
+ public:
+ rtx_writer (FILE *outfile, int ind, bool simple, bool compact,
+ rtx_reuse_manager *reuse_manager);
+
+ void print_rtx (const_rtx in_rtx);
+ void print_rtl (const_rtx rtx_first);
+ int print_rtl_single_with_indent (const_rtx x, int ind);
+
+ void finish_directive ();
+
+ private:
+ void print_rtx_operand_code_0 (const_rtx in_rtx, int idx);
+ void print_rtx_operand_code_e (const_rtx in_rtx, int idx);
+ void print_rtx_operand_codes_E_and_V (const_rtx in_rtx, int idx);
+ void print_rtx_operand_code_i (const_rtx in_rtx, int idx);
+ void print_rtx_operand_code_r (const_rtx in_rtx);
+ void print_rtx_operand_code_u (const_rtx in_rtx, int idx);
+ void print_rtx_operand (const_rtx in_rtx, int idx);
+ bool operand_has_default_value_p (const_rtx in_rtx, int idx);
+
+ private:
+ FILE *m_outfile;
+ int m_sawclose;
+ int m_indent;
+ bool m_in_call_function_usage;
+
+ /* True means use simplified format without flags, modes, etc. */
+ bool m_simple;
+
+ /* If true, use compact dump format:
+ - PREV/NEXT_INSN UIDs are omitted
+ - INSN_CODEs are omitted,
+ - register numbers are omitted for hard and virtual regs, and
+ non-virtual pseudos are offset relative to the first such reg, and
+ printed with a '%' sigil e.g. "%0" for (LAST_VIRTUAL_REGISTER + 1),
+ - insn names are prefixed with "c" (e.g. "cinsn", "cnote", etc). */
+ bool m_compact;
+
+#ifndef GENERATOR_FILE
+ /* An optional instance of rtx_reuse_manager. */
+ rtx_reuse_manager *m_rtx_reuse_manager;
+#endif
+};
+
+#ifdef BUFSIZ
+extern void print_rtl (FILE *, const_rtx);
+#endif
+extern void print_rtx_insn_vec (FILE *file, const vec<rtx_insn *> &vec);
+
+extern void dump_value_slim (FILE *, const_rtx, int);
+extern void dump_insn_slim (FILE *, const rtx_insn *);
+extern void dump_rtl_slim (FILE *, const rtx_insn *, const rtx_insn *,
+ int, int);
+extern void print_value (pretty_printer *, const_rtx, int);
+extern void print_pattern (pretty_printer *, const_rtx, int);
+extern void print_insn (pretty_printer *pp, const rtx_insn *x, int verbose);
+extern void print_insn_with_notes (pretty_printer *, const rtx_insn *);
+
+extern void rtl_dump_bb_for_graph (pretty_printer *, basic_block);
+extern const char *str_pattern_slim (const_rtx);
+
+extern void print_rtx_function (FILE *file, function *fn, bool compact);
+
+#ifndef GENERATOR_FILE
+
+/* For some rtx codes (such as SCRATCH), instances are defined to only be
+ equal for pointer equality: two distinct SCRATCH instances are non-equal.
+ copy_rtx preserves this equality by reusing the SCRATCH instance.
+
+ For example, in this x86 instruction:
+
+ (cinsn (set (mem/v:BLK (scratch:DI) [0 A8])
+ (unspec:BLK [
+ (mem/v:BLK (scratch:DI) [0 A8])
+ ] UNSPEC_MEMORY_BLOCKAGE)) "test.c":2
+ (nil))
+
+ the two instances of "(scratch:DI)" are actually the same underlying
+ rtx pointer (and thus "equal"), and the insn will only be recognized
+ (as "*memory_blockage") if this pointer-equality is preserved.
+
+ To be able to preserve this pointer-equality when round-tripping
+ through dumping/loading the rtl, we need some syntax. The first
+ time a reused rtx is encountered in the dump, we prefix it with
+ a reuse ID:
+
+ (0|scratch:DI)
+
+ Subsequent references to the rtx in the dump can be expressed using
+ "reuse_rtx" e.g.:
+
+ (reuse_rtx 0)
+
+ This class is responsible for tracking a set of reuse IDs during a dump.
+
+ Dumping with reuse-support is done in two passes:
+
+ (a) a first pass in which "preprocess" is called on each top-level rtx
+ to be seen in the dump. This traverses the rtx and its descendants,
+ identifying rtx that will be seen more than once in the actual dump,
+ and assigning them reuse IDs.
+
+ (b) the actual dump, via print_rtx etc. print_rtx detects the presence
+ of a live rtx_reuse_manager and uses it if there is one. Any rtx
+ that was assigned a reuse ID is printed with that ID the first time
+ it is seen, and is printed as "(reuse_rtx ID)" subsequently.
+
+ The first phase is needed since otherwise there would be no way to tell
+ if an rtx will be reused when first encountering it. */
+
+class rtx_reuse_manager
+{
+ public:
+ rtx_reuse_manager ();
+
+ /* The first pass. */
+ void preprocess (const_rtx x);
+
+ /* The second pass (within print_rtx). */
+ bool has_reuse_id (const_rtx x, int *out);
+ bool seen_def_p (int reuse_id);
+ void set_seen_def (int reuse_id);
+
+ private:
+ hash_map<const_rtx, int> m_rtx_occurrence_count;
+ hash_map<const_rtx, int> m_rtx_reuse_ids;
+ auto_bitmap m_defs_seen;
+ int m_next_id;
+};
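+
+/* Usage sketch (illustrative only, mirroring the two-pass scheme described
+   above; INSNS stands for the insn sequence about to be dumped):
+
+     rtx_reuse_manager r;
+     for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
+       r.preprocess (insn);                          pass (a): assign reuse IDs
+     rtx_writer w (dump_file, 0, false, true, &r);   compact dump with reuse
+     for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
+       w.print_rtl_single_with_indent (insn, 0);     pass (b): actual dump  */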
+
+#endif /* #ifndef GENERATOR_FILE */
+
+#endif // GCC_PRINT_RTL_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/print-tree.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/print-tree.h
new file mode 100644
index 0000000..7683730
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/print-tree.h
@@ -0,0 +1,50 @@
+/* Declarations for printing trees in human readable form
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_PRINT_TREE_H
+#define GCC_PRINT_TREE_H
+
+extern void debug_tree (tree);
+extern void debug_raw (const tree_node &ref);
+extern void debug_raw (const tree_node *ptr);
+extern void debug (const tree_node &ref);
+extern void debug (const tree_node *ptr);
+extern void debug_verbose (const tree_node &ref);
+extern void debug_verbose (const tree_node *ptr);
+extern void debug_head (const tree_node &ref);
+extern void debug_head (const tree_node *ptr);
+extern void debug_body (const tree_node &ref);
+extern void debug_body (const tree_node *ptr);
+extern void debug (vec<tree, va_gc> &ref);
+extern void debug (vec<tree, va_gc> *ptr);
+extern void debug_raw (vec<tree, va_gc> &ref);
+extern void debug_raw (vec<tree, va_gc> *ptr);
+#ifdef BUFSIZ
+extern void dump_addr (FILE*, const char *, const void *);
+extern void print_node (FILE *, const char *, tree, int,
+ bool brief_for_visited = true);
+extern void print_node_brief (FILE *, const char *, const_tree, int);
+extern void indent_to (FILE *, int);
+#endif
+#define PRINT_DECL_ORIGIN 0x1
+#define PRINT_DECL_NAME 0x2
+#define PRINT_DECL_UNIQUE_NAME 0x4
+extern void print_decl_identifier (FILE *, tree, int flags);
+
+#endif // GCC_PRINT_TREE_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/profile-count.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/profile-count.h
new file mode 100644
index 0000000..0739e26
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/profile-count.h
@@ -0,0 +1,1294 @@
+/* Profile counter container type.
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+ Contributed by Jan Hubicka
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_PROFILE_COUNT_H
+#define GCC_PROFILE_COUNT_H
+
+struct function;
+struct profile_count;
+class sreal;
+
+/* Quality of the profile count. Because gengtype does not support enums
+ inside of classes, this is in global namespace. */
+enum profile_quality {
+ /* Uninitialized value. */
+ UNINITIALIZED_PROFILE,
+
+ /* Profile is based on static branch prediction heuristics and may
+ or may not match reality. It is local to the function and cannot be
+ compared inter-procedurally. Never used by probabilities (they are
+ always local).
+ */
+ GUESSED_LOCAL,
+
+ /* Profile was read from feedback and was 0; we used local heuristics to
+ guess better. This is the case of functions not run during the profile
+ feedback run. Never used by probabilities. */
+ GUESSED_GLOBAL0,
+
+ /* Same as GUESSED_GLOBAL0 but the global count is adjusted to 0. */
+ GUESSED_GLOBAL0_ADJUSTED,
+
+ /* Profile is based on static branch prediction heuristics. It may or may
+ not reflect reality but it can be compared inter-procedurally
+ (for example, we inlined a function without profile feedback into a
+ function with feedback and propagated from that).
+ Never used by probabilities. */
+ GUESSED,
+
+ /* Profile was determined by autofdo. */
+ AFDO,
+
+ /* Profile was originally based on feedback but it was adjusted
+ by code duplicating optimization. It may not precisely reflect the
+ particular code path. */
+ ADJUSTED,
+
+ /* Profile was read from profile feedback or determined by accurate static
+ method. */
+ PRECISE
+};
+
+extern const char *profile_quality_as_string (enum profile_quality);
+extern bool parse_profile_quality (const char *value,
+ profile_quality *quality);
+
+/* The base value for branch probability notes and edge probabilities. */
+#define REG_BR_PROB_BASE 10000
+
+#define RDIV(X,Y) (((X) + (Y) / 2) / (Y))
+
+bool slow_safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res);
+
+/* Compute RES = (A*B + C/2)/C with capping, and return false if overflow
+ happened. */
+
+inline bool
+safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
+{
+#if (GCC_VERSION >= 5000)
+ uint64_t tmp;
+ if (!__builtin_mul_overflow (a, b, &tmp)
+ && !__builtin_add_overflow (tmp, c/2, &tmp))
+ {
+ *res = tmp / c;
+ return true;
+ }
+ if (c == 1)
+ {
+ *res = (uint64_t) -1;
+ return false;
+ }
+#else
+ if (a < ((uint64_t)1 << 31)
+ && b < ((uint64_t)1 << 31)
+ && c < ((uint64_t)1 << 31))
+ {
+ *res = (a * b + (c / 2)) / c;
+ return true;
+ }
+#endif
+ return slow_safe_scale_64bit (a, b, c, res);
+}
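+
+/* Example (illustrative): scaling a counter by 3/7 computes the rounded
+   quotient (A*B + C/2)/C without risking overflow in the 64-bit product:
+
+     uint64_t res;
+     bool ok = safe_scale_64bit (1000000, 3, 7, &res);
+
+   leaves ok == true and res == 428571 (3000000/7, rounded).  */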
+
+/* Data type to hold probabilities. It implements fixed-point arithmetic
+ with capping, so a probability is always in the range [0,1]; scaling that
+ requires values greater than 1 must be represented otherwise.
+
+ In addition to the actual value, the quality of the profile is tracked and
+ propagated through all operations. The special value UNINITIALIZED_PROFILE
+ is used for probabilities that have not been determined yet (for example
+ because of -fno-guess-branch-probability).
+
+ Typically probabilities are derived from profile feedback (via
+ probability_in_gcov_type), autoFDO, or guessed statically and then
+ propagated throughout the compilation.
+
+ Named probabilities are available:
+ - never (0 probability)
+ - guessed_never
+ - very_unlikely (1/2000 probability)
+ - unlikely (1/5 probability)
+ - even (1/2 probability)
+ - likely (4/5 probability)
+ - very_likely (1999/2000 probability)
+ - guessed_always
+ - always
+
+ Named probabilities other than never/always are assumed to be statically
+ guessed and thus not necessarily accurate. The difference between never
+ and guessed_never is that the former should be used only when a
+ well-behaving program will very likely not execute the "never" path,
+ for example a path leading to an abort () call or to exception handling.
+
+ Always and guessed_always probabilities are symmetric.
+
+ For legacy code we support conversion to/from REG_BR_PROB_BASE-based
+ fixed-point integer arithmetic. Once the code is converted to branch
+ probabilities, these conversions will probably go away because they are
+ lossy.
+*/
+
+class GTY((user)) profile_probability
+{
+ static const int n_bits = 29;
+ /* We can technically use ((uint32_t) 1 << (n_bits - 1)) - 2 but that
+ will lead to harder multiplication sequences. */
+ static const uint32_t max_probability = (uint32_t) 1 << (n_bits - 2);
+ static const uint32_t uninitialized_probability
+ = ((uint32_t) 1 << (n_bits - 1)) - 1;
+
+ uint32_t m_val : 29;
+ enum profile_quality m_quality : 3;
+
+ friend struct profile_count;
+public:
+ profile_probability (): m_val (uninitialized_probability),
+ m_quality (GUESSED)
+ {}
+
+ profile_probability (uint32_t val, profile_quality quality):
+ m_val (val), m_quality (quality)
+ {}
+
+ /* Named probabilities. */
+ static profile_probability never ()
+ {
+ profile_probability ret;
+ ret.m_val = 0;
+ ret.m_quality = PRECISE;
+ return ret;
+ }
+
+ static profile_probability guessed_never ()
+ {
+ profile_probability ret;
+ ret.m_val = 0;
+ ret.m_quality = GUESSED;
+ return ret;
+ }
+
+ static profile_probability very_unlikely ()
+ {
+ /* Be consistent with PROB_VERY_UNLIKELY in predict.h. */
+ profile_probability r = guessed_always () / 2000;
+ r.m_val--;
+ return r;
+ }
+
+ static profile_probability unlikely ()
+ {
+ /* Be consistent with PROB_UNLIKELY in predict.h. */
+ profile_probability r = guessed_always () / 5;
+ r.m_val--;
+ return r;
+ }
+
+ static profile_probability even ()
+ {
+ return guessed_always () / 2;
+ }
+
+ static profile_probability very_likely ()
+ {
+ return always () - very_unlikely ();
+ }
+
+ static profile_probability likely ()
+ {
+ return always () - unlikely ();
+ }
+
+ static profile_probability guessed_always ()
+ {
+ profile_probability ret;
+ ret.m_val = max_probability;
+ ret.m_quality = GUESSED;
+ return ret;
+ }
+
+ static profile_probability always ()
+ {
+ profile_probability ret;
+ ret.m_val = max_probability;
+ ret.m_quality = PRECISE;
+ return ret;
+ }
+
+ /* Probabilities which have not been initialized. Either because
+ initialization did not happen yet or because the profile is unknown. */
+ static profile_probability uninitialized ()
+ {
+ profile_probability c;
+ c.m_val = uninitialized_probability;
+ c.m_quality = GUESSED;
+ return c;
+ }
+
+ /* Return true if value has been initialized. */
+ bool initialized_p () const
+ {
+ return m_val != uninitialized_probability;
+ }
+
+ /* Return true if value can be trusted. */
+ bool reliable_p () const
+ {
+ return m_quality >= ADJUSTED;
+ }
+
+ /* Conversion from and to REG_BR_PROB_BASE integer fixed-point arithmetic.
+ This is mostly to support legacy code and should go away. */
+ static profile_probability from_reg_br_prob_base (int v)
+ {
+ profile_probability ret;
+ gcc_checking_assert (v >= 0 && v <= REG_BR_PROB_BASE);
+ ret.m_val = RDIV (v * (uint64_t) max_probability, REG_BR_PROB_BASE);
+ ret.m_quality = GUESSED;
+ return ret;
+ }
+
+ /* Return THIS with quality set to ADJUSTED. */
+ profile_probability adjusted () const
+ {
+ profile_probability ret = *this;
+ if (!initialized_p ())
+ return *this;
+ ret.m_quality = ADJUSTED;
+ return ret;
+ }
+
+ int to_reg_br_prob_base () const
+ {
+ gcc_checking_assert (initialized_p ());
+ return RDIV (m_val * (uint64_t) REG_BR_PROB_BASE, max_probability);
+ }
+
+ /* Conversion to and from RTL representation of profile probabilities. */
+ static profile_probability from_reg_br_prob_note (int v)
+ {
+ profile_probability ret;
+ ret.m_val = ((unsigned int)v) / 8;
+ ret.m_quality = (enum profile_quality)(v & 7);
+ return ret;
+ }
+
+ int to_reg_br_prob_note () const
+ {
+ gcc_checking_assert (initialized_p ());
+ int ret = m_val * 8 + m_quality;
+ gcc_checking_assert (from_reg_br_prob_note (ret) == *this);
+ return ret;
+ }
+
+ /* Return VAL1/VAL2. */
+ static profile_probability probability_in_gcov_type
+ (gcov_type val1, gcov_type val2)
+ {
+ profile_probability ret;
+ gcc_checking_assert (val1 >= 0 && val2 > 0);
+ if (val1 > val2)
+ ret.m_val = max_probability;
+ else
+ {
+ uint64_t tmp;
+ safe_scale_64bit (val1, max_probability, val2, &tmp);
+ gcc_checking_assert (tmp <= max_probability);
+ ret.m_val = tmp;
+ }
+ ret.m_quality = PRECISE;
+ return ret;
+ }
+
+ /* Basic operations. */
+ bool operator== (const profile_probability &other) const
+ {
+ return m_val == other.m_val && m_quality == other.m_quality;
+ }
+
+ profile_probability operator+ (const profile_probability &other) const
+ {
+ if (other == never ())
+ return *this;
+ if (*this == never ())
+ return other;
+ if (!initialized_p () || !other.initialized_p ())
+ return uninitialized ();
+
+ profile_probability ret;
+ ret.m_val = MIN ((uint32_t)(m_val + other.m_val), max_probability);
+ ret.m_quality = MIN (m_quality, other.m_quality);
+ return ret;
+ }
+
+ profile_probability &operator+= (const profile_probability &other)
+ {
+ if (other == never ())
+ return *this;
+ if (*this == never ())
+ {
+ *this = other;
+ return *this;
+ }
+ if (!initialized_p () || !other.initialized_p ())
+ return *this = uninitialized ();
+ else
+ {
+ m_val = MIN ((uint32_t)(m_val + other.m_val), max_probability);
+ m_quality = MIN (m_quality, other.m_quality);
+ }
+ return *this;
+ }
+
+ profile_probability operator- (const profile_probability &other) const
+ {
+ if (*this == never ()
+ || other == never ())
+ return *this;
+ if (!initialized_p () || !other.initialized_p ())
+ return uninitialized ();
+ profile_probability ret;
+ ret.m_val = m_val >= other.m_val ? m_val - other.m_val : 0;
+ ret.m_quality = MIN (m_quality, other.m_quality);
+ return ret;
+ }
+
+ profile_probability &operator-= (const profile_probability &other)
+ {
+ if (*this == never ()
+ || other == never ())
+ return *this;
+ if (!initialized_p () || !other.initialized_p ())
+ return *this = uninitialized ();
+ else
+ {
+ m_val = m_val >= other.m_val ? m_val - other.m_val : 0;
+ m_quality = MIN (m_quality, other.m_quality);
+ }
+ return *this;
+ }
+
+ profile_probability operator* (const profile_probability &other) const
+ {
+ if (*this == never ()
+ || other == never ())
+ return never ();
+ if (!initialized_p () || !other.initialized_p ())
+ return uninitialized ();
+ profile_probability ret;
+ ret.m_val = RDIV ((uint64_t)m_val * other.m_val, max_probability);
+ ret.m_quality = MIN (MIN (m_quality, other.m_quality), ADJUSTED);
+ return ret;
+ }
+
+ profile_probability &operator*= (const profile_probability &other)
+ {
+ if (*this == never ()
+ || other == never ())
+ return *this = never ();
+ if (!initialized_p () || !other.initialized_p ())
+ return *this = uninitialized ();
+ else
+ {
+ m_val = RDIV ((uint64_t)m_val * other.m_val, max_probability);
+ m_quality = MIN (MIN (m_quality, other.m_quality), ADJUSTED);
+ }
+ return *this;
+ }
+
+ profile_probability operator/ (const profile_probability &other) const
+ {
+ if (*this == never ())
+ return never ();
+ if (!initialized_p () || !other.initialized_p ())
+ return uninitialized ();
+ profile_probability ret;
+ /* If we get a probability above 1, mark it as unreliable and return 1. */
+ if (m_val >= other.m_val)
+ {
+ ret.m_val = max_probability;
+ ret.m_quality = MIN (MIN (m_quality, other.m_quality),
+ GUESSED);
+ return ret;
+ }
+ else if (!m_val)
+ ret.m_val = 0;
+ else
+ {
+ gcc_checking_assert (other.m_val);
+ ret.m_val = MIN (RDIV ((uint64_t)m_val * max_probability,
+ other.m_val),
+ max_probability);
+ }
+ ret.m_quality = MIN (MIN (m_quality, other.m_quality), ADJUSTED);
+ return ret;
+ }
+
+ profile_probability &operator/= (const profile_probability &other)
+ {
+ if (*this == never ())
+ return *this = never ();
+ if (!initialized_p () || !other.initialized_p ())
+ return *this = uninitialized ();
+ else
+ {
+ /* If we get a probability above 1, mark it as unreliable
+ and return 1. */
+ if (m_val > other.m_val)
+ {
+ m_val = max_probability;
+ m_quality = MIN (MIN (m_quality, other.m_quality),
+ GUESSED);
+ return *this;
+ }
+ else if (!m_val)
+ ;
+ else
+ {
+ gcc_checking_assert (other.m_val);
+ m_val = MIN (RDIV ((uint64_t)m_val * max_probability,
+ other.m_val),
+ max_probability);
+ }
+ m_quality = MIN (MIN (m_quality, other.m_quality), ADJUSTED);
+ }
+ return *this;
+ }
+
+ /* Split *THIS (ORIG) probability into 2 probabilities, such that
+ the returned one (FIRST) is *THIS * CPROB and *THIS is
+ adjusted (SECOND) so that FIRST + FIRST.invert () * SECOND
+ == ORIG. This is useful e.g. when splitting a conditional
+ branch like:
+ if (cond)
+ goto lab; // ORIG probability
+ into
+ if (cond1)
+ goto lab; // FIRST = ORIG * CPROB probability
+ if (cond2)
+ goto lab; // SECOND probability
+ such that the overall probability of jumping to lab remains
+ the same. CPROB gives the relative probability between the
+ branches. */
+ profile_probability split (const profile_probability &cprob)
+ {
+ profile_probability ret = *this * cprob;
+ /* The following is equivalent to:
+ *this = cprob.invert () * *this / ret.invert ();
+ Avoid scaling when overall outcome is supposed to be always.
+ Without knowing that one is inverse of other, the result would be
+ conservative. */
+ if (!(*this == always ()))
+ *this = (*this - ret) / ret.invert ();
+ return ret;
+ }
+
+ gcov_type apply (gcov_type val) const
+ {
+ if (*this == uninitialized ())
+ return val / 2;
+ return RDIV (val * m_val, max_probability);
+ }
+
+ /* Return 1-*THIS. */
+ profile_probability invert () const
+ {
+ return always() - *this;
+ }
+
+ /* Return THIS with quality dropped to GUESSED. */
+ profile_probability guessed () const
+ {
+ profile_probability ret = *this;
+ ret.m_quality = GUESSED;
+ return ret;
+ }
+
+ /* Return THIS with quality dropped to AFDO. */
+ profile_probability afdo () const
+ {
+ profile_probability ret = *this;
+ ret.m_quality = AFDO;
+ return ret;
+ }
+
+ /* Return *THIS * NUM / DEN. */
+ profile_probability apply_scale (int64_t num, int64_t den) const
+ {
+ if (*this == never ())
+ return *this;
+ if (!initialized_p ())
+ return uninitialized ();
+ profile_probability ret;
+ uint64_t tmp;
+ safe_scale_64bit (m_val, num, den, &tmp);
+ ret.m_val = MIN (tmp, max_probability);
+ ret.m_quality = MIN (m_quality, ADJUSTED);
+ return ret;
+ }
+
+ /* Return true when the probability of the edge is reliable.
+
+ The profile guessing code is good at predicting the branch outcome (i.e.
+ taken/not taken), which it predicts correctly slightly over 75% of the
+ time. It is however notoriously poor at predicting the probability
+ itself. In general the guessed profile appears a lot flatter (with
+ probabilities closer to 50%) than reality, so it is a bad idea to use it
+ to drive optimizations such as those disabling dynamic branch prediction
+ for well-predictable branches.
+
+ There are two exceptions: edges leading to noreturn calls and edges
+ predicted by the number-of-iterations heuristics are predicted well. This
+ function should be able to distinguish those, but at the moment it simply
+ checks for the noreturn heuristic, which is the only one giving a
+ probability over 99% or below 1%. In the future we might want to propagate
+ reliability information across the CFG if we find it useful in multiple
+ places. */
+ bool probably_reliable_p () const
+ {
+ if (m_quality >= ADJUSTED)
+ return true;
+ if (!initialized_p ())
+ return false;
+ return m_val < max_probability / 100
+ || m_val > max_probability - max_probability / 100;
+ }
+
+ /* Return false if profile_probability is bogus. */
+ bool verify () const
+ {
+ gcc_checking_assert (m_quality != UNINITIALIZED_PROFILE);
+ if (m_val == uninitialized_probability)
+ return m_quality == GUESSED;
+ else if (m_quality < GUESSED)
+ return false;
+ return m_val <= max_probability;
+ }
+
+ /* Comparisons are three-state and conservative. False is returned if
+ the inequality cannot be decided. */
+ bool operator< (const profile_probability &other) const
+ {
+ return initialized_p () && other.initialized_p () && m_val < other.m_val;
+ }
+
+ bool operator> (const profile_probability &other) const
+ {
+ return initialized_p () && other.initialized_p () && m_val > other.m_val;
+ }
+
+ bool operator<= (const profile_probability &other) const
+ {
+ return initialized_p () && other.initialized_p () && m_val <= other.m_val;
+ }
+
+ bool operator>= (const profile_probability &other) const
+ {
+ return initialized_p () && other.initialized_p () && m_val >= other.m_val;
+ }
+
+ profile_probability operator* (int64_t num) const
+ {
+ return apply_scale (num, 1);
+ }
+
+ profile_probability operator*= (int64_t num)
+ {
+ *this = apply_scale (num, 1);
+ return *this;
+ }
+
+ profile_probability operator/ (int64_t den) const
+ {
+ return apply_scale (1, den);
+ }
+
+ profile_probability operator/= (int64_t den)
+ {
+ *this = apply_scale (1, den);
+ return *this;
+ }
+
+ /* Get the value of the probability. */
+ uint32_t value () const { return m_val; }
+
+ /* Get the quality of the probability. */
+ enum profile_quality quality () const { return m_quality; }
+
+ /* Output THIS to F. */
+ void dump (FILE *f) const;
+
+ /* Output THIS to BUFFER. */
+ void dump (char *buffer) const;
+
+ /* Print THIS to stderr. */
+ void debug () const;
+
+ /* Return true if THIS is known to differ significantly from OTHER. */
+ bool differs_from_p (profile_probability other) const;
+
+ /* Return true if the difference is greater than 50%. */
+ bool differs_lot_from_p (profile_probability other) const;
+
+ /* An event happens COUNT1 times with *THIS probability and COUNT2 times
+ with OTHER probability. Return the probability that either of the two
+ events happens. */
+ profile_probability combine_with_count (profile_count count1,
+ profile_probability other,
+ profile_count count2) const;
+
+ /* Return probability as sreal. */
+ sreal to_sreal () const;
+ /* LTO streaming support. */
+ static profile_probability stream_in (class lto_input_block *);
+ void stream_out (struct output_block *);
+ void stream_out (struct lto_output_stream *);
+};
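+
+/* Usage sketch (illustrative): probabilities compose with the capped
+   fixed-point arithmetic above, and quality only ever degrades:
+
+     profile_probability p = profile_probability::even ();        1/2, GUESSED
+     profile_probability q = p * profile_probability::likely ();  about 2/5
+     profile_probability r = q.invert ();                         about 3/5
+     gcov_type n = r.apply (1000);                                about 600  */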
+
+/* Main data type to hold profile counters in GCC. Profile counts originate
+ either from profile feedback, static profile estimation or both. We do not
+ perform whole program profile propagation and thus profile estimation
+ counters are often local to function, while counters from profile feedback
+ (or special cases of profile estimation) can be used inter-procedurally.
+
+ There are 3 basic types:
+ 1) local counters which are the result of intra-procedural static profile
+ estimation.
+ 2) ipa counters which are the result of profile feedback or a special case
+ of static profile estimation (such as in function main).
+ 3) counters which count as 0 inter-procedurally (because the given function
+ was never run in the train feedback) but hold a local static profile
+ estimate.
+
+ Counters of types 1 and 3 cannot be mixed with counters of a different
+ type within an operation (because the whole function should use one type
+ of counter), with the exception that a global zero may mix in most
+ operations where the outcome is well defined.
+
+ To take a local counter and use it inter-procedurally, use the ipa member
+ function, which strips information irrelevant at the inter-procedural level.
+
+ Counters are 61-bit integers representing the number of executions during
+ the train run or a normalized frequency within the function.
+
+ As the profile is maintained during the compilation, many adjustments are
+ made. Not all transformations can be made precisely, most importantly
+ when code is being duplicated. It also may happen that part of the CFG has
+ profile counts known while other parts do not - for example when
+ LTO-optimizing a partly profiled program or when the profile was lost due
+ to COMDAT merging.
+
+ For this reason profile_count tracks more information than just an
+ unsigned integer and is also prepared for profile mismatches.
+ The API of this data type represents operations that are natural
+ on profile counts - sum, difference, and operations with scales and
+ probabilities. All operations are safe in that they never produce negative
+ counts and they end up uninitialized if any of the parameters is
+ uninitialized.
+
+ All comparisons are three-state to handle uninitialized values and
+ probabilities. Thus a < b is not equal to !(a >= b).
+
+ The following pre-defined counts are available:
+
+ profile_count::zero () for code that is known to execute zero times at
+ runtime (this can be detected statically, i.e. for paths leading to
+ abort ());
+ profile_count::one () for code that is known to execute once (such as
+ the main () function);
+ profile_count::uninitialized () for unknown execution counts.
+
+ */
+
+struct GTY(()) profile_count
+{
+public:
+ /* Use 61 bits to hold basic block counters; the underlying storage is a
+ 64-bit field. Although a counter cannot be negative, we use a signed
+ type to hold various extra stages. */
+
+ static const int n_bits = 61;
+ static const uint64_t max_count = ((uint64_t) 1 << n_bits) - 2;
+private:
+ static const uint64_t uninitialized_count = ((uint64_t) 1 << n_bits) - 1;
+
+#if defined (__arm__) && (__GNUC__ >= 6 && __GNUC__ <= 8)
+ /* Work-around for PR88469. A bug in the gcc-6/7/8 PCS layout code
+ incorrectly detects the alignment of a structure where the only
+ 64-bit aligned object is a bit-field. We force the alignment of
+ the entire field to mitigate this. */
+#define UINT64_BIT_FIELD_ALIGN __attribute__ ((aligned(8)))
+#else
+#define UINT64_BIT_FIELD_ALIGN
+#endif
+ uint64_t UINT64_BIT_FIELD_ALIGN m_val : n_bits;
+#undef UINT64_BIT_FIELD_ALIGN
+ enum profile_quality m_quality : 3;
+public:
+
+ /* Return true if both values can meaningfully appear in a single function
+ body. Within a function, counters must be either all local or all global;
+ otherwise operations between them are not well defined. */
+ bool compatible_p (const profile_count other) const
+ {
+ if (!initialized_p () || !other.initialized_p ())
+ return true;
+ if (*this == zero ()
+ || other == zero ())
+ return true;
+ /* Do not allow a nonzero global profile together with local guesses
+ that are globally 0. */
+ if (ipa ().nonzero_p ()
+ && !(other.ipa () == other))
+ return false;
+ if (other.ipa ().nonzero_p ()
+ && !(ipa () == *this))
+ return false;
+
+ return ipa_p () == other.ipa_p ();
+ }
+
+ /* Used for counters which are expected never to be executed. */
+ static profile_count zero ()
+ {
+ return from_gcov_type (0);
+ }
+
+ static profile_count adjusted_zero ()
+ {
+ profile_count c;
+ c.m_val = 0;
+ c.m_quality = ADJUSTED;
+ return c;
+ }
+
+ static profile_count guessed_zero ()
+ {
+ profile_count c;
+ c.m_val = 0;
+ c.m_quality = GUESSED;
+ return c;
+ }
+
+ static profile_count one ()
+ {
+ return from_gcov_type (1);
+ }
+
+ /* Value of counters which have not been initialized. Either because
+ initialization did not happen yet or because the profile is unknown. */
+ static profile_count uninitialized ()
+ {
+ profile_count c;
+ c.m_val = uninitialized_count;
+ c.m_quality = GUESSED_LOCAL;
+ return c;
+ }
+
+ /* Conversion to gcov_type is lossy. */
+ gcov_type to_gcov_type () const
+ {
+ gcc_checking_assert (initialized_p ());
+ return m_val;
+ }
+
+ /* Return true if value has been initialized. */
+ bool initialized_p () const
+ {
+ return m_val != uninitialized_count;
+ }
+
+ /* Return true if value can be trusted. */
+ bool reliable_p () const
+ {
+ return m_quality >= ADJUSTED;
+ }
+
+ /* Return true if the value can be used in inter-procedural operations. */
+ bool ipa_p () const
+ {
+ return !initialized_p () || m_quality >= GUESSED_GLOBAL0;
+ }
+
+ /* Return true if quality of profile is precise. */
+ bool precise_p () const
+ {
+ return m_quality == PRECISE;
+ }
+
+ /* Get the value of the count. */
+ uint64_t value () const { return m_val; }
+
+ /* Get the quality of the count. */
+ enum profile_quality quality () const { return m_quality; }
+
+ /* When merging basic blocks, the two different profile counts are unified.
+ Return true if this can be done without losing information about the
+ profile. The only case we care about here is when the first BB contains
+ something that makes it terminate in a way not visible in the CFG. */
+ bool ok_for_merging (profile_count other) const
+ {
+ if (m_quality < ADJUSTED
+ || other.m_quality < ADJUSTED)
+ return true;
+ return !(other < *this);
+ }
+
+ /* When merging two BBs with different counts, pick the common count that
+ looks most representative. */
+ profile_count merge (profile_count other) const
+ {
+ if (*this == other || !other.initialized_p ()
+ || m_quality > other.m_quality)
+ return *this;
+ if (other.m_quality > m_quality
+ || other > *this)
+ return other;
+ return *this;
+ }
+
+ /* Basic operations. */
+ bool operator== (const profile_count &other) const
+ {
+ return m_val == other.m_val && m_quality == other.m_quality;
+ }
+
+ profile_count operator+ (const profile_count &other) const
+ {
+ if (other == zero ())
+ return *this;
+ if (*this == zero ())
+ return other;
+ if (!initialized_p () || !other.initialized_p ())
+ return uninitialized ();
+
+ profile_count ret;
+ gcc_checking_assert (compatible_p (other));
+ ret.m_val = m_val + other.m_val;
+ ret.m_quality = MIN (m_quality, other.m_quality);
+ return ret;
+ }
+
+ profile_count &operator+= (const profile_count &other)
+ {
+ if (other == zero ())
+ return *this;
+ if (*this == zero ())
+ {
+ *this = other;
+ return *this;
+ }
+ if (!initialized_p () || !other.initialized_p ())
+ return *this = uninitialized ();
+ else
+ {
+ gcc_checking_assert (compatible_p (other));
+ m_val += other.m_val;
+ m_quality = MIN (m_quality, other.m_quality);
+ }
+ return *this;
+ }
+
+ profile_count operator- (const profile_count &other) const
+ {
+ if (*this == zero () || other == zero ())
+ return *this;
+ if (!initialized_p () || !other.initialized_p ())
+ return uninitialized ();
+ gcc_checking_assert (compatible_p (other));
+ profile_count ret;
+ ret.m_val = m_val >= other.m_val ? m_val - other.m_val : 0;
+ ret.m_quality = MIN (m_quality, other.m_quality);
+ return ret;
+ }
+
+ profile_count &operator-= (const profile_count &other)
+ {
+ if (*this == zero () || other == zero ())
+ return *this;
+ if (!initialized_p () || !other.initialized_p ())
+ return *this = uninitialized ();
+ else
+ {
+ gcc_checking_assert (compatible_p (other));
+ m_val = m_val >= other.m_val ? m_val - other.m_val: 0;
+ m_quality = MIN (m_quality, other.m_quality);
+ }
+ return *this;
+ }
+
+ /* Return false if profile_count is bogus. */
+ bool verify () const
+ {
+ gcc_checking_assert (m_quality != UNINITIALIZED_PROFILE);
+ return m_val != uninitialized_count || m_quality == GUESSED_LOCAL;
+ }
+
+ /* Comparisons are three-state and conservative. False is returned if
+ the inequality cannot be decided. */
+ bool operator< (const profile_count &other) const
+ {
+ if (!initialized_p () || !other.initialized_p ())
+ return false;
+ if (*this == zero ())
+ return !(other == zero ());
+ if (other == zero ())
+ return false;
+ gcc_checking_assert (compatible_p (other));
+ return m_val < other.m_val;
+ }
+
+ bool operator> (const profile_count &other) const
+ {
+ if (!initialized_p () || !other.initialized_p ())
+ return false;
+ if (*this == zero ())
+ return false;
+ if (other == zero ())
+ return !(*this == zero ());
+ gcc_checking_assert (compatible_p (other));
+ return initialized_p () && other.initialized_p () && m_val > other.m_val;
+ }
+
+ bool operator< (const gcov_type other) const
+ {
+ gcc_checking_assert (ipa_p ());
+ gcc_checking_assert (other >= 0);
+ return ipa ().initialized_p () && ipa ().m_val < (uint64_t) other;
+ }
+
+ bool operator> (const gcov_type other) const
+ {
+ gcc_checking_assert (ipa_p ());
+ gcc_checking_assert (other >= 0);
+ return ipa ().initialized_p () && ipa ().m_val > (uint64_t) other;
+ }
+
+ bool operator<= (const profile_count &other) const
+ {
+ if (!initialized_p () || !other.initialized_p ())
+ return false;
+ if (*this == zero ())
+ return true;
+ if (other == zero ())
+ return (*this == zero ());
+ gcc_checking_assert (compatible_p (other));
+ return m_val <= other.m_val;
+ }
+
+ bool operator>= (const profile_count &other) const
+ {
+ if (!initialized_p () || !other.initialized_p ())
+ return false;
+ if (other == zero ())
+ return true;
+ if (*this == zero ())
+ return (other == zero ());
+ gcc_checking_assert (compatible_p (other));
+ return m_val >= other.m_val;
+ }
+
+ bool operator<= (const gcov_type other) const
+ {
+ gcc_checking_assert (ipa_p ());
+ gcc_checking_assert (other >= 0);
+ return ipa ().initialized_p () && ipa ().m_val <= (uint64_t) other;
+ }
+
+ bool operator>= (const gcov_type other) const
+ {
+ gcc_checking_assert (ipa_p ());
+ gcc_checking_assert (other >= 0);
+ return ipa ().initialized_p () && ipa ().m_val >= (uint64_t) other;
+ }
+
+ profile_count operator* (int64_t num) const
+ {
+ return apply_scale (num, 1);
+ }
+
+ profile_count operator*= (int64_t num)
+ {
+ *this = apply_scale (num, 1);
+ return *this;
+ }
+
+ profile_count operator/ (int64_t den) const
+ {
+ return apply_scale (1, den);
+ }
+
+ profile_count operator/= (int64_t den)
+ {
+ *this = apply_scale (1, den);
+ return *this;
+ }
+
+ /* Return true when the value is not zero and can be used for scaling.
+ This is different from *this > 0 because that requires the counter to
+ be IPA. */
+ bool nonzero_p () const
+ {
+ return initialized_p () && m_val != 0;
+ }
+
+ /* Make counter forcibly nonzero. */
+ profile_count force_nonzero () const
+ {
+ if (!initialized_p ())
+ return *this;
+ profile_count ret = *this;
+ if (ret.m_val == 0)
+ {
+ ret.m_val = 1;
+ ret.m_quality = MIN (m_quality, ADJUSTED);
+ }
+ return ret;
+ }
+
+ profile_count max (profile_count other) const
+ {
+ profile_count val = *this;
+
+ /* Always prefer nonzero IPA counts over local counts. */
+ if (ipa ().nonzero_p () || other.ipa ().nonzero_p ())
+ {
+ val = ipa ();
+ other = other.ipa ();
+ }
+ if (!initialized_p ())
+ return other;
+ if (!other.initialized_p ())
+ return *this;
+ if (*this == zero ())
+ return other;
+ if (other == zero ())
+ return *this;
+ gcc_checking_assert (compatible_p (other));
+ if (val.m_val < other.m_val || (m_val == other.m_val
+ && val.m_quality < other.m_quality))
+ return other;
+ return *this;
+ }
+
+ /* PROB is a probability in scale 0...REG_BR_PROB_BASE. Scale counter
+ accordingly. */
+ profile_count apply_probability (int prob) const
+ {
+ gcc_checking_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
+ if (m_val == 0)
+ return *this;
+ if (!initialized_p ())
+ return uninitialized ();
+ profile_count ret;
+ ret.m_val = RDIV (m_val * prob, REG_BR_PROB_BASE);
+ ret.m_quality = MIN (m_quality, ADJUSTED);
+ return ret;
+ }
+
+ /* Scale counter according to PROB. */
+ profile_count apply_probability (profile_probability prob) const
+ {
+ if (*this == zero ())
+ return *this;
+ if (prob == profile_probability::never ())
+ return zero ();
+ if (!initialized_p ())
+ return uninitialized ();
+ profile_count ret;
+ uint64_t tmp;
+ safe_scale_64bit (m_val, prob.m_val, profile_probability::max_probability,
+ &tmp);
+ ret.m_val = tmp;
+ ret.m_quality = MIN (m_quality, prob.m_quality);
+ return ret;
+ }
+
+ /* Return *THIS * NUM / DEN. */
+ profile_count apply_scale (int64_t num, int64_t den) const
+ {
+ if (m_val == 0)
+ return *this;
+ if (!initialized_p ())
+ return uninitialized ();
+ profile_count ret;
+ uint64_t tmp;
+
+ gcc_checking_assert (num >= 0 && den > 0);
+ safe_scale_64bit (m_val, num, den, &tmp);
+ ret.m_val = MIN (tmp, max_count);
+ ret.m_quality = MIN (m_quality, ADJUSTED);
+ return ret;
+ }
+
+ profile_count apply_scale (profile_count num, profile_count den) const
+ {
+ if (*this == zero ())
+ return *this;
+ if (num == zero ())
+ return num;
+ if (!initialized_p () || !num.initialized_p () || !den.initialized_p ())
+ return uninitialized ();
+ if (num == den)
+ return *this;
+ gcc_checking_assert (den.m_val);
+
+ profile_count ret;
+ uint64_t val;
+ safe_scale_64bit (m_val, num.m_val, den.m_val, &val);
+ ret.m_val = MIN (val, max_count);
+ ret.m_quality = MIN (MIN (MIN (m_quality, ADJUSTED),
+ num.m_quality), den.m_quality);
+ /* Be sure that ret is not local if num is global.
+ Also ensure that ret is not global0 when num is global. */
+ if (num.ipa_p ())
+ ret.m_quality = MAX (ret.m_quality,
+ num == num.ipa () ? GUESSED : num.m_quality);
+ return ret;
+ }
+
+ /* Return THIS with quality dropped to GUESSED_LOCAL. */
+ profile_count guessed_local () const
+ {
+ profile_count ret = *this;
+ if (!initialized_p ())
+ return *this;
+ ret.m_quality = GUESSED_LOCAL;
+ return ret;
+ }
+
+ /* We know that profile is globally 0 but keep local profile if present. */
+ profile_count global0 () const
+ {
+ profile_count ret = *this;
+ if (!initialized_p ())
+ return *this;
+ ret.m_quality = GUESSED_GLOBAL0;
+ return ret;
+ }
+
+ /* We know that profile is globally adjusted 0 but keep local profile
+ if present. */
+ profile_count global0adjusted () const
+ {
+ profile_count ret = *this;
+ if (!initialized_p ())
+ return *this;
+ ret.m_quality = GUESSED_GLOBAL0_ADJUSTED;
+ return ret;
+ }
+
+ /* Return THIS with quality dropped to GUESSED. */
+ profile_count guessed () const
+ {
+ profile_count ret = *this;
+ ret.m_quality = MIN (ret.m_quality, GUESSED);
+ return ret;
+ }
+
+ /* Return a variant of the profile count which is always safe to compare
+ across functions. */
+ profile_count ipa () const
+ {
+ if (m_quality > GUESSED_GLOBAL0_ADJUSTED)
+ return *this;
+ if (m_quality == GUESSED_GLOBAL0)
+ return zero ();
+ if (m_quality == GUESSED_GLOBAL0_ADJUSTED)
+ return adjusted_zero ();
+ return uninitialized ();
+ }
+
+ /* Return THIS with quality dropped to AFDO. */
+ profile_count afdo () const
+ {
+ profile_count ret = *this;
+ ret.m_quality = AFDO;
+ return ret;
+ }
+
+ /* Return the probability of an event with counter THIS within an event
+ with counter OVERALL. */
+ profile_probability probability_in (const profile_count overall) const
+ {
+ if (*this == zero ()
+ && !(overall == zero ()))
+ return profile_probability::never ();
+ if (!initialized_p () || !overall.initialized_p ()
+ || !overall.m_val)
+ return profile_probability::uninitialized ();
+ if (*this == overall && m_quality == PRECISE)
+ return profile_probability::always ();
+ profile_probability ret;
+ gcc_checking_assert (compatible_p (overall));
+
+ if (overall.m_val < m_val)
+ {
+ ret.m_val = profile_probability::max_probability;
+ ret.m_quality = GUESSED;
+ return ret;
+ }
+ else
+ ret.m_val = RDIV (m_val * profile_probability::max_probability,
+ overall.m_val);
+ ret.m_quality = MIN (MAX (MIN (m_quality, overall.m_quality),
+ GUESSED), ADJUSTED);
+ return ret;
+ }
+
+ int to_frequency (struct function *fun) const;
+ int to_cgraph_frequency (profile_count entry_bb_count) const;
+ sreal to_sreal_scale (profile_count in, bool *known = NULL) const;
+
+ /* Output THIS to F. */
+ void dump (FILE *f) const;
+
+ /* Output THIS to BUFFER. */
+ void dump (char *buffer) const;
+
+ /* Print THIS to stderr. */
+ void debug () const;
+
+ /* Return true if THIS is known to differ significantly from OTHER. */
+ bool differs_from_p (profile_count other) const;
+
+ /* We want to scale the profile across a function boundary from NUM to DEN.
+ Take care of the corner case when NUM and DEN are zeros of incompatible
+ kinds. */
+ static void adjust_for_ipa_scaling (profile_count *num, profile_count *den);
+
+ /* THIS is a count of a bb which is known to be executed IPA times.
+ Combine this information into the bb counter. This means returning IPA
+ if it is nonzero, not changing anything if IPA is uninitialized, and,
+ if IPA is zero, turning THIS into the corresponding local profile with
+ global0. */
+ profile_count combine_with_ipa_count (profile_count ipa);
+
+ /* Same as combine_with_ipa_count but inside function with count IPA2. */
+ profile_count combine_with_ipa_count_within
+ (profile_count ipa, profile_count ipa2);
+
+ /* The profiling runtime uses gcov_type, which is usually a 64-bit integer.
+ Conversions back and forth are used to read the coverage data and get it
+ into the internal representation. */
+ static profile_count from_gcov_type (gcov_type v,
+ profile_quality quality = PRECISE);
+
+ /* LTO streaming support. */
+ static profile_count stream_in (class lto_input_block *);
+ void stream_out (struct output_block *);
+ void stream_out (struct lto_output_stream *);
+};
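+
+/* Usage sketch (illustrative): counts combine with probabilities, and the
+   ipa member function strips information that is only valid locally:
+
+     profile_count c = profile_count::from_gcov_type (100);          PRECISE
+     profile_count t = c.apply_probability (profile_probability::even ());
+        t.value () is 50; quality is capped by the probability's quality
+     profile_probability p = t.probability_in (c);                   about 1/2
+     profile_count g = c.ipa ();       safe to compare across functions  */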
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/profile.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/profile.h
new file mode 100644
index 0000000..242b0a4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/profile.h
@@ -0,0 +1,80 @@
+/* Header file for minimum-cost maximal flow routines used to smooth basic
+ block and edge frequency counts.
+ Copyright (C) 2008-2023 Free Software Foundation, Inc.
+ Contributed by Paul Yuan (yingbo.com@gmail.com)
+ and Vinodha Ramasamy (vinodha@google.com).
+
+This file is part of GCC.
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef PROFILE_H
+#define PROFILE_H
+
+/* Additional information about edges. */
+struct edge_profile_info
+{
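+ /* Whether the count on this edge is known to be valid. */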
+ unsigned int count_valid:1;
+
+ /* Is on the spanning tree. */
+ unsigned int on_tree:1;
+
+ /* Pretend this edge does not exist (it is abnormal and we've
+ inserted a fake to compensate). */
+ unsigned int ignore:1;
+};
+
+#define EDGE_INFO(e) ((struct edge_profile_info *) (e)->aux)
+
+/* Helpers annotating edges/basic blocks with GCOV counts. */
+
+extern vec<gcov_type> bb_gcov_counts;
+extern hash_map<edge,gcov_type> *edge_gcov_counts;
+
+inline gcov_type &
+edge_gcov_count (edge e)
+{
+ bool existed;
+ gcov_type &c = edge_gcov_counts->get_or_insert (e, &existed);
+ if (!existed)
+ c = 0;
+ return c;
+}
+
+inline gcov_type &
+bb_gcov_count (basic_block bb)
+{
+ return bb_gcov_counts[bb->index];
+}
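+
+/* Usage sketch (illustrative; assumes the profile machinery has already
+   allocated bb_gcov_counts and edge_gcov_counts, and BB is a basic_block):
+
+     edge e;
+     edge_iterator ei;
+     FOR_EACH_EDGE (e, ei, bb->succs)
+       bb_gcov_count (bb) += edge_gcov_count (e);   sum outgoing counts  */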
+
+typedef struct gcov_working_set_info gcov_working_set_t;
+extern gcov_working_set_t *find_working_set (unsigned pct_times_10);
+extern void add_working_set (gcov_working_set_t *);
+
+/* Smooths the initially assigned basic block and edge counts using
+ a minimum-cost flow algorithm. */
+extern void mcf_smooth_cfg (void);
+
+extern gcov_type sum_edge_counts (vec<edge, va_gc> *edges);
+
+extern void init_node_map (bool);
+extern void del_node_map (void);
+
+extern void get_working_sets (void);
+
+/* Counter summary from the last set of coverage counts read by
+ profile.cc. */
+extern struct gcov_summary *profile_info;
+
+#endif /* PROFILE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/range-op.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/range-op.h
new file mode 100644
index 0000000..03ef6b9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/range-op.h
@@ -0,0 +1,318 @@
+/* Header file for range operator class.
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+ Contributed by Andrew MacLeod <amacleod@redhat.com>
+ and Aldy Hernandez <aldyh@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RANGE_OP_H
+#define GCC_RANGE_OP_H
+
+// This class is implemented for each kind of operator supported by
+// the range generator. It serves various purposes.
+//
+// 1 - Generates range information for the specific operation between
+// two ranges. This provides the ability to fold ranges for an
+// expression.
+//
+// 2 - Performs range algebra on the expression such that a range can be
+// adjusted in terms of one of the operands:
+//
+// def = op1 + op2
+//
+// Given a range for def, we can adjust the range so that it is in
+// terms of either operand.
+//
+// op1_range (def_range, op2) will adjust the range in place so it
+// is in terms of op1. Since op1 = def - op2, it will subtract
+// op2 from each element of the range.
+//
+// 3 - Creates a range for an operand based on whether the result is 0 or
+// non-zero. This is mostly for logical true/false, but can serve other
+// purposes.
+// E.g. 0 = op1 - op2 implies op2 has the same range as op1.
+
+class range_operator
+{
+ friend class range_op_table;
+public:
+ range_operator () : m_code (ERROR_MARK) { }
+ // Perform an operation between 2 ranges and return it.
+ virtual bool fold_range (irange &r, tree type,
+ const irange &lh,
+ const irange &rh,
+ relation_trio = TRIO_VARYING) const;
+
+ // Return the range for op[12] in the general case. LHS is the range for
+ // the LHS of the expression, OP[12] is the range for the other operand,
+ // and the result is returned in R.
+ //
+ // TYPE is the expected type of the range.
+ //
+ // Return TRUE if the operation is performed and a valid range is available.
+ //
+ // i.e. [LHS] = ??? + OP2
+ // is re-formed as R = [LHS] - OP2.
+ virtual bool op1_range (irange &r, tree type,
+ const irange &lhs,
+ const irange &op2,
+ relation_trio = TRIO_VARYING) const;
+ virtual bool op2_range (irange &r, tree type,
+ const irange &lhs,
+ const irange &op1,
+ relation_trio = TRIO_VARYING) const;
+
+ // The following routines are used to represent relations between the
+ // various operations. If the caller knows where the symbolics are,
+ // it can query for relationships between them given known ranges.
+ // The optional relation passed in is the relation between op1 and op2.
+ virtual relation_kind lhs_op1_relation (const irange &lhs,
+ const irange &op1,
+ const irange &op2,
+ relation_kind = VREL_VARYING) const;
+ virtual relation_kind lhs_op2_relation (const irange &lhs,
+ const irange &op1,
+ const irange &op2,
+ relation_kind = VREL_VARYING) const;
+ virtual relation_kind op1_op2_relation (const irange &lhs) const;
+protected:
+ // Perform an integral operation between 2 sub-ranges and return it.
+ virtual void wi_fold (irange &r, tree type,
+ const wide_int &lh_lb,
+ const wide_int &lh_ub,
+ const wide_int &rh_lb,
+ const wide_int &rh_ub) const;
+ // Effect of relation for generic fold_range clients.
+ virtual bool op1_op2_relation_effect (irange &lhs_range, tree type,
+ const irange &op1_range,
+ const irange &op2_range,
+ relation_kind rel) const;
+ // Called by fold_range to split small subranges into parts.
+ void wi_fold_in_parts (irange &r, tree type,
+ const wide_int &lh_lb,
+ const wide_int &lh_ub,
+ const wide_int &rh_lb,
+ const wide_int &rh_ub) const;
+
+ // Called by fold_range to split small subranges into parts when op1 == op2.
+ void wi_fold_in_parts_equiv (irange &r, tree type,
+ const wide_int &lb,
+ const wide_int &ub,
+ unsigned limit) const;
+
+ // Tree code of the range operator or ERROR_MARK if unknown.
+ tree_code m_code;
+};
+
+// Like range_operator above, but for floating point operators.
+
+class range_operator_float
+{
+public:
+ virtual bool fold_range (frange &r, tree type,
+ const frange &lh,
+ const frange &rh,
+ relation_trio = TRIO_VARYING) const;
+ virtual void rv_fold (REAL_VALUE_TYPE &lb, REAL_VALUE_TYPE &ub,
+ bool &maybe_nan,
+ tree type,
+ const REAL_VALUE_TYPE &lh_lb,
+ const REAL_VALUE_TYPE &lh_ub,
+ const REAL_VALUE_TYPE &rh_lb,
+ const REAL_VALUE_TYPE &rh_ub,
+ relation_kind) const;
+ // Unary operations have the range of the LHS as op2.
+ virtual bool fold_range (irange &r, tree type,
+ const frange &lh,
+ const irange &rh,
+ relation_trio = TRIO_VARYING) const;
+ virtual bool fold_range (irange &r, tree type,
+ const frange &lh,
+ const frange &rh,
+ relation_trio = TRIO_VARYING) const;
+ virtual bool op1_range (frange &r, tree type,
+ const frange &lhs,
+ const frange &op2,
+ relation_trio = TRIO_VARYING) const;
+ virtual bool op1_range (frange &r, tree type,
+ const irange &lhs,
+ const frange &op2,
+ relation_trio = TRIO_VARYING) const;
+ virtual bool op2_range (frange &r, tree type,
+ const frange &lhs,
+ const frange &op1,
+ relation_trio = TRIO_VARYING) const;
+ virtual bool op2_range (frange &r, tree type,
+ const irange &lhs,
+ const frange &op1,
+ relation_trio = TRIO_VARYING) const;
+
+ virtual relation_kind lhs_op1_relation (const frange &lhs,
+ const frange &op1,
+ const frange &op2,
+ relation_kind = VREL_VARYING) const;
+ virtual relation_kind lhs_op1_relation (const irange &lhs,
+ const frange &op1,
+ const frange &op2,
+ relation_kind = VREL_VARYING) const;
+ virtual relation_kind lhs_op2_relation (const frange &lhs,
+ const frange &op1,
+ const frange &op2,
+ relation_kind = VREL_VARYING) const;
+ virtual relation_kind lhs_op2_relation (const irange &lhs,
+ const frange &op1,
+ const frange &op2,
+ relation_kind = VREL_VARYING) const;
+ virtual relation_kind op1_op2_relation (const irange &lhs) const;
+ virtual relation_kind op1_op2_relation (const frange &lhs) const;
+};
+
+class range_op_handler
+{
+public:
+ range_op_handler ();
+ range_op_handler (enum tree_code code, tree type);
+ inline operator bool () const { return m_valid; }
+
+ bool fold_range (vrange &r, tree type,
+ const vrange &lh,
+ const vrange &rh,
+ relation_trio = TRIO_VARYING) const;
+ bool op1_range (vrange &r, tree type,
+ const vrange &lhs,
+ const vrange &op2,
+ relation_trio = TRIO_VARYING) const;
+ bool op2_range (vrange &r, tree type,
+ const vrange &lhs,
+ const vrange &op1,
+ relation_trio = TRIO_VARYING) const;
+ relation_kind lhs_op1_relation (const vrange &lhs,
+ const vrange &op1,
+ const vrange &op2,
+ relation_kind = VREL_VARYING) const;
+ relation_kind lhs_op2_relation (const vrange &lhs,
+ const vrange &op1,
+ const vrange &op2,
+ relation_kind = VREL_VARYING) const;
+ relation_kind op1_op2_relation (const vrange &lhs) const;
+protected:
+ void set_op_handler (enum tree_code code, tree type);
+ bool m_valid;
+ range_operator *m_int;
+ range_operator_float *m_float;
+};
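+
+// Editorial sketch (not part of the upstream header): a typical client
+// constructs a handler for a tree code and, when it is valid, folds a
+// range through it.  TYPE, R, OP1 and OP2 are assumed to be supplied
+// by the caller:
+//
+//   range_op_handler handler (PLUS_EXPR, type);
+//   if (handler)
+//     handler.fold_range (r, type, op1, op2);
+//
+// The handler dispatches to m_int or m_float depending on TYPE.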
+
+extern bool range_cast (vrange &, tree type);
+extern void wi_set_zero_nonzero_bits (tree type,
+ const wide_int &, const wide_int &,
+ wide_int &maybe_nonzero,
+ wide_int &mustbe_nonzero);
+
+// op1_op2_relation methods that are the same across irange and frange.
+relation_kind equal_op1_op2_relation (const irange &lhs);
+relation_kind not_equal_op1_op2_relation (const irange &lhs);
+relation_kind lt_op1_op2_relation (const irange &lhs);
+relation_kind le_op1_op2_relation (const irange &lhs);
+relation_kind gt_op1_op2_relation (const irange &lhs);
+relation_kind ge_op1_op2_relation (const irange &lhs);
+
+enum bool_range_state { BRS_FALSE, BRS_TRUE, BRS_EMPTY, BRS_FULL };
+bool_range_state get_bool_state (vrange &r, const vrange &lhs, tree val_type);
+
+// If the range of either op1 or op2 is undefined, set the result to
+// varying and return TRUE. If the caller truly cares about a result,
+// they should pass in a varying if it has an undefined that it wants
+// treated as a varying.
+
+inline bool
+empty_range_varying (vrange &r, tree type,
+ const vrange &op1, const vrange & op2)
+{
+ if (op1.undefined_p () || op2.undefined_p ())
+ {
+ r.set_varying (type);
+ return true;
+ }
+ else
+ return false;
+}
+
+// For relation opcodes, first try to see if the supplied relation
+// forces a true or false result, and return that.
+// Then check for undefined operands. If none of this applies,
+// return false.
+
+inline bool
+relop_early_resolve (irange &r, tree type, const vrange &op1,
+ const vrange &op2, relation_trio trio,
+ relation_kind my_rel)
+{
+ relation_kind rel = trio.op1_op2 ();
+ // If known relation is a complete subset of this relation, always true.
+ if (relation_union (rel, my_rel) == my_rel)
+ {
+ r = range_true (type);
+ return true;
+ }
+
+ // If known relation has no subset of this relation, always false.
+ if (relation_intersect (rel, my_rel) == VREL_UNDEFINED)
+ {
+ r = range_false (type);
+ return true;
+ }
+
+ // If either operand is undefined, return VARYING.
+ if (empty_range_varying (r, type, op1, op2))
+ return true;
+
+ return false;
+}
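+
+// Editorial sketch: a relational operator's fold_range would normally
+// attempt this early resolution before computing anything, e.g. for
+// LT_EXPR (hypothetical fragment):
+//
+//   if (relop_early_resolve (r, type, op1, op2, trio, VREL_LT))
+//     return true;
+//   // ...otherwise derive the result from the operand endpoints.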
+
+// This implements the range operator tables as local objects.
+
+class range_op_table
+{
+public:
+ range_operator *operator[] (enum tree_code code);
+protected:
+ void set (enum tree_code code, range_operator &op);
+private:
+ range_operator *m_range_tree[MAX_TREE_CODES];
+};
+
+// Like above, but for floating point operators.
+
+class floating_op_table
+{
+public:
+ floating_op_table ();
+ range_operator_float *operator[] (enum tree_code code);
+private:
+ void set (enum tree_code code, range_operator_float &op);
+ range_operator_float *m_range_tree[MAX_TREE_CODES];
+};
+
+// This holds the range op table for floating point operations.
+extern floating_op_table *floating_tree_table;
+
+extern range_operator *ptr_op_widen_mult_signed;
+extern range_operator *ptr_op_widen_mult_unsigned;
+extern range_operator *ptr_op_widen_plus_signed;
+extern range_operator *ptr_op_widen_plus_unsigned;
+#endif // GCC_RANGE_OP_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/range.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/range.h
new file mode 100644
index 0000000..3b0e9ef
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/range.h
@@ -0,0 +1,58 @@
+/* Header file for misc range functions. -*- C++ -*-
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez <aldyh@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RANGE_H
+#define GCC_RANGE_H
+
+value_range range_zero (tree type);
+value_range range_nonzero (tree type);
+value_range range_positives (tree type);
+value_range range_negatives (tree type);
+
+// Return an irange instance that is a boolean TRUE.
+
+inline int_range<1>
+range_true (tree type)
+{
+ unsigned prec = TYPE_PRECISION (type);
+ return int_range<2> (type, wi::one (prec), wi::one (prec));
+}
+
+// Return an irange instance that is a boolean FALSE.
+
+inline int_range<1>
+range_false (tree type)
+{
+ unsigned prec = TYPE_PRECISION (type);
+ return int_range<2> (type, wi::zero (prec), wi::zero (prec));
+}
+
+// Return an irange that covers both true and false.
+
+inline int_range<1>
+range_true_and_false (tree type)
+{
+ unsigned prec = TYPE_PRECISION (type);
+ if (prec == 1)
+ return int_range<2> (type);
+ return int_range<2> (type, wi::zero (prec), wi::one (prec));
+}
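+
+// Editorial sketch: for the C/C++ boolean type these helpers yield
+// [1,1], [0,0] and [0,1] respectively, e.g.:
+//
+//   int_range<1> t = range_true (boolean_type_node);   // [1, 1]
+//   int_range<1> f = range_false (boolean_type_node);  // [0, 0]
+//
+// boolean_type_node typically has precision 1, in which case
+// range_true_and_false returns the full (varying) 1-bit range.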
+
+#endif // GCC_RANGE_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/read-md.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/read-md.h
new file mode 100644
index 0000000..b309c9c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/read-md.h
@@ -0,0 +1,408 @@
+/* MD reader definitions.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_READ_MD_H
+#define GCC_READ_MD_H
+
+#include "obstack.h"
+
+/* Records a position in the file. */
+class file_location {
+public:
+ file_location () {}
+ file_location (const char *, int, int);
+
+ const char *filename;
+ int lineno;
+ int colno;
+};
+
+inline file_location::file_location (const char *filename_in, int lineno_in, int colno_in)
+: filename (filename_in), lineno (lineno_in), colno (colno_in) {}
+
+/* Holds one symbol or number in the .md file. */
+struct md_name {
+ /* The name as it appeared in the .md file. Names are syntactically
+ limited to the length of this buffer. */
+ char buffer[256];
+
+ /* The name that should actually be used by the generator programs.
+ This is an expansion of NAME, after things like constant substitution. */
+ char *string;
+};
+
+/* This structure represents a constant defined by define_constant,
+ define_enum, or such-like. */
+struct md_constant {
+ /* The name of the constant. */
+ char *name;
+
+ /* The string to which the constant expands. */
+ char *value;
+
+ /* If the constant is associated with an enumeration, this field
+ points to that enumeration, otherwise it is null. */
+ struct enum_type *parent_enum;
+};
+
+/* This structure represents one value in an enum_type. */
+struct enum_value {
+ /* The next value in the enum, or null if this is the last. */
+ struct enum_value *next;
+
+ /* The name of the value as it appears in the .md file. */
+ char *name;
+
+ /* The definition of the related C value. */
+ struct md_constant *def;
+};
+
+/* This structure represents an enum defined by define_enum or the like. */
+struct enum_type {
+ /* The C name of the enumeration. */
+ char *name;
+
+ /* True if this is an md-style enum (DEFINE_ENUM) rather than
+ a C-style enum (DEFINE_C_ENUM). */
+ bool md_p;
+
+ /* The values of the enumeration. There is always at least one. */
+ struct enum_value *values;
+
+ /* A pointer to the null terminator in VALUES. */
+ struct enum_value **tail_ptr;
+
+ /* The number of enumeration values. */
+ unsigned int num_values;
+};
+
+/* Describes one instance of an overloaded_name. */
+struct overloaded_instance {
+ /* The next instance in the chain, or null if none. */
+ overloaded_instance *next;
+
+ /* The values that the overloaded_name arguments should have for this
+ instance to be chosen. Each value is a C token. */
+ vec<const char *> arg_values;
+
+ /* The full (non-overloaded) name of the pattern. */
+ const char *name;
+
+ /* The corresponding define_expand or define_insn. */
+ rtx insn;
+};
+
+/* Describes a define_expand or define_insn whose name was preceded by '@'.
+ Overloads are uniquely determined by their name and the types of their
+ arguments; it's possible to have overloads with the same name but
+ different argument types. */
+struct overloaded_name {
+ /* The next overloaded name in the chain. */
+ overloaded_name *next;
+
+ /* The overloaded name (i.e. the name with the "@" character and
+ "<...>" placeholders removed). */
+ const char *name;
+
+ /* The C types of the iterators that determine the underlying pattern,
+ in the same order as in the pattern name. E.g. "<mode>" in the
+ pattern name would give a "machine_mode" argument here. */
+ vec<const char *> arg_types;
+
+ /* The first instance associated with this overloaded_name. */
+ overloaded_instance *first_instance;
+
+ /* Where to chain new overloaded_instances. */
+ overloaded_instance **next_instance_ptr;
+};
+
+struct mapping;
+
+/* A class for reading .md files and RTL dump files.
+
+ Implemented in read-md.cc.
+
+ This class has responsibility for reading chars from input files, and
+ for certain common top-level directives including the "include"
+ directive.
+
+ It does not handle parsing the hierarchically-nested expressions of
+ rtl.def; for that see the rtx_reader subclass below (implemented in
+ read-rtl.cc). */
+
+class md_reader
+{
+ public:
+ /* Associates PTR (which can be a string, etc.) with the file location
+ specified by LOC. */
+ struct ptr_loc {
+ const void *ptr;
+ file_location loc;
+ };
+
+ md_reader (bool compact);
+ virtual ~md_reader ();
+
+ bool read_md_files (int, const char **, bool (*) (const char *));
+ bool read_file (const char *filename);
+ bool read_file_fragment (const char *filename,
+ int first_line,
+ int last_line);
+
+ /* A hook that handles a single .md-file directive, up to but not
+ including the closing ')'. It takes two arguments: the file position
+ at which the directive started, and the name of the directive. The next
+ unread character is the optional space after the directive name. */
+ virtual void handle_unknown_directive (file_location, const char *) = 0;
+
+ file_location get_current_location () const;
+
+ bool is_compact () const { return m_compact; }
+
+ /* Defined in read-md.cc. */
+ int read_char (void);
+ void unread_char (int ch);
+ file_location read_name (struct md_name *name);
+ file_location read_name_or_nil (struct md_name *);
+ void read_escape ();
+ char *read_quoted_string ();
+ char *read_braced_string ();
+ char *read_string (int star_if_braced);
+ void read_skip_construct (int depth, file_location loc);
+ void require_char (char expected);
+ void require_char_ws (char expected);
+ void require_word_ws (const char *expected);
+ int peek_char (void);
+
+ void set_md_ptr_loc (const void *ptr, file_location);
+ const struct ptr_loc *get_md_ptr_loc (const void *ptr);
+ void copy_md_ptr_loc (const void *new_ptr, const void *old_ptr);
+ void fprint_md_ptr_loc (FILE *outf, const void *ptr);
+ void print_md_ptr_loc (const void *ptr);
+
+ struct enum_type *lookup_enum_type (const char *name);
+ void traverse_enum_types (htab_trav callback, void *info);
+
+ void handle_constants ();
+ void traverse_md_constants (htab_trav callback, void *info);
+ void handle_enum (file_location loc, bool md_p);
+
+ const char *join_c_conditions (const char *cond1, const char *cond2);
+ void fprint_c_condition (FILE *outf, const char *cond);
+ void print_c_condition (const char *cond);
+
+ /* Defined in read-rtl.cc. */
+ const char *apply_iterator_to_string (const char *string);
+ rtx copy_rtx_for_iterators (rtx original);
+ void read_conditions ();
+ void record_potential_iterator_use (struct iterator_group *group,
+ file_location loc, rtx x,
+ unsigned int index, const char *name);
+ struct mapping *read_mapping (struct iterator_group *group, htab_t table);
+ overloaded_name *handle_overloaded_name (rtx, vec<mapping *> *);
+
+ const char *get_top_level_filename () const { return m_toplevel_fname; }
+ const char *get_filename () const { return m_read_md_filename; }
+ int get_lineno () const { return m_read_md_lineno; }
+ int get_colno () const { return m_read_md_colno; }
+
+ struct obstack *get_string_obstack () { return &m_string_obstack; }
+ htab_t get_md_constants () { return m_md_constants; }
+
+ overloaded_name *get_overloads () const { return m_first_overload; }
+
+ private:
+ /* A singly-linked list of filenames. */
+ struct file_name_list {
+ struct file_name_list *next;
+ const char *fname;
+ };
+
+ private:
+ void handle_file ();
+ void handle_toplevel_file ();
+ void handle_include (file_location loc);
+ void add_include_path (const char *arg);
+
+ bool read_name_1 (struct md_name *name, file_location *out_loc);
+
+ private:
+ /* Are we reading a compact dump? */
+ bool m_compact;
+
+ /* The name of the toplevel file that indirectly included
+ m_read_md_file. */
+ const char *m_toplevel_fname;
+
+ /* The directory part of m_toplevel_fname, or
+ NULL if m_toplevel_fname is a bare filename. */
+ char *m_base_dir;
+
+ /* The file we are reading. */
+ FILE *m_read_md_file;
+
+ /* The filename of m_read_md_file. */
+ const char *m_read_md_filename;
+
+ /* The current line number in m_read_md_file. */
+ int m_read_md_lineno;
+
+ /* The current column number in m_read_md_file. */
+ int m_read_md_colno;
+
+ /* The column number before the last newline, so that
+ we can handle unread_char ('\n') at least once whilst
+ retaining column information. */
+ int m_last_line_colno;
+
+ /* The first directory to search. */
+ file_name_list *m_first_dir_md_include;
+
+ /* A pointer to the null terminator of the md include chain. */
+ file_name_list **m_last_dir_md_include_ptr;
+
+ /* Obstack used for allocating MD strings. */
+ struct obstack m_string_obstack;
+
+ /* A table of ptr_locs, hashed on the PTR field. */
+ htab_t m_ptr_locs;
+
+ /* An obstack for the above. Plain xmalloc is a bit heavyweight for a
+ small structure like ptr_loc. */
+ struct obstack m_ptr_loc_obstack;
+
+ /* A hash table of triples (A, B, C), where each of A, B and C is a condition
+ and A is equivalent to "B && C". This is used to keep track of the source
+ of conditions that are made up of separate MD strings (such as the split
+ condition of a define_insn_and_split). */
+ htab_t m_joined_conditions;
+
+ /* An obstack for allocating joined_conditions entries. */
+ struct obstack m_joined_conditions_obstack;
+
+ /* A table of md_constant structures, hashed by name. Null if no
+ constant expansion should occur. */
+ htab_t m_md_constants;
+
+ /* A table of enum_type structures, hashed by name. */
+ htab_t m_enum_types;
+
+ /* If non-zero, filter the input to just this subset of lines. */
+ int m_first_line;
+ int m_last_line;
+
+ /* The first overloaded_name. */
+ overloaded_name *m_first_overload;
+
+ /* Where to chain further overloaded_names. */
+ overloaded_name **m_next_overload_ptr;
+
+ /* A hash table of overloaded_names, keyed off their name and the types of
+ their arguments. */
+ htab_t m_overloads_htab;
+};
+
+/* Global singleton; contrast with rtx_reader_ptr below. */
+extern md_reader *md_reader_ptr;
+
+/* An md_reader subclass which skips unknown directives, for
+ the gen* tools that purely use read-md.o. */
+
+class noop_reader : public md_reader
+{
+ public:
+ noop_reader () : md_reader (false) {}
+
+ /* A dummy implementation which skips unknown directives. */
+ void handle_unknown_directive (file_location, const char *) override;
+};
+
+/* An md_reader subclass that actually handles full hierarchical
+ rtx expressions.
+
+ Implemented in read-rtl.cc. */
+
+class rtx_reader : public md_reader
+{
+ public:
+ rtx_reader (bool compact);
+ ~rtx_reader ();
+
+ bool read_rtx (const char *rtx_name, vec<rtx> *rtxen);
+ rtx rtx_alloc_for_name (const char *);
+ rtx read_rtx_code (const char *code_name);
+ virtual rtx read_rtx_operand (rtx return_rtx, int idx);
+ rtx read_nested_rtx ();
+ rtx read_rtx_variadic (rtx form);
+ char *read_until (const char *terminator_chars, bool consume_terminator);
+
+ virtual void handle_any_trailing_information (rtx) {}
+ virtual rtx postprocess (rtx x) { return x; }
+
+ /* Hook to allow function_reader subclass to put STRINGBUF into gc-managed
+ memory, rather than within an obstack.
+ This base class implementation is a no-op. */
+ virtual const char *finalize_string (char *stringbuf) { return stringbuf; }
+
+ protected:
+ /* Analogous to rtx_writer's m_in_call_function_usage. */
+ bool m_in_call_function_usage;
+
+ /* Support for "reuse_rtx" directives. */
+ auto_vec<rtx> m_reuse_rtx_by_id;
+};
+
+/* Global singleton; contrast with md_reader_ptr above. */
+extern rtx_reader *rtx_reader_ptr;
+
+extern void (*include_callback) (const char *);
+
+/* Read the next character from the MD file. */
+
+inline int
+read_char (void)
+{
+ return md_reader_ptr->read_char ();
+}
+
+/* Put back CH, which was the last character read from the MD file. */
+
+inline void
+unread_char (int ch)
+{
+ md_reader_ptr->unread_char (ch);
+}
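+
+/* Editorial sketch of how the two wrappers above are normally paired
+   for one-character lookahead (hypothetical fragment):
+
+     int ch = read_char ();
+     if (ch != '(')
+       unread_char (ch);
+
+   i.e. the character is consumed only if it is the one expected;
+   otherwise it is pushed back for the next reader.  */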
+
+extern hashval_t leading_string_hash (const void *);
+extern int leading_string_eq_p (const void *, const void *);
+extern const char *join_c_conditions (const char *, const char *);
+extern void message_at (file_location, const char *, ...) ATTRIBUTE_PRINTF_2;
+extern void error_at (file_location, const char *, ...) ATTRIBUTE_PRINTF_2;
+extern void fatal_at (file_location, const char *, ...) ATTRIBUTE_PRINTF_2;
+extern void fatal_with_file_and_line (const char *, ...)
+ ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+extern void fatal_expected_char (int, int) ATTRIBUTE_NORETURN;
+extern int read_skip_spaces (void);
+extern int n_comma_elts (const char *);
+extern const char *scan_comma_elt (const char **);
+extern void upcase_string (char *);
+extern void traverse_enum_types (htab_trav, void *);
+extern struct enum_type *lookup_enum_type (const char *);
+
+#endif /* GCC_READ_MD_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/read-rtl-function.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/read-rtl-function.h
new file mode 100644
index 0000000..1e98066
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/read-rtl-function.h
@@ -0,0 +1,28 @@
+/* read-rtl-function.h - Reader for RTL function dumps
+ Copyright (C) 2016-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_READ_RTL_FUNCTION_H
+#define GCC_READ_RTL_FUNCTION_H
+
+extern bool read_rtl_function_body (const char *path);
+
+extern bool read_rtl_function_body_from_file_range (location_t start_loc,
+ location_t end_loc);
+
+#endif /* GCC_READ_RTL_FUNCTION_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/real.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/real.h
new file mode 100644
index 0000000..dd41c65
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/real.h
@@ -0,0 +1,559 @@
+/* Definitions of floating-point access for GNU compiler.
+ Copyright (C) 1989-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_REAL_H
+#define GCC_REAL_H
+
+/* An expanded form of the represented number. */
+
+/* Enumerate the special cases of numbers that we encounter. */
+enum real_value_class {
+ rvc_zero,
+ rvc_normal,
+ rvc_inf,
+ rvc_nan
+};
+
+#define SIGNIFICAND_BITS (128 + HOST_BITS_PER_LONG)
+#define EXP_BITS (32 - 6)
+#define MAX_EXP ((1 << (EXP_BITS - 1)) - 1)
+#define SIGSZ (SIGNIFICAND_BITS / HOST_BITS_PER_LONG)
+#define SIG_MSB ((unsigned long)1 << (HOST_BITS_PER_LONG - 1))
+
+struct GTY(()) real_value {
+ /* Use the same underlying type for all bit-fields, so as to make
+ sure they're packed together, otherwise REAL_VALUE_TYPE_SIZE will
+ be miscomputed. */
+ unsigned int /* ENUM_BITFIELD (real_value_class) */ cl : 2;
+ /* 1 if number is decimal floating point. */
+ unsigned int decimal : 1;
+ /* 1 if number is negative. */
+ unsigned int sign : 1;
+ /* 1 if number is signalling. */
+ unsigned int signalling : 1;
+ /* 1 if number is canonical.
+ All of these flags are generally used for handling cases in real.cc. */
+ unsigned int canonical : 1;
+ /* unbiased exponent of the number. */
+ unsigned int uexp : EXP_BITS;
+ /* significand of the number. */
+ unsigned long sig[SIGSZ];
+};
+
+#define REAL_EXP(REAL) \
+ ((int)((REAL)->uexp ^ (unsigned int)(1 << (EXP_BITS - 1))) \
+ - (1 << (EXP_BITS - 1)))
+#define SET_REAL_EXP(REAL, EXP) \
+ ((REAL)->uexp = ((unsigned int)(EXP) & (unsigned int)((1 << EXP_BITS) - 1)))
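+
+/* Editorial note: the pair of macros above stores the exponent as its
+   low EXP_BITS bits and recovers it by sign extension.  For example,
+   with EXP_BITS == 26, SET_REAL_EXP (r, -1) stores uexp == 0x3ffffff,
+   and REAL_EXP (r) yields (0x3ffffff ^ 0x2000000) - 0x2000000 == -1.  */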
+
+/* Various headers condition prototypes on #ifdef REAL_VALUE_TYPE, so it
+ needs to be a macro. We do need to continue to have a structure tag
+ so that other headers can forward declare it. */
+#define REAL_VALUE_TYPE struct real_value
+
+/* We store a REAL_VALUE_TYPE into an rtx, and we do this by putting it in
+ consecutive "w" slots. Moreover, we've got to compute the number of "w"
+ slots at preprocessor time, which means we can't use sizeof. Guess. */
+
+#define REAL_VALUE_TYPE_SIZE (SIGNIFICAND_BITS + 32)
+#define REAL_WIDTH \
+ (REAL_VALUE_TYPE_SIZE/HOST_BITS_PER_WIDE_INT \
+ + (REAL_VALUE_TYPE_SIZE%HOST_BITS_PER_WIDE_INT ? 1 : 0)) /* round up */
+
+/* Verify the guess. */
+extern char test_real_width
+ [sizeof (REAL_VALUE_TYPE) <= REAL_WIDTH * sizeof (HOST_WIDE_INT) ? 1 : -1];
+
+/* Calculate the format for CONST_DOUBLE. We need as many slots as
+ are necessary to overlay a REAL_VALUE_TYPE on them. This could be
+ as many as four (32-bit HOST_WIDE_INT, 128-bit REAL_VALUE_TYPE).
+
+ A number of places assume that there are always at least two 'w'
+ slots in a CONST_DOUBLE, so we provide them even if one would suffice. */
+
+#if REAL_WIDTH == 1
+# define CONST_DOUBLE_FORMAT "ww"
+#else
+# if REAL_WIDTH == 2
+# define CONST_DOUBLE_FORMAT "ww"
+# else
+# if REAL_WIDTH == 3
+# define CONST_DOUBLE_FORMAT "www"
+# else
+# if REAL_WIDTH == 4
+# define CONST_DOUBLE_FORMAT "wwww"
+# else
+# if REAL_WIDTH == 5
+# define CONST_DOUBLE_FORMAT "wwwww"
+# else
+# if REAL_WIDTH == 6
+# define CONST_DOUBLE_FORMAT "wwwwww"
+# else
+ #error "REAL_WIDTH > 6 not supported"
+# endif
+# endif
+# endif
+# endif
+# endif
+#endif
+
+
+/* Describes the properties of the specific target format in use. */
+struct real_format
+{
+ /* Move to and from the target bytes. */
+ void (*encode) (const struct real_format *, long *,
+ const REAL_VALUE_TYPE *);
+ void (*decode) (const struct real_format *, REAL_VALUE_TYPE *,
+ const long *);
+
+ /* The radix of the exponent and digits of the significand. */
+ int b;
+
+ /* Size of the significand in digits of radix B. */
+ int p;
+
+ /* Size of the significand of a NaN, in digits of radix B. */
+ int pnan;
+
+ /* The minimum negative integer, x, such that b**(x-1) is normalized. */
+ int emin;
+
+ /* The maximum integer, x, such that b**(x-1) is representable. */
+ int emax;
+
+ /* The bit position of the sign bit, for determining whether a value
+ is positive/negative, or -1 for a complex encoding. */
+ int signbit_ro;
+
+ /* The bit position of the sign bit, for changing the sign of a number,
+ or -1 for a complex encoding. */
+ int signbit_rw;
+
+ /* If this is an IEEE interchange format, the number of bits in the
+ format; otherwise, if it is an IEEE extended format, one more
+ than the greatest number of bits in an interchange format it
+ extends; otherwise 0. Formats need not follow the IEEE 754-2008
+ recommended practice regarding how signaling NaNs are identified,
+ and may vary in the choice of default NaN, but must follow other
+ IEEE practice regarding having NaNs, infinities and subnormal
+ values, and the relation of minimum and maximum exponents, and,
+ for interchange formats, the details of the encoding. */
+ int ieee_bits;
+
+ /* Default rounding mode for operations on this format. */
+ bool round_towards_zero;
+ bool has_sign_dependent_rounding;
+
+ /* Properties of the format. */
+ bool has_nans;
+ bool has_inf;
+ bool has_denorm;
+ bool has_signed_zero;
+ bool qnan_msb_set;
+ bool canonical_nan_lsbs_set;
+ const char *name;
+};
+
+
+/* The target format used for each floating point mode.
+ Float modes are followed by decimal float modes, with entries for
+ float modes indexed by (MODE - first float mode), and entries for
+ decimal float modes indexed by (MODE - first decimal float mode) +
+ the number of float modes. */
+extern const struct real_format *
+ real_format_for_mode[NUM_MODE_FLOAT + NUM_MODE_DECIMAL_FLOAT];
+
+#define REAL_MODE_FORMAT(MODE) \
+ (real_format_for_mode[DECIMAL_FLOAT_MODE_P (MODE) \
+ ? (((MODE) - MIN_MODE_DECIMAL_FLOAT) \
+ + NUM_MODE_FLOAT) \
+ : GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ ? ((MODE) - MIN_MODE_FLOAT) \
+ : (gcc_unreachable (), 0)])
+
+#define FLOAT_MODE_FORMAT(MODE) \
+ (REAL_MODE_FORMAT (as_a <scalar_float_mode> (GET_MODE_INNER (MODE))))
+
+/* The following macro determines whether the floating point format is
+ composite, i.e. may contain non-consecutive mantissa bits, in which
+ case compile-time FP overflow may not model run-time overflow. */
+#define MODE_COMPOSITE_P(MODE) \
+ (FLOAT_MODE_P (MODE) \
+ && FLOAT_MODE_FORMAT (MODE)->pnan < FLOAT_MODE_FORMAT (MODE)->p)
+
+/* Accessor macros for format properties. */
+#define MODE_HAS_NANS(MODE) \
+ (FLOAT_MODE_P (MODE) && FLOAT_MODE_FORMAT (MODE)->has_nans)
+#define MODE_HAS_INFINITIES(MODE) \
+ (FLOAT_MODE_P (MODE) && FLOAT_MODE_FORMAT (MODE)->has_inf)
+#define MODE_HAS_SIGNED_ZEROS(MODE) \
+ (FLOAT_MODE_P (MODE) && FLOAT_MODE_FORMAT (MODE)->has_signed_zero)
+#define MODE_HAS_SIGN_DEPENDENT_ROUNDING(MODE) \
+ (FLOAT_MODE_P (MODE) \
+ && FLOAT_MODE_FORMAT (MODE)->has_sign_dependent_rounding)
+
+/* This class allows functions in this file to accept a floating-point
+ format as either a mode or an explicit real_format pointer. In the
+ former case the mode must be VOIDmode (which means "no particular
+ format") or must satisfy SCALAR_FLOAT_MODE_P. */
+class format_helper
+{
+public:
+ format_helper (const real_format *format) : m_format (format) {}
+ template<typename T> format_helper (const T &);
+ const real_format *operator-> () const { return m_format; }
+ operator const real_format *() const { return m_format; }
+
+ bool decimal_p () const { return m_format && m_format->b == 10; }
+ bool can_represent_integral_type_p (tree type) const;
+
+private:
+ const real_format *m_format;
+};
+
+template<typename T>
+inline format_helper::format_helper (const T &m)
+ : m_format (m == VOIDmode ? 0 : REAL_MODE_FORMAT (m))
+{}
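+
+/* Editorial sketch: the converting constructors above let callers pass
+   either a machine mode or an explicit format wherever a format_helper
+   is expected (hypothetical fragment):
+
+     format_helper fmt1 (SFmode);               // via the template ctor
+     format_helper fmt2 (&ieee_double_format);  // via the pointer ctor
+     int digits = significand_size (fmt1);  */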
+
+/* Declare functions in real.cc. */
+
+/* True if the given mode has a NaN representation and the treatment of
+ NaN operands is important. Certain optimizations, such as folding
+ x * 0 into 0, are not correct for NaN operands, and are normally
+ disabled for modes with NaNs. The user can ask for them to be
+ done anyway using the -funsafe-math-optimizations switch. */
+extern bool HONOR_NANS (machine_mode);
+extern bool HONOR_NANS (const_tree);
+extern bool HONOR_NANS (const_rtx);
+
+/* Like HONOR_NANs, but true if we honor signaling NaNs (or sNaNs). */
+extern bool HONOR_SNANS (machine_mode);
+extern bool HONOR_SNANS (const_tree);
+extern bool HONOR_SNANS (const_rtx);
+
+/* As for HONOR_NANS, but true if the mode can represent infinity and
+ the treatment of infinite values is important. */
+extern bool HONOR_INFINITIES (machine_mode);
+extern bool HONOR_INFINITIES (const_tree);
+extern bool HONOR_INFINITIES (const_rtx);
+
+/* Like HONOR_NANS, but true if the given mode distinguishes between
+ positive and negative zero, and the sign of zero is important. */
+extern bool HONOR_SIGNED_ZEROS (machine_mode);
+extern bool HONOR_SIGNED_ZEROS (const_tree);
+extern bool HONOR_SIGNED_ZEROS (const_rtx);
+
+/* Like HONOR_NANS, but true if given mode supports sign-dependent rounding,
+ and the rounding mode is important. */
+extern bool HONOR_SIGN_DEPENDENT_ROUNDING (machine_mode);
+extern bool HONOR_SIGN_DEPENDENT_ROUNDING (const_tree);
+extern bool HONOR_SIGN_DEPENDENT_ROUNDING (const_rtx);
+
+/* Binary or unary arithmetic on tree_code. */
+extern bool real_arithmetic (REAL_VALUE_TYPE *, int, const REAL_VALUE_TYPE *,
+ const REAL_VALUE_TYPE *);
+
+/* Compare reals by tree_code. */
+extern bool real_compare (int, const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *);
+
+/* Determine whether a floating-point value X is infinite. */
+extern bool real_isinf (const REAL_VALUE_TYPE *);
+
+/* Determine whether a floating-point value X is infinite with SIGN. */
+extern bool real_isinf (const REAL_VALUE_TYPE *, bool sign);
+
+/* Determine whether a floating-point value X is a NaN. */
+extern bool real_isnan (const REAL_VALUE_TYPE *);
+
+/* Determine whether a floating-point value X is a signaling NaN. */
+extern bool real_issignaling_nan (const REAL_VALUE_TYPE *);
+
+/* Determine whether floating-point value R is a denormal. This
+ function is only valid for normalized values. */
+inline bool
+real_isdenormal (const REAL_VALUE_TYPE *r, machine_mode mode)
+{
+ return r->cl == rvc_normal && REAL_EXP (r) < REAL_MODE_FORMAT (mode)->emin;
+}
+
+/* Determine whether a floating-point value X is finite. */
+extern bool real_isfinite (const REAL_VALUE_TYPE *);
+
+/* Determine whether a floating-point value X is negative. */
+extern bool real_isneg (const REAL_VALUE_TYPE *);
+
+/* Determine whether a floating-point value X is minus zero. */
+extern bool real_isnegzero (const REAL_VALUE_TYPE *);
+
+/* Determine whether a floating-point value X is plus or minus zero. */
+extern bool real_iszero (const REAL_VALUE_TYPE *);
+
+/* Determine whether a floating-point value X is zero with SIGN. */
+extern bool real_iszero (const REAL_VALUE_TYPE *, bool sign);
+
+/* Test relationships between reals. */
+extern bool real_identical (const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *);
+extern bool real_equal (const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *);
+extern bool real_less (const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *);
+
+/* Extend or truncate to a new format. */
+extern void real_convert (REAL_VALUE_TYPE *, format_helper,
+ const REAL_VALUE_TYPE *);
+
+/* Return true if truncating to NEW is exact. */
+extern bool exact_real_truncate (format_helper, const REAL_VALUE_TYPE *);
+
+/* Render R as a decimal floating point constant. */
+extern void real_to_decimal (char *, const REAL_VALUE_TYPE *, size_t,
+ size_t, int);
+
+/* Render R as a decimal floating point constant, rounded so as to be
+ parsed back to the same value when interpreted in mode MODE. */
+extern void real_to_decimal_for_mode (char *, const REAL_VALUE_TYPE *, size_t,
+ size_t, int, machine_mode);
+
+/* Render R as a hexadecimal floating point constant. */
+extern void real_to_hexadecimal (char *, const REAL_VALUE_TYPE *,
+ size_t, size_t, int);
+
+/* Render R as an integer. */
+extern HOST_WIDE_INT real_to_integer (const REAL_VALUE_TYPE *);
+
+/* Initialize R from a decimal or hexadecimal string. Return -1 if
+ the value underflows, +1 if overflows, and 0 otherwise. */
+extern int real_from_string (REAL_VALUE_TYPE *, const char *);
+/* Wrapper to allow different internal representation for decimal floats. */
+extern void real_from_string3 (REAL_VALUE_TYPE *, const char *, format_helper);
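+
+/* Editorial sketch of a round trip through the string interface
+   (hypothetical fragment; buffer size chosen arbitrarily):
+
+     REAL_VALUE_TYPE r;
+     char buf[64];
+     if (real_from_string (&r, "0x1.8p0") == 0)        // no over/underflow
+       real_to_decimal (buf, &r, sizeof (buf), 0, 1);  // "1.5"  */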
+
+extern long real_to_target (long *, const REAL_VALUE_TYPE *, format_helper);
+
+extern void real_from_target (REAL_VALUE_TYPE *, const long *,
+ format_helper);
+
+extern void real_inf (REAL_VALUE_TYPE *, bool sign = false);
+
+extern bool real_nan (REAL_VALUE_TYPE *, const char *, int, format_helper);
+
+extern void real_maxval (REAL_VALUE_TYPE *, int, machine_mode);
+
+extern void real_2expN (REAL_VALUE_TYPE *, int, format_helper);
+
+extern unsigned int real_hash (const REAL_VALUE_TYPE *);
+
+
+/* Target formats defined in real.cc. */
+extern const struct real_format ieee_single_format;
+extern const struct real_format mips_single_format;
+extern const struct real_format motorola_single_format;
+extern const struct real_format spu_single_format;
+extern const struct real_format ieee_double_format;
+extern const struct real_format mips_double_format;
+extern const struct real_format motorola_double_format;
+extern const struct real_format ieee_extended_motorola_format;
+extern const struct real_format ieee_extended_intel_96_format;
+extern const struct real_format ieee_extended_intel_96_round_53_format;
+extern const struct real_format ieee_extended_intel_128_format;
+extern const struct real_format ibm_extended_format;
+extern const struct real_format mips_extended_format;
+extern const struct real_format ieee_quad_format;
+extern const struct real_format mips_quad_format;
+extern const struct real_format vax_f_format;
+extern const struct real_format vax_d_format;
+extern const struct real_format vax_g_format;
+extern const struct real_format real_internal_format;
+extern const struct real_format decimal_single_format;
+extern const struct real_format decimal_double_format;
+extern const struct real_format decimal_quad_format;
+extern const struct real_format ieee_half_format;
+extern const struct real_format arm_half_format;
+extern const struct real_format arm_bfloat_half_format;
+
+
+/* ====================================================================== */
+/* Legacy macros. */
+
+/* Determine whether a floating-point value X is infinite. */
+#define REAL_VALUE_ISINF(x) real_isinf (&(x))
+
+/* Determine whether a floating-point value X is a NaN. */
+#define REAL_VALUE_ISNAN(x) real_isnan (&(x))
+
+/* Determine whether a floating-point value X is a signaling NaN. */
+#define REAL_VALUE_ISSIGNALING_NAN(x) real_issignaling_nan (&(x))
+
+/* Determine whether a floating-point value X is negative. */
+#define REAL_VALUE_NEGATIVE(x) real_isneg (&(x))
+
+/* Determine whether a floating-point value X is minus zero. */
+#define REAL_VALUE_MINUS_ZERO(x) real_isnegzero (&(x))
+
+/* IN is a REAL_VALUE_TYPE. OUT is an array of longs. */
+#define REAL_VALUE_TO_TARGET_LONG_DOUBLE(IN, OUT) \
+ real_to_target (OUT, &(IN), \
+ float_mode_for_size (LONG_DOUBLE_TYPE_SIZE).require ())
+
+#define REAL_VALUE_TO_TARGET_DOUBLE(IN, OUT) \
+ real_to_target (OUT, &(IN), float_mode_for_size (64).require ())
+
+/* IN is a REAL_VALUE_TYPE. OUT is a long. */
+#define REAL_VALUE_TO_TARGET_SINGLE(IN, OUT) \
+ ((OUT) = real_to_target (NULL, &(IN), float_mode_for_size (32).require ()))
+
+/* Real values to IEEE 754 decimal floats. */
+
+/* IN is a REAL_VALUE_TYPE. OUT is an array of longs. */
+#define REAL_VALUE_TO_TARGET_DECIMAL128(IN, OUT) \
+ real_to_target (OUT, &(IN), decimal_float_mode_for_size (128).require ())
+
+#define REAL_VALUE_TO_TARGET_DECIMAL64(IN, OUT) \
+ real_to_target (OUT, &(IN), decimal_float_mode_for_size (64).require ())
+
+/* IN is a REAL_VALUE_TYPE. OUT is a long. */
+#define REAL_VALUE_TO_TARGET_DECIMAL32(IN, OUT) \
+ ((OUT) = real_to_target (NULL, &(IN), \
+ decimal_float_mode_for_size (32).require ()))
+
+extern REAL_VALUE_TYPE real_value_truncate (format_helper, REAL_VALUE_TYPE);
+
+extern REAL_VALUE_TYPE real_value_negate (const REAL_VALUE_TYPE *);
+extern REAL_VALUE_TYPE real_value_abs (const REAL_VALUE_TYPE *);
+
+extern int significand_size (format_helper);
+
+extern REAL_VALUE_TYPE real_from_string2 (const char *, format_helper);
+
+#define REAL_VALUE_ATOF(s, m) \
+ real_from_string2 (s, m)
+
+#define CONST_DOUBLE_ATOF(s, m) \
+ const_double_from_real_value (real_from_string2 (s, m), m)
+
+#define REAL_VALUE_FIX(r) \
+ real_to_integer (&(r))
+
+/* ??? Not quite right. */
+#define REAL_VALUE_UNSIGNED_FIX(r) \
+ real_to_integer (&(r))
+
+/* ??? These were added for Paranoia support. */
+
+/* Return floor log2(R). */
+extern int real_exponent (const REAL_VALUE_TYPE *);
+
+/* R = A * 2**EXP. */
+extern void real_ldexp (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, int);
+
+/* **** End of software floating point emulator interface macros **** */
+
+/* Constant real values 0, 1, 2, -1 and 0.5. */
+
+extern REAL_VALUE_TYPE dconst0;
+extern REAL_VALUE_TYPE dconst1;
+extern REAL_VALUE_TYPE dconst2;
+extern REAL_VALUE_TYPE dconstm1;
+extern REAL_VALUE_TYPE dconsthalf;
+extern REAL_VALUE_TYPE dconstinf;
+extern REAL_VALUE_TYPE dconstninf;
+
+#define dconst_e() (*dconst_e_ptr ())
+#define dconst_third() (*dconst_third_ptr ())
+#define dconst_quarter() (*dconst_quarter_ptr ())
+#define dconst_sixth() (*dconst_sixth_ptr ())
+#define dconst_ninth() (*dconst_ninth_ptr ())
+#define dconst_sqrt2() (*dconst_sqrt2_ptr ())
+
+/* Function to return the real value special constant 'e'. */
+extern const REAL_VALUE_TYPE * dconst_e_ptr (void);
+
+/* Returns a cached REAL_VALUE_TYPE corresponding to 1/n, for various n. */
+extern const REAL_VALUE_TYPE *dconst_third_ptr (void);
+extern const REAL_VALUE_TYPE *dconst_quarter_ptr (void);
+extern const REAL_VALUE_TYPE *dconst_sixth_ptr (void);
+extern const REAL_VALUE_TYPE *dconst_ninth_ptr (void);
+
+/* Returns the special REAL_VALUE_TYPE corresponding to sqrt(2). */
+extern const REAL_VALUE_TYPE * dconst_sqrt2_ptr (void);
+
+/* Function to return a real value (not a tree node)
+ from a given integer constant. */
+REAL_VALUE_TYPE real_value_from_int_cst (const_tree, const_tree);
+
+/* Return a CONST_DOUBLE with value R and mode M. */
+extern rtx const_double_from_real_value (REAL_VALUE_TYPE, machine_mode);
+
+/* Replace R by 1/R in the given format, if the result is exact. */
+extern bool exact_real_inverse (format_helper, REAL_VALUE_TYPE *);
+
+/* Return true if arithmetic on values in IMODE that were promoted
+ from values in TMODE is equivalent to direct arithmetic on values
+ in TMODE. */
+bool real_can_shorten_arithmetic (machine_mode, machine_mode);
+
+/* In tree.cc: wrap up a REAL_VALUE_TYPE in a tree node. */
+extern tree build_real (tree, REAL_VALUE_TYPE);
+
+/* Likewise, but first truncate the value to the type. */
+extern tree build_real_truncate (tree, REAL_VALUE_TYPE);
+
+/* Calculate R as X raised to the integer exponent N in format FMT. */
+extern bool real_powi (REAL_VALUE_TYPE *, format_helper,
+ const REAL_VALUE_TYPE *, HOST_WIDE_INT);
+
+/* Standard round to integer value functions. */
+extern void real_trunc (REAL_VALUE_TYPE *, format_helper,
+ const REAL_VALUE_TYPE *);
+extern void real_floor (REAL_VALUE_TYPE *, format_helper,
+ const REAL_VALUE_TYPE *);
+extern void real_ceil (REAL_VALUE_TYPE *, format_helper,
+ const REAL_VALUE_TYPE *);
+extern void real_round (REAL_VALUE_TYPE *, format_helper,
+ const REAL_VALUE_TYPE *);
+extern void real_roundeven (REAL_VALUE_TYPE *, format_helper,
+ const REAL_VALUE_TYPE *);
+
+/* Set the sign of R to the sign of X. */
+extern void real_copysign (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *);
+
+/* Check whether the real constant value given is an integer. */
+extern bool real_isinteger (const REAL_VALUE_TYPE *, format_helper);
+extern bool real_isinteger (const REAL_VALUE_TYPE *, HOST_WIDE_INT *);
+
+/* Calculate nextafter (X, Y) in format FMT. */
+extern bool real_nextafter (REAL_VALUE_TYPE *, format_helper,
+ const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *);
+
+/* Write into BUF the maximum representable finite floating-point
+ number, (1 - b**-p) * b**emax for a given FP format FMT as a hex
+ float string. BUF must be large enough to contain the result. */
+extern void get_max_float (const struct real_format *, char *, size_t, bool);
+
+#ifndef GENERATOR_FILE
+/* real related routines. */
+extern wide_int real_to_integer (const REAL_VALUE_TYPE *, bool *, int);
+extern void real_from_integer (REAL_VALUE_TYPE *, format_helper,
+ const wide_int_ref &, signop);
+#endif
+
+/* Fills r with the largest value such that 1 + r*r won't overflow.
+ This is used in both sin (atan (x)) and cos (atan(x)) optimizations. */
+extern void build_sinatan_real (REAL_VALUE_TYPE *, tree);
+
+#endif /* ! GCC_REAL_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/realmpfr.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/realmpfr.h
new file mode 100644
index 0000000..5e032c0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/realmpfr.h
@@ -0,0 +1,35 @@
+/* Definitions of floating-point conversion from compiler
+ internal format to MPFR.
+ Copyright (C) 2010-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_REALGMP_H
+#define GCC_REALGMP_H
+
+#include <mpfr.h>
+#include <mpc.h>
+
+/* Convert between MPFR and REAL_VALUE_TYPE. The caller is
+ responsible for initializing and clearing the MPFR parameter. */
+
+extern void real_from_mpfr (REAL_VALUE_TYPE *, mpfr_srcptr, tree, mpfr_rnd_t);
+extern void real_from_mpfr (REAL_VALUE_TYPE *, mpfr_srcptr,
+ const real_format *, mpfr_rnd_t);
+extern void mpfr_from_real (mpfr_ptr, const REAL_VALUE_TYPE *, mpfr_rnd_t);
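+
+/* Editorial sketch (the caller owns the mpfr_t, per the comment above;
+   SIGNIFICAND_BITS comes from real.h):
+
+     mpfr_t m;
+     mpfr_init2 (m, SIGNIFICAND_BITS);
+     mpfr_from_real (m, &r, MPFR_RNDN);
+     // ...operate on m with MPFR...
+     real_from_mpfr (&r, m, type, MPFR_RNDN);
+     mpfr_clear (m);  */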
+
+#endif /* ! GCC_REALGMP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/recog.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/recog.h
new file mode 100644
index 0000000..539a27c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/recog.h
@@ -0,0 +1,565 @@
+/* Declarations for interface to insn recognizer and insn-output.cc.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RECOG_H
+#define GCC_RECOG_H
+
+/* Random number that should be large enough for all purposes. Also define
+ a type that has at least MAX_RECOG_ALTERNATIVES + 1 bits, with the extra
+ bit giving an invalid value that can be used to mean "uninitialized". */
+#define MAX_RECOG_ALTERNATIVES 35
+typedef uint64_t alternative_mask; /* Keep in sync with genattrtab.cc. */
+
+/* A mask of all alternatives. */
+#define ALL_ALTERNATIVES ((alternative_mask) -1)
+
+/* A mask containing just alternative X. */
+#define ALTERNATIVE_BIT(X) ((alternative_mask) 1 << (X))
+
+/* Types of operands. */
+enum op_type {
+ OP_IN,
+ OP_OUT,
+ OP_INOUT
+};
+
+struct operand_alternative
+{
+ /* Pointer to the beginning of the constraint string for this alternative,
+ for easier access by alternative number. */
+ const char *constraint;
+
+ /* The register class valid for this alternative (possibly NO_REGS). */
+ ENUM_BITFIELD (reg_class) cl : 16;
+
+ /* "Badness" of this alternative, computed from number of '?' and '!'
+ characters in the constraint string. */
+ unsigned int reject : 16;
+
+ /* -1 if no matching constraint was found, or an operand number. */
+ int matches : 8;
+ /* The same information, but reversed: -1 if this operand is not
+ matched by any other, or the operand number of the operand that
+ matches this one. */
+ int matched : 8;
+
+ /* Nonzero if '&' was found in the constraint string. */
+ unsigned int earlyclobber : 1;
+ /* Nonzero if TARGET_MEM_CONSTRAINT was found in the constraint
+ string. */
+ unsigned int memory_ok : 1;
+ /* Nonzero if 'p' was found in the constraint string. */
+ unsigned int is_address : 1;
+ /* Nonzero if 'X' was found in the constraint string, or if the constraint
+ string for this alternative was empty. */
+ unsigned int anything_ok : 1;
+
+ unsigned int unused : 12;
+};
+
+/* Return the class for operand I of alternative ALT, taking matching
+ constraints into account. */
+
+inline enum reg_class
+alternative_class (const operand_alternative *alt, int i)
+{
+ return alt[i].matches >= 0 ? alt[alt[i].matches].cl : alt[i].cl;
+}
+
+/* A class for substituting one rtx for another within an instruction,
+ or for recursively simplifying the instruction as-is. Derived classes
+ can record or filter certain decisions. */
+
+class insn_propagation : public simplify_context
+{
+public:
+ /* Assignments for RESULT_FLAGS.
+
+ UNSIMPLIFIED is true if a substitution has been made inside an rtx
+ X and if neither X nor its parent expressions could be simplified.
+
+ FIRST_SPARE_RESULT is the first flag available for derived classes. */
+ static const uint16_t UNSIMPLIFIED = 1U << 0;
+ static const uint16_t FIRST_SPARE_RESULT = 1U << 1;
+
+ insn_propagation (rtx_insn *);
+ insn_propagation (rtx_insn *, rtx, rtx, bool = true);
+ bool apply_to_pattern (rtx *);
+ bool apply_to_rvalue (rtx *);
+
+ /* Return true if we should accept a substitution into the address of
+ memory expression MEM. Undoing changes OLD_NUM_CHANGES and up restores
+ MEM's original address. */
+ virtual bool check_mem (int /*old_num_changes*/,
+ rtx /*mem*/) { return true; }
+
+ /* Note that we've simplified OLD_RTX into NEW_RTX. When substituting,
+ this only happens if a substitution occurred within OLD_RTX.
+ Undoing OLD_NUM_CHANGES and up will restore the old form of OLD_RTX.
+ OLD_RESULT_FLAGS is the value that RESULT_FLAGS had before processing
+ OLD_RTX. */
+ virtual void note_simplification (int /*old_num_changes*/,
+ uint16_t /*old_result_flags*/,
+ rtx /*old_rtx*/, rtx /*new_rtx*/) {}
+
+private:
+ bool apply_to_mem_1 (rtx);
+ bool apply_to_lvalue_1 (rtx);
+ bool apply_to_rvalue_1 (rtx *);
+ bool apply_to_pattern_1 (rtx *);
+
+public:
+ /* The instruction that we are simplifying or propagating into. */
+ rtx_insn *insn;
+
+ /* If FROM is nonnull, we're replacing FROM with TO, otherwise we're
+ just doing a recursive simplification. */
+ rtx from;
+ rtx to;
+
+ /* The number of times that we have replaced FROM with TO. */
+ unsigned int num_replacements;
+
+ /* A bitmask of flags that describe the result of the simplification;
+ see above for details. */
+ uint16_t result_flags : 16;
+
+ /* True if we should unshare TO when making the next substitution,
+ false if we can use TO itself. */
+ uint16_t should_unshare : 1;
+
+ /* True if we should call check_mem after substituting into a memory. */
+ uint16_t should_check_mems : 1;
+
+ /* True if we should call note_simplification after each simplification. */
+ uint16_t should_note_simplifications : 1;
+
+ /* For future expansion. */
+ uint16_t spare : 13;
+
+ /* Gives the reason that a substitution failed, for debug purposes. */
+ const char *failure_reason;
+};
+
+/* Try to replace FROM with TO in INSN. SHARED_P is true if TO is shared
+ with other instructions, false if INSN can use TO directly. */
+
+inline insn_propagation::insn_propagation (rtx_insn *insn, rtx from, rtx to,
+ bool shared_p)
+ : insn (insn),
+ from (from),
+ to (to),
+ num_replacements (0),
+ result_flags (0),
+ should_unshare (shared_p),
+ should_check_mems (false),
+ should_note_simplifications (false),
+ spare (0),
+ failure_reason (nullptr)
+{
+}
+
+/* Try to simplify INSN without performing a substitution. */
+
+inline insn_propagation::insn_propagation (rtx_insn *insn)
+ : insn_propagation (insn, NULL_RTX, NULL_RTX)
+{
+}
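+
+/* Editorial usage sketch: substitute FROM -> TO throughout INSN's
+   pattern (hypothetical fragment):
+
+     insn_propagation prop (insn, from, to);
+     bool ok = prop.apply_to_pattern (&PATTERN (insn));
+
+   On success, prop.num_replacements records how many times FROM was
+   substituted.  */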
+
+extern void init_recog (void);
+extern void init_recog_no_volatile (void);
+extern int check_asm_operands (rtx);
+extern int asm_operand_ok (rtx, const char *, const char **);
+extern bool validate_change (rtx, rtx *, rtx, bool);
+extern bool validate_unshare_change (rtx, rtx *, rtx, bool);
+extern bool validate_change_xveclen (rtx, rtx *, int, bool);
+extern bool canonicalize_change_group (rtx_insn *insn, rtx x);
+extern int insn_invalid_p (rtx_insn *, bool);
+extern int verify_changes (int);
+extern void confirm_change_group (void);
+extern int apply_change_group (void);
+extern int num_validated_changes (void);
+extern void cancel_changes (int);
+extern void temporarily_undo_changes (int);
+extern void redo_changes (int);
+extern int constrain_operands (int, alternative_mask);
+extern int constrain_operands_cached (rtx_insn *, int);
+extern bool memory_address_addr_space_p (machine_mode, rtx, addr_space_t);
+#define memory_address_p(mode,addr) \
+ memory_address_addr_space_p ((mode), (addr), ADDR_SPACE_GENERIC)
+extern bool strict_memory_address_addr_space_p (machine_mode, rtx,
+ addr_space_t);
+#define strict_memory_address_p(mode,addr) \
+ strict_memory_address_addr_space_p ((mode), (addr), ADDR_SPACE_GENERIC)
+extern int validate_replace_rtx_subexp (rtx, rtx, rtx_insn *, rtx *);
+extern int validate_replace_rtx (rtx, rtx, rtx_insn *);
+extern int validate_replace_rtx_part (rtx, rtx, rtx *, rtx_insn *);
+extern int validate_replace_rtx_part_nosimplify (rtx, rtx, rtx *, rtx_insn *);
+extern void validate_replace_rtx_group (rtx, rtx, rtx_insn *);
+extern void validate_replace_src_group (rtx, rtx, rtx_insn *);
+extern bool validate_simplify_insn (rtx_insn *insn);
+extern int num_changes_pending (void);
+extern bool reg_fits_class_p (const_rtx, reg_class_t, int, machine_mode);
+extern bool valid_insn_p (rtx_insn *);
+
+extern bool offsettable_memref_p (rtx);
+extern bool offsettable_nonstrict_memref_p (rtx);
+extern bool offsettable_address_addr_space_p (int, machine_mode, rtx,
+ addr_space_t);
+#define offsettable_address_p(strict,mode,addr) \
+ offsettable_address_addr_space_p ((strict), (mode), (addr), \
+ ADDR_SPACE_GENERIC)
+extern bool mode_dependent_address_p (rtx, addr_space_t);
+
+extern int recog (rtx, rtx_insn *, int *);
+#ifndef GENERATOR_FILE
+inline int recog_memoized (rtx_insn *insn);
+#endif
+extern void add_clobbers (rtx, int);
+extern int added_clobbers_hard_reg_p (int);
+extern void insn_extract (rtx_insn *);
+extern void extract_insn (rtx_insn *);
+extern void extract_constrain_insn (rtx_insn *insn);
+extern void extract_constrain_insn_cached (rtx_insn *);
+extern void extract_insn_cached (rtx_insn *);
+extern void preprocess_constraints (int, int, const char **,
+ operand_alternative *, rtx **);
+extern const operand_alternative *preprocess_insn_constraints (unsigned int);
+extern void preprocess_constraints (rtx_insn *);
+extern rtx_insn *peep2_next_insn (int);
+extern int peep2_regno_dead_p (int, int);
+extern int peep2_reg_dead_p (int, rtx);
+#ifdef HARD_CONST
+extern rtx peep2_find_free_register (int, int, const char *,
+ machine_mode, HARD_REG_SET *);
+#endif
+extern rtx_insn *peephole2_insns (rtx, rtx_insn *, int *);
+
+extern int store_data_bypass_p (rtx_insn *, rtx_insn *);
+extern int if_test_bypass_p (rtx_insn *, rtx_insn *);
+
+extern void copy_frame_info_to_split_insn (rtx_insn *, rtx_insn *);
+
+#ifndef GENERATOR_FILE
+/* Try recognizing the instruction INSN,
+ and return the code number that results.
+ Remember the code so that repeated calls do not
+ need to spend the time for actual rerecognition.
+
+ This function is the normal interface to instruction recognition.
+ The automatically-generated function `recog' is normally called
+ through this one. */
+
+inline int
+recog_memoized (rtx_insn *insn)
+{
+ if (INSN_CODE (insn) < 0)
+ INSN_CODE (insn) = recog (PATTERN (insn), insn, 0);
+ return INSN_CODE (insn);
+}
+#endif
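+
+/* Editorial sketch: a typical guard before consulting per-insn tables
+   (hypothetical fragment):
+
+     int code = recog_memoized (insn);
+     if (code >= 0)
+       n = insn_data[code].n_operands;
+
+   A negative code means the insn was not recognized.  */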
+
+/* Skip chars until the next ',' or the end of the string. This is
+ useful to skip alternatives in a constraint string. */
+inline const char *
+skip_alternative (const char *p)
+{
+ const char *r = p;
+ while (*r != '\0' && *r != ',')
+ r++;
+ if (*r == ',')
+ r++;
+ return r;
+}
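+
+/* Editorial worked example: with the constraint string "r,m,i",
+   skip_alternative returns a pointer to "m,i"; applying it twice more
+   yields "i" and then the empty string.  */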
+
+/* Nonzero means volatile operands are recognized. */
+extern int volatile_ok;
+
+/* RAII class for temporarily setting volatile_ok. */
+
+class temporary_volatile_ok
+{
+public:
+ temporary_volatile_ok (int value) : save_volatile_ok (volatile_ok)
+ {
+ volatile_ok = value;
+ }
+
+ ~temporary_volatile_ok () { volatile_ok = save_volatile_ok; }
+
+private:
+ temporary_volatile_ok (const temporary_volatile_ok &);
+ int save_volatile_ok;
+};
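+
+/* Editorial usage sketch, mirroring the intended use of the RAII class
+   above (hypothetical fragment):
+
+     {
+       temporary_volatile_ok v (1);  // accept volatile operands
+       // ...call check_asm_operands / recog here...
+     }                               // previous value restored
+*/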
+
+/* Set by constrain_operands to the number of the alternative that
+ matched. */
+extern int which_alternative;
+
+/* The following vectors hold the results from insn_extract. */
+
+struct recog_data_d
+{
+ /* It is very tempting to make the 5 operand related arrays into a
+ structure and index on that. However, to be source compatible
+ with all of the existing md file insn constraints and output
+ templates, we need `operand' as a flat array. Without that
+ member, making an array for the rest seems pointless. */
+
+ /* Gives value of operand N. */
+ rtx operand[MAX_RECOG_OPERANDS];
+
+ /* Gives location where operand N was found. */
+ rtx *operand_loc[MAX_RECOG_OPERANDS];
+
+ /* Gives the constraint string for operand N. */
+ const char *constraints[MAX_RECOG_OPERANDS];
+
+ /* Nonzero if operand N is a match_operator or a match_parallel. */
+ char is_operator[MAX_RECOG_OPERANDS];
+
+ /* Gives the mode of operand N. */
+ machine_mode operand_mode[MAX_RECOG_OPERANDS];
+
+ /* Gives the type (in, out, inout) for operand N. */
+ enum op_type operand_type[MAX_RECOG_OPERANDS];
+
+ /* Gives location where the Nth duplicate-appearance of an operand
+ was found. This is something that matched MATCH_DUP. */
+ rtx *dup_loc[MAX_DUP_OPERANDS];
+
+ /* Gives the operand number that was duplicated in the Nth
+ duplicate-appearance of an operand. */
+ char dup_num[MAX_DUP_OPERANDS];
+
+ /* ??? Note that these are `char' instead of `unsigned char' to (try to)
+ avoid certain lossage from K&R C, wherein `unsigned char' default
+ promotes to `unsigned int' instead of `int' as in ISO C. As of 1999,
+ the most common places to bootstrap from K&R C are SunOS and HPUX,
+ both of which have signed characters by default. The only other
+ supported natives that have both K&R C and unsigned characters are
+ ROMP and Irix 3, and neither have been seen for a while, but do
+ continue to consider unsignedness when performing arithmetic inside
+ a comparison. */
+
+ /* The number of operands of the insn. */
+ char n_operands;
+
+ /* The number of MATCH_DUPs in the insn. */
+ char n_dups;
+
+ /* The number of alternatives in the constraints for the insn. */
+ char n_alternatives;
+
+ /* True if insn is ASM_OPERANDS. */
+ bool is_asm;
+
+ /* In case we are caching, this holds the insn the data was generated for. */
+ rtx_insn *insn;
+};
+
+extern struct recog_data_d recog_data;
+
+extern const operand_alternative *recog_op_alt;
+
+/* Return a pointer to an array in which index OP describes the constraints
+ on operand OP of the current instruction alternative (which_alternative).
+ Only valid after calling preprocess_constraints and constrain_operands. */
+
+inline const operand_alternative *
+which_op_alt ()
+{
+ gcc_checking_assert (IN_RANGE (which_alternative, 0,
+ recog_data.n_alternatives - 1));
+ return &recog_op_alt[which_alternative * recog_data.n_operands];
+}
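+
+/* For example (a sketch; only the CL field is shown):
+
+     const operand_alternative *op_alt = which_op_alt ();
+     for (int i = 0; i < recog_data.n_operands; i++)
+       ... op_alt[i].cl is the register class implied by the constraints
+           on operand I in the matching alternative ...  */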
+
+/* A table defined in insn-output.cc that gives information about
+ each insn-code value. */
+
+typedef bool (*insn_operand_predicate_fn) (rtx, machine_mode);
+typedef const char * (*insn_output_fn) (rtx *, rtx_insn *);
+
+struct insn_gen_fn
+{
+ typedef void (*stored_funcptr) (void);
+
+ template<typename ...Ts>
+ rtx_insn *operator() (Ts... args) const
+ {
+ typedef rtx_insn *(*funcptr) (decltype ((void) args, NULL_RTX)...);
+ return ((funcptr) func) (args...);
+ }
+
+ // This is for compatibility with code that invokes functions like
+ // (*funcptr) (arg)
+ insn_gen_fn operator * (void) const { return *this; }
+
+ // The wrapped function pointer must be public and there must not be any
+ // constructors. Otherwise the insn_data_d struct initializers generated
+ // by genoutput.cc will result in static initializer functions, which defeats
+ // the purpose of the generated insn_data_d array.
+ stored_funcptr func;
+};
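+
+/* For example, a generator can be invoked through the insn_data table
+   declared below (a sketch; CODE_FOR_addsi3, DEST, OP0 and OP1 are
+   hypothetical):
+
+     rtx_insn *insns = insn_data[CODE_FOR_addsi3].genfun (dest, op0, op1);
+
+   The argument count must match the pattern's generator arguments.  */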
+
+struct insn_operand_data
+{
+ const insn_operand_predicate_fn predicate;
+
+ const char *const constraint;
+
+ ENUM_BITFIELD(machine_mode) const mode : 16;
+
+ const char strict_low;
+
+ const char is_operator;
+
+ const char eliminable;
+
+ const char allows_mem;
+};
+
+/* Legal values for insn_data.output_format. Indicate what type of data
+ is stored in insn_data.output. */
+#define INSN_OUTPUT_FORMAT_NONE 0 /* abort */
+#define INSN_OUTPUT_FORMAT_SINGLE 1 /* const char * */
+#define INSN_OUTPUT_FORMAT_MULTI 2 /* const char * const * */
+#define INSN_OUTPUT_FORMAT_FUNCTION 3 /* const char * (*)(...) */
+
+struct insn_data_d
+{
+ const char *const name;
+#if HAVE_DESIGNATED_UNION_INITIALIZERS
+ union {
+ const char *single;
+ const char *const *multi;
+ insn_output_fn function;
+ } output;
+#else
+ struct {
+ const char *single;
+ const char *const *multi;
+ insn_output_fn function;
+ } output;
+#endif
+ const insn_gen_fn genfun;
+ const struct insn_operand_data *const operand;
+
+ const char n_generator_args;
+ const char n_operands;
+ const char n_dups;
+ const char n_alternatives;
+ const char output_format;
+};
+
+extern const struct insn_data_d insn_data[];
+extern int peep2_current_count;
+
+#ifndef GENERATOR_FILE
+#include "insn-codes.h"
+
+/* An enum of boolean attributes that may only depend on the current
+ subtarget, not on things like operands or compiler phase. */
+enum bool_attr {
+ BA_ENABLED,
+ BA_PREFERRED_FOR_SPEED,
+ BA_PREFERRED_FOR_SIZE,
+ BA_LAST = BA_PREFERRED_FOR_SIZE
+};
+
+/* Target-dependent globals. */
+struct target_recog {
+ bool x_initialized;
+ alternative_mask x_bool_attr_masks[NUM_INSN_CODES][BA_LAST + 1];
+ operand_alternative *x_op_alt[NUM_INSN_CODES];
+};
+
+extern struct target_recog default_target_recog;
+#if SWITCHABLE_TARGET
+extern struct target_recog *this_target_recog;
+#else
+#define this_target_recog (&default_target_recog)
+#endif
+
+alternative_mask get_enabled_alternatives (rtx_insn *);
+alternative_mask get_preferred_alternatives (rtx_insn *);
+alternative_mask get_preferred_alternatives (rtx_insn *, basic_block);
+bool check_bool_attrs (rtx_insn *);
+
+void recog_init ();
+
+/* This RAII class can help to undo tentative insn changes on failure.
+ When an object of the class goes out of scope, it undoes all group
+ changes that have been made via the validate_change machinery and
+ not yet confirmed via confirm_change_group.
+
+ For example:
+
+ insn_change_watermark watermark;
+ validate_change (..., true); // A
+ ...
+ if (test)
+ // Undoes change A.
+ return false;
+ ...
+ validate_change (..., true); // B
+ ...
+ if (test)
+ // Undoes changes A and B.
+ return false;
+ ...
+ confirm_change_group ();
+
+ Code that wants to avoid this behavior can use keep ():
+
+ insn_change_watermark watermark;
+ validate_change (..., true); // A
+ ...
+ if (test)
+ // Undoes change A.
+ return false;
+ ...
+ watermark.keep ();
+ validate_change (..., true); // B
+ ...
+ if (test)
+ // Undoes change B, but not A.
+ return false;
+ ...
+ confirm_change_group (); */
+class insn_change_watermark
+{
+public:
+ insn_change_watermark () : m_old_num_changes (num_validated_changes ()) {}
+ ~insn_change_watermark ();
+ void keep () { m_old_num_changes = num_validated_changes (); }
+
+private:
+ int m_old_num_changes;
+};
+
+inline insn_change_watermark::~insn_change_watermark ()
+{
+ if (m_old_num_changes < num_validated_changes ())
+ cancel_changes (m_old_num_changes);
+}
+
+#endif
+
+#endif /* GCC_RECOG_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/reg-notes.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/reg-notes.def
new file mode 100644
index 0000000..1f74a60
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/reg-notes.def
@@ -0,0 +1,254 @@
+/* Register note definitions.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This file defines all the codes that may appear on individual
+ EXPR_LIST, INSN_LIST and INT_LIST rtxes in the REG_NOTES chain of an insn.
+ The codes are stored in the mode field of the rtx. Source files
+ define DEF_REG_NOTE appropriately before including this file.
+
+ CFA related notes meant for RTX_FRAME_RELATED_P instructions
+ should be declared with REG_CFA_NOTE macro instead of REG_NOTE. */
+
+/* Shorthand. */
+#define REG_NOTE(NAME) DEF_REG_NOTE (REG_##NAME)
+#ifndef REG_CFA_NOTE
+# define REG_CFA_NOTE(NAME) REG_NOTE (NAME)
+#endif
+
+/* REG_DEP_TRUE is used in scheduler dependencies lists to represent a
+ read-after-write dependency (i.e. a true data dependency). This is
+ here, not grouped with REG_DEP_ANTI and REG_DEP_OUTPUT, because some
+ passes use a literal 0 for it. */
+REG_NOTE (DEP_TRUE)
+
+/* The value in REG dies in this insn (i.e., it is not needed past
+ this insn). If REG is set in this insn, the REG_DEAD note may,
+ but need not, be omitted. */
+REG_NOTE (DEAD)
+
+/* The REG is autoincremented or autodecremented in this insn. */
+REG_NOTE (INC)
+
+/* Describes the insn as a whole; it says that the insn sets a
+ register to a constant value or to be equivalent to a memory
+ address. If the register is spilled to the stack then the constant
+ value should be substituted for it. The content of the REG_EQUIV
+ note is the constant value or memory address, which may be different
+ from the source of the SET although it has the same value. A
+ REG_EQUIV note may also appear on an insn which copies a register
+ parameter to a pseudo-register, if there is a memory address which
+ could be used to hold that pseudo-register throughout the function. */
+REG_NOTE (EQUIV)
+
+/* Like REG_EQUIV except that the destination is only momentarily
+ equal to the specified rtx. Therefore, it cannot be used for
+ substitution; but it can be used for cse. */
+REG_NOTE (EQUAL)
+
+/* The register is always nonnegative during the containing loop.
+ This is used in branches so that decrement and branch instructions
+ terminating on zero can be matched. There must be an insn pattern
+ in the md file named `decrement_and_branch_until_zero' or else this
+ will never be added to any instructions. */
+REG_NOTE (NONNEG)
+
+/* Identifies a register that is set in this insn and never used. */
+REG_NOTE (UNUSED)
+
+/* Points to a CODE_LABEL. Used by JUMP_INSNs to say that the CODE_LABEL
+ contained in the REG_LABEL_TARGET note is a possible jump target of
+ this insn. This note is an INSN_LIST. */
+REG_NOTE (LABEL_TARGET)
+
+/* Points to a CODE_LABEL. Used by any insn to say that the CODE_LABEL
+ contained in the REG_LABEL_OPERAND note is used by the insn, but as an
+ operand, not as a jump target (though it may indirectly be a jump
+ target for a later jump insn). This note is an INSN_LIST. */
+REG_NOTE (LABEL_OPERAND)
+
+/* REG_DEP_OUTPUT and REG_DEP_ANTI are used in scheduler dependencies lists
+ to represent write-after-write and write-after-read dependencies
+ respectively. */
+REG_NOTE (DEP_OUTPUT)
+REG_NOTE (DEP_ANTI)
+REG_NOTE (DEP_CONTROL)
+
+/* REG_BR_PROB is attached to JUMP_INSNs. It has an
+ integer value (in an INT_LIST). For jumps, it is the probability
+ that this is a taken branch. The integer represents a value of
+ profile_probability type. Use to_reg_br_prob_note and from_reg_br_prob_note
+ to extract the actual value. */
+REG_NOTE (BR_PROB)
+
+/* Attached to a call insn; indicates that the call is malloc-like and
+ that the pointer returned cannot alias anything else. */
+REG_NOTE (NOALIAS)
+
+/* REG_BR_PRED is attached to JUMP_INSNs. It contains a CONCAT of two
+ integer values. The first specifies the branch predictor that added
+ the note; the second specifies the predicted hit rate of the branch,
+ in fixed-point arithmetic based on REG_BR_PROB_BASE. */
+REG_NOTE (BR_PRED)
+
+/* Attached to insns that are RTX_FRAME_RELATED_P, but are too complex
+ for DWARF to interpret what they imply. The attached rtx is used
+ instead of intuition. */
+REG_CFA_NOTE (FRAME_RELATED_EXPR)
+
+/* Attached to insns that are RTX_FRAME_RELATED_P, but are too complex
+ for FRAME_RELATED_EXPR intuition. The insn's first pattern must be
+ a SET, and the destination must be the CFA register. The attached
+ rtx is an expression that defines the CFA. In the simplest case, the
+ rtx could be just the stack_pointer_rtx; more common would be a PLUS
+ with a base register and a constant offset. In the most complicated
+ cases, this will result in a DW_CFA_def_cfa_expression with the rtx
+ expression rendered in a dwarf location expression. */
+REG_CFA_NOTE (CFA_DEF_CFA)
+
+/* Attached to insns that are RTX_FRAME_RELATED_P, but are too complex
+ for FRAME_RELATED_EXPR intuition. This note adjusts the expression
+ from which the CFA is computed. The attached rtx defines a new CFA
+ expression, relative to the old CFA expression. This rtx must be of
+ the form (SET new-cfa-reg (PLUS old-cfa-reg const_int)). If the note
+ rtx is NULL, we use the first SET of the insn. */
+REG_CFA_NOTE (CFA_ADJUST_CFA)
+
+/* Similar to FRAME_RELATED_EXPR, with the additional information that
+ this is a save to memory, i.e. will result in DW_CFA_offset or the
+ like. The pattern or the insn should be a simple store relative to
+ the CFA. */
+REG_CFA_NOTE (CFA_OFFSET)
+
+/* Similar to FRAME_RELATED_EXPR, with the additional information that this
+ is a save to a register, i.e. will result in DW_CFA_register. The insn
+ or the pattern should be simple reg-reg move. */
+REG_CFA_NOTE (CFA_REGISTER)
+
+/* Attached to insns that are RTX_FRAME_RELATED_P, but are too complex
+ for FRAME_RELATED_EXPR intuition. This is a save to memory, i.e. will
+ result in a DW_CFA_expression. The pattern or the insn should be a
+ store of a register to an arbitrary (non-validated) memory address. */
+REG_CFA_NOTE (CFA_EXPRESSION)
+
+/* Attached to insns that are RTX_FRAME_RELATED_P, but are too complex
+ for FRAME_RELATED_EXPR intuition. The DWARF expression computes the value of
+ the given register. */
+REG_CFA_NOTE (CFA_VAL_EXPRESSION)
+
+/* Attached to insns that are RTX_FRAME_RELATED_P, with the information
+ that this is a restore operation, i.e. will result in DW_CFA_restore
+ or the like. Either the attached rtx, or the destination of the insn's
+ first pattern is the register to be restored. */
+REG_CFA_NOTE (CFA_RESTORE)
+
+/* Like CFA_RESTORE but without actually emitting CFI. This can be
+ used to tell the verification infrastructure that a register is
+ saved without intending to restore it. */
+REG_CFA_NOTE (CFA_NO_RESTORE)
+
+/* Attached to insns that are RTX_FRAME_RELATED_P, marks insn that sets
+ vDRAP from DRAP. If vDRAP is a register, vdrap_reg is initialized
+ to the argument, if it is a MEM, it is ignored. */
+REG_CFA_NOTE (CFA_SET_VDRAP)
+
+/* Attached to insns that are RTX_FRAME_RELATED_P, indicating a window
+ save operation, i.e. will result in a DW_CFA_GNU_window_save.
+ The argument is ignored. */
+REG_CFA_NOTE (CFA_WINDOW_SAVE)
+
+/* Attached to insns that are RTX_FRAME_RELATED_P, marks the insn as
+ requiring that all queued information should be flushed *before* insn,
+ regardless of what is visible in the rtl. The argument is ignored.
+ This is normally used for a call instruction which is not exposed to
+ the rest of the compiler as a CALL_INSN. */
+REG_CFA_NOTE (CFA_FLUSH_QUEUE)
+
+/* Attached to insns that are RTX_FRAME_RELATED_P, toggling the mangling status
+ of return address. Currently it's only used by AArch64. The argument is
+ ignored. */
+REG_CFA_NOTE (CFA_TOGGLE_RA_MANGLE)
+
+/* Indicates what exception region an INSN belongs in. This is used
+ to indicate the region to which a call may throw. REGION 0
+ indicates that a call cannot throw at all. REGION -1 indicates
+ that it cannot throw, nor will it execute a non-local goto. */
+REG_NOTE (EH_REGION)
+
+/* Used by haifa-sched to save NOTE_INSN notes across scheduling. */
+REG_NOTE (SAVE_NOTE)
+
+/* Indicates that a call does not return. */
+REG_NOTE (NORETURN)
+
+/* Indicates that an indirect jump is a non-local goto instead of a
+ computed goto. */
+REG_NOTE (NON_LOCAL_GOTO)
+
+/* This kind of note is generated at each call to `setjmp' and to
+ similar functions that can return twice. */
+REG_NOTE (SETJMP)
+
+/* This kind of note is generated at each transactional memory
+ builtin, to indicate we need to generate transaction restart
+ edges for this insn. */
+REG_NOTE (TM)
+
+/* Indicates the cumulative offset of the stack pointer accounting
+ for pushed arguments. This will only be generated when
+ ACCUMULATE_OUTGOING_ARGS is false. */
+REG_NOTE (ARGS_SIZE)
+
+/* Used for communication between IRA and caller-save.cc, indicates
+ that the return value of a call can be used to reinitialize a
+ pseudo reg. */
+REG_NOTE (RETURNED)
+
+/* Indicates the instruction is a stack check probe that should not
+ be combined with other stack adjustments. */
+REG_NOTE (STACK_CHECK)
+
+/* Used to mark a call with the function decl called by the call.
+ The decl might not be available in the call due to splitting of the call
+ insn. This note is a SYMBOL_REF. */
+REG_NOTE (CALL_DECL)
+
+/* Indicates that the call is an untyped_call. These calls are special
+ in that they set all of the target ABI's return value registers to a
+ defined value without explicitly saying so. For example, a typical
+ untyped_call sequence has the form:
+
+ (call (mem (symbol_ref "foo")))
+ (set (reg pseudo1) (reg result1))
+ ...
+ (set (reg pseudon) (reg resultn))
+
+ The ABI specifies that result1..resultn are clobbered by the call,
+ but the RTL does not indicate that result1..resultn are the results
+ of the call. */
+REG_NOTE (UNTYPED_CALL)
+
+/* Indicate that a call should not be verified for control-flow consistency.
+ The target address of the call is assumed as a valid address and no check
+ to validate a branch to the target address is needed. The call is marked
+ when a called function has a 'notrack' attribute. This note is used by the
+ compiler when the option -fcf-protection=branch is specified. */
+REG_NOTE (CALL_NOCF_CHECK)
+
+/* The values passed to the callee, for debuginfo purposes. */
+REG_NOTE (CALL_ARG_LOCATION)
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/regcprop.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/regcprop.h
new file mode 100644
index 0000000..74f29ea
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/regcprop.h
@@ -0,0 +1,25 @@
+/* Copy propagation on hard registers.
+ Copyright (C) 1989-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_REGCPROP_H
+#define GCC_REGCPROP_H
+
+extern void copyprop_hardreg_forward_bb_without_debug_insn (basic_block bb);
+
+#endif /* GCC_REGCPROP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/regrename.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/regrename.h
new file mode 100644
index 0000000..ae33cc1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/regrename.h
@@ -0,0 +1,111 @@
+/* This file contains definitions for the register renamer.
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_REGRENAME_H
+#define GCC_REGRENAME_H
+
+/* We keep linked lists of DU_HEAD structures, each of which describes
+ a chain of occurrences of a reg. */
+class du_head
+{
+public:
+ /* The next chain. */
+ class du_head *next_chain;
+ /* The first and last elements of this chain. */
+ struct du_chain *first, *last;
+ /* The chain that this chain is tied to. */
+ class du_head *tied_chain;
+ /* Describes the register being tracked. */
+ unsigned regno;
+ int nregs;
+
+ /* A unique id to be used as an index into the conflicts bitmaps. */
+ unsigned id;
+ /* A bitmap to record conflicts with other chains. */
+ bitmap_head conflicts;
+ /* Conflicts with untracked hard registers. */
+ HARD_REG_SET hard_conflicts;
+ /* Which registers are fully or partially clobbered by the calls that
+ the chain crosses. */
+ HARD_REG_SET call_clobber_mask;
+
+ /* A bitmask of ABIs used by the calls that the chain crosses. */
+ unsigned int call_abis : NUM_ABI_IDS;
+ /* Nonzero if the register is used in a way that prevents renaming,
+ such as the SET_DEST of a CALL_INSN or an asm operand that used
+ to be a hard register. */
+ unsigned int cannot_rename:1;
+ /* Nonzero if the chain has already been renamed. */
+ unsigned int renamed:1;
+
+ /* Fields for use by target code. */
+ unsigned int target_data_1;
+ unsigned int target_data_2;
+};
+
+typedef class du_head *du_head_p;
+
+/* This struct describes a single occurrence of a register. */
+struct du_chain
+{
+ /* Links to the next occurrence of the register. */
+ struct du_chain *next_use;
+
+ /* The insn where the register appears. */
+ rtx_insn *insn;
+ /* The location inside the insn. */
+ rtx *loc;
+ /* The register class required by the insn at this location. */
+ ENUM_BITFIELD(reg_class) cl : 16;
+};
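+
+/* For example, all occurrences in a chain can be visited starting from
+   its head (a sketch):
+
+     for (struct du_chain *ch = head->first; ch; ch = ch->next_use)
+       ... *ch->loc is an occurrence of head->regno in ch->insn ...  */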
+
+/* This struct describes data gathered during regrename_analyze about
+ a single operand of an insn. */
+struct operand_rr_info
+{
+ /* The number of chains recorded for this operand. */
+ short n_chains;
+ /* True if recording chains for this operand failed. */
+ bool failed;
+ /* Holds either the chain for the operand itself, or for the registers in
+ a memory operand. */
+ struct du_chain *chains[MAX_REGS_PER_ADDRESS];
+ class du_head *heads[MAX_REGS_PER_ADDRESS];
+};
+
+/* A struct to hold a vector of operand_rr_info structures describing the
+ operands of an insn. */
+struct insn_rr_info
+{
+ operand_rr_info *op_info;
+};
+
+
+extern vec<insn_rr_info> insn_rr;
+
+extern void regrename_init (bool);
+extern void regrename_finish (void);
+extern void regrename_analyze (bitmap, bool = true);
+extern du_head_p regrename_chain_from_id (unsigned int);
+extern int find_rename_reg (du_head_p, enum reg_class, HARD_REG_SET *, int,
+ bool);
+extern bool regrename_do_replace (du_head_p, int);
+extern reg_class regrename_find_superclass (du_head_p, int *,
+ HARD_REG_SET *);
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/regs.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/regs.h
new file mode 100644
index 0000000..aea093e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/regs.h
@@ -0,0 +1,392 @@
+/* Define per-register tables for data flow info and register allocation.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_REGS_H
+#define GCC_REGS_H
+
+#define REG_BYTES(R) mode_size[(int) GET_MODE (R)]
+
+/* When you only have the mode of a pseudo register before it has a hard
+ register chosen for it, this reports the size of each hard register
+ a pseudo in such a mode would get allocated to. A target may
+ override this. */
+
+#ifndef REGMODE_NATURAL_SIZE
+#define REGMODE_NATURAL_SIZE(MODE) UNITS_PER_WORD
+#endif
+
+/* Maximum register number used in this function, plus one. */
+
+extern int max_regno;
+
+/* REG_N_REFS and REG_N_SETS are initialized by a call to
+ regstat_init_n_sets_and_refs from the current values of
+ DF_REG_DEF_COUNT and DF_REG_USE_COUNT. REG_N_REFS and REG_N_SETS
+ should only be used if a pass needs to change these values in some
+ magical way or the pass needs to have accurate values for these
+ and is not using incremental df scanning.
+
+ At the end of a pass that uses REG_N_REFS and REG_N_SETS, a call
+ should be made to regstat_free_n_sets_and_refs.
+
+ Local alloc seems to play pretty loose with these values.
+ REG_N_REFS is set to 0 if the register is used in an asm.
+ Furthermore, local_alloc calls regclass to hack both REG_N_REFS and
+ REG_N_SETS for three address insns. Other passes seem to have
+ other special values. */
+
+
+
+/* Structure to hold values for REG_N_SETS (i) and REG_N_REFS (i). */
+
+struct regstat_n_sets_and_refs_t
+{
+ int sets; /* # of times (REG n) is set */
+ int refs; /* # of times (REG n) is used or set */
+};
+
+extern struct regstat_n_sets_and_refs_t *regstat_n_sets_and_refs;
+
+/* Indexed by n, gives number of times (REG n) is used or set. */
+inline int
+REG_N_REFS (int regno)
+{
+ return regstat_n_sets_and_refs[regno].refs;
+}
+
+/* Indexed by n, gives number of times (REG n) is used or set. */
+#define SET_REG_N_REFS(N,V) (regstat_n_sets_and_refs[N].refs = V)
+#define INC_REG_N_REFS(N,V) (regstat_n_sets_and_refs[N].refs += V)
+
+/* Indexed by n, gives number of times (REG n) is set. */
+inline int
+REG_N_SETS (int regno)
+{
+ return regstat_n_sets_and_refs[regno].sets;
+}
+
+/* Indexed by n, gives number of times (REG n) is set. */
+#define SET_REG_N_SETS(N,V) (regstat_n_sets_and_refs[N].sets = V)
+#define INC_REG_N_SETS(N,V) (regstat_n_sets_and_refs[N].sets += V)
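+
+/* For example, a pass that relies on accurate counts might do
+   (a sketch):
+
+     regstat_init_n_sets_and_refs ();
+     if (REG_N_SETS (regno) == 1 && REG_N_REFS (regno) == 2)
+       ... the pseudo is set once and used exactly once ...
+     regstat_free_n_sets_and_refs ();  */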
+
+/* Given a REG, return TRUE if the reg is a PARM_DECL, FALSE otherwise. */
+extern bool reg_is_parm_p (rtx);
+
+/* Functions defined in regstat.cc. */
+extern void regstat_init_n_sets_and_refs (void);
+extern void regstat_free_n_sets_and_refs (void);
+extern void regstat_compute_ri (void);
+extern void regstat_free_ri (void);
+extern bitmap regstat_get_setjmp_crosses (void);
+extern void regstat_compute_calls_crossed (void);
+extern void regstat_free_calls_crossed (void);
+extern void dump_reg_info (FILE *);
+
+/* Register information indexed by register number. This structure is
+ initialized by calling regstat_compute_ri and is destroyed by
+ calling regstat_free_ri. */
+struct reg_info_t
+{
+ int freq; /* # estimated frequency (REG n) is used or set */
+ int deaths; /* # of times (REG n) dies */
+ int calls_crossed; /* # of calls (REG n) is live across */
+ int basic_block; /* # of basic blocks (REG n) is used in */
+};
+
+extern struct reg_info_t *reg_info_p;
+
+/* The number of allocated elements of reg_info_p. */
+extern size_t reg_info_p_size;
+
+/* Estimate frequency of references to register N. */
+
+#define REG_FREQ(N) (reg_info_p[N].freq)
+
+/* The weight for each insn varies from 0 to REG_FREQ_MAX.
+ This constant does not need to be high, as in infrequently executed
+ regions we want to count instructions equivalently to optimize for
+ size instead of speed. */
+#define REG_FREQ_MAX 1000
+
+/* Compute the register frequency from the BB frequency. When optimizing
+ for size, or when profile-driven feedback is available and the function
+ is never executed, every reference counts the same. Otherwise rescale
+ the basic block frequency. */
+#define REG_FREQ_FROM_BB(bb) ((optimize_function_for_size_p (cfun) \
+ || !cfun->cfg->count_max.initialized_p ()) \
+ ? REG_FREQ_MAX \
+ : ((bb)->count.to_frequency (cfun) \
+ * REG_FREQ_MAX / BB_FREQ_MAX) \
+ ? ((bb)->count.to_frequency (cfun) \
+ * REG_FREQ_MAX / BB_FREQ_MAX) \
+ : 1)
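+
+/* For example (a sketch), when accumulating the frequency of a pseudo:
+
+     REG_FREQ (regno) += REG_FREQ_FROM_BB (bb);
+
+   the clamping to at least 1 in the macro above ensures that a
+   reference in a rarely executed block still adds nonzero weight.  */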
+
+/* Indexed by N, gives number of insns in which register N dies.
+ Note that if register N is live around loops, it can die
+ in transitions between basic blocks, and that is not counted here.
+ So this is only a reliable indicator of how many regions of life there are
+ for registers that are contained in one basic block. */
+
+#define REG_N_DEATHS(N) (reg_info_p[N].deaths)
+
+/* Get the number of consecutive words required to hold pseudo-reg N. */
+
+#define PSEUDO_REGNO_SIZE(N) \
+ ((GET_MODE_SIZE (PSEUDO_REGNO_MODE (N)) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* Get the number of bytes required to hold pseudo-reg N. */
+
+#define PSEUDO_REGNO_BYTES(N) \
+ GET_MODE_SIZE (PSEUDO_REGNO_MODE (N))
+
+/* Get the machine mode of pseudo-reg N. */
+
+#define PSEUDO_REGNO_MODE(N) GET_MODE (regno_reg_rtx[N])
+
+/* Indexed by N, gives number of CALL_INSNS across which (REG n) is live. */
+
+#define REG_N_CALLS_CROSSED(N) (reg_info_p[N].calls_crossed)
+
+/* Indexed by n, gives the number of the basic block that (REG n) is used in.
+ If the value is REG_BLOCK_GLOBAL (-1),
+ it means (REG n) is used in more than one basic block.
+ REG_BLOCK_UNKNOWN (0) means it hasn't been seen yet so we don't know.
+ This information remains valid for the rest of the compilation
+ of the current function; it is used to control register allocation. */
+
+#define REG_BLOCK_UNKNOWN 0
+#define REG_BLOCK_GLOBAL -1
+
+#define REG_BASIC_BLOCK(N) (reg_info_p[N].basic_block)
+
+/* Vector of substitutions of register numbers,
+ used to map pseudo regs into hardware regs.
+
+ This can't be folded into reg_n_info without changing all of the
+ machine dependent directories, since the reload functions
+ in the machine dependent files access it. */
+
+extern short *reg_renumber;
+
+/* Flag set by local-alloc or global-alloc if they decide to allocate
+ something in a call-clobbered register. */
+
+extern int caller_save_needed;
+
+/* Select a register mode required for caller save of hard regno REGNO. */
+#ifndef HARD_REGNO_CALLER_SAVE_MODE
+#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
+ choose_hard_reg_mode (REGNO, NREGS, NULL)
+#endif
+
+/* Target-dependent globals. */
+struct target_regs {
+ /* For each starting hard register, the number of consecutive hard
+ registers that a given machine mode occupies. */
+ unsigned char x_hard_regno_nregs[FIRST_PSEUDO_REGISTER][MAX_MACHINE_MODE];
+
+ /* The max value found in x_hard_regno_nregs. */
+ unsigned char x_hard_regno_max_nregs;
+
+ /* For each hard register, the widest mode object that it can contain.
+ This will be a MODE_INT mode if the register can hold integers. Otherwise
+ it will be a MODE_FLOAT or a MODE_CC mode, whichever is valid for the
+ register. */
+ machine_mode x_reg_raw_mode[FIRST_PSEUDO_REGISTER];
+
+ /* Vector indexed by machine mode saying whether there are regs of
+ that mode. */
+ bool x_have_regs_of_mode[MAX_MACHINE_MODE];
+
+ /* 1 if the corresponding class contains a register of the given mode. */
+ char x_contains_reg_of_mode[N_REG_CLASSES][MAX_MACHINE_MODE];
+
+ /* 1 if the corresponding class contains a register of the given mode
+ which is not global and can therefore be allocated. */
+ char x_contains_allocatable_reg_of_mode[N_REG_CLASSES][MAX_MACHINE_MODE];
+
+ /* Record for each mode whether we can move a register directly to or
+ from an object of that mode in memory. If we can't, we won't try
+ to use that mode directly when accessing a field of that mode. */
+ char x_direct_load[NUM_MACHINE_MODES];
+ char x_direct_store[NUM_MACHINE_MODES];
+
+ /* Record for each mode whether we can float-extend from memory. */
+ bool x_float_extend_from_mem[NUM_MACHINE_MODES][NUM_MACHINE_MODES];
+};
+
+extern struct target_regs default_target_regs;
+#if SWITCHABLE_TARGET
+extern struct target_regs *this_target_regs;
+#else
+#define this_target_regs (&default_target_regs)
+#endif
+#define hard_regno_max_nregs \
+ (this_target_regs->x_hard_regno_max_nregs)
+#define reg_raw_mode \
+ (this_target_regs->x_reg_raw_mode)
+#define have_regs_of_mode \
+ (this_target_regs->x_have_regs_of_mode)
+#define contains_reg_of_mode \
+ (this_target_regs->x_contains_reg_of_mode)
+#define contains_allocatable_reg_of_mode \
+ (this_target_regs->x_contains_allocatable_reg_of_mode)
+#define direct_load \
+ (this_target_regs->x_direct_load)
+#define direct_store \
+ (this_target_regs->x_direct_store)
+#define float_extend_from_mem \
+ (this_target_regs->x_float_extend_from_mem)
+
+/* Return the number of hard registers in (reg:MODE REGNO). */
+
+ALWAYS_INLINE unsigned char
+hard_regno_nregs (unsigned int regno, machine_mode mode)
+{
+ return this_target_regs->x_hard_regno_nregs[regno][mode];
+}
+
+/* Return an exclusive upper bound on the registers occupied by hard
+ register (reg:MODE REGNO). */
+
+inline unsigned int
+end_hard_regno (machine_mode mode, unsigned int regno)
+{
+ return regno + hard_regno_nregs (regno, mode);
+}
+
+/* Add to REGS all the registers required to store a value of mode MODE
+ in register REGNO. */
+
+inline void
+add_to_hard_reg_set (HARD_REG_SET *regs, machine_mode mode,
+ unsigned int regno)
+{
+ unsigned int end_regno;
+
+ end_regno = end_hard_regno (mode, regno);
+ do
+ SET_HARD_REG_BIT (*regs, regno);
+ while (++regno < end_regno);
+}
+
+/* Likewise, but remove the registers. */
+
+inline void
+remove_from_hard_reg_set (HARD_REG_SET *regs, machine_mode mode,
+ unsigned int regno)
+{
+ unsigned int end_regno;
+
+ end_regno = end_hard_regno (mode, regno);
+ do
+ CLEAR_HARD_REG_BIT (*regs, regno);
+ while (++regno < end_regno);
+}
+
+/* Return true if REGS contains the whole of (reg:MODE REGNO). */
+
+inline bool
+in_hard_reg_set_p (const_hard_reg_set regs, machine_mode mode,
+ unsigned int regno)
+{
+ unsigned int end_regno;
+
+ gcc_assert (HARD_REGISTER_NUM_P (regno));
+
+ if (!TEST_HARD_REG_BIT (regs, regno))
+ return false;
+
+ end_regno = end_hard_regno (mode, regno);
+
+ if (!HARD_REGISTER_NUM_P (end_regno - 1))
+ return false;
+
+ while (++regno < end_regno)
+ if (!TEST_HARD_REG_BIT (regs, regno))
+ return false;
+
+ return true;
+}
+
+/* Return true if (reg:MODE REGNO) includes an element of REGS. */
+
+inline bool
+overlaps_hard_reg_set_p (const_hard_reg_set regs, machine_mode mode,
+ unsigned int regno)
+{
+ unsigned int end_regno;
+
+ if (TEST_HARD_REG_BIT (regs, regno))
+ return true;
+
+ end_regno = end_hard_regno (mode, regno);
+ while (++regno < end_regno)
+ if (TEST_HARD_REG_BIT (regs, regno))
+ return true;
+
+ return false;
+}
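+
+/* Example use of the functions above (a sketch):
+
+     HARD_REG_SET live;
+     CLEAR_HARD_REG_SET (live);
+     add_to_hard_reg_set (&live, DImode, 0);
+     if (overlaps_hard_reg_set_p (live, SImode, 0))
+       ... some register backing (reg:SI 0) is in LIVE ...  */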
+
+/* Like add_to_hard_reg_set, but use a REGNO/NREGS range instead of
+ REGNO and MODE. */
+
+inline void
+add_range_to_hard_reg_set (HARD_REG_SET *regs, unsigned int regno,
+ int nregs)
+{
+ while (nregs-- > 0)
+ SET_HARD_REG_BIT (*regs, regno + nregs);
+}
+
+/* Likewise, but remove the registers. */
+
+inline void
+remove_range_from_hard_reg_set (HARD_REG_SET *regs, unsigned int regno,
+ int nregs)
+{
+ while (nregs-- > 0)
+ CLEAR_HARD_REG_BIT (*regs, regno + nregs);
+}
+
+/* Like overlaps_hard_reg_set_p, but use a REGNO/NREGS range instead of
+ REGNO and MODE. */
+inline bool
+range_overlaps_hard_reg_set_p (const_hard_reg_set set, unsigned regno,
+ int nregs)
+{
+ while (nregs-- > 0)
+ if (TEST_HARD_REG_BIT (set, regno + nregs))
+ return true;
+ return false;
+}
+
+/* Like in_hard_reg_set_p, but use a REGNO/NREGS range instead of
+ REGNO and MODE. */
+inline bool
+range_in_hard_reg_set_p (const_hard_reg_set set, unsigned regno, int nregs)
+{
+ while (nregs-- > 0)
+ if (!TEST_HARD_REG_BIT (set, regno + nregs))
+ return false;
+ return true;
+}
+
+#endif /* GCC_REGS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/regset.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/regset.h
new file mode 100644
index 0000000..a5f6961
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/regset.h
@@ -0,0 +1,123 @@
+/* Define regsets.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_REGSET_H
+#define GCC_REGSET_H
+
+/* TODO: regset is just a bitmap in its implementation. The compiler does
+ not consistently use one or the other, i.e. sometimes variables are
+ declared as bitmap but they are actually regsets and regset accessors
+ are used, and vice versa, or mixed (see e.g. spilled_regs in IRA).
+
+ This should be cleaned up, either by just dropping the regset type, or
+ by changing all bitmaps that are really regsets to the regset type. For
+ the latter option, a good start would be to change everything allocated
+ on the reg_obstack to regset. */
+
+
+/* Head of register set linked list. */
+typedef bitmap_head regset_head;
+
+/* A pointer to a regset_head. */
+typedef bitmap regset;
+
+/* Allocate a register set with oballoc. */
+#define ALLOC_REG_SET(OBSTACK) BITMAP_ALLOC (OBSTACK)
+
+/* Do any cleanup needed on a regset when it is no longer used. */
+#define FREE_REG_SET(REGSET) BITMAP_FREE (REGSET)
+
+/* Initialize a new regset. */
+#define INIT_REG_SET(HEAD) bitmap_initialize (HEAD, &reg_obstack)
+
+/* Clear a register set by freeing up the linked list. */
+#define CLEAR_REG_SET(HEAD) bitmap_clear (HEAD)
+
+/* True if the register set is empty. */
+#define REG_SET_EMPTY_P(HEAD) bitmap_empty_p (HEAD)
+
+/* Copy a register set to another register set. */
+#define COPY_REG_SET(TO, FROM) bitmap_copy (TO, FROM)
+
+/* Compare two register sets. */
+#define REG_SET_EQUAL_P(A, B) bitmap_equal_p (A, B)
+
+/* `and' a register set with a second register set. */
+#define AND_REG_SET(TO, FROM) bitmap_and_into (TO, FROM)
+
+/* `and' the complement of a register set with a register set. */
+#define AND_COMPL_REG_SET(TO, FROM) bitmap_and_compl_into (TO, FROM)
+
+/* Inclusive or a register set with a second register set. */
+#define IOR_REG_SET(TO, FROM) bitmap_ior_into (TO, FROM)
+
+/* Same, but with FROM being a HARD_REG_SET. */
+#define IOR_REG_SET_HRS(TO, FROM) \
+ bitmap_ior_into (TO, bitmap_view<HARD_REG_SET> (FROM))
+
+/* Exclusive or a register set with a second register set. */
+#define XOR_REG_SET(TO, FROM) bitmap_xor_into (TO, FROM)
+
+/* Or into TO the register set FROM1 `and'ed with the complement of FROM2. */
+#define IOR_AND_COMPL_REG_SET(TO, FROM1, FROM2) \
+ bitmap_ior_and_compl_into (TO, FROM1, FROM2)
+
+/* Clear a single register in a register set. */
+#define CLEAR_REGNO_REG_SET(HEAD, REG) bitmap_clear_bit (HEAD, REG)
+
+/* Set a single register in a register set. */
+#define SET_REGNO_REG_SET(HEAD, REG) bitmap_set_bit (HEAD, REG)
+
+/* Return true if a register is set in a register set. */
+#define REGNO_REG_SET_P(TO, REG) bitmap_bit_p (TO, REG)
+
+/* Copy the hard registers in a register set to the hard register set. */
+extern void reg_set_to_hard_reg_set (HARD_REG_SET *, const_bitmap);
+#define REG_SET_TO_HARD_REG_SET(TO, FROM) \
+do { \
+ CLEAR_HARD_REG_SET (TO); \
+ reg_set_to_hard_reg_set (&TO, FROM); \
+} while (0)
+
+typedef bitmap_iterator reg_set_iterator;
+
+/* Loop over all registers in REGSET, starting with MIN, setting REGNUM to the
+ register number and executing CODE for all registers that are set. */
+#define EXECUTE_IF_SET_IN_REG_SET(REGSET, MIN, REGNUM, RSI) \
+ EXECUTE_IF_SET_IN_BITMAP (REGSET, MIN, REGNUM, RSI)
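+
+/* For example (a sketch; LIVE is a hypothetical regset):
+
+     unsigned regno;
+     reg_set_iterator rsi;
+     EXECUTE_IF_SET_IN_REG_SET (live, FIRST_PSEUDO_REGISTER, regno, rsi)
+       ... REGNO is a pseudo register that is set in LIVE ...  */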
+
+/* Loop over all registers in REGSET1 and REGSET2, starting with MIN, setting
+ REGNUM to the register number and executing CODE for all registers that are
+ set in the first regset and not set in the second. */
+#define EXECUTE_IF_AND_COMPL_IN_REG_SET(REGSET1, REGSET2, MIN, REGNUM, RSI) \
+ EXECUTE_IF_AND_COMPL_IN_BITMAP (REGSET1, REGSET2, MIN, REGNUM, RSI)
+
+/* Loop over all registers in REGSET1 and REGSET2, starting with MIN, setting
+ REGNUM to the register number and executing CODE for all registers that are
+ set in both regsets. */
+#define EXECUTE_IF_AND_IN_REG_SET(REGSET1, REGSET2, MIN, REGNUM, RSI) \
+ EXECUTE_IF_AND_IN_BITMAP (REGSET1, REGSET2, MIN, REGNUM, RSI)
+
+/* An obstack for regsets. */
+extern bitmap_obstack reg_obstack;
+
+/* In df-core.cc (which should use regset consistently instead of bitmap...) */
+extern void dump_regset (regset, FILE *);
+
+#endif /* GCC_REGSET_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/reload.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/reload.h
new file mode 100644
index 0000000..0982d0c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/reload.h
@@ -0,0 +1,466 @@
+/* Communication between reload.cc, reload1.cc and the rest of compiler.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RELOAD_H
+#define GCC_RELOAD_H
+
+/* If secondary reloads are the same for inputs and outputs, define those
+ macros here. */
+
+#ifdef SECONDARY_RELOAD_CLASS
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ SECONDARY_RELOAD_CLASS (CLASS, MODE, X)
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ SECONDARY_RELOAD_CLASS (CLASS, MODE, X)
+#endif
+
+extern int register_move_cost (machine_mode, reg_class_t, reg_class_t);
+extern int memory_move_cost (machine_mode, reg_class_t, bool);
+extern int memory_move_secondary_cost (machine_mode, reg_class_t, bool);
+
+/* Maximum number of reloads we can need. */
+#define MAX_RELOADS (2 * MAX_RECOG_OPERANDS * (MAX_REGS_PER_ADDRESS + 1))
+
+/* Encode the usage of a reload. The following codes are supported:
+
+ RELOAD_FOR_INPUT reload of an input operand
+ RELOAD_FOR_OUTPUT likewise, for output
+ RELOAD_FOR_INSN a reload that must not conflict with anything
+ used in the insn, but may conflict with
+ something used before or after the insn
+ RELOAD_FOR_INPUT_ADDRESS reload for parts of the address of an object
+ that is an input reload
+ RELOAD_FOR_INPADDR_ADDRESS reload needed for RELOAD_FOR_INPUT_ADDRESS
+ RELOAD_FOR_OUTPUT_ADDRESS like RELOAD_FOR_INPUT_ADDRESS, for output
+ RELOAD_FOR_OUTADDR_ADDRESS reload needed for RELOAD_FOR_OUTPUT_ADDRESS
+ RELOAD_FOR_OPERAND_ADDRESS reload for the address of a non-reloaded
+ operand; these don't conflict with
+ any other addresses.
+ RELOAD_FOR_OPADDR_ADDR reload needed for RELOAD_FOR_OPERAND_ADDRESS
+ reloads; usually secondary reloads
+ RELOAD_OTHER none of the above, usually multiple uses
+ RELOAD_FOR_OTHER_ADDRESS reload for part of the address of an input
+ that is marked RELOAD_OTHER.
+
+ This used to be "enum reload_when_needed" but some debuggers have trouble
+ with an enum tag and variable of the same name. */
+
+enum reload_type
+{
+ RELOAD_FOR_INPUT, RELOAD_FOR_OUTPUT, RELOAD_FOR_INSN,
+ RELOAD_FOR_INPUT_ADDRESS, RELOAD_FOR_INPADDR_ADDRESS,
+ RELOAD_FOR_OUTPUT_ADDRESS, RELOAD_FOR_OUTADDR_ADDRESS,
+ RELOAD_FOR_OPERAND_ADDRESS, RELOAD_FOR_OPADDR_ADDR,
+ RELOAD_OTHER, RELOAD_FOR_OTHER_ADDRESS
+};
+
+#ifdef GCC_INSN_CODES_H
+/* Each reload is recorded with a structure like this. */
+struct reload
+{
+ /* The value to reload from */
+ rtx in;
+ /* Where to store the reload register afterward, if necessary (often
+ the same as reload_in). */
+ rtx out;
+
+ /* The class of registers to reload into. */
+ enum reg_class rclass;
+
+ /* The mode this operand should have when reloaded, on input. */
+ machine_mode inmode;
+ /* The mode this operand should have when reloaded, on output. */
+ machine_mode outmode;
+
+ /* The mode of the reload register. */
+ machine_mode mode;
+
+ /* The largest number of registers this reload will require. */
+ unsigned int nregs;
+
+ /* Positive amount to increment or decrement by if
+ reload_in is a PRE_DEC, PRE_INC, POST_DEC, POST_INC.
+ Ignored otherwise (don't assume it is zero). */
+ poly_int64_pod inc;
+ /* A reg for which reload_in is the equivalent.
+ If reload_in is a symbol_ref which came from
+ reg_equiv_constant, then this is the pseudo
+ which has that symbol_ref as equivalent. */
+ rtx in_reg;
+ rtx out_reg;
+
+ /* Used in find_reload_regs to record the allocated register. */
+ int regno;
+ /* This is the register to reload into. If it is zero when `find_reloads'
+ returns, you must find a suitable register in the class specified by
+ reload_reg_class, and store here an rtx for that register with mode from
+ reload_inmode or reload_outmode. */
+ rtx reg_rtx;
+ /* The operand number being reloaded. This is used to group related reloads
+ and need not always be equal to the actual operand number in the insn,
+ though it currently will be; for in-out operands, it is one of the two
+ operand numbers. */
+ int opnum;
+
+ /* Gives the reload number of a secondary input reload, when needed;
+ otherwise -1. */
+ int secondary_in_reload;
+ /* Gives the reload number of a secondary output reload, when needed;
+ otherwise -1. */
+ int secondary_out_reload;
+ /* If a secondary input reload is required, gives the INSN_CODE that uses the
+ secondary reload as a scratch register, or CODE_FOR_nothing if the
+ secondary reload register is to be an intermediate register. */
+ enum insn_code secondary_in_icode;
+ /* Likewise, for a secondary output reload. */
+ enum insn_code secondary_out_icode;
+
+ /* Classifies reload as needed either for addressing an input reload,
+ addressing an output, for addressing a non-reloaded mem ref, or for
+ unspecified purposes (i.e., more than one of the above). */
+ enum reload_type when_needed;
+
+ /* Nonzero for an optional reload. Optional reloads are ignored unless the
+ value is already sitting in a register. */
+ unsigned int optional:1;
+ /* Nonzero if this reload shouldn't be combined with another reload. */
+ unsigned int nocombine:1;
+ /* Nonzero if this is a secondary register for one or more reloads. */
+ unsigned int secondary_p:1;
+ /* Nonzero if this reload must use a register not already allocated to a
+ group. */
+ unsigned int nongroup:1;
+};
+
+extern struct reload rld[MAX_RELOADS];
+extern int n_reloads;
+#endif
+
+/* Target-dependent globals. */
+struct target_reload {
+ /* Nonzero if indirect addressing is supported when the innermost MEM is
+ of the form (MEM (SYMBOL_REF sym)). It is assumed that the level to
+ which these are valid is the same as spill_indirect_levels, below. */
+ bool x_indirect_symref_ok;
+
+ /* Nonzero if indirect addressing is supported on the machine; this means
+ that spilling (REG n) does not require reloading it into a register in
+ order to do (MEM (REG n)) or (MEM (PLUS (REG n) (CONST_INT c))). The
+ value indicates the level of indirect addressing supported, e.g., two
+ means that (MEM (MEM (REG n))) is also valid if (REG n) does not get
+ a hard register. */
+ unsigned char x_spill_indirect_levels;
+
+ /* True if caller-save has been reinitialized. */
+ bool x_caller_save_initialized_p;
+
+ /* Modes for each hard register that we can save. The smallest mode is wide
+ enough to save the entire contents of the register. When saving the
+ register because it is live we first try to save in multi-register modes.
+ If that is not possible the save is done one register at a time. */
+ machine_mode (x_regno_save_mode
+ [FIRST_PSEUDO_REGISTER]
+ [MAX_MOVE_MAX / MIN_UNITS_PER_WORD + 1]);
+
+ /* Nonzero if an address (plus (reg frame_pointer) (reg ...)) is valid
+ in the given mode. */
+ bool x_double_reg_address_ok[MAX_MACHINE_MODE];
+
+ /* We will only make a register eligible for caller-save if it can be
+ saved in its widest mode with a simple SET insn as long as the memory
+ address is valid. We record the INSN_CODE of those insns here since,
+ when we emit them, the addresses might not be valid, so they might not
+ be recognized. */
+ int x_cached_reg_save_code[FIRST_PSEUDO_REGISTER][MAX_MACHINE_MODE];
+ int x_cached_reg_restore_code[FIRST_PSEUDO_REGISTER][MAX_MACHINE_MODE];
+};
+
+extern struct target_reload default_target_reload;
+#if SWITCHABLE_TARGET
+extern struct target_reload *this_target_reload;
+#else
+#define this_target_reload (&default_target_reload)
+#endif
+
+#define indirect_symref_ok \
+ (this_target_reload->x_indirect_symref_ok)
+#define double_reg_address_ok \
+ (this_target_reload->x_double_reg_address_ok)
+#define caller_save_initialized_p \
+ (this_target_reload->x_caller_save_initialized_p)
+
+/* Register equivalences. Indexed by register number. */
+struct reg_equivs_t
+{
+ /* The constant value to which pseudo reg N is equivalent,
+ or zero if pseudo reg N is not equivalent to a constant.
+ find_reloads looks at this in order to replace pseudo reg N
+ with the constant it stands for. */
+ rtx constant;
+
+ /* An invariant value to which pseudo reg N is equivalent.
+ eliminate_regs_in_insn uses this to replace pseudos in particular
+ contexts. */
+ rtx invariant;
+
+ /* A memory location to which pseudo reg N is equivalent,
+ prior to any register elimination (such as frame pointer to stack
+ pointer). Depending on whether or not it is a valid address, this value
+ is transferred to either equiv_address or equiv_mem. */
+ rtx memory_loc;
+
+ /* The address of the stack slot to which pseudo reg N is equivalent.
+ This is used when the address is not valid as a memory address
+ (because its displacement is too big for the machine). */
+ rtx address;
+
+ /* The memory slot to which pseudo reg N is equivalent,
+ or zero if pseudo reg N is not equivalent to a memory slot. */
+ rtx mem;
+
+ /* An EXPR_LIST of REG_EQUIVs containing MEMs with
+ alternate representations of the location of pseudo reg N. */
+ rtx_expr_list *alt_mem_list;
+
+ /* The list of insns that initialized reg N from its equivalent
+ constant or memory slot. */
+ rtx_insn_list *init;
+};
+
+#define reg_equiv_constant(ELT) \
+ (*reg_equivs)[(ELT)].constant
+#define reg_equiv_invariant(ELT) \
+ (*reg_equivs)[(ELT)].invariant
+#define reg_equiv_memory_loc(ELT) \
+ (*reg_equivs)[(ELT)].memory_loc
+#define reg_equiv_address(ELT) \
+ (*reg_equivs)[(ELT)].address
+#define reg_equiv_mem(ELT) \
+ (*reg_equivs)[(ELT)].mem
+#define reg_equiv_alt_mem_list(ELT) \
+ (*reg_equivs)[(ELT)].alt_mem_list
+#define reg_equiv_init(ELT) \
+ (*reg_equivs)[(ELT)].init
+
+extern vec<reg_equivs_t, va_gc> *reg_equivs;
+
+/* All the "earlyclobber" operands of the current insn
+ are recorded here. */
+extern int n_earlyclobbers;
+extern rtx reload_earlyclobbers[MAX_RECOG_OPERANDS];
+
+/* Save the number of operands. */
+extern int reload_n_operands;
+
+/* First uid used by insns created by reload in this function.
+ Used in find_equiv_reg. */
+extern int reload_first_uid;
+
+extern int num_not_at_initial_offset;
+
+#if defined HARD_CONST && defined CLEAR_REG_SET
+/* This structure describes instructions which are relevant for reload.
+ Apart from all regular insns, this also includes CODE_LABELs, since they
+ must be examined for register elimination. */
+class insn_chain
+{
+public:
+ /* Links to the neighbor instructions. */
+ class insn_chain *next, *prev;
+
+ /* Link through a chains set up by calculate_needs_all_insns, containing
+ all insns that need reloading. */
+ class insn_chain *next_need_reload;
+
+ /* The rtx of the insn. */
+ rtx_insn *insn;
+
+ /* The basic block this insn is in. */
+ int block;
+
+ /* Nonzero if find_reloads said the insn requires reloading. */
+ unsigned int need_reload:1;
+ /* Nonzero if find_reloads needs to be run during reload_as_needed to
+ perform modifications on any operands. */
+ unsigned int need_operand_change:1;
+ /* Nonzero if eliminate_regs_in_insn said it requires eliminations. */
+ unsigned int need_elim:1;
+ /* Nonzero if this insn was inserted by perform_caller_saves. */
+ unsigned int is_caller_save_insn:1;
+
+ /* Register life information: record all live hard registers, and
+ all live pseudos that have a hard register. This set also
+ contains pseudos spilled by IRA. */
+ bitmap_head live_throughout;
+ bitmap_head dead_or_set;
+
+ /* Copies of the global variables computed by find_reloads. */
+ struct reload *rld;
+ int n_reloads;
+
+ /* Indicates which registers have already been used for spills. */
+ HARD_REG_SET used_spill_regs;
+};
+
+/* A chain of insn_chain structures to describe all non-note insns in
+ a function. */
+extern class insn_chain *reload_insn_chain;
+
+/* Allocate a new insn_chain structure. */
+extern class insn_chain *new_insn_chain (void);
+#endif
+
+#if defined HARD_CONST
+extern void compute_use_by_pseudos (HARD_REG_SET *, bitmap);
+#endif
+
+/* Functions from reload.cc: */
+
+extern reg_class_t secondary_reload_class (bool, reg_class_t,
+ machine_mode, rtx);
+
+#ifdef GCC_INSN_CODES_H
+extern enum reg_class scratch_reload_class (enum insn_code);
+#endif
+
+/* Return a memory location that will be used to copy X in mode MODE.
+ If we haven't already made a location for this mode in this insn,
+ call find_reloads_address on the location being returned. */
+extern rtx get_secondary_mem (rtx, machine_mode, int, enum reload_type);
+
+/* Clear any secondary memory locations we've made. */
+extern void clear_secondary_mem (void);
+
+/* Transfer all replacements that used to be in reload FROM to be in
+ reload TO. */
+extern void transfer_replacements (int, int);
+
+/* IN_RTX is the value loaded by a reload that we now decided to inherit,
+ or a subpart of it. If we have any replacements registered for IN_RTX,
+ cancel the reloads that were supposed to load them.
+ Return nonzero if we canceled any reloads. */
+extern int remove_address_replacements (rtx in_rtx);
+
+/* Like rtx_equal_p except that it allows a REG and a SUBREG to match
+ if they are the same hard reg, and has special hacks for
+ autoincrement and autodecrement. */
+extern int operands_match_p (rtx, rtx);
+
+/* Return 1 if altering OP will not modify the value of CLOBBER. */
+extern int safe_from_earlyclobber (rtx, rtx);
+
+/* Search the body of INSN for values that need reloading and record them
+ with push_reload. REPLACE nonzero means record also where the values occur
+ so that subst_reloads can be used. */
+extern int find_reloads (rtx_insn *, int, int, int, short *);
+
+/* Compute the sum of X and Y, making canonicalizations assumed in an
+ address, namely: sum constant integers, surround the sum of two
+ constants with a CONST, put the constant as the second operand, and
+ group the constant on the outermost sum. */
+extern rtx form_sum (machine_mode, rtx, rtx);
+
+/* Substitute into the current INSN the registers into which we have reloaded
+ the things that need reloading. */
+extern void subst_reloads (rtx_insn *);
+
+/* Make a copy of any replacements being done into X and move those copies
+ to locations in Y, a copy of X. We only look at the highest level of
+ the RTL. */
+extern void copy_replacements (rtx, rtx);
+
+/* Change any replacements being done to *X to be done to *Y. */
+extern void move_replacements (rtx *x, rtx *y);
+
+/* If LOC was scheduled to be replaced by something, return the replacement.
+ Otherwise, return *LOC. */
+extern rtx find_replacement (rtx *);
+
+/* Nonzero if modifying X will affect IN. */
+extern int reg_overlap_mentioned_for_reload_p (rtx, rtx);
+
+/* Check the insns before INSN to see if there is a suitable register
+ containing the same value as GOAL. */
+extern rtx find_equiv_reg (rtx, rtx_insn *, enum reg_class, int, short *,
+ int, machine_mode);
+
+/* Return 1 if register REGNO is the subject of a clobber in insn INSN. */
+extern int regno_clobbered_p (unsigned int, rtx_insn *, machine_mode, int);
+
+/* Return 1 if X is an operand of an insn that is being earlyclobbered. */
+extern int earlyclobber_operand_p (rtx);
+
+/* Record one reload that needs to be performed. */
+extern int push_reload (rtx, rtx, rtx *, rtx *, enum reg_class,
+ machine_mode, machine_mode,
+ int, int, int, enum reload_type);
+
+/* Functions in reload1.cc: */
+
+/* Initialize the reload pass once per compilation. */
+extern void init_reload (void);
+
+/* The reload pass itself. */
+extern bool reload (rtx_insn *, int);
+
+/* Mark the slots in regs_ever_live for the hard regs
+ used by pseudo-reg number REGNO. */
+extern void mark_home_live (int);
+
+/* Scan X and replace any eliminable registers (such as fp) with a
+ replacement (such as sp), plus an offset. */
+extern rtx eliminate_regs (rtx, machine_mode, rtx);
+extern bool elimination_target_reg_p (rtx);
+
+/* Called from the register allocator to estimate costs of eliminating
+ invariant registers. */
+extern void calculate_elim_costs_all_insns (void);
+
+/* Deallocate the reload register used by reload number R. */
+extern void deallocate_reload_reg (int r);
+
+/* Functions in caller-save.cc: */
+
+/* Initialize for caller-save. */
+extern void init_caller_save (void);
+
+/* Initialize save areas by showing that we haven't allocated any yet. */
+extern void init_save_areas (void);
+
+/* Allocate save areas for any hard registers that might need saving. */
+extern void setup_save_areas (void);
+
+/* Find the places where hard regs are live across calls and save them. */
+extern void save_call_clobbered_regs (void);
+
+/* Replace (subreg (reg)) with the appropriate (reg) for any operands. */
+extern void cleanup_subreg_operands (rtx_insn *);
+
+/* Debugging support. */
+extern void debug_reload_to_stream (FILE *);
+extern void debug_reload (void);
+
+/* Compute the actual register we should reload to, in case we're
+ reloading to/from a register that is wider than a word. */
+extern rtx reload_adjust_reg_for_mode (rtx, machine_mode);
+
+/* Allocate or grow the reg_equiv tables, initializing new entries to 0. */
+extern void grow_reg_equivs (void);
+
+#endif /* GCC_RELOAD_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/resource.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/resource.h
new file mode 100644
index 0000000..155c4e8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/resource.h
@@ -0,0 +1,55 @@
+/* Definitions for computing resource usage of specific insns.
+ Copyright (C) 1999-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RESOURCE_H
+#define GCC_RESOURCE_H
+
+/* Macro to clear all resources. */
+#define CLEAR_RESOURCE(RES) \
+ do { (RES)->memory = (RES)->volatil = (RES)->cc = 0; \
+ CLEAR_HARD_REG_SET ((RES)->regs); } while (0)
+
+/* The resources used by a given insn. */
+struct resources
+{
+ char memory; /* Insn sets or needs a memory location. */
+ char volatil; /* Insn sets or needs a volatile memory loc. */
+ char cc; /* Insn sets or needs the condition codes. */
+ HARD_REG_SET regs; /* Which registers are set or needed. */
+};
+
+/* The kinds of rtl that the mark_*_resources routines will consider. */
+enum mark_resource_type
+{
+ MARK_SRC_DEST = 0,
+ MARK_SRC_DEST_CALL = 1
+};
+
+extern void mark_target_live_regs (rtx_insn *, rtx, struct resources *);
+extern void mark_set_resources (rtx, struct resources *, int,
+ enum mark_resource_type);
+extern void mark_referenced_resources (rtx, struct resources *, bool);
+extern void clear_hashed_info_for_insn (rtx_insn *);
+extern void clear_hashed_info_until_next_barrier (rtx_insn *);
+extern void incr_ticks_for_insn (rtx_insn *);
+extern void mark_end_of_function_resources (rtx, bool);
+extern void init_resource_info (rtx_insn *);
+extern void free_resource_info (void);
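+
+/* A minimal usage sketch (editor's addition, not part of the original
+   header), assuming a valid rtx_insn *INSN: collect everything the insn
+   sets, then test individual resources.
+
+     struct resources set;
+     CLEAR_RESOURCE (&set);
+     mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL);
+     if (set.memory)
+       ... INSN may store to memory ...
+*/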
+
+#endif /* GCC_RESOURCE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl-error.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl-error.h
new file mode 100644
index 0000000..dd123a3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl-error.h
@@ -0,0 +1,31 @@
+/* RTL specific diagnostic subroutines for GCC
+ Copyright (C) 2010-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RTL_ERROR_H
+#define GCC_RTL_ERROR_H
+
+#include "rtl.h"
+#include "diagnostic-core.h"
+
+extern void error_for_asm (const rtx_insn *, const char *,
+ ...) ATTRIBUTE_GCC_DIAG(2,3);
+extern void warning_for_asm (const rtx_insn *, const char *,
+ ...) ATTRIBUTE_GCC_DIAG(2,3);
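+
+/* Illustrative sketch (editor's addition): both functions take the
+   offending insn and a printf-style format string checked via
+   ATTRIBUTE_GCC_DIAG, e.g.
+
+     error_for_asm (insn, "invalid %<asm%>: %s", msg);
+*/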
+
+#endif /* GCC_RTL_ERROR_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl-iter.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl-iter.h
new file mode 100644
index 0000000..00ce4f1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl-iter.h
@@ -0,0 +1,292 @@
+/* RTL iterators
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* This structure describes the subrtxes of an rtx as follows:
+
+ - if the rtx has no subrtxes, START and COUNT are both 0.
+
+ - if all the subrtxes of an rtx are stored in a contiguous block
+ of XEXPs ("e"s), START is the index of the first XEXP and COUNT
+ is the number of them.
+
+ - otherwise START is arbitrary and COUNT is UCHAR_MAX.
+
+ rtx_all_subrtx_bounds applies to all codes. rtx_nonconst_subrtx_bounds
+ is like rtx_all_subrtx_bounds except that all constant rtxes are treated
+ as having no subrtxes. */
+struct rtx_subrtx_bound_info {
+ unsigned char start;
+ unsigned char count;
+};
+extern rtx_subrtx_bound_info rtx_all_subrtx_bounds[];
+extern rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[];
+
+/* Return true if CODE has no subrtxes. */
+
+inline bool
+leaf_code_p (enum rtx_code code)
+{
+ return rtx_all_subrtx_bounds[code].count == 0;
+}
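+
+/* For example (editor's note): leaf_code_p (CONST_INT) is true, while
+   leaf_code_p (PLUS) is false, since a PLUS has two "e" operands.  */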
+
+/* Used to iterate over subrtxes of an rtx. T abstracts the type of
+ access. */
+template <typename T>
+class generic_subrtx_iterator
+{
+ static const size_t LOCAL_ELEMS = 16;
+ typedef typename T::value_type value_type;
+ typedef typename T::rtx_type rtx_type;
+ typedef typename T::rtunion_type rtunion_type;
+
+public:
+ class array_type
+ {
+ public:
+ array_type ();
+ ~array_type ();
+ value_type stack[LOCAL_ELEMS];
+ vec <value_type, va_heap, vl_embed> *heap;
+ };
+ generic_subrtx_iterator (array_type &, value_type,
+ const rtx_subrtx_bound_info *);
+
+ value_type operator * () const;
+ bool at_end () const;
+ void next ();
+ void skip_subrtxes ();
+ void substitute (value_type);
+
+private:
+ /* The bounds to use for iterating over subrtxes. */
+ const rtx_subrtx_bound_info *m_bounds;
+
+ /* The storage used for the worklist. */
+ array_type &m_array;
+
+ /* The current rtx. */
+ value_type m_current;
+
+ /* The base of the current worklist. */
+ value_type *m_base;
+
+ /* The number of subrtxes in M_BASE. */
+ size_t m_end;
+
+ /* The following booleans should only direct control flow; they are
+ not expected to end up in registers or memory. */
+
+ /* True if the iteration is over. */
+ bool m_done;
+
+ /* True if we should skip the subrtxes of M_CURRENT. */
+ bool m_skip;
+
+ /* True if M_CURRENT has been replaced with a different rtx. */
+ bool m_substitute;
+
+ static void free_array (array_type &);
+ static size_t add_subrtxes_to_queue (array_type &, value_type *, size_t,
+ rtx_type);
+ static value_type *add_single_to_queue (array_type &, value_type *, size_t,
+ value_type);
+};
+
+template <typename T>
+inline generic_subrtx_iterator <T>::array_type::array_type () : heap (0) {}
+
+template <typename T>
+inline generic_subrtx_iterator <T>::array_type::~array_type ()
+{
+ if (UNLIKELY (heap != 0))
+ free_array (*this);
+}
+
+/* Iterate over X and its subrtxes, in arbitrary order. Use ARRAY to
+ store the worklist. We use an external array in order to avoid
+ capturing the fields of this structure when taking the address of
+ the array. Use BOUNDS to find the bounds of simple "e"-string codes. */
+
+template <typename T>
+inline generic_subrtx_iterator <T>::
+generic_subrtx_iterator (array_type &array, value_type x,
+ const rtx_subrtx_bound_info *bounds)
+ : m_bounds (bounds),
+ m_array (array),
+ m_current (x),
+ m_base (m_array.stack),
+ m_end (0),
+ m_done (false),
+ m_skip (false),
+ m_substitute (false)
+{
+}
+
+/* Return the current subrtx. */
+
+template <typename T>
+inline typename T::value_type
+generic_subrtx_iterator <T>::operator * () const
+{
+ return m_current;
+}
+
+/* Return true if the iteration has finished. */
+
+template <typename T>
+inline bool
+generic_subrtx_iterator <T>::at_end () const
+{
+ return m_done;
+}
+
+/* Move on to the next subrtx. */
+
+template <typename T>
+inline void
+generic_subrtx_iterator <T>::next ()
+{
+ if (m_substitute)
+ {
+ m_substitute = false;
+ m_skip = false;
+ return;
+ }
+ if (!m_skip)
+ {
+ /* Add the subrtxes of M_CURRENT. */
+ rtx_type x = T::get_rtx (m_current);
+ if (LIKELY (x != 0))
+ {
+ enum rtx_code code = GET_CODE (x);
+ ssize_t count = m_bounds[code].count;
+ if (count > 0)
+ {
+ /* Handle the simple case of a single "e" block that is known
+ to fit into the current array. */
+ if (LIKELY (m_end + count <= LOCAL_ELEMS + 1))
+ {
+ /* Set M_CURRENT to the first subrtx and queue the rest. */
+ ssize_t start = m_bounds[code].start;
+ rtunion_type *src = &x->u.fld[start];
+ if (UNLIKELY (count > 2))
+ m_base[m_end++] = T::get_value (src[2].rt_rtx);
+ if (count > 1)
+ m_base[m_end++] = T::get_value (src[1].rt_rtx);
+ m_current = T::get_value (src[0].rt_rtx);
+ return;
+ }
+ /* Handle cases which aren't simple "e" sequences or where
+ the sequence might overrun M_BASE. */
+ count = add_subrtxes_to_queue (m_array, m_base, m_end, x);
+ if (count > 0)
+ {
+ m_end += count;
+ if (m_end > LOCAL_ELEMS)
+ m_base = m_array.heap->address ();
+ m_current = m_base[--m_end];
+ return;
+ }
+ }
+ }
+ }
+ else
+ m_skip = false;
+ if (m_end == 0)
+ m_done = true;
+ else
+ m_current = m_base[--m_end];
+}
+
+/* Skip the subrtxes of the current rtx. */
+
+template <typename T>
+inline void
+generic_subrtx_iterator <T>::skip_subrtxes ()
+{
+ m_skip = true;
+}
+
+/* Ignore the subrtxes of the current rtx and look at X instead. */
+
+template <typename T>
+inline void
+generic_subrtx_iterator <T>::substitute (value_type x)
+{
+ m_substitute = true;
+ m_current = x;
+}
+
+/* Iterators for const_rtx. */
+struct const_rtx_accessor
+{
+ typedef const_rtx value_type;
+ typedef const_rtx rtx_type;
+ typedef const rtunion rtunion_type;
+ static rtx_type get_rtx (value_type x) { return x; }
+ static value_type get_value (rtx_type x) { return x; }
+};
+typedef generic_subrtx_iterator <const_rtx_accessor> subrtx_iterator;
+
+/* Iterators for non-constant rtx. */
+struct rtx_var_accessor
+{
+ typedef rtx value_type;
+ typedef rtx rtx_type;
+ typedef rtunion rtunion_type;
+ static rtx_type get_rtx (value_type x) { return x; }
+ static value_type get_value (rtx_type x) { return x; }
+};
+typedef generic_subrtx_iterator <rtx_var_accessor> subrtx_var_iterator;
+
+/* Iterators for rtx *. */
+struct rtx_ptr_accessor
+{
+ typedef rtx *value_type;
+ typedef rtx rtx_type;
+ typedef rtunion rtunion_type;
+ static rtx_type get_rtx (value_type ptr) { return *ptr; }
+ static value_type get_value (rtx_type &x) { return &x; }
+};
+typedef generic_subrtx_iterator <rtx_ptr_accessor> subrtx_ptr_iterator;
+
+#define ALL_BOUNDS rtx_all_subrtx_bounds
+#define NONCONST_BOUNDS rtx_nonconst_subrtx_bounds
+
+/* Use ITER to iterate over const_rtx X and its recursive subrtxes,
+ using subrtx_iterator::array ARRAY as the storage for the worklist.
+ ARRAY can be reused for multiple consecutive iterations but shouldn't
+ be shared by two concurrent iterations. TYPE is ALL if all subrtxes
+ are of interest or NONCONST if it is safe to ignore subrtxes of
+ constants. */
+#define FOR_EACH_SUBRTX(ITER, ARRAY, X, TYPE) \
+ for (subrtx_iterator ITER (ARRAY, X, TYPE##_BOUNDS); !ITER.at_end (); \
+ ITER.next ())
+
+/* Like FOR_EACH_SUBRTX, but iterate over subrtxes of an rtx X. */
+#define FOR_EACH_SUBRTX_VAR(ITER, ARRAY, X, TYPE) \
+ for (subrtx_var_iterator ITER (ARRAY, X, TYPE##_BOUNDS); !ITER.at_end (); \
+ ITER.next ())
+
+/* Like FOR_EACH_SUBRTX, but iterate over subrtx pointers of rtx pointer X.
+ For example, if X is &PATTERN (insn) and the pattern is a SET, iterate
+ over &PATTERN (insn), &SET_DEST (PATTERN (insn)), etc. */
+#define FOR_EACH_SUBRTX_PTR(ITER, ARRAY, X, TYPE) \
+ for (subrtx_ptr_iterator ITER (ARRAY, X, TYPE##_BOUNDS); !ITER.at_end (); \
+ ITER.next ())
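+
+/* A minimal usage sketch (editor's addition, not part of the original
+   header): count the REGs mentioned anywhere in X, ignoring the
+   subrtxes of constants.
+
+     static int
+     count_regs (const_rtx x)
+     {
+       subrtx_iterator::array_type array;
+       int n = 0;
+       FOR_EACH_SUBRTX (iter, array, x, NONCONST)
+         if (REG_P (*iter))
+           n++;
+       return n;
+     }
+*/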
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl-ssa.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl-ssa.h
new file mode 100644
index 0000000..7355c6c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl-ssa.h
@@ -0,0 +1,71 @@
+// On-the-side RTL SSA representation -*- C++ -*-
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef GCC_RTL_SSA_H
+#define GCC_RTL_SSA_H 1
+
+// This is an aggregation header file. This means it should contain only
+// other include files.
+
+#if 0
+// Files that use this one should first have:
+#define INCLUDE_ALGORITHM
+#define INCLUDE_FUNCTIONAL
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "rtl.h"
+#include "df.h"
+#endif
+
+// Needed by splay-tree-utils.h and directly by rtl-ssa.
+#include "pretty-print.h"
+
+// Needed directly by recog.h.
+#include "insn-config.h"
+
+// Needed directly by rtl-ssa.
+#include "splay-tree-utils.h"
+#include "recog.h"
+#include "regs.h"
+#include "function-abi.h"
+#include "obstack-utils.h"
+#include "mux-utils.h"
+#include "rtlanal.h"
+
+// Provides the global crtl->ssa.
+#include "memmodel.h"
+#include "tm_p.h"
+#include "emit-rtl.h"
+
+// The rtl-ssa files themselves.
+#include "rtl-ssa/accesses.h"
+#include "rtl-ssa/insns.h"
+#include "rtl-ssa/blocks.h"
+#include "rtl-ssa/changes.h"
+#include "rtl-ssa/functions.h"
+#include "rtl-ssa/is-a.inl"
+#include "rtl-ssa/access-utils.h"
+#include "rtl-ssa/insn-utils.h"
+#include "rtl-ssa/movement.h"
+#include "rtl-ssa/change-utils.h"
+#include "rtl-ssa/member-fns.inl"
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl.def
new file mode 100644
index 0000000..6ddbce3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl.def
@@ -0,0 +1,1368 @@
+/* This file contains the definitions and documentation for the
+ Register Transfer Expressions (rtx's) that make up the
+ Register Transfer Language (rtl) used in the Back End of the GNU compiler.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* Expression definitions and descriptions for all targets are in this file.
+ Some will not be used for some targets.
+
+ The fields in the cpp macro call "DEF_RTL_EXPR()"
+ are used to create declarations in the C source of the compiler.
+
+ The fields are:
+
+ 1. The internal name of the rtx used in the C source.
+ It is a tag in the enumeration "enum rtx_code" defined in "rtl.h".
+ By convention these are in UPPER_CASE.
+
+ 2. The name of the rtx in the external ASCII format read by
+ read_rtx(), and printed by print_rtx().
+ These names are stored in rtx_name[].
+ By convention these are the internal (field 1) names in lower_case.
+
+ 3. The print format, and type of each rtx->u.fld[] (field) in this rtx.
+ These formats are stored in rtx_format[].
+ The meaning of the formats is documented in front of this array in rtl.cc.
+
+ 4. The class of the rtx. These are stored in rtx_class and are accessed
+ via the GET_RTX_CLASS macro. They are defined as follows:
+
+ RTX_CONST_OBJ
+ an rtx code that can be used to represent a constant object
+ (e.g., CONST_INT)
+ RTX_OBJ
+ an rtx code that can be used to represent an object (e.g., REG, MEM)
+ RTX_COMPARE
+ an rtx code for a comparison (e.g., LT, GT)
+ RTX_COMM_COMPARE
+ an rtx code for a commutative comparison (e.g., EQ, NE, ORDERED)
+ RTX_UNARY
+ an rtx code for a unary arithmetic expression (e.g., NEG, NOT)
+ RTX_COMM_ARITH
+ an rtx code for a commutative binary operation (e.g., PLUS, MULT)
+ RTX_TERNARY
+ an rtx code for a non-bitfield three input operation (IF_THEN_ELSE)
+ RTX_BIN_ARITH
+ an rtx code for a non-commutative binary operation (e.g., MINUS, DIV)
+ RTX_BITFIELD_OPS
+ an rtx code for a bit-field operation (ZERO_EXTRACT, SIGN_EXTRACT)
+ RTX_INSN
+ an rtx code for a machine insn (INSN, JUMP_INSN, CALL_INSN) or
+ data that will be output as assembly pseudo-ops (DEBUG_INSN)
+ RTX_MATCH
+ an rtx code for something that matches in insns (e.g., MATCH_DUP)
+ RTX_AUTOINC
+ an rtx code for autoincrement addressing modes (e.g., POST_DEC)
+ RTX_EXTRA
+ everything else
+
+ All of the expressions that appear only in machine descriptions,
+ not in RTL used by the compiler itself, are at the end of the file. */
+
+/* Unknown, or no such operation; the enumeration constant should have
+ value zero. */
+DEF_RTL_EXPR(UNKNOWN, "UnKnown", "*", RTX_EXTRA)
+
+/* Used in the cselib routines to describe a value. Objects of this
+ kind are only allocated in cselib.cc, in an alloc pool instead of in
+ GC memory. The only operand of a VALUE is a cselib_val.
+ var-tracking requires this to have a distinct integral value from
+ DECL codes in trees. */
+DEF_RTL_EXPR(VALUE, "value", "0", RTX_OBJ)
+
+/* The RTL generated for a DEBUG_EXPR_DECL. It links back to the
+ DEBUG_EXPR_DECL in the first operand. */
+DEF_RTL_EXPR(DEBUG_EXPR, "debug_expr", "0", RTX_OBJ)
+
+/* ---------------------------------------------------------------------
+ Expressions used in constructing lists.
+ --------------------------------------------------------------------- */
+
+/* A linked list of expressions. */
+DEF_RTL_EXPR(EXPR_LIST, "expr_list", "ee", RTX_EXTRA)
+
+/* A linked list of instructions.
+ The insns are represented in print by their uids. */
+DEF_RTL_EXPR(INSN_LIST, "insn_list", "ue", RTX_EXTRA)
+
+/* A linked list of integers. */
+DEF_RTL_EXPR(INT_LIST, "int_list", "ie", RTX_EXTRA)
+
+/* SEQUENCE is used in late passes of the compiler to group insns for
+ one reason or another.
+
+ For example, after delay slot filling, branch instructions with filled
+ delay slots are represented as a SEQUENCE of length 1 + n_delay_slots,
+ with the branch instruction in XVECEXP (seq, 0, 0) and the instructions
+ occupying the delay slots in the remaining XVECEXP slots.
+
+ Another place where a SEQUENCE may appear, is in REG_FRAME_RELATED_EXPR
+ notes, to express complex operations that are not obvious from the insn
+ to which the REG_FRAME_RELATED_EXPR note is attached. In this usage of
+ SEQUENCE, the sequence vector slots do not hold real instructions but
+ only pseudo-instructions that can be translated to DWARF CFA expressions.
+
+ Some back ends also use SEQUENCE to group insns in bundles.
+
+ Much of the compiler infrastructure is not prepared to handle SEQUENCE
+ objects. Only passes after pass_free_cfg are expected to handle them. */
+DEF_RTL_EXPR(SEQUENCE, "sequence", "E", RTX_EXTRA)
+
+/* Represents a non-global base address. This is only used in alias.cc. */
+DEF_RTL_EXPR(ADDRESS, "address", "i", RTX_EXTRA)
+
+/* ----------------------------------------------------------------------
+ Expression types used for things in the instruction chain.
+
+ All formats must start with "uu" to handle the chain.
+ Each insn expression holds an rtl instruction and its semantics
+ during back-end processing.
+ See macros in "rtl.h" for the meaning of each rtx->u.fld[].
+
+ ---------------------------------------------------------------------- */
+
+/* An annotation for variable assignment tracking. */
+DEF_RTL_EXPR(DEBUG_INSN, "debug_insn", "uuBeiie", RTX_INSN)
+
+/* An instruction that cannot jump. */
+DEF_RTL_EXPR(INSN, "insn", "uuBeiie", RTX_INSN)
+
+/* An instruction that can possibly jump.
+ Fields ( rtx->u.fld[] ) have the exact same meaning as INSN's. */
+DEF_RTL_EXPR(JUMP_INSN, "jump_insn", "uuBeiie0", RTX_INSN)
+
+/* An instruction that can possibly call a subroutine
+ but which will not change which instruction comes next
+ in the current function.
+ Field ( rtx->u.fld[7] ) is CALL_INSN_FUNCTION_USAGE.
+ All other fields ( rtx->u.fld[] ) have the exact same meaning as INSN's. */
+DEF_RTL_EXPR(CALL_INSN, "call_insn", "uuBeiiee", RTX_INSN)
+
+/* Placeholder for tablejump JUMP_INSNs. The pattern of this kind
+ of rtx is always either an ADDR_VEC or an ADDR_DIFF_VEC. These
+ placeholders do not appear as real instructions inside a basic
+ block, but are considered active_insn_p instructions for historical
+ reasons, when jump table data was represented with JUMP_INSNs. */
+DEF_RTL_EXPR(JUMP_TABLE_DATA, "jump_table_data", "uuBe0000", RTX_INSN)
+
+/* A marker that indicates that control will not flow through. */
+DEF_RTL_EXPR(BARRIER, "barrier", "uu00000", RTX_EXTRA)
+
+/* Holds a label that is followed by instructions.
+ Operand:
+ 3: is used in jump.cc for the use-count of the label.
+ 4: is used in the sh backend.
+ 5: is a number that is unique in the entire compilation.
+ 6: is the user-given name of the label, if any. */
+DEF_RTL_EXPR(CODE_LABEL, "code_label", "uuB00is", RTX_EXTRA)
+
+/* Say where in the code a source line starts, for symbol table's sake.
+ Operand:
+ 3: note-specific data
+ 4: enum insn_note
+ 5: unique number if insn_note == note_insn_deleted_label. */
+DEF_RTL_EXPR(NOTE, "note", "uuB0ni", RTX_EXTRA)
+
+/* ----------------------------------------------------------------------
+ Top level constituents of INSN, JUMP_INSN and CALL_INSN.
+ ---------------------------------------------------------------------- */
+
+/* Conditionally execute code.
+ Operand 0 is the condition that if true, the code is executed.
+ Operand 1 is the code to be executed (typically a SET).
+
+ Semantics are that there are no side effects if the condition
+ is false. This pattern is created automatically by the if_convert
+ pass run after reload or by target-specific splitters. */
+DEF_RTL_EXPR(COND_EXEC, "cond_exec", "ee", RTX_EXTRA)
+
+/* Several operations to be done in parallel (perhaps under COND_EXEC). */
+DEF_RTL_EXPR(PARALLEL, "parallel", "E", RTX_EXTRA)
+
+/* A string that is passed through to the assembler as input.
+ One can obviously pass comments through by using the
+ assembler comment syntax.
+ These occur in an insn all by themselves as the PATTERN.
+ They also appear inside an ASM_OPERANDS
+ as a convenient way to hold a string. */
+DEF_RTL_EXPR(ASM_INPUT, "asm_input", "si", RTX_EXTRA)
+
+/* An assembler instruction with operands.
+ 1st operand is the instruction template.
+ 2nd operand is the constraint for the output.
+ 3rd operand is the number of the output this expression refers to.
+ When an insn stores more than one value, a separate ASM_OPERANDS
+ is made for each output; this integer distinguishes them.
+ 4th is a vector of values of input operands.
+ 5th is a vector of modes and constraints for the input operands.
+ Each element is an ASM_INPUT containing a constraint string
+ and whose mode indicates the mode of the input operand.
+ 6th is a vector of labels that may be branched to by the asm.
+ 7th is the source line number. */
+DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEEi", RTX_EXTRA)
+
+/* A machine-specific operation.
+ 1st operand is a vector of operands being used by the operation so that
+ any needed reloads can be done.
+ 2nd operand is a unique value saying which of a number of machine-specific
+ operations is to be performed.
+ (Note that the vector must be the first operand because of the way that
+ genrecog.cc records positions within an insn.)
+
+ UNSPEC can occur all by itself in a PATTERN, as a component of a PARALLEL,
+ or inside an expression.
+ UNSPEC by itself or as a component of a PARALLEL
+ is currently considered not deletable.
+
+ FIXME: Replace all uses of UNSPEC that appears by itself or as a component
+ of a PARALLEL with USE.
+ */
+DEF_RTL_EXPR(UNSPEC, "unspec", "Ei", RTX_EXTRA)
+
+/* Similar, but a volatile operation and one which may trap. */
+DEF_RTL_EXPR(UNSPEC_VOLATILE, "unspec_volatile", "Ei", RTX_EXTRA)
+
+/* ----------------------------------------------------------------------
+ Table jump addresses.
+ ---------------------------------------------------------------------- */
+
+/* Vector of addresses, stored as full words.
+ Each element is a LABEL_REF to a CODE_LABEL whose address we want. */
+DEF_RTL_EXPR(ADDR_VEC, "addr_vec", "E", RTX_EXTRA)
+
+/* Vector of address differences X0 - BASE, X1 - BASE, ...
+ First operand is BASE; the vector contains the X's.
+ The machine mode of this rtx says how much space to leave
+ for each difference and is adjusted by branch shortening if
+ CASE_VECTOR_SHORTEN_MODE is defined.
+ The third and fourth operands store the target labels with the
+ minimum and maximum addresses respectively.
+ The fifth operand stores flags for use by branch shortening.
+ Set at the start of shorten_branches:
+ min_align: the minimum alignment for any of the target labels.
+ base_after_vec: true iff BASE is after the ADDR_DIFF_VEC.
+ min_after_vec: true iff minimum addr target label is after the ADDR_DIFF_VEC.
+ max_after_vec: true iff maximum addr target label is after the ADDR_DIFF_VEC.
+ min_after_base: true iff minimum address target label is after BASE.
+ max_after_base: true iff maximum address target label is after BASE.
+ Set by the actual branch shortening process:
+ offset_unsigned: true iff offsets have to be treated as unsigned.
+ scale: scaling that is necessary to make offsets fit into the mode.
+
+ The third, fourth and fifth operands are only valid when
+ CASE_VECTOR_SHORTEN_MODE is defined, and only in an optimizing
+ compilation. */
+DEF_RTL_EXPR(ADDR_DIFF_VEC, "addr_diff_vec", "eEee0", RTX_EXTRA)
+
+/* Memory prefetch, with attributes supported on some targets.
+ Operand 0 is the address of the memory to fetch.
+ Operand 1 is 1 for a write access, 0 otherwise.
+ Operand 2 is the level of temporal locality; 0 means there is no
+ temporal locality and 1, 2, and 3 are for increasing levels of temporal
+ locality.
+
+ The attributes specified by operands 1 and 2 are ignored for targets
+ whose prefetch instructions do not support them. */
+DEF_RTL_EXPR(PREFETCH, "prefetch", "eee", RTX_EXTRA)
+
+/* ----------------------------------------------------------------------
+ At the top level of an instruction (perhaps under PARALLEL).
+ ---------------------------------------------------------------------- */
+
+/* Assignment.
+ Operand 0 is the location (REG, MEM, PC or whatever) assigned to.
+ Operand 1 is the value stored there.
+ ALL assignments must use SET.
+ Instructions that do multiple assignments must use multiple SETs
+ under a PARALLEL. */
+DEF_RTL_EXPR(SET, "set", "ee", RTX_EXTRA)
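+
+/* For example (editor's note), incrementing a register is written as
+     (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 1)))
+   and an insn performing two assignments wraps two SETs in a PARALLEL.  */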
+
+/* Indicate something is used in a way that we don't want to explain.
+ For example, subroutine calls will use the register
+ in which the static chain is passed.
+
+ USE cannot appear as an operand of other rtx except for PARALLEL.
+ USE is not deletable, as it indicates that the operand
+ is used in some unknown way. */
+DEF_RTL_EXPR(USE, "use", "e", RTX_EXTRA)
+
+/* Indicate something is clobbered in a way that we don't want to explain.
+ For example, subroutine calls will clobber some physical registers
+ (the ones that are by convention not saved).
+
+ CLOBBER cannot appear as an operand of other rtx except for PARALLEL.
+ CLOBBER of a hard register appearing by itself (not within PARALLEL)
+ is considered undeletable before reload. */
+DEF_RTL_EXPR(CLOBBER, "clobber", "e", RTX_EXTRA)
+
+/* Call a subroutine.
+ Operand 0 is the address to call.
+ Operand 1 is the number of arguments. */
+
+DEF_RTL_EXPR(CALL, "call", "ee", RTX_EXTRA)
+
+/* Return from a subroutine. */
+
+DEF_RTL_EXPR(RETURN, "return", "", RTX_EXTRA)
+
+/* Like RETURN, but truly represents only a function return, while
+ RETURN may represent an insn that also performs other functions
+ of the function epilogue. Like RETURN, this may also occur in
+ conditional jumps. */
+DEF_RTL_EXPR(SIMPLE_RETURN, "simple_return", "", RTX_EXTRA)
+
+/* Special for EH return from subroutine. */
+
+DEF_RTL_EXPR(EH_RETURN, "eh_return", "", RTX_EXTRA)
+
+/* Conditional trap.
+ Operand 0 is the condition.
+ Operand 1 is the trap code.
+ For an unconditional trap, make the condition (const_int 1). */
+DEF_RTL_EXPR(TRAP_IF, "trap_if", "ee", RTX_EXTRA)
+
+/* ----------------------------------------------------------------------
+ Primitive values for use in expressions.
+ ---------------------------------------------------------------------- */
+
+/* numeric integer constant */
+DEF_RTL_EXPR(CONST_INT, "const_int", "w", RTX_CONST_OBJ)
+
+/* numeric integer constant too wide to fit in a CONST_INT */
+DEF_RTL_EXPR(CONST_WIDE_INT, "const_wide_int", "", RTX_CONST_OBJ)
+
+/* An rtx representation of a poly_wide_int. */
+DEF_RTL_EXPR(CONST_POLY_INT, "const_poly_int", "", RTX_CONST_OBJ)
+
+/* fixed-point constant */
+DEF_RTL_EXPR(CONST_FIXED, "const_fixed", "www", RTX_CONST_OBJ)
+
+/* numeric floating point or integer constant. If the mode is
+ VOIDmode it is an int otherwise it has a floating point mode and a
+ floating point value. Operands hold the value. They are all 'w'
+ and there may be from 2 to 6; see real.h. */
+DEF_RTL_EXPR(CONST_DOUBLE, "const_double", CONST_DOUBLE_FORMAT, RTX_CONST_OBJ)
+
+/* Describes a vector constant. */
+DEF_RTL_EXPR(CONST_VECTOR, "const_vector", "E", RTX_CONST_OBJ)
+
+/* String constant. Used for attributes in machine descriptions and
+ for special cases in DWARF2 debug output. NOT used for source-
+ language string constants. */
+DEF_RTL_EXPR(CONST_STRING, "const_string", "s", RTX_OBJ)
+
+/* This is used to encapsulate an expression whose value is constant
+ (such as the sum of a SYMBOL_REF and a CONST_INT) so that it will be
+ recognized as a constant operand rather than by arithmetic instructions. */
+
+DEF_RTL_EXPR(CONST, "const", "e", RTX_CONST_OBJ)
+
+/* program counter. Ordinary jumps are represented
+ by a SET whose first operand is (PC). */
+DEF_RTL_EXPR(PC, "pc", "", RTX_OBJ)
+
+/* A register. The "operand" is the register number, accessed with
+ the REGNO macro. If this number is less than FIRST_PSEUDO_REGISTER
+ then a hardware register is being referred to. The second operand
+ points to a reg_attrs structure.
+ This rtx needs to have as many (or more) fields as a MEM, since we
+ can change REG rtx's into MEMs during reload. */
+DEF_RTL_EXPR(REG, "reg", "r", RTX_OBJ)
+
+/* A scratch register. This represents a register used only within a
+ single insn. It will be replaced by a REG during register allocation
+ or reload unless the constraint indicates that the register won't be
+ needed, in which case it can remain a SCRATCH. */
+DEF_RTL_EXPR(SCRATCH, "scratch", "", RTX_OBJ)
+
+/* A reference to a part of another value. The first operand is the
+ complete value and the second is the byte offset of the selected part. */
+DEF_RTL_EXPR(SUBREG, "subreg", "ep", RTX_EXTRA)
+
+/* This one-argument rtx is used for move instructions
+ that are guaranteed to alter only the low part of a destination.
+ Thus, (SET (SUBREG:HI (REG...)) (MEM:HI ...))
+ has an unspecified effect on the high part of REG,
+ but (SET (STRICT_LOW_PART (SUBREG:HI (REG...))) (MEM:HI ...))
+ is guaranteed to alter only the bits of REG that are in HImode.
+
+ The actual instruction used is probably the same in both cases,
+ but the register constraints may be tighter when STRICT_LOW_PART
+ is in use. */
+
+DEF_RTL_EXPR(STRICT_LOW_PART, "strict_low_part", "e", RTX_EXTRA)
+
+/* (CONCAT a b) represents the virtual concatenation of a and b
+ to make a value that has as many bits as a and b put together.
+ This is used for complex values. Normally it appears only
+ in DECL_RTLs and during RTL generation, but not in the insn chain. */
+DEF_RTL_EXPR(CONCAT, "concat", "ee", RTX_OBJ)
+
+/* (CONCATN [a1 a2 ... an]) represents the virtual concatenation of
+ all An to make a value. This is an extension of CONCAT to larger
+ number of components. Like CONCAT, it should not appear in the
+ insn chain. Every element of the CONCATN is the same size. */
+DEF_RTL_EXPR(CONCATN, "concatn", "E", RTX_OBJ)
+
+/* A memory location; operand is the address. The second operand is the
+ alias set to which this MEM belongs. We use `0' instead of `w' for this
+ field so that the field need not be specified in machine descriptions. */
+DEF_RTL_EXPR(MEM, "mem", "e0", RTX_OBJ)
+
+/* Reference to an assembler label in the code for this function.
+ The operand is a CODE_LABEL found in the insn chain. */
+DEF_RTL_EXPR(LABEL_REF, "label_ref", "u", RTX_CONST_OBJ)
+
+/* Reference to a named label:
+ Operand 0: label name
+ Operand 1: tree from which this symbol is derived, or null.
+ This is either a DECL node, or some kind of constant. */
+DEF_RTL_EXPR(SYMBOL_REF, "symbol_ref", "s0", RTX_CONST_OBJ)
+
+/* ----------------------------------------------------------------------
+ Expressions for operators in an rtl pattern
+ ---------------------------------------------------------------------- */
+
+/* if_then_else. This is used in representing ordinary
+ conditional jump instructions.
+ Operand:
+ 0: condition
+ 1: then expr
+ 2: else expr */
+DEF_RTL_EXPR(IF_THEN_ELSE, "if_then_else", "eee", RTX_TERNARY)
+
+/* Comparison, produces a condition code result. */
+DEF_RTL_EXPR(COMPARE, "compare", "ee", RTX_BIN_ARITH)
+
+/* plus */
+DEF_RTL_EXPR(PLUS, "plus", "ee", RTX_COMM_ARITH)
+
+/* Operand 0 minus operand 1. */
+DEF_RTL_EXPR(MINUS, "minus", "ee", RTX_BIN_ARITH)
+
+/* Minus operand 0. */
+DEF_RTL_EXPR(NEG, "neg", "e", RTX_UNARY)
+
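+/* Product of operand 0 and operand 1. */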
+DEF_RTL_EXPR(MULT, "mult", "ee", RTX_COMM_ARITH)
+
+/* Multiplication with signed saturation */
+DEF_RTL_EXPR(SS_MULT, "ss_mult", "ee", RTX_COMM_ARITH)
+/* Multiplication with unsigned saturation */
+DEF_RTL_EXPR(US_MULT, "us_mult", "ee", RTX_COMM_ARITH)
+
+/* Signed high-part multiplication. */
+DEF_RTL_EXPR(SMUL_HIGHPART, "smul_highpart", "ee", RTX_COMM_ARITH)
+/* Unsigned high-part multiplication. */
+DEF_RTL_EXPR(UMUL_HIGHPART, "umul_highpart", "ee", RTX_COMM_ARITH)
+
+/* Operand 0 divided by operand 1. */
+DEF_RTL_EXPR(DIV, "div", "ee", RTX_BIN_ARITH)
+/* Division with signed saturation */
+DEF_RTL_EXPR(SS_DIV, "ss_div", "ee", RTX_BIN_ARITH)
+/* Division with unsigned saturation */
+DEF_RTL_EXPR(US_DIV, "us_div", "ee", RTX_BIN_ARITH)
+
+/* Remainder of operand 0 divided by operand 1. */
+DEF_RTL_EXPR(MOD, "mod", "ee", RTX_BIN_ARITH)
+
+/* Unsigned divide and remainder. */
+DEF_RTL_EXPR(UDIV, "udiv", "ee", RTX_BIN_ARITH)
+DEF_RTL_EXPR(UMOD, "umod", "ee", RTX_BIN_ARITH)
+
+/* Bitwise operations. */
+DEF_RTL_EXPR(AND, "and", "ee", RTX_COMM_ARITH)
+DEF_RTL_EXPR(IOR, "ior", "ee", RTX_COMM_ARITH)
+DEF_RTL_EXPR(XOR, "xor", "ee", RTX_COMM_ARITH)
+DEF_RTL_EXPR(NOT, "not", "e", RTX_UNARY)
+
+/* Operand:
+ 0: value to be shifted.
+ 1: number of bits. */
+DEF_RTL_EXPR(ASHIFT, "ashift", "ee", RTX_BIN_ARITH) /* shift left */
+DEF_RTL_EXPR(ROTATE, "rotate", "ee", RTX_BIN_ARITH) /* rotate left */
+DEF_RTL_EXPR(ASHIFTRT, "ashiftrt", "ee", RTX_BIN_ARITH) /* arithmetic shift right */
+DEF_RTL_EXPR(LSHIFTRT, "lshiftrt", "ee", RTX_BIN_ARITH) /* logical shift right */
+DEF_RTL_EXPR(ROTATERT, "rotatert", "ee", RTX_BIN_ARITH) /* rotate right */
+
+/* Minimum and maximum values of two operands. We need both signed and
+ unsigned forms. (We cannot use MIN for SMIN because it conflicts
+ with a macro of the same name.) The signed variants should be used
+ with floating point. Further, if both operands are zeros, or if either
+ operand is NaN, then it is unspecified which of the two operands is
+ returned as the result. */
+
+DEF_RTL_EXPR(SMIN, "smin", "ee", RTX_COMM_ARITH)
+DEF_RTL_EXPR(SMAX, "smax", "ee", RTX_COMM_ARITH)
+DEF_RTL_EXPR(UMIN, "umin", "ee", RTX_COMM_ARITH)
+DEF_RTL_EXPR(UMAX, "umax", "ee", RTX_COMM_ARITH)
+
+/* These unary operations are used to represent incrementation
+ and decrementation as they occur in memory addresses.
+ The amount of increment or decrement are not represented
+ because they can be understood from the machine-mode of the
+ containing MEM. These operations exist in only two cases:
+ 1. pushes onto the stack.
+ 2. created automatically by the auto-inc-dec pass. */
+DEF_RTL_EXPR(PRE_DEC, "pre_dec", "e", RTX_AUTOINC)
+DEF_RTL_EXPR(PRE_INC, "pre_inc", "e", RTX_AUTOINC)
+DEF_RTL_EXPR(POST_DEC, "post_dec", "e", RTX_AUTOINC)
+DEF_RTL_EXPR(POST_INC, "post_inc", "e", RTX_AUTOINC)
+
+/* These binary operations are used to represent generic address
+ side-effects in memory addresses, except for simple incrementation
+ or decrementation, which use the above operations. They are
+ created automatically by the auto-inc-dec pass.
+ The first operand is a REG which is used as the address.
+ The second operand is an expression that is assigned to the
+ register, either before (PRE_MODIFY) or after (POST_MODIFY)
+ evaluating the address.
+ Currently, the compiler can only handle second operands of the
+ form (plus (reg) (reg)) and (plus (reg) (const_int)), where
+ the first operand of the PLUS has to be the same register as
+ the first operand of the *_MODIFY. */
+DEF_RTL_EXPR(PRE_MODIFY, "pre_modify", "ee", RTX_AUTOINC)
+DEF_RTL_EXPR(POST_MODIFY, "post_modify", "ee", RTX_AUTOINC)
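+
+/* For example (editor's note), a load that advances its base register by
+   four bytes afterwards can be written as
+     (mem:SI (post_modify (reg:SI 100)
+                          (plus:SI (reg:SI 100) (const_int 4))))  */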
+
+/* Comparison operations. The first 6 are allowed only for integral,
+floating-point and vector modes. LTGT is only allowed for floating-point
+modes. The last 4 are allowed only for integral and vector modes.
+For floating-point operations, if either operand is a NaN, then NE returns
+true and the remaining operations return false. The operations other than
+EQ and NE may generate an exception on quiet NaNs. */
+DEF_RTL_EXPR(NE, "ne", "ee", RTX_COMM_COMPARE)
+DEF_RTL_EXPR(EQ, "eq", "ee", RTX_COMM_COMPARE)
+DEF_RTL_EXPR(GE, "ge", "ee", RTX_COMPARE)
+DEF_RTL_EXPR(GT, "gt", "ee", RTX_COMPARE)
+DEF_RTL_EXPR(LE, "le", "ee", RTX_COMPARE)
+DEF_RTL_EXPR(LT, "lt", "ee", RTX_COMPARE)
+DEF_RTL_EXPR(LTGT, "ltgt", "ee", RTX_COMM_COMPARE)
+DEF_RTL_EXPR(GEU, "geu", "ee", RTX_COMPARE)
+DEF_RTL_EXPR(GTU, "gtu", "ee", RTX_COMPARE)
+DEF_RTL_EXPR(LEU, "leu", "ee", RTX_COMPARE)
+DEF_RTL_EXPR(LTU, "ltu", "ee", RTX_COMPARE)
+
+/* Additional floating-point unordered comparison flavors. */
+DEF_RTL_EXPR(UNORDERED, "unordered", "ee", RTX_COMM_COMPARE)
+DEF_RTL_EXPR(ORDERED, "ordered", "ee", RTX_COMM_COMPARE)
+
+/* These are equivalent to UNORDERED or'ed with the corresponding ordered
+   comparison, e.g. UNEQ is true if the operands are unordered or equal. */
+DEF_RTL_EXPR(UNEQ, "uneq", "ee", RTX_COMM_COMPARE)
+DEF_RTL_EXPR(UNGE, "unge", "ee", RTX_COMPARE)
+DEF_RTL_EXPR(UNGT, "ungt", "ee", RTX_COMPARE)
+DEF_RTL_EXPR(UNLE, "unle", "ee", RTX_COMPARE)
+DEF_RTL_EXPR(UNLT, "unlt", "ee", RTX_COMPARE)
+
+/* Represents the result of sign-extending the sole operand.
+ The machine modes of the operand and of the SIGN_EXTEND expression
+ determine how much sign-extension is going on. */
+DEF_RTL_EXPR(SIGN_EXTEND, "sign_extend", "e", RTX_UNARY)
+
+/* Similar for zero-extension (such as unsigned short to int). */
+DEF_RTL_EXPR(ZERO_EXTEND, "zero_extend", "e", RTX_UNARY)
+
+/* Similar but here the operand has a wider mode. */
+DEF_RTL_EXPR(TRUNCATE, "truncate", "e", RTX_UNARY)
+
+/* Similar for extending floating-point values (such as SFmode to DFmode). */
+DEF_RTL_EXPR(FLOAT_EXTEND, "float_extend", "e", RTX_UNARY)
+DEF_RTL_EXPR(FLOAT_TRUNCATE, "float_truncate", "e", RTX_UNARY)
+
+/* Conversion of fixed point operand to floating point value. */
+DEF_RTL_EXPR(FLOAT, "float", "e", RTX_UNARY)
+
+/* With fixed-point machine mode:
+ Conversion of floating point operand to fixed point value.
+ Value is defined only when the operand's value is an integer.
+ With floating-point machine mode (and operand with same mode):
+ Operand is rounded toward zero to produce an integer value
+ represented in floating point. */
+DEF_RTL_EXPR(FIX, "fix", "e", RTX_UNARY)
+
+/* Conversion of unsigned fixed point operand to floating point value. */
+DEF_RTL_EXPR(UNSIGNED_FLOAT, "unsigned_float", "e", RTX_UNARY)
+
+/* With fixed-point machine mode:
+ Conversion of floating point operand to *unsigned* fixed point value.
+ Value is defined only when the operand's value is an integer. */
+DEF_RTL_EXPR(UNSIGNED_FIX, "unsigned_fix", "e", RTX_UNARY)
+
+/* Conversions involving fractional fixed-point types without saturation,
+ including:
+ fractional to fractional (of different precision),
+ signed integer to fractional,
+ fractional to signed integer,
+ floating point to fractional,
+ fractional to floating point.
+ NOTE: fractional can be either signed or unsigned for conversions. */
+DEF_RTL_EXPR(FRACT_CONVERT, "fract_convert", "e", RTX_UNARY)
+
+/* Conversions involving fractional fixed-point types and unsigned integer
+ without saturation, including:
+ unsigned integer to fractional,
+ fractional to unsigned integer.
+ NOTE: fractional can be either signed or unsigned for conversions. */
+DEF_RTL_EXPR(UNSIGNED_FRACT_CONVERT, "unsigned_fract_convert", "e", RTX_UNARY)
+
+/* Conversions involving fractional fixed-point types with saturation,
+ including:
+ fractional to fractional (of different precision),
+ signed integer to fractional,
+ floating point to fractional.
+ NOTE: fractional can be either signed or unsigned for conversions. */
+DEF_RTL_EXPR(SAT_FRACT, "sat_fract", "e", RTX_UNARY)
+
+/* Conversions involving fractional fixed-point types and unsigned integer
+ with saturation, including:
+ unsigned integer to fractional.
+ NOTE: fractional can be either signed or unsigned for conversions. */
+DEF_RTL_EXPR(UNSIGNED_SAT_FRACT, "unsigned_sat_fract", "e", RTX_UNARY)
+
+/* Absolute value */
+DEF_RTL_EXPR(ABS, "abs", "e", RTX_UNARY)
+
+/* Square root */
+DEF_RTL_EXPR(SQRT, "sqrt", "e", RTX_UNARY)
+
+/* Swap bytes. */
+DEF_RTL_EXPR(BSWAP, "bswap", "e", RTX_UNARY)
+
+/* Find the first bit that is set.
+ Value is 1 + the number of trailing zeros in the argument,
+ or 0 if the argument is 0. */
+DEF_RTL_EXPR(FFS, "ffs", "e", RTX_UNARY)
+
+/* Count number of leading redundant sign bits (number of leading
+ sign bits minus one). */
+DEF_RTL_EXPR(CLRSB, "clrsb", "e", RTX_UNARY)
+
+/* Count leading zeros. */
+DEF_RTL_EXPR(CLZ, "clz", "e", RTX_UNARY)
+
+/* Count trailing zeros. */
+DEF_RTL_EXPR(CTZ, "ctz", "e", RTX_UNARY)
+
+/* Population count (number of 1 bits). */
+DEF_RTL_EXPR(POPCOUNT, "popcount", "e", RTX_UNARY)
+
+/* Population parity (number of 1 bits modulo 2). */
+DEF_RTL_EXPR(PARITY, "parity", "e", RTX_UNARY)
+
+/* Reference to a signed bit-field of specified size and position.
+ Operand 0 is the memory unit (usually SImode or QImode) which
+ contains the field's first bit. Operand 1 is the width, in bits.
+ Operand 2 is the number of bits in the memory unit before the
+ first bit of this field.
+ If BITS_BIG_ENDIAN is defined, the first bit is the msb and
+ operand 2 counts from the msb of the memory unit.
+ Otherwise, the first bit is the lsb and operand 2 counts from
+ the lsb of the memory unit.
+ This kind of expression cannot appear as an lvalue in RTL. */
+DEF_RTL_EXPR(SIGN_EXTRACT, "sign_extract", "eee", RTX_BITFIELD_OPS)
+
+/* Similar for unsigned bit-field.
+ But note! This kind of expression _can_ appear as an lvalue. */
+DEF_RTL_EXPR(ZERO_EXTRACT, "zero_extract", "eee", RTX_BITFIELD_OPS)
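+
+/* For example (editor's note),
+     (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 4))
+   reads an 8-bit field starting 4 bits from the least significant bit
+   (assuming !BITS_BIG_ENDIAN) and zero-extends it to SImode.  */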
+
+/* For RISC machines. These save memory when splitting insns. */
+
+/* HIGH are the high-order bits of a constant expression. */
+DEF_RTL_EXPR(HIGH, "high", "e", RTX_CONST_OBJ)
+
+/* LO_SUM is the sum of a register and the low-order bits
+ of a constant expression. */
+DEF_RTL_EXPR(LO_SUM, "lo_sum", "ee", RTX_OBJ)
+
+/* Describes a merge operation between two vector values.
+ Operands 0 and 1 are the vectors to be merged, operand 2 is a bitmask
+ that specifies where the parts of the result are taken from. Set bits
+ indicate operand 0, clear bits indicate operand 1. The parts are defined
+ by the mode of the vectors. */
+DEF_RTL_EXPR(VEC_MERGE, "vec_merge", "eee", RTX_TERNARY)
+
+/* Describes an operation that selects parts of a vector.
+ Operand 0 is the source vector, operand 1 is a PARALLEL that contains
+ a CONST_INT for each of the subparts of the result vector, giving the
+ number of the source subpart that should be stored into it. */
+DEF_RTL_EXPR(VEC_SELECT, "vec_select", "ee", RTX_BIN_ARITH)
+
+/* Describes a vector concat operation. Operands 0 and 1 are the source
+ vectors, the result is a vector that is as long as operands 0 and 1
+ combined and is the concatenation of the two source vectors. */
+DEF_RTL_EXPR(VEC_CONCAT, "vec_concat", "ee", RTX_BIN_ARITH)
+
+/* Describes an operation that converts a small vector into a larger one by
+ duplicating the input values. The output vector mode must have the same
+ submodes as the input vector mode, and the number of output parts must be
+ an integer multiple of the number of input parts. */
+DEF_RTL_EXPR(VEC_DUPLICATE, "vec_duplicate", "e", RTX_UNARY)
+
+/* Creation of a vector in which element I has the value BASE + I * STEP,
+ where BASE is the first operand and STEP is the second. The result
+ must have a vector integer mode. */
+DEF_RTL_EXPR(VEC_SERIES, "vec_series", "ee", RTX_BIN_ARITH)
+
+/* Addition with signed saturation */
+DEF_RTL_EXPR(SS_PLUS, "ss_plus", "ee", RTX_COMM_ARITH)
+
+/* Addition with unsigned saturation */
+DEF_RTL_EXPR(US_PLUS, "us_plus", "ee", RTX_COMM_ARITH)
+
+/* Operand 0 minus operand 1, with signed saturation. */
+DEF_RTL_EXPR(SS_MINUS, "ss_minus", "ee", RTX_BIN_ARITH)
+
+/* Negation with signed saturation. */
+DEF_RTL_EXPR(SS_NEG, "ss_neg", "e", RTX_UNARY)
+/* Negation with unsigned saturation. */
+DEF_RTL_EXPR(US_NEG, "us_neg", "e", RTX_UNARY)
+
+/* Absolute value with signed saturation. */
+DEF_RTL_EXPR(SS_ABS, "ss_abs", "e", RTX_UNARY)
+
+/* Shift left with signed saturation. */
+DEF_RTL_EXPR(SS_ASHIFT, "ss_ashift", "ee", RTX_BIN_ARITH)
+
+/* Shift left with unsigned saturation. */
+DEF_RTL_EXPR(US_ASHIFT, "us_ashift", "ee", RTX_BIN_ARITH)
+
+/* Operand 0 minus operand 1, with unsigned saturation. */
+DEF_RTL_EXPR(US_MINUS, "us_minus", "ee", RTX_BIN_ARITH)
+
+/* Signed saturating truncate. */
+DEF_RTL_EXPR(SS_TRUNCATE, "ss_truncate", "e", RTX_UNARY)
+
+/* Unsigned saturating truncate. */
+DEF_RTL_EXPR(US_TRUNCATE, "us_truncate", "e", RTX_UNARY)
+
+/* Floating point multiply/add combined instruction. */
+DEF_RTL_EXPR(FMA, "fma", "eee", RTX_TERNARY)
+
+/* Information about the variable and its location. */
+DEF_RTL_EXPR(VAR_LOCATION, "var_location", "te", RTX_EXTRA)
+
+/* Used in VAR_LOCATION for a pointer to a decl that is no longer
+ addressable. */
+DEF_RTL_EXPR(DEBUG_IMPLICIT_PTR, "debug_implicit_ptr", "t", RTX_OBJ)
+
+/* Represents value that argument had on function entry. The
+ single argument is the DECL_INCOMING_RTL of the corresponding
+ parameter. */
+DEF_RTL_EXPR(ENTRY_VALUE, "entry_value", "0", RTX_OBJ)
+
+/* Used in VAR_LOCATION for a reference to a parameter that has
+ been optimized away completely. */
+DEF_RTL_EXPR(DEBUG_PARAMETER_REF, "debug_parameter_ref", "t", RTX_OBJ)
+
+/* Used in marker DEBUG_INSNs to avoid being recognized as an insn. */
+DEF_RTL_EXPR(DEBUG_MARKER, "debug_marker", "", RTX_EXTRA)
+
+/* All expressions from this point forward appear only in machine
+ descriptions. */
+#ifdef GENERATOR_FILE
+
+/* Pattern-matching operators: */
+
+/* Use the function named by the second arg (the string)
+ as a predicate; if matched, store the structure that was matched
+ in the operand table at index specified by the first arg (the integer).
+ If the second arg is the null string, the structure is just stored.
+
+ A third string argument indicates to the register allocator restrictions
+ on where the operand can be allocated.
+
+ If the target needs no restriction on any instruction this field should
+ be the null string.
+
+ The string is prepended by:
+ '=' to indicate the operand is only written to.
+ '+' to indicate the operand is both read and written to.
+
+ Each character in the string represents an allocable class for an operand.
+ 'g' indicates the operand can be any valid class.
+ 'i' indicates the operand can be immediate (in the instruction) data.
+ 'r' indicates the operand can be in a register.
+ 'm' indicates the operand can be in memory.
+ 'o' a subset of the 'm' class. Those memory addressing modes that
+ can be offset at compile time (have a constant added to them).
+
+ Other characters indicate target dependent operand classes and
+ are described in each target's machine description.
+
+ For instructions with more than one operand, sets of classes can be
+ separated by a comma to indicate the appropriate multi-operand constraints.
+ There must be a 1 to 1 correspondence between these sets of classes in
+ all operands for an instruction.
+ */
+DEF_RTL_EXPR(MATCH_OPERAND, "match_operand", "iss", RTX_MATCH)
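+
+/* For example (editor's note), a general-register output operand in a
+   machine description typically looks like
+     (match_operand:SI 0 "register_operand" "=r")  */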
+
+/* Match a SCRATCH or a register. When used to generate rtl, a
+ SCRATCH is generated. As for MATCH_OPERAND, the mode specifies
+ the desired mode and the first argument is the operand number.
+ The second argument is the constraint. */
+DEF_RTL_EXPR(MATCH_SCRATCH, "match_scratch", "is", RTX_MATCH)
+
+/* Apply a predicate, AND match recursively the operands of the rtx.
+ Operand 0 is the operand-number, as in match_operand.
+ Operand 1 is a predicate to apply (as a string, a function name).
+ Operand 2 is a vector of expressions, each of which must match
+ one subexpression of the rtx this construct is matching. */
+DEF_RTL_EXPR(MATCH_OPERATOR, "match_operator", "isE", RTX_MATCH)
+
+/* Match a PARALLEL of arbitrary length. The predicate is applied
+ to the PARALLEL and the initial expressions in the PARALLEL are matched.
+ Operand 0 is the operand-number, as in match_operand.
+ Operand 1 is a predicate to apply to the PARALLEL.
+ Operand 2 is a vector of expressions, each of which must match the
+ corresponding element in the PARALLEL. */
+DEF_RTL_EXPR(MATCH_PARALLEL, "match_parallel", "isE", RTX_MATCH)
+
+/* Match only something equal to what is stored in the operand table
+ at the index specified by the argument. Use with MATCH_OPERAND. */
+DEF_RTL_EXPR(MATCH_DUP, "match_dup", "i", RTX_MATCH)
+
+/* Match only something equal to what is stored in the operand table
+ at the index specified by the argument. Use with MATCH_OPERATOR. */
+DEF_RTL_EXPR(MATCH_OP_DUP, "match_op_dup", "iE", RTX_MATCH)
+
+/* Match only something equal to what is stored in the operand table
+ at the index specified by the argument. Use with MATCH_PARALLEL. */
+DEF_RTL_EXPR(MATCH_PAR_DUP, "match_par_dup", "iE", RTX_MATCH)
+
+/* Appears only in define_predicate/define_special_predicate
+ expressions. Evaluates true only if the operand has an RTX code
+ from the set given by the argument (a comma-separated list). If the
+ second argument is present and nonempty, it is a sequence of digits
+ and/or letters which indicates the subexpression to test, using the
+ same syntax as genextract/genrecog's location strings: 0-9 for
+ XEXP (op, n), a-z for XVECEXP (op, 0, n); each character applies to
+ the result of the one before it. */
+DEF_RTL_EXPR(MATCH_CODE, "match_code", "ss", RTX_MATCH)
+
+/* Used to inject a C conditional expression into an .md file. It can
+ appear in a predicate definition or an attribute expression. */
+DEF_RTL_EXPR(MATCH_TEST, "match_test", "s", RTX_MATCH)
+
+/* Insn (and related) definitions. */
+
+/* Definition of the pattern for one kind of instruction.
+ Operand:
+ 0: names this instruction.
+ If the name is the null string, the instruction is in the
+ machine description just to be recognized, and will never be emitted by
+ the tree to rtl expander.
+ 1: is the pattern.
+ 2: is a string which is a C expression
+ giving an additional condition for recognizing this pattern.
+ A null string means no extra condition.
+ 3: is the action to execute if this pattern is matched.
+ If this assembler code template starts with a * then it is a fragment of
+ C code to run to decide on a template to use. Otherwise, it is the
+ template to use.
+ 4: optionally, a vector of attributes for this insn.
+ */
+DEF_RTL_EXPR(DEFINE_INSN, "define_insn", "sEsTV", RTX_EXTRA)
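+
+/* An illustrative skeleton (editor's addition; the name, operand modes
+   and the output template are target-specific assumptions):
+
+     (define_insn "addsi3"
+       [(set (match_operand:SI 0 "register_operand" "=r")
+             (plus:SI (match_operand:SI 1 "register_operand" "r")
+                      (match_operand:SI 2 "register_operand" "r")))]
+       ""
+       "add\t%0, %1, %2")
+*/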
+
+/* Definition of a peephole optimization.
+ 1st operand: vector of insn patterns to match
+ 2nd operand: C expression that must be true
+ 3rd operand: template or C code to produce assembler output.
+ 4: optionally, a vector of attributes for this insn.
+
+ This form is deprecated; use define_peephole2 instead. */
+DEF_RTL_EXPR(DEFINE_PEEPHOLE, "define_peephole", "EsTV", RTX_EXTRA)
+
+/* Definition of a split operation.
+ 1st operand: insn pattern to match
+ 2nd operand: C expression that must be true
+ 3rd operand: vector of insn patterns to place into a SEQUENCE
+ 4th operand: optionally, some C code to execute before generating the
+ insns. This might, for example, create some RTX's and store them in
+ elements of `recog_data.operand' for use by the vector of
+ insn-patterns.
+ (`operands' is an alias here for `recog_data.operand'). */
+DEF_RTL_EXPR(DEFINE_SPLIT, "define_split", "EsES", RTX_EXTRA)
+
+/* Definition of an insn and associated split.
+ This is the concatenation, with a few modifications, of a define_insn
+ and a define_split which share the same pattern.
+ Operand:
+ 0: names this instruction.
+ If the name is the null string, the instruction is in the
+ machine description just to be recognized, and will never be emitted by
+ the tree to rtl expander.
+ 1: is the pattern.
+ 2: is a string which is a C expression
+ giving an additional condition for recognizing this pattern.
+ A null string means no extra condition.
+ 3: is the action to execute if this pattern is matched.
+ If this assembler code template starts with a * then it is a fragment of
+ C code to run to decide on a template to use. Otherwise, it is the
+ template to use.
+ 4: C expression that must be true for split. This may start with "&&"
+ in which case the split condition is the logical and of the insn
+ condition and what follows the "&&" of this operand.
+ 5: vector of insn patterns to place into a SEQUENCE
+ 6: optionally, some C code to execute before generating the
+ insns. This might, for example, create some RTX's and store them in
+ elements of `recog_data.operand' for use by the vector of
+ insn-patterns.
+ (`operands' is an alias here for `recog_data.operand').
+ 7: optionally, a vector of attributes for this insn. */
+DEF_RTL_EXPR(DEFINE_INSN_AND_SPLIT, "define_insn_and_split", "sEsTsESV", RTX_EXTRA)
+
+/* A form of define_insn_and_split in which the split insn pattern (operand 5)
+ is determined automatically by replacing match_operands with match_dups
+ and match_operators with match_op_dups. The operands are the same as
+ define_insn_and_split but with operand 5 removed. */
+DEF_RTL_EXPR(DEFINE_INSN_AND_REWRITE, "define_insn_and_rewrite", "sEsTsSV", RTX_EXTRA)
+
+/* Definition of an RTL peephole operation.
+ Follows the same arguments as define_split. */
+DEF_RTL_EXPR(DEFINE_PEEPHOLE2, "define_peephole2", "EsES", RTX_EXTRA)
+
+/* Define how to generate multiple insns for a standard insn name.
+ 1st operand: the insn name.
+ 2nd operand: vector of insn-patterns.
+ Use match_operand to substitute an element of `recog_data.operand'.
+ 3rd operand: C expression that must be true for this to be available.
+ This may not test any operands.
+ 4th operand: Extra C code to execute before generating the insns.
+ This might, for example, create some RTX's and store them in
+ elements of `recog_data.operand' for use by the vector of
+ insn-patterns.
+ (`operands' is an alias here for `recog_data.operand').
+ 5th: optionally, a vector of attributes for this expand. */
+DEF_RTL_EXPR(DEFINE_EXPAND, "define_expand", "sEssV", RTX_EXTRA)
+
+/* Define a requirement for delay slots.
+ 1st operand: Condition involving insn attributes that, if true,
+ indicates that the insn requires the number of delay slots
+ shown.
+ 2nd operand: Vector whose length is three times the number of delay
+ slots required.
+ Each entry gives three conditions, each involving attributes.
+ The first must be true for an insn to occupy that delay slot
+ location. The second is true for all insns that can be
+ annulled if the branch is true and the third is true for all
+ insns that can be annulled if the branch is false.
+
+ Multiple DEFINE_DELAYs may be present. They indicate differing
+ requirements for delay slots. */
+DEF_RTL_EXPR(DEFINE_DELAY, "define_delay", "eE", RTX_EXTRA)
+
+/* Define attribute computation for `asm' instructions. */
+DEF_RTL_EXPR(DEFINE_ASM_ATTRIBUTES, "define_asm_attributes", "V", RTX_EXTRA)
+
+/* Definition of a conditional execution meta operation. Automatically
+ generates new instances of DEFINE_INSN, selected by having attribute
+ "predicable" true. The new pattern will contain a COND_EXEC and the
+ predicate at top-level.
+
+ Operand:
+ 0: The predicate pattern. The top-level form should match a
+ relational operator. Operands should have only one alternative.
+ 1: A C expression giving an additional condition for recognizing
+ the generated pattern.
+ 2: A template or C code to produce assembler output.
+ 3: A vector of attributes to append to the resulting cond_exec insn. */
+DEF_RTL_EXPR(DEFINE_COND_EXEC, "define_cond_exec", "EssV", RTX_EXTRA)
+
+/* Definition of an operand predicate. The difference between
+ DEFINE_PREDICATE and DEFINE_SPECIAL_PREDICATE is that genrecog will
+ not warn about a match_operand with no mode if it has a predicate
+ defined with DEFINE_SPECIAL_PREDICATE.
+
+ Operand:
+ 0: The name of the predicate.
+ 1: A boolean expression which computes whether or not the predicate
+ matches. This expression can use IOR, AND, NOT, MATCH_OPERAND,
+ MATCH_CODE, and MATCH_TEST. It must be specific enough that genrecog
+ can calculate the set of RTX codes that can possibly match.
+ 2: A C function body which must return true for the predicate to match.
+ Optional. Use this when the test is too complicated to fit into a
+ match_test expression. */
+DEF_RTL_EXPR(DEFINE_PREDICATE, "define_predicate", "ses", RTX_EXTRA)
+DEF_RTL_EXPR(DEFINE_SPECIAL_PREDICATE, "define_special_predicate", "ses", RTX_EXTRA)
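+
+/* Illustration only: a hypothetical predicate built from the expression
+ forms listed above (IOR, MATCH_OPERAND, MATCH_CODE, MATCH_TEST); the
+ predicate name is made up.
+
+   (define_predicate "hyp_reg_or_imm8_operand"
+     (ior (match_operand 0 "register_operand")
+          (and (match_code "const_int")
+               (match_test "IN_RANGE (INTVAL (op), 0, 255)")))) */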
+
+/* Definition of a register operand constraint. This simply maps the
+ constraint string to a register class.
+
+ Operand:
+ 0: The name of the constraint (often, but not always, a single letter).
+ 1: A C expression which evaluates to the appropriate register class for
+ this constraint. If this is not just a constant, it should look only
+ at -m switches and the like.
+ 2: A docstring for this constraint, in Texinfo syntax; not currently
+ used, in future will be incorporated into the manual's list of
+ machine-specific operand constraints. */
+DEF_RTL_EXPR(DEFINE_REGISTER_CONSTRAINT, "define_register_constraint", "sss", RTX_EXTRA)
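+
+/* Illustration only: a hypothetical register constraint; the constraint
+ letter and register class below are made up.
+
+   (define_register_constraint "k" "HYP_LO_REGS"
+     "@internal A register usable by a hypothetical narrow encoding.") */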
+
+/* Definition of a non-register operand constraint. These look at the
+ operand and decide whether it fits the constraint.
+
+ DEFINE_CONSTRAINT gets no special treatment if it fails to match.
+ It is appropriate for constant-only constraints, and most others.
+
+ DEFINE_MEMORY_CONSTRAINT tells reload that this constraint can be made
+ to match, if it doesn't already, by converting the operand to the form
+ (mem (reg X)) where X is a base register. It is suitable for constraints
+ that describe a subset of all memory references.
+
+ DEFINE_ADDRESS_CONSTRAINT tells reload that this constraint can be made
+ to match, if it doesn't already, by converting the operand to the form
+ (reg X) where X is a base register. It is suitable for constraints that
+ describe a subset of all address references.
+
+ When in doubt, use plain DEFINE_CONSTRAINT.
+
+ Operand:
+ 0: The name of the constraint (often, but not always, a single letter).
+ 1: A docstring for this constraint, in Texinfo syntax; not currently
+ used, in future will be incorporated into the manual's list of
+ machine-specific operand constraints.
+ 2: A boolean expression which computes whether or not the constraint
+ matches. It should follow the same rules as a define_predicate
+ expression, including the bit about specifying the set of RTX codes
+ that could possibly match. MATCH_TEST subexpressions may make use of
+ these variables:
+ `op' - the RTL object defining the operand.
+ `mode' - the mode of `op'.
+ `ival' - INTVAL(op), if op is a CONST_INT.
+ `hval' - CONST_DOUBLE_HIGH(op), if op is an integer CONST_DOUBLE.
+ `lval' - CONST_DOUBLE_LOW(op), if op is an integer CONST_DOUBLE.
+ `rval' - CONST_DOUBLE_REAL_VALUE(op), if op is a floating-point
+ CONST_DOUBLE.
+ Do not use ival/hval/lval/rval if op is not the appropriate kind of
+ RTL object. */
+DEF_RTL_EXPR(DEFINE_CONSTRAINT, "define_constraint", "sse", RTX_EXTRA)
+DEF_RTL_EXPR(DEFINE_MEMORY_CONSTRAINT, "define_memory_constraint", "sse", RTX_EXTRA)
+DEF_RTL_EXPR(DEFINE_SPECIAL_MEMORY_CONSTRAINT, "define_special_memory_constraint", "sse", RTX_EXTRA)
+DEF_RTL_EXPR(DEFINE_RELAXED_MEMORY_CONSTRAINT, "define_relaxed_memory_constraint", "sse", RTX_EXTRA)
+DEF_RTL_EXPR(DEFINE_ADDRESS_CONSTRAINT, "define_address_constraint", "sse", RTX_EXTRA)
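+
+/* Illustration only: a hypothetical define_constraint using the `ival'
+ convenience variable described above.
+
+   (define_constraint "I8"
+     "An unsigned 8-bit immediate."
+     (and (match_code "const_int")
+          (match_test "IN_RANGE (ival, 0, 255)"))) */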
+
+
+/* Constructions for CPU pipeline description described by NDFAs. */
+
+/* (define_cpu_unit string [string]) describes cpu functional
+ units (separated by commas).
+
+ 1st operand: Names of cpu functional units.
+ 2nd operand: Name of automaton (see comments for DEFINE_AUTOMATON).
+
+ All define_reservations, define_cpu_units, and
+ define_query_cpu_units should have unique names which may not be
+ "nothing". */
+DEF_RTL_EXPR(DEFINE_CPU_UNIT, "define_cpu_unit", "sS", RTX_EXTRA)
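+
+/* Illustration only: two hypothetical ALU units placed in a made-up
+ automaton "hyp_pipeline" (declared by a define_automaton).
+
+   (define_cpu_unit "hyp_alu0,hyp_alu1" "hyp_pipeline") */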
+
+/* (define_query_cpu_unit string [string]) describes cpu functional
+ units analogously to define_cpu_unit. The reservation of such
+ units can be queried in a given automaton state. */
+DEF_RTL_EXPR(DEFINE_QUERY_CPU_UNIT, "define_query_cpu_unit", "sS", RTX_EXTRA)
+
+/* (exclusion_set string string) means that each CPU functional unit
+ in the first string cannot be reserved simultaneously with any
+ unit whose name is in the second string, and vice versa. CPU units
+ in the strings are separated by commas. For example, this is useful
+ for describing a CPU with a fully pipelined floating point
+ functional unit which can simultaneously execute only single
+ precision or only double precision floating point insns. All CPU
+ functional units in a set should belong to the same automaton. */
+DEF_RTL_EXPR(EXCLUSION_SET, "exclusion_set", "ss", RTX_EXTRA)
+
+/* (presence_set string string) means that each CPU functional unit in
+ the first string cannot be reserved unless at least one of the unit
+ patterns listed in the second string is reserved. This is an
+ asymmetric relation. CPU units or unit patterns in the strings are
+ separated by commas. A pattern is a single unit name or several
+ unit names separated by white space.
+
+ For example, for a VLIW processor it is useful for describing that
+ slot1 is reserved only after slot0 has been reserved. We could
+ describe this with the following construction
+
+ (presence_set "slot1" "slot0")
+
+ Or, if slot1 is reserved only after both slot0 and unit b0 have
+ been reserved, we could write
+
+ (presence_set "slot1" "slot0 b0")
+
+ All CPU functional units in a set should belong to the same
+ automaton. */
+DEF_RTL_EXPR(PRESENCE_SET, "presence_set", "ss", RTX_EXTRA)
+
+/* (final_presence_set string string) is analogous to `presence_set'.
+ The difference between them is when the checking is done. When an
+ instruction is issued in a given automaton state reflecting all
+ current and planned unit reservations, the automaton state is
+ changed. The first state is the source state, the second one the
+ result state. Checking for `presence_set' is done on the source
+ state reservation; checking for `final_presence_set' is done on the
+ result reservation. This construction is useful for describing a
+ reservation which is actually two subsequent reservations. For
+ example, if we use
+
+ (presence_set "slot1" "slot0")
+
+ the following insn will never be issued (because slot1 requires
+ slot0, which is absent in the source state).
+
+ (define_reservation "insn_and_nop" "slot0 + slot1")
+
+ but it can be issued if we use analogous `final_presence_set'. */
+DEF_RTL_EXPR(FINAL_PRESENCE_SET, "final_presence_set", "ss", RTX_EXTRA)
+
+/* (absence_set string string) means that each CPU functional unit in
+ the first string can be reserved only if none of the unit patterns
+ listed in the second string is reserved. This is an asymmetric
+ relation (exclusion_set is analogous to this one but symmetric).
+ CPU units or unit patterns in the strings are separated by commas.
+ A pattern is a single unit name or several unit names separated by
+ white space.
+
+ For example, for a VLIW processor it is useful for describing that
+ slot2 cannot be reserved after a slot0 or slot1 reservation. We
+ could describe this with the following construction
+
+ (absence_set "slot2" "slot0, slot1")
+
+ Or, if slot2 cannot be reserved when slot0 and unit b0 are reserved
+ or when slot1 and unit b1 are reserved, we could write
+
+ (absence_set "slot2" "slot0 b0, slot1 b1")
+
+ All CPU functional units in a set should belong to the same
+ automaton. */
+DEF_RTL_EXPR(ABSENCE_SET, "absence_set", "ss", RTX_EXTRA)
+
+/* (final_absence_set string string) is analogous to `absence_set' but
+ checking is done on the result (state) reservation. See comments
+ for `final_presence_set'. */
+DEF_RTL_EXPR(FINAL_ABSENCE_SET, "final_absence_set", "ss", RTX_EXTRA)
+
+/* (define_bypass number out_insn_names in_insn_names) names a bypass
+ with the given latency (the first number) from the insns given by
+ the first string (see define_insn_reservation) to the insns given
+ by the second string. Insn names in the strings are separated by
+ commas. The third operand is the optional name of a function which
+ acts as an additional guard for the bypass. The function will get
+ the two insns as parameters. If the function returns zero, the
+ bypass will be ignored for this case. An additional guard is
+ necessary to recognize complicated bypasses, e.g. when the consumer
+ is a load address. If there is more than one bypass with the same
+ output and input insns, the chosen bypass is the first bypass in
+ the description with a guard whose guard function returns nonzero.
+ If there is no such bypass, the bypass without a guard function is
+ chosen. */
+DEF_RTL_EXPR(DEFINE_BYPASS, "define_bypass", "issS", RTX_EXTRA)
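+
+/* Illustration only: a hypothetical forwarding bypass with latency 1 from
+ made-up multiply reservations to accumulate reservations, guarded by a
+ made-up function.
+
+   (define_bypass 1 "hyp_mul" "hyp_mac" "hyp_mul_mac_dep_p") */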
+
+/* (define_automaton string) describes the names of the automata
+ generated and used for pipeline hazard recognition. The names are
+ separated by commas. It is actually possible to generate a single
+ automaton, but unfortunately it can be very large. If we use more
+ than one automaton, the total size of the automata is usually
+ smaller than that of a single one. The automaton name is used in
+ define_cpu_unit and define_query_cpu_unit. All automata should have
+ unique names. */
+DEF_RTL_EXPR(DEFINE_AUTOMATON, "define_automaton", "s", RTX_EXTRA)
+
+/* (automata_option string) describes an option for the generation of
+ automata. Currently the following options exist:
+
+ o "no-minimization" which disables minimization of the automata.
+ This is only worth doing when debugging the description and
+ needing to look more closely at the reservations of states.
+
+ o "time" which means printing additional time statistics about the
+ generation of the automata.
+
+ o "v" which means generating a file describing the resulting
+ automata. The file has the suffix `.dfa' and can be used for
+ verification and debugging of the description.
+
+ o "w" which means generating warnings instead of errors for
+ non-critical errors.
+
+ o "ndfa" which makes nondeterministic finite state automata.
+
+ o "progress" which means outputting a progress bar showing how many
+ states have been generated so far for the automaton being
+ processed. */
+DEF_RTL_EXPR(AUTOMATA_OPTION, "automata_option", "s", RTX_EXTRA)
+
+/* (define_reservation string string) names a reservation (the first
+ string) of cpu functional units (the second string). Sometimes unit
+ reservations for different insns contain common parts. In that
+ case, you can describe the common part once and use its name (the
+ first operand) in the regular expressions of define_insn_reservation.
+ All define_reservations, define_cpu_units, and define_query_cpu_units
+ should have unique names which may not be "nothing". */
+DEF_RTL_EXPR(DEFINE_RESERVATION, "define_reservation", "ss", RTX_EXTRA)
+
+/* (define_insn_reservation name default_latency condition regexpr)
+ describes the reservation of cpu functional units (the 4th operand)
+ for an instruction selected by the condition (the 3rd operand). The
+ 2nd operand is the default latency. The 1st operand is used for the
+ output of debugging information. The reservations are described by
+ a regular expression with the following syntax:
+
+ regexp = regexp "," oneof
+ | oneof
+
+ oneof = oneof "|" allof
+ | allof
+
+ allof = allof "+" repeat
+ | repeat
+
+ repeat = element "*" number
+ | element
+
+ element = cpu_function_unit_name
+ | reservation_name
+ | result_name
+ | "nothing"
+ | "(" regexp ")"
+
+ 1. "," is used to describe the start of the next cycle in the
+ reservation.
+
+ 2. "|" is used to describe the reservation given by the first
+ regular expression *or* the reservation given by the second
+ regular expression, and so on.
+
+ 3. "+" is used to describe the reservation given by the first
+ regular expression *and* the reservation given by the second
+ regular expression, and so on.
+
+ 4. "*" is used for convenience and simply means a sequence in
+ which the regular expression is repeated NUMBER times with the
+ cycle advancing (see ",").
+
+ 5. A cpu functional unit name means the reservation of that unit.
+
+ 6. A reservation name -- see define_reservation.
+
+ 7. The string "nothing" means no unit reservation. */
+
+DEF_RTL_EXPR(DEFINE_INSN_RESERVATION, "define_insn_reservation", "sies", RTX_EXTRA)
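+
+/* Illustration only: a hypothetical reservation exercising the syntax
+ above -- either ALU on cycle one, a free cycle, then a made-up
+ writeback unit.
+
+   (define_insn_reservation "hyp_alu_op" 3
+     (eq_attr "type" "alu")
+     "(hyp_alu0 | hyp_alu1), nothing, hyp_wb") */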
+
+/* Expressions used for insn attributes. */
+
+/* Definition of an insn attribute.
+ 1st operand: name of the attribute
+ 2nd operand: comma-separated list of possible attribute values
+ 3rd operand: expression for the default value of the attribute. */
+DEF_RTL_EXPR(DEFINE_ATTR, "define_attr", "sse", RTX_EXTRA)
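+
+/* Illustration only: a hypothetical attribute with three values and a
+ constant default.
+
+   (define_attr "type" "alu,load,store" (const_string "alu")) */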
+
+/* Definition of an insn attribute that uses an existing enumerated type.
+ 1st operand: name of the attribute
+ 2nd operand: the name of the enumerated type
+ 3rd operand: expression for the default value of the attribute. */
+DEF_RTL_EXPR(DEFINE_ENUM_ATTR, "define_enum_attr", "sse", RTX_EXTRA)
+
+/* Marker for the name of an attribute. */
+DEF_RTL_EXPR(ATTR, "attr", "s", RTX_EXTRA)
+
+/* For use in the last (optional) operand of DEFINE_INSN or DEFINE_PEEPHOLE and
+ in DEFINE_ASM_ATTRIBUTES to specify an attribute to assign to insns matching that
+ pattern.
+
+ (set_attr "name" "value") is equivalent to
+ (set (attr "name") (const_string "value")) */
+DEF_RTL_EXPR(SET_ATTR, "set_attr", "ss", RTX_EXTRA)
+
+/* In the last operand of DEFINE_INSN and DEFINE_PEEPHOLE, this can be used to
+ specify that attribute values are to be assigned according to the
+ alternative matched.
+
+ The following three expressions are equivalent:
+
+ (set (attr "att") (cond [(eq_attr "alternative" "1") (const_string "a1")
+ (eq_attr "alternative" "2") (const_string "a2")]
+ (const_string "a3")))
+ (set_attr_alternative "att" [(const_string "a1") (const_string "a2")
+ (const_string "a3")])
+ (set_attr "att" "a1,a2,a3")
+ */
+DEF_RTL_EXPR(SET_ATTR_ALTERNATIVE, "set_attr_alternative", "sE", RTX_EXTRA)
+
+/* A conditional expression true if the value of the specified attribute of
+ the current insn equals the specified value. The first operand is the
+ attribute name and the second is the comparison value. */
+DEF_RTL_EXPR(EQ_ATTR, "eq_attr", "ss", RTX_EXTRA)
+
+/* A special case of the above representing a set of alternatives. The first
+ operand is a bitmap of the set; the second one is the default value. */
+DEF_RTL_EXPR(EQ_ATTR_ALT, "eq_attr_alt", "ww", RTX_EXTRA)
+
+/* A conditional expression which is true if the specified flag is
+ true for the insn being scheduled in reorg.
+
+ genattr.cc defines the following flags which can be tested by
+ (attr_flag "foo") expressions in eligible_for_delay: forward, backward. */
+
+DEF_RTL_EXPR (ATTR_FLAG, "attr_flag", "s", RTX_EXTRA)
+
+/* General conditional. The first operand is a vector composed of pairs of
+ expressions. The first element of each pair is evaluated, in turn.
+ The value of the conditional is the second expression of the first pair
+ whose first expression evaluates nonzero. If none of the expressions is
+ true, the second operand will be used as the value of the conditional. */
+DEF_RTL_EXPR(COND, "cond", "Ee", RTX_EXTRA)
+
+/* Definition of a pattern substitution meta operation on a DEFINE_EXPAND
+ or a DEFINE_INSN. Automatically generates new instances of DEFINE_INSNs
+ that match the substitution pattern.
+
+ Operand:
+ 0: The name of the substitution template.
+ 1: Input template to match to see if a substitution is applicable.
+ 2: A C expression giving an additional condition for the generated
+ new define_expand or define_insn.
+ 3: Output template to generate via substitution.
+
+ Within a DEFINE_SUBST template, the meaning of some RTL expressions is
+ different from their usual interpretation: a MATCH_OPERAND matches any
+ expression tree with matching machine mode or with VOIDmode. Likewise,
+ MATCH_OP_DUP and MATCH_DUP match more liberally in a DEFINE_SUBST than
+ in other RTL expressions. MATCH_OPERATOR matches all common operators
+ but also UNSPEC, UNSPEC_VOLATILE, and MATCH_OPERATORS from the input
+ DEFINE_EXPAND or DEFINE_INSN. */
+DEF_RTL_EXPR(DEFINE_SUBST, "define_subst", "sEsE", RTX_EXTRA)
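+
+/* Illustration only: a hypothetical define_subst that appends a clobber of
+ a made-up flags register to a single-set pattern.
+
+   (define_subst "hyp_add_clobber_subst"
+     [(set (match_operand:SI 0 "" "")
+           (match_operand:SI 1 "" ""))]
+     ""
+     [(set (match_dup 0) (match_dup 1))
+      (clobber (reg:CC HYP_FLAGS_REG))]) */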
+
+/* Substitution attribute to apply a DEFINE_SUBST to a pattern.
+
+ Operand:
+ 0: The name of the subst-attribute.
+ 1: The name of the DEFINE_SUBST to be applied for this attribute.
+ 2: String to substitute for the subst-attribute name in the pattern
+ name, for the case that the DEFINE_SUBST is not applied (i.e. the
+ unmodified version of the pattern).
+ 3: String to substitute for the subst-attribute name in the pattern
+ name, for the case that the DEFINE_SUBST is applied to the pattern.
+
+ The use of DEFINE_SUBST and DEFINE_SUBST_ATTR is explained in the
+ GCC internals manual, under "RTL Templates Transformations". */
+DEF_RTL_EXPR(DEFINE_SUBST_ATTR, "define_subst_attr", "ssss", RTX_EXTRA)
+
+#endif /* GENERATOR_FILE */
+
+/*
+Local variables:
+mode:c
+End:
+*/
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl.h
new file mode 100644
index 0000000..52f0419
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtl.h
@@ -0,0 +1,4623 @@
+/* Register Transfer Language (RTL) definitions for GCC
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RTL_H
+#define GCC_RTL_H
+
+/* This file is occasionally included by generator files which expect
+ machmode.h and other files to exist and would not normally have been
+ included by coretypes.h. */
+#ifdef GENERATOR_FILE
+#include "real.h"
+#include "fixed-value.h"
+#include "statistics.h"
+#include "vec.h"
+#include "hash-table.h"
+#include "hash-set.h"
+#include "input.h"
+#include "is-a.h"
+#endif /* GENERATOR_FILE */
+
+#include "hard-reg-set.h"
+
+class predefined_function_abi;
+
+/* Value used by some passes to "recognize" noop moves as valid
+ instructions. */
+#define NOOP_MOVE_INSN_CODE INT_MAX
+
+/* Register Transfer Language EXPRESSION CODES */
+
+#define RTX_CODE enum rtx_code
+enum rtx_code {
+
+#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) ENUM ,
+#include "rtl.def" /* rtl expressions are documented here */
+#undef DEF_RTL_EXPR
+
+ LAST_AND_UNUSED_RTX_CODE}; /* A convenient way to get a value for
+ NUM_RTX_CODE.
+ Assumes default enum value assignment. */
+
+/* The cast here saves many elsewhere. */
+#define NUM_RTX_CODE ((int) LAST_AND_UNUSED_RTX_CODE)
+
+/* Similar, but since generator files get more entries... */
+#ifdef GENERATOR_FILE
+# define NON_GENERATOR_NUM_RTX_CODE ((int) MATCH_OPERAND)
+#endif
+
+/* Register Transfer Language EXPRESSION CODE CLASSES */
+
+enum rtx_class {
+ /* We check bit 0-1 of some rtx class codes in the predicates below. */
+
+ /* Bit 0 = comparison if 0, arithmetic is 1
+ Bit 1 = 1 if commutative. */
+ RTX_COMPARE, /* 0 */
+ RTX_COMM_COMPARE,
+ RTX_BIN_ARITH,
+ RTX_COMM_ARITH,
+
+ /* Must follow the four preceding values. */
+ RTX_UNARY, /* 4 */
+
+ RTX_EXTRA,
+ RTX_MATCH,
+ RTX_INSN,
+
+ /* Bit 0 = 1 if constant. */
+ RTX_OBJ, /* 8 */
+ RTX_CONST_OBJ,
+
+ RTX_TERNARY,
+ RTX_BITFIELD_OPS,
+ RTX_AUTOINC
+};
+
+#define RTX_OBJ_MASK (~1)
+#define RTX_OBJ_RESULT (RTX_OBJ & RTX_OBJ_MASK)
+#define RTX_COMPARE_MASK (~1)
+#define RTX_COMPARE_RESULT (RTX_COMPARE & RTX_COMPARE_MASK)
+#define RTX_ARITHMETIC_MASK (~1)
+#define RTX_ARITHMETIC_RESULT (RTX_COMM_ARITH & RTX_ARITHMETIC_MASK)
+#define RTX_BINARY_MASK (~3)
+#define RTX_BINARY_RESULT (RTX_COMPARE & RTX_BINARY_MASK)
+#define RTX_COMMUTATIVE_MASK (~2)
+#define RTX_COMMUTATIVE_RESULT (RTX_COMM_COMPARE & RTX_COMMUTATIVE_MASK)
+#define RTX_NON_COMMUTATIVE_RESULT (RTX_COMPARE & RTX_COMMUTATIVE_MASK)
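+
+/* Illustration only: with the encoding above, the four binary classes
+ RTX_COMPARE..RTX_COMM_ARITH have values 0-3, so for any binary code
+ (GET_RTX_CLASS (code) & RTX_BINARY_MASK) == RTX_BINARY_RESULT reduces
+ to (class & ~3) == 0, which is exactly the BINARY_P test further
+ below. */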
+
+extern const unsigned char rtx_length[NUM_RTX_CODE];
+#define GET_RTX_LENGTH(CODE) (rtx_length[(int) (CODE)])
+
+extern const char * const rtx_name[NUM_RTX_CODE];
+#define GET_RTX_NAME(CODE) (rtx_name[(int) (CODE)])
+
+extern const char * const rtx_format[NUM_RTX_CODE];
+#define GET_RTX_FORMAT(CODE) (rtx_format[(int) (CODE)])
+
+extern const enum rtx_class rtx_class[NUM_RTX_CODE];
+#define GET_RTX_CLASS(CODE) (rtx_class[(int) (CODE)])
+
+/* True if CODE is part of the insn chain (i.e. has INSN_UID, PREV_INSN
+ and NEXT_INSN fields). */
+#define INSN_CHAIN_CODE_P(CODE) IN_RANGE (CODE, DEBUG_INSN, NOTE)
+
+extern const unsigned char rtx_code_size[NUM_RTX_CODE];
+extern const unsigned char rtx_next[NUM_RTX_CODE];
+
+/* The flags and bitfields of an ADDR_DIFF_VEC. BASE is the base label
+ relative to which the offsets are calculated, as explained in rtl.def. */
+struct addr_diff_vec_flags
+{
+ /* Set at the start of shorten_branches - ONLY WHEN OPTIMIZING - : */
+ unsigned min_align: 8;
+ /* Flags: */
+ unsigned base_after_vec: 1; /* BASE is after the ADDR_DIFF_VEC. */
+ unsigned min_after_vec: 1; /* minimum address target label is
+ after the ADDR_DIFF_VEC. */
+ unsigned max_after_vec: 1; /* maximum address target label is
+ after the ADDR_DIFF_VEC. */
+ unsigned min_after_base: 1; /* minimum address target label is
+ after BASE. */
+ unsigned max_after_base: 1; /* maximum address target label is
+ after BASE. */
+ /* Set by the actual branch shortening process - ONLY WHEN OPTIMIZING - : */
+ unsigned offset_unsigned: 1; /* offsets have to be treated as unsigned. */
+ unsigned : 2;
+ unsigned scale : 8;
+};
+
+/* Structure used to describe the attributes of a MEM. These are hashed
+ so that MEMs with the same attributes share a data structure. This means
+ they cannot be modified in place. */
+class GTY(()) mem_attrs
+{
+public:
+ mem_attrs ();
+
+ /* The expression that the MEM accesses, or null if not known.
+ This expression might be larger than the memory reference itself.
+ (In other words, the MEM might access only part of the object.) */
+ tree expr;
+
+ /* The offset of the memory reference from the start of EXPR.
+ Only valid if OFFSET_KNOWN_P. */
+ poly_int64 offset;
+
+ /* The size of the memory reference in bytes. Only valid if
+ SIZE_KNOWN_P. */
+ poly_int64 size;
+
+ /* The alias set of the memory reference. */
+ alias_set_type alias;
+
+ /* The alignment of the reference in bits. Always a multiple of
+ BITS_PER_UNIT. Note that EXPR may have a stricter alignment
+ than the memory reference itself. */
+ unsigned int align;
+
+ /* The address space that the memory reference uses. */
+ unsigned char addrspace;
+
+ /* True if OFFSET is known. */
+ bool offset_known_p;
+
+ /* True if SIZE is known. */
+ bool size_known_p;
+};
+
+/* Structure used to describe the attributes of a REG in a similar way as
+ mem_attrs does for MEM above. Note that the OFFSET field is calculated
+ in the same way as for mem_attrs, rather than in the same way as a
+ SUBREG_BYTE. For example, if a big-endian target stores a byte
+ object in the low part of a 4-byte register, the OFFSET field
+ will be -3 rather than 0. */
+
+class GTY((for_user)) reg_attrs {
+public:
+ tree decl; /* decl corresponding to REG. */
+ poly_int64 offset; /* Offset from start of DECL. */
+};
+
+/* Common union for an element of an rtx. */
+
+union rtunion
+{
+ int rt_int;
+ unsigned int rt_uint;
+ poly_uint16_pod rt_subreg;
+ const char *rt_str;
+ rtx rt_rtx;
+ rtvec rt_rtvec;
+ machine_mode rt_type;
+ addr_diff_vec_flags rt_addr_diff_vec_flags;
+ struct cselib_val *rt_cselib;
+ tree rt_tree;
+ basic_block rt_bb;
+ mem_attrs *rt_mem;
+ class constant_descriptor_rtx *rt_constant;
+ struct dw_cfi_node *rt_cfi;
+};
+
+/* Describes the properties of a REG. */
+struct GTY(()) reg_info {
+ /* The value of REGNO. */
+ unsigned int regno;
+
+ /* The value of REG_NREGS. */
+ unsigned int nregs : 8;
+ unsigned int unused : 24;
+
+ /* The value of REG_ATTRS. */
+ reg_attrs *attrs;
+};
+
+/* This structure remembers the position of a SYMBOL_REF within an
+ object_block structure. A SYMBOL_REF only provides this information
+ if SYMBOL_REF_HAS_BLOCK_INFO_P is true. */
+struct GTY(()) block_symbol {
+ /* The usual SYMBOL_REF fields. */
+ rtunion GTY ((skip)) fld[2];
+
+ /* The block that contains this object. */
+ struct object_block *block;
+
+ /* The offset of this object from the start of its block. It is negative
+ if the symbol has not yet been assigned an offset. */
+ HOST_WIDE_INT offset;
+};
+
+/* Describes a group of objects that are to be placed together in such
+ a way that their relative positions are known. */
+struct GTY((for_user)) object_block {
+ /* The section in which these objects should be placed. */
+ section *sect;
+
+ /* The alignment of the first object, measured in bits. */
+ unsigned int alignment;
+
+ /* The total size of the objects, measured in bytes. */
+ HOST_WIDE_INT size;
+
+ /* The SYMBOL_REFs for each object. The vector is sorted in
+ order of increasing offset and the following conditions will
+ hold for each element X:
+
+ SYMBOL_REF_HAS_BLOCK_INFO_P (X)
+ !SYMBOL_REF_ANCHOR_P (X)
+ SYMBOL_REF_BLOCK (X) == [address of this structure]
+ SYMBOL_REF_BLOCK_OFFSET (X) >= 0. */
+ vec<rtx, va_gc> *objects;
+
+ /* All the anchor SYMBOL_REFs used to address these objects, sorted
+ in order of increasing offset, and then increasing TLS model.
+ The following conditions will hold for each element X in this vector:
+
+ SYMBOL_REF_HAS_BLOCK_INFO_P (X)
+ SYMBOL_REF_ANCHOR_P (X)
+ SYMBOL_REF_BLOCK (X) == [address of this structure]
+ SYMBOL_REF_BLOCK_OFFSET (X) >= 0. */
+ vec<rtx, va_gc> *anchors;
+};
+
+struct GTY((variable_size)) hwivec_def {
+ HOST_WIDE_INT elem[1];
+};
+
+/* Number of elements of the HWIVEC if RTX is a CONST_WIDE_INT. */
+#define CWI_GET_NUM_ELEM(RTX) \
+ ((int)RTL_FLAG_CHECK1("CWI_GET_NUM_ELEM", (RTX), CONST_WIDE_INT)->u2.num_elem)
+#define CWI_PUT_NUM_ELEM(RTX, NUM) \
+ (RTL_FLAG_CHECK1("CWI_PUT_NUM_ELEM", (RTX), CONST_WIDE_INT)->u2.num_elem = (NUM))
+
+struct GTY((variable_size)) const_poly_int_def {
+ trailing_wide_ints<NUM_POLY_INT_COEFFS> coeffs;
+};
+
+/* RTL expression ("rtx"). */
+
+/* The GTY "desc" and "tag" options below are a kludge: we need a desc
+ field for gengtype to recognize that inheritance is occurring,
+ so that all subclasses are redirected to the traversal hook for the
+ base class.
+ However, all of the fields are in the base class, and special-casing
+ is at work. Hence we use desc and tag of 0, generating a switch
+ statement of the form:
+ switch (0)
+ {
+ case 0: // all the work happens here
+ }
+ in order to work with the existing special-casing in gengtype. */
+
+struct GTY((desc("0"), tag("0"),
+ chain_next ("RTX_NEXT (&%h)"),
+ chain_prev ("RTX_PREV (&%h)"))) rtx_def {
+ /* The kind of expression this is. */
+ ENUM_BITFIELD(rtx_code) code: 16;
+
+ /* The kind of value the expression has. */
+ ENUM_BITFIELD(machine_mode) mode : 8;
+
+ /* 1 in a MEM if we should keep the alias set for this mem unchanged
+ when we access a component.
+ 1 in a JUMP_INSN if it is a crossing jump.
+ 1 in a CALL_INSN if it is a sibling call.
+ 1 in a SET that is for a return.
+ In a CODE_LABEL, part of the two-bit alternate entry field.
+ 1 in a CONCAT is VAL_EXPR_IS_COPIED in var-tracking.cc.
+ 1 in a VALUE is SP_BASED_VALUE_P in cselib.cc.
+ 1 in a SUBREG generated by LRA for reload insns.
+ 1 in a REG if this is a static chain register.
+ Dumped as "/j" in RTL dumps. */
+ unsigned int jump : 1;
+ /* In a CODE_LABEL, part of the two-bit alternate entry field.
+ 1 in a MEM if it cannot trap.
+ 1 in a CALL_INSN logically equivalent to
+ ECF_LOOPING_CONST_OR_PURE and DECL_LOOPING_CONST_OR_PURE_P.
+ 1 in a VALUE is SP_DERIVED_VALUE_P in cselib.cc.
+ Dumped as "/c" in RTL dumps. */
+ unsigned int call : 1;
+ /* 1 in a REG, MEM, or CONCAT if the value is set at most once, anywhere.
+ 1 in a SUBREG used for SUBREG_PROMOTED_UNSIGNED_P.
+ 1 in a SYMBOL_REF if it addresses something in the per-function
+ constants pool.
+ 1 in a CALL_INSN logically equivalent to ECF_CONST and TREE_READONLY.
+ 1 in a NOTE, or EXPR_LIST for a const call.
+ 1 in a JUMP_INSN of an annulling branch.
+ 1 in a CONCAT is VAL_EXPR_IS_CLOBBERED in var-tracking.cc.
+ 1 in a preserved VALUE is PRESERVED_VALUE_P in cselib.cc.
+ 1 in a clobber temporarily created for LRA.
+ Dumped as "/u" in RTL dumps. */
+ unsigned int unchanging : 1;
+ /* 1 in a MEM or ASM_OPERANDS expression if the memory reference is volatile.
+ 1 in an INSN, CALL_INSN, JUMP_INSN, CODE_LABEL, BARRIER, or NOTE
+ if it has been deleted.
+ 1 in a REG expression if it corresponds to a variable declared by the user,
+ 0 for an internally generated temporary.
+ 1 in a SUBREG used for SUBREG_PROMOTED_UNSIGNED_P.
+ 1 in a LABEL_REF, REG_LABEL_TARGET or REG_LABEL_OPERAND note for a
+ non-local label.
+ In a SYMBOL_REF, this flag is used for machine-specific purposes.
+ In a PREFETCH, this flag indicates that it should be considered a
+ scheduling barrier.
+ 1 in a CONCAT is VAL_NEEDS_RESOLUTION in var-tracking.cc.
+ Dumped as "/v" in RTL dumps. */
+ unsigned int volatil : 1;
+ /* 1 in a REG if the register is used only in the exit code of a loop.
+ 1 in a SUBREG expression if was generated from a variable with a
+ promoted mode.
+ 1 in a CODE_LABEL if the label is used for nonlocal gotos
+ and must not be deleted even if its count is zero.
+ 1 in an INSN, JUMP_INSN or CALL_INSN if this insn must be scheduled
+ together with the preceding insn. Valid only within sched.
+ 1 in an INSN, JUMP_INSN, or CALL_INSN if insn is in a delay slot and
+ from the target of a branch. Valid from reorg until end of compilation;
+ cleared before used.
+
+ The name of the field is historical. It used to be used in MEMs
+ to record whether the MEM accessed part of a structure.
+ Dumped as "/s" in RTL dumps. */
+ unsigned int in_struct : 1;
+ /* At the end of RTL generation, 1 if this rtx is used. This is used for
+ copying shared structure. See `unshare_all_rtl'.
+ In a REG, this is not needed for that purpose, and used instead
+ in `leaf_renumber_regs_insn'.
+ 1 in a SYMBOL_REF, means that emit_library_call
+ has used it as the function.
+ 1 in a CONCAT is VAL_HOLDS_TRACK_EXPR in var-tracking.cc.
+ 1 in a VALUE or DEBUG_EXPR is VALUE_RECURSED_INTO in var-tracking.cc. */
+ unsigned int used : 1;
+ /* 1 in an INSN or a SET if this rtx is related to the call frame,
+ either changing how we compute the frame address or saving and
+ restoring registers in the prologue and epilogue.
+ 1 in a REG or MEM if it is a pointer.
+ 1 in a SYMBOL_REF if it addresses something in the per-function
+ constant string pool.
+ 1 in a VALUE is VALUE_CHANGED in var-tracking.cc.
+ Dumped as "/f" in RTL dumps. */
+ unsigned frame_related : 1;
+ /* 1 in a REG or PARALLEL that is the current function's return value.
+ 1 in a SYMBOL_REF for a weak symbol.
+ 1 in a CALL_INSN logically equivalent to ECF_PURE and DECL_PURE_P.
+ 1 in a CONCAT is VAL_EXPR_HAS_REVERSE in var-tracking.cc.
+ 1 in a VALUE or DEBUG_EXPR is NO_LOC_P in var-tracking.cc.
+ Dumped as "/i" in RTL dumps. */
+ unsigned return_val : 1;
+
+ union {
+ /* The final union field is aligned to 64 bits on LP64 hosts,
+ giving a 32-bit gap after the fields above. We optimize the
+ layout for that case and use the gap for extra code-specific
+ information. */
+
+ /* The ORIGINAL_REGNO of a REG. */
+ unsigned int original_regno;
+
+ /* The INSN_UID of an RTX_INSN-class code. */
+ int insn_uid;
+
+ /* The SYMBOL_REF_FLAGS of a SYMBOL_REF. */
+ unsigned int symbol_ref_flags;
+
+ /* The PAT_VAR_LOCATION_STATUS of a VAR_LOCATION. */
+ enum var_init_status var_location_status;
+
+ /* In a CONST_WIDE_INT (aka hwivec_def), this is the number of
+ HOST_WIDE_INTs in the hwivec_def. */
+ unsigned int num_elem;
+
+ /* Information about a CONST_VECTOR. */
+ struct
+ {
+ /* The value of CONST_VECTOR_NPATTERNS. */
+ unsigned int npatterns : 16;
+
+ /* The value of CONST_VECTOR_NELTS_PER_PATTERN. */
+ unsigned int nelts_per_pattern : 8;
+
+ /* For future expansion. */
+ unsigned int unused : 8;
+ } const_vector;
+ } GTY ((skip)) u2;
+
+ /* The first element of the operands of this rtx.
+ The number of operands and their types are controlled
+ by the `code' field, according to rtl.def. */
+ union u {
+ rtunion fld[1];
+ HOST_WIDE_INT hwint[1];
+ struct reg_info reg;
+ struct block_symbol block_sym;
+ struct real_value rv;
+ struct fixed_value fv;
+ struct hwivec_def hwiv;
+ struct const_poly_int_def cpi;
+ } GTY ((special ("rtx_def"), desc ("GET_CODE (&%0)"))) u;
+};
+
+/* A node for constructing singly-linked lists of rtx. */
+
+struct GTY(()) rtx_expr_list : public rtx_def
+{
+private:
+ /* No extra fields, but adds invariant: (GET_CODE (X) == EXPR_LIST). */
+
+public:
+ /* Get next in list. */
+ rtx_expr_list *next () const;
+
+ /* Get at the underlying rtx. */
+ rtx element () const;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_expr_list *>::test (rtx rt)
+{
+ return rt->code == EXPR_LIST;
+}
+
+struct GTY(()) rtx_insn_list : public rtx_def
+{
+private:
+ /* No extra fields, but adds invariant: (GET_CODE (X) == INSN_LIST).
+
+ This is an instance of:
+
+ DEF_RTL_EXPR(INSN_LIST, "insn_list", "ue", RTX_EXTRA)
+
+ i.e. a node for constructing singly-linked lists of rtx_insn *, where
+ the list is "external" to the insn (as opposed to the doubly-linked
+ list embedded within rtx_insn itself). */
+
+public:
+ /* Get next in list. */
+ rtx_insn_list *next () const;
+
+ /* Get at the underlying instruction. */
+ rtx_insn *insn () const;
+
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_insn_list *>::test (rtx rt)
+{
+ return rt->code == INSN_LIST;
+}
+
+/* A node with invariant GET_CODE (X) == SEQUENCE i.e. a vector of rtx,
+ typically (but not always) of rtx_insn *, used in the late passes. */
+
+struct GTY(()) rtx_sequence : public rtx_def
+{
+private:
+ /* No extra fields, but adds invariant: (GET_CODE (X) == SEQUENCE). */
+
+public:
+ /* Get number of elements in sequence. */
+ int len () const;
+
+ /* Get i-th element of the sequence. */
+ rtx element (int index) const;
+
+ /* Get i-th element of the sequence, with a checked cast to
+ rtx_insn *. */
+ rtx_insn *insn (int index) const;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_sequence *>::test (rtx rt)
+{
+ return rt->code == SEQUENCE;
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const rtx_sequence *>::test (const_rtx rt)
+{
+ return rt->code == SEQUENCE;
+}
+
+struct GTY(()) rtx_insn : public rtx_def
+{
+public:
+ /* No extra fields, but adds the invariant:
+
+ (INSN_P (X)
+ || NOTE_P (X)
+ || JUMP_TABLE_DATA_P (X)
+ || BARRIER_P (X)
+ || LABEL_P (X))
+
+ i.e. that we must be able to use the following:
+ INSN_UID ()
+ NEXT_INSN ()
+ PREV_INSN ()
+ i.e. we have an rtx that has an INSN_UID field and can be part of
+ a linked list of insns.
+ */
+
+ /* Returns true if this insn has been deleted. */
+
+ bool deleted () const { return volatil; }
+
+ /* Mark this insn as deleted. */
+
+ void set_deleted () { volatil = true; }
+
+ /* Mark this insn as not deleted. */
+
+ void set_undeleted () { volatil = false; }
+};
+
+/* Subclasses of rtx_insn. */
+
+struct GTY(()) rtx_debug_insn : public rtx_insn
+{
+ /* No extra fields, but adds the invariant:
+ DEBUG_INSN_P (X) aka (GET_CODE (X) == DEBUG_INSN)
+ i.e. an annotation for tracking variable assignments.
+
+ This is an instance of:
+ DEF_RTL_EXPR(DEBUG_INSN, "debug_insn", "uuBeiie", RTX_INSN)
+ from rtl.def. */
+};
+
+struct GTY(()) rtx_nonjump_insn : public rtx_insn
+{
+ /* No extra fields, but adds the invariant:
+ NONJUMP_INSN_P (X) aka (GET_CODE (X) == INSN)
+ i.e. an instruction that cannot jump.
+
+ This is an instance of:
+ DEF_RTL_EXPR(INSN, "insn", "uuBeiie", RTX_INSN)
+ from rtl.def. */
+};
+
+struct GTY(()) rtx_jump_insn : public rtx_insn
+{
+public:
+ /* No extra fields, but adds the invariant:
+ JUMP_P (X) aka (GET_CODE (X) == JUMP_INSN)
+ i.e. an instruction that can possibly jump.
+
+ This is an instance of:
+ DEF_RTL_EXPR(JUMP_INSN, "jump_insn", "uuBeiie0", RTX_INSN)
+ from rtl.def. */
+
+ /* Returns jump target of this instruction. The returned value is not
+ necessarily a code label: it may also be a RETURN or SIMPLE_RETURN
+ expression. Also, when the code label is marked "deleted", it is
+ replaced by a NOTE. In some cases the value is NULL_RTX. */
+
+ inline rtx jump_label () const;
+
+ /* Returns jump target cast to rtx_code_label *. */
+
+ inline rtx_code_label *jump_target () const;
+
+ /* Set jump target. */
+
+ inline void set_jump_target (rtx_code_label *);
+};
+
+struct GTY(()) rtx_call_insn : public rtx_insn
+{
+ /* No extra fields, but adds the invariant:
+ CALL_P (X) aka (GET_CODE (X) == CALL_INSN)
+ i.e. an instruction that can possibly call a subroutine
+ but which will not change which instruction comes next
+ in the current function.
+
+ This is an instance of:
+ DEF_RTL_EXPR(CALL_INSN, "call_insn", "uuBeiiee", RTX_INSN)
+ from rtl.def. */
+};
+
+struct GTY(()) rtx_jump_table_data : public rtx_insn
+{
+ /* No extra fields, but adds the invariant:
+ JUMP_TABLE_DATA_P (X) aka (GET_CODE (INSN) == JUMP_TABLE_DATA)
+ i.e. data for a jump table, considered an instruction for
+ historical reasons.
+
+ This is an instance of:
+ DEF_RTL_EXPR(JUMP_TABLE_DATA, "jump_table_data", "uuBe0000", RTX_INSN)
+ from rtl.def. */
+
+ /* This can be either:
+
+ (a) a table of absolute jumps, in which case PATTERN (this) is an
+ ADDR_VEC with arg 0 a vector of labels, or
+
+ (b) a table of relative jumps (e.g. for -fPIC), in which case
+ PATTERN (this) is an ADDR_DIFF_VEC, with arg 0 a LABEL_REF and
+ arg 1 the vector of labels.
+
+ This method gets the underlying vec. */
+
+ inline rtvec get_labels () const;
+ inline scalar_int_mode get_data_mode () const;
+};
+
+struct GTY(()) rtx_barrier : public rtx_insn
+{
+ /* No extra fields, but adds the invariant:
+ BARRIER_P (X) aka (GET_CODE (X) == BARRIER)
+ i.e. a marker that indicates that control will not flow through.
+
+ This is an instance of:
+ DEF_RTL_EXPR(BARRIER, "barrier", "uu00000", RTX_EXTRA)
+ from rtl.def. */
+};
+
+struct GTY(()) rtx_code_label : public rtx_insn
+{
+ /* No extra fields, but adds the invariant:
+ LABEL_P (X) aka (GET_CODE (X) == CODE_LABEL)
+ i.e. a label in the assembler.
+
+ This is an instance of:
+ DEF_RTL_EXPR(CODE_LABEL, "code_label", "uuB00is", RTX_EXTRA)
+ from rtl.def. */
+};
+
+struct GTY(()) rtx_note : public rtx_insn
+{
+ /* No extra fields, but adds the invariant:
+ NOTE_P(X) aka (GET_CODE (X) == NOTE)
+ i.e. a note about the corresponding source code.
+
+ This is an instance of:
+ DEF_RTL_EXPR(NOTE, "note", "uuB0ni", RTX_EXTRA)
+ from rtl.def. */
+};
+
+/* The size in bytes of an rtx header (code, mode and flags). */
+#define RTX_HDR_SIZE offsetof (struct rtx_def, u)
+
+/* The size in bytes of an rtx with code CODE. */
+#define RTX_CODE_SIZE(CODE) rtx_code_size[CODE]
+
+#define NULL_RTX (rtx) 0
+
+/* The "next" and "previous" RTX, relative to this one. */
+
+#define RTX_NEXT(X) (rtx_next[GET_CODE (X)] == 0 ? NULL \
+ : *(rtx *)(((char *)X) + rtx_next[GET_CODE (X)]))
+
+/* FIXME: the "NEXT_INSN (PREV_INSN (X)) == X" condition shouldn't be needed.
+ */
+#define RTX_PREV(X) ((INSN_P (X) \
+ || NOTE_P (X) \
+ || JUMP_TABLE_DATA_P (X) \
+ || BARRIER_P (X) \
+ || LABEL_P (X)) \
+ && PREV_INSN (as_a <rtx_insn *> (X)) != NULL \
+ && NEXT_INSN (PREV_INSN (as_a <rtx_insn *> (X))) == X \
+ ? PREV_INSN (as_a <rtx_insn *> (X)) : NULL)
+
+/* Define macros to access the `code' field of the rtx. */
+
+#define GET_CODE(RTX) ((enum rtx_code) (RTX)->code)
+#define PUT_CODE(RTX, CODE) ((RTX)->code = (CODE))
+
+#define GET_MODE(RTX) ((machine_mode) (RTX)->mode)
+#define PUT_MODE_RAW(RTX, MODE) ((RTX)->mode = (MODE))
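+
+/* Illustration only, not part of the upstream header: a minimal sketch of
+ using GET_CODE and GET_MODE together, e.g. testing for an SImode
+ register (the helper name is hypothetical).
+
+   static bool
+   hyp_simode_reg_p (const_rtx x)
+   {
+     return GET_CODE (x) == REG && GET_MODE (x) == SImode;
+   }
+*/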
+
+/* RTL vector. These appear inside RTX's when there is a need
+ for a variable number of things. The principal use is inside
+ PARALLEL expressions. */
+
+struct GTY(()) rtvec_def {
+ int num_elem; /* number of elements */
+ rtx GTY ((length ("%h.num_elem"))) elem[1];
+};
+
+#define NULL_RTVEC (rtvec) 0
+
+#define GET_NUM_ELEM(RTVEC) ((RTVEC)->num_elem)
+#define PUT_NUM_ELEM(RTVEC, NUM) ((RTVEC)->num_elem = (NUM))
+
+/* Predicate yielding nonzero iff X is an rtx for a register. */
+#define REG_P(X) (GET_CODE (X) == REG)
+
+/* Predicate yielding nonzero iff X is an rtx for a memory location. */
+#define MEM_P(X) (GET_CODE (X) == MEM)
+
+#if TARGET_SUPPORTS_WIDE_INT
+
+/* Match CONST_*s that can represent compile-time constant integers. */
+#define CASE_CONST_SCALAR_INT \
+ case CONST_INT: \
+ case CONST_WIDE_INT
+
+/* Match CONST_*s for which pointer equality corresponds to value
+ equality. */
+#define CASE_CONST_UNIQUE \
+ case CONST_INT: \
+ case CONST_WIDE_INT: \
+ case CONST_POLY_INT: \
+ case CONST_DOUBLE: \
+ case CONST_FIXED
+
+/* Match all CONST_* rtxes. */
+#define CASE_CONST_ANY \
+ case CONST_INT: \
+ case CONST_WIDE_INT: \
+ case CONST_POLY_INT: \
+ case CONST_DOUBLE: \
+ case CONST_FIXED: \
+ case CONST_VECTOR
+
+#else
+
+/* Match CONST_*s that can represent compile-time constant integers. */
+#define CASE_CONST_SCALAR_INT \
+ case CONST_INT: \
+ case CONST_DOUBLE
+
+/* Match CONST_*s for which pointer equality corresponds to value
+ equality. */
+#define CASE_CONST_UNIQUE \
+ case CONST_INT: \
+ case CONST_DOUBLE: \
+ case CONST_FIXED
+
+/* Match all CONST_* rtxes. */
+#define CASE_CONST_ANY \
+ case CONST_INT: \
+ case CONST_DOUBLE: \
+ case CONST_FIXED: \
+ case CONST_VECTOR
+#endif
+
+/* Predicate yielding nonzero iff X is an rtx for a constant integer. */
+#define CONST_INT_P(X) (GET_CODE (X) == CONST_INT)
+
+/* Predicate yielding nonzero iff X is an rtx for a constant wide integer. */
+#define CONST_WIDE_INT_P(X) (GET_CODE (X) == CONST_WIDE_INT)
+
+/* Predicate yielding nonzero iff X is an rtx for a polynomial constant
+ integer. */
+#define CONST_POLY_INT_P(X) \
+ (NUM_POLY_INT_COEFFS > 1 && GET_CODE (X) == CONST_POLY_INT)
+
+/* Predicate yielding nonzero iff X is an rtx for a constant fixed-point. */
+#define CONST_FIXED_P(X) (GET_CODE (X) == CONST_FIXED)
+
+/* Predicate yielding true iff X is an rtx for a double-int
+ or floating point constant. */
+#define CONST_DOUBLE_P(X) (GET_CODE (X) == CONST_DOUBLE)
+
+/* Predicate yielding true iff X is an rtx for a double-int. */
+#define CONST_DOUBLE_AS_INT_P(X) \
+ (GET_CODE (X) == CONST_DOUBLE && GET_MODE (X) == VOIDmode)
+
+/* Predicate yielding true iff X is an rtx for an integer constant. */
+#if TARGET_SUPPORTS_WIDE_INT
+#define CONST_SCALAR_INT_P(X) \
+ (CONST_INT_P (X) || CONST_WIDE_INT_P (X))
+#else
+#define CONST_SCALAR_INT_P(X) \
+ (CONST_INT_P (X) || CONST_DOUBLE_AS_INT_P (X))
+#endif
+
+/* Predicate yielding true iff X is an rtx for a floating-point constant. */
+#define CONST_DOUBLE_AS_FLOAT_P(X) \
+ (GET_CODE (X) == CONST_DOUBLE && GET_MODE (X) != VOIDmode)
+
+/* Predicate yielding nonzero iff X is an rtx for a constant vector. */
+#define CONST_VECTOR_P(X) (GET_CODE (X) == CONST_VECTOR)
+
+/* Predicate yielding nonzero iff X is a label insn. */
+#define LABEL_P(X) (GET_CODE (X) == CODE_LABEL)
+
+/* Predicate yielding nonzero iff X is a jump insn. */
+#define JUMP_P(X) (GET_CODE (X) == JUMP_INSN)
+
+/* Predicate yielding nonzero iff X is a call insn. */
+#define CALL_P(X) (GET_CODE (X) == CALL_INSN)
+
+/* 1 if RTX is a call_insn for a fake call.
+ A CALL_INSN uses the "used" flag to indicate a fake call. */
+#define FAKE_CALL_P(RTX) \
+ (RTL_FLAG_CHECK1 ("FAKE_CALL_P", (RTX), CALL_INSN)->used)
+
+/* Predicate yielding nonzero iff X is an insn that cannot jump. */
+#define NONJUMP_INSN_P(X) (GET_CODE (X) == INSN)
+
+/* Predicate yielding nonzero iff X is a debug note/insn. */
+#define DEBUG_INSN_P(X) (GET_CODE (X) == DEBUG_INSN)
+
+/* Predicate yielding nonzero iff X is an insn that is not a debug insn. */
+#define NONDEBUG_INSN_P(X) (NONJUMP_INSN_P (X) || JUMP_P (X) || CALL_P (X))
+
+/* Nonzero if DEBUG_MARKER_INSN_P may possibly hold. */
+#define MAY_HAVE_DEBUG_MARKER_INSNS debug_nonbind_markers_p
+/* Nonzero if DEBUG_BIND_INSN_P may possibly hold. */
+#define MAY_HAVE_DEBUG_BIND_INSNS flag_var_tracking_assignments
+/* Nonzero if DEBUG_INSN_P may possibly hold. */
+#define MAY_HAVE_DEBUG_INSNS \
+ (MAY_HAVE_DEBUG_MARKER_INSNS || MAY_HAVE_DEBUG_BIND_INSNS)
+
+/* Predicate yielding nonzero iff X is a real insn. */
+#define INSN_P(X) (NONDEBUG_INSN_P (X) || DEBUG_INSN_P (X))
+
+/* Predicate yielding nonzero iff X is a note insn. */
+#define NOTE_P(X) (GET_CODE (X) == NOTE)
+
+/* Predicate yielding nonzero iff X is a barrier insn. */
+#define BARRIER_P(X) (GET_CODE (X) == BARRIER)
+
+/* Predicate yielding nonzero iff X is data for a jump table. */
+#define JUMP_TABLE_DATA_P(INSN) (GET_CODE (INSN) == JUMP_TABLE_DATA)
+
+/* Predicate yielding nonzero iff RTX is a subreg. */
+#define SUBREG_P(RTX) (GET_CODE (RTX) == SUBREG)
+
+/* Predicate yielding true iff RTX is a symbol ref. */
+#define SYMBOL_REF_P(RTX) (GET_CODE (RTX) == SYMBOL_REF)
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_insn *>::test (rtx rt)
+{
+ return (INSN_P (rt)
+ || NOTE_P (rt)
+ || JUMP_TABLE_DATA_P (rt)
+ || BARRIER_P (rt)
+ || LABEL_P (rt));
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <const rtx_insn *>::test (const_rtx rt)
+{
+ return (INSN_P (rt)
+ || NOTE_P (rt)
+ || JUMP_TABLE_DATA_P (rt)
+ || BARRIER_P (rt)
+ || LABEL_P (rt));
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_debug_insn *>::test (rtx rt)
+{
+ return DEBUG_INSN_P (rt);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_nonjump_insn *>::test (rtx rt)
+{
+ return NONJUMP_INSN_P (rt);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_jump_insn *>::test (rtx rt)
+{
+ return JUMP_P (rt);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_jump_insn *>::test (rtx_insn *insn)
+{
+ return JUMP_P (insn);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_call_insn *>::test (rtx rt)
+{
+ return CALL_P (rt);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_call_insn *>::test (rtx_insn *insn)
+{
+ return CALL_P (insn);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_jump_table_data *>::test (rtx rt)
+{
+ return JUMP_TABLE_DATA_P (rt);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_jump_table_data *>::test (rtx_insn *insn)
+{
+ return JUMP_TABLE_DATA_P (insn);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_barrier *>::test (rtx rt)
+{
+ return BARRIER_P (rt);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_code_label *>::test (rtx rt)
+{
+ return LABEL_P (rt);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_code_label *>::test (rtx_insn *insn)
+{
+ return LABEL_P (insn);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_note *>::test (rtx rt)
+{
+ return NOTE_P (rt);
+}
+
+template <>
+template <>
+inline bool
+is_a_helper <rtx_note *>::test (rtx_insn *insn)
+{
+ return NOTE_P (insn);
+}
+
+/* Predicate yielding nonzero iff X is a return or simple_return. */
+#define ANY_RETURN_P(X) \
+ (GET_CODE (X) == RETURN || GET_CODE (X) == SIMPLE_RETURN)
+
+/* 1 if X is a unary operator. */
+
+#define UNARY_P(X) \
+ (GET_RTX_CLASS (GET_CODE (X)) == RTX_UNARY)
+
+/* 1 if X is a binary operator. */
+
+#define BINARY_P(X) \
+ ((GET_RTX_CLASS (GET_CODE (X)) & RTX_BINARY_MASK) == RTX_BINARY_RESULT)
+
+/* 1 if X is an arithmetic operator. */
+
+#define ARITHMETIC_P(X) \
+ ((GET_RTX_CLASS (GET_CODE (X)) & RTX_ARITHMETIC_MASK) \
+ == RTX_ARITHMETIC_RESULT)
+
+/* 1 if X is a commutative arithmetic operator. */
+
+#define COMMUTATIVE_ARITH_P(X) \
+ (GET_RTX_CLASS (GET_CODE (X)) == RTX_COMM_ARITH)
+
+/* 1 if X is a commutative arithmetic operator or a comparison operator.
+ These two are sometimes selected together because it is possible to
+ swap the two operands. */
+
+#define SWAPPABLE_OPERANDS_P(X) \
+ ((1 << GET_RTX_CLASS (GET_CODE (X))) \
+ & ((1 << RTX_COMM_ARITH) | (1 << RTX_COMM_COMPARE) \
+ | (1 << RTX_COMPARE)))
+
+/* 1 if X is a non-commutative operator. */
+
+#define NON_COMMUTATIVE_P(X) \
+ ((GET_RTX_CLASS (GET_CODE (X)) & RTX_COMMUTATIVE_MASK) \
+ == RTX_NON_COMMUTATIVE_RESULT)
+
+/* 1 if X is a commutative operator. */
+
+#define COMMUTATIVE_P(X) \
+ ((GET_RTX_CLASS (GET_CODE (X)) & RTX_COMMUTATIVE_MASK) \
+ == RTX_COMMUTATIVE_RESULT)
+
+/* 1 if X is a relational operator. */
+
+#define COMPARISON_P(X) \
+ ((GET_RTX_CLASS (GET_CODE (X)) & RTX_COMPARE_MASK) == RTX_COMPARE_RESULT)
+
+/* 1 if X is a constant value. */
+
+#define CONSTANT_P(X) \
+ (GET_RTX_CLASS (GET_CODE (X)) == RTX_CONST_OBJ)
+
+/* 1 if X is a LABEL_REF. */
+#define LABEL_REF_P(X) \
+ (GET_CODE (X) == LABEL_REF)
+
+/* 1 if X can be used to represent an object. */
+#define OBJECT_P(X) \
+ ((GET_RTX_CLASS (GET_CODE (X)) & RTX_OBJ_MASK) == RTX_OBJ_RESULT)
+
+/* General accessor macros for accessing the fields of an rtx. */
+
+#if defined ENABLE_RTL_CHECKING && (GCC_VERSION >= 2007)
+/* The star outside the statement expression and the & inside are so
+ that N can be evaluated only once. */
+#define RTL_CHECK1(RTX, N, C1) __extension__ \
+(*({ __typeof (RTX) const _rtx = (RTX); const int _n = (N); \
+ const enum rtx_code _code = GET_CODE (_rtx); \
+ if (_n < 0 || _n >= GET_RTX_LENGTH (_code)) \
+ rtl_check_failed_bounds (_rtx, _n, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ if (GET_RTX_FORMAT (_code)[_n] != C1) \
+ rtl_check_failed_type1 (_rtx, _n, C1, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ &_rtx->u.fld[_n]; }))
+
+#define RTL_CHECK2(RTX, N, C1, C2) __extension__ \
+(*({ __typeof (RTX) const _rtx = (RTX); const int _n = (N); \
+ const enum rtx_code _code = GET_CODE (_rtx); \
+ if (_n < 0 || _n >= GET_RTX_LENGTH (_code)) \
+ rtl_check_failed_bounds (_rtx, _n, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ if (GET_RTX_FORMAT (_code)[_n] != C1 \
+ && GET_RTX_FORMAT (_code)[_n] != C2) \
+ rtl_check_failed_type2 (_rtx, _n, C1, C2, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ &_rtx->u.fld[_n]; }))
+
+#define RTL_CHECKC1(RTX, N, C) __extension__ \
+(*({ __typeof (RTX) const _rtx = (RTX); const int _n = (N); \
+ if (GET_CODE (_rtx) != (C)) \
+ rtl_check_failed_code1 (_rtx, (C), __FILE__, __LINE__, \
+ __FUNCTION__); \
+ &_rtx->u.fld[_n]; }))
+
+#define RTL_CHECKC2(RTX, N, C1, C2) __extension__ \
+(*({ __typeof (RTX) const _rtx = (RTX); const int _n = (N); \
+ const enum rtx_code _code = GET_CODE (_rtx); \
+ if (_code != (C1) && _code != (C2)) \
+ rtl_check_failed_code2 (_rtx, (C1), (C2), __FILE__, __LINE__, \
+ __FUNCTION__); \
+ &_rtx->u.fld[_n]; }))
+
+#define RTL_CHECKC3(RTX, N, C1, C2, C3) __extension__ \
+(*({ __typeof (RTX) const _rtx = (RTX); const int _n = (N); \
+ const enum rtx_code _code = GET_CODE (_rtx); \
+ if (_code != (C1) && _code != (C2) && _code != (C3)) \
+ rtl_check_failed_code3 (_rtx, (C1), (C2), (C3), __FILE__, \
+ __LINE__, __FUNCTION__); \
+ &_rtx->u.fld[_n]; }))
+
+#define RTVEC_ELT(RTVEC, I) __extension__ \
+(*({ __typeof (RTVEC) const _rtvec = (RTVEC); const int _i = (I); \
+ if (_i < 0 || _i >= GET_NUM_ELEM (_rtvec)) \
+ rtvec_check_failed_bounds (_rtvec, _i, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ &_rtvec->elem[_i]; }))
+
+#define XWINT(RTX, N) __extension__ \
+(*({ __typeof (RTX) const _rtx = (RTX); const int _n = (N); \
+ const enum rtx_code _code = GET_CODE (_rtx); \
+ if (_n < 0 || _n >= GET_RTX_LENGTH (_code)) \
+ rtl_check_failed_bounds (_rtx, _n, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ if (GET_RTX_FORMAT (_code)[_n] != 'w') \
+ rtl_check_failed_type1 (_rtx, _n, 'w', __FILE__, __LINE__, \
+ __FUNCTION__); \
+ &_rtx->u.hwint[_n]; }))
+
+#define CWI_ELT(RTX, I) __extension__ \
+(*({ __typeof (RTX) const _cwi = (RTX); \
+ int _max = CWI_GET_NUM_ELEM (_cwi); \
+ const int _i = (I); \
+ if (_i < 0 || _i >= _max) \
+ cwi_check_failed_bounds (_cwi, _i, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ &_cwi->u.hwiv.elem[_i]; }))
+
+#define XCWINT(RTX, N, C) __extension__ \
+(*({ __typeof (RTX) const _rtx = (RTX); \
+ if (GET_CODE (_rtx) != (C)) \
+ rtl_check_failed_code1 (_rtx, (C), __FILE__, __LINE__, \
+ __FUNCTION__); \
+ &_rtx->u.hwint[N]; }))
+
+#define XCMWINT(RTX, N, C, M) __extension__ \
+(*({ __typeof (RTX) const _rtx = (RTX); \
+ if (GET_CODE (_rtx) != (C) || GET_MODE (_rtx) != (M)) \
+ rtl_check_failed_code_mode (_rtx, (C), (M), false, __FILE__, \
+ __LINE__, __FUNCTION__); \
+ &_rtx->u.hwint[N]; }))
+
+#define XCNMPRV(RTX, C, M) __extension__ \
+({ __typeof (RTX) const _rtx = (RTX); \
+ if (GET_CODE (_rtx) != (C) || GET_MODE (_rtx) == (M)) \
+ rtl_check_failed_code_mode (_rtx, (C), (M), true, __FILE__, \
+ __LINE__, __FUNCTION__); \
+ &_rtx->u.rv; })
+
+#define XCNMPFV(RTX, C, M) __extension__ \
+({ __typeof (RTX) const _rtx = (RTX); \
+ if (GET_CODE (_rtx) != (C) || GET_MODE (_rtx) == (M)) \
+ rtl_check_failed_code_mode (_rtx, (C), (M), true, __FILE__, \
+ __LINE__, __FUNCTION__); \
+ &_rtx->u.fv; })
+
+#define REG_CHECK(RTX) __extension__ \
+({ __typeof (RTX) const _rtx = (RTX); \
+ if (GET_CODE (_rtx) != REG) \
+ rtl_check_failed_code1 (_rtx, REG, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ &_rtx->u.reg; })
+
+#define BLOCK_SYMBOL_CHECK(RTX) __extension__ \
+({ __typeof (RTX) const _symbol = (RTX); \
+ const unsigned int flags = SYMBOL_REF_FLAGS (_symbol); \
+ if ((flags & SYMBOL_FLAG_HAS_BLOCK_INFO) == 0) \
+ rtl_check_failed_block_symbol (__FILE__, __LINE__, \
+ __FUNCTION__); \
+ &_symbol->u.block_sym; })
+
+#define HWIVEC_CHECK(RTX,C) __extension__ \
+({ __typeof (RTX) const _symbol = (RTX); \
+ RTL_CHECKC1 (_symbol, 0, C); \
+ &_symbol->u.hwiv; })
+
+extern void rtl_check_failed_bounds (const_rtx, int, const char *, int,
+ const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void rtl_check_failed_type1 (const_rtx, int, int, const char *, int,
+ const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void rtl_check_failed_type2 (const_rtx, int, int, int, const char *,
+ int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void rtl_check_failed_code1 (const_rtx, enum rtx_code, const char *,
+ int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void rtl_check_failed_code2 (const_rtx, enum rtx_code, enum rtx_code,
+ const char *, int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void rtl_check_failed_code3 (const_rtx, enum rtx_code, enum rtx_code,
+ enum rtx_code, const char *, int,
+ const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void rtl_check_failed_code_mode (const_rtx, enum rtx_code, machine_mode,
+ bool, const char *, int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void rtl_check_failed_block_symbol (const char *, int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void cwi_check_failed_bounds (const_rtx, int, const char *, int,
+ const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void rtvec_check_failed_bounds (const_rtvec, int, const char *, int,
+ const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+
+#else /* not ENABLE_RTL_CHECKING */
+
+#define RTL_CHECK1(RTX, N, C1) ((RTX)->u.fld[N])
+#define RTL_CHECK2(RTX, N, C1, C2) ((RTX)->u.fld[N])
+#define RTL_CHECKC1(RTX, N, C) ((RTX)->u.fld[N])
+#define RTL_CHECKC2(RTX, N, C1, C2) ((RTX)->u.fld[N])
+#define RTL_CHECKC3(RTX, N, C1, C2, C3) ((RTX)->u.fld[N])
+#define RTVEC_ELT(RTVEC, I) ((RTVEC)->elem[I])
+#define XWINT(RTX, N) ((RTX)->u.hwint[N])
+#define CWI_ELT(RTX, I) ((RTX)->u.hwiv.elem[I])
+#define XCWINT(RTX, N, C) ((RTX)->u.hwint[N])
+#define XCMWINT(RTX, N, C, M) ((RTX)->u.hwint[N])
+#define XCNMWINT(RTX, N, C, M) ((RTX)->u.hwint[N])
+#define XCNMPRV(RTX, C, M) (&(RTX)->u.rv)
+#define XCNMPFV(RTX, C, M) (&(RTX)->u.fv)
+#define REG_CHECK(RTX) (&(RTX)->u.reg)
+#define BLOCK_SYMBOL_CHECK(RTX) (&(RTX)->u.block_sym)
+#define HWIVEC_CHECK(RTX,C) (&(RTX)->u.hwiv)
+
+#endif
+
+/* General accessor macros for accessing the flags of an rtx. */
+
+/* Access an individual rtx flag, with no checking of any kind. */
+#define RTX_FLAG(RTX, FLAG) ((RTX)->FLAG)
+
+#if defined ENABLE_RTL_FLAG_CHECKING && (GCC_VERSION >= 2007)
+#define RTL_FLAG_CHECK1(NAME, RTX, C1) __extension__ \
+({ __typeof (RTX) const _rtx = (RTX); \
+ if (GET_CODE (_rtx) != C1) \
+ rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ _rtx; })
+
+#define RTL_FLAG_CHECK2(NAME, RTX, C1, C2) __extension__ \
+({ __typeof (RTX) const _rtx = (RTX); \
+   if (GET_CODE (_rtx) != C1 && GET_CODE (_rtx) != C2)		\
+     rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__,		\
+ __FUNCTION__); \
+ _rtx; })
+
+#define RTL_FLAG_CHECK3(NAME, RTX, C1, C2, C3) __extension__ \
+({ __typeof (RTX) const _rtx = (RTX); \
+   if (GET_CODE (_rtx) != C1 && GET_CODE (_rtx) != C2		\
+ && GET_CODE (_rtx) != C3) \
+ rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ _rtx; })
+
+#define RTL_FLAG_CHECK4(NAME, RTX, C1, C2, C3, C4) __extension__ \
+({ __typeof (RTX) const _rtx = (RTX); \
+   if (GET_CODE (_rtx) != C1 && GET_CODE (_rtx) != C2		\
+       && GET_CODE (_rtx) != C3 && GET_CODE (_rtx) != C4)	\
+ rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ _rtx; })
+
+#define RTL_FLAG_CHECK5(NAME, RTX, C1, C2, C3, C4, C5) __extension__ \
+({ __typeof (RTX) const _rtx = (RTX); \
+ if (GET_CODE (_rtx) != C1 && GET_CODE (_rtx) != C2 \
+ && GET_CODE (_rtx) != C3 && GET_CODE (_rtx) != C4 \
+ && GET_CODE (_rtx) != C5) \
+ rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ _rtx; })
+
+#define RTL_FLAG_CHECK6(NAME, RTX, C1, C2, C3, C4, C5, C6) \
+ __extension__ \
+({ __typeof (RTX) const _rtx = (RTX); \
+ if (GET_CODE (_rtx) != C1 && GET_CODE (_rtx) != C2 \
+ && GET_CODE (_rtx) != C3 && GET_CODE (_rtx) != C4 \
+ && GET_CODE (_rtx) != C5 && GET_CODE (_rtx) != C6) \
+     rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__,	\
+ __FUNCTION__); \
+ _rtx; })
+
+#define RTL_FLAG_CHECK7(NAME, RTX, C1, C2, C3, C4, C5, C6, C7) \
+ __extension__ \
+({ __typeof (RTX) const _rtx = (RTX); \
+ if (GET_CODE (_rtx) != C1 && GET_CODE (_rtx) != C2 \
+ && GET_CODE (_rtx) != C3 && GET_CODE (_rtx) != C4 \
+ && GET_CODE (_rtx) != C5 && GET_CODE (_rtx) != C6 \
+ && GET_CODE (_rtx) != C7) \
+ rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ _rtx; })
+
+#define RTL_INSN_CHAIN_FLAG_CHECK(NAME, RTX) \
+ __extension__ \
+({ __typeof (RTX) const _rtx = (RTX); \
+ if (!INSN_CHAIN_CODE_P (GET_CODE (_rtx))) \
+ rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ _rtx; })
+
+extern void rtl_check_failed_flag (const char *, const_rtx, const char *,
+ int, const char *)
+  ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+
+#else /* not ENABLE_RTL_FLAG_CHECKING */
+
+#define RTL_FLAG_CHECK1(NAME, RTX, C1) (RTX)
+#define RTL_FLAG_CHECK2(NAME, RTX, C1, C2) (RTX)
+#define RTL_FLAG_CHECK3(NAME, RTX, C1, C2, C3) (RTX)
+#define RTL_FLAG_CHECK4(NAME, RTX, C1, C2, C3, C4) (RTX)
+#define RTL_FLAG_CHECK5(NAME, RTX, C1, C2, C3, C4, C5) (RTX)
+#define RTL_FLAG_CHECK6(NAME, RTX, C1, C2, C3, C4, C5, C6) (RTX)
+#define RTL_FLAG_CHECK7(NAME, RTX, C1, C2, C3, C4, C5, C6, C7) (RTX)
+#define RTL_INSN_CHAIN_FLAG_CHECK(NAME, RTX) (RTX)
+#endif
+
+#define XINT(RTX, N) (RTL_CHECK2 (RTX, N, 'i', 'n').rt_int)
+#define XUINT(RTX, N) (RTL_CHECK2 (RTX, N, 'i', 'n').rt_uint)
+#define XSTR(RTX, N) (RTL_CHECK2 (RTX, N, 's', 'S').rt_str)
+#define XEXP(RTX, N) (RTL_CHECK2 (RTX, N, 'e', 'u').rt_rtx)
+#define XVEC(RTX, N) (RTL_CHECK2 (RTX, N, 'E', 'V').rt_rtvec)
+#define XMODE(RTX, N) (RTL_CHECK1 (RTX, N, 'M').rt_type)
+#define XTREE(RTX, N) (RTL_CHECK1 (RTX, N, 't').rt_tree)
+#define XBBDEF(RTX, N) (RTL_CHECK1 (RTX, N, 'B').rt_bb)
+#define XTMPL(RTX, N) (RTL_CHECK1 (RTX, N, 'T').rt_str)
+#define XCFI(RTX, N) (RTL_CHECK1 (RTX, N, 'C').rt_cfi)
+
+#define XVECEXP(RTX, N, M) RTVEC_ELT (XVEC (RTX, N), M)
+#define XVECLEN(RTX, N) GET_NUM_ELEM (XVEC (RTX, N))
+
+/* These are like XINT, etc. except that they expect a '0' field instead
+ of the normal type code. */
+
+#define X0INT(RTX, N) (RTL_CHECK1 (RTX, N, '0').rt_int)
+#define X0UINT(RTX, N) (RTL_CHECK1 (RTX, N, '0').rt_uint)
+#define X0STR(RTX, N) (RTL_CHECK1 (RTX, N, '0').rt_str)
+#define X0EXP(RTX, N) (RTL_CHECK1 (RTX, N, '0').rt_rtx)
+#define X0VEC(RTX, N) (RTL_CHECK1 (RTX, N, '0').rt_rtvec)
+#define X0MODE(RTX, N) (RTL_CHECK1 (RTX, N, '0').rt_type)
+#define X0TREE(RTX, N) (RTL_CHECK1 (RTX, N, '0').rt_tree)
+#define X0BBDEF(RTX, N) (RTL_CHECK1 (RTX, N, '0').rt_bb)
+#define X0ADVFLAGS(RTX, N) (RTL_CHECK1 (RTX, N, '0').rt_addr_diff_vec_flags)
+#define X0CSELIB(RTX, N) (RTL_CHECK1 (RTX, N, '0').rt_cselib)
+#define X0MEMATTR(RTX, N) (RTL_CHECKC1 (RTX, N, MEM).rt_mem)
+#define X0CONSTANT(RTX, N) (RTL_CHECK1 (RTX, N, '0').rt_constant)
+
+/* Access a '0' field with any type. */
+#define X0ANY(RTX, N) RTL_CHECK1 (RTX, N, '0')
+
+#define XCINT(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_int)
+#define XCUINT(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_uint)
+#define XCSUBREG(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_subreg)
+#define XCSTR(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_str)
+#define XCEXP(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_rtx)
+#define XCVEC(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_rtvec)
+#define XCMODE(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_type)
+#define XCTREE(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_tree)
+#define XCBBDEF(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_bb)
+#define XCCFI(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_cfi)
+#define XCCSELIB(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_cselib)
+
+#define XCVECEXP(RTX, N, M, C) RTVEC_ELT (XCVEC (RTX, N, C), M)
+#define XCVECLEN(RTX, N, C) GET_NUM_ELEM (XCVEC (RTX, N, C))
+
+#define XC2EXP(RTX, N, C1, C2) (RTL_CHECKC2 (RTX, N, C1, C2).rt_rtx)
+#define XC3EXP(RTX, N, C1, C2, C3) (RTL_CHECKC3 (RTX, N, C1, C2, C3).rt_rtx)
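+
+/* Usage sketch (illustrative only; `first_operand_or_elt' is a
+   hypothetical helper): XEXP reads the operands of an expression rtx,
+   while XVECLEN and XVECEXP walk an operand vector:
+
+     static rtx
+     first_operand_or_elt (rtx x)
+     {
+       if (GET_CODE (x) == PLUS)
+         return XEXP (x, 0);
+       if (GET_CODE (x) == PARALLEL && XVECLEN (x, 0) > 0)
+         return XVECEXP (x, 0, 0);
+       return NULL_RTX;
+     }  */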
+
+
+/* Methods of rtx_expr_list. */
+
+inline rtx_expr_list *rtx_expr_list::next () const
+{
+ rtx tmp = XEXP (this, 1);
+ return safe_as_a <rtx_expr_list *> (tmp);
+}
+
+inline rtx rtx_expr_list::element () const
+{
+ return XEXP (this, 0);
+}
+
+/* Methods of rtx_insn_list. */
+
+inline rtx_insn_list *rtx_insn_list::next () const
+{
+ rtx tmp = XEXP (this, 1);
+ return safe_as_a <rtx_insn_list *> (tmp);
+}
+
+inline rtx_insn *rtx_insn_list::insn () const
+{
+ rtx tmp = XEXP (this, 0);
+ return safe_as_a <rtx_insn *> (tmp);
+}
+
+/* Methods of rtx_sequence. */
+
+inline int rtx_sequence::len () const
+{
+ return XVECLEN (this, 0);
+}
+
+inline rtx rtx_sequence::element (int index) const
+{
+ return XVECEXP (this, 0, index);
+}
+
+inline rtx_insn *rtx_sequence::insn (int index) const
+{
+ return as_a <rtx_insn *> (XVECEXP (this, 0, index));
+}
+
+/* ACCESS MACROS for particular fields of insns. */
+
+/* Holds a unique number for each insn.
+ These are not necessarily sequentially increasing. */
+inline int INSN_UID (const_rtx insn)
+{
+ return RTL_INSN_CHAIN_FLAG_CHECK ("INSN_UID",
+ (insn))->u2.insn_uid;
+}
+inline int& INSN_UID (rtx insn)
+{
+ return RTL_INSN_CHAIN_FLAG_CHECK ("INSN_UID",
+ (insn))->u2.insn_uid;
+}
+
+/* Chain insns together in sequence. */
+
+/* For now these are split in two: an rvalue form:
+ PREV_INSN/NEXT_INSN
+ and an lvalue form:
+ SET_NEXT_INSN/SET_PREV_INSN. */
+
+inline rtx_insn *PREV_INSN (const rtx_insn *insn)
+{
+ rtx prev = XEXP (insn, 0);
+ return safe_as_a <rtx_insn *> (prev);
+}
+
+inline rtx& SET_PREV_INSN (rtx_insn *insn)
+{
+ return XEXP (insn, 0);
+}
+
+inline rtx_insn *NEXT_INSN (const rtx_insn *insn)
+{
+ rtx next = XEXP (insn, 1);
+ return safe_as_a <rtx_insn *> (next);
+}
+
+inline rtx& SET_NEXT_INSN (rtx_insn *insn)
+{
+ return XEXP (insn, 1);
+}
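+
+/* Usage sketch (illustrative only; `count_insns' is a hypothetical
+   helper): the rvalue form walks the chain, the lvalue form splices it:
+
+     static int
+     count_insns (rtx_insn *first)
+     {
+       int n = 0;
+       for (rtx_insn *insn = first; insn != NULL; insn = NEXT_INSN (insn))
+         n++;
+       return n;
+     }  */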
+
+inline basic_block BLOCK_FOR_INSN (const_rtx insn)
+{
+ return XBBDEF (insn, 2);
+}
+
+inline basic_block& BLOCK_FOR_INSN (rtx insn)
+{
+ return XBBDEF (insn, 2);
+}
+
+inline void set_block_for_insn (rtx_insn *insn, basic_block bb)
+{
+ BLOCK_FOR_INSN (insn) = bb;
+}
+
+/* The body of an insn. */
+inline rtx PATTERN (const_rtx insn)
+{
+ return XEXP (insn, 3);
+}
+
+inline rtx& PATTERN (rtx insn)
+{
+ return XEXP (insn, 3);
+}
+
+inline unsigned int INSN_LOCATION (const rtx_insn *insn)
+{
+ return XUINT (insn, 4);
+}
+
+inline unsigned int& INSN_LOCATION (rtx_insn *insn)
+{
+ return XUINT (insn, 4);
+}
+
+inline bool INSN_HAS_LOCATION (const rtx_insn *insn)
+{
+ return LOCATION_LOCUS (INSN_LOCATION (insn)) != UNKNOWN_LOCATION;
+}
+
+/* LOCATION of an RTX if relevant. */
+#define RTL_LOCATION(X) (INSN_P (X) ? \
+ INSN_LOCATION (as_a <rtx_insn *> (X)) \
+ : UNKNOWN_LOCATION)
+
+/* Code number of instruction, from when it was recognized.
+ -1 means this instruction has not been recognized yet. */
+#define INSN_CODE(INSN) XINT (INSN, 5)
+
+inline rtvec rtx_jump_table_data::get_labels () const
+{
+ rtx pat = PATTERN (this);
+ if (GET_CODE (pat) == ADDR_VEC)
+ return XVEC (pat, 0);
+ else
+ return XVEC (pat, 1); /* presumably an ADDR_DIFF_VEC */
+}
+
+/* Return the mode of the data in the table, which is always a scalar
+ integer. */
+
+inline scalar_int_mode
+rtx_jump_table_data::get_data_mode () const
+{
+ return as_a <scalar_int_mode> (GET_MODE (PATTERN (this)));
+}
+
+/* If LABEL is followed by a jump table, return the table, otherwise
+ return null. */
+
+inline rtx_jump_table_data *
+jump_table_for_label (const rtx_code_label *label)
+{
+ return safe_dyn_cast <rtx_jump_table_data *> (NEXT_INSN (label));
+}
+
+#define RTX_FRAME_RELATED_P(RTX) \
+ (RTL_FLAG_CHECK6 ("RTX_FRAME_RELATED_P", (RTX), DEBUG_INSN, INSN, \
+ CALL_INSN, JUMP_INSN, BARRIER, SET)->frame_related)
+
+/* 1 if JUMP RTX is a crossing jump. */
+#define CROSSING_JUMP_P(RTX) \
+ (RTL_FLAG_CHECK1 ("CROSSING_JUMP_P", (RTX), JUMP_INSN)->jump)
+
+/* 1 if RTX is a call to a const function. Built from ECF_CONST and
+ TREE_READONLY. */
+#define RTL_CONST_CALL_P(RTX) \
+ (RTL_FLAG_CHECK1 ("RTL_CONST_CALL_P", (RTX), CALL_INSN)->unchanging)
+
+/* 1 if RTX is a call to a pure function. Built from ECF_PURE and
+ DECL_PURE_P. */
+#define RTL_PURE_CALL_P(RTX) \
+ (RTL_FLAG_CHECK1 ("RTL_PURE_CALL_P", (RTX), CALL_INSN)->return_val)
+
+/* 1 if RTX is a call to a const or pure function. */
+#define RTL_CONST_OR_PURE_CALL_P(RTX) \
+ (RTL_CONST_CALL_P (RTX) || RTL_PURE_CALL_P (RTX))
+
+/* 1 if RTX is a call to a looping const or pure function. Built from
+ ECF_LOOPING_CONST_OR_PURE and DECL_LOOPING_CONST_OR_PURE_P. */
+#define RTL_LOOPING_CONST_OR_PURE_CALL_P(RTX) \
+ (RTL_FLAG_CHECK1 ("CONST_OR_PURE_CALL_P", (RTX), CALL_INSN)->call)
+
+/* 1 if RTX is a call_insn for a sibling call. */
+#define SIBLING_CALL_P(RTX) \
+ (RTL_FLAG_CHECK1 ("SIBLING_CALL_P", (RTX), CALL_INSN)->jump)
+
+/* 1 if RTX is a jump_insn, call_insn, or insn that is an annulling branch. */
+#define INSN_ANNULLED_BRANCH_P(RTX) \
+ (RTL_FLAG_CHECK1 ("INSN_ANNULLED_BRANCH_P", (RTX), JUMP_INSN)->unchanging)
+
+/* 1 if RTX is an insn in a delay slot and is from the target of the branch.
+ If the branch insn has INSN_ANNULLED_BRANCH_P set, this insn should only be
+ executed if the branch is taken. For annulled branches with this bit
+ clear, the insn should be executed only if the branch is not taken. */
+#define INSN_FROM_TARGET_P(RTX) \
+ (RTL_FLAG_CHECK3 ("INSN_FROM_TARGET_P", (RTX), INSN, JUMP_INSN, \
+ CALL_INSN)->in_struct)
+
+/* In an ADDR_DIFF_VEC, the flags for RTX for use by branch shortening.
+ See the comments for ADDR_DIFF_VEC in rtl.def. */
+#define ADDR_DIFF_VEC_FLAGS(RTX) X0ADVFLAGS (RTX, 4)
+
+/* In a VALUE, the value cselib has assigned to RTX.
+ This is a "struct cselib_val", see cselib.h. */
+#define CSELIB_VAL_PTR(RTX) X0CSELIB (RTX, 0)
+
+/* Holds a list of notes on what this insn does to various REGs.
+ It is a chain of EXPR_LIST rtx's, where the second operand is the
+ chain pointer and the first operand is the REG being described.
+ The mode field of the EXPR_LIST contains not a real machine mode
+ but a value from enum reg_note. */
+#define REG_NOTES(INSN) XEXP(INSN, 6)
+
+/* In an ENTRY_VALUE this is the DECL_INCOMING_RTL of the argument in
+ question. */
+#define ENTRY_VALUE_EXP(RTX) (RTL_CHECKC1 (RTX, 0, ENTRY_VALUE).rt_rtx)
+
+enum reg_note
+{
+#define DEF_REG_NOTE(NAME) NAME,
+#include "reg-notes.def"
+#undef DEF_REG_NOTE
+ REG_NOTE_MAX
+};
+
+/* Define macros to extract and insert the reg-note kind in an EXPR_LIST. */
+#define REG_NOTE_KIND(LINK) ((enum reg_note) GET_MODE (LINK))
+#define PUT_REG_NOTE_KIND(LINK, KIND) \
+ PUT_MODE_RAW (LINK, (machine_mode) (KIND))
+
+/* Names for REG_NOTE's in EXPR_LIST insn's. */
+
+extern const char * const reg_note_name[];
+#define GET_REG_NOTE_NAME(MODE) (reg_note_name[(int) (MODE)])
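+
+/* Usage sketch (illustrative only): since reg notes are EXPR_LISTs
+   whose mode encodes the kind, scanning the notes of an insn looks
+   like this, where REG_DEAD is one of the kinds from reg-notes.def:
+
+     for (rtx link = REG_NOTES (insn); link; link = XEXP (link, 1))
+       if (REG_NOTE_KIND (link) == REG_DEAD)
+         mark_dying_reg (XEXP (link, 0));
+
+   with `mark_dying_reg' a hypothetical callback.  */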
+
+/* This field is only present on CALL_INSNs. It holds a chain of EXPR_LIST of
+ USE, CLOBBER and SET expressions.
+ USE expressions list the registers filled with arguments that
+ are passed to the function.
+ CLOBBER expressions document the registers explicitly clobbered
+ by this CALL_INSN.
+ SET expressions say that the return value of the call (the SET_DEST)
+ is equivalent to a value available before the call (the SET_SRC).
+ This kind of SET is used when the return value is predictable in
+ advance. It is purely an optimisation hint; unlike USEs and CLOBBERs,
+ it does not affect register liveness.
+
+ Pseudo registers cannot be mentioned in this list. */
+#define CALL_INSN_FUNCTION_USAGE(INSN) XEXP(INSN, 7)
+
+/* The label-number of a code-label. The assembler label
+ is made from `L' and the label-number printed in decimal.
+ Label numbers are unique in a compilation. */
+#define CODE_LABEL_NUMBER(INSN) XINT (INSN, 5)
+
+/* In a NOTE that is a line number, this is a string for the file name that the
+ line is in. We use the same field to record block numbers temporarily in
+ NOTE_INSN_BLOCK_BEG and NOTE_INSN_BLOCK_END notes. (We avoid lots of casts
+   between ints and pointers if we use a different macro for the block
+   number.)  */
+
+/* Opaque data. */
+#define NOTE_DATA(INSN) RTL_CHECKC1 (INSN, 3, NOTE)
+#define NOTE_DELETED_LABEL_NAME(INSN) XCSTR (INSN, 3, NOTE)
+#define SET_INSN_DELETED(INSN) set_insn_deleted (INSN);
+#define NOTE_BLOCK(INSN) XCTREE (INSN, 3, NOTE)
+#define NOTE_EH_HANDLER(INSN) XCINT (INSN, 3, NOTE)
+#define NOTE_BASIC_BLOCK(INSN) XCBBDEF (INSN, 3, NOTE)
+#define NOTE_VAR_LOCATION(INSN) XCEXP (INSN, 3, NOTE)
+#define NOTE_MARKER_LOCATION(INSN) XCUINT (INSN, 3, NOTE)
+#define NOTE_CFI(INSN) XCCFI (INSN, 3, NOTE)
+#define NOTE_LABEL_NUMBER(INSN) XCINT (INSN, 3, NOTE)
+
+/* In a NOTE that is a line number, this is the line number.
+ Other kinds of NOTEs are identified by negative numbers here. */
+#define NOTE_KIND(INSN) XCINT (INSN, 4, NOTE)
+
+/* Nonzero if INSN is a note marking the beginning of a basic block. */
+#define NOTE_INSN_BASIC_BLOCK_P(INSN) \
+ (NOTE_P (INSN) && NOTE_KIND (INSN) == NOTE_INSN_BASIC_BLOCK)
+
+/* Nonzero if INSN is a debug nonbind marker note,
+ for which NOTE_MARKER_LOCATION can be used. */
+#define NOTE_MARKER_P(INSN) \
+ (NOTE_P (INSN) && \
+ (NOTE_KIND (INSN) == NOTE_INSN_BEGIN_STMT \
+ || NOTE_KIND (INSN) == NOTE_INSN_INLINE_ENTRY))
+
+/* Variable declaration and the location of a variable. */
+#define PAT_VAR_LOCATION_DECL(PAT) (XCTREE ((PAT), 0, VAR_LOCATION))
+#define PAT_VAR_LOCATION_LOC(PAT) (XCEXP ((PAT), 1, VAR_LOCATION))
+
+/* Initialization status of the variable in the location. Status
+ can be unknown, uninitialized or initialized. See enumeration
+ type below. */
+#define PAT_VAR_LOCATION_STATUS(PAT) \
+ (RTL_FLAG_CHECK1 ("PAT_VAR_LOCATION_STATUS", PAT, VAR_LOCATION) \
+ ->u2.var_location_status)
+
+/* Accessors for a NOTE_INSN_VAR_LOCATION. */
+#define NOTE_VAR_LOCATION_DECL(NOTE) \
+ PAT_VAR_LOCATION_DECL (NOTE_VAR_LOCATION (NOTE))
+#define NOTE_VAR_LOCATION_LOC(NOTE) \
+ PAT_VAR_LOCATION_LOC (NOTE_VAR_LOCATION (NOTE))
+#define NOTE_VAR_LOCATION_STATUS(NOTE) \
+ PAT_VAR_LOCATION_STATUS (NOTE_VAR_LOCATION (NOTE))
+
+/* Evaluate to TRUE if INSN is a debug insn that denotes a variable
+ location/value tracking annotation. */
+#define DEBUG_BIND_INSN_P(INSN) \
+ (DEBUG_INSN_P (INSN) \
+ && (GET_CODE (PATTERN (INSN)) \
+ == VAR_LOCATION))
+/* Evaluate to TRUE if INSN is a debug insn that denotes a program
+ source location marker. */
+#define DEBUG_MARKER_INSN_P(INSN) \
+ (DEBUG_INSN_P (INSN) \
+ && (GET_CODE (PATTERN (INSN)) \
+ != VAR_LOCATION))
+/* Evaluate to the marker kind. */
+#define INSN_DEBUG_MARKER_KIND(INSN) \
+ (GET_CODE (PATTERN (INSN)) == DEBUG_MARKER \
+ ? (GET_MODE (PATTERN (INSN)) == VOIDmode \
+ ? NOTE_INSN_BEGIN_STMT \
+ : GET_MODE (PATTERN (INSN)) == BLKmode \
+ ? NOTE_INSN_INLINE_ENTRY \
+ : (enum insn_note)-1) \
+ : (enum insn_note)-1)
+/* Create patterns for debug markers. These and the above abstract
+ the representation, so that it's easier to get rid of the abuse of
+ the mode to hold the marker kind. Other marker types are
+ envisioned, so a single bit flag won't do; maybe separate RTL codes
+ wouldn't be a problem. */
+#define GEN_RTX_DEBUG_MARKER_BEGIN_STMT_PAT() \
+ gen_rtx_DEBUG_MARKER (VOIDmode)
+#define GEN_RTX_DEBUG_MARKER_INLINE_ENTRY_PAT() \
+ gen_rtx_DEBUG_MARKER (BLKmode)
+
+/* The VAR_LOCATION rtx in a DEBUG_INSN. */
+#define INSN_VAR_LOCATION(INSN) \
+ (RTL_FLAG_CHECK1 ("INSN_VAR_LOCATION", PATTERN (INSN), VAR_LOCATION))
+/* A pointer to the VAR_LOCATION rtx in a DEBUG_INSN. */
+#define INSN_VAR_LOCATION_PTR(INSN) \
+ (&PATTERN (INSN))
+
+/* Accessors for a tree-expanded var location debug insn. */
+#define INSN_VAR_LOCATION_DECL(INSN) \
+ PAT_VAR_LOCATION_DECL (INSN_VAR_LOCATION (INSN))
+#define INSN_VAR_LOCATION_LOC(INSN) \
+ PAT_VAR_LOCATION_LOC (INSN_VAR_LOCATION (INSN))
+#define INSN_VAR_LOCATION_STATUS(INSN) \
+ PAT_VAR_LOCATION_STATUS (INSN_VAR_LOCATION (INSN))
+
+/* Expand to the RTL that denotes an unknown variable location in a
+ DEBUG_INSN. */
+#define gen_rtx_UNKNOWN_VAR_LOC() (gen_rtx_CLOBBER (VOIDmode, const0_rtx))
+
+/* Determine whether X is such an unknown location. */
+#define VAR_LOC_UNKNOWN_P(X) \
+ (GET_CODE (X) == CLOBBER && XEXP ((X), 0) == const0_rtx)
+
+/* 1 if RTX is emitted after a call, but it should take effect before
+ the call returns. */
+#define NOTE_DURING_CALL_P(RTX) \
+ (RTL_FLAG_CHECK1 ("NOTE_VAR_LOCATION_DURING_CALL_P", (RTX), NOTE)->call)
+
+/* DEBUG_EXPR_DECL corresponding to a DEBUG_EXPR RTX. */
+#define DEBUG_EXPR_TREE_DECL(RTX) XCTREE (RTX, 0, DEBUG_EXPR)
+
+/* VAR_DECL/PARM_DECL DEBUG_IMPLICIT_PTR takes address of. */
+#define DEBUG_IMPLICIT_PTR_DECL(RTX) XCTREE (RTX, 0, DEBUG_IMPLICIT_PTR)
+
+/* PARM_DECL DEBUG_PARAMETER_REF references. */
+#define DEBUG_PARAMETER_REF_DECL(RTX) XCTREE (RTX, 0, DEBUG_PARAMETER_REF)
+
+/* Codes that appear in the NOTE_KIND field for kinds of notes
+ that are not line numbers. These codes are all negative.
+
+ Notice that we do not try to use zero here for any of
+ the special note codes because sometimes the source line
+ actually can be zero! This happens (for example) when we
+ are generating code for the per-translation-unit constructor
+ and destructor routines for some C++ translation unit. */
+
+enum insn_note
+{
+#define DEF_INSN_NOTE(NAME) NAME,
+#include "insn-notes.def"
+#undef DEF_INSN_NOTE
+
+ NOTE_INSN_MAX
+};
+
+/* Names for NOTE insn's other than line numbers. */
+
+extern const char * const note_insn_name[NOTE_INSN_MAX];
+#define GET_NOTE_INSN_NAME(NOTE_CODE) \
+ (note_insn_name[(NOTE_CODE)])
+
+/* The name of a label, in case it corresponds to an explicit label
+ in the input source code. */
+#define LABEL_NAME(RTX) XCSTR (RTX, 6, CODE_LABEL)
+
+/* In jump.cc, each label contains a count of the number
+ of LABEL_REFs that point at it, so unused labels can be deleted. */
+#define LABEL_NUSES(RTX) XCINT (RTX, 4, CODE_LABEL)
+
+/* Labels carry a two-bit field composed of the ->jump and ->call
+ bits. This field indicates whether the label is an alternate
+ entry point, and if so, what kind. */
+enum label_kind
+{
+ LABEL_NORMAL = 0, /* ordinary label */
+ LABEL_STATIC_ENTRY, /* alternate entry point, not exported */
+ LABEL_GLOBAL_ENTRY, /* alternate entry point, exported */
+ LABEL_WEAK_ENTRY /* alternate entry point, exported as weak symbol */
+};
+
+#if defined ENABLE_RTL_FLAG_CHECKING && (GCC_VERSION > 2007)
+
+/* Retrieve the kind of LABEL. */
+#define LABEL_KIND(LABEL) __extension__ \
+({ __typeof (LABEL) const _label = (LABEL); \
+ if (! LABEL_P (_label)) \
+ rtl_check_failed_flag ("LABEL_KIND", _label, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ (enum label_kind) ((_label->jump << 1) | _label->call); })
+
+/* Set the kind of LABEL. */
+#define SET_LABEL_KIND(LABEL, KIND) do { \
+ __typeof (LABEL) const _label = (LABEL); \
+ const unsigned int _kind = (KIND); \
+ if (! LABEL_P (_label)) \
+ rtl_check_failed_flag ("SET_LABEL_KIND", _label, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ _label->jump = ((_kind >> 1) & 1); \
+ _label->call = (_kind & 1); \
+} while (0)
+
+#else
+
+/* Retrieve the kind of LABEL. */
+#define LABEL_KIND(LABEL) \
+ ((enum label_kind) (((LABEL)->jump << 1) | (LABEL)->call))
+
+/* Set the kind of LABEL. */
+#define SET_LABEL_KIND(LABEL, KIND) do { \
+ rtx const _label = (LABEL); \
+ const unsigned int _kind = (KIND); \
+ _label->jump = ((_kind >> 1) & 1); \
+ _label->call = (_kind & 1); \
+} while (0)
+
+#endif /* rtl flag checking */
+
+#define LABEL_ALT_ENTRY_P(LABEL) (LABEL_KIND (LABEL) != LABEL_NORMAL)
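+
+/* Usage sketch (illustrative only, for a CODE_LABEL `label'): mark the
+   label as an exported alternate entry point and query it back:
+
+     SET_LABEL_KIND (label, LABEL_GLOBAL_ENTRY);
+     gcc_assert (LABEL_KIND (label) == LABEL_GLOBAL_ENTRY);
+     gcc_assert (LABEL_ALT_ENTRY_P (label));  */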
+
+/* In jump.cc, each JUMP_INSN can point to a label that it can jump to,
+ so that if the JUMP_INSN is deleted, the label's LABEL_NUSES can
+ be decremented and possibly the label can be deleted. */
+#define JUMP_LABEL(INSN) XCEXP (INSN, 7, JUMP_INSN)
+
+inline rtx_insn *JUMP_LABEL_AS_INSN (const rtx_insn *insn)
+{
+ return safe_as_a <rtx_insn *> (JUMP_LABEL (insn));
+}
+
+/* Methods of rtx_jump_insn. */
+
+inline rtx rtx_jump_insn::jump_label () const
+{
+ return JUMP_LABEL (this);
+}
+
+inline rtx_code_label *rtx_jump_insn::jump_target () const
+{
+ return safe_as_a <rtx_code_label *> (JUMP_LABEL (this));
+}
+
+inline void rtx_jump_insn::set_jump_target (rtx_code_label *target)
+{
+ JUMP_LABEL (this) = target;
+}
+
+/* Once basic blocks are found, each CODE_LABEL starts a chain that
+ goes through all the LABEL_REFs that jump to that label. The chain
+ eventually winds up at the CODE_LABEL: it is circular. */
+#define LABEL_REFS(LABEL) XCEXP (LABEL, 3, CODE_LABEL)
+
+/* Get the label that a LABEL_REF references. */
+inline rtx_insn *
+label_ref_label (const_rtx ref)
+{
+ return as_a<rtx_insn *> (XCEXP (ref, 0, LABEL_REF));
+}
+
+/* Set the label that LABEL_REF ref refers to. */
+
+inline void
+set_label_ref_label (rtx ref, rtx_insn *label)
+{
+ XCEXP (ref, 0, LABEL_REF) = label;
+}
+
+/* For a REG rtx, REGNO extracts the register number. REGNO can only
+   be used on the RHS; use SET_REGNO to change the value.  */
+#define REGNO(RTX) (rhs_regno(RTX))
+#define SET_REGNO(RTX, N) (df_ref_change_reg_with_loc (RTX, N))
+
+/* Return the number of consecutive registers in a REG. This is always
+ 1 for pseudo registers and is determined by TARGET_HARD_REGNO_NREGS for
+ hard registers. */
+#define REG_NREGS(RTX) (REG_CHECK (RTX)->nregs)
+
+/* ORIGINAL_REGNO holds the number the register originally had; for a
+ pseudo register turned into a hard reg this will hold the old pseudo
+ register number. */
+#define ORIGINAL_REGNO(RTX) \
+ (RTL_FLAG_CHECK1 ("ORIGINAL_REGNO", (RTX), REG)->u2.original_regno)
+
+/* Force the REGNO macro to only be used on the RHS: rhs_regno returns
+   the number by value, so REGNO cannot appear as an lvalue.  */
+inline unsigned int
+rhs_regno (const_rtx x)
+{
+ return REG_CHECK (x)->regno;
+}
+
+/* Return the final register in REG X plus one. */
+inline unsigned int
+END_REGNO (const_rtx x)
+{
+ return REGNO (x) + REG_NREGS (x);
+}
+
+/* Change the REGNO and REG_NREGS of REG X to the specified values,
+ bypassing the df machinery. */
+inline void
+set_regno_raw (rtx x, unsigned int regno, unsigned int nregs)
+{
+ reg_info *reg = REG_CHECK (x);
+ reg->regno = regno;
+ reg->nregs = nregs;
+}
+
+/* 1 if RTX is a reg or parallel that is the current function's return
+ value. */
+#define REG_FUNCTION_VALUE_P(RTX) \
+ (RTL_FLAG_CHECK2 ("REG_FUNCTION_VALUE_P", (RTX), REG, PARALLEL)->return_val)
+
+/* 1 if RTX is a reg that corresponds to a variable declared by the user. */
+#define REG_USERVAR_P(RTX) \
+ (RTL_FLAG_CHECK1 ("REG_USERVAR_P", (RTX), REG)->volatil)
+
+/* 1 if RTX is a reg that holds a pointer value. */
+#define REG_POINTER(RTX) \
+ (RTL_FLAG_CHECK1 ("REG_POINTER", (RTX), REG)->frame_related)
+
+/* 1 if RTX is a mem that holds a pointer value. */
+#define MEM_POINTER(RTX) \
+ (RTL_FLAG_CHECK1 ("MEM_POINTER", (RTX), MEM)->frame_related)
+
+/* 1 if the given register REG corresponds to a hard register. */
+#define HARD_REGISTER_P(REG) (HARD_REGISTER_NUM_P (REGNO (REG)))
+
+/* 1 if the given register number REG_NO corresponds to a hard register. */
+#define HARD_REGISTER_NUM_P(REG_NO) ((REG_NO) < FIRST_PSEUDO_REGISTER)
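+
+/* Usage sketch (illustrative only; `mark_reg_used' is a hypothetical
+   callback): REG_NREGS and END_REGNO make it easy to visit every hard
+   register occupied by a multi-register REG:
+
+     if (HARD_REGISTER_P (reg))
+       for (unsigned int r = REGNO (reg); r < END_REGNO (reg); ++r)
+         mark_reg_used (r);  */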
+
+/* For a CONST_INT rtx, INTVAL extracts the integer. */
+#define INTVAL(RTX) XCWINT (RTX, 0, CONST_INT)
+#define UINTVAL(RTX) ((unsigned HOST_WIDE_INT) INTVAL (RTX))
+
+/* For a CONST_WIDE_INT, CONST_WIDE_INT_NUNITS is the number of
+ elements actually needed to represent the constant.
+ CONST_WIDE_INT_ELT gets one of the elements. 0 is the least
+ significant HOST_WIDE_INT. */
+#define CONST_WIDE_INT_VEC(RTX) HWIVEC_CHECK (RTX, CONST_WIDE_INT)
+#define CONST_WIDE_INT_NUNITS(RTX) CWI_GET_NUM_ELEM (RTX)
+#define CONST_WIDE_INT_ELT(RTX, N) CWI_ELT (RTX, N)
+
+/* For a CONST_POLY_INT, CONST_POLY_INT_COEFFS gives access to the
+ individual coefficients, in the form of a trailing_wide_ints structure. */
+#define CONST_POLY_INT_COEFFS(RTX) \
+ (RTL_FLAG_CHECK1("CONST_POLY_INT_COEFFS", (RTX), \
+ CONST_POLY_INT)->u.cpi.coeffs)
+
+/* For a CONST_DOUBLE:
+#if TARGET_SUPPORTS_WIDE_INT == 0
+ For a VOIDmode, there are two integers CONST_DOUBLE_LOW is the
+ low-order word and ..._HIGH the high-order.
+#endif
+ For a float, there is a REAL_VALUE_TYPE structure, and
+ CONST_DOUBLE_REAL_VALUE(r) is a pointer to it. */
+#define CONST_DOUBLE_LOW(r) XCMWINT (r, 0, CONST_DOUBLE, VOIDmode)
+#define CONST_DOUBLE_HIGH(r) XCMWINT (r, 1, CONST_DOUBLE, VOIDmode)
+#define CONST_DOUBLE_REAL_VALUE(r) \
+ ((const struct real_value *) XCNMPRV (r, CONST_DOUBLE, VOIDmode))
+
+#define CONST_FIXED_VALUE(r) \
+ ((const struct fixed_value *) XCNMPFV (r, CONST_FIXED, VOIDmode))
+#define CONST_FIXED_VALUE_HIGH(r) \
+ ((HOST_WIDE_INT) (CONST_FIXED_VALUE (r)->data.high))
+#define CONST_FIXED_VALUE_LOW(r) \
+ ((HOST_WIDE_INT) (CONST_FIXED_VALUE (r)->data.low))
+
+/* For a CONST_VECTOR, return element #n. */
+#define CONST_VECTOR_ELT(RTX, N) const_vector_elt (RTX, N)
+
+/* See rtl.texi for a description of these macros. */
+#define CONST_VECTOR_NPATTERNS(RTX) \
+ (RTL_FLAG_CHECK1 ("CONST_VECTOR_NPATTERNS", (RTX), CONST_VECTOR) \
+ ->u2.const_vector.npatterns)
+
+#define CONST_VECTOR_NELTS_PER_PATTERN(RTX) \
+ (RTL_FLAG_CHECK1 ("CONST_VECTOR_NELTS_PER_PATTERN", (RTX), CONST_VECTOR) \
+ ->u2.const_vector.nelts_per_pattern)
+
+#define CONST_VECTOR_DUPLICATE_P(RTX) \
+ (CONST_VECTOR_NELTS_PER_PATTERN (RTX) == 1)
+
+#define CONST_VECTOR_STEPPED_P(RTX) \
+ (CONST_VECTOR_NELTS_PER_PATTERN (RTX) == 3)
+
+#define CONST_VECTOR_ENCODED_ELT(RTX, N) XCVECEXP (RTX, 0, N, CONST_VECTOR)
+
+/* Return the number of elements encoded directly in a CONST_VECTOR. */
+
+inline unsigned int
+const_vector_encoded_nelts (const_rtx x)
+{
+ return CONST_VECTOR_NPATTERNS (x) * CONST_VECTOR_NELTS_PER_PATTERN (x);
+}
+
+/* For a CONST_VECTOR, return the number of elements in a vector. */
+#define CONST_VECTOR_NUNITS(RTX) GET_MODE_NUNITS (GET_MODE (RTX))
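+
+/* Usage sketch (illustrative only; `process' is a hypothetical
+   callback): iterate only the explicitly-encoded elements rather than
+   all CONST_VECTOR_NUNITS units, which need not even be a compile-time
+   constant for variable-length vectors:
+
+     unsigned int n = const_vector_encoded_nelts (v);
+     for (unsigned int i = 0; i < n; ++i)
+       process (CONST_VECTOR_ENCODED_ELT (v, i));  */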
+
+/* For a SUBREG rtx, SUBREG_REG extracts the value we want a subreg of.
+ SUBREG_BYTE extracts the byte-number. */
+
+#define SUBREG_REG(RTX) XCEXP (RTX, 0, SUBREG)
+#define SUBREG_BYTE(RTX) XCSUBREG (RTX, 1, SUBREG)
+
+/* in rtlanal.cc */
+/* Return the right cost to give to an operation
+ to make the cost of the corresponding register-to-register instruction
+ N times that of a fast register-to-register instruction. */
+#define COSTS_N_INSNS(N) ((N) * 4)
+
+/* Maximum cost of an rtl expression. This value has the special meaning
+ not to use an rtx with this cost under any circumstances. */
+#define MAX_COST INT_MAX
+
+/* Return true if CODE always has VOIDmode. */
+
+inline bool
+always_void_p (enum rtx_code code)
+{
+ return code == SET;
+}
+
+/* A structure to hold all available cost information about an rtl
+ expression. */
+struct full_rtx_costs
+{
+ int speed;
+ int size;
+};
+
+/* Initialize a full_rtx_costs structure C to the maximum cost. */
+inline void
+init_costs_to_max (struct full_rtx_costs *c)
+{
+ c->speed = MAX_COST;
+ c->size = MAX_COST;
+}
+
+/* Initialize a full_rtx_costs structure C to zero cost. */
+inline void
+init_costs_to_zero (struct full_rtx_costs *c)
+{
+ c->speed = 0;
+ c->size = 0;
+}
+
+/* Compare two full_rtx_costs structures A and B, returning true
+ if A < B when optimizing for speed. */
+inline bool
+costs_lt_p (struct full_rtx_costs *a, struct full_rtx_costs *b,
+ bool speed)
+{
+ if (speed)
+ return (a->speed < b->speed
+ || (a->speed == b->speed && a->size < b->size));
+ else
+ return (a->size < b->size
+ || (a->size == b->size && a->speed < b->speed));
+}
+
+/* Increase both members of the full_rtx_costs structure C by the
+ cost of N insns. */
+inline void
+costs_add_n_insns (struct full_rtx_costs *c, int n)
+{
+ c->speed += COSTS_N_INSNS (n);
+ c->size += COSTS_N_INSNS (n);
+}
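+
+/* Usage sketch (illustrative only): initialize two cost pairs, fill
+   them in with get_full_rtx_cost (declared below), and keep the
+   cheaper candidate:
+
+     struct full_rtx_costs a, b;
+     init_costs_to_max (&a);
+     init_costs_to_max (&b);
+     ... fill A and B via get_full_rtx_cost ...
+     bool prefer_a = costs_lt_p (&a, &b, true);
+
+   where the final argument selects speed (true) or size (false).  */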
+
+/* Describes the shape of a subreg:
+
+ inner_mode == the mode of the SUBREG_REG
+ offset == the SUBREG_BYTE
+ outer_mode == the mode of the SUBREG itself. */
+class subreg_shape {
+public:
+ subreg_shape (machine_mode, poly_uint16, machine_mode);
+ bool operator == (const subreg_shape &) const;
+ bool operator != (const subreg_shape &) const;
+ unsigned HOST_WIDE_INT unique_id () const;
+
+ machine_mode inner_mode;
+ poly_uint16 offset;
+ machine_mode outer_mode;
+};
+
+inline
+subreg_shape::subreg_shape (machine_mode inner_mode_in,
+ poly_uint16 offset_in,
+ machine_mode outer_mode_in)
+ : inner_mode (inner_mode_in), offset (offset_in), outer_mode (outer_mode_in)
+{}
+
+inline bool
+subreg_shape::operator == (const subreg_shape &other) const
+{
+ return (inner_mode == other.inner_mode
+ && known_eq (offset, other.offset)
+ && outer_mode == other.outer_mode);
+}
+
+inline bool
+subreg_shape::operator != (const subreg_shape &other) const
+{
+ return !operator == (other);
+}
+
+/* Return an integer that uniquely identifies this shape. Structures
+ like rtx_def assume that a mode can fit in an 8-bit bitfield and no
+ current mode is anywhere near being 65536 bytes in size, so the
+ id comfortably fits in an int. */
+
+inline unsigned HOST_WIDE_INT
+subreg_shape::unique_id () const
+{
+ { STATIC_ASSERT (MAX_MACHINE_MODE <= 256); }
+ { STATIC_ASSERT (NUM_POLY_INT_COEFFS <= 3); }
+ { STATIC_ASSERT (sizeof (offset.coeffs[0]) <= 2); }
+ int res = (int) inner_mode + ((int) outer_mode << 8);
+ for (int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ res += (HOST_WIDE_INT) offset.coeffs[i] << ((1 + i) * 16);
+ return res;
+}
+
+/* Return the shape of a SUBREG rtx. */
+
+inline subreg_shape
+shape_of_subreg (const_rtx x)
+{
+ return subreg_shape (GET_MODE (SUBREG_REG (x)),
+ SUBREG_BYTE (x), GET_MODE (x));
+}
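+
+/* Usage sketch (illustrative only; `reuse_cached_entry' is a
+   hypothetical cache lookup): shapes compare structurally, and
+   unique_id () gives a key suitable for hashing:
+
+     subreg_shape s1 = shape_of_subreg (x);
+     subreg_shape s2 = shape_of_subreg (y);
+     if (s1 == s2)
+       reuse_cached_entry (s1.unique_id ());  */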
+
+/* Information about an address. This structure is supposed to be able
+ to represent all supported target addresses. Please extend it if it
+ is not yet general enough. */
+struct address_info {
+ /* The mode of the value being addressed, or VOIDmode if this is
+ a load-address operation with no known address mode. */
+ machine_mode mode;
+
+ /* The address space. */
+ addr_space_t as;
+
+ /* True if this is an RTX_AUTOINC address. */
+ bool autoinc_p;
+
+ /* A pointer to the top-level address. */
+ rtx *outer;
+
+ /* A pointer to the inner address, after all address mutations
+ have been stripped from the top-level address. It can be one
+ of the following:
+
+ - A {PRE,POST}_{INC,DEC} of *BASE. SEGMENT, INDEX and DISP are null.
+
+ - A {PRE,POST}_MODIFY of *BASE. In this case either INDEX or DISP
+ points to the step value, depending on whether the step is variable
+ or constant respectively. SEGMENT is null.
+
+ - A plain sum of the form SEGMENT + BASE + INDEX + DISP,
+ with null fields evaluating to 0. */
+ rtx *inner;
+
+ /* Components that make up *INNER. Each one may be null or nonnull.
+ When nonnull, their meanings are as follows:
+
+ - *SEGMENT is the "segment" of memory to which the address refers.
+ This value is entirely target-specific and is only called a "segment"
+ because that's its most typical use. It contains exactly one UNSPEC,
+ pointed to by SEGMENT_TERM. The contents of *SEGMENT do not need
+ reloading.
+
+ - *BASE is a variable expression representing a base address.
+ It contains exactly one REG, SUBREG or MEM, pointed to by BASE_TERM.
+
+ - *INDEX is a variable expression representing an index value.
+ It may be a scaled expression, such as a MULT. It has exactly
+ one REG, SUBREG or MEM, pointed to by INDEX_TERM.
+
+ - *DISP is a constant, possibly mutated. DISP_TERM points to the
+ unmutated RTX_CONST_OBJ. */
+ rtx *segment;
+ rtx *base;
+ rtx *index;
+ rtx *disp;
+
+ rtx *segment_term;
+ rtx *base_term;
+ rtx *index_term;
+ rtx *disp_term;
+
+ /* In a {PRE,POST}_MODIFY address, this points to a second copy
+ of BASE_TERM, otherwise it is null. */
+ rtx *base_term2;
+
+ /* ADDRESS if this structure describes an address operand, MEM if
+ it describes a MEM address. */
+ enum rtx_code addr_outer_code;
+
+ /* If BASE is nonnull, this is the code of the rtx that contains it. */
+ enum rtx_code base_outer_code;
+};
+
+/* This is used to bundle an rtx and a mode together so that the pair
+ can be used with the wi:: routines. If we ever put modes into rtx
+ integer constants, this should go away and then just pass an rtx in. */
+typedef std::pair <rtx, machine_mode> rtx_mode_t;
+
+namespace wi
+{
+ template <>
+ struct int_traits <rtx_mode_t>
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ static const bool host_dependent_precision = false;
+ /* This ought to be true, except for the special case that BImode
+ is canonicalized to STORE_FLAG_VALUE, which might be 1. */
+ static const bool is_sign_extended = false;
+ static unsigned int get_precision (const rtx_mode_t &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const rtx_mode_t &);
+ };
+}
+
+inline unsigned int
+wi::int_traits <rtx_mode_t>::get_precision (const rtx_mode_t &x)
+{
+ return GET_MODE_PRECISION (as_a <scalar_mode> (x.second));
+}
+
+inline wi::storage_ref
+wi::int_traits <rtx_mode_t>::decompose (HOST_WIDE_INT *,
+ unsigned int precision,
+ const rtx_mode_t &x)
+{
+ gcc_checking_assert (precision == get_precision (x));
+ switch (GET_CODE (x.first))
+ {
+ case CONST_INT:
+ if (precision < HOST_BITS_PER_WIDE_INT)
+ /* Nonzero BImodes are stored as STORE_FLAG_VALUE, which on many
+ targets is 1 rather than -1. */
+ gcc_checking_assert (INTVAL (x.first)
+ == sext_hwi (INTVAL (x.first), precision)
+ || (x.second == BImode && INTVAL (x.first) == 1));
+
+ return wi::storage_ref (&INTVAL (x.first), 1, precision);
+
+ case CONST_WIDE_INT:
+ return wi::storage_ref (&CONST_WIDE_INT_ELT (x.first, 0),
+ CONST_WIDE_INT_NUNITS (x.first), precision);
+
+#if TARGET_SUPPORTS_WIDE_INT == 0
+ case CONST_DOUBLE:
+ return wi::storage_ref (&CONST_DOUBLE_LOW (x.first), 2, precision);
+#endif
+
+ default:
+ gcc_unreachable ();
+ }
+}
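+
+/* Usage sketch (illustrative only): pairing an rtx constant with its
+   mode lets the generic wide-int routines operate on it, e.g. a signed
+   comparison of two constants A and B of scalar mode MODE:
+
+     bool lt = wi::lts_p (rtx_mode_t (a, mode), rtx_mode_t (b, mode));  */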
+
+namespace wi
+{
+ hwi_with_prec shwi (HOST_WIDE_INT, machine_mode mode);
+ wide_int min_value (machine_mode, signop);
+ wide_int max_value (machine_mode, signop);
+}
+
+inline wi::hwi_with_prec
+wi::shwi (HOST_WIDE_INT val, machine_mode mode)
+{
+ return shwi (val, GET_MODE_PRECISION (as_a <scalar_mode> (mode)));
+}
+
+/* Produce the smallest number that is represented in MODE. The precision
+ is taken from MODE and the sign from SGN. */
+inline wide_int
+wi::min_value (machine_mode mode, signop sgn)
+{
+ return min_value (GET_MODE_PRECISION (as_a <scalar_mode> (mode)), sgn);
+}
+
+/* Produce the largest number that is represented in MODE. The precision
+ is taken from MODE and the sign from SGN. */
+inline wide_int
+wi::max_value (machine_mode mode, signop sgn)
+{
+ return max_value (GET_MODE_PRECISION (as_a <scalar_mode> (mode)), sgn);
+}
+
+namespace wi
+{
+ typedef poly_int<NUM_POLY_INT_COEFFS,
+ generic_wide_int <wide_int_ref_storage <false, false> > >
+ rtx_to_poly_wide_ref;
+ rtx_to_poly_wide_ref to_poly_wide (const_rtx, machine_mode);
+}
+
+/* Return the value of a CONST_POLY_INT in its native precision. */
+
+inline wi::rtx_to_poly_wide_ref
+const_poly_int_value (const_rtx x)
+{
+ poly_int<NUM_POLY_INT_COEFFS, WIDE_INT_REF_FOR (wide_int)> res;
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ res.coeffs[i] = CONST_POLY_INT_COEFFS (x)[i];
+ return res;
+}
+
+/* Return true if X is a scalar integer or a CONST_POLY_INT. The value
+ can then be extracted using wi::to_poly_wide. */
+
+inline bool
+poly_int_rtx_p (const_rtx x)
+{
+ return CONST_SCALAR_INT_P (x) || CONST_POLY_INT_P (x);
+}
+
+/* Access X (which satisfies poly_int_rtx_p) as a poly_wide_int.
+ MODE is the mode of X. */
+
+inline wi::rtx_to_poly_wide_ref
+wi::to_poly_wide (const_rtx x, machine_mode mode)
+{
+ if (CONST_POLY_INT_P (x))
+ return const_poly_int_value (x);
+ return rtx_mode_t (const_cast<rtx> (x), mode);
+}
+
+/* Return the value of X as a poly_int64. */
+
+inline poly_int64
+rtx_to_poly_int64 (const_rtx x)
+{
+ if (CONST_POLY_INT_P (x))
+ {
+ poly_int64 res;
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ res.coeffs[i] = CONST_POLY_INT_COEFFS (x)[i].to_shwi ();
+ return res;
+ }
+ return INTVAL (x);
+}
+
+/* Return true if arbitrary value X is an integer constant that can
+ be represented as a poly_int64. Store the value in *RES if so,
+ otherwise leave it unmodified. */
+
+inline bool
+poly_int_rtx_p (const_rtx x, poly_int64_pod *res)
+{
+ if (CONST_INT_P (x))
+ {
+ *res = INTVAL (x);
+ return true;
+ }
+ if (CONST_POLY_INT_P (x))
+ {
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ if (!wi::fits_shwi_p (CONST_POLY_INT_COEFFS (x)[i]))
+ return false;
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ res->coeffs[i] = CONST_POLY_INT_COEFFS (x)[i].to_shwi ();
+ return true;
+ }
+ return false;
+}
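+
+/* Usage sketch (illustrative only): the overload above is the safe way
+   to read a possibly-polynomial constant operand:
+
+     poly_int64 offset;
+     if (poly_int_rtx_p (x, &offset))
+       ... OFFSET is valid here ...
+     else
+       ... X is not a (poly_)integer constant ...  */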
+
+extern void init_rtlanal (void);
+extern int rtx_cost (rtx, machine_mode, enum rtx_code, int, bool);
+extern int address_cost (rtx, machine_mode, addr_space_t, bool);
+extern void get_full_rtx_cost (rtx, machine_mode, enum rtx_code, int,
+ struct full_rtx_costs *);
+extern bool native_encode_rtx (machine_mode, rtx, vec<target_unit> &,
+ unsigned int, unsigned int);
+extern rtx native_decode_rtx (machine_mode, const vec<target_unit> &,
+ unsigned int);
+extern rtx native_decode_vector_rtx (machine_mode, const vec<target_unit> &,
+ unsigned int, unsigned int, unsigned int);
+extern poly_uint64 subreg_lsb (const_rtx);
+extern poly_uint64 subreg_size_lsb (poly_uint64, poly_uint64, poly_uint64);
+extern poly_uint64 subreg_size_offset_from_lsb (poly_uint64, poly_uint64,
+ poly_uint64);
+extern bool read_modify_subreg_p (const_rtx);
+
+/* Given a subreg's OUTER_MODE, INNER_MODE, and SUBREG_BYTE, return the
+ bit offset at which the subreg begins (counting from the least significant
+ bit of the operand). */
+
+inline poly_uint64
+subreg_lsb_1 (machine_mode outer_mode, machine_mode inner_mode,
+ poly_uint64 subreg_byte)
+{
+ return subreg_size_lsb (GET_MODE_SIZE (outer_mode),
+ GET_MODE_SIZE (inner_mode), subreg_byte);
+}
+
+/* Return the subreg byte offset for a subreg whose outer mode is
+ OUTER_MODE, whose inner mode is INNER_MODE, and where there are
+ LSB_SHIFT *bits* between the lsb of the outer value and the lsb of
+ the inner value. This is the inverse of subreg_lsb_1 (which converts
+ byte offsets to bit shifts). */
+
+inline poly_uint64
+subreg_offset_from_lsb (machine_mode outer_mode,
+ machine_mode inner_mode,
+ poly_uint64 lsb_shift)
+{
+ return subreg_size_offset_from_lsb (GET_MODE_SIZE (outer_mode),
+ GET_MODE_SIZE (inner_mode), lsb_shift);
+}
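+
+/* Worked example (illustrative only): for the SImode subreg of a DImode
+   value at SUBREG_BYTE 4,
+
+     poly_uint64 lsb = subreg_lsb_1 (SImode, DImode, 4);
+
+   yields 32 on a little-endian target (the high half) and 0 on a
+   big-endian one, since the byte offset is counted from opposite
+   ends.  */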
+
+extern unsigned int subreg_regno_offset (unsigned int, machine_mode,
+ poly_uint64, machine_mode);
+extern bool subreg_offset_representable_p (unsigned int, machine_mode,
+ poly_uint64, machine_mode);
+extern unsigned int subreg_regno (const_rtx);
+extern int simplify_subreg_regno (unsigned int, machine_mode,
+ poly_uint64, machine_mode);
+extern int lowpart_subreg_regno (unsigned int, machine_mode,
+ machine_mode);
+extern unsigned int subreg_nregs (const_rtx);
+extern unsigned int subreg_nregs_with_regno (unsigned int, const_rtx);
+extern unsigned HOST_WIDE_INT nonzero_bits (const_rtx, machine_mode);
+extern unsigned int num_sign_bit_copies (const_rtx, machine_mode);
+extern bool constant_pool_constant_p (rtx);
+extern bool truncated_to_mode (machine_mode, const_rtx);
+extern int low_bitmask_len (machine_mode, unsigned HOST_WIDE_INT);
+extern void split_double (rtx, rtx *, rtx *);
+extern rtx *strip_address_mutations (rtx *, enum rtx_code * = 0);
+extern void decompose_address (struct address_info *, rtx *,
+ machine_mode, addr_space_t, enum rtx_code);
+extern void decompose_lea_address (struct address_info *, rtx *);
+extern void decompose_mem_address (struct address_info *, rtx);
+extern void update_address (struct address_info *);
+extern HOST_WIDE_INT get_index_scale (const struct address_info *);
+extern enum rtx_code get_index_code (const struct address_info *);
+
+/* 1 if RTX is a subreg containing a reg that is already known to be
+ sign- or zero-extended from the mode of the subreg to the mode of
+ the reg. SUBREG_PROMOTED_UNSIGNED_P gives the signedness of the
+ extension.
+
+   When used as an LHS, it means that this extension must be done
+ when assigning to SUBREG_REG. */
+
+#define SUBREG_PROMOTED_VAR_P(RTX) \
+ (RTL_FLAG_CHECK1 ("SUBREG_PROMOTED", (RTX), SUBREG)->in_struct)
+
+/* Valid for subregs which are SUBREG_PROMOTED_VAR_P().  In that case
+   this gives the necessary extensions:
+   0  - signed (SRP_SIGNED)
+   1  - normal unsigned (SRP_UNSIGNED)
+   2  - value is both sign- and zero-extended for the mode
+	(SRP_SIGNED_AND_UNSIGNED).
+   -1 - pointer unsigned, which most often can be handled like unsigned
+	extension, except for generating instructions where we need to
+	emit special code (ptr_extend insns) on some architectures
+	(SRP_POINTER). */
+
+const int SRP_POINTER = -1;
+const int SRP_SIGNED = 0;
+const int SRP_UNSIGNED = 1;
+const int SRP_SIGNED_AND_UNSIGNED = 2;
+
+/* Sets promoted mode for SUBREG_PROMOTED_VAR_P(). */
+#define SUBREG_PROMOTED_SET(RTX, VAL) \
+do { \
+ rtx const _rtx = RTL_FLAG_CHECK1 ("SUBREG_PROMOTED_SET", \
+ (RTX), SUBREG); \
+ switch (VAL) \
+ { \
+ case SRP_POINTER: \
+ _rtx->volatil = 0; \
+ _rtx->unchanging = 0; \
+ break; \
+ case SRP_SIGNED: \
+ _rtx->volatil = 0; \
+ _rtx->unchanging = 1; \
+ break; \
+ case SRP_UNSIGNED: \
+ _rtx->volatil = 1; \
+ _rtx->unchanging = 0; \
+ break; \
+ case SRP_SIGNED_AND_UNSIGNED: \
+ _rtx->volatil = 1; \
+ _rtx->unchanging = 1; \
+ break; \
+ } \
+} while (0)
+
+/* Gets the value stored in promoted mode for SUBREG_PROMOTED_VAR_P(),
+ including SRP_SIGNED_AND_UNSIGNED if promoted for
+ both signed and unsigned. */
+#define SUBREG_PROMOTED_GET(RTX) \
+ (2 * (RTL_FLAG_CHECK1 ("SUBREG_PROMOTED_GET", (RTX), SUBREG)->volatil)\
+ + (RTX)->unchanging - 1)
+
+/* Returns sign of promoted mode for SUBREG_PROMOTED_VAR_P(). */
+#define SUBREG_PROMOTED_SIGN(RTX) \
+ ((RTL_FLAG_CHECK1 ("SUBREG_PROMOTED_SIGN", (RTX), SUBREG)->volatil) ? 1\
+ : (RTX)->unchanging - 1)
+
+/* Predicate to check if RTX of SUBREG_PROMOTED_VAR_P() is promoted
+ for SIGNED type. */
+#define SUBREG_PROMOTED_SIGNED_P(RTX) \
+ (RTL_FLAG_CHECK1 ("SUBREG_PROMOTED_SIGNED_P", (RTX), SUBREG)->unchanging)
+
+/* Predicate to check if RTX of SUBREG_PROMOTED_VAR_P() is promoted
+ for UNSIGNED type. */
+#define SUBREG_PROMOTED_UNSIGNED_P(RTX) \
+ (RTL_FLAG_CHECK1 ("SUBREG_PROMOTED_UNSIGNED_P", (RTX), SUBREG)->volatil)
+
+/* Checks if RTX of SUBREG_PROMOTED_VAR_P() is promoted for given SIGN. */
+#define SUBREG_CHECK_PROMOTED_SIGN(RTX, SIGN) \
+((SIGN) == SRP_POINTER ? SUBREG_PROMOTED_GET (RTX) == SRP_POINTER \
+ : (SIGN) == SRP_SIGNED ? SUBREG_PROMOTED_SIGNED_P (RTX) \
+ : SUBREG_PROMOTED_UNSIGNED_P (RTX))
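+
+/* Usage sketch (illustrative only, for a SUBREG `x' known to carry a
+   zero-extended value):
+
+     SUBREG_PROMOTED_VAR_P (x) = 1;
+     SUBREG_PROMOTED_SET (x, SRP_UNSIGNED);
+     gcc_assert (SUBREG_PROMOTED_UNSIGNED_P (x));
+     gcc_assert (SUBREG_CHECK_PROMOTED_SIGN (x, SRP_UNSIGNED));  */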
+
+/* True if the REG is the static chain register for some CALL_INSN. */
+#define STATIC_CHAIN_REG_P(RTX) \
+ (RTL_FLAG_CHECK1 ("STATIC_CHAIN_REG_P", (RTX), REG)->jump)
+
+/* True if the subreg was generated by LRA for reload insns. Such
+ subregs are valid only during LRA. */
+#define LRA_SUBREG_P(RTX) \
+ (RTL_FLAG_CHECK1 ("LRA_SUBREG_P", (RTX), SUBREG)->jump)
+
+/* Access various components of an ASM_OPERANDS rtx. */
+
+#define ASM_OPERANDS_TEMPLATE(RTX) XCSTR (RTX, 0, ASM_OPERANDS)
+#define ASM_OPERANDS_OUTPUT_CONSTRAINT(RTX) XCSTR (RTX, 1, ASM_OPERANDS)
+#define ASM_OPERANDS_OUTPUT_IDX(RTX) XCINT (RTX, 2, ASM_OPERANDS)
+#define ASM_OPERANDS_INPUT_VEC(RTX) XCVEC (RTX, 3, ASM_OPERANDS)
+#define ASM_OPERANDS_INPUT_CONSTRAINT_VEC(RTX) XCVEC (RTX, 4, ASM_OPERANDS)
+#define ASM_OPERANDS_INPUT(RTX, N) XCVECEXP (RTX, 3, N, ASM_OPERANDS)
+#define ASM_OPERANDS_INPUT_LENGTH(RTX) XCVECLEN (RTX, 3, ASM_OPERANDS)
+#define ASM_OPERANDS_INPUT_CONSTRAINT_EXP(RTX, N) \
+ XCVECEXP (RTX, 4, N, ASM_OPERANDS)
+#define ASM_OPERANDS_INPUT_CONSTRAINT(RTX, N) \
+ XSTR (XCVECEXP (RTX, 4, N, ASM_OPERANDS), 0)
+#define ASM_OPERANDS_INPUT_MODE(RTX, N) \
+ GET_MODE (XCVECEXP (RTX, 4, N, ASM_OPERANDS))
+#define ASM_OPERANDS_LABEL_VEC(RTX) XCVEC (RTX, 5, ASM_OPERANDS)
+#define ASM_OPERANDS_LABEL_LENGTH(RTX) XCVECLEN (RTX, 5, ASM_OPERANDS)
+#define ASM_OPERANDS_LABEL(RTX, N) XCVECEXP (RTX, 5, N, ASM_OPERANDS)
+#define ASM_OPERANDS_SOURCE_LOCATION(RTX) XCUINT (RTX, 6, ASM_OPERANDS)
+#define ASM_INPUT_SOURCE_LOCATION(RTX) XCUINT (RTX, 1, ASM_INPUT)
+
+/* 1 if RTX is a mem that is statically allocated in read-only memory. */
+#define MEM_READONLY_P(RTX) \
+ (RTL_FLAG_CHECK1 ("MEM_READONLY_P", (RTX), MEM)->unchanging)
+
+/* 1 if RTX is a mem and we should keep the alias set for this mem
+   unchanged when we access a component.  Set to 1, for example, when we
+ are already in a non-addressable component of an aggregate. */
+#define MEM_KEEP_ALIAS_SET_P(RTX) \
+ (RTL_FLAG_CHECK1 ("MEM_KEEP_ALIAS_SET_P", (RTX), MEM)->jump)
+
+/* 1 if RTX is a mem or asm_operand for a volatile reference. */
+#define MEM_VOLATILE_P(RTX) \
+ (RTL_FLAG_CHECK3 ("MEM_VOLATILE_P", (RTX), MEM, ASM_OPERANDS, \
+ ASM_INPUT)->volatil)
+
+/* 1 if RTX is a mem that cannot trap. */
+#define MEM_NOTRAP_P(RTX) \
+ (RTL_FLAG_CHECK1 ("MEM_NOTRAP_P", (RTX), MEM)->call)
+
+/* The memory attribute block. We provide access macros for each value
+ in the block and provide defaults if none specified. */
+#define MEM_ATTRS(RTX) X0MEMATTR (RTX, 1)
+
+/* The register attribute block. We provide access macros for each value
+ in the block and provide defaults if none specified. */
+#define REG_ATTRS(RTX) (REG_CHECK (RTX)->attrs)
+
+#ifndef GENERATOR_FILE
+/* For a MEM rtx, the alias set. If 0, this MEM is not in any alias
+ set, and may alias anything. Otherwise, the MEM can only alias
+ MEMs in a conflicting alias set. This value is set in a
+ language-dependent manner in the front-end, and should not be
+ altered in the back-end. These set numbers are tested with
+ alias_sets_conflict_p. */
+#define MEM_ALIAS_SET(RTX) (get_mem_attrs (RTX)->alias)
+
+/* For a MEM rtx, the decl it is known to refer to, if it is known to
+ refer to part of a DECL. It may also be a COMPONENT_REF. */
+#define MEM_EXPR(RTX) (get_mem_attrs (RTX)->expr)
+
+/* For a MEM rtx, true if its MEM_OFFSET is known. */
+#define MEM_OFFSET_KNOWN_P(RTX) (get_mem_attrs (RTX)->offset_known_p)
+
+/* For a MEM rtx, the offset from the start of MEM_EXPR. */
+#define MEM_OFFSET(RTX) (get_mem_attrs (RTX)->offset)
+
+/* For a MEM rtx, the address space. */
+#define MEM_ADDR_SPACE(RTX) (get_mem_attrs (RTX)->addrspace)
+
+/* For a MEM rtx, true if its MEM_SIZE is known. */
+#define MEM_SIZE_KNOWN_P(RTX) (get_mem_attrs (RTX)->size_known_p)
+
+/* For a MEM rtx, the size in bytes of the MEM. */
+#define MEM_SIZE(RTX) (get_mem_attrs (RTX)->size)
+
+/* For a MEM rtx, the alignment in bits. We can use the alignment of the
+   mode as a default when STRICT_ALIGNMENT, but not otherwise.  */
+#define MEM_ALIGN(RTX) (get_mem_attrs (RTX)->align)
+#else
+#define MEM_ADDR_SPACE(RTX) ADDR_SPACE_GENERIC
+#endif
+
+/* For a REG rtx, the decl it is known to refer to, if it is known to
+ refer to part of a DECL. */
+#define REG_EXPR(RTX) (REG_ATTRS (RTX) == 0 ? 0 : REG_ATTRS (RTX)->decl)
+
+/* For a REG rtx, the offset from the start of REG_EXPR, if known, as a
+ HOST_WIDE_INT. */
+#define REG_OFFSET(RTX) (REG_ATTRS (RTX) == 0 ? 0 : REG_ATTRS (RTX)->offset)
+
+/* Copy the attributes that apply to memory locations from RHS to LHS. */
+#define MEM_COPY_ATTRIBUTES(LHS, RHS) \
+ (MEM_VOLATILE_P (LHS) = MEM_VOLATILE_P (RHS), \
+ MEM_NOTRAP_P (LHS) = MEM_NOTRAP_P (RHS), \
+ MEM_READONLY_P (LHS) = MEM_READONLY_P (RHS), \
+ MEM_KEEP_ALIAS_SET_P (LHS) = MEM_KEEP_ALIAS_SET_P (RHS), \
+ MEM_POINTER (LHS) = MEM_POINTER (RHS), \
+ MEM_ATTRS (LHS) = MEM_ATTRS (RHS))
+
+/* 1 if RTX is a label_ref for a nonlocal label. */
+/* Likewise in an expr_list for a REG_LABEL_OPERAND or
+ REG_LABEL_TARGET note. */
+#define LABEL_REF_NONLOCAL_P(RTX) \
+ (RTL_FLAG_CHECK1 ("LABEL_REF_NONLOCAL_P", (RTX), LABEL_REF)->volatil)
+
+/* 1 if RTX is a code_label that should always be considered to be needed. */
+#define LABEL_PRESERVE_P(RTX) \
+ (RTL_FLAG_CHECK2 ("LABEL_PRESERVE_P", (RTX), CODE_LABEL, NOTE)->in_struct)
+
+/* During sched, 1 if RTX is an insn that must be scheduled together
+ with the preceding insn. */
+#define SCHED_GROUP_P(RTX) \
+ (RTL_FLAG_CHECK4 ("SCHED_GROUP_P", (RTX), DEBUG_INSN, INSN, \
+ JUMP_INSN, CALL_INSN)->in_struct)
+
+/* For a SET rtx, SET_DEST is the place that is set
+ and SET_SRC is the value it is set to. */
+#define SET_DEST(RTX) XC2EXP (RTX, 0, SET, CLOBBER)
+#define SET_SRC(RTX) XCEXP (RTX, 1, SET)
+#define SET_IS_RETURN_P(RTX) \
+ (RTL_FLAG_CHECK1 ("SET_IS_RETURN_P", (RTX), SET)->jump)
+
+/* For a TRAP_IF rtx, TRAP_CONDITION is an expression. */
+#define TRAP_CONDITION(RTX) XCEXP (RTX, 0, TRAP_IF)
+#define TRAP_CODE(RTX) XCEXP (RTX, 1, TRAP_IF)
+
+/* For a COND_EXEC rtx, COND_EXEC_TEST is the condition to base
+ conditionally executing the code on, COND_EXEC_CODE is the code
+ to execute if the condition is true. */
+#define COND_EXEC_TEST(RTX) XCEXP (RTX, 0, COND_EXEC)
+#define COND_EXEC_CODE(RTX) XCEXP (RTX, 1, COND_EXEC)
+
+/* 1 if RTX is a symbol_ref that addresses this function's rtl
+ constants pool. */
+#define CONSTANT_POOL_ADDRESS_P(RTX) \
+ (RTL_FLAG_CHECK1 ("CONSTANT_POOL_ADDRESS_P", (RTX), SYMBOL_REF)->unchanging)
+
+/* 1 if RTX is a symbol_ref that addresses a value in the file's
+ tree constant pool. This information is private to varasm.cc. */
+#define TREE_CONSTANT_POOL_ADDRESS_P(RTX) \
+ (RTL_FLAG_CHECK1 ("TREE_CONSTANT_POOL_ADDRESS_P", \
+ (RTX), SYMBOL_REF)->frame_related)
+
+/* Used if RTX is a symbol_ref, for machine-specific purposes. */
+#define SYMBOL_REF_FLAG(RTX) \
+ (RTL_FLAG_CHECK1 ("SYMBOL_REF_FLAG", (RTX), SYMBOL_REF)->volatil)
+
+/* 1 if RTX is a symbol_ref that has been the library function in
+ emit_library_call. */
+#define SYMBOL_REF_USED(RTX) \
+ (RTL_FLAG_CHECK1 ("SYMBOL_REF_USED", (RTX), SYMBOL_REF)->used)
+
+/* 1 if RTX is a symbol_ref for a weak symbol. */
+#define SYMBOL_REF_WEAK(RTX) \
+ (RTL_FLAG_CHECK1 ("SYMBOL_REF_WEAK", (RTX), SYMBOL_REF)->return_val)
+
+/* A pointer attached to the SYMBOL_REF; either SYMBOL_REF_DECL or
+ SYMBOL_REF_CONSTANT. */
+#define SYMBOL_REF_DATA(RTX) X0ANY ((RTX), 1)
+
+/* Set RTX's SYMBOL_REF_DECL to DECL. RTX must not be a constant
+ pool symbol. */
+#define SET_SYMBOL_REF_DECL(RTX, DECL) \
+ (gcc_assert (!CONSTANT_POOL_ADDRESS_P (RTX)), X0TREE ((RTX), 1) = (DECL))
+
+/* The tree (decl or constant) associated with the symbol, or null. */
+#define SYMBOL_REF_DECL(RTX) \
+ (CONSTANT_POOL_ADDRESS_P (RTX) ? NULL : X0TREE ((RTX), 1))
+
+/* Set RTX's SYMBOL_REF_CONSTANT to C. RTX must be a constant pool symbol. */
+#define SET_SYMBOL_REF_CONSTANT(RTX, C) \
+ (gcc_assert (CONSTANT_POOL_ADDRESS_P (RTX)), X0CONSTANT ((RTX), 1) = (C))
+
+/* The rtx constant pool entry for a symbol, or null. */
+#define SYMBOL_REF_CONSTANT(RTX) \
+ (CONSTANT_POOL_ADDRESS_P (RTX) ? X0CONSTANT ((RTX), 1) : NULL)
+
+/* A set of flags on a symbol_ref that are, in some respects, redundant with
+ information derivable from the tree decl associated with this symbol.
+ Except that we build a *lot* of SYMBOL_REFs that aren't associated with a
+ decl. In some cases this is a bug. But beyond that, it's nice to cache
+ this information to avoid recomputing it. Finally, this allows space for
+ the target to store more than one bit of information, as with
+ SYMBOL_REF_FLAG. */
+#define SYMBOL_REF_FLAGS(RTX) \
+ (RTL_FLAG_CHECK1 ("SYMBOL_REF_FLAGS", (RTX), SYMBOL_REF) \
+ ->u2.symbol_ref_flags)
+
+/* These flags are common enough to be defined for all targets. They
+ are computed by the default version of targetm.encode_section_info. */
+
+/* Set if this symbol is a function. */
+#define SYMBOL_FLAG_FUNCTION (1 << 0)
+#define SYMBOL_REF_FUNCTION_P(RTX) \
+ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_FUNCTION) != 0)
+/* Set if targetm.binds_local_p is true. */
+#define SYMBOL_FLAG_LOCAL (1 << 1)
+#define SYMBOL_REF_LOCAL_P(RTX) \
+ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_LOCAL) != 0)
+/* Set if targetm.in_small_data_p is true. */
+#define SYMBOL_FLAG_SMALL (1 << 2)
+#define SYMBOL_REF_SMALL_P(RTX) \
+ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_SMALL) != 0)
+/* The three-bit field at [5:3] is nonzero for TLS variables; use
+ SYMBOL_REF_TLS_MODEL to extract the field as an enum tls_model. */
+#define SYMBOL_FLAG_TLS_SHIFT 3
+#define SYMBOL_REF_TLS_MODEL(RTX) \
+ ((enum tls_model) ((SYMBOL_REF_FLAGS (RTX) >> SYMBOL_FLAG_TLS_SHIFT) & 7))
+/* Set if this symbol is not defined in this translation unit. */
+#define SYMBOL_FLAG_EXTERNAL (1 << 6)
+#define SYMBOL_REF_EXTERNAL_P(RTX) \
+ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_EXTERNAL) != 0)
+/* Set if this symbol has a block_symbol structure associated with it. */
+#define SYMBOL_FLAG_HAS_BLOCK_INFO (1 << 7)
+#define SYMBOL_REF_HAS_BLOCK_INFO_P(RTX) \
+ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_HAS_BLOCK_INFO) != 0)
+/* Set if this symbol is a section anchor. SYMBOL_REF_ANCHOR_P implies
+ SYMBOL_REF_HAS_BLOCK_INFO_P. */
+#define SYMBOL_FLAG_ANCHOR (1 << 8)
+#define SYMBOL_REF_ANCHOR_P(RTX) \
+ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_ANCHOR) != 0)
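+
+/* Usage sketch (illustrative only; TLS_MODEL_NONE comes from
+   coretypes.h): deciding whether SYM is a locally-bound, non-TLS
+   function symbol:
+
+     if (SYMBOL_REF_FUNCTION_P (sym)
+         && SYMBOL_REF_LOCAL_P (sym)
+         && SYMBOL_REF_TLS_MODEL (sym) == TLS_MODEL_NONE)
+       ... a cheaper local call sequence may be possible ...  */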
+
+/* Subsequent bits are available for the target to use. */
+#define SYMBOL_FLAG_MACH_DEP_SHIFT 9
+#define SYMBOL_FLAG_MACH_DEP (1 << SYMBOL_FLAG_MACH_DEP_SHIFT)
+
+/* If SYMBOL_REF_HAS_BLOCK_INFO_P (RTX), this is the object_block
+ structure to which the symbol belongs, or NULL if it has not been
+ assigned a block. */
+#define SYMBOL_REF_BLOCK(RTX) (BLOCK_SYMBOL_CHECK (RTX)->block)
+
+/* If SYMBOL_REF_HAS_BLOCK_INFO_P (RTX), this is the offset of RTX from
+ the first object in SYMBOL_REF_BLOCK (RTX). The value is negative if
+ RTX has not yet been assigned to a block, or it has not been given an
+ offset within that block. */
+#define SYMBOL_REF_BLOCK_OFFSET(RTX) (BLOCK_SYMBOL_CHECK (RTX)->offset)
+
+/* True if RTX is flagged to be a scheduling barrier. */
+#define PREFETCH_SCHEDULE_BARRIER_P(RTX) \
+ (RTL_FLAG_CHECK1 ("PREFETCH_SCHEDULE_BARRIER_P", (RTX), PREFETCH)->volatil)
+
+/* Indicate whether the machine has any sort of auto increment addressing.
+ If not, we can avoid checking for REG_INC notes. */
+
+#if (defined (HAVE_PRE_INCREMENT) || defined (HAVE_PRE_DECREMENT) \
+ || defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT) \
+ || defined (HAVE_PRE_MODIFY_DISP) || defined (HAVE_POST_MODIFY_DISP) \
+ || defined (HAVE_PRE_MODIFY_REG) || defined (HAVE_POST_MODIFY_REG))
+#define AUTO_INC_DEC 1
+#else
+#define AUTO_INC_DEC 0
+#endif
+
+/* Define a macro to look for REG_INC notes,
+ but save time on machines where they never exist. */
+
+#if AUTO_INC_DEC
+#define FIND_REG_INC_NOTE(INSN, REG) \
+ ((REG) != NULL_RTX && REG_P ((REG)) \
+ ? find_regno_note ((INSN), REG_INC, REGNO (REG)) \
+ : find_reg_note ((INSN), REG_INC, (REG)))
+#else
+#define FIND_REG_INC_NOTE(INSN, REG) 0
+#endif
+
+#ifndef HAVE_PRE_INCREMENT
+#define HAVE_PRE_INCREMENT 0
+#endif
+
+#ifndef HAVE_PRE_DECREMENT
+#define HAVE_PRE_DECREMENT 0
+#endif
+
+#ifndef HAVE_POST_INCREMENT
+#define HAVE_POST_INCREMENT 0
+#endif
+
+#ifndef HAVE_POST_DECREMENT
+#define HAVE_POST_DECREMENT 0
+#endif
+
+#ifndef HAVE_POST_MODIFY_DISP
+#define HAVE_POST_MODIFY_DISP 0
+#endif
+
+#ifndef HAVE_POST_MODIFY_REG
+#define HAVE_POST_MODIFY_REG 0
+#endif
+
+#ifndef HAVE_PRE_MODIFY_DISP
+#define HAVE_PRE_MODIFY_DISP 0
+#endif
+
+#ifndef HAVE_PRE_MODIFY_REG
+#define HAVE_PRE_MODIFY_REG 0
+#endif
+
+
+/* Some architectures do not have complete pre/post increment/decrement
+ instruction sets, or only move some modes efficiently. These macros
+ allow us to tune autoincrement generation. */
+
+#ifndef USE_LOAD_POST_INCREMENT
+#define USE_LOAD_POST_INCREMENT(MODE) HAVE_POST_INCREMENT
+#endif
+
+#ifndef USE_LOAD_POST_DECREMENT
+#define USE_LOAD_POST_DECREMENT(MODE) HAVE_POST_DECREMENT
+#endif
+
+#ifndef USE_LOAD_PRE_INCREMENT
+#define USE_LOAD_PRE_INCREMENT(MODE) HAVE_PRE_INCREMENT
+#endif
+
+#ifndef USE_LOAD_PRE_DECREMENT
+#define USE_LOAD_PRE_DECREMENT(MODE) HAVE_PRE_DECREMENT
+#endif
+
+#ifndef USE_STORE_POST_INCREMENT
+#define USE_STORE_POST_INCREMENT(MODE) HAVE_POST_INCREMENT
+#endif
+
+#ifndef USE_STORE_POST_DECREMENT
+#define USE_STORE_POST_DECREMENT(MODE) HAVE_POST_DECREMENT
+#endif
+
+#ifndef USE_STORE_PRE_INCREMENT
+#define USE_STORE_PRE_INCREMENT(MODE) HAVE_PRE_INCREMENT
+#endif
+
+#ifndef USE_STORE_PRE_DECREMENT
+#define USE_STORE_PRE_DECREMENT(MODE) HAVE_PRE_DECREMENT
+#endif
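+
+/* Target sketch (editorial illustration): a hypothetical port whose
+   post-increment loads are only fast for word-sized accesses could
+   override the default in its target header with
+
+     #define USE_LOAD_POST_INCREMENT(MODE) ((MODE) == SImode)  */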
+
+/* Nonzero when we are generating CONCATs. */
+extern int generating_concat_p;
+
+/* Nonzero when we are expanding trees to RTL. */
+extern int currently_expanding_to_rtl;
+
+/* Generally useful functions. */
+
+#ifndef GENERATOR_FILE
+/* Return the cost of SET X. SPEED_P is true if optimizing for speed
+ rather than size. */
+
+inline int
+set_rtx_cost (rtx x, bool speed_p)
+{
+ return rtx_cost (x, VOIDmode, INSN, 4, speed_p);
+}
+
+/* Like set_rtx_cost, but return both the speed and size costs in C. */
+
+inline void
+get_full_set_rtx_cost (rtx x, struct full_rtx_costs *c)
+{
+ get_full_rtx_cost (x, VOIDmode, INSN, 4, c);
+}
+
+/* Return the cost of moving X into a register, relative to the cost
+ of a register move. SPEED_P is true if optimizing for speed rather
+ than size. */
+
+inline int
+set_src_cost (rtx x, machine_mode mode, bool speed_p)
+{
+ return rtx_cost (x, mode, SET, 1, speed_p);
+}
+
+/* Like set_src_cost, but return both the speed and size costs in C. */
+
+inline void
+get_full_set_src_cost (rtx x, machine_mode mode, struct full_rtx_costs *c)
+{
+ get_full_rtx_cost (x, mode, SET, 1, c);
+}
+#endif
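+
+/* Usage sketch (editorial illustration): rejecting a candidate whose
+   source is costlier than one plain instruction when optimizing for
+   speed.  PAT is assumed to be a SET rtx.
+
+     bool speed_p = optimize_insn_for_speed_p ();
+     if (set_src_cost (SET_SRC (pat), GET_MODE (SET_DEST (pat)), speed_p)
+         > COSTS_N_INSNS (1))
+       return false;  */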
+
+/* A convenience macro to validate the arguments of a zero_extract
+ expression. It determines whether SIZE lies inclusively within
+ [1, RANGE], POS lies inclusively within [0, RANGE - 1],
+ and the sum lies inclusively within [1, RANGE]. RANGE must be
+ >= 1, but SIZE and POS may be negative. */
+#define EXTRACT_ARGS_IN_RANGE(SIZE, POS, RANGE) \
+ (IN_RANGE ((POS), 0, (unsigned HOST_WIDE_INT) (RANGE) - 1) \
+ && IN_RANGE ((SIZE), 1, (unsigned HOST_WIDE_INT) (RANGE) \
+ - (unsigned HOST_WIDE_INT)(POS)))
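+
+/* Usage sketch (editorial illustration): validating a zero_extract of
+   SIZE bits starting at bit POS from a 32-bit value; the macro rejects
+   e.g. SIZE == 0 or POS + SIZE > 32.
+
+     if (!EXTRACT_ARGS_IN_RANGE (size, pos, 32))
+       return NULL_RTX;  */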
+
+/* In explow.cc */
+extern HOST_WIDE_INT trunc_int_for_mode (HOST_WIDE_INT, machine_mode);
+extern poly_int64 trunc_int_for_mode (poly_int64, machine_mode);
+extern rtx plus_constant (machine_mode, rtx, poly_int64, bool = false);
+extern HOST_WIDE_INT get_stack_check_protect (void);
+
+/* In rtl.cc */
+extern rtx rtx_alloc (RTX_CODE CXX_MEM_STAT_INFO);
+inline rtx
+rtx_init (rtx rt, RTX_CODE code)
+{
+ memset (rt, 0, RTX_HDR_SIZE);
+ PUT_CODE (rt, code);
+ return rt;
+}
+#define rtx_alloca(code) \
+ rtx_init ((rtx) alloca (RTX_CODE_SIZE ((code))), (code))
+extern rtx rtx_alloc_stat_v (RTX_CODE MEM_STAT_DECL, int);
+#define rtx_alloc_v(c, SZ) rtx_alloc_stat_v (c MEM_STAT_INFO, SZ)
+#define const_wide_int_alloc(NWORDS) \
+ rtx_alloc_v (CONST_WIDE_INT, \
+ (sizeof (struct hwivec_def) \
+ + ((NWORDS)-1) * sizeof (HOST_WIDE_INT)))
+
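+/* Usage sketch (editorial illustration): rtx_alloca builds a
+   short-lived rtx on the caller's stack frame, so the result must not
+   escape the calling function; alloca_raw_REG, declared further below,
+   builds a REG the same way.
+
+     rtx x = rtx_alloca (CLOBBER);
+     XEXP (x, 0) = reg;  */
+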
+extern rtvec rtvec_alloc (size_t);
+extern rtvec shallow_copy_rtvec (rtvec);
+extern bool shared_const_p (const_rtx);
+extern rtx copy_rtx (rtx);
+extern enum rtx_code classify_insn (rtx);
+extern void dump_rtx_statistics (void);
+
+/* In emit-rtl.cc */
+extern rtx copy_rtx_if_shared (rtx);
+
+/* In rtl.cc */
+extern unsigned int rtx_size (const_rtx);
+extern rtx shallow_copy_rtx (const_rtx CXX_MEM_STAT_INFO);
+extern int rtx_equal_p (const_rtx, const_rtx);
+extern bool rtvec_all_equal_p (const_rtvec);
+extern bool rtvec_series_p (rtvec, int);
+
+/* Return true if X is a vector constant with a duplicated element value. */
+
+inline bool
+const_vec_duplicate_p (const_rtx x)
+{
+ return (GET_CODE (x) == CONST_VECTOR
+ && CONST_VECTOR_NPATTERNS (x) == 1
+ && CONST_VECTOR_DUPLICATE_P (x));
+}
+
+/* Return true if X is a vector constant with a duplicated element value.
+ Store the duplicated element in *ELT if so. */
+
+template <typename T>
+inline bool
+const_vec_duplicate_p (T x, T *elt)
+{
+ if (const_vec_duplicate_p (x))
+ {
+ *elt = CONST_VECTOR_ENCODED_ELT (x, 0);
+ return true;
+ }
+ return false;
+}
+
+/* Return true if X is a vector with a duplicated element value, either
+ constant or nonconstant. Store the duplicated element in *ELT if so. */
+
+template <typename T>
+inline bool
+vec_duplicate_p (T x, T *elt)
+{
+ if (GET_CODE (x) == VEC_DUPLICATE
+ && !VECTOR_MODE_P (GET_MODE (XEXP (x, 0))))
+ {
+ *elt = XEXP (x, 0);
+ return true;
+ }
+ return const_vec_duplicate_p (x, elt);
+}
+
+/* If X is a vector constant with a duplicated element value, return that
+ element value, otherwise return X. */
+
+template <typename T>
+inline T
+unwrap_const_vec_duplicate (T x)
+{
+ if (const_vec_duplicate_p (x))
+ x = CONST_VECTOR_ELT (x, 0);
+ return x;
+}
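+
+/* Usage sketch (editorial illustration): folding a vector operation
+   down to a scalar one when an operand is a splat.
+
+     rtx elt;
+     if (vec_duplicate_p (op, &elt))
+       op = elt;  */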
+
+/* In emit-rtl.cc. */
+extern wide_int const_vector_int_elt (const_rtx, unsigned int);
+extern rtx const_vector_elt (const_rtx, unsigned int);
+extern bool const_vec_series_p_1 (const_rtx, rtx *, rtx *);
+
+/* Return true if X is an integer constant vector that contains a linear
+ series of the form:
+
+ { B, B + S, B + 2 * S, B + 3 * S, ... }
+
+ for a nonzero S. Store B and S in *BASE_OUT and *STEP_OUT on success. */
+
+inline bool
+const_vec_series_p (const_rtx x, rtx *base_out, rtx *step_out)
+{
+ if (GET_CODE (x) == CONST_VECTOR
+ && CONST_VECTOR_NPATTERNS (x) == 1
+ && !CONST_VECTOR_DUPLICATE_P (x))
+ return const_vec_series_p_1 (x, base_out, step_out);
+ return false;
+}
+
+/* Return true if X is a vector that contains a linear series of the
+ form:
+
+ { B, B + S, B + 2 * S, B + 3 * S, ... }
+
+ where B and S are constant or nonconstant. Store B and S in
+ *BASE_OUT and *STEP_OUT on success. */
+
+inline bool
+vec_series_p (const_rtx x, rtx *base_out, rtx *step_out)
+{
+ if (GET_CODE (x) == VEC_SERIES)
+ {
+ *base_out = XEXP (x, 0);
+ *step_out = XEXP (x, 1);
+ return true;
+ }
+ return const_vec_series_p (x, base_out, step_out);
+}
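+
+/* Worked example (editorial illustration): for an integer constant
+   vector holding the series { 0, 4, 8, 12, ... }, const_vec_series_p
+   stores (const_int 0) in *BASE_OUT and (const_int 4) in *STEP_OUT
+   and returns true.  */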
+
+/* Return true if CONST_VECTORs X and Y, which are known to have the same mode,
+ also have the same encoding. This means that they are equal whenever their
+ operands are equal. */
+
+inline bool
+same_vector_encodings_p (const_rtx x, const_rtx y)
+{
+ /* Don't be fussy about the encoding of constant-length vectors,
+ since XVECEXP (X, 0) and XVECEXP (Y, 0) list all the elements anyway. */
+ if (poly_uint64 (CONST_VECTOR_NUNITS (x)).is_constant ())
+ return true;
+
+ return (CONST_VECTOR_NPATTERNS (x) == CONST_VECTOR_NPATTERNS (y)
+ && (CONST_VECTOR_NELTS_PER_PATTERN (x)
+ == CONST_VECTOR_NELTS_PER_PATTERN (y)));
+}
+
+/* Return the unpromoted (outer) mode of SUBREG_PROMOTED_VAR_P subreg X. */
+
+inline scalar_int_mode
+subreg_unpromoted_mode (rtx x)
+{
+ gcc_checking_assert (SUBREG_PROMOTED_VAR_P (x));
+ return as_a <scalar_int_mode> (GET_MODE (x));
+}
+
+/* Return the promoted (inner) mode of SUBREG_PROMOTED_VAR_P subreg X. */
+
+inline scalar_int_mode
+subreg_promoted_mode (rtx x)
+{
+ gcc_checking_assert (SUBREG_PROMOTED_VAR_P (x));
+ return as_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)));
+}
+
+/* In emit-rtl.cc */
+extern rtvec gen_rtvec_v (int, rtx *);
+extern rtvec gen_rtvec_v (int, rtx_insn **);
+extern rtx gen_reg_rtx (machine_mode);
+extern rtx gen_rtx_REG_offset (rtx, machine_mode, unsigned int, poly_int64);
+extern rtx gen_reg_rtx_offset (rtx, machine_mode, int);
+extern rtx gen_reg_rtx_and_attrs (rtx);
+extern rtx_code_label *gen_label_rtx (void);
+extern rtx gen_lowpart_common (machine_mode, rtx);
+
+/* In cse.cc */
+extern rtx gen_lowpart_if_possible (machine_mode, rtx);
+
+/* In emit-rtl.cc */
+extern rtx gen_highpart (machine_mode, rtx);
+extern rtx gen_highpart_mode (machine_mode, machine_mode, rtx);
+extern rtx operand_subword (rtx, poly_uint64, int, machine_mode);
+
+/* In emit-rtl.cc */
+extern rtx operand_subword_force (rtx, poly_uint64, machine_mode);
+extern int subreg_lowpart_p (const_rtx);
+extern poly_uint64 subreg_size_lowpart_offset (poly_uint64, poly_uint64);
+
+/* Return true if a subreg of mode OUTERMODE would only access part of
+ an inner register with mode INNERMODE. The other bits of the inner
+ register would then be "don't care" on read. The behavior for writes
+ depends on REGMODE_NATURAL_SIZE; bits in the same REGMODE_NATURAL_SIZE-sized
+ chunk would be clobbered but other bits would be preserved. */
+
+inline bool
+partial_subreg_p (machine_mode outermode, machine_mode innermode)
+{
+ /* Modes involved in a subreg must be ordered. In particular, we must
+ always know at compile time whether the subreg is paradoxical. */
+ poly_int64 outer_prec = GET_MODE_PRECISION (outermode);
+ poly_int64 inner_prec = GET_MODE_PRECISION (innermode);
+ gcc_checking_assert (ordered_p (outer_prec, inner_prec));
+ return maybe_lt (outer_prec, inner_prec);
+}
+
+/* Likewise return true if X is a subreg that is smaller than the inner
+ register. Use read_modify_subreg_p to test whether writing to such
+ a subreg preserves any part of the inner register. */
+
+inline bool
+partial_subreg_p (const_rtx x)
+{
+ if (GET_CODE (x) != SUBREG)
+ return false;
+ return partial_subreg_p (GET_MODE (x), GET_MODE (SUBREG_REG (x)));
+}
+
+/* Return true if a subreg with the given outer and inner modes is
+ paradoxical. */
+
+inline bool
+paradoxical_subreg_p (machine_mode outermode, machine_mode innermode)
+{
+ /* Modes involved in a subreg must be ordered. In particular, we must
+ always know at compile time whether the subreg is paradoxical. */
+ poly_int64 outer_prec = GET_MODE_PRECISION (outermode);
+ poly_int64 inner_prec = GET_MODE_PRECISION (innermode);
+ gcc_checking_assert (ordered_p (outer_prec, inner_prec));
+ return maybe_gt (outer_prec, inner_prec);
+}
+
+/* Return true if X is a paradoxical subreg, false otherwise. */
+
+inline bool
+paradoxical_subreg_p (const_rtx x)
+{
+ if (GET_CODE (x) != SUBREG)
+ return false;
+ return paradoxical_subreg_p (GET_MODE (x), GET_MODE (SUBREG_REG (x)));
+}
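+
+/* Worked example (editorial illustration): given 32-bit SImode,
+   (subreg:HI (reg:SI x) 0) is a partial subreg (16 < 32 bits), while
+   (subreg:DI (reg:SI x) 0) is paradoxical (64 > 32 bits); the high
+   bits of the paradoxical subreg are undefined on read.  */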
+
+/* Return the SUBREG_BYTE for an OUTERMODE lowpart of an INNERMODE value. */
+
+inline poly_uint64
+subreg_lowpart_offset (machine_mode outermode, machine_mode innermode)
+{
+ return subreg_size_lowpart_offset (GET_MODE_SIZE (outermode),
+ GET_MODE_SIZE (innermode));
+}
+
+/* Given that a subreg has outer mode OUTERMODE and inner mode INNERMODE,
+ return the smaller of the two modes if they are different sizes,
+ otherwise return the outer mode. */
+
+inline machine_mode
+narrower_subreg_mode (machine_mode outermode, machine_mode innermode)
+{
+ return paradoxical_subreg_p (outermode, innermode) ? innermode : outermode;
+}
+
+/* Given that a subreg has outer mode OUTERMODE and inner mode INNERMODE,
+ return the mode that is big enough to hold both the outer and inner
+ values. Prefer the outer mode in the event of a tie. */
+
+inline machine_mode
+wider_subreg_mode (machine_mode outermode, machine_mode innermode)
+{
+ return partial_subreg_p (outermode, innermode) ? innermode : outermode;
+}
+
+/* Likewise for subreg X. */
+
+inline machine_mode
+wider_subreg_mode (const_rtx x)
+{
+ return wider_subreg_mode (GET_MODE (x), GET_MODE (SUBREG_REG (x)));
+}
+
+extern poly_uint64 subreg_size_highpart_offset (poly_uint64, poly_uint64);
+
+/* Return the SUBREG_BYTE for an OUTERMODE highpart of an INNERMODE value. */
+
+inline poly_uint64
+subreg_highpart_offset (machine_mode outermode, machine_mode innermode)
+{
+ return subreg_size_highpart_offset (GET_MODE_SIZE (outermode),
+ GET_MODE_SIZE (innermode));
+}
+
+extern poly_int64 byte_lowpart_offset (machine_mode, machine_mode);
+extern poly_int64 subreg_memory_offset (machine_mode, machine_mode,
+ poly_uint64);
+extern poly_int64 subreg_memory_offset (const_rtx);
+extern rtx make_safe_from (rtx, rtx);
+extern rtx convert_memory_address_addr_space_1 (scalar_int_mode, rtx,
+ addr_space_t, bool, bool);
+extern rtx convert_memory_address_addr_space (scalar_int_mode, rtx,
+ addr_space_t);
+#define convert_memory_address(to_mode,x) \
+ convert_memory_address_addr_space ((to_mode), (x), ADDR_SPACE_GENERIC)
+extern const char *get_insn_name (int);
+extern rtx_insn *get_last_insn_anywhere (void);
+extern rtx_insn *get_first_nonnote_insn (void);
+extern rtx_insn *get_last_nonnote_insn (void);
+extern void start_sequence (void);
+extern void push_to_sequence (rtx_insn *);
+extern void push_to_sequence2 (rtx_insn *, rtx_insn *);
+extern void end_sequence (void);
+#if TARGET_SUPPORTS_WIDE_INT == 0
+extern double_int rtx_to_double_int (const_rtx);
+#endif
+extern void cwi_output_hex (FILE *, const_rtx);
+#if TARGET_SUPPORTS_WIDE_INT == 0
+extern rtx immed_double_const (HOST_WIDE_INT, HOST_WIDE_INT,
+ machine_mode);
+#endif
+extern rtx immed_wide_int_const (const poly_wide_int_ref &, machine_mode);
+
+/* In varasm.cc */
+extern rtx force_const_mem (machine_mode, rtx);
+
+/* In varasm.cc */
+
+struct function;
+extern rtx get_pool_constant (const_rtx);
+extern rtx get_pool_constant_mark (rtx, bool *);
+extern fixed_size_mode get_pool_mode (const_rtx);
+extern rtx simplify_subtraction (rtx);
+extern void decide_function_section (tree);
+
+/* In emit-rtl.cc */
+extern rtx_insn *emit_insn_before (rtx, rtx_insn *);
+extern rtx_insn *emit_insn_before_noloc (rtx, rtx_insn *, basic_block);
+extern rtx_insn *emit_insn_before_setloc (rtx, rtx_insn *, location_t);
+extern rtx_jump_insn *emit_jump_insn_before (rtx, rtx_insn *);
+extern rtx_jump_insn *emit_jump_insn_before_noloc (rtx, rtx_insn *);
+extern rtx_jump_insn *emit_jump_insn_before_setloc (rtx, rtx_insn *,
+ location_t);
+extern rtx_insn *emit_call_insn_before (rtx, rtx_insn *);
+extern rtx_insn *emit_call_insn_before_noloc (rtx, rtx_insn *);
+extern rtx_insn *emit_call_insn_before_setloc (rtx, rtx_insn *, location_t);
+extern rtx_insn *emit_debug_insn_before (rtx, rtx_insn *);
+extern rtx_insn *emit_debug_insn_before_noloc (rtx, rtx_insn *);
+extern rtx_insn *emit_debug_insn_before_setloc (rtx, rtx_insn *, location_t);
+extern rtx_barrier *emit_barrier_before (rtx_insn *);
+extern rtx_code_label *emit_label_before (rtx_code_label *, rtx_insn *);
+extern rtx_note *emit_note_before (enum insn_note, rtx_insn *);
+extern rtx_insn *emit_insn_after (rtx, rtx_insn *);
+extern rtx_insn *emit_insn_after_noloc (rtx, rtx_insn *, basic_block);
+extern rtx_insn *emit_insn_after_setloc (rtx, rtx_insn *, location_t);
+extern rtx_jump_insn *emit_jump_insn_after (rtx, rtx_insn *);
+extern rtx_jump_insn *emit_jump_insn_after_noloc (rtx, rtx_insn *);
+extern rtx_jump_insn *emit_jump_insn_after_setloc (rtx, rtx_insn *, location_t);
+extern rtx_insn *emit_call_insn_after (rtx, rtx_insn *);
+extern rtx_insn *emit_call_insn_after_noloc (rtx, rtx_insn *);
+extern rtx_insn *emit_call_insn_after_setloc (rtx, rtx_insn *, location_t);
+extern rtx_insn *emit_debug_insn_after (rtx, rtx_insn *);
+extern rtx_insn *emit_debug_insn_after_noloc (rtx, rtx_insn *);
+extern rtx_insn *emit_debug_insn_after_setloc (rtx, rtx_insn *, location_t);
+extern rtx_barrier *emit_barrier_after (rtx_insn *);
+extern rtx_insn *emit_label_after (rtx_insn *, rtx_insn *);
+extern rtx_note *emit_note_after (enum insn_note, rtx_insn *);
+extern rtx_insn *emit_insn (rtx);
+extern rtx_insn *emit_debug_insn (rtx);
+extern rtx_insn *emit_jump_insn (rtx);
+extern rtx_insn *emit_call_insn (rtx);
+extern rtx_code_label *emit_label (rtx);
+extern rtx_jump_table_data *emit_jump_table_data (rtx);
+extern rtx_barrier *emit_barrier (void);
+extern rtx_note *emit_note (enum insn_note);
+extern rtx_note *emit_note_copy (rtx_note *);
+extern rtx_insn *gen_clobber (rtx);
+extern rtx_insn *emit_clobber (rtx);
+extern rtx_insn *gen_use (rtx);
+extern rtx_insn *emit_use (rtx);
+extern rtx_insn *make_insn_raw (rtx);
+extern void add_function_usage_to (rtx, rtx);
+extern rtx_call_insn *last_call_insn (void);
+extern rtx_insn *previous_insn (rtx_insn *);
+extern rtx_insn *next_insn (rtx_insn *);
+extern rtx_insn *prev_nonnote_insn (rtx_insn *);
+extern rtx_insn *next_nonnote_insn (rtx_insn *);
+extern rtx_insn *prev_nondebug_insn (rtx_insn *);
+extern rtx_insn *next_nondebug_insn (rtx_insn *);
+extern rtx_insn *prev_nonnote_nondebug_insn (rtx_insn *);
+extern rtx_insn *prev_nonnote_nondebug_insn_bb (rtx_insn *);
+extern rtx_insn *next_nonnote_nondebug_insn (rtx_insn *);
+extern rtx_insn *next_nonnote_nondebug_insn_bb (rtx_insn *);
+extern rtx_insn *prev_real_insn (rtx_insn *);
+extern rtx_insn *next_real_insn (rtx_insn *);
+extern rtx_insn *prev_real_nondebug_insn (rtx_insn *);
+extern rtx_insn *next_real_nondebug_insn (rtx);
+extern rtx_insn *prev_active_insn (rtx_insn *);
+extern rtx_insn *next_active_insn (rtx_insn *);
+extern int active_insn_p (const rtx_insn *);
+
+/* In emit-rtl.cc */
+extern int insn_line (const rtx_insn *);
+extern const char * insn_file (const rtx_insn *);
+extern tree insn_scope (const rtx_insn *);
+extern expanded_location insn_location (const rtx_insn *);
+extern int insn_discriminator (const rtx_insn *);
+extern location_t prologue_location, epilogue_location;
+
+/* In jump.cc */
+extern enum rtx_code reverse_condition (enum rtx_code);
+extern enum rtx_code reverse_condition_maybe_unordered (enum rtx_code);
+extern enum rtx_code swap_condition (enum rtx_code);
+extern enum rtx_code unsigned_condition (enum rtx_code);
+extern enum rtx_code signed_condition (enum rtx_code);
+extern void mark_jump_label (rtx, rtx_insn *, int);
+
+/* Return true if integer comparison operator CODE interprets its operands
+ as unsigned. */
+
+inline bool
+unsigned_condition_p (enum rtx_code code)
+{
+ return unsigned_condition (code) == code;
+}
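+
+/* E.g. unsigned_condition_p (LTU) is true, while
+   unsigned_condition_p (LT) is false, because unsigned_condition
+   maps LT to LTU.  (Editorial illustration.)  */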
+
+/* In jump.cc */
+extern rtx_insn *delete_related_insns (rtx);
+
+/* In recog.cc */
+extern rtx *find_constant_term_loc (rtx *);
+
+/* In emit-rtl.cc */
+extern rtx_insn *try_split (rtx, rtx_insn *, int);
+
+/* In insn-recog.cc (generated by genrecog). */
+extern rtx_insn *split_insns (rtx, rtx_insn *);
+
+/* In simplify-rtx.cc */
+
+/* A class that records the context in which a simplification
+ is being made. */
+class simplify_context
+{
+public:
+ rtx simplify_unary_operation (rtx_code, machine_mode, rtx, machine_mode);
+ rtx simplify_binary_operation (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_ternary_operation (rtx_code, machine_mode, machine_mode,
+ rtx, rtx, rtx);
+ rtx simplify_relational_operation (rtx_code, machine_mode, machine_mode,
+ rtx, rtx);
+ rtx simplify_subreg (machine_mode, rtx, machine_mode, poly_uint64);
+
+ rtx lowpart_subreg (machine_mode, rtx, machine_mode);
+
+ rtx simplify_merge_mask (rtx, rtx, int);
+
+ rtx simplify_gen_unary (rtx_code, machine_mode, rtx, machine_mode);
+ rtx simplify_gen_binary (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_gen_ternary (rtx_code, machine_mode, machine_mode,
+ rtx, rtx, rtx);
+ rtx simplify_gen_relational (rtx_code, machine_mode, machine_mode, rtx, rtx);
+ rtx simplify_gen_subreg (machine_mode, rtx, machine_mode, poly_uint64);
+ rtx simplify_gen_vec_select (rtx, unsigned int);
+
+ /* Tracks the level of MEM nesting for the value being simplified:
+ 0 means the value is not in a MEM, >0 means it is. This is needed
+ because the canonical representation of multiplication is different
+ inside a MEM than outside. */
+ unsigned int mem_depth = 0;
+
+ /* Tracks number of simplify_associative_operation calls performed during
+ outermost simplify* call. */
+ unsigned int assoc_count = 0;
+
+ /* Limit for the above number, return NULL from
+ simplify_associative_operation after we reach that assoc_count. */
+ static const unsigned int max_assoc_count = 64;
+
+private:
+ rtx simplify_truncation (machine_mode, rtx, machine_mode);
+ rtx simplify_byte_swapping_operation (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_associative_operation (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_distributive_operation (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_logical_relational_operation (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_binary_operation_series (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_distribute_over_subregs (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_shift_const_int (rtx_code, machine_mode, rtx, unsigned int);
+ rtx simplify_plus_minus (rtx_code, machine_mode, rtx, rtx);
+ rtx simplify_cond_clz_ctz (rtx, rtx_code, rtx, rtx);
+
+ rtx simplify_unary_operation_1 (rtx_code, machine_mode, rtx);
+ rtx simplify_binary_operation_1 (rtx_code, machine_mode, rtx, rtx, rtx, rtx);
+ rtx simplify_ternary_operation_1 (rtx_code, machine_mode, machine_mode,
+ rtx, rtx, rtx);
+ rtx simplify_relational_operation_1 (rtx_code, machine_mode, machine_mode,
+ rtx, rtx);
+};
+
+inline rtx
+simplify_unary_operation (rtx_code code, machine_mode mode, rtx op,
+ machine_mode op_mode)
+{
+ return simplify_context ().simplify_unary_operation (code, mode, op,
+ op_mode);
+}
+
+inline rtx
+simplify_binary_operation (rtx_code code, machine_mode mode, rtx op0, rtx op1)
+{
+ return simplify_context ().simplify_binary_operation (code, mode, op0, op1);
+}
+
+inline rtx
+simplify_ternary_operation (rtx_code code, machine_mode mode,
+ machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
+{
+ return simplify_context ().simplify_ternary_operation (code, mode, op0_mode,
+ op0, op1, op2);
+}
+
+inline rtx
+simplify_relational_operation (rtx_code code, machine_mode mode,
+ machine_mode op_mode, rtx op0, rtx op1)
+{
+ return simplify_context ().simplify_relational_operation (code, mode,
+ op_mode, op0, op1);
+}
+
+inline rtx
+simplify_subreg (machine_mode outermode, rtx op, machine_mode innermode,
+ poly_uint64 byte)
+{
+ return simplify_context ().simplify_subreg (outermode, op, innermode, byte);
+}
+
+inline rtx
+simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
+ machine_mode op_mode)
+{
+ return simplify_context ().simplify_gen_unary (code, mode, op, op_mode);
+}
+
+inline rtx
+simplify_gen_binary (rtx_code code, machine_mode mode, rtx op0, rtx op1)
+{
+ return simplify_context ().simplify_gen_binary (code, mode, op0, op1);
+}
+
+inline rtx
+simplify_gen_ternary (rtx_code code, machine_mode mode, machine_mode op0_mode,
+ rtx op0, rtx op1, rtx op2)
+{
+ return simplify_context ().simplify_gen_ternary (code, mode, op0_mode,
+ op0, op1, op2);
+}
+
+inline rtx
+simplify_gen_relational (rtx_code code, machine_mode mode,
+ machine_mode op_mode, rtx op0, rtx op1)
+{
+ return simplify_context ().simplify_gen_relational (code, mode, op_mode,
+ op0, op1);
+}
+
+inline rtx
+simplify_gen_subreg (machine_mode outermode, rtx op, machine_mode innermode,
+ poly_uint64 byte)
+{
+ return simplify_context ().simplify_gen_subreg (outermode, op,
+ innermode, byte);
+}
+
+inline rtx
+simplify_gen_vec_select (rtx op, unsigned int index)
+{
+ return simplify_context ().simplify_gen_vec_select (op, index);
+}
+
+inline rtx
+lowpart_subreg (machine_mode outermode, rtx op, machine_mode innermode)
+{
+ return simplify_context ().lowpart_subreg (outermode, op, innermode);
+}
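+
+/* Usage sketch (editorial illustration): the wrappers above are the
+   usual entry points; the simplify_gen_* forms always return an rtx,
+   folding where possible, e.g.
+
+     rtx sum = simplify_gen_binary (PLUS, SImode, x, const0_rtx);
+
+   returns X itself, since x + 0 simplifies away.  */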
+
+extern rtx simplify_const_unary_operation (enum rtx_code, machine_mode,
+ rtx, machine_mode);
+extern rtx simplify_const_binary_operation (enum rtx_code, machine_mode,
+ rtx, rtx);
+extern rtx simplify_const_relational_operation (enum rtx_code,
+ machine_mode, rtx, rtx);
+extern rtx simplify_replace_fn_rtx (rtx, const_rtx,
+ rtx (*fn) (rtx, const_rtx, void *), void *);
+extern rtx simplify_replace_rtx (rtx, const_rtx, rtx);
+extern rtx simplify_rtx (const_rtx);
+extern rtx avoid_constant_pool_reference (rtx);
+extern rtx delegitimize_mem_from_attrs (rtx);
+extern bool mode_signbit_p (machine_mode, const_rtx);
+extern bool val_signbit_p (machine_mode, unsigned HOST_WIDE_INT);
+extern bool val_signbit_known_set_p (machine_mode,
+ unsigned HOST_WIDE_INT);
+extern bool val_signbit_known_clear_p (machine_mode,
+ unsigned HOST_WIDE_INT);
+
+/* In reginfo.cc */
+extern machine_mode choose_hard_reg_mode (unsigned int, unsigned int,
+ const predefined_function_abi *);
+extern const HARD_REG_SET &simplifiable_subregs (const subreg_shape &);
+
+/* In emit-rtl.cc */
+extern rtx set_for_reg_notes (rtx);
+extern rtx set_unique_reg_note (rtx, enum reg_note, rtx);
+extern rtx set_dst_reg_note (rtx, enum reg_note, rtx, rtx);
+extern void set_insn_deleted (rtx_insn *);
+
+/* Functions in rtlanal.cc */
+
+extern rtx single_set_2 (const rtx_insn *, const_rtx);
+extern rtx simple_regno_set (rtx, unsigned int);
+extern bool contains_symbol_ref_p (const_rtx);
+extern bool contains_symbolic_reference_p (const_rtx);
+extern bool contains_constant_pool_address_p (const_rtx);
+extern void add_auto_inc_notes (rtx_insn *, rtx);
+
+/* Handle the cheap and common cases inline for performance. */
+
+inline rtx single_set (const rtx_insn *insn)
+{
+ if (!INSN_P (insn))
+ return NULL_RTX;
+
+ if (GET_CODE (PATTERN (insn)) == SET)
+ return PATTERN (insn);
+
+ /* Defer to the more expensive case. */
+ return single_set_2 (insn, PATTERN (insn));
+}
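+
+/* Usage sketch (editorial illustration; record_value is a hypothetical
+   consumer): the canonical way to ask whether INSN performs exactly
+   one visible assignment.
+
+     rtx set = single_set (insn);
+     if (set && REG_P (SET_DEST (set)))
+       record_value (SET_DEST (set), SET_SRC (set));  */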
+
+extern scalar_int_mode get_address_mode (rtx mem);
+extern int rtx_addr_can_trap_p (const_rtx);
+extern bool nonzero_address_p (const_rtx);
+extern int rtx_unstable_p (const_rtx);
+extern bool rtx_varies_p (const_rtx, bool);
+extern bool rtx_addr_varies_p (const_rtx, bool);
+extern rtx get_call_rtx_from (const rtx_insn *);
+extern tree get_call_fndecl (const rtx_insn *);
+extern HOST_WIDE_INT get_integer_term (const_rtx);
+extern rtx get_related_value (const_rtx);
+extern bool offset_within_block_p (const_rtx, HOST_WIDE_INT);
+extern void split_const (rtx, rtx *, rtx *);
+extern rtx strip_offset (rtx, poly_int64_pod *);
+extern poly_int64 get_args_size (const_rtx);
+extern bool unsigned_reg_p (rtx);
+extern int reg_mentioned_p (const_rtx, const_rtx);
+extern int count_occurrences (const_rtx, const_rtx, int);
+extern int reg_referenced_p (const_rtx, const_rtx);
+extern int reg_used_between_p (const_rtx, const rtx_insn *, const rtx_insn *);
+extern int reg_set_between_p (const_rtx, const rtx_insn *, const rtx_insn *);
+extern int commutative_operand_precedence (rtx);
+extern bool swap_commutative_operands_p (rtx, rtx);
+extern int modified_between_p (const_rtx, const rtx_insn *, const rtx_insn *);
+extern int no_labels_between_p (const rtx_insn *, const rtx_insn *);
+extern int modified_in_p (const_rtx, const_rtx);
+extern int reg_set_p (const_rtx, const_rtx);
+extern int multiple_sets (const_rtx);
+extern int set_noop_p (const_rtx);
+extern int noop_move_p (const rtx_insn *);
+extern bool refers_to_regno_p (unsigned int, unsigned int, const_rtx, rtx *);
+extern int reg_overlap_mentioned_p (const_rtx, const_rtx);
+extern const_rtx set_of (const_rtx, const_rtx);
+extern void record_hard_reg_sets (rtx, const_rtx, void *);
+extern void record_hard_reg_uses (rtx *, void *);
+extern void find_all_hard_regs (const_rtx, HARD_REG_SET *);
+extern void find_all_hard_reg_sets (const rtx_insn *, HARD_REG_SET *, bool);
+extern void note_pattern_stores (const_rtx,
+ void (*) (rtx, const_rtx, void *), void *);
+extern void note_stores (const rtx_insn *,
+ void (*) (rtx, const_rtx, void *), void *);
+extern void note_uses (rtx *, void (*) (rtx *, void *), void *);
+extern int dead_or_set_p (const rtx_insn *, const_rtx);
+extern int dead_or_set_regno_p (const rtx_insn *, unsigned int);
+extern rtx find_reg_note (const_rtx, enum reg_note, const_rtx);
+extern rtx find_regno_note (const_rtx, enum reg_note, unsigned int);
+extern rtx find_reg_equal_equiv_note (const_rtx);
+extern rtx find_constant_src (const rtx_insn *);
+extern int find_reg_fusage (const_rtx, enum rtx_code, const_rtx);
+extern int find_regno_fusage (const_rtx, enum rtx_code, unsigned int);
+extern rtx alloc_reg_note (enum reg_note, rtx, rtx);
+extern void add_reg_note (rtx, enum reg_note, rtx);
+extern void add_int_reg_note (rtx_insn *, enum reg_note, int);
+extern void add_args_size_note (rtx_insn *, poly_int64);
+extern void add_shallow_copy_of_reg_note (rtx_insn *, rtx);
+extern rtx duplicate_reg_note (rtx);
+extern void remove_note (rtx_insn *, const_rtx);
+extern bool remove_reg_equal_equiv_notes (rtx_insn *, bool = false);
+extern void remove_reg_equal_equiv_notes_for_regno (unsigned int);
+extern int side_effects_p (const_rtx);
+extern int volatile_refs_p (const_rtx);
+extern int volatile_insn_p (const_rtx);
+extern int may_trap_p_1 (const_rtx, unsigned);
+extern int may_trap_p (const_rtx);
+extern int may_trap_or_fault_p (const_rtx);
+extern bool can_throw_internal (const_rtx);
+extern bool can_throw_external (const_rtx);
+extern bool insn_could_throw_p (const_rtx);
+extern bool insn_nothrow_p (const_rtx);
+extern bool can_nonlocal_goto (const rtx_insn *);
+extern void copy_reg_eh_region_note_forward (rtx, rtx_insn *, rtx);
+extern void copy_reg_eh_region_note_backward (rtx, rtx_insn *, rtx);
+extern rtx replace_rtx (rtx, rtx, rtx, bool = false);
+extern void replace_label (rtx *, rtx, rtx, bool);
+extern void replace_label_in_insn (rtx_insn *, rtx_insn *, rtx_insn *, bool);
+extern bool rtx_referenced_p (const_rtx, const_rtx);
+extern bool tablejump_p (const rtx_insn *, rtx_insn **, rtx_jump_table_data **);
+extern rtx tablejump_casesi_pattern (const rtx_insn *insn);
+extern int computed_jump_p (const rtx_insn *);
+extern bool tls_referenced_p (const_rtx);
+extern bool contains_mem_rtx_p (rtx x);
+extern bool register_asm_p (const_rtx);
+
+/* Overload for refers_to_regno_p for checking a single register. */
+inline bool
+refers_to_regno_p (unsigned int regnum, const_rtx x, rtx* loc = NULL)
+{
+ return refers_to_regno_p (regnum, regnum + 1, x, loc);
+}
+
+/* Callback for for_each_inc_dec, to process the autoinc operation OP
+ within MEM that sets DEST to SRC + SRCOFF, or SRC if SRCOFF is
+ NULL. The callback is passed the same opaque ARG passed to
+ for_each_inc_dec. Return zero to continue looking for other
+ autoinc operations or any other value to interrupt the traversal and
+ return that value to the caller of for_each_inc_dec. */
+typedef int (*for_each_inc_dec_fn) (rtx mem, rtx op, rtx dest, rtx src,
+ rtx srcoff, void *arg);
+extern int for_each_inc_dec (rtx, for_each_inc_dec_fn, void *arg);
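+
+/* Callback sketch (editorial illustration; count_autoinc is a
+   hypothetical name): counting auto-inc operations in a pattern.
+
+     static int
+     count_autoinc (rtx, rtx, rtx, rtx, rtx, void *arg)
+     {
+       ++*(int *) arg;
+       return 0;
+     }
+
+     int n = 0;
+     for_each_inc_dec (PATTERN (insn), count_autoinc, &n);  */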
+
+typedef int (*rtx_equal_p_callback_function) (const_rtx *, const_rtx *,
+ rtx *, rtx *);
+extern int rtx_equal_p_cb (const_rtx, const_rtx,
+ rtx_equal_p_callback_function);
+
+typedef int (*hash_rtx_callback_function) (const_rtx, machine_mode, rtx *,
+ machine_mode *);
+extern unsigned hash_rtx_cb (const_rtx, machine_mode, int *, int *,
+ bool, hash_rtx_callback_function);
+
+extern rtx regno_use_in (unsigned int, rtx);
+extern int auto_inc_p (const_rtx);
+extern bool in_insn_list_p (const rtx_insn_list *, const rtx_insn *);
+extern void remove_node_from_insn_list (const rtx_insn *, rtx_insn_list **);
+extern int loc_mentioned_in_p (rtx *, const_rtx);
+extern rtx_insn *find_first_parameter_load (rtx_insn *, rtx_insn *);
+extern bool keep_with_call_p (const rtx_insn *);
+extern bool label_is_jump_target_p (const_rtx, const rtx_insn *);
+extern int pattern_cost (rtx, bool);
+extern int insn_cost (rtx_insn *, bool);
+extern unsigned seq_cost (const rtx_insn *, bool);
+
+/* Given an insn and condition, return a canonical description of
+ the test being made. */
+extern rtx canonicalize_condition (rtx_insn *, rtx, int, rtx_insn **, rtx,
+ int, int);
+
+/* Given a JUMP_INSN, return a canonical description of the test
+ being made. */
+extern rtx get_condition (rtx_insn *, rtx_insn **, int, int);
+
+/* Information about a subreg of a hard register. */
+struct subreg_info
+{
+ /* Offset of first hard register involved in the subreg. */
+ int offset;
+ /* Number of hard registers involved in the subreg. In the case of
+ a paradoxical subreg, this is the number of registers that would
+ be modified by writing to the subreg; some of them may be don't-care
+ when reading from the subreg. */
+ int nregs;
+ /* Whether this subreg can be represented as a hard reg with the new
+ mode (by adding OFFSET to the original hard register). */
+ bool representable_p;
+};
+
+extern void subreg_get_info (unsigned int, machine_mode,
+ poly_uint64, machine_mode,
+ struct subreg_info *);
+
+/* lists.cc */
+
+extern void free_EXPR_LIST_list (rtx_expr_list **);
+extern void free_INSN_LIST_list (rtx_insn_list **);
+extern void free_EXPR_LIST_node (rtx);
+extern void free_INSN_LIST_node (rtx);
+extern rtx_insn_list *alloc_INSN_LIST (rtx, rtx);
+extern rtx_insn_list *copy_INSN_LIST (rtx_insn_list *);
+extern rtx_insn_list *concat_INSN_LIST (rtx_insn_list *, rtx_insn_list *);
+extern rtx_expr_list *alloc_EXPR_LIST (int, rtx, rtx);
+extern void remove_free_INSN_LIST_elem (rtx_insn *, rtx_insn_list **);
+extern rtx remove_list_elem (rtx, rtx *);
+extern rtx_insn *remove_free_INSN_LIST_node (rtx_insn_list **);
+extern rtx remove_free_EXPR_LIST_node (rtx_expr_list **);
+
+
+/* reginfo.cc */
+
+/* Resize reg info. */
+extern bool resize_reg_info (void);
+/* Free up register info memory. */
+extern void free_reg_info (void);
+extern void init_subregs_of_mode (void);
+extern void finish_subregs_of_mode (void);
+extern void reginfo_cc_finalize (void);
+
+/* recog.cc */
+extern rtx extract_asm_operands (rtx);
+extern int asm_noperands (const_rtx);
+extern const char *decode_asm_operands (rtx, rtx *, rtx **, const char **,
+ machine_mode *, location_t *);
+extern void get_referenced_operands (const char *, bool *, unsigned int);
+
+extern enum reg_class reg_preferred_class (int);
+extern enum reg_class reg_alternate_class (int);
+extern enum reg_class reg_allocno_class (int);
+extern void setup_reg_classes (int, enum reg_class, enum reg_class,
+ enum reg_class);
+
+extern void split_all_insns (void);
+extern unsigned int split_all_insns_noflow (void);
+
+#define MAX_SAVED_CONST_INT 64
+extern GTY(()) rtx const_int_rtx[MAX_SAVED_CONST_INT * 2 + 1];
+
+#define const0_rtx (const_int_rtx[MAX_SAVED_CONST_INT])
+#define const1_rtx (const_int_rtx[MAX_SAVED_CONST_INT+1])
+#define const2_rtx (const_int_rtx[MAX_SAVED_CONST_INT+2])
+#define constm1_rtx (const_int_rtx[MAX_SAVED_CONST_INT-1])
+extern GTY(()) rtx const_true_rtx;
+
+extern GTY(()) rtx const_tiny_rtx[4][(int) MAX_MACHINE_MODE];
+
+/* Returns a constant 0 rtx in mode MODE. Integer modes are treated the
+ same as VOIDmode. */
+
+#define CONST0_RTX(MODE) (const_tiny_rtx[0][(int) (MODE)])
+
+/* Likewise, for the constants 1 and 2 and -1. */
+
+#define CONST1_RTX(MODE) (const_tiny_rtx[1][(int) (MODE)])
+#define CONST2_RTX(MODE) (const_tiny_rtx[2][(int) (MODE)])
+#define CONSTM1_RTX(MODE) (const_tiny_rtx[3][(int) (MODE)])
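+
+/* Usage sketch (editorial illustration): const_int_rtx preallocates
+   the CONST_INTs in [-64, 64], so const0_rtx and friends are shared
+   objects; CONST0_RTX generalizes this to any mode, e.g.
+
+     rtx zero = CONST0_RTX (DFmode);  */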
+
+extern GTY(()) rtx pc_rtx;
+extern GTY(()) rtx ret_rtx;
+extern GTY(()) rtx simple_return_rtx;
+extern GTY(()) rtx_insn *invalid_insn_rtx;
+
+/* If HARD_FRAME_POINTER_REGNUM is defined, then a special dummy reg
+ is used to represent the frame pointer. This is because the
+ hard frame pointer and the automatic variables are separated by an amount
+ that cannot be determined until after register allocation. We can assume
+ that in this case ELIMINABLE_REGS will be defined, one action of which
+ will be to eliminate FRAME_POINTER_REGNUM into HARD_FRAME_POINTER_REGNUM. */
+#ifndef HARD_FRAME_POINTER_REGNUM
+#define HARD_FRAME_POINTER_REGNUM FRAME_POINTER_REGNUM
+#endif
+
+#ifndef HARD_FRAME_POINTER_IS_FRAME_POINTER
+#define HARD_FRAME_POINTER_IS_FRAME_POINTER \
+ (HARD_FRAME_POINTER_REGNUM == FRAME_POINTER_REGNUM)
+#endif
+
+#ifndef HARD_FRAME_POINTER_IS_ARG_POINTER
+#define HARD_FRAME_POINTER_IS_ARG_POINTER \
+ (HARD_FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM)
+#endif
+
+/* Index labels for global_rtl. */
+enum global_rtl_index
+{
+ GR_STACK_POINTER,
+ GR_FRAME_POINTER,
+/* For register elimination to work properly, hard_frame_pointer_rtx,
+ frame_pointer_rtx, and arg_pointer_rtx must be the same if they refer to
+ the same register. */
+#if FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
+ GR_ARG_POINTER = GR_FRAME_POINTER,
+#endif
+#if HARD_FRAME_POINTER_IS_FRAME_POINTER
+ GR_HARD_FRAME_POINTER = GR_FRAME_POINTER,
+#else
+ GR_HARD_FRAME_POINTER,
+#endif
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+#if HARD_FRAME_POINTER_IS_ARG_POINTER
+ GR_ARG_POINTER = GR_HARD_FRAME_POINTER,
+#else
+ GR_ARG_POINTER,
+#endif
+#endif
+ GR_VIRTUAL_INCOMING_ARGS,
+ GR_VIRTUAL_STACK_ARGS,
+ GR_VIRTUAL_STACK_DYNAMIC,
+ GR_VIRTUAL_OUTGOING_ARGS,
+ GR_VIRTUAL_CFA,
+ GR_VIRTUAL_PREFERRED_STACK_BOUNDARY,
+
+ GR_MAX
+};
+
+/* Target-dependent globals. */
+struct GTY(()) target_rtl {
+ /* All references to the hard registers in global_rtl_index go through
+ these unique rtl objects. On machines where the frame-pointer and
+ arg-pointer are the same register, they use the same unique object.
+
+ After register allocation, other rtl objects which used to be pseudo-regs
+ may be clobbered to refer to the frame-pointer register.
+ But references that were originally to the frame-pointer can be
+ distinguished from the others because they contain frame_pointer_rtx.
+
+ When to use frame_pointer_rtx and hard_frame_pointer_rtx is a little
+ tricky: until register elimination has taken place hard_frame_pointer_rtx
+ should be used if it is being set, and frame_pointer_rtx otherwise. After
+ register elimination hard_frame_pointer_rtx should always be used.
+ On machines where the two registers are the same (the common case),
+ these rtxes are likewise the same. */
+ rtx x_global_rtl[GR_MAX];
+
+ /* A unique representation of (REG:Pmode PIC_OFFSET_TABLE_REGNUM). */
+ rtx x_pic_offset_table_rtx;
+
+ /* A unique representation of (REG:Pmode RETURN_ADDRESS_POINTER_REGNUM).
+ This is used to implement __builtin_return_address for some machines;
+ see for instance the MIPS port. */
+ rtx x_return_address_pointer_rtx;
+
+ /* Commonly used RTL for hard registers. These objects are not
+ necessarily unique, so we allocate them separately from global_rtl.
+ They are initialized once per compilation unit, then copied into
+ regno_reg_rtx at the beginning of each function. */
+ rtx x_initial_regno_reg_rtx[FIRST_PSEUDO_REGISTER];
+
+ /* A sample (mem:M stack_pointer_rtx) rtx for each mode M. */
+ rtx x_top_of_stack[MAX_MACHINE_MODE];
+
+ /* Static hunks of RTL used by the aliasing code; these are treated
+ as persistent to avoid unnecessary RTL allocations. */
+ rtx x_static_reg_base_value[FIRST_PSEUDO_REGISTER];
+
+ /* The default memory attributes for each mode. */
+ class mem_attrs *x_mode_mem_attrs[(int) MAX_MACHINE_MODE];
+
+ /* Track if RTL has been initialized. */
+ bool target_specific_initialized;
+};
+
+extern GTY(()) struct target_rtl default_target_rtl;
+#if SWITCHABLE_TARGET
+extern struct target_rtl *this_target_rtl;
+#else
+#define this_target_rtl (&default_target_rtl)
+#endif
+
+#define global_rtl \
+ (this_target_rtl->x_global_rtl)
+#define pic_offset_table_rtx \
+ (this_target_rtl->x_pic_offset_table_rtx)
+#define return_address_pointer_rtx \
+ (this_target_rtl->x_return_address_pointer_rtx)
+#define top_of_stack \
+ (this_target_rtl->x_top_of_stack)
+#define mode_mem_attrs \
+ (this_target_rtl->x_mode_mem_attrs)
+
+/* All references to certain hard regs, except those created
+ by allocating pseudo regs into them (when that's possible),
+ go through these unique rtx objects. */
+#define stack_pointer_rtx (global_rtl[GR_STACK_POINTER])
+#define frame_pointer_rtx (global_rtl[GR_FRAME_POINTER])
+#define hard_frame_pointer_rtx (global_rtl[GR_HARD_FRAME_POINTER])
+#define arg_pointer_rtx (global_rtl[GR_ARG_POINTER])
+
+#ifndef GENERATOR_FILE
+/* Return the attributes of a MEM rtx. */
+inline const class mem_attrs *
+get_mem_attrs (const_rtx x)
+{
+ class mem_attrs *attrs;
+
+ attrs = MEM_ATTRS (x);
+ if (!attrs)
+ attrs = mode_mem_attrs[(int) GET_MODE (x)];
+ return attrs;
+}
+#endif
+
+/* Include the RTL generation functions. */
+
+#ifndef GENERATOR_FILE
+#include "genrtl.h"
+#undef gen_rtx_ASM_INPUT
+#define gen_rtx_ASM_INPUT(MODE, ARG0) \
+ gen_rtx_fmt_si (ASM_INPUT, (MODE), (ARG0), 0)
+#define gen_rtx_ASM_INPUT_loc(MODE, ARG0, LOC) \
+ gen_rtx_fmt_si (ASM_INPUT, (MODE), (ARG0), (LOC))
+#endif
+
+/* There are some RTL codes that require special attention; the
+ generation functions included above do the raw handling. If you
+ add to this list, modify special_rtx in gengenrtl.cc as well. */
+
+extern rtx_expr_list *gen_rtx_EXPR_LIST (machine_mode, rtx, rtx);
+extern rtx_insn_list *gen_rtx_INSN_LIST (machine_mode, rtx, rtx);
+extern rtx_insn *
+gen_rtx_INSN (machine_mode mode, rtx_insn *prev_insn, rtx_insn *next_insn,
+ basic_block bb, rtx pattern, int location, int code,
+ rtx reg_notes);
+extern rtx gen_rtx_CONST_INT (machine_mode, HOST_WIDE_INT);
+extern rtx gen_rtx_CONST_VECTOR (machine_mode, rtvec);
+extern void set_mode_and_regno (rtx, machine_mode, unsigned int);
+extern rtx init_raw_REG (rtx, machine_mode, unsigned int);
+extern rtx gen_raw_REG (machine_mode, unsigned int);
+#define alloca_raw_REG(mode, regno) \
+ init_raw_REG (rtx_alloca (REG), (mode), (regno))
+extern rtx gen_rtx_REG (machine_mode, unsigned int);
+extern rtx gen_rtx_SUBREG (machine_mode, rtx, poly_uint64);
+extern rtx gen_rtx_MEM (machine_mode, rtx);
+extern rtx gen_rtx_VAR_LOCATION (machine_mode, tree, rtx,
+ enum var_init_status);
+
+#ifdef GENERATOR_FILE
+#define PUT_MODE(RTX, MODE) PUT_MODE_RAW (RTX, MODE)
+#else
+inline void
+PUT_MODE (rtx x, machine_mode mode)
+{
+ if (REG_P (x))
+ set_mode_and_regno (x, mode, REGNO (x));
+ else
+ PUT_MODE_RAW (x, mode);
+}
+#endif
+
+#define GEN_INT(N) gen_rtx_CONST_INT (VOIDmode, (N))
+
+/* Virtual registers are used during RTL generation to refer to locations into
+ the stack frame when the actual location isn't known until RTL generation
+ is complete. The routine instantiate_virtual_regs replaces these with
+ the proper value, which is normally {frame,arg,stack}_pointer_rtx plus
+ a constant. */
+
+#define FIRST_VIRTUAL_REGISTER (FIRST_PSEUDO_REGISTER)
+
+/* This points to the first word of the incoming arguments passed on the stack,
+ either by the caller or by the callee when pretending it was passed by the
+ caller. */
+
+#define virtual_incoming_args_rtx (global_rtl[GR_VIRTUAL_INCOMING_ARGS])
+
+#define VIRTUAL_INCOMING_ARGS_REGNUM (FIRST_VIRTUAL_REGISTER)
+
+/* If FRAME_GROWS_DOWNWARD, this points to immediately above the first
+ variable on the stack. Otherwise, it points to the first variable on
+ the stack. */
+
+#define virtual_stack_vars_rtx (global_rtl[GR_VIRTUAL_STACK_ARGS])
+
+#define VIRTUAL_STACK_VARS_REGNUM ((FIRST_VIRTUAL_REGISTER) + 1)
+
+/* This points to the location of dynamically-allocated memory on the stack
+ immediately after the stack pointer has been adjusted by the amount
+ desired. */
+
+#define virtual_stack_dynamic_rtx (global_rtl[GR_VIRTUAL_STACK_DYNAMIC])
+
+#define VIRTUAL_STACK_DYNAMIC_REGNUM ((FIRST_VIRTUAL_REGISTER) + 2)
+
+/* This points to the location in the stack at which outgoing arguments should
+ be written when the stack is pre-pushed (arguments pushed using push
+ insns always use sp). */
+
+#define virtual_outgoing_args_rtx (global_rtl[GR_VIRTUAL_OUTGOING_ARGS])
+
+#define VIRTUAL_OUTGOING_ARGS_REGNUM ((FIRST_VIRTUAL_REGISTER) + 3)
+
+/* This points to the Canonical Frame Address of the function. This
+ should correspond to the CFA produced by INCOMING_FRAME_SP_OFFSET,
+ but is calculated relative to the arg pointer for simplicity; neither
+ the frame pointer nor the stack pointer is necessarily fixed relative
+ to the CFA until after reload. */
+
+#define virtual_cfa_rtx (global_rtl[GR_VIRTUAL_CFA])
+
+#define VIRTUAL_CFA_REGNUM ((FIRST_VIRTUAL_REGISTER) + 4)
+
+#define LAST_VIRTUAL_POINTER_REGISTER ((FIRST_VIRTUAL_REGISTER) + 4)
+
+/* This is replaced by crtl->preferred_stack_boundary / BITS_PER_UNIT
+ when finalized. */
+
+#define virtual_preferred_stack_boundary_rtx \
+ (global_rtl[GR_VIRTUAL_PREFERRED_STACK_BOUNDARY])
+
+#define VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM \
+ ((FIRST_VIRTUAL_REGISTER) + 5)
+
+#define LAST_VIRTUAL_REGISTER ((FIRST_VIRTUAL_REGISTER) + 5)
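+
+/* Worked example (editorial illustration): during expansion the
+   address of a stack variable is typically
+   (plus:P virtual_stack_vars_rtx (const_int N)); instantiating the
+   virtual registers later rewrites it in terms of the real frame or
+   stack pointer plus an adjusted constant.  */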
+
+/* Nonzero if REGNUM is a pointer into the stack frame. */
+#define REGNO_PTR_FRAME_P(REGNUM) \
+ ((REGNUM) == STACK_POINTER_REGNUM \
+ || (REGNUM) == FRAME_POINTER_REGNUM \
+ || (REGNUM) == HARD_FRAME_POINTER_REGNUM \
+ || (REGNUM) == ARG_POINTER_REGNUM \
+ || ((REGNUM) >= FIRST_VIRTUAL_REGISTER \
+ && (REGNUM) <= LAST_VIRTUAL_POINTER_REGISTER))
+
+/* REGNUM never really appearing in the INSN stream. */
+#define INVALID_REGNUM (~(unsigned int) 0)
+
+/* REGNUM for which no debug information can be generated. */
+#define IGNORED_DWARF_REGNUM (INVALID_REGNUM - 1)
+
+extern rtx output_constant_def (tree, int);
+extern rtx lookup_constant_def (tree);
+
+/* Nonzero after end of reload pass.
+ Set to 1 or 0 by reload1.cc. */
+
+extern int reload_completed;
+
+/* Nonzero after thread_prologue_and_epilogue_insns has run. */
+extern int epilogue_completed;
+
+/* Set to 1 while reload_as_needed is operating.
+ Required by some machines to handle any generated moves differently. */
+
+extern int reload_in_progress;
+
+/* Set to 1 while in lra. */
+extern int lra_in_progress;
+
+/* This macro indicates whether you may create a new
+ pseudo-register. */
+
+#define can_create_pseudo_p() (!reload_in_progress && !reload_completed)
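+
+/* Usage sketch (editorial illustration): expanders commonly guard
+   gen_reg_rtx with this predicate, reusing an existing operand once
+   new pseudos may no longer be created.
+
+     rtx tmp = can_create_pseudo_p () ? gen_reg_rtx (mode) : operands[0];  */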
+
+#ifdef STACK_REGS
+/* Nonzero after end of regstack pass.
+ Set to 1 or 0 by reg-stack.cc. */
+extern int regstack_completed;
+#endif
+
+/* If this is nonzero, we do not bother generating VOLATILE
+ around volatile memory references, and we are willing to
+ output indirect addresses. If cse is to follow, we reject
+ indirect addresses so a useful potential cse is generated;
+ if it is used only once, instruction combination will produce
+ the same indirect address eventually. */
+extern int cse_not_expected;
+
+/* Translates rtx code to tree code, for those codes needed by
+ real_arithmetic. The function returns an int because the caller may not
+ know what `enum tree_code' means. */
+
+extern int rtx_to_tree_code (enum rtx_code);
+
+/* In cse.cc */
+extern int delete_trivially_dead_insns (rtx_insn *, int);
+extern int exp_equiv_p (const_rtx, const_rtx, int, bool);
+extern unsigned hash_rtx (const_rtx x, machine_mode, int *, int *, bool);
+
+/* In dse.cc */
+extern bool check_for_inc_dec (rtx_insn *insn);
+
+/* In jump.cc */
+extern int comparison_dominates_p (enum rtx_code, enum rtx_code);
+extern bool jump_to_label_p (const rtx_insn *);
+extern int condjump_p (const rtx_insn *);
+extern int any_condjump_p (const rtx_insn *);
+extern int any_uncondjump_p (const rtx_insn *);
+extern rtx pc_set (const rtx_insn *);
+extern rtx condjump_label (const rtx_insn *);
+extern int simplejump_p (const rtx_insn *);
+extern int returnjump_p (const rtx_insn *);
+extern int eh_returnjump_p (rtx_insn *);
+extern int onlyjump_p (const rtx_insn *);
+extern int invert_jump_1 (rtx_jump_insn *, rtx);
+extern int invert_jump (rtx_jump_insn *, rtx, int);
+extern int rtx_renumbered_equal_p (const_rtx, const_rtx);
+extern int true_regnum (const_rtx);
+extern unsigned int reg_or_subregno (const_rtx);
+extern int redirect_jump_1 (rtx_insn *, rtx);
+extern void redirect_jump_2 (rtx_jump_insn *, rtx, rtx, int, int);
+extern int redirect_jump (rtx_jump_insn *, rtx, int);
+extern void rebuild_jump_labels (rtx_insn *);
+extern void rebuild_jump_labels_chain (rtx_insn *);
+extern rtx reversed_comparison (const_rtx, machine_mode);
+extern enum rtx_code reversed_comparison_code (const_rtx, const rtx_insn *);
+extern enum rtx_code reversed_comparison_code_parts (enum rtx_code, const_rtx,
+ const_rtx, const rtx_insn *);
+extern void delete_for_peephole (rtx_insn *, rtx_insn *);
+extern int condjump_in_parallel_p (const rtx_insn *);
+
+/* In emit-rtl.cc. */
+extern int max_reg_num (void);
+extern int max_label_num (void);
+extern int get_first_label_num (void);
+extern void maybe_set_first_label_num (rtx_code_label *);
+extern void delete_insns_since (rtx_insn *);
+extern void mark_reg_pointer (rtx, int);
+extern void mark_user_reg (rtx);
+extern void reset_used_flags (rtx);
+extern void set_used_flags (rtx);
+extern void reorder_insns (rtx_insn *, rtx_insn *, rtx_insn *);
+extern void reorder_insns_nobb (rtx_insn *, rtx_insn *, rtx_insn *);
+extern int get_max_insn_count (void);
+extern int in_sequence_p (void);
+extern void init_emit (void);
+extern void init_emit_regs (void);
+extern void init_derived_machine_modes (void);
+extern void init_emit_once (void);
+extern void push_topmost_sequence (void);
+extern void pop_topmost_sequence (void);
+extern void set_new_first_and_last_insn (rtx_insn *, rtx_insn *);
+extern unsigned int unshare_all_rtl (void);
+extern void unshare_all_rtl_again (rtx_insn *);
+extern void unshare_all_rtl_in_chain (rtx_insn *);
+extern void verify_rtl_sharing (void);
+extern void add_insn (rtx_insn *);
+extern void add_insn_before (rtx_insn *, rtx_insn *, basic_block);
+extern void add_insn_after (rtx_insn *, rtx_insn *, basic_block);
+extern void remove_insn (rtx_insn *);
+extern rtx_insn *emit (rtx, bool = true);
+extern void emit_insn_at_entry (rtx);
+extern rtx gen_lowpart_SUBREG (machine_mode, rtx);
+extern rtx gen_const_mem (machine_mode, rtx);
+extern rtx gen_frame_mem (machine_mode, rtx);
+extern rtx gen_tmp_stack_mem (machine_mode, rtx);
+extern bool validate_subreg (machine_mode, machine_mode,
+ const_rtx, poly_uint64);
+
+/* In combine.cc */
+extern unsigned int extended_count (const_rtx, machine_mode, int);
+extern rtx remove_death (unsigned int, rtx_insn *);
+extern void dump_combine_stats (FILE *);
+extern void dump_combine_total_stats (FILE *);
+extern rtx make_compound_operation (rtx, enum rtx_code);
+
+/* In sched-rgn.cc. */
+extern void schedule_insns (void);
+
+/* In sched-ebb.cc. */
+extern void schedule_ebbs (void);
+
+/* In sel-sched-dump.cc. */
+extern void sel_sched_fix_param (const char *param, const char *val);
+
+/* In print-rtl.cc */
+extern const char *print_rtx_head;
+extern void debug (const rtx_def &ref);
+extern void debug (const rtx_def *ptr);
+extern void debug_rtx (const_rtx);
+extern void debug_rtx_list (const rtx_insn *, int);
+extern void debug_rtx_range (const rtx_insn *, const rtx_insn *);
+extern const rtx_insn *debug_rtx_find (const rtx_insn *, int);
+extern void print_mem_expr (FILE *, const_tree);
+extern void print_rtl (FILE *, const_rtx);
+extern void print_simple_rtl (FILE *, const_rtx);
+extern int print_rtl_single (FILE *, const_rtx);
+extern int print_rtl_single_with_indent (FILE *, const_rtx, int);
+extern void print_inline_rtx (FILE *, const_rtx, int);
+
+/* In stmt.cc */
+extern void expand_null_return (void);
+extern void expand_naked_return (void);
+extern void emit_jump (rtx);
+
+/* Memory operation built-ins differ by return value. The mapping of
+ the enum values is as follows:
+ - RETURN_BEGIN - return destination, e.g. memcpy
+ - RETURN_END - return destination + n, e.g. mempcpy
+ - RETURN_END_MINUS_ONE - return a pointer to the terminating
+ null byte of the string, e.g. strcpy
+*/
+
+enum memop_ret
+{
+ RETURN_BEGIN,
+ RETURN_END,
+ RETURN_END_MINUS_ONE
+};
+
+/* In expr.cc */
+extern rtx move_by_pieces (rtx, rtx, unsigned HOST_WIDE_INT,
+ unsigned int, memop_ret);
+extern poly_int64 find_args_size_adjust (rtx_insn *);
+extern poly_int64 fixup_args_size_notes (rtx_insn *, rtx_insn *, poly_int64);
+
+/* In expmed.cc */
+extern void init_expmed (void);
+extern void expand_inc (rtx, rtx);
+extern void expand_dec (rtx, rtx);
+
+/* In lower-subreg.cc */
+extern void init_lower_subreg (void);
+
+/* In gcse.cc */
+extern bool can_copy_p (machine_mode);
+extern bool can_assign_to_reg_without_clobbers_p (rtx, machine_mode);
+extern rtx_insn *prepare_copy_insn (rtx, rtx);
+
+/* In cprop.cc */
+extern rtx fis_get_condition (rtx_insn *);
+
+/* In ira.cc */
+extern HARD_REG_SET eliminable_regset;
+extern void mark_elimination (int, int);
+
+/* In reginfo.cc */
+extern int reg_classes_intersect_p (reg_class_t, reg_class_t);
+extern int reg_class_subset_p (reg_class_t, reg_class_t);
+extern void globalize_reg (tree, int);
+extern void init_reg_modes_target (void);
+extern void init_regs (void);
+extern void reinit_regs (void);
+extern void init_fake_stack_mems (void);
+extern void save_register_info (void);
+extern void init_reg_sets (void);
+extern void regclass (rtx, int);
+extern void reg_scan (rtx_insn *, unsigned int);
+extern void fix_register (const char *, int, int);
+extern const HARD_REG_SET *valid_mode_changes_for_regno (unsigned int);
+
+/* In reload1.cc */
+extern int function_invariant_p (const_rtx);
+
+/* In calls.cc */
+enum libcall_type
+{
+ LCT_NORMAL = 0,
+ LCT_CONST = 1,
+ LCT_PURE = 2,
+ LCT_NORETURN = 3,
+ LCT_THROW = 4,
+ LCT_RETURNS_TWICE = 5
+};
+
+extern rtx emit_library_call_value_1 (int, rtx, rtx, enum libcall_type,
+ machine_mode, int, rtx_mode_t *);
+
+/* Output a library call and discard the returned value. FUN is the
+ address of the function, as a SYMBOL_REF rtx, and OUTMODE is the mode
+ of the (discarded) return value. FN_TYPE is LCT_NORMAL for `normal'
+ calls, LCT_CONST for `const' calls, LCT_PURE for `pure' calls, or
+ another LCT_ value for other types of library calls.
+
+ There are different overloads of this function for different numbers
+ of arguments. In each case the argument value is followed by its mode. */
+
+inline void
+emit_library_call (rtx fun, libcall_type fn_type, machine_mode outmode)
+{
+ emit_library_call_value_1 (0, fun, NULL_RTX, fn_type, outmode, 0, NULL);
+}
+
+inline void
+emit_library_call (rtx fun, libcall_type fn_type, machine_mode outmode,
+ rtx arg1, machine_mode arg1_mode)
+{
+ rtx_mode_t args[] = { rtx_mode_t (arg1, arg1_mode) };
+ emit_library_call_value_1 (0, fun, NULL_RTX, fn_type, outmode, 1, args);
+}
+
+inline void
+emit_library_call (rtx fun, libcall_type fn_type, machine_mode outmode,
+ rtx arg1, machine_mode arg1_mode,
+ rtx arg2, machine_mode arg2_mode)
+{
+ rtx_mode_t args[] = {
+ rtx_mode_t (arg1, arg1_mode),
+ rtx_mode_t (arg2, arg2_mode)
+ };
+ emit_library_call_value_1 (0, fun, NULL_RTX, fn_type, outmode, 2, args);
+}
+
+inline void
+emit_library_call (rtx fun, libcall_type fn_type, machine_mode outmode,
+ rtx arg1, machine_mode arg1_mode,
+ rtx arg2, machine_mode arg2_mode,
+ rtx arg3, machine_mode arg3_mode)
+{
+ rtx_mode_t args[] = {
+ rtx_mode_t (arg1, arg1_mode),
+ rtx_mode_t (arg2, arg2_mode),
+ rtx_mode_t (arg3, arg3_mode)
+ };
+ emit_library_call_value_1 (0, fun, NULL_RTX, fn_type, outmode, 3, args);
+}
+
+inline void
+emit_library_call (rtx fun, libcall_type fn_type, machine_mode outmode,
+ rtx arg1, machine_mode arg1_mode,
+ rtx arg2, machine_mode arg2_mode,
+ rtx arg3, machine_mode arg3_mode,
+ rtx arg4, machine_mode arg4_mode)
+{
+ rtx_mode_t args[] = {
+ rtx_mode_t (arg1, arg1_mode),
+ rtx_mode_t (arg2, arg2_mode),
+ rtx_mode_t (arg3, arg3_mode),
+ rtx_mode_t (arg4, arg4_mode)
+ };
+ emit_library_call_value_1 (0, fun, NULL_RTX, fn_type, outmode, 4, args);
+}
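+
+/* Usage sketch (editorial illustration; "__example_helper" is a
+   hypothetical libcall name): calling a two-argument routine and
+   discarding its result.
+
+     emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__example_helper"),
+                        LCT_NORMAL, VOIDmode,
+                        op0, SImode, op1, SImode);  */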
+
+/* Like emit_library_call, but return the value produced by the call.
+ Use VALUE to store the result if it is nonnull, otherwise pick a
+ convenient location. */
+
+inline rtx
+emit_library_call_value (rtx fun, rtx value, libcall_type fn_type,
+ machine_mode outmode)
+{
+ return emit_library_call_value_1 (1, fun, value, fn_type, outmode, 0, NULL);
+}
+
+inline rtx
+emit_library_call_value (rtx fun, rtx value, libcall_type fn_type,
+ machine_mode outmode,
+ rtx arg1, machine_mode arg1_mode)
+{
+ rtx_mode_t args[] = { rtx_mode_t (arg1, arg1_mode) };
+ return emit_library_call_value_1 (1, fun, value, fn_type, outmode, 1, args);
+}
+
+inline rtx
+emit_library_call_value (rtx fun, rtx value, libcall_type fn_type,
+ machine_mode outmode,
+ rtx arg1, machine_mode arg1_mode,
+ rtx arg2, machine_mode arg2_mode)
+{
+ rtx_mode_t args[] = {
+ rtx_mode_t (arg1, arg1_mode),
+ rtx_mode_t (arg2, arg2_mode)
+ };
+ return emit_library_call_value_1 (1, fun, value, fn_type, outmode, 2, args);
+}
+
+inline rtx
+emit_library_call_value (rtx fun, rtx value, libcall_type fn_type,
+ machine_mode outmode,
+ rtx arg1, machine_mode arg1_mode,
+ rtx arg2, machine_mode arg2_mode,
+ rtx arg3, machine_mode arg3_mode)
+{
+ rtx_mode_t args[] = {
+ rtx_mode_t (arg1, arg1_mode),
+ rtx_mode_t (arg2, arg2_mode),
+ rtx_mode_t (arg3, arg3_mode)
+ };
+ return emit_library_call_value_1 (1, fun, value, fn_type, outmode, 3, args);
+}
+
+inline rtx
+emit_library_call_value (rtx fun, rtx value, libcall_type fn_type,
+ machine_mode outmode,
+ rtx arg1, machine_mode arg1_mode,
+ rtx arg2, machine_mode arg2_mode,
+ rtx arg3, machine_mode arg3_mode,
+ rtx arg4, machine_mode arg4_mode)
+{
+ rtx_mode_t args[] = {
+ rtx_mode_t (arg1, arg1_mode),
+ rtx_mode_t (arg2, arg2_mode),
+ rtx_mode_t (arg3, arg3_mode),
+ rtx_mode_t (arg4, arg4_mode)
+ };
+ return emit_library_call_value_1 (1, fun, value, fn_type, outmode, 4, args);
+}
+
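+/* Usage sketch (editor's addition): the value-returning form is
+   analogous, with an explicit result mode and an optional destination:
+
+     rtx res = emit_library_call_value (sym, NULL_RTX, LCT_CONST,
+                                        SImode, x, SImode);
+
+   passing NULL_RTX lets the compiler pick a convenient SImode
+   register for RES; SYM and X are as in the sketch above.  */
+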
+/* In varasm.cc */
+extern void init_varasm_once (void);
+
+extern rtx make_debug_expr_from_rtl (const_rtx);
+
+/* In read-rtl.cc */
+#ifdef GENERATOR_FILE
+extern bool read_rtx (const char *, vec<rtx> *);
+#endif
+
+/* In alias.cc */
+extern rtx canon_rtx (rtx);
+extern int true_dependence (const_rtx, machine_mode, const_rtx);
+extern rtx get_addr (rtx);
+extern int canon_true_dependence (const_rtx, machine_mode, rtx,
+ const_rtx, rtx);
+extern int read_dependence (const_rtx, const_rtx);
+extern int anti_dependence (const_rtx, const_rtx);
+extern int canon_anti_dependence (const_rtx, bool,
+ const_rtx, machine_mode, rtx);
+extern int output_dependence (const_rtx, const_rtx);
+extern int canon_output_dependence (const_rtx, bool,
+ const_rtx, machine_mode, rtx);
+extern int may_alias_p (const_rtx, const_rtx);
+extern void init_alias_target (void);
+extern void init_alias_analysis (void);
+extern void end_alias_analysis (void);
+extern void vt_equate_reg_base_value (const_rtx, const_rtx);
+extern bool memory_modified_in_insn_p (const_rtx, const_rtx);
+extern bool may_be_sp_based_p (rtx);
+extern rtx gen_hard_reg_clobber (machine_mode, unsigned int);
+extern rtx get_reg_known_value (unsigned int);
+extern bool get_reg_known_equiv_p (unsigned int);
+extern rtx get_reg_base_value (unsigned int);
+extern rtx extract_mem_from_operand (rtx);
+
+#ifdef STACK_REGS
+extern int stack_regs_mentioned (const_rtx insn);
+#endif
+
+/* In toplev.cc */
+extern GTY(()) rtx stack_limit_rtx;
+
+/* In var-tracking.cc */
+extern unsigned int variable_tracking_main (void);
+extern void delete_vta_debug_insns (bool);
+
+/* In stor-layout.cc. */
+extern void get_mode_bounds (scalar_int_mode, int,
+ scalar_int_mode, rtx *, rtx *);
+
+/* In loop-iv.cc */
+extern rtx canon_condition (rtx);
+extern void simplify_using_condition (rtx, rtx *, bitmap);
+
+/* In final.cc */
+extern unsigned int compute_alignments (void);
+extern void update_alignments (vec<rtx> &);
+extern int asm_str_count (const char *templ);
+
+struct rtl_hooks
+{
+ rtx (*gen_lowpart) (machine_mode, rtx);
+ rtx (*gen_lowpart_no_emit) (machine_mode, rtx);
+ rtx (*reg_nonzero_bits) (const_rtx, scalar_int_mode, scalar_int_mode,
+ unsigned HOST_WIDE_INT *);
+ rtx (*reg_num_sign_bit_copies) (const_rtx, scalar_int_mode, scalar_int_mode,
+ unsigned int *);
+ bool (*reg_truncated_to_mode) (machine_mode, const_rtx);
+
+ /* Whenever you add entries here, make sure you adjust rtlhooks-def.h. */
+};
+
+/* Each pass can provide its own. */
+extern struct rtl_hooks rtl_hooks;
+
+/* ... but then it has to restore these. */
+extern const struct rtl_hooks general_rtl_hooks;
+
+/* Keep this for the nonce. */
+#define gen_lowpart rtl_hooks.gen_lowpart
+
+extern void insn_locations_init (void);
+extern void insn_locations_finalize (void);
+extern void set_curr_insn_location (location_t);
+extern location_t curr_insn_location (void);
+extern void set_insn_locations (rtx_insn *, location_t);
+
+/* rtl-error.cc */
+extern void _fatal_insn_not_found (const_rtx, const char *, int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void _fatal_insn (const char *, const_rtx, const char *, int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+
+#define fatal_insn(msgid, insn) \
+ _fatal_insn (msgid, insn, __FILE__, __LINE__, __FUNCTION__)
+#define fatal_insn_not_found(insn) \
+ _fatal_insn_not_found (insn, __FILE__, __LINE__, __FUNCTION__)
+
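+/* Usage sketch (editor's addition): backends typically report an
+   unexpected pattern with
+
+     fatal_insn ("unrecognizable insn:", insn);
+
+   which dumps the insn and aborts with file/line/function context.  */
+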
+/* reginfo.cc */
+extern tree GTY(()) global_regs_decl[FIRST_PSEUDO_REGISTER];
+
+/* Information about the function that is propagated by the RTL backend.
+   Available only for functions that have already been assembled.  */
+
+struct GTY(()) cgraph_rtl_info {
+ unsigned int preferred_incoming_stack_boundary;
+
+ /* Which registers the function clobbers, either directly or by
+ calling another function. */
+ HARD_REG_SET function_used_regs;
+};
+
+/* If loads from memories of mode MODE always sign or zero extend,
+ return SIGN_EXTEND or ZERO_EXTEND as appropriate. Return UNKNOWN
+ otherwise. */
+
+inline rtx_code
+load_extend_op (machine_mode mode)
+{
+ scalar_int_mode int_mode;
+ if (is_a <scalar_int_mode> (mode, &int_mode)
+ && GET_MODE_PRECISION (int_mode) < BITS_PER_WORD)
+ return LOAD_EXTEND_OP (int_mode);
+ return UNKNOWN;
+}
+
+/* If X is a PLUS of a base and a constant offset, add the constant to *OFFSET
+ and return the base. Return X otherwise. */
+
+inline rtx
+strip_offset_and_add (rtx x, poly_int64_pod *offset)
+{
+ if (GET_CODE (x) == PLUS)
+ {
+ poly_int64 suboffset;
+ x = strip_offset (x, &suboffset);
+ *offset = poly_uint64 (*offset) + suboffset;
+ }
+ return x;
+}
+
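+/* For instance (editor's note): with X = (plus (reg 100) (const_int 8))
+   and *OFFSET = 4 on entry, strip_offset_and_add returns (reg 100)
+   and leaves *OFFSET = 12.  */
+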
+/* Return true if X is an operation that always operates on the full
+ registers for WORD_REGISTER_OPERATIONS architectures. */
+
+inline bool
+word_register_operation_p (const_rtx x)
+{
+ switch (GET_CODE (x))
+ {
+ case CONST_INT:
+ case ROTATE:
+ case ROTATERT:
+ case SIGN_EXTRACT:
+ case ZERO_EXTRACT:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
+/* Holds an rtx comparison to simplify passing many parameters pertaining to a
+ single comparison. */
+
+struct rtx_comparison {
+ rtx_code code;
+ rtx op0, op1;
+ machine_mode mode;
+};
+
+/* gtype-desc.cc. */
+extern void gt_ggc_mx (rtx &);
+extern void gt_pch_nx (rtx &);
+extern void gt_pch_nx (rtx &, gt_pointer_operator, void *);
+
+#endif /* ! GCC_RTL_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtlanal.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtlanal.h
new file mode 100644
index 0000000..8762276
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtlanal.h
@@ -0,0 +1,343 @@
+/* Analyze RTL for GNU compiler.
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Note that for historical reasons, many rtlanal.cc functions are
+ declared in rtl.h rather than here. */
+
+#ifndef GCC_RTLANAL_H
+#define GCC_RTLANAL_H
+
+/* A dummy register value that represents the whole of variable memory.
+ Using ~0U means that arrays that track both registers and memory can
+ be indexed by regno + 1. */
+const unsigned int MEM_REGNO = ~0U;
+
+/* Bitmasks of flags describing an rtx_obj_reference. See the accessors
+ in the class for details. */
+namespace rtx_obj_flags
+{
+ const uint16_t IS_READ = 1U << 0;
+ const uint16_t IS_WRITE = 1U << 1;
+ const uint16_t IS_CLOBBER = 1U << 2;
+ const uint16_t IS_PRE_POST_MODIFY = 1U << 3;
+ const uint16_t IS_MULTIREG = 1U << 4;
+ const uint16_t IN_MEM_LOAD = 1U << 5;
+ const uint16_t IN_MEM_STORE = 1U << 6;
+ const uint16_t IN_SUBREG = 1U << 7;
+ const uint16_t IN_NOTE = 1U << 8;
+
+ /* Flags that apply to all subrtxes of the rtx they were originally
+ added for. */
+ static const uint16_t STICKY_FLAGS = IN_NOTE;
+}
+
+/* Contains information about a reference to a register or variable memory. */
+class rtx_obj_reference
+{
+public:
+ rtx_obj_reference () = default;
+ rtx_obj_reference (unsigned int regno, uint16_t flags,
+ machine_mode mode, unsigned int multireg_offset = 0);
+
+ bool is_reg () const { return regno != MEM_REGNO; }
+ bool is_mem () const { return regno == MEM_REGNO; }
+
+ /* True if the reference is a read or a write respectively.
+ Both flags are set in a read-modify-write context, such as
+ for read_modify_subreg_p. */
+ bool is_read () const { return flags & rtx_obj_flags::IS_READ; }
+ bool is_write () const { return flags & rtx_obj_flags::IS_WRITE; }
+
+ /* True if IS_WRITE and if the write is a clobber rather than a set. */
+ bool is_clobber () const { return flags & rtx_obj_flags::IS_CLOBBER; }
+
+ /* True if the reference is updated by an RTX_AUTOINC. Both IS_READ
+ and IS_WRITE are also true if so. */
+ bool is_pre_post_modify () const
+ {
+ return flags & rtx_obj_flags::IS_PRE_POST_MODIFY;
+ }
+
+ /* True if the register is part of a multi-register hard REG. */
+ bool is_multireg () const { return flags & rtx_obj_flags::IS_MULTIREG; }
+
+ /* True if the reference occurs in the address of a load MEM. */
+ bool in_mem_load () const { return flags & rtx_obj_flags::IN_MEM_LOAD; }
+
+ /* True if the reference occurs in the address of a store MEM. */
+ bool in_mem_store () const { return flags & rtx_obj_flags::IN_MEM_STORE; }
+
+ /* True if the reference occurs in any kind of MEM address. */
+ bool in_address () const { return in_mem_load () || in_mem_store (); }
+
+ /* True if the reference occurs in a SUBREG. */
+ bool in_subreg () const { return flags & rtx_obj_flags::IN_SUBREG; }
+
+ /* True if the reference occurs in a REG_EQUAL or REG_EQUIV note. */
+ bool in_note () const { return flags & rtx_obj_flags::IN_NOTE; }
+
+ /* The referenced register, or MEM_REGNO for variable memory. */
+ unsigned int regno;
+
+ /* A bitmask of rtx_obj_flags. */
+ unsigned int flags : 16;
+
+ /* The mode of the reference. If IS_MULTIREG, this is the mode of
+ REGNO - MULTIREG_OFFSET. */
+ machine_mode mode : 8;
+
+ /* If IS_MULTIREG, the offset of REGNO from the start of the register. */
+ unsigned int multireg_offset : 8;
+};
+
+/* Construct a reference with the given fields. */
+
+inline rtx_obj_reference::rtx_obj_reference (unsigned int regno, uint16_t flags,
+ machine_mode mode,
+ unsigned int multireg_offset)
+ : regno (regno),
+ flags (flags),
+ mode (mode),
+ multireg_offset (multireg_offset)
+{
+}
+
+/* Contains information about an rtx or an instruction, including a
+ list of rtx_obj_references. The storage backing the list needs
+ to be filled in by assigning to REF_BEGIN and REF_END. */
+
+class rtx_properties
+{
+public:
+ rtx_properties ();
+
+ void try_to_add_reg (const_rtx x, unsigned int flags = 0);
+ void try_to_add_dest (const_rtx x, unsigned int flags = 0);
+ void try_to_add_src (const_rtx x, unsigned int flags = 0);
+ void try_to_add_pattern (const_rtx pat);
+ void try_to_add_note (const_rtx x);
+ void try_to_add_insn (const rtx_insn *insn, bool include_notes);
+
+ iterator_range<rtx_obj_reference *> refs () const;
+
+ /* Return the number of rtx_obj_references that have been recorded. */
+ size_t num_refs () const { return ref_iter - ref_begin; }
+
+ bool has_side_effects () const;
+
+ /* [REF_BEGIN, REF_END) is the maximum extent of the memory available
+     for recording references.  REF_ITER is the first unused entry.  */
+ rtx_obj_reference *ref_begin;
+ rtx_obj_reference *ref_iter;
+ rtx_obj_reference *ref_end;
+
+ /* True if the rtx includes an asm. */
+ unsigned int has_asm : 1;
+
+ /* True if the rtx includes a call. */
+ unsigned int has_call : 1;
+
+ /* True if the rtx includes an RTX_AUTOINC expression. */
+ unsigned int has_pre_post_modify : 1;
+
+ /* True if the rtx contains volatile references, in the sense of
+ volatile_refs_p. */
+ unsigned int has_volatile_refs : 1;
+
+ /* For future expansion. */
+ unsigned int spare : 28;
+};
+
+inline rtx_properties::rtx_properties ()
+ : ref_begin (nullptr),
+ ref_iter (nullptr),
+ ref_end (nullptr),
+ has_asm (false),
+ has_call (false),
+ has_pre_post_modify (false),
+ has_volatile_refs (false),
+ spare (0)
+{
+}
+
+/* Like add_src, but treat X as being part of a REG_EQUAL or
+ REG_EQUIV note. */
+
+inline void
+rtx_properties::try_to_add_note (const_rtx x)
+{
+ try_to_add_src (x, rtx_obj_flags::IN_NOTE);
+}
+
+/* Return true if the rtx has side effects, in the sense of
+ side_effects_p (except for side_effects_p's special handling
+ of combine.cc clobbers). */
+
+inline bool
+rtx_properties::has_side_effects () const
+{
+ return has_volatile_refs || has_pre_post_modify || has_call;
+}
+
+/* Return an iterator range for all the references, suitable for
+ range-based for loops. */
+
+inline iterator_range<rtx_obj_reference *>
+rtx_properties::refs () const
+{
+ return { ref_begin, ref_iter };
+}
+
+/* BASE is derived from rtx_properties and provides backing storage
+ for REF_BEGIN. It has a grow () method that increases the amount
+ of memory available if the initial allocation was too small. */
+
+template<typename Base>
+class growing_rtx_properties : public Base
+{
+public:
+ template<typename... Args>
+ growing_rtx_properties (Args...);
+
+ template<typename AddFn>
+ void repeat (AddFn add);
+
+ /* Wrappers around the try_to_* functions that always succeed. */
+ void add_dest (const_rtx x, unsigned int flags = 0);
+ void add_src (const_rtx x, unsigned int flags = 0);
+ void add_pattern (const_rtx pat);
+ void add_note (const_rtx x);
+ void add_insn (const rtx_insn *insn, bool include_notes);
+};
+
+template<typename Base>
+template<typename... Args>
+growing_rtx_properties<Base>::growing_rtx_properties (Args... args)
+ : Base (std::forward<Args> (args)...)
+{
+}
+
+/* Perform ADD until there is enough room to hold the result. */
+
+template<typename Base>
+template<typename AddFn>
+inline void
+growing_rtx_properties<Base>::repeat (AddFn add)
+{
+ ptrdiff_t count = this->num_refs ();
+ for (;;)
+ {
+ add ();
+ /* This retries if the storage happened to be exactly the right size,
+ but that's expected to be a rare case and so isn't worth
+ optimizing for. */
+ if (LIKELY (this->ref_iter != this->ref_end))
+ break;
+ this->grow (count);
+ }
+}
+
+template<typename Base>
+inline void
+growing_rtx_properties<Base>::add_dest (const_rtx x, unsigned int flags)
+{
+ repeat ([&]() { this->try_to_add_dest (x, flags); });
+}
+
+template<typename Base>
+inline void
+growing_rtx_properties<Base>::add_src (const_rtx x, unsigned int flags)
+{
+ repeat ([&]() { this->try_to_add_src (x, flags); });
+}
+
+template<typename Base>
+inline void
+growing_rtx_properties<Base>::add_pattern (const_rtx pat)
+{
+ repeat ([&]() { this->try_to_add_pattern (pat); });
+}
+
+template<typename Base>
+inline void
+growing_rtx_properties<Base>::add_note (const_rtx x)
+{
+ repeat ([&]() { this->try_to_add_note (x); });
+}
+
+template<typename Base>
+inline void
+growing_rtx_properties<Base>::add_insn (const rtx_insn *insn, bool include_notes)
+{
+ repeat ([&]() { this->try_to_add_insn (insn, include_notes); });
+}
+
+/* A base class for vec_rtx_properties; see there for details. */
+
+class vec_rtx_properties_base : public rtx_properties
+{
+ static const size_t SIZE = 32;
+
+public:
+ vec_rtx_properties_base ();
+ ~vec_rtx_properties_base ();
+
+protected:
+ void grow (ptrdiff_t);
+
+private:
+ rtx_obj_reference m_storage[SIZE];
+};
+
+inline vec_rtx_properties_base::vec_rtx_properties_base ()
+{
+ ref_begin = ref_iter = m_storage;
+ ref_end = m_storage + SIZE;
+}
+
+inline vec_rtx_properties_base::~vec_rtx_properties_base ()
+{
+ if (UNLIKELY (ref_begin != m_storage))
+ free (ref_begin);
+}
+
+/* An rtx_properties that stores its references in a temporary array.
+ Like auto_vec, the array is initially on the stack, but can switch
+ to the heap if necessary.
+
+ The reason for implementing this as a derived class is that the
+ default on-stack size should be enough for the vast majority of
+ expressions and instructions. It's therefore not worth paying
+ the cost of conditionally calling grow code at every site that
+ records a new reference. Instead, the rtx_properties code can use
+ trivial iterator updates for the common case, and in the rare case
+ that the vector needs to be resized, we can pay the cost of
+ collecting the references a second time. */
+using vec_rtx_properties = growing_rtx_properties<vec_rtx_properties_base>;
+
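+/* Usage sketch (editor's addition): a typical scan over the registers
+   written by an insn looks like
+
+     vec_rtx_properties properties;
+     properties.add_insn (insn, false);
+     for (rtx_obj_reference ref : properties.refs ())
+       if (ref.is_reg () && ref.is_write ())
+         handle_reg_set (ref.regno);
+
+   where INSN is an rtx_insn * and handle_reg_set is a placeholder.  */
+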
+bool
+vec_series_highpart_p (machine_mode result_mode, machine_mode op_mode,
+ rtx sel);
+
+bool
+vec_series_lowpart_p (machine_mode result_mode, machine_mode op_mode, rtx sel);
+
+bool
+contains_paradoxical_subreg_p (rtx x);
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtlhash.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtlhash.h
new file mode 100644
index 0000000..52756a9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtlhash.h
@@ -0,0 +1,31 @@
+/* Register Transfer Language (RTL) hash functions.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef RTL_HASH_H
+#define RTL_HASH_H 1
+
+
+namespace inchash
+{
+
+extern void add_rtx (const_rtx, hash &);
+
+}
+
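+/* Usage sketch (editor's addition): hashing an rtx X incrementally:
+
+     inchash::hash h;
+     inchash::add_rtx (x, h);
+     hashval_t v = h.end ();
+
+   following the inchash conventions from inchash.h.  */
+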
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtlhooks-def.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtlhooks-def.h
new file mode 100644
index 0000000..208dace
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtlhooks-def.h
@@ -0,0 +1,48 @@
+/* Default macros to initialize an rtl_hooks data structure.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RTL_HOOKS_DEF_H
+#define GCC_RTL_HOOKS_DEF_H
+
+#include "rtl.h"
+
+#define RTL_HOOKS_GEN_LOWPART gen_lowpart_general
+#define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_if_possible
+#define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_general
+#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_general
+#define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode_general
+
+/* The structure is defined in rtl.h. */
+#define RTL_HOOKS_INITIALIZER { \
+ RTL_HOOKS_GEN_LOWPART, \
+ RTL_HOOKS_GEN_LOWPART_NO_EMIT, \
+ RTL_HOOKS_REG_NONZERO_REG_BITS, \
+ RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES, \
+ RTL_HOOKS_REG_TRUNCATED_TO_MODE \
+}
+
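+/* Usage sketch (editor's addition): a pass that wants its own
+   gen_lowpart but the defaults everywhere else can redefine the
+   corresponding macro before using the initializer, e.g.
+
+     #undef  RTL_HOOKS_GEN_LOWPART
+     #define RTL_HOOKS_GEN_LOWPART my_gen_lowpart
+     static const struct rtl_hooks my_hooks = RTL_HOOKS_INITIALIZER;
+
+   and then install MY_HOOKS by assigning to rtl_hooks for the duration
+   of the pass, restoring general_rtl_hooks afterwards (see rtl.h);
+   my_gen_lowpart and my_hooks are placeholder names.  */
+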
+extern rtx gen_lowpart_general (machine_mode, rtx);
+extern rtx reg_nonzero_bits_general (const_rtx, scalar_int_mode,
+ scalar_int_mode,
+ unsigned HOST_WIDE_INT *);
+extern rtx reg_num_sign_bit_copies_general (const_rtx, scalar_int_mode,
+ scalar_int_mode, unsigned int *);
+extern bool reg_truncated_to_mode_general (machine_mode, const_rtx);
+
+#endif /* GCC_RTL_HOOKS_DEF_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtx-vector-builder.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtx-vector-builder.h
new file mode 100644
index 0000000..bc6bb86
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/rtx-vector-builder.h
@@ -0,0 +1,125 @@
+/* A class for building vector rtx constants.
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RTX_VECTOR_BUILDER_H
+#define GCC_RTX_VECTOR_BUILDER_H
+
+#include "vector-builder.h"
+
+/* This class is used to build VECTOR_CSTs from a sequence of elements.
+ See vector_builder for more details. */
+class rtx_vector_builder : public vector_builder<rtx, machine_mode,
+ rtx_vector_builder>
+{
+ typedef vector_builder<rtx, machine_mode, rtx_vector_builder> parent;
+ friend class vector_builder<rtx, machine_mode, rtx_vector_builder>;
+
+public:
+ rtx_vector_builder () : m_mode (VOIDmode) {}
+ rtx_vector_builder (machine_mode, unsigned int, unsigned int);
+ rtx build (rtvec);
+ rtx build ();
+
+ machine_mode mode () const { return m_mode; }
+
+ void new_vector (machine_mode, unsigned int, unsigned int);
+
+private:
+ bool equal_p (rtx, rtx) const;
+ bool allow_steps_p () const;
+ bool integral_p (rtx) const;
+ poly_wide_int step (rtx, rtx) const;
+ rtx apply_step (rtx, unsigned int, const poly_wide_int &) const;
+ bool can_elide_p (rtx) const { return true; }
+ void note_representative (rtx *, rtx) {}
+
+ static poly_uint64 shape_nelts (machine_mode mode)
+ { return GET_MODE_NUNITS (mode); }
+ static poly_uint64 nelts_of (const_rtx x)
+ { return CONST_VECTOR_NUNITS (x); }
+ static unsigned int npatterns_of (const_rtx x)
+ { return CONST_VECTOR_NPATTERNS (x); }
+ static unsigned int nelts_per_pattern_of (const_rtx x)
+ { return CONST_VECTOR_NELTS_PER_PATTERN (x); }
+
+ rtx find_cached_value ();
+
+ machine_mode m_mode;
+};
+
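+/* Usage sketch (editor's addition): the V4SI constant {0, 1, 2, 3}
+   can be encoded as a single pattern of three stepped elements:
+
+     rtx_vector_builder builder (V4SImode, 1, 3);
+     builder.quick_push (const0_rtx);
+     builder.quick_push (const1_rtx);
+     builder.quick_push (GEN_INT (2));
+     rtx vec = builder.build ();
+
+   assuming the target provides V4SImode; see vector-builder.h for the
+   encoding rules.  */
+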
+/* Create a new builder for a vector of mode MODE. Initially encode the
+ value as NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements
+ each. */
+
+inline
+rtx_vector_builder::rtx_vector_builder (machine_mode mode,
+ unsigned int npatterns,
+ unsigned int nelts_per_pattern)
+{
+ new_vector (mode, npatterns, nelts_per_pattern);
+}
+
+/* Start building a new vector of mode MODE. Initially encode the value
+ as NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each. */
+
+inline void
+rtx_vector_builder::new_vector (machine_mode mode, unsigned int npatterns,
+ unsigned int nelts_per_pattern)
+{
+ m_mode = mode;
+ parent::new_vector (GET_MODE_NUNITS (mode), npatterns, nelts_per_pattern);
+}
+
+/* Return true if elements ELT1 and ELT2 are equal. */
+
+inline bool
+rtx_vector_builder::equal_p (rtx elt1, rtx elt2) const
+{
+ return rtx_equal_p (elt1, elt2);
+}
+
+/* Return true if a stepped representation is OK. We don't allow
+ linear series for anything other than integers, to avoid problems
+ with rounding. */
+
+inline bool
+rtx_vector_builder::allow_steps_p () const
+{
+ return is_a <scalar_int_mode> (GET_MODE_INNER (m_mode));
+}
+
+/* Return true if element ELT can be interpreted as an integer. */
+
+inline bool
+rtx_vector_builder::integral_p (rtx elt) const
+{
+ return CONST_SCALAR_INT_P (elt);
+}
+
+/* Return the value of element ELT2 minus the value of element ELT1.
+ Both elements are known to be CONST_SCALAR_INT_Ps. */
+
+inline poly_wide_int
+rtx_vector_builder::step (rtx elt1, rtx elt2) const
+{
+ return (wi::to_poly_wide (elt2, GET_MODE_INNER (m_mode))
+ - wi::to_poly_wide (elt1, GET_MODE_INNER (m_mode)));
+}
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/run-rtl-passes.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/run-rtl-passes.h
new file mode 100644
index 0000000..4474bcd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/run-rtl-passes.h
@@ -0,0 +1,25 @@
+/* run-rtl-passes.h - Run a subset of the RTL passes
+ Copyright (C) 2016-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RUN_RTL_PASSES_H
+#define GCC_RUN_RTL_PASSES_H
+
+extern void run_rtl_passes (char *initial_pass_name);
+
+#endif /* GCC_RUN_RTL_PASSES_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/safe-ctype.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/safe-ctype.h
new file mode 100644
index 0000000..a3cb185
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/safe-ctype.h
@@ -0,0 +1,150 @@
+/* <ctype.h> replacement macros.
+
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+ Contributed by Zack Weinberg <zackw@stanford.edu>.
+
+This file is part of the libiberty library.
+Libiberty is free software; you can redistribute it and/or
+modify it under the terms of the GNU Library General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+Libiberty is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Library General Public
+License along with libiberty; see the file COPYING.LIB. If
+not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+/* This is a compatible replacement of the standard C library's <ctype.h>
+ with the following properties:
+
+ - Implements all isxxx() macros required by C99.
+ - Also implements some character classes useful when
+ parsing C-like languages.
+ - Does not change behavior depending on the current locale.
+ - Behaves properly for all values in the range of a signed or
+ unsigned char.
+
+ To avoid conflicts, this header defines the isxxx functions in upper
+ case, e.g. ISALPHA not isalpha. */
+
+#ifndef SAFE_CTYPE_H
+#define SAFE_CTYPE_H
+
+/* Determine host character set. */
+#define HOST_CHARSET_UNKNOWN 0
+#define HOST_CHARSET_ASCII 1
+#define HOST_CHARSET_EBCDIC 2
+
+#if '\n' == 0x0A && ' ' == 0x20 && '0' == 0x30 \
+ && 'A' == 0x41 && 'a' == 0x61 && '!' == 0x21
+# define HOST_CHARSET HOST_CHARSET_ASCII
+#else
+# if '\n' == 0x15 && ' ' == 0x40 && '0' == 0xF0 \
+ && 'A' == 0xC1 && 'a' == 0x81 && '!' == 0x5A
+# define HOST_CHARSET HOST_CHARSET_EBCDIC
+# else
+# define HOST_CHARSET HOST_CHARSET_UNKNOWN
+# endif
+#endif
+
+/* Categories. */
+
+enum {
+ /* In C99 */
+ _sch_isblank = 0x0001, /* space \t */
+ _sch_iscntrl = 0x0002, /* nonprinting characters */
+ _sch_isdigit = 0x0004, /* 0-9 */
+ _sch_islower = 0x0008, /* a-z */
+ _sch_isprint = 0x0010, /* any printing character including ' ' */
+ _sch_ispunct = 0x0020, /* all punctuation */
+ _sch_isspace = 0x0040, /* space \t \n \r \f \v */
+ _sch_isupper = 0x0080, /* A-Z */
+ _sch_isxdigit = 0x0100, /* 0-9A-Fa-f */
+
+ /* Extra categories useful to cpplib. */
+ _sch_isidst = 0x0200, /* A-Za-z_ */
+ _sch_isvsp = 0x0400, /* \n \r */
+ _sch_isnvsp = 0x0800, /* space \t \f \v \0 */
+
+ /* Combinations of the above. */
+ _sch_isalpha = _sch_isupper|_sch_islower, /* A-Za-z */
+ _sch_isalnum = _sch_isalpha|_sch_isdigit, /* A-Za-z0-9 */
+ _sch_isidnum = _sch_isidst|_sch_isdigit, /* A-Za-z0-9_ */
+ _sch_isgraph = _sch_isalnum|_sch_ispunct, /* isprint and not space */
+ _sch_iscppsp = _sch_isvsp|_sch_isnvsp, /* isspace + \0 */
+ _sch_isbasic = _sch_isprint|_sch_iscppsp /* basic charset of ISO C
+ (plus ` and @) */
+};
+
+/* Character classification. */
+extern const unsigned short _sch_istable[256];
+
+#define _sch_test(c, bit) (_sch_istable[(c) & 0xff] & (unsigned short)(bit))
+
+#define ISALPHA(c) _sch_test(c, _sch_isalpha)
+#define ISALNUM(c) _sch_test(c, _sch_isalnum)
+#define ISBLANK(c) _sch_test(c, _sch_isblank)
+#define ISCNTRL(c) _sch_test(c, _sch_iscntrl)
+#define ISDIGIT(c) _sch_test(c, _sch_isdigit)
+#define ISGRAPH(c) _sch_test(c, _sch_isgraph)
+#define ISLOWER(c) _sch_test(c, _sch_islower)
+#define ISPRINT(c) _sch_test(c, _sch_isprint)
+#define ISPUNCT(c) _sch_test(c, _sch_ispunct)
+#define ISSPACE(c) _sch_test(c, _sch_isspace)
+#define ISUPPER(c) _sch_test(c, _sch_isupper)
+#define ISXDIGIT(c) _sch_test(c, _sch_isxdigit)
+
+#define ISIDNUM(c) _sch_test(c, _sch_isidnum)
+#define ISIDST(c) _sch_test(c, _sch_isidst)
+#define IS_ISOBASIC(c) _sch_test(c, _sch_isbasic)
+#define IS_VSPACE(c) _sch_test(c, _sch_isvsp)
+#define IS_NVSPACE(c) _sch_test(c, _sch_isnvsp)
+#define IS_SPACE_OR_NUL(c) _sch_test(c, _sch_iscppsp)
+
+/* Character transformation. */
+extern const unsigned char _sch_toupper[256];
+extern const unsigned char _sch_tolower[256];
+#define TOUPPER(c) _sch_toupper[(c) & 0xff]
+#define TOLOWER(c) _sch_tolower[(c) & 0xff]
+
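+/* Usage sketch (editor's addition): unlike their <ctype.h>
+   counterparts, these macros may be applied to a plain char without
+   a cast, regardless of whether char is signed:
+
+     const char *p = ident;
+     while (ISIDNUM (*p))
+       ++p;
+
+   since each macro masks its argument with 0xff before the table
+   lookup.  */
+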
+/* Prevent the users of safe-ctype.h from accidentally using the routines
+ from ctype.h. Initially, the approach was to produce an error when
+ detecting that ctype.h has been included. But this was causing
+ trouble as ctype.h might get indirectly included as a result of
+ including another system header (for instance gnulib's stdint.h).
+ So we include ctype.h here and then immediately redefine its macros. */
+
+#include <ctype.h>
+#undef isalpha
+#define isalpha(c) do_not_use_isalpha_with_safe_ctype
+#undef isalnum
+#define isalnum(c) do_not_use_isalnum_with_safe_ctype
+#undef iscntrl
+#define iscntrl(c) do_not_use_iscntrl_with_safe_ctype
+#undef isdigit
+#define isdigit(c) do_not_use_isdigit_with_safe_ctype
+#undef isgraph
+#define isgraph(c) do_not_use_isgraph_with_safe_ctype
+#undef islower
+#define islower(c) do_not_use_islower_with_safe_ctype
+#undef isprint
+#define isprint(c) do_not_use_isprint_with_safe_ctype
+#undef ispunct
+#define ispunct(c) do_not_use_ispunct_with_safe_ctype
+#undef isspace
+#define isspace(c) do_not_use_isspace_with_safe_ctype
+#undef isupper
+#define isupper(c) do_not_use_isupper_with_safe_ctype
+#undef isxdigit
+#define isxdigit(c) do_not_use_isxdigit_with_safe_ctype
+#undef toupper
+#define toupper(c) do_not_use_toupper_with_safe_ctype
+#undef tolower
+#define tolower(c) do_not_use_tolower_with_safe_ctype
+
+#endif /* SAFE_CTYPE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sanitizer.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sanitizer.def
new file mode 100644
index 0000000..d47cc7d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sanitizer.def
@@ -0,0 +1,669 @@
+/* This file contains the definitions and documentation for the
+ Address Sanitizer and Thread Sanitizer builtins used in the GNU compiler.
+ Copyright (C) 2012-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Before including this file, you should define the following macros:
+
+ DEF_BUILTIN_STUB(ENUM, NAME)
+ DEF_SANITIZER_BUILTIN (ENUM, NAME, TYPE, ATTRS)
+
+ See builtins.def for details.
+   The builtins are created by the C-family front ends in
+   c-family/c-common.cc, and for other front ends by asan.cc.  */
+
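+/* Minimal consumer sketch (editor's addition): a table of the builtin
+   names could be generated with
+
+     #define DEF_BUILTIN_STUB(ENUM, NAME)
+     #define DEF_SANITIZER_BUILTIN(ENUM, NAME, TYPE, ATTRS) NAME,
+     const char *const names[] = {
+     #include "sanitizer.def"
+     };
+     #undef DEF_SANITIZER_BUILTIN
+     #undef DEF_BUILTIN_STUB
+
+   following the usual .def-file convention from builtins.def.  */
+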
+/* This has to come before all the sanitizer builtins. */
+DEF_BUILTIN_STUB(BEGIN_SANITIZER_BUILTINS, (const char *)0)
+
+/* Address Sanitizer */
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_INIT, "__asan_init",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_VERSION_MISMATCH_CHECK,
+ "__asan_version_mismatch_check_v8",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+/* Do not reorder the BUILT_IN_ASAN_{REPORT,CHECK}* builtins, e.g. cfgcleanup.cc
+ relies on this order. */
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD1, "__asan_report_load1",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD2, "__asan_report_load2",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD4, "__asan_report_load4",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD8, "__asan_report_load8",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD16, "__asan_report_load16",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD_N, "__asan_report_load_n",
+ BT_FN_VOID_PTR_PTRMODE,
+ ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE1, "__asan_report_store1",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE2, "__asan_report_store2",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE4, "__asan_report_store4",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE8, "__asan_report_store8",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE16, "__asan_report_store16",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE_N, "__asan_report_store_n",
+ BT_FN_VOID_PTR_PTRMODE,
+ ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD1_NOABORT,
+ "__asan_report_load1_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD2_NOABORT,
+ "__asan_report_load2_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD4_NOABORT,
+ "__asan_report_load4_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD8_NOABORT,
+ "__asan_report_load8_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD16_NOABORT,
+ "__asan_report_load16_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD_N_NOABORT,
+ "__asan_report_load_n_noabort",
+ BT_FN_VOID_PTR_PTRMODE,
+ ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE1_NOABORT,
+ "__asan_report_store1_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE2_NOABORT,
+ "__asan_report_store2_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE4_NOABORT,
+ "__asan_report_store4_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE8_NOABORT,
+ "__asan_report_store8_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE16_NOABORT,
+ "__asan_report_store16_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE_N_NOABORT,
+ "__asan_report_store_n_noabort",
+ BT_FN_VOID_PTR_PTRMODE,
+ ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD1, "__asan_load1",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD2, "__asan_load2",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD4, "__asan_load4",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD8, "__asan_load8",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD16, "__asan_load16",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOADN, "__asan_loadN",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE1, "__asan_store1",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE2, "__asan_store2",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE4, "__asan_store4",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE8, "__asan_store8",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE16, "__asan_store16",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STOREN, "__asan_storeN",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD1_NOABORT, "__asan_load1_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD2_NOABORT, "__asan_load2_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD4_NOABORT, "__asan_load4_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD8_NOABORT, "__asan_load8_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD16_NOABORT, "__asan_load16_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOADN_NOABORT, "__asan_loadN_noabort",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE1_NOABORT, "__asan_store1_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE2_NOABORT, "__asan_store2_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE4_NOABORT, "__asan_store4_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE8_NOABORT, "__asan_store8_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE16_NOABORT, "__asan_store16_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STOREN_NOABORT, "__asan_storeN_noabort",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REGISTER_GLOBALS,
+ "__asan_register_globals",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_UNREGISTER_GLOBALS,
+ "__asan_unregister_globals",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_HANDLE_NO_RETURN,
+ "__asan_handle_no_return",
+ BT_FN_VOID, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_BEFORE_DYNAMIC_INIT,
+ "__asan_before_dynamic_init",
+ BT_FN_VOID_CONST_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_AFTER_DYNAMIC_INIT,
+ "__asan_after_dynamic_init",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_POISON_STACK_MEMORY,
+ "__asan_poison_stack_memory",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_UNPOISON_STACK_MEMORY,
+ "__asan_unpoison_stack_memory",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_ALLOCA_POISON, "__asan_alloca_poison",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_ALLOCAS_UNPOISON, "__asan_allocas_unpoison",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_POINTER_COMPARE, "__sanitizer_ptr_cmp",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_POINTER_SUBTRACT, "__sanitizer_ptr_sub",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_NOTHROW_LEAF_LIST)
+
+/* Hardware Address Sanitizer. */
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_INIT, "__hwasan_init",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_LOAD1, "__hwasan_load1",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_LOAD2, "__hwasan_load2",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_LOAD4, "__hwasan_load4",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_LOAD8, "__hwasan_load8",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_LOAD16, "__hwasan_load16",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_LOADN, "__hwasan_loadN",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_STORE1, "__hwasan_store1",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_STORE2, "__hwasan_store2",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_STORE4, "__hwasan_store4",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_STORE8, "__hwasan_store8",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_STORE16, "__hwasan_store16",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_STOREN, "__hwasan_storeN",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_LOAD1_NOABORT, "__hwasan_load1_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_LOAD2_NOABORT, "__hwasan_load2_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_LOAD4_NOABORT, "__hwasan_load4_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_LOAD8_NOABORT, "__hwasan_load8_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_LOAD16_NOABORT, "__hwasan_load16_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_LOADN_NOABORT, "__hwasan_loadN_noabort",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_STORE1_NOABORT, "__hwasan_store1_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_STORE2_NOABORT, "__hwasan_store2_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_STORE4_NOABORT, "__hwasan_store4_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_STORE8_NOABORT, "__hwasan_store8_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_STORE16_NOABORT,
+ "__hwasan_store16_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_STOREN_NOABORT, "__hwasan_storeN_noabort",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_TAG_MISMATCH4, "__hwasan_tag_mismatch4",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_HANDLE_LONGJMP, "__hwasan_handle_longjmp",
+ BT_FN_VOID_CONST_PTR, ATTR_NOTHROW_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_TAG_PTR, "__hwasan_tag_pointer",
+ BT_FN_PTR_CONST_PTR_UINT8, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_HWASAN_TAG_MEM, "__hwasan_tag_memory",
+ BT_FN_VOID_PTR_UINT8_PTRMODE, ATTR_NOTHROW_LIST)
+
+/* Thread Sanitizer */
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_INIT, "__tsan_init",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_FUNC_ENTRY, "__tsan_func_entry",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_FUNC_EXIT, "__tsan_func_exit",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_VPTR_UPDATE, "__tsan_vptr_update",
+ BT_FN_VOID_PTR_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_READ1, "__tsan_read1",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_READ2, "__tsan_read2",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_READ4, "__tsan_read4",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_READ8, "__tsan_read8",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_READ16, "__tsan_read16",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_WRITE1, "__tsan_write1",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_WRITE2, "__tsan_write2",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_WRITE4, "__tsan_write4",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_WRITE8, "__tsan_write8",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_WRITE16, "__tsan_write16",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_READ_RANGE, "__tsan_read_range",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_WRITE_RANGE, "__tsan_write_range",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_VOLATILE_READ1, "__tsan_volatile_read1",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_VOLATILE_READ2, "__tsan_volatile_read2",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_VOLATILE_READ4, "__tsan_volatile_read4",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_VOLATILE_READ8, "__tsan_volatile_read8",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_VOLATILE_READ16, "__tsan_volatile_read16",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_VOLATILE_WRITE1, "__tsan_volatile_write1",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_VOLATILE_WRITE2, "__tsan_volatile_write2",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_VOLATILE_WRITE4, "__tsan_volatile_write4",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_VOLATILE_WRITE8, "__tsan_volatile_write8",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_VOLATILE_WRITE16, "__tsan_volatile_write16",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC8_LOAD,
+ "__tsan_atomic8_load",
+ BT_FN_I1_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC16_LOAD,
+ "__tsan_atomic16_load",
+ BT_FN_I2_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC32_LOAD,
+ "__tsan_atomic32_load",
+ BT_FN_I4_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC64_LOAD,
+ "__tsan_atomic64_load",
+ BT_FN_I8_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC128_LOAD,
+ "__tsan_atomic128_load",
+ BT_FN_I16_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC8_STORE,
+ "__tsan_atomic8_store",
+ BT_FN_VOID_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC16_STORE,
+ "__tsan_atomic16_store",
+ BT_FN_VOID_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC32_STORE,
+ "__tsan_atomic32_store",
+ BT_FN_VOID_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC64_STORE,
+ "__tsan_atomic64_store",
+ BT_FN_VOID_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC128_STORE,
+ "__tsan_atomic128_store",
+ BT_FN_VOID_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC8_EXCHANGE,
+ "__tsan_atomic8_exchange",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC16_EXCHANGE,
+ "__tsan_atomic16_exchange",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC32_EXCHANGE,
+ "__tsan_atomic32_exchange",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC64_EXCHANGE,
+ "__tsan_atomic64_exchange",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC128_EXCHANGE,
+ "__tsan_atomic128_exchange",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC8_FETCH_ADD,
+ "__tsan_atomic8_fetch_add",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC16_FETCH_ADD,
+ "__tsan_atomic16_fetch_add",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC32_FETCH_ADD,
+ "__tsan_atomic32_fetch_add",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC64_FETCH_ADD,
+ "__tsan_atomic64_fetch_add",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC128_FETCH_ADD,
+ "__tsan_atomic128_fetch_add",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC8_FETCH_SUB,
+ "__tsan_atomic8_fetch_sub",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC16_FETCH_SUB,
+ "__tsan_atomic16_fetch_sub",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC32_FETCH_SUB,
+ "__tsan_atomic32_fetch_sub",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC64_FETCH_SUB,
+ "__tsan_atomic64_fetch_sub",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC128_FETCH_SUB,
+ "__tsan_atomic128_fetch_sub",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC8_FETCH_AND,
+ "__tsan_atomic8_fetch_and",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC16_FETCH_AND,
+ "__tsan_atomic16_fetch_and",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC32_FETCH_AND,
+ "__tsan_atomic32_fetch_and",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC64_FETCH_AND,
+ "__tsan_atomic64_fetch_and",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC128_FETCH_AND,
+ "__tsan_atomic128_fetch_and",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC8_FETCH_OR,
+ "__tsan_atomic8_fetch_or",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC16_FETCH_OR,
+ "__tsan_atomic16_fetch_or",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC32_FETCH_OR,
+ "__tsan_atomic32_fetch_or",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC64_FETCH_OR,
+ "__tsan_atomic64_fetch_or",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC128_FETCH_OR,
+ "__tsan_atomic128_fetch_or",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC8_FETCH_XOR,
+ "__tsan_atomic8_fetch_xor",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC16_FETCH_XOR,
+ "__tsan_atomic16_fetch_xor",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC32_FETCH_XOR,
+ "__tsan_atomic32_fetch_xor",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC64_FETCH_XOR,
+ "__tsan_atomic64_fetch_xor",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC128_FETCH_XOR,
+ "__tsan_atomic128_fetch_xor",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC8_FETCH_NAND,
+ "__tsan_atomic8_fetch_nand",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC16_FETCH_NAND,
+ "__tsan_atomic16_fetch_nand",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC32_FETCH_NAND,
+ "__tsan_atomic32_fetch_nand",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC64_FETCH_NAND,
+ "__tsan_atomic64_fetch_nand",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC128_FETCH_NAND,
+ "__tsan_atomic128_fetch_nand",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC8_COMPARE_EXCHANGE_STRONG,
+ "__tsan_atomic8_compare_exchange_strong",
+ BT_FN_BOOL_VPTR_PTR_I1_INT_INT,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC16_COMPARE_EXCHANGE_STRONG,
+ "__tsan_atomic16_compare_exchange_strong",
+ BT_FN_BOOL_VPTR_PTR_I2_INT_INT,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC32_COMPARE_EXCHANGE_STRONG,
+ "__tsan_atomic32_compare_exchange_strong",
+ BT_FN_BOOL_VPTR_PTR_I4_INT_INT,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC64_COMPARE_EXCHANGE_STRONG,
+ "__tsan_atomic64_compare_exchange_strong",
+ BT_FN_BOOL_VPTR_PTR_I8_INT_INT,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC128_COMPARE_EXCHANGE_STRONG,
+ "__tsan_atomic128_compare_exchange_strong",
+ BT_FN_BOOL_VPTR_PTR_I16_INT_INT,
+ ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC8_COMPARE_EXCHANGE_WEAK,
+ "__tsan_atomic8_compare_exchange_weak",
+ BT_FN_BOOL_VPTR_PTR_I1_INT_INT,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC16_COMPARE_EXCHANGE_WEAK,
+ "__tsan_atomic16_compare_exchange_weak",
+ BT_FN_BOOL_VPTR_PTR_I2_INT_INT,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC32_COMPARE_EXCHANGE_WEAK,
+ "__tsan_atomic32_compare_exchange_weak",
+ BT_FN_BOOL_VPTR_PTR_I4_INT_INT,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC64_COMPARE_EXCHANGE_WEAK,
+ "__tsan_atomic64_compare_exchange_weak",
+ BT_FN_BOOL_VPTR_PTR_I8_INT_INT,
+ ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC128_COMPARE_EXCHANGE_WEAK,
+ "__tsan_atomic128_compare_exchange_weak",
+ BT_FN_BOOL_VPTR_PTR_I16_INT_INT,
+ ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC_THREAD_FENCE,
+ "__tsan_atomic_thread_fence",
+ BT_FN_VOID_INT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_ATOMIC_SIGNAL_FENCE,
+ "__tsan_atomic_signal_fence",
+ BT_FN_VOID_INT, ATTR_NOTHROW_LEAF_LIST)
+
+/* Undefined Behavior Sanitizer */
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_DIVREM_OVERFLOW,
+ "__ubsan_handle_divrem_overflow",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS,
+ "__ubsan_handle_shift_out_of_bounds",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_BUILTIN_UNREACHABLE,
+ "__ubsan_handle_builtin_unreachable",
+ BT_FN_VOID_PTR,
+ ATTR_COLD_CONST_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_MISSING_RETURN,
+ "__ubsan_handle_missing_return",
+ BT_FN_VOID_PTR,
+ ATTR_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_VLA_BOUND_NOT_POSITIVE,
+ "__ubsan_handle_vla_bound_not_positive",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1,
+ "__ubsan_handle_type_mismatch_v1",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_ADD_OVERFLOW,
+ "__ubsan_handle_add_overflow",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_SUB_OVERFLOW,
+ "__ubsan_handle_sub_overflow",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_MUL_OVERFLOW,
+ "__ubsan_handle_mul_overflow",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NEGATE_OVERFLOW,
+ "__ubsan_handle_negate_overflow",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE,
+ "__ubsan_handle_load_invalid_value",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_POINTER_OVERFLOW,
+ "__ubsan_handle_pointer_overflow",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_DIVREM_OVERFLOW_ABORT,
+ "__ubsan_handle_divrem_overflow_abort",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT,
+ "__ubsan_handle_shift_out_of_bounds_abort",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_VLA_BOUND_NOT_POSITIVE_ABORT,
+ "__ubsan_handle_vla_bound_not_positive_abort",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1_ABORT,
+ "__ubsan_handle_type_mismatch_v1_abort",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_ADD_OVERFLOW_ABORT,
+ "__ubsan_handle_add_overflow_abort",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_SUB_OVERFLOW_ABORT,
+ "__ubsan_handle_sub_overflow_abort",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_MUL_OVERFLOW_ABORT,
+ "__ubsan_handle_mul_overflow_abort",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NEGATE_OVERFLOW_ABORT,
+ "__ubsan_handle_negate_overflow_abort",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE_ABORT,
+ "__ubsan_handle_load_invalid_value_abort",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_POINTER_OVERFLOW_ABORT,
+ "__ubsan_handle_pointer_overflow_abort",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_FLOAT_CAST_OVERFLOW,
+ "__ubsan_handle_float_cast_overflow",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_FLOAT_CAST_OVERFLOW_ABORT,
+ "__ubsan_handle_float_cast_overflow_abort",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_OUT_OF_BOUNDS,
+ "__ubsan_handle_out_of_bounds",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_OUT_OF_BOUNDS_ABORT,
+ "__ubsan_handle_out_of_bounds_abort",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NONNULL_ARG,
+ "__ubsan_handle_nonnull_arg",
+ BT_FN_VOID_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NONNULL_ARG_ABORT,
+ "__ubsan_handle_nonnull_arg_abort",
+ BT_FN_VOID_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_V1,
+ "__ubsan_handle_nonnull_return_v1",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_V1_ABORT,
+ "__ubsan_handle_nonnull_return_v1_abort",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_INVALID_BUILTIN,
+ "__ubsan_handle_invalid_builtin",
+ BT_FN_VOID_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_INVALID_BUILTIN_ABORT,
+ "__ubsan_handle_invalid_builtin_abort",
+ BT_FN_VOID_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_DYNAMIC_TYPE_CACHE_MISS,
+ "__ubsan_handle_dynamic_type_cache_miss",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_DYNAMIC_TYPE_CACHE_MISS_ABORT,
+ "__ubsan_handle_dynamic_type_cache_miss_abort",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+
+/* Sanitizer coverage */
+DEF_SANITIZER_BUILTIN(BUILT_IN_SANITIZER_COV_TRACE_PC,
+ "__sanitizer_cov_trace_pc",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_SANITIZER_COV_TRACE_CMP1,
+ "__sanitizer_cov_trace_cmp1",
+ BT_FN_VOID_UINT8_UINT8, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_SANITIZER_COV_TRACE_CMP2,
+ "__sanitizer_cov_trace_cmp2",
+ BT_FN_VOID_UINT16_UINT16, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_SANITIZER_COV_TRACE_CMP4,
+ "__sanitizer_cov_trace_cmp4",
+ BT_FN_VOID_UINT32_UINT32, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_SANITIZER_COV_TRACE_CMP8,
+ "__sanitizer_cov_trace_cmp8",
+ BT_FN_VOID_UINT64_UINT64, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_SANITIZER_COV_TRACE_CONST_CMP1,
+ "__sanitizer_cov_trace_const_cmp1",
+ BT_FN_VOID_UINT8_UINT8, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_SANITIZER_COV_TRACE_CONST_CMP2,
+ "__sanitizer_cov_trace_const_cmp2",
+ BT_FN_VOID_UINT16_UINT16, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_SANITIZER_COV_TRACE_CONST_CMP4,
+ "__sanitizer_cov_trace_const_cmp4",
+ BT_FN_VOID_UINT32_UINT32, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_SANITIZER_COV_TRACE_CONST_CMP8,
+ "__sanitizer_cov_trace_const_cmp8",
+ BT_FN_VOID_UINT64_UINT64, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_SANITIZER_COV_TRACE_CMPF,
+ "__sanitizer_cov_trace_cmpf",
+ BT_FN_VOID_FLOAT_FLOAT, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_SANITIZER_COV_TRACE_CMPD,
+ "__sanitizer_cov_trace_cmpd",
+ BT_FN_VOID_DOUBLE_DOUBLE, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_SANITIZER_COV_TRACE_SWITCH,
+ "__sanitizer_cov_trace_switch",
+ BT_FN_VOID_UINT64_PTR, ATTR_NOTHROW_LEAF_LIST)
+
+/* This has to come after all the sanitizer builtins. */
+DEF_BUILTIN_STUB(END_SANITIZER_BUILTINS, (const char *)0)
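+
+/* Reading these entries: each DEF_SANITIZER_BUILTIN gives the enum code,
+   the runtime symbol, a signature code and an attribute list.  As a
+   hedged sketch, BT_FN_VOID_PTR_PTR_PTR corresponds to a C prototype
+   such as
+
+     extern void __ubsan_handle_add_overflow (void *data,
+                                              void *lhs, void *rhs);
+
+   which instrumented code calls when the check fails, passing a
+   descriptor of the source location plus the offending operand
+   values.  */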
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sbitmap.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sbitmap.h
new file mode 100644
index 0000000..61172db
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sbitmap.h
@@ -0,0 +1,321 @@
+/* Simple bitmaps.
+ Copyright (C) 1999-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SBITMAP_H
+#define GCC_SBITMAP_H
+
+/* Implementation of sets using simple bitmap vectors.
+
+ This set representation is suitable for non-sparse sets with a known
+ (a priori) universe. The set is represented as a simple array of the
+ host's fastest unsigned integer. For a given member I in the set:
+ - the element for I will be at sbitmap[I / (bits per element)]
+ - the position for I within element is I % (bits per element)
+
+ This representation is very space-efficient for large non-sparse sets
+ with random access patterns.
+
+ The following operations can be performed in O(1) time:
+
+ * set_size : SBITMAP_SIZE
+ * member_p : bitmap_bit_p
+ * add_member : bitmap_set_bit
+ * remove_member : bitmap_clear_bit
+
+ Most other operations on this set representation are O(U) where U is
+ the size of the set universe:
+
+ * clear : bitmap_clear
+ * choose_one : bitmap_first_set_bit /
+ bitmap_last_set_bit
+ * forall : EXECUTE_IF_SET_IN_BITMAP
+ * set_copy : bitmap_copy
+ * set_intersection : bitmap_and
+ * set_union : bitmap_ior
+ * set_difference : bitmap_and_compl
+ * set_disjunction : (not implemented)
+ * set_compare : bitmap_equal_p
+ * bit_in_range_p : bitmap_bit_in_range_p
+
+ Some operations on 3 sets that occur frequently in data flow problems
+ are also implemented:
+
+ * A | (B & C) : bitmap_or_and
+ * A | (B & ~C) : bitmap_ior_and_compl
+ * A & (B | C) : bitmap_and_or
+
+ Most of the set functions have two variants: one that returns non-zero
+ if members were added or removed from the target set, and one that just
+ performs the operation without feedback. The former operations are a
+ bit more expensive but the result can often be used to avoid iterations
+ on other sets.
+
+ Allocating a bitmap is done with sbitmap_alloc, and resizing is
+ performed with sbitmap_resize.
+
+ The storage requirements for simple bitmap sets are O(U) where U is the
+ size of the set universe (colloquially the number of bits in the bitmap).
+
+ This set representation works well for relatively small data flow problems
+ (there are special routines for that, see sbitmap_vector_*). The set
+ operations can be vectorized and there is almost no computation overhead,
+ so that even sparse simple bitmap sets outperform dedicated sparse set
+ representations like linked-list bitmaps. For larger problems, the size
+ overhead of simple bitmap sets gets too high and other set representations
+ have to be used. */
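+
+/* A minimal usage sketch (illustrative only; assumes a 128-bit
+   universe, error handling omitted):
+
+     sbitmap live = sbitmap_alloc (128);
+     bitmap_clear (live);
+     bitmap_set_bit (live, 42);
+     if (bitmap_bit_p (live, 42))
+       mark_live (42);
+     sbitmap_free (live);
+
+   mark_live is a hypothetical callback.  */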
+
+#define SBITMAP_ELT_BITS (HOST_BITS_PER_WIDEST_FAST_INT * 1u)
+#define SBITMAP_ELT_TYPE unsigned HOST_WIDEST_FAST_INT
+
+struct simple_bitmap_def
+{
+ unsigned int n_bits; /* Number of bits. */
+ unsigned int size; /* Size in elements. */
+ SBITMAP_ELT_TYPE elms[1]; /* The elements. */
+};
+
+/* Return the set size needed for N elements. */
+#define SBITMAP_SET_SIZE(N) (((N) + SBITMAP_ELT_BITS - 1) / SBITMAP_ELT_BITS)
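+/* For example, with 64-bit elements SBITMAP_SET_SIZE (100) is
+   (100 + 63) / 64 == 2: two elements cover a 100-bit universe.  */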
+
+/* Return the number of bits in BITMAP. */
+#define SBITMAP_SIZE(BITMAP) ((BITMAP)->n_bits)
+
+/* Verify that access at INDEX in bitmap MAP is valid. */
+
+inline void
+bitmap_check_index (const_sbitmap map, int index)
+{
+ gcc_checking_assert (index >= 0);
+ gcc_checking_assert ((unsigned int)index < map->n_bits);
+}
+
+/* Verify that bitmaps A and B have the same size. */
+
+inline void
+bitmap_check_sizes (const_sbitmap a, const_sbitmap b)
+{
+ gcc_checking_assert (a->n_bits == b->n_bits);
+}
+
+/* Return true if bit number BITNO in bitmap MAP is set. */
+inline bool
+bitmap_bit_p (const_sbitmap map, int bitno)
+{
+ bitmap_check_index (map, bitno);
+
+ size_t i = bitno / SBITMAP_ELT_BITS;
+ unsigned int s = bitno % SBITMAP_ELT_BITS;
+ return (map->elms[i] >> s) & (SBITMAP_ELT_TYPE) 1;
+}
+
+/* Set bit number BITNO in the sbitmap MAP.
+ Return true if the bit changed. */
+
+inline bool
+bitmap_set_bit (sbitmap map, int bitno)
+{
+ bitmap_check_index (map, bitno);
+
+ size_t i = bitno / SBITMAP_ELT_BITS;
+ unsigned int s = bitno % SBITMAP_ELT_BITS;
+ if (map->elms[i] & ((SBITMAP_ELT_TYPE) 1 << s))
+ return false;
+ map->elms[i] |= (SBITMAP_ELT_TYPE) 1 << s;
+ return true;
+}
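+
+/* The changed-bit return value is what makes fixpoint loops cheap.  A
+   hedged sketch of a typical data-flow use, where REACHABLE and
+   BB_INDEX are hypothetical names:
+
+     bool changed = false;
+     if (bitmap_set_bit (reachable, bb_index))
+       changed = true;
+
+   and the surrounding pass iterates again only while CHANGED is set.  */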
+
+/* Reset bit number BITNO in the sbitmap MAP.
+ Return true if the bit changed. */
+
+inline bool
+bitmap_clear_bit (sbitmap map, int bitno)
+{
+ bitmap_check_index (map, bitno);
+
+ size_t i = bitno / SBITMAP_ELT_BITS;
+ unsigned int s = bitno % SBITMAP_ELT_BITS;
+ if (!(map->elms[i] & ((SBITMAP_ELT_TYPE) 1 << s)))
+ return false;
+ map->elms[i] &= ~((SBITMAP_ELT_TYPE) 1 << s);
+ return true;
+}
+
+/* The iterator for sbitmap. */
+struct sbitmap_iterator {
+ /* The pointer to the first word of the bitmap. */
+ const SBITMAP_ELT_TYPE *ptr;
+
+ /* The size of the bitmap. */
+ unsigned int size;
+
+ /* The current word index. */
+ unsigned int word_num;
+
+ /* The current bit index (not modulo SBITMAP_ELT_BITS). */
+ unsigned int bit_num;
+
+ /* The word currently visited. */
+ SBITMAP_ELT_TYPE word;
+};
+
+/* Initialize the iterator I with sbitmap BMP and the initial index
+ MIN. */
+
+inline void
+bmp_iter_set_init (sbitmap_iterator *i, const_sbitmap bmp,
+ unsigned int min, unsigned *bit_no ATTRIBUTE_UNUSED)
+{
+ i->word_num = min / (unsigned int) SBITMAP_ELT_BITS;
+ i->bit_num = min;
+ i->size = bmp->size;
+ i->ptr = bmp->elms;
+
+ if (i->word_num >= i->size)
+ i->word = 0;
+ else
+ i->word = (i->ptr[i->word_num]
+ >> (i->bit_num % (unsigned int) SBITMAP_ELT_BITS));
+}
+
+/* Return true if we have more bits to visit, in which case *N is set
+ to the index of the bit to be visited. Otherwise, return
+ false. */
+
+inline bool
+bmp_iter_set (sbitmap_iterator *i, unsigned int *n)
+{
+ /* Skip words that are zero. */
+ for (; i->word == 0; i->word = i->ptr[i->word_num])
+ {
+ i->word_num++;
+
+ /* If we have reached the end, break. */
+ if (i->word_num >= i->size)
+ return false;
+
+ i->bit_num = i->word_num * SBITMAP_ELT_BITS;
+ }
+
+ /* Skip bits that are zero. */
+ for (; (i->word & 1) == 0; i->word >>= 1)
+ i->bit_num++;
+
+ *n = i->bit_num;
+
+ return true;
+}
+
+/* Advance to the next bit. */
+
+inline void
+bmp_iter_next (sbitmap_iterator *i, unsigned *bit_no ATTRIBUTE_UNUSED)
+{
+ i->word >>= 1;
+ i->bit_num++;
+}
+
+/* Loop over all elements of SBITMAP, starting with MIN. In each
+ iteration, N is set to the index of the bit being visited. ITER is
+ an instance of sbitmap_iterator used to iterate the bitmap. */
+
+#ifndef EXECUTE_IF_SET_IN_BITMAP
+/* See bitmap.h for the other definition of EXECUTE_IF_SET_IN_BITMAP. */
+#define EXECUTE_IF_SET_IN_BITMAP(BITMAP, MIN, BITNUM, ITER) \
+ for (bmp_iter_set_init (&(ITER), (BITMAP), (MIN), &(BITNUM)); \
+ bmp_iter_set (&(ITER), &(BITNUM)); \
+ bmp_iter_next (&(ITER), &(BITNUM)))
+#endif
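+
+/* Iteration sketch (illustrative): visit every member of MAP in
+   ascending order.
+
+     unsigned int i;
+     sbitmap_iterator sbi;
+     EXECUTE_IF_SET_IN_BITMAP (map, 0, i, sbi)
+       process_member (i);
+
+   process_member is a hypothetical callback.  */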
+
+inline void sbitmap_free (sbitmap map)
+{
+ free (map);
+}
+
+inline void sbitmap_vector_free (sbitmap * vec)
+{
+ free (vec);
+}
+
+extern void dump_bitmap (FILE *, const_sbitmap);
+extern void debug_raw (const simple_bitmap_def &ref);
+extern void debug_raw (const simple_bitmap_def *ptr);
+extern void dump_bitmap_file (FILE *, const_sbitmap);
+extern void debug (const simple_bitmap_def &ref);
+extern void debug (const simple_bitmap_def *ptr);
+extern void dump_bitmap_vector (FILE *, const char *, const char *, sbitmap *,
+ int);
+extern sbitmap sbitmap_alloc (unsigned int);
+extern sbitmap *sbitmap_vector_alloc (unsigned int, unsigned int);
+extern sbitmap sbitmap_resize (sbitmap, unsigned int, int);
+extern void bitmap_copy (sbitmap, const_sbitmap);
+extern int bitmap_equal_p (const_sbitmap, const_sbitmap);
+extern unsigned int bitmap_count_bits (const_sbitmap);
+extern bool bitmap_empty_p (const_sbitmap);
+extern void bitmap_clear (sbitmap);
+extern void bitmap_clear_range (sbitmap, unsigned, unsigned);
+extern void bitmap_set_range (sbitmap, unsigned, unsigned);
+extern void bitmap_ones (sbitmap);
+extern void bitmap_vector_clear (sbitmap *, unsigned int);
+extern void bitmap_vector_ones (sbitmap *, unsigned int);
+
+extern bool bitmap_ior_and_compl (sbitmap, const_sbitmap,
+ const_sbitmap, const_sbitmap);
+extern void bitmap_and_compl (sbitmap, const_sbitmap, const_sbitmap);
+extern void bitmap_not (sbitmap, const_sbitmap);
+extern bool bitmap_or_and (sbitmap, const_sbitmap,
+ const_sbitmap, const_sbitmap);
+extern bool bitmap_and_or (sbitmap, const_sbitmap,
+ const_sbitmap, const_sbitmap);
+extern bool bitmap_intersect_p (const_sbitmap, const_sbitmap);
+extern bool bitmap_and (sbitmap, const_sbitmap, const_sbitmap);
+extern bool bitmap_ior (sbitmap, const_sbitmap, const_sbitmap);
+extern bool bitmap_xor (sbitmap, const_sbitmap, const_sbitmap);
+extern bool bitmap_subset_p (const_sbitmap, const_sbitmap);
+extern bool bitmap_bit_in_range_p (const_sbitmap, unsigned int, unsigned int);
+
+extern int bitmap_first_set_bit (const_sbitmap);
+extern int bitmap_last_set_bit (const_sbitmap);
+
+extern void debug_bitmap (const_sbitmap);
+extern sbitmap sbitmap_realloc (sbitmap, unsigned int);
+
+/* A class that ties the lifetime of an sbitmap to its scope. */
+class auto_sbitmap
+{
+public:
+ explicit auto_sbitmap (unsigned int size) :
+ m_bitmap (sbitmap_alloc (size)) {}
+ ~auto_sbitmap () { sbitmap_free (m_bitmap); }
+
+ /* Allow calling sbitmap functions on our bitmap. */
+ operator sbitmap () { return m_bitmap; }
+ operator const_sbitmap () const { return m_bitmap; }
+
+private:
+ /* Prevent making a copy that refers to our sbitmap. */
+ auto_sbitmap (const auto_sbitmap &);
+ auto_sbitmap &operator = (const auto_sbitmap &);
+ auto_sbitmap (auto_sbitmap &&);
+ auto_sbitmap &operator = (auto_sbitmap &&);
+
+ /* The bitmap we are managing. */
+ sbitmap m_bitmap;
+};
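+
+/* Usage sketch (illustrative): the bitmap is freed automatically when
+   the scope is left, so early returns cannot leak it.
+
+     {
+       auto_sbitmap visited (n_nodes);
+       bitmap_clear (visited);
+       bitmap_set_bit (visited, 0);
+     }
+
+   n_nodes is a hypothetical universe size; sbitmap_free runs in the
+   destructor at the closing brace.  */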
+
+#endif /* ! GCC_SBITMAP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sched-int.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sched-int.h
new file mode 100644
index 0000000..97b7d2d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sched-int.h
@@ -0,0 +1,1687 @@
+/* Instruction scheduling pass. This file contains definitions used
+ internally in the scheduler.
+ Copyright (C) 1992-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SCHED_INT_H
+#define GCC_SCHED_INT_H
+
+#ifdef INSN_SCHEDULING
+
+/* Identifier of a scheduler pass. */
+enum sched_pass_id_t { SCHED_PASS_UNKNOWN, SCHED_RGN_PASS, SCHED_EBB_PASS,
+ SCHED_SMS_PASS, SCHED_SEL_PASS };
+
+/* The algorithm used to implement -fsched-pressure. */
+enum sched_pressure_algorithm
+{
+ SCHED_PRESSURE_NONE,
+ SCHED_PRESSURE_WEIGHTED,
+ SCHED_PRESSURE_MODEL
+};
+
+typedef vec<basic_block> bb_vec_t;
+typedef vec<rtx_insn *> insn_vec_t;
+typedef vec<rtx_insn *> rtx_vec_t;
+
+extern void sched_init_bbs (void);
+
+extern void sched_extend_luids (void);
+extern void sched_init_insn_luid (rtx_insn *);
+extern void sched_init_luids (const bb_vec_t &);
+extern void sched_finish_luids (void);
+
+extern void sched_extend_target (void);
+
+extern void haifa_init_h_i_d (const bb_vec_t &);
+extern void haifa_finish_h_i_d (void);
+
+/* Hooks that are common to all the schedulers. */
+struct common_sched_info_def
+{
+ /* Called after blocks have been rearranged due to movement of a jump
+ instruction. The first parameter is the index of the basic block
+ that now contains the jump; the second is the index of the basic
+ block the jump used to be in; the third is the index of the basic
+ block that follows the second parameter. */
+ void (*fix_recovery_cfg) (int, int, int);
+
+ /* Called to notify the frontend that a new basic block is being added.
+ The first parameter is the new basic block; the second is the block
+ after which the new block is being added, or the exit block if a
+ recovery block is being added, or NULL if a standalone block is
+ being added. */
+ void (*add_block) (basic_block, basic_block);
+
+ /* Estimate number of insns in the basic block. */
+ int (*estimate_number_of_insns) (basic_block);
+
+ /* Given a non-insn (!INSN_P (x)) return
+ -1 - if this rtx doesn't need a luid.
+ 0 - if it should have the same luid as the previous insn.
+ 1 - if it needs a separate luid. */
+ int (*luid_for_non_insn) (rtx);
+
+ /* Scheduler pass identifier. It is mainly used in assertions. */
+ enum sched_pass_id_t sched_pass_id;
+};
+
+extern struct common_sched_info_def *common_sched_info;
+
+extern const struct common_sched_info_def haifa_common_sched_info;
+
+/* Return true if the selective scheduling pass is running. */
+inline bool
+sel_sched_p (void)
+{
+ return common_sched_info->sched_pass_id == SCHED_SEL_PASS;
+}
+
+/* Return the maximum priority that has been assigned to an insn. */
+extern int get_rgn_sched_max_insns_priority (void);
+
+/* Increases effective priority for INSN by AMOUNT. */
+extern void sel_add_to_insn_priority (rtx, int);
+
+/* True if during selective scheduling we need to emulate some of haifa
+ scheduler behavior. */
+extern int sched_emulate_haifa_p;
+
+/* Mapping from INSN_UID to INSN_LUID. In the end all other per insn data
+ structures should be indexed by luid. */
+extern vec<int> sched_luids;
+#define INSN_LUID(INSN) (sched_luids[INSN_UID (INSN)])
+#define LUID_BY_UID(UID) (sched_luids[UID])
+
+#define SET_INSN_LUID(INSN, LUID) \
+(sched_luids[INSN_UID (INSN)] = (LUID))
+
+/* The highest INSN_LUID. */
+extern int sched_max_luid;
+
+extern int insn_luid (rtx);
+
+/* This list holds ripped off notes from the current block. These notes will
+ be attached to the beginning of the block when its scheduling is
+ finished. */
+extern rtx_insn *note_list;
+
+extern void remove_notes (rtx_insn *, rtx_insn *);
+extern rtx_insn *restore_other_notes (rtx_insn *, basic_block);
+extern void sched_insns_init (rtx);
+extern void sched_insns_finish (void);
+
+extern void *xrecalloc (void *, size_t, size_t, size_t);
+
+extern void reemit_notes (rtx_insn *);
+
+/* Functions in haifa-sched.cc. */
+extern int haifa_classify_insn (const_rtx);
+
+/* Functions in sel-sched-ir.cc. */
+extern void sel_find_rgns (void);
+extern void sel_mark_hard_insn (rtx);
+
+extern size_t dfa_state_size;
+
+extern void advance_state (state_t);
+
+extern void setup_sched_dump (void);
+extern void sched_init (void);
+extern void sched_finish (void);
+
+extern bool sel_insn_is_speculation_check (rtx);
+
+/* Describe the ready list of the scheduler.
+ VEC holds space enough for all insns in the current region. VECLEN
+ says how many exactly.
+ FIRST is the index of the element with the highest priority; i.e. the
+ last one in the ready list, since elements are ordered by ascending
+ priority.
+ N_READY determines how many insns are on the ready list.
+ N_DEBUG determines how many debug insns are on the ready list. */
+struct ready_list
+{
+ rtx_insn **vec;
+ int veclen;
+ int first;
+ int n_ready;
+ int n_debug;
+};
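+
+/* A hedged sketch of taking the best insn, given the ordering above
+   (the real removal logic lives in haifa-sched.cc):
+
+     rtx_insn *best = ready.vec[ready.first];
+
+   i.e. the highest-priority element sits at index FIRST, the end of
+   the ascending-priority order.  */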
+
+extern signed char *ready_try;
+extern struct ready_list ready;
+
+extern int max_issue (struct ready_list *, int, state_t, bool, int *);
+
+extern void ebb_compute_jump_reg_dependencies (rtx, regset);
+
+extern edge find_fallthru_edge_from (basic_block);
+
+extern void (* sched_init_only_bb) (basic_block, basic_block);
+extern basic_block (* sched_split_block) (basic_block, rtx);
+extern basic_block sched_split_block_1 (basic_block, rtx);
+extern basic_block (* sched_create_empty_bb) (basic_block);
+extern basic_block sched_create_empty_bb_1 (basic_block);
+
+extern basic_block sched_create_recovery_block (basic_block *);
+extern void sched_create_recovery_edges (basic_block, basic_block,
+ basic_block);
+
+/* Pointer to data describing the current DFA state. */
+extern state_t curr_state;
+
+/* Type to represent status of a dependence. */
+typedef unsigned int ds_t;
+#define BITS_PER_DEP_STATUS HOST_BITS_PER_INT
+
+/* Type to represent weakness of speculative dependence. */
+typedef unsigned int dw_t;
+
+extern enum reg_note ds_to_dk (ds_t);
+extern ds_t dk_to_ds (enum reg_note);
+
+/* Describe a dependency that can be broken by making a replacement
+ in one of the patterns. LOC is the location, ORIG and NEWVAL the
+ two alternative contents, and INSN the instruction that must be
+ changed. */
+struct dep_replacement
+{
+ rtx *loc;
+ rtx orig;
+ rtx newval;
+ rtx_insn *insn;
+};
+
+/* Information about the dependency. */
+struct _dep
+{
+ /* Producer. */
+ rtx_insn *pro;
+
+ /* Consumer. */
+ rtx_insn *con;
+
+ /* If nonnull, holds a pointer to information about how to break the
+ dependency by making a replacement in one of the insns. There is
+ only one such dependency for each insn that must be modified in
+ order to break such a dependency. */
+ struct dep_replacement *replace;
+
+ /* Dependency status. This field holds all dependency types and additional
+ information for speculative dependencies. */
+ ds_t status;
+
+ /* Dependency major type. This field is superseded by STATUS above.
+ It is kept in place, though, because some targets still use it. */
+ ENUM_BITFIELD(reg_note) type:6;
+
+ unsigned nonreg:1;
+ unsigned multiple:1;
+
+ /* Cached cost of the dependency. Make sure to update UNKNOWN_DEP_COST
+ when changing the size of this field. */
+ int cost:20;
+
+ unsigned unused:4;
+};
+
+#define UNKNOWN_DEP_COST ((int) ((unsigned int) -1 << 19))
+
+typedef struct _dep dep_def;
+typedef dep_def *dep_t;
+
+#define DEP_PRO(D) ((D)->pro)
+#define DEP_CON(D) ((D)->con)
+#define DEP_TYPE(D) ((D)->type)
+#define DEP_STATUS(D) ((D)->status)
+#define DEP_COST(D) ((D)->cost)
+#define DEP_NONREG(D) ((D)->nonreg)
+#define DEP_MULTIPLE(D) ((D)->multiple)
+#define DEP_REPLACE(D) ((D)->replace)
+
+/* Functions to work with dep. */
+
+extern void init_dep_1 (dep_t, rtx_insn *, rtx_insn *, enum reg_note, ds_t);
+extern void init_dep (dep_t, rtx_insn *, rtx_insn *, enum reg_note);
+
+extern void sd_debug_dep (dep_t);
+
+/* Definition of this struct resides below. */
+struct _dep_node;
+typedef struct _dep_node *dep_node_t;
+
+/* A link in the dependency list. This is essentially an equivalent of a
+ single {INSN, DEPS}_LIST rtx. */
+struct _dep_link
+{
+ /* Dep node with all the data. */
+ dep_node_t node;
+
+ /* Next link in the list. For the last one it is NULL. */
+ struct _dep_link *next;
+
+ /* Pointer to the next field of the previous link in the list.
+ For the first link this points to the deps_list->first.
+
+ With the help of this field it is easy to remove and insert links
+ in the list. */
+ struct _dep_link **prev_nextp;
+};
+typedef struct _dep_link *dep_link_t;
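+
+/* Thanks to PREV_NEXTP, a link can be unlinked in O(1) without knowing
+   the list head.  A hedged sketch of the idea (the real helpers live
+   in sched-deps.cc):
+
+     *link->prev_nextp = link->next;
+     if (link->next)
+       link->next->prev_nextp = link->prev_nextp;
+  */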
+
+#define DEP_LINK_NODE(N) ((N)->node)
+#define DEP_LINK_NEXT(N) ((N)->next)
+#define DEP_LINK_PREV_NEXTP(N) ((N)->prev_nextp)
+
+/* Macros to work with dep_link. For most use cases only part of the
+ dependency information is needed. These macros conveniently provide
+ that piece of information. */
+
+#define DEP_LINK_DEP(N) (DEP_NODE_DEP (DEP_LINK_NODE (N)))
+#define DEP_LINK_PRO(N) (DEP_PRO (DEP_LINK_DEP (N)))
+#define DEP_LINK_CON(N) (DEP_CON (DEP_LINK_DEP (N)))
+#define DEP_LINK_TYPE(N) (DEP_TYPE (DEP_LINK_DEP (N)))
+#define DEP_LINK_STATUS(N) (DEP_STATUS (DEP_LINK_DEP (N)))
+
+/* A list of dep_links. */
+struct _deps_list
+{
+ /* First element. */
+ dep_link_t first;
+
+ /* Total number of elements in the list. */
+ int n_links;
+};
+typedef struct _deps_list *deps_list_t;
+
+#define DEPS_LIST_FIRST(L) ((L)->first)
+#define DEPS_LIST_N_LINKS(L) ((L)->n_links)
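+
+/* Walking sketch (illustrative): visit every dependence in list L.
+
+     for (dep_link_t link = DEPS_LIST_FIRST (l); link != NULL;
+          link = DEP_LINK_NEXT (link))
+       process_dep (DEP_LINK_DEP (link));
+
+   process_dep is a hypothetical callback.  */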
+
+/* Suppose we have a dependence Y between insn pro1 and con1, where pro1 has
+ additional dependents con0 and con2, and con1 is dependent on additional
+ insns pro0 and pro2:
+
+ .con0 pro0
+ . ^ |
+ . | |
+ . | |
+ . X A
+ . | |
+ . | |
+ . | V
+ .pro1--Y-->con1
+ . | ^
+ . | |
+ . | |
+ . Z B
+ . | |
+ . | |
+ . V |
+ .con2 pro2
+
+ This is represented using a "dep_node" for each dependence arc, which are
+ connected as follows (diagram is centered around Y which is fully shown;
+ other dep_nodes shown partially):
+
+ . +------------+ +--------------+ +------------+
+ . : dep_node X : | dep_node Y | : dep_node Z :
+ . : : | | : :
+ . : : | | : :
+ . : forw : | forw | : forw :
+ . : +--------+ : | +--------+ | : +--------+ :
+ forw_deps : |dep_link| : | |dep_link| | : |dep_link| :
+ +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
+ |first|----->| |next|-+------+->| |next|-+--+----->| |next|-+--->NULL
+ +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
+ . ^ ^ : | ^ | : | | ^ | | : | | :
+ . | | : | | | : | | | | | : | | :
+ . | +--<----+--+ +--+---<--+--+--+ +--+--+--<---+--+ | :
+ . | : | | | : | | | | | : | | | :
+ . | : | +----+ | : | | +----+ | | : | +----+ | :
+ . | : | |prev| | : | | |prev| | | : | |prev| | :
+ . | : | |next| | : | | |next| | | : | |next| | :
+ . | : | +----+ | : | | +----+ | | : | +----+ | :
+ . | : | | :<-+ | | | |<-+ : | | :<-+
+ . | : | +----+ | : | | | +----+ | | | : | +----+ | : |
+ . | : | |node|-+----+ | | |node|-+--+--+ : | |node|-+----+
+ . | : | +----+ | : | | +----+ | | : | +----+ | :
+ . | : | | : | | | | : | | :
+ . | : +--------+ : | +--------+ | : +--------+ :
+ . | : : | | : :
+ . | : SAME pro1 : | +--------+ | : SAME pro1 :
+ . | : DIFF con0 : | |dep | | : DIFF con2 :
+ . | : : | | | | : :
+ . | | | +----+ | |
+ .RTX<------------------------+--+-|pro1| | |
+ .pro1 | | +----+ | |
+ . | | | |
+ . | | +----+ | |
+ .RTX<------------------------+--+-|con1| | |
+ .con1 | | +----+ | |
+ . | | | | |
+ . | | | +----+ | |
+ . | | | |kind| | |
+ . | | | +----+ | |
+ . | : : | | |stat| | | : :
+ . | : DIFF pro0 : | | +----+ | | : DIFF pro2 :
+ . | : SAME con1 : | | | | : SAME con1 :
+ . | : : | +--------+ | : :
+ . | : : | | : :
+ . | : back : | back | : back :
+ . v : +--------+ : | +--------+ | : +--------+ :
+ back_deps : |dep_link| : | |dep_link| | : |dep_link| :
+ +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
+ |first|----->| |next|-+------+->| |next|-+--+----->| |next|-+--->NULL
+ +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
+ . ^ : | ^ | : | | ^ | | : | | :
+ . | : | | | : | | | | | : | | :
+ . +--<----+--+ +--+---<--+--+--+ +--+--+--<---+--+ | :
+ . : | | | : | | | | | : | | | :
+ . : | +----+ | : | | +----+ | | : | +----+ | :
+ . : | |prev| | : | | |prev| | | : | |prev| | :
+ . : | |next| | : | | |next| | | : | |next| | :
+ . : | +----+ | : | | +----+ | | : | +----+ | :
+ . : | | :<-+ | | | |<-+ : | | :<-+
+ . : | +----+ | : | | | +----+ | | | : | +----+ | : |
+ . : | |node|-+----+ | | |node|-+--+--+ : | |node|-+----+
+ . : | +----+ | : | | +----+ | | : | +----+ | :
+ . : | | : | | | | : | | :
+ . : +--------+ : | +--------+ | : +--------+ :
+ . : : | | : :
+ . : dep_node A : | dep_node Y | : dep_node B :
+ . +------------+ +--------------+ +------------+
+*/
+
+struct _dep_node
+{
+ /* Backward link. */
+ struct _dep_link back;
+
+ /* The dep. */
+ struct _dep dep;
+
+ /* Forward link. */
+ struct _dep_link forw;
+};
+
+#define DEP_NODE_BACK(N) (&(N)->back)
+#define DEP_NODE_DEP(N) (&(N)->dep)
+#define DEP_NODE_FORW(N) (&(N)->forw)
+
+/* The following enumeration values tell us what dependencies we
+ should use to implement the barrier. We use true-dependencies for
+ TRUE_BARRIER and anti-dependencies for MOVE_BARRIER. */
+enum reg_pending_barrier_mode
+{
+ NOT_A_BARRIER = 0,
+ MOVE_BARRIER,
+ TRUE_BARRIER
+};
+
+/* Whether a register movement is associated with a call. */
+enum post_call_group
+{
+ not_post_call,
+ post_call,
+ post_call_initial
+};
+
+/* Insns which affect pseudo-registers. */
+struct deps_reg
+{
+ rtx_insn_list *uses;
+ rtx_insn_list *sets;
+ rtx_insn_list *implicit_sets;
+ rtx_insn_list *control_uses;
+ rtx_insn_list *clobbers;
+ int uses_length;
+ int clobbers_length;
+};
+
+/* Describe state of dependencies used during sched_analyze phase. */
+class deps_desc
+{
+public:
+ /* The *_insns and *_mems are paired lists. Each pending memory operation
+ will have a pointer to the MEM rtx on one list and a pointer to the
+ containing insn on the other list in the same place in the list. */
+
+ /* We can't use add_dependence like the old code did, because a single insn
+ may have multiple memory accesses, and hence needs to be on the list
+ once for each memory access. Add_dependence won't let you add an insn
+ to a list more than once. */
+
+ /* An INSN_LIST containing all insns with pending read operations. */
+ rtx_insn_list *pending_read_insns;
+
+ /* An EXPR_LIST containing all MEM rtx's which are pending reads. */
+ rtx_expr_list *pending_read_mems;
+
+ /* An INSN_LIST containing all insns with pending write operations. */
+ rtx_insn_list *pending_write_insns;
+
+ /* An EXPR_LIST containing all MEM rtx's which are pending writes. */
+ rtx_expr_list *pending_write_mems;
+
+ /* An INSN_LIST containing all jump insns. */
+ rtx_insn_list *pending_jump_insns;
+
+ /* We must prevent the above lists from ever growing too large since
+ the number of dependencies produced is at least O(N*N),
+ and execution time is at least O(4*N*N), as a function of the
+ length of these pending lists. */
+
+ /* Indicates the length of the pending_read list. */
+ int pending_read_list_length;
+
+ /* Indicates the length of the pending_write list. */
+ int pending_write_list_length;
+
+ /* Length of the pending memory flush list plus the length of the pending
+ jump insn list. Large functions with no calls may build up extremely
+ large lists. */
+ int pending_flush_length;
+
+ /* The last insn upon which all memory references must depend.
+ This is an insn which flushed the pending lists, creating a dependency
+ between it and all previously pending memory references. This creates
+ a barrier (or a checkpoint) which no memory reference is allowed to cross.
+
+ This includes all non-constant CALL_INSNs. When we do interprocedural
+ alias analysis, this restriction can be relaxed.
+ This may also be an INSN that writes memory if the pending lists grow
+ too large. */
+ rtx_insn_list *last_pending_memory_flush;
+
+ /* A list of the last function calls we have seen. We use a list to
+ represent last function calls from multiple predecessor blocks.
+ Used to prevent register lifetimes from expanding unnecessarily. */
+ rtx_insn_list *last_function_call;
+
+ /* A list of the last function calls that may not return normally
+ we have seen. We use a list to represent last function calls from
+ multiple predecessor blocks. Used to prevent moving trapping insns
+ across such calls. */
+ rtx_insn_list *last_function_call_may_noreturn;
+
+ /* A list of insns which use a pseudo register that does not already
+ cross a call. We create dependencies between each of those insn
+ and the next call insn, to ensure that they won't cross a call after
+ scheduling is done. */
+ rtx_insn_list *sched_before_next_call;
+
+ /* Similarly, a list of insns which should not cross a branch. */
+ rtx_insn_list *sched_before_next_jump;
+
+ /* Used to keep post-call pseudo/hard reg movements together with
+ the call. */
+ enum post_call_group in_post_call_group_p;
+
+ /* The last debug insn we've seen. */
+ rtx_insn *last_debug_insn;
+
+ /* The last insn bearing REG_ARGS_SIZE that we've seen. */
+ rtx_insn *last_args_size;
+
+ /* A list of all prologue insns we have seen without intervening epilogue
+ insns, and one of all epilogue insns we have seen without intervening
+ prologue insns. This is used to prevent mixing prologue and epilogue
+ insns. See PR78029. */
+ rtx_insn_list *last_prologue;
+ rtx_insn_list *last_epilogue;
+
+ /* Whether the last *logue insn was an epilogue insn or a prologue insn
+ instead. */
+ bool last_logue_was_epilogue;
+
+ /* The maximum register number for the following arrays. Before reload
+ this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER. */
+ int max_reg;
+
+ /* Element N is the next insn that sets (hard or pseudo) register
+ N within the current basic block; or zero, if there is no
+ such insn. Needed for new registers which may be introduced
+ by splitting insns. */
+ struct deps_reg *reg_last;
+
+ /* Element N is set for each register that has any nonzero element
+ in reg_last[N].{uses,sets,clobbers}. */
+ regset_head reg_last_in_use;
+
+ /* Shows the last value of reg_pending_barrier associated with the insn. */
+ enum reg_pending_barrier_mode last_reg_pending_barrier;
+
+ /* True when this context should be treated as a readonly by
+ the analysis. */
+ BOOL_BITFIELD readonly : 1;
+};
+
+typedef class deps_desc *deps_t;
+
+/* This structure holds some state of the current scheduling pass, and
+ contains some function pointers that abstract out some of the non-generic
+ functionality from functions such as schedule_block or schedule_insn.
+ There is one global variable, current_sched_info, which points to the
+ sched_info structure currently in use. */
+struct haifa_sched_info
+{
+ /* Add all insns that are initially ready to the ready list. Called once
+ before scheduling a set of insns. */
+ void (*init_ready_list) (void);
+ /* Called after taking an insn from the ready list. Returns nonzero if
+ this insn can be scheduled, zero if we should silently discard it. */
+ int (*can_schedule_ready_p) (rtx_insn *);
+ /* Return nonzero if there are more insns that should be scheduled. */
+ int (*schedule_more_p) (void);
+ /* Called after an insn has all its hard dependencies resolved.
+ Adjusts the status of the instruction (passed through the second
+ parameter) to indicate whether it should be moved to the ready list
+ or the queue, or whether it should be silently discarded (until the
+ next resolved dependence). */
+ ds_t (*new_ready) (rtx_insn *, ds_t);
+ /* Compare priority of two insns. Return a positive number if the second
+ insn is to be preferred for scheduling, and a negative one if the first
+ is to be preferred. Zero if they are equally good. */
+ int (*rank) (rtx_insn *, rtx_insn *);
+ /* Return a string that contains the insn uid and optionally anything else
+ necessary to identify this insn in an output. It's valid to use a
+ static buffer for this. The ALIGNED parameter should cause the string
+ to be formatted so that multiple output lines will line up nicely. */
+ const char *(*print_insn) (const rtx_insn *, int);
+ /* Return nonzero if an insn should be included in priority
+ calculations. */
+ int (*contributes_to_priority) (rtx_insn *, rtx_insn *);
+
+ /* Return true if scheduling insn (passed as the parameter) will trigger
+ finish of scheduling current block. */
+ bool (*insn_finishes_block_p) (rtx_insn *);
+
+ /* The boundaries of the set of insns to be scheduled. */
+ rtx_insn *prev_head, *next_tail;
+
+ /* Filled in after the schedule is finished; the first and last scheduled
+ insns. */
+ rtx_insn *head, *tail;
+
+ /* If nonzero, enables an additional sanity check in schedule_block. */
+ unsigned int queue_must_finish_empty:1;
+
+ /* Maximum priority that has been assigned to an insn. */
+ int sched_max_insns_priority;
+
+ /* Hooks to support speculative scheduling. */
+
+ /* Called to notify frontend that instruction is being added (second
+ parameter == 0) or removed (second parameter == 1). */
+ void (*add_remove_insn) (rtx_insn *, int);
+
+ /* Called to notify the frontend that instruction INSN is being
+ scheduled. */
+ void (*begin_schedule_ready) (rtx_insn *insn);
+
+ /* Called to notify the frontend that an instruction INSN is about to be
+ moved to its correct place in the final schedule. This is done for all
+ insns in order of the schedule. LAST indicates the last scheduled
+ instruction. */
+ void (*begin_move_insn) (rtx_insn *insn, rtx_insn *last);
+
+ /* If the second parameter is not NULL, return a nonnull value if the
+ basic block should be advanced.
+ If the second parameter is NULL, return the next basic block in EBB.
+ The first parameter is the current basic block in EBB. */
+ basic_block (*advance_target_bb) (basic_block, rtx_insn *);
+
+ /* Allocate memory, store the frontend scheduler state in it, and
+ return it. */
+ void *(*save_state) (void);
+ /* Restore frontend scheduler state from the argument, and free the
+ memory. */
+ void (*restore_state) (void *);
+
+ /* ??? FIXME: should use straight bitfields inside sched_info instead of
+ this flag field. */
+ unsigned int flags;
+};
+
+/* This structure holds description of the properties for speculative
+ scheduling. */
+struct spec_info_def
+{
+ /* Holds types of allowed speculations: BEGIN_{DATA|CONTROL},
+ BE_IN_{DATA|CONTROL}. */
+ int mask;
+
+ /* A dump file for additional information on speculative scheduling. */
+ FILE *dump;
+
+ /* Minimal cumulative weakness of a speculative instruction's
+ dependencies for the insn to be scheduled. */
+ dw_t data_weakness_cutoff;
+
+ /* Minimal usefulness of speculative instruction to be considered for
+ scheduling. */
+ int control_weakness_cutoff;
+
+ /* Flags from the enum SPEC_SCHED_FLAGS. */
+ int flags;
+};
+typedef struct spec_info_def *spec_info_t;
+
+extern spec_info_t spec_info;
+
+extern struct haifa_sched_info *current_sched_info;
+
+/* Do register-pressure-sensitive insn scheduling if the flag is
+ set. */
+extern enum sched_pressure_algorithm sched_pressure;
+
+/* Map regno -> its pressure class. The map is defined only when
+ SCHED_PRESSURE_P is true. */
+extern enum reg_class *sched_regno_pressure_class;
+
+/* Indexed by INSN_UID, the collection of all data associated with
+ a single instruction. */
+
+struct _haifa_deps_insn_data
+{
+ /* The number of incoming edges in the forward dependency graph.
+ As scheduling proceeds, counts are decreased. An insn moves to
+ the ready queue when its counter reaches zero. */
+ int dep_count;
+
+ /* Nonzero if instruction has internal dependence
+ (e.g. add_dependence was invoked with (insn == elem)). */
+ unsigned int has_internal_dep;
+
+ /* NB: We can't place 'struct _deps_list' here instead of deps_list_t into
+ h_i_d because when h_i_d extends, addresses of the deps_list->first
+ change without updating deps_list->first->next->prev_nextp. Thus
+ BACK_DEPS and RESOLVED_BACK_DEPS are allocated on the heap and FORW_DEPS
+ list is allocated on the obstack. */
+
+ /* A list of hard backward dependencies. The insn is a consumer of all the
+ deps mentioned here. */
+ deps_list_t hard_back_deps;
+
+ /* A list of speculative (weak) dependencies. The insn is a consumer of all
+ the deps mentioned here. */
+ deps_list_t spec_back_deps;
+
+ /* A list of insns which depend on the instruction. Unlike 'back_deps',
+ it represents forward dependencies. */
+ deps_list_t forw_deps;
+
+ /* A list of scheduled producers of the instruction. Links are being moved
+ from 'back_deps' to 'resolved_back_deps' while scheduling. */
+ deps_list_t resolved_back_deps;
+
+ /* A list of scheduled consumers of the instruction. Links are being moved
+ from 'forw_deps' to 'resolved_forw_deps' while scheduling to speed up
+ the search in 'forw_deps'. */
+ deps_list_t resolved_forw_deps;
+
+ /* If the insn is conditional (either through COND_EXEC, or because
+ it is a conditional branch), this records the condition. NULL
+ for insns that haven't been seen yet or don't have a condition;
+ const_true_rtx to mark an insn without a condition, or with a
+ condition that has been clobbered by a subsequent insn. */
+ rtx cond;
+
+ /* For a conditional insn, a list of insns that could set the condition
+ register. Used when generating control dependencies. */
+ rtx_insn_list *cond_deps;
+
+ /* True if the condition in 'cond' should be reversed to get the actual
+ condition. */
+ unsigned int reverse_cond : 1;
+
+ /* Some insns (e.g. call) are not allowed to move across blocks. */
+ unsigned int cant_move : 1;
+};
+
+
+/* Bits used for storing values of the fields in the following
+ structure. */
+#define INCREASE_BITS 8
+
+/* The structure describes how the corresponding insn increases the
+ register pressure for each pressure class. */
+struct reg_pressure_data
+{
+ /* Pressure increase for given class because of clobber. */
+ unsigned int clobber_increase : INCREASE_BITS;
+ /* Increase in register pressure for given class because of register
+ sets. */
+ unsigned int set_increase : INCREASE_BITS;
+ /* Pressure increase for given class because of unused register
+ set. */
+ unsigned int unused_set_increase : INCREASE_BITS;
+ /* Pressure change: #sets - #deaths. */
+ int change : INCREASE_BITS;
+};
+
+/* The following structure describes usage of registers by insns. */
+struct reg_use_data
+{
+ /* Regno used in the insn. */
+ int regno;
+ /* Insn using the regno. */
+ rtx_insn *insn;
+ /* Cyclic list of elements with the same regno. */
+ struct reg_use_data *next_regno_use;
+ /* List of elements with the same insn. */
+ struct reg_use_data *next_insn_use;
+};
+
+/* The following structure describes used sets of registers by insns.
+ Registers are pseudos whose pressure class is not NO_REGS or hard
+ registers available for allocation. */
+struct reg_set_data
+{
+ /* Regno used in the insn. */
+ int regno;
+ /* Insn setting the regno. */
+ rtx insn;
+ /* List of elements with the same insn. */
+ struct reg_set_data *next_insn_set;
+};
+
+enum autopref_multipass_data_status {
+ /* Entry is irrelevant for auto-prefetcher. */
+ AUTOPREF_MULTIPASS_DATA_IRRELEVANT = -2,
+ /* Entry is uninitialized. */
+ AUTOPREF_MULTIPASS_DATA_UNINITIALIZED = -1,
+ /* Entry is relevant for auto-prefetcher and insn can be delayed
+ to allow another insn through. */
+ AUTOPREF_MULTIPASS_DATA_NORMAL = 0,
+ /* Entry is relevant for auto-prefetcher, but insn should not be
+ delayed as that will break scheduling. */
+ AUTOPREF_MULTIPASS_DATA_DONT_DELAY = 1
+};
+
+/* Data for modeling cache auto-prefetcher. */
+struct autopref_multipass_data_
+{
+ /* Base part of memory address. */
+ rtx base;
+
+ /* Memory offsets from the base. */
+ int offset;
+
+ /* Entry status. */
+ enum autopref_multipass_data_status status;
+};
+typedef struct autopref_multipass_data_ autopref_multipass_data_def;
+typedef autopref_multipass_data_def *autopref_multipass_data_t;
+
+struct _haifa_insn_data
+{
+ /* We can't place 'struct _deps_list' into h_i_d instead of deps_list_t
+ because when h_i_d extends, addresses of the deps_list->first
+ change without updating deps_list->first->next->prev_nextp. */
+
+ /* Logical uid gives the original ordering of the insns. */
+ int luid;
+
+ /* A priority for each insn. */
+ int priority;
+
+ /* The fusion priority for each insn. */
+ int fusion_priority;
+
+ /* The minimum clock tick at which the insn becomes ready. This is
+ used to note timing constraints for the insns in the pending list. */
+ int tick;
+
+ /* For insns that are scheduled at a fixed difference from another,
+ this records the tick in which they must be ready. */
+ int exact_tick;
+
+ /* INTER_TICK is used to adjust INSN_TICKs of instructions from the
+ subsequent blocks in a region. */
+ int inter_tick;
+
+ /* Used temporarily to estimate an INSN_TICK value for an insn given
+ current knowledge. */
+ int tick_estimate;
+
+ /* See comment on QUEUE_INDEX macro in haifa-sched.cc. */
+ int queue_index;
+
+ short cost;
+
+ /* '> 0' if priority is valid,
+ '== 0' if priority was not yet computed,
+ '< 0' if priority is invalid and should be recomputed. */
+ signed char priority_status;
+
+ /* Set if there's DEF-USE dependence between some speculatively
+ moved load insn and this one. */
+ unsigned int fed_by_spec_load : 1;
+ unsigned int is_load_insn : 1;
+ /* Nonzero if this insn has negative-cost forward dependencies against
+ an already scheduled insn. */
+ unsigned int feeds_backtrack_insn : 1;
+
+ /* Nonzero if this insn is a shadow of another, scheduled after a fixed
+ delay. We only emit shadows at the end of a cycle, with no other
+ real insns following them. */
+ unsigned int shadow_p : 1;
+
+ /* Used internally in unschedule_insns_until to mark insns that must have
+ their TODO_SPEC recomputed. */
+ unsigned int must_recompute_spec : 1;
+
+ /* What speculations are necessary to apply to schedule the instruction. */
+ ds_t todo_spec;
+
+ /* What speculations were already applied. */
+ ds_t done_spec;
+
+ /* What speculations are checked by this instruction. */
+ ds_t check_spec;
+
+ /* Recovery block for speculation checks. */
+ basic_block recovery_block;
+
+ /* Original pattern of the instruction. */
+ rtx orig_pat;
+
+ /* For insns with DEP_CONTROL dependencies, the predicated pattern if it
+ was ever successfully constructed. */
+ rtx predicated_pat;
+
+ /* The following array contains info how the insn increases register
+ pressure. There is an element for each cover class of pseudos
+ referenced in insns. */
+ struct reg_pressure_data *reg_pressure;
+ /* The following array contains maximal reg pressure between last
+ scheduled insn and given insn. There is an element for each
+ pressure class of pseudos referenced in insns. This info updated
+ after scheduling each insn for each insn between the two
+ mentioned insns. */
+ int *max_reg_pressure;
+ /* The following list contains info about used pseudos and hard
+ registers available for allocation. */
+ struct reg_use_data *reg_use_list;
+ /* The following list contains info about set pseudos and hard
+ registers available for allocation. */
+ struct reg_set_data *reg_set_list;
+ /* Info about how scheduling the insn changes cost of register
+ pressure excess (between source and target). */
+ int reg_pressure_excess_cost_change;
+ int model_index;
+
+ /* Original order of insns in the ready list. */
+ int rfs_debug_orig_order;
+
+ /* The deciding reason for INSN's place in the ready list. */
+ int last_rfs_win;
+
+ /* Two entries for cache auto-prefetcher model: one for mem reads,
+ and one for mem writes. */
+ autopref_multipass_data_def autopref_multipass_data[2];
+};
+
+typedef struct _haifa_insn_data haifa_insn_data_def;
+typedef haifa_insn_data_def *haifa_insn_data_t;
+
+
+extern vec<haifa_insn_data_def> h_i_d;
+
+#define HID(INSN) (&h_i_d[INSN_UID (INSN)])
+
+/* Accessor macros for h_i_d. There are more in haifa-sched.cc and
+ sched-rgn.cc. */
+#define INSN_PRIORITY(INSN) (HID (INSN)->priority)
+#define INSN_FUSION_PRIORITY(INSN) (HID (INSN)->fusion_priority)
+#define INSN_REG_PRESSURE(INSN) (HID (INSN)->reg_pressure)
+#define INSN_MAX_REG_PRESSURE(INSN) (HID (INSN)->max_reg_pressure)
+#define INSN_REG_USE_LIST(INSN) (HID (INSN)->reg_use_list)
+#define INSN_REG_SET_LIST(INSN) (HID (INSN)->reg_set_list)
+#define INSN_REG_PRESSURE_EXCESS_COST_CHANGE(INSN) \
+ (HID (INSN)->reg_pressure_excess_cost_change)
+#define INSN_PRIORITY_STATUS(INSN) (HID (INSN)->priority_status)
+#define INSN_MODEL_INDEX(INSN) (HID (INSN)->model_index)
+#define INSN_AUTOPREF_MULTIPASS_DATA(INSN) \
+ (HID (INSN)->autopref_multipass_data)
+
+typedef struct _haifa_deps_insn_data haifa_deps_insn_data_def;
+typedef haifa_deps_insn_data_def *haifa_deps_insn_data_t;
+
+
+extern vec<haifa_deps_insn_data_def> h_d_i_d;
+
+#define HDID(INSN) (&h_d_i_d[INSN_LUID (INSN)])
+#define INSN_DEP_COUNT(INSN) (HDID (INSN)->dep_count)
+#define HAS_INTERNAL_DEP(INSN) (HDID (INSN)->has_internal_dep)
+#define INSN_FORW_DEPS(INSN) (HDID (INSN)->forw_deps)
+#define INSN_RESOLVED_BACK_DEPS(INSN) (HDID (INSN)->resolved_back_deps)
+#define INSN_RESOLVED_FORW_DEPS(INSN) (HDID (INSN)->resolved_forw_deps)
+#define INSN_HARD_BACK_DEPS(INSN) (HDID (INSN)->hard_back_deps)
+#define INSN_SPEC_BACK_DEPS(INSN) (HDID (INSN)->spec_back_deps)
+#define INSN_CACHED_COND(INSN) (HDID (INSN)->cond)
+#define INSN_REVERSE_COND(INSN) (HDID (INSN)->reverse_cond)
+#define INSN_COND_DEPS(INSN) (HDID (INSN)->cond_deps)
+#define CANT_MOVE(INSN) (HDID (INSN)->cant_move)
+#define CANT_MOVE_BY_LUID(LUID) (h_d_i_d[LUID].cant_move)
+
+
+#define INSN_PRIORITY(INSN) (HID (INSN)->priority)
+#define INSN_PRIORITY_STATUS(INSN) (HID (INSN)->priority_status)
+#define INSN_PRIORITY_KNOWN(INSN) (INSN_PRIORITY_STATUS (INSN) > 0)
+#define TODO_SPEC(INSN) (HID (INSN)->todo_spec)
+#define DONE_SPEC(INSN) (HID (INSN)->done_spec)
+#define CHECK_SPEC(INSN) (HID (INSN)->check_spec)
+#define RECOVERY_BLOCK(INSN) (HID (INSN)->recovery_block)
+#define ORIG_PAT(INSN) (HID (INSN)->orig_pat)
+#define PREDICATED_PAT(INSN) (HID (INSN)->predicated_pat)
+
+/* INSN is either a simple or a branchy speculation check. */
+#define IS_SPECULATION_CHECK_P(INSN) \
+ (sel_sched_p () ? sel_insn_is_speculation_check (INSN) : RECOVERY_BLOCK (INSN) != NULL)
+
+/* INSN is a speculation check that will simply reexecute the speculatively
+ scheduled instruction if the speculation fails. */
+#define IS_SPECULATION_SIMPLE_CHECK_P(INSN) \
+ (RECOVERY_BLOCK (INSN) == EXIT_BLOCK_PTR_FOR_FN (cfun))
+
+/* INSN is a speculation check that will branch to RECOVERY_BLOCK if the
+ speculation fails. Insns in that block will reexecute the speculatively
+ scheduled code and then will return immediately after INSN thus preserving
+ semantics of the program. */
+#define IS_SPECULATION_BRANCHY_CHECK_P(INSN) \
+ (RECOVERY_BLOCK (INSN) != NULL \
+ && RECOVERY_BLOCK (INSN) != EXIT_BLOCK_PTR_FOR_FN (cfun))
+
+
+/* Dep status (aka ds_t) of the link encapsulates all information for a given
+ dependency, including everything that is needed for speculative scheduling.
+
+ The lay-out of a ds_t is as follows:
+
+ 1. Integers corresponding to the probability that the dependence does
+ *not* exist. This is the probability that overcoming this dependence will
+ not be followed by execution of the recovery code. Note that however
+ high this probability is, the recovery code should still always be
+ generated to preserve semantics of the program.
+
+ The probability values can be set or retrieved using the functions
+ set_dep_weak() and get_dep_weak() in sched-deps.cc. The values
+ are always in the range [0, MAX_DEP_WEAK].
+
+ BEGIN_DATA : BITS_PER_DEP_WEAK
+ BE_IN_DATA : BITS_PER_DEP_WEAK
+ BEGIN_CONTROL : BITS_PER_DEP_WEAK
+ BE_IN_CONTROL : BITS_PER_DEP_WEAK
+
+ The basic type of DS_T is a host int. For a 32-bit int, the values
+ will each take 6 bits.
+
+ 2. The type of dependence. This supersedes the old-style REG_NOTE_KIND
+ values. TODO: Use this field instead of DEP_TYPE, or make DEP_TYPE
+ extract the dependence type from here.
+
+ dep_type : 4 => DEP_{TRUE|OUTPUT|ANTI|CONTROL}
+
+ 3. Various flags:
+
+ HARD_DEP : 1 => Set if an instruction has a non-speculative
+ dependence. This is an instruction property
+ so this bit can only appear in the TODO_SPEC
+ field of an instruction.
+ DEP_POSTPONED : 1 => Like HARD_DEP, but the hard dependence may
+ still be broken by adjusting the instruction.
+ DEP_CANCELLED : 1 => Set if a dependency has been broken using
+ some form of speculation.
+ RESERVED : 1 => Reserved for use in the delay slot scheduler.
+
+ See also: check_dep_status () in sched-deps.cc . */
+
+/* The number of bits per weakness probability. There are 4 weakness types
+ and we need 8 bits for other data in a DS_T. */
+#define BITS_PER_DEP_WEAK ((BITS_PER_DEP_STATUS - 8) / 4)
+
+/* Mask of speculative weakness in dep_status. */
+#define DEP_WEAK_MASK ((1 << BITS_PER_DEP_WEAK) - 1)
+
+/* This constant means that the dependence is fake with 99.999...%
+ probability. This is the maximum value that can appear in dep_status.
+ Note that we don't want MAX_DEP_WEAK to be the same as DEP_WEAK_MASK,
+ for debugging reasons. Though it can be set to DEP_WEAK_MASK, and when
+ done so, we'll get fast (mul for)/(div by) NO_DEP_WEAK. */
+#define MAX_DEP_WEAK (DEP_WEAK_MASK - 1)
+
+/* This constant means that dependence is 99.999...% real and it is a really
+ bad idea to overcome it (though this can be done, preserving program
+ semantics). */
+#define MIN_DEP_WEAK 1
+
+/* This constant represents 100% probability.
+ E.g. it is used to represent the weakness of a dependence that doesn't exist.
+ This value never appears in a ds_t, it is only used for computing the
+ weakness of a dependence. */
+#define NO_DEP_WEAK (MAX_DEP_WEAK + MIN_DEP_WEAK)
+
+/* Default weakness of a speculative dependence. Used when we can say
+ neither bad nor good about the dependence. */
+#define UNCERTAIN_DEP_WEAK (MAX_DEP_WEAK - MAX_DEP_WEAK / 4)
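+
+/* Worked example: on a host with a 32-bit int, BITS_PER_DEP_STATUS is
+   32, so BITS_PER_DEP_WEAK is (32 - 8) / 4 == 6, DEP_WEAK_MASK is 63,
+   MAX_DEP_WEAK is 62, NO_DEP_WEAK is 63 and UNCERTAIN_DEP_WEAK is
+   62 - 62 / 4 == 47.  */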
+
+/* Offset for speculative weaknesses in dep_status. */
+enum SPEC_TYPES_OFFSETS {
+ BEGIN_DATA_BITS_OFFSET = 0,
+ BE_IN_DATA_BITS_OFFSET = BEGIN_DATA_BITS_OFFSET + BITS_PER_DEP_WEAK,
+ BEGIN_CONTROL_BITS_OFFSET = BE_IN_DATA_BITS_OFFSET + BITS_PER_DEP_WEAK,
+ BE_IN_CONTROL_BITS_OFFSET = BEGIN_CONTROL_BITS_OFFSET + BITS_PER_DEP_WEAK
+};
+
+/* The following defines provide numerous constants used to distinguish
+ between different types of speculative dependencies. They are also
+ used as masks to clear/preserve the bits corresponding to the type
+ of dependency weakness. */
+
+/* Dependence can be overcome with generation of new data speculative
+ instruction. */
+#define BEGIN_DATA (((ds_t) DEP_WEAK_MASK) << BEGIN_DATA_BITS_OFFSET)
+
+/* This dependence is to an instruction in the recovery block that was
+ formed to recover after a data-speculation failure.
+ Thus, this dependence can be overcome by generating a copy of this
+ instruction in the recovery block. */
+#define BE_IN_DATA (((ds_t) DEP_WEAK_MASK) << BE_IN_DATA_BITS_OFFSET)
+
+/* Dependence can be overcome with generation of new control speculative
+ instruction. */
+#define BEGIN_CONTROL (((ds_t) DEP_WEAK_MASK) << BEGIN_CONTROL_BITS_OFFSET)
+
+/* This dependence is to an instruction in the recovery block that was
+ formed to recover after a control-speculation failure.
+ Thus, this dependence can be overcome by generating a copy of this
+ instruction in the recovery block. */
+#define BE_IN_CONTROL (((ds_t) DEP_WEAK_MASK) << BE_IN_CONTROL_BITS_OFFSET)
+
+/* A few convenient combinations. */
+#define BEGIN_SPEC (BEGIN_DATA | BEGIN_CONTROL)
+#define DATA_SPEC (BEGIN_DATA | BE_IN_DATA)
+#define CONTROL_SPEC (BEGIN_CONTROL | BE_IN_CONTROL)
+#define SPECULATIVE (DATA_SPEC | CONTROL_SPEC)
+#define BE_IN_SPEC (BE_IN_DATA | BE_IN_CONTROL)
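+
+/* Extraction sketch: the weakness stored for one speculation type is
+   read back by masking and shifting, e.g. (hedged; compare
+   get_dep_weak in sched-deps.cc):
+
+     dw_t w = (dw_t) ((ds & BEGIN_DATA) >> BEGIN_DATA_BITS_OFFSET);
+  */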
+
+/* Constants that are helpful when iterating through dep_status. */
+#define FIRST_SPEC_TYPE BEGIN_DATA
+#define LAST_SPEC_TYPE BE_IN_CONTROL
+#define SPEC_TYPE_SHIFT BITS_PER_DEP_WEAK
+
+/* A dependence on an instruction can be of multiple types
+ (e.g. true and output). These fields augment the REG_NOTE_KIND information
+ of the dependence. */
+#define DEP_TRUE (((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + BITS_PER_DEP_WEAK))
+#define DEP_OUTPUT (DEP_TRUE << 1)
+#define DEP_ANTI (DEP_OUTPUT << 1)
+#define DEP_CONTROL (DEP_ANTI << 1)
+
+#define DEP_TYPES (DEP_TRUE | DEP_OUTPUT | DEP_ANTI | DEP_CONTROL)
+
+/* Instruction has a non-speculative dependence. This bit represents a
+ property of an instruction - not of a dependence.
+ Therefore, it can appear only in the TODO_SPEC field of an instruction. */
+#define HARD_DEP (DEP_CONTROL << 1)
+
+/* Like HARD_DEP, but dependencies can perhaps be broken by modifying
+ the instructions. This is used for example to change:
+
+ rn++ => rm=[rn + 4]
+ rm=[rn] rn++
+
+ For instructions that have this bit set, one of the dependencies of
+ the instructions will have a non-NULL REPLACE field in its DEP_T.
+ Just like HARD_DEP, this bit is only ever set in TODO_SPEC. */
+#define DEP_POSTPONED (HARD_DEP << 1)
+
+/* Set if a dependency is cancelled via speculation. */
+#define DEP_CANCELLED (DEP_POSTPONED << 1)
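+
+/* An illustrative sketch (not part of the upstream sources): if
+ BITS_PER_DEP_STATUS were 24, the layout described above would give
+ BITS_PER_DEP_WEAK = (24 - 8) / 4 = 4 and the following ds_t bits:
+
+ bits 0-3 BEGIN_DATA weakness bit 16 DEP_TRUE
+ bits 4-7 BE_IN_DATA weakness bit 17 DEP_OUTPUT
+ bits 8-11 BEGIN_CONTROL weakness bit 18 DEP_ANTI
+ bits 12-15 BE_IN_CONTROL weakness bit 19 DEP_CONTROL
+ bit 20 HARD_DEP
+ bit 21 DEP_POSTPONED
+ bit 22 DEP_CANCELLED
+ bit 23 RESERVED
+
+ With these numbers DEP_WEAK_MASK = 0xf, MAX_DEP_WEAK = 0xe,
+ NO_DEP_WEAK = 0xf and UNCERTAIN_DEP_WEAK = 0xe - 0xe / 4 = 0xb. */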
+
+
+/* This represents the results of calling sched-deps.cc functions,
+ which modify dependencies. */
+enum DEPS_ADJUST_RESULT {
+ /* No dependence needed (e.g. producer == consumer). */
+ DEP_NODEP,
+ /* Dependence is already present and wasn't modified. */
+ DEP_PRESENT,
+ /* Existing dependence was modified to include additional information. */
+ DEP_CHANGED,
+ /* New dependence has been created. */
+ DEP_CREATED
+};
+
+/* Represents the bits that can be set in the flags field of the
+ sched_info structure. */
+enum SCHED_FLAGS {
+ /* If set, generate links between instructions as DEPS_LISTs.
+ Otherwise, generate the usual INSN_LIST links. */
+ USE_DEPS_LIST = 1,
+ /* Perform data or control (or both) speculation.
+ Results in generation of data and control speculative dependencies.
+ Requires USE_DEPS_LIST set. */
+ DO_SPECULATION = USE_DEPS_LIST << 1,
+ DO_BACKTRACKING = DO_SPECULATION << 1,
+ DO_PREDICATION = DO_BACKTRACKING << 1,
+ DONT_BREAK_DEPENDENCIES = DO_PREDICATION << 1,
+ SCHED_RGN = DONT_BREAK_DEPENDENCIES << 1,
+ SCHED_EBB = SCHED_RGN << 1,
+ /* Scheduler can possibly create new basic blocks. Used for assertions. */
+ NEW_BBS = SCHED_EBB << 1,
+ SEL_SCHED = NEW_BBS << 1
+};
+
+enum SPEC_SCHED_FLAGS {
+ COUNT_SPEC_IN_CRITICAL_PATH = 1,
+ SEL_SCHED_SPEC_DONT_CHECK_CONTROL = COUNT_SPEC_IN_CRITICAL_PATH << 1
+};
+
+#define NOTE_NOT_BB_P(NOTE) (NOTE_P (NOTE) && (NOTE_KIND (NOTE) \
+ != NOTE_INSN_BASIC_BLOCK))
+
+extern FILE *sched_dump;
+extern int sched_verbose;
+
+extern spec_info_t spec_info;
+extern bool haifa_recovery_bb_ever_added_p;
+
+/* Exception Free Loads:
+
+ We define five classes of speculative loads: IFREE, IRISKY,
+ PFREE, PRISKY, and MFREE.
+
+ IFREE loads are loads that are proved to be exception-free just
+ by examining the load insn. Examples of such loads are loads
+ from the TOC and loads of global data.
+
+ IRISKY loads are loads that are proved to be exception-risky
+ just by examining the load insn. Examples of such loads are
+ volatile loads and loads from shared memory.
+
+ PFREE loads are loads for which we can prove, by examining other
+ insns, that they are exception-free. Currently, this class consists
+ of loads for which we are able to find a "similar load", either in
+ the target block, or, if only one split-block exists, in that split
+ block. Load2 is similar to load1 if both have the same single base
+ register. We identify only part of the similar loads by finding
+ an insn upon which both load1 and load2 have a DEF-USE dependence.
+
+ PRISKY loads are loads for which we can prove, by examining other
+ insns, that they are exception-risky. Currently we have two proofs for
+ such loads. The first proof detects loads that are probably guarded by a
+ test on the memory address. This proof is based on the
+ backward and forward data dependence information for the region.
+ Let load-insn be the examined load.
+ Load-insn is PRISKY iff ALL the following hold:
+
+ - insn1 is not in the same block as load-insn
+ - there is a DEF-USE dependence chain (insn1, ..., load-insn)
+ - test-insn is either a compare or a branch, not in the same block
+ as load-insn
+ - load-insn is reachable from test-insn
+ - there is a DEF-USE dependence chain (insn1, ..., test-insn)
+
+ This proof might fail when the compare and the load are fed
+ by an insn not in the region. To solve this, we will add to this
+ group all loads that have no input DEF-USE dependence.
+
+ The second proof detects loads that are directly or indirectly
+ fed by a speculative load. This proof is affected by the
+ scheduling process. We will use the flag fed_by_spec_load.
+ Initially, all insns have this flag reset. After a speculative
+ motion of an insn, if insn is either a load, or marked as
+ fed_by_spec_load, we will also mark as fed_by_spec_load every
+ insn1 for which a DEF-USE dependence (insn, insn1) exists. A
+ load which is fed_by_spec_load is also PRISKY.
+
+ MFREE (maybe-free) loads are all the remaining loads. They may be
+ exception-free, but we cannot prove it.
+
+ Now, all loads in IFREE and PFREE classes are considered
+ exception-free, while all loads in IRISKY and PRISKY classes are
+ considered exception-risky. As for loads in the MFREE class,
+ these are considered either exception-free or exception-risky,
+ depending on whether we are pessimistic or optimistic. We have
+ to take the pessimistic approach to assure the safety of
+ speculative scheduling, but we can take the optimistic approach
+ by invoking the -fsched-spec-load-dangerous option. */
+
+enum INSN_TRAP_CLASS
+{
+ TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
+ PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
+};
+
+#define WORST_CLASS(class1, class2) \
+((class1 > class2) ? class1 : class2)
+
+#ifndef __GNUC__
+#define __inline
+#endif
+
+#ifndef HAIFA_INLINE
+#define HAIFA_INLINE __inline
+#endif
+
+struct sched_deps_info_def
+{
+ /* Called when computing dependencies for a JUMP_INSN. This function
+ should store the set of registers that must be considered as set by
+ the jump in the regset. */
+ void (*compute_jump_reg_dependencies) (rtx, regset);
+
+ /* Start analyzing insn. */
+ void (*start_insn) (rtx_insn *);
+
+ /* Finish analyzing insn. */
+ void (*finish_insn) (void);
+
+ /* Start analyzing insn LHS (Left Hand Side). */
+ void (*start_lhs) (rtx);
+
+ /* Finish analyzing insn LHS. */
+ void (*finish_lhs) (void);
+
+ /* Start analyzing insn RHS (Right Hand Side). */
+ void (*start_rhs) (rtx);
+
+ /* Finish analyzing insn RHS. */
+ void (*finish_rhs) (void);
+
+ /* Note set of the register. */
+ void (*note_reg_set) (int);
+
+ /* Note clobber of the register. */
+ void (*note_reg_clobber) (int);
+
+ /* Note use of the register. */
+ void (*note_reg_use) (int);
+
+ /* Note a memory dependence of type DS between MEM1 and MEM2 (which is
+ contained in INSN2). */
+ void (*note_mem_dep) (rtx mem1, rtx mem2, rtx_insn *insn2, ds_t ds);
+
+ /* Note a dependence of type DS from the INSN. */
+ void (*note_dep) (rtx_insn *, ds_t ds);
+
+ /* Nonzero if we should use cselib for better alias analysis. This
+ must be 0 if the dependency information is used after sched_analyze
+ has completed, e.g. if we're using it to initialize state for successor
+ blocks in region scheduling. */
+ unsigned int use_cselib : 1;
+
+ /* If set, generate links between instructions as DEPS_LISTs.
+ Otherwise, generate the usual INSN_LIST links. */
+ unsigned int use_deps_list : 1;
+
+ /* Generate data and control speculative dependencies.
+ Requires USE_DEPS_LIST set. */
+ unsigned int generate_spec_deps : 1;
+};
+
+extern struct sched_deps_info_def *sched_deps_info;
+
+
+/* Functions in sched-deps.cc. */
+extern rtx sched_get_reverse_condition_uncached (const rtx_insn *);
+extern bool sched_insns_conditions_mutex_p (const rtx_insn *,
+ const rtx_insn *);
+extern bool sched_insn_is_legitimate_for_speculation_p (const rtx_insn *, ds_t);
+extern void add_dependence (rtx_insn *, rtx_insn *, enum reg_note);
+extern void sched_analyze (class deps_desc *, rtx_insn *, rtx_insn *);
+extern void init_deps (class deps_desc *, bool);
+extern void init_deps_reg_last (class deps_desc *);
+extern void free_deps (class deps_desc *);
+extern void init_deps_global (void);
+extern void finish_deps_global (void);
+extern void deps_analyze_insn (class deps_desc *, rtx_insn *);
+extern void remove_from_deps (class deps_desc *, rtx_insn *);
+extern void init_insn_reg_pressure_info (rtx_insn *);
+extern void get_implicit_reg_pending_clobbers (HARD_REG_SET *, rtx_insn *);
+
+extern dw_t get_dep_weak (ds_t, ds_t);
+extern ds_t set_dep_weak (ds_t, ds_t, dw_t);
+extern dw_t estimate_dep_weak (rtx, rtx);
+extern ds_t ds_merge (ds_t, ds_t);
+extern ds_t ds_full_merge (ds_t, ds_t, rtx, rtx);
+extern ds_t ds_max_merge (ds_t, ds_t);
+extern dw_t ds_weak (ds_t);
+extern ds_t ds_get_speculation_types (ds_t);
+extern ds_t ds_get_max_dep_weak (ds_t);
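+
+/* A usage sketch (illustrative only; DS is a hypothetical ds_t value with
+ BEGIN_DATA set):
+
+ dw_t w = get_dep_weak (ds, BEGIN_DATA);
+ ds = set_dep_weak (ds, BEGIN_DATA, w > MIN_DEP_WEAK ? w - 1 : w);
+
+ This reads the BEGIN_DATA weakness out of DS and writes back a slightly
+ lower one, staying within [MIN_DEP_WEAK, MAX_DEP_WEAK]. */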
+
+extern void sched_deps_init (bool);
+extern void sched_deps_finish (void);
+
+extern void haifa_note_reg_set (int);
+extern void haifa_note_reg_clobber (int);
+extern void haifa_note_reg_use (int);
+
+extern void maybe_extend_reg_info_p (void);
+
+extern void deps_start_bb (class deps_desc *, rtx_insn *);
+extern enum reg_note ds_to_dt (ds_t);
+
+extern bool deps_pools_are_empty_p (void);
+extern void sched_free_deps (rtx_insn *, rtx_insn *, bool);
+extern void extend_dependency_caches (int, bool);
+
+extern void debug_ds (ds_t);
+
+
+/* Functions in haifa-sched.cc. */
+extern void initialize_live_range_shrinkage (void);
+extern void finish_live_range_shrinkage (void);
+extern void sched_init_region_reg_pressure_info (void);
+extern void free_global_sched_pressure_data (void);
+extern int haifa_classify_insn (const_rtx);
+extern void get_ebb_head_tail (basic_block, basic_block,
+ rtx_insn **, rtx_insn **);
+extern int no_real_insns_p (const rtx_insn *, const rtx_insn *);
+
+extern int insn_sched_cost (rtx_insn *);
+extern int dep_cost_1 (dep_t, dw_t);
+extern int dep_cost (dep_t);
+extern int set_priorities (rtx_insn *, rtx_insn *);
+
+extern void sched_setup_bb_reg_pressure_info (basic_block, rtx_insn *);
+extern bool schedule_block (basic_block *, state_t);
+
+extern int cycle_issued_insns;
+extern int issue_rate;
+extern int dfa_lookahead;
+
+extern int autopref_multipass_dfa_lookahead_guard (rtx_insn *, int);
+
+extern rtx_insn *ready_element (struct ready_list *, int);
+extern rtx_insn **ready_lastpos (struct ready_list *);
+
+extern int try_ready (rtx_insn *);
+extern void sched_extend_ready_list (int);
+extern void sched_finish_ready_list (void);
+extern void sched_change_pattern (rtx, rtx);
+extern int sched_speculate_insn (rtx_insn *, ds_t, rtx *);
+extern void unlink_bb_notes (basic_block, basic_block);
+extern void add_block (basic_block, basic_block);
+extern rtx_note *bb_note (basic_block);
+extern void concat_note_lists (rtx_insn *, rtx_insn **);
+extern rtx_insn *sched_emit_insn (rtx);
+extern rtx_insn *get_ready_element (int);
+extern int number_in_ready (void);
+
+/* Types and functions in sched-ebb.cc. */
+
+extern basic_block schedule_ebb (rtx_insn *, rtx_insn *, bool);
+extern void schedule_ebbs_init (void);
+extern void schedule_ebbs_finish (void);
+
+/* Types and functions in sched-rgn.cc. */
+
+/* A region is the main entity for interblock scheduling: insns
+ are allowed to move between blocks in the same region, along
+ control flow graph edges, in the 'up' direction. */
+struct region
+{
+ /* Number of extended basic blocks in region. */
+ int rgn_nr_blocks;
+ /* The first block of the region (actually an index into rgn_bb_table). */
+ int rgn_blocks;
+ /* Dependencies for this region are already computed. Basically, this
+ indicates that this is a recovery block. */
+ unsigned int dont_calc_deps : 1;
+ /* This region has at least one non-trivial ebb. */
+ unsigned int has_real_ebb : 1;
+};
+
+extern int nr_regions;
+extern region *rgn_table;
+extern int *rgn_bb_table;
+extern int *block_to_bb;
+extern int *containing_rgn;
+
+/* Often used short-hand in the scheduler. The rest of the compiler uses
+ BLOCK_FOR_INSN(INSN) and an indirect reference to get the basic block
+ number ("index"). For historical reasons, the scheduler does not. */
+#define BLOCK_NUM(INSN) (BLOCK_FOR_INSN (INSN)->index + 0)
+
+#define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
+#define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
+#define RGN_DONT_CALC_DEPS(rgn) (rgn_table[rgn].dont_calc_deps)
+#define RGN_HAS_REAL_EBB(rgn) (rgn_table[rgn].has_real_ebb)
+#define BLOCK_TO_BB(block) (block_to_bb[block])
+#define CONTAINING_RGN(block) (containing_rgn[block])
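+
+/* A minimal sketch (illustrative only) of walking the blocks of region RGN
+ with the accessors above:
+
+ for (int i = 0; i < RGN_NR_BLOCKS (rgn); i++)
+ {
+ int block = rgn_bb_table[RGN_BLOCKS (rgn) + i];
+ gcc_assert (CONTAINING_RGN (block) == rgn);
+ }
+ */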
+
+/* The mapping from ebb to block. */
+extern int *ebb_head;
+#define BB_TO_BLOCK(ebb) (rgn_bb_table[ebb_head[ebb]])
+#define EBB_FIRST_BB(ebb) BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (ebb))
+#define EBB_LAST_BB(ebb) \
+ BASIC_BLOCK_FOR_FN (cfun, rgn_bb_table[ebb_head[ebb + 1] - 1])
+#define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
+
+extern int current_nr_blocks;
+extern int current_blocks;
+extern int target_bb;
+extern bool sched_no_dce;
+
+extern void set_modulo_params (int, int, int, int);
+extern void record_delay_slot_pair (rtx_insn *, rtx_insn *, int, int);
+extern rtx_insn *real_insn_for_shadow (rtx_insn *);
+extern void discard_delay_pairs_above (int);
+extern void free_delay_pairs (void);
+extern void add_delay_dependencies (rtx_insn *);
+extern bool sched_is_disabled_for_current_region_p (void);
+extern void sched_rgn_init (bool);
+extern void sched_rgn_finish (void);
+extern void rgn_setup_region (int);
+extern void sched_rgn_compute_dependencies (int);
+extern void sched_rgn_local_init (int);
+extern void sched_rgn_local_finish (void);
+extern void sched_rgn_local_free (void);
+extern void extend_regions (void);
+extern void rgn_make_new_region_out_of_new_block (basic_block);
+
+extern void compute_priorities (void);
+extern void increase_insn_priority (rtx_insn *, int);
+extern void debug_rgn_dependencies (int);
+extern void debug_dependencies (rtx_insn *, rtx_insn *);
+extern void dump_rgn_dependencies_dot (FILE *);
+extern void dump_rgn_dependencies_dot (const char *);
+
+extern void free_rgn_deps (void);
+extern int contributes_to_priority (rtx_insn *, rtx_insn *);
+extern void extend_rgns (int *, int *, sbitmap, int *);
+extern void deps_join (class deps_desc *, class deps_desc *);
+
+extern void rgn_setup_common_sched_info (void);
+extern void rgn_setup_sched_infos (void);
+
+extern void debug_regions (void);
+extern void debug_region (int);
+extern void dump_region_dot (FILE *, int);
+extern void dump_region_dot_file (const char *, int);
+
+extern void haifa_sched_init (void);
+extern void haifa_sched_finish (void);
+
+extern void find_modifiable_mems (rtx_insn *, rtx_insn *);
+
+/* sched-deps.cc interface to walk, add, search, update, resolve, delete
+ and debug instruction dependencies. */
+
+/* Constants defining dependences lists. */
+
+/* No list. */
+#define SD_LIST_NONE (0)
+
+/* hard_back_deps. */
+#define SD_LIST_HARD_BACK (1)
+
+/* spec_back_deps. */
+#define SD_LIST_SPEC_BACK (2)
+
+/* forw_deps. */
+#define SD_LIST_FORW (4)
+
+/* resolved_back_deps. */
+#define SD_LIST_RES_BACK (8)
+
+/* resolved_forw_deps. */
+#define SD_LIST_RES_FORW (16)
+
+#define SD_LIST_BACK (SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK)
+
+/* A type to hold above flags. */
+typedef int sd_list_types_def;
+
+extern void sd_next_list (const_rtx, sd_list_types_def *, deps_list_t *, bool *);
+
+/* Iterator to walk through, resolve and delete dependencies. */
+struct _sd_iterator
+{
+ /* What lists to walk. Can be any combination of SD_LIST_* flags. */
+ sd_list_types_def types;
+
+ /* The instruction whose dependence lists are being walked. */
+ rtx insn;
+
+ /* Pointer to the next field of the previous element. This is not
+ simply a pointer to the next element to allow easy deletion from the
+ list. When a dep is being removed from the list the iterator
+ will automatically advance because the value in *linkp will start
+ referring to the next element. */
+ dep_link_t *linkp;
+
+ /* True if the current list is a resolved one. */
+ bool resolved_p;
+};
+
+typedef struct _sd_iterator sd_iterator_def;
+
+/* ??? We could move some definitions that are used in the inline functions
+ below out of sched-int.h and into sched-deps.cc, provided that those
+ functions become global externals.
+ These definitions include:
+ * struct _deps_list: opaque pointer is needed at global scope.
+ * struct _dep_link: opaque pointer is needed at scope of sd_iterator_def.
+ * struct _dep_node: opaque pointer is needed at scope of
+ struct _deps_link. */
+
+/* Return initialized iterator. */
+inline sd_iterator_def
+sd_iterator_start (rtx insn, sd_list_types_def types)
+{
+ /* A dep_link pointer whose target is NULL, used to start iteration
+ from an empty list. */
+ static dep_link_t null_link = NULL;
+
+ sd_iterator_def i;
+
+ i.types = types;
+ i.insn = insn;
+ i.linkp = &null_link;
+
+ /* Avoid an 'uninitialized' warning. */
+ i.resolved_p = false;
+
+ return i;
+}
+
+/* If there is a current element, store it in *DEP_PTR and return true;
+ otherwise return false. */
+inline bool
+sd_iterator_cond (sd_iterator_def *it_ptr, dep_t *dep_ptr)
+{
+ while (true)
+ {
+ dep_link_t link = *it_ptr->linkp;
+
+ if (link != NULL)
+ {
+ *dep_ptr = DEP_LINK_DEP (link);
+ return true;
+ }
+ else
+ {
+ sd_list_types_def types = it_ptr->types;
+
+ if (types != SD_LIST_NONE)
+ /* Switch to next list. */
+ {
+ deps_list_t list;
+
+ sd_next_list (it_ptr->insn,
+ &it_ptr->types, &list, &it_ptr->resolved_p);
+
+ if (list)
+ {
+ it_ptr->linkp = &DEPS_LIST_FIRST (list);
+ continue;
+ }
+ }
+
+ *dep_ptr = NULL;
+ return false;
+ }
+ }
+}
+
+/* Advance iterator. */
+inline void
+sd_iterator_next (sd_iterator_def *it_ptr)
+{
+ it_ptr->linkp = &DEP_LINK_NEXT (*it_ptr->linkp);
+}
+
+/* A loop wrapper for walking the dependence lists of INSN. */
+#define FOR_EACH_DEP(INSN, LIST_TYPES, ITER, DEP) \
+ for ((ITER) = sd_iterator_start ((INSN), (LIST_TYPES)); \
+ sd_iterator_cond (&(ITER), &(DEP)); \
+ sd_iterator_next (&(ITER)))
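+
+/* For example, the following sketch (illustrative only; INSN is a
+ hypothetical rtx_insn *, and DEP_PRO is the producer accessor defined
+ earlier in this header) walks all hard and speculative backward
+ dependencies of INSN:
+
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
+ fprintf (sched_dump, "producer uid: %d\n", INSN_UID (DEP_PRO (dep)));
+ */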
+
+#define IS_DISPATCH_ON 1
+#define IS_CMP 2
+#define DISPATCH_VIOLATION 3
+#define FITS_DISPATCH_WINDOW 4
+#define DISPATCH_INIT 5
+#define ADD_TO_DISPATCH_WINDOW 6
+
+extern int sd_lists_size (const_rtx, sd_list_types_def);
+extern bool sd_lists_empty_p (const_rtx, sd_list_types_def);
+extern void sd_init_insn (rtx_insn *);
+extern void sd_finish_insn (rtx_insn *);
+extern dep_t sd_find_dep_between (rtx, rtx, bool);
+extern void sd_add_dep (dep_t, bool);
+extern enum DEPS_ADJUST_RESULT sd_add_or_update_dep (dep_t, bool);
+extern void sd_resolve_dep (sd_iterator_def);
+extern void sd_unresolve_dep (sd_iterator_def);
+extern void sd_copy_back_deps (rtx_insn *, rtx_insn *, bool);
+extern void sd_delete_dep (sd_iterator_def);
+extern void sd_debug_lists (rtx, sd_list_types_def);
+
+/* Macros and declarations for scheduling fusion. */
+#define FUSION_MAX_PRIORITY (INT_MAX)
+extern bool sched_fusion;
+
+#endif /* INSN_SCHEDULING */
+
+#endif /* GCC_SCHED_INT_H */
+
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sel-sched-dump.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sel-sched-dump.h
new file mode 100644
index 0000000..2a207ce
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sel-sched-dump.h
@@ -0,0 +1,233 @@
+/* Instruction scheduling pass. Log dumping infrastructure.
+ Copyright (C) 2006-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_SEL_SCHED_DUMP_H
+#define GCC_SEL_SCHED_DUMP_H
+
+
+
+/* These values control the dumping of the control flow graph to the .dot
+ file. */
+enum sel_dump_cfg_def
+ {
+ /* Dump only current region. */
+ SEL_DUMP_CFG_CURRENT_REGION = 2,
+
+ /* Dump note_list for this bb. */
+ SEL_DUMP_CFG_BB_NOTES_LIST = 4,
+
+ /* Dump availability set from the bb header. */
+ SEL_DUMP_CFG_AV_SET = 8,
+
+ /* Dump liveness set from the bb header. */
+ SEL_DUMP_CFG_LV_SET = 16,
+
+ /* Dump insns of the given block. */
+ SEL_DUMP_CFG_BB_INSNS = 32,
+
+ /* Show current fences when dumping cfg. */
+ SEL_DUMP_CFG_FENCES = 64,
+
+ /* Show insn's seqnos when dumping cfg. */
+ SEL_DUMP_CFG_INSN_SEQNO = 128,
+
+ /* Dump function name when dumping cfg. */
+ SEL_DUMP_CFG_FUNCTION_NAME = 256,
+
+ /* Dump loop father number of the given bb. */
+ SEL_DUMP_CFG_BB_LOOP = 512,
+
+ /* The default flags for cfg dumping. */
+ SEL_DUMP_CFG_FLAGS = (SEL_DUMP_CFG_CURRENT_REGION
+ | SEL_DUMP_CFG_BB_NOTES_LIST
+ | SEL_DUMP_CFG_AV_SET
+ | SEL_DUMP_CFG_LV_SET
+ | SEL_DUMP_CFG_BB_INSNS
+ | SEL_DUMP_CFG_FENCES
+ | SEL_DUMP_CFG_INSN_SEQNO
+ | SEL_DUMP_CFG_BB_LOOP)
+ };
+
+/* These values control the dumping of insns contained in expressions. */
+enum dump_insn_rtx_def
+ {
+ /* Dump insn's UID. */
+ DUMP_INSN_RTX_UID = 2,
+
+ /* Dump insn's pattern. */
+ DUMP_INSN_RTX_PATTERN = 4,
+
+ /* Dump insn's basic block number. */
+ DUMP_INSN_RTX_BBN = 8,
+
+ /* Dump all of the above. */
+ DUMP_INSN_RTX_ALL = (DUMP_INSN_RTX_UID | DUMP_INSN_RTX_PATTERN
+ | DUMP_INSN_RTX_BBN)
+ };
+
+extern void dump_insn_rtx_1 (rtx, int);
+extern void dump_insn_rtx (rtx);
+extern void debug_insn_rtx (rtx);
+
+/* These values control dumping of vinsns. The meaning of different fields
+ of a vinsn is explained in sel-sched-ir.h. */
+enum dump_vinsn_def
+ {
+ /* Dump the insn behind this vinsn. */
+ DUMP_VINSN_INSN_RTX = 2,
+
+ /* Dump vinsn's type. */
+ DUMP_VINSN_TYPE = 4,
+
+ /* Dump vinsn's count. */
+ DUMP_VINSN_COUNT = 8,
+
+ /* Dump the cost (default latency) of the insn behind this vinsn. */
+ DUMP_VINSN_COST = 16,
+
+ /* Dump all of the above. */
+ DUMP_VINSN_ALL = (DUMP_VINSN_INSN_RTX | DUMP_VINSN_TYPE | DUMP_VINSN_COUNT
+ | DUMP_VINSN_COST)
+ };
+
+extern void dump_vinsn_1 (vinsn_t, int);
+extern void dump_vinsn (vinsn_t);
+extern void debug_vinsn (vinsn_t);
+
+extern void debug (vinsn_def &ref);
+extern void debug (vinsn_def *ptr);
+extern void debug_verbose (vinsn_def &ref);
+extern void debug_verbose (vinsn_def *ptr);
+
+
+/* These values control dumping of expressions. The meaning of the fields
+ is explained in sel-sched-ir.h. */
+enum dump_expr_def
+ {
+ /* Dump the vinsn behind this expression. */
+ DUMP_EXPR_VINSN = 2,
+
+ /* Dump expression's SPEC parameter. */
+ DUMP_EXPR_SPEC = 4,
+
+ /* Dump expression's priority. */
+ DUMP_EXPR_PRIORITY = 8,
+
+ /* Dump the number of times this expression was scheduled. */
+ DUMP_EXPR_SCHED_TIMES = 16,
+
+ /* Dump speculative status of the expression. */
+ DUMP_EXPR_SPEC_DONE_DS = 32,
+
+ /* Dump the basic block number which originated this expression. */
+ DUMP_EXPR_ORIG_BB = 64,
+
+ /* Dump expression's usefulness. */
+ DUMP_EXPR_USEFULNESS = 128,
+
+ /* Dump all of the above. */
+ DUMP_EXPR_ALL = (DUMP_EXPR_VINSN | DUMP_EXPR_SPEC | DUMP_EXPR_PRIORITY
+ | DUMP_EXPR_SCHED_TIMES | DUMP_EXPR_SPEC_DONE_DS
+ | DUMP_EXPR_ORIG_BB | DUMP_EXPR_USEFULNESS)
+ };
+
+extern void dump_expr_1 (expr_t, int);
+extern void dump_expr (expr_t);
+extern void debug_expr (expr_t);
+
+extern void debug (expr_def &ref);
+extern void debug (expr_def *ptr);
+extern void debug_verbose (expr_def &ref);
+extern void debug_verbose (expr_def *ptr);
+
+
+/* An enumeration of dumping flags for an insn. The difference from
+ dump_insn_rtx_def is that these fields are for insns in the stream only. */
+enum dump_insn_def
+{
+ /* Dump expression of this insn. */
+ DUMP_INSN_EXPR = 2,
+
+ /* Dump insn's seqno. */
+ DUMP_INSN_SEQNO = 4,
+
+ /* Dump the cycle on which insn was scheduled. */
+ DUMP_INSN_SCHED_CYCLE = 8,
+
+ /* Dump insn's UID. */
+ DUMP_INSN_UID = 16,
+
+ /* Dump insn's pattern. */
+ DUMP_INSN_PATTERN = 32,
+
+ /* Dump insn's basic block number. */
+ DUMP_INSN_BBN = 64,
+
+ /* Dump all of the above. */
+ DUMP_INSN_ALL = (DUMP_INSN_EXPR | DUMP_INSN_SEQNO | DUMP_INSN_BBN
+ | DUMP_INSN_SCHED_CYCLE | DUMP_INSN_UID | DUMP_INSN_PATTERN)
+};
+
+extern void dump_insn_1 (insn_t, int);
+extern void dump_insn (insn_t);
+extern void debug_insn (insn_t);
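+
+/* For instance (illustrative only), dumping just the seqno and pattern of a
+ hypothetical insn I:
+
+ dump_insn_1 (i, DUMP_INSN_SEQNO | DUMP_INSN_PATTERN);
+
+ while dump_insn presumably applies a default combination of these flags. */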
+
+/* When this flag is on, we are dumping to the .dot file.
+ When it is off, we are dumping to the log. */
+extern bool sched_dump_to_dot_p;
+
+
+/* Functions from sel-sched-dump.cc. */
+extern void sel_print (const char *fmt, ...) ATTRIBUTE_PRINTF_1;
+extern const char * sel_print_insn (const rtx_insn *, int);
+extern void free_sel_dump_data (void);
+
+extern void block_start (void);
+extern void block_finish (void);
+extern int get_print_blocks_num (void);
+extern void line_start (void);
+extern void line_finish (void);
+
+extern void sel_print_rtl (rtx x);
+extern void dump_insn_1 (insn_t, int);
+extern void dump_insn (insn_t);
+extern void dump_insn_vector (rtx_vec_t);
+extern void dump_expr (expr_t);
+extern void dump_used_regs (bitmap);
+extern void dump_av_set (av_set_t);
+extern void dump_lv_set (regset);
+extern void dump_blist (blist_t);
+extern void dump_flist (flist_t);
+extern void dump_hard_reg_set (const char *, HARD_REG_SET);
+extern void sel_debug_cfg_1 (int);
+extern void sel_debug_cfg (void);
+extern void setup_dump_cfg_params (void);
+
+/* Debug functions. */
+extern void debug_expr (expr_t);
+extern void debug_av_set (av_set_t);
+extern void debug_lv_set (regset);
+extern void debug_ilist (ilist_t);
+extern void debug_blist (blist_t);
+extern void debug (vec<rtx> &ref);
+extern void debug (vec<rtx> *ptr);
+extern void debug_insn_vector (rtx_vec_t);
+extern void debug_hard_reg_set (HARD_REG_SET);
+extern rtx debug_mem_addr_value (rtx);
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sel-sched-ir.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sel-sched-ir.h
new file mode 100644
index 0000000..0e87134
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sel-sched-ir.h
@@ -0,0 +1,1674 @@
+/* Instruction scheduling pass. This file contains definitions used
+ internally in the scheduler.
+ Copyright (C) 2006-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SEL_SCHED_IR_H
+#define GCC_SEL_SCHED_IR_H
+
+/* For state_t. */
+/* For reg_note. */
+
+/* tc_t is short for target context. This is the state of the target
+ backend. */
+typedef void *tc_t;
+
+/* List data types used for av sets, fences, paths, and boundaries. */
+
+/* Forward declarations for types that are part of some list nodes. */
+struct _list_node;
+
+/* List backend. */
+typedef struct _list_node *_list_t;
+#define _LIST_NEXT(L) ((L)->next)
+
+/* Instruction data that is part of vinsn type. */
+struct idata_def;
+typedef struct idata_def *idata_t;
+
+/* A virtual instruction, i.e. an instruction as seen by the scheduler. */
+struct vinsn_def;
+typedef struct vinsn_def *vinsn_t;
+
+/* RTX list.
+ This type is the backend for ilist. */
+typedef _list_t _xlist_t;
+#define _XLIST_X(L) ((L)->u.x)
+#define _XLIST_NEXT(L) (_LIST_NEXT (L))
+
+/* Instruction. */
+typedef rtx_insn *insn_t;
+
+/* List of insns. */
+typedef _list_t ilist_t;
+#define ILIST_INSN(L) ((L)->u.insn)
+#define ILIST_NEXT(L) (_LIST_NEXT (L))
+
+/* This lists the possible transformations that are done locally, i.e. in
+ moveup_expr. */
+enum local_trans_type
+ {
+ TRANS_SUBSTITUTION,
+ TRANS_SPECULATION
+ };
+
+/* This struct is used to record the history of an expression's
+ transformations. */
+struct expr_history_def_1
+{
+ /* UID of the insn. */
+ unsigned uid;
+
+ /* What the expression looked like before the transformation. */
+ vinsn_t old_expr_vinsn;
+
+ /* What the expression looks like after the transformation. */
+ vinsn_t new_expr_vinsn;
+
+ /* And its speculative status. */
+ ds_t spec_ds;
+
+ /* Type of the transformation. */
+ enum local_trans_type type;
+};
+
+typedef struct expr_history_def_1 expr_history_def;
+
+
+/* Expression information. */
+struct _expr
+{
+ /* Insn description. */
+ vinsn_t vinsn;
+
+ /* SPEC is the degree of speculativeness.
+ FIXME: now spec is increased when an rhs is moved through a
+ conditional, thus showing only control speculativeness. In the
+ future we'd like to count data spec separately to allow better
+ control of scheduling. */
+ int spec;
+
+ /* Degree of speculativeness measured as the probability of executing the
+ instruction's original basic block, relative to the current scheduling
+ point. */
+ int usefulness;
+
+ /* A priority of this expression. */
+ int priority;
+
+ /* A priority adjustment of this expression. */
+ int priority_adj;
+
+ /* Number of times the insn was scheduled. */
+ int sched_times;
+
+ /* The index of the basic block this expression originated from. Zero when
+ there is more than one originator. */
+ int orig_bb_index;
+
+ /* Instruction should be of SPEC_DONE_DS type in order to be moved to this
+ point. */
+ ds_t spec_done_ds;
+
+ /* SPEC_TO_CHECK_DS holds the speculation types that should be checked
+ (used only during move_op ()). */
+ ds_t spec_to_check_ds;
+
+ /* Cycle on which the original insn was scheduled. Zero when it has not yet
+ been scheduled or when there is more than one originator. */
+ int orig_sched_cycle;
+
+ /* This vector contains the history of insn's transformations. */
+ vec<expr_history_def> history_of_changes;
+
+ /* True (1) when the original target (register or memory) of this
+ instruction is available for scheduling, false (0) otherwise. -1 means
+ we're not sure; please run find_used_regs to clarify. */
+ signed char target_available;
+
+ /* True when this expression needs a speculation check to be scheduled.
+ This is used during find_used_regs. */
+ BOOL_BITFIELD needs_spec_check_p : 1;
+
+ /* True when the expression was substituted. Used for statistical
+ purposes. */
+ BOOL_BITFIELD was_substituted : 1;
+
+ /* True when the expression was renamed. */
+ BOOL_BITFIELD was_renamed : 1;
+
+ /* True when expression can't be moved. */
+ BOOL_BITFIELD cant_move : 1;
+};
+
+typedef struct _expr expr_def;
+typedef expr_def *expr_t;
+
+#define EXPR_VINSN(EXPR) ((EXPR)->vinsn)
+#define EXPR_INSN_RTX(EXPR) (VINSN_INSN_RTX (EXPR_VINSN (EXPR)))
+#define EXPR_PATTERN(EXPR) (VINSN_PATTERN (EXPR_VINSN (EXPR)))
+#define EXPR_LHS(EXPR) (VINSN_LHS (EXPR_VINSN (EXPR)))
+#define EXPR_RHS(EXPR) (VINSN_RHS (EXPR_VINSN (EXPR)))
+#define EXPR_TYPE(EXPR) (VINSN_TYPE (EXPR_VINSN (EXPR)))
+#define EXPR_SEPARABLE_P(EXPR) (VINSN_SEPARABLE_P (EXPR_VINSN (EXPR)))
+
+#define EXPR_SPEC(EXPR) ((EXPR)->spec)
+#define EXPR_USEFULNESS(EXPR) ((EXPR)->usefulness)
+#define EXPR_PRIORITY(EXPR) ((EXPR)->priority)
+#define EXPR_PRIORITY_ADJ(EXPR) ((EXPR)->priority_adj)
+#define EXPR_SCHED_TIMES(EXPR) ((EXPR)->sched_times)
+#define EXPR_ORIG_BB_INDEX(EXPR) ((EXPR)->orig_bb_index)
+#define EXPR_ORIG_SCHED_CYCLE(EXPR) ((EXPR)->orig_sched_cycle)
+#define EXPR_SPEC_DONE_DS(EXPR) ((EXPR)->spec_done_ds)
+#define EXPR_SPEC_TO_CHECK_DS(EXPR) ((EXPR)->spec_to_check_ds)
+#define EXPR_HISTORY_OF_CHANGES(EXPR) ((EXPR)->history_of_changes)
+#define EXPR_TARGET_AVAILABLE(EXPR) ((EXPR)->target_available)
+#define EXPR_NEEDS_SPEC_CHECK_P(EXPR) ((EXPR)->needs_spec_check_p)
+#define EXPR_WAS_SUBSTITUTED(EXPR) ((EXPR)->was_substituted)
+#define EXPR_WAS_RENAMED(EXPR) ((EXPR)->was_renamed)
+#define EXPR_CANT_MOVE(EXPR) ((EXPR)->cant_move)
+
+/* Insn definition for list of original insns in find_used_regs. */
+struct _def
+{
+ insn_t orig_insn;
+
+ /* FIXME: Get rid of CROSSED_CALL_ABIS in each def, since if we're moving up
+ an rhs from two different places, but only one of the code motion paths
+ crosses a call, we can't use any of the call_used_regs, no matter which
+ path is taken or whether all paths cross a call. Thus we should move
+ CROSSED_CALL_ABIS to static params. */
+ unsigned int crossed_call_abis;
+};
+typedef struct _def *def_t;
+
+
+/* Availability sets are sets of expressions we're scheduling. */
+typedef _list_t av_set_t;
+#define _AV_SET_EXPR(L) (&(L)->u.expr)
+#define _AV_SET_NEXT(L) (_LIST_NEXT (L))
+
+
+/* Boundary of the current fence group. */
+struct _bnd
+{
+ /* The actual boundary instruction. */
+ insn_t to;
+
+ /* Its path to the fence. */
+ ilist_t ptr;
+
+ /* Availability set at the boundary. */
+ av_set_t av;
+
+ /* This set moved to the fence. */
+ av_set_t av1;
+
+ /* Deps context at this boundary. As long as we have one boundary per fence,
+ this is just a pointer to the same deps context as in the corresponding
+ fence. */
+ deps_t dc;
+};
+typedef struct _bnd *bnd_t;
+#define BND_TO(B) ((B)->to)
+
+/* PTR stands not for "pointer", as you might think, but for Path To the Root
+ of the current instruction group from boundary B. */
+#define BND_PTR(B) ((B)->ptr)
+#define BND_AV(B) ((B)->av)
+#define BND_AV1(B) ((B)->av1)
+#define BND_DC(B) ((B)->dc)
+
+/* List of boundaries. */
+typedef _list_t blist_t;
+#define BLIST_BND(L) (&(L)->u.bnd)
+#define BLIST_NEXT(L) (_LIST_NEXT (L))
+
+
+/* Fence information. A fence represents the current scheduling point and
+ also blocks code motion through it when pipelining. */
+struct _fence
+{
+ /* Insn before which we gather an instruction group. */
+ insn_t insn;
+
+ /* Modeled state of the processor pipeline. */
+ state_t state;
+
+ /* Current cycle that is being scheduled on this fence. */
+ int cycle;
+
+ /* Number of insns that were scheduled on the current cycle.
+ This information has to be local to a fence. */
+ int cycle_issued_insns;
+
+ /* At the end of fill_insns () this field holds the list of the instructions
+ that are inner boundaries of the scheduled parallel group. */
+ ilist_t bnds;
+
+ /* Deps context at this fence. It is used to model dependencies at the
+ fence so that insn ticks can be properly evaluated. */
+ deps_t dc;
+
+ /* Target context at this fence. Used to save and load any local target
+ scheduling information when changing fences. */
+ tc_t tc;
+
+ /* A vector of insns that are scheduled but not yet completed. */
+ vec<rtx_insn *, va_gc> *executing_insns;
+
+ /* A vector indexed by UIDs that caches the earliest cycle on which
+ an insn can be scheduled on this fence. */
+ int *ready_ticks;
+
+ /* Its size. */
+ int ready_ticks_size;
+
+ /* The insn that has been scheduled last on this fence. */
+ rtx_insn *last_scheduled_insn;
+
+ /* The last value of the can_issue_more variable on this fence. */
+ int issue_more;
+
+ /* If non-NULL force the next scheduled insn to be SCHED_NEXT. */
+ rtx_insn *sched_next;
+
+ /* True if fill_insns processed this fence. */
+ BOOL_BITFIELD processed_p : 1;
+
+ /* True if fill_insns actually scheduled something on this fence. */
+ BOOL_BITFIELD scheduled_p : 1;
+
+ /* True when the next insn scheduled here would start a cycle. */
+ BOOL_BITFIELD starts_cycle_p : 1;
+
+ /* True when the next insn scheduled here would be scheduled after a stall. */
+ BOOL_BITFIELD after_stall_p : 1;
+};
+typedef struct _fence *fence_t;
+
+#define FENCE_INSN(F) ((F)->insn)
+#define FENCE_STATE(F) ((F)->state)
+#define FENCE_BNDS(F) ((F)->bnds)
+#define FENCE_PROCESSED_P(F) ((F)->processed_p)
+#define FENCE_SCHEDULED_P(F) ((F)->scheduled_p)
+#define FENCE_ISSUED_INSNS(F) ((F)->cycle_issued_insns)
+#define FENCE_CYCLE(F) ((F)->cycle)
+#define FENCE_STARTS_CYCLE_P(F) ((F)->starts_cycle_p)
+#define FENCE_AFTER_STALL_P(F) ((F)->after_stall_p)
+#define FENCE_DC(F) ((F)->dc)
+#define FENCE_TC(F) ((F)->tc)
+#define FENCE_LAST_SCHEDULED_INSN(F) ((F)->last_scheduled_insn)
+#define FENCE_ISSUE_MORE(F) ((F)->issue_more)
+#define FENCE_EXECUTING_INSNS(F) ((F)->executing_insns)
+#define FENCE_READY_TICKS(F) ((F)->ready_ticks)
+#define FENCE_READY_TICKS_SIZE(F) ((F)->ready_ticks_size)
+#define FENCE_SCHED_NEXT(F) ((F)->sched_next)
+
+/* List of fences. */
+typedef _list_t flist_t;
+#define FLIST_FENCE(L) (&(L)->u.fence)
+#define FLIST_NEXT(L) (_LIST_NEXT (L))
+
+/* List of fences with pointer to the tail node. */
+struct flist_tail_def
+{
+ flist_t head;
+ flist_t *tailp;
+};
+
+typedef struct flist_tail_def *flist_tail_t;
+#define FLIST_TAIL_HEAD(L) ((L)->head)
+#define FLIST_TAIL_TAILP(L) ((L)->tailp)
+
+/* List node information. A list node can be any of the types above. */
+struct _list_node
+{
+ _list_t next;
+
+ union
+ {
+ rtx x;
+ insn_t insn;
+ struct _bnd bnd;
+ expr_def expr;
+ struct _fence fence;
+ struct _def def;
+ void *data;
+ } u;
+};
+
+
+/* _list_t functions.
+ All of the _*list_* functions are used through accessor macros, thus
+ we can't move them into sel-sched-ir.cc. */
+extern object_allocator<_list_node> sched_lists_pool;
+
+inline _list_t
+_list_alloc (void)
+{
+ return sched_lists_pool.allocate ();
+}
+
+inline void
+_list_add (_list_t *lp)
+{
+ _list_t l = _list_alloc ();
+
+ _LIST_NEXT (l) = *lp;
+ *lp = l;
+}
+
+inline void
+_list_remove_nofree (_list_t *lp)
+{
+ _list_t n = *lp;
+
+ *lp = _LIST_NEXT (n);
+}
+
+inline void
+_list_remove (_list_t *lp)
+{
+ _list_t n = *lp;
+
+ *lp = _LIST_NEXT (n);
+ sched_lists_pool.remove (n);
+}
+
+inline void
+_list_clear (_list_t *l)
+{
+ while (*l)
+ _list_remove (l);
+}
+
+
+/* List iterator backend. */
+struct _list_iterator
+{
+ /* The list we're iterating. */
+ _list_t *lp;
+
+ /* True when this iterator supports removal. */
+ bool can_remove_p;
+
+ /* True when we've actually removed something. */
+ bool removed_p;
+};
+
+inline void
+_list_iter_start (_list_iterator *ip, _list_t *lp, bool can_remove_p)
+{
+ ip->lp = lp;
+ ip->can_remove_p = can_remove_p;
+ ip->removed_p = false;
+}
+
+inline void
+_list_iter_next (_list_iterator *ip)
+{
+ if (!ip->removed_p)
+ ip->lp = &_LIST_NEXT (*ip->lp);
+ else
+ ip->removed_p = false;
+}
+
+inline void
+_list_iter_remove (_list_iterator *ip)
+{
+ gcc_assert (!ip->removed_p && ip->can_remove_p);
+ _list_remove (ip->lp);
+ ip->removed_p = true;
+}
+
+inline void
+_list_iter_remove_nofree (_list_iterator *ip)
+{
+ gcc_assert (!ip->removed_p && ip->can_remove_p);
+ _list_remove_nofree (ip->lp);
+ ip->removed_p = true;
+}
+
+/* General macros to traverse a list. FOR_EACH_* interfaces are
+ implemented using these. */
+#define _FOR_EACH(TYPE, ELEM, I, L) \
+ for (_list_iter_start (&(I), &(L), false); \
+ _list_iter_cond_##TYPE (*(I).lp, &(ELEM)); \
+ _list_iter_next (&(I)))
+
+#define _FOR_EACH_1(TYPE, ELEM, I, LP) \
+ for (_list_iter_start (&(I), (LP), true); \
+ _list_iter_cond_##TYPE (*(I).lp, &(ELEM)); \
+ _list_iter_next (&(I)))
+
+
+/* _xlist_t functions. */
+
+inline void
+_xlist_add (_xlist_t *lp, rtx x)
+{
+ _list_add (lp);
+ _XLIST_X (*lp) = x;
+}
+
+#define _xlist_remove(LP) (_list_remove (LP))
+#define _xlist_clear(LP) (_list_clear (LP))
+
+inline bool
+_xlist_is_in_p (_xlist_t l, rtx x)
+{
+ while (l)
+ {
+ if (_XLIST_X (l) == x)
+ return true;
+ l = _XLIST_NEXT (l);
+ }
+
+ return false;
+}
+
+/* Used through _FOR_EACH. */
+inline bool
+_list_iter_cond_x (_xlist_t l, rtx *xp)
+{
+ if (l)
+ {
+ *xp = _XLIST_X (l);
+ return true;
+ }
+
+ return false;
+}
+
+#define _xlist_iter_remove(IP) (_list_iter_remove (IP))
+
+typedef _list_iterator _xlist_iterator;
+#define _FOR_EACH_X(X, I, L) _FOR_EACH (x, (X), (I), (L))
+#define _FOR_EACH_X_1(X, I, LP) _FOR_EACH_1 (x, (X), (I), (LP))
+
+
+/* ilist_t functions. */
+
+inline void
+ilist_add (ilist_t *lp, insn_t insn)
+{
+ _list_add (lp);
+ ILIST_INSN (*lp) = insn;
+}
+#define ilist_remove(LP) (_list_remove (LP))
+#define ilist_clear(LP) (_list_clear (LP))
+
+inline bool
+ilist_is_in_p (ilist_t l, insn_t insn)
+{
+ while (l)
+ {
+ if (ILIST_INSN (l) == insn)
+ return true;
+ l = ILIST_NEXT (l);
+ }
+
+ return false;
+}
+
+/* Used through _FOR_EACH. */
+inline bool
+_list_iter_cond_insn (ilist_t l, insn_t *ip)
+{
+ if (l)
+ {
+ *ip = ILIST_INSN (l);
+ return true;
+ }
+
+ return false;
+}
+
+#define ilist_iter_remove(IP) (_list_iter_remove (IP))
+
+typedef _list_iterator ilist_iterator;
+#define FOR_EACH_INSN(INSN, I, L) _FOR_EACH (insn, (INSN), (I), (L))
+#define FOR_EACH_INSN_1(INSN, I, LP) _FOR_EACH_1 (insn, (INSN), (I), (LP))
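+
+/* A removal sketch (illustrative only; LIST and BAD are hypothetical):
+
+ insn_t insn;
+ ilist_iterator ii;
+
+ FOR_EACH_INSN_1 (insn, ii, &list)
+ if (insn == bad)
+ ilist_iter_remove (&ii);
+
+ The _1 variants take a pointer to the list head and permit removal; after
+ a removal the iterator stays in place, so the next element is not
+ skipped. */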
+
+
+/* Av set iterators. */
+typedef _list_iterator av_set_iterator;
+#define FOR_EACH_EXPR(EXPR, I, AV) _FOR_EACH (expr, (EXPR), (I), (AV))
+#define FOR_EACH_EXPR_1(EXPR, I, AV) _FOR_EACH_1 (expr, (EXPR), (I), (AV))
+
+inline bool
+_list_iter_cond_expr (av_set_t av, expr_t *exprp)
+{
+ if (av)
+ {
+ *exprp = _AV_SET_EXPR (av);
+ return true;
+ }
+
+ return false;
+}
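+
+/* A usage sketch (illustrative only; AV is a hypothetical av_set_t):
+
+ expr_t expr;
+ av_set_iterator av_it;
+
+ FOR_EACH_EXPR (expr, av_it, av)
+ if (EXPR_SCHED_TIMES (expr) > 0)
+ dump_expr (expr);
+
+ This walks AV and dumps every expression that has already been scheduled
+ at least once (dump_expr is declared in sel-sched-dump.h). */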
+
+
+/* Def list iterators. */
+typedef _list_t def_list_t;
+typedef _list_iterator def_list_iterator;
+
+#define DEF_LIST_NEXT(L) (_LIST_NEXT (L))
+#define DEF_LIST_DEF(L) (&(L)->u.def)
+
+#define FOR_EACH_DEF(DEF, I, DEF_LIST) _FOR_EACH (def, (DEF), (I), (DEF_LIST))
+
+inline bool
+_list_iter_cond_def (def_list_t def_list, def_t *def)
+{
+ if (def_list)
+ {
+ *def = DEF_LIST_DEF (def_list);
+ return true;
+ }
+
+ return false;
+}
+
+
+/* Instruction data. Contains information about the insn pattern. */
+struct idata_def
+{
+ /* Type of the insn.
+ o CALL_INSN - Call insn
+ o JUMP_INSN - Jump insn
+ o INSN - INSN that cannot be cloned
+ o USE - INSN that can be cloned
+ o SET - INSN that can be cloned and separable into lhs and rhs
+ o PC - simplejump. Insns that simply redirect control flow should not
+ have any dependencies. sched-deps.cc, though, might consider them as
+ producers or consumers of certain registers. To avoid that, we handle
+ dependencies for simple jumps ourselves. */
+ int type;
+
+ /* If insn is a SET, this is its left hand side. */
+ rtx lhs;
+
+ /* If insn is a SET, this is its right hand side. */
+ rtx rhs;
+
+ /* Registers that are set/used by this insn. This info is now gathered
+ via sched-deps.cc. The downside of this is that we also use live info
+ from flow that is accumulated in the basic blocks. These two infos
+ can be slightly inconsistent, hence in the beginning we make a pass
+ through the CFG, calculating a conservative solution for the info in
+ the basic blocks. When this scheduler is switched to use dataflow,
+ this can be unified, as df gives us both per basic block and per
+ instruction info. Actually, we don't do that pass and just hope
+ for the best. */
+ regset reg_sets;
+
+ regset reg_clobbers;
+
+ regset reg_uses;
+};
+
+#define IDATA_TYPE(ID) ((ID)->type)
+#define IDATA_LHS(ID) ((ID)->lhs)
+#define IDATA_RHS(ID) ((ID)->rhs)
+#define IDATA_REG_SETS(ID) ((ID)->reg_sets)
+#define IDATA_REG_USES(ID) ((ID)->reg_uses)
+#define IDATA_REG_CLOBBERS(ID) ((ID)->reg_clobbers)
+
+/* Type to represent all the info needed to emit an insn.
+ This is a virtual equivalent of the insn.
+ Every insn in the stream has an associated vinsn. This is used
+ to reduce memory consumption, based on the fact that many insns
+ don't change through the scheduler.
+
+ A vinsn can be either normal or unique.
+ * A normal vinsn is one that can be cloned multiple times and typically
+ corresponds to a normal instruction.
+
+ * A unique vinsn derives from a CALL, ASM, JUMP (for a while) or other
+ unusual stuff. Such a vinsn is described by its INSN field, which is a
+ reference to the original instruction. */
+struct vinsn_def
+{
+ /* Associated insn. */
+ rtx_insn *insn_rtx;
+
+ /* Its description. */
+ struct idata_def id;
+
+ /* Hash of vinsn. It is computed either from pattern or from rhs using
+ hash_rtx. It is not placed in ID for faster compares. */
+ unsigned hash;
+
+ /* Hash of the insn_rtx pattern. */
+ unsigned hash_rtx;
+
+ /* Smart pointer counter. */
+ int count;
+
+ /* Cached cost of the vinsn. To access it please use vinsn_cost (). */
+ int cost;
+
+ /* Mark insns that may trap so we don't move them through jumps. */
+ bool may_trap_p;
+};
+
+#define VINSN_INSN_RTX(VI) ((VI)->insn_rtx)
+#define VINSN_PATTERN(VI) (PATTERN (VINSN_INSN_RTX (VI)))
+
+#define VINSN_ID(VI) (&((VI)->id))
+#define VINSN_HASH(VI) ((VI)->hash)
+#define VINSN_HASH_RTX(VI) ((VI)->hash_rtx)
+#define VINSN_TYPE(VI) (IDATA_TYPE (VINSN_ID (VI)))
+#define VINSN_SEPARABLE_P(VI) (VINSN_TYPE (VI) == SET)
+#define VINSN_CLONABLE_P(VI) (VINSN_SEPARABLE_P (VI) || VINSN_TYPE (VI) == USE)
+#define VINSN_UNIQUE_P(VI) (!VINSN_CLONABLE_P (VI))
+#define VINSN_LHS(VI) (IDATA_LHS (VINSN_ID (VI)))
+#define VINSN_RHS(VI) (IDATA_RHS (VINSN_ID (VI)))
+#define VINSN_REG_SETS(VI) (IDATA_REG_SETS (VINSN_ID (VI)))
+#define VINSN_REG_USES(VI) (IDATA_REG_USES (VINSN_ID (VI)))
+#define VINSN_REG_CLOBBERS(VI) (IDATA_REG_CLOBBERS (VINSN_ID (VI)))
+#define VINSN_COUNT(VI) ((VI)->count)
+#define VINSN_MAY_TRAP_P(VI) ((VI)->may_trap_p)
+
+
+/* An entry of the hashtable describing transformations that happened when
+ moving up through an insn. */
+struct transformed_insns
+{
+ /* Previous vinsn. Used to find the proper element. */
+ vinsn_t vinsn_old;
+
+ /* A new vinsn. */
+ vinsn_t vinsn_new;
+
+ /* Speculative status. */
+ ds_t ds;
+
+ /* Type of the transformation that happened. */
+ enum local_trans_type type;
+
+ /* Whether a conflict on the target register happened. */
+ BOOL_BITFIELD was_target_conflict : 1;
+
+ /* Whether a check was needed. */
+ BOOL_BITFIELD needs_check : 1;
+};
+
+/* Indexed by INSN_LUID, the collection of all data associated with
+ a single instruction that is in the stream. */
+class _sel_insn_data
+{
+public:
+ /* The expression that contains vinsn for this insn and some
+ flow-sensitive data like priority. */
+ expr_def expr;
+
+ /* If (WS_LEVEL == GLOBAL_LEVEL) then AV is empty. */
+ int ws_level;
+
+ /* A number that helps in defining a traversing order for a region. */
+ int seqno;
+
+ /* Liveness data computed above this insn. */
+ regset live;
+
+ /* An INSN_UID bit is set when the deps analysis result is already known. */
+ bitmap analyzed_deps;
+
+ /* An INSN_UID bit is set when a hard dep was found, not set when
+ no dependence is found. This is meaningful only when the analyzed_deps
+ bitmap has its bit set. */
+ bitmap found_deps;
+
+ /* An INSN_UID bit is set when this is a bookkeeping insn generated from
+ a parent with this uid. If a parent is a bookkeeping copy, all its
+ originators are transitively included in this set. */
+ bitmap originators;
+
+ /* A hashtable caching the result of insn transformations through this one. */
+ htab_t transformed_insns;
+
+ /* A context encapsulating this insn. */
+ class deps_desc deps_context;
+
+ /* This field is initialized at the beginning of scheduling and is used
+ to handle sched group instructions. If it is non-null, then it points
+ to the instruction that should be forced to be scheduled next. Such
+ instructions are unique. */
+ insn_t sched_next;
+
+ /* Cycle at which insn was scheduled. It is greater than zero if insn was
+ scheduled. This is used for bundling. */
+ int sched_cycle;
+
+ /* Cycle at which insn's data will be fully ready. */
+ int ready_cycle;
+
+ /* Speculations that are being checked by this insn. */
+ ds_t spec_checked_ds;
+
+ /* Whether the live set is valid or not. */
+ BOOL_BITFIELD live_valid_p : 1;
+ /* Insn is an ASM. */
+ BOOL_BITFIELD asm_p : 1;
+
+ /* True when an insn is scheduled after we've determined that a stall is
+ required.
+ This is used when emulating the Haifa scheduler for bundling. */
+ BOOL_BITFIELD after_stall_p : 1;
+};
+
+typedef class _sel_insn_data sel_insn_data_def;
+typedef sel_insn_data_def *sel_insn_data_t;
+
+extern vec<sel_insn_data_def> s_i_d;
+
+/* Accessor macros for s_i_d. */
+#define SID(INSN) (&s_i_d[INSN_LUID (INSN)])
+#define SID_BY_UID(UID) (&s_i_d[LUID_BY_UID (UID)])
+
+extern sel_insn_data_def insn_sid (insn_t);
+
+#define INSN_ASM_P(INSN) (SID (INSN)->asm_p)
+#define INSN_SCHED_NEXT(INSN) (SID (INSN)->sched_next)
+#define INSN_ANALYZED_DEPS(INSN) (SID (INSN)->analyzed_deps)
+#define INSN_FOUND_DEPS(INSN) (SID (INSN)->found_deps)
+#define INSN_DEPS_CONTEXT(INSN) (SID (INSN)->deps_context)
+#define INSN_ORIGINATORS(INSN) (SID (INSN)->originators)
+#define INSN_ORIGINATORS_BY_UID(UID) (SID_BY_UID (UID)->originators)
+#define INSN_TRANSFORMED_INSNS(INSN) (SID (INSN)->transformed_insns)
+
+#define INSN_EXPR(INSN) (&SID (INSN)->expr)
+#define INSN_LIVE(INSN) (SID (INSN)->live)
+#define INSN_LIVE_VALID_P(INSN) (SID (INSN)->live_valid_p)
+#define INSN_VINSN(INSN) (EXPR_VINSN (INSN_EXPR (INSN)))
+#define INSN_TYPE(INSN) (VINSN_TYPE (INSN_VINSN (INSN)))
+#define INSN_SIMPLEJUMP_P(INSN) (INSN_TYPE (INSN) == PC)
+#define INSN_LHS(INSN) (VINSN_LHS (INSN_VINSN (INSN)))
+#define INSN_RHS(INSN) (VINSN_RHS (INSN_VINSN (INSN)))
+#define INSN_REG_SETS(INSN) (VINSN_REG_SETS (INSN_VINSN (INSN)))
+#define INSN_REG_CLOBBERS(INSN) (VINSN_REG_CLOBBERS (INSN_VINSN (INSN)))
+#define INSN_REG_USES(INSN) (VINSN_REG_USES (INSN_VINSN (INSN)))
+#define INSN_SCHED_TIMES(INSN) (EXPR_SCHED_TIMES (INSN_EXPR (INSN)))
+#define INSN_SEQNO(INSN) (SID (INSN)->seqno)
+#define INSN_AFTER_STALL_P(INSN) (SID (INSN)->after_stall_p)
+#define INSN_SCHED_CYCLE(INSN) (SID (INSN)->sched_cycle)
+#define INSN_READY_CYCLE(INSN) (SID (INSN)->ready_cycle)
+#define INSN_SPEC_CHECKED_DS(INSN) (SID (INSN)->spec_checked_ds)
+
+/* The global level; an insn's cached data is valid only when its level
+ matches this one (see AV_SET_VALID_P below). */
+extern int global_level;
+
+#define INSN_WS_LEVEL(INSN) (SID (INSN)->ws_level)
+
+extern av_set_t get_av_set (insn_t);
+extern int get_av_level (insn_t);
+
+#define AV_SET(INSN) (get_av_set (INSN))
+#define AV_LEVEL(INSN) (get_av_level (INSN))
+#define AV_SET_VALID_P(INSN) (AV_LEVEL (INSN) == global_level)
+
+/* A list of fences currently in the works. */
+extern flist_t fences;
+
+/* A NOP pattern used as a placeholder for real insns. */
+extern rtx nop_pattern;
+
+/* An insn that is 'contained' in the EXIT block. */
+extern rtx_insn *exit_insn;
+
+/* Provide a separate luid for the insn. */
+#define INSN_INIT_TODO_LUID (1)
+
+/* Initialize s_i_d. */
+#define INSN_INIT_TODO_SSID (2)
+
+/* Initialize data for simplejump. */
+#define INSN_INIT_TODO_SIMPLEJUMP (4)
+
+/* Return true if INSN is a local NOP. The nop is local in the sense that
+ it was emitted by the scheduler as a temporary insn and will soon be
+ deleted. These nops are identified by their pattern. */
+#define INSN_NOP_P(INSN) (PATTERN (INSN) == nop_pattern)
+
+/* Return true if INSN is linked into instruction stream.
+ NB: It is impossible for INSN to have one field null and the other not
+ null: gcc_assert ((PREV_INSN (INSN) == NULL_RTX)
+ == (NEXT_INSN (INSN) == NULL_RTX)) is valid. */
+#define INSN_IN_STREAM_P(INSN) (PREV_INSN (INSN) && NEXT_INSN (INSN))
+
+/* Return true if INSN is in current fence. */
+#define IN_CURRENT_FENCE_P(INSN) (flist_lookup (fences, INSN) != NULL)
+
+/* Marks loop as being considered for pipelining. */
+#define MARK_LOOP_FOR_PIPELINING(LOOP) ((LOOP)->aux = (void *)(size_t)(1))
+#define LOOP_MARKED_FOR_PIPELINING_P(LOOP) ((size_t)((LOOP)->aux))
+
+/* Saved loop preheader to transfer when scheduling the loop. */
+#define LOOP_PREHEADER_BLOCKS(LOOP) ((size_t)((LOOP)->aux) == 1 \
+ ? NULL \
+ : ((vec<basic_block> *) (LOOP)->aux))
+#define SET_LOOP_PREHEADER_BLOCKS(LOOP,BLOCKS) ((LOOP)->aux \
+ = (BLOCKS != NULL \
+ ? BLOCKS \
+ : (LOOP)->aux))
+
+extern bitmap blocks_to_reschedule;
+
+
+/* A variable to track which part of rtx we are scanning in
+ sched-deps.cc: sched_analyze_insn (). */
+enum deps_where_t
+{
+ DEPS_IN_INSN,
+ DEPS_IN_LHS,
+ DEPS_IN_RHS,
+ DEPS_IN_NOWHERE
+};
+
+
+/* Per basic block data for the whole CFG. */
+struct sel_global_bb_info_def
+{
+ /* For each bb header this field contains a set of live registers.
+ For all other insns this field is NULL.
+ We also need to know the LV sets for the instructions that are
+ immediately after the border of the region. */
+ regset lv_set;
+
+ /* Status of LV_SET.
+ true - block has usable LV_SET.
+ false - block's LV_SET should be recomputed. */
+ bool lv_set_valid_p;
+};
+
+typedef sel_global_bb_info_def *sel_global_bb_info_t;
+
+
+/* Per basic block data. This array is indexed by basic block index. */
+extern vec<sel_global_bb_info_def> sel_global_bb_info;
+
+extern void sel_extend_global_bb_info (void);
+extern void sel_finish_global_bb_info (void);
+
+/* Get data for BB. */
+#define SEL_GLOBAL_BB_INFO(BB) \
+ (&sel_global_bb_info[(BB)->index])
+
+/* Access macros. */
+#define BB_LV_SET(BB) (SEL_GLOBAL_BB_INFO (BB)->lv_set)
+#define BB_LV_SET_VALID_P(BB) (SEL_GLOBAL_BB_INFO (BB)->lv_set_valid_p)
+
+/* Per basic block data for the region. */
+struct sel_region_bb_info_def
+{
+ /* This insn stream is constructed in such a way that it should be
+ traversed via the PREV_INSN field (*not* NEXT_INSN). */
+ rtx_insn *note_list;
+
+ /* Cached availability set at the beginning of a block.
+ See also AV_LEVEL () for conditions when this av_set can be used. */
+ av_set_t av_set;
+
+ /* If (AV_LEVEL == GLOBAL_LEVEL) then AV is valid. */
+ int av_level;
+};
+
+typedef sel_region_bb_info_def *sel_region_bb_info_t;
+
+
+/* Per basic block data. This array is indexed by basic block index. */
+extern vec<sel_region_bb_info_def> sel_region_bb_info;
+
+/* Get data for BB. */
+#define SEL_REGION_BB_INFO(BB) (&sel_region_bb_info[(BB)->index])
+
+/* Get BB's note_list.
+ A note_list is a list of various notes that were scattered across the BB
+ before scheduling, and will be appended at the beginning of the BB after
+ scheduling is finished. */
+#define BB_NOTE_LIST(BB) (SEL_REGION_BB_INFO (BB)->note_list)
+
+#define BB_AV_SET(BB) (SEL_REGION_BB_INFO (BB)->av_set)
+#define BB_AV_LEVEL(BB) (SEL_REGION_BB_INFO (BB)->av_level)
+#define BB_AV_SET_VALID_P(BB) (BB_AV_LEVEL (BB) == global_level)
+
+/* Used in bb_in_ebb_p. */
+extern bitmap_head *forced_ebb_heads;
+
+/* The loop nest being pipelined. */
+extern class loop *current_loop_nest;
+
+/* Saves pipelined blocks. Bitmap is indexed by bb->index. */
+extern sbitmap bbs_pipelined;
+
+/* Various flags. */
+extern bool enable_moveup_set_path_p;
+extern bool pipelining_p;
+extern bool bookkeeping_p;
+extern int max_insns_to_rename;
+extern bool preheader_removed;
+
+/* Software lookahead window size.
+ According to the results in Nakatani and Ebcioglu [1993], a window size of
+ 16 is enough to extract most ILP in integer code. */
+#define MAX_WS (param_selsched_max_lookahead)
+
+extern regset sel_all_regs;
+
+
+/* Successor iterator backend. */
+struct succ_iterator
+{
+ /* True if we're at BB end. */
+ bool bb_end;
+
+ /* An edge on which we're iterating. */
+ edge e1;
+
+ /* The previous edge saved after skipping empty blocks. */
+ edge e2;
+
+ /* Edge iterator used when there are successors in other basic blocks. */
+ edge_iterator ei;
+
+ /* Successor block we're traversing. */
+ basic_block bb;
+
+ /* Flags that are passed to the iterator. We return only successors
+ that comply to these flags. */
+ short flags;
+
+ /* When flags include SUCCS_ALL, this will be set to the exact type
+ of the successor we're traversing now. */
+ short current_flags;
+
+ /* When skipping to loop exits, information about the exits is saved here. */
+ int current_exit;
+ vec<edge> loop_exits;
+};
+
+/* A structure used to return all information about successors. */
+struct succs_info
+{
+ /* Flags that these successors were computed with. */
+ short flags;
+
+ /* Successors that correspond to the flags. */
+ insn_vec_t succs_ok;
+
+ /* Their probabilities. As of now, we don't need this for other
+ successors. */
+ vec<int> probs_ok;
+
+ /* Other successors. */
+ insn_vec_t succs_other;
+
+ /* Probability of all successors. */
+ int all_prob;
+
+ /* The number of all successors. */
+ int all_succs_n;
+
+ /* The number of good successors. */
+ int succs_ok_n;
+};
+
+/* Some needed definitions. */
+extern basic_block after_recovery;
+
+extern rtx_insn *sel_bb_head (basic_block);
+extern rtx_insn *sel_bb_end (basic_block);
+extern bool sel_bb_empty_p (basic_block);
+extern bool in_current_region_p (basic_block);
+
+/* True when BB is a header of an inner loop. */
+inline bool
+inner_loop_header_p (basic_block bb)
+{
+ class loop *inner_loop;
+
+ if (!current_loop_nest)
+ return false;
+
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ return false;
+
+ inner_loop = bb->loop_father;
+ if (inner_loop == current_loop_nest)
+ return false;
+
+  /* Check that BB heads an inner loop that still lies inside the
+     current loop nest.  */
+ if (bb == inner_loop->header
+ && flow_bb_inside_loop_p (current_loop_nest, bb))
+ {
+ /* Could be '=' here because of wrong loop depths. */
+ gcc_assert (loop_depth (inner_loop) >= loop_depth (current_loop_nest));
+ return true;
+ }
+
+ return false;
+}
+
+/* Return exit edges of LOOP, filtering out edges with the same dest bb. */
+inline vec<edge>
+get_loop_exit_edges_unique_dests (const class loop *loop)
+{
+ vec<edge> edges = vNULL;
+ struct loop_exit *exit;
+
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun)
+ && current_loops->state & LOOPS_HAVE_RECORDED_EXITS);
+
+ for (exit = loop->exits->next; exit->e; exit = exit->next)
+ {
+ int i;
+ edge e;
+ bool was_dest = false;
+
+ for (i = 0; edges.iterate (i, &e); i++)
+ if (e->dest == exit->e->dest)
+ {
+ was_dest = true;
+ break;
+ }
+
+ if (!was_dest)
+ edges.safe_push (exit->e);
+ }
+ return edges;
+}
+
+inline bool
+sel_bb_empty_or_nop_p (basic_block bb)
+{
+ insn_t first = sel_bb_head (bb), last;
+
+ if (first == NULL_RTX)
+ return true;
+
+ if (!INSN_NOP_P (first))
+ return false;
+
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ return false;
+
+ last = sel_bb_end (bb);
+ if (first != last)
+ return false;
+
+ return true;
+}
+
+/* Collect all loop exits recursively, skipping empty BBs between them.
+ E.g. if BB is a loop header which has several loop exits,
+ traverse all of them and if any of them turns out to be another loop header
+ (after skipping empty BBs), add its loop exits to the resulting vector
+ as well. */
+inline vec<edge>
+get_all_loop_exits (basic_block bb)
+{
+ vec<edge> exits = vNULL;
+
+ /* If bb is empty, and we're skipping to loop exits, then
+ consider bb as a possible gate to the inner loop now. */
+ while (sel_bb_empty_or_nop_p (bb)
+ && in_current_region_p (bb)
+ && EDGE_COUNT (bb->succs) > 0)
+ {
+ bb = single_succ (bb);
+
+ /* This empty block could only lead outside the region. */
+ gcc_assert (! in_current_region_p (bb));
+ }
+
+ /* And now check whether we should skip over inner loop. */
+ if (inner_loop_header_p (bb))
+ {
+ class loop *this_loop;
+ class loop *pred_loop = NULL;
+ int i;
+ unsigned this_depth;
+ edge e;
+
+ for (this_loop = bb->loop_father;
+ this_loop && this_loop != current_loop_nest;
+ this_loop = loop_outer (this_loop))
+ pred_loop = this_loop;
+
+ this_loop = pred_loop;
+ gcc_assert (this_loop != NULL);
+
+ exits = get_loop_exit_edges_unique_dests (this_loop);
+ this_depth = loop_depth (this_loop);
+
+ /* Traverse all loop headers. Be careful not to go back
+ to the outer loop's header (see PR 84206). */
+ for (i = 0; exits.iterate (i, &e); i++)
+ if ((in_current_region_p (e->dest)
+ || (inner_loop_header_p (e->dest)))
+ && loop_depth (e->dest->loop_father) >= this_depth)
+ {
+ auto_vec<edge> next_exits = get_all_loop_exits (e->dest);
+
+ if (next_exits.exists ())
+ {
+ int j;
+ edge ne;
+
+ /* Add all loop exits for the current edge into the
+ resulting vector. */
+ for (j = 0; next_exits.iterate (j, &ne); j++)
+ exits.safe_push (ne);
+
+ /* Remove the original edge. */
+ exits.ordered_remove (i);
+
+ /* Decrease the loop counter so we won't skip anything. */
+ i--;
+ continue;
+ }
+ }
+ }
+
+ return exits;
+}
+
+/* Flags to pass to compute_succs_info and FOR_EACH_SUCC.
+ Any successor will fall into exactly one category. */
+
+/* Include normal successors. */
+#define SUCCS_NORMAL (1)
+
+/* Include back-edge successors. */
+#define SUCCS_BACK (2)
+
+/* Include successors that are outside of the current region. */
+#define SUCCS_OUT (4)
+
+/* When pipelining of the outer loops is enabled, skip innermost loops
+ to their exits. */
+#define SUCCS_SKIP_TO_LOOP_EXITS (8)
+
+/* Include all successors. */
+#define SUCCS_ALL (SUCCS_NORMAL | SUCCS_BACK | SUCCS_OUT)
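+
+/* The category flags combine bitwise; SUCCS_SKIP_TO_LOOP_EXITS acts as a
+   modifier rather than a category.  E.g. a sketch requesting normal and
+   back-edge successors while skipping inner loops:
+
+     short flags = SUCCS_NORMAL | SUCCS_BACK | SUCCS_SKIP_TO_LOOP_EXITS;
+     struct succs_info *sinfo = compute_succs_info (insn, flags);
+     ...
+     free_succs_info (sinfo);  */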
+
+/* We need to return a succ_iterator to avoid 'uninitialized' warning
+ during bootstrap. */
+inline succ_iterator
+_succ_iter_start (insn_t *succp, insn_t insn, int flags)
+{
+ succ_iterator i;
+
+ basic_block bb = BLOCK_FOR_INSN (insn);
+
+ gcc_assert (INSN_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn));
+
+ i.flags = flags;
+
+ /* Avoid 'uninitialized' warning. */
+ *succp = NULL;
+ i.e1 = NULL;
+ i.e2 = NULL;
+ i.bb = bb;
+ i.current_flags = 0;
+ i.current_exit = -1;
+ i.loop_exits.create (0);
+
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun) && BB_END (bb) != insn)
+ {
+ i.bb_end = false;
+
+ /* Avoid 'uninitialized' warning. */
+ i.ei.index = 0;
+ i.ei.container = 0;
+ }
+ else
+ {
+ i.ei = ei_start (bb->succs);
+ i.bb_end = true;
+ }
+
+ return i;
+}
+
+inline bool
+_succ_iter_cond (succ_iterator *ip, insn_t *succp, insn_t insn,
+ bool check (edge, succ_iterator *))
+{
+ if (!ip->bb_end)
+ {
+      /* When we're in the middle of a basic block, return
+ the next insn immediately, but only when SUCCS_NORMAL is set. */
+ if (*succp != NULL || (ip->flags & SUCCS_NORMAL) == 0)
+ return false;
+
+ *succp = NEXT_INSN (insn);
+ ip->current_flags = SUCCS_NORMAL;
+ return true;
+ }
+ else
+ {
+ while (1)
+ {
+ edge e_tmp = NULL;
+
+ /* First, try loop exits, if we have them. */
+ if (ip->loop_exits.exists ())
+ {
+ do
+ {
+ ip->loop_exits.iterate (ip->current_exit, &e_tmp);
+ ip->current_exit++;
+ }
+ while (e_tmp && !check (e_tmp, ip));
+
+ if (!e_tmp)
+ ip->loop_exits.release ();
+ }
+
+ /* If we have found a successor, then great. */
+ if (e_tmp)
+ {
+ ip->e1 = e_tmp;
+ break;
+ }
+
+ /* If not, then try the next edge. */
+ while (ei_cond (ip->ei, &(ip->e1)))
+ {
+ basic_block bb = ip->e1->dest;
+
+ /* Consider bb as a possible loop header. */
+ if ((ip->flags & SUCCS_SKIP_TO_LOOP_EXITS)
+ && flag_sel_sched_pipelining_outer_loops
+ && (!in_current_region_p (bb)
+ || BLOCK_TO_BB (ip->bb->index)
+ < BLOCK_TO_BB (bb->index)))
+ {
+ /* Get all loop exits recursively. */
+ ip->loop_exits = get_all_loop_exits (bb);
+
+ if (ip->loop_exits.exists ())
+ {
+ ip->current_exit = 0;
+                      /* Advance the iterator now, because we won't call
+                         succ_iter_next until the loop exits are exhausted.  */
+ ei_next (&(ip->ei));
+ break;
+ }
+ }
+
+ /* bb is not a loop header, check as usual. */
+ if (check (ip->e1, ip))
+ break;
+
+ ei_next (&(ip->ei));
+ }
+
+          /* If loop_exits is non-null, we have found an inner loop;
+ do one more iteration to fetch an edge from these exits. */
+ if (ip->loop_exits.exists ())
+ continue;
+
+ /* Otherwise, we've found an edge in a usual way. Break now. */
+ break;
+ }
+
+ if (ip->e1)
+ {
+ basic_block bb = ip->e2->dest;
+
+ if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || bb == after_recovery)
+ *succp = exit_insn;
+ else
+ {
+ *succp = sel_bb_head (bb);
+
+ gcc_assert (ip->flags != SUCCS_NORMAL
+ || *succp == NEXT_INSN (bb_note (bb)));
+ gcc_assert (BLOCK_FOR_INSN (*succp) == bb);
+ }
+
+ return true;
+ }
+ else
+ return false;
+ }
+}
+
+inline void
+_succ_iter_next (succ_iterator *ip)
+{
+ gcc_assert (!ip->e2 || ip->e1);
+
+ if (ip->bb_end && ip->e1 && !ip->loop_exits.exists ())
+ ei_next (&(ip->ei));
+}
+
+/* Returns true when E1 is an eligible successor edge, possibly skipping
+   empty blocks.  The resulting edge after skipping is saved in IP->E2.
+   IP->FLAGS specifies whether back edges and out-of-region edges
+   should be considered.  */
+inline bool
+_eligible_successor_edge_p (edge e1, succ_iterator *ip)
+{
+ edge e2 = e1;
+ basic_block bb;
+ int flags = ip->flags;
+ bool src_outside_rgn = !in_current_region_p (e1->src);
+
+ gcc_assert (flags != 0);
+
+ if (src_outside_rgn)
+ {
+      /* Any successor of the block that is outside the current region is
+ ineligible, except when we're skipping to loop exits. */
+ gcc_assert (flags & (SUCCS_OUT | SUCCS_SKIP_TO_LOOP_EXITS));
+
+ if (flags & SUCCS_OUT)
+ return false;
+ }
+
+ bb = e2->dest;
+
+ /* Skip empty blocks, but be careful not to leave the region. */
+ while (1)
+ {
+ if (!sel_bb_empty_p (bb))
+ {
+ edge ne;
+ basic_block nbb;
+
+ if (!sel_bb_empty_or_nop_p (bb))
+ break;
+
+ ne = EDGE_SUCC (bb, 0);
+ nbb = ne->dest;
+
+ if (!in_current_region_p (nbb)
+ && !(flags & SUCCS_OUT))
+ break;
+
+ e2 = ne;
+ bb = nbb;
+ continue;
+ }
+
+ if (!in_current_region_p (bb)
+ && !(flags & SUCCS_OUT))
+ return false;
+
+ if (EDGE_COUNT (bb->succs) == 0)
+ return false;
+
+ e2 = EDGE_SUCC (bb, 0);
+ bb = e2->dest;
+ }
+
+ /* Save the second edge for later checks. */
+ ip->e2 = e2;
+
+ if (in_current_region_p (bb))
+ {
+      /* BLOCK_TO_BB gives the topological order within the region here.
+ It is important to use real predecessor here, which is ip->bb,
+ as we may well have e1->src outside current region,
+ when skipping to loop exits. */
+ bool succeeds_in_top_order = (BLOCK_TO_BB (ip->bb->index)
+ < BLOCK_TO_BB (bb->index));
+
+      /* This is true for all cases except the last one.  */
+ ip->current_flags = SUCCS_NORMAL;
+
+ /* We are advancing forward in the region, as usual. */
+ if (succeeds_in_top_order)
+ {
+ /* We are skipping to loop exits here. */
+ gcc_assert (!src_outside_rgn
+ || flag_sel_sched_pipelining_outer_loops);
+ return !!(flags & SUCCS_NORMAL);
+ }
+
+      /* This is a back edge.  During pipelining we ignore back edges,
+         but only when they lead to the same loop.  A back edge can lead to the header
+ of the outer loop, which will also be the preheader of
+ the current loop. */
+ if (pipelining_p
+ && e1->src->loop_father == bb->loop_father)
+ return !!(flags & SUCCS_NORMAL);
+
+ /* A back edge should be requested explicitly. */
+ ip->current_flags = SUCCS_BACK;
+ return !!(flags & SUCCS_BACK);
+ }
+
+ ip->current_flags = SUCCS_OUT;
+ return !!(flags & SUCCS_OUT);
+}
+
+#define FOR_EACH_SUCC_1(SUCC, ITER, INSN, FLAGS) \
+ for ((ITER) = _succ_iter_start (&(SUCC), (INSN), (FLAGS)); \
+ _succ_iter_cond (&(ITER), &(SUCC), (INSN), _eligible_successor_edge_p); \
+ _succ_iter_next (&(ITER)))
+
+#define FOR_EACH_SUCC(SUCC, ITER, INSN) \
+ FOR_EACH_SUCC_1 (SUCC, ITER, INSN, SUCCS_NORMAL)
+
+/* Return the current edge along which a successor was built. */
+#define SUCC_ITER_EDGE(ITER) ((ITER)->e1)
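+
+/* An illustrative sketch: count the successors of INSN in every category
+   using the iteration macros above:
+
+     insn_t succ;
+     succ_iterator si;
+     int n = 0;
+
+     FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
+       n++;  */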
+
+/* Return the block following BB, avoiding inconsistencies: prefer the
+   single or fall-through successor when one exists.  */
+inline basic_block
+bb_next_bb (basic_block bb)
+{
+ switch (EDGE_COUNT (bb->succs))
+ {
+ case 0:
+ return bb->next_bb;
+
+ case 1:
+ return single_succ (bb);
+
+ case 2:
+ return FALLTHRU_EDGE (bb)->dest;
+
+ default:
+ return bb->next_bb;
+ }
+}
+
+
+
+/* Functions that are used in sel-sched.cc. */
+
+/* List functions. */
+extern ilist_t ilist_copy (ilist_t);
+extern ilist_t ilist_invert (ilist_t);
+extern void blist_add (blist_t *, insn_t, ilist_t, deps_t);
+extern void blist_remove (blist_t *);
+extern void flist_tail_init (flist_tail_t);
+
+extern fence_t flist_lookup (flist_t, insn_t);
+extern void flist_clear (flist_t *);
+extern void def_list_add (def_list_t *, insn_t, unsigned int);
+
+/* Target context functions. */
+extern tc_t create_target_context (bool);
+extern void set_target_context (tc_t);
+extern void reset_target_context (tc_t, bool);
+
+/* Deps context functions. */
+extern void advance_deps_context (deps_t, insn_t);
+
+/* Fences functions. */
+extern void init_fences (insn_t);
+extern void add_clean_fence_to_fences (flist_tail_t, insn_t, fence_t);
+extern void add_dirty_fence_to_fences (flist_tail_t, insn_t, fence_t);
+extern void move_fence_to_fences (flist_t, flist_tail_t);
+
+/* Pool functions. */
+extern regset get_regset_from_pool (void);
+extern regset get_clear_regset_from_pool (void);
+extern void return_regset_to_pool (regset);
+extern void free_regset_pool (void);
+
+extern insn_t get_nop_from_pool (insn_t);
+extern void return_nop_to_pool (insn_t, bool);
+extern void free_nop_pool (void);
+
+/* Vinsns functions. */
+extern bool vinsn_separable_p (vinsn_t);
+extern bool vinsn_cond_branch_p (vinsn_t);
+extern void recompute_vinsn_lhs_rhs (vinsn_t);
+extern int sel_vinsn_cost (vinsn_t);
+extern insn_t sel_gen_insn_from_rtx_after (rtx, expr_t, int, insn_t);
+extern insn_t sel_gen_recovery_insn_from_rtx_after (rtx, expr_t, int, insn_t);
+extern insn_t sel_gen_insn_from_expr_after (expr_t, vinsn_t, int, insn_t);
+extern insn_t sel_move_insn (expr_t, int, insn_t);
+extern void vinsn_attach (vinsn_t);
+extern void vinsn_detach (vinsn_t);
+extern vinsn_t vinsn_copy (vinsn_t, bool);
+extern bool vinsn_equal_p (vinsn_t, vinsn_t);
+
+/* EXPR functions. */
+extern void copy_expr (expr_t, expr_t);
+extern void copy_expr_onside (expr_t, expr_t);
+extern void merge_expr_data (expr_t, expr_t, insn_t);
+extern void merge_expr (expr_t, expr_t, insn_t);
+extern void clear_expr (expr_t);
+extern unsigned expr_dest_regno (expr_t);
+extern rtx expr_dest_reg (expr_t);
+extern int find_in_history_vect (vec<expr_history_def> ,
+ rtx, vinsn_t, bool);
+extern void insert_in_history_vect (vec<expr_history_def> *,
+ unsigned, enum local_trans_type,
+ vinsn_t, vinsn_t, ds_t);
+extern void mark_unavailable_targets (av_set_t, av_set_t, regset);
+extern int speculate_expr (expr_t, ds_t);
+
+/* Av set functions. */
+extern void av_set_add (av_set_t *, expr_t);
+extern void av_set_iter_remove (av_set_iterator *);
+extern expr_t av_set_lookup (av_set_t, vinsn_t);
+extern expr_t merge_with_other_exprs (av_set_t *, av_set_iterator *, expr_t);
+extern bool av_set_is_in_p (av_set_t, vinsn_t);
+extern av_set_t av_set_copy (av_set_t);
+extern void av_set_union_and_clear (av_set_t *, av_set_t *, insn_t);
+extern void av_set_union_and_live (av_set_t *, av_set_t *, regset, regset, insn_t);
+extern void av_set_clear (av_set_t *);
+extern void av_set_leave_one_nonspec (av_set_t *);
+extern expr_t av_set_element (av_set_t, int);
+extern void av_set_substract_cond_branches (av_set_t *);
+extern void av_set_split_usefulness (av_set_t, int, int);
+extern void av_set_code_motion_filter (av_set_t *, av_set_t);
+
+extern void sel_save_haifa_priorities (void);
+
+extern void sel_init_global_and_expr (bb_vec_t);
+extern void sel_finish_global_and_expr (void);
+
+extern regset compute_live (insn_t);
+extern bool register_unavailable_p (regset, rtx);
+
+/* Dependence analysis functions. */
+extern void sel_clear_has_dependence (void);
+extern ds_t has_dependence_p (expr_t, insn_t, ds_t **);
+
+extern int tick_check_p (expr_t, deps_t, fence_t);
+
+/* Functions to work with insns. */
+extern bool lhs_of_insn_equals_to_dest_p (insn_t, rtx);
+extern bool insn_eligible_for_subst_p (insn_t);
+extern void get_dest_and_mode (rtx, rtx *, machine_mode *);
+
+extern bool bookkeeping_can_be_created_if_moved_through_p (insn_t);
+extern bool sel_remove_insn (insn_t, bool, bool);
+extern bool bb_header_p (insn_t);
+extern void sel_init_invalid_data_sets (insn_t);
+extern bool insn_at_boundary_p (insn_t);
+
+/* Basic block and CFG functions. */
+
+extern rtx_insn *sel_bb_head (basic_block);
+extern bool sel_bb_head_p (insn_t);
+extern rtx_insn *sel_bb_end (basic_block);
+extern bool sel_bb_end_p (insn_t);
+extern bool sel_bb_empty_p (basic_block);
+
+extern bool in_current_region_p (basic_block);
+extern basic_block fallthru_bb_of_jump (const rtx_insn *);
+
+extern void sel_init_bbs (bb_vec_t);
+extern void sel_finish_bbs (void);
+
+extern struct succs_info * compute_succs_info (insn_t, short);
+extern void free_succs_info (struct succs_info *);
+extern bool sel_insn_has_single_succ_p (insn_t, int);
+extern bool sel_num_cfg_preds_gt_1 (insn_t);
+extern int get_seqno_by_preds (rtx_insn *);
+
+extern bool bb_ends_ebb_p (basic_block);
+extern bool in_same_ebb_p (insn_t, insn_t);
+
+extern bool tidy_control_flow (basic_block, bool);
+extern void free_bb_note_pool (void);
+
+extern void purge_empty_blocks (void);
+extern basic_block sel_split_edge (edge);
+extern basic_block sel_create_recovery_block (insn_t);
+extern bool sel_redirect_edge_and_branch (edge, basic_block);
+extern void sel_redirect_edge_and_branch_force (edge, basic_block);
+extern void sel_init_pipelining (void);
+extern void sel_finish_pipelining (void);
+extern void sel_sched_region (int);
+extern loop_p get_loop_nest_for_rgn (unsigned int);
+extern bool considered_for_pipelining_p (class loop *);
+extern void make_region_from_loop_preheader (vec<basic_block> *&);
+extern void sel_add_loop_preheaders (bb_vec_t *);
+extern bool sel_is_loop_preheader_p (basic_block);
+extern void clear_outdated_rtx_info (basic_block);
+extern void free_data_sets (basic_block);
+extern void exchange_data_sets (basic_block, basic_block);
+extern void copy_data_sets (basic_block, basic_block);
+
+extern void sel_register_cfg_hooks (void);
+extern void sel_unregister_cfg_hooks (void);
+
+/* Expression transformation routines. */
+extern rtx_insn *create_insn_rtx_from_pattern (rtx, rtx);
+extern vinsn_t create_vinsn_from_insn_rtx (rtx_insn *, bool);
+extern rtx_insn *create_copy_of_insn_rtx (rtx);
+extern void change_vinsn_in_expr (expr_t, vinsn_t);
+
+/* Various initialization functions. */
+extern void init_lv_sets (void);
+extern void free_lv_sets (void);
+extern void setup_nop_and_exit_insns (void);
+extern void free_nop_and_exit_insns (void);
+extern void free_data_for_scheduled_insn (insn_t);
+extern void setup_nop_vinsn (void);
+extern void free_nop_vinsn (void);
+extern void sel_set_sched_flags (void);
+extern void sel_setup_sched_infos (void);
+extern void alloc_sched_pools (void);
+extern void free_sched_pools (void);
+
+#endif /* GCC_SEL_SCHED_IR_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sel-sched.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sel-sched.h
new file mode 100644
index 0000000..36f742f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sel-sched.h
@@ -0,0 +1,27 @@
+/* Instruction scheduling pass.
+ Copyright (C) 2006-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SEL_SCHED_H
+#define GCC_SEL_SCHED_H
+
+/* The main entry point. */
+extern void run_selective_scheduling (void);
+extern bool maybe_skip_selective_scheduling (void);
+
+#endif /* GCC_SEL_SCHED_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/selftest-diagnostic.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/selftest-diagnostic.h
new file mode 100644
index 0000000..f8c6dd2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/selftest-diagnostic.h
@@ -0,0 +1,49 @@
+/* Selftest support for diagnostics.
+ Copyright (C) 2016-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SELFTEST_DIAGNOSTIC_H
+#define GCC_SELFTEST_DIAGNOSTIC_H
+
+/* The selftest code should entirely disappear in a production
+ configuration, hence we guard all of it with #if CHECKING_P. */
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* Convenience subclass of diagnostic_context for testing
+ the diagnostic subsystem. */
+
+class test_diagnostic_context : public diagnostic_context
+{
+ public:
+ test_diagnostic_context ();
+ ~test_diagnostic_context ();
+
+ /* Implementation of diagnostic_start_span_fn, hiding the
+ real filename (to avoid printing the names of tempfiles). */
+ static void
+ start_span_cb (diagnostic_context *context, expanded_location exploc);
+};
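+
+/* A sketch of typical usage: emit a diagnostic into the test context and
+   compare the captured text (RICHLOC and EXPECTED are assumed to be set
+   up by the test):
+
+     test_diagnostic_context dc;
+     diagnostic_show_locus (&dc, &richloc, DK_ERROR);
+     ASSERT_STREQ (expected, pp_formatted_text (dc.printer));  */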
+
+} // namespace selftest
+
+#endif /* #if CHECKING_P */
+
+#endif /* GCC_SELFTEST_DIAGNOSTIC_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/selftest-rtl.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/selftest-rtl.h
new file mode 100644
index 0000000..255b9e3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/selftest-rtl.h
@@ -0,0 +1,100 @@
+/* A self-testing framework, for use by -fself-test.
+ Copyright (C) 2016-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SELFTEST_RTL_H
+#define GCC_SELFTEST_RTL_H
+
+/* The selftest code should entirely disappear in a production
+ configuration, hence we guard all of it with #if CHECKING_P. */
+
+#if CHECKING_P
+
+class rtx_reuse_manager;
+
+namespace selftest {
+
+/* Verify that X is dumped as EXPECTED_DUMP, using compact mode.
+ Use LOC as the effective location when reporting errors. */
+
+extern void
+assert_rtl_dump_eq (const location &loc, const char *expected_dump, rtx x,
+ rtx_reuse_manager *reuse_manager);
+
+/* Verify that RTX is dumped as EXPECTED_DUMP, using compact mode. */
+
+#define ASSERT_RTL_DUMP_EQ(EXPECTED_DUMP, RTX) \
+ assert_rtl_dump_eq (SELFTEST_LOCATION, (EXPECTED_DUMP), (RTX), NULL)
+
+/* As above, but using REUSE_MANAGER when dumping. */
+
+#define ASSERT_RTL_DUMP_EQ_WITH_REUSE(EXPECTED_DUMP, RTX, REUSE_MANAGER) \
+ assert_rtl_dump_eq (SELFTEST_LOCATION, (EXPECTED_DUMP), (RTX), \
+ (REUSE_MANAGER))
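+
+/* E.g. a sketch; the expected string must match the compact dump format
+   produced by print_rtx:
+
+     ASSERT_RTL_DUMP_EQ ("(const_int 0)\n", const0_rtx);  */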
+
+#define ASSERT_RTX_EQ(EXPECTED, ACTUAL) \
+ SELFTEST_BEGIN_STMT \
+ const char *desc_ = "ASSERT_RTX_EQ (" #EXPECTED ", " #ACTUAL ")"; \
+ ::selftest::assert_rtx_eq_at (SELFTEST_LOCATION, desc_, (EXPECTED), \
+ (ACTUAL)); \
+ SELFTEST_END_STMT
+
+extern void assert_rtx_eq_at (const location &, const char *, rtx, rtx);
+
+/* Evaluate rtx EXPECTED and ACTUAL and compare them with ==
+ (i.e. pointer equality), calling ::selftest::pass if they are
+ equal, aborting if they are non-equal. */
+
+#define ASSERT_RTX_PTR_EQ(EXPECTED, ACTUAL) \
+ SELFTEST_BEGIN_STMT \
+ const char *desc_ = "ASSERT_RTX_PTR_EQ (" #EXPECTED ", " #ACTUAL ")"; \
+ ::selftest::assert_rtx_ptr_eq_at (SELFTEST_LOCATION, desc_, (EXPECTED), \
+ (ACTUAL)); \
+ SELFTEST_END_STMT
+
+/* Compare rtx EXPECTED and ACTUAL by pointer equality, calling
+ ::selftest::pass if they are equal, aborting if they are non-equal.
+ LOC is the effective location of the assertion, MSG describes it. */
+
+extern void assert_rtx_ptr_eq_at (const location &loc, const char *msg,
+ rtx expected, rtx actual);
+
+/* A class for testing RTL function dumps. */
+
+class rtl_dump_test
+{
+ public:
+ /* Takes ownership of PATH. */
+ rtl_dump_test (const location &loc, char *path);
+ ~rtl_dump_test ();
+
+ private:
+ char *m_path;
+};
+
+/* Get the insn with the given uid, or NULL if not found. */
+
+extern rtx_insn *get_insn_by_uid (int uid);
+
+extern void verify_three_block_rtl_cfg (function *fun);
+
+} /* end of namespace selftest. */
+
+#endif /* #if CHECKING_P */
+
+#endif /* GCC_SELFTEST_RTL_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/selftest.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/selftest.h
new file mode 100644
index 0000000..20d522a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/selftest.h
@@ -0,0 +1,492 @@
+/* A self-testing framework, for use by -fself-test.
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SELFTEST_H
+#define GCC_SELFTEST_H
+
+/* The selftest code should entirely disappear in a production
+ configuration, hence we guard all of it with #if CHECKING_P. */
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* A struct describing the source-location of a selftest, to make it
+ easier to track down failing tests. */
+
+class location
+{
+public:
+ location (const char *file, int line, const char *function)
+ : m_file (file), m_line (line), m_function (function) {}
+
+ const char *m_file;
+ int m_line;
+ const char *m_function;
+};
+
+/* A macro for use in selftests and by the ASSERT_ macros below,
+ constructing a selftest::location for the current source location. */
+
+#define SELFTEST_LOCATION \
+ (::selftest::location (__FILE__, __LINE__, __FUNCTION__))
+
+/* The entrypoint for running all tests. */
+
+extern void run_tests ();
+
+/* Record the successful outcome of some aspect of the test. */
+
+extern void pass (const location &loc, const char *msg);
+
+/* Report the failed outcome of some aspect of the test and abort. */
+
+extern void fail (const location &loc, const char *msg)
+ ATTRIBUTE_NORETURN;
+
+/* As "fail", but using printf-style formatted output. */
+
+extern void fail_formatted (const location &loc, const char *fmt, ...)
+ ATTRIBUTE_PRINTF_2 ATTRIBUTE_NORETURN;
+
+/* Implementation detail of ASSERT_STREQ. */
+
+extern void assert_streq (const location &loc,
+ const char *desc_val1, const char *desc_val2,
+ const char *val1, const char *val2);
+
+/* Implementation detail of ASSERT_STR_CONTAINS. */
+
+extern void assert_str_contains (const location &loc,
+ const char *desc_haystack,
+ const char *desc_needle,
+ const char *val_haystack,
+ const char *val_needle);
+
+/* Implementation detail of ASSERT_STR_STARTSWITH. */
+
+extern void assert_str_startswith (const location &loc,
+ const char *desc_str,
+ const char *desc_prefix,
+ const char *val_str,
+ const char *val_prefix);
+
+
+/* A named temporary file for use in selftests.
+ Usable for writing out files, and as the base class for
+ temp_source_file.
+ The file is unlinked in the destructor. */
+
+class named_temp_file
+{
+ public:
+ named_temp_file (const char *suffix);
+ ~named_temp_file ();
+ const char *get_filename () const { return m_filename; }
+
+ private:
+ char *m_filename;
+};
+
+/* A class for writing out a temporary sourcefile for use in selftests
+ of input handling. */
+
+class temp_source_file : public named_temp_file
+{
+ public:
+ temp_source_file (const location &loc, const char *suffix,
+ const char *content);
+ temp_source_file (const location &loc, const char *suffix,
+ const char *content, size_t sz);
+};
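+
+/* A sketch of use in an input-handling selftest:
+
+     temp_source_file tmp (SELFTEST_LOCATION, ".c",
+                           "int the_answer = 42;\n");
+     const char *path = tmp.get_filename ();
+
+   The underlying file is unlinked when TMP goes out of scope.  */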
+
+/* RAII-style class for avoiding introducing locale-specific differences
+ in strings containing localized quote marks, by temporarily overriding
+ the "open_quote" and "close_quote" globals to something hardcoded.
+
+ Specifically, the C locale's values are used:
+ - open_quote becomes "`"
+ - close_quote becomes "'"
+ for the lifetime of the object. */
+
+class auto_fix_quotes
+{
+ public:
+ auto_fix_quotes ();
+ ~auto_fix_quotes ();
+
+ private:
+ const char *m_saved_open_quote;
+ const char *m_saved_close_quote;
+};
+
+/* Various selftests involving location-handling require constructing a
+ line table and one or more line maps within it.
+
+ For maximum test coverage we want to run these tests with a variety
+ of situations:
+ - line_table->default_range_bits: some frontends use a non-zero value
+ and others use zero
+ - the fallback modes within line-map.cc: there are various threshold
+     values for location_t beyond which line-map.cc changes
+ behavior (disabling of the range-packing optimization, disabling
+ of column-tracking). We can exercise these by starting the line_table
+ at interesting values at or near these thresholds.
+
+ The following struct describes a particular case within our test
+ matrix. */
+
+class line_table_case;
+
+/* A class for overriding the global "line_table" within a selftest,
+ restoring its value afterwards. At most one instance of this
+ class can exist at once, due to the need to keep the old value
+ of line_table as a GC root. */
+
+class line_table_test
+{
+ public:
+ /* Default constructor. Override "line_table", using sane defaults
+ for the temporary line_table. */
+ line_table_test ();
+
+ /* Constructor. Override "line_table", using the case described by C. */
+ line_table_test (const line_table_case &c);
+
+ /* Destructor. Restore the saved line_table. */
+ ~line_table_test ();
+};
+
+/* Helper function for selftests that need a function decl. */
+
+extern tree make_fndecl (tree return_type,
+ const char *name,
+ vec <tree> &param_types,
+ bool is_variadic = false);
+
+/* Run TESTCASE multiple times, once for each case in our test matrix. */
+
+extern void
+for_each_line_table_case (void (*testcase) (const line_table_case &));
+
+/* Read the contents of PATH into memory, returning a 0-terminated buffer
+ that must be freed by the caller.
+ Fail (and abort) if there are any problems, with LOC as the reported
+ location of the failure. */
+
+extern char *read_file (const location &loc, const char *path);
+
+/* Convert a path relative to SRCDIR/gcc/testsuite/selftests
+ to a real path (either absolute, or relative to pwd).
+ The result should be freed by the caller. */
+
+extern char *locate_file (const char *path);
+
+/* The path of SRCDIR/testsuite/selftests. */
+
+extern const char *path_to_selftest_files;
+
+/* selftest::test_runner is an implementation detail of selftest::run_tests,
+ exposed here to allow plugins to run their own suites of tests. */
+
+class test_runner
+{
+ public:
+ test_runner (const char *name);
+ ~test_runner ();
+
+ private:
+ const char *m_name;
+ long m_start_time;
+};
+
+/* Declarations for specific families of tests (by source file), in
+ alphabetical order. */
+extern void attribs_cc_tests ();
+extern void bitmap_cc_tests ();
+extern void cgraph_cc_tests ();
+extern void convert_cc_tests ();
+extern void diagnostic_format_json_cc_tests ();
+extern void diagnostic_show_locus_cc_tests ();
+extern void digraph_cc_tests ();
+extern void dumpfile_cc_tests ();
+extern void edit_context_cc_tests ();
+extern void et_forest_cc_tests ();
+extern void fibonacci_heap_cc_tests ();
+extern void fold_const_cc_tests ();
+extern void function_tests_cc_tests ();
+extern void ggc_tests_cc_tests ();
+extern void gimple_cc_tests ();
+extern void hash_map_tests_cc_tests ();
+extern void hash_set_tests_cc_tests ();
+extern void input_cc_tests ();
+extern void json_cc_tests ();
+extern void optinfo_emit_json_cc_tests ();
+extern void opts_cc_tests ();
+extern void ordered_hash_map_tests_cc_tests ();
+extern void predict_cc_tests ();
+extern void pretty_print_cc_tests ();
+extern void range_tests ();
+extern void range_op_tests ();
+extern void relation_tests ();
+extern void gimple_range_tests ();
+extern void read_rtl_function_cc_tests ();
+extern void rtl_tests_cc_tests ();
+extern void sbitmap_cc_tests ();
+extern void selftest_cc_tests ();
+extern void simplify_rtx_cc_tests ();
+extern void spellcheck_cc_tests ();
+extern void spellcheck_tree_cc_tests ();
+extern void splay_tree_cc_tests ();
+extern void sreal_cc_tests ();
+extern void store_merging_cc_tests ();
+extern void tree_cc_tests ();
+extern void tree_cfg_cc_tests ();
+extern void tree_diagnostic_path_cc_tests ();
+extern void tristate_cc_tests ();
+extern void typed_splay_tree_cc_tests ();
+extern void vec_cc_tests ();
+extern void vec_perm_indices_cc_tests ();
+extern void wide_int_cc_tests ();
+extern void opt_suggestions_cc_tests ();
+extern void dbgcnt_cc_tests ();
+extern void ipa_modref_tree_cc_tests ();
+
+extern int num_passes;
+
+} /* end of namespace selftest. */
+
+/* Macros for writing tests. */
+
+/* Evaluate EXPR and coerce to bool, calling
+ ::selftest::pass if it is true,
+   ::selftest::fail if it is false.  */
+
+#define ASSERT_TRUE(EXPR) \
+ ASSERT_TRUE_AT (SELFTEST_LOCATION, (EXPR))
+
+/* Like ASSERT_TRUE, but treat LOC as the effective location of the
+ selftest. */
+
+#define ASSERT_TRUE_AT(LOC, EXPR) \
+ SELFTEST_BEGIN_STMT \
+ const char *desc_ = "ASSERT_TRUE (" #EXPR ")"; \
+ bool actual_ = (EXPR); \
+ if (actual_) \
+ ::selftest::pass ((LOC), desc_); \
+ else \
+ ::selftest::fail ((LOC), desc_); \
+ SELFTEST_END_STMT
+
+/* Evaluate EXPR and coerce to bool, calling
+ ::selftest::pass if it is false,
+   ::selftest::fail if it is true.  */
+
+#define ASSERT_FALSE(EXPR) \
+ ASSERT_FALSE_AT (SELFTEST_LOCATION, (EXPR))
+
+/* Like ASSERT_FALSE, but treat LOC as the effective location of the
+ selftest. */
+
+#define ASSERT_FALSE_AT(LOC, EXPR) \
+ SELFTEST_BEGIN_STMT \
+ const char *desc_ = "ASSERT_FALSE (" #EXPR ")"; \
+ bool actual_ = (EXPR); \
+ if (actual_) \
+ ::selftest::fail ((LOC), desc_); \
+ else \
+ ::selftest::pass ((LOC), desc_); \
+ SELFTEST_END_STMT
+
+/* Evaluate VAL1 and VAL2 and compare them with ==, calling
+ ::selftest::pass if they are equal,
+ ::selftest::fail if they are non-equal. */
+
+#define ASSERT_EQ(VAL1, VAL2) \
+ ASSERT_EQ_AT ((SELFTEST_LOCATION), (VAL1), (VAL2))
+
+/* Like ASSERT_EQ, but treat LOC as the effective location of the
+ selftest. */
+
+#define ASSERT_EQ_AT(LOC, VAL1, VAL2) \
+ SELFTEST_BEGIN_STMT \
+ const char *desc_ = "ASSERT_EQ (" #VAL1 ", " #VAL2 ")"; \
+ if ((VAL1) == (VAL2)) \
+ ::selftest::pass ((LOC), desc_); \
+ else \
+ ::selftest::fail ((LOC), desc_); \
+ SELFTEST_END_STMT
+
+/* Evaluate VAL1 and VAL2 and compare them with known_eq, calling
+ ::selftest::pass if they are always equal,
+ ::selftest::fail if they might be non-equal. */
+
+#define ASSERT_KNOWN_EQ(VAL1, VAL2) \
+ ASSERT_KNOWN_EQ_AT ((SELFTEST_LOCATION), (VAL1), (VAL2))
+
+/* Like ASSERT_KNOWN_EQ, but treat LOC as the effective location of the
+ selftest. */
+
+#define ASSERT_KNOWN_EQ_AT(LOC, VAL1, VAL2) \
+ SELFTEST_BEGIN_STMT \
+ const char *desc = "ASSERT_KNOWN_EQ (" #VAL1 ", " #VAL2 ")"; \
+ if (known_eq (VAL1, VAL2)) \
+ ::selftest::pass ((LOC), desc); \
+ else \
+ ::selftest::fail ((LOC), desc); \
+ SELFTEST_END_STMT
+
+/* Evaluate VAL1 and VAL2 and compare them with !=, calling
+ ::selftest::pass if they are non-equal,
+ ::selftest::fail if they are equal. */
+
+#define ASSERT_NE(VAL1, VAL2) \
+ SELFTEST_BEGIN_STMT \
+ const char *desc_ = "ASSERT_NE (" #VAL1 ", " #VAL2 ")"; \
+ if ((VAL1) != (VAL2)) \
+ ::selftest::pass (SELFTEST_LOCATION, desc_); \
+ else \
+ ::selftest::fail (SELFTEST_LOCATION, desc_); \
+ SELFTEST_END_STMT
+
+/* Evaluate VAL1 and VAL2 and compare them with maybe_ne, calling
+ ::selftest::pass if they might be non-equal,
+ ::selftest::fail if they are known to be equal. */
+
+#define ASSERT_MAYBE_NE(VAL1, VAL2) \
+ ASSERT_MAYBE_NE_AT ((SELFTEST_LOCATION), (VAL1), (VAL2))
+
+/* Like ASSERT_MAYBE_NE, but treat LOC as the effective location of the
+ selftest. */
+
+#define ASSERT_MAYBE_NE_AT(LOC, VAL1, VAL2) \
+ SELFTEST_BEGIN_STMT \
+ const char *desc = "ASSERT_MAYBE_NE (" #VAL1 ", " #VAL2 ")"; \
+ if (maybe_ne (VAL1, VAL2)) \
+ ::selftest::pass ((LOC), desc); \
+ else \
+ ::selftest::fail ((LOC), desc); \
+ SELFTEST_END_STMT
+
+/* Evaluate LHS and RHS and compare them with >, calling
+ ::selftest::pass if LHS > RHS,
+ ::selftest::fail otherwise. */
+
+#define ASSERT_GT(LHS, RHS) \
+ ASSERT_GT_AT ((SELFTEST_LOCATION), (LHS), (RHS))
+
+/* Like ASSERT_GT, but treat LOC as the effective location of the
+ selftest. */
+
+#define ASSERT_GT_AT(LOC, LHS, RHS) \
+ SELFTEST_BEGIN_STMT \
+ const char *desc_ = "ASSERT_GT (" #LHS ", " #RHS ")"; \
+ if ((LHS) > (RHS)) \
+ ::selftest::pass ((LOC), desc_); \
+ else \
+ ::selftest::fail ((LOC), desc_); \
+ SELFTEST_END_STMT
+
+/* Evaluate LHS and RHS and compare them with <, calling
+ ::selftest::pass if LHS < RHS,
+ ::selftest::fail otherwise. */
+
+#define ASSERT_LT(LHS, RHS) \
+ ASSERT_LT_AT ((SELFTEST_LOCATION), (LHS), (RHS))
+
+/* Like ASSERT_LT, but treat LOC as the effective location of the
+ selftest. */
+
+#define ASSERT_LT_AT(LOC, LHS, RHS) \
+ SELFTEST_BEGIN_STMT \
+ const char *desc_ = "ASSERT_LT (" #LHS ", " #RHS ")"; \
+ if ((LHS) < (RHS)) \
+ ::selftest::pass ((LOC), desc_); \
+ else \
+ ::selftest::fail ((LOC), desc_); \
+ SELFTEST_END_STMT
+
+/* Evaluate VAL1 and VAL2 and compare them with strcmp, calling
+ ::selftest::pass if they are equal (and both are non-NULL),
+   ::selftest::fail if they are non-equal, or if either is NULL.  */
+
+#define ASSERT_STREQ(VAL1, VAL2) \
+ SELFTEST_BEGIN_STMT \
+ ::selftest::assert_streq (SELFTEST_LOCATION, #VAL1, #VAL2, \
+ (VAL1), (VAL2)); \
+ SELFTEST_END_STMT
+
+/* Like ASSERT_STREQ, but treat LOC as the effective location of the
+ selftest. */
+
+#define ASSERT_STREQ_AT(LOC, VAL1, VAL2) \
+ SELFTEST_BEGIN_STMT \
+ ::selftest::assert_streq ((LOC), #VAL1, #VAL2, \
+ (VAL1), (VAL2)); \
+ SELFTEST_END_STMT
+
+/* Evaluate HAYSTACK and NEEDLE and use strstr to determine if NEEDLE
+ is within HAYSTACK.
+ ::selftest::pass if NEEDLE is found.
+ ::selftest::fail if it is not found. */
+
+#define ASSERT_STR_CONTAINS(HAYSTACK, NEEDLE) \
+ SELFTEST_BEGIN_STMT \
+ ::selftest::assert_str_contains (SELFTEST_LOCATION, #HAYSTACK, #NEEDLE, \
+ (HAYSTACK), (NEEDLE)); \
+ SELFTEST_END_STMT
+
+/* Like ASSERT_STR_CONTAINS, but treat LOC as the effective location of the
+ selftest. */
+
+#define ASSERT_STR_CONTAINS_AT(LOC, HAYSTACK, NEEDLE) \
+ SELFTEST_BEGIN_STMT \
+ ::selftest::assert_str_contains (LOC, #HAYSTACK, #NEEDLE, \
+ (HAYSTACK), (NEEDLE)); \
+ SELFTEST_END_STMT
+
+/* Evaluate STR and PREFIX and determine if STR starts with PREFIX.
+ ::selftest::pass if STR does start with PREFIX.
+   ::selftest::fail if it does not, or if either is NULL.  */
+
+#define ASSERT_STR_STARTSWITH(STR, PREFIX) \
+ SELFTEST_BEGIN_STMT \
+ ::selftest::assert_str_startswith (SELFTEST_LOCATION, #STR, #PREFIX, \
+ (STR), (PREFIX)); \
+ SELFTEST_END_STMT
+
+/* Evaluate PRED1 (VAL1), calling ::selftest::pass if it is true,
+ ::selftest::fail if it is false. */
+
+#define ASSERT_PRED1(PRED1, VAL1) \
+ SELFTEST_BEGIN_STMT \
+ const char *desc_ = "ASSERT_PRED1 (" #PRED1 ", " #VAL1 ")"; \
+ bool actual_ = (PRED1) (VAL1); \
+ if (actual_) \
+ ::selftest::pass (SELFTEST_LOCATION, desc_); \
+ else \
+ ::selftest::fail (SELFTEST_LOCATION, desc_); \
+ SELFTEST_END_STMT
+
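+/* A sketch of a test function written with these macros (the function
+   name is hypothetical; real tests are invoked via the *_cc_tests
+   declarations above):
+
+     static void
+     test_arithmetic ()
+     {
+       ASSERT_TRUE (1 + 1 == 2);
+       ASSERT_EQ (4, 2 + 2);
+       ASSERT_NE (1, 2);
+       ASSERT_STREQ ("foo", "foo");
+     }  */
+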
+#define SELFTEST_BEGIN_STMT do {
+#define SELFTEST_END_STMT } while (0)
+
+#endif /* #if CHECKING_P */
+
+#endif /* GCC_SELFTEST_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sese.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sese.h
new file mode 100644
index 0000000..252318b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sese.h
@@ -0,0 +1,310 @@
+/* Single entry single exit control flow regions.
+ Copyright (C) 2008-2023 Free Software Foundation, Inc.
+ Contributed by Jan Sjodin <jan.sjodin@amd.com> and
+ Sebastian Pop <sebastian.pop@amd.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SESE_H
+#define GCC_SESE_H
+
+typedef struct ifsese_s *ifsese;
+
+/* A Single Entry, Single Exit region is a part of the CFG delimited
+ by two edges. */
+class sese_l
+{
+public:
+ sese_l (edge e, edge x) : entry (e), exit (x) {}
+
+ operator bool () const { return entry && exit; }
+
+ edge entry;
+ edge exit;
+};
+
+void print_edge (FILE *file, const_edge e);
+void print_sese (FILE *file, const sese_l &s);
+void dump_edge (const_edge e);
+void dump_sese (const sese_l &);
+
+/* Get the entry of an sese S. */
+
+inline basic_block
+get_entry_bb (const sese_l &s)
+{
+ return s.entry->dest;
+}
+
+/* Get the exit of an sese S. */
+
+inline basic_block
+get_exit_bb (const sese_l &s)
+{
+ return s.exit->src;
+}
+
+/* Return the index within V at which ELEM can be found, or -1 otherwise.  */
+template<typename T>
+int
+vec_find (const vec<T> &v, const T &elem)
+{
+ int i;
+ T t;
+ FOR_EACH_VEC_ELT (v, i, t)
+ if (elem == t)
+ return i;
+ return -1;
+}
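+
+/* E.g. a sketch:
+
+     auto_vec<int> v;
+     v.safe_push (5);
+     v.safe_push (7);
+     gcc_assert (vec_find<int> (v, 7) == 1 && vec_find<int> (v, 9) == -1);  */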
+
+/* A helper structure for bookkeeping information about a scop in graphite. */
+typedef class sese_info_t
+{
+public:
+ /* The SESE region. */
+ sese_l region;
+
+ /* Liveout vars. */
+ bitmap liveout;
+
+ /* Liveout in debug stmts. */
+ bitmap debug_liveout;
+
+ /* Parameters used within the SCOP. */
+ vec<tree> params;
+
+ /* Maps an old name to a new decl. */
+ hash_map<tree, tree> *rename_map;
+
+ /* Basic blocks contained in this SESE. */
+ vec<basic_block> bbs;
+
+ /* The condition region generated for this sese. */
+ ifsese if_region;
+
+} *sese_info_p;
+
+extern sese_info_p new_sese_info (edge, edge);
+extern void free_sese_info (sese_info_p);
+extern void sese_insert_phis_for_liveouts (sese_info_p, basic_block, edge, edge);
+extern class loop *outermost_loop_in_sese (sese_l &, basic_block);
+extern tree scalar_evolution_in_region (const sese_l &, loop_p, tree);
+extern bool scev_analyzable_p (tree, sese_l &);
+extern bool invariant_in_sese_p_rec (tree, const sese_l &, bool *);
+extern void sese_build_liveouts (sese_info_p);
+extern bool sese_trivially_empty_bb_p (basic_block);
+
+/* The number of parameters in REGION. */
+
+inline unsigned
+sese_nb_params (sese_info_p region)
+{
+ return region->params.length ();
+}
+
+/* Checks whether BB is contained in the region delimited by ENTRY and
+   EXIT blocks: ENTRY dominates BB, and EXIT does not dominate BB
+   unless EXIT also dominates ENTRY.  */
+
+inline bool
+bb_in_region (const_basic_block bb, const_basic_block entry, const_basic_block exit)
+{
+ return dominated_by_p (CDI_DOMINATORS, bb, entry)
+ && !(dominated_by_p (CDI_DOMINATORS, bb, exit)
+ && !dominated_by_p (CDI_DOMINATORS, entry, exit));
+}
+
+/* Checks whether BB is contained in the SESE region R.  */
+
+inline bool
+bb_in_sese_p (basic_block bb, const sese_l &r)
+{
+ return bb_in_region (bb, r.entry->dest, r.exit->dest);
+}
+
+/* Returns true when STMT is defined in REGION. */
+
+inline bool
+stmt_in_sese_p (gimple *stmt, const sese_l &r)
+{
+ basic_block bb = gimple_bb (stmt);
+ return bb && bb_in_sese_p (bb, r);
+}
+
+/* Returns true when NAME is defined in REGION. */
+
+inline bool
+defined_in_sese_p (tree name, const sese_l &r)
+{
+ return stmt_in_sese_p (SSA_NAME_DEF_STMT (name), r);
+}
+
+/* Returns true when LOOP is in REGION. */
+
+inline bool
+loop_in_sese_p (class loop *loop, const sese_l &region)
+{
+ return (bb_in_sese_p (loop->header, region)
+ && bb_in_sese_p (loop->latch, region));
+}
+
+/* Returns the loop depth of LOOP in REGION. The loop depth
+ is the same as the normal loop depth, but limited by a region.
+
+ Example:
+
+ loop_0
+ loop_1
+ {
+ S0
+ <- region start
+ S1
+
+ loop_2
+ S2
+
+ S3
+ <- region end
+ }
+
+ loop_0 does not exist in the region -> invalid
+ loop_1 exists, but is not completely contained in the region -> depth 0
+ loop_2 is completely contained -> depth 1 */
+
+inline unsigned int
+sese_loop_depth (const sese_l &region, loop_p loop)
+{
+ unsigned int depth = 0;
+
+ while (loop_in_sese_p (loop, region))
+ {
+ depth++;
+ loop = loop_outer (loop);
+ }
+
+ return depth;
+}
+
+/* A single entry single exit specialized for conditions. */
+
+typedef struct ifsese_s {
+ sese_info_p region;
+ sese_info_p true_region;
+ sese_info_p false_region;
+} *ifsese;
+
+extern ifsese move_sese_in_condition (sese_info_p);
+extern void set_ifsese_condition (ifsese, tree);
+extern edge get_true_edge_from_guard_bb (basic_block);
+extern edge get_false_edge_from_guard_bb (basic_block);
+
+inline edge
+if_region_entry (ifsese if_region)
+{
+ return if_region->region->region.entry;
+}
+
+inline edge
+if_region_exit (ifsese if_region)
+{
+ return if_region->region->region.exit;
+}
+
+inline basic_block
+if_region_get_condition_block (ifsese if_region)
+{
+ return if_region_entry (if_region)->dest;
+}
+
+typedef std::pair <gimple *, tree> scalar_use;
+
+typedef struct gimple_poly_bb
+{
+ basic_block bb;
+ struct poly_bb *pbb;
+
+ /* Lists containing the restrictions of the conditional statements
+     dominating this bb.  This bb can only be executed if all conditions
+ are true.
+
+ Example:
+
+ for (i = 0; i <= 20; i++)
+ {
+ A
+
+ if (2i <= 8)
+ B
+ }
+
+ So for B there is an additional condition (2i <= 8).
+
+ List of COND_EXPR and SWITCH_EXPR. A COND_EXPR is true only if the
+ corresponding element in CONDITION_CASES is not NULL_TREE. For a
+ SWITCH_EXPR the corresponding element in CONDITION_CASES is a
+ CASE_LABEL_EXPR. */
+ vec<gimple *> conditions;
+ vec<gimple *> condition_cases;
+ vec<data_reference_p> data_refs;
+ vec<scalar_use> read_scalar_refs;
+ vec<tree> write_scalar_refs;
+} *gimple_poly_bb_p;
+
+#define GBB_BB(GBB) (GBB)->bb
+#define GBB_PBB(GBB) (GBB)->pbb
+#define GBB_DATA_REFS(GBB) (GBB)->data_refs
+#define GBB_CONDITIONS(GBB) (GBB)->conditions
+#define GBB_CONDITION_CASES(GBB) (GBB)->condition_cases
+
+/* Return the innermost loop that contains the basic block of GBB.  */
+
+inline class loop *
+gbb_loop (gimple_poly_bb_p gbb)
+{
+ return GBB_BB (gbb)->loop_father;
+}
+
+/* Return the gimple loop that corresponds to loop index INDEX within
+   REGION.  If there is no corresponding gimple loop, return NULL.  */
+
+inline loop_p
+gbb_loop_at_index (gimple_poly_bb_p gbb, sese_l &region, int index)
+{
+ loop_p loop = gbb_loop (gbb);
+ int depth = sese_loop_depth (region, loop);
+
+ while (--depth > index)
+ loop = loop_outer (loop);
+
+ gcc_assert (loop_in_sese_p (loop, region));
+
+ return loop;
+}
+
+/* The number of common loops in REGION for GBB1 and GBB2. */
+
+inline int
+nb_common_loops (sese_l &region, gimple_poly_bb_p gbb1, gimple_poly_bb_p gbb2)
+{
+ loop_p l1 = gbb_loop (gbb1);
+ loop_p l2 = gbb_loop (gbb2);
+ loop_p common = find_common_loop (l1, l2);
+
+ return sese_loop_depth (region, common);
+}
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/shortest-paths.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/shortest-paths.h
new file mode 100644
index 0000000..8b16afc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/shortest-paths.h
@@ -0,0 +1,215 @@
+/* Template class for Dijkstra's algorithm on directed graphs.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SHORTEST_PATHS_H
+#define GCC_SHORTEST_PATHS_H
+
+#include "timevar.h"
+
+enum shortest_path_sense
+{
+ /* Find the shortest path from the given origin node to each
+ node in the graph. */
+ SPS_FROM_GIVEN_ORIGIN,
+
+ /* Find the shortest path from each node in the graph to the
+ given target node. */
+ SPS_TO_GIVEN_TARGET
+};
+
+/* A record of the shortest path for each node relative to a special
+ "given node", either:
+ SPS_FROM_GIVEN_ORIGIN:
+ from the given origin node to each node in a graph, or
+ SPS_TO_GIVEN_TARGET:
+ from each node in a graph to the given target node.
+
+ The constructor runs Dijkstra's algorithm, and the results are
+ stored in this class. */
+
+template <typename GraphTraits, typename Path_t>
+class shortest_paths
+{
+public:
+ typedef typename GraphTraits::graph_t graph_t;
+ typedef typename GraphTraits::node_t node_t;
+ typedef typename GraphTraits::edge_t edge_t;
+ typedef Path_t path_t;
+
+ shortest_paths (const graph_t &graph, const node_t *given_node,
+ enum shortest_path_sense sense);
+
+ path_t get_shortest_path (const node_t *other_node) const;
+ int get_shortest_distance (const node_t *other_node) const;
+
+private:
+ const graph_t &m_graph;
+
+ enum shortest_path_sense m_sense;
+
+ /* For each node (by index), the minimal distance between that node
+ and the given node (with direction depending on m_sense). */
+ auto_vec<int> m_dist;
+
+ /* For each node (by index):
+ SPS_FROM_GIVEN_ORIGIN:
+ the previous edge in the shortest path from the origin,
+ SPS_TO_GIVEN_TARGET:
+ the next edge in the shortest path to the target. */
+ auto_vec<const edge_t *> m_best_edge;
+};
+
+/* shortest_paths's constructor.
+
+ Use Dijkstra's algorithm relative to GIVEN_NODE to populate m_dist and
+ m_best_edge with enough information to be able to generate Path_t instances
+ to give the shortest path...
+ SPS_FROM_GIVEN_ORIGIN: to each node in a graph from the origin node, or
+ SPS_TO_GIVEN_TARGET: from each node in a graph to the target node. */
+
+template <typename GraphTraits, typename Path_t>
+inline
+shortest_paths<GraphTraits, Path_t>::
+shortest_paths (const graph_t &graph,
+ const node_t *given_node,
+ enum shortest_path_sense sense)
+: m_graph (graph),
+ m_sense (sense),
+ m_dist (graph.m_nodes.length ()),
+ m_best_edge (graph.m_nodes.length ())
+{
+ auto_timevar tv (TV_ANALYZER_SHORTEST_PATHS);
+
+ auto_vec<int> queue (graph.m_nodes.length ());
+
+ for (unsigned i = 0; i < graph.m_nodes.length (); i++)
+ {
+ m_dist.quick_push (INT_MAX);
+ m_best_edge.quick_push (NULL);
+ queue.quick_push (i);
+ }
+ m_dist[given_node->m_index] = 0;
+
+ while (queue.length () > 0)
+ {
+ /* Get minimal distance in queue.
+ FIXME: this is O(N^2); replace with a priority queue. */
+ int idx_with_min_dist = -1;
+ int idx_in_queue_with_min_dist = -1;
+ int min_dist = INT_MAX;
+ for (unsigned i = 0; i < queue.length (); i++)
+ {
+ int idx = queue[i];
+ if (m_dist[queue[i]] < min_dist)
+ {
+ min_dist = m_dist[idx];
+ idx_with_min_dist = idx;
+ idx_in_queue_with_min_dist = i;
+ }
+ }
+ if (idx_with_min_dist == -1)
+ break;
+ gcc_assert (idx_in_queue_with_min_dist != -1);
+
+ // FIXME: this is confusing: there are two indices here
+
+ queue.unordered_remove (idx_in_queue_with_min_dist);
+
+ node_t *n
+ = static_cast <node_t *> (m_graph.m_nodes[idx_with_min_dist]);
+
+ if (m_sense == SPS_FROM_GIVEN_ORIGIN)
+ {
+ int i;
+ edge_t *succ;
+ FOR_EACH_VEC_ELT (n->m_succs, i, succ)
+ {
+ // TODO: only for dest still in queue
+ node_t *dest = succ->m_dest;
+ int alt = m_dist[n->m_index] + 1;
+ if (alt < m_dist[dest->m_index])
+ {
+ m_dist[dest->m_index] = alt;
+ m_best_edge[dest->m_index] = succ;
+ }
+ }
+ }
+ else
+ {
+ int i;
+ edge_t *pred;
+ FOR_EACH_VEC_ELT (n->m_preds, i, pred)
+ {
+ // TODO: only for dest still in queue
+ node_t *src = pred->m_src;
+ int alt = m_dist[n->m_index] + 1;
+ if (alt < m_dist[src->m_index])
+ {
+ m_dist[src->m_index] = alt;
+ m_best_edge[src->m_index] = pred;
+ }
+ }
+ }
+ }
+}
+
+/* Generate an Path_t instance giving the shortest path between OTHER_NODE
+ and the given node.
+
+ SPS_FROM_GIVEN_ORIGIN: shortest path from given origin node to OTHER_NODE
+ SPS_TO_GIVEN_TARGET: shortest path from OTHER_NODE to given target node.
+
+ If no such path exists, return an empty path. */
+
+template <typename GraphTraits, typename Path_t>
+inline Path_t
+shortest_paths<GraphTraits, Path_t>::
+get_shortest_path (const node_t *other_node) const
+{
+ Path_t result;
+
+ while (m_best_edge[other_node->m_index])
+ {
+ result.m_edges.safe_push (m_best_edge[other_node->m_index]);
+ if (m_sense == SPS_FROM_GIVEN_ORIGIN)
+ other_node = m_best_edge[other_node->m_index]->m_src;
+ else
+ other_node = m_best_edge[other_node->m_index]->m_dest;
+ }
+
+ if (m_sense == SPS_FROM_GIVEN_ORIGIN)
+ result.m_edges.reverse ();
+
+ return result;
+}
+
+/* Get the shortest distance...
+ SPS_FROM_GIVEN_ORIGIN: ...from given origin node to OTHER_NODE
+ SPS_TO_GIVEN_TARGET: ...from OTHER_NODE to given target node. */
+
+template <typename GraphTraits, typename Path_t>
+inline int
+shortest_paths<GraphTraits, Path_t>::
+get_shortest_distance (const node_t *other_node) const
+{
+ return m_dist[other_node->m_index];
+}
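+
+/* A usage sketch with hypothetical traits and path types; a real client
+   supplies a GraphTraits exposing graph_t/node_t/edge_t and a Path_t
+   holding a vec of edges named m_edges:
+
+     shortest_paths<my_traits, my_path> sp (g, target, SPS_TO_GIVEN_TARGET);
+     my_path p (sp.get_shortest_path (node));
+     int dist = sp.get_shortest_distance (node);  */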
+
+#endif /* GCC_SHORTEST_PATHS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/shrink-wrap.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/shrink-wrap.h
new file mode 100644
index 0000000..1616477
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/shrink-wrap.h
@@ -0,0 +1,34 @@
+/* Shrink-wrapping related functions.
+ Copyright (C) 1989-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SHRINK_WRAP_H
+#define GCC_SHRINK_WRAP_H
+
+#include "function.h"
+
+/* In shrink-wrap.cc. */
+extern bool requires_stack_frame_p (rtx_insn *, HARD_REG_SET, HARD_REG_SET);
+extern void try_shrink_wrapping (edge *entry_edge, rtx_insn *prologue_seq);
+extern void try_shrink_wrapping_separate (basic_block first_bb);
+#define SHRINK_WRAPPING_ENABLED \
+ (flag_shrink_wrap && targetm.have_simple_return ())
+
+#endif /* GCC_SHRINK_WRAP_H */
+
+
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/signop.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/signop.h
new file mode 100644
index 0000000..1dcb194
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/signop.h
@@ -0,0 +1,33 @@
+/* Operations with SIGNED and UNSIGNED. -*- C++ -*-
+ Copyright (C) 2012-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef SIGNOP_H
+#define SIGNOP_H
+
+/* This type is used for the large number of functions that produce
+   different results depending on whether the operands are signed types or
+ unsigned types. The signedness of a tree type can be found by
+ using the TYPE_SIGN macro. */
+
+enum signop {
+ SIGNED,
+ UNSIGNED
+};
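+
+/* For instance, the wide-int comparisons take a signop so that a single
+   helper serves both signednesses (a sketch; wi::lt_p and wi::to_wide
+   are declared in wide-int.h and tree.h):
+
+     if (wi::lt_p (wi::to_wide (a), wi::to_wide (b), TYPE_SIGN (type)))
+       ...  */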
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sparseset.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sparseset.h
new file mode 100644
index 0000000..694ff11
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sparseset.h
@@ -0,0 +1,218 @@
+/* SparseSet implementation.
+ Copyright (C) 2007-2023 Free Software Foundation, Inc.
+ Contributed by Peter Bergner <bergner@vnet.ibm.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SPARSESET_H
+#define GCC_SPARSESET_H
+
+/* Implementation of the Briggs and Torczon sparse set representation.
+ The sparse set representation was first published in:
+
+ "An Efficient Representation for Sparse Sets",
+ ACM LOPLAS, Vol. 2, Nos. 1-4, March-December 1993, Pages 59-69.
+
+ The sparse set representation is suitable for integer sets with a
+ fixed-size universe. Two vectors are used to store the members of
+ the set. If an element I is in the set, then sparse[I] is the
+ index of I in the dense vector, and dense[sparse[I]] == I. The dense
+ vector works like a stack. The size of the stack is the cardinality
+ of the set.
+
+ The following operations can be performed in O(1) time:
+
+ * clear : sparseset_clear
+ * cardinality : sparseset_cardinality
+ * set_size : sparseset_size
+ * member_p : sparseset_bit_p
+ * add_member : sparseset_set_bit
+ * remove_member : sparseset_clear_bit
+ * choose_one : sparseset_pop
+
+ Additionally, the sparse set representation supports enumeration of
+ the members in O(N) time, where N is the number of members in the set.
+ The members of the set are stored cache-friendly in the dense vector.
+ This makes it a competitive choice for iterating over relatively sparse
+ sets that require the following operations:
+
+ * forall : EXECUTE_IF_SET_IN_SPARSESET
+ * set_copy : sparseset_copy
+ * set_intersection : sparseset_and
+ * set_union : sparseset_ior
+ * set_difference : sparseset_and_compl
+ * set_disjunction : (not implemented)
+ * set_compare : sparseset_equal_p
+
+ NB: It is OK to use remove_member during EXECUTE_IF_SET_IN_SPARSESET.
+ The iterator is updated for it.
+
+ Based on the efficiency of these operations, this representation of
+ sparse sets will often be superior to alternatives such as simple
+ bitmaps, linked-list bitmaps, array bitmaps, balanced binary trees,
+ hash tables, linked lists, etc., if the set is sufficiently sparse.
+ In the LOPLAS paper, the cut-off point at which sparse sets became
+ faster than simple bitmaps (see sbitmap.h) was N / U < 64 (where U is
+ the size of the universe of the set).
+
+ Because the set universe is fixed, the set cannot be resized. For
+ sparse sets with initially unknown size, linked-list bitmaps are a
+ better choice, see bitmap.h.
+
+ A sparse set's storage requirements are relatively large: O(U) with a
+ larger constant than sbitmaps (if the storage requirement for an
+ sbitmap with universe U is S, then the storage required for a sparse
+ set over the same universe is 2 * sizeof (SPARSESET_ELT_TYPE) * 8 * S).
+ Accessing the sparse vector is not very cache-friendly, but iterating
+ over the members in the set is cache-friendly because only the dense
+ vector is used. */
+
+/* Data Structure used for the SparseSet representation. */
+
+#define SPARSESET_ELT_TYPE unsigned int
+
+typedef struct sparseset_def
+{
+ SPARSESET_ELT_TYPE *dense; /* Dense array. */
+ SPARSESET_ELT_TYPE *sparse; /* Sparse array. */
+ SPARSESET_ELT_TYPE members; /* Number of elements. */
+ SPARSESET_ELT_TYPE size; /* Maximum number of elements. */
+ SPARSESET_ELT_TYPE iter; /* Iterator index. */
+ unsigned char iter_inc; /* Iteration increment amount. */
+ bool iterating;
+ SPARSESET_ELT_TYPE elms[2]; /* Combined dense and sparse arrays. */
+} *sparseset;
+
+#define sparseset_free(MAP) free(MAP)
+extern sparseset sparseset_alloc (SPARSESET_ELT_TYPE n_elms);
+extern void sparseset_clear_bit (sparseset, SPARSESET_ELT_TYPE);
+extern void sparseset_copy (sparseset, sparseset);
+extern void sparseset_and (sparseset, sparseset, sparseset);
+extern void sparseset_and_compl (sparseset, sparseset, sparseset);
+extern void sparseset_ior (sparseset, sparseset, sparseset);
+extern bool sparseset_equal_p (sparseset, sparseset);
+
+/* Operation: S = {}
+ Clear the set of all elements. */
+
+inline void
+sparseset_clear (sparseset s)
+{
+ s->members = 0;
+ s->iterating = false;
+}
+
+/* Return the number of elements currently in the set. */
+
+inline SPARSESET_ELT_TYPE
+sparseset_cardinality (sparseset s)
+{
+ return s->members;
+}
+
+/* Return the maximum number of elements this set can hold. */
+
+inline SPARSESET_ELT_TYPE
+sparseset_size (sparseset s)
+{
+ return s->size;
+}
+
+/* Return true if E is a member of the set S, otherwise return false. */
+
+inline bool
+sparseset_bit_p (sparseset s, SPARSESET_ELT_TYPE e)
+{
+ SPARSESET_ELT_TYPE idx;
+
+ gcc_checking_assert (e < s->size);
+
+ idx = s->sparse[e];
+
+ return idx < s->members && s->dense[idx] == e;
+}
+
+/* Low level insertion routine not meant for use outside of sparseset.[ch].
+ Assumes E is valid and not already a member of the set S. */
+
+inline void
+sparseset_insert_bit (sparseset s, SPARSESET_ELT_TYPE e, SPARSESET_ELT_TYPE idx)
+{
+ s->sparse[e] = idx;
+ s->dense[idx] = e;
+}
+
+/* Operation: S = S + {e}
+ Insert E into the set S, if it isn't already a member. */
+
+inline void
+sparseset_set_bit (sparseset s, SPARSESET_ELT_TYPE e)
+{
+ if (!sparseset_bit_p (s, e))
+ sparseset_insert_bit (s, e, s->members++);
+}
+
+/* Return and remove the last member added to the set S. */
+
+inline SPARSESET_ELT_TYPE
+sparseset_pop (sparseset s)
+{
+ SPARSESET_ELT_TYPE mem = s->members;
+
+ gcc_checking_assert (mem != 0);
+
+ s->members = mem - 1;
+ return s->dense[s->members];
+}
+
+inline void
+sparseset_iter_init (sparseset s)
+{
+ s->iter = 0;
+ s->iter_inc = 1;
+ s->iterating = true;
+}
+
+inline bool
+sparseset_iter_p (sparseset s)
+{
+ if (s->iterating && s->iter < s->members)
+ return true;
+ else
+ return s->iterating = false;
+}
+
+inline SPARSESET_ELT_TYPE
+sparseset_iter_elm (sparseset s)
+{
+ return s->dense[s->iter];
+}
+
+inline void
+sparseset_iter_next (sparseset s)
+{
+ s->iter += s->iter_inc;
+ s->iter_inc = 1;
+}
+
+#define EXECUTE_IF_SET_IN_SPARSESET(SPARSESET, ITER) \
+ for (sparseset_iter_init (SPARSESET); \
+ sparseset_iter_p (SPARSESET) \
+ && (((ITER) = sparseset_iter_elm (SPARSESET)) || 1); \
+ sparseset_iter_next (SPARSESET))
+
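+/* Illustrative sketch (not part of the upstream header): allocate a
+   set over a universe of 128 elements, add members, and enumerate
+   them via the dense vector:
+
+     sparseset s = sparseset_alloc (128);
+     unsigned int i;
+     sparseset_set_bit (s, 5);
+     sparseset_set_bit (s, 64);
+     EXECUTE_IF_SET_IN_SPARSESET (s, i)
+       printf ("%u\n", i);        // visits 5 and 64 in insertion order
+     sparseset_free (s);
+*/
+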
+#endif /* GCC_SPARSESET_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/spellcheck-tree.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/spellcheck-tree.h
new file mode 100644
index 0000000..8f1c3cc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/spellcheck-tree.h
@@ -0,0 +1,51 @@
+/* Find near-matches for identifiers.
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SPELLCHECK_TREE_H
+#define GCC_SPELLCHECK_TREE_H
+
+#include "spellcheck.h"
+
+/* spellcheck-tree.cc */
+
+extern edit_distance_t
+get_edit_distance (tree ident_s, tree ident_t);
+
+extern tree
+find_closest_identifier (tree target, const auto_vec<tree> *candidates);
+
+/* Specialization of edit_distance_traits for identifiers. */
+
+template <>
+struct edit_distance_traits<tree>
+{
+ static size_t get_length (tree id)
+ {
+ gcc_assert (TREE_CODE (id) == IDENTIFIER_NODE);
+ return IDENTIFIER_LENGTH (id);
+ }
+
+ static const char *get_string (tree id)
+ {
+ gcc_assert (TREE_CODE (id) == IDENTIFIER_NODE);
+ return IDENTIFIER_POINTER (id);
+ }
+};
+
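+/* Illustrative sketch (not part of the upstream header): suggest the
+   closest known identifier for a misspelled one:
+
+     auto_vec<tree> candidates;
+     candidates.safe_push (get_identifier ("color"));
+     candidates.safe_push (get_identifier ("colour"));
+     tree hit = find_closest_identifier (get_identifier ("collor"),
+                                         &candidates);
+*/
+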
+#endif /* GCC_SPELLCHECK_TREE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/spellcheck.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/spellcheck.h
new file mode 100644
index 0000000..cfa1b12
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/spellcheck.h
@@ -0,0 +1,229 @@
+/* Find near-matches for strings and identifiers.
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SPELLCHECK_H
+#define GCC_SPELLCHECK_H
+
+typedef unsigned int edit_distance_t;
+const edit_distance_t MAX_EDIT_DISTANCE = UINT_MAX;
+
+/* spellcheck.cc */
+extern edit_distance_t
+get_edit_distance (const char *s, int len_s,
+ const char *t, int len_t);
+
+extern edit_distance_t
+get_edit_distance (const char *s, const char *t);
+
+extern const char *
+find_closest_string (const char *target,
+ const auto_vec<const char *> *candidates);
+
+/* A traits class for describing a string-like type usable by
+ class best_match.
+ Specializations should provide the implementations of the following:
+
+ static size_t get_length (TYPE);
+ static const char *get_string (TYPE);
+
+ get_string should return a non-NULL ptr, which does not need to be
+ 0-terminated. */
+
+template <typename TYPE>
+struct edit_distance_traits {};
+
+/* Specialization of edit_distance_traits for C-style strings. */
+
+template <>
+struct edit_distance_traits<const char *>
+{
+ static size_t get_length (const char *str)
+ {
+ gcc_assert (str);
+ return strlen (str);
+ }
+
+ static const char *get_string (const char *str)
+ {
+ gcc_assert (str);
+ return str;
+ }
+};
+
+extern edit_distance_t get_edit_distance_cutoff (size_t goal_len,
+ size_t candidate_len);
+
+/* A type for use when determining the best match against a string,
+ expressed as a template so that we can match against various
+ string-like types (const char *, frontend identifiers, and preprocessor
+ macros).
+
+ This type accumulates the best possible match against GOAL_TYPE for
+ a sequence of elements of CANDIDATE_TYPE, whilst minimizing the
+ number of calls to get_edit_distance and to
+ edit_distance_traits<T>::get_length. */
+
+template <typename GOAL_TYPE, typename CANDIDATE_TYPE>
+class best_match
+{
+ public:
+ typedef GOAL_TYPE goal_t;
+ typedef CANDIDATE_TYPE candidate_t;
+ typedef edit_distance_traits<goal_t> goal_traits;
+ typedef edit_distance_traits<candidate_t> candidate_traits;
+
+ /* Constructor. */
+
+ best_match (GOAL_TYPE goal,
+ edit_distance_t best_distance_so_far = MAX_EDIT_DISTANCE)
+ : m_goal (goal_traits::get_string (goal)),
+ m_goal_len (goal_traits::get_length (goal)),
+ m_best_candidate (NULL),
+ m_best_distance (best_distance_so_far),
+ m_best_candidate_len (0)
+ {}
+
+ /* Compare the edit distance between CANDIDATE and m_goal,
+ and if it's the best so far, record it. */
+
+ void consider (candidate_t candidate)
+ {
+ size_t candidate_len = candidate_traits::get_length (candidate);
+
+ /* Calculate a lower bound on the candidate's distance to the goal,
+ based on the difference in lengths; it will require at least
+ this many insertions/deletions. */
+ edit_distance_t min_candidate_distance
+ = abs ((ssize_t)candidate_len - (ssize_t)m_goal_len);
+
+ /* If the candidate's length is sufficiently different to that
+ of the goal string, then the number of insertions/deletions
+ may be >= the best distance so far. If so, we can reject
+ the candidate immediately without needing to compute
+ the exact distance, since it won't be an improvement. */
+ if (min_candidate_distance >= m_best_distance)
+ return;
+
+ /* If the candidate will be unable to beat the criterion in
+ get_best_meaningful_candidate, reject it without computing
+ the exact distance. */
+ edit_distance_t cutoff = get_cutoff (candidate_len);
+ if (min_candidate_distance > cutoff)
+ return;
+
+ /* Otherwise, compute the distance and see if the candidate
+ has beaten the previous best value. */
+ const char *candidate_str = candidate_traits::get_string (candidate);
+ edit_distance_t dist
+ = get_edit_distance (m_goal, m_goal_len, candidate_str, candidate_len);
+
+ bool is_better = false;
+ if (dist < m_best_distance)
+ is_better = true;
+ else if (dist == m_best_distance)
+ {
+ /* Prefer a candidate that inserts a trailing '=',
+ so that for
+ "-ftrivial-auto-var-init"
+ we suggest
+ "-ftrivial-auto-var-init="
+ rather than
+ "-Wtrivial-auto-var-init". */
+	  /* That is, prefer the candidate that differs by a trailing '='. */
+ if (candidate_str[candidate_len - 1] == '='
+ && m_goal[m_goal_len - 1] != '=')
+ is_better = true;
+ }
+
+ if (is_better)
+ {
+ m_best_distance = dist;
+ m_best_candidate = candidate;
+ m_best_candidate_len = candidate_len;
+ }
+ }
+
+ /* Assuming that BEST_CANDIDATE is known to be better than
+ m_best_candidate, update (without recomputing the edit distance to
+ the goal). */
+
+ void set_best_so_far (CANDIDATE_TYPE best_candidate,
+ edit_distance_t best_distance,
+ size_t best_candidate_len)
+ {
+ gcc_assert (best_distance < m_best_distance);
+ m_best_candidate = best_candidate;
+ m_best_distance = best_distance;
+ m_best_candidate_len = best_candidate_len;
+ }
+
+ /* Generate the maximum edit distance for which we consider a suggestion
+ to be meaningful, given a candidate of length CANDIDATE_LEN. */
+
+ edit_distance_t get_cutoff (size_t candidate_len) const
+ {
+ return ::get_edit_distance_cutoff (m_goal_len, candidate_len);
+ }
+
+ /* Get the best candidate so far, but applying a filter to ensure
+ that we return NULL if none of the candidates are close to the goal,
+ to avoid offering nonsensical suggestions to the user. */
+
+ candidate_t get_best_meaningful_candidate () const
+ {
+ /* If the edit distance is too high, the suggestion is likely to be
+ meaningless. */
+ if (m_best_candidate)
+ {
+ edit_distance_t cutoff = get_cutoff (m_best_candidate_len);
+ if (m_best_distance > cutoff)
+ return NULL;
+ }
+
+ /* If the goal string somehow makes it into the candidate list, offering
+       it as a suggestion will be nonsensical, e.g.
+ 'constexpr' does not name a type; did you mean 'constexpr'?
+ Ultimately such suggestions are due to bugs in constructing the
+ candidate list, but as a band-aid, do not offer suggestions for
+ distance == 0 (where candidate == goal). */
+ if (m_best_distance == 0)
+ return NULL;
+
+ return m_best_candidate;
+ }
+
+ /* Get the closest candidate so far, without applying any filtering. */
+
+ candidate_t blithely_get_best_candidate () const
+ {
+ return m_best_candidate;
+ }
+
+ edit_distance_t get_best_distance () const { return m_best_distance; }
+ size_t get_best_candidate_length () const { return m_best_candidate_len; }
+
+ private:
+ const char *m_goal;
+ size_t m_goal_len;
+ candidate_t m_best_candidate;
+ edit_distance_t m_best_distance;
+ size_t m_best_candidate_len;
+};
+
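+/* Illustrative sketch (not part of the upstream header): accumulate
+   the best suggestion for a goal string over C-string candidates,
+   with the meaningfulness cutoff applied at the end:
+
+     best_match<const char *, const char *> bm ("fooo");
+     bm.consider ("foo");
+     bm.consider ("bar");
+     const char *hit = bm.get_best_meaningful_candidate ();  // likely "foo"
+*/
+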
+#endif /* GCC_SPELLCHECK_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/splay-tree-utils.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/splay-tree-utils.h
new file mode 100644
index 0000000..f0f795a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/splay-tree-utils.h
@@ -0,0 +1,491 @@
+// Splay tree utilities -*- C++ -*-
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+// Implement splay tree node accessors for a class that stores its
+// two child nodes in a member variable of the form:
+//
+// Node m_children[2];
+template<typename Node>
+class default_splay_tree_accessors
+{
+public:
+ using node_type = Node;
+
+ static auto
+ child (node_type node, unsigned int index)
+ -> decltype (node->m_children[index]) &
+ {
+ return node->m_children[index];
+ }
+};
+
+// Implement splay tree node accessors for a class that stores its
+// two child nodes in a member variable of the form:
+//
+// Node m_children[2];
+//
+// and also stores its parent node in a member variable of the form:
+//
+// Node m_parent;
+template<typename Node>
+class default_splay_tree_accessors_with_parent
+ : public default_splay_tree_accessors<Node>
+{
+public:
+ using node_type = Node;
+
+ static auto
+ parent (node_type node) -> decltype (node->m_parent) &
+ {
+ return node->m_parent;
+ }
+};
+
+// Base is a splay tree accessor class for nodes that have no parent field.
+// Base therefore provides a Base::child method but does not provide a
+// Base::parent method. Extend Base with dummy routines for setting the
+// parent, which are no-ops when the parent is not stored.
+template<typename Base>
+class splay_tree_accessors_without_parent : public Base
+{
+public:
+ using typename Base::node_type;
+
+ static void set_parent (node_type, node_type) {}
+};
+
+// Base is a splay tree accessor class for nodes that have a parent field.
+// Base therefore provides both Base::child and Base::parent methods.
+// Extend Base with routines for setting the parent.
+template<typename Base>
+class splay_tree_accessors_with_parent : public Base
+{
+public:
+ using typename Base::node_type;
+
+ // Record that NODE's parent is now NEW_PARENT.
+ static void
+ set_parent (node_type node, node_type new_parent)
+ {
+ Base::parent (node) = new_parent;
+ }
+};
+
+// A base class that provides some splay tree operations that are common
+// to both rooted_splay_tree and rootless_splay_tree.
+//
+// Nodes in the splay tree have type Accessors::node_type; this is
+// usually a pointer type. The Accessors class provides the following
+// static member functions for accessing nodes:
+//
+// - Accessors::child (NODE, INDEX)
+// INDEX is guaranteed to be 0 or 1. If INDEX is 0, return a reference
+// to where NODE's left child is stored, otherwise return a reference
+// to where NODE's right child is stored.
+//
+// - Accessors::set_parent (NODE, PARENT)
+// Record that NODE's parent node is now PARENT.
+template<typename Accessors>
+class base_splay_tree : protected Accessors
+{
+public:
+ using typename Accessors::node_type;
+
+ // INDEX is either 0 or 1. If INDEX is 0, insert CHILD immediately
+ // before NODE, otherwise insert CHILD immediately after NODE.
+ //
+ // Complexity: O(1).
+ static void insert_child (node_type node, unsigned int index,
+ node_type child);
+
+ // Print NODE and its child nodes to PP for debugging purposes,
+ // using PRINTER (PP, N) to print the data for node N.
+ template<typename Printer>
+ static void print (pretty_printer *pp, node_type node, Printer printer);
+
+protected:
+ using Accessors::set_parent;
+
+ static node_type get_child (node_type, unsigned int);
+ static void set_child (node_type, unsigned int, node_type);
+ static node_type promote_child (node_type, unsigned int);
+ static void promote_child (node_type, unsigned int, node_type);
+
+ template<unsigned int N>
+ static node_type splay_limit (node_type);
+
+ static node_type remove_node_internal (node_type);
+
+ template<typename Printer>
+ static void print (pretty_printer *pp, node_type node, Printer printer,
+ char, vec<char> &);
+};
+
+// This class provides splay tree routines for cases in which the root
+// of the splay tree is known. It works with both nodes that store
+// their parent node and nodes that don't.
+//
+// The class is lightweight: it only contains a single root node.
+template<typename Accessors>
+class rooted_splay_tree : public base_splay_tree<Accessors>
+{
+ using parent = base_splay_tree<Accessors>;
+
+public:
+ using typename Accessors::node_type;
+
+protected:
+ // The root of the splay tree, or node_type () if the tree is empty.
+ node_type m_root;
+
+public:
+ rooted_splay_tree () : m_root () {}
+
+ // Construct a tree with the specified root node.
+ rooted_splay_tree (node_type root) : m_root (root) {}
+
+ // Return the root of the tree.
+ node_type root () const { return m_root; }
+
+ // Return true if the tree contains any nodes.
+ explicit operator bool () const { return m_root; }
+
+ // Dereference the root node.
+ node_type operator-> () { return m_root; }
+
+ // Insert NEW_NODE into the splay tree, if no equivalent node already
+ // exists. For a given node N, COMPARE (N) should return:
+ //
+ // - a negative value if NEW_NODE should come before N
+ // - zero if NEW_NODE and N are the same
+ // - a positive value if NEW_NODE should come after N
+ //
+ // Return true if NEW_NODE was inserted.
+ //
+ // On return, NEW_NODE or its equivalent is the root of the tree.
+ //
+  // Complexity: amortized O(C log N), worst-case O(C N), where C is
+ // the complexity of the comparison.
+ template<typename Comparator>
+ bool insert (node_type new_node, Comparator compare);
+
+ // Insert NEW_NODE into the splay tree, given that NEW_NODE is the
+ // maximum node of the new tree. On return, NEW_NODE is also the
+ // root of the tree.
+ //
+ // Complexity: O(1).
+ void insert_max_node (node_type new_node);
+
+ // Splice NEXT_TREE onto this one, given that all nodes in NEXT_TREE
+ // are greater than the maximum node in this tree. NEXT_TREE should
+ // not be used afterwards.
+ //
+ // Complexity: O(1) if the root of the splay tree is already the maximum
+  // node.  Otherwise amortized O(log N), worst-case O(N).
+ void splice_next_tree (rooted_splay_tree next_tree);
+
+ // The root of the tree is currently the maximum node. Replace it
+ // with NEW_NODE.
+ //
+ // Complexity: O(1).
+ void replace_max_node_at_root (node_type new_node);
+
+ // Remove the root node of the splay tree.
+ //
+ // Complexity: O(1) if removing the maximum or minimum node.
+  // Otherwise amortized O(log N), worst-case O(N).
+ void remove_root ();
+
+ // Split the left child of the current root out into a separate tree
+ // and return the new tree.
+ rooted_splay_tree split_before_root ();
+
+ // Split the right child of the current root out into a separate tree
+ // and return the new tree.
+ rooted_splay_tree split_after_root ();
+
+ // If the root is not the minimum node of the splay tree, bring the previous
+ // node to the root and return true, otherwise return false.
+ //
+  // Complexity: amortized O(log N), worst-case O(N).
+ bool splay_prev_node ();
+
+ // If the root is not the maximum node of the splay tree, bring the next
+ // node to the root and return true, otherwise return false.
+ //
+  // Complexity: amortized O(log N), worst-case O(N).
+ bool splay_next_node ();
+
+ // Bring the minimum node of the splay tree to the root.
+ //
+  // Complexity: amortized O(log N), worst-case O(N).
+ void splay_min_node ();
+
+ // Bring the maximum node of the splay tree to the root.
+ //
+  // Complexity: amortized O(log N), worst-case O(N).
+ void splay_max_node ();
+
+ // Return the minimum node of the splay tree, or node_type () if the
+ // tree is empty. On return, the minimum node (if any) is also the
+ // root of the tree.
+ //
+  // Complexity: amortized O(log N), worst-case O(N).
+ node_type min_node ();
+
+ // Return the maximum node of the splay tree, or node_type () if the
+ // tree is empty. On return, the maximum node (if any) is also the
+ // root of the tree.
+ //
+  // Complexity: amortized O(log N), worst-case O(N).
+ node_type max_node ();
+
+ // Search the splay tree. For a given node N, COMPARE (N) should return:
+ //
+ // - a negative value if N is bigger than the node being searched for
+ // - zero if N is the node being searched for
+ // - a positive value if N is smaller than the node being searched for
+ //
+ // If the node that COMPARE is looking for exists, install it as the root
+ // node of the splay tree. Otherwise, arbitrarily pick either:
+ //
+  // - the maximum node that is smaller than the node being searched for, or
+ // - the minimum node that is bigger than the node being searched for
+ //
+ // and install that node as the root instead.
+ //
+ // Return the result of COMPARE for the new root.
+ //
+ // This form of lookup is intended for cases in which both the following
+ // are true:
+ //
+ // (a) The work that COMPARE needs to do to detect if a node is too big
+ // is the same as the work that COMPARE needs to do to detect if a
+ // node is too small. (This is not true of range comparisons,
+ // for example.)
+ //
+ // (b) COMPARE is (or might be) relatively complex.
+ //
+ // This form of lookup is also useful if the items being compared naturally
+ // provide a <=>-style comparison result, without the result having to be
+ // forced by the equivalent of a ?: expression.
+ //
+ // The implementation only invokes COMPARE once per node.
+ //
+  // Complexity: amortized O(C log N), worst-case O(C N), where C is
+ // the complexity of the comparison.
+ template<typename Comparator>
+ auto lookup (Comparator compare) -> decltype (compare (m_root));
+
+ // Search the splay tree. For a given node N, WANT_SOMETHING_SMALLER (N)
+ // is true if N is too big and WANT_SOMETHING_BIGGER (N) is true if N
+ // is too small. Both functions return false if N is the node being
+ // searched for.
+ //
+ // If the node that is being searched for exists, install it as the root
+ // node of the splay tree and return 0. Otherwise, arbitrarily choose
+ // between these two options:
+ //
+ // - Install the maximum node that is smaller than the node being
+ // searched for as the root of the splay tree and return 1.
+ //
+ // - Install the minimum node that is bigger than the node being
+ // searched for and return -1.
+ //
+ // This form of lookup is intended for cases in which either of the
+ // following are true:
+ //
+ // (a) WANT_SOMETHING_SMALLER and WANT_SOMETHING_BIGGER test different
+ // parts of the node's data. For example, when comparing ranges,
+ // WANT_SOMETHING_SMALLER would test the lower limit of the given
+ // node's range while WANT_SOMETHING_BIGGER would test the upper
+ // limit of the given node's range.
+ //
+ // (b) There is no significant overhead to calling both
+ // WANT_SOMETHING_SMALLER and WANT_SOMETHING_BIGGER for the same node.
+ //
+  // Complexity: amortized O(C log N), worst-case O(C N), where C is
+ // the complexity of the comparisons.
+ template<typename LeftPredicate, typename RightPredicate>
+ int lookup (LeftPredicate want_something_smaller,
+ RightPredicate want_something_bigger);
+
+ // Keep the ability to print subtrees.
+ using parent::print;
+
+ // Print the tree to PP for debugging purposes, using PRINTER (PP, N)
+ // to print the data for node N.
+ template<typename Printer>
+ void print (pretty_printer *pp, Printer printer) const;
+
+protected:
+ using parent::get_child;
+ using parent::set_child;
+ using parent::promote_child;
+
+ using parent::set_parent;
+
+ template<unsigned int N>
+ bool splay_neighbor ();
+};
+
+// Provide splay tree routines for nodes of type Accessors::node_type,
+// which doesn't have a parent field. Use Accessors::child to access
+// the children of a node.
+template<typename Accessors>
+using splay_tree_without_parent
+ = rooted_splay_tree<splay_tree_accessors_without_parent<Accessors>>;
+
+// A splay tree for nodes of type Node, which is usually a pointer type.
+// The child nodes are stored in a member variable:
+//
+// Node m_children[2];
+//
+// Node does not have a parent field.
+template<typename Node>
+using default_splay_tree
+ = splay_tree_without_parent<default_splay_tree_accessors<Node>>;
+
+// A simple splay tree node that stores a value of type T.
+template<typename T>
+class splay_tree_node
+{
+ friend class default_splay_tree_accessors<splay_tree_node *>;
+
+public:
+ splay_tree_node () = default;
+ splay_tree_node (T value) : m_value (value), m_children () {}
+
+ T &value () { return m_value; }
+ const T &value () const { return m_value; }
+
+private:
+ T m_value;
+ splay_tree_node *m_children[2];
+};
+
+// A splay tree whose nodes hold values of type T.
+template<typename T>
+using splay_tree = default_splay_tree<splay_tree_node<T> *>;
+
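+// Illustrative sketch (not part of the upstream header): a
+// splay_tree<int> keyed on the stored value, using the <=>-style
+// comparators described for insert and lookup above:
+//
+//   splay_tree<int> t;
+//   int v = 42;
+//   t.insert (new splay_tree_node<int> (v),
+//             [&] (splay_tree_node<int> *n) { return v - n->value (); });
+//   bool found = (t.lookup ([&] (splay_tree_node<int> *n)
+//                           { return v - n->value (); }) == 0);
+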
+// Provide splay tree routines for cases in which the root of the tree
+// is not explicitly stored.
+//
+// The nodes of the tree have type Accessors::node_type, which is usually
+// a pointer type. The nodes have a link back to their parent.
+//
+// The Accessors class provides the following static member functions:
+//
+// - Accessors::child (NODE, INDEX)
+// INDEX is guaranteed to be 0 or 1. If INDEX is 0, return a reference
+// to where NODE's left child is stored, otherwise return a reference
+// to where NODE's right child is stored.
+//
+// - Accessors::parent (NODE)
+// Return a reference to where NODE's parent is stored.
+template<typename Accessors>
+class rootless_splay_tree
+ : public base_splay_tree<splay_tree_accessors_with_parent<Accessors>>
+{
+ using full_accessors = splay_tree_accessors_with_parent<Accessors>;
+ using parent = base_splay_tree<full_accessors>;
+
+public:
+ using rooted = rooted_splay_tree<full_accessors>;
+
+ using typename Accessors::node_type;
+
+ // Remove NODE from the splay tree. Return the node that replaces it,
+ // or null if NODE had no children.
+ //
+ // Complexity: O(1) if removing the maximum or minimum node.
+  // Otherwise amortized O(log N), worst-case O(N).
+ static node_type remove_node (node_type node);
+
+ // Splay NODE so that it becomes the root of the splay tree.
+ //
+  // Complexity: amortized O(log N), worst-case O(N).
+ static void splay (node_type node);
+
+ // Like splay, but take advantage of the fact that NODE is known to be
+ // the minimum node in the tree.
+ //
+  // Complexity: amortized O(log N), worst-case O(N).
+ static void splay_known_min_node (node_type node);
+
+ // Like splay, but take advantage of the fact that NODE is known to be
+ // the maximum node in the tree.
+ //
+  // Complexity: amortized O(log N), worst-case O(N).
+ static void splay_known_max_node (node_type node);
+
+ // Splay NODE while looking for an ancestor node N for which PREDICATE (N)
+ // is true. If such an ancestor node exists, stop the splay operation
+ // early and return PREDICATE (N). Otherwise, complete the splay operation
+ // and return DEFAULT_RESULT. In the latter case, NODE is now the root of
+ // the splay tree.
+ //
+ // Note that this routine only examines nodes that happen to be ancestors
+ // of NODE. It does not search the full tree.
+ //
+  // Complexity: amortized O(P log N), worst-case O(P N), where P is the
+ // complexity of the predicate.
+ template<typename DefaultResult, typename Predicate>
+ static auto splay_and_search (node_type node, DefaultResult default_result,
+ Predicate predicate)
+ -> decltype (predicate (node, 0));
+
+ // NODE1 and NODE2 are known to belong to the same splay tree. Return:
+ //
+ // -1 if NODE1 < NODE2
+ // 0 if NODE1 == NODE2
+ // 1 if NODE1 > NODE2
+ //
+  // Complexity: amortized O(log N), worst-case O(N).
+ static int compare_nodes (node_type node1, node_type node2);
+
+protected:
+ using parent::get_child;
+ using parent::set_child;
+ using parent::promote_child;
+
+ static node_type get_parent (node_type);
+ using parent::set_parent;
+
+ static unsigned int child_index (node_type, node_type);
+
+ static int compare_nodes_one_way (node_type, node_type);
+
+ template<unsigned int N>
+ static void splay_known_limit (node_type);
+};
+
+// Provide rootless splay tree routines for nodes of type Node.
+// The child nodes are stored in a member variable:
+//
+// Node m_children[2];
+//
+// and the parent node is stored in a member variable:
+//
+// Node m_parent;
+template<typename Node>
+using default_rootless_splay_tree
+ = rootless_splay_tree<default_splay_tree_accessors_with_parent<Node>>;
+
+#include "splay-tree-utils.tcc"
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/splay-tree.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/splay-tree.h
new file mode 100644
index 0000000..540f6c9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/splay-tree.h
@@ -0,0 +1,165 @@
+/* A splay-tree datatype.
+ Copyright (C) 1998-2023 Free Software Foundation, Inc.
+ Contributed by Mark Mitchell (mark@markmitchell.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING. If not, write to
+ the Free Software Foundation, 51 Franklin Street - Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* For an easily readable description of splay-trees, see:
+
+ Lewis, Harry R. and Denenberg, Larry. Data Structures and Their
+ Algorithms. Harper-Collins, Inc. 1991.
+
+ The major feature of splay trees is that all basic tree operations
+ are amortized O(log n) time for a tree with n nodes. */
+
+#ifndef _SPLAY_TREE_H
+#define _SPLAY_TREE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "ansidecl.h"
+
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+#include <inttypes.h>
+#endif
+
+/* Use typedefs for the key and data types to facilitate changing
+ these types, if necessary. These types should be sufficiently wide
+ that any pointer or scalar can be cast to these types, and then
+ cast back, without loss of precision. */
+typedef uintptr_t splay_tree_key;
+typedef uintptr_t splay_tree_value;
+
+/* Forward declaration for a node in the tree. */
+typedef struct splay_tree_node_s *splay_tree_node;
+
+/* The type of a function which compares two splay-tree keys. The
+ function should return values as for qsort. */
+typedef int (*splay_tree_compare_fn) (splay_tree_key, splay_tree_key);
+
+/* The type of a function used to deallocate any resources associated
+ with the key. If you provide this function, the splay tree
+   will take ownership of the memory of the splay_tree_key arg
+ of splay_tree_insert. This function is called to release the keys
+ present in the tree when calling splay_tree_delete or splay_tree_remove.
+ If splay_tree_insert is called with a key equal to a key already
+ present in the tree, the old key and old value will be released. */
+typedef void (*splay_tree_delete_key_fn) (splay_tree_key);
+
+/* The type of a function used to deallocate any resources associated
+ with the value. If you provide this function, the memory of the
+ splay_tree_value arg of splay_tree_insert is managed similarly to
+ the splay_tree_key memory: see splay_tree_delete_key_fn. */
+typedef void (*splay_tree_delete_value_fn) (splay_tree_value);
+
+/* The type of a function used to iterate over the tree. */
+typedef int (*splay_tree_foreach_fn) (splay_tree_node, void*);
+
+/* The type of a function used to allocate memory for tree root and
+ node structures. The first argument is the number of bytes needed;
+ the second is a data pointer the splay tree functions pass through
+ to the allocator. This function must never return zero. */
+typedef void *(*splay_tree_allocate_fn) (int, void *);
+
+/* The type of a function used to free memory allocated using the
+ corresponding splay_tree_allocate_fn. The first argument is the
+   memory to be freed; the second is a data pointer the splay tree
+ functions pass through to the freer. */
+typedef void (*splay_tree_deallocate_fn) (void *, void *);
+
+/* The nodes in the splay tree. */
+struct splay_tree_node_s {
+ /* The key. */
+ splay_tree_key key;
+
+ /* The value. */
+ splay_tree_value value;
+
+ /* The left and right children, respectively. */
+ splay_tree_node left;
+ splay_tree_node right;
+};
+
+/* The splay tree itself. */
+struct splay_tree_s {
+ /* The root of the tree. */
+ splay_tree_node root;
+
+  /* The comparison function. */
+ splay_tree_compare_fn comp;
+
+ /* The deallocate-key function. NULL if no cleanup is necessary. */
+ splay_tree_delete_key_fn delete_key;
+
+ /* The deallocate-value function. NULL if no cleanup is necessary. */
+ splay_tree_delete_value_fn delete_value;
+
+ /* Node allocate function. Takes allocate_data as a parameter. */
+ splay_tree_allocate_fn allocate;
+
+ /* Free function for nodes and trees. Takes allocate_data as a parameter. */
+ splay_tree_deallocate_fn deallocate;
+
+ /* Parameter for allocate/free functions. */
+ void *allocate_data;
+};
+
+typedef struct splay_tree_s *splay_tree;
+
+extern splay_tree splay_tree_new (splay_tree_compare_fn,
+ splay_tree_delete_key_fn,
+ splay_tree_delete_value_fn);
+extern splay_tree splay_tree_new_with_allocator (splay_tree_compare_fn,
+ splay_tree_delete_key_fn,
+ splay_tree_delete_value_fn,
+ splay_tree_allocate_fn,
+ splay_tree_deallocate_fn,
+ void *);
+extern splay_tree splay_tree_new_typed_alloc (splay_tree_compare_fn,
+ splay_tree_delete_key_fn,
+ splay_tree_delete_value_fn,
+ splay_tree_allocate_fn,
+ splay_tree_allocate_fn,
+ splay_tree_deallocate_fn,
+ void *);
+extern void splay_tree_delete (splay_tree);
+extern splay_tree_node splay_tree_insert (splay_tree,
+ splay_tree_key,
+ splay_tree_value);
+extern void splay_tree_remove (splay_tree, splay_tree_key);
+extern splay_tree_node splay_tree_lookup (splay_tree, splay_tree_key);
+extern splay_tree_node splay_tree_predecessor (splay_tree, splay_tree_key);
+extern splay_tree_node splay_tree_successor (splay_tree, splay_tree_key);
+extern splay_tree_node splay_tree_max (splay_tree);
+extern splay_tree_node splay_tree_min (splay_tree);
+extern int splay_tree_foreach (splay_tree, splay_tree_foreach_fn, void*);
+extern int splay_tree_compare_ints (splay_tree_key, splay_tree_key);
+extern int splay_tree_compare_pointers (splay_tree_key, splay_tree_key);
+extern int splay_tree_compare_strings (splay_tree_key, splay_tree_key);
+extern void splay_tree_delete_pointers (splay_tree_value);
+
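+/* Illustrative sketch (not part of the upstream header): an
+   integer-keyed map using the default allocator and no key/value
+   cleanup functions:
+
+     splay_tree map = splay_tree_new (splay_tree_compare_ints,
+                                      NULL, NULL);
+     splay_tree_insert (map, (splay_tree_key) 7, (splay_tree_value) 13);
+     splay_tree_node n = splay_tree_lookup (map, (splay_tree_key) 7);
+     splay_tree_delete (map);
+*/
+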
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _SPLAY_TREE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sreal.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sreal.h
new file mode 100644
index 0000000..8700807
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sreal.h
@@ -0,0 +1,285 @@
+/* Definitions for simple data type for real numbers.
+ Copyright (C) 2002-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SREAL_H
+#define GCC_SREAL_H
+
+#define SREAL_PART_BITS 31
+
+#define UINT64_BITS 64
+
+#define SREAL_MIN_SIG ((int64_t) 1 << (SREAL_PART_BITS - 2))
+#define SREAL_MAX_SIG (((int64_t) 1 << (SREAL_PART_BITS - 1)) - 1)
+#define SREAL_MAX_EXP (INT_MAX / 4)
+
+#define SREAL_BITS SREAL_PART_BITS
+
+#define SREAL_SIGN(v) ((v) < 0 ? -1 : 1)
+#define SREAL_ABS(v) ((v) < 0 ? -(v) : (v))
+
+struct output_block;
+class lto_input_block;
+
+/* Structure for holding a simple real number. */
+class sreal
+{
+public:
+ /* Construct an uninitialized sreal. */
+ sreal () : m_sig (-1), m_exp (-1) {}
+
+ /* Construct a sreal. */
+ sreal (int64_t sig, int exp = 0)
+ {
+ normalize (sig, exp);
+ }
+
+ void dump (FILE *) const;
+ int64_t to_int () const;
+ double to_double () const;
+ void stream_out (struct output_block *);
+ static sreal stream_in (class lto_input_block *);
+ sreal operator+ (const sreal &other) const;
+ sreal operator- (const sreal &other) const;
+ sreal operator* (const sreal &other) const;
+ sreal operator/ (const sreal &other) const;
+
+ bool operator< (const sreal &other) const
+ {
+ if (m_exp == other.m_exp)
+ return m_sig < other.m_sig;
+ else
+ {
+ bool negative = m_sig < 0;
+ bool other_negative = other.m_sig < 0;
+
+ if (negative != other_negative)
+ return negative > other_negative;
+
+ bool r = m_exp < other.m_exp;
+ return negative ? !r : r;
+ }
+ }
+
+ bool operator== (const sreal &other) const
+ {
+ return m_exp == other.m_exp && m_sig == other.m_sig;
+ }
+
+ sreal operator- () const
+ {
+ sreal tmp = *this;
+ tmp.m_sig *= -1;
+
+ return tmp;
+ }
+
+ sreal shift (int s) const
+ {
+ /* Zero needs no shifting. */
+ if (!m_sig)
+ return *this;
+ gcc_checking_assert (s <= SREAL_MAX_EXP);
+ gcc_checking_assert (s >= -SREAL_MAX_EXP);
+
+    /* Overflow or dropping to 0 could be handled gracefully, but hopefully
+       we do not need to do so. */
+ gcc_checking_assert (m_exp + s <= SREAL_MAX_EXP);
+ gcc_checking_assert (m_exp + s >= -SREAL_MAX_EXP);
+
+ sreal tmp = *this;
+ tmp.m_exp += s;
+
+ return tmp;
+ }
+
+ /* Global minimum sreal can hold. */
+ inline static sreal min ()
+ {
+ sreal min;
+ /* This never needs normalization. */
+ min.m_sig = -SREAL_MAX_SIG;
+ min.m_exp = SREAL_MAX_EXP;
+ return min;
+ }
+
+  /* Global maximum sreal can hold. */
+ inline static sreal max ()
+ {
+ sreal max;
+ /* This never needs normalization. */
+ max.m_sig = SREAL_MAX_SIG;
+ max.m_exp = SREAL_MAX_EXP;
+ return max;
+ }
+
+private:
+ inline void normalize (int64_t new_sig, signed int new_exp);
+ inline void normalize_up (int64_t new_sig, signed int new_exp);
+ inline void normalize_down (int64_t new_sig, signed int new_exp);
+ void shift_right (int amount);
+ static sreal signedless_plus (const sreal &a, const sreal &b, bool negative);
+ static sreal signedless_minus (const sreal &a, const sreal &b, bool negative);
+
+  int32_t m_sig;			/* Significand. */
+ signed int m_exp; /* Exponent. */
+};
+
+extern void debug (const sreal &ref);
+extern void debug (const sreal *ptr);
+
+inline sreal &operator+= (sreal &a, const sreal &b)
+{
+ return a = a + b;
+}
+
+inline sreal &operator-= (sreal &a, const sreal &b)
+{
+ return a = a - b;
+}
+
+inline sreal &operator/= (sreal &a, const sreal &b)
+{
+ return a = a / b;
+}
+
+inline sreal &operator*= (sreal &a, const sreal &b)
+{
+ return a = a * b;
+}
+
+inline bool operator!= (const sreal &a, const sreal &b)
+{
+ return !(a == b);
+}
+
+inline bool operator> (const sreal &a, const sreal &b)
+{
+ return !(a == b || a < b);
+}
+
+inline bool operator<= (const sreal &a, const sreal &b)
+{
+ return a < b || a == b;
+}
+
+inline bool operator>= (const sreal &a, const sreal &b)
+{
+ return a == b || a > b;
+}
+
+inline sreal operator<< (const sreal &a, int exp)
+{
+ return a.shift (exp);
+}
+
+inline sreal operator>> (const sreal &a, int exp)
+{
+ return a.shift (-exp);
+}
+
+/* Make the significand be >= SREAL_MIN_SIG.
+
+   This is a separate method so the inliner can handle the hot path better. */
+
+inline void
+sreal::normalize_up (int64_t new_sig, signed int new_exp)
+{
+ unsigned HOST_WIDE_INT sig = absu_hwi (new_sig);
+ int shift = SREAL_PART_BITS - 2 - floor_log2 (sig);
+
+ gcc_checking_assert (shift > 0);
+ sig <<= shift;
+ new_exp -= shift;
+ gcc_checking_assert (sig <= SREAL_MAX_SIG && sig >= SREAL_MIN_SIG);
+
+ /* Check underflow. */
+ if (new_exp < -SREAL_MAX_EXP)
+ {
+ new_exp = -SREAL_MAX_EXP;
+ sig = 0;
+ }
+ m_exp = new_exp;
+ if (SREAL_SIGN (new_sig) == -1)
+ m_sig = -sig;
+ else
+ m_sig = sig;
+}
+
+/* Make the significand be <= SREAL_MAX_SIG.
+
+   This is a separate method so the inliner can handle the hot path better. */
+
+inline void
+sreal::normalize_down (int64_t new_sig, signed int new_exp)
+{
+ int last_bit;
+ unsigned HOST_WIDE_INT sig = absu_hwi (new_sig);
+ int shift = floor_log2 (sig) - SREAL_PART_BITS + 2;
+
+ gcc_checking_assert (shift > 0);
+ last_bit = (sig >> (shift-1)) & 1;
+ sig >>= shift;
+ new_exp += shift;
+ gcc_checking_assert (sig <= SREAL_MAX_SIG && sig >= SREAL_MIN_SIG);
+
+ /* Round the number. */
+ sig += last_bit;
+ if (sig > SREAL_MAX_SIG)
+ {
+ sig >>= 1;
+ new_exp++;
+ }
+
+ /* Check overflow. */
+ if (new_exp > SREAL_MAX_EXP)
+ {
+ new_exp = SREAL_MAX_EXP;
+ sig = SREAL_MAX_SIG;
+ }
+ m_exp = new_exp;
+ if (SREAL_SIGN (new_sig) == -1)
+ m_sig = -sig;
+ else
+ m_sig = sig;
+}
+
+/* Normalize *this; the hot path. */
+
+inline void
+sreal::normalize (int64_t new_sig, signed int new_exp)
+{
+ unsigned HOST_WIDE_INT sig = absu_hwi (new_sig);
+
+ if (sig == 0)
+ {
+ m_sig = 0;
+ m_exp = -SREAL_MAX_EXP;
+ }
+ else if (sig > SREAL_MAX_SIG)
+ normalize_down (new_sig, new_exp);
+ else if (sig < SREAL_MIN_SIG)
+ normalize_up (new_sig, new_exp);
+ else
+ {
+ m_sig = new_sig;
+ m_exp = new_exp;
+ }
+}
+
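+/* Illustrative sketch (not part of the upstream header): an sreal is
+   sig * 2^exp, so fractional constants are built from a significand
+   and a negative exponent:
+
+     sreal half (1, -1);                // 0.5
+     sreal x = sreal (3) * half;        // 1.5
+     int64_t i = (x + half).to_int ();  // 2, exactly representable
+*/
+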
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ssa-iterators.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ssa-iterators.h
new file mode 100644
index 0000000..3aeb89b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ssa-iterators.h
@@ -0,0 +1,1013 @@
+/* Header file for SSA iterators.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SSA_ITERATORS_H
+#define GCC_SSA_ITERATORS_H
+
+/* Immediate use lists are used to directly access all uses for an SSA
+ name and get pointers to the statement for each use.
+
+ The structure ssa_use_operand_t consists of PREV and NEXT pointers
+ that maintain the list, a USE pointer, which points to the address
+ where the use is located, and a LOC pointer, which points either to
+ the statement where the use is located or, in the case of the root
+ node, to the SSA name itself.
+
+ The list is anchored by an occurrence of ssa_operand_d *in* the
+ ssa_name node itself (named 'imm_uses'). This node is uniquely
+ identified by having a NULL USE pointer and the LOC pointer
+ pointing back to the ssa_name node itself. This node forms the
+ base for a circular list, and initially this is the only node in
+ the list.
+
+ Fast iteration allows each use to be examined, but does not allow
+ any modifications to the uses or stmts.
+
+ Normal iteration allows insertion, deletion, and modification.  The
+ iterator manages this by inserting a marker node into the list
+ immediately before the node currently being examined in the list.
+ This marker node is uniquely identified by having a null stmt *and* a
+ null use pointer.
+
+ When iterating to the next use, the iteration routines check to see
+ if the node after the marker has changed.  If it has, then the node
+ following the marker is now the next one to be visited.  If not, the
+ marker node is moved past that node in the list (visualize it as
+ bumping the marker node through the list).  This continues until
+ the marker node is moved to the original anchor position.  The
+ marker node is then removed from the list.
+
+ If iteration is halted early, the marker node must be removed from
+ the list before continuing. */
+struct imm_use_iterator
+{
+ /* This is the current use the iterator is processing. */
+ ssa_use_operand_t *imm_use;
+  /* This marks the last use in the list (use node from SSA_NAME).  */
+ ssa_use_operand_t *end_p;
+ /* This node is inserted and used to mark the end of the uses for a stmt. */
+ ssa_use_operand_t iter_node;
+ /* This is the next ssa_name to visit. IMM_USE may get removed before
+ the next one is traversed to, so it must be cached early. */
+ ssa_use_operand_t *next_imm_name;
+};
+
+
+/* Use this iterator when simply looking at stmts. Adding, deleting or
+ modifying stmts will cause this iterator to malfunction. */
+
+#define FOR_EACH_IMM_USE_FAST(DEST, ITER, SSAVAR) \
+ for ((DEST) = first_readonly_imm_use (&(ITER), (SSAVAR)); \
+ !end_readonly_imm_use_p (&(ITER)); \
+ (void) ((DEST) = next_readonly_imm_use (&(ITER))))
+
+/* Forward declare for use in the class below. */
+inline void end_imm_use_stmt_traverse (imm_use_iterator *);
+
+/* Arrange to automatically call end_imm_use_stmt_traverse, upon destruction,
+   with a given pointer to an imm_use_iterator. */
+struct auto_end_imm_use_stmt_traverse
+{
+ imm_use_iterator *imm;
+ auto_end_imm_use_stmt_traverse (imm_use_iterator *imm)
+ : imm (imm) {}
+ ~auto_end_imm_use_stmt_traverse ()
+ { end_imm_use_stmt_traverse (imm); }
+};
+
+/* Use this iterator to visit each stmt which has a use of SSAVAR. The
+ destructor of the auto_end_imm_use_stmt_traverse object deals with removing
+ ITER from SSAVAR's IMM_USE list even when leaving the scope early. */
+
+#define FOR_EACH_IMM_USE_STMT(STMT, ITER, SSAVAR) \
+ for (struct auto_end_imm_use_stmt_traverse \
+ auto_end_imm_use_stmt_traverse \
+ ((((STMT) = first_imm_use_stmt (&(ITER), (SSAVAR))), \
+ &(ITER))); \
+ !end_imm_use_stmt_p (&(ITER)); \
+ (void) ((STMT) = next_imm_use_stmt (&(ITER))))
+
+/* Use this iterator in combination with FOR_EACH_IMM_USE_STMT to
+ get access to each occurrence of ssavar on the stmt returned by
+   that iterator.  For instance:
+
+ FOR_EACH_IMM_USE_STMT (stmt, iter, ssavar)
+ {
+ FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+ {
+ SET_USE (use_p, blah);
+ }
+ update_stmt (stmt);
+ } */
+
+#define FOR_EACH_IMM_USE_ON_STMT(DEST, ITER) \
+ for ((DEST) = first_imm_use_on_stmt (&(ITER)); \
+ !end_imm_use_on_stmt_p (&(ITER)); \
+ (void) ((DEST) = next_imm_use_on_stmt (&(ITER))))
+
+
+
+extern bool single_imm_use_1 (const ssa_use_operand_t *head,
+ use_operand_p *use_p, gimple **stmt);
+
+
+enum ssa_op_iter_type {
+ ssa_op_iter_none = 0,
+ ssa_op_iter_tree,
+ ssa_op_iter_use,
+ ssa_op_iter_def
+};
+
+/* This structure is used in the operand iterator loops. It contains the
+ items required to determine which operand is retrieved next. During
+ optimization, this structure is scalarized, and any unused fields are
+ optimized away, resulting in little overhead. */
+
+struct ssa_op_iter
+{
+ enum ssa_op_iter_type iter_type;
+ bool done;
+ int flags;
+ unsigned i;
+ unsigned numops;
+ use_optype_p uses;
+ gimple *stmt;
+};
+
+/* NOTE: Keep these in sync with doc/tree-ssa.texi. */
+/* These flags are used to determine which operands are returned during
+ execution of the loop. */
+#define SSA_OP_USE 0x01 /* Real USE operands. */
+#define SSA_OP_DEF 0x02 /* Real DEF operands. */
+#define SSA_OP_VUSE 0x04 /* VUSE operands. */
+#define SSA_OP_VDEF 0x08 /* VDEF operands. */
+/* These are commonly grouped operand flags. */
+#define SSA_OP_VIRTUAL_USES (SSA_OP_VUSE)
+#define SSA_OP_VIRTUAL_DEFS (SSA_OP_VDEF)
+#define SSA_OP_ALL_VIRTUALS (SSA_OP_VIRTUAL_USES | SSA_OP_VIRTUAL_DEFS)
+#define SSA_OP_ALL_USES (SSA_OP_VIRTUAL_USES | SSA_OP_USE)
+#define SSA_OP_ALL_DEFS (SSA_OP_VIRTUAL_DEFS | SSA_OP_DEF)
+#define SSA_OP_ALL_OPERANDS (SSA_OP_ALL_USES | SSA_OP_ALL_DEFS)
+
+/* This macro executes a loop over the operands of STMT specified in FLAGS,
+ returning each operand as a 'tree' in the variable TREEVAR. ITER is an
+ ssa_op_iter structure used to control the loop. */
+#define FOR_EACH_SSA_TREE_OPERAND(TREEVAR, STMT, ITER, FLAGS) \
+ for (TREEVAR = op_iter_init_tree (&(ITER), STMT, FLAGS); \
+ !op_iter_done (&(ITER)); \
+ (void) (TREEVAR = op_iter_next_tree (&(ITER))))
+
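+/* Illustrative sketch (not part of the upstream header): dump every
+   operand of a statement, virtual and real, as a tree; stmt is an
+   assumed gimple * in scope:
+
+     tree var;
+     ssa_op_iter iter;
+     FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_ALL_OPERANDS)
+       print_generic_expr (dump_file, var);
+*/
+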
+/* This macro executes a loop over the operands of STMT specified in FLAGS,
+ returning each operand as a 'use_operand_p' in the variable USEVAR.
+ ITER is an ssa_op_iter structure used to control the loop. */
+#define FOR_EACH_SSA_USE_OPERAND(USEVAR, STMT, ITER, FLAGS) \
+ for (USEVAR = op_iter_init_use (&(ITER), STMT, FLAGS); \
+ !op_iter_done (&(ITER)); \
+ USEVAR = op_iter_next_use (&(ITER)))
+
+/* This macro executes a loop over the operands of STMT specified in FLAGS,
+ returning each operand as a 'def_operand_p' in the variable DEFVAR.
+ ITER is an ssa_op_iter structure used to control the loop. */
+#define FOR_EACH_SSA_DEF_OPERAND(DEFVAR, STMT, ITER, FLAGS) \
+ for (DEFVAR = op_iter_init_def (&(ITER), STMT, FLAGS); \
+ !op_iter_done (&(ITER)); \
+ DEFVAR = op_iter_next_def (&(ITER)))
+
+/* This macro will execute a loop over all the arguments of a PHI which
+ match FLAGS. A use_operand_p is always returned via USEVAR. FLAGS
+ can be either SSA_OP_USE or SSA_OP_VIRTUAL_USES or SSA_OP_ALL_USES. */
+#define FOR_EACH_PHI_ARG(USEVAR, STMT, ITER, FLAGS) \
+ for ((USEVAR) = op_iter_init_phiuse (&(ITER), STMT, FLAGS); \
+ !op_iter_done (&(ITER)); \
+ (USEVAR) = op_iter_next_use (&(ITER)))
+
+
+/* This macro will execute a loop over a stmt, regardless of whether it is
+ a real stmt or a PHI node, looking at the USE nodes matching FLAGS. */
+#define FOR_EACH_PHI_OR_STMT_USE(USEVAR, STMT, ITER, FLAGS) \
+ for ((USEVAR) = (gimple_code (STMT) == GIMPLE_PHI \
+ ? op_iter_init_phiuse (&(ITER), \
+ as_a <gphi *> (STMT), \
+ FLAGS) \
+ : op_iter_init_use (&(ITER), STMT, FLAGS)); \
+ !op_iter_done (&(ITER)); \
+ (USEVAR) = op_iter_next_use (&(ITER)))
+
+/* This macro will execute a loop over a stmt, regardless of whether it is
+ a real stmt or a PHI node, looking at the DEF nodes matching FLAGS. */
+#define FOR_EACH_PHI_OR_STMT_DEF(DEFVAR, STMT, ITER, FLAGS) \
+ for ((DEFVAR) = (gimple_code (STMT) == GIMPLE_PHI \
+ ? op_iter_init_phidef (&(ITER), \
+ as_a <gphi *> (STMT), \
+ FLAGS) \
+ : op_iter_init_def (&(ITER), STMT, FLAGS)); \
+ !op_iter_done (&(ITER)); \
+ (DEFVAR) = op_iter_next_def (&(ITER)))
+
+/* This macro returns an operand in STMT as a tree if it is the ONLY
+ operand matching FLAGS. If there are 0 or more than 1 operand matching
+ FLAGS, then NULL_TREE is returned. */
+#define SINGLE_SSA_TREE_OPERAND(STMT, FLAGS) \
+ single_ssa_tree_operand (STMT, FLAGS)
+
+/* This macro returns an operand in STMT as a use_operand_p if it is the ONLY
+ operand matching FLAGS. If there are 0 or more than 1 operand matching
+ FLAGS, then NULL_USE_OPERAND_P is returned. */
+#define SINGLE_SSA_USE_OPERAND(STMT, FLAGS) \
+ single_ssa_use_operand (STMT, FLAGS)
+
+/* This macro returns an operand in STMT as a def_operand_p if it is the ONLY
+ operand matching FLAGS. If there are 0 or more than 1 operand matching
+ FLAGS, then NULL_DEF_OPERAND_P is returned. */
+#define SINGLE_SSA_DEF_OPERAND(STMT, FLAGS) \
+ single_ssa_def_operand (STMT, FLAGS)
+
+/* This macro returns TRUE if there are no operands matching FLAGS in STMT. */
+#define ZERO_SSA_OPERANDS(STMT, FLAGS) zero_ssa_operands (STMT, FLAGS)
+
+/* This macro counts the number of operands in STMT matching FLAGS. */
+#define NUM_SSA_OPERANDS(STMT, FLAGS) num_ssa_operands (STMT, FLAGS)
+
+
+/* Delink an immediate_uses node from its chain. */
+inline void
+delink_imm_use (ssa_use_operand_t *linknode)
+{
+ /* Return if this node is not in a list. */
+ if (linknode->prev == NULL)
+ return;
+
+ linknode->prev->next = linknode->next;
+ linknode->next->prev = linknode->prev;
+ linknode->prev = NULL;
+ linknode->next = NULL;
+}
+
+/* Link ssa_imm_use node LINKNODE into the chain for LIST. */
+inline void
+link_imm_use_to_list (ssa_use_operand_t *linknode, ssa_use_operand_t *list)
+{
+ /* Link the new node at the head of the list. If we are in the process of
+ traversing the list, we won't visit any new nodes added to it. */
+ linknode->prev = list;
+ linknode->next = list->next;
+ list->next->prev = linknode;
+ list->next = linknode;
+}
+
+/* Link ssa_imm_use node LINKNODE into the chain for DEF. */
+inline void
+link_imm_use (ssa_use_operand_t *linknode, tree def)
+{
+ ssa_use_operand_t *root;
+
+ if (!def || TREE_CODE (def) != SSA_NAME)
+ linknode->prev = NULL;
+ else
+ {
+ root = &(SSA_NAME_IMM_USE_NODE (def));
+ if (linknode->use)
+ gcc_checking_assert (*(linknode->use) == def);
+ link_imm_use_to_list (linknode, root);
+ }
+}
+
+/* Set the value of a use pointed to by USE to VAL. */
+inline void
+set_ssa_use_from_ptr (use_operand_p use, tree val)
+{
+ delink_imm_use (use);
+ *(use->use) = val;
+ link_imm_use (use, val);
+}
+
+/* Link ssa_imm_use node LINKNODE into the chain for DEF, with use occurring
+ in STMT. */
+inline void
+link_imm_use_stmt (ssa_use_operand_t *linknode, tree def, gimple *stmt)
+{
+ if (stmt)
+ link_imm_use (linknode, def);
+ else
+ link_imm_use (linknode, NULL);
+ linknode->loc.stmt = stmt;
+}
+
+/* Relink a new node in place of an old node in the list. */
+inline void
+relink_imm_use (ssa_use_operand_t *node, ssa_use_operand_t *old)
+{
+  /* The new node had better belong in the same list as the old one. */
+ gcc_checking_assert (*(old->use) == *(node->use));
+ node->prev = old->prev;
+ node->next = old->next;
+ if (old->prev)
+ {
+ old->prev->next = node;
+ old->next->prev = node;
+ /* Remove the old node from the list. */
+ old->prev = NULL;
+ }
+}
+
+/* Relink ssa_imm_use node LINKNODE into the chain for OLD, with use occurring
+ in STMT. */
+inline void
+relink_imm_use_stmt (ssa_use_operand_t *linknode, ssa_use_operand_t *old,
+ gimple *stmt)
+{
+ if (stmt)
+ relink_imm_use (linknode, old);
+ else
+ link_imm_use (linknode, NULL);
+ linknode->loc.stmt = stmt;
+}
+
+
+/* Return true if IMM has reached the end of the immediate use list. */
+inline bool
+end_readonly_imm_use_p (const imm_use_iterator *imm)
+{
+ return (imm->imm_use == imm->end_p);
+}
+
+/* Initialize iterator IMM to process the list for VAR. */
+inline use_operand_p
+first_readonly_imm_use (imm_use_iterator *imm, tree var)
+{
+ imm->end_p = &(SSA_NAME_IMM_USE_NODE (var));
+ imm->imm_use = imm->end_p->next;
+ imm->iter_node.next = imm->imm_use->next;
+ if (end_readonly_imm_use_p (imm))
+ return NULL_USE_OPERAND_P;
+ return imm->imm_use;
+}
+
+/* Bump IMM to the next use in the list. */
+inline use_operand_p
+next_readonly_imm_use (imm_use_iterator *imm)
+{
+ use_operand_p old = imm->imm_use;
+
+ /* If this assertion fails, it indicates the 'next' pointer has changed
+ since the last bump. This indicates that the list is being modified
+ via stmt changes, or SET_USE, or somesuch thing, and you need to be
+ using the SAFE version of the iterator. */
+ if (flag_checking)
+ {
+ gcc_assert (imm->iter_node.next == old->next);
+ imm->iter_node.next = old->next->next;
+ }
+
+ imm->imm_use = old->next;
+ if (end_readonly_imm_use_p (imm))
+ return NULL_USE_OPERAND_P;
+ return imm->imm_use;
+}
+
+
+/* Return true if VAR has no nondebug uses. */
+inline bool
+has_zero_uses (const_tree var)
+{
+ const ssa_use_operand_t *const head = &(SSA_NAME_IMM_USE_NODE (var));
+ const ssa_use_operand_t *ptr;
+
+ for (ptr = head->next; ptr != head; ptr = ptr->next)
+ if (USE_STMT (ptr) && !is_gimple_debug (USE_STMT (ptr)))
+ return false;
+
+ return true;
+}
+
+/* Return true if VAR has a single nondebug use. */
+inline bool
+has_single_use (const_tree var)
+{
+ const ssa_use_operand_t *const head = &(SSA_NAME_IMM_USE_NODE (var));
+ const ssa_use_operand_t *ptr;
+ bool single = false;
+
+ for (ptr = head->next; ptr != head; ptr = ptr->next)
+ if (USE_STMT(ptr) && !is_gimple_debug (USE_STMT (ptr)))
+ {
+ if (single)
+ return false;
+ else
+ single = true;
+ }
+
+ return single;
+}
+
+/* If VAR has only a single immediate nondebug use, return true, and
+ set USE_P and STMT to the use pointer and stmt of occurrence. */
+inline bool
+single_imm_use (const_tree var, use_operand_p *use_p, gimple **stmt)
+{
+ const ssa_use_operand_t *const ptr = &(SSA_NAME_IMM_USE_NODE (var));
+
+ /* If there aren't any uses whatsoever, we're done. */
+ if (ptr == ptr->next)
+ {
+ return_false:
+ *use_p = NULL_USE_OPERAND_P;
+ *stmt = NULL;
+ return false;
+ }
+
+ /* If there's a single use, check that it's not a debug stmt. */
+ if (ptr == ptr->next->next)
+ {
+ if (USE_STMT (ptr->next) && !is_gimple_debug (USE_STMT (ptr->next)))
+ {
+ *use_p = ptr->next;
+ *stmt = ptr->next->loc.stmt;
+ return true;
+ }
+ else
+ goto return_false;
+ }
+
+ return single_imm_use_1 (ptr, use_p, stmt);
+}
+
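+/* Illustrative sketch (not part of the original header): propagation passes
+   commonly use single_imm_use to rewrite the sole consumer of an SSA name;
+   NAME and REPLACEMENT are assumed to be trees already in scope:
+
+     use_operand_p use_p;
+     gimple *use_stmt;
+     if (single_imm_use (name, &use_p, &use_stmt))
+       {
+         SET_USE (use_p, replacement);
+         update_stmt (use_stmt);
+       }  */
+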
+/* Return the number of nondebug immediate uses of VAR. */
+inline unsigned int
+num_imm_uses (const_tree var)
+{
+ const ssa_use_operand_t *const start = &(SSA_NAME_IMM_USE_NODE (var));
+ const ssa_use_operand_t *ptr;
+ unsigned int num = 0;
+
+ if (!MAY_HAVE_DEBUG_BIND_STMTS)
+ {
+ for (ptr = start->next; ptr != start; ptr = ptr->next)
+ if (USE_STMT (ptr))
+ num++;
+ }
+ else
+ for (ptr = start->next; ptr != start; ptr = ptr->next)
+ if (USE_STMT (ptr) && !is_gimple_debug (USE_STMT (ptr)))
+ num++;
+
+ return num;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* The following set of routines is used to iterate over the various
+   types of SSA operands. */
+
+/* Return true if PTR is finished iterating. */
+inline bool
+op_iter_done (const ssa_op_iter *ptr)
+{
+ return ptr->done;
+}
+
+/* Get the next iterator use value for PTR. */
+inline use_operand_p
+op_iter_next_use (ssa_op_iter *ptr)
+{
+ use_operand_p use_p;
+ gcc_checking_assert (ptr->iter_type == ssa_op_iter_use);
+ if (ptr->uses)
+ {
+ use_p = USE_OP_PTR (ptr->uses);
+ ptr->uses = ptr->uses->next;
+ return use_p;
+ }
+ if (ptr->i < ptr->numops)
+ {
+ return PHI_ARG_DEF_PTR (ptr->stmt, (ptr->i)++);
+ }
+ ptr->done = true;
+ return NULL_USE_OPERAND_P;
+}
+
+/* Get the next iterator def value for PTR. */
+inline def_operand_p
+op_iter_next_def (ssa_op_iter *ptr)
+{
+ gcc_checking_assert (ptr->iter_type == ssa_op_iter_def);
+ if (ptr->flags & SSA_OP_VDEF)
+ {
+ tree *p;
+ ptr->flags &= ~SSA_OP_VDEF;
+ p = gimple_vdef_ptr (ptr->stmt);
+ if (p && *p)
+ return p;
+ }
+ if (ptr->flags & SSA_OP_DEF)
+ {
+ while (ptr->i < ptr->numops)
+ {
+ tree *val = gimple_op_ptr (ptr->stmt, ptr->i);
+ ptr->i++;
+ if (*val)
+ {
+ if (TREE_CODE (*val) == TREE_LIST)
+ val = &TREE_VALUE (*val);
+ if (TREE_CODE (*val) == SSA_NAME
+ || is_gimple_reg (*val))
+ return val;
+ }
+ }
+ ptr->flags &= ~SSA_OP_DEF;
+ }
+
+ ptr->done = true;
+ return NULL_DEF_OPERAND_P;
+}
+
+/* Get the next iterator tree value for PTR. */
+inline tree
+op_iter_next_tree (ssa_op_iter *ptr)
+{
+ tree val;
+ gcc_checking_assert (ptr->iter_type == ssa_op_iter_tree);
+ if (ptr->uses)
+ {
+ val = USE_OP (ptr->uses);
+ ptr->uses = ptr->uses->next;
+ return val;
+ }
+ if (ptr->flags & SSA_OP_VDEF)
+ {
+ ptr->flags &= ~SSA_OP_VDEF;
+ if ((val = gimple_vdef (ptr->stmt)))
+ return val;
+ }
+ if (ptr->flags & SSA_OP_DEF)
+ {
+ while (ptr->i < ptr->numops)
+ {
+ val = gimple_op (ptr->stmt, ptr->i);
+ ptr->i++;
+ if (val)
+ {
+ if (TREE_CODE (val) == TREE_LIST)
+ val = TREE_VALUE (val);
+ if (TREE_CODE (val) == SSA_NAME
+ || is_gimple_reg (val))
+ return val;
+ }
+ }
+ ptr->flags &= ~SSA_OP_DEF;
+ }
+
+ ptr->done = true;
+ return NULL_TREE;
+}
+
+
+/* This function clears the iterator PTR and marks it done.  This is normally
+   used to prevent warnings in the compiler about possibly uninitialized
+   components. */
+
+inline void
+clear_and_done_ssa_iter (ssa_op_iter *ptr)
+{
+ ptr->i = 0;
+ ptr->numops = 0;
+ ptr->uses = NULL;
+ ptr->iter_type = ssa_op_iter_none;
+ ptr->stmt = NULL;
+ ptr->done = true;
+ ptr->flags = 0;
+}
+
+/* Initialize the iterator PTR to the operands in STMT matching FLAGS. */
+inline void
+op_iter_init (ssa_op_iter *ptr, gimple *stmt, int flags)
+{
+ /* PHI nodes require a different iterator initialization path. We
+ do not support iterating over virtual defs or uses without
+ iterating over defs or uses at the same time. */
+ gcc_checking_assert (gimple_code (stmt) != GIMPLE_PHI
+ && (!(flags & SSA_OP_VDEF) || (flags & SSA_OP_DEF))
+ && (!(flags & SSA_OP_VUSE) || (flags & SSA_OP_USE)));
+ ptr->numops = 0;
+ if (flags & (SSA_OP_DEF | SSA_OP_VDEF))
+ {
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_ASSIGN:
+ case GIMPLE_CALL:
+ ptr->numops = 1;
+ break;
+ case GIMPLE_ASM:
+ ptr->numops = gimple_asm_noutputs (as_a <gasm *> (stmt));
+ break;
+ case GIMPLE_TRANSACTION:
+ ptr->numops = 0;
+ flags &= ~SSA_OP_DEF;
+ break;
+ default:
+ ptr->numops = 0;
+ flags &= ~(SSA_OP_DEF | SSA_OP_VDEF);
+ break;
+ }
+ }
+ ptr->uses = (flags & (SSA_OP_USE|SSA_OP_VUSE)) ? gimple_use_ops (stmt) : NULL;
+ if (!(flags & SSA_OP_VUSE)
+ && ptr->uses
+ && gimple_vuse (stmt) != NULL_TREE)
+ ptr->uses = ptr->uses->next;
+ ptr->done = false;
+ ptr->i = 0;
+
+ ptr->stmt = stmt;
+ ptr->flags = flags;
+}
+
+/* Initialize iterator PTR to the use operands in STMT based on FLAGS. Return
+ the first use. */
+inline use_operand_p
+op_iter_init_use (ssa_op_iter *ptr, gimple *stmt, int flags)
+{
+ gcc_checking_assert ((flags & SSA_OP_ALL_DEFS) == 0
+ && (flags & SSA_OP_USE));
+ op_iter_init (ptr, stmt, flags);
+ ptr->iter_type = ssa_op_iter_use;
+ return op_iter_next_use (ptr);
+}
+
+/* Initialize iterator PTR to the def operands in STMT based on FLAGS. Return
+ the first def. */
+inline def_operand_p
+op_iter_init_def (ssa_op_iter *ptr, gimple *stmt, int flags)
+{
+ gcc_checking_assert ((flags & SSA_OP_ALL_USES) == 0
+ && (flags & SSA_OP_DEF));
+ op_iter_init (ptr, stmt, flags);
+ ptr->iter_type = ssa_op_iter_def;
+ return op_iter_next_def (ptr);
+}
+
+/* Initialize iterator PTR to the operands in STMT based on FLAGS. Return
+ the first operand as a tree. */
+inline tree
+op_iter_init_tree (ssa_op_iter *ptr, gimple *stmt, int flags)
+{
+ op_iter_init (ptr, stmt, flags);
+ ptr->iter_type = ssa_op_iter_tree;
+ return op_iter_next_tree (ptr);
+}
+
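+/* Illustrative sketch (not part of the original header): the init/next pairs
+   above are the machinery behind the FOR_EACH_SSA_* macros; an equivalent
+   open-coded walk over all operands of STMT as trees would read:
+
+     ssa_op_iter iter;
+     for (tree t = op_iter_init_tree (&iter, stmt, SSA_OP_ALL_OPERANDS);
+          !op_iter_done (&iter);
+          t = op_iter_next_tree (&iter))
+       ...;  */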
+
+/* If there is a single operand in STMT matching FLAGS, return it. Otherwise
+ return NULL. */
+inline tree
+single_ssa_tree_operand (gimple *stmt, int flags)
+{
+ tree var;
+ ssa_op_iter iter;
+
+ var = op_iter_init_tree (&iter, stmt, flags);
+ if (op_iter_done (&iter))
+ return NULL_TREE;
+ op_iter_next_tree (&iter);
+ if (op_iter_done (&iter))
+ return var;
+ return NULL_TREE;
+}
+
+
+/* If there is a single operand in STMT matching FLAGS, return it. Otherwise
+ return NULL. */
+inline use_operand_p
+single_ssa_use_operand (gimple *stmt, int flags)
+{
+ use_operand_p var;
+ ssa_op_iter iter;
+
+ var = op_iter_init_use (&iter, stmt, flags);
+ if (op_iter_done (&iter))
+ return NULL_USE_OPERAND_P;
+ op_iter_next_use (&iter);
+ if (op_iter_done (&iter))
+ return var;
+ return NULL_USE_OPERAND_P;
+}
+
+/* Return the single virtual use operand in STMT if present. Otherwise
+ return NULL. */
+inline use_operand_p
+ssa_vuse_operand (gimple *stmt)
+{
+ if (! gimple_vuse (stmt))
+ return NULL_USE_OPERAND_P;
+ return USE_OP_PTR (gimple_use_ops (stmt));
+}
+
+
+/* If there is a single operand in STMT matching FLAGS, return it. Otherwise
+ return NULL. */
+inline def_operand_p
+single_ssa_def_operand (gimple *stmt, int flags)
+{
+ def_operand_p var;
+ ssa_op_iter iter;
+
+ var = op_iter_init_def (&iter, stmt, flags);
+ if (op_iter_done (&iter))
+ return NULL_DEF_OPERAND_P;
+ op_iter_next_def (&iter);
+ if (op_iter_done (&iter))
+ return var;
+ return NULL_DEF_OPERAND_P;
+}
+
+
+/* Return true if there are zero operands in STMT matching the type
+ given in FLAGS. */
+inline bool
+zero_ssa_operands (gimple *stmt, int flags)
+{
+ ssa_op_iter iter;
+
+ op_iter_init_tree (&iter, stmt, flags);
+ return op_iter_done (&iter);
+}
+
+
+/* Return the number of operands matching FLAGS in STMT. */
+inline int
+num_ssa_operands (gimple *stmt, int flags)
+{
+ ssa_op_iter iter;
+ tree t;
+ int num = 0;
+
+ gcc_checking_assert (gimple_code (stmt) != GIMPLE_PHI);
+ FOR_EACH_SSA_TREE_OPERAND (t, stmt, iter, flags)
+ num++;
+ return num;
+}
+
+/* If there is a single DEF in the PHI node which matches FLAGS, return it.
+   Otherwise return NULL_TREE. */
+inline tree
+single_phi_def (gphi *stmt, int flags)
+{
+ tree def = PHI_RESULT (stmt);
+ if ((flags & SSA_OP_DEF) && is_gimple_reg (def))
+ return def;
+ if ((flags & SSA_OP_VIRTUAL_DEFS) && !is_gimple_reg (def))
+ return def;
+ return NULL_TREE;
+}
+
+/* Initialize the iterator PTR for uses matching FLAGS in PHI.  FLAGS should
+   be either SSA_OP_USE or SSA_OP_VIRTUAL_USES. */
+inline use_operand_p
+op_iter_init_phiuse (ssa_op_iter *ptr, gphi *phi, int flags)
+{
+ tree phi_def = gimple_phi_result (phi);
+ int comp;
+
+ clear_and_done_ssa_iter (ptr);
+ ptr->done = false;
+
+ gcc_checking_assert ((flags & (SSA_OP_USE | SSA_OP_VIRTUAL_USES)) != 0);
+
+ comp = (is_gimple_reg (phi_def) ? SSA_OP_USE : SSA_OP_VIRTUAL_USES);
+
+  /* If the PHI node doesn't have the operand type we care about, we're done. */
+ if ((flags & comp) == 0)
+ {
+ ptr->done = true;
+ return NULL_USE_OPERAND_P;
+ }
+
+ ptr->stmt = phi;
+ ptr->numops = gimple_phi_num_args (phi);
+ ptr->iter_type = ssa_op_iter_use;
+ ptr->flags = flags;
+ return op_iter_next_use (ptr);
+}
+
+
+/* Start an iterator for a PHI definition. */
+
+inline def_operand_p
+op_iter_init_phidef (ssa_op_iter *ptr, gphi *phi, int flags)
+{
+ tree phi_def = PHI_RESULT (phi);
+ int comp;
+
+ clear_and_done_ssa_iter (ptr);
+ ptr->done = false;
+
+ gcc_checking_assert ((flags & (SSA_OP_DEF | SSA_OP_VIRTUAL_DEFS)) != 0);
+
+ comp = (is_gimple_reg (phi_def) ? SSA_OP_DEF : SSA_OP_VIRTUAL_DEFS);
+
+ /* If the PHI node doesn't have the operand type we care about,
+ we're done. */
+ if ((flags & comp) == 0)
+ {
+ ptr->done = true;
+ return NULL_DEF_OPERAND_P;
+ }
+
+ ptr->iter_type = ssa_op_iter_def;
+ /* The first call to op_iter_next_def will terminate the iterator since
+ all the fields are NULL. Simply return the result here as the first and
+ therefore only result. */
+ return PHI_RESULT_PTR (phi);
+}
+
+/* Return true if IMM has reached the end of the immediate use stmt list. */
+
+inline bool
+end_imm_use_stmt_p (const imm_use_iterator *imm)
+{
+ return (imm->imm_use == imm->end_p);
+}
+
+/* Finish the traversal of an immediate use stmt list IMM by removing the
+   placeholder node from the list. */
+
+inline void
+end_imm_use_stmt_traverse (imm_use_iterator *imm)
+{
+ delink_imm_use (&(imm->iter_node));
+}
+
+/* Immediate use traversal of uses within a stmt requires that all the
+ uses on a stmt be sequentially listed. This routine is used to build up
+ this sequential list by adding USE_P to the end of the current list
+ currently delimited by HEAD and LAST_P. The new LAST_P value is
+ returned. */
+
+inline use_operand_p
+move_use_after_head (use_operand_p use_p, use_operand_p head,
+ use_operand_p last_p)
+{
+ gcc_checking_assert (USE_FROM_PTR (use_p) == USE_FROM_PTR (head));
+ /* Skip head when we find it. */
+ if (use_p != head)
+ {
+ /* If use_p is already linked in after last_p, continue. */
+ if (last_p->next == use_p)
+ last_p = use_p;
+ else
+ {
+ /* Delink from current location, and link in at last_p. */
+ delink_imm_use (use_p);
+ link_imm_use_to_list (use_p, last_p);
+ last_p = use_p;
+ }
+ }
+ return last_p;
+}
+
+
+/* This routine will relink all uses with the same stmt as HEAD into the list
+ immediately following HEAD for iterator IMM. */
+
+inline void
+link_use_stmts_after (use_operand_p head, imm_use_iterator *imm)
+{
+ use_operand_p use_p;
+ use_operand_p last_p = head;
+ gimple *head_stmt = USE_STMT (head);
+ tree use = USE_FROM_PTR (head);
+ ssa_op_iter op_iter;
+ int flag;
+
+ /* Only look at virtual or real uses, depending on the type of HEAD. */
+ flag = (is_gimple_reg (use) ? SSA_OP_USE : SSA_OP_VIRTUAL_USES);
+
+ if (gphi *phi = dyn_cast <gphi *> (head_stmt))
+ {
+ FOR_EACH_PHI_ARG (use_p, phi, op_iter, flag)
+ if (USE_FROM_PTR (use_p) == use)
+ last_p = move_use_after_head (use_p, head, last_p);
+ }
+ else
+ {
+ if (flag == SSA_OP_USE)
+ {
+ FOR_EACH_SSA_USE_OPERAND (use_p, head_stmt, op_iter, flag)
+ if (USE_FROM_PTR (use_p) == use)
+ last_p = move_use_after_head (use_p, head, last_p);
+ }
+ else if ((use_p = gimple_vuse_op (head_stmt)) != NULL_USE_OPERAND_P)
+ {
+ if (USE_FROM_PTR (use_p) == use)
+ last_p = move_use_after_head (use_p, head, last_p);
+ }
+ }
+ /* Link iter node in after last_p. */
+ if (imm->iter_node.prev != NULL)
+ delink_imm_use (&imm->iter_node);
+ link_imm_use_to_list (&(imm->iter_node), last_p);
+}
+
+/* Initialize IMM to traverse over uses of VAR. Return the first statement. */
+inline gimple *
+first_imm_use_stmt (imm_use_iterator *imm, tree var)
+{
+ imm->end_p = &(SSA_NAME_IMM_USE_NODE (var));
+ imm->imm_use = imm->end_p->next;
+ imm->next_imm_name = NULL_USE_OPERAND_P;
+
+ /* iter_node is used as a marker within the immediate use list to indicate
+ where the end of the current stmt's uses are. Initialize it to NULL
+ stmt and use, which indicates a marker node. */
+ imm->iter_node.prev = NULL_USE_OPERAND_P;
+ imm->iter_node.next = NULL_USE_OPERAND_P;
+ imm->iter_node.loc.stmt = NULL;
+ imm->iter_node.use = NULL;
+
+ if (end_imm_use_stmt_p (imm))
+ return NULL;
+
+ link_use_stmts_after (imm->imm_use, imm);
+
+ return USE_STMT (imm->imm_use);
+}
+
+/* Bump IMM to the next stmt which has a use of var. */
+
+inline gimple *
+next_imm_use_stmt (imm_use_iterator *imm)
+{
+ imm->imm_use = imm->iter_node.next;
+ if (end_imm_use_stmt_p (imm))
+ {
+ if (imm->iter_node.prev != NULL)
+ delink_imm_use (&imm->iter_node);
+ return NULL;
+ }
+
+ link_use_stmts_after (imm->imm_use, imm);
+ return USE_STMT (imm->imm_use);
+}
+
+/* This routine will return the first use on the stmt that IMM currently
+   refers to. */
+
+inline use_operand_p
+first_imm_use_on_stmt (imm_use_iterator *imm)
+{
+ imm->next_imm_name = imm->imm_use->next;
+ return imm->imm_use;
+}
+
+/* Return TRUE if the last use on the stmt that IMM refers to has been
+   visited. */
+
+inline bool
+end_imm_use_on_stmt_p (const imm_use_iterator *imm)
+{
+ return (imm->imm_use == &(imm->iter_node));
+}
+
+/* Bump to the next use on the stmt that IMM refers to; return NULL if done. */
+
+inline use_operand_p
+next_imm_use_on_stmt (imm_use_iterator *imm)
+{
+ imm->imm_use = imm->next_imm_name;
+ if (end_imm_use_on_stmt_p (imm))
+ return NULL_USE_OPERAND_P;
+ else
+ {
+ imm->next_imm_name = imm->imm_use->next;
+ return imm->imm_use;
+ }
+}
+
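+/* Illustrative sketch (not part of the original header): the routines above
+   underlie the FOR_EACH_IMM_USE_STMT walk; open-coding the outer loop over
+   every statement that uses VAR would read:
+
+     imm_use_iterator imm;
+     for (gimple *stmt = first_imm_use_stmt (&imm, var); stmt;
+          stmt = next_imm_use_stmt (&imm))
+       ...;  */
+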
+/* Delink all immediate_use information for STMT. */
+inline void
+delink_stmt_imm_use (gimple *stmt)
+{
+ ssa_op_iter iter;
+ use_operand_p use_p;
+
+ if (ssa_operands_active (cfun))
+ FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_ALL_USES)
+ delink_imm_use (use_p);
+}
+
+#endif /* GCC_TREE_SSA_ITERATORS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ssa.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ssa.h
new file mode 100644
index 0000000..87c2a59
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ssa.h
@@ -0,0 +1,34 @@
+/* Common SSA files
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SSA_H
+#define GCC_SSA_H
+
+/* This is an aggregation header file. This means it should contain only
+ other include files. */
+
+#include "stringpool.h"
+#include "gimple-ssa.h"
+#include "tree-vrp.h"
+#include "range.h"
+#include "tree-ssanames.h"
+#include "tree-phinodes.h"
+#include "ssa-iterators.h"
+
+#endif /* GCC_SSA_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/statistics.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/statistics.h
new file mode 100644
index 0000000..264eab5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/statistics.h
@@ -0,0 +1,71 @@
+/* Memory and optimization statistics helpers.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+ Contributed by Cygnus Solutions.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_STATISTICS
+#define GCC_STATISTICS
+
+#if ! defined GATHER_STATISTICS
+#error GATHER_STATISTICS must be defined
+#endif
+
+#define GCC_MEM_STAT_ARGUMENTS const char * ARG_UNUSED (_loc_name), int ARG_UNUSED (_loc_line), const char * ARG_UNUSED (_loc_function)
+#if GATHER_STATISTICS
+#define ALONE_MEM_STAT_DECL GCC_MEM_STAT_ARGUMENTS
+#define ALONE_FINAL_MEM_STAT_DECL ALONE_MEM_STAT_DECL
+#define ALONE_PASS_MEM_STAT _loc_name, _loc_line, _loc_function
+#define ALONE_FINAL_PASS_MEM_STAT ALONE_PASS_MEM_STAT
+#define ALONE_MEM_STAT_INFO __FILE__, __LINE__, __FUNCTION__
+#define MEM_STAT_DECL , ALONE_MEM_STAT_DECL
+#define FINAL_MEM_STAT_DECL , ALONE_FINAL_MEM_STAT_DECL
+#define PASS_MEM_STAT , ALONE_PASS_MEM_STAT
+#define FINAL_PASS_MEM_STAT , ALONE_FINAL_PASS_MEM_STAT
+#define MEM_STAT_INFO , ALONE_MEM_STAT_INFO
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+#define ALONE_CXX_MEM_STAT_INFO const char * _loc_name = __builtin_FILE (), int _loc_line = __builtin_LINE (), const char * _loc_function = __builtin_FUNCTION ()
+#else
+#define ALONE_CXX_MEM_STAT_INFO const char * _loc_name = __FILE__, int _loc_line = __LINE__, const char * _loc_function = NULL
+#endif
+#define CXX_MEM_STAT_INFO , ALONE_CXX_MEM_STAT_INFO
+#else
+#define ALONE_MEM_STAT_DECL void
+#define ALONE_FINAL_MEM_STAT_DECL GCC_MEM_STAT_ARGUMENTS
+#define ALONE_PASS_MEM_STAT
+#define ALONE_FINAL_PASS_MEM_STAT 0,0,0
+#define ALONE_MEM_STAT_INFO
+#define MEM_STAT_DECL
+#define FINAL_MEM_STAT_DECL , ALONE_FINAL_MEM_STAT_DECL
+#define PASS_MEM_STAT
+#define FINAL_PASS_MEM_STAT , ALONE_FINAL_PASS_MEM_STAT
+#define MEM_STAT_INFO ALONE_MEM_STAT_INFO
+#define ALONE_CXX_MEM_STAT_INFO
+#define CXX_MEM_STAT_INFO
+#endif
+
+struct function;
+
+/* In statistics.cc */
+extern void statistics_early_init (void);
+extern void statistics_init (void);
+extern void statistics_fini (void);
+extern void statistics_fini_pass (void);
+extern void statistics_counter_event (struct function *, const char *, int);
+extern void statistics_histogram_event (struct function *, const char *, int);
+
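+/* Illustrative sketch (not part of the original header): a pass typically
+   records an event against the current function like so, where the counter
+   name is an arbitrary string chosen by the pass:
+
+     statistics_counter_event (cfun, "loops unrolled", 1);  */
+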
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/stmt.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/stmt.h
new file mode 100644
index 0000000..535da8f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/stmt.h
@@ -0,0 +1,53 @@
+/* Declarations and data structures for stmt.cc.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_STMT_H
+#define GCC_STMT_H
+
+extern void expand_label (tree);
+extern bool parse_output_constraint (const char **, int, int, int,
+ bool *, bool *, bool *);
+extern bool parse_input_constraint (const char **, int, int, int, int,
+ const char * const *, bool *, bool *);
+extern tree resolve_asm_operand_names (tree, tree, tree, tree);
+#ifdef HARD_CONST
+/* Silly ifdef to avoid having all includers depend on hard-reg-set.h. */
+extern tree tree_overlaps_hard_reg_set (tree, HARD_REG_SET *);
+#endif
+
+/* Return the CODE_LABEL rtx for a LABEL_DECL, creating it if necessary.
+ If label was deleted, the corresponding note
+ (NOTE_INSN_DELETED{_DEBUG,}_LABEL) insn will be returned. */
+extern rtx_insn *label_rtx (tree);
+
+/* As label_rtx, but additionally the label is placed on the forced label
+ list of its containing function (i.e. it is treated as reachable even
+   if how it is reached is not obvious). */
+extern rtx_insn *force_label_rtx (tree);
+
+/* As label_rtx, but checks that label was not deleted. */
+extern rtx_code_label *jump_target_rtx (tree);
+
+/* Expand a GIMPLE_SWITCH statement. */
+extern void expand_case (gswitch *);
+
+/* Like expand_case but special-case for SJLJ exception dispatching. */
+extern void expand_sjlj_dispatch_table (rtx, vec<tree> );
+
+#endif // GCC_STMT_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/stor-layout.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/stor-layout.h
new file mode 100644
index 0000000..e776892
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/stor-layout.h
@@ -0,0 +1,117 @@
+/* Definitions and declarations for stor-layout.cc.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_STOR_LAYOUT_H
+#define GCC_STOR_LAYOUT_H
+
+extern void set_min_and_max_values_for_integral_type (tree, int, signop);
+extern void fixup_signed_type (tree);
+extern unsigned int update_alignment_for_field (record_layout_info, tree,
+ unsigned int);
+extern record_layout_info start_record_layout (tree);
+extern tree bit_from_pos (tree, tree);
+extern tree byte_from_pos (tree, tree);
+extern void pos_from_bit (tree *, tree *, unsigned int, tree);
+extern void normalize_offset (tree *, tree *, unsigned int);
+extern tree rli_size_unit_so_far (record_layout_info);
+extern tree rli_size_so_far (record_layout_info);
+extern void normalize_rli (record_layout_info);
+extern void place_field (record_layout_info, tree);
+extern void compute_record_mode (tree);
+extern void finish_bitfield_layout (tree);
+extern void finish_record_layout (record_layout_info, int);
+extern unsigned int element_precision (const_tree);
+extern void finalize_size_functions (void);
+extern void fixup_unsigned_type (tree);
+extern void initialize_sizetypes (void);
+
+/* Finish up a builtin RECORD_TYPE. Give it a name and provide its
+ fields. Optionally specify an alignment, and then lay it out. */
+extern void finish_builtin_struct (tree, const char *, tree, tree);
+
+/* Given a VAR_DECL, PARM_DECL, RESULT_DECL or FIELD_DECL node,
+ calculates the DECL_SIZE, DECL_SIZE_UNIT, DECL_ALIGN and DECL_MODE
+ fields. Call this only once for any given decl node.
+
+ Second argument is the boundary that this field can be assumed to
+ be starting at (in bits). Zero means it can be assumed aligned
+ on any boundary that may be needed. */
+extern void layout_decl (tree, unsigned);
+
+/* Given a ..._TYPE node, calculate the TYPE_SIZE, TYPE_SIZE_UNIT,
+ TYPE_ALIGN and TYPE_MODE fields. If called more than once on one
+ node, does nothing except for the first time. */
+extern void layout_type (tree);
+
+/* Return the least alignment in bytes required for type TYPE. */
+extern unsigned int min_align_of_type (tree);
+
+/* Construct various nodes representing fract or accum data types. */
+extern tree make_fract_type (int, int, int);
+extern tree make_accum_type (int, int, int);
+
+#define make_signed_fract_type(P) make_fract_type (P, 0, 0)
+#define make_unsigned_fract_type(P) make_fract_type (P, 1, 0)
+#define make_sat_signed_fract_type(P) make_fract_type (P, 0, 1)
+#define make_sat_unsigned_fract_type(P) make_fract_type (P, 1, 1)
+#define make_signed_accum_type(P) make_accum_type (P, 0, 0)
+#define make_unsigned_accum_type(P) make_accum_type (P, 1, 0)
+#define make_sat_signed_accum_type(P) make_accum_type (P, 0, 1)
+#define make_sat_unsigned_accum_type(P) make_accum_type (P, 1, 1)
+
+#define make_or_reuse_signed_fract_type(P) \
+ make_or_reuse_fract_type (P, 0, 0)
+#define make_or_reuse_unsigned_fract_type(P) \
+ make_or_reuse_fract_type (P, 1, 0)
+#define make_or_reuse_sat_signed_fract_type(P) \
+ make_or_reuse_fract_type (P, 0, 1)
+#define make_or_reuse_sat_unsigned_fract_type(P) \
+ make_or_reuse_fract_type (P, 1, 1)
+#define make_or_reuse_signed_accum_type(P) \
+ make_or_reuse_accum_type (P, 0, 0)
+#define make_or_reuse_unsigned_accum_type(P) \
+ make_or_reuse_accum_type (P, 1, 0)
+#define make_or_reuse_sat_signed_accum_type(P) \
+ make_or_reuse_accum_type (P, 0, 1)
+#define make_or_reuse_sat_unsigned_accum_type(P) \
+ make_or_reuse_accum_type (P, 1, 1)
+
+extern tree make_signed_type (int);
+extern tree make_unsigned_type (int);
+
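+/* Illustrative sketch (not part of the original header): a frontend creates
+   and lays out a basic integral type roughly as follows; the precision 24 is
+   an arbitrary example:
+
+     tree int24 = make_signed_type (24);
+     layout_type (int24);  */
+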
+/* Return the mode for data of a given size SIZE and mode class CLASS.
+ If LIMIT is nonzero, then don't use modes bigger than MAX_FIXED_MODE_SIZE.
+ The value is BLKmode if no other mode is found. This is like
+ mode_for_size, but is passed a tree. */
+extern opt_machine_mode mode_for_size_tree (const_tree, enum mode_class, int);
+
+extern tree bitwise_type_for_mode (machine_mode);
+
+/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
+ a previous call to layout_decl and calls it again. */
+extern void relayout_decl (tree);
+
+/* variable_size (EXP) is like save_expr (EXP) except that it
+ is for the special case of something that is part of a
+ variable size for a data type. It makes special arrangements
+ to compute the value at the right time when the data type
+ belongs to a function parameter. */
+extern tree variable_size (tree);
+
+#endif // GCC_STOR_LAYOUT_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/streamer-hooks.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/streamer-hooks.h
new file mode 100644
index 0000000..de4f9d3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/streamer-hooks.h
@@ -0,0 +1,92 @@
+/* Streamer hooks. Support for adding streamer-specific callbacks to
+ generic streaming routines.
+
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+ Contributed by Diego Novillo <dnovillo@google.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_STREAMER_HOOKS_H
+#define GCC_STREAMER_HOOKS_H
+
+/* Forward declarations to avoid including unnecessary headers. */
+struct output_block;
+class lto_input_block;
+class data_in;
+
+/* Streamer hooks. These functions do additional processing as
+ needed by the module. There are two types of callbacks, those that
+ replace the default behavior and those that supplement it.
+
+ Hooks marked [REQ] are required to be set. Those marked [OPT] may
+ be NULL, if the streamer does not need to implement them. */
+struct streamer_hooks {
+ /* [REQ] Called by every tree streaming routine that needs to write
+ a tree node. The arguments are: output_block where to write the
+ node, the tree node to write and a boolean flag that should be true
+ if the caller wants to write a reference to the tree, instead of the
+ tree itself. The second boolean parameter specifies this for
+ the tree itself, the first for all siblings that are streamed.
+ The referencing mechanism is up to each streamer to implement. */
+ void (*write_tree) (struct output_block *, tree, bool, bool);
+
+ /* [REQ] Called by every tree streaming routine that needs to read
+ a tree node. It takes two arguments: an lto_input_block pointing
+ to the buffer where to read from and a data_in instance with tables
+ and descriptors needed by the unpickling routines. It returns the
+ tree instantiated from the stream. */
+ tree (*read_tree) (class lto_input_block *, class data_in *);
+
+ /* [REQ] Called by every streaming routine that needs to read a location. */
+ void (*input_location) (location_t *, struct bitpack_d *, class data_in *);
+
+ /* [REQ] Called by every streaming routine that needs to write a
+ location. */
+ void (*output_location) (struct output_block *, struct bitpack_d *,
+ location_t);
+
+ /* [REQ] Called by every streaming routine that needs to write a
+ location, both LOCATION_LOCUS and LOCATION_BLOCK. */
+ void (*output_location_and_block) (struct output_block *, struct bitpack_d *,
+ location_t);
+};
+
+#define stream_write_tree(OB, EXPR, REF_P) \
+ streamer_hooks.write_tree (OB, EXPR, REF_P, REF_P)
+
+#define stream_write_tree_shallow_non_ref(OB, EXPR, REF_P) \
+ streamer_hooks.write_tree (OB, EXPR, REF_P, false)
+
+#define stream_read_tree(IB, DATA_IN) \
+ streamer_hooks.read_tree (IB, DATA_IN)
+
+#define stream_input_location(LOCPTR, BP, DATA_IN) \
+ streamer_hooks.input_location (LOCPTR, BP, DATA_IN)
+
+#define stream_output_location(OB, BP, LOC) \
+ streamer_hooks.output_location (OB, BP, LOC)
+
+#define stream_output_location_and_block(OB, BP, LOC) \
+ streamer_hooks.output_location_and_block (OB, BP, LOC)
+
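+/* Illustrative sketch (not part of the original header): the LTO streamer
+   installs its callbacks by filling the global table, along the lines of:
+
+     streamer_hooks.write_tree = lto_output_tree;
+     streamer_hooks.read_tree = lto_input_tree;  */
+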
+/* Streamer hooks. */
+extern struct streamer_hooks streamer_hooks;
+
+/* In streamer-hooks.cc. */
+void streamer_hooks_init (void);
+
+#endif /* GCC_STREAMER_HOOKS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/stringpool.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/stringpool.h
new file mode 100644
index 0000000..3364b58
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/stringpool.h
@@ -0,0 +1,43 @@
+/* Declarations and definitions for stringpool.cc.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_STRINGPOOL_H
+#define GCC_STRINGPOOL_H
+
+/* Return the (unique) IDENTIFIER_NODE node for a given name.
+ The name is supplied as a char *. */
+extern tree get_identifier (const char *);
+
+/* If an identifier with the name TEXT (a null-terminated string) has
+ previously been referred to, return that node; otherwise return
+ NULL_TREE. */
+extern tree maybe_get_identifier (const char *);
+
+/* Identical to get_identifier, except that the length is assumed
+ known. */
+extern tree get_identifier_with_length (const char *, size_t);
+
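+/* Illustrative sketch (not part of the original header): interning a name
+   and probing for an existing one:
+
+     tree id = get_identifier ("size_t");
+     bool seen = maybe_get_identifier ("size_t") != NULL_TREE;  */
+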
+#if GCC_VERSION >= 3000
+#define get_identifier(str) \
+ (__builtin_constant_p (str) \
+ ? get_identifier_with_length ((str), strlen (str)) \
+ : get_identifier (str))
+#endif
+
+#endif // GCC_STRINGPOOL_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/substring-locations.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/substring-locations.h
new file mode 100644
index 0000000..ce99957
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/substring-locations.h
@@ -0,0 +1,126 @@
+/* Source locations within string literals.
+ Copyright (C) 2016-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SUBSTRING_LOCATIONS_H
+#define GCC_SUBSTRING_LOCATIONS_H
+
+/* The substring_loc class encapsulates information on the source location
+ of a range of characters within a STRING_CST.
+
+ If needed by a diagnostic, the actual location_t of the substring_loc
+ can be calculated by calling its get_location method. This calls a
+ langhook, since this is inherently frontend-specific. For the C family
+ of frontends, it calls back into libcpp to reparse the strings. This
+ gets the location information "on demand", rather than storing the
+ location information in the initial lex for every string. Thus the
+ substring_loc can also be thought of as a deferred call into libcpp,
+ to allow the non-trivial work of reparsing the string to be delayed
+ until we actually need it (to emit a diagnostic for a particular range
+ of characters).
+
+ substring_loc::get_location returns NULL if it succeeds, or an
+ error message if it fails. Error messages are intended for GCC
+ developers (to help debugging) rather than for end-users.
+
+ The easiest way to use a substring_loc is via the format_warning_* APIs,
+ which gracefully handle failure of substring_loc::get_location by using
+ the location of the string as a whole if substring-information is
+ unavailable. */
+
+class substring_loc
+{
+ public:
+ /* Constructor. FMT_STRING_LOC is the location of the string as
+ a whole. STRING_TYPE is the type of the string. It should be an
+ ARRAY_TYPE of INTEGER_TYPE, or a POINTER_TYPE to such an ARRAY_TYPE.
+ CARET_IDX, START_IDX, and END_IDX are offsets from the start
+ of the string data. */
+ substring_loc (location_t fmt_string_loc, tree string_type,
+ int caret_idx, int start_idx, int end_idx)
+ : m_fmt_string_loc (fmt_string_loc), m_string_type (string_type),
+ m_caret_idx (caret_idx), m_start_idx (start_idx), m_end_idx (end_idx) {}
+
+ void set_caret_index (int caret_idx) { m_caret_idx = caret_idx; }
+
+ const char *get_location (location_t *out_loc) const;
+
+ location_t get_fmt_string_loc () const { return m_fmt_string_loc; }
+ tree get_string_type () const { return m_string_type; }
+ int get_caret_idx () const { return m_caret_idx; }
+ int get_start_idx () const { return m_start_idx; }
+ int get_end_idx () const { return m_end_idx; }
+
+ private:
+ location_t m_fmt_string_loc;
+ tree m_string_type;
+ int m_caret_idx;
+ int m_start_idx;
+ int m_end_idx;
+};
+
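+/* Illustrative sketch (not part of the original header): a checker that has
+   computed a character range within a format string can resolve it to a
+   location on demand; fmt_loc, string_type and the indices are assumed to
+   be in scope:
+
+     substring_loc substr (fmt_loc, string_type, caret, start, end);
+     location_t loc;
+     if (substr.get_location (&loc) == NULL)
+       warning_at (loc, OPT_Wformat_, "invalid conversion");  */
+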
+/* A bundle of state for emitting a diagnostic relating to a format string. */
+
+class format_string_diagnostic_t
+{
+ public:
+ format_string_diagnostic_t (const substring_loc &fmt_loc,
+ const range_label *fmt_label,
+ location_t param_loc,
+ const range_label *param_label,
+ const char *corrected_substring);
+
+ /* Functions for emitting a warning about a format string. */
+
+ bool emit_warning_va (int opt, const char *gmsgid, va_list *ap) const
+ ATTRIBUTE_GCC_DIAG (3, 0);
+
+ bool emit_warning_n_va (int opt, unsigned HOST_WIDE_INT n,
+ const char *singular_gmsgid,
+ const char *plural_gmsgid, va_list *ap) const
+ ATTRIBUTE_GCC_DIAG (4, 0) ATTRIBUTE_GCC_DIAG (5, 0);
+
+ bool emit_warning (int opt, const char *gmsgid, ...) const
+ ATTRIBUTE_GCC_DIAG (3, 4);
+
+ bool emit_warning_n (int opt, unsigned HOST_WIDE_INT n,
+ const char *singular_gmsgid,
+ const char *plural_gmsgid, ...) const
+ ATTRIBUTE_GCC_DIAG (4, 6) ATTRIBUTE_GCC_DIAG (5, 6);
+
+ private:
+ const substring_loc &m_fmt_loc;
+ const range_label *m_fmt_label;
+ location_t m_param_loc;
+ const range_label *m_param_label;
+ const char *m_corrected_substring;
+};
+
+
+/* Implementation detail, for use when implementing
+ LANG_HOOKS_GET_SUBSTRING_LOCATION. */
+
+extern const char *get_location_within_string (cpp_reader *pfile,
+ string_concat_db *concats,
+ location_t strloc,
+ enum cpp_ttype type,
+ int caret_idx,
+ int start_idx, int end_idx,
+ location_t *out_loc);
+
+#endif /* ! GCC_SUBSTRING_LOCATIONS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/symbol-summary.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/symbol-summary.h
new file mode 100644
index 0000000..3ed6162
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/symbol-summary.h
@@ -0,0 +1,1013 @@
+/* Callgraph summary data structure.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+ Contributed by Martin Liska
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SYMBOL_SUMMARY_H
+#define GCC_SYMBOL_SUMMARY_H
+
+/* Base class for function_summary and fast_function_summary classes. */
+
+template <class T>
+class function_summary_base
+{
+public:
+ /* Default construction takes SYMTAB as an argument. */
+ function_summary_base (symbol_table *symtab,
+ cgraph_node_hook symtab_insertion,
+ cgraph_node_hook symtab_removal,
+ cgraph_2node_hook symtab_duplication
+ CXX_MEM_STAT_INFO):
+ m_symtab (symtab), m_symtab_insertion (symtab_insertion),
+ m_symtab_removal (symtab_removal),
+ m_symtab_duplication (symtab_duplication),
+ m_symtab_insertion_hook (NULL), m_symtab_duplication_hook (NULL),
+ m_allocator ("function summary" PASS_MEM_STAT)
+ {
+ enable_insertion_hook ();
+ m_symtab_removal_hook
+ = m_symtab->add_cgraph_removal_hook (m_symtab_removal, this);
+ enable_duplication_hook ();
+ }
+
+ /* Basic implementation of insert operation. */
+ virtual void insert (cgraph_node *, T *)
+ {
+ /* In most cases, it makes no sense to create summaries without
+ initializing them. */
+ gcc_unreachable ();
+ }
+
+ /* Basic implementation of removal operation. */
+ virtual void remove (cgraph_node *, T *) {}
+
+ /* Basic implementation of duplication operation. */
+ virtual void duplicate (cgraph_node *, cgraph_node *, T *, T *)
+ {
+ /* It makes no sense to not copy anything during duplication. */
+ gcc_unreachable ();
+ }
+
+ /* Enable insertion hook invocation. */
+ void enable_insertion_hook ()
+ {
+ if (m_symtab_insertion_hook == NULL)
+ m_symtab_insertion_hook
+ = m_symtab->add_cgraph_insertion_hook (m_symtab_insertion, this);
+ }
+
+  /* Disable insertion hook invocation. */
+ void disable_insertion_hook ()
+ {
+ if (m_symtab_insertion_hook != NULL)
+ {
+ m_symtab->remove_cgraph_insertion_hook (m_symtab_insertion_hook);
+ m_symtab_insertion_hook = NULL;
+ }
+ }
+
+ /* Enable duplication hook invocation. */
+ void enable_duplication_hook ()
+ {
+ if (m_symtab_duplication_hook == NULL)
+ m_symtab_duplication_hook
+ = m_symtab->add_cgraph_duplication_hook (m_symtab_duplication, this);
+ }
+
+  /* Disable duplication hook invocation. */
+ void disable_duplication_hook ()
+ {
+ if (m_symtab_duplication_hook != NULL)
+ {
+ m_symtab->remove_cgraph_duplication_hook (m_symtab_duplication_hook);
+ m_symtab_duplication_hook = NULL;
+ }
+ }
+
+protected:
+  /* Allocate new data to be stored within the map. */
+  T* allocate_new ()
+  {
+    /* Use ggc_internal_alloc because we do not want GGC to run the
+       finalizer for type T; the destructor is called explicitly. */
+ return is_ggc () ? new (ggc_internal_alloc (sizeof (T))) T ()
+ : m_allocator.allocate () ;
+ }
+
+  /* Release an item that is stored within the map. */
+ void release (T *item)
+ {
+ if (is_ggc ())
+ ggc_delete (item);
+ else
+ m_allocator.remove (item);
+ }
+
+ /* Unregister all call-graph hooks. */
+ void unregister_hooks ();
+
+ /* Symbol table the summary is registered to. */
+ symbol_table *m_symtab;
+
+ /* Insertion function defined by a summary. */
+ cgraph_node_hook m_symtab_insertion;
+ /* Removal function defined by a summary. */
+ cgraph_node_hook m_symtab_removal;
+ /* Duplication function defined by a summary. */
+ cgraph_2node_hook m_symtab_duplication;
+
+ /* Internal summary insertion hook pointer. */
+ cgraph_node_hook_list *m_symtab_insertion_hook;
+ /* Internal summary removal hook pointer. */
+ cgraph_node_hook_list *m_symtab_removal_hook;
+ /* Internal summary duplication hook pointer. */
+ cgraph_2node_hook_list *m_symtab_duplication_hook;
+
+private:
+ /* Return true when the summary uses GGC memory for allocation. */
+ virtual bool is_ggc () = 0;
+
+ /* Object allocator for heap allocation. */
+ object_allocator<T> m_allocator;
+};
+
+template <typename T>
+void
+function_summary_base<T>::unregister_hooks ()
+{
+ disable_insertion_hook ();
+ m_symtab->remove_cgraph_removal_hook (m_symtab_removal_hook);
+ disable_duplication_hook ();
+}
+
+/* We want to pass just pointer types as argument for function_summary
+ template class. */
+
+template <class T>
+class function_summary
+{
+private:
+ function_summary();
+};
+
+/* Function summary is a helper class used to associate a data structure
+   with a callgraph node.  Typical usage can be seen in IPA passes, which
+   create temporary pass-related structures.  The summary class registers
+   hooks that are triggered when a node is inserted, duplicated or deleted.
+   A user of a summary class can override the virtual methods that are
+   invoked when such a hook fires; apart from the callgraph node, the user
+   is given the data structure tied to that node.
+
+   The function summary class can work both with heap-allocated memory and
+   with garbage-collected (GGC) memory. */
+
+template <class T>
+class GTY((user)) function_summary <T *>: public function_summary_base<T>
+{
+public:
+ /* Default construction takes SYMTAB as an argument. */
+ function_summary (symbol_table *symtab, bool ggc = false CXX_MEM_STAT_INFO);
+
+ /* Destructor. */
+ virtual ~function_summary ();
+
+  /* Traverse all summaries, calling function F on each with
+     ARG as argument. */
+ template<typename Arg, bool (*f)(const T &, Arg)>
+ void traverse (Arg a) const
+ {
+ m_map.template traverse <f> (a);
+ }
+
+ /* Getter for summary callgraph node pointer. If a summary for a node
+ does not exist it will be created. */
+ T* get_create (cgraph_node *node)
+ {
+ bool existed;
+ T **v = &m_map.get_or_insert (node->get_uid (), &existed);
+ if (!existed)
+ *v = this->allocate_new ();
+
+ return *v;
+ }
+
+ /* Getter for summary callgraph node pointer. */
+ T* get (cgraph_node *node) ATTRIBUTE_PURE
+ {
+ T **v = m_map.get (node->get_uid ());
+ return v == NULL ? NULL : *v;
+ }
+
+ /* Remove node from summary. */
+ using function_summary_base<T>::remove;
+ void remove (cgraph_node *node)
+ {
+ int uid = node->get_uid ();
+ T **v = m_map.get (uid);
+ if (v)
+ {
+ m_map.remove (uid);
+ this->release (*v);
+ }
+ }
+
+ /* Return true if a summary for the given NODE already exists. */
+ bool exists (cgraph_node *node)
+ {
+ return m_map.get (node->get_uid ()) != NULL;
+ }
+
+ /* Symbol insertion hook that is registered to symbol table. */
+ static void symtab_insertion (cgraph_node *node, void *data);
+
+ /* Symbol removal hook that is registered to symbol table. */
+ static void symtab_removal (cgraph_node *node, void *data);
+
+ /* Symbol duplication hook that is registered to symbol table. */
+ static void symtab_duplication (cgraph_node *node, cgraph_node *node2,
+ void *data);
+
+protected:
+ /* Indication if we use ggc summary. */
+ bool m_ggc;
+
+private:
+ /* Indication if we use ggc summary. */
+ bool is_ggc () final override
+ {
+ return m_ggc;
+ }
+
+ typedef int_hash <int, 0, -1> map_hash;
+
+ /* Main summary store, where summary ID is used as key. */
+ hash_map <map_hash, T *> m_map;
+
+ template <typename U> friend void gt_ggc_mx (function_summary <U *> * const &);
+ template <typename U> friend void gt_pch_nx (function_summary <U *> * const &);
+ template <typename U> friend void gt_pch_nx (function_summary <U *> * const &,
+ gt_pointer_operator, void *);
+};
+
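+/* Illustrative sketch (not part of the original header): an IPA pass might
+   attach a hypothetical per-function record to callgraph nodes using the
+   class above; NODE is assumed to be a cgraph_node * in scope:
+
+     struct my_info { int size_estimate; };
+     function_summary <my_info *> *summaries
+       = new function_summary <my_info *> (symtab);
+     my_info *info = summaries->get_create (node);  */
+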
+template <typename T>
+function_summary<T *>::function_summary (symbol_table *symtab, bool ggc
+ MEM_STAT_DECL):
+ function_summary_base<T> (symtab, function_summary::symtab_insertion,
+ function_summary::symtab_removal,
+ function_summary::symtab_duplication
+ PASS_MEM_STAT),
+ m_ggc (ggc), m_map (13, ggc, true, GATHER_STATISTICS PASS_MEM_STAT) {}
+
+template <typename T>
+function_summary<T *>::~function_summary ()
+{
+ this->unregister_hooks ();
+
+ /* Release all summaries. */
+ typedef typename hash_map <map_hash, T *>::iterator map_iterator;
+ for (map_iterator it = m_map.begin (); it != m_map.end (); ++it)
+ this->release ((*it).second);
+}
+
+template <typename T>
+void
+function_summary<T *>::symtab_insertion (cgraph_node *node, void *data)
+{
+ gcc_checking_assert (node->get_uid ());
+ function_summary *summary = (function_summary <T *> *) (data);
+ summary->insert (node, summary->get_create (node));
+}
+
+template <typename T>
+void
+function_summary<T *>::symtab_removal (cgraph_node *node, void *data)
+{
+ gcc_checking_assert (node->get_uid ());
+ function_summary *summary = (function_summary <T *> *) (data);
+ summary->remove (node);
+}
+
+template <typename T>
+void
+function_summary<T *>::symtab_duplication (cgraph_node *node,
+ cgraph_node *node2, void *data)
+{
+ function_summary *summary = (function_summary <T *> *) (data);
+ T *v = summary->get (node);
+
+ if (v)
+ summary->duplicate (node, node2, v, summary->get_create (node2));
+}
+
+template <typename T>
+void
+gt_ggc_mx(function_summary<T *>* const &summary)
+{
+ gcc_checking_assert (summary->m_ggc);
+ gt_ggc_mx (&summary->m_map);
+}
+
+template <typename T>
+void
+gt_pch_nx (function_summary<T *> *const &)
+{
+ gcc_unreachable ();
+}
+
+template <typename T>
+void
+gt_pch_nx (function_summary<T *> *const &, gt_pointer_operator, void *)
+{
+ gcc_unreachable ();
+}
+
+/* Helper template equivalent to std::is_same from C++11. */
+
+template<typename T, typename U>
+struct is_same
+{
+ static const bool value = false;
+};
+
+template<typename T>
+struct is_same<T,T> //specialization
+{
+ static const bool value = true;
+};
+
+/* We want to pass just pointer types as argument for fast_function_summary
+ template class. */
+
+template <class T, class V>
+class fast_function_summary
+{
+private:
+ fast_function_summary ();
+};
+
+/* Function vector summary is a fast implementation of function_summary that
+   uses a vector as the primary storage for summaries. */
+
+template <class T, class V>
+class GTY((user)) fast_function_summary <T *, V>
+ : public function_summary_base<T>
+{
+public:
+ /* Default construction takes SYMTAB as an argument. */
+ fast_function_summary (symbol_table *symtab CXX_MEM_STAT_INFO);
+
+ /* Destructor. */
+ virtual ~fast_function_summary ();
+
+  /* Traverse all summaries, calling function F on each with
+     ARG as argument. */
+ template<typename Arg, bool (*f)(const T &, Arg)>
+ void traverse (Arg a) const
+ {
+ for (unsigned i = 0; i < m_vector->length (); i++)
+      if ((*m_vector)[i] != NULL)
+ f ((*m_vector)[i], a);
+ }
+
+ /* Getter for summary callgraph node pointer. If a summary for a node
+ does not exist it will be created. */
+ T* get_create (cgraph_node *node)
+ {
+ int id = node->get_summary_id ();
+ if (id == -1)
+ id = this->m_symtab->assign_summary_id (node);
+
+ if ((unsigned int)id >= m_vector->length ())
+ vec_safe_grow_cleared (m_vector,
+ this->m_symtab->cgraph_max_summary_id);
+
+ if ((*m_vector)[id] == NULL)
+ (*m_vector)[id] = this->allocate_new ();
+
+ return (*m_vector)[id];
+ }
+
+ /* Getter for summary callgraph node pointer. */
+ T* get (cgraph_node *node) ATTRIBUTE_PURE
+ {
+ return exists (node) ? (*m_vector)[node->get_summary_id ()] : NULL;
+ }
+
+ using function_summary_base<T>::remove;
+ void remove (cgraph_node *node)
+ {
+ if (exists (node))
+ {
+ int id = node->get_summary_id ();
+ this->release ((*m_vector)[id]);
+ (*m_vector)[id] = NULL;
+ }
+ }
+
+ /* Return true if a summary for the given NODE already exists. */
+ bool exists (cgraph_node *node)
+ {
+ int id = node->get_summary_id ();
+ return (id != -1
+ && (unsigned int)id < m_vector->length ()
+ && (*m_vector)[id] != NULL);
+ }
+
+ /* Symbol insertion hook that is registered to symbol table. */
+ static void symtab_insertion (cgraph_node *node, void *data);
+
+ /* Symbol removal hook that is registered to symbol table. */
+ static void symtab_removal (cgraph_node *node, void *data);
+
+ /* Symbol duplication hook that is registered to symbol table. */
+ static void symtab_duplication (cgraph_node *node, cgraph_node *node2,
+ void *data);
+
+private:
+ bool is_ggc () final override;
+
+ /* Summary is stored in the vector. */
+ vec <T *, V> *m_vector;
+
+ template <typename U> friend void gt_ggc_mx (fast_function_summary <U *, va_gc> * const &);
+ template <typename U> friend void gt_pch_nx (fast_function_summary <U *, va_gc> * const &);
+ template <typename U> friend void gt_pch_nx (fast_function_summary <U *, va_gc> * const &,
+ gt_pointer_operator, void *);
+};
+
+template <typename T, typename V>
+fast_function_summary<T *, V>::fast_function_summary (symbol_table *symtab
+ MEM_STAT_DECL):
+ function_summary_base<T> (symtab,
+ fast_function_summary::symtab_insertion,
+ fast_function_summary::symtab_removal,
+ fast_function_summary::symtab_duplication
+ PASS_MEM_STAT), m_vector (NULL)
+{
+ vec_alloc (m_vector, 13 PASS_MEM_STAT);
+}
+
+template <typename T, typename V>
+fast_function_summary<T *, V>::~fast_function_summary ()
+{
+ this->unregister_hooks ();
+
+ /* Release all summaries. */
+ for (unsigned i = 0; i < m_vector->length (); i++)
+ if ((*m_vector)[i] != NULL)
+ this->release ((*m_vector)[i]);
+ vec_free (m_vector);
+}
+
+template <typename T, typename V>
+void
+fast_function_summary<T *, V>::symtab_insertion (cgraph_node *node, void *data)
+{
+ gcc_checking_assert (node->get_uid ());
+ fast_function_summary *summary = (fast_function_summary <T *, V> *) (data);
+ summary->insert (node, summary->get_create (node));
+}
+
+template <typename T, typename V>
+void
+fast_function_summary<T *, V>::symtab_removal (cgraph_node *node, void *data)
+{
+ gcc_checking_assert (node->get_uid ());
+ fast_function_summary *summary = (fast_function_summary <T *, V> *) (data);
+
+ if (summary->exists (node))
+ summary->remove (node);
+}
+
+template <typename T, typename V>
+void
+fast_function_summary<T *, V>::symtab_duplication (cgraph_node *node,
+ cgraph_node *node2,
+ void *data)
+{
+ fast_function_summary *summary = (fast_function_summary <T *, V> *) (data);
+ T *v = summary->get (node);
+
+ if (v)
+ {
+ T *duplicate = summary->get_create (node2);
+ summary->duplicate (node, node2, v, duplicate);
+ }
+}
+
+template <typename T, typename V>
+inline bool
+fast_function_summary<T *, V>::is_ggc ()
+{
+ return is_same<V, va_gc>::value;
+}
+
+template <typename T>
+void
+gt_ggc_mx (fast_function_summary<T *, va_heap>* const &)
+{
+}
+
+template <typename T>
+void
+gt_pch_nx (fast_function_summary<T *, va_heap>* const &)
+{
+}
+
+template <typename T>
+void
+gt_pch_nx (fast_function_summary<T *, va_heap>* const&, gt_pointer_operator,
+ void *)
+{
+}
+
+template <typename T>
+void
+gt_ggc_mx (fast_function_summary<T *, va_gc>* const &summary)
+{
+ ggc_test_and_set_mark (summary->m_vector);
+ gt_ggc_mx (summary->m_vector);
+}
+
+template <typename T>
+void
+gt_pch_nx (fast_function_summary<T *, va_gc> *const &)
+{
+ gcc_unreachable ();
+}
+
+template <typename T>
+void
+gt_pch_nx (fast_function_summary<T *, va_gc> *const &, gt_pointer_operator,
+ void *)
+{
+ gcc_unreachable ();
+}
+
+/* Base class for call_summary and fast_call_summary classes. */
+
+template <class T>
+class call_summary_base
+{
+public:
+ /* Default construction takes SYMTAB as an argument. */
+ call_summary_base (symbol_table *symtab, cgraph_edge_hook symtab_removal,
+ cgraph_2edge_hook symtab_duplication CXX_MEM_STAT_INFO):
+ m_symtab (symtab), m_symtab_removal (symtab_removal),
+ m_symtab_duplication (symtab_duplication), m_symtab_duplication_hook (NULL),
+ m_initialize_when_cloning (false),
+ m_allocator ("call summary" PASS_MEM_STAT)
+ {
+ m_symtab_removal_hook
+ = m_symtab->add_edge_removal_hook (m_symtab_removal, this);
+ enable_duplication_hook ();
+ }
+
+ /* Basic implementation of removal operation. */
+ virtual void remove (cgraph_edge *, T *) {}
+
+ /* Basic implementation of duplication operation. */
+ virtual void duplicate (cgraph_edge *, cgraph_edge *, T *, T *)
+ {
+ gcc_unreachable ();
+ }
+
+ /* Enable duplication hook invocation. */
+ void enable_duplication_hook ()
+ {
+ if (m_symtab_duplication_hook == NULL)
+ m_symtab_duplication_hook
+ = m_symtab->add_edge_duplication_hook (m_symtab_duplication,
+ this);
+ }
+
+ /* Disable duplication hook invocation. */
+ void disable_duplication_hook ()
+ {
+ if (m_symtab_duplication_hook != NULL)
+ {
+ m_symtab->remove_edge_duplication_hook (m_symtab_duplication_hook);
+ m_symtab_duplication_hook = NULL;
+ }
+ }
+
+protected:
+ /* Allocate new data to be stored within the map. */
+ T* allocate_new ()
+ {
+ /* Use ggc_internal_alloc because we do not want a finalizer to be
+ registered for type T. We call the dtor explicitly. */
+ return is_ggc () ? new (ggc_internal_alloc (sizeof (T))) T ()
+ : m_allocator.allocate ();
+ }
+
+ /* Release an item that is stored within the map. */
+ void release (T *item)
+ {
+ if (is_ggc ())
+ ggc_delete (item);
+ else
+ m_allocator.remove (item);
+ }
+
+ /* Unregister all call-graph hooks. */
+ void unregister_hooks ();
+
+ /* Symbol table the summary is registered to. */
+ symbol_table *m_symtab;
+
+ /* Removal function defined by a summary. */
+ cgraph_edge_hook m_symtab_removal;
+ /* Duplication function defined by a summary. */
+ cgraph_2edge_hook m_symtab_duplication;
+
+ /* Internal summary removal hook pointer. */
+ cgraph_edge_hook_list *m_symtab_removal_hook;
+ /* Internal summary duplication hook pointer. */
+ cgraph_2edge_hook_list *m_symtab_duplication_hook;
+ /* Initialize summary for an edge that is cloned. */
+ bool m_initialize_when_cloning;
+
+private:
+ /* Return true when the summary uses GGC memory for allocation. */
+ virtual bool is_ggc () = 0;
+
+ /* Object allocator for heap allocation. */
+ object_allocator<T> m_allocator;
+};
+
+template <typename T>
+void
+call_summary_base<T>::unregister_hooks ()
+{
+ m_symtab->remove_edge_removal_hook (m_symtab_removal_hook);
+ disable_duplication_hook ();
+}
+
+/* An impossible class templated by non-pointers, which makes sure that only
+ summaries gathering pointers can be created. */
+
+template <class T>
+class call_summary
+{
+private:
+ call_summary ();
+};
+
+/* Class to store auxiliary information about call graph edges. */
+
+template <class T>
+class GTY((user)) call_summary <T *>: public call_summary_base<T>
+{
+public:
+ /* Default construction takes SYMTAB as an argument. */
+ call_summary (symbol_table *symtab, bool ggc = false
+ CXX_MEM_STAT_INFO)
+ : call_summary_base<T> (symtab, call_summary::symtab_removal,
+ call_summary::symtab_duplication PASS_MEM_STAT),
+ m_ggc (ggc), m_map (13, ggc, true, GATHER_STATISTICS PASS_MEM_STAT) {}
+
+ /* Destructor. */
+ virtual ~call_summary ();
+
+ /* Traverse all summaries, calling the function F on each summary with
+ ARG as argument. */
+ template<typename Arg, bool (*f)(const T &, Arg)>
+ void traverse (Arg a) const
+ {
+ m_map.template traverse <f> (a);
+ }
+
+ /* Return the summary for callgraph edge EDGE, creating it if it does
+ not already exist. */
+ T* get_create (cgraph_edge *edge)
+ {
+ bool existed;
+ T **v = &m_map.get_or_insert (edge->get_uid (), &existed);
+ if (!existed)
+ *v = this->allocate_new ();
+
+ return *v;
+ }
+
+ /* Return the summary for callgraph edge EDGE, or NULL if none exists. */
+ T* get (cgraph_edge *edge) ATTRIBUTE_PURE
+ {
+ T **v = m_map.get (edge->get_uid ());
+ return v == NULL ? NULL : *v;
+ }
+
+ /* Remove edge from summary. */
+ using call_summary_base<T>::remove;
+ void remove (cgraph_edge *edge)
+ {
+ int uid = edge->get_uid ();
+ T **v = m_map.get (uid);
+ if (v)
+ {
+ m_map.remove (uid);
+ this->release (*v);
+ }
+ }
+
+ /* Return true if a summary for the given EDGE already exists. */
+ bool exists (cgraph_edge *edge)
+ {
+ return m_map.get (edge->get_uid ()) != NULL;
+ }
+
+ /* Edge removal hook that is registered with the symbol table. */
+ static void symtab_removal (cgraph_edge *edge, void *data);
+
+ /* Edge duplication hook that is registered with the symbol table. */
+ static void symtab_duplication (cgraph_edge *edge1, cgraph_edge *edge2,
+ void *data);
+
+protected:
+ /* Indication if we use ggc summary. */
+ bool m_ggc;
+
+private:
+ /* Return true when the summary uses GGC memory for allocation. */
+ bool is_ggc () final override
+ {
+ return m_ggc;
+ }
+
+ typedef int_hash <int, 0, -1> map_hash;
+
+ /* Main summary store, where the edge UID is used as key. */
+ hash_map <map_hash, T *> m_map;
+
+ template <typename U> friend void gt_ggc_mx (call_summary <U *> * const &);
+ template <typename U> friend void gt_pch_nx (call_summary <U *> * const &);
+ template <typename U> friend void gt_pch_nx (call_summary <U *> * const &,
+ gt_pointer_operator, void *);
+};
+
+template <typename T>
+call_summary<T *>::~call_summary ()
+{
+ this->unregister_hooks ();
+
+ /* Release all summaries. */
+ typedef typename hash_map <map_hash, T *>::iterator map_iterator;
+ for (map_iterator it = m_map.begin (); it != m_map.end (); ++it)
+ this->release ((*it).second);
+}
+
+template <typename T>
+void
+call_summary<T *>::symtab_removal (cgraph_edge *edge, void *data)
+{
+ call_summary *summary = (call_summary <T *> *) (data);
+ summary->remove (edge);
+}
+
+template <typename T>
+void
+call_summary<T *>::symtab_duplication (cgraph_edge *edge1,
+ cgraph_edge *edge2, void *data)
+{
+ call_summary *summary = (call_summary <T *> *) (data);
+ T *edge1_summary = NULL;
+
+ if (summary->m_initialize_when_cloning)
+ edge1_summary = summary->get_create (edge1);
+ else
+ edge1_summary = summary->get (edge1);
+
+ if (edge1_summary)
+ summary->duplicate (edge1, edge2, edge1_summary,
+ summary->get_create (edge2));
+}
+
+template <typename T>
+void
+gt_ggc_mx (call_summary<T *>* const &summary)
+{
+ gcc_checking_assert (summary->m_ggc);
+ gt_ggc_mx (&summary->m_map);
+}
+
+template <typename T>
+void
+gt_pch_nx (call_summary<T *> *const &)
+{
+ gcc_unreachable ();
+}
+
+template <typename T>
+void
+gt_pch_nx (call_summary<T *> *const &, gt_pointer_operator, void *)
+{
+ gcc_unreachable ();
+}
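+
+/* A minimal usage sketch (hypothetical names, not part of this header):
+   per-edge data keyed by the edge UID; the removal and duplication hooks
+   registered by the constructor keep the map consistent while the call
+   graph is mutated.  */
+
+struct example_edge_data
+{
+  bool seen;
+};
+
+inline void
+example_note_edge (call_summary<example_edge_data *> *s, cgraph_edge *e)
+{
+  s->get_create (e)->seen = true;
+}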
+
+/* We want to pass only pointer types as the argument of the
+ fast_call_summary template class. */
+
+template <class T, class V>
+class fast_call_summary
+{
+private:
+ fast_call_summary ();
+};
+
+/* fast_call_summary is a fast implementation of call_summary that
+ uses a vector as the primary storage of summaries. */
+
+template <class T, class V>
+class GTY((user)) fast_call_summary <T *, V>: public call_summary_base<T>
+{
+public:
+ /* Default construction takes SYMTAB as an argument. */
+ fast_call_summary (symbol_table *symtab CXX_MEM_STAT_INFO)
+ : call_summary_base<T> (symtab, fast_call_summary::symtab_removal,
+ fast_call_summary::symtab_duplication PASS_MEM_STAT),
+ m_vector (NULL)
+ {
+ vec_alloc (m_vector, 13 PASS_MEM_STAT);
+ }
+
+ /* Destructor. */
+ virtual ~fast_call_summary ();
+
+ /* Traverse all summaries, calling the function F on each summary with
+ ARG as argument. */
+ template<typename Arg, bool (*f)(const T &, Arg)>
+ void traverse (Arg a) const
+ {
+ for (unsigned i = 0; i < m_vector->length (); i++)
+ if ((*m_vector)[i] != NULL)
+ f (*(*m_vector)[i], a);
+ }
+
+ /* Return the summary for callgraph edge EDGE, creating it if it does
+ not already exist. */
+ T* get_create (cgraph_edge *edge)
+ {
+ int id = edge->get_summary_id ();
+ if (id == -1)
+ id = this->m_symtab->assign_summary_id (edge);
+
+ if ((unsigned)id >= m_vector->length ())
+ vec_safe_grow_cleared (m_vector, this->m_symtab->edges_max_summary_id);
+
+ if ((*m_vector)[id] == NULL)
+ (*m_vector)[id] = this->allocate_new ();
+
+ return (*m_vector)[id];
+ }
+
+ /* Return the summary for callgraph edge EDGE, or NULL if none exists. */
+ T* get (cgraph_edge *edge) ATTRIBUTE_PURE
+ {
+ return exists (edge) ? (*m_vector)[edge->get_summary_id ()] : NULL;
+ }
+
+ /* Remove edge from summary. */
+ using call_summary_base<T>::remove;
+ void remove (cgraph_edge *edge)
+ {
+ if (exists (edge))
+ {
+ int id = edge->get_summary_id ();
+ this->release ((*m_vector)[id]);
+ (*m_vector)[id] = NULL;
+ }
+ }
+
+ /* Return true if a summary for the given EDGE already exists. */
+ bool exists (cgraph_edge *edge)
+ {
+ int id = edge->get_summary_id ();
+ return (id != -1
+ && (unsigned)id < m_vector->length ()
+ && (*m_vector)[id] != NULL);
+ }
+
+ /* Edge removal hook that is registered with the symbol table. */
+ static void symtab_removal (cgraph_edge *edge, void *data);
+
+ /* Edge duplication hook that is registered with the symbol table. */
+ static void symtab_duplication (cgraph_edge *edge1, cgraph_edge *edge2,
+ void *data);
+
+private:
+ bool is_ggc () final override;
+
+ /* Summaries are stored in the vector. */
+ vec <T *, V> *m_vector;
+
+ template <typename U> friend void gt_ggc_mx (fast_call_summary <U *, va_gc> * const &);
+ template <typename U> friend void gt_pch_nx (fast_call_summary <U *, va_gc> * const &);
+ template <typename U> friend void gt_pch_nx (fast_call_summary <U *, va_gc> * const &,
+ gt_pointer_operator, void *);
+};
+
+template <typename T, typename V>
+fast_call_summary<T *, V>::~fast_call_summary ()
+{
+ this->unregister_hooks ();
+
+ /* Release all summaries. */
+ for (unsigned i = 0; i < m_vector->length (); i++)
+ if ((*m_vector)[i] != NULL)
+ this->release ((*m_vector)[i]);
+ vec_free (m_vector);
+}
+
+template <typename T, typename V>
+void
+fast_call_summary<T *, V>::symtab_removal (cgraph_edge *edge, void *data)
+{
+ fast_call_summary *summary = (fast_call_summary <T *, V> *) (data);
+ summary->remove (edge);
+}
+
+template <typename T, typename V>
+void
+fast_call_summary<T *, V>::symtab_duplication (cgraph_edge *edge1,
+ cgraph_edge *edge2, void *data)
+{
+ fast_call_summary *summary = (fast_call_summary <T *, V> *) (data);
+ T *edge1_summary = NULL;
+
+ if (summary->m_initialize_when_cloning)
+ edge1_summary = summary->get_create (edge1);
+ else
+ edge1_summary = summary->get (edge1);
+
+ if (edge1_summary)
+ {
+ T *duplicate = summary->get_create (edge2);
+ summary->duplicate (edge1, edge2, edge1_summary, duplicate);
+ }
+}
+
+template <typename T, typename V>
+inline bool
+fast_call_summary<T *, V>::is_ggc ()
+{
+ return is_same<V, va_gc>::value;
+}
+
+template <typename T>
+void
+gt_ggc_mx (fast_call_summary<T *, va_heap>* const &summary ATTRIBUTE_UNUSED)
+{
+}
+
+template <typename T>
+void
+gt_pch_nx (fast_call_summary<T *, va_heap>* const &summary ATTRIBUTE_UNUSED)
+{
+}
+
+template <typename T>
+void
+gt_pch_nx (fast_call_summary<T *, va_heap>* const& summary ATTRIBUTE_UNUSED,
+ gt_pointer_operator op ATTRIBUTE_UNUSED,
+ void *cookie ATTRIBUTE_UNUSED)
+{
+}
+
+template <typename T>
+void
+gt_ggc_mx (fast_call_summary<T *, va_gc>* const &summary)
+{
+ ggc_test_and_set_mark (summary->m_vector);
+ gt_ggc_mx (summary->m_vector);
+}
+
+template <typename T>
+void
+gt_pch_nx (fast_call_summary<T *, va_gc> *const &)
+{
+ gcc_unreachable ();
+}
+
+template <typename T>
+void
+gt_pch_nx (fast_call_summary<T *, va_gc> *const &, gt_pointer_operator, void *)
+{
+ gcc_unreachable ();
+}
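+
+/* Design note with a sketch (hypothetical names): unlike call_summary,
+   which hashes the edge UID, fast_call_summary indexes its vector by the
+   edge's summary id, so lookup is an array access at the cost of a
+   vector that can grow to edges_max_summary_id entries.  */
+
+inline int
+example_bump_edge_counter (fast_call_summary<int *, va_heap> *s,
+                           cgraph_edge *e)
+{
+  return ++*s->get_create (e);
+}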
+
+#endif /* GCC_SYMBOL_SUMMARY_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/symtab-clones.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/symtab-clones.h
new file mode 100644
index 0000000..3331bc4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/symtab-clones.h
@@ -0,0 +1,77 @@
+/* Representation of adjustment made to virtual clones in the symbol table.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Jan Hubicka
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SYMTAB_CLONES_H
+#define GCC_SYMTAB_CLONES_H
+
+struct GTY(()) clone_info
+{
+ /* Constructor. */
+ clone_info ()
+ : tree_map (NULL),
+ param_adjustments (NULL)
+ {
+ }
+ /* Constants discovered by IPA-CP, i.e. which parameter should be replaced
+ with what. */
+ vec<ipa_replace_map *, va_gc> *tree_map;
+ /* Parameter modification that IPA-SRA decided to perform. */
+ ipa_param_adjustments *param_adjustments;
+
+ /* Return clone_info, if available. */
+ static clone_info *get (cgraph_node *node);
+
+ /* Return the clone_info, possibly creating a new one. */
+ static clone_info *get_create (cgraph_node *node);
+
+ /* Remove clone_info. */
+ static void remove (cgraph_node *node);
+
+ /* Release all clone_infos. */
+ static void release (void);
+};
+
+/* Return clone_info, if available. */
+inline clone_info *
+clone_info::get (cgraph_node *node)
+{
+ if (!symtab->m_clones)
+ return NULL;
+ return symtab->m_clones->get (node);
+}
+
+
+/* Remove clone_info association for NODE. */
+inline void
+clone_info::remove (cgraph_node *node)
+{
+ symtab->m_clones->remove (node);
+}
+
+/* Free clone info summaries. */
+inline void
+clone_info::release ()
+{
+ if (symtab->m_clones)
+ ggc_delete (symtab->m_clones);
+ symtab->m_clones = NULL;
+}
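+
+/* A usage sketch (hypothetical pass code, not part of this header):
+   check whether IPA-SRA recorded parameter changes for a virtual
+   clone.  */
+inline bool
+example_clone_adjusts_params (cgraph_node *node)
+{
+  clone_info *info = clone_info::get (node);
+  return info && info->param_adjustments != NULL;
+}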
+
+#endif /* GCC_SYMTAB_CLONES_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/symtab-thunks.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/symtab-thunks.h
new file mode 100644
index 0000000..fdb8826
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/symtab-thunks.h
@@ -0,0 +1,173 @@
+/* Representation of thunks inside symbol table.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Jan Hubicka
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SYMTAB_THUNKS_H
+#define GCC_SYMTAB_THUNKS_H
+
+/* This symbol annotation holds information about a thunk.
+
+ Thunks are basically wrappers around methods, introduced in the case of
+ multiple inheritance in order to adjust the value of the "this" pointer
+ or of the returned value.
+
+ In the case of this-adjusting thunks, each back-end can override the
+ can_output_mi_thunk/output_mi_thunk target hooks to generate a minimal thunk
+ (with a tail call for instance) directly as assembly. For the default hook
+ or for the case where the can_output_mi_thunk hooks return false, the thunk
+ is gimplified and lowered using the regular machinery. */
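+
+/* A worked illustration (hypothetical C++ types, not part of GCC) of why
+   this-adjusting thunks exist: calling f through the A2 subobject of C
+   must first subtract A2's offset inside C from "this", and that constant
+   is what FIXED_OFFSET below records.  */
+
+inline void
+example_thunk_dispatch ()
+{
+  struct A1 { virtual ~A1 () {} long pad; };
+  struct A2 { virtual void f () = 0; };
+  struct C : A1, A2 { void f () override {} };
+
+  C c;
+  A2 *p = &c;   /* P points A2's offset past the start of C.  */
+  p->f ();      /* May dispatch through a thunk that undoes that offset
+                   before entering C::f.  */
+}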
+
+struct GTY(()) thunk_info {
+ /* Constructor. */
+ thunk_info ()
+ : fixed_offset (0),
+ virtual_value (0),
+ indirect_offset (0),
+ alias (NULL),
+ this_adjusting (false),
+ virtual_offset_p (false)
+ {
+ }
+ /* Copy constructor. */
+ thunk_info (const thunk_info &t)
+ : fixed_offset (t.fixed_offset),
+ virtual_value (t.virtual_value),
+ indirect_offset (t.indirect_offset),
+ alias (t.alias),
+ this_adjusting (t.this_adjusting),
+ virtual_offset_p (t.virtual_offset_p)
+ {
+ }
+
+ /* Compare for equality. */
+ bool
+ operator==(const thunk_info &other) const
+ {
+ return fixed_offset == other.fixed_offset
+ && virtual_value == other.virtual_value
+ && indirect_offset == other.indirect_offset
+ && this_adjusting == other.this_adjusting
+ && virtual_offset_p == other.virtual_offset_p;
+ }
+ bool
+ operator!=(const thunk_info &other) const
+ {
+ return !(*this == other);
+ }
+ /* Copy operator. */
+ thunk_info &
+ operator=(const thunk_info &other)
+ {
+ fixed_offset = other.fixed_offset;
+ virtual_value = other.virtual_value;
+ indirect_offset = other.indirect_offset;
+ alias = other.alias;
+ this_adjusting = other.this_adjusting;
+ virtual_offset_p = other.virtual_offset_p;
+ return *this;
+ }
+
+ /* Offset used to adjust "this". */
+ HOST_WIDE_INT fixed_offset;
+
+ /* Offset in the virtual table to get the offset to adjust "this". Valid iff
+ VIRTUAL_OFFSET_P is true. */
+ HOST_WIDE_INT virtual_value;
+
+ /* Offset from "this" to get the offset to adjust "this". Zero means: this
+ offset is to be ignored. */
+ HOST_WIDE_INT indirect_offset;
+
+ /* Thunk target, i.e. the method that this thunk wraps. Depending on the
+ TARGET_USE_LOCAL_THUNK_ALIAS_P macro, this may have to be a new alias. */
+ tree alias;
+
+ /* Nonzero for a "this" adjusting thunk and zero for a result adjusting
+ thunk. */
+ bool this_adjusting;
+
+ /* If true, this thunk is what we call a virtual thunk. In this case:
+ * for this-adjusting thunks, after the FIXED_OFFSET based adjustment is
+ done, add to the result the offset found in the vtable at:
+ vptr + VIRTUAL_VALUE
+ * for result-adjusting thunks, the FIXED_OFFSET adjustment is done after
+ the virtual one. */
+ bool virtual_offset_p;
+
+
+
+ /* Dump thunk_info. */
+ void dump (FILE *);
+
+ /* Stream out thunk_info. */
+ void stream_out (class lto_simple_output_block *);
+
+ /* Stream in thunk_info. */
+ void stream_in (class lto_input_block *);
+
+ hashval_t hash ();
+
+
+
+ /* Return thunk_info, if available. */
+ static thunk_info *get (cgraph_node *node);
+
+ /* Return the thunk_info, possibly creating a new one. */
+ static thunk_info *get_create (cgraph_node *node);
+
+ /* Remove thunk_info. */
+ static void remove (cgraph_node *node);
+
+ /* Add unprocessed thunk. */
+ void register_early (cgraph_node *node);
+
+ /* Attach recorded thunks to cgraph_nodes. */
+ static void process_early_thunks ();
+
+ /* Release all thunk_infos. */
+ static void release (void);
+};
+
+bool expand_thunk (cgraph_node *, bool, bool);
+
+/* Return thunk_info, if available. */
+inline thunk_info *
+thunk_info::get (cgraph_node *node)
+{
+ if (!symtab->m_thunks)
+ return NULL;
+ return symtab->m_thunks->get (node);
+}
+
+/* Remove thunk_info association for NODE. */
+inline void
+thunk_info::remove (cgraph_node *node)
+{
+ symtab->m_thunks->remove (node);
+}
+
+/* Free thunk info summaries. */
+inline void
+thunk_info::release ()
+{
+ if (symtab->m_thunks)
+ ggc_delete (symtab->m_thunks);
+ symtab->m_thunks = NULL;
+}
+#endif /* GCC_SYMTAB_THUNKS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/symtab.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/symtab.h
new file mode 100644
index 0000000..c7ccc6d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/symtab.h
@@ -0,0 +1,106 @@
+/* Hash tables.
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef LIBCPP_SYMTAB_H
+#define LIBCPP_SYMTAB_H
+
+#include "obstack.h"
+
+#ifndef GTY
+#define GTY(x) /* nothing */
+#endif
+
+/* This is what each hash table entry points to. It may be embedded
+ deeply within another object. */
+typedef struct ht_identifier ht_identifier;
+typedef struct ht_identifier *ht_identifier_ptr;
+struct GTY(()) ht_identifier {
+ /* This GTY markup arranges that the null-terminated identifier would still
+ stream to PCH correctly, if a null byte were to make its way into an
+ identifier somehow. */
+ const unsigned char * GTY((string_length ("1 + %h.len"))) str;
+ unsigned int len;
+ unsigned int hash_value;
+};
+
+#define HT_LEN(NODE) ((NODE)->len)
+#define HT_STR(NODE) ((NODE)->str)
+
+typedef struct ht cpp_hash_table;
+typedef struct ht_identifier *hashnode;
+
+enum ht_lookup_option {HT_NO_INSERT = 0, HT_ALLOC};
+
+/* An identifier hash table for cpplib and the front ends. */
+struct ht
+{
+ /* Identifiers are allocated from here. */
+ struct obstack stack;
+
+ hashnode *entries;
+ /* Callback to allocate a node. */
+ hashnode (*alloc_node) (cpp_hash_table *);
+ /* Callback to allocate something that hangs off a node, such as a
+ cpp_macro. NULL means use the usual allocator. */
+ void * (*alloc_subobject) (size_t);
+
+ unsigned int nslots; /* Total slots in the entries array. */
+ unsigned int nelements; /* Number of live elements. */
+
+ /* Link to reader, if any. For the benefit of cpplib. */
+ struct cpp_reader *pfile;
+
+ /* Table usage statistics. */
+ unsigned int searches;
+ unsigned int collisions;
+
+ /* Should 'entries' be freed when it is no longer needed? */
+ bool entries_owned;
+};
+
+/* Initialize the hashtable with 2 ^ order entries. */
+extern cpp_hash_table *ht_create (unsigned int order);
+
+/* Frees all memory associated with a hash table. */
+extern void ht_destroy (cpp_hash_table *);
+
+extern hashnode ht_lookup (cpp_hash_table *, const unsigned char *,
+ size_t, enum ht_lookup_option);
+extern hashnode ht_lookup_with_hash (cpp_hash_table *, const unsigned char *,
+ size_t, unsigned int,
+ enum ht_lookup_option);
+#define HT_HASHSTEP(r, c) ((r) * 67 + ((c) - 113));
+#define HT_HASHFINISH(r, len) ((r) + (len))
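+
+/* A minimal sketch (hypothetical helper, not part of libcpp) of how these
+   macros are driven.  Note that HT_HASHSTEP's definition already ends in
+   a semicolon, so it is written without one below.  */
+static inline unsigned int
+example_ht_hash (const unsigned char *str, size_t len)
+{
+  unsigned int r = 0;
+  for (size_t i = 0; i < len; i++)
+    {
+      r = HT_HASHSTEP (r, str[i])
+    }
+  return HT_HASHFINISH (r, len);
+}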
+
+/* For all nodes in TABLE, make a callback. The callback takes
+ TABLE->PFILE, the node, and a PTR, and the callback sequence stops
+ if the callback returns zero. */
+typedef int (*ht_cb) (struct cpp_reader *, hashnode, const void *);
+extern void ht_forall (cpp_hash_table *, ht_cb, const void *);
+
+/* For all nodes in TABLE, call the callback. If the callback returns
+ a nonzero value, the node is removed from the table. */
+extern void ht_purge (cpp_hash_table *, ht_cb, const void *);
+
+/* Restore the hash table. */
+extern void ht_load (cpp_hash_table *ht, hashnode *entries,
+ unsigned int nslots, unsigned int nelements, bool own);
+
+/* Dump allocation statistics to stderr. */
+extern void ht_dump_statistics (cpp_hash_table *);
+
+#endif /* LIBCPP_SYMTAB_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sync-builtins.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sync-builtins.def
new file mode 100644
index 0000000..254328b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/sync-builtins.def
@@ -0,0 +1,614 @@
+/* This file contains the definitions and documentation for the
+ synchronization builtins used in the GNU compiler.
+ Copyright (C) 2005-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Before including this file, you should define a macro:
+
+ DEF_SYNC_BUILTIN (ENUM, NAME, TYPE, ATTRS)
+
+ See builtins.def for details. */
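+
+/* A minimal sketch (hypothetical consumer, not part of GCC) of the usual
+   .def idiom: define the macro, include this file, undefine it.  Kept
+   under "#if 0" so it stays illustrative only.  */
+#if 0
+enum sync_builtin_code {
+#define DEF_SYNC_BUILTIN(ENUM, NAME, TYPE, ATTRS) ENUM,
+#include "sync-builtins.def"
+#undef DEF_SYNC_BUILTIN
+  SYNC_BUILTIN_LAST
+};
+#endif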
+
+/* Synchronization Primitives. The "_N" version is the one that the user
+ is supposed to be using. It's overloaded, and is resolved to one of the
+ "_1" through "_16" versions, plus some extra casts. */
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_ADD_N, "__sync_fetch_and_add",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_ADD_1, "__sync_fetch_and_add_1",
+ BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_ADD_2, "__sync_fetch_and_add_2",
+ BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_ADD_4, "__sync_fetch_and_add_4",
+ BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_ADD_8, "__sync_fetch_and_add_8",
+ BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_ADD_16, "__sync_fetch_and_add_16",
+ BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_SUB_N, "__sync_fetch_and_sub",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_SUB_1, "__sync_fetch_and_sub_1",
+ BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_SUB_2, "__sync_fetch_and_sub_2",
+ BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_SUB_4, "__sync_fetch_and_sub_4",
+ BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_SUB_8, "__sync_fetch_and_sub_8",
+ BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_SUB_16, "__sync_fetch_and_sub_16",
+ BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_OR_N, "__sync_fetch_and_or",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_OR_1, "__sync_fetch_and_or_1",
+ BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_OR_2, "__sync_fetch_and_or_2",
+ BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_OR_4, "__sync_fetch_and_or_4",
+ BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_OR_8, "__sync_fetch_and_or_8",
+ BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_OR_16, "__sync_fetch_and_or_16",
+ BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_AND_N, "__sync_fetch_and_and",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_AND_1, "__sync_fetch_and_and_1",
+ BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_AND_2, "__sync_fetch_and_and_2",
+ BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_AND_4, "__sync_fetch_and_and_4",
+ BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_AND_8, "__sync_fetch_and_and_8",
+ BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_AND_16, "__sync_fetch_and_and_16",
+ BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_XOR_N, "__sync_fetch_and_xor",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_XOR_1, "__sync_fetch_and_xor_1",
+ BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_XOR_2, "__sync_fetch_and_xor_2",
+ BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_XOR_4, "__sync_fetch_and_xor_4",
+ BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_XOR_8, "__sync_fetch_and_xor_8",
+ BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_XOR_16, "__sync_fetch_and_xor_16",
+ BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_NAND_N, "__sync_fetch_and_nand",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_NAND_1, "__sync_fetch_and_nand_1",
+ BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_NAND_2, "__sync_fetch_and_nand_2",
+ BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_NAND_4, "__sync_fetch_and_nand_4",
+ BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_NAND_8, "__sync_fetch_and_nand_8",
+ BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_NAND_16, "__sync_fetch_and_nand_16",
+ BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_ADD_AND_FETCH_N, "__sync_add_and_fetch",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_ADD_AND_FETCH_1, "__sync_add_and_fetch_1",
+ BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_ADD_AND_FETCH_2, "__sync_add_and_fetch_2",
+ BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_ADD_AND_FETCH_4, "__sync_add_and_fetch_4",
+ BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_ADD_AND_FETCH_8, "__sync_add_and_fetch_8",
+ BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_ADD_AND_FETCH_16, "__sync_add_and_fetch_16",
+ BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SUB_AND_FETCH_N, "__sync_sub_and_fetch",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SUB_AND_FETCH_1, "__sync_sub_and_fetch_1",
+ BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SUB_AND_FETCH_2, "__sync_sub_and_fetch_2",
+ BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SUB_AND_FETCH_4, "__sync_sub_and_fetch_4",
+ BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SUB_AND_FETCH_8, "__sync_sub_and_fetch_8",
+ BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SUB_AND_FETCH_16, "__sync_sub_and_fetch_16",
+ BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_OR_AND_FETCH_N, "__sync_or_and_fetch",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_OR_AND_FETCH_1, "__sync_or_and_fetch_1",
+ BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_OR_AND_FETCH_2, "__sync_or_and_fetch_2",
+ BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_OR_AND_FETCH_4, "__sync_or_and_fetch_4",
+ BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_OR_AND_FETCH_8, "__sync_or_and_fetch_8",
+ BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_OR_AND_FETCH_16, "__sync_or_and_fetch_16",
+ BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_AND_AND_FETCH_N, "__sync_and_and_fetch",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_AND_AND_FETCH_1, "__sync_and_and_fetch_1",
+ BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_AND_AND_FETCH_2, "__sync_and_and_fetch_2",
+ BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_AND_AND_FETCH_4, "__sync_and_and_fetch_4",
+ BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_AND_AND_FETCH_8, "__sync_and_and_fetch_8",
+ BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_AND_AND_FETCH_16, "__sync_and_and_fetch_16",
+ BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_XOR_AND_FETCH_N, "__sync_xor_and_fetch",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_XOR_AND_FETCH_1, "__sync_xor_and_fetch_1",
+ BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_XOR_AND_FETCH_2, "__sync_xor_and_fetch_2",
+ BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_XOR_AND_FETCH_4, "__sync_xor_and_fetch_4",
+ BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_XOR_AND_FETCH_8, "__sync_xor_and_fetch_8",
+ BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_XOR_AND_FETCH_16, "__sync_xor_and_fetch_16",
+ BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_NAND_AND_FETCH_N, "__sync_nand_and_fetch",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_NAND_AND_FETCH_1, "__sync_nand_and_fetch_1",
+ BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_NAND_AND_FETCH_2, "__sync_nand_and_fetch_2",
+ BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_NAND_AND_FETCH_4, "__sync_nand_and_fetch_4",
+ BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_NAND_AND_FETCH_8, "__sync_nand_and_fetch_8",
+ BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_NAND_AND_FETCH_16, "__sync_nand_and_fetch_16",
+ BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N,
+ "__sync_bool_compare_and_swap",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1,
+ "__sync_bool_compare_and_swap_1",
+ BT_FN_BOOL_VPTR_I1_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2,
+ "__sync_bool_compare_and_swap_2",
+ BT_FN_BOOL_VPTR_I2_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4,
+ "__sync_bool_compare_and_swap_4",
+ BT_FN_BOOL_VPTR_I4_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8,
+ "__sync_bool_compare_and_swap_8",
+ BT_FN_BOOL_VPTR_I8_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16,
+ "__sync_bool_compare_and_swap_16",
+ BT_FN_BOOL_VPTR_I16_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N,
+ "__sync_val_compare_and_swap",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1,
+ "__sync_val_compare_and_swap_1",
+ BT_FN_I1_VPTR_I1_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2,
+ "__sync_val_compare_and_swap_2",
+ BT_FN_I2_VPTR_I2_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4,
+ "__sync_val_compare_and_swap_4",
+ BT_FN_I4_VPTR_I4_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8,
+ "__sync_val_compare_and_swap_8",
+ BT_FN_I8_VPTR_I8_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_16,
+ "__sync_val_compare_and_swap_16",
+ BT_FN_I16_VPTR_I16_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_TEST_AND_SET_N,
+ "__sync_lock_test_and_set",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_TEST_AND_SET_1,
+ "__sync_lock_test_and_set_1",
+ BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_TEST_AND_SET_2,
+ "__sync_lock_test_and_set_2",
+ BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_TEST_AND_SET_4,
+ "__sync_lock_test_and_set_4",
+ BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_TEST_AND_SET_8,
+ "__sync_lock_test_and_set_8",
+ BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_TEST_AND_SET_16,
+ "__sync_lock_test_and_set_16",
+ BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_RELEASE_N, "__sync_lock_release",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_RELEASE_1, "__sync_lock_release_1",
+ BT_FN_VOID_VPTR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_RELEASE_2, "__sync_lock_release_2",
+ BT_FN_VOID_VPTR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_RELEASE_4, "__sync_lock_release_4",
+ BT_FN_VOID_VPTR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_RELEASE_8, "__sync_lock_release_8",
+ BT_FN_VOID_VPTR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_RELEASE_16, "__sync_lock_release_16",
+ BT_FN_VOID_VPTR, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SYNCHRONIZE, "__sync_synchronize",
+ BT_FN_VOID, ATTR_NOTHROWCALL_LEAF_LIST)
+
+/* __atomic* builtins for the C++11 memory model. */
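+
+/* A user-level illustration (hypothetical code, not part of this file)
+   of the memory-model-aware forms defined below.  */
+#if 0
+static int ready;
+
+static int
+read_ready (void)
+{
+  /* Resolves to the "_4" load variant with acquire ordering.  */
+  return __atomic_load_n (&ready, __ATOMIC_ACQUIRE);
+}
+#endif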
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_TEST_AND_SET, "__atomic_test_and_set",
+ BT_FN_BOOL_VPTR_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_CLEAR, "__atomic_clear", BT_FN_VOID_VPTR_INT,
+ ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE,
+ "__atomic_exchange",
+ BT_FN_VOID_SIZE_VPTR_PTR_PTR_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE_N,
+ "__atomic_exchange_n",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE_1,
+ "__atomic_exchange_1",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE_2,
+ "__atomic_exchange_2",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE_4,
+ "__atomic_exchange_4",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE_8,
+ "__atomic_exchange_8",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE_16,
+ "__atomic_exchange_16",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD,
+ "__atomic_load",
+ BT_FN_VOID_SIZE_CONST_VPTR_PTR_INT,
+ ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD_N,
+ "__atomic_load_n",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD_1,
+ "__atomic_load_1",
+ BT_FN_I1_CONST_VPTR_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD_2,
+ "__atomic_load_2",
+ BT_FN_I2_CONST_VPTR_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD_4,
+ "__atomic_load_4",
+ BT_FN_I4_CONST_VPTR_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD_8,
+ "__atomic_load_8",
+ BT_FN_I8_CONST_VPTR_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD_16,
+ "__atomic_load_16",
+ BT_FN_I16_CONST_VPTR_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_COMPARE_EXCHANGE,
+ "__atomic_compare_exchange",
+ BT_FN_BOOL_SIZE_VPTR_PTR_PTR_INT_INT,
+ ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N,
+ "__atomic_compare_exchange_n",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1,
+ "__atomic_compare_exchange_1",
+ BT_FN_BOOL_VPTR_PTR_I1_BOOL_INT_INT,
+ ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2,
+ "__atomic_compare_exchange_2",
+ BT_FN_BOOL_VPTR_PTR_I2_BOOL_INT_INT,
+ ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4,
+ "__atomic_compare_exchange_4",
+ BT_FN_BOOL_VPTR_PTR_I4_BOOL_INT_INT,
+ ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8,
+ "__atomic_compare_exchange_8",
+ BT_FN_BOOL_VPTR_PTR_I8_BOOL_INT_INT,
+ ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16,
+ "__atomic_compare_exchange_16",
+ BT_FN_BOOL_VPTR_PTR_I16_BOOL_INT_INT,
+ ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE,
+ "__atomic_store",
+ BT_FN_VOID_SIZE_VPTR_PTR_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE_N,
+ "__atomic_store_n",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE_1,
+ "__atomic_store_1",
+ BT_FN_VOID_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE_2,
+ "__atomic_store_2",
+ BT_FN_VOID_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE_4,
+ "__atomic_store_4",
+ BT_FN_VOID_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE_8,
+ "__atomic_store_8",
+ BT_FN_VOID_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE_16,
+ "__atomic_store_16",
+ BT_FN_VOID_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ADD_FETCH_N,
+ "__atomic_add_fetch",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ADD_FETCH_1,
+ "__atomic_add_fetch_1",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ADD_FETCH_2,
+ "__atomic_add_fetch_2",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ADD_FETCH_4,
+ "__atomic_add_fetch_4",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ADD_FETCH_8,
+ "__atomic_add_fetch_8",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ADD_FETCH_16,
+ "__atomic_add_fetch_16",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_SUB_FETCH_N,
+ "__atomic_sub_fetch",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_SUB_FETCH_1,
+ "__atomic_sub_fetch_1",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_SUB_FETCH_2,
+ "__atomic_sub_fetch_2",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_SUB_FETCH_4,
+ "__atomic_sub_fetch_4",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_SUB_FETCH_8,
+ "__atomic_sub_fetch_8",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_SUB_FETCH_16,
+ "__atomic_sub_fetch_16",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_AND_FETCH_N,
+ "__atomic_and_fetch",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_AND_FETCH_1,
+ "__atomic_and_fetch_1",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_AND_FETCH_2,
+ "__atomic_and_fetch_2",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_AND_FETCH_4,
+ "__atomic_and_fetch_4",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_AND_FETCH_8,
+ "__atomic_and_fetch_8",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_AND_FETCH_16,
+ "__atomic_and_fetch_16",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_NAND_FETCH_N,
+ "__atomic_nand_fetch",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_NAND_FETCH_1,
+ "__atomic_nand_fetch_1",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_NAND_FETCH_2,
+ "__atomic_nand_fetch_2",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_NAND_FETCH_4,
+ "__atomic_nand_fetch_4",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_NAND_FETCH_8,
+ "__atomic_nand_fetch_8",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_NAND_FETCH_16,
+ "__atomic_nand_fetch_16",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_XOR_FETCH_N,
+ "__atomic_xor_fetch",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_XOR_FETCH_1,
+ "__atomic_xor_fetch_1",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_XOR_FETCH_2,
+ "__atomic_xor_fetch_2",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_XOR_FETCH_4,
+ "__atomic_xor_fetch_4",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_XOR_FETCH_8,
+ "__atomic_xor_fetch_8",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_XOR_FETCH_16,
+ "__atomic_xor_fetch_16",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_OR_FETCH_N,
+ "__atomic_or_fetch",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_OR_FETCH_1,
+ "__atomic_or_fetch_1",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_OR_FETCH_2,
+ "__atomic_or_fetch_2",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_OR_FETCH_4,
+ "__atomic_or_fetch_4",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_OR_FETCH_8,
+ "__atomic_or_fetch_8",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_OR_FETCH_16,
+ "__atomic_or_fetch_16",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_ADD_N,
+ "__atomic_fetch_add",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_ADD_1,
+ "__atomic_fetch_add_1",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_ADD_2,
+ "__atomic_fetch_add_2",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_ADD_4,
+ "__atomic_fetch_add_4",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_ADD_8,
+ "__atomic_fetch_add_8",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_ADD_16,
+ "__atomic_fetch_add_16",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_SUB_N,
+ "__atomic_fetch_sub",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_SUB_1,
+ "__atomic_fetch_sub_1",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_SUB_2,
+ "__atomic_fetch_sub_2",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_SUB_4,
+ "__atomic_fetch_sub_4",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_SUB_8,
+ "__atomic_fetch_sub_8",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_SUB_16,
+ "__atomic_fetch_sub_16",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_AND_N,
+ "__atomic_fetch_and",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_AND_1,
+ "__atomic_fetch_and_1",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_AND_2,
+ "__atomic_fetch_and_2",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_AND_4,
+ "__atomic_fetch_and_4",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_AND_8,
+ "__atomic_fetch_and_8",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_AND_16,
+ "__atomic_fetch_and_16",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_NAND_N,
+ "__atomic_fetch_nand",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_NAND_1,
+ "__atomic_fetch_nand_1",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_NAND_2,
+ "__atomic_fetch_nand_2",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_NAND_4,
+ "__atomic_fetch_nand_4",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_NAND_8,
+ "__atomic_fetch_nand_8",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_NAND_16,
+ "__atomic_fetch_nand_16",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_XOR_N,
+ "__atomic_fetch_xor",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_XOR_1,
+ "__atomic_fetch_xor_1",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_XOR_2,
+ "__atomic_fetch_xor_2",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_XOR_4,
+ "__atomic_fetch_xor_4",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_XOR_8,
+ "__atomic_fetch_xor_8",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_XOR_16,
+ "__atomic_fetch_xor_16",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_OR_N,
+ "__atomic_fetch_or",
+ BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_OR_1,
+ "__atomic_fetch_or_1",
+ BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_OR_2,
+ "__atomic_fetch_or_2",
+ BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_OR_4,
+ "__atomic_fetch_or_4",
+ BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_OR_8,
+ "__atomic_fetch_or_8",
+ BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_OR_16,
+ "__atomic_fetch_or_16",
+ BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE,
+ "__atomic_always_lock_free",
+ BT_FN_BOOL_SIZE_CONST_VPTR, ATTR_CONST_NOTHROW_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_IS_LOCK_FREE,
+ "__atomic_is_lock_free",
+ BT_FN_BOOL_SIZE_CONST_VPTR, ATTR_CONST_NOTHROW_LEAF_LIST)
+
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_THREAD_FENCE,
+ "__atomic_thread_fence",
+ BT_FN_VOID_INT, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_SIGNAL_FENCE,
+ "__atomic_signal_fence",
+ BT_FN_VOID_INT, ATTR_NOTHROW_LEAF_LIST)
+
+/* This one is actually a function in libatomic and is not expected to be
+ inlined; it is declared here for the convenience of targets generating
+ calls to it. */
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FERAISEEXCEPT,
+ "__atomic_feraiseexcept",
+ BT_FN_VOID_INT, ATTR_LEAF_LIST)
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/system.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/system.h
new file mode 100644
index 0000000..cf45db3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/system.h
@@ -0,0 +1,1334 @@
+/* Get common system includes and various definitions and declarations based
+ on autoconf macros.
+ Copyright (C) 1998-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_SYSTEM_H
+#define GCC_SYSTEM_H
+
+/* Define this so that inttypes.h defines the PRI?64 macros even
+ when compiling with a C++ compiler. Define it here so in the
+ event inttypes.h gets pulled in by another header it is already
+ defined. */
+#define __STDC_FORMAT_MACROS
+
+/* We must include stdarg.h before stdio.h. */
+#include <stdarg.h>
+
+#ifndef va_copy
+# ifdef __va_copy
+# define va_copy(d,s) __va_copy (d, s)
+# else
+# define va_copy(d,s) ((d) = (s))
+# endif
+#endif
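+
+/* A minimal sketch (hypothetical helper, plain C rather than GCC
+   internals) of why va_copy matters: the same argument list is walked
+   twice, and the first walk consumes AP.  Kept under "#if 0" as an
+   illustration only.  */
+#if 0
+static int
+example_vfprintf_twice (FILE *f, const char *fmt, va_list ap)
+{
+  va_list ap2;
+  va_copy (ap2, ap);             /* Take the copy before AP is consumed.  */
+  int len = vfprintf (f, fmt, ap);
+  len += vfprintf (f, fmt, ap2);
+  va_end (ap2);
+  return len;
+}
+#endif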
+
+#ifdef HAVE_STDDEF_H
+# include <stddef.h>
+#endif
+
+#include <stdio.h>
+
+/* Define a generic NULL if one hasn't already been defined. */
+#ifndef NULL
+#define NULL 0
+#endif
+
+/* Use the unlocked open routines from libiberty. */
+
+/* Some of these are #define on some systems, e.g. on AIX to redirect
+ the names to 64-bit-capable functions for LARGE_FILES support. These
+ redefs are pointless here, so we can safely override them. */
+
+#undef fopen
+#undef freopen
+
+#define fopen(PATH, MODE) fopen_unlocked (PATH, MODE)
+#define fdopen(FILDES, MODE) fdopen_unlocked (FILDES, MODE)
+#define freopen(PATH, MODE, STREAM) freopen_unlocked (PATH, MODE, STREAM)
+
+/* The compiler is not a multi-threaded application and therefore we
+ do not have to use the locking functions. In fact, using the locking
+ functions can cause the compiler to be significantly slower under
+ I/O bound conditions (such as -g -O0 on very large source files).
+
+ HAVE_DECL_PUTC_UNLOCKED actually indicates whether or not the stdio
+ code is multi-thread safe by default. If it is set to 0, then do
+ not worry about using the _unlocked functions.
+
+ fputs_unlocked, fwrite_unlocked, and fprintf_unlocked are
+ extensions and need to be prototyped by hand (since we do not
+ define _GNU_SOURCE). */
+
+#if defined HAVE_DECL_PUTC_UNLOCKED && HAVE_DECL_PUTC_UNLOCKED
+
+# ifdef HAVE_PUTC_UNLOCKED
+# undef putc
+# define putc(C, Stream) putc_unlocked (C, Stream)
+# endif
+# ifdef HAVE_PUTCHAR_UNLOCKED
+# undef putchar
+# define putchar(C) putchar_unlocked (C)
+# endif
+# ifdef HAVE_GETC_UNLOCKED
+# undef getc
+# define getc(Stream) getc_unlocked (Stream)
+# endif
+# ifdef HAVE_GETCHAR_UNLOCKED
+# undef getchar
+# define getchar() getchar_unlocked ()
+# endif
+# ifdef HAVE_FPUTC_UNLOCKED
+# undef fputc
+# define fputc(C, Stream) fputc_unlocked (C, Stream)
+# endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+# ifdef HAVE_CLEARERR_UNLOCKED
+# undef clearerr
+# define clearerr(Stream) clearerr_unlocked (Stream)
+# if defined (HAVE_DECL_CLEARERR_UNLOCKED) && !HAVE_DECL_CLEARERR_UNLOCKED
+extern void clearerr_unlocked (FILE *);
+# endif
+# endif
+# ifdef HAVE_FEOF_UNLOCKED
+# undef feof
+# define feof(Stream) feof_unlocked (Stream)
+# if defined (HAVE_DECL_FEOF_UNLOCKED) && !HAVE_DECL_FEOF_UNLOCKED
+extern int feof_unlocked (FILE *);
+# endif
+# endif
+# ifdef HAVE_FILENO_UNLOCKED
+# undef fileno
+# define fileno(Stream) fileno_unlocked (Stream)
+# if defined (HAVE_DECL_FILENO_UNLOCKED) && !HAVE_DECL_FILENO_UNLOCKED
+extern int fileno_unlocked (FILE *);
+# endif
+# endif
+# ifdef HAVE_FFLUSH_UNLOCKED
+# undef fflush
+# define fflush(Stream) fflush_unlocked (Stream)
+# if defined (HAVE_DECL_FFLUSH_UNLOCKED) && !HAVE_DECL_FFLUSH_UNLOCKED
+extern int fflush_unlocked (FILE *);
+# endif
+# endif
+# ifdef HAVE_FGETC_UNLOCKED
+# undef fgetc
+# define fgetc(Stream) fgetc_unlocked (Stream)
+# if defined (HAVE_DECL_FGETC_UNLOCKED) && !HAVE_DECL_FGETC_UNLOCKED
+extern int fgetc_unlocked (FILE *);
+# endif
+# endif
+# ifdef HAVE_FGETS_UNLOCKED
+# undef fgets
+# define fgets(S, n, Stream) fgets_unlocked (S, n, Stream)
+# if defined (HAVE_DECL_FGETS_UNLOCKED) && !HAVE_DECL_FGETS_UNLOCKED
+extern char *fgets_unlocked (char *, int, FILE *);
+# endif
+# endif
+# ifdef HAVE_FPUTS_UNLOCKED
+# undef fputs
+# define fputs(String, Stream) fputs_unlocked (String, Stream)
+# if defined (HAVE_DECL_FPUTS_UNLOCKED) && !HAVE_DECL_FPUTS_UNLOCKED
+extern int fputs_unlocked (const char *, FILE *);
+# endif
+# endif
+# ifdef HAVE_FERROR_UNLOCKED
+# undef ferror
+# define ferror(Stream) ferror_unlocked (Stream)
+# if defined (HAVE_DECL_FERROR_UNLOCKED) && !HAVE_DECL_FERROR_UNLOCKED
+extern int ferror_unlocked (FILE *);
+# endif
+# endif
+# ifdef HAVE_FREAD_UNLOCKED
+# undef fread
+# define fread(Ptr, Size, N, Stream) fread_unlocked (Ptr, Size, N, Stream)
+# if defined (HAVE_DECL_FREAD_UNLOCKED) && !HAVE_DECL_FREAD_UNLOCKED
+extern size_t fread_unlocked (void *, size_t, size_t, FILE *);
+# endif
+# endif
+# ifdef HAVE_FWRITE_UNLOCKED
+# undef fwrite
+# define fwrite(Ptr, Size, N, Stream) fwrite_unlocked (Ptr, Size, N, Stream)
+# if defined (HAVE_DECL_FWRITE_UNLOCKED) && !HAVE_DECL_FWRITE_UNLOCKED
+extern size_t fwrite_unlocked (const void *, size_t, size_t, FILE *);
+# endif
+# endif
+# ifdef HAVE_FPRINTF_UNLOCKED
+# undef fprintf
+/* We can't use a function-like macro here because we don't know if
+ we have varargs macros. */
+# define fprintf fprintf_unlocked
+# if defined (HAVE_DECL_FPRINTF_UNLOCKED) && !HAVE_DECL_FPRINTF_UNLOCKED
+extern int fprintf_unlocked (FILE *, const char *, ...);
+# endif
+# endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+/* ??? Glibc's fwrite/fread_unlocked macros cause
+ "warning: signed and unsigned type in conditional expression". */
+#undef fread_unlocked
+#undef fwrite_unlocked
+
+/* Include <string> before "safe-ctype.h" to avoid GCC poisoning
+   the ctype macros through safe-ctype.h.  */
+
+#ifdef __cplusplus
+#ifdef INCLUDE_STRING
+# include <string>
+#endif
+#endif
+
+/* There are an extraordinary number of issues with <ctype.h>.
+ The last straw is that it varies with the locale. Use libiberty's
+ replacement instead. */
+#include "safe-ctype.h"
+
+#include <sys/types.h>
+
+#include <errno.h>
+
+#if !defined (errno) && defined (HAVE_DECL_ERRNO) && !HAVE_DECL_ERRNO
+extern int errno;
+#endif
+
+#ifdef __cplusplus
+#if defined (INCLUDE_ALGORITHM) || !defined (HAVE_SWAP_IN_UTILITY)
+# include <algorithm>
+#endif
+#ifdef INCLUDE_LIST
+# include <list>
+#endif
+#ifdef INCLUDE_MAP
+# include <map>
+#endif
+#ifdef INCLUDE_SET
+# include <set>
+#endif
+#ifdef INCLUDE_VECTOR
+# include <vector>
+#endif
+#ifdef INCLUDE_ARRAY
+# include <array>
+#endif
+#ifdef INCLUDE_FUNCTIONAL
+# include <functional>
+#endif
+# include <cstring>
+# include <initializer_list>
+# include <new>
+# include <utility>
+# include <type_traits>
+#endif
+
+/* Some of glibc's string inlines cause warnings. Plus we'd rather
+ rely on (and therefore test) GCC's string builtins. */
+#define __NO_STRING_INLINES
+
+#ifdef STRING_WITH_STRINGS
+# include <string.h>
+# include <strings.h>
+#else
+# ifdef HAVE_STRING_H
+# include <string.h>
+# else
+# ifdef HAVE_STRINGS_H
+# include <strings.h>
+# endif
+# endif
+#endif
+
+#ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+#endif
+
+/* When compiling C++ we need to include <cstdlib> as well as <stdlib.h> so
+ that it is processed before we poison "malloc"; otherwise, if a source
+ file uses a standard library header that includes <cstdlib>, we will get
+ an error about 'using std::malloc'. */
+#ifdef __cplusplus
+#include <cstdlib>
+#endif
+
+/* Undef vec_free from AIX stdlib.h header which conflicts with vec.h. */
+#undef vec_free
+
+/* If we don't have an overriding definition, set SUCCESS_EXIT_CODE and
+ FATAL_EXIT_CODE to EXIT_SUCCESS and EXIT_FAILURE respectively,
+ or 0 and 1 if those macros are not defined. */
+#ifndef SUCCESS_EXIT_CODE
+# ifdef EXIT_SUCCESS
+# define SUCCESS_EXIT_CODE EXIT_SUCCESS
+# else
+# define SUCCESS_EXIT_CODE 0
+# endif
+#endif
+
+#ifndef FATAL_EXIT_CODE
+# ifdef EXIT_FAILURE
+# define FATAL_EXIT_CODE EXIT_FAILURE
+# else
+# define FATAL_EXIT_CODE 1
+# endif
+#endif
+
+#define ICE_EXIT_CODE 4
+
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif
+
+#ifdef HAVE_SYS_PARAM_H
+# include <sys/param.h>
+/* We use these identifiers later and they appear in some vendor param.h's. */
+# undef PREFETCH
+# undef m_slot
+#endif
+
+#if HAVE_LIMITS_H
+# include <limits.h>
+#endif
+
+/* A macro to determine whether a VALUE lies inclusively within a
+ certain range without evaluating the VALUE more than once. This
+ macro won't warn if the VALUE is unsigned and the LOWER bound is
+ zero, as it would e.g. with "VALUE >= 0 && ...". Note the LOWER
+ bound *is* evaluated twice, and LOWER must not be greater than
+ UPPER. However the bounds themselves can be either positive or
+ negative. */
+#define IN_RANGE(VALUE, LOWER, UPPER) \
+ ((unsigned HOST_WIDE_INT) (VALUE) - (unsigned HOST_WIDE_INT) (LOWER) \
+ <= (unsigned HOST_WIDE_INT) (UPPER) - (unsigned HOST_WIDE_INT) (LOWER))
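+
+/* Illustrative sketch (not part of the upstream header): the single
+   unsigned comparison relies on wraparound.  With LOWER 10 and
+   UPPER 20, a VALUE of 5 gives (unsigned) (5 - 10), a huge number
+   that exceeds 20 - 10, so IN_RANGE (5, 10, 20) is 0; a VALUE of 15
+   gives 15 - 10 == 5 <= 10, so IN_RANGE (15, 10, 20) is 1.  */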
+
+/* Infrastructure for defining missing _MAX and _MIN macros. Note that
+ macros defined with these cannot be used in #if. */
+
+/* The extra casts work around common compiler bugs. */
+#define INTTYPE_SIGNED(t) (! ((t) 0 < (t) -1))
+/* The outer cast is needed to work around a bug in Cray C 5.0.3.0.
+ It is necessary at least when t == time_t. */
+#define INTTYPE_MINIMUM(t) ((t) (INTTYPE_SIGNED (t) \
+ ? (t) 1 << (sizeof (t) * CHAR_BIT - 1) : (t) 0))
+#define INTTYPE_MAXIMUM(t) ((t) (~ (t) 0 - INTTYPE_MINIMUM (t)))
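+
+/* Illustrative sketch (not part of the upstream header): for a signed
+   16-bit type, INTTYPE_MINIMUM (t) evaluates to (t) 1 << 15, i.e.
+   -32768, and INTTYPE_MAXIMUM (t) to (t) (~ (t) 0 - -32768), i.e.
+   32767; for an unsigned type the minimum is 0 and the maximum is
+   all-ones.  Because the expansions use casts and sizeof, they cannot
+   appear in #if directives, as noted above.  */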
+
+/* Use that infrastructure to provide a few constants. */
+#ifndef UCHAR_MAX
+# define UCHAR_MAX INTTYPE_MAXIMUM (unsigned char)
+#endif
+
+#ifdef TIME_WITH_SYS_TIME
+# include <sys/time.h>
+# include <time.h>
+#else
+# if HAVE_SYS_TIME_H
+# include <sys/time.h>
+# else
+# ifdef HAVE_TIME_H
+# include <time.h>
+# endif
+# endif
+#endif
+
+#ifdef HAVE_FCNTL_H
+# include <fcntl.h>
+#else
+# ifdef HAVE_SYS_FILE_H
+# include <sys/file.h>
+# endif
+#endif
+
+#ifdef HAVE_SYS_LOCKING_H
+# include <sys/locking.h>
+#endif
+
+#ifndef SEEK_SET
+# define SEEK_SET 0
+# define SEEK_CUR 1
+# define SEEK_END 2
+#endif
+#ifndef F_OK
+# define F_OK 0
+# define X_OK 1
+# define W_OK 2
+# define R_OK 4
+#endif
+#ifndef O_RDONLY
+# define O_RDONLY 0
+#endif
+#ifndef O_WRONLY
+# define O_WRONLY 1
+#endif
+#ifndef O_BINARY
+# define O_BINARY 0
+#endif
+
+/* Some systems define these in, e.g., param.h. We undefine these names
+ here to avoid the warnings. We prefer to use our definitions since we
+ know they are correct. */
+
+#undef MIN
+#undef MAX
+#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
+#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
+
+/* Returns the least number N such that N * Y >= X. */
+#define CEIL(x,y) (((x) + (y) - 1) / (y))
+
+/* This macro rounds x up to the y boundary. */
+#define ROUND_UP(x,y) (((x) + (y) - 1) & ~((y) - 1))
+
+/* This macro rounds x down to the y boundary. */
+#define ROUND_DOWN(x,y) ((x) & ~((y) - 1))
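+
+/* Illustrative sketch (not part of the upstream header): ROUND_UP and
+   ROUND_DOWN work by masking low bits, so y must be a power of two;
+   CEIL works for any positive y.  For example:
+
+     CEIL (10, 4)        => 3
+     ROUND_UP (10, 4)    => 12
+     ROUND_DOWN (10, 4)  => 8
+*/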
+
+#ifdef HAVE_SYS_WAIT_H
+#include <sys/wait.h>
+#endif
+
+#ifndef WIFSIGNALED
+#define WIFSIGNALED(S) (((S) & 0xff) != 0 && ((S) & 0xff) != 0x7f)
+#endif
+#ifndef WTERMSIG
+#define WTERMSIG(S) ((S) & 0x7f)
+#endif
+#ifndef WIFEXITED
+#define WIFEXITED(S) (((S) & 0xff) == 0)
+#endif
+#ifndef WEXITSTATUS
+#define WEXITSTATUS(S) (((S) & 0xff00) >> 8)
+#endif
+#ifndef WSTOPSIG
+#define WSTOPSIG WEXITSTATUS
+#endif
+#ifndef WCOREDUMP
+#define WCOREDUMP(S) ((S) & WCOREFLG)
+#endif
+#ifndef WCOREFLG
+#define WCOREFLG 0200
+#endif
+
+#include <signal.h>
+#if !defined (SIGCHLD) && defined (SIGCLD)
+# define SIGCHLD SIGCLD
+#endif
+
+#ifdef HAVE_SYS_MMAN_H
+# include <sys/mman.h>
+#endif
+
+#ifndef MAP_FAILED
+# define MAP_FAILED ((void *)-1)
+#endif
+
+#if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
+# define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#ifdef HAVE_SYS_RESOURCE_H
+# include <sys/resource.h>
+#endif
+
+#ifdef HAVE_SYS_TIMES_H
+# include <sys/times.h>
+#endif
+
+/* The HAVE_DECL_* macros are three-state: undefined, 0 or 1.  If they
+   are defined to 0 then we must provide the relevant declaration
+   here.  These checks will be in the undefined state while configure
+   is running, so be careful to test "defined (HAVE_DECL_*)".  */
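+
+/* Illustrative sketch (not part of the upstream header): autoconf's
+   AC_CHECK_DECLS emits, e.g., "#define HAVE_DECL_ATOF 1" when the host
+   headers declare atof and "#define HAVE_DECL_ATOF 0" when they do
+   not, while during configure itself the macro is simply undefined;
+   hence the two-part "defined (...) && !..." tests below.  */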
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined (HAVE_DECL_ATOF) && !HAVE_DECL_ATOF
+extern double atof (const char *);
+#endif
+
+#if defined (HAVE_DECL_ATOL) && !HAVE_DECL_ATOL
+extern long atol (const char *);
+#endif
+
+#if defined (HAVE_DECL_FREE) && !HAVE_DECL_FREE
+extern void free (void *);
+#endif
+
+#if defined (HAVE_DECL_GETCWD) && !HAVE_DECL_GETCWD
+extern char *getcwd (char *, size_t);
+#endif
+
+#if defined (HAVE_DECL_GETENV) && !HAVE_DECL_GETENV
+extern char *getenv (const char *);
+#endif
+
+#if defined (HAVE_DECL_GETOPT) && !HAVE_DECL_GETOPT
+extern int getopt (int, char * const *, const char *);
+#endif
+
+#if defined (HAVE_DECL_GETPAGESIZE) && !HAVE_DECL_GETPAGESIZE
+extern int getpagesize (void);
+#endif
+
+#if defined (HAVE_DECL_GETWD) && !HAVE_DECL_GETWD
+extern char *getwd (char *);
+#endif
+
+#if defined (HAVE_DECL_SBRK) && !HAVE_DECL_SBRK
+extern void *sbrk (int);
+#endif
+
+#if defined (HAVE_DECL_SETENV) && !HAVE_DECL_SETENV
+int setenv(const char *, const char *, int);
+#endif
+
+#if defined (HAVE_DECL_STRSTR) && !HAVE_DECL_STRSTR
+extern char *strstr (const char *, const char *);
+#endif
+
+#if defined (HAVE_DECL_STPCPY) && !HAVE_DECL_STPCPY
+extern char *stpcpy (char *, const char *);
+#endif
+
+#if defined (HAVE_DECL_UNSETENV) && !HAVE_DECL_UNSETENV
+int unsetenv(const char *);
+#endif
+
+#if defined (HAVE_DECL_MALLOC) && !HAVE_DECL_MALLOC
+extern void *malloc (size_t);
+#endif
+
+#if defined (HAVE_DECL_CALLOC) && !HAVE_DECL_CALLOC
+extern void *calloc (size_t, size_t);
+#endif
+
+#if defined (HAVE_DECL_REALLOC) && !HAVE_DECL_REALLOC
+extern void *realloc (void *, size_t);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#endif
+
+#ifdef HAVE_INTTYPES_H
+#include <inttypes.h>
+#endif
+
+#ifndef SIZE_MAX
+# define SIZE_MAX INTTYPE_MAXIMUM (size_t)
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* If the system doesn't provide strsignal, we get it defined in
+ libiberty but no declaration is supplied. */
+#if !defined (HAVE_STRSIGNAL) \
+ || (defined (HAVE_DECL_STRSIGNAL) && !HAVE_DECL_STRSIGNAL)
+# ifndef strsignal
+extern const char *strsignal (int);
+# endif
+#endif
+
+#ifdef HAVE_GETRLIMIT
+# if defined (HAVE_DECL_GETRLIMIT) && !HAVE_DECL_GETRLIMIT
+# ifndef getrlimit
+struct rlimit;
+extern int getrlimit (int, struct rlimit *);
+# endif
+# endif
+#endif
+
+#ifdef HAVE_SETRLIMIT
+# if defined (HAVE_DECL_SETRLIMIT) && !HAVE_DECL_SETRLIMIT
+# ifndef setrlimit
+struct rlimit;
+extern int setrlimit (int, const struct rlimit *);
+# endif
+# endif
+#endif
+
+#if defined (HAVE_DECL_ABORT) && !HAVE_DECL_ABORT
+extern void abort (void);
+#endif
+
+#if defined (HAVE_DECL_SNPRINTF) && !HAVE_DECL_SNPRINTF
+extern int snprintf (char *, size_t, const char *, ...);
+#endif
+
+#if defined (HAVE_DECL_VSNPRINTF) && !HAVE_DECL_VSNPRINTF
+extern int vsnprintf (char *, size_t, const char *, va_list);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+/* 1 if we have C99 designated initializers. */
+#if !defined(HAVE_DESIGNATED_INITIALIZERS)
+#ifdef __cplusplus
+#define HAVE_DESIGNATED_INITIALIZERS 0
+#else
+#define HAVE_DESIGNATED_INITIALIZERS \
+ ((GCC_VERSION >= 2007) || (__STDC_VERSION__ >= 199901L))
+#endif
+#endif
+
+#if !defined(HAVE_DESIGNATED_UNION_INITIALIZERS)
+#ifdef __cplusplus
+#define HAVE_DESIGNATED_UNION_INITIALIZERS (GCC_VERSION >= 4007)
+#else
+#define HAVE_DESIGNATED_UNION_INITIALIZERS \
+ ((GCC_VERSION >= 2007) || (__STDC_VERSION__ >= 199901L))
+#endif
+#endif
+
+#if HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+
+/* Test if something is a normal file. */
+#ifndef S_ISREG
+#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
+#endif
+
+/* Test if something is a directory. */
+#ifndef S_ISDIR
+#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
+#endif
+
+/* Test if something is a character special file. */
+#ifndef S_ISCHR
+#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR)
+#endif
+
+/* Test if something is a block special file. */
+#ifndef S_ISBLK
+#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK)
+#endif
+
+/* Test if something is a socket. */
+#ifndef S_ISSOCK
+# ifdef S_IFSOCK
+# define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK)
+# else
+# define S_ISSOCK(m) 0
+# endif
+#endif
+
+/* Test if something is a FIFO. */
+#ifndef S_ISFIFO
+# ifdef S_IFIFO
+# define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO)
+# else
+# define S_ISFIFO(m) 0
+# endif
+#endif
+
+/* Define well known filenos if the system does not define them. */
+#ifndef STDIN_FILENO
+# define STDIN_FILENO 0
+#endif
+#ifndef STDOUT_FILENO
+# define STDOUT_FILENO 1
+#endif
+#ifndef STDERR_FILENO
+# define STDERR_FILENO 2
+#endif
+
+/* Some systems have an mkdir that takes a single argument.  */
+#ifdef MKDIR_TAKES_ONE_ARG
+# define mkdir(a,b) mkdir (a)
+#endif
+
+#ifndef HAVE_KILL
+# define kill(p,s) raise (s)
+#endif
+
+/* Provide a way to print an address via printf. */
+#ifndef HOST_PTR_PRINTF
+#define HOST_PTR_PRINTF "%p"
+#endif /* ! HOST_PTR_PRINTF */
+
+/* By default, colon separates directories in a path. */
+#ifndef PATH_SEPARATOR
+#define PATH_SEPARATOR ':'
+#endif
+
+/* Filename handling macros. */
+#include "filenames.h"
+
+/* These should be phased out in favor of IS_DIR_SEPARATOR, where possible. */
+#ifndef DIR_SEPARATOR
+# define DIR_SEPARATOR '/'
+# ifdef HAVE_DOS_BASED_FILE_SYSTEM
+# define DIR_SEPARATOR_2 '\\'
+# endif
+#endif
+
+#if defined (ENABLE_PLUGIN) && defined (HAVE_DLFCN_H)
+/* If plugin support is enabled, we could use libdl. */
+#include <dlfcn.h>
+#endif
+
+/* Do not introduce a gmp.h dependency on the build system. */
+#ifndef GENERATOR_FILE
+#include <gmp.h>
+#endif
+
+/* Get libiberty declarations. */
+#include "libiberty.h"
+
+#undef FFS /* Some systems predefine this symbol; don't let it interfere. */
+#undef FLOAT /* Likewise. */
+#undef ABS /* Likewise. */
+#undef PC /* Likewise. */
+
+/* Provide a default for the HOST_BIT_BUCKET.
+ This suffices for POSIX-like hosts. */
+
+#ifndef HOST_BIT_BUCKET
+#define HOST_BIT_BUCKET "/dev/null"
+#endif
+
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *) 0)->MEMBER)
+#endif
+
+/* Various error reporting routines want to use __FUNCTION__. */
+#if (GCC_VERSION < 2007)
+#ifndef __FUNCTION__
+#define __FUNCTION__ "?"
+#endif /* ! __FUNCTION__ */
+#endif
+
+/* __builtin_expect(A, B) evaluates to A, but notifies the compiler that
+ the most likely value of A is B. This feature was added at some point
+ between 2.95 and 3.0. Let's use 3.0 as the lower bound for now. */
+#if (GCC_VERSION < 3000)
+#define __builtin_expect(a, b) (a)
+#endif
+
+#define LIKELY(x) (__builtin_expect ((x), 1))
+#define UNLIKELY(x) (__builtin_expect ((x), 0))
+
+/* Some of the headers included by <memory> can use "abort" within a
+ namespace, e.g. "_VSTD::abort();", which fails after we use the
+ preprocessor to redefine "abort" as "fancy_abort" below. */
+
+#ifdef INCLUDE_MEMORY
+# include <memory>
+#endif
+
+#ifdef INCLUDE_MUTEX
+# include <mutex>
+#endif
+
+#ifdef INCLUDE_SSTREAM
+# include <sstream>
+#endif
+
+#ifdef INCLUDE_MALLOC_H
+#if defined(HAVE_MALLINFO) || defined(HAVE_MALLINFO2)
+#include <malloc.h>
+#endif
+#endif
+
+#ifdef INCLUDE_PTHREAD_H
+#include <pthread.h>
+#endif
+
+#ifdef INCLUDE_ISL
+#ifdef HAVE_isl
+#include <isl/options.h>
+#include <isl/ctx.h>
+#include <isl/val.h>
+#include <isl/set.h>
+#include <isl/union_set.h>
+#include <isl/map.h>
+#include <isl/union_map.h>
+#include <isl/aff.h>
+#include <isl/constraint.h>
+#include <isl/flow.h>
+#include <isl/ilp.h>
+#include <isl/schedule.h>
+#include <isl/ast_build.h>
+#include <isl/schedule_node.h>
+#include <isl/id.h>
+#include <isl/space.h>
+#endif
+#endif
+
+/* Redefine 'abort' to report an internal error w/o coredump, and
+   to report the location of the error in the source file.
+ Instead of directly calling 'abort' or 'fancy_abort', GCC code
+ should normally call 'internal_error' with a specific message. */
+extern void fancy_abort (const char *, int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+#define abort() fancy_abort (__FILE__, __LINE__, __FUNCTION__)
+
+/* Use gcc_assert(EXPR) to test invariants. */
+#if ENABLE_ASSERT_CHECKING
+#define gcc_assert(EXPR) \
+ ((void)(!(EXPR) ? fancy_abort (__FILE__, __LINE__, __FUNCTION__), 0 : 0))
+#elif (GCC_VERSION >= 4005)
+#define gcc_assert(EXPR) \
+ ((void)(UNLIKELY (!(EXPR)) ? __builtin_unreachable (), 0 : 0))
+#else
+/* Include EXPR, so that unused variable warnings do not occur. */
+#define gcc_assert(EXPR) ((void)(0 && (EXPR)))
+#endif
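+
+/* Illustrative sketch (not part of the upstream header), with a
+   hypothetical condition:
+
+     gcc_assert (pos < len);
+
+   With ENABLE_ASSERT_CHECKING a failure calls fancy_abort with the
+   file, line and function; otherwise, on GCC >= 4.5 hosts, the failed
+   condition feeds __builtin_unreachable, turning the assertion into
+   an optimization hint rather than a runtime test.  */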
+
+#if CHECKING_P
+#define gcc_checking_assert(EXPR) gcc_assert (EXPR)
+#else
+/* N.B.: in release build EXPR is not evaluated. */
+#define gcc_checking_assert(EXPR) ((void)(0 && (EXPR)))
+#endif
+
+#if GCC_VERSION >= 4000
+#define ALWAYS_INLINE inline __attribute__ ((always_inline))
+#else
+#define ALWAYS_INLINE inline
+#endif
+
+#if GCC_VERSION >= 3004
+#define WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__))
+#else
+#define WARN_UNUSED_RESULT
+#endif
+
+/* Use gcc_unreachable() to mark unreachable locations (like an
+   unreachable default case of a switch).  Do not use gcc_assert(0).  */
+#if (GCC_VERSION >= 4005) && !ENABLE_ASSERT_CHECKING
+#define gcc_unreachable() __builtin_unreachable ()
+#else
+#define gcc_unreachable() (fancy_abort (__FILE__, __LINE__, __FUNCTION__))
+#endif
+
+#if GCC_VERSION >= 7000 && defined(__has_attribute)
+# if __has_attribute(fallthrough)
+# define gcc_fallthrough() __attribute__((fallthrough))
+# else
+# define gcc_fallthrough()
+# endif
+#else
+# define gcc_fallthrough()
+#endif
+
+#if GCC_VERSION >= 3001
+#define STATIC_CONSTANT_P(X) (__builtin_constant_p (X) && (X))
+#else
+#define STATIC_CONSTANT_P(X) (false && (X))
+#endif
+
+#ifdef __cplusplus
+#define STATIC_ASSERT(X) \
+ static_assert ((X), #X)
+#else
+#define STATIC_ASSERT(X) \
+ typedef int assertion1[(X) ? 1 : -1] ATTRIBUTE_UNUSED
+#endif
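+
+/* Illustrative sketch (not part of the upstream header): in C the
+   fallback exploits the fact that a negative array bound is
+   ill-formed, so
+
+     STATIC_ASSERT (sizeof (int) >= 4);
+
+   becomes "typedef int assertion1[1]" when the condition holds and
+   the invalid "typedef int assertion1[-1]" (a compile error) when it
+   does not.  */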
+
+/* Provide a fake boolean type. We make no attempt to use the
+ C99 _Bool, as it may not be available in the bootstrap compiler,
+ and even if it is, it is liable to be buggy.
+   This must come after all inclusions of system headers, as some of
+ them will mess us up. */
+
+#undef TRUE
+#undef FALSE
+
+#ifdef __cplusplus
+ /* Obsolete. */
+# define TRUE true
+# define FALSE false
+#else /* !__cplusplus */
+# undef bool
+# undef true
+# undef false
+
+# define bool unsigned char
+# define true 1
+# define false 0
+
+ /* Obsolete. */
+# define TRUE true
+# define FALSE false
+#endif /* !__cplusplus */
+
+/* Some compilers do not allow the use of unsigned char in bitfields. */
+#define BOOL_BITFIELD unsigned int
+
+/* GCC releases older than 4.4 have broken C++ value initialization
+   handling; see PR11309, PR30111, PR33916, PR82939 and PR84405 for
+   more details.  */
+#if GCC_VERSION > 0 && GCC_VERSION < 4004 && !defined(__clang__)
+# define BROKEN_VALUE_INITIALIZATION
+#endif
+
+/* As the last action in this file, we poison the identifiers that
+ shouldn't be used. Note, luckily gcc-3.0's token-based integrated
+ preprocessor won't trip on poisoned identifiers that arrive from
+   the expansion of macros.  E.g. given #define strrchr rindex, no
+   error occurs if rindex is poisoned after this directive is issued
+   and strrchr is called later on.
+
+ Note: We define bypass macros for the few cases where we really
+ want to use the libc memory allocation routines. Otherwise we
+ insist you use the "x" versions from libiberty. */
+
+#define really_call_malloc malloc
+#define really_call_calloc calloc
+#define really_call_realloc realloc
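+
+/* Illustrative sketch (not part of the upstream header): after the
+   poison directives further down, a direct call such as
+
+     p = malloc (n);   -- error: attempt to use poisoned "malloc"
+
+   is rejected at preprocessing time, while really_call_malloc (n)
+   still reaches the libc allocator, because poisoning does not trip
+   on tokens that arrive from macro expansion.  */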
+
+#if defined(FLEX_SCANNER) || defined(YYBISON) || defined(YYBYACC)
+/* Flex and bison use malloc and realloc. Yuk. Note that this means
+ really_call_* cannot be used in a .l or .y file. */
+#define malloc xmalloc
+#define realloc xrealloc
+#endif
+
+#if (GCC_VERSION >= 3000)
+
+/* Note autoconf checks for prototype declarations and includes
+ system.h while doing so. Only poison these tokens if actually
+ compiling gcc, so that the autoconf declaration tests for malloc
+ etc don't spuriously fail. */
+#ifdef IN_GCC
+
+#undef calloc
+#undef strdup
+#undef strndup
+ #pragma GCC poison calloc strdup strndup
+
+#if !defined(FLEX_SCANNER) && !defined(YYBISON)
+#undef malloc
+#undef realloc
+ #pragma GCC poison malloc realloc
+#endif
+
+/* The %m format should be used when GCC's main diagnostic functions
+ supporting %m are available, and xstrerror from libiberty
+ otherwise. */
+#undef strerror
+ #pragma GCC poison strerror
+
+/* loc_t is defined on some systems and too inviting for some
+ programmers to avoid. */
+#undef loc_t
+ #pragma GCC poison loc_t
+
+/* Old target macros that have moved to the target hooks structure. */
+ #pragma GCC poison ASM_OPEN_PAREN ASM_CLOSE_PAREN \
+ FUNCTION_PROLOGUE FUNCTION_EPILOGUE \
+ FUNCTION_END_PROLOGUE FUNCTION_BEGIN_EPILOGUE \
+ DECL_MACHINE_ATTRIBUTES COMP_TYPE_ATTRIBUTES INSERT_ATTRIBUTES \
+ VALID_MACHINE_DECL_ATTRIBUTE VALID_MACHINE_TYPE_ATTRIBUTE \
+ SET_DEFAULT_TYPE_ATTRIBUTES SET_DEFAULT_DECL_ATTRIBUTES \
+ MERGE_MACHINE_TYPE_ATTRIBUTES MERGE_MACHINE_DECL_ATTRIBUTES \
+ MD_INIT_BUILTINS MD_EXPAND_BUILTIN ASM_OUTPUT_CONSTRUCTOR \
+ ASM_OUTPUT_DESTRUCTOR SIGNED_CHAR_SPEC MAX_CHAR_TYPE_SIZE \
+ WCHAR_UNSIGNED UNIQUE_SECTION SELECT_SECTION SELECT_RTX_SECTION \
+ ENCODE_SECTION_INFO STRIP_NAME_ENCODING ASM_GLOBALIZE_LABEL \
+ ASM_OUTPUT_MI_THUNK CONST_COSTS RTX_COSTS DEFAULT_RTX_COSTS \
+ ADDRESS_COST MACHINE_DEPENDENT_REORG ASM_FILE_START ASM_FILE_END \
+ ASM_SIMPLIFY_DWARF_ADDR INIT_TARGET_OPTABS INIT_SUBTARGET_OPTABS \
+ INIT_GOFAST_OPTABS MULSI3_LIBCALL MULDI3_LIBCALL DIVSI3_LIBCALL \
+ DIVDI3_LIBCALL UDIVSI3_LIBCALL UDIVDI3_LIBCALL MODSI3_LIBCALL \
+ MODDI3_LIBCALL UMODSI3_LIBCALL UMODDI3_LIBCALL BUILD_VA_LIST_TYPE \
+ PRETEND_OUTGOING_VARARGS_NAMED STRUCT_VALUE_INCOMING_REGNUM \
+ ASM_OUTPUT_SECTION_NAME PROMOTE_FUNCTION_ARGS PROMOTE_FUNCTION_MODE \
+ STRUCT_VALUE_INCOMING STRICT_ARGUMENT_NAMING \
+ PROMOTE_FUNCTION_RETURN PROMOTE_PROTOTYPES STRUCT_VALUE_REGNUM \
+ SETUP_INCOMING_VARARGS EXPAND_BUILTIN_SAVEREGS \
+ DEFAULT_SHORT_ENUMS SPLIT_COMPLEX_ARGS MD_ASM_CLOBBERS \
+ HANDLE_PRAGMA_REDEFINE_EXTNAME HANDLE_PRAGMA_EXTERN_PREFIX \
+ MUST_PASS_IN_STACK FUNCTION_ARG_PASS_BY_REFERENCE \
+ VECTOR_MODE_SUPPORTED_P TARGET_SUPPORTS_HIDDEN \
+ FUNCTION_ARG_PARTIAL_NREGS ASM_OUTPUT_DWARF_DTPREL \
+ ALLOCATE_INITIAL_VALUE LEGITIMIZE_ADDRESS FRAME_POINTER_REQUIRED \
+ CAN_ELIMINATE TRAMPOLINE_TEMPLATE INITIALIZE_TRAMPOLINE \
+ TRAMPOLINE_ADJUST_ADDRESS STATIC_CHAIN STATIC_CHAIN_INCOMING \
+ RETURN_POPS_ARGS UNITS_PER_SIMD_WORD OVERRIDE_OPTIONS \
+ OPTIMIZATION_OPTIONS CLASS_LIKELY_SPILLED_P \
+ USING_SJLJ_EXCEPTIONS TARGET_UNWIND_INFO \
+ CAN_DEBUG_WITHOUT_FP UNLIKELY_EXECUTED_TEXT_SECTION_NAME \
+ HOT_TEXT_SECTION_NAME LEGITIMATE_CONSTANT_P ALWAYS_STRIP_DOTDOT \
+ OUTPUT_ADDR_CONST_EXTRA SMALL_REGISTER_CLASSES ASM_OUTPUT_IDENT \
+ ASM_BYTE_OP MEMBER_TYPE_FORCES_BLK LIBGCC2_HAS_SF_MODE \
+ LIBGCC2_HAS_DF_MODE LIBGCC2_HAS_XF_MODE LIBGCC2_HAS_TF_MODE \
+ CLEAR_BY_PIECES_P MOVE_BY_PIECES_P SET_BY_PIECES_P \
+ STORE_BY_PIECES_P TARGET_FLT_EVAL_METHOD \
+ HARD_REGNO_CALL_PART_CLOBBERED HARD_REGNO_MODE_OK \
+ MODES_TIEABLE_P FUNCTION_ARG_PADDING SLOW_UNALIGNED_ACCESS \
+ HARD_REGNO_NREGS SECONDARY_MEMORY_NEEDED_MODE \
+ SECONDARY_MEMORY_NEEDED CANNOT_CHANGE_MODE_CLASS \
+ TRULY_NOOP_TRUNCATION FUNCTION_ARG_OFFSET CONSTANT_ALIGNMENT \
+ STARTING_FRAME_OFFSET
+
+/* Target macros only used for code built for the target, which have
+   moved to libgcc-tm.h or have never been present elsewhere. */
+ #pragma GCC poison DECLARE_LIBRARY_RENAMES LIBGCC2_GNU_PREFIX \
+ MD_UNWIND_SUPPORT MD_FROB_UPDATE_CONTEXT ENABLE_EXECUTE_STACK \
+ REG_VALUE_IN_UNWIND_CONTEXT ASSUME_EXTENDED_UNWIND_CONTEXT
+
+/* Other obsolete target macros, or macros that used to be in target
+   headers without being used; these may be obsolete or may never have
+   been used. */
+ #pragma GCC poison INT_ASM_OP ASM_OUTPUT_EH_REGION_BEG CPP_PREDEFINES \
+ ASM_OUTPUT_EH_REGION_END ASM_OUTPUT_LABELREF_AS_INT SMALL_STACK \
+ DOESNT_NEED_UNWINDER EH_TABLE_LOOKUP OBJC_SELECTORS_WITHOUT_LABELS \
+ OMIT_EH_TABLE EASY_DIV_EXPR IMPLICIT_FIX_EXPR \
+ LONGJMP_RESTORE_FROM_STACK MAX_INT_TYPE_SIZE ASM_IDENTIFY_GCC \
+ STDC_VALUE TRAMPOLINE_ALIGN ASM_IDENTIFY_GCC_AFTER_SOURCE \
+ SLOW_ZERO_EXTEND SUBREG_REGNO_OFFSET DWARF_LINE_MIN_INSTR_LENGTH \
+ TRADITIONAL_RETURN_FLOAT NO_BUILTIN_SIZE_TYPE \
+ NO_BUILTIN_PTRDIFF_TYPE NO_BUILTIN_WCHAR_TYPE NO_BUILTIN_WINT_TYPE \
+ BLOCK_PROFILER BLOCK_PROFILER_CODE FUNCTION_BLOCK_PROFILER \
+ FUNCTION_BLOCK_PROFILER_EXIT MACHINE_STATE_SAVE \
+ MACHINE_STATE_RESTORE SCCS_DIRECTIVE SECTION_ASM_OP BYTEORDER \
+ ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL HOST_WORDS_BIG_ENDIAN \
+ OBJC_PROLOGUE ALLOCATE_TRAMPOLINE HANDLE_PRAGMA ROUND_TYPE_SIZE \
+ ROUND_TYPE_SIZE_UNIT CONST_SECTION_ASM_OP CRT_GET_RFIB_TEXT \
+ INSN_CACHE_DEPTH INSN_CACHE_SIZE \
+ INSN_CACHE_LINE_WIDTH INIT_SECTION_PREAMBLE NEED_ATEXIT ON_EXIT \
+ EXIT_BODY OBJECT_FORMAT_ROSE MULTIBYTE_CHARS MAP_CHARACTER \
+ LIBGCC_NEEDS_DOUBLE FINAL_PRESCAN_LABEL DEFAULT_CALLER_SAVES \
+ LOAD_ARGS_REVERSED MAX_INTEGER_COMPUTATION_MODE \
+ CONVERT_HARD_REGISTER_TO_SSA_P ASM_OUTPUT_MAIN_SOURCE_FILENAME \
+ FIRST_INSN_ADDRESS TEXT_SECTION SHARED_BSS_SECTION_ASM_OP \
+ PROMOTED_MODE EXPAND_BUILTIN_VA_END \
+ LINKER_DOES_NOT_WORK_WITH_DWARF2 FUNCTION_ARG_KEEP_AS_REFERENCE \
+ GIV_SORT_CRITERION MAX_LONG_TYPE_SIZE MAX_LONG_DOUBLE_TYPE_SIZE \
+ MAX_WCHAR_TYPE_SIZE SHARED_SECTION_ASM_OP INTEGRATE_THRESHOLD \
+ FINAL_REG_PARM_STACK_SPACE MAYBE_REG_PARM_STACK_SPACE \
+ TRADITIONAL_PIPELINE_INTERFACE DFA_PIPELINE_INTERFACE \
+ BUILTIN_SETJMP_FRAME_VALUE \
+ SUNOS4_SHARED_LIBRARIES PROMOTE_FOR_CALL_ONLY \
+ SPACE_AFTER_L_OPTION NO_RECURSIVE_FUNCTION_CSE \
+ DEFAULT_MAIN_RETURN TARGET_MEM_FUNCTIONS EXPAND_BUILTIN_VA_ARG \
+ COLLECT_PARSE_FLAG DWARF2_GENERATE_TEXT_SECTION_LABEL WINNING_GDB \
+ ASM_OUTPUT_FILENAME ASM_OUTPUT_SOURCE_LINE FILE_NAME_JOINER \
+ GDB_INV_REF_REGPARM_STABS_LETTER \
+ PUT_SDB_SRC_FILE STABS_GCC_MARKER SDB_GENERATE_FAKE \
+ NON_SAVING_SETJMP TARGET_LATE_RTL_PROLOGUE_EPILOGUE \
+ CASE_DROPS_THROUGH TARGET_BELL TARGET_BS TARGET_CR TARGET_DIGIT0 \
+ TARGET_ESC TARGET_FF TARGET_NEWLINE TARGET_TAB TARGET_VT \
+ LINK_LIBGCC_SPECIAL DONT_ACCESS_GBLS_AFTER_EPILOGUE \
+ TARGET_OPTIONS TARGET_SWITCHES EXTRA_CC_MODES FINALIZE_PIC \
+ PREDICATE_CODES SPECIAL_MODE_PREDICATES UNALIGNED_WORD_ASM_OP \
+ EXTRA_SECTIONS EXTRA_SECTION_FUNCTIONS READONLY_DATA_SECTION \
+ TARGET_ASM_EXCEPTION_SECTION TARGET_ASM_EH_FRAME_SECTION \
+ SMALL_ARG_MAX ASM_OUTPUT_SHARED_BSS ASM_OUTPUT_SHARED_COMMON \
+ ASM_OUTPUT_SHARED_LOCAL ASM_MAKE_LABEL_LINKONCE \
+ STACK_CHECK_PROBE_INTERVAL STACK_CHECK_PROBE_LOAD \
+ ORDER_REGS_FOR_LOCAL_ALLOC FUNCTION_OUTGOING_VALUE \
+ ASM_DECLARE_CONSTANT_NAME MODIFY_TARGET_NAME SWITCHES_NEED_SPACES \
+ SWITCH_CURTAILS_COMPILATION SWITCH_TAKES_ARG WORD_SWITCH_TAKES_ARG \
+ TARGET_OPTION_TRANSLATE_TABLE HANDLE_PRAGMA_PACK_PUSH_POP \
+ HANDLE_SYSV_PRAGMA HANDLE_PRAGMA_WEAK CONDITIONAL_REGISTER_USAGE \
+ FUNCTION_ARG_BOUNDARY MUST_USE_SJLJ_EXCEPTIONS US_SOFTWARE_GOFAST \
+ USING_SVR4_H SVR4_ASM_SPEC FUNCTION_ARG FUNCTION_ARG_ADVANCE \
+ FUNCTION_INCOMING_ARG IRA_COVER_CLASSES TARGET_VERSION \
+ MACHINE_TYPE TARGET_HAS_TARGETCM ASM_OUTPUT_BSS \
+ SETJMP_VIA_SAVE_AREA FORBIDDEN_INC_DEC_CLASSES \
+ PREFERRED_OUTPUT_RELOAD_CLASS SYSTEM_INCLUDE_DIR \
+ STANDARD_INCLUDE_DIR STANDARD_INCLUDE_COMPONENT \
+ LINK_ELIMINATE_DUPLICATE_LDIRECTORIES MIPS_DEBUGGING_INFO \
+ IDENT_ASM_OP ALL_COP_ADDITIONAL_REGISTER_NAMES \
+ RANGE_TEST_NON_SHORT_CIRCUIT \
+ REAL_VALUE_TRUNCATE REVERSE_CONDEXEC_PREDICATES_P \
+ TARGET_ALIGN_ANON_BITFIELDS TARGET_NARROW_VOLATILE_BITFIELDS \
+ IDENT_ASM_OP UNALIGNED_SHORT_ASM_OP UNALIGNED_INT_ASM_OP \
+ UNALIGNED_LONG_ASM_OP UNALIGNED_DOUBLE_INT_ASM_OP \
+ USE_COMMON_FOR_ONE_ONLY IFCVT_EXTRA_FIELDS IFCVT_INIT_EXTRA_FIELDS \
+ CASE_USE_BIT_TESTS FIXUNS_TRUNC_LIKE_FIX_TRUNC \
+ GO_IF_MODE_DEPENDENT_ADDRESS DELAY_SLOTS_FOR_EPILOGUE \
+ ELIGIBLE_FOR_EPILOGUE_DELAY TARGET_C99_FUNCTIONS TARGET_HAS_SINCOS \
+ REG_CLASS_FROM_LETTER CONST_OK_FOR_LETTER_P \
+ CONST_DOUBLE_OK_FOR_LETTER_P EXTRA_CONSTRAINT \
+ REG_CLASS_FROM_CONSTRAINT REG_CLASS_FOR_CONSTRAINT \
+ EXTRA_CONSTRAINT_STR EXTRA_MEMORY_CONSTRAINT \
+ EXTRA_ADDRESS_CONSTRAINT CONST_DOUBLE_OK_FOR_CONSTRAINT_P \
+ CALLER_SAVE_PROFITABLE LARGEST_EXPONENT_IS_NORMAL \
+ ROUND_TOWARDS_ZERO SF_SIZE DF_SIZE XF_SIZE TF_SIZE LIBGCC2_TF_CEXT \
+ LIBGCC2_LONG_DOUBLE_TYPE_SIZE STRUCT_VALUE \
+ EH_FRAME_IN_DATA_SECTION TARGET_FLT_EVAL_METHOD_NON_DEFAULT \
+ JCR_SECTION_NAME TARGET_USE_JCR_SECTION SDB_DEBUGGING_INFO \
+ SDB_DEBUG NO_IMPLICIT_EXTERN_C NOTICE_UPDATE_CC \
+ CC_STATUS_MDEP_INIT CC_STATUS_MDEP CC_STATUS SLOW_SHORT_ACCESS
+
+/* Hooks that are no longer used. */
+ #pragma GCC poison LANG_HOOKS_FUNCTION_MARK LANG_HOOKS_FUNCTION_FREE \
+ LANG_HOOKS_MARK_TREE LANG_HOOKS_INSERT_DEFAULT_ATTRIBUTES \
+ LANG_HOOKS_TREE_INLINING_ESTIMATE_NUM_INSNS \
+ LANG_HOOKS_PUSHLEVEL LANG_HOOKS_SET_BLOCK \
+ LANG_HOOKS_MAYBE_BUILD_CLEANUP LANG_HOOKS_UPDATE_DECL_AFTER_SAVING \
+ LANG_HOOKS_POPLEVEL LANG_HOOKS_TRUTHVALUE_CONVERSION \
+ TARGET_PROMOTE_FUNCTION_ARGS TARGET_PROMOTE_FUNCTION_RETURN \
+ LANG_HOOKS_MISSING_ARGUMENT LANG_HOOKS_HASH_TYPES \
+ TARGET_HANDLE_OFAST TARGET_OPTION_OPTIMIZATION \
+ TARGET_IRA_COVER_CLASSES TARGET_HELP \
+ TARGET_HANDLE_PRAGMA_EXTERN_PREFIX \
+ TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN \
+ TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD \
+ TARGET_MD_ASM_CLOBBERS TARGET_RELAXED_ORDERING \
+ EXTENDED_SDB_BASIC_TYPES TARGET_INVALID_PARAMETER_TYPE \
+ TARGET_INVALID_RETURN_TYPE
+
+/* Arrays that were deleted in favor of a functional interface. */
+ #pragma GCC poison built_in_decls implicit_built_in_decls
+
+/* Hooks into libgcc2. */
+ #pragma GCC poison LIBGCC2_DOUBLE_TYPE_SIZE LIBGCC2_WORDS_BIG_ENDIAN \
+ LIBGCC2_FLOAT_WORDS_BIG_ENDIAN
+
+/* Miscellaneous macros that are no longer used. */
+ #pragma GCC poison USE_MAPPED_LOCATION GET_ENVIRONMENT
+
+/* Libiberty macros that are no longer used in GCC. */
+#undef ANSI_PROTOTYPES
+#undef PTR_CONST
+#undef LONG_DOUBLE
+#undef VPARAMS
+#undef VA_OPEN
+#undef VA_FIXEDARG
+#undef VA_CLOSE
+#undef VA_START
+ #pragma GCC poison ANSI_PROTOTYPES PTR_CONST LONG_DOUBLE VPARAMS VA_OPEN \
+ VA_FIXEDARG VA_CLOSE VA_START
+#endif /* IN_GCC */
+
+/* Front ends should never have to include middle-end headers. Enforce
+ this by poisoning the header double-include protection defines. */
+#ifdef IN_GCC_FRONTEND
+#pragma GCC poison GCC_RTL_H GCC_EXCEPT_H GCC_EXPR_H
+#endif
+
+/* Note: not all uses of the `index' token (e.g. variable names and
+ structure members) have been eliminated. */
+#undef bcopy
+#undef bzero
+#undef bcmp
+#undef rindex
+ #pragma GCC poison bcopy bzero bcmp rindex
+
+/* Poison the ENABLE_CHECKING macro, which should be replaced with
+   'if (flag_checking)' or with the CHECKING_P macro. */
+#pragma GCC poison ENABLE_CHECKING
+
+#endif /* GCC >= 3.0 */
+
+/* This macro allows casting away const-ness to pass -Wcast-qual
+ warnings. DO NOT USE THIS UNLESS YOU REALLY HAVE TO! It should
+ only be used in certain specific cases. One valid case is where
+ the C standard definitions or prototypes force you to. E.g. if you
+ need to free a const object, or if you pass a const string to
+ execv, et al. Another valid use would be in an allocation function
+ that creates const objects that need to be initialized. In some
+ cases we have non-const functions that return the argument
+ (e.g. next_nonnote_insn). Rather than create const shadow
+ functions, we can cast away const-ness in calling these interfaces
+ if we're careful to verify that the called function does indeed not
+ modify its argument and the return value is only used in a const
+ context. (This can be somewhat dangerous as these assumptions can
+ change after the fact). Beyond these uses, most other cases of
+ using this macro should be viewed with extreme caution. */
+
+#ifdef __cplusplus
+#define CONST_CAST2(TOTYPE,FROMTYPE,X) (const_cast<TOTYPE> (X))
+#else
+#if defined(__GNUC__) && GCC_VERSION > 4000
+/* GCC 4.0.x has a bug where it may ICE on this expression,
+   as does GCC 3.4.x (PR17436).  */
+#define CONST_CAST2(TOTYPE,FROMTYPE,X) ((__extension__(union {FROMTYPE _q; TOTYPE _nq;})(X))._nq)
+#elif defined(__GNUC__)
+inline char *
+helper_const_non_const_cast (const char *p)
+{
+ union {
+ const char *const_c;
+ char *c;
+ } val;
+ val.const_c = p;
+ return val.c;
+}
+
+#define CONST_CAST2(TOTYPE,FROMTYPE,X) \
+ ((TOTYPE) helper_const_non_const_cast ((const char *) (FROMTYPE) (X)))
+#else
+#define CONST_CAST2(TOTYPE,FROMTYPE,X) ((TOTYPE)(FROMTYPE)(X))
+#endif
+#endif
+#define CONST_CAST(TYPE,X) CONST_CAST2 (TYPE, const TYPE, (X))
+#define CONST_CAST_TREE(X) CONST_CAST (union tree_node *, (X))
+#define CONST_CAST_RTX(X) CONST_CAST (struct rtx_def *, (X))
+#define CONST_CAST_RTX_INSN(X) CONST_CAST (struct rtx_insn *, (X))
+#define CONST_CAST_BB(X) CONST_CAST (struct basic_block_def *, (X))
+#define CONST_CAST_GIMPLE(X) CONST_CAST (gimple *, (X))
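+
+/* Illustrative sketch (not part of the upstream header): a typical use
+   is handing a string literal to an API with a non-const prototype,
+   e.g.
+
+     char *arg = CONST_CAST (char *, "input");
+
+   In C++ this is a plain const_cast; the C variants launder the
+   pointer through a union so -Wcast-qual stays quiet.  */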
+
+/* Activate certain diagnostics as warnings (not errors via the
+ -Werror flag). */
+#if GCC_VERSION >= 4003
+/* If asserts are disabled, activate -Wuninitialized as a warning (not
+ an error/-Werror). */
+#ifndef ENABLE_ASSERT_CHECKING
+#pragma GCC diagnostic warning "-Wuninitialized"
+#endif
+#endif
+
+#ifdef ENABLE_VALGRIND_ANNOTATIONS
+# ifdef HAVE_VALGRIND_MEMCHECK_H
+# include <valgrind/memcheck.h>
+# elif defined HAVE_MEMCHECK_H
+# include <memcheck.h>
+# else
+# include <valgrind.h>
+# endif
+/* Compatibility macros to let valgrind 3.1 work. */
+# ifndef VALGRIND_MAKE_MEM_NOACCESS
+# define VALGRIND_MAKE_MEM_NOACCESS VALGRIND_MAKE_NOACCESS
+# endif
+# ifndef VALGRIND_MAKE_MEM_DEFINED
+# define VALGRIND_MAKE_MEM_DEFINED VALGRIND_MAKE_READABLE
+# endif
+# ifndef VALGRIND_MAKE_MEM_UNDEFINED
+# define VALGRIND_MAKE_MEM_UNDEFINED VALGRIND_MAKE_WRITABLE
+# endif
+#else
+/* Avoid #ifdefs when we can help it.  */
+#define VALGRIND_DISCARD(x)
+#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
+#define VALGRIND_FREELIKE_BLOCK(x,y)
+#endif
+
+/* Macros to temporarily ignore some warnings. */
+#if GCC_VERSION >= 6000
+#define GCC_DIAGNOSTIC_STRINGIFY(x) #x
+#define GCC_DIAGNOSTIC_PUSH_IGNORED(x) \
+ _Pragma ("GCC diagnostic push") \
+ _Pragma (GCC_DIAGNOSTIC_STRINGIFY (GCC diagnostic ignored #x))
+#define GCC_DIAGNOSTIC_POP _Pragma ("GCC diagnostic pop")
+#else
+#define GCC_DIAGNOSTIC_PUSH_IGNORED(x)
+#define GCC_DIAGNOSTIC_POP
+#endif
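+
+/* Illustrative sketch (not part of the upstream header): a typical use
+   brackets code that trips a known-spurious warning, e.g.
+
+     GCC_DIAGNOSTIC_PUSH_IGNORED (-Wmaybe-uninitialized)
+     use (x);   -- hypothetical call that warns spuriously
+     GCC_DIAGNOSTIC_POP
+
+   Note the diagnostic option is passed unquoted; the stringify helper
+   adds the quotes the pragma needs.  On pre-6.0 host compilers both
+   macros expand to nothing.  */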
+
+/* In an LTO -fwhole-program build we still want to keep the debug
+   functions available for the debugger.  Mark them as used to prevent
+   removal.  */
+#if (GCC_VERSION > 4000)
+#define DEBUG_FUNCTION __attribute__ ((__used__))
+#define DEBUG_VARIABLE __attribute__ ((__used__))
+#else
+#define DEBUG_FUNCTION
+#define DEBUG_VARIABLE
+#endif
+
+/* General macro to extract bit Y of X. */
+#define TEST_BIT(X, Y) (((X) >> (Y)) & 1)
+
+/* Get definitions of HOST_WIDE_INT. */
+#include "hwint.h"
+
+typedef int sort_r_cmp_fn (const void *, const void *, void *);
+void qsort_chk (void *, size_t, size_t, sort_r_cmp_fn *, void *);
+void gcc_sort_r (void *, size_t, size_t, sort_r_cmp_fn *, void *);
+void gcc_qsort (void *, size_t, size_t, int (*)(const void *, const void *));
+void gcc_stablesort (void *, size_t, size_t,
+ int (*)(const void *, const void *));
+void gcc_stablesort_r (void *, size_t, size_t, sort_r_cmp_fn *, void *data);
+/* Redirect four-argument qsort calls to gcc_qsort; one-argument invocations
+ correspond to vec::qsort, and use C qsort internally. */
+#define PP_5th(a1, a2, a3, a4, a5, ...) a5
+#undef qsort
+#define qsort(...) PP_5th (__VA_ARGS__, gcc_qsort, 3, 2, qsort, 0) (__VA_ARGS__)
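+
+/* Illustrative sketch (not part of the upstream header): the variadic
+   PP_5th trick selects its fifth argument, so the argument count of a
+   qsort call picks the replacement:
+
+     qsort (a, n, sizeof *a, cmp);   => gcc_qsort (a, n, sizeof *a, cmp);
+     v.qsort (cmp);                  => v.qsort (cmp);  -- unchanged
+
+   Only the classic four-argument form is redirected.  */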
+
+#define ONE_K 1024
+#define ONE_M (ONE_K * ONE_K)
+#define ONE_G (ONE_K * ONE_M)
+
+/* Display a number as an integer multiple of either:
+   - 1024, if said integer is >= 10 K (in base 2)
+   - 1024 * 1024, if said integer is >= 10 M (in base 2)
+ */
+#define SIZE_SCALE(x) (((x) < 10 * ONE_K \
+ ? (x) \
+ : ((x) < 10 * ONE_M \
+ ? (x) / ONE_K \
+ : (x) / ONE_M)))
+
+/* For a given integer, display either:
+   - the character 'k', if the number is at least 10 K (in base 2)
+     but strictly lower than 10 M (in base 2)
+   - the character 'M', if the number is at least 10 M (in base 2)
+   - the character ' ', if the number is strictly lower than 10 K */
+#define SIZE_LABEL(x) ((x) < 10 * ONE_K ? ' ' : ((x) < 10 * ONE_M ? 'k' : 'M'))
+
+/* Display an integer amount as multiple of 1K or 1M (in base 2).
+ Display the correct unit (either k, M, or ' ') after the amount, as
+ well. */
+#define SIZE_AMOUNT(size) (uint64_t)SIZE_SCALE (size), SIZE_LABEL (size)
+
+/* Format string particle for printing a SIZE_AMOUNT with N being the width
+ of the number. */
+#define PRsa(n) "%" #n PRIu64 "%c"
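+
+/* Illustrative sketch (not part of the upstream header): SIZE_AMOUNT
+   expands to the two arguments that PRsa's two conversions consume,
+   e.g.
+
+     fprintf (stderr, "pool " PRsa (8) "\n", SIZE_AMOUNT (nbytes));
+
+   where a hypothetical nbytes of 20480 prints as "20k" padded to
+   eight columns.  */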
+
+/* System headers may define NULL to be an integer (e.g. 0L), which cannot be
+   used safely in certain contexts (e.g. as sentinels).  Redefine NULL to
+   nullptr in order to make it safer.  Note that this might confuse system
+   headers; by convention, however, they must not be included after this
+   point. */
+#ifdef __cplusplus
+#undef NULL
+#define NULL nullptr
+#endif
+
+/* Return true if STR string starts with PREFIX. */
+
+inline bool
+startswith (const char *str, const char *prefix)
+{
+ return strncmp (str, prefix, strlen (prefix)) == 0;
+}
+
+/* Return true if STR string ends with SUFFIX. */
+
+inline bool
+endswith (const char *str, const char *suffix)
+{
+ size_t str_len = strlen (str);
+ size_t suffix_len = strlen (suffix);
+ if (str_len < suffix_len)
+ return false;
+
+ return memcmp (str + str_len - suffix_len, suffix, suffix_len) == 0;
+}
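+
+/* Illustrative sketch (not part of the upstream header), with
+   hypothetical arguments:
+
+     startswith (name, "__builtin_")   => true for "__builtin_abs"
+     endswith (file, ".cc")            => true for "gimple.cc"
+
+   Both cost O(strlen) and replace the easier-to-misuse strncmp and
+   memcmp idioms.  */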
+
+#endif /* ! GCC_SYSTEM_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-def.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-def.h
new file mode 100644
index 0000000..847698a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-def.h
@@ -0,0 +1,125 @@
+/* Default initializers for a generic GCC target.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding! */
+
+/* See target.def for a description of what this file contains and how to
+ use it.
+
+ We want to have non-NULL default definitions of all hook functions,
+ even if they do nothing. */
+
+/* Note that if one of these macros must be defined in an OS .h file
+ rather than the .c file, then we need to wrap the default
+   definition in an #ifndef, since files include tm.h before this one. */
+
+#define TARGET_ASM_ALIGNED_HI_OP "\t.short\t"
+#define TARGET_ASM_ALIGNED_SI_OP "\t.long\t"
+#define TARGET_ASM_ALIGNED_DI_OP NULL
+#define TARGET_ASM_ALIGNED_TI_OP NULL
+
+/* GAS and SYSV4 assemblers accept these. */
+#if defined (OBJECT_FORMAT_ELF)
+#define TARGET_ASM_UNALIGNED_HI_OP "\t.2byte\t"
+#define TARGET_ASM_UNALIGNED_SI_OP "\t.4byte\t"
+#define TARGET_ASM_UNALIGNED_DI_OP "\t.8byte\t"
+#define TARGET_ASM_UNALIGNED_TI_OP NULL
+#else
+#define TARGET_ASM_UNALIGNED_HI_OP NULL
+#define TARGET_ASM_UNALIGNED_SI_OP NULL
+#define TARGET_ASM_UNALIGNED_DI_OP NULL
+#define TARGET_ASM_UNALIGNED_TI_OP NULL
+#endif /* OBJECT_FORMAT_ELF */
+
+/* There is no standard way to handle P{S,D,T}Imode; targets must
+   implement them if required. */
+#define TARGET_ASM_ALIGNED_PSI_OP NULL
+#define TARGET_ASM_UNALIGNED_PSI_OP NULL
+#define TARGET_ASM_ALIGNED_PDI_OP NULL
+#define TARGET_ASM_UNALIGNED_PDI_OP NULL
+#define TARGET_ASM_ALIGNED_PTI_OP NULL
+#define TARGET_ASM_UNALIGNED_PTI_OP NULL
+
+#if !defined(TARGET_ASM_CONSTRUCTOR) && !defined(USE_COLLECT2)
+# ifdef CTORS_SECTION_ASM_OP
+# define TARGET_ASM_CONSTRUCTOR default_ctor_section_asm_out_constructor
+# else
+# ifdef TARGET_ASM_NAMED_SECTION
+# define TARGET_ASM_CONSTRUCTOR default_named_section_asm_out_constructor
+# else
+# define TARGET_ASM_CONSTRUCTOR default_asm_out_constructor
+# endif
+# endif
+#endif
+
+#if !defined(TARGET_ASM_DESTRUCTOR) && !defined(USE_COLLECT2)
+# ifdef DTORS_SECTION_ASM_OP
+# define TARGET_ASM_DESTRUCTOR default_dtor_section_asm_out_destructor
+# else
+# ifdef TARGET_ASM_NAMED_SECTION
+# define TARGET_ASM_DESTRUCTOR default_named_section_asm_out_destructor
+# else
+# define TARGET_ASM_DESTRUCTOR default_asm_out_destructor
+# endif
+# endif
+#endif
+
+#if !defined(TARGET_HAVE_CTORS_DTORS)
+# if defined(TARGET_ASM_CONSTRUCTOR) && defined(TARGET_ASM_DESTRUCTOR)
+# define TARGET_HAVE_CTORS_DTORS true
+# endif
+#endif
+
+#ifndef TARGET_TERMINATE_DW2_EH_FRAME_INFO
+#ifdef EH_FRAME_SECTION_NAME
+#define TARGET_TERMINATE_DW2_EH_FRAME_INFO false
+#endif
+#endif
+
+#if !defined(TARGET_ASM_OUTPUT_ANCHOR) && !defined(ASM_OUTPUT_DEF)
+#define TARGET_ASM_OUTPUT_ANCHOR NULL
+#endif
+
+#define TARGET_ASM_ALIGNED_INT_OP \
+ {TARGET_ASM_ALIGNED_HI_OP, \
+ TARGET_ASM_ALIGNED_PSI_OP, \
+ TARGET_ASM_ALIGNED_SI_OP, \
+ TARGET_ASM_ALIGNED_PDI_OP, \
+ TARGET_ASM_ALIGNED_DI_OP, \
+ TARGET_ASM_ALIGNED_PTI_OP, \
+ TARGET_ASM_ALIGNED_TI_OP}
+
+#define TARGET_ASM_UNALIGNED_INT_OP \
+ {TARGET_ASM_UNALIGNED_HI_OP, \
+ TARGET_ASM_UNALIGNED_PSI_OP, \
+ TARGET_ASM_UNALIGNED_SI_OP, \
+ TARGET_ASM_UNALIGNED_PDI_OP, \
+ TARGET_ASM_UNALIGNED_DI_OP, \
+ TARGET_ASM_UNALIGNED_PTI_OP, \
+ TARGET_ASM_UNALIGNED_TI_OP}
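+
+/* Illustrative note (not part of the upstream header): the two
+   initializer lists above populate the per-mode directive strings in
+   order HI, PSI, SI, PDI, DI, PTI, TI; on an ELF target, for example,
+   the unaligned SI slot receives "\t.4byte\t" while modes with no
+   suitable directive stay NULL.  */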
+
+#if !defined (TARGET_FUNCTION_INCOMING_ARG)
+#define TARGET_FUNCTION_INCOMING_ARG TARGET_FUNCTION_ARG
+#endif
+
+#include "target-hooks-def.h"
+
+#include "hooks.h"
+#include "targhooks.h"
+#include "insn-target-def.h"
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-globals.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-globals.h
new file mode 100644
index 0000000..daedf66
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-globals.h
@@ -0,0 +1,95 @@
+/* Target-dependent globals.
+ Copyright (C) 2010-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef TARGET_GLOBALS_H
+#define TARGET_GLOBALS_H 1
+
+#if SWITCHABLE_TARGET
+extern class target_flag_state *this_target_flag_state;
+extern struct target_regs *this_target_regs;
+extern struct target_rtl *this_target_rtl;
+extern struct target_recog *this_target_recog;
+extern struct target_hard_regs *this_target_hard_regs;
+extern struct target_function_abi_info *this_target_function_abi_info;
+extern struct target_reload *this_target_reload;
+extern struct target_expmed *this_target_expmed;
+extern struct target_optabs *this_target_optabs;
+extern struct target_libfuncs *this_target_libfuncs;
+extern struct target_cfgloop *this_target_cfgloop;
+extern struct target_ira *this_target_ira;
+extern class target_ira_int *this_target_ira_int;
+extern struct target_builtins *this_target_builtins;
+extern struct target_gcse *this_target_gcse;
+extern struct target_bb_reorder *this_target_bb_reorder;
+extern struct target_lower_subreg *this_target_lower_subreg;
+#endif
+
+class GTY(()) target_globals {
+public:
+ ~target_globals ();
+
+ class target_flag_state *GTY((skip)) flag_state;
+ struct target_regs *GTY((skip)) regs;
+ struct target_rtl *rtl;
+ struct target_recog *GTY((skip)) recog;
+ struct target_hard_regs *GTY((skip)) hard_regs;
+ struct target_function_abi_info *GTY((skip)) function_abi_info;
+ struct target_reload *GTY((skip)) reload;
+ struct target_expmed *GTY((skip)) expmed;
+ struct target_optabs *GTY((skip)) optabs;
+ struct target_libfuncs *libfuncs;
+ struct target_cfgloop *GTY((skip)) cfgloop;
+ struct target_ira *GTY((skip)) ira;
+ class target_ira_int *GTY((skip)) ira_int;
+ struct target_builtins *GTY((skip)) builtins;
+ struct target_gcse *GTY((skip)) gcse;
+ struct target_bb_reorder *GTY((skip)) bb_reorder;
+ struct target_lower_subreg *GTY((skip)) lower_subreg;
+};
+
+#if SWITCHABLE_TARGET
+extern class target_globals default_target_globals;
+
+extern class target_globals *save_target_globals (void);
+extern class target_globals *save_target_globals_default_opts (void);
+
+inline void
+restore_target_globals (class target_globals *g)
+{
+ this_target_flag_state = g->flag_state;
+ this_target_regs = g->regs;
+ this_target_rtl = g->rtl;
+ this_target_recog = g->recog;
+ this_target_hard_regs = g->hard_regs;
+ this_target_function_abi_info = g->function_abi_info;
+ this_target_reload = g->reload;
+ this_target_expmed = g->expmed;
+ this_target_optabs = g->optabs;
+ this_target_libfuncs = g->libfuncs;
+ this_target_cfgloop = g->cfgloop;
+ this_target_ira = g->ira;
+ this_target_ira_int = g->ira_int;
+ this_target_builtins = g->builtins;
+ this_target_gcse = g->gcse;
+ this_target_bb_reorder = g->bb_reorder;
+ this_target_lower_subreg = g->lower_subreg;
+}
+#endif
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-hooks-macros.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-hooks-macros.h
new file mode 100644
index 0000000..8ce06e8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-hooks-macros.h
@@ -0,0 +1,80 @@
+/* Common macros for target hook definitions.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* The following macros should be provided by the including file:
+
+ DEFHOOK(NAME, DOC, TYPE, PARAMS, INIT): Define a function-valued hook.
+ DEFHOOKPOD(NAME, DOC, TYPE, INIT): Define a piece-of-data 'hook'. */
+
+/* Defaults for optional macros:
+ DEFHOOKPODX(NAME, TYPE, INIT): Like DEFHOOKPOD, but share documentation
+ with the previous 'hook'. */
+#ifndef DEFHOOKPODX
+#define DEFHOOKPODX(NAME, TYPE, INIT) DEFHOOKPOD (NAME, 0, TYPE, INIT)
+#endif
+
+/* HOOKSTRUCT(FRAGMENT): Declarator fragments to encapsulate all the
+ members into a struct gcc_target, which in turn contains several
+ sub-structs. */
+#ifndef HOOKSTRUCT
+#define HOOKSTRUCT(FRAGMENT)
+#endif
+/* HOOK_VECTOR: Start a struct declaration, which then gets its own initializer.
+ HOOK_VECTOR_END: Close a struct declaration, providing a member declarator
+ name for nested use. */
+#ifndef HOOK_VECTOR_1
+#define HOOK_VECTOR_1(NAME, FRAGMENT) HOOKSTRUCT (FRAGMENT)
+#endif
+#define HOOK_VECTOR(INIT_NAME, SNAME) HOOK_VECTOR_1 (INIT_NAME, struct SNAME {)
+#define HOOK_VECTOR_END(DECL_NAME) HOOK_VECTOR_1(,} DECL_NAME ;)
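+
+/* Illustrative sketch (not part of the upstream header): when the
+   including file defines HOOKSTRUCT to emit its argument, the pair
+
+     HOOK_VECTOR (TARGET_ASM_OUT, asm_out)
+       ... DEFHOOK entries become struct members ...
+     HOOK_VECTOR_END (asm_out)
+
+   expands to "struct asm_out { ... } asm_out;", nesting each hook
+   group inside struct gcc_target.  */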
+
+/* FIXME: For pre-existing hooks, we can't place the documentation in the
+ documentation field here till we get permission from the FSF to include
+ it in GPLed software - the target hook documentation is so far only
+ available under the GFDL. */
+
+/* A hook should generally be documented by a string in the DOC parameter,
+ which should contain texinfo markup. If the documentation is only available
+ under the GPL, but not under the GFDL, put it in a comment above the hook
+ definition. If the function declaration is available both under GPL and
+ GFDL, but the documentation is only available under the GFDL, put the
+   documentation in tm.texi.in, heading with @hook <hookname> and closing
+ the paragraph with @end deftypefn / deftypevr as appropriate, and marking
+ the next autogenerated hook with @hook <hookname>.
+ In both these cases, leave the DOC string empty, i.e. "".
+   Sometimes, for historic reasons, the function declaration has to be
+   documented differently from what it actually is.  In that case, use
+   DEFHOOK_UNDOC to suppress auto-generation
+ of documentation. DEFHOOK_UNDOC takes a DOC string which it ignores, so
+ you can put GPLed documentation string there if you have hopes that you
+ can clear the declaration & documentation for GFDL distribution later,
+ in which case you can then simply change the DEFHOOK_UNDOC to DEFHOOK
+ to turn on the autogeneration of the documentation.
+
+ A documentation string of "*" means not to emit any documentation at all,
+ and is mainly used internally for DEFHOOK_UNDOC. It should generally not
+ be used otherwise, but it has its use for exceptional cases where automatic
+   documentation is not wanted, and the real documentation is elsewhere, like
+ for TARGET_ASM_{,UN}ALIGNED_INT_OP, which are hooks only for implementation
+ purposes; they refer to structs, the components of which are documented as
+ separate hooks TARGET_ASM_{,UN}ALIGNED_[HSDT]I_OP.
+ A DOC string of 0 is for internal use of DEFHOOKPODX and special table
+ entries only. */
+
+/* Empty macro arguments are undefined in C90, so use an empty macro
+ to close top-level hook structures. */
+#define C90_EMPTY_HACK
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-insns.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-insns.def
new file mode 100644
index 0000000..c4415d0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target-insns.def
@@ -0,0 +1,108 @@
+/* Target instruction definitions.
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* This file has one entry for each public pattern name that the target
+ can provide. It is only used if no distinction between operand modes
+ is necessary. If separate patterns are needed for different modes
+ (so as to distinguish addition of QImode values from addition of
+ HImode values, for example) then an optab should be used instead.
+
+ Each entry has the form:
+
+ DEF_TARGET_INSN (name, prototype)
+
+ where NAME is the name of the pattern and PROTOTYPE is its C prototype.
+ The prototype should use parameter names of the form "x0", "x1", etc.
+ for the operands that the .md pattern is required to have, followed by
+ parameter names of the form "optN" for operands that the .md pattern
+ may choose to ignore. Patterns that never take operands should have
+ a prototype "(void)".
+
+ Pattern names should be documented in md.texi rather than here. */
+DEF_TARGET_INSN (allocate_stack, (rtx x0, rtx x1))
+DEF_TARGET_INSN (atomic_test_and_set, (rtx x0, rtx x1, rtx x2))
+DEF_TARGET_INSN (builtin_longjmp, (rtx x0))
+DEF_TARGET_INSN (builtin_setjmp_receiver, (rtx x0))
+DEF_TARGET_INSN (builtin_setjmp_setup, (rtx x0))
+DEF_TARGET_INSN (canonicalize_funcptr_for_compare, (rtx x0, rtx x1))
+DEF_TARGET_INSN (call, (rtx x0, rtx opt1, rtx opt2, rtx opt3))
+DEF_TARGET_INSN (call_pop, (rtx x0, rtx opt1, rtx opt2, rtx opt3))
+DEF_TARGET_INSN (call_value, (rtx x0, rtx x1, rtx opt2, rtx opt3, rtx opt4))
+DEF_TARGET_INSN (call_value_pop, (rtx x0, rtx x1, rtx opt2, rtx opt3,
+ rtx opt4))
+DEF_TARGET_INSN (casesi, (rtx x0, rtx x1, rtx x2, rtx x3, rtx x4))
+DEF_TARGET_INSN (check_stack, (rtx x0))
+DEF_TARGET_INSN (clear_cache, (rtx x0, rtx x1))
+DEF_TARGET_INSN (doloop_begin, (rtx x0, rtx x1))
+DEF_TARGET_INSN (doloop_end, (rtx x0, rtx x1))
+DEF_TARGET_INSN (eh_return, (rtx x0))
+DEF_TARGET_INSN (epilogue, (void))
+DEF_TARGET_INSN (exception_receiver, (void))
+DEF_TARGET_INSN (extv, (rtx x0, rtx x1, rtx x2, rtx x3))
+DEF_TARGET_INSN (extzv, (rtx x0, rtx x1, rtx x2, rtx x3))
+DEF_TARGET_INSN (indirect_jump, (rtx x0))
+DEF_TARGET_INSN (insv, (rtx x0, rtx x1, rtx x2, rtx x3))
+DEF_TARGET_INSN (jump, (rtx x0))
+DEF_TARGET_INSN (load_multiple, (rtx x0, rtx x1, rtx x2))
+DEF_TARGET_INSN (mem_thread_fence, (rtx x0))
+DEF_TARGET_INSN (memory_barrier, (void))
+DEF_TARGET_INSN (memory_blockage, (void))
+DEF_TARGET_INSN (movstr, (rtx x0, rtx x1, rtx x2))
+DEF_TARGET_INSN (nonlocal_goto, (rtx x0, rtx x1, rtx x2, rtx x3))
+DEF_TARGET_INSN (nonlocal_goto_receiver, (void))
+DEF_TARGET_INSN (oacc_dim_pos, (rtx x0, rtx x1))
+DEF_TARGET_INSN (oacc_dim_size, (rtx x0, rtx x1))
+DEF_TARGET_INSN (oacc_fork, (rtx x0, rtx x1, rtx x2))
+DEF_TARGET_INSN (oacc_join, (rtx x0, rtx x1, rtx x2))
+DEF_TARGET_INSN (omp_simt_enter, (rtx x0, rtx x1, rtx x2))
+DEF_TARGET_INSN (omp_simt_exit, (rtx x0))
+DEF_TARGET_INSN (omp_simt_lane, (rtx x0))
+DEF_TARGET_INSN (omp_simt_last_lane, (rtx x0, rtx x1))
+DEF_TARGET_INSN (omp_simt_ordered, (rtx x0, rtx x1))
+DEF_TARGET_INSN (omp_simt_vote_any, (rtx x0, rtx x1))
+DEF_TARGET_INSN (omp_simt_xchg_bfly, (rtx x0, rtx x1, rtx x2))
+DEF_TARGET_INSN (omp_simt_xchg_idx, (rtx x0, rtx x1, rtx x2))
+DEF_TARGET_INSN (prefetch, (rtx x0, rtx x1, rtx x2))
+DEF_TARGET_INSN (probe_stack, (rtx x0))
+DEF_TARGET_INSN (probe_stack_address, (rtx x0))
+DEF_TARGET_INSN (prologue, (void))
+DEF_TARGET_INSN (ptr_extend, (rtx x0, rtx x1))
+DEF_TARGET_INSN (reload_load_address, (rtx x0, rtx x1))
+DEF_TARGET_INSN (restore_stack_block, (rtx x0, rtx x1))
+DEF_TARGET_INSN (restore_stack_function, (rtx x0, rtx x1))
+DEF_TARGET_INSN (restore_stack_nonlocal, (rtx x0, rtx x1))
+DEF_TARGET_INSN (return, (void))
+DEF_TARGET_INSN (save_stack_block, (rtx x0, rtx x1))
+DEF_TARGET_INSN (save_stack_function, (rtx x0, rtx x1))
+DEF_TARGET_INSN (save_stack_nonlocal, (rtx x0, rtx x1))
+DEF_TARGET_INSN (sibcall, (rtx x0, rtx opt1, rtx opt2, rtx opt3))
+DEF_TARGET_INSN (sibcall_epilogue, (void))
+DEF_TARGET_INSN (sibcall_value, (rtx x0, rtx x1, rtx opt2, rtx opt3,
+ rtx opt4))
+DEF_TARGET_INSN (simple_return, (void))
+DEF_TARGET_INSN (split_stack_prologue, (void))
+DEF_TARGET_INSN (split_stack_space_check, (rtx x0, rtx x1))
+DEF_TARGET_INSN (stack_protect_combined_set, (rtx x0, rtx x1))
+DEF_TARGET_INSN (stack_protect_set, (rtx x0, rtx x1))
+DEF_TARGET_INSN (stack_protect_combined_test, (rtx x0, rtx x1, rtx x2))
+DEF_TARGET_INSN (stack_protect_test, (rtx x0, rtx x1, rtx x2))
+DEF_TARGET_INSN (store_multiple, (rtx x0, rtx x1, rtx x2))
+DEF_TARGET_INSN (tablejump, (rtx x0, rtx x1))
+DEF_TARGET_INSN (trap, (void))
+DEF_TARGET_INSN (unique, (void))
+DEF_TARGET_INSN (untyped_call, (rtx x0, rtx x1, rtx x2))
+DEF_TARGET_INSN (untyped_return, (rtx x0, rtx x1))
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target.def
new file mode 100644
index 0000000..171bbd1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target.def
@@ -0,0 +1,7143 @@
+/* Target hook definitions.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding! */
+
+/* See target-hooks-macros.h for details of macros that should be
+ provided by the including file, and how to use them here. */
+#include "target-hooks-macros.h"
+
+#undef HOOK_TYPE
+#define HOOK_TYPE "Target Hook"
+
+HOOK_VECTOR (TARGET_INITIALIZER, gcc_target)
+
+/* Functions that output assembler for the target. */
+#define HOOK_PREFIX "TARGET_ASM_"
+HOOK_VECTOR (TARGET_ASM_OUT, asm_out)
+
+/* Opening and closing parentheses for asm expression grouping. */
+DEFHOOKPOD
+(open_paren,
+ "These target hooks are C string constants, describing the syntax in the\n\
+assembler for grouping arithmetic expressions. If not overridden, they\n\
+default to normal parentheses, which is correct for most assemblers.",
+ const char *, "(")
+DEFHOOKPODX (close_paren, const char *, ")")
+
+/* Assembler instructions for creating various kinds of integer object. */
+DEFHOOKPOD
+(byte_op,
+ "@deftypevrx {Target Hook} {const char *} TARGET_ASM_ALIGNED_HI_OP\n\
+@deftypevrx {Target Hook} {const char *} TARGET_ASM_ALIGNED_PSI_OP\n\
+@deftypevrx {Target Hook} {const char *} TARGET_ASM_ALIGNED_SI_OP\n\
+@deftypevrx {Target Hook} {const char *} TARGET_ASM_ALIGNED_PDI_OP\n\
+@deftypevrx {Target Hook} {const char *} TARGET_ASM_ALIGNED_DI_OP\n\
+@deftypevrx {Target Hook} {const char *} TARGET_ASM_ALIGNED_PTI_OP\n\
+@deftypevrx {Target Hook} {const char *} TARGET_ASM_ALIGNED_TI_OP\n\
+@deftypevrx {Target Hook} {const char *} TARGET_ASM_UNALIGNED_HI_OP\n\
+@deftypevrx {Target Hook} {const char *} TARGET_ASM_UNALIGNED_PSI_OP\n\
+@deftypevrx {Target Hook} {const char *} TARGET_ASM_UNALIGNED_SI_OP\n\
+@deftypevrx {Target Hook} {const char *} TARGET_ASM_UNALIGNED_PDI_OP\n\
+@deftypevrx {Target Hook} {const char *} TARGET_ASM_UNALIGNED_DI_OP\n\
+@deftypevrx {Target Hook} {const char *} TARGET_ASM_UNALIGNED_PTI_OP\n\
+@deftypevrx {Target Hook} {const char *} TARGET_ASM_UNALIGNED_TI_OP\n\
+These hooks specify assembly directives for creating certain kinds\n\
+of integer object. The @code{TARGET_ASM_BYTE_OP} directive creates a\n\
+byte-sized object, the @code{TARGET_ASM_ALIGNED_HI_OP} one creates an\n\
+aligned two-byte object, and so on. Any of the hooks may be\n\
+@code{NULL}, indicating that no suitable directive is available.\n\
+\n\
+The compiler will print these strings at the start of a new line,\n\
+followed immediately by the object's initial value. In most cases,\n\
+the string should contain a tab, a pseudo-op, and then another tab.",
+ const char *, "\t.byte\t")
+DEFHOOKPOD (aligned_op, "*", struct asm_int_op, TARGET_ASM_ALIGNED_INT_OP)
+DEFHOOKPOD (unaligned_op, "*", struct asm_int_op, TARGET_ASM_UNALIGNED_INT_OP)
+
+/* Try to output the assembler code for an integer object whose
+ value is given by X. SIZE is the size of the object in bytes and
+ ALIGNED_P indicates whether it is aligned. Return true if
+ successful. Only handles cases for which BYTE_OP, ALIGNED_OP
+ and UNALIGNED_OP are NULL. */
+DEFHOOK
+(integer,
+ "The @code{assemble_integer} function uses this hook to output an\n\
+integer object. @var{x} is the object's value, @var{size} is its size\n\
+in bytes and @var{aligned_p} indicates whether it is aligned. The\n\
+function should return @code{true} if it was able to output the\n\
+object. If it returns false, @code{assemble_integer} will try to\n\
+split the object into smaller parts.\n\
+\n\
+The default implementation of this hook will use the\n\
+@code{TARGET_ASM_BYTE_OP} family of strings, returning @code{false}\n\
+when the relevant string is @code{NULL}.",
+ /* Only handles cases for which BYTE_OP, ALIGNED_OP and UNALIGNED_OP are
+ NULL. */
+ bool, (rtx x, unsigned int size, int aligned_p),
+ default_assemble_integer)
+
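+/* Illustrative sketch, not part of upstream GCC: a hypothetical port
+   could implement this hook roughly as follows, handling one case
+   specially and falling back to the default otherwise.  The name
+   "example_assemble_integer" is an assumption.
+
+     static bool
+     example_assemble_integer (rtx x, unsigned int size, int aligned_p)
+     {
+       if (size == 4 && aligned_p)
+         {
+           fputs ("\t.word\t", asm_out_file);
+           output_addr_const (asm_out_file, x);
+           fputc ('\n', asm_out_file);
+           return true;
+         }
+       return default_assemble_integer (x, size, aligned_p);
+     }  */
+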
+/* Assembly strings required after the .cfi_startproc label. */
+DEFHOOK
+(post_cfi_startproc,
+ "This target hook is used to emit assembly strings required by the target\n\
+after the .cfi_startproc directive. The first argument is the file stream to\n\
+write the strings to and the second argument is the function\'s declaration. The\n\
+expected use is to add more .cfi_* directives.\n\
+\n\
+The default is to not output any assembly strings.",
+ void, (FILE *, tree),
+ hook_void_FILEptr_tree)
+
+/* Notify the backend that we have completed emitting the data for a
+ decl. */
+DEFHOOK
+(decl_end,
+ "Define this hook if the target assembler requires a special marker to\n\
+terminate an initialized variable declaration.",
+ void, (void),
+ hook_void_void)
+
+/* Output code that will globalize a label. */
+DEFHOOK
+(globalize_label,
+ "This target hook is a function to output to the stdio stream\n\
+@var{stream} some commands that will make the label @var{name} global;\n\
+that is, available for reference from other files.\n\
+\n\
+The default implementation relies on a proper definition of\n\
+@code{GLOBAL_ASM_OP}.",
+ void, (FILE *stream, const char *name),
+ default_globalize_label)
+
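+/* Illustrative sketch, not part of upstream GCC: for an assembler with
+   a conventional .globl directive, a port could write something like
+   the following (the function name is an assumption).
+
+     static void
+     example_globalize_label (FILE *stream, const char *name)
+     {
+       fputs ("\t.globl\t", stream);
+       assemble_name (stream, name);
+       fputc ('\n', stream);
+     }  */
+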
+/* Output code that will globalize a declaration. */
+DEFHOOK
+(globalize_decl_name,
+ "This target hook is a function to output to the stdio stream\n\
+@var{stream} some commands that will make the name associated with @var{decl}\n\
+global; that is, available for reference from other files.\n\
+\n\
+The default implementation uses the TARGET_ASM_GLOBALIZE_LABEL target hook.",
+ void, (FILE *stream, tree decl), default_globalize_decl_name)
+
+/* Output code that will declare an external variable. */
+DEFHOOK
+(assemble_undefined_decl,
+ "This target hook is a function to output to the stdio stream\n\
+@var{stream} some commands that will declare the name associated with\n\
+@var{decl} which is not defined in the current translation unit. Most\n\
+assemblers do not require anything to be output in this case.",
+ void, (FILE *stream, const char *name, const_tree decl),
+ hook_void_FILEptr_constcharptr_const_tree)
+
+/* Output code that will emit a label for unwind info, if this
+ target requires such labels. Second argument is the decl the
+ unwind info is associated with, third is a boolean: true if
+ this is for exception handling, fourth is a boolean: true if
+ this is only a placeholder for an omitted FDE. */
+DEFHOOK
+(emit_unwind_label,
+ "This target hook emits a label at the beginning of each FDE@. It\n\
+should be defined on targets where FDEs need special labels, and it\n\
+should write the appropriate label, for the FDE associated with the\n\
+function declaration @var{decl}, to the stdio stream @var{stream}.\n\
+The third argument, @var{for_eh}, is a boolean: true if this is for an\n\
+exception table. The fourth argument, @var{empty}, is a boolean:\n\
+true if this is a placeholder label for an omitted FDE@.\n\
+\n\
+The default is that FDEs are not given nonlocal labels.",
+ void, (FILE *stream, tree decl, int for_eh, int empty),
+ default_emit_unwind_label)
+
+/* Output code that will emit a label to divide up the exception table. */
+DEFHOOK
+(emit_except_table_label,
+ "This target hook emits a label at the beginning of the exception table.\n\
+It should be defined on targets where it is desirable for the table\n\
+to be broken up according to function.\n\
+\n\
+The default is that no label is emitted.",
+ void, (FILE *stream),
+ default_emit_except_table_label)
+
+/* Emit a directive for setting the personality for the function. */
+DEFHOOK
+(emit_except_personality,
+ "If the target implements @code{TARGET_ASM_UNWIND_EMIT}, this hook may be\n\
+used to emit a directive to install a personality hook into the unwind\n\
+info. This hook should not be used if dwarf2 unwind info is used.",
+ void, (rtx personality),
+ NULL)
+
+/* If necessary, modify personality and LSDA references to handle
+ indirection. This is used when the assembler supports CFI directives. */
+DEFHOOK
+(make_eh_symbol_indirect,
+ "If necessary, modify personality and LSDA references to handle indirection.\n\
+The original symbol is in @var{origsymbol} and if @var{pubvis} is true\n\
+the symbol is visible outside the TU.",
+ rtx, (rtx origsymbol, bool pubvis),
+ NULL)
+
+/* Emit any directives required to unwind this instruction. */
+DEFHOOK
+(unwind_emit,
+ "This target hook emits assembly directives required to unwind the\n\
+given instruction. This is only used when @code{TARGET_EXCEPT_UNWIND_INFO}\n\
+returns @code{UI_TARGET}.",
+ void, (FILE *stream, rtx_insn *insn),
+ NULL)
+
+DEFHOOKPOD
+(unwind_emit_before_insn,
+ "True if the @code{TARGET_ASM_UNWIND_EMIT} hook should be called before\n\
+the assembly for @var{insn} has been emitted, false if the hook should\n\
+be called afterward.",
+ bool, true)
+
+/* Return true if the target needs extra instructions to restore the current
+ frame address after a DW_CFA_restore_state opcode. */
+DEFHOOK
+(should_restore_cfa_state,
+ "For DWARF-based unwind frames, two CFI instructions provide for save and\n\
+restore of register state. GCC maintains the current frame address (CFA)\n\
+separately from the register bank but the unwinder in libgcc preserves this\n\
+state along with the registers (and this is expected by the code that writes\n\
+the unwind frames). This hook allows the target to specify that the CFA data\n\
+is not saved/restored along with the registers by the target unwinder so that\n\
+suitable additional instructions should be emitted to restore it.",
+ bool, (void),
+ hook_bool_void_false)
+
+/* Generate an internal label.
+ For now this is just a wrapper for ASM_GENERATE_INTERNAL_LABEL. */
+DEFHOOK_UNDOC
+(generate_internal_label,
+ "",
+ void, (char *buf, const char *prefix, unsigned long labelno),
+ default_generate_internal_label)
+
+/* Output an internal label. */
+DEFHOOK
+(internal_label,
+ "A function to output to the stdio stream @var{stream} a label whose\n\
+name is made from the string @var{prefix} and the number @var{labelno}.\n\
+\n\
+It is absolutely essential that these labels be distinct from the labels\n\
+used for user-level functions and variables. Otherwise, certain programs\n\
+will have name conflicts with internal labels.\n\
+\n\
+It is desirable to exclude internal labels from the symbol table of the\n\
+object file. Most assemblers have a naming convention for labels that\n\
+should be excluded; on many systems, the letter @samp{L} at the\n\
+beginning of a label has this effect. You should find out what\n\
+convention your system uses, and follow it.\n\
+\n\
+The default version of this function utilizes @code{ASM_GENERATE_INTERNAL_LABEL}.",
+ void, (FILE *stream, const char *prefix, unsigned long labelno),
+ default_internal_label)
+
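+/* Illustrative sketch, not part of upstream GCC: a port that only needs
+   the standard generated local labels could write (names assumed):
+
+     static void
+     example_internal_label (FILE *stream, const char *prefix,
+                             unsigned long labelno)
+     {
+       char buf[40];
+       ASM_GENERATE_INTERNAL_LABEL (buf, prefix, labelno);
+       ASM_OUTPUT_LABEL (stream, buf);
+     }  */
+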
+/* Output label for the constant. */
+DEFHOOK
+(declare_constant_name,
+ "A target hook to output to the stdio stream @var{file} any text necessary\n\
+for declaring the name @var{name} of a constant which is being defined. This\n\
+target hook is responsible for outputting the label definition (perhaps using\n\
+@code{assemble_label}). The argument @var{exp} is the value of the constant,\n\
+and @var{size} is the size of the constant in bytes. The @var{name}\n\
+will be an internal label.\n\
+\n\
+The default version of this target hook defines @var{name} in the\n\
+usual manner as a label (by means of @code{assemble_label}).\n\
+\n\
+You may wish to use @code{ASM_OUTPUT_TYPE_DIRECTIVE} in this target hook.",
+ void, (FILE *file, const char *name, const_tree expr, HOST_WIDE_INT size),
+ default_asm_declare_constant_name)
+
+/* Emit a ttype table reference to a typeinfo object. */
+DEFHOOK
+(ttype,
+ "This hook is used to output a reference from a frame unwinding table to\n\
+the type_info object identified by @var{sym}. It should return @code{true}\n\
+if the reference was output. Returning @code{false} will cause the\n\
+reference to be output using the normal Dwarf2 routines.",
+ bool, (rtx sym),
+ hook_bool_rtx_false)
+
+/* Emit an assembler directive to set visibility for the symbol
+ associated with the tree decl. */
+DEFHOOK
+(assemble_visibility,
+ "This target hook is a function to output to @var{asm_out_file} some\n\
+commands that will make the symbol(s) associated with @var{decl} have\n\
+hidden, protected or internal visibility as specified by @var{visibility}.",
+ void, (tree decl, int visibility),
+ default_assemble_visibility)
+
+DEFHOOK
+(print_patchable_function_entry,
+ "Generate a patchable area at the function start, consisting of\n\
+@var{patch_area_size} NOP instructions. If the target supports named\n\
+sections and if @var{record_p} is true, insert a pointer to the current\n\
+location in the table of patchable functions. The default implementation\n\
+of the hook places the table of pointers in the special section named\n\
+@code{__patchable_function_entries}.",
+ void, (FILE *file, unsigned HOST_WIDE_INT patch_area_size, bool record_p),
+ default_print_patchable_function_entry)
+
+/* Output the assembler code for entry to a function. */
+DEFHOOK
+(function_prologue,
+ "If defined, a function that outputs the assembler code for entry to a\n\
+function. The prologue is responsible for setting up the stack frame,\n\
+initializing the frame pointer register, saving registers that must be\n\
+saved, and allocating @var{size} additional bytes of storage for the\n\
+local variables. @var{file} is a stdio stream to which the assembler\n\
+code should be output.\n\
+\n\
+The label for the beginning of the function need not be output by this\n\
+macro. That has already been done when the macro is run.\n\
+\n\
+@findex regs_ever_live\n\
+To determine which registers to save, the macro can refer to the array\n\
+@code{regs_ever_live}: element @var{r} is nonzero if hard register\n\
+@var{r} is used anywhere within the function. This implies the function\n\
+prologue should save register @var{r}, provided it is not one of the\n\
+call-used registers. (@code{TARGET_ASM_FUNCTION_EPILOGUE} must likewise use\n\
+@code{regs_ever_live}.)\n\
+\n\
+On machines that have ``register windows'', the function entry code does\n\
+not save on the stack the registers that are in the windows, even if\n\
+they are supposed to be preserved by function calls; instead it takes\n\
+appropriate steps to ``push'' the register stack, if any non-call-used\n\
+registers are used in the function.\n\
+\n\
+@findex frame_pointer_needed\n\
+On machines where functions may or may not have frame-pointers, the\n\
+function entry code must vary accordingly; it must set up the frame\n\
+pointer if one is wanted, and not otherwise. To determine whether a\n\
+frame pointer is wanted, the macro can refer to the variable\n\
+@code{frame_pointer_needed}. The variable's value will be 1 at run\n\
+time in a function that needs a frame pointer. @xref{Elimination}.\n\
+\n\
+The function entry code is responsible for allocating any stack space\n\
+required for the function. This stack space consists of the regions\n\
+listed below. In most cases, these regions are allocated in the\n\
+order listed, with the last listed region closest to the top of the\n\
+stack (the lowest address if @code{STACK_GROWS_DOWNWARD} is defined, and\n\
+the highest address if it is not defined). You can use a different order\n\
+for a machine if doing so is more convenient or required for\n\
+compatibility reasons. Except in cases where required by standard\n\
+or by a debugger, there is no reason why the stack layout used by GCC\n\
+need agree with that used by other compilers for a machine.",
+ void, (FILE *file),
+ default_function_pro_epilogue)
+
+/* Output the assembler code for end of prologue. */
+DEFHOOK
+(function_end_prologue,
+ "If defined, a function that outputs assembler code at the end of a\n\
+prologue. This should be used when the function prologue is being\n\
+emitted as RTL, and you have some extra assembler that needs to be\n\
+emitted. @xref{prologue instruction pattern}.",
+ void, (FILE *file),
+ no_asm_to_stream)
+
+/* Output the assembler code for start of epilogue. */
+DEFHOOK
+(function_begin_epilogue,
+ "If defined, a function that outputs assembler code at the start of an\n\
+epilogue. This should be used when the function epilogue is being\n\
+emitted as RTL, and you have some extra assembler that needs to be\n\
+emitted. @xref{epilogue instruction pattern}.",
+ void, (FILE *file),
+ no_asm_to_stream)
+
+/* Output the assembler code for function exit. */
+DEFHOOK
+(function_epilogue,
+ "If defined, a function that outputs the assembler code for exit from a\n\
+function. The epilogue is responsible for restoring the saved\n\
+registers and stack pointer to their values when the function was\n\
+called, and returning control to the caller. This macro takes the\n\
+same argument as the macro @code{TARGET_ASM_FUNCTION_PROLOGUE}, and the\n\
+registers to restore are determined from @code{regs_ever_live} and\n\
+@code{CALL_USED_REGISTERS} in the same way.\n\
+\n\
+On some machines, there is a single instruction that does all the work\n\
+of returning from the function. On these machines, give that\n\
+instruction the name @samp{return} and do not define the macro\n\
+@code{TARGET_ASM_FUNCTION_EPILOGUE} at all.\n\
+\n\
+Do not define a pattern named @samp{return} if you want the\n\
+@code{TARGET_ASM_FUNCTION_EPILOGUE} to be used. If you want the target\n\
+switches to control whether return instructions or epilogues are used,\n\
+define a @samp{return} pattern with a validity condition that tests the\n\
+target switches appropriately. If the @samp{return} pattern's validity\n\
+condition is false, epilogues will be used.\n\
+\n\
+On machines where functions may or may not have frame-pointers, the\n\
+function exit code must vary accordingly. Sometimes the code for these\n\
+two cases is completely different. To determine whether a frame pointer\n\
+is wanted, the macro can refer to the variable\n\
+@code{frame_pointer_needed}. The variable's value will be 1 when compiling\n\
+a function that needs a frame pointer.\n\
+\n\
+Normally, @code{TARGET_ASM_FUNCTION_PROLOGUE} and\n\
+@code{TARGET_ASM_FUNCTION_EPILOGUE} must treat leaf functions specially.\n\
+The C variable @code{current_function_is_leaf} is nonzero for such a\n\
+function. @xref{Leaf Functions}.\n\
+\n\
+On some machines, some functions pop their arguments on exit while\n\
+others leave that for the caller to do. For example, the 68020 when\n\
+given @option{-mrtd} pops arguments in functions that take a fixed\n\
+number of arguments.\n\
+\n\
+@findex pops_args\n\
+@findex crtl->args.pops_args\n\
+Your definition of the macro @code{RETURN_POPS_ARGS} decides which\n\
+functions pop their own arguments. @code{TARGET_ASM_FUNCTION_EPILOGUE}\n\
+needs to know what was decided. The number of bytes of the current\n\
+function's arguments that this function should pop is available in\n\
+@code{crtl->args.pops_args}. @xref{Scalar Return}.",
+ void, (FILE *file),
+ default_function_pro_epilogue)
+
+/* Initialize target-specific sections. */
+DEFHOOK
+(init_sections,
+ "Define this hook if you need to do something special to set up the\n\
+@file{varasm.cc} sections, or if your target has some special sections\n\
+of its own that you need to create.\n\
+\n\
+GCC calls this hook after processing the command line, but before writing\n\
+any assembly code, and before calling any of the section-returning hooks\n\
+described below.",
+ void, (void),
+ hook_void_void)
+
+/* Tell assembler to change to section NAME with attributes FLAGS.
+ If DECL is non-NULL, it is the VAR_DECL or FUNCTION_DECL with
+ which this section is associated. */
+DEFHOOK
+(named_section,
+ "Output assembly directives to switch to section @var{name}. The section\n\
+should have attributes as specified by @var{flags}, which is a bit mask\n\
+of the @code{SECTION_*} flags defined in @file{output.h}. If @var{decl}\n\
+is non-NULL, it is the @code{VAR_DECL} or @code{FUNCTION_DECL} with which\n\
+this section is associated.",
+ void, (const char *name, unsigned int flags, tree decl),
+ default_no_named_section)
+
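+/* Illustrative sketch, not part of upstream GCC: an ELF-like port that
+   ignores the flags and the decl might emit a bare .section directive
+   (function name assumed).  Real ELF ports normally use
+   default_elf_asm_named_section instead.
+
+     static void
+     example_asm_named_section (const char *name, unsigned int flags,
+                                tree decl)
+     {
+       fprintf (asm_out_file, "\t.section\t%s\n", name);
+     }  */
+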
+/* Tell assembler what section attributes to assign this elf section
+ declaration, using their numerical value. */
+DEFHOOK
+(elf_flags_numeric,
+ "This hook can be used to encode ELF section flags for which no letter\n\
+code has been defined in the assembler. It is called by\n\
+@code{default_asm_named_section} whenever the section flags need to be\n\
+emitted in the assembler output. If the hook returns true, then the\n\
+numerical value for ELF section flags should be calculated from\n\
+@var{flags} and saved in @var{*num}; the value is printed out instead of the\n\
+normal sequence of letter codes. If the hook is not defined, or if it\n\
+returns false, then @var{num} is ignored and the traditional letter sequence\n\
+is emitted.",
+ bool, (unsigned int flags, unsigned int *num),
+ hook_bool_uint_uintp_false)
+
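+/* Illustrative sketch, not part of upstream GCC: a port with a
+   machine-dependent section flag that has no assembler letter code
+   could return the flags numerically.  The bit value 0x10000000 and
+   the function name are assumptions.
+
+     static bool
+     example_elf_flags_numeric (unsigned int flags, unsigned int *num)
+     {
+       if (flags & SECTION_MACH_DEP)
+         {
+           *num = 0x10000000 | ((flags & SECTION_WRITE) ? 0x1 : 0);
+           return true;
+         }
+       return false;
+     }  */
+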
+/* Return preferred text (sub)section for function DECL.
+ Main purpose of this function is to separate cold, normal and hot
+ functions. STARTUP is true when function is known to be used only
+ at startup (from static constructors or it is main()).
+ EXIT is true when function is known to be used only at exit
+ (from static destructors).
+ Return NULL if function should go to default text section. */
+DEFHOOK
+(function_section,
+ "Return preferred text (sub)section for function @var{decl}.\n\
+Main purpose of this function is to separate cold, normal and hot\n\
+functions. @var{startup} is true when function is known to be used only\n\
+at startup (from static constructors or it is @code{main()}).\n\
+@var{exit} is true when function is known to be used only at exit\n\
+(from static destructors).\n\
+Return NULL if function should go to default text section.",
+ section *, (tree decl, enum node_frequency freq, bool startup, bool exit),
+ default_function_section)
+
+/* Emit directives or labels needed when a function is partitioned
+   between different text sections. */
+DEFHOOK
+(function_switched_text_sections,
+ "Used by the target to emit any assembler directives or additional\n\
+labels needed when a function is partitioned between different\n\
+sections. Output should be written to @var{file}. The function\n\
+decl is available as @var{decl} and the new section is `cold' if\n\
+@var{new_is_cold} is @code{true}.",
+ void, (FILE *file, tree decl, bool new_is_cold),
+ default_function_switched_text_sections)
+
+/* Return a mask describing how relocations should be treated when
+ selecting sections. Bit 1 should be set if global relocations
+ should be placed in a read-write section; bit 0 should be set if
+ local relocations should be placed in a read-write section. */
+DEFHOOK
+(reloc_rw_mask,
+ "Return a mask describing how relocations should be treated when\n\
+selecting sections. Bit 1 should be set if global relocations\n\
+should be placed in a read-write section; bit 0 should be set if\n\
+local relocations should be placed in a read-write section.\n\
+\n\
+The default version of this function returns 3 when @option{-fpic}\n\
+is in effect, and 0 otherwise. The hook is typically redefined\n\
+when the target cannot support (some kinds of) dynamic relocations\n\
+in read-only sections even in executables.",
+ int, (void),
+ default_reloc_rw_mask)
+
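+/* Illustrative sketch, not part of upstream GCC: a port whose dynamic
+   linker cannot apply any relocations to read-only pages would force
+   both global (bit 1) and local (bit 0) relocations into read-write
+   sections whenever relocations are possible at all:
+
+     static int
+     example_reloc_rw_mask (void)
+     {
+       return (flag_pic || flag_pie) ? 3 : 0;
+     }  */
+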
+ /* Return a flag for either generating ADDR_DIFF_VEC table
+ or ADDR_VEC table for jumps in case of -fPIC/-fPIE. */
+DEFHOOK
+(generate_pic_addr_diff_vec,
+"Return true to generate ADDR_DIF_VEC table\n\
+or false to generate ADDR_VEC table for jumps in case of -fPIC.\n\
+\n\
+The default version of this function returns true if flag_pic\n\
+equals true and false otherwise",
+ bool, (void),
+ default_generate_pic_addr_diff_vec)
+
+ /* Return a section for EXP. It may be a DECL or a constant. RELOC
+ is nonzero if runtime relocations must be applied; bit 1 will be
+ set if the runtime relocations require non-local name resolution.
+ ALIGN is the required alignment of the data. */
+DEFHOOK
+(select_section,
+ "Return the section into which @var{exp} should be placed. You can\n\
+assume that @var{exp} is either a @code{VAR_DECL} node or a constant of\n\
+some sort. @var{reloc} indicates whether the initial value of @var{exp}\n\
+requires link-time relocations. Bit 0 is set when variable contains\n\
+local relocations only, while bit 1 is set for global relocations.\n\
+@var{align} is the constant alignment in bits.\n\
+\n\
+The default version of this function takes care of putting read-only\n\
+variables in @code{readonly_data_section}.\n\
+\n\
+See also @code{USE_SELECT_SECTION_FOR_FUNCTIONS}.",
+ section *, (tree exp, int reloc, unsigned HOST_WIDE_INT align),
+ default_select_section)
+
+/* Return a section for X. MODE is X's mode and ALIGN is its
+ alignment in bits. */
+DEFHOOK
+(select_rtx_section,
+ "Return the section into which a constant @var{x}, of mode @var{mode},\n\
+should be placed. You can assume that @var{x} is some kind of\n\
+constant in RTL@. The argument @var{mode} is redundant except in the\n\
+case of a @code{const_int} rtx. @var{align} is the constant alignment\n\
+in bits.\n\
+\n\
+The default version of this function takes care of putting symbolic\n\
+constants in @code{flag_pic} mode in @code{data_section} and everything\n\
+else in @code{readonly_data_section}.",
+ section *, (machine_mode mode, rtx x, unsigned HOST_WIDE_INT align),
+ default_select_rtx_section)
+
+/* Select a unique section name for DECL. RELOC is the same as
+ for SELECT_SECTION. */
+DEFHOOK
+(unique_section,
+ "Build up a unique section name, expressed as a @code{STRING_CST} node,\n\
+and assign it to @samp{DECL_SECTION_NAME (@var{decl})}.\n\
+As with @code{TARGET_ASM_SELECT_SECTION}, @var{reloc} indicates whether\n\
+the initial value of @var{exp} requires link-time relocations.\n\
+\n\
+The default version of this function appends the symbol name to the\n\
+ELF section name that would normally be used for the symbol. For\n\
+example, the function @code{foo} would be placed in @code{.text.foo}.\n\
+Whatever the actual target object format, this is often good enough.",
+ void, (tree decl, int reloc),
+ default_unique_section)
+
+/* Return the readonly data or relocated readonly data section
+ associated with function DECL. */
+DEFHOOK
+(function_rodata_section,
+ "Return the readonly data or reloc readonly data section associated with\n\
+@samp{DECL_SECTION_NAME (@var{decl})}. @var{relocatable} selects the latter\n\
+over the former.\n\
+The default version of this function selects @code{.gnu.linkonce.r.name} if\n\
+the function's section is @code{.gnu.linkonce.t.name}, @code{.rodata.name}\n\
+or @code{.data.rel.ro.name} if the function is in @code{.text.name}, and\n\
+the normal readonly-data or reloc readonly data section otherwise.",
+ section *, (tree decl, bool relocatable),
+ default_function_rodata_section)
+
+/* Nonnull if the target wants to override the default ".rodata" prefix
+ for mergeable data sections. */
+DEFHOOKPOD
+(mergeable_rodata_prefix,
+ "Usually, the compiler uses the prefix @code{\".rodata\"} to construct\n\
+section names for mergeable constant data. Define this macro to override\n\
+the string if a different section name should be used.",
+ const char *, ".rodata")
+
+/* Return the section to be used for transactional memory clone tables. */
+DEFHOOK
+(tm_clone_table_section,
+ "Return the section that should be used for transactional memory clone\n\
+tables.",
+ section *, (void), default_clone_table_section)
+
+/* Output a constructor for a symbol with a given priority. */
+DEFHOOK
+(constructor,
+ "If defined, a function that outputs assembler code to arrange to call\n\
+the function referenced by @var{symbol} at initialization time.\n\
+\n\
+Assume that @var{symbol} is a @code{SYMBOL_REF} for a function taking\n\
+no arguments and with no return value. If the target supports initialization\n\
+priorities, @var{priority} is a value between 0 and @code{MAX_INIT_PRIORITY};\n\
+otherwise it must be @code{DEFAULT_INIT_PRIORITY}.\n\
+\n\
+If this macro is not defined by the target, a suitable default will\n\
+be chosen if (1) the target supports arbitrary section names, (2) the\n\
+target defines @code{CTORS_SECTION_ASM_OP}, or (3) @code{USE_COLLECT2}\n\
+is not defined.",
+ void, (rtx symbol, int priority), NULL)
+
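+/* Illustrative sketch, not part of upstream GCC: a port using a simple
+   .ctors-style section and ignoring priorities might write (the
+   section name, directive and function name are assumptions for a
+   32-bit target):
+
+     static void
+     example_asm_constructor (rtx symbol, int priority)
+     {
+       fputs ("\t.section\t.ctors\n\t.word\t", asm_out_file);
+       output_addr_const (asm_out_file, symbol);
+       fputc ('\n', asm_out_file);
+     }  */
+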
+/* Output a destructor for a symbol with a given priority. */
+DEFHOOK
+(destructor,
+ "This is like @code{TARGET_ASM_CONSTRUCTOR} but used for termination\n\
+functions rather than initialization functions.",
+ void, (rtx symbol, int priority), NULL)
+
+/* Output the assembler code for a thunk function. THUNK_DECL is the
+ declaration for the thunk function itself, FUNCTION is the decl for
+ the target function. DELTA is an immediate constant offset to be
+ added to THIS. If VCALL_OFFSET is nonzero, the word at
+ *(*this + vcall_offset) should be added to THIS. */
+DEFHOOK
+(output_mi_thunk,
+ "A function that outputs the assembler code for a thunk\n\
+function, used to implement C++ virtual function calls with multiple\n\
+inheritance. The thunk acts as a wrapper around a virtual function,\n\
+adjusting the implicit object parameter before handing control off to\n\
+the real function.\n\
+\n\
+First, emit code to add the integer @var{delta} to the location that\n\
+contains the incoming first argument. Assume that this argument\n\
+contains a pointer, and is the one used to pass the @code{this} pointer\n\
+in C++. This is the incoming argument @emph{before} the function prologue,\n\
+e.g.@: @samp{%o0} on a sparc. The addition must preserve the values of\n\
+all other incoming arguments.\n\
+\n\
+Then, if @var{vcall_offset} is nonzero, an additional adjustment should be\n\
+made after adding @code{delta}. In particular, if @var{p} is the\n\
+adjusted pointer, the following adjustment should be made:\n\
+\n\
+@smallexample\n\
+p += (*((ptrdiff_t **)p))[vcall_offset/sizeof(ptrdiff_t)]\n\
+@end smallexample\n\
+\n\
+After the additions, emit code to jump to @var{function}, which is a\n\
+@code{FUNCTION_DECL}. This is a direct pure jump, not a call, and does\n\
+not touch the return address. Hence returning from @var{function} will\n\
+return to whoever called the current @samp{thunk}.\n\
+\n\
+The effect must be as if @var{function} had been called directly with\n\
+the adjusted first argument. This macro is responsible for emitting all\n\
+of the code for a thunk function; @code{TARGET_ASM_FUNCTION_PROLOGUE}\n\
+and @code{TARGET_ASM_FUNCTION_EPILOGUE} are not invoked.\n\
+\n\
+The @var{thunk_fndecl} is redundant. (@var{delta} and @var{function}\n\
+have already been extracted from it.) It might possibly be useful on\n\
+some targets, but probably not.\n\
+\n\
+If you do not define this macro, the target-independent code in the C++\n\
+front end will generate a less efficient heavyweight thunk that calls\n\
+@var{function} instead of jumping to it. The generic approach does\n\
+not support varargs.",
+ void, (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
+ HOST_WIDE_INT vcall_offset, tree function),
+ NULL)
+
+/* Determine whether output_mi_thunk would succeed. */
+/* ??? Ideally, this hook would not exist, and success or failure
+ would be returned from output_mi_thunk directly. But there's
+ too much undo-able setup involved in invoking output_mi_thunk.
+ Could be fixed by making output_mi_thunk emit rtl instead of
+ text to the output file. */
+DEFHOOK
+(can_output_mi_thunk,
+ "A function that returns true if TARGET_ASM_OUTPUT_MI_THUNK would be able\n\
+to output the assembler code for the thunk function specified by the\n\
+arguments it is passed, and false otherwise. In the latter case, the\n\
+generic approach will be used by the C++ front end, with the limitations\n\
+previously exposed.",
+ bool, (const_tree thunk_fndecl, HOST_WIDE_INT delta,
+ HOST_WIDE_INT vcall_offset, const_tree function),
+ hook_bool_const_tree_hwi_hwi_const_tree_false)
+
+/* Output any boilerplate text needed at the beginning of a
+ translation unit. */
+DEFHOOK
+(file_start,
+ "Output to @code{asm_out_file} any text which the assembler expects to\n\
+find at the beginning of a file. The default behavior is controlled\n\
+by two flags, documented below. Unless your target's assembler is\n\
+quite unusual, if you override the default, you should call\n\
+@code{default_file_start} at some point in your target hook. This\n\
+lets other target files rely on these variables.",
+ void, (void),
+ default_file_start)
+
+/* Output any boilerplate text needed at the end of a translation unit. */
+DEFHOOK
+(file_end,
+ "Output to @code{asm_out_file} any text which the assembler expects\n\
+to find at the end of a file. The default is to output nothing.",
+ void, (void),
+ hook_void_void)
+
+/* Output any boilerplate text needed at the beginning of an
+ LTO output stream. */
+DEFHOOK
+(lto_start,
+ "Output to @code{asm_out_file} any text which the assembler expects\n\
+to find at the start of an LTO section. The default is to output\n\
+nothing.",
+ void, (void),
+ hook_void_void)
+
+/* Output any boilerplate text needed at the end of an
+ LTO output stream. */
+DEFHOOK
+(lto_end,
+ "Output to @code{asm_out_file} any text which the assembler expects\n\
+to find at the end of an LTO section. The default is to output\n\
+nothing.",
+ void, (void),
+ hook_void_void)
+
+/* Output any boilerplate text needed at the end of a
+ translation unit before debug and unwind info is emitted. */
+DEFHOOK
+(code_end,
+ "Output to @code{asm_out_file} any text which is needed before emitting\n\
+unwind info and debug info at the end of a file. Some targets emit\n\
+here PIC setup thunks that cannot be emitted at the end of file,\n\
+because they couldn't have unwind info then. The default is to output\n\
+nothing.",
+ void, (void),
+ hook_void_void)
+
+/* Output an assembler pseudo-op to declare a library function name
+ external. */
+DEFHOOK
+(external_libcall,
+ "This target hook is a function to output to @var{asm_out_file} an assembler\n\
+pseudo-op to declare a library function name external. The name of the\n\
+library function is given by @var{symref}, which is a @code{symbol_ref}.",
+ void, (rtx symref),
+ default_external_libcall)
+
+/* Output an assembler directive to mark decl live. This instructs
+ linker to not dead code strip this symbol. */
+DEFHOOK
+(mark_decl_preserved,
+ "This target hook is a function to output to @var{asm_out_file} an assembler\n\
+directive to annotate @var{symbol} as used. The Darwin target uses the\n\
+.no_dead_code_strip directive.",
+ void, (const char *symbol),
+ hook_void_constcharptr)
+
+/* Output a record of the command line switches that have been passed. */
+DEFHOOK
+(record_gcc_switches,
+ "Provides the target with the ability to record the gcc command line\n\
+switches provided as argument.\n\
+\n\
+By default this hook is set to NULL, but an example implementation is\n\
+provided for ELF based targets. Called @code{elf_record_gcc_switches},\n\
+it records the switches as ASCII text inside a new, string mergeable\n\
+section in the assembler output file. The name of the new section is\n\
+provided by the @code{TARGET_ASM_RECORD_GCC_SWITCHES_SECTION} target\n\
+hook.",
+ void, (const char *),
+ NULL)
+
+/* The name of the section that the example ELF implementation of
+ record_gcc_switches will use to store the information. Target
+ specific versions of record_gcc_switches may or may not use
+ this information. */
+DEFHOOKPOD
+(record_gcc_switches_section,
+ "This is the name of the section that will be created by the example\n\
+ELF implementation of the @code{TARGET_ASM_RECORD_GCC_SWITCHES} target\n\
+hook.",
+ const char *, ".GCC.command.line")
+
+/* Output the definition of a section anchor. */
+DEFHOOK
+(output_anchor,
+ "Write the assembly code to define section anchor @var{x}, which is a\n\
+@code{SYMBOL_REF} for which @samp{SYMBOL_REF_ANCHOR_P (@var{x})} is true.\n\
+The hook is called with the assembly output position set to the beginning\n\
+of @code{SYMBOL_REF_BLOCK (@var{x})}.\n\
+\n\
+If @code{ASM_OUTPUT_DEF} is available, the hook's default definition uses\n\
+it to define the symbol as @samp{. + SYMBOL_REF_BLOCK_OFFSET (@var{x})}.\n\
+If @code{ASM_OUTPUT_DEF} is not available, the hook's default definition\n\
+is @code{NULL}, which disables the use of section anchors altogether.",
+ void, (rtx x),
+ default_asm_output_anchor)
+
+DEFHOOK
+(output_ident,
+ "Output a string based on @var{name}, suitable for the @samp{#ident}\n\
+directive, or the equivalent directive or pragma in non-C-family languages.\n\
+If this hook is not defined, nothing is output for the @samp{#ident}\n\
+directive.",
+ void, (const char *name),
+ hook_void_constcharptr)
+
+/* Output a DTP-relative reference to a TLS symbol. */
+DEFHOOK
+(output_dwarf_dtprel,
+ "If defined, this target hook is a function which outputs a DTP-relative\n\
+reference to the given TLS symbol of the specified size.",
+ void, (FILE *file, int size, rtx x),
+ NULL)
+
+/* Some target machines need to postscan each insn after it is output. */
+DEFHOOK
+(final_postscan_insn,
+ "If defined, this target hook is a function which is executed just after the\n\
+output of assembler code for @var{insn}, to change the mode of the assembler\n\
+if necessary.\n\
+\n\
+Here the argument @var{opvec} is the vector containing the operands\n\
+extracted from @var{insn}, and @var{noperands} is the number of\n\
+elements of the vector which contain meaningful data for this insn.\n\
+The contents of this vector are what was used to convert the insn\n\
+template into assembler code, so you can change the assembler mode\n\
+by checking the contents of the vector.",
+ void, (FILE *file, rtx_insn *insn, rtx *opvec, int noperands),
+ NULL)
+
+/* Emit the trampoline template. This hook may be NULL. */
+DEFHOOK
+(trampoline_template,
+ "This hook is called by @code{assemble_trampoline_template} to output,\n\
+on the stream @var{f}, assembler code for a block of data that contains\n\
+the constant parts of a trampoline. This code should not include a\n\
+label---the label is taken care of automatically.\n\
+\n\
+If you do not define this hook, it means no template is needed\n\
+for the target. Do not define this hook on systems where the block move\n\
+code to copy the trampoline into place would be larger than the code\n\
+to generate it on the spot.",
+ void, (FILE *f),
+ NULL)
+
+DEFHOOK
+(output_source_filename,
+ "Output DWARF debugging information which indicates that filename\n\
+@var{name} is the current source file to the stdio stream @var{file}.\n\
+\n\
+This target hook need not be defined if the standard form of output\n\
+for the file format in use is appropriate.",
+ void ,(FILE *file, const char *name),
+ default_asm_output_source_filename)
+
+DEFHOOK
+(output_addr_const_extra,
+ "A target hook to recognize @var{rtx} patterns that @code{output_addr_const}\n\
+can't deal with, and output assembly code to @var{file} corresponding to\n\
+the pattern @var{x}. This may be used to allow machine-dependent\n\
+@code{UNSPEC}s to appear within constants.\n\
+\n\
+If target hook fails to recognize a pattern, it must return @code{false},\n\
+so that a standard error message is printed. If it prints an error message\n\
+itself, by calling, for example, @code{output_operand_lossage}, it may just\n\
+return @code{true}.",
+ bool, (FILE *file, rtx x),
+ hook_bool_FILEptr_rtx_false)
+
+/* ??? The TARGET_PRINT_OPERAND* hooks are part of the asm_out struct,
+ even though that is not reflected in the macro name to override their
+ initializers. */
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_"
+
+/* Emit a machine-specific insn operand. */
+/* ??? tm.texi only documents the old macro PRINT_OPERAND,
+ not this hook, and uses a different name for the argument FILE. */
+DEFHOOK_UNDOC
+(print_operand,
+ "",
+ void, (FILE *file, rtx x, int code),
+ default_print_operand)
+
+/* Emit a machine-specific memory address. */
+/* ??? tm.texi only documents the old macro PRINT_OPERAND_ADDRESS,
+ not this hook, and uses different argument names. */
+DEFHOOK_UNDOC
+(print_operand_address,
+ "",
+ void, (FILE *file, machine_mode mode, rtx addr),
+ default_print_operand_address)
+
+/* Determine whether CODE is a valid punctuation character for the
+ `print_operand' hook. */
+/* ??? tm.texi only documents the old macro PRINT_OPERAND_PUNCT_VALID_P,
+ not this hook. */
+DEFHOOK_UNDOC
+(print_operand_punct_valid_p,
+ "",
+ bool ,(unsigned char code),
+ default_print_operand_punct_valid_p)
+
+/* Given a symbol name, perform same mangling as assemble_name and
+ ASM_OUTPUT_LABELREF, returning result as an IDENTIFIER_NODE. */
+DEFHOOK
+(mangle_assembler_name,
+ "Given a symbol @var{name}, perform same mangling as @code{varasm.cc}'s\n\
+@code{assemble_name}, but in memory rather than to a file stream, returning\n\
+result as an @code{IDENTIFIER_NODE}. Required for correct LTO symtabs. The\n\
+default implementation calls the @code{TARGET_STRIP_NAME_ENCODING} hook and\n\
+then prepends the @code{USER_LABEL_PREFIX}, if any.",
+ tree, (const char *name),
+ default_mangle_assembler_name)
+
+HOOK_VECTOR_END (asm_out)
+
+/* Functions relating to instruction scheduling. All of these
+ default to null pointers, which haifa-sched.cc looks for and handles. */
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_SCHED_"
+HOOK_VECTOR (TARGET_SCHED, sched)
+
+/* Given the current cost, COST, of an insn, INSN, calculate and
+ return a new cost based on its relationship to DEP_INSN through
+ the dependence LINK. The default is to make no adjustment. */
+DEFHOOK
+(adjust_cost,
+ "This function corrects the value of @var{cost} based on the\n\
+relationship between @var{insn} and @var{dep_insn} through a\n\
+dependence of type dep_type, and strength @var{dw}. It should return the new\n\
+value. The default is to make no adjustment to @var{cost}. This can be\n\
+used for example to specify to the scheduler using the traditional pipeline\n\
+description that an output- or anti-dependence does not incur the same cost\n\
+as a data-dependence. If the scheduler using the automaton based pipeline\n\
+description, the cost of anti-dependence is zero and the cost of\n\
+output-dependence is maximum of one and the difference of latency\n\
+times of the first and the second insns. If these values are not\n\
+acceptable, you could use the hook to modify them too. See also\n\
+@pxref{Processor pipeline description}.",
+ int, (rtx_insn *insn, int dep_type1, rtx_insn *dep_insn, int cost,
+ unsigned int dw),
+ NULL)
+
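+/* Illustrative sketch, not part of upstream GCC: a port on which anti-
+   and output dependencies never stall could reduce their cost to zero
+   while leaving true data dependencies alone (function name assumed):
+
+     static int
+     example_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
+                          int cost, unsigned int dw)
+     {
+       if (dep_type == REG_DEP_ANTI || dep_type == REG_DEP_OUTPUT)
+         return 0;
+       return cost;
+     }  */
+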
+/* Adjust the priority of an insn as you see fit. Returns the new priority. */
+DEFHOOK
+(adjust_priority,
+ "This hook adjusts the integer scheduling priority @var{priority} of\n\
+@var{insn}. It should return the new priority. Increase the priority to\n\
+execute @var{insn} earlier, reduce the priority to execute @var{insn}\n\
+later. Do not define this hook if you do not need to adjust the\n\
+scheduling priorities of insns.",
+ int, (rtx_insn *insn, int priority), NULL)
+
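+/* Illustrative sketch, not part of upstream GCC: a port that wants
+   loads scheduled as early as possible could bump their priority.
+   The function name and the bonus of 2 are assumptions.
+
+     static int
+     example_adjust_priority (rtx_insn *insn, int priority)
+     {
+       rtx pat = PATTERN (insn);
+       if (GET_CODE (pat) == SET && MEM_P (SET_SRC (pat)))
+         return priority + 2;
+       return priority;
+     }  */
+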
+/* Function which returns the maximum number of insns that can be
+ scheduled in the same machine cycle. This must be constant
+ over an entire compilation. The default is 1. */
+DEFHOOK
+(issue_rate,
+ "This hook returns the maximum number of instructions that can ever\n\
+issue at the same time on the target machine. The default is one.\n\
+Although the insn scheduler can determine on its own whether an insn\n\
+can issue on the same cycle, this value can serve as an additional\n\
+constraint on issuing insns on the same simulated processor cycle (see\n\
+hooks @samp{TARGET_SCHED_REORDER} and @samp{TARGET_SCHED_REORDER2}).\n\
+This value must be constant over the entire compilation. If you need\n\
+it to vary depending on what the instructions are, you must use\n\
+@samp{TARGET_SCHED_VARIABLE_ISSUE}.",
+ int, (void), NULL)
+
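+/* Illustrative sketch, not part of upstream GCC: for a hypothetical
+   dual-issue core this hook is just a constant:
+
+     static int
+     example_issue_rate (void)
+     {
+       return 2;
+     }  */
+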
+/* Calculate how much this insn affects how many more insns we
+ can emit this cycle. Default is they all cost the same. */
+DEFHOOK
+(variable_issue,
+ "This hook is executed by the scheduler after it has scheduled an insn\n\
+from the ready list. It should return the number of insns which can\n\
+still be issued in the current cycle. The default is\n\
+@samp{@w{@var{more} - 1}} for insns other than @code{CLOBBER} and\n\
+@code{USE}, which normally are not counted against the issue rate.\n\
+You should define this hook if some insns take more machine resources\n\
+than others, so that fewer insns can follow them in the same cycle.\n\
+@var{file} is either a null pointer, or a stdio stream to write any\n\
+debug output to. @var{verbose} is the verbose level provided by\n\
+@option{-fsched-verbose-@var{n}}. @var{insn} is the instruction that\n\
+was scheduled.",
+ int, (FILE *file, int verbose, rtx_insn *insn, int more), NULL)
+
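+/* Illustrative sketch, not part of upstream GCC: a port where some
+   insns occupy several issue slots could subtract a per-insn slot
+   count; get_attr_slots would come from a hypothetical "slots" insn
+   attribute defined in the machine description.
+
+     static int
+     example_variable_issue (FILE *file, int verbose, rtx_insn *insn,
+                             int more)
+     {
+       if (GET_CODE (PATTERN (insn)) == USE
+           || GET_CODE (PATTERN (insn)) == CLOBBER)
+         return more;
+       return more - get_attr_slots (insn);
+     }  */
+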
+/* Initialize machine-dependent scheduling code. */
+DEFHOOK
+(init,
+ "This hook is executed by the scheduler at the beginning of each block of\n\
+instructions that are to be scheduled. @var{file} is either a null\n\
+pointer, or a stdio stream to write any debug output to. @var{verbose}\n\
+is the verbose level provided by @option{-fsched-verbose-@var{n}}.\n\
+@var{max_ready} is the maximum number of insns in the current scheduling\n\
+region that can be live at the same time. This can be used to allocate\n\
+scratch space if it is needed, e.g.@: by @samp{TARGET_SCHED_REORDER}.",
+ void, (FILE *file, int verbose, int max_ready), NULL)
+
+/* Finalize machine-dependent scheduling code. */
+DEFHOOK
+(finish,
+ "This hook is executed by the scheduler at the end of each block of\n\
+instructions that are to be scheduled. It can be used to perform\n\
+cleanup of any actions done by the other scheduling hooks. @var{file}\n\
+is either a null pointer, or a stdio stream to write any debug output\n\
+to. @var{verbose} is the verbose level provided by\n\
+@option{-fsched-verbose-@var{n}}.",
+ void, (FILE *file, int verbose), NULL)
+
+ /* Initialize machine-dependent function wide scheduling code. */
+DEFHOOK
+(init_global,
+ "This hook is executed by the scheduler after function level initializations.\n\
+@var{file} is either a null pointer, or a stdio stream to write any debug output to.\n\
+@var{verbose} is the verbose level provided by @option{-fsched-verbose-@var{n}}.\n\
+@var{old_max_uid} is the maximum insn uid when scheduling begins.",
+ void, (FILE *file, int verbose, int old_max_uid), NULL)
+
+/* Finalize machine-dependent function wide scheduling code. */
+DEFHOOK
+(finish_global,
+ "This is the cleanup hook corresponding to @code{TARGET_SCHED_INIT_GLOBAL}.\n\
+@var{file} is either a null pointer, or a stdio stream to write any debug output to.\n\
+@var{verbose} is the verbose level provided by @option{-fsched-verbose-@var{n}}.",
+ void, (FILE *file, int verbose), NULL)
+
+/* Reorder insns in a machine-dependent fashion, in two different
+ places. Default does nothing. */
+DEFHOOK
+(reorder,
+ "This hook is executed by the scheduler after it has scheduled the ready\n\
+list, to allow the machine description to reorder it (for example to\n\
+combine two small instructions together on @samp{VLIW} machines).\n\
+@var{file} is either a null pointer, or a stdio stream to write any\n\
+debug output to. @var{verbose} is the verbose level provided by\n\
+@option{-fsched-verbose-@var{n}}. @var{ready} is a pointer to the ready\n\
+list of instructions that are ready to be scheduled. @var{n_readyp} is\n\
+a pointer to the number of elements in the ready list. The scheduler\n\
+reads the ready list in reverse order, starting with\n\
+@var{ready}[@var{*n_readyp} @minus{} 1] and going to @var{ready}[0]. @var{clock}\n\
+is the timer tick of the scheduler. You may modify the ready list and\n\
+the number of ready insns. The return value is the number of insns that\n\
+can issue this cycle; normally this is just @code{issue_rate}. See also\n\
+@samp{TARGET_SCHED_REORDER2}.",
+ int, (FILE *file, int verbose, rtx_insn **ready, int *n_readyp, int clock), NULL)
+
+DEFHOOK
+(reorder2,
+ "Like @samp{TARGET_SCHED_REORDER}, but called at a different time. That\n\
+function is called whenever the scheduler starts a new cycle. This one\n\
+is called once per iteration over a cycle, immediately after\n\
+@samp{TARGET_SCHED_VARIABLE_ISSUE}; it can reorder the ready list and\n\
+return the number of insns to be scheduled in the same cycle. Defining\n\
+this hook can be useful if there are frequent situations where\n\
+scheduling one insn causes other insns to become ready in the same\n\
+cycle. These other insns can then be taken into account properly.",
+ int, (FILE *file, int verbose, rtx_insn **ready, int *n_readyp, int clock), NULL)
+
+DEFHOOK
+(macro_fusion_p,
+ "This hook is used to check whether target platform supports macro fusion.",
+ bool, (void), NULL)
+
+DEFHOOK
+(macro_fusion_pair_p,
+ "This hook is used to check whether two insns should be macro fused for\n\
+a target microarchitecture. If this hook returns true for the given insn pair\n\
+(@var{prev} and @var{curr}), the scheduler will put them into a sched\n\
+group, and they will not be scheduled apart. The two insns will be either\n\
+two SET insns or a compare and a conditional jump and this hook should\n\
+validate any dependencies needed to fuse the two insns together.",
+ bool, (rtx_insn *prev, rtx_insn *curr), NULL)
+
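+/* Illustrative sketch, not part of upstream GCC: a port that fuses a
+   condition-code-setting insn with an immediately following conditional
+   jump could check for exactly that shape; CC_REGNUM stands for the
+   port's hypothetical condition-code register number.
+
+     static bool
+     example_macro_fusion_pair_p (rtx_insn *prev, rtx_insn *curr)
+     {
+       rtx set = single_set (prev);
+       return (set != NULL_RTX
+               && REG_P (SET_DEST (set))
+               && REGNO (SET_DEST (set)) == CC_REGNUM
+               && any_condjump_p (curr));
+     }  */
+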
+/* The following member value is a pointer to a function called
+   after the forward dependencies of the insns in the chain delimited
+   by the two parameters (head and tail respectively) have been
+   evaluated. */
+DEFHOOK
+(dependencies_evaluation_hook,
+ "This hook is called after evaluation forward dependencies of insns in\n\
+chain given by two parameter values (@var{head} and @var{tail}\n\
+correspondingly) but before insns scheduling of the insn chain. For\n\
+example, it can be used for better insn classification if it requires\n\
+analysis of dependencies. This hook can use backward and forward\n\
+dependencies of the insn scheduler because they are already\n\
+calculated.",
+ void, (rtx_insn *head, rtx_insn *tail), NULL)
+
+/* The values of the following four members are pointers to functions
+ used to simplify the automaton descriptions. dfa_pre_cycle_insn and
+ dfa_post_cycle_insn give functions returning insns which are used to
+ change the pipeline hazard recognizer state when the new simulated
+ processor cycle correspondingly starts and finishes. The function
+ defined by init_dfa_pre_cycle_insn and init_dfa_post_cycle_insn are
+ used to initialize the corresponding insns. The default values of
+ the members result in not changing the automaton state when the
+ new simulated processor cycle correspondingly starts and finishes. */
+
+DEFHOOK
+(init_dfa_pre_cycle_insn,
+ "The hook can be used to initialize data used by the previous hook.",
+ void, (void), NULL)
+
+DEFHOOK
+(dfa_pre_cycle_insn,
+ "The hook returns an RTL insn. The automaton state used in the\n\
+pipeline hazard recognizer is changed as if the insn were scheduled\n\
+when the new simulated processor cycle starts. Usage of the hook may\n\
+simplify the automaton pipeline description for some @acronym{VLIW}\n\
+processors. If the hook is defined, it is used only for the automaton\n\
+based pipeline description. The default is not to change the state\n\
+when the new simulated processor cycle starts.",
+ rtx, (void), NULL)
+
+DEFHOOK
+(init_dfa_post_cycle_insn,
+ "The hook is analogous to @samp{TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN} but\n\
+used to initialize data used by the previous hook.",
+ void, (void), NULL)
+
+DEFHOOK
+(dfa_post_cycle_insn,
+ "The hook is analogous to @samp{TARGET_SCHED_DFA_PRE_CYCLE_INSN} but used\n\
+to change the state as if the insn were scheduled when the new\n\
+simulated processor cycle finishes.",
+ rtx_insn *, (void), NULL)
+
+/* The values of the following two members are pointers to
+ functions used to simplify the automaton descriptions.
+   dfa_pre_advance_cycle and dfa_post_advance_cycle are called
+   immediately before and after the cycle is advanced. */
+
+DEFHOOK
+(dfa_pre_advance_cycle,
+ "The hook to notify target that the current simulated cycle is about to finish.\n\
+The hook is analogous to @samp{TARGET_SCHED_DFA_PRE_CYCLE_INSN} but used\n\
+to change the state in more complicated situations - e.g., when advancing\n\
+state on a single insn is not enough.",
+ void, (void), NULL)
+
+DEFHOOK
+(dfa_post_advance_cycle,
+ "The hook to notify target that new simulated cycle has just started.\n\
+The hook is analogous to @samp{TARGET_SCHED_DFA_POST_CYCLE_INSN} but used\n\
+to change the state in more complicated situations - e.g., when advancing\n\
+state on a single insn is not enough.",
+ void, (void), NULL)
+
+/* The following member value is a pointer to a function returning a
+   value which defines how many insns in the queue `ready' we will try
+   for multi-pass scheduling. If the member value is nonzero and the
+   function returns a positive value, the DFA based scheduler will make
+   multi-pass scheduling for the first cycle. In other words, we will
+   try to choose the ready insn which permits starting the maximum
+   number of insns on the same cycle. */
+DEFHOOK
+(first_cycle_multipass_dfa_lookahead,
+ "This hook controls better choosing an insn from the ready insn queue\n\
+for the @acronym{DFA}-based insn scheduler. Usually the scheduler\n\
+chooses the first insn from the queue. If the hook returns a positive\n\
+value, an additional scheduler code tries all permutations of\n\
+@samp{TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ()}\n\
+subsequent ready insns to choose an insn whose issue will result in\n\
+maximal number of issued insns on the same cycle. For the\n\
+@acronym{VLIW} processor, the code could actually solve the problem of\n\
+packing simple insns into the @acronym{VLIW} insn, provided of course\n\
+that the rules of @acronym{VLIW} packing are described in the automaton.\n\
+\n\
+This code also could be used for superscalar @acronym{RISC}\n\
+processors. Let us consider a superscalar @acronym{RISC} processor\n\
+with 3 pipelines. Some insns can be executed in pipelines @var{A} or\n\
+@var{B}, some insns can be executed only in pipelines @var{B} or\n\
+@var{C}, and one insn can be executed in pipeline @var{B}. The\n\
+processor may issue the 1st insn into @var{A} and the 2nd one into\n\
+@var{B}. In this case, the 3rd insn will wait for freeing @var{B}\n\
+until the next cycle. If the scheduler issues the 3rd insn first,\n\
+the processor could issue all 3 insns per cycle.\n\
+\n\
+Actually this code demonstrates the advantages of the automaton based\n\
+pipeline hazard recognizer. We can quickly and easily try many insn\n\
+schedules to choose the best one.\n\
+\n\
+The default is no multipass scheduling.",
+ int, (void), NULL)
+
+/* The following member value is a pointer to a function controlling
+   which insns from the ready insn queue will be considered for
+   multipass insn scheduling. If the hook returns zero for the insn
+   passed as the parameter, the insn will not be chosen to be issued. */
+DEFHOOK
+(first_cycle_multipass_dfa_lookahead_guard,
+ "\n\
+This hook controls what insns from the ready insn queue will be\n\
+considered for the multipass insn scheduling. If the hook returns\n\
+zero for @var{insn}, the insn will be considered in multipass scheduling.\n\
+Positive return values will remove @var{insn} from consideration on\n\
+the current round of multipass scheduling.\n\
+Negative return values will remove @var{insn} from consideration for given\n\
+number of cycles.\n\
+Backends should be careful about returning non-zero for the highest\n\
+priority instruction at position 0 in the ready list. @var{ready_index}\n\
+is passed to allow backends to make correct judgements.\n\
+\n\
+The default is that any ready insns can be chosen to be issued.",
+ int, (rtx_insn *insn, int ready_index), NULL)
+
+/* This hook prepares the target for a new round of multipass
+ scheduling.
+ DATA is a pointer to target-specific data used for multipass scheduling.
+ READY_TRY and N_READY represent the current state of search in the
+ optimization space. The target can filter out instructions that
+ should not be tried during current round by setting corresponding
+ elements in READY_TRY to non-zero.
+ FIRST_CYCLE_INSN_P is true if this is the first round of multipass
+ scheduling on current cycle. */
+DEFHOOK
+(first_cycle_multipass_begin,
+ "This hook prepares the target backend for a new round of multipass\n\
+scheduling.",
+ void, (void *data, signed char *ready_try, int n_ready, bool first_cycle_insn_p),
+ NULL)
+
+/* This hook is called when multipass scheduling evaluates instruction INSN.
+ DATA is a pointer to target-specific data that can be used to record effects
+ of INSN on CPU that are not described in DFA.
+ READY_TRY and N_READY represent the current state of search in the
+ optimization space. The target can filter out instructions that
+ should not be tried after issuing INSN by setting corresponding
+ elements in READY_TRY to non-zero.
+ INSN is the instruction being evaluated.
+ PREV_DATA is a pointer to target-specific data corresponding
+ to a state before issuing INSN. */
+DEFHOOK
+(first_cycle_multipass_issue,
+ "This hook is called when multipass scheduling evaluates instruction INSN.",
+ void, (void *data, signed char *ready_try, int n_ready, rtx_insn *insn,
+ const void *prev_data), NULL)
+
+/* This hook is called when multipass scheduling backtracks from evaluation of
+ instruction corresponding to DATA.
+ DATA is a pointer to target-specific data that stores the effects
+ of instruction from which the algorithm backtracks on CPU that are not
+ described in DFA.
+ READY_TRY and N_READY represent the current state of search in the
+ optimization space. The target can filter out instructions that
+ should not be tried after issuing INSN by setting corresponding
+ elements in READY_TRY to non-zero. */
+DEFHOOK
+(first_cycle_multipass_backtrack,
+ "This is called when multipass scheduling backtracks from evaluation of\n\
+an instruction.",
+ void, (const void *data, signed char *ready_try, int n_ready), NULL)
+
+/* This hook notifies the target about the result of the concluded current
+ round of multipass scheduling.
+ DATA is a pointer.
+ If DATA is non-NULL it points to target-specific data used for multipass
+ scheduling which corresponds to instruction at the start of the chain of
+ the winning solution. DATA is NULL when multipass scheduling cannot find
+ a good enough solution on current cycle and decides to retry later,
+ usually after advancing the cycle count. */
+DEFHOOK
+(first_cycle_multipass_end,
+ "This hook notifies the target about the result of the concluded current\n\
+round of multipass scheduling.",
+ void, (const void *data), NULL)
+
+/* This hook is called to initialize target-specific data for multipass
+ scheduling after it has been allocated.
+ DATA is a pointer to target-specific data that stores the effects
+ of instruction from which the algorithm backtracks on CPU that are not
+ described in DFA. */
+DEFHOOK
+(first_cycle_multipass_init,
+ "This hook initializes target-specific data used in multipass scheduling.",
+ void, (void *data), NULL)
+
+/* This hook is called to finalize target-specific data for multipass
+ scheduling before it is deallocated.
+ DATA is a pointer to target-specific data that stores the effects
+ of instruction from which the algorithm backtracks on CPU that are not
+ described in DFA. */
+DEFHOOK
+(first_cycle_multipass_fini,
+ "This hook finalizes target-specific data used in multipass scheduling.",
+ void, (void *data), NULL)
+
+/* The following member value is a pointer to a function called by
+ the insn scheduler before issuing the insn passed as the third
+ parameter on the given cycle.  If the hook returns nonzero, the
+ insn is not issued on the given processor cycle.  Instead, the
+ processor cycle is advanced.  If the value passed through the
+ last parameter is zero, the insn ready queue is not sorted
+ on the new cycle start as usual.  The first parameter passes the
+ file for debugging output.  The second one passes the scheduler
+ verbosity level of the debugging output.  The fourth and fifth
+ parameter values are, respectively, the processor cycle on which
+ the previous insn has been issued and the current processor cycle. */
+DEFHOOK
+(dfa_new_cycle,
+ "This hook is called by the insn scheduler before issuing @var{insn}\n\
+on cycle @var{clock}. If the hook returns nonzero,\n\
+@var{insn} is not issued on this processor cycle. Instead,\n\
+the processor cycle is advanced. If *@var{sort_p}\n\
+is zero, the insn ready queue is not sorted on the new cycle\n\
+start as usual.  @var{dump} and @var{verbose} specify the file and\n\
+verbosity level to use for debugging output.\n\
+@var{last_clock} and @var{clock} are, respectively, the\n\
+processor cycle on which the previous insn has been issued,\n\
+and the current processor cycle.",
+ int, (FILE *dump, int verbose, rtx_insn *insn, int last_clock,
+ int clock, int *sort_p),
+ NULL)
+
+/* The following member value is a pointer to a function called by the
+ insn scheduler. It should return true if there exists a dependence
+ which is considered costly by the target between the insn
+ DEP_PRO (&_DEP) and the insn DEP_CON (&_DEP).  The first parameter is
+ the dep that represents the dependence between the two insns. The
+ second argument is the cost of the dependence as estimated by
+ the scheduler. The last argument is the distance in cycles
+ between the already scheduled insn (the producer of the dependence)
+ and the second insn (its consumer). */
+DEFHOOK
+(is_costly_dependence,
+ "This hook is used to define which dependences are considered costly by\n\
+the target, so costly that it is not advisable to schedule the insns that\n\
+are involved in the dependence too close to one another. The parameters\n\
+to this hook are as follows: The first parameter @var{_dep} is the dependence\n\
+being evaluated. The second parameter @var{cost} is the cost of the\n\
+dependence as estimated by the scheduler, and the third\n\
+parameter @var{distance} is the distance in cycles between the two insns.\n\
+The hook returns @code{true} if considering the distance between the two\n\
+insns the dependence between them is considered costly by the target,\n\
+and @code{false} otherwise.\n\
+\n\
+Defining this hook can be useful in multiple-issue out-of-order machines,\n\
+where (a) it's practically hopeless to predict the actual data/resource\n\
+delays, but (b) there's a better chance to predict the actual grouping\n\
+that will be formed, and (c) correctly emulating the grouping can be very\n\
+important. In such targets one may want to allow issuing dependent insns\n\
+closer to one another---i.e., closer than the dependence distance; however,\n\
+not in cases of ``costly dependences'', which this hook allows you to define.",
+ bool, (struct _dep *_dep, int cost, int distance), NULL)
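+
+/* A sketch of a possible implementation for a hypothetical in-order
+   clustered machine; the "example_" name and the thresholds are
+   placeholders:
+
+     static bool
+     example_is_costly_dependence (struct _dep *dep, int cost, int distance)
+     {
+       // Flow dependences with a high estimated cost are considered
+       // costly unless the insns are already far apart.
+       return (DEP_TYPE (dep) == REG_DEP_TRUE
+               && cost > 2
+               && distance < 4);
+     }
+
+   registered via "#define TARGET_SCHED_IS_COSTLY_DEPENDENCE
+   example_is_costly_dependence".  */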
+
+/* The following member value is a pointer to a function called
+ by the insn scheduler. This hook is called to notify the backend
+ that new instructions were emitted. */
+DEFHOOK
+(h_i_d_extended,
+ "This hook is called by the insn scheduler after emitting a new instruction to\n\
+the instruction stream. The hook notifies a target backend to extend its\n\
+per instruction data structures.",
+ void, (void), NULL)
+
+/* The next 5 functions are for multi-point scheduling. */
+
+/* Allocate memory for scheduler context. */
+DEFHOOK
+(alloc_sched_context,
+ "Return a pointer to a store large enough to hold target scheduling context.",
+ void *, (void), NULL)
+
+/* Fills the context from the local machine scheduler context. */
+DEFHOOK
+(init_sched_context,
+ "Initialize store pointed to by @var{tc} to hold target scheduling context.\n\
+If @var{clean_p} is true then initialize @var{tc} as if the scheduler is at the\n\
+beginning of the block. Otherwise, copy the current context into @var{tc}.",
+ void, (void *tc, bool clean_p), NULL)
+
+/* Sets local machine scheduler context to a saved value. */
+DEFHOOK
+(set_sched_context,
+ "Copy target scheduling context pointed to by @var{tc} to the current context.",
+ void, (void *tc), NULL)
+
+/* Clears a scheduler context so it becomes like after init. */
+DEFHOOK
+(clear_sched_context,
+ "Deallocate internal data in target scheduling context pointed to by @var{tc}.",
+ void, (void *tc), NULL)
+
+/* Frees the scheduler context. */
+DEFHOOK
+(free_sched_context,
+ "Deallocate a store for target scheduling context pointed to by @var{tc}.",
+ void, (void *tc), NULL)
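+
+/* A sketch of how the five context hooks might fit together for a
+   hypothetical port that only needs to save one integer of state; all
+   "example_" names and the example_state global are placeholders:
+
+     struct example_sched_ctx { int pending_flush; };
+     static struct example_sched_ctx example_state;
+
+     static void *
+     example_alloc_sched_context (void)
+     {
+       return xmalloc (sizeof (struct example_sched_ctx));
+     }
+
+     static void
+     example_init_sched_context (void *tc, bool clean_p)
+     {
+       struct example_sched_ctx *ctx = (struct example_sched_ctx *) tc;
+       if (clean_p)
+         ctx->pending_flush = 0;     // as if at the start of the block
+       else
+         *ctx = example_state;       // snapshot the current context
+     }
+
+     static void
+     example_set_sched_context (void *tc)
+     {
+       example_state = *(struct example_sched_ctx *) tc;
+     }
+
+     static void
+     example_clear_sched_context (void *tc ATTRIBUTE_UNUSED) {}
+
+     static void
+     example_free_sched_context (void *tc) { free (tc); }  */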
+
+/* The following member value is a pointer to a function called
+ by the insn scheduler.
+ The first parameter is an instruction, the second parameter is the type
+ of the requested speculation, and the third parameter is a pointer to the
+ speculative pattern of the corresponding type (set if return value == 1).
+ It should return
+ -1 if there is no pattern that will satisfy the requested speculation type,
+ 0 if the current pattern satisfies the requested speculation type,
+ 1 if the pattern of the instruction should be changed to the newly
+ generated one. */
+DEFHOOK
+(speculate_insn,
+ "This hook is called by the insn scheduler when @var{insn} has only\n\
+speculative dependencies and therefore can be scheduled speculatively.\n\
+The hook is used to check if the pattern of @var{insn} has a speculative\n\
+version and, in case of successful check, to generate that speculative\n\
+pattern. The hook should return 1, if the instruction has a speculative form,\n\
+or @minus{}1, if it doesn't. @var{request} describes the type of requested\n\
+speculation. If the return value equals 1 then @var{new_pat} is assigned\n\
+the generated speculative pattern.",
+ int, (rtx_insn *insn, unsigned int dep_status, rtx *new_pat), NULL)
+
+/* The following member value is a pointer to a function called
+ by the insn scheduler. It should return true if the check instruction
+ passed as the parameter needs a recovery block. */
+DEFHOOK
+(needs_block_p,
+ "This hook is called by the insn scheduler during generation of recovery code\n\
+for @var{insn}. It should return @code{true}, if the corresponding check\n\
+instruction should branch to recovery code, or @code{false} otherwise.",
+ bool, (unsigned int dep_status), NULL)
+
+/* The following member value is a pointer to a function called
+ by the insn scheduler. It should return a pattern for the check
+ instruction.
+ The first parameter is a speculative instruction, the second parameter
+ is the label of the corresponding recovery block (or null, if it is a
+ simple check). The third parameter is the kind of speculation that
+ is being performed. */
+DEFHOOK
+(gen_spec_check,
+ "This hook is called by the insn scheduler to generate a pattern for recovery\n\
+check instruction. If @var{mutate_p} is zero, then @var{insn} is a\n\
+speculative instruction for which the check should be generated.\n\
+@var{label} is either a label of a basic block, where recovery code should\n\
+be emitted, or a null pointer, when requested check doesn't branch to\n\
+recovery code (a simple check). If @var{mutate_p} is nonzero, then\n\
+a pattern for a branchy check corresponding to a simple check denoted by\n\
+@var{insn} should be generated. In this case @var{label} can't be null.",
+ rtx, (rtx_insn *insn, rtx_insn *label, unsigned int ds), NULL)
+
+/* The following member value is a pointer to a function that provides
+ information about the speculation capabilities of the target.
+ The parameter is a pointer to spec_info variable. */
+DEFHOOK
+(set_sched_flags,
+ "This hook is used by the insn scheduler to find out what features should be\n\
+enabled/used.\n\
+The structure *@var{spec_info} should be filled in by the target.\n\
+The structure describes speculation types that can be used in the scheduler.",
+ void, (struct spec_info_def *spec_info), NULL)
+
+DEFHOOK_UNDOC
+(get_insn_spec_ds,
+ "Return speculation types of instruction @var{insn}.",
+ unsigned int, (rtx_insn *insn), NULL)
+
+DEFHOOK_UNDOC
+(get_insn_checked_ds,
+ "Return speculation types that are checked for instruction @var{insn}",
+ unsigned int, (rtx_insn *insn), NULL)
+
+DEFHOOK
+(can_speculate_insn,
+ "Some instructions should never be speculated by the schedulers, usually\n\
+ because getting this wrong would be too expensive.  Often such\n\
+ instructions have long latency, and often they are not fully modeled in the\n\
+ pipeline descriptions. This hook should return @code{false} if @var{insn}\n\
+ should not be speculated.",
+ bool, (rtx_insn *insn), hook_bool_rtx_insn_true)
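+
+/* A sketch of a conservative implementation; may_trap_p and PATTERN
+   are real rtl.h helpers, while the policy itself is only an example:
+
+     static bool
+     example_can_speculate_insn (rtx_insn *insn)
+     {
+       // Never speculate anything that may trap; assume the pipeline
+       // description does not model such insns fully.
+       return !may_trap_p (PATTERN (insn));
+     }  */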
+
+DEFHOOK_UNDOC
+(skip_rtx_p,
+ "Return bool if rtx scanning should just skip current layer and\
+ advance to the inner rtxes.",
+ bool, (const_rtx x), NULL)
+
+/* The following member value is a pointer to a function that provides
+ information about the target resource-based lower bound which is
+ used by the swing modulo scheduler. The parameter is a pointer
+ to ddg variable. */
+DEFHOOK
+(sms_res_mii,
+ "This hook is called by the swing modulo scheduler to calculate a\n\
+resource-based lower bound which is based on the resources available in\n\
+the machine and the resources required by each instruction. The target\n\
+backend can use @var{g} to calculate such bound. A very simple lower\n\
+bound will be used in case this hook is not implemented: the total number\n\
+of instructions divided by the issue rate.",
+ int, (struct ddg *g), NULL)
+
+/* The following member value is a function that initializes dispatch
+ scheduling and adds instructions to the dispatch window according to
+ its parameters. */
+DEFHOOK
+(dispatch_do,
+"This hook is called by Haifa Scheduler. It performs the operation specified\n\
+in its second parameter.",
+void, (rtx_insn *insn, int x),
+hook_void_rtx_insn_int)
+
+/* The following member value is a function that returns true if
+ dispatch scheduling is supported in hardware and the condition passed
+ as the second parameter is true. */
+DEFHOOK
+(dispatch,
+"This hook is called by Haifa Scheduler. It returns true if dispatch scheduling\n\
+is supported in hardware and the condition specified in the parameter is true.",
+bool, (rtx_insn *insn, int x),
+hook_bool_rtx_insn_int_false)
+
+DEFHOOKPOD
+(exposed_pipeline,
+"True if the processor has an exposed pipeline, which means that not just\n\
+the order of instructions is important for correctness when scheduling, but\n\
+also the latencies of operations.",
+bool, false)
+
+/* The following member value is a function that returns the number
+ of operations the reassociator should try to put in parallel for
+ statements of the given type.  By default 1 is used. */
+DEFHOOK
+(reassociation_width,
+"This hook is called by tree reassociator to determine a level of\n\
+parallelism required in output calculations chain.",
+int, (unsigned int opc, machine_mode mode),
+hook_int_uint_mode_1)
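+
+/* A sketch for a hypothetical machine with two vector FP pipes; the
+   width of 2 is a placeholder, not advice for any real port:
+
+     static int
+     example_reassociation_width (unsigned int opc, machine_mode mode)
+     {
+       // Allow two parallel chains for vector adds and multiplies,
+       // one otherwise.
+       if (VECTOR_MODE_P (mode) && (opc == PLUS_EXPR || opc == MULT_EXPR))
+         return 2;
+       return 1;
+     }  */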
+
+/* The following member value is a function that returns priority for
+ fusion of each instruction via pointer parameters. */
+DEFHOOK
+(fusion_priority,
+"This hook is called by scheduling fusion pass. It calculates fusion\n\
+priorities for each instruction passed in by parameter. The priorities\n\
+are returned via pointer parameters.\n\
+\n\
+@var{insn} is the instruction whose priorities need to be calculated.\n\
+@var{max_pri} is the maximum priority that can be returned in any case.\n\
+@var{fusion_pri} is the pointer parameter through which @var{insn}'s\n\
+fusion priority should be calculated and returned.\n\
+@var{pri} is the pointer parameter through which @var{insn}'s priority\n\
+should be calculated and returned.\n\
+\n\
+The same @var{fusion_pri} should be returned for instructions which should\n\
+be scheduled together.  Different @var{pri} values should be returned for\n\
+instructions with the same @var{fusion_pri}.  @var{fusion_pri} is the major\n\
+sort key, @var{pri} is the minor sort key. All instructions will be\n\
+scheduled according to the two priorities. All priorities calculated\n\
+should be between 0 (exclusive) and @var{max_pri} (inclusive). To avoid\n\
+false dependencies, @var{fusion_pri} of instructions which need to be\n\
+scheduled together should be smaller than @var{fusion_pri} of irrelevant\n\
+instructions.\n\
+\n\
+Consider the example below:\n\
+\n\
+@smallexample\n\
+ ldr r10, [r1, 4]\n\
+ add r4, r4, r10\n\
+ ldr r15, [r2, 8]\n\
+ sub r5, r5, r15\n\
+ ldr r11, [r1, 0]\n\
+ add r4, r4, r11\n\
+ ldr r16, [r2, 12]\n\
+ sub r5, r5, r16\n\
+@end smallexample\n\
+\n\
+On targets like ARM/AArch64, the two pairs of consecutive loads should be\n\
+merged.  The peephole2 pass can't help in this case unless the consecutive\n\
+loads are actually next to each other in the instruction flow; that's where\n\
+this scheduling fusion pass comes in.  This hook calculates a priority for each\n\
+instruction based on its fusion type, like:\n\
+\n\
+@smallexample\n\
+ ldr r10, [r1, 4] ; fusion_pri=99, pri=96\n\
+ add r4, r4, r10 ; fusion_pri=100, pri=100\n\
+ ldr r15, [r2, 8] ; fusion_pri=98, pri=92\n\
+ sub r5, r5, r15 ; fusion_pri=100, pri=100\n\
+ ldr r11, [r1, 0] ; fusion_pri=99, pri=100\n\
+ add r4, r4, r11 ; fusion_pri=100, pri=100\n\
+ ldr r16, [r2, 12] ; fusion_pri=98, pri=88\n\
+ sub r5, r5, r16 ; fusion_pri=100, pri=100\n\
+@end smallexample\n\
+\n\
+The scheduling fusion pass then sorts all ready-to-issue instructions according\n\
+to the priorities.  As a result, instructions of the same fusion type will be\n\
+pushed together in the instruction flow, like:\n\
+\n\
+@smallexample\n\
+ ldr r11, [r1, 0]\n\
+ ldr r10, [r1, 4]\n\
+ ldr r15, [r2, 8]\n\
+ ldr r16, [r2, 12]\n\
+ add r4, r4, r10\n\
+ sub r5, r5, r15\n\
+ add r4, r4, r11\n\
+ sub r5, r5, r16\n\
+@end smallexample\n\
+\n\
+Now peephole2 pass can simply merge the two pairs of loads.\n\
+\n\
+Since the scheduling fusion pass relies on peephole2 to do the real fusion\n\
+work, it is only enabled by default when peephole2 is in effect.\n\
+\n\
+This was first introduced on ARM/AArch64 targets; please refer to\n\
+the hook implementations for how different fusion types are supported.",
+void, (rtx_insn *insn, int max_pri, int *fusion_pri, int *pri), NULL)
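+
+/* A sketch of the shape such a hook usually takes; the "example_" name
+   and the TYPE_LOAD attribute value are placeholders:
+
+     static void
+     example_fusion_priority (rtx_insn *insn, int max_pri,
+                              int *fusion_pri, int *pri)
+     {
+       // Group hypothetical load insns under one fusion priority so
+       // the pass pushes them together; everything else keeps the
+       // maximum priority and is left alone.
+       if (recog_memoized (insn) >= 0 && get_attr_type (insn) == TYPE_LOAD)
+         *fusion_pri = max_pri - 1;
+       else
+         *fusion_pri = max_pri;
+       // A real port would derive *pri from the base register and
+       // offset so that mergeable loads become adjacent.
+       *pri = max_pri;
+     }  */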
+
+HOOK_VECTOR_END (sched)
+
+/* Functions relating to OpenMP SIMD and __attribute__((simd)) clones. */
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_SIMD_CLONE_"
+HOOK_VECTOR (TARGET_SIMD_CLONE, simd_clone)
+
+DEFHOOK
+(compute_vecsize_and_simdlen,
+"This hook should set @var{vecsize_mangle}, @var{vecsize_int}, @var{vecsize_float}\n\
+fields in the @var{simd_clone} structure pointed to by the @var{clone_info}\n\
+argument, and also the @var{simdlen} field if it was previously 0.\n\
+@var{vecsize_mangle} is a marker for the backend only. @var{vecsize_int} and\n\
+@var{vecsize_float} should be left zero on targets where the number of lanes is\n\
+not determined by the bitsize (in which case @var{simdlen} is always used).\n\
+The hook should return 0 if SIMD clones shouldn't be emitted,\n\
+or the number of @var{vecsize_mangle} variants that should be emitted.",
+int, (struct cgraph_node *, struct cgraph_simd_clone *, tree, int, bool), NULL)
+
+DEFHOOK
+(adjust,
+"This hook should add implicit @code{attribute(target(\"...\"))} attribute\n\
+to SIMD clone @var{node} if needed.",
+void, (struct cgraph_node *), NULL)
+
+DEFHOOK
+(usable,
+"This hook should return -1 if SIMD clone @var{node} shouldn't be used\n\
+in vectorized loops in the current function, or a non-negative number if it is\n\
+usable. In that case, the smaller the number is, the more desirable it is\n\
+to use it.",
+int, (struct cgraph_node *), NULL)
+
+HOOK_VECTOR_END (simd_clone)
+
+/* Functions relating to OpenMP SIMT vectorization transform. */
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_SIMT_"
+HOOK_VECTOR (TARGET_SIMT, simt)
+
+DEFHOOK
+(vf,
+"Return number of threads in SIMT thread group on the target.",
+int, (void), NULL)
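+
+/* For a warp-style GPU target this can be a constant; for example,
+   with 32 threads per SIMT group (the value is illustrative):
+
+     static int
+     example_simt_vf (void)
+     {
+       return 32;
+     }  */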
+
+HOOK_VECTOR_END (simt)
+
+/* Functions relating to OpenMP. */
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_OMP_"
+HOOK_VECTOR (TARGET_OMP, omp)
+
+DEFHOOK
+(device_kind_arch_isa,
+"Return 1 if @var{trait} @var{name} is present in the OpenMP context's\n\
+device trait set, return 0 if not present in any OpenMP context in the\n\
+whole translation unit, or -1 if not present in the current OpenMP context\n\
+but might be present in another OpenMP context in the same TU.",
+int, (enum omp_device_kind_arch_isa trait, const char *name), NULL)
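+
+/* A sketch for a hypothetical GPU offload target; the trait name
+   accepted here is a placeholder:
+
+     static int
+     example_omp_device_kind_arch_isa (enum omp_device_kind_arch_isa trait,
+                                       const char *name)
+     {
+       if (trait == omp_device_kind)
+         return strcmp (name, "gpu") == 0 ? 1 : 0;
+       // Unknown arch/isa traits: not present in this context, but
+       // possibly present in another context in the same TU.
+       return -1;
+     }  */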
+
+HOOK_VECTOR_END (omp)
+
+/* Functions relating to openacc. */
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_GOACC_"
+HOOK_VECTOR (TARGET_GOACC, goacc)
+
+DEFHOOK
+(validate_dims,
+"This hook should check the launch dimensions provided for an OpenACC\n\
+compute region or routine.  Defaulted values are represented as -1\n\
+and non-constant values as 0. The @var{fn_level} is negative for the\n\
+function corresponding to the compute region. For a routine it is the\n\
+outermost level at which partitioned execution may be spawned. The hook\n\
+should verify non-default values.  If @var{decl} is NULL, global defaults\n\
+are being validated and unspecified defaults should be filled in.\n\
+Diagnostics should be issued as appropriate. Return\n\
+true if changes have been made.  You must override this hook to\n\
+provide dimensions larger than 1.",
+bool, (tree decl, int *dims, int fn_level, unsigned used),
+default_goacc_validate_dims)
+
+DEFHOOK
+(dim_limit,
+"This hook should return the maximum size of a particular dimension,\n\
+or zero if unbounded.",
+int, (int axis),
+default_goacc_dim_limit)
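+
+/* A sketch for a hypothetical accelerator with at most 1024 vector
+   lanes and unbounded gang/worker dimensions:
+
+     static int
+     example_goacc_dim_limit (int axis)
+     {
+       return axis == GOMP_DIM_VECTOR ? 1024 : 0;
+     }  */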
+
+DEFHOOK
+(fork_join,
+"This hook can be used to convert IFN_GOACC_FORK and IFN_GOACC_JOIN\n\
+function calls to target-specific gimple, or indicate whether they\n\
+should be retained. It is executed during the oacc_device_lower pass.\n\
+It should return true if the call should be retained.  It should\n\
+return false if it is to be deleted (either because target-specific\n\
+gimple has been inserted before it, or there is no need for it).\n\
+The default hook returns false if there are no RTL expanders for them.",
+bool, (gcall *call, const int *dims, bool is_fork),
+default_goacc_fork_join)
+
+DEFHOOK
+(reduction,
+"This hook is used by the oacc_transform pass to expand calls to the\n\
+@code{GOACC_REDUCTION} internal function into a sequence of gimple\n\
+instructions.  @var{call} is the gimple statement containing the call to\n\
+the function. This hook removes statement @var{call} after the\n\
+expanded sequence has been inserted. This hook is also responsible\n\
+for allocating any storage for reductions when necessary.",
+void, (gcall *call),
+default_goacc_reduction)
+
+DEFHOOK
+(adjust_private_decl,
+"This hook, if defined, is used by accelerator target back-ends to adjust\n\
+OpenACC variable declarations that should be made private to the given\n\
+parallelism level (i.e. @code{GOMP_DIM_GANG}, @code{GOMP_DIM_WORKER} or\n\
+@code{GOMP_DIM_VECTOR}). A typical use for this hook is to force variable\n\
+declarations at the @code{gang} level to reside in GPU shared memory.\n\
+@var{loc} may be used for diagnostic purposes.\n\
+\n\
+You may also use the @code{TARGET_GOACC_EXPAND_VAR_DECL} hook if the\n\
+adjusted variable declaration needs to be expanded to RTL in a non-standard\n\
+way.",
+tree, (location_t loc, tree var, int level),
+NULL)
+
+DEFHOOK
+(expand_var_decl,
+"This hook, if defined, is used by accelerator target back-ends to expand\n\
+specially handled kinds of @code{VAR_DECL} expressions. A particular use is\n\
+to place variables with specific attributes inside special accelerator\n\
+memories. A return value of @code{NULL} indicates that the target does not\n\
+handle this @code{VAR_DECL}, and normal RTL expanding is resumed.\n\
+\n\
+Only define this hook if your accelerator target needs to expand certain\n\
+@code{VAR_DECL} nodes in a way that differs from the default. You can also adjust\n\
+private variables at OpenACC device-lowering time using the\n\
+@code{TARGET_GOACC_ADJUST_PRIVATE_DECL} target hook.",
+rtx, (tree var),
+NULL)
+
+DEFHOOK
+(create_worker_broadcast_record,
+"Create a record used to propagate local-variable state from an active\n\
+worker to other workers. A possible implementation might adjust the type\n\
+of @var{rec} to place the new variable in shared GPU memory.\n\
+\n\
+The presence of this target hook indicates that middle-end neutering and\n\
+broadcasting should be used.",
+tree, (tree rec, bool sender, const char *name, unsigned HOST_WIDE_INT offset),
+NULL)
+
+DEFHOOK
+(shared_mem_layout,
+"Lay out a fixed shared-memory region on the target. The LO and HI\n\
+arguments should be set to a range of addresses that can be used for worker\n\
+broadcasting. The dimensions, reduction size and gang-private size\n\
+arguments are for the current offload region.",
+void, (unsigned HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, int[],
+ unsigned HOST_WIDE_INT[], unsigned HOST_WIDE_INT[]),
+NULL)
+
+HOOK_VECTOR_END (goacc)
+
+/* Functions relating to vectorization. */
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_VECTORIZE_"
+HOOK_VECTOR (TARGET_VECTORIZE, vectorize)
+
+/* The following member value is a pointer to a function called
+ by the vectorizer, and returns the decl of the target builtin
+ function. */
+DEFHOOK
+(builtin_mask_for_load,
+ "This hook should return the DECL of a function @var{f} that given an\n\
+address @var{addr} as an argument returns a mask @var{m} that can be\n\
+used to extract from two vectors the relevant data that resides in\n\
+@var{addr} in case @var{addr} is not properly aligned.\n\
+\n\
+The autovectorizer, when vectorizing a load operation from an address\n\
+@var{addr} that may be unaligned, will generate two vector loads from\n\
+the two aligned addresses around @var{addr}. It then generates a\n\
+@code{REALIGN_LOAD} operation to extract the relevant data from the\n\
+two loaded vectors. The first two arguments to @code{REALIGN_LOAD},\n\
+@var{v1} and @var{v2}, are the two vectors, each of size @var{VS}, and\n\
+the third argument, @var{OFF}, defines how the data will be extracted\n\
+from these two vectors: if @var{OFF} is 0, then the returned vector is\n\
+@var{v2}; otherwise, the returned vector is composed from the last\n\
+@var{VS}-@var{OFF} elements of @var{v1} concatenated to the first\n\
+@var{OFF} elements of @var{v2}.\n\
+\n\
+If this hook is defined, the autovectorizer will generate a call\n\
+to @var{f} (using the DECL tree that this hook returns) and will\n\
+use the return value of @var{f} as the argument @var{OFF} to\n\
+@code{REALIGN_LOAD}. Therefore, the mask @var{m} returned by @var{f}\n\
+should comply with the semantics expected by @code{REALIGN_LOAD}\n\
+described above.\n\
+If this hook is not defined, then @var{addr} will be used as\n\
+the argument @var{OFF} to @code{REALIGN_LOAD}, in which case the low\n\
+log2(@var{VS}) @minus{} 1 bits of @var{addr} will be considered.",
+ tree, (void), NULL)
+
+/* Returns a built-in function that realizes the vectorized version of
+ a target-independent function, or NULL_TREE if not available. */
+DEFHOOK
+(builtin_vectorized_function,
+ "This hook should return the decl of a function that implements the\n\
+vectorized variant of the function with the @code{combined_fn} code\n\
+@var{code} or @code{NULL_TREE} if such a function is not available.\n\
+The return type of the vectorized function shall be of vector type\n\
+@var{vec_type_out} and the argument types should be @var{vec_type_in}.",
+ tree, (unsigned code, tree vec_type_out, tree vec_type_in),
+ default_builtin_vectorized_function)
+
+/* Returns a built-in function that realizes the vectorized version of
+ a target-specific function, or NULL_TREE if not available. */
+DEFHOOK
+(builtin_md_vectorized_function,
+ "This hook should return the decl of a function that implements the\n\
+vectorized variant of target built-in function @var{fndecl}.  The\n\
+return type of the vectorized function shall be of vector type\n\
+@var{vec_type_out} and the argument types should be @var{vec_type_in}.",
+ tree, (tree fndecl, tree vec_type_out, tree vec_type_in),
+ default_builtin_md_vectorized_function)
+
+/* Cost of different vector/scalar statements in vectorization cost
+ model. In case of misaligned vector loads and stores the cost depends
+ on the data type and misalignment value. */
+DEFHOOK
+(builtin_vectorization_cost,
+ "Returns cost of different scalar or vector statements for vectorization cost model.\n\
+For vector memory operations the cost may depend on type (@var{vectype}) and\n\
+misalignment value (@var{misalign}).",
+ int, (enum vect_cost_for_stmt type_of_cost, tree vectype, int misalign),
+ default_builtin_vectorization_cost)
+
+DEFHOOK
+(preferred_vector_alignment,
+ "This hook returns the preferred alignment in bits for accesses to\n\
+vectors of type @var{type} in vectorized code. This might be less than\n\
+or greater than the ABI-defined value returned by\n\
+@code{TARGET_VECTOR_ALIGNMENT}. It can be equal to the alignment of\n\
+a single element, in which case the vectorizer will not try to optimize\n\
+for alignment.\n\
+\n\
+The default hook returns @code{TYPE_ALIGN (@var{type})}, which is\n\
+correct for most targets.",
+ poly_uint64, (const_tree type),
+ default_preferred_vector_alignment)
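+
+/* A sketch for a target where element alignment is always sufficient
+   for vector accesses:
+
+     static poly_uint64
+     example_preferred_vector_alignment (const_tree type)
+     {
+       // Align only to the element, so the vectorizer never peels
+       // for alignment.
+       return TYPE_ALIGN (TREE_TYPE (type));
+     }  */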
+
+/* Returns whether the target has a preference for decomposing divisions using
+ shifts rather than multiplies. */
+DEFHOOK
+(preferred_div_as_shifts_over_mult,
+ "Sometimes it is possible to implement a vector division using a sequence\n\
+of two addition-shift pairs, giving four instructions in total.\n\
+Return true if taking this approach for @var{vectype} is likely\n\
+to be better than using a sequence involving highpart multiplication.\n\
+The default is false if @code{can_mult_highpart_p}, otherwise true.",
+ bool, (const_tree type),
+ default_preferred_div_as_shifts_over_mult)
+
+/* Return true if vector alignment is reachable (by peeling N
+ iterations) for the given scalar type. */
+DEFHOOK
+(vector_alignment_reachable,
+ "Return true if vector alignment is reachable (by peeling N iterations)\n\
+for the given scalar type @var{type}. @var{is_packed} is false if the scalar\n\
+access using @var{type} is known to be naturally aligned.",
+ bool, (const_tree type, bool is_packed),
+ default_builtin_vector_alignment_reachable)
+
+DEFHOOK
+(vec_perm_const,
+ "This hook is used to test whether the target can permute up to two\n\
+vectors of mode @var{op_mode} using the permutation vector @var{sel},\n\
+producing a vector of mode @var{mode}. The hook is also used to emit such\n\
+a permutation.\n\
+\n\
+When the hook is being used to test whether the target supports a permutation,\n\
+@var{in0}, @var{in1}, and @var{out} are all null. When the hook is being used\n\
+to emit a permutation, @var{in0} and @var{in1} are the source vectors of mode\n\
+@var{op_mode} and @var{out} is the destination vector of mode @var{mode}.\n\
+@var{in1} is the same as @var{in0} if @var{sel} describes a permutation on one\n\
+vector instead of two.\n\
+\n\
+Return true if the operation is possible, emitting instructions for it\n\
+if rtxes are provided.\n\
+\n\
+@cindex @code{vec_perm@var{m}} instruction pattern\n\
+If the hook returns false for a mode with multibyte elements, GCC will\n\
+try the equivalent byte operation. If that also fails, it will try forcing\n\
+the selector into a register and using the @code{vec_perm@var{mode}}\n\
+instruction pattern. There is no need for the hook to handle these two\n\
+implementation approaches itself.",
+ bool, (machine_mode mode, machine_mode op_mode, rtx output, rtx in0, rtx in1,
+ const vec_perm_indices &sel),
+ NULL)
+
+/* Return true if the target supports misaligned store/load of a
+ specific factor denoted in the third parameter. The last parameter
+ is true if the access is defined in a packed struct. */
+DEFHOOK
+(support_vector_misalignment,
+ "This hook should return true if the target supports misaligned vector\n\
+store/load of a specific factor denoted in the @var{misalignment}\n\
+parameter. The vector store/load should be of machine mode @var{mode} and\n\
+the elements in the vectors should be of type @var{type}.  The @var{is_packed}\n\
+parameter is true if the memory access is defined in a packed struct.",
+ bool,
+ (machine_mode mode, const_tree type, int misalignment, bool is_packed),
+ default_builtin_support_vector_misalignment)
+
+/* Returns the preferred mode for SIMD operations for the specified
+ scalar mode. */
+DEFHOOK
+(preferred_simd_mode,
+ "This hook should return the preferred mode for vectorizing scalar\n\
+mode @var{mode}. The default is\n\
+equal to @code{word_mode}, because the vectorizer can do some\n\
+transformations even in the absence of specialized @acronym{SIMD} hardware.",
+ machine_mode,
+ (scalar_mode mode),
+ default_preferred_simd_mode)
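+
+/* A sketch for a hypothetical port with a 128-bit SIMD unit, assuming
+   it defines the V16QI..V4SF vector modes in its modes.def:
+
+     static machine_mode
+     example_preferred_simd_mode (scalar_mode mode)
+     {
+       switch (mode)
+         {
+         case E_QImode: return V16QImode;
+         case E_HImode: return V8HImode;
+         case E_SImode: return V4SImode;
+         case E_SFmode: return V4SFmode;
+         default:       return word_mode;
+         }
+     }  */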
+
+/* Returns the preferred mode for splitting SIMD reductions to. */
+DEFHOOK
+(split_reduction,
+ "This hook should return the preferred mode to split the final reduction\n\
+step on @var{mode} to. The reduction is then carried out reducing upper\n\
+against lower halves of vectors recursively until the specified mode is\n\
+reached. The default is @var{mode} which means no splitting.",
+ machine_mode,
+ (machine_mode),
+ default_split_reduction)
+
+/* Returns a mask of vector sizes to iterate over when auto-vectorizing
+ after processing the preferred one derived from preferred_simd_mode. */
+DEFHOOK
+(autovectorize_vector_modes,
+ "If using the mode returned by @code{TARGET_VECTORIZE_PREFERRED_SIMD_MODE}\n\
+is not the only approach worth considering, this hook should add one mode to\n\
+@var{modes} for each useful alternative approach. These modes are then\n\
+passed to @code{TARGET_VECTORIZE_RELATED_MODE} to obtain the vector mode\n\
+for a given element mode.\n\
+\n\
+The modes returned in @var{modes} should use the smallest element mode\n\
+possible for the vectorization approach that they represent, preferring\n\
+integer modes over floating-point modes in the event of a tie.  The first\n\
+mode should be the @code{TARGET_VECTORIZE_PREFERRED_SIMD_MODE} for its\n\
+element mode.\n\
+\n\
+If @var{all} is true, add suitable vector modes even when they are generally\n\
+not expected to be worthwhile.\n\
+\n\
+The hook returns a bitmask of flags that control how the modes in\n\
+@var{modes} are used. The flags are:\n\
+@table @code\n\
+@item VECT_COMPARE_COSTS\n\
+Tells the loop vectorizer to try all the provided modes and pick the one\n\
+with the lowest cost. By default the vectorizer will choose the first\n\
+mode that works.\n\
+@end table\n\
+\n\
+The hook does not need to do anything if the vector returned by\n\
+@code{TARGET_VECTORIZE_PREFERRED_SIMD_MODE} is the only one relevant\n\
+for autovectorization. The default implementation adds no modes and\n\
+returns 0.",
+ unsigned int,
+ (vector_modes *modes, bool all),
+ default_autovectorize_vector_modes)
+
+DEFHOOK
+(related_mode,
+ "If a piece of code is using vector mode @var{vector_mode} and also wants\n\
+to operate on elements of mode @var{element_mode}, return the vector mode\n\
+it should use for those elements. If @var{nunits} is nonzero, ensure that\n\
+the mode has exactly @var{nunits} elements, otherwise pick whichever vector\n\
+size pairs the most naturally with @var{vector_mode}. Return an empty\n\
+@code{opt_machine_mode} if there is no supported vector mode with the\n\
+required properties.\n\
+\n\
+There is no prescribed way of handling the case in which @var{nunits}\n\
+is zero. One common choice is to pick a vector mode with the same size\n\
+as @var{vector_mode}; this is the natural choice if the target has a\n\
+fixed vector size. Another option is to choose a vector mode with the\n\
+same number of elements as @var{vector_mode}; this is the natural choice\n\
+if the target has a fixed number of elements. Alternatively, the hook\n\
+might choose a middle ground, such as trying to keep the number of\n\
+elements as similar as possible while applying maximum and minimum\n\
+vector sizes.\n\
+\n\
+The default implementation uses @code{mode_for_vector} to find the\n\
+requested mode, returning a mode with the same size as @var{vector_mode}\n\
+when @var{nunits} is zero. This is the correct behavior for most targets.",
+ opt_machine_mode,
+ (machine_mode vector_mode, scalar_mode element_mode, poly_uint64 nunits),
+ default_vectorize_related_mode)
+
+/* Function to get a target mode for a vector mask. */
+DEFHOOK
+(get_mask_mode,
+ "Return the mode to use for a vector mask that holds one boolean\n\
+result for each element of vector mode @var{mode}. The returned mask mode\n\
+can be a vector of integers (class @code{MODE_VECTOR_INT}), a vector of\n\
+booleans (class @code{MODE_VECTOR_BOOL}) or a scalar integer (class\n\
+@code{MODE_INT}). Return an empty @code{opt_machine_mode} if no such\n\
+mask mode exists.\n\
+\n\
+The default implementation returns a @code{MODE_VECTOR_INT} with the\n\
+same size and number of elements as @var{mode}, if such a mode exists.",
+ opt_machine_mode,
+ (machine_mode mode),
+ default_get_mask_mode)
+
+/* Function to say whether a masked operation is expensive when the
+ mask is all zeros. */
+DEFHOOK
+(empty_mask_is_expensive,
+ "This hook returns true if masked internal function @var{ifn} (really of\n\
+type @code{internal_fn}) should be considered expensive when the mask is\n\
+all zeros. GCC can then try to branch around the instruction instead.",
+ bool,
+ (unsigned ifn),
+ default_empty_mask_is_expensive)
+
+/* Target builtin that implements vector gather operation. */
+DEFHOOK
+(builtin_gather,
+ "Target builtin that implements vector gather operation. @var{mem_vectype}\n\
+is the vector type of the load and @var{index_type} is the scalar type of\n\
+the index, scaled by @var{scale}.\n\
+The default is @code{NULL_TREE} which means to not vectorize gather\n\
+loads.",
+ tree,
+ (const_tree mem_vectype, const_tree index_type, int scale),
+ NULL)
+
+/* Target builtin that implements vector scatter operation. */
+DEFHOOK
+(builtin_scatter,
+"Target builtin that implements vector scatter operation. @var{vectype}\n\
+is the vector type of the store and @var{index_type} is the scalar type of\n\
+the index, scaled by @var{scale}.\n\
+The default is @code{NULL_TREE} which means to not vectorize scatter\n\
+stores.",
+ tree,
+ (const_tree vectype, const_tree index_type, int scale),
+ NULL)
+
+/* Target function to initialize the cost model for a loop or block. */
+DEFHOOK
+(create_costs,
+ "This hook should initialize target-specific data structures in preparation\n\
+for modeling the costs of vectorizing a loop or basic block. The default\n\
+allocates three unsigned integers for accumulating costs for the prologue,\n\
+body, and epilogue of the loop or basic block. If @var{loop_info} is\n\
+non-NULL, it identifies the loop being vectorized; otherwise a single block\n\
+is being vectorized. If @var{costing_for_scalar} is true, it indicates the\n\
+current cost model is for the scalar version of a loop or block; otherwise\n\
+it is for the vector version.",
+ class vector_costs *,
+ (vec_info *vinfo, bool costing_for_scalar),
+ default_vectorize_create_costs)
+
+HOOK_VECTOR_END (vectorize)
+
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_"
+
+DEFHOOK
+(preferred_else_value,
+ "This hook returns the target's preferred final argument for a call\n\
+to conditional internal function @var{ifn} (really of type\n\
+@code{internal_fn}). @var{type} specifies the return type of the\n\
+function and @var{ops} are the operands to the conditional operation,\n\
+of which there are @var{nops}.\n\
+\n\
+For example, if @var{ifn} is @code{IFN_COND_ADD}, the hook returns\n\
+a value of type @var{type} that should be used when @samp{@var{ops}[0]}\n\
+and @samp{@var{ops}[1]} are conditionally added together.\n\
+\n\
+This hook is only relevant if the target supports conditional patterns\n\
+like @code{cond_add@var{m}}. The default implementation returns a zero\n\
+constant of type @var{type}.",
+ tree,
+ (unsigned ifn, tree type, unsigned nops, tree *ops),
+ default_preferred_else_value)
+
+DEFHOOK
+(record_offload_symbol,
+ "Used when offloaded functions are seen in the compilation unit and no named\n\
+sections are available. It is called once for each symbol that must be\n\
+recorded in the offload function and variable table.",
+ void, (tree),
+ hook_void_tree)
+
+DEFHOOKPOD
+(absolute_biggest_alignment,
+ "If defined, this target hook specifies the absolute biggest alignment\n\
+that a type or variable can have on this machine, otherwise,\n\
+@code{BIGGEST_ALIGNMENT} is used.",
+ HOST_WIDE_INT, BIGGEST_ALIGNMENT)
+
+/* Allow target specific overriding of option settings after options have
+ been changed by an attribute or pragma or when it is reset at the
+ end of the code affected by an attribute or pragma. */
+DEFHOOK
+(override_options_after_change,
+ "This target function is similar to the hook @code{TARGET_OPTION_OVERRIDE}\n\
+but is called when the optimize level is changed via an attribute or\n\
+pragma or when it is reset at the end of the code affected by the\n\
+attribute or pragma. It is not called at the beginning of compilation\n\
+when @code{TARGET_OPTION_OVERRIDE} is called so if you want to perform these\n\
+actions then, you should have @code{TARGET_OPTION_OVERRIDE} call\n\
+@code{TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE}.",
+ void, (void),
+ hook_void_void)
+
+DEFHOOK
+(offload_options,
+ "Used when writing out the list of options into an LTO file. It should\n\
+translate any relevant target-specific options (such as the ABI in use)\n\
+into one of the @option{-foffload} options that exist as a common interface\n\
+to express such options. It should return a string containing these options,\n\
+separated by spaces, which the caller will free.\n",
+char *, (void), hook_charptr_void_null)
+
+DEFHOOK_UNDOC
+(eh_return_filter_mode,
+ "Return machine mode for filter value.",
+ scalar_int_mode, (void),
+ default_eh_return_filter_mode)
+
+/* Return machine mode for libgcc expanded cmp instructions. */
+DEFHOOK
+(libgcc_cmp_return_mode,
+ "This target hook should return the mode to be used for the return value\n\
+of compare instructions expanded to libgcc calls. If not defined\n\
+@code{word_mode} is returned which is the right choice for a majority of\n\
+targets.",
+ scalar_int_mode, (void),
+ default_libgcc_cmp_return_mode)
+
+/* Return machine mode for libgcc expanded shift instructions. */
+DEFHOOK
+(libgcc_shift_count_mode,
+ "This target hook should return the mode to be used for the shift count operand\n\
+of shift instructions expanded to libgcc calls. If not defined\n\
+@code{word_mode} is returned which is the right choice for a majority of\n\
+targets.",
+ scalar_int_mode, (void),
+ default_libgcc_shift_count_mode)
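+
+/* A sketch for a port whose libgcc routines take shift counts in
+   SImode regardless of the word size:
+
+     static scalar_int_mode
+     example_libgcc_shift_count_mode (void)
+     {
+       return SImode;
+     }  */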
+
+/* Return machine mode to be used for _Unwind_Word type. */
+DEFHOOK
+(unwind_word_mode,
+ "Return machine mode to be used for @code{_Unwind_Word} type.\n\
+The default is to use @code{word_mode}.",
+ scalar_int_mode, (void),
+ default_unwind_word_mode)
+
+/* Given two decls, merge their attributes and return the result. */
+DEFHOOK
+(merge_decl_attributes,
+ "Define this target hook if the merging of decl attributes needs special\n\
+handling. If defined, the result is a list of the combined\n\
+@code{DECL_ATTRIBUTES} of @var{olddecl} and @var{newdecl}.\n\
+@var{newdecl} is a duplicate declaration of @var{olddecl}. Examples of\n\
+when this is needed are when one attribute overrides another, or when an\n\
+attribute is nullified by a subsequent definition. This function may\n\
+call @code{merge_attributes} to handle machine-independent merging.\n\
+\n\
+@findex TARGET_DLLIMPORT_DECL_ATTRIBUTES\n\
+If the only target-specific handling you require is @samp{dllimport}\n\
+for Microsoft Windows targets, you should define the macro\n\
+@code{TARGET_DLLIMPORT_DECL_ATTRIBUTES} to @code{1}. The compiler\n\
+will then define a function called\n\
+@code{merge_dllimport_decl_attributes} which can then be defined as\n\
+the expansion of @code{TARGET_MERGE_DECL_ATTRIBUTES}. You can also\n\
+add @code{handle_dll_attribute} in the attribute table for your port\n\
+to perform initial processing of the @samp{dllimport} and\n\
+@samp{dllexport} attributes. This is done in @file{i386/cygwin.h} and\n\
+@file{i386/i386.cc}, for example.",
+ tree, (tree olddecl, tree newdecl),
+ merge_decl_attributes)
+
+/* Given two types, merge their attributes and return the result. */
+DEFHOOK
+(merge_type_attributes,
+ "Define this target hook if the merging of type attributes needs special\n\
+handling. If defined, the result is a list of the combined\n\
+@code{TYPE_ATTRIBUTES} of @var{type1} and @var{type2}. It is assumed\n\
+that @code{comptypes} has already been called and returned 1. This\n\
+function may call @code{merge_attributes} to handle machine-independent\n\
+merging.",
+ tree, (tree type1, tree type2),
+ merge_type_attributes)
+
+/* Table of machine attributes and functions to handle them.
+ Ignored if NULL. */
+DEFHOOKPOD
+(attribute_table,
+ "If defined, this target hook points to an array of @samp{struct\n\
+attribute_spec} (defined in @file{tree-core.h}) specifying the machine\n\
+specific attributes for this target and some of the restrictions on the\n\
+entities to which these attributes are applied and the arguments they\n\
+take.",
+ const struct attribute_spec *, NULL)
+
+/* Return true iff attribute NAME expects a plain identifier as its first
+ argument. */
+DEFHOOK
+(attribute_takes_identifier_p,
+ "If defined, this target hook is a function which returns true if the\n\
+machine-specific attribute named @var{name} expects an identifier\n\
+given as its first argument to be passed on as a plain identifier, not\n\
+subjected to name lookup. If this is not defined, the default is\n\
+false for all machine-specific attributes.",
+ bool, (const_tree name),
+ hook_bool_const_tree_false)
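+
+/* A sketch assuming a hypothetical "mode_switch" machine attribute
+   whose first argument is a bare identifier:
+
+     static bool
+     example_attribute_takes_identifier_p (const_tree name)
+     {
+       return is_attribute_p ("mode_switch", name);
+     }  */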
+
+/* Return zero if the attributes on TYPE1 and TYPE2 are incompatible,
+ one if they are compatible and two if they are nearly compatible
+ (which causes a warning to be generated). */
+DEFHOOK
+(comp_type_attributes,
+ "If defined, this target hook is a function which returns zero if the attributes on\n\
+@var{type1} and @var{type2} are incompatible, one if they are compatible,\n\
+and two if they are nearly compatible (which causes a warning to be\n\
+generated). If this is not defined, machine-specific attributes are\n\
+supposed always to be compatible.",
+ int, (const_tree type1, const_tree type2),
+ hook_int_const_tree_const_tree_1)
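+
+/* A sketch assuming a hypothetical "shortcall" attribute that makes
+   function types incompatible when it differs:
+
+     static int
+     example_comp_type_attributes (const_tree type1, const_tree type2)
+     {
+       bool s1 = lookup_attribute ("shortcall",
+                                   TYPE_ATTRIBUTES (type1)) != NULL;
+       bool s2 = lookup_attribute ("shortcall",
+                                   TYPE_ATTRIBUTES (type2)) != NULL;
+       return s1 == s2 ? 1 : 0;
+     }  */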
+
+/* Assign default attributes to the newly defined TYPE. */
+DEFHOOK
+(set_default_type_attributes,
+ "If defined, this target hook is a function which assigns default attributes to\n\
+the newly defined @var{type}.",
+ void, (tree type),
+ hook_void_tree)
+
+/* Insert attributes on the newly created DECL. */
+DEFHOOK
+(insert_attributes,
+ "Define this target hook if you want to be able to add attributes to a decl\n\
+when it is being created. This is normally useful for back ends which\n\
+wish to implement a pragma by using the attributes which correspond to\n\
+the pragma's effect. The @var{node} argument is the decl which is being\n\
+created. The @var{attr_ptr} argument is a pointer to the attribute list\n\
+for this decl. The list itself should not be modified, since it may be\n\
+shared with other decls, but attributes may be chained on the head of\n\
+the list and @code{*@var{attr_ptr}} modified to point to the new\n\
+attributes, or a copy of the list may be made if further changes are\n\
+needed.",
+ void, (tree node, tree *attr_ptr),
+ hook_void_tree_treeptr)
+
+/* Perform additional target-specific processing of generic attributes. */
+DEFHOOK
+(handle_generic_attribute,
+ "Define this target hook if you want to be able to perform additional\n\
+target-specific processing of an attribute which is handled generically\n\
+by a front end. The arguments are the same as those which are passed to\n\
+attribute handlers.  So far this only affects the @code{noinit} and\n\
+@code{section} attributes.",
+ tree, (tree *node, tree name, tree args, int flags, bool *no_add_attrs),
+ hook_tree_treeptr_tree_tree_int_boolptr_null)
+
+/* Return true if FNDECL (which has at least one machine attribute)
+ can be inlined despite its machine attributes, false otherwise. */
+DEFHOOK
+(function_attribute_inlinable_p,
+ "@cindex inlining\n\
+This target hook returns @code{true} if it is OK to inline @var{fndecl}\n\
+into the current function, despite its having target-specific\n\
+attributes, @code{false} otherwise. By default, if a function has a\n\
+target specific attribute attached to it, it will not be inlined.",
+ bool, (const_tree fndecl),
+ hook_bool_const_tree_false)
+
+/* Return true if bitfields in RECORD_TYPE should follow the
+ Microsoft Visual C++ bitfield layout rules. */
+DEFHOOK
+(ms_bitfield_layout_p,
+ "This target hook returns @code{true} if bit-fields in the given\n\
+@var{record_type} are to be laid out following the rules of Microsoft\n\
+Visual C/C++, namely: (i) a bit-field won't share the same storage\n\
+unit with the previous bit-field if their underlying types have\n\
+different sizes, and the bit-field will be aligned to the highest\n\
+alignment of the underlying types of itself and of the previous\n\
+bit-field; (ii) a zero-sized bit-field will affect the alignment of\n\
+the whole enclosing structure, even if it is unnamed; except that\n\
+(iii) a zero-sized bit-field will be disregarded unless it follows\n\
+another bit-field of nonzero size. If this hook returns @code{true},\n\
+other macros that control bit-field layout are ignored.\n\
+\n\
+When a bit-field is inserted into a packed record, the whole size\n\
+of the underlying type is used by one or more same-size adjacent\n\
+bit-fields (that is, if it's @code{long:3}, 32 bits is used in the record,\n\
+and any additional adjacent long bit-fields are packed into the same\n\
+chunk of 32 bits. However, if the size changes, a new field of that\n\
+size is allocated). In an unpacked record, this is the same as using\n\
+alignment, but not equivalent when packing.\n\
+\n\
+If both MS bit-fields and @samp{__attribute__((packed))} are used,\n\
+the latter will take precedence. If @samp{__attribute__((packed))} is\n\
+used on a single field when MS bit-fields are in use, it will take\n\
+precedence for that field, but the alignment of the rest of the structure\n\
+may affect its placement.",
+ bool, (const_tree record_type),
+ hook_bool_const_tree_false)
+
+/* For now this is only an interface to WORDS_BIG_ENDIAN for
+ target-independent code like the front ends; performance testing is needed
+ before switching completely to the target hook. */
+DEFHOOK_UNDOC
+(words_big_endian,
+ "",
+ bool, (void),
+ targhook_words_big_endian)
+
+/* Likewise for FLOAT_WORDS_BIG_ENDIAN. */
+DEFHOOK_UNDOC
+(float_words_big_endian,
+ "",
+ bool, (void),
+ targhook_float_words_big_endian)
+
+DEFHOOK
+(float_exceptions_rounding_supported_p,
+ "Returns true if the target supports IEEE 754 floating-point exceptions\n\
+and rounding modes, false otherwise. This is intended to relate to the\n\
+@code{float} and @code{double} types, but not necessarily @code{long double}.\n\
+By default, returns true if the @code{adddf3} instruction pattern is\n\
+available and false otherwise, on the assumption that hardware floating\n\
+point supports exceptions and rounding modes but software floating point\n\
+does not.",
+ bool, (void),
+ default_float_exceptions_rounding_supported_p)
+
+/* True if the target supports decimal floating point. */
+DEFHOOK
+(decimal_float_supported_p,
+ "Returns true if the target supports decimal floating point.",
+ bool, (void),
+ default_decimal_float_supported_p)
+
+/* True if the target supports fixed-point. */
+DEFHOOK
+(fixed_point_supported_p,
+ "Returns true if the target supports fixed-point arithmetic.",
+ bool, (void),
+ default_fixed_point_supported_p)
+
+/* Return true if anonymous bitfields affect structure alignment. */
+DEFHOOK
+(align_anon_bitfield,
+ "When @code{PCC_BITFIELD_TYPE_MATTERS} is true this hook will determine\n\
+whether unnamed bitfields affect the alignment of the containing\n\
+structure. The hook should return true if the structure should inherit\n\
+the alignment requirements of an unnamed bitfield's type.",
+ bool, (void),
+ hook_bool_void_false)
+
+/* Return true if volatile bitfields should use the narrowest type possible.
+ Return false if they should use the container type. */
+DEFHOOK
+(narrow_volatile_bitfield,
+ "This target hook should return @code{true} if accesses to volatile bitfields\n\
+should use the narrowest mode possible. It should return @code{false} if\n\
+these accesses should use the bitfield container type.\n\
+\n\
+The default is @code{false}.",
+ bool, (void),
+ hook_bool_void_false)
+
+/* Set up target-specific built-in functions. */
+DEFHOOK
+(init_builtins,
+ "Define this hook if you have any machine-specific built-in functions\n\
+that need to be defined. It should be a function that performs the\n\
+necessary setup.\n\
+\n\
+Machine specific built-in functions can be useful to expand special machine\n\
+instructions that would otherwise not normally be generated because\n\
+they have no equivalent in the source language (for example, SIMD vector\n\
+instructions or prefetch instructions).\n\
+\n\
+To create a built-in function, call the function\n\
+@code{lang_hooks.builtin_function}\n\
+which is defined by the language front end. You can use any type nodes set\n\
+up by @code{build_common_tree_nodes};\n\
+only language front ends that use that function will call\n\
+@samp{TARGET_INIT_BUILTINS}.",
+ void, (void),
+ hook_void_void)
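+
+/* A sketch of registering one machine-specific builtin; the builtin
+   name and the EXAMPLE_BUILTIN_SYNC code are placeholders:
+
+     static void
+     example_init_builtins (void)
+     {
+       tree ftype = build_function_type_list (void_type_node, NULL_TREE);
+       add_builtin_function ("__builtin_example_sync", ftype,
+                             EXAMPLE_BUILTIN_SYNC, BUILT_IN_MD,
+                             NULL, NULL_TREE);
+     }  */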
+
+/* Initialize (if INITIALIZE_P is true) and return the target-specific
+ built-in function decl for CODE.
+ Return NULL if that is not possible. Return error_mark_node if CODE
+ is outside of the range of valid target builtin function codes. */
+DEFHOOK
+(builtin_decl,
+ "Define this hook if you have any machine-specific built-in functions\n\
+that need to be defined. It should be a function that returns the\n\
+builtin function declaration for the builtin function code @var{code}.\n\
+If there is no such builtin and it cannot be initialized at this time\n\
+even when @var{initialize_p} is true, the function should return @code{NULL_TREE}.\n\
+If @var{code} is out of range the function should return\n\
+@code{error_mark_node}.",
+ tree, (unsigned code, bool initialize_p), NULL)
+
+/* Expand a target-specific builtin. */
+DEFHOOK
+(expand_builtin,
+ "\n\
+Expand a call to a machine specific built-in function that was set up by\n\
+@samp{TARGET_INIT_BUILTINS}. @var{exp} is the expression for the\n\
+function call; the result should go to @var{target} if that is\n\
+convenient, and have mode @var{mode} if that is convenient.\n\
+@var{subtarget} may be used as the target for computing one of\n\
+@var{exp}'s operands. @var{ignore} is nonzero if the value is to be\n\
+ignored. This function should return the result of the call to the\n\
+built-in function.",
+ rtx,
+ (tree exp, rtx target, rtx subtarget, machine_mode mode, int ignore),
+ default_expand_builtin)
+
+/* Select a replacement for a target-specific builtin. This is done
+ *before* regular type checking, and so allows the target to
+ implement a crude form of function overloading. The result is a
+ complete expression that implements the operation. PARAMS really
+ has type VEC(tree,gc)*, but we don't want to include tree.h here. */
+DEFHOOK
+(resolve_overloaded_builtin,
+ "Select a replacement for a machine specific built-in function that\n\
+was set up by @samp{TARGET_INIT_BUILTINS}. This is done\n\
+@emph{before} regular type checking, and so allows the target to\n\
+implement a crude form of function overloading. @var{fndecl} is the\n\
+declaration of the built-in function. @var{arglist} is the list of\n\
+arguments passed to the built-in function. The result is a\n\
+complete expression that implements the operation, usually\n\
+another @code{CALL_EXPR}.\n\
+@var{arglist} really has type @samp{VEC(tree,gc)*}.",
+ tree, (unsigned int /*location_t*/ loc, tree fndecl, void *arglist), NULL)
+
+DEFHOOK
+(check_builtin_call,
+ "Perform semantic checking on a call to a machine-specific built-in\n\
+function after its arguments have been constrained to the function\n\
+signature. Return true if the call is valid, otherwise report an error\n\
+and return false.\n\
+\n\
+This hook is called after @code{TARGET_RESOLVE_OVERLOADED_BUILTIN}.\n\
+The call was originally to built-in function @var{orig_fndecl},\n\
+but after the optional @code{TARGET_RESOLVE_OVERLOADED_BUILTIN}\n\
+step is now to built-in function @var{fndecl}. @var{loc} is the\n\
+location of the call and @var{args} is an array of function arguments,\n\
+of which there are @var{nargs}. @var{arg_loc} specifies the location\n\
+of each argument.",
+ bool, (location_t loc, vec<location_t> arg_loc, tree fndecl,
+ tree orig_fndecl, unsigned int nargs, tree *args),
+ NULL)
+
+/* Fold a target-specific builtin to a tree valid for both GIMPLE
+ and GENERIC. */
+DEFHOOK
+(fold_builtin,
+ "Fold a call to a machine specific built-in function that was set up by\n\
+@samp{TARGET_INIT_BUILTINS}. @var{fndecl} is the declaration of the\n\
+built-in function. @var{n_args} is the number of arguments passed to\n\
+the function; the arguments themselves are pointed to by @var{argp}.\n\
+The result is another tree, valid for both GIMPLE and GENERIC,\n\
+containing a simplified expression for the call's result. If\n\
+@var{ignore} is true the value will be ignored.",
+ tree, (tree fndecl, int n_args, tree *argp, bool ignore),
+ hook_tree_tree_int_treep_bool_null)
+
+/* Fold a target-specific builtin to a valid GIMPLE tree. */
+DEFHOOK
+(gimple_fold_builtin,
+ "Fold a call to a machine specific built-in function that was set up\n\
+by @samp{TARGET_INIT_BUILTINS}. @var{gsi} points to the gimple\n\
+statement holding the function call. Returns true if any change\n\
+was made to the GIMPLE stream.",
+ bool, (gimple_stmt_iterator *gsi),
+ hook_bool_gsiptr_false)
+
+/* Target hook is used to compare the target attributes in two functions to
+ determine which function's features get higher priority. This is used
+ during function multi-versioning to figure out the order in which two
+ versions must be dispatched. A function version with a higher priority
+ is checked for dispatching earlier. DECL1 and DECL2 are
+ the two function decls that will be compared. It returns positive value
+ if DECL1 is higher priority, negative value if DECL2 is higher priority
+ and 0 if they are the same. */
+DEFHOOK
+(compare_version_priority,
+ "This hook is used to compare the target attributes in two functions to\n\
+determine which function's features get higher priority. This is used\n\
+during function multi-versioning to figure out the order in which two\n\
+versions must be dispatched. A function version with a higher priority\n\
+is checked for dispatching earlier. @var{decl1} and @var{decl2} are\n\
+the two function decls that will be compared.",
+ int, (tree decl1, tree decl2), NULL)
+
+/* Target hook is used to generate the dispatcher logic to invoke the right
+ function version at run-time for a given set of function versions.
+ ARG points to the callgraph node of the dispatcher function whose body
+ must be generated. */
+DEFHOOK
+(generate_version_dispatcher_body,
+ "This hook is used to generate the dispatcher logic to invoke the right\n\
+function version at run-time for a given set of function versions.\n\
+@var{arg} points to the callgraph node of the dispatcher function whose\n\
+body must be generated.",
+ tree, (void *arg), NULL)
+
+/* Target hook is used to get the dispatcher function for a set of function
+ versions. The dispatcher function is called to invoke the right function
+ version at run-time. DECL is one version from a set of semantically
+ identical versions. */
+DEFHOOK
+(get_function_versions_dispatcher,
+ "This hook is used to get the dispatcher function for a set of function\n\
+versions. The dispatcher function is called to invoke the right function\n\
+version at run-time. @var{decl} is one version from a set of semantically\n\
+identical versions.",
+ tree, (void *decl), NULL)
+
+/* Returns a code for a target-specific builtin that implements
+ reciprocal of a target-specific function, or NULL_TREE if not available. */
+DEFHOOK
+(builtin_reciprocal,
+ "This hook should return the DECL of a function that implements the\n\
+reciprocal of the machine-specific builtin function @var{fndecl}, or\n\
+@code{NULL_TREE} if such a function is not available.",
+ tree, (tree fndecl),
+ default_builtin_reciprocal)
+
+/* For a vendor-specific TYPE, return a pointer to a statically-allocated
+ string containing the C++ mangling for TYPE. In all other cases, return
+ NULL. */
+DEFHOOK
+(mangle_type,
+ "If your target defines any fundamental types, or any types your target\n\
+uses should be mangled differently from the default, define this hook\n\
+to return the appropriate encoding for these types as part of a C++\n\
+mangled name. The @var{type} argument is the tree structure representing\n\
+the type to be mangled. The hook may be applied to trees which are\n\
+not target-specific fundamental types; it should return @code{NULL}\n\
+for all such types, as well as arguments it does not recognize. If the\n\
+return value is not @code{NULL}, it must point to a statically-allocated\n\
+string constant.\n\
+\n\
+Target-specific fundamental types might be new fundamental types or\n\
+qualified versions of ordinary fundamental types. Encode new\n\
+fundamental types as @samp{@w{u @var{n} @var{name}}}, where @var{name}\n\
+is the name used for the type in source code, and @var{n} is the\n\
+length of @var{name} in decimal. Encode qualified versions of\n\
+ordinary types as @samp{@w{U @var{n} @var{name} @var{code}}}, where\n\
+@var{name} is the name used for the type qualifier in source code,\n\
+@var{n} is the length of @var{name} as above, and @var{code} is the\n\
+code used to represent the unqualified version of this type. (See\n\
+@code{write_builtin_type} in @file{cp/mangle.cc} for the list of\n\
+codes.) In both cases the spaces are for clarity; do not include any\n\
+spaces in your string.\n\
+\n\
+This hook is applied to types prior to typedef resolution. If the mangled\n\
+name for a particular type depends only on that type's main variant, you\n\
+can perform typedef resolution yourself using @code{TYPE_MAIN_VARIANT}\n\
+before mangling.\n\
+\n\
+The default version of this hook always returns @code{NULL}, which is\n\
+appropriate for a target that does not define any new fundamental\n\
+types.",
+ const char *, (const_tree type),
+ hook_constcharptr_const_tree_null)
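+
+/* Illustrative sketch only (not part of this import): a minimal
+   TARGET_MANGLE_TYPE implementation following the encoding described
+   above.  `my_fp24_type_node' is an assumed tree node that a backend
+   would create for a hypothetical 24-bit float fundamental type.  */
+static const char *
+my_mangle_type (const_tree type)
+{
+  /* Encode the new fundamental type as "u", the length of the name,
+     then the name; return NULL for everything else so the default
+     manglings apply.  */
+  if (TYPE_MAIN_VARIANT (type) == my_fp24_type_node)
+    return "u6__fp24";
+  return NULL;
+}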
+
+/* Temporarily add conditional target specific types for the purpose of
+ emitting C++ fundamental type tinfos. */
+DEFHOOK
+(emit_support_tinfos,
+ "If your target defines any fundamental types which depend on ISA flags,\n\
+they might need C++ tinfo symbols in libsupc++/libstdc++ regardless of\n\
+the ISA flags the library is compiled with.\n\
+This hook allows creating tinfo symbols even for those cases, by temporarily\n\
+creating each corresponding fundamental type tree, calling the\n\
+@var{callback} function on it, and setting the type back to @code{nullptr}.",
+ void, (emit_support_tinfos_callback callback),
+ default_emit_support_tinfos)
+
+/* Make any adjustments to libfunc names needed for this target. */
+DEFHOOK
+(init_libfuncs,
+ "This hook should declare additional library routines or rename\n\
+existing ones, using the functions @code{set_optab_libfunc} and\n\
+@code{init_one_libfunc} defined in @file{optabs.cc}.\n\
+@code{init_optabs} calls this macro after initializing all the normal\n\
+library routines.\n\
+\n\
+The default is to do nothing. Most ports don't need to define this hook.",
+ void, (void),
+ hook_void_void)
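+
+/* Illustrative sketch only (assumed names, not from this import): a
+   port could redirect the 32-bit division routines to OS-provided
+   helpers with set_optab_libfunc, as described above.  */
+static void
+my_init_libfuncs (void)
+{
+  /* Replace libgcc's __divsi3/__udivsi3 with hypothetical OS entry
+     points.  */
+  set_optab_libfunc (sdiv_optab, SImode, "__os_idivide");
+  set_optab_libfunc (udiv_optab, SImode, "__os_uidivide");
+}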
+
+/* Add a __gnu_ prefix to library functions rather than just __. */
+DEFHOOKPOD
+(libfunc_gnu_prefix,
+ "If false (the default), internal library routines start with two\n\
+underscores. If set to true, these routines start with @code{__gnu_}\n\
+instead. E.g., @code{__muldi3} changes to @code{__gnu_muldi3}. This\n\
+currently only affects functions defined in @file{libgcc2.c}. If this\n\
+is set to true, the @file{tm.h} file must also\n\
+@code{#define LIBGCC2_GNU_PREFIX}.",
+ bool, false)
+
+/* Given a decl, a section name, and whether the decl initializer
+ has relocs, choose attributes for the section. */
+/* ??? Should be merged with SELECT_SECTION and UNIQUE_SECTION. */
+DEFHOOK
+(section_type_flags,
+ "Choose a set of section attributes for use by @code{TARGET_ASM_NAMED_SECTION}\n\
+based on a variable or function decl, a section name, and whether or not the\n\
+declaration's initializer may contain runtime relocations. @var{decl} may be\n\
+null, in which case read-write data should be assumed.\n\
+\n\
+The default version of this function handles choosing code vs data,\n\
+read-only vs read-write data, and @code{flag_pic}. You should only\n\
+need to override this if your target has special flags that might be\n\
+set via @code{__attribute__}.",
+ unsigned int, (tree decl, const char *name, int reloc),
+ default_section_type_flags)
+
+DEFHOOK
+(libc_has_function,
+ "This hook determines whether a function from a class of functions\n\
+@var{fn_class} is present in the target C library. If @var{type} is NULL,\n\
+the caller asks for support for all standard (float, double, long double)\n\
+types. If @var{type} is non-NULL, the caller asks for support for a\n\
+specific type.",
+ bool, (enum function_class fn_class, tree type),
+ default_libc_has_function)
+
+DEFHOOK
+(libc_has_fast_function,
+ "This hook determines whether a function from a class of functions\n\
+@code{(enum function_class)}@var{fcode} has a fast implementation.",
+ bool, (int fcode),
+ default_libc_has_fast_function)
+
+/* True if new jumps cannot be created, to replace existing ones or
+ not, at the current point in the compilation. */
+DEFHOOK
+(cannot_modify_jumps_p,
+ "This target hook returns @code{true} past the point in which new jump\n\
+instructions could be created. On machines that require a register for\n\
+every jump such as the SHmedia ISA of SH5, this point would typically be\n\
+reload, so this target hook should be defined to a function such as:\n\
+\n\
+@smallexample\n\
+static bool\n\
+cannot_modify_jumps_past_reload_p ()\n\
+@{\n\
+ return (reload_completed || reload_in_progress);\n\
+@}\n\
+@end smallexample",
+ bool, (void),
+ hook_bool_void_false)
+
+/* True if FOLLOWER may be modified to follow FOLLOWEE. */
+DEFHOOK
+(can_follow_jump,
+ "FOLLOWER and FOLLOWEE are JUMP_INSN instructions;\n\
+return true if FOLLOWER may be modified to follow FOLLOWEE;\n\
+false, if it can't.\n\
+For example, on some targets, certain kinds of branches can't be made to\n\
+follow through a hot/cold partitioning.",
+ bool, (const rtx_insn *follower, const rtx_insn *followee),
+ hook_bool_const_rtx_insn_const_rtx_insn_true)
+
+/* Return true if the target supports conditional execution. */
+DEFHOOK
+(have_conditional_execution,
+ "This target hook returns true if the target supports conditional execution.\n\
+This target hook is required only when the target has several different\n\
+modes and they have different conditional execution capabilities, as on ARM.",
+ bool, (void),
+ default_have_conditional_execution)
+
+DEFHOOK
+(gen_ccmp_first,
+ "This function prepares to emit a comparison insn for the first compare in a\n\
+ sequence of conditional comparisons. It returns an appropriate comparison\n\
+ with @code{CC} for passing to @code{gen_ccmp_next} or @code{cbranch_optab}.\n\
+ The insns to prepare the compare are saved in @var{prep_seq} and the compare\n\
+ insns are saved in @var{gen_seq}. They will be emitted when all the\n\
+ compares in the conditional comparison are generated without error.\n\
+ @var{code} is the @code{rtx_code} of the compare for @var{op0} and @var{op1}.",
+ rtx, (rtx_insn **prep_seq, rtx_insn **gen_seq, int code, tree op0, tree op1),
+ NULL)
+
+DEFHOOK
+(gen_ccmp_next,
+ "This function prepares to emit a conditional comparison within a sequence\n\
+ of conditional comparisons. It returns an appropriate comparison with\n\
+ @code{CC} for passing to @code{gen_ccmp_next} or @code{cbranch_optab}.\n\
+ The insns to prepare the compare are saved in @var{prep_seq} and the compare\n\
+ insns are saved in @var{gen_seq}. They will be emitted when all the\n\
+ compares in the conditional comparison are generated without error. The\n\
+ @var{prev} expression is the result of a prior call to @code{gen_ccmp_first}\n\
+ or @code{gen_ccmp_next}. It may return @code{NULL} if the combination of\n\
+ @var{prev} and this comparison is not supported, otherwise the result must\n\
+ be appropriate for passing to @code{gen_ccmp_next} or @code{cbranch_optab}.\n\
+ @var{cmp_code} is the @code{rtx_code} of the compare for @var{op0} and @var{op1}.\n\
+ @var{bit_code} is @code{AND} or @code{IOR}, which is the op on the compares.",
+ rtx, (rtx_insn **prep_seq, rtx_insn **gen_seq, rtx prev, int cmp_code, tree op0, tree op1, int bit_code),
+ NULL)
+
+/* Return a new value for loop unroll size. */
+DEFHOOK
+(loop_unroll_adjust,
+ "This target hook returns a new value for the number of times @var{loop}\n\
+should be unrolled. The parameter @var{nunroll} is the number of times\n\
+the loop is to be unrolled. The parameter @var{loop} is a pointer to\n\
+the loop, which is going to be checked for unrolling. This target hook\n\
+is required only when the target has special constraints like a maximum\n\
+number of memory accesses.",
+ unsigned, (unsigned nunroll, class loop *loop),
+ NULL)
+
+/* True if X is a legitimate MODE-mode immediate operand. */
+DEFHOOK
+(legitimate_constant_p,
+ "This hook returns true if @var{x} is a legitimate constant for a\n\
+@var{mode}-mode immediate operand on the target machine. You can assume that\n\
+@var{x} satisfies @code{CONSTANT_P}, so you need not check this.\n\
+\n\
+The default definition returns true.",
+ bool, (machine_mode mode, rtx x),
+ hook_bool_mode_rtx_true)
+
+/* True if X is a TLS operand whose value should be pre-computed. */
+DEFHOOK
+(precompute_tls_p,
+ "This hook returns true if @var{x} is a TLS operand on the target\n\
+machine that should be pre-computed when used as the argument in a call.\n\
+You can assume that @var{x} satisfies @code{CONSTANT_P}, so you need not\n\
+check this.\n\
+\n\
+The default definition returns false.",
+ bool, (machine_mode mode, rtx x),
+ hook_bool_mode_rtx_false)
+
+/* True if the constant X cannot be placed in the constant pool. */
+DEFHOOK
+(cannot_force_const_mem,
+ "This hook should return true if @var{x} is of a form that cannot (or\n\
+should not) be spilled to the constant pool. @var{mode} is the mode\n\
+of @var{x}.\n\
+\n\
+The default version of this hook returns false.\n\
+\n\
+The primary reason to define this hook is to prevent reload from\n\
+deciding that a non-legitimate constant would be better reloaded\n\
+from the constant pool instead of spilling and reloading a register\n\
+holding the constant. This restriction is often true of addresses\n\
+of TLS symbols for various targets.",
+ bool, (machine_mode mode, rtx x),
+ hook_bool_mode_rtx_false)
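+
+/* Illustrative sketch only: per the note above, a common use of this
+   hook is to keep TLS symbol addresses out of the constant pool.
+   tls_referenced_p is the generic rtl walker for this test.  */
+static bool
+my_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+{
+  /* True if X mentions a SYMBOL_REF with a non-trivial TLS model.  */
+  return tls_referenced_p (x);
+}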
+
+DEFHOOK_UNDOC
+(cannot_copy_insn_p,
+ "True if the insn @var{x} cannot be duplicated.",
+ bool, (rtx_insn *), NULL)
+
+/* True if X is considered to be commutative. */
+DEFHOOK
+(commutative_p,
+ "This target hook returns @code{true} if @var{x} is considered to be commutative.\n\
+Usually, this is just @code{COMMUTATIVE_P (@var{x})}, but the HP PA doesn't\n\
+consider @code{PLUS} to be commutative inside a @code{mem}. @var{outer_code}\n\
+is the rtx code of the enclosing rtl, if known, otherwise it is @code{UNKNOWN}.",
+ bool, (const_rtx x, int outer_code),
+ hook_bool_const_rtx_commutative_p)
+
+/* True if ADDR is an address-expression whose effect depends
+ on the mode of the memory reference it is used in. */
+DEFHOOK
+(mode_dependent_address_p,
+ "This hook returns @code{true} if memory address @var{addr} in address\n\
+space @var{addrspace} can have\n\
+different meanings depending on the machine mode of the memory\n\
+reference it is used for or if the address is valid for some modes\n\
+but not others.\n\
+\n\
+Autoincrement and autodecrement addresses typically have mode-dependent\n\
+effects because the amount of the increment or decrement is the size\n\
+of the operand being addressed. Some machines have other mode-dependent\n\
+addresses. Many RISC machines have no mode-dependent addresses.\n\
+\n\
+You may assume that @var{addr} is a valid address for the machine.\n\
+\n\
+The default version of this hook returns @code{false}.",
+ bool, (const_rtx addr, addr_space_t addrspace),
+ default_mode_dependent_address_p)
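+
+/* Illustrative sketch only: for a hypothetical port whose only
+   mode-dependent addresses are the auto-modification forms called out
+   above, the hook reduces to an rtx-class check.  */
+static bool
+my_mode_dependent_address_p (const_rtx addr,
+                             addr_space_t addrspace ATTRIBUTE_UNUSED)
+{
+  /* Auto-increment/decrement addresses scale by the operand size, so
+     their effect depends on the mode of the memory reference.  */
+  return GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC;
+}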
+
+/* Given an invalid address X for a given machine mode, try machine-specific
+ ways to make it legitimate. Return X or an invalid address on failure. */
+DEFHOOK
+(legitimize_address,
+ "This hook is given an invalid memory address @var{x} for an\n\
+operand of mode @var{mode} and should try to return a valid memory\n\
+address.\n\
+\n\
+@findex break_out_memory_refs\n\
+@var{x} will always be the result of a call to @code{break_out_memory_refs},\n\
+and @var{oldx} will be the operand that was given to that function to produce\n\
+@var{x}.\n\
+\n\
+The code of the hook should not alter the substructure of\n\
+@var{x}. If it transforms @var{x} into a more legitimate form, it\n\
+should return the new @var{x}.\n\
+\n\
+It is not necessary for this hook to come up with a legitimate address,\n\
+with the exception of native TLS addresses (@pxref{Emulated TLS}).\n\
+The compiler has standard ways of doing so in all cases. In fact, if\n\
+the target supports only emulated TLS, it\n\
+is safe to omit this hook or make it return @var{x} if it cannot find\n\
+a valid way to legitimize the address. But often a machine-dependent\n\
+strategy can generate better code.",
+ rtx, (rtx x, rtx oldx, machine_mode mode),
+ default_legitimize_address)
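+
+/* Illustrative sketch only: a port with (hypothetically) 12-bit
+   displacements might legitimize an out-of-range base+offset address
+   by forcing the pieces into registers, as described above.  */
+static rtx
+my_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
+                       machine_mode mode ATTRIBUTE_UNUSED)
+{
+  if (GET_CODE (x) == PLUS
+      && CONST_INT_P (XEXP (x, 1))
+      && !IN_RANGE (INTVAL (XEXP (x, 1)), -2048, 2047))
+    {
+      /* Move the base and the large offset into registers and return
+         the reassociated sum; otherwise fall back to X unchanged.  */
+      rtx base = force_reg (Pmode, XEXP (x, 0));
+      rtx off = force_reg (Pmode, XEXP (x, 1));
+      return gen_rtx_PLUS (Pmode, base, off);
+    }
+  return x;
+}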
+
+/* Given an address RTX, undo the effects of LEGITIMIZE_ADDRESS. */
+DEFHOOK
+(delegitimize_address,
+ "This hook is used to undo the possibly obfuscating effects of the\n\
+@code{LEGITIMIZE_ADDRESS} and @code{LEGITIMIZE_RELOAD_ADDRESS} target\n\
+macros. Some backend implementations of these macros wrap symbol\n\
+references inside an @code{UNSPEC} rtx to represent PIC or similar\n\
+addressing modes. This target hook allows GCC's optimizers to understand\n\
+the semantics of these opaque @code{UNSPEC}s by converting them back\n\
+into their original form.",
+ rtx, (rtx x),
+ delegitimize_mem_from_attrs)
+
+/* Given an RTX, return true if it is not ok to emit it into debug info
+ section. */
+DEFHOOK
+(const_not_ok_for_debug_p,
+ "This hook should return true if @var{x} should not be emitted into\n\
+debug sections.",
+ bool, (rtx x),
+ default_const_not_ok_for_debug_p)
+
+/* Given an address RTX, say whether it is valid. */
+DEFHOOK
+(legitimate_address_p,
+ "A function that returns whether @var{x} (an RTX) is a legitimate memory\n\
+address on the target machine for a memory operand of mode @var{mode}.\n\
+\n\
+Legitimate addresses are defined in two variants: a strict variant and a\n\
+non-strict one. The @var{strict} parameter chooses which variant is\n\
+desired by the caller.\n\
+\n\
+The strict variant is used in the reload pass. It must be defined so\n\
+that any pseudo-register that has not been allocated a hard register is\n\
+considered a memory reference. This is because in contexts where some\n\
+kind of register is required, a pseudo-register with no hard register\n\
+must be rejected. For non-hard registers, the strict variant should look\n\
+up the @code{reg_renumber} array; it should then proceed using the hard\n\
+register number in the array, or treat the pseudo as a memory reference\n\
+if the array holds @code{-1}.\n\
+\n\
+The non-strict variant is used in other passes. It must be defined to\n\
+accept all pseudo-registers in every context where some kind of\n\
+register is required.\n\
+\n\
+Normally, constant addresses which are the sum of a @code{symbol_ref}\n\
+and an integer are stored inside a @code{const} RTX to mark them as\n\
+constant. Therefore, there is no need to recognize such sums\n\
+specifically as legitimate addresses. Normally you would simply\n\
+recognize any @code{const} as legitimate.\n\
+\n\
+Usually @code{PRINT_OPERAND_ADDRESS} is not prepared to handle constant\n\
+sums that are not marked with @code{const}. It assumes that a naked\n\
+@code{plus} indicates indexing. If so, then you @emph{must} reject such\n\
+naked constant sums as illegitimate addresses, so that none of them will\n\
+be given to @code{PRINT_OPERAND_ADDRESS}.\n\
+\n\
+@cindex @code{TARGET_ENCODE_SECTION_INFO} and address validation\n\
+On some machines, whether a symbolic address is legitimate depends on\n\
+the section that the address refers to. On these machines, define the\n\
+target hook @code{TARGET_ENCODE_SECTION_INFO} to store the information\n\
+into the @code{symbol_ref}, and then check for it here. When you see a\n\
+@code{const}, you will have to look inside it to find the\n\
+@code{symbol_ref} in order to determine the section. @xref{Assembler\n\
+Format}.\n\
+\n\
+@cindex @code{GO_IF_LEGITIMATE_ADDRESS}\n\
+Some ports are still using a deprecated legacy substitute for\n\
+this hook, the @code{GO_IF_LEGITIMATE_ADDRESS} macro. This macro\n\
+has this syntax:\n\
+\n\
+@example\n\
+#define GO_IF_LEGITIMATE_ADDRESS (@var{mode}, @var{x}, @var{label})\n\
+@end example\n\
+\n\
+@noindent\n\
+and should @code{goto @var{label}} if the address @var{x} is a valid\n\
+address on the target machine for a memory operand of mode @var{mode}.\n\
+\n\
+@findex REG_OK_STRICT\n\
+Compiler source files that want to use the strict variant of this\n\
+macro define the macro @code{REG_OK_STRICT}. You should use an\n\
+@code{#ifdef REG_OK_STRICT} conditional to define the strict variant in\n\
+that case and the non-strict variant otherwise.\n\
+\n\
+Using the hook is usually simpler because it limits the number of\n\
+files that are recompiled when changes are made.",
+ bool, (machine_mode mode, rtx x, bool strict),
+ default_legitimate_address_p)
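+
+/* Illustrative sketch only: the strict/non-strict base-register test
+   described above, using reg_renumber.  MY_REGNO_OK_FOR_BASE_P stands
+   in for a hypothetical hard-register check.  */
+static bool
+my_base_reg_ok_p (rtx reg, bool strict)
+{
+  if (!REG_P (reg))
+    return false;
+  unsigned int regno = REGNO (reg);
+  if (regno >= FIRST_PSEUDO_REGISTER)
+    {
+      if (!strict)
+        return true;            /* Non-strict: any pseudo may be a base.  */
+      if (reg_renumber[regno] < 0)
+        return false;           /* Unallocated pseudo acts as memory.  */
+      regno = reg_renumber[regno];
+    }
+  return MY_REGNO_OK_FOR_BASE_P (regno);
+}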
+
+/* True if the given constant can be put into an object_block. */
+DEFHOOK
+(use_blocks_for_constant_p,
+ "This hook should return true if pool entries for constant @var{x} can\n\
+be placed in an @code{object_block} structure. @var{mode} is the mode\n\
+of @var{x}.\n\
+\n\
+The default version returns false for all constants.",
+ bool, (machine_mode mode, const_rtx x),
+ hook_bool_mode_const_rtx_false)
+
+/* True if the given decl can be put into an object_block. */
+DEFHOOK
+(use_blocks_for_decl_p,
+ "This hook should return true if pool entries for @var{decl} should\n\
+be placed in an @code{object_block} structure.\n\
+\n\
+The default version returns true for all decls.",
+ bool, (const_tree decl),
+ hook_bool_const_tree_true)
+
+/* The minimum and maximum byte offsets for anchored addresses. */
+DEFHOOKPOD
+(min_anchor_offset,
+ "The minimum offset that should be applied to a section anchor.\n\
+On most targets, it should be the smallest offset that can be\n\
+applied to a base register while still giving a legitimate address\n\
+for every mode. The default value is 0.",
+ HOST_WIDE_INT, 0)
+
+DEFHOOKPOD
+(max_anchor_offset,
+ "Like @code{TARGET_MIN_ANCHOR_OFFSET}, but the maximum (inclusive)\n\
+offset that should be applied to section anchors. The default\n\
+value is 0.",
+ HOST_WIDE_INT, 0)
+
+/* True if section anchors can be used to access the given symbol. */
+DEFHOOK
+(use_anchors_for_symbol_p,
+ "Return true if GCC should attempt to use anchors to access @code{SYMBOL_REF}\n\
+@var{x}. You can assume @samp{SYMBOL_REF_HAS_BLOCK_INFO_P (@var{x})} and\n\
+@samp{!SYMBOL_REF_ANCHOR_P (@var{x})}.\n\
+\n\
+The default version is correct for most targets, but you might need to\n\
+intercept this hook to handle things like target-specific attributes\n\
+or target-specific sections.",
+ bool, (const_rtx x),
+ default_use_anchors_for_symbol_p)
+
+/* True if target supports indirect functions. */
+DEFHOOK
+(has_ifunc_p,
+ "It returns true if the target supports GNU indirect functions.\n\
+The support includes the assembler, linker and dynamic linker.\n\
+The default value of this hook is based on target's libc.",
+ bool, (void),
+ default_has_ifunc_p)
+
+/* True if it is OK to reference indirect function resolvers locally. */
+DEFHOOK
+(ifunc_ref_local_ok,
+ "Return true if it is OK to reference indirect function resolvers\n\
+locally. The default is to return false.",
+ bool, (void),
+ hook_bool_void_false)
+
+/* True if it is OK to do sibling call optimization for the specified
+ call expression EXP. DECL will be the called function, or NULL if
+ this is an indirect call. */
+DEFHOOK
+(function_ok_for_sibcall,
+ "True if it is OK to do sibling call optimization for the specified\n\
+call expression @var{exp}. @var{decl} will be the called function,\n\
+or @code{NULL} if this is an indirect call.\n\
+\n\
+It is not uncommon for limitations of calling conventions to prevent\n\
+tail calls to functions outside the current unit of translation, or\n\
+during PIC compilation. The hook is used to enforce these restrictions,\n\
+as the @code{sibcall} md pattern cannot fail, or fall over to a\n\
+``normal'' call. The criteria for successful sibling call optimization\n\
+may vary greatly between different architectures.",
+ bool, (tree decl, tree exp),
+ hook_bool_tree_tree_false)
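+
+/* Illustrative sketch only: a typical restriction of the kind noted
+   above, where PIC calls that may leave the translation unit cannot
+   become sibcalls (e.g. because the GOT register cannot be restored
+   in the sibcall sequence).  */
+static bool
+my_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
+{
+  if (flag_pic && (decl == NULL_TREE || !targetm.binds_local_p (decl)))
+    return false;
+  return true;
+}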
+
+/* Establish appropriate back-end context for processing the function
+ FNDECL. The argument might be NULL to indicate processing at top
+ level, outside of any function scope. */
+DEFHOOK
+(set_current_function,
+ "The compiler invokes this hook whenever it changes its current function\n\
+context (@code{cfun}). You can define this function if\n\
+the back end needs to perform any initialization or reset actions on a\n\
+per-function basis. For example, it may be used to implement function\n\
+attributes that affect register usage or code generation patterns.\n\
+The argument @var{decl} is the declaration for the new function context,\n\
+and may be null to indicate that the compiler has left a function context\n\
+and is returning to processing at the top level.\n\
+The default hook function does nothing.\n\
+\n\
+GCC sets @code{cfun} to a dummy function context during initialization of\n\
+some parts of the back end. The hook function is not invoked in this\n\
+situation; you need not worry about the hook being invoked recursively,\n\
+or when the back end is in a partially-initialized state.\n\
+@code{cfun} might be @code{NULL} to indicate processing at top level,\n\
+outside of any function scope.",
+ void, (tree decl), hook_void_tree)
+
+/* True if EXP should be placed in a "small data" section. */
+DEFHOOK
+(in_small_data_p,
+ "Returns true if @var{exp} should be placed into a ``small data'' section.\n\
+The default version of this hook always returns false.",
+ bool, (const_tree exp),
+ hook_bool_const_tree_false)
+
+/* True if EXP names an object for which name resolution must resolve
+ to the current executable or shared library. */
+DEFHOOK
+(binds_local_p,
+ "Returns true if @var{exp} names an object for which name resolution\n\
+rules must resolve to the current ``module'' (dynamic shared library\n\
+or executable image).\n\
+\n\
+The default version of this hook implements the name resolution rules\n\
+for ELF, which has a looser model of global name binding than other\n\
+currently supported object file formats.",
+ bool, (const_tree exp),
+ default_binds_local_p)
+
+/* Check if profiling code is before or after prologue. */
+DEFHOOK
+(profile_before_prologue,
+ "It returns true if target wants profile code emitted before prologue.\n\n\
+The default version of this hook use the target macro\n\
+@code{PROFILE_BEFORE_PROLOGUE}.",
+ bool, (void),
+ default_profile_before_prologue)
+
+/* Return true if a leaf function should stay leaf even with profiling
+ enabled. */
+DEFHOOK
+(keep_leaf_when_profiled,
+ "This target hook returns true if the target wants the leaf flag for\n\
+the current function to stay true even if it calls mcount. This might\n\
+make sense for targets using the leaf flag only to determine whether a\n\
+stack frame needs to be generated or not and for which the call to\n\
+mcount is generated before the function prologue.",
+ bool, (void),
+ default_keep_leaf_when_profiled)
+
+/* Modify and return the identifier of a DECL's external name,
+ originally identified by ID, as required by the target,
+ (eg, append @nn to windows32 stdcall function names).
+ The default is to return ID without modification. */
+DEFHOOK
+(mangle_decl_assembler_name,
+ "Define this hook if you need to postprocess the assembler name generated\n\
+by target-independent code. The @var{id} provided to this hook will be\n\
+the computed name (e.g., the macro @code{DECL_NAME} of the @var{decl} in C,\n\
+or the mangled name of the @var{decl} in C++). The return value of the\n\
+hook is an @code{IDENTIFIER_NODE} for the appropriate mangled name on\n\
+your target system. The default implementation of this hook just\n\
+returns the @var{id} provided.",
+ tree, (tree decl, tree id),
+ default_mangle_decl_assembler_name)
+
+/* Do something target-specific to record properties of the DECL into
+ the associated SYMBOL_REF. */
+DEFHOOK
+(encode_section_info,
+ "Define this hook if references to a symbol or a constant must be\n\
+treated differently depending on something about the variable or\n\
+function named by the symbol (such as what section it is in).\n\
+\n\
+The hook is executed immediately after rtl has been created for\n\
+@var{decl}, which may be a variable or function declaration or\n\
+an entry in the constant pool. In either case, @var{rtl} is the\n\
+rtl in question. Do @emph{not} use @code{DECL_RTL (@var{decl})}\n\
+in this hook; that field may not have been initialized yet.\n\
+\n\
+In the case of a constant, it is safe to assume that the rtl is\n\
+a @code{mem} whose address is a @code{symbol_ref}. Most decls\n\
+will also have this form, but that is not guaranteed. Global\n\
+register variables, for instance, will have a @code{reg} for their\n\
+rtl. (Normally the right thing to do with such unusual rtl is\n\
+leave it alone.)\n\
+\n\
+The @var{new_decl_p} argument will be true if this is the first time\n\
+that @code{TARGET_ENCODE_SECTION_INFO} has been invoked on this decl. It will\n\
+be false for subsequent invocations, which will happen for duplicate\n\
+declarations. Whether or not anything must be done for the duplicate\n\
+declaration depends on whether the hook examines @code{DECL_ATTRIBUTES}.\n\
+@var{new_decl_p} is always true when the hook is called for a constant.\n\
+\n\
+@cindex @code{SYMBOL_REF_FLAG}, in @code{TARGET_ENCODE_SECTION_INFO}\n\
+The usual thing for this hook to do is to record flags in the\n\
+@code{symbol_ref}, using @code{SYMBOL_REF_FLAG} or @code{SYMBOL_REF_FLAGS}.\n\
+Historically, the name string was modified if it was necessary to\n\
+encode more than one bit of information, but this practice is now\n\
+discouraged; use @code{SYMBOL_REF_FLAGS}.\n\
+\n\
+The default definition of this hook, @code{default_encode_section_info}\n\
+in @file{varasm.cc}, sets a number of commonly-useful bits in\n\
+@code{SYMBOL_REF_FLAGS}. Check whether the default does what you need\n\
+before overriding it.",
+ void, (tree decl, rtx rtl, int new_decl_p),
+ default_encode_section_info)
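+
+/* Illustrative sketch only: record a target-specific property in the
+   SYMBOL_REF flags on top of the default handling.  `my_small_data_p'
+   is an assumed predicate for this hypothetical port.  */
+static void
+my_encode_section_info (tree decl, rtx rtl, int first)
+{
+  default_encode_section_info (decl, rtl, first);
+  /* SYMBOL_FLAG_MACH_DEP is the first flag bit reserved for ports.  */
+  if (MEM_P (rtl)
+      && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
+      && my_small_data_p (decl))
+    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_MACH_DEP;
+}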
+
+/* Undo the effects of encode_section_info on the symbol string. */
+DEFHOOK
+(strip_name_encoding,
+ "Decode @var{name} and return the real name part, sans\n\
+the characters that @code{TARGET_ENCODE_SECTION_INFO}\n\
+may have added.",
+ const char *, (const char *name),
+ default_strip_name_encoding)
+
+/* If shift optabs for MODE are known to always truncate the shift count,
+ return the mask that they apply. Return 0 otherwise. */
+DEFHOOK
+(shift_truncation_mask,
+ "This function describes how the standard shift patterns for @var{mode}\n\
+deal with shifts by negative amounts or by more than the width of the mode.\n\
+@xref{shift patterns}.\n\
+\n\
+On many machines, the shift patterns will apply a mask @var{m} to the\n\
+shift count, meaning that a fixed-width shift of @var{x} by @var{y} is\n\
+equivalent to an arbitrary-width shift of @var{x} by @var{y & m}. If\n\
+this is true for mode @var{mode}, the function should return @var{m},\n\
+otherwise it should return 0. A return value of 0 indicates that no\n\
+particular behavior is guaranteed.\n\
+\n\
+Note that, unlike @code{SHIFT_COUNT_TRUNCATED}, this function does\n\
+@emph{not} apply to general shift rtxes; it applies only to instructions\n\
+that are generated by the named shift patterns.\n\
+\n\
+The default implementation of this function returns\n\
+@code{GET_MODE_BITSIZE (@var{mode}) - 1} if @code{SHIFT_COUNT_TRUNCATED}\n\
+and 0 otherwise. This definition is always safe, but if\n\
+@code{SHIFT_COUNT_TRUNCATED} is false, and some shift patterns\n\
+nevertheless truncate the shift count, you may get better code\n\
+by overriding it.",
+ unsigned HOST_WIDE_INT, (machine_mode mode),
+ default_shift_truncation_mask)
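+
+/* Illustrative sketch only: a hypothetical machine whose named shift
+   patterns mask the count to 5 bits for 32-bit shifts and guarantee
+   nothing for other modes.  */
+static unsigned HOST_WIDE_INT
+my_shift_truncation_mask (machine_mode mode)
+{
+  return mode == SImode ? 31 : 0;
+}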
+
+/* Return the number of divisions in the given MODE that should be present,
+ so that it is profitable to turn the division into a multiplication by
+ the reciprocal. */
+DEFHOOK
+(min_divisions_for_recip_mul,
+ "When @option{-ffast-math} is in effect, GCC tries to optimize\n\
+divisions by the same divisor, by turning them into multiplications by\n\
+the reciprocal. This target hook specifies the minimum number of divisions\n\
+that should be there for GCC to perform the optimization for a variable\n\
+of mode @var{mode}. The default implementation returns 3 if the machine\n\
+has an instruction for the division, and 2 if it does not.",
+ unsigned int, (machine_mode mode),
+ default_min_divisions_for_recip_mul)
+
+DEFHOOK
+(truly_noop_truncation,
+ "This hook returns true if it is safe to ``convert'' a value of\n\
+@var{inprec} bits to one of @var{outprec} bits (where @var{outprec} is\n\
+smaller than @var{inprec}) by merely operating on it as if it had only\n\
+@var{outprec} bits. The default returns true unconditionally, which\n\
+is correct for most machines. When @code{TARGET_TRULY_NOOP_TRUNCATION}\n\
+returns false, the machine description should provide a @code{trunc}\n\
+optab to specify the RTL that performs the required truncation.\n\
+\n\
+If @code{TARGET_MODES_TIEABLE_P} returns false for a pair of modes,\n\
+suboptimal code can result if this hook returns true for the corresponding\n\
+mode sizes. Making this hook return false in such cases may improve things.",
+ bool, (poly_uint64 outprec, poly_uint64 inprec),
+ hook_bool_puint64_puint64_true)
+
+/* If the representation of integral MODE is such that values are
+ always sign-extended to a wider mode MODE_REP then return
+ SIGN_EXTEND. Return UNKNOWN otherwise. */
+/* Note that the return type ought to be RTX_CODE, but that's not
+ necessarily defined at this point. */
+DEFHOOK
+(mode_rep_extended,
+ "The representation of an integral mode can be such that the values\n\
+are always extended to a wider integral mode. Return\n\
+@code{SIGN_EXTEND} if values of @var{mode} are represented in\n\
+sign-extended form to @var{rep_mode}. Return @code{UNKNOWN}\n\
+otherwise. (Currently, none of the targets use zero-extended\n\
+representation this way so unlike @code{LOAD_EXTEND_OP},\n\
+@code{TARGET_MODE_REP_EXTENDED} is expected to return either\n\
+@code{SIGN_EXTEND} or @code{UNKNOWN}. Also no target extends\n\
+@var{mode} to @var{rep_mode} so that @var{rep_mode} is not the next\n\
+widest integral mode and currently we take advantage of this fact.)\n\
+\n\
+Similarly to @code{LOAD_EXTEND_OP} you may return a non-@code{UNKNOWN}\n\
+value even if the extension is not performed on certain hard registers\n\
+as long as for the @code{REGNO_REG_CLASS} of these hard registers\n\
+@code{TARGET_CAN_CHANGE_MODE_CLASS} returns false.\n\
+\n\
+Note that @code{TARGET_MODE_REP_EXTENDED} and @code{LOAD_EXTEND_OP}\n\
+describe two related properties. If you define\n\
+@code{TARGET_MODE_REP_EXTENDED (mode, word_mode)} you probably also want\n\
+to define @code{LOAD_EXTEND_OP (mode)} to return the same type of\n\
+extension.\n\
+\n\
+In order to enforce the representation of @code{mode},\n\
+@code{TARGET_TRULY_NOOP_TRUNCATION} should return false when truncating to\n\
+@code{mode}.",
+ int, (scalar_int_mode mode, scalar_int_mode rep_mode),
+ default_mode_rep_extended)
+
+DEFHOOK
+(setjmp_preserves_nonvolatile_regs_p,
+ "On some targets, it is assumed that the compiler will spill all pseudos\n\
+ that are live across a call to @code{setjmp}, while other targets treat\n\
+ @code{setjmp} calls as normal function calls.\n\
+ \n\
+ This hook returns false if @code{setjmp} calls do not preserve all\n\
+ non-volatile registers so that gcc that must spill all pseudos that are\n\
+ live across @code{setjmp} calls. Define this to return true if the\n\
+ target does not need to spill all pseudos live across @code{setjmp} calls.\n\
+ The default implementation conservatively assumes all pseudos must be\n\
+ spilled across @code{setjmp} calls.",
+ bool, (void),
+ hook_bool_void_false)
+
+/* True if MODE is valid for a pointer in __attribute__((mode("MODE"))). */
+DEFHOOK
+(valid_pointer_mode,
+ "Define this to return nonzero if the port can handle pointers\n\
+with machine mode @var{mode}. The default version of this\n\
+hook returns true for both @code{ptr_mode} and @code{Pmode}.",
+ bool, (scalar_int_mode mode),
+ default_valid_pointer_mode)
+
+/* Disambiguate with errno. */
+DEFHOOK
+(ref_may_alias_errno,
+ "Define this to return nonzero if the memory reference @var{ref}\n\
+may alias with the system C library errno location. The default\n\
+version of this hook assumes the system C library errno location\n\
+is either a declaration of type int or accessed by dereferencing\n\
+a pointer to int.",
+ bool, (ao_ref *ref),
+ default_ref_may_alias_errno)
+
+/* Support for named address spaces. */
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_ADDR_SPACE_"
+HOOK_VECTOR (TARGET_ADDR_SPACE_HOOKS, addr_space)
+
+/* MODE to use for a pointer into another address space. */
+DEFHOOK
+(pointer_mode,
+ "Define this to return the machine mode to use for pointers to\n\
+@var{address_space} if the target supports named address spaces.\n\
+The default version of this hook returns @code{ptr_mode}.",
+ scalar_int_mode, (addr_space_t address_space),
+ default_addr_space_pointer_mode)
+
+/* MODE to use for an address in another address space. */
+DEFHOOK
+(address_mode,
+ "Define this to return the machine mode to use for addresses in\n\
+@var{address_space} if the target supports named address spaces.\n\
+The default version of this hook returns @code{Pmode}.",
+ scalar_int_mode, (addr_space_t address_space),
+ default_addr_space_address_mode)
+
+/* True if MODE is valid for a pointer in __attribute__((mode("MODE")))
+ in another address space. */
+DEFHOOK
+(valid_pointer_mode,
+ "Define this to return nonzero if the port can handle pointers\n\
+with machine mode @var{mode} to address space @var{as}. This target\n\
+hook is the same as the @code{TARGET_VALID_POINTER_MODE} target hook,\n\
+except that it includes explicit named address space support. The default\n\
+version of this hook returns true for the modes returned by either the\n\
+@code{TARGET_ADDR_SPACE_POINTER_MODE} or @code{TARGET_ADDR_SPACE_ADDRESS_MODE}\n\
+target hooks for the given address space.",
+ bool, (scalar_int_mode mode, addr_space_t as),
+ default_addr_space_valid_pointer_mode)
+
+/* True if an address is a valid memory address to a given named address
+ space for a given mode. */
+DEFHOOK
+(legitimate_address_p,
+ "Define this to return true if @var{exp} is a valid address for mode\n\
+@var{mode} in the named address space @var{as}. The @var{strict}\n\
+parameter says whether strict addressing is in effect after reload has\n\
+finished. This target hook is the same as the\n\
+@code{TARGET_LEGITIMATE_ADDRESS_P} target hook, except that it includes\n\
+explicit named address space support.",
+ bool, (machine_mode mode, rtx exp, bool strict, addr_space_t as),
+ default_addr_space_legitimate_address_p)
+
+/* Return an updated address to convert an invalid pointer to a named
+ address space to a valid one. If NULL_RTX is returned use machine
+ independent methods to make the address valid. */
+DEFHOOK
+(legitimize_address,
+ "Define this to modify an invalid address @var{x} to be a valid address\n\
+with mode @var{mode} in the named address space @var{as}. This target\n\
+hook is the same as the @code{TARGET_LEGITIMIZE_ADDRESS} target hook,\n\
+except that it includes explicit named address space support.",
+ rtx, (rtx x, rtx oldx, machine_mode mode, addr_space_t as),
+ default_addr_space_legitimize_address)
+
+/* True if one named address space is a subset of another named address. */
+DEFHOOK
+(subset_p,
+ "Define this to return whether the @var{subset} named address space is\n\
+contained within the @var{superset} named address space. Pointers to\n\
+a named address space that is a subset of another named address space\n\
+will be converted automatically without a cast if used together in\n\
+arithmetic operations. Pointers to a superset address space can be\n\
+converted to pointers to a subset address space via explicit casts.",
+ bool, (addr_space_t subset, addr_space_t superset),
+ default_addr_space_subset_p)
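+
+/* Illustrative sketch only: a hypothetical port with one extra address
+   space, __near, that is a strict subset of the generic space.
+   MY_ADDR_SPACE_NEAR is an assumed target-defined constant.  */
+static bool
+my_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
+{
+  return (subset == superset
+          || (subset == MY_ADDR_SPACE_NEAR
+              && superset == ADDR_SPACE_GENERIC));
+}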
+
+/* True if 0 is a valid address in the address space, or false if
+ 0 is a NULL in the address space. */
+DEFHOOK
+(zero_address_valid,
+ "Define this to modify the default handling of address 0 for the\n\
+address space. Return true if 0 should be considered a valid address.",
+ bool, (addr_space_t as),
+ default_addr_space_zero_address_valid)
+
+/* Function to convert an rtl expression from one address space to another. */
+DEFHOOK
+(convert,
+ "Define this to convert the pointer expression represented by the RTL\n\
+@var{op} with type @var{from_type} that points to a named address\n\
+space to a new pointer expression with type @var{to_type} that points\n\
+to a different named address space. When this hook is called, it is\n\
+guaranteed that one of the two address spaces is a subset of the other,\n\
+as determined by the @code{TARGET_ADDR_SPACE_SUBSET_P} target hook.",
+ rtx, (rtx op, tree from_type, tree to_type),
+ default_addr_space_convert)
+
+/* Function to encode an address space into dwarf. */
+DEFHOOK
+(debug,
+ "Define this to define how the address space is encoded in dwarf.\n\
+The result is the value to be used with @code{DW_AT_address_class}.",
+ int, (addr_space_t as),
+ default_addr_space_debug)
+
+/* Function to emit custom diagnostic if an address space is used. */
+DEFHOOK
+(diagnose_usage,
+ "Define this hook if the availability of an address space depends on\n\
+command line options and some diagnostics should be printed when the\n\
+address space is used. This hook is called during parsing and allows\n\
+emitting a better diagnostic than in the case where the address space\n\
+was not registered with @code{c_register_addr_space}. @var{as} is\n\
+the address space as registered with @code{c_register_addr_space}.\n\
+@var{loc} is the location of the address space qualifier token.\n\
+The default implementation does nothing.",
+ void, (addr_space_t as, location_t loc),
+ default_addr_space_diagnose_usage)
+
+HOOK_VECTOR_END (addr_space)
+
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_"
+
+DEFHOOK
+(lower_local_decl_alignment,
+ "Define this hook to lower alignment of local, parm or result\n\
+decl @samp{(@var{decl})}.",
+ void, (tree decl),
+ hook_void_tree)
+
+DEFHOOK
+(static_rtx_alignment,
+ "This hook returns the preferred alignment in bits for a\n\
+statically-allocated rtx, such as a constant pool entry. @var{mode}\n\
+is the mode of the rtx. The default implementation returns\n\
+@samp{GET_MODE_ALIGNMENT (@var{mode})}.",
+ HOST_WIDE_INT, (machine_mode mode),
+ default_static_rtx_alignment)
+
+DEFHOOK
+(constant_alignment,
+ "This hook returns the alignment in bits of a constant that is being\n\
+placed in memory. @var{constant} is the constant and @var{basic_align}\n\
+is the alignment that the object would ordinarily have.\n\
+\n\
+The default definition just returns @var{basic_align}.\n\
+\n\
+The typical use of this hook is to increase alignment for string\n\
+constants to be word aligned so that @code{strcpy} calls that copy\n\
+constants can be done inline. The function\n\
+@code{constant_alignment_word_strings} provides such a definition.",
+ HOST_WIDE_INT, (const_tree constant, HOST_WIDE_INT basic_align),
+ default_constant_alignment)
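+
+/* Illustrative sketch only: word-align string constants, in the spirit
+   of constant_alignment_word_strings mentioned above.  */
+static HOST_WIDE_INT
+my_constant_alignment (const_tree constant, HOST_WIDE_INT basic_align)
+{
+  if (TREE_CODE (constant) == STRING_CST)
+    return MAX (basic_align, (HOST_WIDE_INT) BITS_PER_WORD);
+  return basic_align;
+}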
+
+DEFHOOK
+(translate_mode_attribute,
+ "Define this hook if during mode attribute processing, the port should\n\
+translate machine_mode @var{mode} to another mode. For example, rs6000's\n\
+@code{KFmode}, when it is the same as @code{TFmode}.\n\
+\n\
+The default version of the hook returns the mode that was passed in.",
+ machine_mode, (machine_mode mode),
+ default_translate_mode_attribute)
+
+/* True if MODE is valid for the target. By "valid", we mean able to
+ be manipulated in non-trivial ways. In particular, this means all
+ the arithmetic is supported. */
+DEFHOOK
+(scalar_mode_supported_p,
+ "Define this to return nonzero if the port is prepared to handle\n\
+insns involving scalar mode @var{mode}. For a scalar mode to be\n\
+considered supported, all the basic arithmetic and comparisons\n\
+must work.\n\
+\n\
+The default version of this hook returns true for any mode\n\
+required to handle the basic C types (as defined by the port).\n\
+Included here is the double-word arithmetic supported by the\n\
+code in @file{optabs.cc}.",
+ bool, (scalar_mode mode),
+ default_scalar_mode_supported_p)
+
+/* Similarly for vector modes. "Supported" here is less strict. At
+ least some operations are supported; need to check optabs or builtins
+ for further details. */
+DEFHOOK
+(vector_mode_supported_p,
+ "Define this to return nonzero if the port is prepared to handle\n\
+insns involving vector mode @var{mode}. At the very least, it\n\
+must have move patterns for this mode.",
+ bool, (machine_mode mode),
+ hook_bool_mode_false)
+
+DEFHOOK
+(compatible_vector_types_p,
+ "Return true if there is no target-specific reason for treating\n\
+vector types @var{type1} and @var{type2} as distinct types. The caller\n\
+has already checked for target-independent reasons, meaning that the\n\
+types are known to have the same mode, to have the same number of elements,\n\
+and to have what the caller considers to be compatible element types.\n\
+\n\
+The main reason for defining this hook is to reject pairs of types\n\
+that are handled differently by the target's calling convention.\n\
+For example, when a new @var{N}-bit vector architecture is added\n\
+to a target, the target may want to handle normal @var{N}-bit\n\
+@code{VECTOR_TYPE} arguments and return values in the same way as\n\
+before, to maintain backwards compatibility. However, it may also\n\
+provide new, architecture-specific @code{VECTOR_TYPE}s that are passed\n\
+and returned in a more efficient way. It is then important to maintain\n\
+a distinction between the ``normal'' @code{VECTOR_TYPE}s and the new\n\
+architecture-specific ones.\n\
+\n\
+The default implementation returns true, which is correct for most targets.",
+ bool, (const_tree type1, const_tree type2),
+ hook_bool_const_tree_const_tree_true)
+
+DEFHOOK
+(vector_alignment,
+ "This hook can be used to define the alignment for a vector of type\n\
+@var{type}, in order to comply with a platform ABI. The default is to\n\
+require natural alignment for vector types. The alignment returned by\n\
+this hook must be a power-of-two multiple of the default alignment of\n\
+the vector element type.",
+ HOST_WIDE_INT, (const_tree type),
+ default_vector_alignment)
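+
+/* Illustrative sketch only: an ABI that caps vector alignment at
+   64 bits rather than requiring full natural alignment.  */
+static HOST_WIDE_INT
+my_vector_alignment (const_tree type)
+{
+  /* Natural (size-based) alignment, clamped to the ABI maximum.  */
+  return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
+}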
+
+DEFHOOK
+(array_mode,
+ "Return the mode that GCC should use for an array that has\n\
+@var{nelems} elements, with each element having mode @var{mode}.\n\
+Return no mode if the target has no special requirements. In the\n\
+latter case, GCC looks for an integer mode of the appropriate size\n\
+if available and uses @code{BLKmode} otherwise. Usually the search for the\n\
+integer mode is limited to @code{MAX_FIXED_MODE_SIZE}, but the\n\
+@code{TARGET_ARRAY_MODE_SUPPORTED_P} hook allows a larger mode to be\n\
+used in specific cases.\n\
+\n\
+The main use of this hook is to specify that an array of vectors should\n\
+also have a vector mode. The default implementation returns no mode.",
+ opt_machine_mode, (machine_mode mode, unsigned HOST_WIDE_INT nelems),
+ hook_optmode_mode_uhwi_none)
+
+/* True if we should try to use a scalar mode to represent an array,
+ overriding the usual MAX_FIXED_MODE limit. */
+DEFHOOK
+(array_mode_supported_p,
+ "Return true if GCC should try to use a scalar mode to store an array\n\
+of @var{nelems} elements, given that each element has mode @var{mode}.\n\
+Returning true here overrides the usual @code{MAX_FIXED_MODE} limit\n\
+and allows GCC to use any defined integer mode.\n\
+\n\
+One use of this hook is to support vector load and store operations\n\
+that operate on several homogeneous vectors. For example, ARM NEON\n\
+has operations like:\n\
+\n\
+@smallexample\n\
+int8x8x3_t vld3_s8 (const int8_t *)\n\
+@end smallexample\n\
+\n\
+where the return type is defined as:\n\
+\n\
+@smallexample\n\
+typedef struct int8x8x3_t\n\
+@{\n\
+ int8x8_t val[3];\n\
+@} int8x8x3_t;\n\
+@end smallexample\n\
+\n\
+If this hook allows @code{val} to have a scalar mode, then\n\
+@code{int8x8x3_t} can have the same mode. GCC can then store\n\
+@code{int8x8x3_t}s in registers rather than forcing them onto the stack.",
+ bool, (machine_mode mode, unsigned HOST_WIDE_INT nelems),
+ hook_bool_mode_uhwi_false)
+
+DEFHOOK
+(libgcc_floating_mode_supported_p,
+ "Define this to return nonzero if libgcc provides support for the \n\
+floating-point mode @var{mode}, which is known to pass \n\
+@code{TARGET_SCALAR_MODE_SUPPORTED_P}. The default version of this \n\
+hook returns true for all of @code{SFmode}, @code{DFmode}, \n\
+@code{XFmode} and @code{TFmode}, if such modes exist.",
+ bool, (scalar_float_mode mode),
+ default_libgcc_floating_mode_supported_p)
+
+DEFHOOK
+(floatn_mode,
+ "Define this to return the machine mode to use for the type \n\
+@code{_Float@var{n}}, if @var{extended} is false, or the type \n\
+@code{_Float@var{n}x}, if @var{extended} is true. If such a type is not\n\
+supported, return @code{opt_scalar_float_mode ()}. The default version of\n\
+this hook returns @code{SFmode} for @code{_Float32}, @code{DFmode} for\n\
+@code{_Float64} and @code{_Float32x} and @code{TFmode} for \n\
+@code{_Float128}, if those modes exist and satisfy the requirements for \n\
+those types and pass @code{TARGET_SCALAR_MODE_SUPPORTED_P} and \n\
+@code{TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P}; for @code{_Float64x}, it \n\
+returns the first of @code{XFmode} and @code{TFmode} that exists and \n\
+satisfies the same requirements; for other types, it returns \n\
+@code{opt_scalar_float_mode ()}. The hook is only called for values\n\
+of @var{n} and @var{extended} that are valid according to\n\
+ISO/IEC TS 18661-3:2015; that is, @var{n} is one of 32, 64, 128, or,\n\
+if @var{extended} is false, 16 or greater than 128 and a multiple of 32.",
+ opt_scalar_float_mode, (int n, bool extended),
+ default_floatn_mode)
+
+DEFHOOK
+(floatn_builtin_p,
+ "Define this to return true if the @code{_Float@var{n}} and\n\
+@code{_Float@var{n}x} built-in functions should implicitly enable the\n\
+built-in function without the @code{__builtin_} prefix in addition to the\n\
+normal built-in function with the @code{__builtin_} prefix. The default is\n\
+to only enable built-in functions without the @code{__builtin_} prefix for\n\
+the GNU C language. In strict ANSI/ISO mode, the built-in function without\n\
+the @code{__builtin_} prefix is not enabled. The argument @var{func} is the\n\
+@code{enum built_in_function} id of the function to be enabled.",
+ bool, (int func),
+ default_floatn_builtin_p)
+
+/* Compute cost of moving data from a register of class FROM to one of
+ TO, using MODE. */
+DEFHOOK
+(register_move_cost,
+ "This target hook should return the cost of moving data of mode @var{mode}\n\
+from a register in class @var{from} to one in class @var{to}. The classes\n\
+are expressed using the enumeration values such as @code{GENERAL_REGS}.\n\
+A value of 2 is the default; other values are interpreted relative to\n\
+that.\n\
+\n\
+It is not required that the cost always equal 2 when @var{from} is the\n\
+same as @var{to}; on some machines it is expensive to move between\n\
+registers if they are not general registers.\n\
+\n\
+If reload sees an insn consisting of a single @code{set} between two\n\
+hard registers, and if @code{TARGET_REGISTER_MOVE_COST} applied to their\n\
+classes returns a value of 2, reload does not check to ensure that the\n\
+constraints of the insn are met. Setting a cost of other than 2 will\n\
+allow reload to verify that the constraints are met. You should do this\n\
+if the @samp{mov@var{m}} pattern's constraints do not allow such copying.\n\
+\n\
+The default version of this function returns 2.",
+ int, (machine_mode mode, reg_class_t from, reg_class_t to),
+ default_register_move_cost)
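+
+/* Illustrative sketch only: on a hypothetical machine where copies
+   between the general and floating-point register files go through
+   memory, such moves are charged well above the baseline of 2.  */
+static int
+my_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
+                       reg_class_t from, reg_class_t to)
+{
+  if ((from == GENERAL_REGS) != (to == GENERAL_REGS))
+    return 8;
+  return 2;
+}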
+
+/* Compute cost of moving registers to/from memory. */
+/* ??? Documenting the argument types for this hook requires a GFDL
+ license grant. Also, the documentation uses a different name for RCLASS. */
+DEFHOOK
+(memory_move_cost,
+ "This target hook should return the cost of moving data of mode @var{mode}\n\
+between a register of class @var{rclass} and memory; @var{in} is @code{false}\n\
+if the value is to be written to memory, @code{true} if it is to be read in.\n\
+This cost is relative to those in @code{TARGET_REGISTER_MOVE_COST}.\n\
+If moving between registers and memory is more expensive than between two\n\
+registers, you should add this target hook to express the relative cost.\n\
+\n\
+If you do not add this target hook, GCC uses a default cost of 4 plus\n\
+the cost of copying via a secondary reload register, if one is\n\
+needed. If your machine requires a secondary reload register to copy\n\
+between memory and a register of @var{rclass} but the reload mechanism is\n\
+more complex than copying via an intermediate, use this target hook to\n\
+reflect the actual cost of the move.\n\
+\n\
+GCC defines the function @code{memory_move_secondary_cost} if\n\
+secondary reloads are needed. It computes the costs due to copying via\n\
+a secondary register. If your machine copies from memory using a\n\
+secondary register in the conventional way but the default base value of\n\
+4 is not correct for your machine, use this target hook to add some other\n\
+value to the result of that function. The arguments to that function\n\
+are the same as to this target hook.",
+ int, (machine_mode mode, reg_class_t rclass, bool in),
+ default_memory_move_cost)
+
+DEFHOOK
+(use_by_pieces_infrastructure_p,
+ "GCC will attempt several strategies when asked to copy between\n\
+two areas of memory, or to set, clear or store to memory, for example\n\
+when copying a @code{struct}. The @code{by_pieces} infrastructure\n\
+implements such memory operations as a sequence of load, store or move\n\
+insns. Alternate strategies are to expand the\n\
+@code{cpymem} or @code{setmem} optabs, to emit a library call, or to emit\n\
+unit-by-unit, loop-based operations.\n\
+\n\
+This target hook should return true if, for a memory operation with a\n\
+given @var{size} and @var{alignment}, using the @code{by_pieces}\n\
+infrastructure is expected to result in better code generation.\n\
+Both @var{size} and @var{alignment} are measured in terms of storage\n\
+units.\n\
+\n\
+The parameter @var{op} is one of: @code{CLEAR_BY_PIECES},\n\
+@code{MOVE_BY_PIECES}, @code{SET_BY_PIECES}, @code{STORE_BY_PIECES} or\n\
+@code{COMPARE_BY_PIECES}. These describe the type of memory operation\n\
+under consideration.\n\
+\n\
+The parameter @var{speed_p} is true if the code is currently being\n\
+optimized for speed rather than size.\n\
+\n\
+Returning true for higher values of @var{size} can improve code generation\n\
+for speed if the target does not provide an implementation of the\n\
+@code{cpymem} or @code{setmem} standard names, if the @code{cpymem} or\n\
+@code{setmem} implementation would be more expensive than a sequence of\n\
+insns, or if the overhead of a library call would dominate that of\n\
+the body of the memory operation.\n\
+\n\
+Returning true for higher values of @var{size} may also cause an increase\n\
+in code size, for example where the number of insns emitted to perform a\n\
+move would be greater than that of a library call.",
+ bool, (unsigned HOST_WIDE_INT size, unsigned int alignment,
+ enum by_pieces_operation op, bool speed_p),
+ default_use_by_pieces_infrastructure_p)
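+
+/* Illustrative sketch only: accept by-pieces expansion up to an
+   assumed tuning threshold, being more generous when optimizing for
+   speed, and always for comparisons.  */
+static bool
+my_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
+                                   unsigned int align ATTRIBUTE_UNUSED,
+                                   enum by_pieces_operation op,
+                                   bool speed_p)
+{
+  if (op == COMPARE_BY_PIECES)
+    return true;
+  return size <= (speed_p ? 32 : 16);   /* Hypothetical thresholds.  */
+}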
+
+DEFHOOK
+(overlap_op_by_pieces_p,
+ "This target hook should return true if when the @code{by_pieces}\n\
+infrastructure is used, an offset adjusted unaligned memory operation\n\
+in the smallest integer mode for the last piece operation of a memory\n\
+region can be generated to avoid doing more than one smaller operations.",
+ bool, (void),
+ hook_bool_void_false)
+
+DEFHOOK
+(compare_by_pieces_branch_ratio,
+ "When expanding a block comparison in MODE, gcc can try to reduce the\n\
+number of branches at the expense of more memory operations. This hook\n\
+allows the target to override the default choice. It should return the\n\
+factor by which branches should be reduced over the plain expansion with\n\
+one comparison per @var{mode}-sized piece. A port can also prevent a\n\
+particular mode from being used for block comparisons by returning a\n\
+negative number from this hook.",
+ int, (machine_mode mode),
+ default_compare_by_pieces_branch_ratio)
+
+DEFHOOK
+(slow_unaligned_access,
+ "This hook returns true if memory accesses described by the\n\
+@var{mode} and @var{alignment} parameters have a cost many times greater\n\
+than aligned accesses, for example if they are emulated in a trap handler.\n\
+This hook is invoked only for unaligned accesses, i.e.@: when\n\
+@code{@var{alignment} < GET_MODE_ALIGNMENT (@var{mode})}.\n\
+\n\
+When this hook returns true, the compiler will act as if\n\
+@code{STRICT_ALIGNMENT} were true when generating code for block\n\
+moves. This can cause significantly more instructions to be produced.\n\
+Therefore, do not make this hook return true if unaligned accesses only\n\
+add a cycle or two to the time for a memory access.\n\
+\n\
+The hook must return true whenever @code{STRICT_ALIGNMENT} is true.\n\
+The default implementation returns @code{STRICT_ALIGNMENT}.",
+ bool, (machine_mode mode, unsigned int align),
+ default_slow_unaligned_access)
+
+DEFHOOK
+(optab_supported_p,
+ "Return true if the optimizers should use optab @var{op} with\n\
+modes @var{mode1} and @var{mode2} for optimization type @var{opt_type}.\n\
+The optab is known to have an associated @file{.md} instruction\n\
+whose C condition is true. @var{mode2} is only meaningful for conversion\n\
+optabs; for direct optabs it is a copy of @var{mode1}.\n\
+\n\
+For example, when called with @var{op} equal to @code{rint_optab} and\n\
+@var{mode1} equal to @code{DFmode}, the hook should say whether the\n\
+optimizers should use optab @code{rintdf2}.\n\
+\n\
+The default hook returns true for all inputs.",
+ bool, (int op, machine_mode mode1, machine_mode mode2,
+ optimization_type opt_type),
+ default_optab_supported_p)
+
+/* True for MODE if the target expects that registers in this mode will
+ be allocated to registers in a small register class. The compiler is
+ allowed to use registers explicitly used in the rtl as spill registers
+ but it should prevent extending the lifetime of these registers. */
+DEFHOOK
+(small_register_classes_for_mode_p,
+ "Define this to return nonzero for machine modes for which the port has\n\
+small register classes. If this target hook returns nonzero for a given\n\
+@var{mode}, the compiler will try to minimize the lifetime of registers\n\
+in @var{mode}. The hook may be called with @code{VOIDmode} as argument.\n\
+In this case, the hook is expected to return nonzero if it returns nonzero\n\
+for any mode.\n\
+\n\
+On some machines, it is risky to let hard registers live across arbitrary\n\
+insns. Typically, these machines have instructions that require values\n\
+to be in specific registers (like an accumulator), and reload will fail\n\
+if the required hard register is used for another purpose across such an\n\
+insn.\n\
+\n\
+Passes before reload do not know which hard registers will be used\n\
+in an instruction, but the machine modes of the registers set or used in\n\
+the instruction are already known. And for some machines, register\n\
+classes are small for, say, integer registers but not for floating point\n\
+registers. For example, the AMD x86-64 architecture requires specific\n\
+registers for the legacy x86 integer instructions, but there are many\n\
+SSE registers for floating point operations. On such targets, a good\n\
+strategy may be to return nonzero from this hook for @code{INTEGRAL_MODE_P}\n\
+machine modes but zero for the SSE register classes.\n\
+\n\
+The default version of this hook returns false for any mode. It is always\n\
+safe to redefine this hook to return a nonzero value. But if you\n\
+unnecessarily define it, you will reduce the number of optimizations\n\
+that can be performed in some cases. If you do not define this hook\n\
+to return a nonzero value when it is required, the compiler will run out\n\
+of spill registers and print a fatal error message.",
+ bool, (machine_mode mode),
+ hook_bool_mode_false)
+
+/* Register number for a flags register. Only needs to be defined if the
+ target is constrained to use post-reload comparison elimination. */
+DEFHOOKPOD
+(flags_regnum,
+ "If the target has a dedicated flags register, and it needs to use the\n\
+post-reload comparison elimination pass, or the delay slot filler pass,\n\
+then this value should be set appropriately.",
+unsigned int, INVALID_REGNUM)
+
+/* Compute a (partial) cost for rtx X. Return true if the complete
+ cost has been computed, and false if subexpressions should be
+ scanned. In either case, *TOTAL contains the cost result. */
+/* Note that OUTER_CODE ought to be RTX_CODE, but that's
+ not necessarily defined at this point. */
+DEFHOOK
+(rtx_costs,
+ "This target hook describes the relative costs of RTL expressions.\n\
+\n\
+The cost may depend on the precise form of the expression, which is\n\
+available for examination in @var{x}, and the fact that @var{x} appears\n\
+as operand @var{opno} of an expression with rtx code @var{outer_code}.\n\
+That is, the hook can assume that there is some rtx @var{y} such\n\
+that @samp{GET_CODE (@var{y}) == @var{outer_code}} and such that\n\
+either (a) @samp{XEXP (@var{y}, @var{opno}) == @var{x}} or\n\
+(b) @samp{XVEC (@var{y}, @var{opno})} contains @var{x}.\n\
+\n\
+@var{mode} is @var{x}'s machine mode, or for cases like @code{const_int} that\n\
+do not have a mode, the mode in which @var{x} is used.\n\
+\n\
+In implementing this hook, you can use the construct\n\
+@code{COSTS_N_INSNS (@var{n})} to specify a cost equal to @var{n} fast\n\
+instructions.\n\
+\n\
+On entry to the hook, @code{*@var{total}} contains a default estimate\n\
+for the cost of the expression. The hook should modify this value as\n\
+necessary. Traditionally, the default costs are @code{COSTS_N_INSNS (5)}\n\
+for multiplications, @code{COSTS_N_INSNS (7)} for division and modulus\n\
+operations, and @code{COSTS_N_INSNS (1)} for all other operations.\n\
+\n\
+When optimizing for code size, i.e.@: when @code{speed} is\n\
+false, this target hook should be used to estimate the relative\n\
+size cost of an expression, again relative to @code{COSTS_N_INSNS}.\n\
+\n\
+The hook returns true when all subexpressions of @var{x} have been\n\
+processed, and false when @code{rtx_cost} should recurse.",
+ bool, (rtx x, machine_mode mode, int outer_code, int opno, int *total, bool speed),
+ hook_bool_rtx_mode_int_int_intp_bool_false)
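+
+/* Illustrative sketch, not part of this import: a minimal
+   TARGET_RTX_COSTS implementation for a hypothetical "mymach" core
+   with a slow multiplier and a very slow divider.  */
+
+static bool
+mymach_rtx_costs (rtx x, machine_mode, int, int, int *total, bool speed)
+{
+  switch (GET_CODE (x))
+    {
+    case MULT:
+      /* Four "fast" instructions when optimizing for speed; assume a
+         compact two-instruction sequence when optimizing for size.  */
+      *total = COSTS_N_INSNS (speed ? 4 : 2);
+      return false;  /* Let rtx_cost recurse into the operands.  */
+
+    case DIV:
+    case UDIV:
+    case MOD:
+    case UMOD:
+      *total = COSTS_N_INSNS (16);
+      return false;
+
+    default:
+      /* Keep the default estimate already stored in *TOTAL.  */
+      return false;
+    }
+}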
+
+/* Compute the cost of X, used as an address. Never called with
+ invalid addresses. */
+DEFHOOK
+(address_cost,
+ "This hook computes the cost of an addressing mode that contains\n\
+@var{address}. If not defined, the cost is computed from\n\
+the @var{address} expression and the @code{TARGET_RTX_COSTS} hook.\n\
+\n\
+For most CISC machines, the default cost is a good approximation of the\n\
+true cost of the addressing mode. However, on RISC machines, all\n\
+instructions normally have the same length and execution time. Hence\n\
+all addresses will have equal costs.\n\
+\n\
+In cases where more than one form of an address is known, the form with\n\
+the lowest cost will be used. If multiple forms have the same, lowest,\n\
+cost, the one that is the most complex will be used.\n\
+\n\
+For example, suppose an address that is equal to the sum of a register\n\
+and a constant is used twice in the same basic block. When this macro\n\
+is not defined, the address will be computed in a register and memory\n\
+references will be indirect through that register. On machines where\n\
+the cost of the addressing mode containing the sum is no higher than\n\
+that of a simple indirect reference, this will produce an additional\n\
+instruction and possibly require an additional register. Proper\n\
+specification of this macro eliminates this overhead for such machines.\n\
+\n\
+This hook is never called with an invalid address.\n\
+\n\
+On machines where an address involving more than one register is as\n\
+cheap as an address computation involving only one register, defining\n\
+@code{TARGET_ADDRESS_COST} to reflect this can cause two registers to\n\
+be live over a region of code where only one would have been if\n\
+@code{TARGET_ADDRESS_COST} were not defined in that manner. This effect\n\
+should be considered in the definition of this macro. Equivalent costs\n\
+should probably only be given to addresses with different numbers of\n\
+registers on machines with lots of registers.",
+ int, (rtx address, machine_mode mode, addr_space_t as, bool speed),
+ default_address_cost)
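+
+/* Illustrative sketch, not part of this import: on a hypothetical
+   "mymach" port where reg and reg+offset addressing are single-cycle,
+   the hook could rank other address forms as more expensive.  */
+
+static int
+mymach_address_cost (rtx address, machine_mode, addr_space_t, bool)
+{
+  if (REG_P (address))
+    return COSTS_N_INSNS (1);
+
+  if (GET_CODE (address) == PLUS
+      && REG_P (XEXP (address, 0))
+      && CONST_INT_P (XEXP (address, 1)))
+    return COSTS_N_INSNS (1);
+
+  /* Anything else needs an extra address-generation step.  */
+  return COSTS_N_INSNS (2);
+}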
+
+/* Compute a cost for INSN. */
+DEFHOOK
+(insn_cost,
+ "This target hook describes the relative costs of RTL instructions.\n\
+\n\
+In implementing this hook, you can use the construct\n\
+@code{COSTS_N_INSNS (@var{n})} to specify a cost equal to @var{n} fast\n\
+instructions.\n\
+\n\
+When optimizing for code size, i.e.@: when @code{speed} is\n\
+false, this target hook should be used to estimate the relative\n\
+size cost of an expression, again relative to @code{COSTS_N_INSNS}.",
+ int, (rtx_insn *insn, bool speed), NULL)
+
+/* Give a cost, in RTX Costs units, for an edge. Like BRANCH_COST, but with
+ well defined units. */
+DEFHOOK
+(max_noce_ifcvt_seq_cost,
+ "This hook returns a value in the same units as @code{TARGET_RTX_COSTS},\n\
+giving the maximum acceptable cost for a sequence generated by the RTL\n\
+if-conversion pass when conditional execution is not available.\n\
+The RTL if-conversion pass attempts to convert conditional operations\n\
+that would require a branch to a series of unconditional operations and\n\
+@code{mov@var{mode}cc} insns. This hook returns the maximum cost of the\n\
+unconditional instructions and the @code{mov@var{mode}cc} insns.\n\
+RTL if-conversion is cancelled if the cost of the converted sequence\n\
+is greater than the value returned by this hook.\n\
+\n\
+@var{e} is the edge from the basic block containing the conditional\n\
+branch to the basic block which would be executed if the condition\n\
+were true.\n\
+\n\
+The default implementation of this hook uses the\n\
+@code{max-rtl-if-conversion-[un]predictable} parameters if they are set,\n\
+and uses a multiple of @code{BRANCH_COST} otherwise.",
+unsigned int, (edge e),
+default_max_noce_ifcvt_seq_cost)
+
+/* Return true if the given instruction sequence is a good candidate
+ as a replacement for the if-convertible sequence. */
+DEFHOOK
+(noce_conversion_profitable_p,
+ "This hook returns true if the instruction sequence @code{seq} is a good\n\
+candidate as a replacement for the if-convertible sequence described in\n\
+@code{if_info}.",
+bool, (rtx_insn *seq, struct noce_if_info *if_info),
+default_noce_conversion_profitable_p)
+
+/* Return true if new_addr should be preferred over the existing address used by
+ memref in insn. */
+DEFHOOK
+(new_address_profitable_p,
+ "Return @code{true} if it is profitable to replace the address in\n\
+@var{memref} with @var{new_addr}. This allows targets to prevent the\n\
+scheduler from undoing address optimizations. The instruction containing the\n\
+memref is @var{insn}. The default implementation returns @code{true}.",
+bool, (rtx memref, rtx_insn * insn, rtx new_addr),
+default_new_address_profitable_p)
+
+DEFHOOK
+(estimated_poly_value,
+ "Return an estimate of the runtime value of @var{val}, for use in\n\
+things like cost calculations or profiling frequencies. @var{kind} is used\n\
+to ask for the minimum, maximum, and likely estimates of the value through\n\
+the @code{POLY_VALUE_MIN}, @code{POLY_VALUE_MAX} and\n\
+@code{POLY_VALUE_LIKELY} values. The default\n\
+implementation returns the lowest possible value of @var{val}.",
+ HOST_WIDE_INT, (poly_int64 val, poly_value_estimate_kind kind),
+ default_estimated_poly_value)
+
+/* Permit speculative instructions in delay slots during delayed-branch
+ scheduling. */
+DEFHOOK
+(no_speculation_in_delay_slots_p,
+ "This predicate controls the use of the eager delay slot filler to disallow\n\
+speculatively executed instructions from being placed in delay slots. Targets\n\
+such as certain MIPS architectures possess both branches with and without\n\
+delay slots. As the eager delay slot filler can decrease performance,\n\
+disabling it is beneficial when ordinary branches are available. Use of\n\
+delay slot branches filled using the basic filler is often still desirable\n\
+as the delay slot can hide a pipeline bubble.",
+ bool, (void),
+ hook_bool_void_false)
+
+/* Return where to allocate pseudo for a given hard register initial value. */
+DEFHOOK
+(allocate_initial_value,
+ "\n\
+When the initial value of a hard register has been copied in a pseudo\n\
+register, it is often not necessary to actually allocate another register\n\
+to this pseudo register, because the original hard register or a stack slot\n\
+it has been saved into can be used. @code{TARGET_ALLOCATE_INITIAL_VALUE}\n\
+is called at the start of register allocation once for each hard register\n\
+that had its initial value copied by using\n\
+@code{get_func_hard_reg_initial_val} or @code{get_hard_reg_initial_val}.\n\
+Possible values are @code{NULL_RTX}, if you don't want\n\
+to do any special allocation, a @code{REG} rtx---that would typically be\n\
+the hard register itself, if it is known not to be clobbered---or a\n\
+@code{MEM}.\n\
+If you are returning a @code{MEM}, this is only a hint for the allocator;\n\
+it might decide to use another register anyway.\n\
+You may use @code{current_function_is_leaf} or\n\
+@code{REG_N_SETS} in the hook to determine if the hard\n\
+register in question will not be clobbered.\n\
+The default value of this hook is @code{NULL}, which disables any special\n\
+allocation.",
+ rtx, (rtx hard_reg), NULL)
+
+/* Return nonzero if evaluating UNSPEC X might cause a trap.
+ FLAGS has the same meaning as in rtlanal.cc: may_trap_p_1. */
+DEFHOOK
+(unspec_may_trap_p,
+ "This target hook returns nonzero if @var{x}, an @code{unspec} or\n\
+@code{unspec_volatile} operation, might cause a trap. Targets can use\n\
+this hook to enhance precision of analysis for @code{unspec} and\n\
+@code{unspec_volatile} operations. You may call @code{may_trap_p_1}\n\
+to analyze inner elements of @var{x} in which case @var{flags} should be\n\
+passed along.",
+ int, (const_rtx x, unsigned flags),
+ default_unspec_may_trap_p)
+
+/* Given a register, this hook should return a parallel of registers
+ to represent where to find the register pieces. Define this hook
+ if the register and its mode are represented in Dwarf in
+ non-contiguous locations, or if the register should be
+ represented in more than one register in Dwarf. Otherwise, this
+ hook should return NULL_RTX. */
+DEFHOOK
+(dwarf_register_span,
+ "Given a register, this hook should return a parallel of registers to\n\
+represent where to find the register pieces. Define this hook if the\n\
+register and its mode are represented in Dwarf in non-contiguous\n\
+locations, or if the register should be represented in more than one\n\
+register in Dwarf. Otherwise, this hook should return @code{NULL_RTX}.\n\
+If not defined, the default is to return @code{NULL_RTX}.",
+ rtx, (rtx reg),
+ hook_rtx_rtx_null)
+
+/* Given a register return the mode of the corresponding DWARF frame
+ register. */
+DEFHOOK
+(dwarf_frame_reg_mode,
+ "Given a register, this hook should return the mode which the\n\
+corresponding Dwarf frame register should have. This is normally\n\
+used to return a smaller mode than the raw mode to prevent call-clobbered\n\
+parts of a register from altering the frame register size.",
+ machine_mode, (int regno),
+ default_dwarf_frame_reg_mode)
+
+/* If expand_builtin_init_dwarf_reg_sizes needs to fill in table
+ entries not corresponding directly to registers below
+ FIRST_PSEUDO_REGISTER, this hook should generate the necessary
+ code, given the address of the table. */
+DEFHOOK
+(init_dwarf_reg_sizes_extra,
+ "If some registers are represented in Dwarf-2 unwind information in\n\
+multiple pieces, define this hook to fill in information about the\n\
+sizes of those pieces in the table used by the unwinder at runtime.\n\
+It will be called by @code{expand_builtin_init_dwarf_reg_sizes} after\n\
+filling in a single size corresponding to each hard register;\n\
+@var{address} is the address of the table.",
+ void, (tree address),
+ hook_void_tree)
+
+/* Fetch the fixed register(s) which hold condition codes, for
+ targets where it makes sense to look for duplicate assignments to
+ the condition codes. This should return true if there is such a
+ register, false otherwise. The arguments should be set to the
+ fixed register numbers. Up to two condition code registers are
+ supported. If there is only one for this target, the int pointed
+ at by the second argument should be set to -1. */
+DEFHOOK
+(fixed_condition_code_regs,
+ "On targets which use a hard\n\
+register rather than a pseudo-register to hold condition codes, the\n\
+regular CSE passes are often not able to identify cases in which the\n\
+hard register is set to a common value. Use this hook to enable a\n\
+small pass which optimizes such cases. This hook should return true\n\
+to enable this pass, and it should set the integers to which its\n\
+arguments point to the hard register numbers used for condition codes.\n\
+When there is only one such register, as is true on most systems, the\n\
+integer pointed to by @var{p2} should be set to\n\
+@code{INVALID_REGNUM}.\n\
+\n\
+The default version of this hook returns false.",
+ bool, (unsigned int *p1, unsigned int *p2),
+ hook_bool_uintp_uintp_false)
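+
+/* Illustrative sketch, not part of this import: a port with a single
+   flags register would report it like this.  CC_REGNUM is a
+   hypothetical hard register number defined by the port.  */
+
+static bool
+mymach_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
+{
+  *p1 = CC_REGNUM;       /* The port's (hypothetical) flags register.  */
+  *p2 = INVALID_REGNUM;  /* There is no second condition code register.  */
+  return true;
+}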
+
+/* If two condition code modes are compatible, return a condition
+ code mode which is compatible with both, such that a comparison
+ done in the returned mode will work for both of the original
+ modes. If the condition code modes are not compatible, return
+ VOIDmode. */
+DEFHOOK
+(cc_modes_compatible,
+ "On targets which use multiple condition code modes in class\n\
+@code{MODE_CC}, it is sometimes the case that a comparison can be\n\
+validly done in more than one mode. On such a system, define this\n\
+target hook to take two mode arguments and to return a mode in which\n\
+both comparisons may be validly done. If there is no such mode,\n\
+return @code{VOIDmode}.\n\
+\n\
+The default version of this hook checks whether the modes are the\n\
+same. If they are, it returns that mode. If they are different, it\n\
+returns @code{VOIDmode}.",
+ machine_mode, (machine_mode m1, machine_mode m2),
+ default_cc_modes_compatible)
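+
+/* Illustrative sketch, not part of this import: assuming the port
+   declares a reduced CC_NZmode in its <port>-modes.def alongside the
+   full CCmode, a comparison done in CCmode is valid for both.  */
+
+static machine_mode
+mymach_cc_modes_compatible (machine_mode m1, machine_mode m2)
+{
+  if (m1 == m2)
+    return m1;
+
+  /* CCmode sets all flags, so it subsumes the N/Z-only mode.  */
+  if ((m1 == CCmode && m2 == CC_NZmode)
+      || (m1 == CC_NZmode && m2 == CCmode))
+    return CCmode;
+
+  return VOIDmode;
+}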
+
+/* Do machine-dependent code transformations. Called just before
+ delayed-branch scheduling. */
+DEFHOOK
+(machine_dependent_reorg,
+ "If non-null, this hook performs a target-specific pass over the\n\
+instruction stream. The compiler will run it at all optimization levels,\n\
+just before the point at which it normally does delayed-branch scheduling.\n\
+\n\
+The exact purpose of the hook varies from target to target. Some use\n\
+it to do transformations that are necessary for correctness, such as\n\
+laying out in-function constant pools or avoiding hardware hazards.\n\
+Others use it as an opportunity to do some machine-dependent optimizations.\n\
+\n\
+You need not implement the hook if it has nothing to do. The default\n\
+definition is null.",
+ void, (void), NULL)
+
+/* Create the __builtin_va_list type. */
+DEFHOOK
+(build_builtin_va_list,
+ "This hook returns a type node for @code{va_list} for the target.\n\
+The default version of the hook returns @code{void*}.",
+ tree, (void),
+ std_build_builtin_va_list)
+
+/* Enumerate the va list variants. */
+DEFHOOK
+(enum_va_list_p,
+ "This target hook is used in function @code{c_common_nodes_and_builtins}\n\
+to iterate through the target-specific builtin types for @code{va_list}.\n\
+The variable @var{idx} is used as the iterator. @var{pname} has to be a\n\
+pointer to a @code{const char *} and @var{ptree} a pointer to a @code{tree}\n\
+typed variable.\n\
+The arguments @var{pname} and @var{ptree} are used to store the result of\n\
+this hook and are set to the name of the va_list builtin type and its\n\
+internal type.\n\
+If the return value of this hook is zero, then there are no more elements.\n\
+Otherwise @var{idx} should be increased for the next call of this\n\
+hook to iterate through all types.",
+ int, (int idx, const char **pname, tree *ptree),
+ NULL)
+
+/* Get the cfun/fndecl calling abi __builtin_va_list type. */
+DEFHOOK
+(fn_abi_va_list,
+ "This hook returns the va_list type of the calling convention specified by\n\
+@var{fndecl}.\n\
+The default version of this hook returns @code{va_list_type_node}.",
+ tree, (tree fndecl),
+ std_fn_abi_va_list)
+
+/* Get the __builtin_va_list type dependent on input type. */
+DEFHOOK
+(canonical_va_list_type,
+ "This hook returns the va_list type of the calling convention specified by the\n\
+type of @var{type}. If @var{type} is not a valid va_list type, it returns\n\
+@code{NULL_TREE}.",
+ tree, (tree type),
+ std_canonical_va_list_type)
+
+/* ??? Documenting this hook requires a GFDL license grant. */
+DEFHOOK_UNDOC
+(expand_builtin_va_start,
+"Expand the @code{__builtin_va_start} builtin.",
+ void, (tree valist, rtx nextarg), NULL)
+
+/* Gimplifies a VA_ARG_EXPR. */
+DEFHOOK
+(gimplify_va_arg_expr,
+ "This hook performs target-specific gimplification of\n\
+@code{VA_ARG_EXPR}. The first two parameters correspond to the\n\
+arguments to @code{va_arg}; the latter two are as in\n\
+@code{gimplify.cc:gimplify_expr}.",
+ tree, (tree valist, tree type, gimple_seq *pre_p, gimple_seq *post_p),
+ std_gimplify_va_arg_expr)
+
+/* Validity-checking routines for PCH files, target-specific.
+ get_pch_validity returns a pointer to the data to be stored,
+ and stores the size in its argument. pch_valid_p gets the same
+ information back and returns NULL if the PCH is valid,
+ or an error message if not. */
+DEFHOOK
+(get_pch_validity,
+ "This hook returns a pointer to the data needed by\n\
+@code{TARGET_PCH_VALID_P} and sets\n\
+@samp{*@var{sz}} to the size of the data in bytes.",
+ void *, (size_t *sz),
+ default_get_pch_validity)
+
+DEFHOOK
+(pch_valid_p,
+ "This hook checks whether the options used to create a PCH file are\n\
+compatible with the current settings. It returns @code{NULL}\n\
+if so and a suitable error message if not. Error messages will\n\
+be presented to the user and must be localized using @samp{_(@var{msg})}.\n\
+\n\
+@var{data} is the data that was returned by @code{TARGET_GET_PCH_VALIDITY}\n\
+when the PCH file was created and @var{sz} is the size of that data in bytes.\n\
+It's safe to assume that the data was created by the same version of the\n\
+compiler, so no format checking is needed.\n\
+\n\
+The default implementation, @code{default_pch_valid_p}, should be\n\
+suitable for most targets.",
+ const char *, (const void *data, size_t sz),
+ default_pch_valid_p)
+
+DEFHOOK
+(prepare_pch_save,
+ "Called before writing out a PCH file. If the target has some\n\
+garbage-collected data that needs to be in a particular state on PCH loads,\n\
+it can use this hook to enforce that state. Very few targets need\n\
+to do anything here.",
+ void, (void),
+ hook_void_void)
+
+/* If nonnull, this function checks whether a PCH file with the
+ given set of target flags can be used. It returns NULL if so,
+ otherwise it returns an error message. */
+DEFHOOK
+(check_pch_target_flags,
+ "If this hook is nonnull, the default implementation of\n\
+@code{TARGET_PCH_VALID_P} will use it to check for compatible values\n\
+of @code{target_flags}. @var{pch_flags} specifies the value that\n\
+@code{target_flags} had when the PCH file was created. The return\n\
+value is the same as for @code{TARGET_PCH_VALID_P}.",
+ const char *, (int pch_flags), NULL)
+
+/* True if the compiler should give an enum type only as many
+ bytes as it takes to represent the range of possible values of
+ that type. */
+DEFHOOK
+(default_short_enums,
+ "This target hook should return true if the compiler should give an\n\
+@code{enum} type only as many bytes as it takes to represent the range\n\
+of possible values of that type. It should return false if all\n\
+@code{enum} types should be allocated like @code{int}.\n\
+\n\
+The default is to return false.",
+ bool, (void),
+ hook_bool_void_false)
+
+/* This target hook returns an rtx that is used to store the address
+ of the current frame into the built-in setjmp buffer. */
+DEFHOOK
+(builtin_setjmp_frame_value,
+ "This target hook should return an rtx that is used to store\n\
+the address of the current frame into the built in @code{setjmp} buffer.\n\
+The default value, @code{virtual_stack_vars_rtx}, is correct for most\n\
+machines. One reason you may need to define this target hook is if\n\
+@code{hard_frame_pointer_rtx} is the appropriate value on your machine.",
+ rtx, (void),
+ default_builtin_setjmp_frame_value)
+
+/* This target hook should manipulate the outputs, inputs, constraints,
+ and clobbers the port wishes for pre-processing the asm. */
+DEFHOOK
+(md_asm_adjust,
+ "This target hook may add @dfn{clobbers} to @var{clobbers} and\n\
+@var{clobbered_regs} for any hard regs the port wishes to automatically\n\
+clobber for an asm. The @var{outputs} and @var{inputs} may be inspected\n\
+to avoid clobbering a register that is already used by the asm. @var{loc}\n\
+is the source location of the asm.\n\
+\n\
+It may modify the @var{outputs}, @var{inputs}, @var{input_modes}, and\n\
+@var{constraints} as necessary for other pre-processing. In this case the\n\
+return value is a sequence of insns to emit after the asm. Note that\n\
+changes to @var{inputs} must be accompanied by the corresponding changes\n\
+to @var{input_modes}.",
+ rtx_insn *,
+ (vec<rtx>& outputs, vec<rtx>& inputs, vec<machine_mode>& input_modes,
+ vec<const char *>& constraints, vec<rtx>& clobbers,
+ HARD_REG_SET& clobbered_regs, location_t loc),
+ NULL)
+
+/* This target hook allows the backend to specify a calling convention
+ in the debug information. This function actually returns an
+ enum dwarf_calling_convention, but because of forward declarations
+ and not wanting to include dwarf2.h everywhere target.h is included
+ the function is being declared as an int. */
+DEFHOOK
+(dwarf_calling_convention,
+ "Define this to enable the dwarf attribute @code{DW_AT_calling_convention} to\n\
+be emitted for each function. Instead of an integer, return the enum\n\
+value for the @code{DW_CC_} tag.",
+ int, (const_tree function),
+ hook_int_const_tree_0)
+
+/* This target hook allows the backend to emit frame-related insns that
+ contain UNSPECs or UNSPEC_VOLATILEs. The call frame debugging info
+ engine will invoke it on insns of the form
+ (set (reg) (unspec [...] UNSPEC_INDEX))
+ and
+ (set (reg) (unspec_volatile [...] UNSPECV_INDEX))
+ to let the backend emit the call frame instructions. */
+DEFHOOK
+(dwarf_handle_frame_unspec,
+ "This target hook allows the backend to emit frame-related insns that\n\
+contain UNSPECs or UNSPEC_VOLATILEs. The DWARF 2 call frame debugging\n\
+info engine will invoke it on insns of the form\n\
+@smallexample\n\
+(set (reg) (unspec [@dots{}] UNSPEC_INDEX))\n\
+@end smallexample\n\
+and\n\
+@smallexample\n\
+(set (reg) (unspec_volatile [@dots{}] UNSPECV_INDEX)).\n\
+@end smallexample\n\
+to let the backend emit the call frame instructions. @var{label} is\n\
+the CFI label attached to the insn, @var{pattern} is the pattern of\n\
+the insn and @var{index} is @code{UNSPEC_INDEX} or @code{UNSPECV_INDEX}.",
+ void, (const char *label, rtx pattern, int index), NULL)
+
+DEFHOOK
+(dwarf_poly_indeterminate_value,
+ "Express the value of @code{poly_int} indeterminate @var{i} as a DWARF\n\
+expression, with @var{i} counting from 1. Return the number of a DWARF\n\
+register @var{R} and set @samp{*@var{factor}} and @samp{*@var{offset}} such\n\
+that the value of the indeterminate is:\n\
+@smallexample\n\
+value_of(@var{R}) / @var{factor} - @var{offset}\n\
+@end smallexample\n\
+\n\
+A target only needs to define this hook if it sets\n\
+@samp{NUM_POLY_INT_COEFFS} to a value greater than 1.",
+ unsigned int, (unsigned int i, unsigned int *factor, int *offset),
+ default_dwarf_poly_indeterminate_value)
+
+/* ??? Documenting this hook requires a GFDL license grant. */
+DEFHOOK_UNDOC
+(stdarg_optimize_hook,
+"Perform architecture specific checking of statements gimplified\
+ from @code{VA_ARG_EXPR}. @var{stmt} is the statement. Returns true if\
+ the statement doesn't need to be checked for @code{va_list} references.",
+ bool, (struct stdarg_info *ai, const gimple *stmt), NULL)
+
+/* This target hook allows the operating system to override the DECL
+ that represents the external variable that contains the stack
+ protection guard variable. The type of this DECL is ptr_type_node. */
+DEFHOOK
+(stack_protect_guard,
+ "This hook returns a @code{DECL} node for the external variable to use\n\
+for the stack protection guard. This variable is initialized by the\n\
+runtime to some random value and is used to initialize the guard value\n\
+that is placed at the top of the local stack frame. The type of this\n\
+variable must be @code{ptr_type_node}.\n\
+\n\
+The default version of this hook creates a variable called\n\
+@samp{__stack_chk_guard}, which is normally defined in @file{libgcc2.c}.",
+ tree, (void),
+ default_stack_protect_guard)
+
+/* This target hook allows the operating system to override the CALL_EXPR
+ that is invoked when a check vs the guard variable fails. */
+DEFHOOK
+(stack_protect_fail,
+ "This hook returns a @code{CALL_EXPR} that alerts the runtime that the\n\
+stack protect guard variable has been modified. This expression should\n\
+involve a call to a @code{noreturn} function.\n\
+\n\
+The default version of this hook invokes a function called\n\
+@samp{__stack_chk_fail}, taking no arguments. This function is\n\
+normally defined in @file{libgcc2.c}.",
+ tree, (void),
+ default_external_stack_protect_fail)
+
+/* This target hook allows the operating system to disable the default stack
+ protector runtime support. */
+DEFHOOK
+(stack_protect_runtime_enabled_p,
+ "Returns true if the target wants GCC's default stack protect runtime support,\n\
+otherwise return false. The default implementation always returns true.",
+ bool, (void),
+ hook_bool_void_true)
+
+DEFHOOK
+(have_speculation_safe_value,
+"This hook is used to determine the level of target support for\n\
+ @code{__builtin_speculation_safe_value}. If called with an argument\n\
+ of false, it returns true if the target has been modified to support\n\
+ this builtin. If called with an argument of true, it returns true\n\
+ if the target requires active mitigation when execution might be speculative.\n\
+ \n\
+ The default implementation returns false if the target does not define\n\
+ a pattern named @code{speculation_barrier}. Else it returns true\n\
+ for the first case and whether the pattern is enabled for the current\n\
+ compilation for the second case.\n\
+ \n\
+ For targets that have no processors that can execute instructions\n\
+ speculatively, an alternative implementation of this hook is available:\n\
+ simply redefine this hook to @code{speculation_safe_value_not_needed}\n\
+ along with your other target hooks.",
+bool, (bool active), default_have_speculation_safe_value)
+
+DEFHOOK
+(speculation_safe_value,
+"This target hook can be used to generate a target-specific code\n\
+ sequence that implements the @code{__builtin_speculation_safe_value}\n\
+ built-in function. The function must always return @var{val} in\n\
+ @var{result} in mode @var{mode} when the cpu is not executing\n\
+ speculatively, but must never return that when speculating until it\n\
+ is known that the speculation will not be unwound. The hook supports\n\
+ two primary mechanisms for implementing the requirements. The first\n\
+ is to emit a speculation barrier which forces the processor to wait\n\
+ until all prior speculative operations have been resolved; the second\n\
+ is to use a target-specific mechanism that can track the speculation\n\
+ state and to return @var{failval} if it can determine that\n\
+ speculation must be unwound at a later time.\n\
+ \n\
+ The default implementation simply copies @var{val} to @var{result} and\n\
+ emits a @code{speculation_barrier} instruction if that is defined.",
+rtx, (machine_mode mode, rtx result, rtx val, rtx failval),
+ default_speculation_safe_value)
+
+DEFHOOK
+(predict_doloop_p,
+ "Return true if we can predict it is possible to use a low-overhead loop\n\
+for a particular loop. The parameter @var{loop} is a pointer to the loop.\n\
+This target hook is required only when the target supports low-overhead\n\
+loops, and will help ivopts to make some decisions.\n\
+The default version of this hook returns false.",
+ bool, (class loop *loop),
+ default_predict_doloop_p)
+
+DEFHOOKPOD
+(have_count_reg_decr_p,
+ "Return true if the target supports hardware count register for decrement\n\
+and branch.\n\
+The default value is false.",
+ bool, false)
+
+DEFHOOKPOD
+(doloop_cost_for_generic,
+ "One IV candidate dedicated for doloop is introduced in IVOPTs, we can\n\
+calculate the computation cost of adopting it to any generic IV use by\n\
+function get_computation_cost as before. But for targets which have\n\
+hardware count register support for decrement and branch, it may have to\n\
+move IV value from hardware count register to general purpose register\n\
+while doloop IV candidate is used for generic IV uses. It probably takes\n\
+expensive penalty. This hook allows target owners to define the cost for\n\
+this especially for generic IV uses.\n\
+The default value is zero.",
+ int64_t, 0)
+
+DEFHOOKPOD
+(doloop_cost_for_address,
+ "One IV candidate dedicated for doloop is introduced in IVOPTs, we can\n\
+calculate the computation cost of adopting it to any address IV use by\n\
+function get_computation_cost as before. But for targets which have\n\
+hardware count register support for decrement and branch, it may have to\n\
+move IV value from hardware count register to general purpose register\n\
+while doloop IV candidate is used for address IV uses. It probably takes\n\
+expensive penalty. This hook allows target owners to define the cost for\n\
+this escpecially for address IV uses.\n\
+The default value is zero.",
+ int64_t, 0)
+
+DEFHOOK
+(can_use_doloop_p,
+ "Return true if it is possible to use low-overhead loops (@code{doloop_end}\n\
+and @code{doloop_begin}) for a particular loop. @var{iterations} gives the\n\
+exact number of iterations, or 0 if not known. @var{iterations_max} gives\n\
+the maximum number of iterations, or 0 if not known. @var{loop_depth} is\n\
+the nesting depth of the loop, with 1 for innermost loops, 2 for loops that\n\
+contain innermost loops, and so on. @var{entered_at_top} is true if the\n\
+loop is only entered from the top.\n\
+\n\
+This hook is only used if @code{doloop_end} is available. The default\n\
+implementation returns true. You can use @code{can_use_doloop_if_innermost}\n\
+if the loop must be the innermost, and if there are no other restrictions.",
+ bool, (const widest_int &iterations, const widest_int &iterations_max,
+ unsigned int loop_depth, bool entered_at_top),
+ hook_bool_wint_wint_uint_bool_true)
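+
+/* Illustrative sketch, not part of this import: a port whose
+   (hypothetical) count register is not preserved across nested loops
+   could restrict low-overhead loops to the innermost level.  The
+   predefined can_use_doloop_if_innermost helper expresses the same
+   policy.  */
+
+static bool
+mymach_can_use_doloop_p (const widest_int &, const widest_int &,
+                         unsigned int loop_depth, bool)
+{
+  return loop_depth == 1;
+}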
+
+/* Returns NULL if target supports the insn within a doloop block,
+ otherwise it returns an error message. */
+DEFHOOK
+(invalid_within_doloop,
+ "\n\
+Take an instruction in @var{insn} and return NULL if it is valid within a\n\
+low-overhead loop, otherwise return a string explaining why doloop\n\
+could not be applied.\n\
+\n\
+Many targets use special registers for low-overhead looping. For any\n\
+instruction that clobbers these, this hook should return a string\n\
+indicating the reason why the doloop could not be applied.\n\
+By default, the RTL loop optimizer does not use an available doloop\n\
+pattern for loops containing function calls or branch-on-table\n\
+instructions.",
+ const char *, (const rtx_insn *insn),
+ default_invalid_within_doloop)
+
+/* Returns the machine mode which the target prefers for doloop IV. */
+DEFHOOK
+(preferred_doloop_mode,
+"This hook takes a @var{mode} for a doloop IV, where @code{mode} is the\n\
+original mode for the operation. If the target prefers an alternate\n\
+@code{mode} for the operation, then this hook should return that mode;\n\
+otherwise the original @code{mode} should be returned. For example, on a\n\
+64-bit target, @code{DImode} might be preferred over @code{SImode}. Both the\n\
+original and the returned modes should be @code{MODE_INT}.",
+ machine_mode,
+ (machine_mode mode),
+ default_preferred_doloop_mode)
+
+/* Returns true for a legitimate combined insn. */
+DEFHOOK
+(legitimate_combined_insn,
+"Take an instruction in @var{insn} and return @code{false} if the instruction\n\
+is not appropriate as a combination of two or more instructions. The\n\
+default is to accept all instructions.",
+ bool, (rtx_insn *insn),
+ hook_bool_rtx_insn_true)
+
+DEFHOOK
+(valid_dllimport_attribute_p,
+"@var{decl} is a variable or function with @code{__attribute__((dllimport))}\n\
+specified. Use this hook if the target needs to add extra validation\n\
+checks to @code{handle_dll_attribute}.",
+ bool, (const_tree decl),
+ hook_bool_const_tree_true)
+
+/* If non-zero, align constant anchors in CSE to a multiple of this
+ value. */
+DEFHOOKPOD
+(const_anchor,
+ "On some architectures it can take multiple instructions to synthesize\n\
+a constant. If there is another constant already in a register that\n\
+is close enough in value then it is preferable that the new constant\n\
+is computed from this register using immediate addition or\n\
+subtraction. We accomplish this through CSE. Besides the value of\n\
+the constant we also add a lower and an upper constant anchor to the\n\
+available expressions. These are then queried when encountering new\n\
+constants. The anchors are computed by rounding the constant up and\n\
+down to a multiple of the value of @code{TARGET_CONST_ANCHOR}.\n\
+@code{TARGET_CONST_ANCHOR} should be the maximum positive value\n\
+accepted by immediate-add plus one. We currently assume that the\n\
+value of @code{TARGET_CONST_ANCHOR} is a power of 2. For example, on\n\
+MIPS, where add-immediate takes a 16-bit signed value,\n\
+@code{TARGET_CONST_ANCHOR} is set to @samp{0x8000}. The default value\n\
+is zero, which disables this optimization.",
+ unsigned HOST_WIDE_INT, 0)
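+
+/* Illustrative usage sketch, not part of this import: per the text
+   above, a port whose add-immediate takes a 16-bit signed value (as
+   on MIPS) would enable constant anchoring with:
+
+   #undef  TARGET_CONST_ANCHOR
+   #define TARGET_CONST_ANCHOR 0x8000  */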
+
+/* Defines which target-dependent bits (upper 16) are used by the port. */
+DEFHOOK
+(memmodel_check,
+ "Validate target specific memory model mask bits. When NULL no target specific\n\
+memory model bits are allowed.",
+ unsigned HOST_WIDE_INT, (unsigned HOST_WIDE_INT val), NULL)
+
+/* Defines an offset bitwise ORed into the shifted address to get the
+   corresponding Address Sanitizer shadow address, or -1 if Address
+   Sanitizer is not supported by the target. */
+DEFHOOK
+(asan_shadow_offset,
+ "Return the offset bitwise ored into shifted address to get corresponding\n\
+Address Sanitizer shadow memory address. NULL if Address Sanitizer is not\n\
+supported by the target. May return 0 if Address Sanitizer is not supported\n\
+by a subtarget.",
+ unsigned HOST_WIDE_INT, (void),
+ NULL)
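+
+/* Illustrative sketch, not part of this import: a 64-bit port would
+   return its sanitizer shadow offset here; 0x7fff8000 is the value
+   used by x86-64 GNU/Linux and is shown purely as an example.  */
+
+static unsigned HOST_WIDE_INT
+mymach_asan_shadow_offset (void)
+{
+  return HOST_WIDE_INT_C (0x7fff8000);
+}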
+
+/* Functions relating to calls - argument passing, returns, etc. */
+/* Members of struct call have no special macro prefix. */
+HOOK_VECTOR (TARGET_CALLS, calls)
+
+DEFHOOK
+(promote_function_mode,
+ "Like @code{PROMOTE_MODE}, but it is applied to outgoing function arguments or\n\
+function return values. The target hook should return the new mode\n\
+and possibly change @code{*@var{punsignedp}} if the promotion should\n\
+change signedness. This function is called only for scalar @emph{or\n\
+pointer} types.\n\
+\n\
+@var{for_return} allows distinguishing between the promotion of arguments\n\
+and return values. If it is @code{1}, a return value is being promoted and\n\
+@code{TARGET_FUNCTION_VALUE} must perform the same promotions done here.\n\
+If it is @code{2}, the returned mode should be that of the register in\n\
+which an incoming parameter is copied, or the outgoing result is computed;\n\
+then the hook should return the same mode as @code{promote_mode}, though\n\
+the signedness may be different.\n\
+\n\
+@var{type} can be NULL when promoting function arguments of libcalls.\n\
+\n\
+The default is to not promote arguments and return values. You can\n\
+also define the hook to @code{default_promote_function_mode_always_promote}\n\
+if you would like to apply the same rules given by @code{PROMOTE_MODE}.",
+ machine_mode, (const_tree type, machine_mode mode, int *punsignedp,
+ const_tree funtype, int for_return),
+ default_promote_function_mode)
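+
+/* Illustrative sketch, not part of this import: a port that passes and
+   returns sub-word integers in full words might promote them to
+   word_mode, leaving the signedness in *PUNSIGNEDP unchanged.  */
+
+static machine_mode
+mymach_promote_function_mode (const_tree type, machine_mode mode,
+                              int *, const_tree, int)
+{
+  if (type != NULL_TREE
+      && INTEGRAL_TYPE_P (type)
+      && known_lt (GET_MODE_SIZE (mode), GET_MODE_SIZE (word_mode)))
+    return word_mode;
+  return mode;
+}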
+
+DEFHOOK
+(promote_prototypes,
+ "This target hook returns @code{true} if an argument declared in a\n\
+prototype as an integral type smaller than @code{int} should actually be\n\
+passed as an @code{int}. In addition to avoiding errors in certain\n\
+cases of mismatch, it also makes for better code on certain machines.\n\
+The default is to not promote prototypes.",
+ bool, (const_tree fntype),
+ hook_bool_const_tree_false)
+
+DEFHOOK
+(struct_value_rtx,
+ "This target hook should return the location of the structure value\n\
+address (normally a @code{mem} or @code{reg}), or 0 if the address is\n\
+passed as an ``invisible'' first argument. Note that @var{fndecl} may\n\
+be @code{NULL}, for libcalls. You do not need to define this target\n\
+hook if the address is always passed as an ``invisible'' first\n\
+argument.\n\
+\n\
+On some architectures the place where the structure value address\n\
+is found by the called function is not the same place that the\n\
+caller put it. This can be due to register windows, or it could\n\
+be because the function prologue moves it to a different place.\n\
+@var{incoming} is @code{1} or @code{2} when the location is needed in\n\
+the context of the called function, and @code{0} in the context of\n\
+the caller.\n\
+\n\
+If @var{incoming} is nonzero and the address is to be found on the\n\
+stack, return a @code{mem} which refers to the frame pointer. If\n\
+@var{incoming} is @code{2}, the result is being used to fetch the\n\
+structure value address at the beginning of a function. If you need\n\
+to emit adjusting code, you should do it at this point.",
+ rtx, (tree fndecl, int incoming),
+ hook_rtx_tree_int_null)
+
+DEFHOOKPOD
+(omit_struct_return_reg,
+ "Normally, when a function returns a structure by memory, the address\n\
+is passed as an invisible pointer argument, but the compiler also\n\
+arranges to return the address from the function like it would a normal\n\
+pointer return value. Define this to true if that behavior is\n\
+undesirable on your target.",
+ bool, false)
+
+DEFHOOK
+(return_in_memory,
+ "This target hook should return a nonzero value to say to return the\n\
+function value in memory, just as large structures are always returned.\n\
+Here @var{type} will be the data type of the value, and @var{fntype}\n\
+will be the type of the function doing the returning, or @code{NULL} for\n\
+libcalls.\n\
+\n\
+Note that values of mode @code{BLKmode} must be explicitly handled\n\
+by this function. Also, the option @option{-fpcc-struct-return}\n\
+takes effect regardless of this macro. On most systems, it is\n\
+possible to leave the hook undefined; this causes a default\n\
+definition to be used, whose value is the constant 1 for @code{BLKmode}\n\
+values, and 0 otherwise.\n\
+\n\
+Do not use this hook to indicate that structures and unions should always\n\
+be returned in memory. You should instead use @code{DEFAULT_PCC_STRUCT_RETURN}\n\
+to indicate this.",
+ bool, (const_tree type, const_tree fntype),
+ default_return_in_memory)
+
+DEFHOOK
+(return_in_msb,
+ "This hook should return true if values of type @var{type} are returned\n\
+at the most significant end of a register (in other words, if they are\n\
+padded at the least significant end). You can assume that @var{type}\n\
+is returned in a register; the caller is required to check this.\n\
+\n\
+Note that the register provided by @code{TARGET_FUNCTION_VALUE} must\n\
+be able to hold the complete return value. For example, if a 1-, 2-\n\
+or 3-byte structure is returned at the most significant end of a\n\
+4-byte register, @code{TARGET_FUNCTION_VALUE} should provide an\n\
+@code{SImode} rtx.",
+ bool, (const_tree type),
+ hook_bool_const_tree_false)
+
+/* Return true if a parameter must be passed by reference. TYPE may
+ be null if this is a libcall. CA may be null if this query is
+ from __builtin_va_arg. */
+DEFHOOK
+(pass_by_reference,
+ "This target hook should return @code{true} if argument @var{arg} at the\n\
+position indicated by @var{cum} should be passed by reference. This\n\
+predicate is queried after target independent reasons for being\n\
+passed by reference, such as @code{TREE_ADDRESSABLE (@var{arg}.type)}.\n\
+\n\
+If the hook returns true, a copy of that argument is made in memory and a\n\
+pointer to the argument is passed instead of the argument itself.\n\
+The pointer is passed in whatever way is appropriate for passing a pointer\n\
+to that type.",
+ bool,
+ (cumulative_args_t cum, const function_arg_info &arg),
+ hook_bool_CUMULATIVE_ARGS_arg_info_false)
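+
+/* Illustrative sketch, not part of this import: a hypothetical ABI in
+   which aggregates wider than two words are passed by reference.  */
+
+static bool
+mymach_pass_by_reference (cumulative_args_t, const function_arg_info &arg)
+{
+  return (arg.type != NULL_TREE
+          && AGGREGATE_TYPE_P (arg.type)
+          && int_size_in_bytes (arg.type) > 2 * (int) UNITS_PER_WORD);
+}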
+
+DEFHOOK
+(expand_builtin_saveregs,
+ "If defined, this hook produces the machine-specific code for a call to\n\
+@code{__builtin_saveregs}. This code will be moved to the very\n\
+beginning of the function, before any parameter accesses are made. The\n\
+return value of this function should be an RTX that contains the value\n\
+to use as the return of @code{__builtin_saveregs}.",
+ rtx, (void),
+ default_expand_builtin_saveregs)
+
+/* Returns pretend_argument_size. */
+DEFHOOK
+(setup_incoming_varargs,
+ "This target hook offers an alternative to using\n\
+@code{__builtin_saveregs} and defining the hook\n\
+@code{TARGET_EXPAND_BUILTIN_SAVEREGS}. Use it to store the anonymous\n\
+register arguments into the stack so that all the arguments appear to\n\
+have been passed consecutively on the stack. Once this is done, you can\n\
+use the standard implementation of varargs that works for machines that\n\
+pass all their arguments on the stack.\n\
+\n\
+The argument @var{args_so_far} points to the @code{CUMULATIVE_ARGS} data\n\
+structure, containing the values that are obtained after processing the\n\
+named arguments. The argument @var{arg} describes the last of these named\n\
+arguments. The argument @var{arg} should not be used if the function type\n\
+satisfies @code{TYPE_NO_NAMED_ARGS_STDARG_P}, since in that case there are\n\
+no named arguments and all arguments are accessed with @code{va_arg}.\n\
+\n\
+The target hook should do two things: first, push onto the stack all the\n\
+argument registers @emph{not} used for the named arguments, and second,\n\
+store the size of the data thus pushed into the @code{int}-valued\n\
+variable pointed to by @var{pretend_args_size}. The value that you\n\
+store here will serve as additional offset for setting up the stack\n\
+frame.\n\
+\n\
+Because you must generate code to push the anonymous arguments at\n\
+compile time without knowing their data types,\n\
+@code{TARGET_SETUP_INCOMING_VARARGS} is only useful on machines that\n\
+have just a single category of argument register and use it uniformly\n\
+for all data types.\n\
+\n\
+If the argument @var{second_time} is nonzero, it means that the\n\
+arguments of the function are being analyzed for the second time. This\n\
+happens for an inline function, which is not actually compiled until the\n\
+end of the source file. The hook @code{TARGET_SETUP_INCOMING_VARARGS} should\n\
+not generate any instructions in this case.",
+ void, (cumulative_args_t args_so_far, const function_arg_info &arg,
+ int *pretend_args_size, int second_time),
+ default_setup_incoming_varargs)
+
+DEFHOOK
+(call_args,
+ "While generating RTL for a function call, this target hook is invoked once\n\
+for each argument passed to the function, either a register returned by\n\
+@code{TARGET_FUNCTION_ARG} or a memory location. It is called just\n\
+before the point where argument registers are stored. The type of the\n\
+function to be called is also passed as the second argument; it is\n\
+@code{NULL_TREE} for libcalls. The @code{TARGET_END_CALL_ARGS} hook is\n\
+invoked just after the code to copy the return reg has been emitted.\n\
+This functionality can be used to perform special setup of call argument\n\
+registers if a target needs it.\n\
+For functions without arguments, the hook is called once with @code{pc_rtx}\n\
+passed instead of an argument register.\n\
+Most ports do not need to implement anything for this hook.",
+ void, (rtx, tree),
+ hook_void_rtx_tree)
+
+DEFHOOK
+(end_call_args,
+ "This target hook is invoked while generating RTL for a function call,\n\
+just after the point where the return reg is copied into a pseudo. It\n\
+signals that all the call argument and return registers for the just\n\
+emitted call are now no longer in use.\n\
+Most ports do not need to implement anything for this hook.",
+ void, (void),
+ hook_void_void)
+
+DEFHOOK
+(push_argument,
+ "This target hook returns @code{true} if push instructions will be\n\
+used to pass outgoing arguments. When the push instruction usage is\n\
+optional, @var{npush} is nonzero to indicate the number of bytes to\n\
+push. Otherwise, @var{npush} is zero. If the target machine does not\n\
+have a push instruction or push instructions should be avoided,\n\
+@code{false} should be returned. That directs GCC to use an alternate\n\
+strategy: to allocate the entire argument block and then store the\n\
+arguments into it. If this target hook may return @code{true},\n\
+@code{PUSH_ROUNDING} must be defined.",
+ bool, (unsigned int npush),
+ default_push_argument)
+
+DEFHOOK
+(strict_argument_naming,
+ "Define this hook to return @code{true} if the location where a function\n\
+argument is passed depends on whether or not it is a named argument.\n\
+\n\
+This hook controls how the @var{named} argument to @code{TARGET_FUNCTION_ARG}\n\
+is set for varargs and stdarg functions. If this hook returns\n\
+@code{true}, the @var{named} argument is always true for named\n\
+arguments, and false for unnamed arguments. If it returns @code{false},\n\
+but @code{TARGET_PRETEND_OUTGOING_VARARGS_NAMED} returns @code{true},\n\
+then all arguments are treated as named. Otherwise, all named arguments\n\
+except the last are treated as named.\n\
+\n\
+You need not define this hook if it always returns @code{false}.",
+ bool, (cumulative_args_t ca),
+ hook_bool_CUMULATIVE_ARGS_false)
+
+/* Returns true if we should use
+ targetm.calls.setup_incoming_varargs() and/or
+ targetm.calls.strict_argument_naming(). */
+DEFHOOK
+(pretend_outgoing_varargs_named,
+ "If you need to conditionally change ABIs so that one works with\n\
+@code{TARGET_SETUP_INCOMING_VARARGS}, but the other works like neither\n\
+@code{TARGET_SETUP_INCOMING_VARARGS} nor @code{TARGET_STRICT_ARGUMENT_NAMING} was\n\
+defined, then define this hook to return @code{true} if\n\
+@code{TARGET_SETUP_INCOMING_VARARGS} is used, @code{false} otherwise.\n\
+Otherwise, you should not define this hook.",
+ bool, (cumulative_args_t ca),
+ default_pretend_outgoing_varargs_named)
+
+/* Given a complex type T, return true if a parameter of type T
+ should be passed as two scalars. */
+DEFHOOK
+(split_complex_arg,
+ "This hook should return true if parameter of type @var{type} are passed\n\
+as two scalar parameters. By default, GCC will attempt to pack complex\n\
+arguments into the target's word size. Some ABIs require complex arguments\n\
+to be split and treated as their individual components. For example, on\n\
+AIX64, complex floats should be passed in a pair of floating point\n\
+registers, even though a complex float would fit in one 64-bit floating\n\
+point register.\n\
+\n\
+The default value of this hook is @code{NULL}, which is treated as always\n\
+false.",
+ bool, (const_tree type), NULL)
+
+/* Return true if type T, mode MODE, may not be passed in registers,
+ but must be passed on the stack. */
+/* ??? This predicate should be applied strictly after pass-by-reference.
+ Need audit to verify that this is the case. */
+DEFHOOK
+(must_pass_in_stack,
+ "This target hook should return @code{true} if we should not pass @var{arg}\n\
+solely in registers. The file @file{expr.h} provides a definition that\n\
+is usually appropriate; refer to @file{expr.h} for additional\n\
+documentation.",
+ bool, (const function_arg_info &arg),
+ must_pass_in_stack_var_size_or_pad)
+
+/* Return true if type TYPE, mode MODE, which is passed by reference,
+ should have the object copy generated by the callee rather than
+ the caller. It is never called for TYPE requiring constructors. */
+DEFHOOK
+(callee_copies,
+ "The function argument described by the parameters to this hook is\n\
+known to be passed by reference. The hook should return true if the\n\
+function argument should be copied by the callee instead of copied\n\
+by the caller.\n\
+\n\
+For any argument for which the hook returns true, if it can be\n\
+determined that the argument is not modified, then a copy need\n\
+not be generated.\n\
+\n\
+The default version of this hook always returns false.",
+ bool,
+ (cumulative_args_t cum, const function_arg_info &arg),
+ hook_bool_CUMULATIVE_ARGS_arg_info_false)
+
+/* Return zero for arguments passed entirely on the stack or entirely
+ in registers. If passed in both, return the number of bytes passed
+ in registers; the balance is therefore passed on the stack. */
+DEFHOOK
+(arg_partial_bytes,
+ "This target hook returns the number of bytes at the beginning of an\n\
+argument that must be put in registers. The value must be zero for\n\
+arguments that are passed entirely in registers or that are entirely\n\
+pushed on the stack.\n\
+\n\
+On some machines, certain arguments must be passed partially in\n\
+registers and partially in memory. On these machines, typically the\n\
+first few words of arguments are passed in registers, and the rest\n\
+on the stack. If a multi-word argument (a @code{double} or a\n\
+structure) crosses that boundary, its first few words must be passed\n\
+in registers and the rest must be pushed. This macro tells the\n\
+compiler when this occurs, and how many bytes should go in registers.\n\
+\n\
+@code{TARGET_FUNCTION_ARG} for these arguments should return the first\n\
+register to be used by the caller for this argument; likewise\n\
+@code{TARGET_FUNCTION_INCOMING_ARG}, for the called function.",
+ int, (cumulative_args_t cum, const function_arg_info &arg),
+ hook_int_CUMULATIVE_ARGS_arg_info_0)
+
+/* Update the state in CA to advance past an argument in the
+ argument list. The values MODE, TYPE, and NAMED describe that
+ argument. */
+DEFHOOK
+(function_arg_advance,
+ "This hook updates the summarizer variable pointed to by @var{ca} to\n\
+advance past argument @var{arg} in the argument list. Once this is done,\n\
+the variable @var{ca} is suitable for analyzing the @emph{following}\n\
+argument with @code{TARGET_FUNCTION_ARG}, etc.\n\
+\n\
+This hook need not do anything if the argument in question was passed\n\
+on the stack. The compiler knows how to track the amount of stack space\n\
+used for arguments without any special help.",
+ void,
+ (cumulative_args_t ca, const function_arg_info &arg),
+ default_function_arg_advance)
+
+DEFHOOK
+(function_arg_offset,
+ "This hook returns the number of bytes to add to the offset of an\n\
+argument of type @var{type} and mode @var{mode} when passed in memory.\n\
+This is needed for the SPU, which passes @code{char} and @code{short}\n\
+arguments in the preferred slot that is in the middle of the quad word\n\
+instead of starting at the top. The default implementation returns 0.",
+ HOST_WIDE_INT, (machine_mode mode, const_tree type),
+ default_function_arg_offset)
+
+DEFHOOK
+(function_arg_padding,
+ "This hook determines whether, and in which direction, to pad out\n\
+an argument of mode @var{mode} and type @var{type}. It returns\n\
+@code{PAD_UPWARD} to insert padding above the argument, @code{PAD_DOWNWARD}\n\
+to insert padding below the argument, or @code{PAD_NONE} to inhibit padding.\n\
+\n\
+The @emph{amount} of padding is not controlled by this hook, but by\n\
+@code{TARGET_FUNCTION_ARG_ROUND_BOUNDARY}. It is always just enough\n\
+to reach the next multiple of that boundary.\n\
+\n\
+This hook has a default definition that is right for most systems.\n\
+For little-endian machines, the default is to pad upward. For\n\
+big-endian machines, the default is to pad downward for an argument of\n\
+constant size shorter than an @code{int}, and upward otherwise.",
+ pad_direction, (machine_mode mode, const_tree type),
+ default_function_arg_padding)
+
+/* Return zero if the argument described by the state of CA should
+ be placed on a stack, or a hard register in which to store the
+ argument. The values MODE, TYPE, and NAMED describe that
+ argument. */
+DEFHOOK
+(function_arg,
+ "Return an RTX indicating whether function argument @var{arg} is passed\n\
+in a register and if so, which register. Argument @var{ca} summarizes all\n\
+the previous arguments.\n\
+\n\
+The return value is usually either a @code{reg} RTX for the hard\n\
+register in which to pass the argument, or zero to pass the argument\n\
+on the stack.\n\
+\n\
+The value of the expression can also be a @code{parallel} RTX@. This is\n\
+used when an argument is passed in multiple locations. The mode of the\n\
+@code{parallel} should be the mode of the entire argument. The\n\
+@code{parallel} holds any number of @code{expr_list} pairs; each one\n\
+describes where part of the argument is passed. In each\n\
+@code{expr_list} the first operand must be a @code{reg} RTX for the hard\n\
+register in which to pass this part of the argument, and the mode of the\n\
+register RTX indicates how large this part of the argument is. The\n\
+second operand of the @code{expr_list} is a @code{const_int} which gives\n\
+the offset in bytes into the entire argument of where this part starts.\n\
+As a special exception the first @code{expr_list} in the @code{parallel}\n\
+RTX may have a first operand of zero. This indicates that the entire\n\
+argument is also stored on the stack.\n\
+\n\
+The last time this hook is called, it is called with @code{MODE ==\n\
+VOIDmode}, and its result is passed to the @code{call} or @code{call_value}\n\
+pattern as operands 2 and 3 respectively.\n\
+\n\
+@cindex @file{stdarg.h} and register arguments\n\
+The usual way to make the ISO library @file{stdarg.h} work on a\n\
+machine where some arguments are usually passed in registers, is to\n\
+cause nameless arguments to be passed on the stack instead. This is\n\
+done by making @code{TARGET_FUNCTION_ARG} return 0 whenever\n\
+@var{named} is @code{false}.\n\
+\n\
+@cindex @code{TARGET_MUST_PASS_IN_STACK}, and @code{TARGET_FUNCTION_ARG}\n\
+@cindex @code{REG_PARM_STACK_SPACE}, and @code{TARGET_FUNCTION_ARG}\n\
+You may use the hook @code{targetm.calls.must_pass_in_stack}\n\
+in the definition of this macro to determine if this argument is of a\n\
+type that must be passed in the stack. If @code{REG_PARM_STACK_SPACE}\n\
+is not defined and @code{TARGET_FUNCTION_ARG} returns nonzero for such an\n\
+argument, the compiler will abort. If @code{REG_PARM_STACK_SPACE} is\n\
+defined, the argument will be computed in the stack and then loaded into\n\
+a register.",
+ rtx, (cumulative_args_t ca, const function_arg_info &arg),
+ default_function_arg)
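+
+/* A minimal illustrative sketch (not from any in-tree port) of a
+   TARGET_FUNCTION_ARG implementation that passes the first few named
+   arguments in registers.  "mytarget", FIRST_ARG_REGNUM and the
+   CUMULATIVE_ARGS field "nregs" are hypothetical:
+
+     static rtx
+     mytarget_function_arg (cumulative_args_t cum_v,
+                            const function_arg_info &arg)
+     {
+       CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+       // The end marker and unnamed variadic arguments go on the
+       // stack, as described above for stdarg.h support.
+       if (arg.end_marker_p () || !arg.named)
+         return NULL_RTX;
+       if (cum->nregs < 4)
+         return gen_rtx_REG (arg.mode, FIRST_ARG_REGNUM + cum->nregs);
+       return NULL_RTX;  // Out of registers: pass on the stack.
+     }  */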
+
+DEFHOOK
+(function_incoming_arg,
+ "Define this hook if the caller and callee on the target have different\n\
+views of where arguments are passed. Also define this hook if there are\n\
+functions that are never directly called, but are invoked by the hardware\n\
+and which have nonstandard calling conventions.\n\
+\n\
+In this case @code{TARGET_FUNCTION_ARG} computes the register in\n\
+which the caller passes the value, and\n\
+@code{TARGET_FUNCTION_INCOMING_ARG} should be defined in a similar\n\
+fashion to tell the function being called where the arguments will\n\
+arrive.\n\
+\n\
+@code{TARGET_FUNCTION_INCOMING_ARG} can also return an arbitrary address\n\
+computation using a hard register, which can be forced into a register,\n\
+so that it can be used to pass special arguments.\n\
+\n\
+If @code{TARGET_FUNCTION_INCOMING_ARG} is not defined,\n\
+@code{TARGET_FUNCTION_ARG} serves both purposes.",
+ rtx, (cumulative_args_t ca, const function_arg_info &arg),
+ default_function_incoming_arg)
+
+DEFHOOK
+(function_arg_boundary,
+ "This hook returns the alignment boundary, in bits, of an argument\n\
+with the specified mode and type. The default hook returns\n\
+@code{PARM_BOUNDARY} for all arguments.",
+ unsigned int, (machine_mode mode, const_tree type),
+ default_function_arg_boundary)
+
+DEFHOOK
+(function_arg_round_boundary,
+ "Normally, the size of an argument is rounded up to @code{PARM_BOUNDARY},\n\
+which is the default value for this hook. You can define this hook to\n\
+return a different value if an argument size must be rounded to a larger\n\
+value.",
+ unsigned int, (machine_mode mode, const_tree type),
+ default_function_arg_round_boundary)
+
+/* Return the diagnostic message string if passing argument 'val' to a
+   function without a prototype is not allowed; NULL otherwise. */
+DEFHOOK
+(invalid_arg_for_unprototyped_fn,
+ "If defined, this macro returns the diagnostic message when it is\n\
+illegal to pass argument @var{val} to function @var{funcdecl}\n\
+with prototype @var{typelist}.",
+ const char *, (const_tree typelist, const_tree funcdecl, const_tree val),
+ hook_invalid_arg_for_unprototyped_fn)
+
+/* Return an rtx for the return value location of the function
+ specified by FN_DECL_OR_TYPE with a return type of RET_TYPE. */
+DEFHOOK
+(function_value,
+ "\n\
+Define this to return an RTX representing the place where a function\n\
+returns or receives a value of data type @var{ret_type}, a tree node\n\
+representing a data type. @var{fn_decl_or_type} is a tree node\n\
+representing @code{FUNCTION_DECL} or @code{FUNCTION_TYPE} of a\n\
+function being called. If @var{outgoing} is false, the hook should\n\
+compute the register in which the caller will see the return value.\n\
+Otherwise, the hook should return an RTX representing the place where\n\
+a function returns a value.\n\
+\n\
+On many machines, only @code{TYPE_MODE (@var{ret_type})} is relevant.\n\
+(Actually, on most machines, scalar values are returned in the same\n\
+place regardless of mode.) The value of the expression is usually a\n\
+@code{reg} RTX for the hard register where the return value is stored.\n\
+The value can also be a @code{parallel} RTX, if the return value is in\n\
+multiple places. See @code{TARGET_FUNCTION_ARG} for an explanation of the\n\
+@code{parallel} form. Note that the callee will populate every\n\
+location specified in the @code{parallel}, but if the first element of\n\
+the @code{parallel} contains the whole return value, callers will use\n\
+that element as the canonical location and ignore the others. The m68k\n\
+port uses this type of @code{parallel} to return pointers in both\n\
+@samp{%a0} (the canonical location) and @samp{%d0}.\n\
+\n\
+If @code{TARGET_PROMOTE_FUNCTION_RETURN} returns true, you must apply\n\
+the same promotion rules specified in @code{PROMOTE_MODE} if\n\
+@var{ret_type} is a scalar type.\n\
+\n\
+If the precise function being called is known, @var{fn_decl_or_type} is\n\
+a tree node (@code{FUNCTION_DECL}) for it; otherwise, it is a null\n\
+pointer. This makes it possible to use a different value-returning\n\
+convention for specific functions when all their calls are\n\
+known.\n\
+\n\
+Some target machines have ``register windows'' so that the register in\n\
+which a function returns its value is not the same as the one in which\n\
+the caller sees the value. For such machines, you should return\n\
+different RTX depending on @var{outgoing}.\n\
+\n\
+@code{TARGET_FUNCTION_VALUE} is not used for return values with\n\
+aggregate data types, because these are returned in another way. See\n\
+@code{TARGET_STRUCT_VALUE_RTX} and related macros, below.",
+ rtx, (const_tree ret_type, const_tree fn_decl_or_type, bool outgoing),
+ default_function_value)
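+
+/* A minimal illustrative sketch (not from any in-tree port) of a
+   TARGET_FUNCTION_VALUE implementation; "mytarget" and
+   RETURN_VALUE_REGNUM are hypothetical, and register windows are
+   ignored, so @var{outgoing} is unused:
+
+     static rtx
+     mytarget_function_value (const_tree ret_type,
+                              const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
+                              bool outgoing ATTRIBUTE_UNUSED)
+     {
+       machine_mode mode = TYPE_MODE (ret_type);
+       // Promote small integers to a full word, mirroring PROMOTE_MODE.
+       if (INTEGRAL_TYPE_P (ret_type)
+           && known_lt (GET_MODE_SIZE (mode), UNITS_PER_WORD))
+         mode = word_mode;
+       return gen_rtx_REG (mode, RETURN_VALUE_REGNUM);
+     }  */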
+
+/* Return the rtx for the result of a libcall of mode MODE,
+ calling the function FN_NAME. */
+DEFHOOK
+(libcall_value,
+ "Define this hook if the back-end needs to know the name of the libcall\n\
+function in order to determine where the result should be returned.\n\
+\n\
+The mode of the result is given by @var{mode} and the name of the called\n\
+library function is given by @var{fun}. The hook should return an RTX\n\
+representing the place where the library function result will be returned.\n\
+\n\
+If this hook is not defined, then @code{LIBCALL_VALUE} will be used.",
+ rtx, (machine_mode mode, const_rtx fun),
+ default_libcall_value)
+
+/* Return true if REGNO is a possible register number for
+ a function value as seen by the caller. */
+DEFHOOK
+(function_value_regno_p,
+ "A target hook that return @code{true} if @var{regno} is the number of a hard\n\
+register in which the values of called function may come back.\n\
+\n\
+A register whose use for returning values is limited to serving as the\n\
+second of a pair (for a value of type @code{double}, say) need not be\n\
+recognized by this target hook.\n\
+\n\
+If the machine has register windows, so that the caller and the called\n\
+function use different registers for the return value, this target hook\n\
+should recognize only the caller's register numbers.\n\
+\n\
+If this hook is not defined, then @code{FUNCTION_VALUE_REGNO_P} will be used.",
+ bool, (const unsigned int regno),
+ default_function_value_regno_p)
+
+DEFHOOK
+(fntype_abi,
+ "Return the ABI used by a function with type @var{type}; see the\n\
+definition of @code{predefined_function_abi} for details of the ABI\n\
+descriptor. Targets only need to define this hook if they support\n\
+interoperability between several ABIs in the same translation unit.",
+ const predefined_function_abi &, (const_tree type),
+ NULL)
+
+DEFHOOK
+(insn_callee_abi,
+ "This hook returns a description of the ABI used by the target of\n\
+call instruction @var{insn}; see the definition of\n\
+@code{predefined_function_abi} for details of the ABI descriptor.\n\
+Only the global function @code{insn_callee_abi} should call this hook\n\
+directly.\n\
+\n\
+Targets only need to define this hook if they support\n\
+interoperability between several ABIs in the same translation unit.",
+ const predefined_function_abi &, (const rtx_insn *insn),
+ NULL)
+
+/* ??? Documenting this hook requires a GFDL license grant. */
+DEFHOOK_UNDOC
+(internal_arg_pointer,
+"Return an rtx for the argument pointer incoming to the\
+ current function.",
+ rtx, (void),
+ default_internal_arg_pointer)
+
+/* Update the current function stack boundary if needed. */
+DEFHOOK
+(update_stack_boundary,
+ "Define this macro to update the current function stack boundary if\n\
+necessary.",
+ void, (void), NULL)
+
+/* Handle stack alignment and return an rtx for Dynamic Realign
+ Argument Pointer if necessary. */
+DEFHOOK
+(get_drap_rtx,
+ "This hook should return an rtx for Dynamic Realign Argument Pointer (DRAP) if a\n\
+different argument pointer register is needed to access the function's\n\
+argument list due to stack realignment. Return @code{NULL} if no DRAP\n\
+is needed.",
+ rtx, (void), NULL)
+
+/* Generate instruction sequence to zero call used registers. */
+DEFHOOK
+(zero_call_used_regs,
+ "This target hook emits instructions to zero the subset of @var{selected_regs}\n\
+that could conceivably contain values that are useful to an attacker.\n\
+Return the set of registers that were actually cleared.\n\
+\n\
+For most targets, the returned set of registers is a subset of\n\
+@var{selected_regs}. However, for some targets (for example MIPS),\n\
+clearing some registers that are in @var{selected_regs} requires\n\
+clearing other call-used registers that are not in @var{selected_regs};\n\
+in that situation, the returned set of registers must be a subset of all\n\
+call-used registers.\n\
+\n\
+The default implementation uses normal move instructions to zero\n\
+all the registers in @var{selected_regs}. Define this hook if the\n\
+target has more efficient ways of zeroing certain registers,\n\
+or if you believe that certain registers would never contain\n\
+values that are useful to an attacker.",
+ HARD_REG_SET, (HARD_REG_SET selected_regs),
+default_zero_call_used_regs)
+
+/* Return true if all function parameters should be spilled to the
+ stack. */
+DEFHOOK
+(allocate_stack_slots_for_args,
+ "When optimization is disabled, this hook indicates whether or not\n\
+arguments should be allocated to stack slots. Normally, GCC allocates\n\
+stack slots for arguments when not optimizing in order to make\n\
+debugging easier. However, when a function is declared with\n\
+@code{__attribute__((naked))}, there is no stack frame, and the compiler\n\
+cannot safely move arguments from the registers in which they are passed\n\
+to the stack. Therefore, this hook should return true in general, but\n\
+false for naked functions. The default implementation always returns true.",
+ bool, (void),
+ hook_bool_void_true)
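+
+/* A sketch of the kind of definition this hook usually gets, modeled on
+   ports with a "naked" function attribute; "mytarget" is hypothetical:
+
+     static bool
+     mytarget_allocate_stack_slots_for_args (void)
+     {
+       // Naked functions have no frame, so incoming arguments must not
+       // be spilled to stack slots.
+       return !lookup_attribute ("naked",
+                                 DECL_ATTRIBUTES (current_function_decl));
+     }  */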
+
+/* Return an rtx for the static chain for FNDECL_OR_TYPE. If INCOMING_P
+ is true, then it should be for the callee; otherwise for the caller. */
+DEFHOOK
+(static_chain,
+ "This hook replaces the use of @code{STATIC_CHAIN_REGNUM} et al for\n\
+targets that may use different static chain locations for different\n\
+nested functions. This may be required if the target has function\n\
+attributes that affect the calling conventions of the function and\n\
+those calling conventions use different static chain locations.\n\
+\n\
+The default version of this hook uses @code{STATIC_CHAIN_REGNUM} et al.\n\
+\n\
+If the static chain is passed in memory, this hook should be used to\n\
+provide rtx values giving @code{mem} expressions that denote where the\n\
+chain is stored.\n\
+Often the @code{mem} expression as seen by the caller will be at an offset\n\
+from the stack pointer and the @code{mem} expression as seen by the callee\n\
+will be at an offset from the frame pointer.\n\
+@findex stack_pointer_rtx\n\
+@findex frame_pointer_rtx\n\
+@findex arg_pointer_rtx\n\
+The variables @code{stack_pointer_rtx}, @code{frame_pointer_rtx}, and\n\
+@code{arg_pointer_rtx} will have been initialized and should be used\n\
+to refer to those items.",
+ rtx, (const_tree fndecl_or_type, bool incoming_p),
+ default_static_chain)
+
+/* Fill in the trampoline at MEM with a call to FNDECL and a
+ static chain value of CHAIN. */
+DEFHOOK
+(trampoline_init,
+ "This hook is called to initialize a trampoline.\n\
+@var{m_tramp} is an RTX for the memory block for the trampoline; @var{fndecl}\n\
+is the @code{FUNCTION_DECL} for the nested function; @var{static_chain} is an\n\
+RTX for the static chain value that should be passed to the function\n\
+when it is called.\n\
+\n\
+If the target defines @code{TARGET_ASM_TRAMPOLINE_TEMPLATE}, then the\n\
+first thing this hook should do is emit a block move into @var{m_tramp}\n\
+from the memory block returned by @code{assemble_trampoline_template}.\n\
+Note that the block move need only cover the constant parts of the\n\
+trampoline. If the target isolates the variable parts of the trampoline\n\
+to the end, not all @code{TRAMPOLINE_SIZE} bytes need be copied.\n\
+\n\
+If the target requires any other actions, such as flushing caches\n\
+(possibly calling the function @code{maybe_emit_call_builtin___clear_cache}) or\n\
+enabling stack execution, these actions should be performed after\n\
+initializing the trampoline proper.",
+ void, (rtx m_tramp, tree fndecl, rtx static_chain),
+ default_trampoline_init)
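+
+/* An illustrative sketch of a TARGET_TRAMPOLINE_INIT implementation for
+   a target that defines TARGET_ASM_TRAMPOLINE_TEMPLATE; "mytarget" and
+   the word offsets 8 and 12 are hypothetical and depend entirely on the
+   template's layout:
+
+     static void
+     mytarget_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
+     {
+       rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
+
+       // Copy the constant instruction template into place first.
+       emit_block_move (m_tramp, assemble_trampoline_template (),
+                        GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
+
+       // Then patch in the two variable words: the static chain value
+       // and the address of the nested function.
+       emit_move_insn (adjust_address (m_tramp, SImode, 8), chain_value);
+       emit_move_insn (adjust_address (m_tramp, SImode, 12), fnaddr);
+     }  */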
+
+/* Emit a call to a function to clear the instruction cache. */
+DEFHOOK
+(emit_call_builtin___clear_cache,
+ "On targets that do not define a @code{clear_cache} insn expander,\n\
+but that define the @code{CLEAR_CACHE_INSN} macro,\n\
+maybe_emit_call_builtin___clear_cache relies on this target hook\n\
+to clear an address range in the instruction cache.\n\
+\n\
+The default implementation calls the @code{__clear_cache} builtin,\n\
+taking the assembler name from the builtin declaration. Overriding\n\
+definitions may call alternate functions, with alternate calling\n\
+conventions, or emit alternate RTX to perform the job.",
+ void, (rtx begin, rtx end),
+ default_emit_call_builtin___clear_cache)
+
+/* Adjust the address of the trampoline in a target-specific way. */
+DEFHOOK
+(trampoline_adjust_address,
+ "This hook should perform any machine-specific adjustment in\n\
+the address of the trampoline. Its argument contains the address of the\n\
+memory block that was passed to @code{TARGET_TRAMPOLINE_INIT}. In case\n\
+the address to be used for a function call should be different from the\n\
+address at which the template was stored, the different address should\n\
+be returned; otherwise @var{addr} should be returned unchanged.\n\
+If this hook is not defined, @var{addr} will be used for function calls.",
+ rtx, (rtx addr), NULL)
+
+DEFHOOKPOD
+(custom_function_descriptors,
+ "If the target can use GCC's generic descriptor mechanism for nested\n\
+functions, define this hook to a power of 2 representing an unused bit\n\
+in function pointers which can be used to differentiate descriptors at\n\
+run time. This value gives the number of bytes by which descriptor\n\
+pointers are misaligned compared to function pointers. For example, on\n\
+targets that require functions to be aligned to a 4-byte boundary, a\n\
+value of either 1 or 2 is appropriate unless the architecture already\n\
+reserves the bit for another purpose, such as on ARM.\n\
+\n\
+Define this hook to 0 if the target implements ABI support for\n\
+function descriptors in its standard calling sequence, like for example\n\
+HPPA or IA-64.\n\
+\n\
+Using descriptors for nested functions\n\
+eliminates the need for trampolines that reside on the stack and require\n\
+it to be made executable.",
+ int, -1)
+
+/* Return the number of bytes of its own arguments that a function
+ pops on returning, or 0 if the function pops no arguments and the
+ caller must therefore pop them all after the function returns. */
+/* ??? tm.texi has no types for the parameters. */
+DEFHOOK
+(return_pops_args,
+ "This target hook returns the number of bytes of its own arguments that\n\
+a function pops on returning, or 0 if the function pops no arguments\n\
+and the caller must therefore pop them all after the function returns.\n\
+\n\
+@var{fundecl} is a C variable whose value is a tree node that describes\n\
+the function in question. Normally it is a node of type\n\
+@code{FUNCTION_DECL} that describes the declaration of the function.\n\
+From this you can obtain the @code{DECL_ATTRIBUTES} of the function.\n\
+\n\
+@var{funtype} is a C variable whose value is a tree node that\n\
+describes the function in question. Normally it is a node of type\n\
+@code{FUNCTION_TYPE} that describes the data type of the function.\n\
+From this it is possible to obtain the data types of the value and\n\
+arguments (if known).\n\
+\n\
+When a call to a library function is being considered, @var{fundecl}\n\
+will contain an identifier node for the library function. Thus, if\n\
+you need to distinguish among various library functions, you can do so\n\
+by their names. Note that ``library function'' in this context means\n\
+a function used to perform arithmetic, whose name is known specially\n\
+in the compiler and was not mentioned in the C code being compiled.\n\
+\n\
+@var{size} is the number of bytes of arguments passed on the\n\
+stack. If a variable number of bytes is passed, it is zero, and\n\
+argument popping will always be the responsibility of the calling function.\n\
+\n\
+On the VAX, all functions always pop their arguments, so the definition\n\
+of this macro is @var{size}. On the 68000, using the standard\n\
+calling convention, no functions pop their arguments, so the value of\n\
+the macro is always 0 in this case. But an alternative calling\n\
+convention is available in which functions that take a fixed number of\n\
+arguments pop them but other functions (such as @code{printf}) pop\n\
+nothing (the caller pops all). When this convention is in use,\n\
+@var{funtype} is examined to determine whether a function takes a fixed\n\
+number of arguments.",
+ poly_int64, (tree fundecl, tree funtype, poly_int64 size),
+ default_return_pops_args)
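+
+/* A sketch of the two simplest conventions described above; "mytarget"
+   is hypothetical:
+
+     static poly_int64
+     mytarget_return_pops_args (tree fundecl ATTRIBUTE_UNUSED,
+                                tree funtype ATTRIBUTE_UNUSED,
+                                poly_int64 size)
+     {
+       // VAX-style convention: the callee always pops its own stack
+       // arguments.  A 68000-style port would return 0 here instead.
+       return size;
+     }  */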
+
+/* Return a mode wide enough to copy any function value that might be
+ returned. */
+DEFHOOK
+(get_raw_result_mode,
+ "This target hook returns the mode to be used when accessing raw return\n\
+registers in @code{__builtin_return}. Define this macro if the value\n\
+in @var{reg_raw_mode} is not correct. Use @code{VOIDmode} if a register\n\
+should be ignored for @code{__builtin_return} purposes.",
+ fixed_size_mode, (int regno),
+ default_get_reg_raw_mode)
+
+/* Return a mode wide enough to copy any argument value that might be
+ passed. */
+DEFHOOK
+(get_raw_arg_mode,
+ "This target hook returns the mode to be used when accessing raw argument\n\
+registers in @code{__builtin_apply_args}. Define this macro if the value\n\
+in @var{reg_raw_mode} is not correct. Use @code{VOIDmode} if a register\n\
+should be ignored for @code{__builtin_apply_args} purposes.",
+ fixed_size_mode, (int regno),
+ default_get_reg_raw_mode)
+
+/* Return true if a type is an empty record. */
+DEFHOOK
+(empty_record_p,
+ "This target hook returns true if the type is an empty record. The default\n\
+is to return @code{false}.",
+ bool, (const_tree type),
+ hook_bool_const_tree_false)
+
+/* Warn about the change in empty class parameter passing ABI. */
+DEFHOOK
+(warn_parameter_passing_abi,
+ "This target hook warns about the change in empty class parameter passing\n\
+ABI.",
+ void, (cumulative_args_t ca, tree type),
+ hook_void_CUMULATIVE_ARGS_tree)
+
+HOOK_VECTOR_END (calls)
+
+DEFHOOK
+(use_pseudo_pic_reg,
+ "This hook should return 1 in case pseudo register should be created\n\
+for pic_offset_table_rtx during function expand.",
+ bool, (void),
+ hook_bool_void_false)
+
+DEFHOOK
+(init_pic_reg,
+ "Perform a target dependent initialization of pic_offset_table_rtx.\n\
+This hook is called at the start of register allocation.",
+ void, (void),
+ hook_void_void)
+
+/* Return the diagnostic message string if conversion from FROMTYPE
+ to TOTYPE is not allowed, NULL otherwise. */
+DEFHOOK
+(invalid_conversion,
+ "If defined, this macro returns the diagnostic message when it is\n\
+invalid to convert from @var{fromtype} to @var{totype}, or @code{NULL}\n\
+if validity should be determined by the front end.",
+ const char *, (const_tree fromtype, const_tree totype),
+ hook_constcharptr_const_tree_const_tree_null)
+
+/* Return the diagnostic message string if the unary operation OP is
+ not permitted on TYPE, NULL otherwise. */
+DEFHOOK
+(invalid_unary_op,
+ "If defined, this macro returns the diagnostic message when it is\n\
+invalid to apply operation @var{op} (where unary plus is denoted by\n\
+@code{CONVERT_EXPR}) to an operand of type @var{type}, or @code{NULL}\n\
+if validity should be determined by the front end.",
+ const char *, (int op, const_tree type),
+ hook_constcharptr_int_const_tree_null)
+
+/* Return the diagnostic message string if the binary operation OP
+ is not permitted on TYPE1 and TYPE2, NULL otherwise. */
+DEFHOOK
+(invalid_binary_op,
+ "If defined, this macro returns the diagnostic message when it is\n\
+invalid to apply operation @var{op} to operands of types @var{type1}\n\
+and @var{type2}, or @code{NULL} if validity should be determined by\n\
+the front end.",
+ const char *, (int op, const_tree type1, const_tree type2),
+ hook_constcharptr_int_const_tree_const_tree_null)
+
+/* If values of TYPE are promoted to some other type when used in
+ expressions (analogous to the integer promotions), return that type,
+ or NULL_TREE otherwise. */
+DEFHOOK
+(promoted_type,
+ "If defined, this target hook returns the type to which values of\n\
+@var{type} should be promoted when they appear in expressions,\n\
+analogous to the integer promotions, or @code{NULL_TREE} to use the\n\
+front end's normal promotion rules. This hook is useful when there are\n\
+target-specific types with special promotion rules.\n\
+This is currently used only by the C and C++ front ends.",
+ tree, (const_tree type),
+ hook_tree_const_tree_null)
+
+/* Convert EXPR to TYPE, if target-specific types with special conversion
+ rules are involved. Return the converted expression, or NULL to apply
+ the standard conversion rules. */
+DEFHOOK
+(convert_to_type,
+ "If defined, this hook returns the result of converting @var{expr} to\n\
+@var{type}. It should return the converted expression,\n\
+or @code{NULL_TREE} to apply the front end's normal conversion rules.\n\
+This hook is useful when there are target-specific types with special\n\
+conversion rules.\n\
+This is currently used only by the C and C++ front ends.",
+ tree, (tree type, tree expr),
+ hook_tree_tree_tree_null)
+
+DEFHOOK
+(verify_type_context,
+ "If defined, this hook returns false if there is a target-specific reason\n\
+why type @var{type} cannot be used in the source language context described\n\
+by @var{context}. When @var{silent_p} is false, the hook also reports an\n\
+error against @var{loc} for invalid uses of @var{type}.\n\
+\n\
+Calls to this hook should be made through the global function\n\
+@code{verify_type_context}, which makes the @var{silent_p} parameter\n\
+default to false and also handles @code{error_mark_node}.\n\
+\n\
+The default implementation always returns true.",
+ bool, (location_t loc, type_context_kind context, const_tree type,
+ bool silent_p),
+ NULL)
+
+DEFHOOK
+(can_change_mode_class,
+ "This hook returns true if it is possible to bitcast values held in\n\
+registers of class @var{rclass} from mode @var{from} to mode @var{to}\n\
+and if doing so preserves the low-order bits that are common to both modes.\n\
+The result is only meaningful if @var{rclass} has registers that can hold\n\
+both @var{from} and @var{to}. The default implementation returns true.\n\
+\n\
+As an example of when such bitcasting is invalid, loading 32-bit integer or\n\
+floating-point objects into floating-point registers on Alpha extends them\n\
+to 64 bits. Therefore loading a 64-bit object and then storing it as a\n\
+32-bit object does not store the low-order 32 bits, as would be the case\n\
+for a normal register. Therefore, @file{alpha.h} defines\n\
+@code{TARGET_CAN_CHANGE_MODE_CLASS} to return:\n\
+\n\
+@smallexample\n\
+(GET_MODE_SIZE (from) == GET_MODE_SIZE (to)\n\
+ || !reg_classes_intersect_p (FLOAT_REGS, rclass))\n\
+@end smallexample\n\
+\n\
+Even if storing from a register in mode @var{to} would be valid,\n\
+if both @var{from} and @code{raw_reg_mode} for @var{rclass} are wider\n\
+than @code{word_mode}, then we must prevent @var{to} narrowing the\n\
+mode. This happens when the middle-end assumes that it can load\n\
+or store pieces of an @var{N}-word pseudo, and that the pseudo will\n\
+eventually be allocated to @var{N} @code{word_mode} hard registers.\n\
+Failure to prevent this kind of mode change will result in the\n\
+entire @code{raw_reg_mode} being modified instead of the partial\n\
+value that the middle-end intended.",
+ bool, (machine_mode from, machine_mode to, reg_class_t rclass),
+ hook_bool_mode_mode_reg_class_t_true)
+
+/* Change pseudo allocno class calculated by IRA. */
+DEFHOOK
+(ira_change_pseudo_allocno_class,
+ "A target hook which can change allocno class for given pseudo from\n\
+ allocno and best class calculated by IRA.\n\
+ \n\
+ The default version of this target hook always returns given class.",
+ reg_class_t, (int, reg_class_t, reg_class_t),
+ default_ira_change_pseudo_allocno_class)
+
+/* Return true if we use LRA instead of reload. */
+DEFHOOK
+(lra_p,
+ "A target hook which returns true if we use LRA instead of reload pass.\n\
+\n\
+The default version of this target hook returns true. New ports\n\
+should use LRA, and existing ports are encouraged to convert.",
+ bool, (void),
+ default_lra_p)
+
+/* Return register priority of given hard regno for the current target. */
+DEFHOOK
+(register_priority,
+ "A target hook which returns the register priority number to which the\n\
+register @var{hard_regno} belongs to. The bigger the number, the\n\
+more preferable the hard register usage (when all other conditions are\n\
+the same). This hook can be used to prefer some hard register over\n\
+others in LRA. For example, some x86-64 register usage needs\n\
+additional prefix which makes instructions longer. The hook can\n\
+return lower priority number for such registers make them less favorable\n\
+and as result making the generated code smaller.\n\
+\n\
+The default version of this target hook returns always zero.",
+ int, (int),
+ default_register_priority)
+
+/* Return true if we need register usage leveling. */
+DEFHOOK
+(register_usage_leveling_p,
+ "A target hook which returns true if we need register usage leveling.\n\
+That means if a few hard registers are equally good for the\n\
+assignment, we choose the least used hard register. The register\n\
+usage leveling may be profitable for some targets. Don't use the\n\
+usage leveling for targets with conditional execution or targets\n\
+with big register files as it hurts if-conversion and cross-jumping\n\
+optimizations.\n\
+\n\
+The default version of this target hook returns always false.",
+ bool, (void),
+ default_register_usage_leveling_p)
+
+/* Return true if maximal address displacement can be different. */
+DEFHOOK
+(different_addr_displacement_p,
+ "A target hook which returns true if an address with the same structure\n\
+can have different maximal legitimate displacement. For example, the\n\
+displacement can depend on memory mode or on operand combinations in\n\
+the insn.\n\
+\n\
+The default version of this target hook returns always false.",
+ bool, (void),
+ default_different_addr_displacement_p)
+
+/* Determine class for spilling pseudos of given mode into registers
+ instead of memory. */
+DEFHOOK
+(spill_class,
+ "This hook defines a class of registers which could be used for spilling\n\
+pseudos of the given mode and class, or @code{NO_REGS} if only memory\n\
+should be used. Not defining this hook is equivalent to returning\n\
+@code{NO_REGS} for all inputs.",
+ reg_class_t, (reg_class_t, machine_mode),
+ NULL)
+
+/* Determine an additional allocno class. */
+DEFHOOK
+(additional_allocno_class_p,
+ "This hook should return @code{true} if given class of registers should\n\
+be an allocno class in any way. Usually RA uses only one register\n\
+class from all classes containing the same register set. In some\n\
+complicated cases, you need to have two or more such classes as\n\
+allocno ones for RA correct work. Not defining this hook is\n\
+equivalent to returning @code{false} for all inputs.",
+ bool, (reg_class_t),
+ hook_bool_reg_class_t_false)
+
+DEFHOOK
+(cstore_mode,
+ "This hook defines the machine mode to use for the boolean result of\n\
+conditional store patterns. The ICODE argument is the instruction code\n\
+for the cstore being performed. Not definiting this hook is the same\n\
+as accepting the mode encoded into operand 0 of the cstore expander\n\
+patterns.",
+ scalar_int_mode, (enum insn_code icode),
+ default_cstore_mode)
+
+/* This target hook allows the backend to compute the register pressure
+ classes to use. */
+DEFHOOK
+(compute_pressure_classes,
+ "A target hook which lets a backend compute the set of pressure classes to\n\
+be used by those optimization passes which take register pressure into\n\
+account, as opposed to letting IRA compute them. It returns the number of\n\
+register classes stored in the array @var{pressure_classes}.",
+ int, (enum reg_class *pressure_classes), NULL)
+
+/* True if a structure, union or array with MODE containing FIELD should
+ be accessed using BLKmode. */
+DEFHOOK
+(member_type_forces_blk,
+ "Return true if a structure, union or array containing @var{field} should\n\
+be accessed using @code{BLKmode}.\n\
+\n\
+If @var{field} is the only field in the structure, @var{mode} is its\n\
+mode, otherwise @var{mode} is @code{VOIDmode}. @var{mode} is provided in the\n\
+case where structures of one field would require the structure's mode to\n\
+retain the field's mode.\n\
+\n\
+Normally, this is not needed.",
+ bool, (const_tree field, machine_mode mode),
+ default_member_type_forces_blk)
+
+/* See tree-ssa-math-opts.cc:divmod_candidate_p for conditions
   that gate the divmod transform. */
+DEFHOOK
+(expand_divmod_libfunc,
+ "Define this hook for enabling divmod transform if the port does not have\n\
+hardware divmod insn but defines target-specific divmod libfuncs.",
+ void, (rtx libfunc, machine_mode mode, rtx op0, rtx op1, rtx *quot, rtx *rem),
+ NULL)
+
+/* Return the class for a secondary reload, and fill in extra information. */
+DEFHOOK
+(secondary_reload,
+ "Many machines have some registers that cannot be copied directly to or\n\
+from memory or even from other types of registers. An example is the\n\
+@samp{MQ} register, which, on most machines, can only be copied to or\n\
+from general registers, but not memory. Below, we shall be using the\n\
+term 'intermediate register' when a move operation cannot be performed\n\
+directly, but has to be done by copying the source into the intermediate\n\
+register first, and then copying the intermediate register to the\n\
+destination. An intermediate register always has the same mode as\n\
+source and destination. Since it holds the actual value being copied,\n\
+reload might apply optimizations to reuse an intermediate register\n\
+and elide the copy from the source when it can determine that the\n\
+intermediate register still holds the required value.\n\
+\n\
+Another kind of secondary reload is required on some machines which\n\
+allow copying all registers to and from memory, but require a scratch\n\
+register for stores to some memory locations (e.g., those with symbolic\n\
+address on the RT, and those with certain symbolic address on the SPARC\n\
+when compiling PIC)@. Scratch registers need not have the same mode\n\
+as the value being copied, and usually hold a different value than\n\
+that being copied. Special patterns in the md file are needed to\n\
+describe how the copy is performed with the help of the scratch register;\n\
+these patterns also describe the number, register class(es) and mode(s)\n\
+of the scratch register(s).\n\
+\n\
+In some cases, both an intermediate and a scratch register are required.\n\
+\n\
+For input reloads, this target hook is called with nonzero @var{in_p},\n\
+and @var{x} is an rtx that needs to be copied to a register of class\n\
+@var{reload_class} in @var{reload_mode}. For output reloads, this target\n\
+hook is called with zero @var{in_p}, and a register of class @var{reload_class}\n\
+needs to be copied to rtx @var{x} in @var{reload_mode}.\n\
+\n\
+If copying a register of @var{reload_class} from/to @var{x} requires\n\
+an intermediate register, the hook @code{secondary_reload} should\n\
+return the register class required for this intermediate register.\n\
+If no intermediate register is required, it should return NO_REGS.\n\
+If more than one intermediate register is required, describe the one\n\
+that is closest in the copy chain to the reload register.\n\
+\n\
+If scratch registers are needed, you also have to describe how to\n\
+perform the copy from/to the reload register to/from this\n\
+closest intermediate register. Or if no intermediate register is\n\
+required, but still a scratch register is needed, describe the\n\
+copy from/to the reload register to/from the reload operand @var{x}.\n\
+\n\
+You do this by setting @code{sri->icode} to the instruction code of a pattern\n\
+in the md file which performs the move. Operands 0 and 1 are the output\n\
+and input of this copy, respectively. Operands from operand 2 onward are\n\
+for scratch operands. These scratch operands must have a mode, and a\n\
+single-register-class\n\
+@c [later: or memory]\n\
+output constraint.\n\
+\n\
+When an intermediate register is used, the @code{secondary_reload}\n\
+hook will be called again to determine how to copy the intermediate\n\
+register to/from the reload operand @var{x}, so your hook must also\n\
+have code to handle the register class of the intermediate operand.\n\
+\n\
+@c [For later: maybe we'll allow multi-alternative reload patterns -\n\
+@c the port maintainer could name a mov<mode> pattern that has clobbers -\n\
+@c and match the constraints of input and output to determine the required\n\
+@c alternative. A restriction would be that constraints used to match\n\
+@c against reloads registers would have to be written as register class\n\
+@c constraints, or we need a new target macro / hook that tells us if an\n\
+@c arbitrary constraint can match an unknown register of a given class.\n\
+@c Such a macro / hook would also be useful in other places.]\n\
+\n\
+\n\
+@var{x} might be a pseudo-register or a @code{subreg} of a\n\
+pseudo-register, which could either be in a hard register or in memory.\n\
+Use @code{true_regnum} to find out; it will return @minus{}1 if the pseudo is\n\
+in memory and the hard register number if it is in a register.\n\
+\n\
+Scratch operands in memory (constraint @code{\"=m\"} / @code{\"=&m\"}) are\n\
+currently not supported. For the time being, you will have to continue\n\
+to use @code{TARGET_SECONDARY_MEMORY_NEEDED} for that purpose.\n\
+\n\
+@code{copy_cost} also uses this target hook to find out how values are\n\
+copied. If you want it to include some extra cost for the need to allocate\n\
+(a) scratch register(s), set @code{sri->extra_cost} to the additional cost.\n\
+Or if two dependent moves are supposed to have a lower cost than the sum\n\
+of the individual moves due to expected fortuitous scheduling and/or special\n\
+forwarding logic, you can set @code{sri->extra_cost} to a negative amount.",
+ reg_class_t,
+ (bool in_p, rtx x, reg_class_t reload_class, machine_mode reload_mode,
+ secondary_reload_info *sri),
+ default_secondary_reload)
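+
+/* A minimal sketch of the intermediate-register case described above;
+   "mytarget", FP_REGS and the restriction itself are hypothetical:
+
+     static reg_class_t
+     mytarget_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
+                                reg_class_t reload_class,
+                                machine_mode reload_mode ATTRIBUTE_UNUSED,
+                                secondary_reload_info *sri ATTRIBUTE_UNUSED)
+     {
+       // Pretend FP registers can only be loaded and stored via a
+       // general register, never directly from memory.
+       if (reg_classes_intersect_p (FP_REGS, (enum reg_class) reload_class)
+           && MEM_P (x))
+         return GENERAL_REGS;
+       return NO_REGS;  // No intermediate register needed.
+     }  */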
+
+DEFHOOK
+(secondary_memory_needed,
+ "Certain machines have the property that some registers cannot be copied\n\
+to some other registers without using memory. Define this hook on\n\
+those machines to return true if objects of mode @var{m} in registers\n\
+of @var{class1} can only be copied to registers of class @var{class2} by\n\
+storing a register of @var{class1} into memory and loading that memory\n\
+location into a register of @var{class2}. The default definition returns\n\
+false for all inputs.",
+ bool, (machine_mode mode, reg_class_t class1, reg_class_t class2),
+ hook_bool_mode_reg_class_t_reg_class_t_false)
+
+DEFHOOK
+(secondary_memory_needed_mode,
+ "If @code{TARGET_SECONDARY_MEMORY_NEEDED} tells the compiler to use memory\n\
+when moving between two particular registers of mode @var{mode},\n\
+this hook specifies the mode that the memory should have.\n\
+\n\
+The default depends on @code{TARGET_LRA_P}. Without LRA, the default\n\
+is to use a word-sized mode for integral modes that are smaller than\n\
+a word. This is the right thing to do on most machines because it ensures\n\
+that all bits of the register are copied and prevents accesses to the\n\
+registers in a narrower mode, which some machines prohibit for\n\
+floating-point registers.\n\
+\n\
+However, this default behavior is not correct on some machines, such as\n\
+the DEC Alpha, that store short integers in floating-point registers\n\
+differently than in integer registers. On those machines, the default\n\
+widening will not work correctly and you must define this hook to\n\
+suppress that widening in some cases. See the file @file{alpha.cc} for\n\
+details.\n\
+\n\
+With LRA, the default is to use @var{mode} unmodified.",
+ machine_mode, (machine_mode mode),
+ default_secondary_memory_needed_mode)
+
+/* Given an rtx X being reloaded into a reg required to be in class CLASS,
+ return the class of reg to actually use. */
+DEFHOOK
+(preferred_reload_class,
+ "A target hook that places additional restrictions on the register class\n\
+to use when it is necessary to copy value @var{x} into a register in class\n\
+@var{rclass}. The value is a register class; perhaps @var{rclass}, or perhaps\n\
+another, smaller class.\n\
+\n\
+The default version of this hook always returns the value of the @code{rclass} argument.\n\
+\n\
+Sometimes returning a more restrictive class makes better code. For\n\
+example, on the 68000, when @var{x} is an integer constant that is in range\n\
+for a @samp{moveq} instruction, the value of this macro is always\n\
+@code{DATA_REGS} as long as @var{rclass} includes the data registers.\n\
+Requiring a data register guarantees that a @samp{moveq} will be used.\n\
+\n\
+One case where @code{TARGET_PREFERRED_RELOAD_CLASS} must not return\n\
+@var{rclass} is if @var{x} is a legitimate constant which cannot be\n\
+loaded into some register class. By returning @code{NO_REGS} you can\n\
+force @var{x} into a memory location. For example, rs6000 can load\n\
+immediate values into general-purpose registers, but does not have an\n\
+instruction for loading an immediate value into a floating-point\n\
+register, so @code{TARGET_PREFERRED_RELOAD_CLASS} returns @code{NO_REGS} when\n\
+@var{x} is a floating-point constant. If the constant can't be loaded\n\
+into any kind of register, code generation will be better if\n\
+@code{TARGET_LEGITIMATE_CONSTANT_P} makes the constant illegitimate instead\n\
+of using @code{TARGET_PREFERRED_RELOAD_CLASS}.\n\
+\n\
+If an insn has pseudos in it after register allocation, reload will go\n\
+through the alternatives and call repeatedly @code{TARGET_PREFERRED_RELOAD_CLASS}\n\
+to find the best one. Returning @code{NO_REGS}, in this case, makes\n\
+reload add a @code{!} in front of the constraint: the x86 back-end uses\n\
+this feature to discourage usage of 387 registers when math is done in\n\
+the SSE registers (and vice versa).",
+ reg_class_t,
+ (rtx x, reg_class_t rclass),
+ default_preferred_reload_class)
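+
+/* A sketch of the rs6000-style situation described above; "mytarget"
+   and FP_REGS are hypothetical:
+
+     static reg_class_t
+     mytarget_preferred_reload_class (rtx x, reg_class_t rclass)
+     {
+       // Floating-point immediates cannot be loaded into FP registers
+       // directly; returning NO_REGS forces the constant into memory.
+       if (CONST_DOUBLE_P (x)
+           && reg_classes_intersect_p (FP_REGS, (enum reg_class) rclass))
+         return NO_REGS;
+       return rclass;
+     }  */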
+
+/* Like TARGET_PREFERRED_RELOAD_CLASS, but for output reloads instead of
+ input reloads. */
+DEFHOOK
+(preferred_output_reload_class,
+ "Like @code{TARGET_PREFERRED_RELOAD_CLASS}, but for output reloads instead of\n\
+input reloads.\n\
+\n\
+The default version of this hook always returns the value of the\n\
+@code{rclass} argument.\n\
+\n\
+You can also use @code{TARGET_PREFERRED_OUTPUT_RELOAD_CLASS} to discourage\n\
+reload from using some alternatives, like @code{TARGET_PREFERRED_RELOAD_CLASS}.",
+ reg_class_t,
+ (rtx x, reg_class_t rclass),
+ default_preferred_output_reload_class)
+
+DEFHOOK
+(select_early_remat_modes,
+ "On some targets, certain modes cannot be held in registers around a\n\
+standard ABI call and are relatively expensive to spill to the stack.\n\
+The early rematerialization pass can help in such cases by aggressively\n\
+recomputing values after calls, so that they don't need to be spilled.\n\
+\n\
+This hook returns the set of such modes by setting the associated bits\n\
+in @var{modes}. The default implementation selects no modes, which has\n\
+the effect of disabling the early rematerialization pass.",
+ void, (sbitmap modes),
+ default_select_early_remat_modes)
+
+DEFHOOK
+(class_likely_spilled_p,
+ "A target hook which returns @code{true} if pseudos that have been assigned\n\
+to registers of class @var{rclass} would likely be spilled because\n\
+registers of @var{rclass} are needed for spill registers.\n\
+\n\
+The default version of this target hook returns @code{true} if @var{rclass}\n\
+has exactly one register and @code{false} otherwise. On most machines, this\n\
+default should be used. For generally register-starved machines, such as\n\
+i386, or machines with tight register constraints, such as SH, this hook\n\
+can be used to avoid excessive spilling.\n\
+\n\
+This hook is also used by some of the global intra-procedural code\n\
+transformations to throttle code motion, to avoid increasing register\n\
+pressure.",
+ bool, (reg_class_t rclass),
+ default_class_likely_spilled_p)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class RCLASS. */
+DEFHOOK
+(class_max_nregs,
+ "A target hook returns the maximum number of consecutive registers\n\
+of class @var{rclass} needed to hold a value of mode @var{mode}.\n\
+\n\
+This is closely related to the macro @code{TARGET_HARD_REGNO_NREGS}.\n\
+In fact, the value returned by @code{TARGET_CLASS_MAX_NREGS (@var{rclass},\n\
+@var{mode})} target hook should be the maximum value of\n\
+@code{TARGET_HARD_REGNO_NREGS (@var{regno}, @var{mode})} for all @var{regno}\n\
+values in the class @var{rclass}.\n\
+\n\
+This target hook helps control the handling of multiple-word values\n\
+in the reload pass.\n\
+\n\
+The default version of this target hook returns the size of @var{mode}\n\
+in words.",
+ unsigned char, (reg_class_t rclass, machine_mode mode),
+ default_class_max_nregs)
+
+DEFHOOK
+(preferred_rename_class,
+ "A target hook that places additional preference on the register\n\
+class to use when it is necessary to rename a register in class\n\
+@var{rclass} to another class. The hook returns the preferred class,\n\
+or @code{NO_REGS} if no preferred register class is found or the hook\n\
+is not implemented.\n\
+Sometimes returning a more restrictive class makes better code. For\n\
+example, on ARM, thumb-2 instructions using @code{LO_REGS} may be\n\
+smaller than instructions using @code{GENERAL_REGS}. By returning\n\
+@code{LO_REGS} from @code{preferred_rename_class}, code size can\n\
+be reduced.",
+ reg_class_t, (reg_class_t rclass),
+ default_preferred_rename_class)
+
+/* This target hook allows the backend to avoid unsafe substitution
+ during register allocation. */
+DEFHOOK
+(cannot_substitute_mem_equiv_p,
+ "A target hook which returns @code{true} if @var{subst} can't\n\
+safely substitute pseudos with equivalent memory values during\n\
+register allocation.\n\
+The default version of this target hook returns @code{false}.\n\
+On most machines, this default should be used. For machines with\n\
+generally non-orthogonal register usage for addressing, such\n\
+as SH, this hook can be used to avoid excessive spilling.",
+ bool, (rtx subst),
+ hook_bool_rtx_false)
+
+/* This target hook allows the backend to legitimize base plus
+ displacement addressing. */
+DEFHOOK
+(legitimize_address_displacement,
+ "This hook tries to split address offset @var{orig_offset} into\n\
+two parts: one that should be added to the base address to create\n\
+a local anchor point, and an additional offset that can be applied\n\
+to the anchor to address a value of mode @var{mode}. The idea is that\n\
+the local anchor could be shared by other accesses to nearby locations.\n\
+\n\
+The hook returns true if it succeeds, storing the offset of the\n\
+anchor from the base in @var{offset1} and the offset of the final address\n\
+from the anchor in @var{offset2}. The default implementation returns false.",
+ bool, (rtx *offset1, rtx *offset2, poly_int64 orig_offset, machine_mode mode),
+ default_legitimize_address_displacement)
+
+/* This target hook allows the backend to perform additional
+ processing while initializing for variable expansion. */
+DEFHOOK
+(expand_to_rtl_hook,
+ "This hook is called just before expansion into rtl, allowing the target\n\
+to perform additional initializations or analysis before the expansion.\n\
+For example, the rs6000 port uses it to allocate a scratch stack slot\n\
+for use in copying SDmode values between memory and floating point\n\
+registers whenever the function being expanded has any SDmode\n\
+usage.",
+ void, (void),
+ hook_void_void)
+
+/* This target hook allows the backend to perform additional
+ instantiations on rtx that are not actually in insns yet,
+ but will be later. */
+DEFHOOK
+(instantiate_decls,
+ "This hook allows the backend to perform additional instantiations on rtl\n\
+that are not actually in any insns yet, but will be later.",
+ void, (void),
+ hook_void_void)
+
+DEFHOOK
+(hard_regno_nregs,
+ "This hook returns the number of consecutive hard registers, starting\n\
+at register number @var{regno}, required to hold a value of mode\n\
+@var{mode}. This hook must never return zero, even if a register\n\
+cannot hold the requested mode - indicate that with\n\
+@code{TARGET_HARD_REGNO_MODE_OK} and/or\n\
+@code{TARGET_CAN_CHANGE_MODE_CLASS} instead.\n\
+\n\
+The default definition returns the number of words in @var{mode}.",
+ unsigned int, (unsigned int regno, machine_mode mode),
+ default_hard_regno_nregs)
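+
+/* A sketch of a definition for a machine whose hypothetical FP
+   registers are 64 bits wide while the word size is 32 bits;
+   "mytarget" and FP_REGNO_P are hypothetical:
+
+     static unsigned int
+     mytarget_hard_regno_nregs (unsigned int regno, machine_mode mode)
+     {
+       // A 64-bit FP register holds twice as much as a 32-bit word.
+       if (FP_REGNO_P (regno))
+         return CEIL (GET_MODE_SIZE (mode), 8);
+       return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
+     }  */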
+
+DEFHOOK
+(hard_regno_mode_ok,
+ "This hook returns true if it is permissible to store a value\n\
+of mode @var{mode} in hard register number @var{regno} (or in several\n\
+registers starting with that one). The default definition returns true\n\
+unconditionally.\n\
+\n\
+You need not include code to check for the numbers of fixed registers,\n\
+because the allocation mechanism considers them to be always occupied.\n\
+\n\
+@cindex register pairs\n\
+On some machines, double-precision values must be kept in even/odd\n\
+register pairs. You can implement that by defining this hook to reject\n\
+odd register numbers for such modes.\n\
+\n\
+The minimum requirement for a mode to be OK in a register is that the\n\
+@samp{mov@var{mode}} instruction pattern support moves between the\n\
+register and other hard register in the same class and that moving a\n\
+value into the register and back out not alter it.\n\
+\n\
+Since the same instruction used to move @code{word_mode} will work for\n\
+all narrower integer modes, it is not necessary on any machine for\n\
+this hook to distinguish between these modes, provided you define\n\
+patterns @samp{movhi}, etc., to take advantage of this. This is\n\
+useful because of the interaction between @code{TARGET_HARD_REGNO_MODE_OK}\n\
+and @code{TARGET_MODES_TIEABLE_P}; it is very desirable for all integer\n\
+modes to be tieable.\n\
+\n\
+Many machines have special registers for floating point arithmetic.\n\
+Often people assume that floating point machine modes are allowed only\n\
+in floating point registers. This is not true. Any registers that\n\
+can hold integers can safely @emph{hold} a floating point machine\n\
+mode, whether or not floating arithmetic can be done on it in those\n\
+registers. Integer move instructions can be used to move the values.\n\
+\n\
+On some machines, though, the converse is true: fixed-point machine\n\
+modes may not go in floating registers. This is true if the floating\n\
+registers normalize any value stored in them, because storing a\n\
+non-floating value there would garble it. In this case,\n\
+@code{TARGET_HARD_REGNO_MODE_OK} should reject fixed-point machine modes in\n\
+floating registers. But if the floating registers do not automatically\n\
+normalize, if you can store any bit pattern in one and retrieve it\n\
+unchanged without a trap, then any machine mode may go in a floating\n\
+register, so you can define this hook to say so.\n\
+\n\
+The primary significance of special floating registers is rather that\n\
+they are the registers acceptable in floating point arithmetic\n\
+instructions. However, this is of no concern to\n\
+@code{TARGET_HARD_REGNO_MODE_OK}. You handle it by writing the proper\n\
+constraints for those instructions.\n\
+\n\
+On some machines, the floating registers are especially slow to access,\n\
+so that it is better to store a value in a stack frame than in such a\n\
+register if floating point arithmetic is not being done. As long as the\n\
+floating registers are not in class @code{GENERAL_REGS}, they will not\n\
+be used unless some pattern's constraint asks for one.",
+ bool, (unsigned int regno, machine_mode mode),
+ hook_bool_uint_mode_true)
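+
+/* A sketch of the even/odd register-pair check mentioned above;
+   "mytarget" is hypothetical:
+
+     static bool
+     mytarget_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
+     {
+       // Multi-word values must start in an even-numbered register so
+       // that even/odd pairs stay aligned.
+       if (maybe_gt (GET_MODE_SIZE (mode), UNITS_PER_WORD)
+           && (regno & 1) != 0)
+         return false;
+       return true;
+     }  */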
+
+DEFHOOK
+(modes_tieable_p,
+ "This hook returns true if a value of mode @var{mode1} is accessible\n\
+in mode @var{mode2} without copying.\n\
+\n\
+If @code{TARGET_HARD_REGNO_MODE_OK (@var{r}, @var{mode1})} and\n\
+@code{TARGET_HARD_REGNO_MODE_OK (@var{r}, @var{mode2})} are always\n\
+the same for any @var{r}, then\n\
+@code{TARGET_MODES_TIEABLE_P (@var{mode1}, @var{mode2})}\n\
+should be true. If they differ for any @var{r}, you should define\n\
+this hook to return false unless some other mechanism ensures the\n\
+accessibility of the value in a narrower mode.\n\
+\n\
+You should define this hook to return true in as many cases as\n\
+possible since doing so will allow GCC to perform better register\n\
+allocation. The default definition returns true unconditionally.",
+ bool, (machine_mode mode1, machine_mode mode2),
+ hook_bool_mode_mode_true)
+
+/* Return true if is OK to use a hard register REGNO as scratch register
+ in peephole2. */
+DEFHOOK
+(hard_regno_scratch_ok,
+ "This target hook should return @code{true} if it is OK to use a hard register\n\
+@var{regno} as scratch reg in peephole2.\n\
+\n\
+One common use of this macro is to prevent the use of a register that\n\
+is not saved by the prologue in an interrupt handler.\n\
+\n\
+The default version of this hook always returns @code{true}.",
+ bool, (unsigned int regno),
+ default_hard_regno_scratch_ok)
+
+DEFHOOK
+(hard_regno_call_part_clobbered,
+ "ABIs usually specify that calls must preserve the full contents\n\
+of a particular register, or that calls can alter any part of a\n\
+particular register. This information is captured by the target macro\n\
+@code{CALL_REALLY_USED_REGISTERS}. However, some ABIs specify that calls\n\
+must preserve certain bits of a particular register but can alter others.\n\
+This hook should return true if this applies to at least one of the\n\
+registers in @samp{(reg:@var{mode} @var{regno})}, and if as a result the\n\
+call would alter part of the @var{mode} value. For example, if a call\n\
+preserves the low 32 bits of a 64-bit hard register @var{regno} but can\n\
+clobber the upper 32 bits, this hook should return true for a 64-bit mode\n\
+but false for a 32-bit mode.\n\
+\n\
+The value of @var{abi_id} comes from the @code{predefined_function_abi}\n\
+structure that describes the ABI of the call; see the definition of the\n\
+structure for more details. If (as is usual) the target uses the same ABI\n\
+for all functions in a translation unit, @var{abi_id} is always 0.\n\
+\n\
+The default implementation returns false, which is correct\n\
+for targets that don't have partly call-clobbered registers.",
+ bool, (unsigned int abi_id, unsigned int regno, machine_mode mode),
+ hook_bool_uint_uint_mode_false)
+
+DEFHOOK
+(get_multilib_abi_name,
+ "This hook returns name of multilib ABI name.",
+ const char *, (void),
+ hook_constcharptr_void_null)
+
+/* Return the smallest number of different values for which it is best to
+ use a jump-table instead of a tree of conditional branches. */
+DEFHOOK
+(case_values_threshold,
+ "This function return the smallest number of different values for which it\n\
+is best to use a jump-table instead of a tree of conditional branches.\n\
+The default is four for machines with a @code{casesi} instruction and\n\
+five otherwise. This is best for most machines.",
+ unsigned int, (void),
+ default_case_values_threshold)
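+
+/* A sketch of a definition that keeps the documented default for speed
+   but avoids jump tables a little longer when optimizing for size;
+   "mytarget" and the thresholds are hypothetical:
+
+     static unsigned int
+     mytarget_case_values_threshold (void)
+     {
+       // Jump tables cost table space; require more cases before
+       // emitting one when size matters.
+       return optimize_size ? 8 : 4;
+     }  */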
+
+DEFHOOK
+(starting_frame_offset,
+ "This hook returns the offset from the frame pointer to the first local\n\
+variable slot to be allocated. If @code{FRAME_GROWS_DOWNWARD}, it is the\n\
+offset to the @emph{end} of the first slot allocated, otherwise it is the\n\
+offset to the @emph{beginning} of the first slot allocated. The default\n\
+implementation returns 0.",
+ HOST_WIDE_INT, (void),
+ hook_hwi_void_0)
+
+/* Optional callback to advise the target to compute the frame layout. */
+DEFHOOK
+(compute_frame_layout,
+ "This target hook is called once each time the frame layout needs to be\n\
+recalculated. The calculations can be cached by the target and can then\n\
+be used by @code{INITIAL_ELIMINATION_OFFSET} instead of re-computing the\n\
+layout on every invocation of that hook. This is particularly useful\n\
+for targets that have an expensive frame layout function. Implementing\n\
+this callback is optional.",
+ void, (void),
+ hook_void_void)
+
+/* Return true if a function must have and use a frame pointer. */
+DEFHOOK
+(frame_pointer_required,
+ "This target hook should return @code{true} if a function must have and use\n\
+a frame pointer. This target hook is called in the reload pass. If its return\n\
+value is @code{true} the function will have a frame pointer.\n\
+\n\
+This target hook can in principle examine the current function and decide\n\
+according to the facts, but on most machines the constant @code{false} or the\n\
+constant @code{true} suffices. Use @code{false} when the machine allows code\n\
+to be generated with no frame pointer, and doing so saves some time or space.\n\
+Use @code{true} when there is no possible advantage to avoiding a frame\n\
+pointer.\n\
+\n\
+In certain cases, the compiler does not know how to produce valid code\n\
+without a frame pointer. The compiler recognizes those cases and\n\
+automatically gives the function a frame pointer regardless of what\n\
+@code{targetm.frame_pointer_required} returns. You don't need to worry about\n\
+them.\n\
+\n\
+In a function that does not require a frame pointer, the frame pointer\n\
+register can be allocated for ordinary usage, unless you mark it as a\n\
+fixed register. See @code{FIXED_REGISTERS} for more information.\n\
+\n\
+Default return value is @code{false}.",
+ bool, (void),
+ hook_bool_void_false)
+
+/* Returns true if the compiler is allowed to try to replace register number
+ from-reg with register number to-reg. */
+DEFHOOK
+(can_eliminate,
+ "This target hook should return @code{true} if the compiler is allowed to\n\
+try to replace register number @var{from_reg} with register number\n\
+@var{to_reg}. This target hook will usually be @code{true}, since most of the\n\
+cases preventing register elimination are things that the compiler already\n\
+knows about.\n\
+\n\
+Default return value is @code{true}.",
+ bool, (const int from_reg, const int to_reg),
+ hook_bool_const_int_const_int_true)
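+
+/* A sketch of a typical definition; "mytarget" is hypothetical:
+
+     static bool
+     mytarget_can_eliminate (const int from_reg ATTRIBUTE_UNUSED,
+                             const int to_reg)
+     {
+       // Eliminating a register in favor of the stack pointer is only
+       // valid when no frame pointer is needed.
+       if (to_reg == STACK_POINTER_REGNUM)
+         return !frame_pointer_needed;
+       return true;
+     }  */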
+
+/* Modify any or all of fixed_regs, call_used_regs, global_regs,
+ reg_names, and reg_class_contents to account of the vagaries of the
+ target. */
+DEFHOOK
+(conditional_register_usage,
+ "This hook may conditionally modify five variables\n\
+@code{fixed_regs}, @code{call_used_regs}, @code{global_regs},\n\
+@code{reg_names}, and @code{reg_class_contents}, to take into account\n\
+any dependence of these register sets on target flags. The first three\n\
+of these are of type @code{char []} (interpreted as boolean vectors).\n\
+@code{global_regs} is a @code{const char *[]}, and\n\
+@code{reg_class_contents} is a @code{HARD_REG_SET}.  Before the hook is\n\
+called, @code{fixed_regs}, @code{call_used_regs},\n\
+@code{reg_class_contents}, and @code{reg_names} have been initialized\n\
+from @code{FIXED_REGISTERS}, @code{CALL_USED_REGISTERS},\n\
+@code{REG_CLASS_CONTENTS}, and @code{REGISTER_NAMES}, respectively.\n\
+@code{global_regs} has been cleared, and any @option{-ffixed-@var{reg}},\n\
+@option{-fcall-used-@var{reg}} and @option{-fcall-saved-@var{reg}}\n\
+command options have been applied.\n\
+\n\
+@cindex disabling certain registers\n\
+@cindex controlling register usage\n\
+If the usage of an entire class of registers depends on the target\n\
+flags, you may indicate this to GCC by using this hook to set the\n\
+entries in @code{fixed_regs} and @code{call_used_regs} to 1 for each\n\
+of the registers in the classes which should not be used by GCC@.  Also make\n\
+@code{define_register_constraint}s return @code{NO_REGS} for constraints\n\
+that shouldn't be used.\n\
+\n\
+(However, if this class is not included in @code{GENERAL_REGS} and all\n\
+of the insn patterns whose constraints permit this class are\n\
+controlled by target switches, then GCC will automatically avoid using\n\
+these registers when the target switches are opposed to them.)",
+ void, (void),
+ hook_void_void)
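+
+/* A sketch of a typical implementation (the flag and register-number names
+   are invented for illustration): mark every register removed by a target
+   flag as both fixed and call-used:
+
+     static void
+     example_conditional_register_usage (void)
+     {
+       if (!EXAMPLE_TARGET_HAS_HIGH_REGS)
+         for (int i = EXAMPLE_FIRST_HIGH_REG; i <= EXAMPLE_LAST_HIGH_REG; i++)
+           fixed_regs[i] = call_used_regs[i] = 1;
+     }  */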
+
+DEFHOOK
+(stack_clash_protection_alloca_probe_range,
+ "Some targets have an ABI defined interval for which no probing needs to be done.\n\
+When a probe does need to be done, this same interval is used as the probe\n\
+distance when probing up during stack clash protection for alloca.\n\
+On such targets this hook can be used to override the default probing-up\n\
+interval.  Define this hook to return nonzero if such a probe range is\n\
+required, or zero otherwise.  Defining this hook also requires that functions\n\
+which make use of alloca have at least 8 bytes of outgoing arguments; if this\n\
+is not the case the stack will be corrupted.\n\
+You need not define this hook if it would always have the value zero.",
+ HOST_WIDE_INT, (void),
+ default_stack_clash_protection_alloca_probe_range)
+
+
+/* Functions specific to the C family of frontends. */
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_C_"
+HOOK_VECTOR (TARGET_C, c)
+
+/* ??? Documenting this hook requires a GFDL license grant. */
+DEFHOOK_UNDOC
+(mode_for_suffix,
+"Return machine mode for non-standard constant literal suffix @var{c},\
+ or VOIDmode if non-standard suffixes are unsupported.",
+ machine_mode, (char c),
+ default_mode_for_suffix)
+
+DEFHOOK
+(excess_precision,
+ "Return a value, with the same meaning as the C99 macro\n\
+@code{FLT_EVAL_METHOD} that describes which excess precision should be\n\
+applied. @var{type} is either @code{EXCESS_PRECISION_TYPE_IMPLICIT},\n\
+@code{EXCESS_PRECISION_TYPE_FAST},\n\
+@code{EXCESS_PRECISION_TYPE_STANDARD}, or\n\
+@code{EXCESS_PRECISION_TYPE_FLOAT16}. For\n\
+@code{EXCESS_PRECISION_TYPE_IMPLICIT}, the target should return the\n\
+precision and range in which operations will be implicitly evaluated,\n\
+regardless of the excess precision explicitly added.  For\n\
+@code{EXCESS_PRECISION_TYPE_STANDARD},\n\
+@code{EXCESS_PRECISION_TYPE_FLOAT16}, and\n\
+@code{EXCESS_PRECISION_TYPE_FAST}, the target should return the\n\
+explicit excess precision that should be added depending on the\n\
+value set for @option{-fexcess-precision=@r{[}standard@r{|}fast@r{|}16@r{]}}.\n\
+Note that unpredictable explicit excess precision does not make sense,\n\
+so a target should never return @code{FLT_EVAL_METHOD_UNPREDICTABLE}\n\
+when @var{type} is @code{EXCESS_PRECISION_TYPE_STANDARD},\n\
+@code{EXCESS_PRECISION_TYPE_FLOAT16} or\n\
+@code{EXCESS_PRECISION_TYPE_FAST}.",
+ enum flt_eval_method, (enum excess_precision_type type),
+ default_excess_precision)
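+
+/* A sketch of a possible implementation (the mapping chosen here is
+   illustrative only; the actual return values are target policy):
+
+     static enum flt_eval_method
+     example_excess_precision (enum excess_precision_type type)
+     {
+       switch (type)
+         {
+         case EXCESS_PRECISION_TYPE_STANDARD:
+           return FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE;
+         default:
+           return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
+         }
+     }  */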
+
+HOOK_VECTOR_END (c)
+
+/* Functions specific to the C++ frontend. */
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_CXX_"
+HOOK_VECTOR (TARGET_CXX, cxx)
+
+/* Return the integer type used for guard variables. */
+DEFHOOK
+(guard_type,
+ "Define this hook to override the integer type used for guard variables.\n\
+These are used to implement one-time construction of static objects. The\n\
+default is @code{long_long_integer_type_node}.",
+ tree, (void),
+ default_cxx_guard_type)
+
+/* Return true if only the low bit of the guard should be tested. */
+DEFHOOK
+(guard_mask_bit,
+ "This hook determines how guard variables are used. It should return\n\
+@code{false} (the default) if the first byte should be used. A return value of\n\
+@code{true} indicates that only the least significant bit should be used.",
+ bool, (void),
+ hook_bool_void_false)
+
+/* Returns the size of the array cookie for an array of type. */
+DEFHOOK
+(get_cookie_size,
+ "This hook returns the size of the cookie to use when allocating an array\n\
+whose elements have the indicated @var{type}. Assumes that it is already\n\
+known that a cookie is needed. The default is\n\
+@code{max(sizeof (size_t), alignof(type))}, as defined in section 2.7 of the\n\
+IA64/Generic C++ ABI@.",
+ tree, (tree type),
+ default_cxx_get_cookie_size)
+
+/* Returns true if the element size should be stored in the array cookie. */
+DEFHOOK
+(cookie_has_size,
+ "This hook should return @code{true} if the element size should be stored in\n\
+array cookies. The default is to return @code{false}.",
+ bool, (void),
+ hook_bool_void_false)
+
+/* Allows backends to perform additional processing when
+ deciding if a class should be exported or imported. */
+DEFHOOK
+(import_export_class,
+ "If defined by a backend this hook allows the decision made to export\n\
+class @var{type} to be overruled. Upon entry @var{import_export}\n\
+will contain 1 if the class is going to be exported, @minus{}1 if it is going\n\
+to be imported, and 0 otherwise.  This function should return the\n\
+modified value and perform any other actions necessary to support the\n\
+backend's targeted operating system.",
+ int, (tree type, int import_export), NULL)
+
+/* Returns true if constructors and destructors return "this". */
+DEFHOOK
+(cdtor_returns_this,
+ "This hook should return @code{true} if constructors and destructors return\n\
+the address of the object created/destroyed. The default is to return\n\
+@code{false}.",
+ bool, (void),
+ hook_bool_void_false)
+
+/* Returns true if the key method for a class can be an inline
+ function, so long as it is not declared inline in the class
+ itself. Returning true is the behavior required by the Itanium C++ ABI. */
+DEFHOOK
+(key_method_may_be_inline,
+ "This hook returns true if the key method for a class (i.e., the method\n\
+which, if defined in the current translation unit, causes the virtual\n\
+table to be emitted) may be an inline function. Under the standard\n\
+Itanium C++ ABI the key method may be an inline function so long as\n\
+the function is not declared inline in the class definition. Under\n\
+some variants of the ABI, an inline function can never be the key\n\
+method. The default is to return @code{true}.",
+ bool, (void),
+ hook_bool_void_true)
+
+DEFHOOK
+(determine_class_data_visibility,
+"@var{decl} is a virtual table, virtual table table, typeinfo object,\n\
+or other similar implicit class data object that will be emitted with\n\
+external linkage in this translation unit. No ELF visibility has been\n\
+explicitly specified. If the target needs to specify a visibility\n\
+other than that of the containing class, use this hook to set\n\
+@code{DECL_VISIBILITY} and @code{DECL_VISIBILITY_SPECIFIED}.",
+ void, (tree decl),
+ hook_void_tree)
+
+/* Returns true (the default) if virtual tables and other
+ similar implicit class data objects are always COMDAT if they
+ have external linkage. If this hook returns false, then
+ class data for classes whose virtual table will be emitted in
+ only one translation unit will not be COMDAT. */
+DEFHOOK
+(class_data_always_comdat,
+ "This hook returns true (the default) if virtual tables and other\n\
+similar implicit class data objects are always COMDAT if they have\n\
+external linkage. If this hook returns false, then class data for\n\
+classes whose virtual table will be emitted in only one translation\n\
+unit will not be COMDAT.",
+ bool, (void),
+ hook_bool_void_true)
+
+/* Returns true (the default) if the RTTI for the basic types,
+ which is always defined in the C++ runtime, should be COMDAT;
+ false if it should not be COMDAT. */
+DEFHOOK
+(library_rtti_comdat,
+ "This hook returns true (the default) if the RTTI information for\n\
+the basic types which is defined in the C++ runtime should always\n\
+be COMDAT, false if it should not be COMDAT.",
+ bool, (void),
+ hook_bool_void_true)
+
+/* Returns true if __aeabi_atexit should be used to register static
+ destructors. */
+DEFHOOK
+(use_aeabi_atexit,
+ "This hook returns true if @code{__aeabi_atexit} (as defined by the ARM EABI)\n\
+should be used to register static destructors when @option{-fuse-cxa-atexit}\n\
+is in effect. The default is to return false to use @code{__cxa_atexit}.",
+ bool, (void),
+ hook_bool_void_false)
+
+/* Returns true if target may use atexit in the same manner as
+ __cxa_atexit to register static destructors. */
+DEFHOOK
+(use_atexit_for_cxa_atexit,
+ "This hook returns true if the target @code{atexit} function can be used\n\
+in the same manner as @code{__cxa_atexit} to register C++ static\n\
+destructors. This requires that @code{atexit}-registered functions in\n\
+shared libraries are run in the correct order when the libraries are\n\
+unloaded. The default is to return false.",
+ bool, (void),
+ hook_bool_void_false)
+
+DEFHOOK
+(adjust_class_at_definition,
+"@var{type} is a C++ class (i.e., RECORD_TYPE or UNION_TYPE) that has just\n\
+been defined.  Use this hook to make adjustments to the class (e.g., tweak\n\
+visibility or perform any other required target modifications).",
+ void, (tree type),
+ hook_void_tree)
+
+DEFHOOK
+(decl_mangling_context,
+ "Return target-specific mangling context of @var{decl} or @code{NULL_TREE}.",
+ tree, (const_tree decl),
+ hook_tree_const_tree_null)
+
+HOOK_VECTOR_END (cxx)
+
+/* Functions and data for emulated TLS support. */
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_EMUTLS_"
+HOOK_VECTOR (TARGET_EMUTLS, emutls)
+
+/* Name of the address and common functions. */
+DEFHOOKPOD
+(get_address,
+ "Contains the name of the helper function that uses a TLS control\n\
+object to locate a TLS instance. The default causes libgcc's\n\
+emulated TLS helper function to be used.",
+ const char *, "__builtin___emutls_get_address")
+
+DEFHOOKPOD
+(register_common,
+ "Contains the name of the helper function that should be used at\n\
+program startup to register TLS objects that are implicitly\n\
+initialized to zero. If this is @code{NULL}, all TLS objects will\n\
+have explicit initializers. The default causes libgcc's emulated TLS\n\
+registration function to be used.",
+ const char *, "__builtin___emutls_register_common")
+
+/* Prefixes for proxy variable and template. */
+DEFHOOKPOD
+(var_section,
+ "Contains the name of the section in which TLS control variables should\n\
+be placed. The default of @code{NULL} allows these to be placed in\n\
+any section.",
+ const char *, NULL)
+
+DEFHOOKPOD
+(tmpl_section,
+ "Contains the name of the section in which TLS initializers should be\n\
+placed. The default of @code{NULL} allows these to be placed in any\n\
+section.",
+ const char *, NULL)
+
+/* Prefixes for proxy variable and template. */
+DEFHOOKPOD
+(var_prefix,
+ "Contains the prefix to be prepended to TLS control variable names.\n\
+The default of @code{NULL} uses a target-specific prefix.",
+ const char *, NULL)
+
+DEFHOOKPOD
+(tmpl_prefix,
+ "Contains the prefix to be prepended to TLS initializer objects. The\n\
+default of @code{NULL} uses a target-specific prefix.",
+ const char *, NULL)
+
+/* Function to generate field definitions of the proxy variable. */
+DEFHOOK
+(var_fields,
+ "Specifies a function that generates the FIELD_DECLs for a TLS control\n\
+object type. @var{type} is the RECORD_TYPE the fields are for and\n\
+@var{name} should be filled with the structure tag, if the default of\n\
+@code{__emutls_object} is unsuitable. The default creates a type suitable\n\
+for libgcc's emulated TLS function.",
+ tree, (tree type, tree *name),
+ default_emutls_var_fields)
+
+/* Function to initialize a proxy variable. */
+DEFHOOK
+(var_init,
+ "Specifies a function that generates the CONSTRUCTOR to initialize a\n\
+TLS control object. @var{var} is the TLS control object, @var{decl}\n\
+is the TLS object and @var{tmpl_addr} is the address of the\n\
+initializer. The default initializes libgcc's emulated TLS control object.",
+ tree, (tree var, tree decl, tree tmpl_addr),
+ default_emutls_var_init)
+
+/* Whether we are allowed to alter the usual alignment of the
+ proxy variable. */
+DEFHOOKPOD
+(var_align_fixed,
+ "Specifies whether the alignment of TLS control variable objects is\n\
+fixed and should not be increased as some backends may do to optimize\n\
+single objects. The default is false.",
+ bool, false)
+
+/* Whether we can emit debug information for TLS vars. */
+DEFHOOKPOD
+(debug_form_tls_address,
+ "Specifies whether a DWARF @code{DW_OP_form_tls_address} location descriptor\n\
+may be used to describe emulated TLS control objects.",
+ bool, false)
+
+HOOK_VECTOR_END (emutls)
+
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_OPTION_"
+HOOK_VECTOR (TARGET_OPTION_HOOKS, target_option_hooks)
+
+/* Function to validate the attribute((target(...))) strings. If
+ the option is validated, the hook should also fill in
+ DECL_FUNCTION_SPECIFIC_TARGET in the function decl node. */
+DEFHOOK
+(valid_attribute_p,
+ "This hook is called to parse @code{attribute((target(\"...\")))}, which\n\
+allows setting target-specific options on individual functions.\n\
+These function-specific options may differ\n\
+from the options specified on the command line. The hook should return\n\
+@code{true} if the options are valid.\n\
+\n\
+The hook should set the @code{DECL_FUNCTION_SPECIFIC_TARGET} field in\n\
+the function declaration to hold a pointer to a target-specific\n\
+@code{struct cl_target_option} structure.",
+ bool, (tree fndecl, tree name, tree args, int flags),
+ default_target_option_valid_attribute_p)
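+
+/* At the source level this hook parses declarations such as (an Arm state
+   selection is shown purely as an example):
+
+     int critical_fn (int) __attribute__ ((target ("arm")));
+
+   The attribute string reaches this hook for validation.  */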
+
+/* Function to save any extra target state in the target options structure. */
+DEFHOOK
+(save,
+ "This hook is called to save any additional target-specific information\n\
+in the @code{struct cl_target_option} structure for function-specific\n\
+options from the @code{struct gcc_options} structure.\n\
+@xref{Option file format}.",
+ void, (struct cl_target_option *ptr, struct gcc_options *opts,
+ struct gcc_options *opts_set), NULL)
+
+/* Function to restore any extra target state from the target options
+ structure. */
+DEFHOOK
+(restore,
+ "This hook is called to restore any additional target-specific\n\
+information in the @code{struct cl_target_option} structure for\n\
+function-specific options to the @code{struct gcc_options} structure.",
+ void, (struct gcc_options *opts, struct gcc_options *opts_set,
+ struct cl_target_option *ptr), NULL)
+
+/* Function to update target-specific option information after being
+ streamed in. */
+DEFHOOK
+(post_stream_in,
+ "This hook is called to update target-specific information in the\n\
+@code{struct cl_target_option} structure after it is streamed in from\n\
+LTO bytecode.",
+ void, (struct cl_target_option *ptr), NULL)
+
+/* Function to print any extra target state from the target options
+ structure. */
+DEFHOOK
+(print,
+ "This hook is called to print any additional target-specific\n\
+information in the @code{struct cl_target_option} structure for\n\
+function-specific options.",
+ void, (FILE *file, int indent, struct cl_target_option *ptr), NULL)
+
+/* Function to parse arguments to be validated for #pragma target, and to
+ change the state if the options are valid. If the first argument is
+ NULL, the second argument specifies the default options to use. Return
+ true if the options are valid, and set the current state. */
+DEFHOOK
+(pragma_parse,
+ "This target hook parses the options for @code{#pragma GCC target}, which\n\
+sets the target-specific options for functions that occur later in the\n\
+input stream. The options accepted should be the same as those handled by the\n\
+@code{TARGET_OPTION_VALID_ATTRIBUTE_P} hook.",
+ bool, (tree args, tree pop_target),
+ default_target_option_pragma_parse)
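+
+/* At the source level this hook handles pragmas such as:
+
+     #pragma GCC push_options
+     #pragma GCC target ("thumb")
+     ... functions compiled with the modified options ...
+     #pragma GCC pop_options
+
+   (The "thumb" string is just an example of a target-specific option.)  */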
+
+/* Do option overrides for the target. */
+DEFHOOK
+(override,
+ "Sometimes certain combinations of command options do not make sense on\n\
+a particular target machine. You can override the hook\n\
+@code{TARGET_OPTION_OVERRIDE} to take account of this.  This hook is called\n\
+once just after all the command options have been parsed.\n\
+\n\
+Don't use this hook to turn on various extra optimizations for\n\
+@option{-O}. That is what @code{TARGET_OPTION_OPTIMIZATION} is for.\n\
+\n\
+If you need to do something whenever the optimization level is\n\
+changed via the optimize attribute or pragma, see\n\
+@code{TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE}.",
+ void, (void),
+ hook_void_void)
+
+/* This function returns true if DECL1 and DECL2 are versions of the same
+ function. DECL1 and DECL2 are function versions if and only if they
+ have the same function signature and different target specific attributes,
+ that is, they are compiled for different target machines. */
+DEFHOOK
+(function_versions,
+ "This target hook returns @code{true} if @var{DECL1} and @var{DECL2} are\n\
+versions of the same function. @var{DECL1} and @var{DECL2} are function\n\
+versions if and only if they have the same function signature and\n\
+different target specific attributes, that is, they are compiled for\n\
+different target machines.",
+ bool, (tree decl1, tree decl2),
+ hook_bool_tree_tree_false)
+
+/* Function to determine if one function can inline another function. */
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_"
+DEFHOOK
+(can_inline_p,
+ "This target hook returns @code{false} if the @var{caller} function\n\
+cannot inline @var{callee}, based on target specific information. By\n\
+default, inlining is not allowed if the callee function has function\n\
+specific target options and the caller does not use the same options.",
+ bool, (tree caller, tree callee),
+ default_target_can_inline_p)
+
+DEFHOOK
+(update_ipa_fn_target_info,
+ "Allow the target to analyze all gimple statements of the given function\n\
+in order to record and update target-specific information for inlining.\n\
+A typical example: a caller with an ISA feature disabled is normally not\n\
+allowed to inline a callee that has the same ISA feature enabled, even one\n\
+attributed with @code{always_inline}; but if a conservative analysis of\n\
+all the callee's statements can guarantee that the callee does not exploit\n\
+any instructions from the mismatched ISA feature, it is safe to allow the\n\
+caller to inline the callee.\n\
+@var{info} is an @code{unsigned int} value in which each set bit records\n\
+that the corresponding feature was detected by the analysis;\n\
+@var{stmt} is the statement being analyzed.  Return true if the target\n\
+still needs to analyze the subsequent statements, otherwise return false\n\
+to stop the analysis.\n\
+The default version of this hook returns false.",
+ bool, (unsigned int& info, const gimple* stmt),
+ default_update_ipa_fn_target_info)
+
+DEFHOOK
+(need_ipa_fn_target_info,
+ "Allow the target to check early whether it is necessary to analyze all\n\
+gimple statements in the given function to update target-specific\n\
+information for inlining.  See the hook @code{update_ipa_fn_target_info}\n\
+for an example use of such target-specific information.  This hook is\n\
+expected to be invoked before iterating with the hook\n\
+@code{update_ipa_fn_target_info}.\n\
+@var{decl} is the function being analyzed and @var{info} has the same\n\
+meaning as in the hook @code{update_ipa_fn_target_info}; in some cases\n\
+the target can update @var{info} once here without iterating at all.\n\
+Return true if the target decides to analyze all gimple statements to\n\
+collect information, otherwise return false.\n\
+The default version of this hook returns false.",
+ bool, (const_tree decl, unsigned int& info),
+ default_need_ipa_fn_target_info)
+
+DEFHOOK
+(relayout_function,
+"This target hook fixes function @var{fndecl} after attributes are processed.\n\
+The default does nothing.  On ARM, for example, the function's alignment\n\
+is updated according to the @code{target} attribute.",
+ void, (tree fndecl),
+ hook_void_tree)
+
+HOOK_VECTOR_END (target_option)
+
+/* For targets that need to mark extra registers as live on entry to
+ the function, they should define this target hook and set their
+ bits in the bitmap passed in. */
+DEFHOOK
+(extra_live_on_entry,
+ "Add any hard registers to @var{regs} that are live on entry to the\n\
+function. This hook only needs to be defined to provide registers that\n\
+cannot be found by examination of FUNCTION_ARG_REGNO_P, the callee saved\n\
+registers, STATIC_CHAIN_INCOMING_REGNUM, STATIC_CHAIN_REGNUM,\n\
+TARGET_STRUCT_VALUE_RTX, FRAME_POINTER_REGNUM, EH_USES,\n\
+ARG_POINTER_REGNUM, and the PIC_OFFSET_TABLE_REGNUM.",
+ void, (bitmap regs),
+ hook_void_bitmap)
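+
+/* A minimal sketch (the register name is invented): a port with an
+   implicitly live register that the mechanisms above cannot discover
+   would set its bit here:
+
+     static void
+     example_extra_live_on_entry (bitmap regs)
+     {
+       bitmap_set_bit (regs, EXAMPLE_GLOBAL_BASE_REGNUM);
+     }  */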
+
+/* Targets should define this target hook to mark that non-callee clobbers are
+ present in CALL_INSN_FUNCTION_USAGE for all the calls that bind to a local
+ definition. */
+DEFHOOKPOD
+(call_fusage_contains_non_callee_clobbers,
+ "Set to true if each call that binds to a local definition explicitly\n\
+clobbers or sets all non-fixed registers modified by performing the call.\n\
+That is, by the call pattern itself, or by code that might be inserted by the\n\
+linker (e.g.@: stubs, veneers, branch islands), but not including those\n\
+modifiable by the callee. The affected registers may be mentioned explicitly\n\
+in the call pattern, or included as clobbers in CALL_INSN_FUNCTION_USAGE.\n\
+The default version of this hook is set to false.  The purpose of this hook\n\
+is to enable the @option{-fipa-ra} optimization.",
+ bool,
+ false)
+
+/* Fill in additional registers set up by prologue into a regset. */
+DEFHOOK
+(set_up_by_prologue,
+ "This hook should add additional registers that are computed by the prologue\n\
+to the hard regset for shrink-wrapping optimization purposes.",
+ void, (struct hard_reg_set_container *),
+ NULL)
+
+/* For targets that have attributes that can affect whether a
+ function's return statements need checking. For instance a 'naked'
+ function attribute. */
+DEFHOOK
+(warn_func_return,
+ "True if a function's return statements should be checked for matching\n\
+the function's return type. This includes checking for falling off the end\n\
+of a non-void function. Return false if no such check should be made.",
+ bool, (tree),
+ hook_bool_tree_true)
+
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_SHRINK_WRAP_"
+HOOK_VECTOR (TARGET_SHRINK_WRAP_HOOKS, shrink_wrap)
+
+DEFHOOK
+(get_separate_components,
+ "This hook should return an @code{sbitmap} with the bits set for those\n\
+components that can be separately shrink-wrapped in the current function.\n\
+Return @code{NULL} if the current function should not get any separate\n\
+shrink-wrapping.\n\
+Don't define this hook if it would always return @code{NULL}.\n\
+If it is defined, the other hooks in this group have to be defined as well.",
+ sbitmap, (void),
+ NULL)
+
+DEFHOOK
+(components_for_bb,
+ "This hook should return an @code{sbitmap} with the bits set for those\n\
+components where either the prologue component has to be executed before\n\
+the @code{basic_block}, or the epilogue component after it, or both.",
+ sbitmap, (basic_block),
+ NULL)
+
+DEFHOOK
+(disqualify_components,
+ "This hook should clear the bits in the @var{components} bitmap for those\n\
+components in @var{edge_components} that the target cannot handle on edge\n\
+@var{e}, where @var{is_prologue} says if this is for a prologue or an\n\
+epilogue instead.",
+ void, (sbitmap components, edge e, sbitmap edge_components, bool is_prologue),
+ NULL)
+
+DEFHOOK
+(emit_prologue_components,
+ "Emit prologue insns for the components indicated by the parameter.",
+ void, (sbitmap),
+ NULL)
+
+DEFHOOK
+(emit_epilogue_components,
+ "Emit epilogue insns for the components indicated by the parameter.",
+ void, (sbitmap),
+ NULL)
+
+DEFHOOK
+(set_handled_components,
+ "Mark the components in the parameter as handled, so that the\n\
+@code{prologue} and @code{epilogue} named patterns know to ignore those\n\
+components. The target code should not hang on to the @code{sbitmap}, it\n\
+will be deleted after this call.",
+ void, (sbitmap),
+ NULL)
+
+HOOK_VECTOR_END (shrink_wrap)
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_"
+
+/* Determine the type of unwind info to emit for debugging. */
+DEFHOOK
+(debug_unwind_info,
+ "This hook defines the mechanism that will be used for describing frame\n\
+unwind information to the debugger. Normally the hook will return\n\
+@code{UI_DWARF2} if DWARF 2 debug information is enabled, and\n\
+return @code{UI_NONE} otherwise.\n\
+\n\
+A target may return @code{UI_DWARF2} even when DWARF 2 debug information\n\
+is disabled in order to always output DWARF 2 frame information.\n\
+\n\
+A target may return @code{UI_TARGET} if it has ABI specified unwind tables.\n\
+This will suppress generation of the normal debug frame unwind information.",
+ enum unwind_info_type, (void),
+ default_debug_unwind_info)
+
+DEFHOOK
+(reset_location_view,
+ "This hook, if defined, enables @option{-ginternal-reset-location-views}, and\n\
+uses its result to override cases in which the estimated min insn\n\
+length might be nonzero even when a PC advance (i.e., a view reset)\n\
+cannot be taken for granted.\n\
+\n\
+If the hook is defined, it must return a positive value to indicate\n\
+the insn definitely advances the PC, and so the view number can be\n\
+safely assumed to be reset; a negative value to mean the insn\n\
+definitely does not advance the PC, and so the view number must not\n\
+be reset; or zero to decide based on the estimated insn length.\n\
+\n\
+If insn length is to be regarded as reliable, set the hook to\n\
+@code{hook_int_rtx_insn_0}.",
+ int, (rtx_insn *), NULL)
+
+/* The code parameter should be of type enum rtx_code but this is not
+ defined at this time. */
+DEFHOOK
+(canonicalize_comparison,
+ "On some machines not all possible comparisons are defined, but you can\n\
+convert an invalid comparison into a valid one. For example, the Alpha\n\
+does not have a @code{GT} comparison, but you can use an @code{LT}\n\
+comparison instead and swap the order of the operands.\n\
+\n\
+On such machines, implement this hook to do any required conversions.\n\
+@var{code} is the initial comparison code and @var{op0} and @var{op1}\n\
+are the left and right operands of the comparison, respectively. If\n\
+@var{op0_preserve_value} is @code{true} the implementation is not\n\
+allowed to change the value of @var{op0} since the value might be used\n\
+in RTXs which aren't comparisons.  E.g.@: the implementation is not\n\
+allowed to swap operands in that case.\n\
+\n\
+GCC will not assume that the comparison resulting from this macro is\n\
+valid but will see if the resulting insn matches a pattern in the\n\
+@file{md} file.\n\
+\n\
+You need not implement this hook if it would never change the\n\
+comparison code or operands.",
+ void, (int *code, rtx *op0, rtx *op1, bool op0_preserve_value),
+ default_canonicalize_comparison)
+
+DEFHOOK
+(min_arithmetic_precision,
+ "On some RISC architectures with 64-bit registers, the processor also\n\
+maintains 32-bit condition codes that make it possible to do real 32-bit\n\
+arithmetic, although the operations are performed on the full registers.\n\
+\n\
+On such architectures, defining this hook to 32 tells the compiler to try\n\
+using 32-bit arithmetical operations setting the condition codes instead\n\
+of doing full 64-bit arithmetic.\n\
+\n\
+More generally, define this hook on RISC architectures if you want the\n\
+compiler to try using arithmetical operations setting the condition codes\n\
+with a precision lower than the word precision.\n\
+\n\
+You need not define this hook if @code{WORD_REGISTER_OPERATIONS} is not\n\
+defined to 1.",
+ unsigned int, (void), default_min_arithmetic_precision)
+
+DEFHOOKPOD
+(atomic_test_and_set_trueval,
+ "This value should be set if the result written by\n\
+@code{atomic_test_and_set} is not exactly 1, i.e.@: the\n\
+@code{bool} @code{true}.",
+ unsigned char, 1)
+
+/* Return an unsigned int representing the alignment (in bits) of the atomic
+ type which maps to machine MODE. This allows alignment to be overridden
+ as needed. */
+DEFHOOK
+(atomic_align_for_mode,
+"If defined, this function returns an appropriate alignment in bits for an\n\
+atomic object of machine_mode @var{mode}. If 0 is returned then the\n\
+default alignment for the specified mode is used.",
+ unsigned int, (machine_mode mode),
+ hook_uint_mode_0)
+
+DEFHOOK
+(atomic_assign_expand_fenv,
+"ISO C11 requires atomic compound assignments that may raise floating-point\n\
+exceptions to raise exceptions corresponding to the arithmetic operation\n\
+whose result was successfully stored in a compare-and-exchange sequence.\n\
+This requires code equivalent to calls to @code{feholdexcept},\n\
+@code{feclearexcept} and @code{feupdateenv} to be generated at\n\
+appropriate points in the compare-and-exchange sequence. This hook should\n\
+set @code{*@var{hold}} to an expression equivalent to the call to\n\
+@code{feholdexcept}, @code{*@var{clear}} to an expression equivalent to\n\
+the call to @code{feclearexcept} and @code{*@var{update}} to an expression\n\
+equivalent to the call to @code{feupdateenv}. The three expressions are\n\
+@code{NULL_TREE} on entry to the hook and may be left as @code{NULL_TREE}\n\
+if no code is required in a particular place. The default implementation\n\
+leaves all three expressions as @code{NULL_TREE}. The\n\
+@code{__atomic_feraiseexcept} function from @code{libatomic} may be of use\n\
+as part of the code generated in @code{*@var{update}}.",
+ void, (tree *hold, tree *clear, tree *update),
+ default_atomic_assign_expand_fenv)
+
+/* Leave the boolean fields at the end. */
+
+/* True if we can create zeroed data by switching to a BSS section
+ and then using ASM_OUTPUT_SKIP to allocate the space. */
+DEFHOOKPOD
+(have_switchable_bss_sections,
+ "This flag is true if we can create zeroed data by switching to a BSS\n\
+section and then using @code{ASM_OUTPUT_SKIP} to allocate the space.\n\
+This is true on most ELF targets.",
+ bool, false)
+
+/* True if "native" constructors and destructors are supported,
+ false if we're using collect2 for the job. */
+DEFHOOKPOD
+(have_ctors_dtors,
+ "This value is true if the target supports some ``native'' method of\n\
+collecting constructors and destructors to be run at startup and exit.\n\
+It is false if we must use @command{collect2}.",
+ bool, false)
+
+/* True if the target wants DTORs to be run from cxa_atexit. */
+DEFHOOKPOD
+(dtors_from_cxa_atexit,
+ "This value is true if the target wants destructors to be queued to be\n\
+run from @code{__cxa_atexit}.  If this is the case then, for each priority\n\
+level, a new constructor will be entered that registers the destructors\n\
+for that level with @code{__cxa_atexit} (and there will be no destructors\n\
+emitted).  It is false if the method implied by @code{have_ctors_dtors}\n\
+is used.",
+ bool, false)
+
+/* True if thread-local storage is supported. */
+DEFHOOKPOD
+(have_tls,
+ "Contains the value true if the target supports thread-local storage.\n\
+The default value is false.",
+ bool, false)
+
+/* True if a small readonly data section is supported. */
+DEFHOOKPOD
+(have_srodata_section,
+ "Contains the value true if the target places read-only\n\
+``small data'' into a separate section. The default value is false.",
+ bool, false)
+
+/* True if EH frame info sections should be zero-terminated. */
+DEFHOOKPOD
+(terminate_dw2_eh_frame_info,
+ "Contains the value true if the target should add a zero word onto the\n\
+end of a DWARF 2 frame info section when used for exception handling.\n\
+Default value is false if @code{EH_FRAME_SECTION_NAME} is defined, and\n\
+true otherwise.",
+ bool, true)
+
+/* True if #NO_APP should be emitted at the beginning of assembly output. */
+DEFHOOKPOD
+(asm_file_start_app_off,
+ "If this flag is true, the text of the macro @code{ASM_APP_OFF} will be\n\
+printed as the very first line in the assembly file, unless\n\
+@option{-fverbose-asm} is in effect. (If that macro has been defined\n\
+to the empty string, this variable has no effect.) With the normal\n\
+definition of @code{ASM_APP_OFF}, the effect is to notify the GNU\n\
+assembler that it need not bother stripping comments or extra\n\
+whitespace from its input. This allows it to work a bit faster.\n\
+\n\
+The default is false. You should not set it to true unless you have\n\
+verified that your port does not generate any extra whitespace or\n\
+comments that will cause GAS to issue errors in NO_APP mode.",
+ bool, false)
+
+/* True if output_file_directive should be called for main_input_filename
+ at the beginning of assembly output. */
+DEFHOOKPOD
+(asm_file_start_file_directive,
+ "If this flag is true, @code{output_file_directive} will be called\n\
+for the primary source file, immediately after printing\n\
+@code{ASM_APP_OFF} (if that is enabled). Most ELF assemblers expect\n\
+this to be done. The default is false.",
+ bool, false)
+
+/* Returns true if we should generate exception tables for use with the
+ ARM EABI.  This affects the encoding of function exception specifications. */
+DEFHOOKPOD
+(arm_eabi_unwinder,
+ "This flag should be set to @code{true} on targets that use an ARM EABI\n\
+based unwinding library, and @code{false} on other targets.  This affects\n\
+the format of unwinding tables, and how the unwinder is entered after\n\
+running a cleanup. The default is @code{false}.",
+ bool, false)
+
+DEFHOOKPOD
+(want_debug_pub_sections,
+ "True if the @code{.debug_pubtypes} and @code{.debug_pubnames} sections\n\
+should be emitted. These sections are not used on most platforms, and\n\
+in particular GDB does not use them.",
+ bool, false)
+
+DEFHOOKPOD
+(delay_sched2,
+ "True if sched2 is not to be run at its normal place.\n\
+This usually means it will be run as part of machine-specific reorg.",
+bool, false)
+
+DEFHOOKPOD
+(delay_vartrack,
+ "True if vartrack is not to be run at its normal place.\n\
+This usually means it will be run as part of machine-specific reorg.",
+bool, false)
+
+DEFHOOKPOD
+(no_register_allocation,
+ "True if register allocation and the passes\n\
+following it should not be run. Usually true only for virtual assembler\n\
+targets.",
+bool, false)
+
+/* Leave the boolean fields at the end. */
+
+/* Functions related to mode switching. */
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_MODE_"
+HOOK_VECTOR (TARGET_TOGGLE_, mode_switching)
+
+DEFHOOK
+(emit,
+ "Generate one or more insns to set @var{entity} to @var{mode}.\n\
+@var{regs_live} is the set of hard registers live at the point where\n\
+the insn(s) are to be inserted.  @var{prev_mode} indicates the mode\n\
+to switch from. Sets of a lower numbered entity will be emitted before\n\
+sets of a higher numbered entity to a mode of the same or lower priority.",
+ void, (int entity, int mode, int prev_mode, HARD_REG_SET regs_live), NULL)
+
+DEFHOOK
+(needed,
+ "@var{entity} is an integer specifying a mode-switched entity.\n\
+If @code{OPTIMIZE_MODE_SWITCHING} is defined, you must define this macro\n\
+to return an integer value not larger than the corresponding element\n\
+in @code{NUM_MODES_FOR_MODE_SWITCHING}, to denote the mode that @var{entity}\n\
+must be switched into prior to the execution of @var{insn}.",
+ int, (int entity, rtx_insn *insn), NULL)
+
+DEFHOOK
+(after,
+ "@var{entity} is an integer specifying a mode-switched entity.\n\
+If this macro is defined, it is evaluated for every @var{insn} during mode\n\
+switching. It determines the mode that an insn results\n\
+in (if different from the incoming mode).",
+ int, (int entity, int mode, rtx_insn *insn), NULL)
+
+DEFHOOK
+(entry,
+ "If this macro is defined, it is evaluated for every @var{entity} that\n\
+needs mode switching. It should evaluate to an integer, which is a mode\n\
+that @var{entity} is assumed to be switched to at function entry.\n\
+If @code{TARGET_MODE_ENTRY} is defined then @code{TARGET_MODE_EXIT}\n\
+must be defined.",
+ int, (int entity), NULL)
+
+DEFHOOK
+(exit,
+ "If this macro is defined, it is evaluated for every @var{entity} that\n\
+needs mode switching. It should evaluate to an integer, which is a mode\n\
+that @var{entity} is assumed to be switched to at function exit.\n\
+If @code{TARGET_MODE_EXIT} is defined then @code{TARGET_MODE_ENTRY}\n\
+must be defined.",
+ int, (int entity), NULL)
+
+DEFHOOK
+(priority,
+ "This macro specifies the order in which modes for @var{entity}\n\
+are processed. 0 is the highest priority,\n\
+@code{NUM_MODES_FOR_MODE_SWITCHING[@var{entity}] - 1} the lowest.\n\
+The value of the macro should be an integer designating a mode\n\
+for @var{entity}. For any fixed @var{entity}, @code{mode_priority}\n\
+(@var{entity}, @var{n}) shall be a bijection in 0 @dots{}\n\
+@code{NUM_MODES_FOR_MODE_SWITCHING[@var{entity}] - 1}.",
+ int, (int entity, int n), NULL)
+
+HOOK_VECTOR_END (mode_switching)
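+
+/* A sketch of how a port opts in to mode switching (illustrative values
+   for a single entity with four modes), in the port's .h file:
+
+     #define OPTIMIZE_MODE_SWITCHING(ENTITY) (1)
+     #define NUM_MODES_FOR_MODE_SWITCHING { 4 }
+
+   together with implementations of the TARGET_MODE_* hooks above.  */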
+
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_MEMTAG_"
+HOOK_VECTOR (TARGET_MEMTAG_, memtag)
+
+DEFHOOK
+(can_tag_addresses,
+ "True if the backend architecture naturally supports ignoring some region\n\
+of pointers. This feature means that @option{-fsanitize=hwaddress} can\n\
+work.\n\
+\n\
+At present, this feature does not support address spaces.  It also requires\n\
+@code{Pmode} to be the same as @code{ptr_mode}.",
+ bool, (), default_memtag_can_tag_addresses)
+
+DEFHOOK
+(tag_size,
+ "Return the size of a tag (in bits) for this platform.\n\
+\n\
+The default returns 8.",
+ uint8_t, (), default_memtag_tag_size)
+
+DEFHOOK
+(granule_size,
+ "Return the size in real memory that each byte in shadow memory refers to.\n\
+I.e.@: if a variable is @var{X} bytes long in memory, then this hook should\n\
+return the value @var{Y} such that the tag in shadow memory spans\n\
+@var{X}/@var{Y} bytes.\n\
+\n\
+Most variables will need to be aligned to this amount since two variables\n\
+that are neighbors in memory and share a tag granule would need to share\n\
+the same tag.\n\
+\n\
+The default returns 16.",
+ uint8_t, (), default_memtag_granule_size)
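+
+/* For example, with the default granule of 16 bytes, a 48-byte object
+   spans 48/16 = 3 granules and hence three tag entries in shadow memory;
+   16-byte alignment keeps neighbouring objects from sharing a granule
+   (and therefore a tag).  */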
+
+DEFHOOK
+(insert_random_tag,
+ "Return an RTX representing the value of @var{untagged} but with a\n\
+(possibly) random tag in it.\n\
+Put that value into @var{target} if it is convenient to do so.\n\
+This function is used to generate a tagged base for the current stack frame.",
+ rtx, (rtx untagged, rtx target), default_memtag_insert_random_tag)
+
+DEFHOOK
+(add_tag,
+ "Return an RTX that represents the result of adding @var{addr_offset} to\n\
+the address in pointer @var{base} and @var{tag_offset} to the tag in pointer\n\
+@var{base}.\n\
+The resulting RTX must either be a valid memory address or be able to be\n\
+put into an operand with @code{force_operand}.\n\
+\n\
+Unlike other memtag hooks, this must return an expression and not emit any\n\
+RTL.",
+ rtx, (rtx base, poly_int64 addr_offset, uint8_t tag_offset),
+ default_memtag_add_tag)
+
+DEFHOOK
+(set_tag,
+ "Return an RTX representing @var{untagged_base} but with the tag @var{tag}.\n\
+Try to store this in @var{target} if convenient.\n\
+@var{untagged_base} is required to have a zero tag when this hook is called.\n\
+The default of this hook is to set the top byte of @var{untagged_base} to\n\
+@var{tag}.",
+ rtx, (rtx untagged_base, rtx tag, rtx target), default_memtag_set_tag)
+
+DEFHOOK
+(extract_tag,
+ "Return an RTX representing the tag stored in @var{tagged_pointer}.\n\
+Store the result in @var{target} if it is convenient.\n\
+The default represents the top byte of the original pointer.",
+ rtx, (rtx tagged_pointer, rtx target), default_memtag_extract_tag)
+
+DEFHOOK
+(untagged_pointer,
+ "Return an RTX representing @var{tagged_pointer} with its tag set to zero.\n\
+Store the result in @var{target} if convenient.\n\
+The default clears the top byte of the original pointer.",
+ rtx, (rtx tagged_pointer, rtx target), default_memtag_untagged_pointer)
+
+HOOK_VECTOR_END (memtag)
+#undef HOOK_PREFIX
+#define HOOK_PREFIX "TARGET_"
+
+#define DEF_TARGET_INSN(NAME, PROTO) \
+ DEFHOOK_UNDOC (have_##NAME, "", bool, (void), false)
+#include "target-insns.def"
+#undef DEF_TARGET_INSN
+
+#define DEF_TARGET_INSN(NAME, PROTO) \
+ DEFHOOK_UNDOC (gen_##NAME, "", rtx_insn *, PROTO, NULL)
+#include "target-insns.def"
+#undef DEF_TARGET_INSN
+
+#define DEF_TARGET_INSN(NAME, PROTO) \
+ DEFHOOKPOD (code_for_##NAME, "*", enum insn_code, CODE_FOR_nothing)
+#include "target-insns.def"
+#undef DEF_TARGET_INSN
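+
+/* For example, target-insns.def has an entry for the optional "prologue"
+   pattern, so target-independent code can test targetm.have_prologue ()
+   and, when it is true, call targetm.gen_prologue ().  */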
+
+DEFHOOK
+(run_target_selftests,
+ "If selftests are enabled, run any selftests for this target.",
+ void, (void),
+ NULL)
+
+DEFHOOK
+(gcov_type_size,
+ "Returns the gcov type size in bits. This type is used for example for\n\
+counters incremented by profiling and code-coverage events. The default\n\
+value is 64 if the type size of @code{long long} is greater than 32,\n\
+otherwise the default value is 32.  A 64-bit type is recommended to avoid\n\
+overflows of the counters.  If @option{-fprofile-update=atomic} is used, the\n\
+counters are incremented using atomic operations. Targets not supporting\n\
+64-bit atomic operations may override the default value and request a 32-bit\n\
+type.",
+ HOST_WIDE_INT, (void), default_gcov_type_size)
+
+/* This value represents whether the shadow call stack is implemented on
+ the target platform. */
+DEFHOOKPOD
+(have_shadow_call_stack,
+ "This value is true if the target platform supports\n\
+@option{-fsanitize=shadow-call-stack}. The default value is false.",
+ bool, false)
+
+/* Close the 'struct gcc_target' definition. */
+HOOK_VECTOR_END (C90_EMPTY_HACK)
+
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target.h
new file mode 100644
index 0000000..cd448e4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/target.h
@@ -0,0 +1,321 @@
+/* Data structure definitions for a generic GCC target.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>.
+
+ In other words, you are welcome to use, share and improve this program.
+ You are forbidden to forbid anyone else to use, share and improve
+ what you give them. Help stamp out software-hoarding! */
+
+
+/* This file contains a data structure that describes a GCC target.
+ At present it is incomplete, but in future it should grow to
+ contain most or all target machine and target O/S specific
+ information.
+
+ This structure has its initializer declared in target-def.h in the
+ form of large macro TARGET_INITIALIZER that expands to many smaller
+ macros.
+
+ The smaller macros each initialize one component of the structure,
+ and each has a default. Each target should have a file that
+ includes target.h and target-def.h, and overrides any inappropriate
+ defaults by undefining the relevant macro and defining a suitable
+ replacement. That file should then contain the definition of
+ "targetm" like so:
+
+ struct gcc_target targetm = TARGET_INITIALIZER;
+
+ Doing things this way allows us to bring together everything that
+ defines a GCC target. By supplying a default that is appropriate
+ to most targets, we can easily add new items without needing to
+ edit dozens of target configuration files. It should also allow us
+ to gradually reduce the amount of conditional compilation that is
+ scattered throughout GCC. */
+
+#ifndef GCC_TARGET_H
+#define GCC_TARGET_H
+
+#include "insn-codes.h"
+#include "tm.h"
+#include "hard-reg-set.h"
+
+#if CHECKING_P
+
+struct cumulative_args_t { void *magic; void *p; };
+
+#else /* !CHECKING_P */
+
+/* When using a GCC build compiler, we could use
+ __attribute__((transparent_union)) to get cumulative_args_t function
+ arguments passed like scalars where the ABI would mandate a less
+ efficient way of argument passing otherwise. However, that would come
+ at the cost of less type-safe !CHECKING_P compilation. */
+
+union cumulative_args_t { void *p; };
+
+#endif /* !CHECKING_P */
+
+/* Types of memory operation understood by the "by_pieces" infrastructure.
+ Used by the TARGET_USE_BY_PIECES_INFRASTRUCTURE_P target hook and
+ internally by the functions in expr.cc. */
+
+enum by_pieces_operation
+{
+ CLEAR_BY_PIECES,
+ MOVE_BY_PIECES,
+ SET_BY_PIECES,
+ STORE_BY_PIECES,
+ COMPARE_BY_PIECES
+};
+
+extern unsigned HOST_WIDE_INT by_pieces_ninsns (unsigned HOST_WIDE_INT,
+ unsigned int,
+ unsigned int,
+ by_pieces_operation);
+
+/* An example implementation for ELF targets.  Defined in varasm.cc.  */
+extern void elf_record_gcc_switches (const char *);
+
+/* Some places still assume that all pointer or address modes are the
+ standard Pmode and ptr_mode. These optimizations become invalid if
+ the target actually supports multiple different modes. For now,
+ we disable such optimizations on such targets, using this function. */
+extern bool target_default_pointer_address_modes_p (void);
+
+/* For hooks which use the MOVE_RATIO macro, this gives the legacy default
+ behavior. */
+extern unsigned int get_move_ratio (bool);
+
+struct stdarg_info;
+struct spec_info_def;
+struct hard_reg_set_container;
+struct cgraph_node;
+struct cgraph_simd_clone;
+
+/* The struct used by the secondary_reload target hook. */
+struct secondary_reload_info
+{
+ /* icode is actually an enum insn_code, but we don't want to force every
+ file that includes target.h to include optabs.h . */
+ int icode;
+ int extra_cost; /* Cost for using (a) scratch register(s) to be taken
+ into account by copy_cost. */
+ /* The next two members are for the use of the backward
+ compatibility hook. */
+ struct secondary_reload_info *prev_sri;
+ int t_icode; /* Actually an enum insn_code - see above. */
+};
+
+/* This is defined in sched-int.h . */
+struct _dep;
+
+/* This is defined in ddg.h . */
+struct ddg;
+
+/* This is defined in cfgloop.h . */
+class loop;
+
+/* This is defined in ifcvt.h. */
+struct noce_if_info;
+
+/* This is defined in tree-ssa-alias.h. */
+class ao_ref;
+
+/* This is defined in tree-vectorizer.h. */
+class _stmt_vec_info;
+
+/* This is defined in calls.h. */
+class function_arg_info;
+
+/* This is defined in function-abi.h. */
+class predefined_function_abi;
+
+/* These are defined in tree-vect-stmts.cc. */
+extern tree stmt_vectype (class _stmt_vec_info *);
+extern bool stmt_in_inner_loop_p (class vec_info *, class _stmt_vec_info *);
+
+/* Assembler instructions for creating various kinds of integer object. */
+
+struct asm_int_op
+{
+ const char *hi;
+ const char *psi;
+ const char *si;
+ const char *pdi;
+ const char *di;
+ const char *pti;
+ const char *ti;
+};
+
+/* Types of costs for vectorizer cost model. */
+enum vect_cost_for_stmt
+{
+ scalar_stmt,
+ scalar_load,
+ scalar_store,
+ vector_stmt,
+ vector_load,
+ vector_gather_load,
+ unaligned_load,
+ unaligned_store,
+ vector_store,
+ vector_scatter_store,
+ vec_to_scalar,
+ scalar_to_vec,
+ cond_branch_not_taken,
+ cond_branch_taken,
+ vec_perm,
+ vec_promote_demote,
+ vec_construct
+};
+
+/* Separate locations for which the vectorizer cost model should
+ track costs. */
+enum vect_cost_model_location {
+ vect_prologue = 0,
+ vect_body = 1,
+ vect_epilogue = 2
+};
+
+class vec_perm_indices;
+
+/* The type to use for lists of vector sizes. */
+typedef vec<machine_mode> vector_modes;
+
+/* Same, but can be used to construct local lists that are
+ automatically freed. */
+typedef auto_vec<machine_mode, 8> auto_vector_modes;
+
+/* First argument of targetm.omp.device_kind_arch_isa. */
+enum omp_device_kind_arch_isa {
+ omp_device_kind,
+ omp_device_arch,
+ omp_device_isa
+};
+
+/* Flags returned by TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_MODES:
+
+ VECT_COMPARE_COSTS
+ Tells the loop vectorizer to try all the provided modes and
+ pick the one with the lowest cost. By default the vectorizer
+ will choose the first mode that works. */
+const unsigned int VECT_COMPARE_COSTS = 1U << 0;
+
+/* The contexts in which the use of a type T can be checked by
+ TARGET_VERIFY_TYPE_CONTEXT. */
+enum type_context_kind {
+ /* Directly measuring the size of T. */
+ TCTX_SIZEOF,
+
+ /* Directly measuring the alignment of T. */
+ TCTX_ALIGNOF,
+
+ /* Creating objects of type T with static storage duration. */
+ TCTX_STATIC_STORAGE,
+
+ /* Creating objects of type T with thread-local storage duration. */
+ TCTX_THREAD_STORAGE,
+
+ /* Creating a field of type T. */
+ TCTX_FIELD,
+
+ /* Creating an array with elements of type T. */
+ TCTX_ARRAY_ELEMENT,
+
+ /* Adding to or subtracting from a pointer to T, or computing the
+ difference between two pointers when one of them is a pointer to T. */
+ TCTX_POINTER_ARITH,
+
+ /* Dynamically allocating objects of type T. */
+ TCTX_ALLOCATION,
+
+ /* Dynamically deallocating objects of type T. */
+ TCTX_DEALLOCATION,
+
+ /* Throwing or catching an object of type T. */
+ TCTX_EXCEPTIONS,
+
+ /* Capturing objects of type T by value in a closure. */
+ TCTX_CAPTURE_BY_COPY
+};
+
+enum poly_value_estimate_kind
+{
+ POLY_VALUE_MIN,
+ POLY_VALUE_MAX,
+ POLY_VALUE_LIKELY
+};
+
+typedef void (*emit_support_tinfos_callback) (tree);
+
+extern bool verify_type_context (location_t, type_context_kind, const_tree,
+ bool = false);
+
+/* The target structure. This holds all the backend hooks. */
+#define DEFHOOKPOD(NAME, DOC, TYPE, INIT) TYPE NAME;
+#define DEFHOOK(NAME, DOC, TYPE, PARAMS, INIT) TYPE (* NAME) PARAMS;
+#define DEFHOOK_UNDOC DEFHOOK
+#define HOOKSTRUCT(FRAGMENT) FRAGMENT
+
+#include "target.def"
+
+extern struct gcc_target targetm;
+
+/* Return an estimate of the runtime value of X, for use in things
+ like cost calculations or profiling frequencies. Note that this
+ function should never be used in situations where the actual
+ runtime value is needed for correctness, since the function only
+ provides a rough guess. */
+
+inline HOST_WIDE_INT
+estimated_poly_value (poly_int64 x,
+ poly_value_estimate_kind kind = POLY_VALUE_LIKELY)
+{
+ if (NUM_POLY_INT_COEFFS == 1)
+ return x.coeffs[0];
+ else
+ return targetm.estimated_poly_value (x, kind);
+}
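+
+/* Usage sketch: a cost heuristic that must not underestimate might write
+
+     HOST_WIDE_INT upper = estimated_poly_value (size, POLY_VALUE_MAX);
+
+   for some poly_int64 quantity SIZE of interest.  */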
+
+#ifdef GCC_TM_H
+
+#ifndef CUMULATIVE_ARGS_MAGIC
+#define CUMULATIVE_ARGS_MAGIC ((void *) &targetm.calls)
+#endif
+
+inline CUMULATIVE_ARGS *
+get_cumulative_args (cumulative_args_t arg)
+{
+#if CHECKING_P
+ gcc_assert (arg.magic == CUMULATIVE_ARGS_MAGIC);
+#endif /* CHECKING_P */
+ return (CUMULATIVE_ARGS *) arg.p;
+}
+
+inline cumulative_args_t
+pack_cumulative_args (CUMULATIVE_ARGS *arg)
+{
+ cumulative_args_t ret;
+
+#if CHECKING_P
+ ret.magic = CUMULATIVE_ARGS_MAGIC;
+#endif /* CHECKING_P */
+ ret.p = (void *) arg;
+ return ret;
+}
+#endif /* GCC_TM_H */
+
+#endif /* GCC_TARGET_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/targhooks.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/targhooks.h
new file mode 100644
index 0000000..cf3d310
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/targhooks.h
@@ -0,0 +1,303 @@
+/* Default target hook functions.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TARGHOOKS_H
+#define GCC_TARGHOOKS_H
+
+extern bool default_legitimate_address_p (machine_mode, rtx, bool);
+
+extern void default_external_libcall (rtx);
+extern rtx default_legitimize_address (rtx, rtx, machine_mode);
+extern bool default_legitimize_address_displacement (rtx *, rtx *,
+ poly_int64, machine_mode);
+extern bool default_const_not_ok_for_debug_p (rtx);
+
+extern int default_unspec_may_trap_p (const_rtx, unsigned);
+extern machine_mode default_promote_function_mode (const_tree, machine_mode,
+ int *, const_tree, int);
+extern machine_mode default_promote_function_mode_always_promote
+ (const_tree, machine_mode, int *, const_tree, int);
+
+extern machine_mode default_cc_modes_compatible (machine_mode,
+ machine_mode);
+
+extern bool default_return_in_memory (const_tree, const_tree);
+
+extern rtx default_expand_builtin_saveregs (void);
+extern void default_setup_incoming_varargs (cumulative_args_t,
+ const function_arg_info &,
+ int *, int);
+extern rtx default_builtin_setjmp_frame_value (void);
+extern bool default_pretend_outgoing_varargs_named (cumulative_args_t);
+
+extern scalar_int_mode default_eh_return_filter_mode (void);
+extern scalar_int_mode default_libgcc_cmp_return_mode (void);
+extern scalar_int_mode default_libgcc_shift_count_mode (void);
+extern scalar_int_mode default_unwind_word_mode (void);
+extern unsigned HOST_WIDE_INT default_shift_truncation_mask
+ (machine_mode);
+extern unsigned int default_min_divisions_for_recip_mul (machine_mode);
+extern bool default_preferred_div_as_shifts_over_mult
+ (const_tree);
+extern int default_mode_rep_extended (scalar_int_mode, scalar_int_mode);
+
+extern tree default_stack_protect_guard (void);
+extern tree default_external_stack_protect_fail (void);
+extern tree default_hidden_stack_protect_fail (void);
+
+extern machine_mode default_mode_for_suffix (char);
+
+extern tree default_cxx_guard_type (void);
+extern tree default_cxx_get_cookie_size (tree);
+
+extern bool hook_pass_by_reference_must_pass_in_stack
+ (cumulative_args_t, const function_arg_info &);
+extern bool hook_callee_copies_named
+ (cumulative_args_t ca, const function_arg_info &);
+
+extern void default_print_operand (FILE *, rtx, int);
+extern void default_print_operand_address (FILE *, machine_mode, rtx);
+extern bool default_print_operand_punct_valid_p (unsigned char);
+extern tree default_mangle_assembler_name (const char *);
+
+extern machine_mode default_translate_mode_attribute (machine_mode);
+extern bool default_scalar_mode_supported_p (scalar_mode);
+extern bool default_libgcc_floating_mode_supported_p (scalar_float_mode);
+extern opt_scalar_float_mode default_floatn_mode (int, bool);
+extern bool default_floatn_builtin_p (int);
+extern bool targhook_words_big_endian (void);
+extern bool targhook_float_words_big_endian (void);
+extern bool default_float_exceptions_rounding_supported_p (void);
+extern bool default_decimal_float_supported_p (void);
+extern bool default_fixed_point_supported_p (void);
+
+extern bool default_has_ifunc_p (void);
+
+extern bool default_predict_doloop_p (class loop *);
+extern machine_mode default_preferred_doloop_mode (machine_mode);
+extern const char * default_invalid_within_doloop (const rtx_insn *);
+
+extern tree default_builtin_vectorized_function (unsigned int, tree, tree);
+extern tree default_builtin_md_vectorized_function (tree, tree, tree);
+
+extern int default_builtin_vectorization_cost (enum vect_cost_for_stmt, tree, int);
+
+extern tree default_builtin_reciprocal (tree);
+
+extern void default_emit_support_tinfos (emit_support_tinfos_callback);
+
+extern HOST_WIDE_INT default_static_rtx_alignment (machine_mode);
+extern HOST_WIDE_INT default_constant_alignment (const_tree, HOST_WIDE_INT);
+extern HOST_WIDE_INT constant_alignment_word_strings (const_tree,
+ HOST_WIDE_INT);
+extern HOST_WIDE_INT default_vector_alignment (const_tree);
+
+extern poly_uint64 default_preferred_vector_alignment (const_tree);
+extern bool default_builtin_vector_alignment_reachable (const_tree, bool);
+extern bool
+default_builtin_support_vector_misalignment (machine_mode mode,
+ const_tree,
+ int, bool);
+extern machine_mode default_preferred_simd_mode (scalar_mode mode);
+extern machine_mode default_split_reduction (machine_mode);
+extern unsigned int default_autovectorize_vector_modes (vector_modes *, bool);
+extern opt_machine_mode default_vectorize_related_mode (machine_mode,
+ scalar_mode,
+ poly_uint64);
+extern opt_machine_mode default_get_mask_mode (machine_mode);
+extern bool default_empty_mask_is_expensive (unsigned);
+extern vector_costs *default_vectorize_create_costs (vec_info *, bool);
+
+/* OpenACC hooks. */
+extern bool default_goacc_validate_dims (tree, int [], int, unsigned);
+extern int default_goacc_dim_limit (int);
+extern bool default_goacc_fork_join (gcall *, const int [], bool);
+extern void default_goacc_reduction (gcall *);
+
+/* These are here, and not in hooks.[ch], because not all users of
+ hooks.h include tm.h, and thus we don't have CUMULATIVE_ARGS. */
+
+extern bool hook_bool_CUMULATIVE_ARGS_false (cumulative_args_t);
+extern bool hook_bool_CUMULATIVE_ARGS_true (cumulative_args_t);
+
+extern bool hook_bool_CUMULATIVE_ARGS_arg_info_false
+ (cumulative_args_t, const function_arg_info &);
+extern bool hook_bool_CUMULATIVE_ARGS_arg_info_true
+ (cumulative_args_t, const function_arg_info &);
+extern int hook_int_CUMULATIVE_ARGS_arg_info_0
+ (cumulative_args_t, const function_arg_info &);
+extern void hook_void_CUMULATIVE_ARGS_tree
+ (cumulative_args_t, tree);
+extern const char *hook_invalid_arg_for_unprototyped_fn
+ (const_tree, const_tree, const_tree);
+extern void default_function_arg_advance
+ (cumulative_args_t, const function_arg_info &);
+extern bool default_push_argument (unsigned int);
+extern HOST_WIDE_INT default_function_arg_offset (machine_mode, const_tree);
+extern pad_direction default_function_arg_padding (machine_mode, const_tree);
+extern rtx default_function_arg (cumulative_args_t, const function_arg_info &);
+extern rtx default_function_incoming_arg (cumulative_args_t,
+ const function_arg_info &);
+extern unsigned int default_function_arg_boundary (machine_mode,
+ const_tree);
+extern unsigned int default_function_arg_round_boundary (machine_mode,
+ const_tree);
+extern bool hook_bool_const_rtx_commutative_p (const_rtx, int);
+extern rtx default_function_value (const_tree, const_tree, bool);
+extern HARD_REG_SET default_zero_call_used_regs (HARD_REG_SET);
+extern rtx default_libcall_value (machine_mode, const_rtx);
+extern bool default_function_value_regno_p (const unsigned int);
+extern rtx default_internal_arg_pointer (void);
+extern rtx default_static_chain (const_tree, bool);
+extern void default_trampoline_init (rtx, tree, rtx);
+extern void default_emit_call_builtin___clear_cache (rtx, rtx);
+extern poly_int64 default_return_pops_args (tree, tree, poly_int64);
+extern reg_class_t default_ira_change_pseudo_allocno_class (int, reg_class_t,
+ reg_class_t);
+extern bool default_lra_p (void);
+extern int default_register_priority (int);
+extern bool default_register_usage_leveling_p (void);
+extern bool default_different_addr_displacement_p (void);
+extern reg_class_t default_secondary_reload (bool, rtx, reg_class_t,
+ machine_mode,
+ secondary_reload_info *);
+extern machine_mode default_secondary_memory_needed_mode (machine_mode);
+extern void default_target_option_override (void);
+extern void hook_void_bitmap (bitmap);
+extern int default_reloc_rw_mask (void);
+extern bool default_generate_pic_addr_diff_vec (void);
+extern void default_asm_out_constructor (rtx, int);
+extern void default_asm_out_destructor (rtx, int);
+extern tree default_mangle_decl_assembler_name (tree, tree);
+extern tree default_emutls_var_fields (tree, tree *);
+extern tree default_emutls_var_init (tree, tree, tree);
+extern unsigned int default_hard_regno_nregs (unsigned int, machine_mode);
+extern bool default_hard_regno_scratch_ok (unsigned int);
+extern bool default_mode_dependent_address_p (const_rtx, addr_space_t);
+extern bool default_new_address_profitable_p (rtx, rtx_insn *, rtx);
+extern bool default_target_option_valid_attribute_p (tree, tree, tree, int);
+extern bool default_target_option_pragma_parse (tree, tree);
+extern bool default_target_can_inline_p (tree, tree);
+extern bool default_update_ipa_fn_target_info (unsigned int &, const gimple *);
+extern bool default_need_ipa_fn_target_info (const_tree, unsigned int &);
+extern bool default_valid_pointer_mode (scalar_int_mode);
+extern bool default_ref_may_alias_errno (class ao_ref *);
+extern scalar_int_mode default_addr_space_pointer_mode (addr_space_t);
+extern scalar_int_mode default_addr_space_address_mode (addr_space_t);
+extern bool default_addr_space_valid_pointer_mode (scalar_int_mode,
+ addr_space_t);
+extern bool default_addr_space_legitimate_address_p (machine_mode, rtx,
+ bool, addr_space_t);
+extern rtx default_addr_space_legitimize_address (rtx, rtx, machine_mode,
+ addr_space_t);
+extern bool default_addr_space_subset_p (addr_space_t, addr_space_t);
+extern bool default_addr_space_zero_address_valid (addr_space_t);
+extern int default_addr_space_debug (addr_space_t);
+extern void default_addr_space_diagnose_usage (addr_space_t, location_t);
+extern rtx default_addr_space_convert (rtx, tree, tree);
+extern unsigned int default_case_values_threshold (void);
+extern bool default_have_conditional_execution (void);
+
+extern bool default_libc_has_function (enum function_class, tree);
+extern bool default_libc_has_fast_function (int fcode);
+extern bool no_c99_libc_has_function (enum function_class, tree);
+extern bool gnu_libc_has_function (enum function_class, tree);
+extern bool bsd_libc_has_function (enum function_class, tree);
+
+extern tree default_builtin_tm_load_store (tree);
+
+extern int default_memory_move_cost (machine_mode, reg_class_t, bool);
+extern int default_register_move_cost (machine_mode, reg_class_t,
+ reg_class_t);
+extern bool default_slow_unaligned_access (machine_mode, unsigned int);
+extern HOST_WIDE_INT default_estimated_poly_value (poly_int64,
+ poly_value_estimate_kind);
+
+extern bool default_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT,
+ unsigned int,
+ enum by_pieces_operation,
+ bool);
+extern int default_compare_by_pieces_branch_ratio (machine_mode);
+
+extern void default_print_patchable_function_entry (FILE *,
+ unsigned HOST_WIDE_INT,
+ bool);
+extern bool default_profile_before_prologue (void);
+extern reg_class_t default_preferred_reload_class (rtx, reg_class_t);
+extern reg_class_t default_preferred_output_reload_class (rtx, reg_class_t);
+extern reg_class_t default_preferred_rename_class (reg_class_t rclass);
+extern bool default_class_likely_spilled_p (reg_class_t);
+extern unsigned char default_class_max_nregs (reg_class_t, machine_mode);
+
+extern enum unwind_info_type default_debug_unwind_info (void);
+
+extern void default_canonicalize_comparison (int *, rtx *, rtx *, bool);
+
+extern section * default_function_section (tree decl, enum node_frequency freq,
+ bool startup, bool exit);
+extern unsigned int default_dwarf_poly_indeterminate_value (unsigned int,
+ unsigned int *,
+ int *);
+extern machine_mode default_dwarf_frame_reg_mode (int);
+extern fixed_size_mode default_get_reg_raw_mode (int);
+extern bool default_keep_leaf_when_profiled ();
+
+extern void *default_get_pch_validity (size_t *);
+extern const char *default_pch_valid_p (const void *, size_t);
+
+extern void default_asm_output_ident_directive (const char*);
+
+extern scalar_int_mode default_cstore_mode (enum insn_code);
+extern bool default_member_type_forces_blk (const_tree, machine_mode);
+extern void default_atomic_assign_expand_fenv (tree *, tree *, tree *);
+extern tree build_va_arg_indirect_ref (tree);
+extern tree std_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
+extern bool can_use_doloop_if_innermost (const widest_int &,
+ const widest_int &,
+ unsigned int, bool);
+
+extern bool default_optab_supported_p (int, machine_mode, machine_mode,
+ optimization_type);
+extern unsigned int default_max_noce_ifcvt_seq_cost (edge);
+extern bool default_noce_conversion_profitable_p (rtx_insn *,
+ struct noce_if_info *);
+extern unsigned int default_min_arithmetic_precision (void);
+
+extern enum flt_eval_method
+default_excess_precision (enum excess_precision_type ATTRIBUTE_UNUSED);
+extern HOST_WIDE_INT default_stack_clash_protection_alloca_probe_range (void);
+extern void default_select_early_remat_modes (sbitmap);
+extern tree default_preferred_else_value (unsigned, tree, unsigned, tree *);
+
+extern bool default_have_speculation_safe_value (bool);
+extern bool speculation_safe_value_not_needed (bool);
+extern rtx default_speculation_safe_value (machine_mode, rtx, rtx, rtx);
+
+extern bool default_memtag_can_tag_addresses ();
+extern uint8_t default_memtag_tag_size ();
+extern uint8_t default_memtag_granule_size ();
+extern rtx default_memtag_insert_random_tag (rtx, rtx);
+extern rtx default_memtag_add_tag (rtx, poly_int64, uint8_t);
+extern rtx default_memtag_set_tag (rtx, rtx, rtx);
+extern rtx default_memtag_extract_tag (rtx, rtx);
+extern rtx default_memtag_untagged_pointer (rtx, rtx);
+
+extern HOST_WIDE_INT default_gcov_type_size (void);
+
+#endif /* GCC_TARGHOOKS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/timevar.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/timevar.def
new file mode 100644
index 0000000..9523598
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/timevar.def
@@ -0,0 +1,346 @@
+/* This file contains the definitions for timing variables used to
+ measure run-time performance of the compiler.
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+ Contributed by Alex Samuel <samuel@codesourcery.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* This file contains timing variable definitions, used by timevar.h
+ and timevar.cc.
+
+ Syntax:
+
+ DEFTIMEVAR (id, name)
+
+ where ID is the enumeral value used to identify the timing
+ variable, and NAME is a character string describing its purpose. */
+
+/* The total execution time. */
+DEFTIMEVAR (TV_TOTAL , "total time")
+/* The compiler phases.
+
+ These must be mutually exclusive, and the NAME field must begin
+ with "phase".
+
+ Also, their sum must be within a millionth of the total time (see
+ validate_phases). */
+DEFTIMEVAR (TV_PHASE_SETUP , "phase setup")
+DEFTIMEVAR (TV_PHASE_PARSING , "phase parsing")
+DEFTIMEVAR (TV_PHASE_DEFERRED , "phase lang. deferred")
+DEFTIMEVAR (TV_PHASE_LATE_PARSING_CLEANUPS, "phase late parsing cleanups")
+DEFTIMEVAR (TV_PHASE_OPT_GEN , "phase opt and generate")
+DEFTIMEVAR (TV_PHASE_LATE_ASM , "phase last asm")
+DEFTIMEVAR (TV_PHASE_STREAM_IN , "phase stream in")
+DEFTIMEVAR (TV_PHASE_STREAM_OUT , "phase stream out")
+DEFTIMEVAR (TV_PHASE_FINALIZE , "phase finalize")
+
+/* Concurrent timers, indicated by "|". */
+DEFTIMEVAR (TV_NAME_LOOKUP , "|name lookup")
+DEFTIMEVAR (TV_OVERLOAD , "|overload resolution")
+
+/* Time spent garbage-collecting. */
+DEFTIMEVAR (TV_GC , "garbage collection")
+
+/* Time spent generating dump files. */
+DEFTIMEVAR (TV_DUMP , "dump files")
+
+/* Time spent saving/restoring PCH state. */
+DEFTIMEVAR (TV_PCH_SAVE , "PCH main state save")
+DEFTIMEVAR (TV_PCH_CPP_SAVE , "PCH preprocessor state save")
+DEFTIMEVAR (TV_PCH_PTR_REALLOC , "PCH pointer reallocation")
+DEFTIMEVAR (TV_PCH_PTR_SORT , "PCH pointer sort")
+DEFTIMEVAR (TV_PCH_RESTORE , "PCH main state restore")
+DEFTIMEVAR (TV_PCH_CPP_RESTORE , "PCH preprocessor state restore")
+
+DEFTIMEVAR (TV_CGRAPH , "callgraph construction")
+DEFTIMEVAR (TV_CGRAPHOPT , "callgraph optimization")
+DEFTIMEVAR (TV_CGRAPH_FUNC_EXPANSION , "callgraph functions expansion")
+DEFTIMEVAR (TV_CGRAPH_IPA_PASSES , "callgraph ipa passes")
+DEFTIMEVAR (TV_IPA_ODR , "ipa ODR types")
+DEFTIMEVAR (TV_IPA_FNSUMMARY , "ipa function summary")
+DEFTIMEVAR (TV_IPA_UNREACHABLE , "ipa dead code removal")
+DEFTIMEVAR (TV_IPA_INHERITANCE , "ipa inheritance graph")
+DEFTIMEVAR (TV_IPA_VIRTUAL_CALL , "ipa virtual call target")
+DEFTIMEVAR (TV_IPA_DEVIRT , "ipa devirtualization")
+DEFTIMEVAR (TV_IPA_CONSTANT_PROP , "ipa cp")
+DEFTIMEVAR (TV_IPA_INLINING , "ipa inlining heuristics")
+DEFTIMEVAR (TV_IPA_FNSPLIT , "ipa function splitting")
+DEFTIMEVAR (TV_IPA_COMDATS , "ipa comdats")
+DEFTIMEVAR (TV_IPA_OPT , "ipa various optimizations")
+DEFTIMEVAR (TV_IPA_LTO_DECOMPRESS , "lto stream decompression")
+DEFTIMEVAR (TV_IPA_LTO_COMPRESS , "lto stream compression")
+DEFTIMEVAR (TV_IPA_LTO_OUTPUT , "lto stream output")
+DEFTIMEVAR (TV_IPA_LTO_GIMPLE_IN , "ipa lto gimple in")
+DEFTIMEVAR (TV_IPA_LTO_GIMPLE_OUT , "ipa lto gimple out")
+DEFTIMEVAR (TV_IPA_LTO_DECL_IN , "ipa lto decl in")
+DEFTIMEVAR (TV_IPA_LTO_DECL_OUT , "ipa lto decl out")
+DEFTIMEVAR (TV_IPA_LTO_CTORS_IN , "ipa lto constructors in")
+DEFTIMEVAR (TV_IPA_LTO_CTORS_OUT , "ipa lto constructors out")
+DEFTIMEVAR (TV_IPA_LTO_CGRAPH_IO , "ipa lto cgraph I/O")
+DEFTIMEVAR (TV_IPA_LTO_DECL_MERGE , "ipa lto decl merge")
+DEFTIMEVAR (TV_IPA_LTO_CGRAPH_MERGE , "ipa lto cgraph merge")
+DEFTIMEVAR (TV_LTO , "lto")
+DEFTIMEVAR (TV_WHOPR_WPA , "whopr wpa")
+DEFTIMEVAR (TV_WHOPR_WPA_IO , "whopr wpa I/O")
+DEFTIMEVAR (TV_WHOPR_PARTITIONING , "whopr partitioning")
+DEFTIMEVAR (TV_WHOPR_LTRANS , "whopr ltrans")
+DEFTIMEVAR (TV_IPA_REFERENCE , "ipa reference")
+DEFTIMEVAR (TV_IPA_PROFILE , "ipa profile")
+DEFTIMEVAR (TV_IPA_AUTOFDO , "auto profile")
+DEFTIMEVAR (TV_IPA_PURE_CONST , "ipa pure const")
+DEFTIMEVAR (TV_IPA_ICF , "ipa icf")
+DEFTIMEVAR (TV_IPA_PTA , "ipa points-to")
+DEFTIMEVAR (TV_IPA_SRA , "ipa SRA")
+DEFTIMEVAR (TV_IPA_FREE_LANG_DATA , "ipa free lang data")
+DEFTIMEVAR (TV_IPA_FREE_INLINE_SUMMARY, "ipa free inline summary")
+DEFTIMEVAR (TV_IPA_MODREF , "ipa modref")
+/* Time spent by constructing CFG. */
+DEFTIMEVAR (TV_CFG , "cfg construction")
+/* Time spent by cleaning up CFG. */
+DEFTIMEVAR (TV_CLEANUP_CFG , "cfg cleanup")
+DEFTIMEVAR (TV_CFG_VERIFY , "CFG verifier")
+DEFTIMEVAR (TV_DELETE_TRIVIALLY_DEAD , "trivially dead code")
+
+/* Time spent in dataflow problems. */
+DEFTIMEVAR (TV_DF_SCAN , "df scan insns")
+DEFTIMEVAR (TV_DF_MD , "df multiple defs")
+DEFTIMEVAR (TV_DF_RD , "df reaching defs")
+DEFTIMEVAR (TV_DF_LR , "df live regs")
+DEFTIMEVAR (TV_DF_LIVE , "df live&initialized regs")
+DEFTIMEVAR (TV_DF_MIR , "df must-initialized regs")
+DEFTIMEVAR (TV_DF_CHAIN , "df use-def / def-use chains")
+DEFTIMEVAR (TV_DF_WORD_LR , "df live reg subwords")
+DEFTIMEVAR (TV_DF_NOTE , "df reg dead/unused notes")
+DEFTIMEVAR (TV_REG_STATS , "register information")
+
+DEFTIMEVAR (TV_ALIAS_ANALYSIS , "alias analysis")
+DEFTIMEVAR (TV_ALIAS_STMT_WALK , "alias stmt walking")
+DEFTIMEVAR (TV_REG_SCAN , "register scan")
+DEFTIMEVAR (TV_REBUILD_JUMP , "rebuild jump labels")
+/* Timing in various stages of the compiler. */
+DEFTIMEVAR (TV_CPP , "preprocessing")
+DEFTIMEVAR (TV_LEX , "lexical analysis")
+DEFTIMEVAR (TV_PARSE_GLOBAL , "parser (global)")
+DEFTIMEVAR (TV_PARSE_STRUCT , "parser struct body")
+DEFTIMEVAR (TV_PARSE_ENUM , "parser enumerator list")
+DEFTIMEVAR (TV_PARSE_FUNC , "parser function body")
+DEFTIMEVAR (TV_PARSE_INLINE , "parser inl. func. body")
+DEFTIMEVAR (TV_PARSE_INMETH , "parser inl. meth. body")
+DEFTIMEVAR (TV_TEMPLATE_INST , "template instantiation")
+DEFTIMEVAR (TV_CONSTEXPR , "constant expression evaluation")
+DEFTIMEVAR (TV_CONSTRAINT_NORM , "constraint normalization")
+DEFTIMEVAR (TV_CONSTRAINT_SAT , "constraint satisfaction")
+DEFTIMEVAR (TV_CONSTRAINT_SUB , "constraint subsumption")
+DEFTIMEVAR (TV_MODULE_IMPORT , "module import")
+DEFTIMEVAR (TV_MODULE_EXPORT , "module export")
+DEFTIMEVAR (TV_MODULE_MAPPER , "module mapper")
+DEFTIMEVAR (TV_FLATTEN_INLINING , "flatten inlining")
+DEFTIMEVAR (TV_EARLY_INLINING , "early inlining heuristics")
+DEFTIMEVAR (TV_INLINE_PARAMETERS , "inline parameters")
+DEFTIMEVAR (TV_INTEGRATION , "integration")
+DEFTIMEVAR (TV_TREE_GIMPLIFY , "tree gimplify")
+DEFTIMEVAR (TV_TREE_EH , "tree eh")
+DEFTIMEVAR (TV_TREE_CFG , "tree CFG construction")
+DEFTIMEVAR (TV_TREE_CLEANUP_CFG , "tree CFG cleanup")
+DEFTIMEVAR (TV_TREE_TAIL_MERGE , "tree tail merge")
+DEFTIMEVAR (TV_TREE_VRP , "tree VRP")
+DEFTIMEVAR (TV_TREE_VRP_THREADER , "tree VRP threader")
+DEFTIMEVAR (TV_TREE_EARLY_VRP , "tree Early VRP")
+DEFTIMEVAR (TV_TREE_COPY_PROP , "tree copy propagation")
+DEFTIMEVAR (TV_FIND_REFERENCED_VARS , "tree find ref. vars")
+DEFTIMEVAR (TV_TREE_PTA , "tree PTA")
+DEFTIMEVAR (TV_TREE_SSA_OTHER , "tree SSA other")
+DEFTIMEVAR (TV_TREE_INTO_SSA , "tree SSA rewrite")
+DEFTIMEVAR (TV_TREE_SSA_INCREMENTAL , "tree SSA incremental")
+DEFTIMEVAR (TV_TREE_OPS , "tree operand scan")
+DEFTIMEVAR (TV_TREE_SSA_DOMINATOR_OPTS , "dominator optimization")
+DEFTIMEVAR (TV_TREE_SSA_THREAD_JUMPS , "backwards jump threading")
+DEFTIMEVAR (TV_TREE_SRA , "tree SRA")
+DEFTIMEVAR (TV_ISOLATE_ERRONEOUS_PATHS , "isolate erroneous paths")
+DEFTIMEVAR (TV_TREE_CCP , "tree CCP")
+DEFTIMEVAR (TV_TREE_SPLIT_EDGES , "tree split crit edges")
+DEFTIMEVAR (TV_TREE_REASSOC , "tree reassociation")
+DEFTIMEVAR (TV_TREE_PRE , "tree PRE")
+DEFTIMEVAR (TV_TREE_FRE , "tree FRE")
+DEFTIMEVAR (TV_TREE_RPO_VN , "tree RPO VN")
+DEFTIMEVAR (TV_TREE_SINK , "tree code sinking")
+DEFTIMEVAR (TV_TREE_PHIOPT , "tree linearize phis")
+DEFTIMEVAR (TV_TREE_BACKPROP , "tree backward propagate")
+DEFTIMEVAR (TV_TREE_FORWPROP , "tree forward propagate")
+DEFTIMEVAR (TV_TREE_PHIPROP , "tree phiprop")
+DEFTIMEVAR (TV_TREE_DCE , "tree conservative DCE")
+DEFTIMEVAR (TV_TREE_CD_DCE , "tree aggressive DCE")
+DEFTIMEVAR (TV_TREE_CALL_CDCE , "tree builtin call DCE")
+DEFTIMEVAR (TV_TREE_DSE , "tree DSE")
+DEFTIMEVAR (TV_TREE_MERGE_PHI , "PHI merge")
+DEFTIMEVAR (TV_TREE_LOOP , "tree loop optimization")
+DEFTIMEVAR (TV_TREE_NOLOOP , "loopless fn")
+DEFTIMEVAR (TV_TREE_LOOP_BOUNDS , "tree loop bounds")
+DEFTIMEVAR (TV_LIM , "tree loop invariant motion")
+DEFTIMEVAR (TV_LINTERCHANGE , "tree loop interchange")
+DEFTIMEVAR (TV_TREE_LOOP_IVCANON , "tree canonical iv")
+DEFTIMEVAR (TV_SCEV_CONST , "scev constant prop")
+DEFTIMEVAR (TV_TREE_LOOP_UNSWITCH , "tree loop unswitching")
+DEFTIMEVAR (TV_LOOP_SPLIT , "loop splitting")
+DEFTIMEVAR (TV_LOOP_JAM , "unroll and jam")
+DEFTIMEVAR (TV_COMPLETE_UNROLL , "complete unrolling")
+DEFTIMEVAR (TV_SCALAR_CLEANUP , "scalar cleanup")
+DEFTIMEVAR (TV_TREE_PARALLELIZE_LOOPS, "tree parallelize loops")
+DEFTIMEVAR (TV_TREE_VECTORIZATION , "tree vectorization")
+DEFTIMEVAR (TV_TREE_SLP_VECTORIZATION, "tree slp vectorization")
+DEFTIMEVAR (TV_GRAPHITE , "Graphite")
+DEFTIMEVAR (TV_GRAPHITE_TRANSFORMS , "Graphite loop transforms")
+DEFTIMEVAR (TV_GRAPHITE_DATA_DEPS , "Graphite data dep analysis")
+DEFTIMEVAR (TV_GRAPHITE_CODE_GEN , "Graphite code generation")
+DEFTIMEVAR (TV_TREE_LOOP_DISTRIBUTION, "tree loop distribution")
+DEFTIMEVAR (TV_CHECK_DATA_DEPS , "tree check data dependences")
+DEFTIMEVAR (TV_TREE_PREFETCH , "tree prefetching")
+DEFTIMEVAR (TV_TREE_LOOP_IVOPTS , "tree iv optimization")
+DEFTIMEVAR (TV_PREDCOM , "predictive commoning")
+DEFTIMEVAR (TV_TREE_CH , "tree copy headers")
+DEFTIMEVAR (TV_TREE_SSA_UNCPROP , "tree SSA uncprop")
+DEFTIMEVAR (TV_TREE_NRV , "tree NRV optimization")
+DEFTIMEVAR (TV_TREE_COPY_RENAME , "tree rename SSA copies")
+DEFTIMEVAR (TV_TREE_SSA_VERIFY , "tree SSA verifier")
+DEFTIMEVAR (TV_TREE_STMT_VERIFY , "tree STMT verifier")
+DEFTIMEVAR (TV_TREE_SWITCH_CONVERSION, "tree switch conversion")
+DEFTIMEVAR (TV_TREE_SWITCH_LOWERING, "tree switch lowering")
+DEFTIMEVAR (TV_TREE_RECIP , "gimple CSE reciprocals")
+DEFTIMEVAR (TV_TREE_SINCOS , "gimple CSE sin/cos")
+DEFTIMEVAR (TV_TREE_POWCABS , "gimple expand pow/cabs")
+DEFTIMEVAR (TV_TREE_WIDEN_MUL , "gimple widening/fma detection")
+DEFTIMEVAR (TV_TRANS_MEM , "transactional memory")
+DEFTIMEVAR (TV_TREE_STRLEN , "tree strlen optimization")
+DEFTIMEVAR (TV_TREE_MODREF , "tree modref")
+DEFTIMEVAR (TV_TREE_ASSUMPTIONS , "tree assumptions")
+DEFTIMEVAR (TV_CGRAPH_VERIFY , "callgraph verifier")
+DEFTIMEVAR (TV_DOM_FRONTIERS , "dominance frontiers")
+DEFTIMEVAR (TV_DOMINANCE , "dominance computation")
+DEFTIMEVAR (TV_CONTROL_DEPENDENCES , "control dependences")
+DEFTIMEVAR (TV_OUT_OF_SSA , "out of ssa")
+DEFTIMEVAR (TV_VAR_EXPAND , "expand vars")
+DEFTIMEVAR (TV_EXPAND , "expand")
+DEFTIMEVAR (TV_POST_EXPAND , "post expand cleanups")
+DEFTIMEVAR (TV_VARCONST , "varconst")
+DEFTIMEVAR (TV_LOWER_SUBREG , "lower subreg")
+DEFTIMEVAR (TV_JUMP , "jump")
+DEFTIMEVAR (TV_FWPROP , "forward prop")
+DEFTIMEVAR (TV_CSE , "CSE")
+DEFTIMEVAR (TV_DCE , "dead code elimination")
+DEFTIMEVAR (TV_DSE1 , "dead store elim1")
+DEFTIMEVAR (TV_DSE2 , "dead store elim2")
+DEFTIMEVAR (TV_LOOP , "loop analysis")
+DEFTIMEVAR (TV_LOOP_INIT , "loop init")
+DEFTIMEVAR (TV_LOOP_VERSIONING , "loop versioning")
+DEFTIMEVAR (TV_LOOP_MOVE_INVARIANTS , "loop invariant motion")
+DEFTIMEVAR (TV_LOOP_UNROLL , "loop unrolling")
+DEFTIMEVAR (TV_LOOP_DOLOOP , "loop doloop")
+DEFTIMEVAR (TV_LOOP_FINI , "loop fini")
+DEFTIMEVAR (TV_CPROP , "CPROP")
+DEFTIMEVAR (TV_PRE , "PRE")
+DEFTIMEVAR (TV_HOIST , "code hoisting")
+DEFTIMEVAR (TV_LSM , "LSM")
+DEFTIMEVAR (TV_TRACER , "tracer")
+DEFTIMEVAR (TV_WEB , "web")
+DEFTIMEVAR (TV_AUTO_INC_DEC , "auto inc dec")
+DEFTIMEVAR (TV_CSE2 , "CSE 2")
+DEFTIMEVAR (TV_BRANCH_PROB , "branch prediction")
+DEFTIMEVAR (TV_COMBINE , "combiner")
+DEFTIMEVAR (TV_IFCVT , "if-conversion")
+DEFTIMEVAR (TV_MODE_SWITCH , "mode switching")
+DEFTIMEVAR (TV_SMS , "sms modulo scheduling")
+DEFTIMEVAR (TV_LIVE_RANGE_SHRINKAGE , "live range shrinkage")
+DEFTIMEVAR (TV_SCHED , "scheduling")
+DEFTIMEVAR (TV_EARLY_REMAT , "early rematerialization")
+DEFTIMEVAR (TV_IRA , "integrated RA")
+DEFTIMEVAR (TV_LRA , "LRA non-specific")
+DEFTIMEVAR (TV_LRA_ELIMINATE , "LRA virtuals elimination")
+DEFTIMEVAR (TV_LRA_INHERITANCE , "LRA reload inheritance")
+DEFTIMEVAR (TV_LRA_CREATE_LIVE_RANGES, "LRA create live ranges")
+DEFTIMEVAR (TV_LRA_ASSIGN , "LRA hard reg assignment")
+DEFTIMEVAR (TV_LRA_COALESCE , "LRA coalesce pseudo regs")
+DEFTIMEVAR (TV_LRA_REMAT , "LRA rematerialization")
+DEFTIMEVAR (TV_RELOAD , "reload")
+DEFTIMEVAR (TV_RELOAD_CSE_REGS , "reload CSE regs")
+DEFTIMEVAR (TV_GCSE_AFTER_RELOAD , "load CSE after reload")
+DEFTIMEVAR (TV_REE , "ree")
+DEFTIMEVAR (TV_THREAD_PROLOGUE_AND_EPILOGUE, "thread pro- & epilogue")
+DEFTIMEVAR (TV_IFCVT2 , "if-conversion 2")
+DEFTIMEVAR (TV_SPLIT_PATHS , "split paths")
+DEFTIMEVAR (TV_COMBINE_STACK_ADJUST , "combine stack adjustments")
+DEFTIMEVAR (TV_PEEPHOLE2 , "peephole 2")
+DEFTIMEVAR (TV_RENAME_REGISTERS , "rename registers")
+DEFTIMEVAR (TV_SCHED_FUSION , "scheduling fusion")
+DEFTIMEVAR (TV_CPROP_REGISTERS , "hard reg cprop")
+DEFTIMEVAR (TV_SCHED2 , "scheduling 2")
+DEFTIMEVAR (TV_MACH_DEP , "machine dep reorg")
+DEFTIMEVAR (TV_DBR_SCHED , "delay branch sched")
+DEFTIMEVAR (TV_REORDER_BLOCKS , "reorder blocks")
+DEFTIMEVAR (TV_SHORTEN_BRANCH , "shorten branches")
+DEFTIMEVAR (TV_REG_STACK , "reg stack")
+DEFTIMEVAR (TV_FINAL , "final")
+DEFTIMEVAR (TV_VAROUT , "variable output")
+DEFTIMEVAR (TV_SYMOUT , "symout")
+DEFTIMEVAR (TV_VAR_TRACKING , "variable tracking")
+DEFTIMEVAR (TV_VAR_TRACKING_DATAFLOW , "var-tracking dataflow")
+DEFTIMEVAR (TV_VAR_TRACKING_EMIT , "var-tracking emit")
+DEFTIMEVAR (TV_TREE_IFCOMBINE , "tree if-combine")
+DEFTIMEVAR (TV_TREE_IF_TO_SWITCH , "if to switch conversion")
+DEFTIMEVAR (TV_TREE_UNINIT , "uninit var analysis")
+DEFTIMEVAR (TV_PLUGIN_INIT , "plugin initialization")
+DEFTIMEVAR (TV_PLUGIN_RUN , "plugin execution")
+DEFTIMEVAR (TV_GIMPLE_SLSR , "straight-line strength reduction")
+DEFTIMEVAR (TV_GIMPLE_STORE_MERGING , "store merging")
+DEFTIMEVAR (TV_VTABLE_VERIFICATION , "vtable verification")
+DEFTIMEVAR (TV_TREE_UBSAN , "tree ubsan")
+DEFTIMEVAR (TV_INITIALIZE_RTL , "initialize rtl")
+DEFTIMEVAR (TV_GIMPLE_LADDRESS , "address lowering")
+DEFTIMEVAR (TV_TREE_LOOP_IFCVT , "tree loop if-conversion")
+DEFTIMEVAR (TV_WARN_ACCESS , "access analysis")
+
+/* Everything else in rest_of_compilation not included above. */
+DEFTIMEVAR (TV_EARLY_LOCAL , "early local passes")
+DEFTIMEVAR (TV_OPTIMIZE , "unaccounted optimizations")
+DEFTIMEVAR (TV_REST_OF_COMPILATION , "rest of compilation")
+DEFTIMEVAR (TV_POSTRELOAD , "unaccounted post reload")
+DEFTIMEVAR (TV_LATE_COMPILATION , "unaccounted late compilation")
+DEFTIMEVAR (TV_REMOVE_UNUSED , "remove unused locals")
+DEFTIMEVAR (TV_ADDRESS_TAKEN , "address taken")
+DEFTIMEVAR (TV_TODO , "unaccounted todo")
+DEFTIMEVAR (TV_VERIFY_LOOP_CLOSED , "verify loop closed")
+DEFTIMEVAR (TV_VERIFY_RTL_SHARING , "verify RTL sharing")
+DEFTIMEVAR (TV_REBUILD_FREQUENCIES , "rebuild frequencies")
+DEFTIMEVAR (TV_REPAIR_LOOPS , "repair loop structures")
+
+/* Stuff used by libgccjit.so. */
+DEFTIMEVAR (TV_JIT_REPLAY , "replay of JIT client activity")
+DEFTIMEVAR (TV_ASSEMBLE , "assemble JIT code")
+DEFTIMEVAR (TV_LINK , "link JIT code")
+DEFTIMEVAR (TV_LOAD , "load JIT result")
+DEFTIMEVAR (TV_JIT_ACQUIRING_MUTEX , "acquiring JIT mutex")
+DEFTIMEVAR (TV_JIT_CLIENT_CODE , "JIT client code")
+
+/* Analyzer timevars. */
+DEFTIMEVAR (TV_ANALYZER , "analyzer")
+DEFTIMEVAR (TV_ANALYZER_SUPERGRAPH , "analyzer: supergraph")
+DEFTIMEVAR (TV_ANALYZER_STATE_PURGE , "analyzer: state purge")
+DEFTIMEVAR (TV_ANALYZER_PLAN , "analyzer: planning")
+DEFTIMEVAR (TV_ANALYZER_SCC , "analyzer: scc")
+DEFTIMEVAR (TV_ANALYZER_WORKLIST , "analyzer: processing worklist")
+DEFTIMEVAR (TV_ANALYZER_DUMP , "analyzer: dump")
+DEFTIMEVAR (TV_ANALYZER_DIAGNOSTICS , "analyzer: emitting diagnostics")
+DEFTIMEVAR (TV_ANALYZER_SHORTEST_PATHS, "analyzer: shortest paths")
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/timevar.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/timevar.h
new file mode 100644
index 0000000..ad46573
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/timevar.h
@@ -0,0 +1,301 @@
+/* Timing variables for measuring compiler performance.
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+ Contributed by Alex Samuel <samuel@codesourcery.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TIMEVAR_H
+#define GCC_TIMEVAR_H
+
+/* Timing variables are used to measure elapsed time in various
+ portions of the compiler. Each measures elapsed user, system, and
+ wall-clock time, as appropriate to and supported by the host
+ system.
+
+ Timing variables are defined using the DEFTIMEVAR macro in
+ timevar.def. Each has an enumeral identifier, used when referring
+ to the timing variable in code, and a character string name.
+
+ Timing variables can be used in two ways:
+
+ - On the timing stack, using timevar_push and timevar_pop.
+ Timing variables may be pushed onto the stack; elapsed time is
+ attributed to the topmost timing variable on the stack. When
+ another variable is pushed on, the previous topmost variable is
+ `paused' until the pushed variable is popped back off.
+
+ - As a standalone timer, using timevar_start and timevar_stop.
+ All time elapsed between the two calls is attributed to the
+ variable.
+*/
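+
+/* A minimal usage sketch (illustrative, not a contract -- any timevar
+   from timevar.def works the same way in either mode):
+
+     timevar_start (TV_TOTAL);       // standalone timer
+     timevar_push (TV_PARSE_FUNC);   // attribute time to this variable
+     ... parse a function body ...
+     timevar_pop (TV_PARSE_FUNC);    // resume whatever was below on the stack
+     timevar_stop (TV_TOTAL);
+
+   All of these are cheap no-ops unless timing was enabled, i.e. unless
+   g_timer below is non-NULL.  */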
+
+/* This structure stores the various varieties of time that can be
+ measured. Times are stored in seconds. The time may be an
+ absolute time or a time difference; in the former case, the time
+ base is undefined, except that the difference between two times
+ produces a valid time difference. */
+
+struct timevar_time_def
+{
+ /* User time in this process. */
+ double user;
+
+ /* System time (if applicable for this host platform) in this
+ process. */
+ double sys;
+
+ /* Wall clock time. */
+ double wall;
+
+ /* Garbage collector memory. */
+ size_t ggc_mem;
+};
+
+/* An enumeration of timing variable identifiers. Constructed from
+ the contents of timevar.def. */
+
+#define DEFTIMEVAR(identifier__, name__) \
+ identifier__,
+typedef enum
+{
+ TV_NONE,
+#include "timevar.def"
+ TIMEVAR_LAST
+}
+timevar_id_t;
+#undef DEFTIMEVAR
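+
+/* As a sketch of the X-macro scheme: an entry such as
+     DEFTIMEVAR (TV_GC, "garbage collection")
+   in timevar.def contributes only `TV_GC,' to the enumeration here;
+   timevar.cc re-includes timevar.def with a different DEFTIMEVAR
+   definition to recover the name strings for the same identifiers.  */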
+
+/* A class to hold all state relating to timing. */
+
+class timer;
+
+/* The singleton instance of timing state.
+
+ This is non-NULL if timevars should be used. In GCC, this happens with
+   the -ftime-report flag.  Hence this is NULL in the common,
+   needs-to-be-fast case, where a cheap NULL check serves as the early
+   reject.  */
+extern timer *g_timer;
+
+/* Total amount of memory allocated by garbage collector. */
+extern size_t timevar_ggc_mem_total;
+
+extern void timevar_init (void);
+extern void timevar_start (timevar_id_t);
+extern void timevar_stop (timevar_id_t);
+extern bool timevar_cond_start (timevar_id_t);
+extern void timevar_cond_stop (timevar_id_t, bool);
+
+/* The public (within GCC) interface for timing. */
+
+class timer
+{
+ public:
+ timer ();
+ ~timer ();
+
+ void start (timevar_id_t tv);
+ void stop (timevar_id_t tv);
+ void push (timevar_id_t tv);
+ void pop (timevar_id_t tv);
+ bool cond_start (timevar_id_t tv);
+ void cond_stop (timevar_id_t tv);
+
+ void push_client_item (const char *item_name);
+ void pop_client_item ();
+
+ void print (FILE *fp);
+
+ const char *get_topmost_item_name () const;
+
+ private:
+ /* Private member functions. */
+ void validate_phases (FILE *fp) const;
+
+ struct timevar_def;
+ void push_internal (struct timevar_def *tv);
+ void pop_internal ();
+ static void print_row (FILE *fp,
+ const timevar_time_def *total,
+ const char *name, const timevar_time_def &elapsed);
+ static bool all_zero (const timevar_time_def &elapsed);
+
+ private:
+ typedef hash_map<timevar_def *, timevar_time_def> child_map_t;
+
+ /* Private type: a timing variable. */
+ struct timevar_def
+ {
+ /* Elapsed time for this variable. */
+ struct timevar_time_def elapsed;
+
+ /* If this variable is timed independently of the timing stack,
+ using timevar_start, this contains the start time. */
+ struct timevar_time_def start_time;
+
+ /* The name of this timing variable. */
+ const char *name;
+
+ /* Nonzero if this timing variable is running as a standalone
+ timer. */
+ unsigned standalone : 1;
+
+ /* Nonzero if this timing variable was ever started or pushed onto
+ the timing stack. */
+ unsigned used : 1;
+
+ child_map_t *children;
+ };
+
+  /* Private type: an element on the timing stack.
+ Elapsed time is attributed to the topmost timing variable on the
+ stack. */
+ struct timevar_stack_def
+ {
+ /* The timing variable at this stack level. */
+ struct timevar_def *timevar;
+
+ /* The next lower timing variable context in the stack. */
+ struct timevar_stack_def *next;
+ };
+
+ /* A class for managing a collection of named timing items, for use
+ e.g. by libgccjit for timing client code. This class is declared
+ inside timevar.cc to avoid everything using timevar.h
+ from needing vec and hash_map. */
+ class named_items;
+
+ private:
+
+ /* Data members (all private). */
+
+ /* Declared timing variables. Constructed from the contents of
+ timevar.def. */
+ timevar_def m_timevars[TIMEVAR_LAST];
+
+ /* The top of the timing stack. */
+ timevar_stack_def *m_stack;
+
+ /* A list of unused (i.e. allocated and subsequently popped)
+ timevar_stack_def instances. */
+ timevar_stack_def *m_unused_stack_instances;
+
+ /* The time at which the topmost element on the timing stack was
+ pushed. Time elapsed since then is attributed to the topmost
+ element. */
+ timevar_time_def m_start_time;
+
+ /* If non-NULL, for use when timing libgccjit's client code. */
+ named_items *m_jit_client_items;
+
+ friend class named_items;
+};
+
+/* Provided for backward compatibility. */
+inline void
+timevar_push (timevar_id_t tv)
+{
+ if (g_timer)
+ g_timer->push (tv);
+}
+
+inline void
+timevar_pop (timevar_id_t tv)
+{
+ if (g_timer)
+ g_timer->pop (tv);
+}
+
+// This is a simple timevar wrapper class that pushes a timevar in its
+// constructor and pops the timevar in its destructor.
+class auto_timevar
+{
+ public:
+ auto_timevar (timer *t, timevar_id_t tv)
+ : m_timer (t),
+ m_tv (tv)
+ {
+ if (m_timer)
+ m_timer->push (m_tv);
+ }
+
+ explicit auto_timevar (timevar_id_t tv)
+ : m_timer (g_timer)
+ , m_tv (tv)
+ {
+ if (m_timer)
+ m_timer->push (m_tv);
+ }
+
+ ~auto_timevar ()
+ {
+ if (m_timer)
+ m_timer->pop (m_tv);
+ }
+
+ // Disallow copies.
+ auto_timevar (const auto_timevar &) = delete;
+
+ private:
+ timer *m_timer;
+ timevar_id_t m_tv;
+};
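+
+// A minimal sketch of the intended RAII use (TV_PARSE_FUNC stands in
+// for whatever timevar the caller is accounting to):
+//
+//   void parse_one_function ()
+//   {
+//     auto_timevar tv (TV_PARSE_FUNC); // pushes only if timing is enabled
+//     ... do the work ...
+//   } // destructor pops, even on early return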
+
+// As above, but use cond_start/stop.
+class auto_cond_timevar
+{
+ public:
+ auto_cond_timevar (timer *t, timevar_id_t tv)
+ : m_timer (t),
+ m_tv (tv)
+ {
+ start ();
+ }
+
+ explicit auto_cond_timevar (timevar_id_t tv)
+ : m_timer (g_timer)
+ , m_tv (tv)
+ {
+ start ();
+ }
+
+ ~auto_cond_timevar ()
+ {
+ if (m_timer && !already_running)
+ m_timer->cond_stop (m_tv);
+ }
+
+ // Disallow copies.
+ auto_cond_timevar (const auto_cond_timevar &) = delete;
+
+ private:
+  void start ()
+ {
+ if (m_timer)
+ already_running = m_timer->cond_start (m_tv);
+ else
+ already_running = false;
+ }
+
+ timer *m_timer;
+ timevar_id_t m_tv;
+ bool already_running;
+};
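+
+// A sketch of why the conditional variant exists: if the requested
+// timevar is already running (say a helper re-entered under the same
+// timevar), cond_start reports that, already_running is set, and the
+// destructor skips cond_stop -- so only the outermost instance stops
+// the timer and no time is double-counted.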
+
+extern void print_time (const char *, long);
+
+#endif /* ! GCC_TIMEVAR_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tm-preds.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tm-preds.h
new file mode 100644
index 0000000..2f4bb13
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tm-preds.h
@@ -0,0 +1,414 @@
+/* Generated automatically by the program 'build/genpreds'
+ from the machine description file '/data/jenkins/workspace/GNU-toolchain/arm-13/src/gcc/gcc/config/arm/arm.md'. */
+
+#ifndef GCC_TM_PREDS_H
+#define GCC_TM_PREDS_H
+
+#ifdef HAVE_MACHINE_MODES
+extern bool general_operand (rtx, machine_mode);
+extern bool address_operand (rtx, machine_mode);
+extern bool register_operand (rtx, machine_mode);
+extern bool pmode_register_operand (rtx, machine_mode);
+extern bool scratch_operand (rtx, machine_mode);
+extern bool immediate_operand (rtx, machine_mode);
+extern bool const_int_operand (rtx, machine_mode);
+extern bool const_scalar_int_operand (rtx, machine_mode);
+extern bool const_double_operand (rtx, machine_mode);
+extern bool nonimmediate_operand (rtx, machine_mode);
+extern bool nonmemory_operand (rtx, machine_mode);
+extern bool push_operand (rtx, machine_mode);
+extern bool pop_operand (rtx, machine_mode);
+extern bool memory_operand (rtx, machine_mode);
+extern bool indirect_operand (rtx, machine_mode);
+extern bool ordered_comparison_operator (rtx, machine_mode);
+extern bool comparison_operator (rtx, machine_mode);
+extern bool const_1_to_4_operand (rtx, machine_mode);
+extern bool const_2_4_8_16_operand (rtx, machine_mode);
+extern bool alu_shift_operator_lsl_1_to_4 (rtx, machine_mode);
+extern bool alu_shift_reg_p (rtx, machine_mode);
+extern bool s_register_operand (rtx, machine_mode);
+extern bool mve_memory_operand (rtx, machine_mode);
+extern bool mve_scatter_memory (rtx, machine_mode);
+extern bool mve_imm_16 (rtx, machine_mode);
+extern bool mve_imm_7 (rtx, machine_mode);
+extern bool mve_imm_8 (rtx, machine_mode);
+extern bool mve_imm_15 (rtx, machine_mode);
+extern bool mve_imm_31 (rtx, machine_mode);
+extern bool mve_imm_32 (rtx, machine_mode);
+extern bool mve_imm_selective_upto_8 (rtx, machine_mode);
+extern bool mve_vldrd_immediate (rtx, machine_mode);
+extern bool mve_vstrw_immediate (rtx, machine_mode);
+extern bool guard_addr_operand (rtx, machine_mode);
+extern bool guard_operand (rtx, machine_mode);
+extern bool vpr_register_operand (rtx, machine_mode);
+extern bool imm_for_neon_inv_logic_operand (rtx, machine_mode);
+extern bool neon_inv_logic_op2 (rtx, machine_mode);
+extern bool imm_for_neon_logic_operand (rtx, machine_mode);
+extern bool neon_logic_op2 (rtx, machine_mode);
+extern bool arm_hard_general_register_operand (rtx, machine_mode);
+extern bool low_register_operand (rtx, machine_mode);
+extern bool low_reg_or_int_operand (rtx, machine_mode);
+extern bool arm_general_register_operand (rtx, machine_mode);
+extern bool arm_low_register_operand (rtx, machine_mode);
+extern bool arm_general_adddi_operand (rtx, machine_mode);
+extern bool vfp_register_operand (rtx, machine_mode);
+extern bool vfp_hard_register_operand (rtx, machine_mode);
+extern bool zero_operand (rtx, machine_mode);
+extern bool minus_one_operand (rtx, machine_mode);
+extern bool reg_or_zero_operand (rtx, machine_mode);
+extern bool subreg_lowpart_operator (rtx, machine_mode);
+extern bool reg_or_int_operand (rtx, machine_mode);
+extern bool arm_immediate_operand (rtx, machine_mode);
+extern bool arm_immediate_di_operand (rtx, machine_mode);
+extern bool arm_neg_immediate_operand (rtx, machine_mode);
+extern bool arm_not_immediate_operand (rtx, machine_mode);
+extern bool const0_operand (rtx, machine_mode);
+extern bool arm_rhs_operand (rtx, machine_mode);
+extern bool arm_rhsm_operand (rtx, machine_mode);
+extern bool const_int_I_operand (rtx, machine_mode);
+extern bool const_int_M_operand (rtx, machine_mode);
+extern bool const_int_coproc_operand (rtx, machine_mode);
+extern bool const_int_ccde1_operand (rtx, machine_mode);
+extern bool const_int_ccde2_operand (rtx, machine_mode);
+extern bool const_int_ccde3_operand (rtx, machine_mode);
+extern bool const_int_vcde1_operand (rtx, machine_mode);
+extern bool const_int_vcde2_operand (rtx, machine_mode);
+extern bool const_int_vcde3_operand (rtx, machine_mode);
+extern bool const_int_mve_cde1_operand (rtx, machine_mode);
+extern bool const_int_mve_cde2_operand (rtx, machine_mode);
+extern bool const_int_mve_cde3_operand (rtx, machine_mode);
+extern bool shift_amount_operand (rtx, machine_mode);
+extern bool const_neon_scalar_shift_amount_operand (rtx, machine_mode);
+extern bool ssat16_imm (rtx, machine_mode);
+extern bool usat16_imm (rtx, machine_mode);
+extern bool ldrd_strd_offset_operand (rtx, machine_mode);
+extern bool arm_add_operand (rtx, machine_mode);
+extern bool arm_adddi_operand (rtx, machine_mode);
+extern bool arm_anddi_operand (rtx, machine_mode);
+extern bool arm_iordi_operand (rtx, machine_mode);
+extern bool arm_xordi_operand (rtx, machine_mode);
+extern bool arm_addimm_operand (rtx, machine_mode);
+extern bool arm_not_operand (rtx, machine_mode);
+extern bool arm_adcimm_operand (rtx, machine_mode);
+extern bool arm_di_operand (rtx, machine_mode);
+extern bool offsettable_memory_operand (rtx, machine_mode);
+extern bool call_memory_operand (rtx, machine_mode);
+extern bool arm_reload_memory_operand (rtx, machine_mode);
+extern bool vfp_compare_operand (rtx, machine_mode);
+extern bool index_operand (rtx, machine_mode);
+extern bool shiftable_operator (rtx, machine_mode);
+extern bool shiftable_operator_strict_it (rtx, machine_mode);
+extern bool logical_binary_operator (rtx, machine_mode);
+extern bool commutative_binary_operator (rtx, machine_mode);
+extern bool shift_operator (rtx, machine_mode);
+extern bool shift_nomul_operator (rtx, machine_mode);
+extern bool sat_shift_operator (rtx, machine_mode);
+extern bool long_shift_imm (rtx, machine_mode);
+extern bool arm_reg_or_long_shift_imm (rtx, machine_mode);
+extern bool mult_operator (rtx, machine_mode);
+extern bool thumb_16bit_operator (rtx, machine_mode);
+extern bool equality_operator (rtx, machine_mode);
+extern bool expandable_comparison_operator (rtx, machine_mode);
+extern bool arm_comparison_operator (rtx, machine_mode);
+extern bool arm_comparison_operator_mode (rtx, machine_mode);
+extern bool arm_comparison_operation (rtx, machine_mode);
+extern bool lt_ge_comparison_operator (rtx, machine_mode);
+extern bool arm_carry_operation (rtx, machine_mode);
+extern bool arm_borrow_operation (rtx, machine_mode);
+extern bool arm_vsel_comparison_operator (rtx, machine_mode);
+extern bool arm_cond_move_operator (rtx, machine_mode);
+extern bool nz_comparison_operator (rtx, machine_mode);
+extern bool minmax_operator (rtx, machine_mode);
+extern bool cc_register (rtx, machine_mode);
+extern bool dominant_cc_register (rtx, machine_mode);
+extern bool cc_register_operand (rtx, machine_mode);
+extern bool arm_extendqisi_mem_op (rtx, machine_mode);
+extern bool arm_reg_or_extendqisi_mem_op (rtx, machine_mode);
+extern bool power_of_two_operand (rtx, machine_mode);
+extern bool nonimmediate_di_operand (rtx, machine_mode);
+extern bool di_operand (rtx, machine_mode);
+extern bool nonimmediate_soft_df_operand (rtx, machine_mode);
+extern bool soft_df_operand (rtx, machine_mode);
+extern bool hard_sf_operand (rtx, machine_mode);
+extern bool hard_df_operand (rtx, machine_mode);
+extern bool clear_multiple_operation (rtx, machine_mode);
+extern bool clear_vfp_multiple_operation (rtx, machine_mode);
+extern bool load_multiple_operation (rtx, machine_mode);
+extern bool store_multiple_operation (rtx, machine_mode);
+extern bool pop_multiple_return (rtx, machine_mode);
+extern bool pop_multiple_fp (rtx, machine_mode);
+extern bool multi_register_push (rtx, machine_mode);
+extern bool push_mult_memory_operand (rtx, machine_mode);
+extern bool thumb1_cmp_operand (rtx, machine_mode);
+extern bool thumb1_cmpneg_operand (rtx, machine_mode);
+extern bool thumb_cbrch_target_operand (rtx, machine_mode);
+extern bool imm_or_reg_operand (rtx, machine_mode);
+extern bool const_multiple_of_8_operand (rtx, machine_mode);
+extern bool imm_for_neon_mov_operand (rtx, machine_mode);
+extern bool imm_for_neon_lshift_operand (rtx, machine_mode);
+extern bool imm_for_neon_rshift_operand (rtx, machine_mode);
+extern bool imm_lshift_or_reg_neon (rtx, machine_mode);
+extern bool imm_rshift_or_reg_neon (rtx, machine_mode);
+extern bool cmpdi_operand (rtx, machine_mode);
+extern bool arm_sync_memory_operand (rtx, machine_mode);
+extern bool vect_par_constant_high (rtx, machine_mode);
+extern bool vect_par_constant_low (rtx, machine_mode);
+extern bool const_double_vcvt_power_of_two_reciprocal (rtx, machine_mode);
+extern bool const_double_vcvt_power_of_two (rtx, machine_mode);
+extern bool neon_struct_operand (rtx, machine_mode);
+extern bool mve_struct_operand (rtx, machine_mode);
+extern bool neon_permissive_struct_operand (rtx, machine_mode);
+extern bool neon_perm_struct_or_reg_operand (rtx, machine_mode);
+extern bool add_operator (rtx, machine_mode);
+extern bool mem_noofs_operand (rtx, machine_mode);
+extern bool call_insn_operand (rtx, machine_mode);
+extern bool aligned_operand (rtx, machine_mode);
+#endif /* HAVE_MACHINE_MODES */
+
+#define CONSTRAINT_NUM_DEFINED_P 1
+enum constraint_num
+{
+ CONSTRAINT__UNKNOWN = 0,
+ CONSTRAINT_r,
+ CONSTRAINT_Up,
+ CONSTRAINT_Uf,
+ CONSTRAINT_Te,
+ CONSTRAINT_t,
+ CONSTRAINT_w,
+ CONSTRAINT_x,
+ CONSTRAINT_y,
+ CONSTRAINT_z,
+ CONSTRAINT_l,
+ CONSTRAINT_h,
+ CONSTRAINT_k,
+ CONSTRAINT_b,
+ CONSTRAINT_Cs,
+ CONSTRAINT_Ts,
+ CONSTRAINT_Pj,
+ CONSTRAINT_PJ,
+ CONSTRAINT_I,
+ CONSTRAINT_J,
+ CONSTRAINT_K,
+ CONSTRAINT_L,
+ CONSTRAINT_M,
+ CONSTRAINT_N,
+ CONSTRAINT_O,
+ CONSTRAINT_Pa,
+ CONSTRAINT_Pb,
+ CONSTRAINT_Pc,
+ CONSTRAINT_Pd,
+ CONSTRAINT_Pe,
+ CONSTRAINT_Pf,
+ CONSTRAINT_Pg,
+ CONSTRAINT_Ps,
+ CONSTRAINT_Pt,
+ CONSTRAINT_Pu,
+ CONSTRAINT_Pv,
+ CONSTRAINT_Pw,
+ CONSTRAINT_Px,
+ CONSTRAINT_Py,
+ CONSTRAINT_Pz,
+ CONSTRAINT_m,
+ CONSTRAINT_o,
+ CONSTRAINT_Ul,
+ CONSTRAINT_Ua,
+ CONSTRAINT_Uh,
+ CONSTRAINT_Ut,
+ CONSTRAINT_Uv,
+ CONSTRAINT_Ug,
+ CONSTRAINT_Uj,
+ CONSTRAINT_Uy,
+ CONSTRAINT_Un,
+ CONSTRAINT_Um,
+ CONSTRAINT_Us,
+ CONSTRAINT_Ux,
+ CONSTRAINT_Uq,
+ CONSTRAINT_Q,
+ CONSTRAINT_Uu,
+ CONSTRAINT_Uw,
+ CONSTRAINT_Uz,
+ CONSTRAINT_p,
+ CONSTRAINT_Rd,
+ CONSTRAINT_Ra,
+ CONSTRAINT_Rb,
+ CONSTRAINT_Rc,
+ CONSTRAINT_Re,
+ CONSTRAINT_Rf,
+ CONSTRAINT_Rg,
+ CONSTRAINT_j,
+ CONSTRAINT_G,
+ CONSTRAINT_Ha,
+ CONSTRAINT_Dz,
+ CONSTRAINT_DB,
+ CONSTRAINT_Da,
+ CONSTRAINT_Db,
+ CONSTRAINT_Dc,
+ CONSTRAINT_Dd,
+ CONSTRAINT_Di,
+ CONSTRAINT_Dj,
+ CONSTRAINT_Dm,
+ CONSTRAINT_Dn,
+ CONSTRAINT_DN,
+ CONSTRAINT_Dl,
+ CONSTRAINT_DL,
+ CONSTRAINT_Do,
+ CONSTRAINT_Dv,
+ CONSTRAINT_Dy,
+ CONSTRAINT_Dt,
+ CONSTRAINT_Ds,
+ CONSTRAINT_Dp,
+ CONSTRAINT_US,
+ CONSTRAINT_Ri,
+ CONSTRAINT_Rl,
+ CONSTRAINT_c,
+ CONSTRAINT_V,
+ CONSTRAINT__l,
+ CONSTRAINT__g,
+ CONSTRAINT_i,
+ CONSTRAINT_s,
+ CONSTRAINT_n,
+ CONSTRAINT_E,
+ CONSTRAINT_F,
+ CONSTRAINT_X,
+ CONSTRAINT_Tu,
+ CONSTRAINT_Ui,
+ CONSTRAINT__LIMIT
+};
+
+extern enum constraint_num lookup_constraint_1 (const char *);
+extern const unsigned char lookup_constraint_array[];
+
+/* Return the constraint at the beginning of P, or CONSTRAINT__UNKNOWN if it
+ isn't recognized. */
+
+static inline enum constraint_num
+lookup_constraint (const char *p)
+{
+ unsigned int index = lookup_constraint_array[(unsigned char) *p];
+ return (index == UCHAR_MAX
+ ? lookup_constraint_1 (p)
+ : (enum constraint_num) index);
+}
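+
+/* A sketch of the two-level scheme genpreds generates: a single-letter
+   constraint such as "r" resolves directly through the array, while a
+   first letter shared by multi-letter constraints (e.g. the 'U' of
+   "Up") is stored as UCHAR_MAX so the slower lookup_constraint_1 can
+   inspect the remaining characters.  */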
+
+extern bool (*constraint_satisfied_p_array[]) (rtx);
+
+/* Return true if X satisfies constraint C. */
+
+static inline bool
+constraint_satisfied_p (rtx x, enum constraint_num c)
+{
+ int i = (int) c - (int) CONSTRAINT_Pj;
+ return i >= 0 && constraint_satisfied_p_array[i] (x);
+}
+
+static inline bool
+insn_extra_register_constraint (enum constraint_num c)
+{
+ return c >= CONSTRAINT_r && c <= CONSTRAINT_Ts;
+}
+
+static inline bool
+insn_extra_memory_constraint (enum constraint_num c)
+{
+ return c >= CONSTRAINT_m && c <= CONSTRAINT_Uz;
+}
+
+static inline bool
+insn_extra_special_memory_constraint (enum constraint_num)
+{
+ return false;
+}
+
+static inline bool
+insn_extra_relaxed_memory_constraint (enum constraint_num)
+{
+ return false;
+}
+
+static inline bool
+insn_extra_address_constraint (enum constraint_num c)
+{
+ return c >= CONSTRAINT_p && c <= CONSTRAINT_p;
+}
+
+static inline void
+insn_extra_constraint_allows_reg_mem (enum constraint_num c,
+ bool *allows_reg, bool *allows_mem)
+{
+ if (c >= CONSTRAINT_Rd && c <= CONSTRAINT_Rl)
+ return;
+ if (c >= CONSTRAINT_c && c <= CONSTRAINT_c)
+ {
+ *allows_reg = true;
+ return;
+ }
+ if (c >= CONSTRAINT_V && c <= CONSTRAINT__g)
+ {
+ *allows_mem = true;
+ return;
+ }
+ (void) c;
+ *allows_reg = true;
+ *allows_mem = true;
+}
+
+static inline size_t
+insn_constraint_len (char fc, const char *str ATTRIBUTE_UNUSED)
+{
+ switch (fc)
+ {
+ case 'C': return 2;
+ case 'D': return 2;
+ case 'H': return 2;
+ case 'P': return 2;
+ case 'R': return 2;
+ case 'T': return 2;
+ case 'U': return 2;
+ default: break;
+ }
+ return 1;
+}
+
+#define CONSTRAINT_LEN(c_,s_) insn_constraint_len (c_,s_)
+
+extern enum reg_class reg_class_for_constraint_1 (enum constraint_num);
+
+static inline enum reg_class
+reg_class_for_constraint (enum constraint_num c)
+{
+ if (insn_extra_register_constraint (c))
+ return reg_class_for_constraint_1 (c);
+ return NO_REGS;
+}
+
+extern bool insn_const_int_ok_for_constraint (HOST_WIDE_INT, enum constraint_num);
+#define CONST_OK_FOR_CONSTRAINT_P(v_,c_,s_) \
+ insn_const_int_ok_for_constraint (v_, lookup_constraint (s_))
+
+enum constraint_type
+{
+ CT_REGISTER,
+ CT_CONST_INT,
+ CT_MEMORY,
+ CT_SPECIAL_MEMORY,
+ CT_RELAXED_MEMORY,
+ CT_ADDRESS,
+ CT_FIXED_FORM
+};
+
+static inline enum constraint_type
+get_constraint_type (enum constraint_num c)
+{
+ if (c >= CONSTRAINT_p)
+ {
+ if (c >= CONSTRAINT_Rd)
+ return CT_FIXED_FORM;
+ return CT_ADDRESS;
+ }
+ if (c >= CONSTRAINT_m)
+ return CT_MEMORY;
+ if (c >= CONSTRAINT_Pj)
+ return CT_CONST_INT;
+ return CT_REGISTER;
+}
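+
+/* For instance, with the enumeration order in this generated file,
+   get_constraint_type (CONSTRAINT_r) is CT_REGISTER and
+   get_constraint_type (CONSTRAINT_m) is CT_MEMORY; the comparisons
+   above rely purely on that ordering.  */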
+#endif /* tm-preds.h */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tm.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tm.h
new file mode 100644
index 0000000..04adb54
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tm.h
@@ -0,0 +1,37 @@
+#ifndef GCC_TM_H
+#define GCC_TM_H
+#define TARGET_CPU_DEFAULT ("arm7tdmi")
+#ifndef LIBC_GLIBC
+# define LIBC_GLIBC 1
+#endif
+#ifndef LIBC_UCLIBC
+# define LIBC_UCLIBC 2
+#endif
+#ifndef LIBC_BIONIC
+# define LIBC_BIONIC 3
+#endif
+#ifndef LIBC_MUSL
+# define LIBC_MUSL 4
+#endif
+#ifdef IN_GCC
+# include "options.h"
+# include "insn-constants.h"
+# include "config/vxworks-dummy.h"
+# include "config/elfos.h"
+# include "config/arm/unknown-elf.h"
+# include "config/arm/elf.h"
+# include "config/arm/bpabi.h"
+# include "config/newlib-stdint.h"
+# include "config/arm/aout.h"
+# include "config/arm/arm.h"
+# include "config/arm/arm-mlib.h"
+# include "config/initfini-array.h"
+#endif
+#if defined IN_GCC && !defined GENERATOR_FILE && !defined USED_FOR_TARGET
+# include "insn-flags.h"
+#endif
+#if defined IN_GCC && !defined GENERATOR_FILE
+# include "insn-modes.h"
+#endif
+# include "defaults.h"
+#endif /* GCC_TM_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tm_p.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tm_p.h
new file mode 100644
index 0000000..70edafb
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tm_p.h
@@ -0,0 +1,9 @@
+#ifndef GCC_TM_P_H
+#define GCC_TM_P_H
+#ifdef IN_GCC
+# include "config/arm/arm-flags.h"
+# include "config/arm/arm-protos.h"
+# include "config/arm/aarch-common-protos.h"
+# include "tm-preds.h"
+#endif
+#endif /* GCC_TM_P_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/toplev.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/toplev.h
new file mode 100644
index 0000000..981112d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/toplev.h
@@ -0,0 +1,102 @@
+/* toplev.h - Various declarations for functions found in toplev.cc
+ Copyright (C) 1998-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TOPLEV_H
+#define GCC_TOPLEV_H
+
+/* Decoded options, and number of such options. */
+extern struct cl_decoded_option *save_decoded_options;
+extern unsigned int save_decoded_options_count;
+extern vec<cl_decoded_option> *save_opt_decoded_options;
+
+class timer;
+
+/* Invoking the compiler. */
+class toplev
+{
+public:
+ toplev (timer *external_timer,
+ bool init_signals);
+ ~toplev ();
+
+ int main (int argc, char **argv);
+
+ void finalize ();
+
+private:
+
+ void start_timevars ();
+
+ void run_self_tests ();
+
+ bool m_use_TV_TOTAL;
+ bool m_init_signals;
+};
+
+extern void rest_of_decl_compilation (tree, int, int);
+extern void rest_of_type_compilation (tree, int);
+extern void init_optimization_passes (void);
+extern bool enable_rtl_dump_file (void);
+
+/* In except.cc. Initialize exception handling. This is used by the Ada
+ and LTO front ends to initialize EH "on demand". See lto-streamer-in.cc
+ and ada/gcc-interface/misc.cc. */
+extern void init_eh (void);
+
+extern void announce_function (tree);
+
+extern void wrapup_global_declaration_1 (tree);
+extern bool wrapup_global_declaration_2 (tree);
+extern bool wrapup_global_declarations (tree *, int);
+
+extern void global_decl_processing (void);
+
+extern void
+dump_memory_report (const char *);
+extern void dump_profile_report (void);
+
+extern void target_reinit (void);
+
+/* A unique local time stamp; it might be zero if none is available.  */
+extern unsigned local_tick;
+
+/* See toplev.cc. */
+extern int flag_rerun_cse_after_global_opts;
+
+extern void print_version (FILE *, const char *, bool);
+
+/* The hashtable, so that the C front ends can pass it to cpplib. */
+extern struct ht *ident_hash;
+
+/* Functions used to get and set GCC's notion of in what directory
+ compilation was started. */
+
+extern const char *get_src_pwd (void);
+extern bool set_src_pwd (const char *);
+
+/* Functions used to manipulate the random seed. */
+
+extern HOST_WIDE_INT get_random_seed (bool);
+extern void set_random_seed (const char *);
+
+extern void parse_alignment_opts (void);
+
+extern void initialize_rtl (void);
+
+#endif /* ! GCC_TOPLEV_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tracer.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tracer.h
new file mode 100644
index 0000000..c51e1a6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tracer.h
@@ -0,0 +1,26 @@
+/* Header file for Tracer.
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TRACER_H
+#define GCC_TRACER_H
+
+extern basic_block transform_duplicate (basic_block bb, basic_block bb2);
+extern bool ignore_bb_p (const_basic_block bb);
+
+#endif /* GCC_TRACER_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/trans-mem.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/trans-mem.h
new file mode 100644
index 0000000..b17bf17
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/trans-mem.h
@@ -0,0 +1,52 @@
+/* Miscellaneous transactional memory support definitions.
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>
+ and Aldy Hernandez <aldyh@redhat.com>.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TRANS_MEM_H
+#define GCC_TRANS_MEM_H
+
+/* These defines must match the enumerations in libitm.h. */
+#define PR_INSTRUMENTEDCODE 0x0001
+#define PR_UNINSTRUMENTEDCODE 0x0002
+#define PR_MULTIWAYCODE (PR_INSTRUMENTEDCODE | PR_UNINSTRUMENTEDCODE)
+#define PR_HASNOXMMUPDATE 0x0004
+#define PR_HASNOABORT 0x0008
+#define PR_HASNOIRREVOCABLE 0x0020
+#define PR_DOESGOIRREVOCABLE 0x0040
+#define PR_HASNOSIMPLEREADS 0x0080
+#define PR_AWBARRIERSOMITTED 0x0100
+#define PR_RARBARRIERSOMITTED 0x0200
+#define PR_UNDOLOGCODE 0x0400
+#define PR_PREFERUNINSTRUMENTED 0x0800
+#define PR_EXCEPTIONBLOCK 0x1000
+#define PR_HASELSE 0x2000
+#define PR_READONLY 0x4000
+
+extern void compute_transaction_bits (void);
+extern bool is_tm_ending (gimple *);
+extern tree build_tm_abort_call (location_t, bool);
+extern bool is_tm_safe (const_tree);
+extern bool is_tm_pure (const_tree);
+extern bool is_tm_may_cancel_outer (tree);
+extern bool is_tm_ending_fndecl (tree);
+extern void record_tm_replacement (tree, tree);
+extern void tm_malloc_replacement (tree);
+
+#endif // GCC_TRANS_MEM_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-affine.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-affine.h
new file mode 100644
index 0000000..4e833bc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-affine.h
@@ -0,0 +1,129 @@
+/* Operations with affine combinations of trees.
+ Copyright (C) 2005-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Affine combination of trees. We keep track of at most MAX_AFF_ELTS elements
+ to make things simpler; this is sufficient in most cases. */
+
+#ifndef GCC_TREE_AFFINE_H
+#define GCC_TREE_AFFINE_H
+
+
+#define MAX_AFF_ELTS 8
+
+/* Element of an affine combination. */
+
+class aff_comb_elt
+{
+public:
+ /* The value of the element. */
+ tree val;
+
+ /* Its coefficient in the combination. */
+ widest_int coef;
+};
+
+class aff_tree
+{
+public:
+ /* Type of the result of the combination. */
+ tree type;
+
+ /* Constant offset. */
+ poly_widest_int offset;
+
+ /* Number of elements of the combination. */
+ unsigned n;
+
+ /* Elements and their coefficients. Type of elements may be different from
+ TYPE, but their sizes must be the same (STRIP_NOPS is applied to the
+ elements).
+
+ The coefficients are always sign extended from the precision of TYPE
+ (regardless of signedness of TYPE). */
+ class aff_comb_elt elts[MAX_AFF_ELTS];
+
+  /* Remainder of the expression.  Usually NULL, used only if there are more
+     than MAX_AFF_ELTS elements.  The type of REST is sizetype when TYPE is
+     a POINTER_TYPE, and TYPE otherwise.  */
+ tree rest;
+};
+
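+/* Illustrative example (not part of the upstream header): the combination
+   2*x + 3*y + 5 would be stored as offset = 5, n = 2,
+   elts[0] = { x, 2 }, elts[1] = { y, 3 } and rest = NULL_TREE; only when
+   more than MAX_AFF_ELTS distinct elements are needed does the surplus
+   spill into REST.  */
+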
+class name_expansion;
+
+void aff_combination_const (aff_tree *, tree, const poly_widest_int &);
+void aff_combination_elt (aff_tree *, tree, tree);
+void aff_combination_scale (aff_tree *, const widest_int &);
+void aff_combination_mult (aff_tree *, aff_tree *, aff_tree *);
+void aff_combination_add (aff_tree *, aff_tree *);
+void aff_combination_add_elt (aff_tree *, tree, const widest_int &);
+void aff_combination_remove_elt (aff_tree *, unsigned);
+void aff_combination_convert (aff_tree *, tree);
+void tree_to_aff_combination (tree, tree, aff_tree *);
+tree aff_combination_to_tree (aff_tree *);
+void unshare_aff_combination (aff_tree *);
+bool aff_combination_constant_multiple_p (aff_tree *, aff_tree *,
+ poly_widest_int *);
+void aff_combination_expand (aff_tree *, hash_map<tree, name_expansion *> **);
+void tree_to_aff_combination_expand (tree, tree, aff_tree *,
+ hash_map<tree, name_expansion *> **);
+tree get_inner_reference_aff (tree, aff_tree *, poly_widest_int *);
+void free_affine_expand_cache (hash_map<tree, name_expansion *> **);
+bool aff_comb_cannot_overlap_p (aff_tree *, const poly_widest_int &,
+ const poly_widest_int &);
+
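+/* Illustrative usage sketch (not part of the upstream header): a typical
+   round trip converts a tree to affine form, manipulates it, and converts
+   it back:
+
+     aff_tree comb;
+     tree_to_aff_combination (expr, TREE_TYPE (expr), &comb);
+     aff_combination_scale (&comb, 2);          // comb now means 2*expr
+     tree doubled = aff_combination_to_tree (&comb);
+*/
+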
+/* Debugging functions. */
+void debug_aff (aff_tree *);
+
+/* Return AFF's type. */
+inline tree
+aff_combination_type (aff_tree *aff)
+{
+ return aff->type;
+}
+
+/* Return true if AFF is actually ZERO. */
+inline bool
+aff_combination_zero_p (aff_tree *aff)
+{
+ if (!aff)
+ return true;
+
+ if (aff->n == 0 && known_eq (aff->offset, 0))
+ return true;
+
+ return false;
+}
+
+/* Return true if AFF is actually const. */
+inline bool
+aff_combination_const_p (aff_tree *aff)
+{
+ return (aff == NULL || aff->n == 0);
+}
+
+/* Return true iff AFF contains one (negated) singleton variable. Users need
+ to make sure AFF points to a valid combination. */
+inline bool
+aff_combination_singleton_var_p (aff_tree *aff)
+{
+ return (aff->n == 1
+ && known_eq (aff->offset, 0)
+ && (aff->elts[0].coef == 1 || aff->elts[0].coef == -1));
+}
+#endif /* GCC_TREE_AFFINE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-cfg.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-cfg.h
new file mode 100644
index 0000000..9b56a68
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-cfg.h
@@ -0,0 +1,134 @@
+/* Data and Control Flow Analysis for Trees.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+ Contributed by Diego Novillo <dnovillo@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef _TREE_CFG_H
+#define _TREE_CFG_H
+
+/* Location to track pending stmt for edge insertion. */
+#define PENDING_STMT(e) ((e)->insns.g)
+
+/* Garbage collection and PCH support for edge_def. */
+extern void gt_ggc_mx (edge_def *e);
+extern void gt_pch_nx (edge_def *e);
+extern void gt_pch_nx (edge_def *e, gt_pointer_operator, void *);
+
+extern void init_empty_tree_cfg_for_function (struct function *);
+extern void init_empty_tree_cfg (void);
+extern void start_recording_case_labels (void);
+extern void end_recording_case_labels (void);
+extern tree get_cases_for_edge (edge, gswitch *);
+extern basic_block label_to_block (struct function *, tree);
+extern void cleanup_dead_labels (void);
+extern bool group_case_labels_stmt (gswitch *);
+extern bool group_case_labels (void);
+extern void replace_uses_by (tree, tree);
+extern basic_block single_noncomplex_succ (basic_block bb);
+extern void notice_special_calls (gcall *);
+extern void clear_special_calls (void);
+extern edge find_taken_edge (basic_block, tree);
+extern void gimple_debug_bb (basic_block);
+extern basic_block gimple_debug_bb_n (int);
+extern void gimple_debug_cfg (int);
+extern void gimple_dump_cfg (FILE *, dump_flags_t);
+extern void dump_cfg_stats (FILE *);
+extern void debug_cfg_stats (void);
+extern bool computed_goto_p (gimple *);
+extern bool stmt_can_make_abnormal_goto (gimple *);
+extern basic_block get_abnormal_succ_dispatcher (basic_block);
+extern bool is_ctrl_stmt (gimple *);
+extern bool is_ctrl_altering_stmt (gimple *);
+extern bool simple_goto_p (gimple *);
+extern bool stmt_ends_bb_p (gimple *);
+extern bool gimple_seq_unreachable_p (gimple_seq);
+extern bool assert_unreachable_fallthru_edge_p (edge);
+extern void delete_tree_cfg_annotations (function *);
+extern gphi *get_virtual_phi (basic_block);
+extern gimple *first_stmt (basic_block);
+extern gimple *last_stmt (basic_block);
+extern gimple *last_and_only_stmt (basic_block);
+extern bool verify_gimple_in_seq (gimple_seq, bool = true);
+extern bool verify_gimple_in_cfg (struct function *, bool, bool = true);
+extern tree gimple_block_label (basic_block);
+extern void add_phi_args_after_copy_bb (basic_block);
+extern void add_phi_args_after_copy (basic_block *, unsigned, edge);
+extern basic_block split_edge_bb_loc (edge);
+extern bool gimple_duplicate_sese_region (edge, edge, basic_block *, unsigned,
+ basic_block *, bool);
+extern bool gimple_duplicate_sese_tail (edge, edge, basic_block *, unsigned,
+ basic_block *);
+extern void gather_blocks_in_sese_region (basic_block entry, basic_block exit,
+ vec<basic_block> *bbs_p);
+extern void verify_sese (basic_block, basic_block, vec<basic_block> *);
+extern bool gather_ssa_name_hash_map_from (tree const &, tree const &, void *);
+extern void fold_loop_internal_call (gimple *, tree);
+extern basic_block move_sese_region_to_fn (struct function *, basic_block,
+ basic_block, tree);
+extern void dump_function_to_file (tree, FILE *, dump_flags_t);
+extern void debug_function (tree, dump_flags_t);
+extern void print_loops_bb (FILE *, basic_block, int, int);
+extern void print_loops (FILE *, int);
+extern void debug (class loop &ref);
+extern void debug (class loop *ptr);
+extern void debug_verbose (class loop &ref);
+extern void debug_verbose (class loop *ptr);
+extern void debug_loops (int);
+extern void debug_loop (class loop *, int);
+extern void debug_loop_num (unsigned, int);
+extern void remove_edge_and_dominated_blocks (edge);
+extern bool gimple_purge_dead_eh_edges (basic_block);
+extern bool gimple_purge_all_dead_eh_edges (const_bitmap);
+extern bool gimple_purge_dead_abnormal_call_edges (basic_block);
+extern bool gimple_purge_all_dead_abnormal_call_edges (const_bitmap);
+extern void extract_true_false_edges_from_block (basic_block, edge *, edge *);
+extern tree find_case_label_for_value (const gswitch *switch_stmt, tree val);
+extern edge find_taken_edge_switch_expr (const gswitch *switch_stmt, tree val);
+extern unsigned int execute_fixup_cfg (void);
+extern unsigned int split_critical_edges (bool for_edge_insertion_p = false);
+extern basic_block insert_cond_bb (basic_block, gimple *, gimple *,
+ profile_probability);
+extern bool gimple_find_sub_bbs (gimple_seq, gimple_stmt_iterator *);
+extern bool extract_true_false_controlled_edges (basic_block, basic_block,
+ edge *, edge *);
+extern void generate_range_test (basic_block bb, tree index, tree low,
+ tree high, tree *lhs, tree *rhs);
+extern basic_block gimple_switch_label_bb (function *, gswitch *, unsigned);
+extern basic_block gimple_switch_default_bb (function *, gswitch *);
+extern edge gimple_switch_edge (function *, gswitch *, unsigned);
+extern edge gimple_switch_default_edge (function *, gswitch *);
+extern bool cond_only_block_p (basic_block);
+
+/* Return true if the LHS of a call should be removed. */
+
+inline bool
+should_remove_lhs_p (tree lhs)
+{
+ return (lhs
+ && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (lhs))) == INTEGER_CST
+ && !TREE_ADDRESSABLE (TREE_TYPE (lhs)));
+}
+
+
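+/* Convenience wrapper: split critical edges in preparation for inserting
+   statements on edges (see split_critical_edges above).  */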
+inline unsigned int
+split_edges_for_insertion ()
+{
+ return split_critical_edges (/*for_edge_insertion_p=*/true);
+}
+
+#endif /* _TREE_CFG_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-cfgcleanup.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-cfgcleanup.h
new file mode 100644
index 0000000..b7c7ff1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-cfgcleanup.h
@@ -0,0 +1,32 @@
+/* Header file for CFG cleanup for trees.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_CFGCLEANUP_H
+#define GCC_TREE_CFGCLEANUP_H
+
+/* In tree-cfgcleanup.cc */
+extern bitmap cfgcleanup_altered_bbs;
+extern bool cleanup_tree_cfg (unsigned = 0);
+extern bool fixup_noreturn_call (gimple *stmt);
+extern bool delete_unreachable_blocks_update_callgraph (cgraph_node *dst_node,
+ bool update_clones);
+extern unsigned clean_up_loop_closed_phi (function *);
+extern bool phi_alternatives_equal (basic_block, edge, edge);
+
+#endif /* GCC_TREE_CFGCLEANUP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-check.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-check.h
new file mode 100644
index 0000000..5f569c2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-check.h
@@ -0,0 +1,380 @@
+/* This file is generated using gencheck. Do not edit. */
+
+#ifndef GCC_TREE_CHECK_H
+#define GCC_TREE_CHECK_H
+
+#define ERROR_MARK_CHECK(t) TREE_CHECK (t, ERROR_MARK)
+#define IDENTIFIER_NODE_CHECK(t) TREE_CHECK (t, IDENTIFIER_NODE)
+#define TREE_LIST_CHECK(t) TREE_CHECK (t, TREE_LIST)
+#define TREE_VEC_CHECK(t) TREE_CHECK (t, TREE_VEC)
+#define BLOCK_CHECK(t) TREE_CHECK (t, BLOCK)
+#define OFFSET_TYPE_CHECK(t) TREE_CHECK (t, OFFSET_TYPE)
+#define ENUMERAL_TYPE_CHECK(t) TREE_CHECK (t, ENUMERAL_TYPE)
+#define BOOLEAN_TYPE_CHECK(t) TREE_CHECK (t, BOOLEAN_TYPE)
+#define INTEGER_TYPE_CHECK(t) TREE_CHECK (t, INTEGER_TYPE)
+#define REAL_TYPE_CHECK(t) TREE_CHECK (t, REAL_TYPE)
+#define POINTER_TYPE_CHECK(t) TREE_CHECK (t, POINTER_TYPE)
+#define REFERENCE_TYPE_CHECK(t) TREE_CHECK (t, REFERENCE_TYPE)
+#define NULLPTR_TYPE_CHECK(t) TREE_CHECK (t, NULLPTR_TYPE)
+#define FIXED_POINT_TYPE_CHECK(t) TREE_CHECK (t, FIXED_POINT_TYPE)
+#define COMPLEX_TYPE_CHECK(t) TREE_CHECK (t, COMPLEX_TYPE)
+#define VECTOR_TYPE_CHECK(t) TREE_CHECK (t, VECTOR_TYPE)
+#define ARRAY_TYPE_CHECK(t) TREE_CHECK (t, ARRAY_TYPE)
+#define RECORD_TYPE_CHECK(t) TREE_CHECK (t, RECORD_TYPE)
+#define UNION_TYPE_CHECK(t) TREE_CHECK (t, UNION_TYPE)
+#define QUAL_UNION_TYPE_CHECK(t) TREE_CHECK (t, QUAL_UNION_TYPE)
+#define VOID_TYPE_CHECK(t) TREE_CHECK (t, VOID_TYPE)
+#define FUNCTION_TYPE_CHECK(t) TREE_CHECK (t, FUNCTION_TYPE)
+#define METHOD_TYPE_CHECK(t) TREE_CHECK (t, METHOD_TYPE)
+#define LANG_TYPE_CHECK(t) TREE_CHECK (t, LANG_TYPE)
+#define OPAQUE_TYPE_CHECK(t) TREE_CHECK (t, OPAQUE_TYPE)
+#define VOID_CST_CHECK(t) TREE_CHECK (t, VOID_CST)
+#define INTEGER_CST_CHECK(t) TREE_CHECK (t, INTEGER_CST)
+#define POLY_INT_CST_CHECK(t) TREE_CHECK (t, POLY_INT_CST)
+#define REAL_CST_CHECK(t) TREE_CHECK (t, REAL_CST)
+#define FIXED_CST_CHECK(t) TREE_CHECK (t, FIXED_CST)
+#define COMPLEX_CST_CHECK(t) TREE_CHECK (t, COMPLEX_CST)
+#define VECTOR_CST_CHECK(t) TREE_CHECK (t, VECTOR_CST)
+#define STRING_CST_CHECK(t) TREE_CHECK (t, STRING_CST)
+#define FUNCTION_DECL_CHECK(t) TREE_CHECK (t, FUNCTION_DECL)
+#define LABEL_DECL_CHECK(t) TREE_CHECK (t, LABEL_DECL)
+#define FIELD_DECL_CHECK(t) TREE_CHECK (t, FIELD_DECL)
+#define VAR_DECL_CHECK(t) TREE_CHECK (t, VAR_DECL)
+#define CONST_DECL_CHECK(t) TREE_CHECK (t, CONST_DECL)
+#define PARM_DECL_CHECK(t) TREE_CHECK (t, PARM_DECL)
+#define TYPE_DECL_CHECK(t) TREE_CHECK (t, TYPE_DECL)
+#define RESULT_DECL_CHECK(t) TREE_CHECK (t, RESULT_DECL)
+#define DEBUG_EXPR_DECL_CHECK(t) TREE_CHECK (t, DEBUG_EXPR_DECL)
+#define DEBUG_BEGIN_STMT_CHECK(t) TREE_CHECK (t, DEBUG_BEGIN_STMT)
+#define NAMESPACE_DECL_CHECK(t) TREE_CHECK (t, NAMESPACE_DECL)
+#define IMPORTED_DECL_CHECK(t) TREE_CHECK (t, IMPORTED_DECL)
+#define NAMELIST_DECL_CHECK(t) TREE_CHECK (t, NAMELIST_DECL)
+#define TRANSLATION_UNIT_DECL_CHECK(t) TREE_CHECK (t, TRANSLATION_UNIT_DECL)
+#define COMPONENT_REF_CHECK(t) TREE_CHECK (t, COMPONENT_REF)
+#define BIT_FIELD_REF_CHECK(t) TREE_CHECK (t, BIT_FIELD_REF)
+#define ARRAY_REF_CHECK(t) TREE_CHECK (t, ARRAY_REF)
+#define ARRAY_RANGE_REF_CHECK(t) TREE_CHECK (t, ARRAY_RANGE_REF)
+#define REALPART_EXPR_CHECK(t) TREE_CHECK (t, REALPART_EXPR)
+#define IMAGPART_EXPR_CHECK(t) TREE_CHECK (t, IMAGPART_EXPR)
+#define VIEW_CONVERT_EXPR_CHECK(t) TREE_CHECK (t, VIEW_CONVERT_EXPR)
+#define INDIRECT_REF_CHECK(t) TREE_CHECK (t, INDIRECT_REF)
+#define OBJ_TYPE_REF_CHECK(t) TREE_CHECK (t, OBJ_TYPE_REF)
+#define CONSTRUCTOR_CHECK(t) TREE_CHECK (t, CONSTRUCTOR)
+#define COMPOUND_EXPR_CHECK(t) TREE_CHECK (t, COMPOUND_EXPR)
+#define MODIFY_EXPR_CHECK(t) TREE_CHECK (t, MODIFY_EXPR)
+#define INIT_EXPR_CHECK(t) TREE_CHECK (t, INIT_EXPR)
+#define TARGET_EXPR_CHECK(t) TREE_CHECK (t, TARGET_EXPR)
+#define COND_EXPR_CHECK(t) TREE_CHECK (t, COND_EXPR)
+#define VEC_DUPLICATE_EXPR_CHECK(t) TREE_CHECK (t, VEC_DUPLICATE_EXPR)
+#define VEC_SERIES_EXPR_CHECK(t) TREE_CHECK (t, VEC_SERIES_EXPR)
+#define VEC_COND_EXPR_CHECK(t) TREE_CHECK (t, VEC_COND_EXPR)
+#define VEC_PERM_EXPR_CHECK(t) TREE_CHECK (t, VEC_PERM_EXPR)
+#define BIND_EXPR_CHECK(t) TREE_CHECK (t, BIND_EXPR)
+#define CALL_EXPR_CHECK(t) TREE_CHECK (t, CALL_EXPR)
+#define WITH_CLEANUP_EXPR_CHECK(t) TREE_CHECK (t, WITH_CLEANUP_EXPR)
+#define CLEANUP_POINT_EXPR_CHECK(t) TREE_CHECK (t, CLEANUP_POINT_EXPR)
+#define PLACEHOLDER_EXPR_CHECK(t) TREE_CHECK (t, PLACEHOLDER_EXPR)
+#define PLUS_EXPR_CHECK(t) TREE_CHECK (t, PLUS_EXPR)
+#define MINUS_EXPR_CHECK(t) TREE_CHECK (t, MINUS_EXPR)
+#define MULT_EXPR_CHECK(t) TREE_CHECK (t, MULT_EXPR)
+#define POINTER_PLUS_EXPR_CHECK(t) TREE_CHECK (t, POINTER_PLUS_EXPR)
+#define POINTER_DIFF_EXPR_CHECK(t) TREE_CHECK (t, POINTER_DIFF_EXPR)
+#define MULT_HIGHPART_EXPR_CHECK(t) TREE_CHECK (t, MULT_HIGHPART_EXPR)
+#define TRUNC_DIV_EXPR_CHECK(t) TREE_CHECK (t, TRUNC_DIV_EXPR)
+#define CEIL_DIV_EXPR_CHECK(t) TREE_CHECK (t, CEIL_DIV_EXPR)
+#define FLOOR_DIV_EXPR_CHECK(t) TREE_CHECK (t, FLOOR_DIV_EXPR)
+#define ROUND_DIV_EXPR_CHECK(t) TREE_CHECK (t, ROUND_DIV_EXPR)
+#define TRUNC_MOD_EXPR_CHECK(t) TREE_CHECK (t, TRUNC_MOD_EXPR)
+#define CEIL_MOD_EXPR_CHECK(t) TREE_CHECK (t, CEIL_MOD_EXPR)
+#define FLOOR_MOD_EXPR_CHECK(t) TREE_CHECK (t, FLOOR_MOD_EXPR)
+#define ROUND_MOD_EXPR_CHECK(t) TREE_CHECK (t, ROUND_MOD_EXPR)
+#define RDIV_EXPR_CHECK(t) TREE_CHECK (t, RDIV_EXPR)
+#define EXACT_DIV_EXPR_CHECK(t) TREE_CHECK (t, EXACT_DIV_EXPR)
+#define FIX_TRUNC_EXPR_CHECK(t) TREE_CHECK (t, FIX_TRUNC_EXPR)
+#define FLOAT_EXPR_CHECK(t) TREE_CHECK (t, FLOAT_EXPR)
+#define NEGATE_EXPR_CHECK(t) TREE_CHECK (t, NEGATE_EXPR)
+#define MIN_EXPR_CHECK(t) TREE_CHECK (t, MIN_EXPR)
+#define MAX_EXPR_CHECK(t) TREE_CHECK (t, MAX_EXPR)
+#define ABS_EXPR_CHECK(t) TREE_CHECK (t, ABS_EXPR)
+#define ABSU_EXPR_CHECK(t) TREE_CHECK (t, ABSU_EXPR)
+#define LSHIFT_EXPR_CHECK(t) TREE_CHECK (t, LSHIFT_EXPR)
+#define RSHIFT_EXPR_CHECK(t) TREE_CHECK (t, RSHIFT_EXPR)
+#define LROTATE_EXPR_CHECK(t) TREE_CHECK (t, LROTATE_EXPR)
+#define RROTATE_EXPR_CHECK(t) TREE_CHECK (t, RROTATE_EXPR)
+#define BIT_IOR_EXPR_CHECK(t) TREE_CHECK (t, BIT_IOR_EXPR)
+#define BIT_XOR_EXPR_CHECK(t) TREE_CHECK (t, BIT_XOR_EXPR)
+#define BIT_AND_EXPR_CHECK(t) TREE_CHECK (t, BIT_AND_EXPR)
+#define BIT_NOT_EXPR_CHECK(t) TREE_CHECK (t, BIT_NOT_EXPR)
+#define TRUTH_ANDIF_EXPR_CHECK(t) TREE_CHECK (t, TRUTH_ANDIF_EXPR)
+#define TRUTH_ORIF_EXPR_CHECK(t) TREE_CHECK (t, TRUTH_ORIF_EXPR)
+#define TRUTH_AND_EXPR_CHECK(t) TREE_CHECK (t, TRUTH_AND_EXPR)
+#define TRUTH_OR_EXPR_CHECK(t) TREE_CHECK (t, TRUTH_OR_EXPR)
+#define TRUTH_XOR_EXPR_CHECK(t) TREE_CHECK (t, TRUTH_XOR_EXPR)
+#define TRUTH_NOT_EXPR_CHECK(t) TREE_CHECK (t, TRUTH_NOT_EXPR)
+#define LT_EXPR_CHECK(t) TREE_CHECK (t, LT_EXPR)
+#define LE_EXPR_CHECK(t) TREE_CHECK (t, LE_EXPR)
+#define GT_EXPR_CHECK(t) TREE_CHECK (t, GT_EXPR)
+#define GE_EXPR_CHECK(t) TREE_CHECK (t, GE_EXPR)
+#define LTGT_EXPR_CHECK(t) TREE_CHECK (t, LTGT_EXPR)
+#define EQ_EXPR_CHECK(t) TREE_CHECK (t, EQ_EXPR)
+#define NE_EXPR_CHECK(t) TREE_CHECK (t, NE_EXPR)
+#define UNORDERED_EXPR_CHECK(t) TREE_CHECK (t, UNORDERED_EXPR)
+#define ORDERED_EXPR_CHECK(t) TREE_CHECK (t, ORDERED_EXPR)
+#define UNLT_EXPR_CHECK(t) TREE_CHECK (t, UNLT_EXPR)
+#define UNLE_EXPR_CHECK(t) TREE_CHECK (t, UNLE_EXPR)
+#define UNGT_EXPR_CHECK(t) TREE_CHECK (t, UNGT_EXPR)
+#define UNGE_EXPR_CHECK(t) TREE_CHECK (t, UNGE_EXPR)
+#define UNEQ_EXPR_CHECK(t) TREE_CHECK (t, UNEQ_EXPR)
+#define RANGE_EXPR_CHECK(t) TREE_CHECK (t, RANGE_EXPR)
+#define PAREN_EXPR_CHECK(t) TREE_CHECK (t, PAREN_EXPR)
+#define CONVERT_EXPR_CHECK(t) TREE_CHECK (t, CONVERT_EXPR)
+#define ADDR_SPACE_CONVERT_EXPR_CHECK(t) TREE_CHECK (t, ADDR_SPACE_CONVERT_EXPR)
+#define FIXED_CONVERT_EXPR_CHECK(t) TREE_CHECK (t, FIXED_CONVERT_EXPR)
+#define NOP_EXPR_CHECK(t) TREE_CHECK (t, NOP_EXPR)
+#define NON_LVALUE_EXPR_CHECK(t) TREE_CHECK (t, NON_LVALUE_EXPR)
+#define COMPOUND_LITERAL_EXPR_CHECK(t) TREE_CHECK (t, COMPOUND_LITERAL_EXPR)
+#define SAVE_EXPR_CHECK(t) TREE_CHECK (t, SAVE_EXPR)
+#define ADDR_EXPR_CHECK(t) TREE_CHECK (t, ADDR_EXPR)
+#define FDESC_EXPR_CHECK(t) TREE_CHECK (t, FDESC_EXPR)
+#define BIT_INSERT_EXPR_CHECK(t) TREE_CHECK (t, BIT_INSERT_EXPR)
+#define COMPLEX_EXPR_CHECK(t) TREE_CHECK (t, COMPLEX_EXPR)
+#define CONJ_EXPR_CHECK(t) TREE_CHECK (t, CONJ_EXPR)
+#define PREDECREMENT_EXPR_CHECK(t) TREE_CHECK (t, PREDECREMENT_EXPR)
+#define PREINCREMENT_EXPR_CHECK(t) TREE_CHECK (t, PREINCREMENT_EXPR)
+#define POSTDECREMENT_EXPR_CHECK(t) TREE_CHECK (t, POSTDECREMENT_EXPR)
+#define POSTINCREMENT_EXPR_CHECK(t) TREE_CHECK (t, POSTINCREMENT_EXPR)
+#define VA_ARG_EXPR_CHECK(t) TREE_CHECK (t, VA_ARG_EXPR)
+#define TRY_CATCH_EXPR_CHECK(t) TREE_CHECK (t, TRY_CATCH_EXPR)
+#define TRY_FINALLY_EXPR_CHECK(t) TREE_CHECK (t, TRY_FINALLY_EXPR)
+#define EH_ELSE_EXPR_CHECK(t) TREE_CHECK (t, EH_ELSE_EXPR)
+#define DECL_EXPR_CHECK(t) TREE_CHECK (t, DECL_EXPR)
+#define LABEL_EXPR_CHECK(t) TREE_CHECK (t, LABEL_EXPR)
+#define GOTO_EXPR_CHECK(t) TREE_CHECK (t, GOTO_EXPR)
+#define RETURN_EXPR_CHECK(t) TREE_CHECK (t, RETURN_EXPR)
+#define EXIT_EXPR_CHECK(t) TREE_CHECK (t, EXIT_EXPR)
+#define LOOP_EXPR_CHECK(t) TREE_CHECK (t, LOOP_EXPR)
+#define SWITCH_EXPR_CHECK(t) TREE_CHECK (t, SWITCH_EXPR)
+#define CASE_LABEL_EXPR_CHECK(t) TREE_CHECK (t, CASE_LABEL_EXPR)
+#define ASM_EXPR_CHECK(t) TREE_CHECK (t, ASM_EXPR)
+#define SSA_NAME_CHECK(t) TREE_CHECK (t, SSA_NAME)
+#define CATCH_EXPR_CHECK(t) TREE_CHECK (t, CATCH_EXPR)
+#define EH_FILTER_EXPR_CHECK(t) TREE_CHECK (t, EH_FILTER_EXPR)
+#define SCEV_KNOWN_CHECK(t) TREE_CHECK (t, SCEV_KNOWN)
+#define SCEV_NOT_KNOWN_CHECK(t) TREE_CHECK (t, SCEV_NOT_KNOWN)
+#define POLYNOMIAL_CHREC_CHECK(t) TREE_CHECK (t, POLYNOMIAL_CHREC)
+#define STATEMENT_LIST_CHECK(t) TREE_CHECK (t, STATEMENT_LIST)
+#define ASSERT_EXPR_CHECK(t) TREE_CHECK (t, ASSERT_EXPR)
+#define TREE_BINFO_CHECK(t) TREE_CHECK (t, TREE_BINFO)
+#define WITH_SIZE_EXPR_CHECK(t) TREE_CHECK (t, WITH_SIZE_EXPR)
+#define REALIGN_LOAD_EXPR_CHECK(t) TREE_CHECK (t, REALIGN_LOAD_EXPR)
+#define TARGET_MEM_REF_CHECK(t) TREE_CHECK (t, TARGET_MEM_REF)
+#define MEM_REF_CHECK(t) TREE_CHECK (t, MEM_REF)
+#define OACC_PARALLEL_CHECK(t) TREE_CHECK (t, OACC_PARALLEL)
+#define OACC_KERNELS_CHECK(t) TREE_CHECK (t, OACC_KERNELS)
+#define OACC_SERIAL_CHECK(t) TREE_CHECK (t, OACC_SERIAL)
+#define OACC_DATA_CHECK(t) TREE_CHECK (t, OACC_DATA)
+#define OACC_HOST_DATA_CHECK(t) TREE_CHECK (t, OACC_HOST_DATA)
+#define OMP_PARALLEL_CHECK(t) TREE_CHECK (t, OMP_PARALLEL)
+#define OMP_TASK_CHECK(t) TREE_CHECK (t, OMP_TASK)
+#define OMP_FOR_CHECK(t) TREE_CHECK (t, OMP_FOR)
+#define OMP_SIMD_CHECK(t) TREE_CHECK (t, OMP_SIMD)
+#define OMP_DISTRIBUTE_CHECK(t) TREE_CHECK (t, OMP_DISTRIBUTE)
+#define OMP_TASKLOOP_CHECK(t) TREE_CHECK (t, OMP_TASKLOOP)
+#define OMP_LOOP_CHECK(t) TREE_CHECK (t, OMP_LOOP)
+#define OACC_LOOP_CHECK(t) TREE_CHECK (t, OACC_LOOP)
+#define OMP_TEAMS_CHECK(t) TREE_CHECK (t, OMP_TEAMS)
+#define OMP_TARGET_DATA_CHECK(t) TREE_CHECK (t, OMP_TARGET_DATA)
+#define OMP_TARGET_CHECK(t) TREE_CHECK (t, OMP_TARGET)
+#define OMP_SECTIONS_CHECK(t) TREE_CHECK (t, OMP_SECTIONS)
+#define OMP_ORDERED_CHECK(t) TREE_CHECK (t, OMP_ORDERED)
+#define OMP_CRITICAL_CHECK(t) TREE_CHECK (t, OMP_CRITICAL)
+#define OMP_SINGLE_CHECK(t) TREE_CHECK (t, OMP_SINGLE)
+#define OMP_SCOPE_CHECK(t) TREE_CHECK (t, OMP_SCOPE)
+#define OMP_TASKGROUP_CHECK(t) TREE_CHECK (t, OMP_TASKGROUP)
+#define OMP_MASKED_CHECK(t) TREE_CHECK (t, OMP_MASKED)
+#define OMP_SCAN_CHECK(t) TREE_CHECK (t, OMP_SCAN)
+#define OMP_SECTION_CHECK(t) TREE_CHECK (t, OMP_SECTION)
+#define OMP_MASTER_CHECK(t) TREE_CHECK (t, OMP_MASTER)
+#define OACC_CACHE_CHECK(t) TREE_CHECK (t, OACC_CACHE)
+#define OACC_DECLARE_CHECK(t) TREE_CHECK (t, OACC_DECLARE)
+#define OACC_ENTER_DATA_CHECK(t) TREE_CHECK (t, OACC_ENTER_DATA)
+#define OACC_EXIT_DATA_CHECK(t) TREE_CHECK (t, OACC_EXIT_DATA)
+#define OACC_UPDATE_CHECK(t) TREE_CHECK (t, OACC_UPDATE)
+#define OMP_TARGET_UPDATE_CHECK(t) TREE_CHECK (t, OMP_TARGET_UPDATE)
+#define OMP_TARGET_ENTER_DATA_CHECK(t) TREE_CHECK (t, OMP_TARGET_ENTER_DATA)
+#define OMP_TARGET_EXIT_DATA_CHECK(t) TREE_CHECK (t, OMP_TARGET_EXIT_DATA)
+#define OMP_ATOMIC_CHECK(t) TREE_CHECK (t, OMP_ATOMIC)
+#define OMP_ATOMIC_READ_CHECK(t) TREE_CHECK (t, OMP_ATOMIC_READ)
+#define OMP_ATOMIC_CAPTURE_OLD_CHECK(t) TREE_CHECK (t, OMP_ATOMIC_CAPTURE_OLD)
+#define OMP_ATOMIC_CAPTURE_NEW_CHECK(t) TREE_CHECK (t, OMP_ATOMIC_CAPTURE_NEW)
+#define OMP_CLAUSE_CHECK(t) TREE_CHECK (t, OMP_CLAUSE)
+#define TRANSACTION_EXPR_CHECK(t) TREE_CHECK (t, TRANSACTION_EXPR)
+#define DOT_PROD_EXPR_CHECK(t) TREE_CHECK (t, DOT_PROD_EXPR)
+#define WIDEN_SUM_EXPR_CHECK(t) TREE_CHECK (t, WIDEN_SUM_EXPR)
+#define SAD_EXPR_CHECK(t) TREE_CHECK (t, SAD_EXPR)
+#define WIDEN_MULT_EXPR_CHECK(t) TREE_CHECK (t, WIDEN_MULT_EXPR)
+#define WIDEN_MULT_PLUS_EXPR_CHECK(t) TREE_CHECK (t, WIDEN_MULT_PLUS_EXPR)
+#define WIDEN_MULT_MINUS_EXPR_CHECK(t) TREE_CHECK (t, WIDEN_MULT_MINUS_EXPR)
+#define WIDEN_LSHIFT_EXPR_CHECK(t) TREE_CHECK (t, WIDEN_LSHIFT_EXPR)
+#define WIDEN_PLUS_EXPR_CHECK(t) TREE_CHECK (t, WIDEN_PLUS_EXPR)
+#define WIDEN_MINUS_EXPR_CHECK(t) TREE_CHECK (t, WIDEN_MINUS_EXPR)
+#define VEC_WIDEN_MULT_HI_EXPR_CHECK(t) TREE_CHECK (t, VEC_WIDEN_MULT_HI_EXPR)
+#define VEC_WIDEN_MULT_LO_EXPR_CHECK(t) TREE_CHECK (t, VEC_WIDEN_MULT_LO_EXPR)
+#define VEC_WIDEN_MULT_EVEN_EXPR_CHECK(t) TREE_CHECK (t, VEC_WIDEN_MULT_EVEN_EXPR)
+#define VEC_WIDEN_MULT_ODD_EXPR_CHECK(t) TREE_CHECK (t, VEC_WIDEN_MULT_ODD_EXPR)
+#define VEC_UNPACK_HI_EXPR_CHECK(t) TREE_CHECK (t, VEC_UNPACK_HI_EXPR)
+#define VEC_UNPACK_LO_EXPR_CHECK(t) TREE_CHECK (t, VEC_UNPACK_LO_EXPR)
+#define VEC_UNPACK_FLOAT_HI_EXPR_CHECK(t) TREE_CHECK (t, VEC_UNPACK_FLOAT_HI_EXPR)
+#define VEC_UNPACK_FLOAT_LO_EXPR_CHECK(t) TREE_CHECK (t, VEC_UNPACK_FLOAT_LO_EXPR)
+#define VEC_UNPACK_FIX_TRUNC_HI_EXPR_CHECK(t) TREE_CHECK (t, VEC_UNPACK_FIX_TRUNC_HI_EXPR)
+#define VEC_UNPACK_FIX_TRUNC_LO_EXPR_CHECK(t) TREE_CHECK (t, VEC_UNPACK_FIX_TRUNC_LO_EXPR)
+#define VEC_PACK_TRUNC_EXPR_CHECK(t) TREE_CHECK (t, VEC_PACK_TRUNC_EXPR)
+#define VEC_PACK_SAT_EXPR_CHECK(t) TREE_CHECK (t, VEC_PACK_SAT_EXPR)
+#define VEC_PACK_FIX_TRUNC_EXPR_CHECK(t) TREE_CHECK (t, VEC_PACK_FIX_TRUNC_EXPR)
+#define VEC_PACK_FLOAT_EXPR_CHECK(t) TREE_CHECK (t, VEC_PACK_FLOAT_EXPR)
+#define VEC_WIDEN_LSHIFT_HI_EXPR_CHECK(t) TREE_CHECK (t, VEC_WIDEN_LSHIFT_HI_EXPR)
+#define VEC_WIDEN_LSHIFT_LO_EXPR_CHECK(t) TREE_CHECK (t, VEC_WIDEN_LSHIFT_LO_EXPR)
+#define VEC_WIDEN_PLUS_HI_EXPR_CHECK(t) TREE_CHECK (t, VEC_WIDEN_PLUS_HI_EXPR)
+#define VEC_WIDEN_PLUS_LO_EXPR_CHECK(t) TREE_CHECK (t, VEC_WIDEN_PLUS_LO_EXPR)
+#define VEC_WIDEN_MINUS_HI_EXPR_CHECK(t) TREE_CHECK (t, VEC_WIDEN_MINUS_HI_EXPR)
+#define VEC_WIDEN_MINUS_LO_EXPR_CHECK(t) TREE_CHECK (t, VEC_WIDEN_MINUS_LO_EXPR)
+#define PREDICT_EXPR_CHECK(t) TREE_CHECK (t, PREDICT_EXPR)
+#define OPTIMIZATION_NODE_CHECK(t) TREE_CHECK (t, OPTIMIZATION_NODE)
+#define TARGET_OPTION_NODE_CHECK(t) TREE_CHECK (t, TARGET_OPTION_NODE)
+#define ANNOTATE_EXPR_CHECK(t) TREE_CHECK (t, ANNOTATE_EXPR)
+#define C_MAYBE_CONST_EXPR_CHECK(t) TREE_CHECK (t, C_MAYBE_CONST_EXPR)
+#define EXCESS_PRECISION_EXPR_CHECK(t) TREE_CHECK (t, EXCESS_PRECISION_EXPR)
+#define USERDEF_LITERAL_CHECK(t) TREE_CHECK (t, USERDEF_LITERAL)
+#define SIZEOF_EXPR_CHECK(t) TREE_CHECK (t, SIZEOF_EXPR)
+#define PAREN_SIZEOF_EXPR_CHECK(t) TREE_CHECK (t, PAREN_SIZEOF_EXPR)
+#define FOR_STMT_CHECK(t) TREE_CHECK (t, FOR_STMT)
+#define WHILE_STMT_CHECK(t) TREE_CHECK (t, WHILE_STMT)
+#define DO_STMT_CHECK(t) TREE_CHECK (t, DO_STMT)
+#define BREAK_STMT_CHECK(t) TREE_CHECK (t, BREAK_STMT)
+#define CONTINUE_STMT_CHECK(t) TREE_CHECK (t, CONTINUE_STMT)
+#define SWITCH_STMT_CHECK(t) TREE_CHECK (t, SWITCH_STMT)
+#define CONCEPT_DECL_CHECK(t) TREE_CHECK (t, CONCEPT_DECL)
+#define UNCONSTRAINED_ARRAY_TYPE_CHECK(t) TREE_CHECK (t, UNCONSTRAINED_ARRAY_TYPE)
+#define UNCONSTRAINED_ARRAY_REF_CHECK(t) TREE_CHECK (t, UNCONSTRAINED_ARRAY_REF)
+#define LOAD_EXPR_CHECK(t) TREE_CHECK (t, LOAD_EXPR)
+#define NULL_EXPR_CHECK(t) TREE_CHECK (t, NULL_EXPR)
+#define PLUS_NOMOD_EXPR_CHECK(t) TREE_CHECK (t, PLUS_NOMOD_EXPR)
+#define MINUS_NOMOD_EXPR_CHECK(t) TREE_CHECK (t, MINUS_NOMOD_EXPR)
+#define POWER_EXPR_CHECK(t) TREE_CHECK (t, POWER_EXPR)
+#define ATTR_ADDR_EXPR_CHECK(t) TREE_CHECK (t, ATTR_ADDR_EXPR)
+#define STMT_STMT_CHECK(t) TREE_CHECK (t, STMT_STMT)
+#define LOOP_STMT_CHECK(t) TREE_CHECK (t, LOOP_STMT)
+#define EXIT_STMT_CHECK(t) TREE_CHECK (t, EXIT_STMT)
+#define OFFSET_REF_CHECK(t) TREE_CHECK (t, OFFSET_REF)
+#define PTRMEM_CST_CHECK(t) TREE_CHECK (t, PTRMEM_CST)
+#define NEW_EXPR_CHECK(t) TREE_CHECK (t, NEW_EXPR)
+#define VEC_NEW_EXPR_CHECK(t) TREE_CHECK (t, VEC_NEW_EXPR)
+#define DELETE_EXPR_CHECK(t) TREE_CHECK (t, DELETE_EXPR)
+#define VEC_DELETE_EXPR_CHECK(t) TREE_CHECK (t, VEC_DELETE_EXPR)
+#define SCOPE_REF_CHECK(t) TREE_CHECK (t, SCOPE_REF)
+#define MEMBER_REF_CHECK(t) TREE_CHECK (t, MEMBER_REF)
+#define TYPE_EXPR_CHECK(t) TREE_CHECK (t, TYPE_EXPR)
+#define AGGR_INIT_EXPR_CHECK(t) TREE_CHECK (t, AGGR_INIT_EXPR)
+#define VEC_INIT_EXPR_CHECK(t) TREE_CHECK (t, VEC_INIT_EXPR)
+#define THROW_EXPR_CHECK(t) TREE_CHECK (t, THROW_EXPR)
+#define EMPTY_CLASS_EXPR_CHECK(t) TREE_CHECK (t, EMPTY_CLASS_EXPR)
+#define BASELINK_CHECK(t) TREE_CHECK (t, BASELINK)
+#define TEMPLATE_DECL_CHECK(t) TREE_CHECK (t, TEMPLATE_DECL)
+#define TEMPLATE_PARM_INDEX_CHECK(t) TREE_CHECK (t, TEMPLATE_PARM_INDEX)
+#define TEMPLATE_TEMPLATE_PARM_CHECK(t) TREE_CHECK (t, TEMPLATE_TEMPLATE_PARM)
+#define TEMPLATE_TYPE_PARM_CHECK(t) TREE_CHECK (t, TEMPLATE_TYPE_PARM)
+#define TYPENAME_TYPE_CHECK(t) TREE_CHECK (t, TYPENAME_TYPE)
+#define TYPEOF_TYPE_CHECK(t) TREE_CHECK (t, TYPEOF_TYPE)
+#define BOUND_TEMPLATE_TEMPLATE_PARM_CHECK(t) TREE_CHECK (t, BOUND_TEMPLATE_TEMPLATE_PARM)
+#define UNBOUND_CLASS_TEMPLATE_CHECK(t) TREE_CHECK (t, UNBOUND_CLASS_TEMPLATE)
+#define USING_DECL_CHECK(t) TREE_CHECK (t, USING_DECL)
+#define USING_STMT_CHECK(t) TREE_CHECK (t, USING_STMT)
+#define DEFERRED_PARSE_CHECK(t) TREE_CHECK (t, DEFERRED_PARSE)
+#define DEFERRED_NOEXCEPT_CHECK(t) TREE_CHECK (t, DEFERRED_NOEXCEPT)
+#define TEMPLATE_ID_EXPR_CHECK(t) TREE_CHECK (t, TEMPLATE_ID_EXPR)
+#define OVERLOAD_CHECK(t) TREE_CHECK (t, OVERLOAD)
+#define BINDING_VECTOR_CHECK(t) TREE_CHECK (t, BINDING_VECTOR)
+#define PSEUDO_DTOR_EXPR_CHECK(t) TREE_CHECK (t, PSEUDO_DTOR_EXPR)
+#define MODOP_EXPR_CHECK(t) TREE_CHECK (t, MODOP_EXPR)
+#define CAST_EXPR_CHECK(t) TREE_CHECK (t, CAST_EXPR)
+#define REINTERPRET_CAST_EXPR_CHECK(t) TREE_CHECK (t, REINTERPRET_CAST_EXPR)
+#define CONST_CAST_EXPR_CHECK(t) TREE_CHECK (t, CONST_CAST_EXPR)
+#define STATIC_CAST_EXPR_CHECK(t) TREE_CHECK (t, STATIC_CAST_EXPR)
+#define DYNAMIC_CAST_EXPR_CHECK(t) TREE_CHECK (t, DYNAMIC_CAST_EXPR)
+#define IMPLICIT_CONV_EXPR_CHECK(t) TREE_CHECK (t, IMPLICIT_CONV_EXPR)
+#define DOTSTAR_EXPR_CHECK(t) TREE_CHECK (t, DOTSTAR_EXPR)
+#define TYPEID_EXPR_CHECK(t) TREE_CHECK (t, TYPEID_EXPR)
+#define NOEXCEPT_EXPR_CHECK(t) TREE_CHECK (t, NOEXCEPT_EXPR)
+#define SPACESHIP_EXPR_CHECK(t) TREE_CHECK (t, SPACESHIP_EXPR)
+#define NON_DEPENDENT_EXPR_CHECK(t) TREE_CHECK (t, NON_DEPENDENT_EXPR)
+#define CTOR_INITIALIZER_CHECK(t) TREE_CHECK (t, CTOR_INITIALIZER)
+#define TRY_BLOCK_CHECK(t) TREE_CHECK (t, TRY_BLOCK)
+#define EH_SPEC_BLOCK_CHECK(t) TREE_CHECK (t, EH_SPEC_BLOCK)
+#define HANDLER_CHECK(t) TREE_CHECK (t, HANDLER)
+#define MUST_NOT_THROW_EXPR_CHECK(t) TREE_CHECK (t, MUST_NOT_THROW_EXPR)
+#define CLEANUP_STMT_CHECK(t) TREE_CHECK (t, CLEANUP_STMT)
+#define IF_STMT_CHECK(t) TREE_CHECK (t, IF_STMT)
+#define RANGE_FOR_STMT_CHECK(t) TREE_CHECK (t, RANGE_FOR_STMT)
+#define EXPR_STMT_CHECK(t) TREE_CHECK (t, EXPR_STMT)
+#define TAG_DEFN_CHECK(t) TREE_CHECK (t, TAG_DEFN)
+#define OFFSETOF_EXPR_CHECK(t) TREE_CHECK (t, OFFSETOF_EXPR)
+#define ADDRESSOF_EXPR_CHECK(t) TREE_CHECK (t, ADDRESSOF_EXPR)
+#define ARROW_EXPR_CHECK(t) TREE_CHECK (t, ARROW_EXPR)
+#define ALIGNOF_EXPR_CHECK(t) TREE_CHECK (t, ALIGNOF_EXPR)
+#define AT_ENCODE_EXPR_CHECK(t) TREE_CHECK (t, AT_ENCODE_EXPR)
+#define STMT_EXPR_CHECK(t) TREE_CHECK (t, STMT_EXPR)
+#define UNARY_PLUS_EXPR_CHECK(t) TREE_CHECK (t, UNARY_PLUS_EXPR)
+#define STATIC_ASSERT_CHECK(t) TREE_CHECK (t, STATIC_ASSERT)
+#define TYPE_ARGUMENT_PACK_CHECK(t) TREE_CHECK (t, TYPE_ARGUMENT_PACK)
+#define NONTYPE_ARGUMENT_PACK_CHECK(t) TREE_CHECK (t, NONTYPE_ARGUMENT_PACK)
+#define TYPE_PACK_EXPANSION_CHECK(t) TREE_CHECK (t, TYPE_PACK_EXPANSION)
+#define EXPR_PACK_EXPANSION_CHECK(t) TREE_CHECK (t, EXPR_PACK_EXPANSION)
+#define ARGUMENT_PACK_SELECT_CHECK(t) TREE_CHECK (t, ARGUMENT_PACK_SELECT)
+#define UNARY_LEFT_FOLD_EXPR_CHECK(t) TREE_CHECK (t, UNARY_LEFT_FOLD_EXPR)
+#define UNARY_RIGHT_FOLD_EXPR_CHECK(t) TREE_CHECK (t, UNARY_RIGHT_FOLD_EXPR)
+#define BINARY_LEFT_FOLD_EXPR_CHECK(t) TREE_CHECK (t, BINARY_LEFT_FOLD_EXPR)
+#define BINARY_RIGHT_FOLD_EXPR_CHECK(t) TREE_CHECK (t, BINARY_RIGHT_FOLD_EXPR)
+#define BIT_CAST_EXPR_CHECK(t) TREE_CHECK (t, BIT_CAST_EXPR)
+#define TRAIT_EXPR_CHECK(t) TREE_CHECK (t, TRAIT_EXPR)
+#define TRAIT_TYPE_CHECK(t) TREE_CHECK (t, TRAIT_TYPE)
+#define LAMBDA_EXPR_CHECK(t) TREE_CHECK (t, LAMBDA_EXPR)
+#define DECLTYPE_TYPE_CHECK(t) TREE_CHECK (t, DECLTYPE_TYPE)
+#define BASES_CHECK(t) TREE_CHECK (t, BASES)
+#define DEPENDENT_OPERATOR_TYPE_CHECK(t) TREE_CHECK (t, DEPENDENT_OPERATOR_TYPE)
+#define TEMPLATE_INFO_CHECK(t) TREE_CHECK (t, TEMPLATE_INFO)
+#define OMP_DEPOBJ_CHECK(t) TREE_CHECK (t, OMP_DEPOBJ)
+#define CONSTRAINT_INFO_CHECK(t) TREE_CHECK (t, CONSTRAINT_INFO)
+#define WILDCARD_DECL_CHECK(t) TREE_CHECK (t, WILDCARD_DECL)
+#define REQUIRES_EXPR_CHECK(t) TREE_CHECK (t, REQUIRES_EXPR)
+#define SIMPLE_REQ_CHECK(t) TREE_CHECK (t, SIMPLE_REQ)
+#define TYPE_REQ_CHECK(t) TREE_CHECK (t, TYPE_REQ)
+#define COMPOUND_REQ_CHECK(t) TREE_CHECK (t, COMPOUND_REQ)
+#define NESTED_REQ_CHECK(t) TREE_CHECK (t, NESTED_REQ)
+#define ATOMIC_CONSTR_CHECK(t) TREE_CHECK (t, ATOMIC_CONSTR)
+#define CONJ_CONSTR_CHECK(t) TREE_CHECK (t, CONJ_CONSTR)
+#define DISJ_CONSTR_CHECK(t) TREE_CHECK (t, DISJ_CONSTR)
+#define CHECK_CONSTR_CHECK(t) TREE_CHECK (t, CHECK_CONSTR)
+#define CO_AWAIT_EXPR_CHECK(t) TREE_CHECK (t, CO_AWAIT_EXPR)
+#define CO_YIELD_EXPR_CHECK(t) TREE_CHECK (t, CO_YIELD_EXPR)
+#define CO_RETURN_EXPR_CHECK(t) TREE_CHECK (t, CO_RETURN_EXPR)
+#define ASSERTION_STMT_CHECK(t) TREE_CHECK (t, ASSERTION_STMT)
+#define PRECONDITION_STMT_CHECK(t) TREE_CHECK (t, PRECONDITION_STMT)
+#define POSTCONDITION_STMT_CHECK(t) TREE_CHECK (t, POSTCONDITION_STMT)
+#define UNSIGNED_RSHIFT_EXPR_CHECK(t) TREE_CHECK (t, UNSIGNED_RSHIFT_EXPR)
+#define FLOAT_MOD_EXPR_CHECK(t) TREE_CHECK (t, FLOAT_MOD_EXPR)
+#define FUNCFRAME_INFO_CHECK(t) TREE_CHECK (t, FUNCFRAME_INFO)
+#define SET_TYPE_CHECK(t) TREE_CHECK (t, SET_TYPE)
+#define CLASS_INTERFACE_TYPE_CHECK(t) TREE_CHECK (t, CLASS_INTERFACE_TYPE)
+#define CLASS_IMPLEMENTATION_TYPE_CHECK(t) TREE_CHECK (t, CLASS_IMPLEMENTATION_TYPE)
+#define CATEGORY_INTERFACE_TYPE_CHECK(t) TREE_CHECK (t, CATEGORY_INTERFACE_TYPE)
+#define CATEGORY_IMPLEMENTATION_TYPE_CHECK(t) TREE_CHECK (t, CATEGORY_IMPLEMENTATION_TYPE)
+#define PROTOCOL_INTERFACE_TYPE_CHECK(t) TREE_CHECK (t, PROTOCOL_INTERFACE_TYPE)
+#define KEYWORD_DECL_CHECK(t) TREE_CHECK (t, KEYWORD_DECL)
+#define INSTANCE_METHOD_DECL_CHECK(t) TREE_CHECK (t, INSTANCE_METHOD_DECL)
+#define CLASS_METHOD_DECL_CHECK(t) TREE_CHECK (t, CLASS_METHOD_DECL)
+#define PROPERTY_DECL_CHECK(t) TREE_CHECK (t, PROPERTY_DECL)
+#define MESSAGE_SEND_EXPR_CHECK(t) TREE_CHECK (t, MESSAGE_SEND_EXPR)
+#define CLASS_REFERENCE_EXPR_CHECK(t) TREE_CHECK (t, CLASS_REFERENCE_EXPR)
+#define PROPERTY_REF_CHECK(t) TREE_CHECK (t, PROPERTY_REF)
+
+#endif /* GCC_TREE_CHECK_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-chrec.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-chrec.h
new file mode 100644
index 0000000..9c412dc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-chrec.h
@@ -0,0 +1,253 @@
+/* Chains of recurrences.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Sebastian Pop <pop@cri.ensmp.fr>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_CHREC_H
+#define GCC_TREE_CHREC_H
+
+/* The following trees are unique elements. Thus the comparison of another
+ element to these elements should be done on the pointer to these trees,
+ and not on their value.
+
+ extern tree chrec_not_analyzed_yet;
+ extern tree chrec_dont_know;
+ extern tree chrec_known;
+
+ chrec_not_analyzed_yet is NULL_TREE and the others are defined
+ in global_trees[]. */
+
+/* After having added an automatically generated element, please
+ include it in the following function. */
+
+inline bool
+automatically_generated_chrec_p (const_tree chrec)
+{
+ return (chrec == chrec_dont_know
+ || chrec == chrec_known);
+}
+
+/* The tree nodes, also known as CHRECs.  */
+
+inline bool
+tree_is_chrec (const_tree expr)
+{
+ if (TREE_CODE (expr) == POLYNOMIAL_CHREC
+ || automatically_generated_chrec_p (expr))
+ return true;
+ else
+ return false;
+}
+
+
+enum ev_direction {EV_DIR_GROWS, EV_DIR_DECREASES, EV_DIR_UNKNOWN};
+enum ev_direction scev_direction (const_tree);
+
+/* Chrec folding functions. */
+extern tree chrec_fold_plus (tree, tree, tree);
+extern tree chrec_fold_minus (tree, tree, tree);
+extern tree chrec_fold_multiply (tree, tree, tree);
+extern tree chrec_convert (tree, tree, gimple *, bool = true, tree = NULL);
+extern tree chrec_convert_rhs (tree, tree, gimple *);
+extern tree chrec_convert_aggressive (tree, tree, bool *);
+
+/* Operations. */
+extern tree chrec_apply (unsigned, tree, tree);
+extern tree chrec_apply_map (tree, vec<tree> );
+extern tree chrec_replace_initial_condition (tree, tree);
+extern tree initial_condition (tree);
+extern tree initial_condition_in_loop_num (tree, unsigned);
+extern tree evolution_part_in_loop_num (tree, unsigned);
+extern tree hide_evolution_in_other_loops_than_loop (tree, unsigned);
+extern tree reset_evolution_in_loop (unsigned, tree, tree);
+extern tree chrec_merge (tree, tree);
+extern void for_each_scev_op (tree *, bool (*) (tree *, void *), void *);
+extern bool convert_affine_scev (class loop *, tree, tree *, tree *, gimple *,
+ bool, tree = NULL);
+
+/* Observers. */
+extern bool eq_evolutions_p (const_tree, const_tree);
+extern bool is_multivariate_chrec (const_tree);
+extern bool chrec_contains_symbols (const_tree, class loop * = NULL);
+extern bool chrec_contains_symbols_defined_in_loop (const_tree, unsigned);
+extern bool chrec_contains_undetermined (const_tree);
+extern bool tree_contains_chrecs (const_tree, int *);
+extern bool evolution_function_is_affine_multivariate_p (const_tree, int);
+extern bool evolution_function_is_univariate_p (const_tree, int = 0);
+extern unsigned nb_vars_in_chrec (tree);
+extern bool evolution_function_is_invariant_p (tree, int);
+extern bool scev_is_linear_expression (tree);
+extern bool evolution_function_right_is_integer_cst (const_tree);
+
+/* Determines whether CHREC is equal to zero. */
+
+inline bool
+chrec_zerop (const_tree chrec)
+{
+ if (chrec == NULL_TREE)
+ return false;
+
+ if (TREE_CODE (chrec) == INTEGER_CST)
+ return integer_zerop (chrec);
+
+ return false;
+}
+
+/* Determines whether CHREC is a loop invariant with respect to LOOP_NUM.
+ Set the result in RES and return true when the property can be computed. */
+
+inline bool
+no_evolution_in_loop_p (tree chrec, unsigned loop_num, bool *res)
+{
+ tree scev;
+
+ if (chrec == chrec_not_analyzed_yet
+ || chrec == chrec_dont_know
+ || chrec_contains_symbols_defined_in_loop (chrec, loop_num))
+ return false;
+
+ STRIP_NOPS (chrec);
+ scev = hide_evolution_in_other_loops_than_loop (chrec, loop_num);
+ *res = !tree_contains_chrecs (scev, NULL);
+ return true;
+}
+
+/* Build a polynomial chain of recurrence. */
+
+inline tree
+build_polynomial_chrec (unsigned loop_num,
+ tree left,
+ tree right)
+{
+ bool val;
+
+ if (left == chrec_dont_know
+ || right == chrec_dont_know)
+ return chrec_dont_know;
+
+ if (!no_evolution_in_loop_p (left, loop_num, &val)
+ || !val)
+ return chrec_dont_know;
+
+ /* Types of left and right sides of a chrec should be compatible, but
+ pointer CHRECs are special in that the evolution is of ptroff type. */
+ if (POINTER_TYPE_P (TREE_TYPE (left)))
+ gcc_checking_assert (ptrofftype_p (TREE_TYPE (right)));
+ else
+ {
+ /* Pointer types should occur only on the left hand side, i.e. in
+ the base of the chrec, and not in the step. */
+ gcc_checking_assert (!POINTER_TYPE_P (TREE_TYPE (right))
+ && types_compatible_p (TREE_TYPE (left),
+ TREE_TYPE (right)));
+ }
+
+ if (chrec_zerop (right))
+ return left;
+
+ tree chrec = build2 (POLYNOMIAL_CHREC, TREE_TYPE (left), left, right);
+ CHREC_VARIABLE (chrec) = loop_num;
+ return chrec;
+}
+
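+/* Illustrative example (not part of the upstream header): the canonical
+   induction variable of loop 1, the value that starts at 0 and grows by 1
+   each iteration, is the chrec {0, +, 1}_1, built as
+
+     tree iv = build_polynomial_chrec (1, integer_zero_node,
+                                       integer_one_node);
+
+   CHREC_LEFT (iv) then yields the base and CHREC_RIGHT (iv) the step.  */
+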
+/* Determines whether the expression CHREC is a constant. */
+
+inline bool
+evolution_function_is_constant_p (const_tree chrec)
+{
+ if (chrec == NULL_TREE)
+ return false;
+
+ return is_gimple_min_invariant (chrec);
+}
+
+/* Determine whether CHREC is an affine evolution function in LOOPNUM. */
+
+inline bool
+evolution_function_is_affine_in_loop (const_tree chrec, int loopnum)
+{
+ if (chrec == NULL_TREE)
+ return false;
+
+ switch (TREE_CODE (chrec))
+ {
+ case POLYNOMIAL_CHREC:
+ if (evolution_function_is_invariant_p (CHREC_LEFT (chrec), loopnum)
+ && evolution_function_is_invariant_p (CHREC_RIGHT (chrec), loopnum))
+ return true;
+ else
+ return false;
+
+ default:
+ return false;
+ }
+}
+
+/* Determine whether CHREC is an affine evolution function or not. */
+
+inline bool
+evolution_function_is_affine_p (const_tree chrec)
+{
+ return chrec
+ && TREE_CODE (chrec) == POLYNOMIAL_CHREC
+ && evolution_function_is_invariant_p (CHREC_RIGHT (chrec),
+ CHREC_VARIABLE (chrec))
+ && (TREE_CODE (CHREC_RIGHT (chrec)) != POLYNOMIAL_CHREC
+ || evolution_function_is_affine_p (CHREC_RIGHT (chrec)));
+}
+
+/* Determines whether EXPR does not contain chrec expressions.  */
+
+inline bool
+tree_does_not_contain_chrecs (const_tree expr)
+{
+ return !tree_contains_chrecs (expr, NULL);
+}
+
+/* Returns the type of the chrec. */
+
+inline tree
+chrec_type (const_tree chrec)
+{
+ if (automatically_generated_chrec_p (chrec))
+ return NULL_TREE;
+
+ return TREE_TYPE (chrec);
+}
+
+inline tree
+chrec_fold_op (enum tree_code code, tree type, tree op0, tree op1)
+{
+ switch (code)
+ {
+ case PLUS_EXPR:
+ return chrec_fold_plus (type, op0, op1);
+
+ case MINUS_EXPR:
+ return chrec_fold_minus (type, op0, op1);
+
+ case MULT_EXPR:
+ return chrec_fold_multiply (type, op0, op1);
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+#endif /* GCC_TREE_CHREC_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-core.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-core.h
new file mode 100644
index 0000000..fd2be57
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-core.h
@@ -0,0 +1,2389 @@
+/* Core data structures for the 'tree' type.
+ Copyright (C) 1989-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_CORE_H
+#define GCC_TREE_CORE_H
+
+#include "symtab.h"
+
+/* This file contains all the data structures that define the 'tree' type.
+   There are no accessor macros or functions in this file, only the
+   basic data structures, extern declarations and type definitions.  */
+
+/*---------------------------------------------------------------------------
+ Forward type declarations. Mostly to avoid including unnecessary headers
+---------------------------------------------------------------------------*/
+struct function;
+struct real_value;
+struct fixed_value;
+struct ptr_info_def;
+struct irange_storage_slot;
+struct die_struct;
+
+
+/*---------------------------------------------------------------------------
+ #defined constants
+---------------------------------------------------------------------------*/
+/* Nonzero if this is a call to a function whose return value depends
+ solely on its arguments, has no side effects, and does not read
+ global memory. This corresponds to TREE_READONLY for function
+ decls. */
+#define ECF_CONST (1 << 0)
+
+/* Nonzero if this is a call to "pure" function (like const function,
+   but may read memory).  This corresponds to DECL_PURE_P for function
+ decls. */
+#define ECF_PURE (1 << 1)
+
+/* Nonzero if this is ECF_CONST or ECF_PURE but cannot be proven not to
+   loop infinitely.  This corresponds to DECL_LOOPING_CONST_OR_PURE_P
+   for function decls.  */
+#define ECF_LOOPING_CONST_OR_PURE (1 << 2)
+
+/* Nonzero if this call will never return. */
+#define ECF_NORETURN (1 << 3)
+
+/* Nonzero if this is a call to malloc or a related function. */
+#define ECF_MALLOC (1 << 4)
+
+/* Nonzero if it is plausible that this is a call to alloca. */
+#define ECF_MAY_BE_ALLOCA (1 << 5)
+
+/* Nonzero if this is a call to a function that won't throw an exception. */
+#define ECF_NOTHROW (1 << 6)
+
+/* Nonzero if this is a call to setjmp or a related function. */
+#define ECF_RETURNS_TWICE (1 << 7)
+
+/* Nonzero if this call replaces the current stack frame. */
+#define ECF_SIBCALL (1 << 8)
+
+/* Function does not read or write memory (but may have side effects, so
+ it does not necessarily fit ECF_CONST). */
+#define ECF_NOVOPS (1 << 9)
+
+/* The function does not lead to calls within current function unit. */
+#define ECF_LEAF (1 << 10)
+
+/* Nonzero if this call returns its first argument. */
+#define ECF_RET1 (1 << 11)
+
+/* Nonzero if this call does not affect transactions. */
+#define ECF_TM_PURE (1 << 12)
+
+/* Nonzero if this call is into the transaction runtime library. */
+#define ECF_TM_BUILTIN (1 << 13)
+
+/* Nonzero if this is an indirect call by descriptor. */
+#define ECF_BY_DESCRIPTOR (1 << 14)
+
+/* Nonzero if this is a cold function. */
+#define ECF_COLD (1 << 15)
+
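+/* Illustrative example (not part of the upstream header): the ECF_* bits
+   combine bitwise, so a leaf function that never returns and never throws
+   (abort, for instance) can be summarized as
+
+     int flags = ECF_NORETURN | ECF_NOTHROW | ECF_LEAF;
+     if (flags & ECF_NORETURN)   // true: code after the call is unreachable
+       ;
+*/
+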
+/* Call argument flags. */
+
+/* Nonzero if the argument is not used by the function. */
+#define EAF_UNUSED (1 << 1)
+
+/* The following flags come in pairs.  The first of each pair is about
+   direct dereferences from the parameter, while the second is about memory
+   reachable by recursive dereferences.  */
+
+/* Nonzero if memory reached by the argument is not clobbered. */
+#define EAF_NO_DIRECT_CLOBBER (1 << 2)
+#define EAF_NO_INDIRECT_CLOBBER (1 << 3)
+
+/* Nonzero if the argument does not escape. */
+#define EAF_NO_DIRECT_ESCAPE (1 << 4)
+#define EAF_NO_INDIRECT_ESCAPE (1 << 5)
+
+/* Nonzero if the argument does not escape to return value. */
+#define EAF_NOT_RETURNED_DIRECTLY (1 << 6)
+#define EAF_NOT_RETURNED_INDIRECTLY (1 << 7)
+
+/* Nonzero if the argument is not read. */
+#define EAF_NO_DIRECT_READ (1 << 8)
+#define EAF_NO_INDIRECT_READ (1 << 9)
+
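+/* Illustrative example (not part of the upstream header): a pointer
+   argument that is only read and never stored through, captured or
+   returned could be described by OR-ing one bit from each relevant pair:
+
+     int eaf = EAF_NO_DIRECT_CLOBBER | EAF_NO_INDIRECT_CLOBBER
+               | EAF_NO_DIRECT_ESCAPE | EAF_NO_INDIRECT_ESCAPE
+               | EAF_NOT_RETURNED_DIRECTLY | EAF_NOT_RETURNED_INDIRECTLY;
+*/
+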
+/* Call return flags. */
+/* Mask for the argument number that is returned.  The lower two bits of
+   the return flags encode argument slots zero to three.  */
+#define ERF_RETURN_ARG_MASK (3)
+
+/* Nonzero if the return value is equal to the argument number
+ flags & ERF_RETURN_ARG_MASK. */
+#define ERF_RETURNS_ARG (1 << 2)
+
+/* Nonzero if the return value does not alias with anything. Functions
+ with the malloc attribute have this set on their return value. */
+#define ERF_NOALIAS (1 << 3)
+
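+/* Illustrative example (not part of the upstream header): recovering which
+   argument slot a call returns from its return flags:
+
+     if (flags & ERF_RETURNS_ARG)
+       {
+         unsigned argno = flags & ERF_RETURN_ARG_MASK;   /* slot 0..3 */
+       }
+*/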
+
+/*---------------------------------------------------------------------------
+ Enumerations
+---------------------------------------------------------------------------*/
+/* Codes of tree nodes. */
+#define DEFTREECODE(SYM, STRING, TYPE, NARGS) SYM,
+#define END_OF_BASE_TREE_CODES LAST_AND_UNUSED_TREE_CODE,
+
+enum tree_code {
+#include "all-tree.def"
+MAX_TREE_CODES
+};
+
+#undef DEFTREECODE
+#undef END_OF_BASE_TREE_CODES
+
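+/* Illustrative note (not part of the upstream header): this is the X-macro
+   pattern.  all-tree.def expands DEFTREECODE once per tree code, so other
+   definitions of the macro can derive parallel tables from the same list,
+   e.g. a name table:
+
+     #define DEFTREECODE(SYM, STRING, TYPE, NARGS) STRING,
+     #define END_OF_BASE_TREE_CODES "@dummy",
+     static const char *const code_names[] = {
+     #include "all-tree.def"
+     };
+*/
+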
+/* Number of language-independent tree codes. */
+#define NUM_TREE_CODES \
+ ((int) LAST_AND_UNUSED_TREE_CODE)
+
+#define CODE_CONTAINS_STRUCT(CODE, STRUCT) \
+ (tree_contains_struct[(CODE)][(STRUCT)])
+
+
+/* Classify which part of the compiler has defined a given builtin function.
+ Note that we assume below that this is no more than two bits. */
+enum built_in_class {
+ NOT_BUILT_IN = 0,
+ BUILT_IN_FRONTEND,
+ BUILT_IN_MD,
+ BUILT_IN_NORMAL
+};
+
+/* Last marker used for LTO streaming of built_in_class.  We cannot add it
+   to the enum since we need the enum to fit in 2 bits.  */
+#define BUILT_IN_LAST (BUILT_IN_NORMAL + 1)
+
+/* Codes that identify the various built in functions
+ so that expand_call can identify them quickly. */
+#define DEF_BUILTIN(ENUM, N, C, T, LT, B, F, NA, AT, IM, COND) ENUM,
+enum built_in_function {
+#include "builtins.def"
+ /* Complex division routines in libgcc. These are done via builtins
+ because emit_library_call_value can't handle complex values. */
+ BUILT_IN_COMPLEX_MUL_MIN,
+ BUILT_IN_COMPLEX_MUL_MAX
+ = BUILT_IN_COMPLEX_MUL_MIN
+ + MAX_MODE_COMPLEX_FLOAT
+ - MIN_MODE_COMPLEX_FLOAT,
+
+ BUILT_IN_COMPLEX_DIV_MIN,
+ BUILT_IN_COMPLEX_DIV_MAX
+ = BUILT_IN_COMPLEX_DIV_MIN
+ + MAX_MODE_COMPLEX_FLOAT
+ - MIN_MODE_COMPLEX_FLOAT,
+
+ /* Upper bound on non-language-specific builtins. */
+ END_BUILTINS
+};
+
+/* Internal functions. */
+enum internal_fn {
+#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) IFN_##CODE,
+#include "internal-fn.def"
+ IFN_LAST
+};
+
+/* An enum that combines target-independent built-in functions with
+ internal functions, so that they can be treated in a similar way.
+ The numbers for built-in functions are the same as for the
+ built_in_function enum. The numbers for internal functions
+   start at END_BUILTINS.  */
+enum combined_fn {
+#define DEF_BUILTIN(ENUM, N, C, T, LT, B, F, NA, AT, IM, COND) \
+ CFN_##ENUM = int (ENUM),
+#include "builtins.def"
+
+
+#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
+ CFN_##CODE = int (END_BUILTINS) + int (IFN_##CODE),
+#include "internal-fn.def"
+
+ CFN_LAST
+};
+
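+/* Illustrative example (not part of the upstream header): because the two
+   numbering ranges are disjoint, a built-in converts to a combined_fn by a
+   plain cast, and internal functions can be recognized by their position:
+
+     combined_fn cfn = combined_fn (int (BUILT_IN_MEMCPY));
+     bool is_internal = int (cfn) >= int (END_BUILTINS);
+*/
+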
+/* Tree code classes. Each tree_code has an associated code class
+ represented by a TREE_CODE_CLASS. */
+enum tree_code_class {
+ tcc_exceptional, /* An exceptional code (fits no category). */
+ tcc_constant, /* A constant. */
+ /* Order of tcc_type and tcc_declaration is important. */
+ tcc_type, /* A type object code. */
+ tcc_declaration, /* A declaration (also serving as variable refs). */
+ tcc_reference, /* A reference to storage. */
+ tcc_comparison, /* A comparison expression. */
+ tcc_unary, /* A unary arithmetic expression. */
+ tcc_binary, /* A binary arithmetic expression. */
+  tcc_statement,   /* A statement expression, which has side effects
+ but usually no interesting value. */
+ tcc_vl_exp, /* A function call or other expression with a
+ variable-length operand vector. */
+ tcc_expression /* Any other expression. */
+};
+
+/* OMP_CLAUSE codes. Do not reorder, as this is used to index into
+ the tables omp_clause_num_ops and omp_clause_code_name. */
+enum omp_clause_code {
+ /* Clause zero is special-cased inside the parser
+ (c_parser_omp_variable_list). */
+ OMP_CLAUSE_ERROR = 0,
+
+ /* OpenACC/OpenMP clause: private (variable_list). */
+ OMP_CLAUSE_PRIVATE,
+
+ /* OpenMP clause: shared (variable_list). */
+ OMP_CLAUSE_SHARED,
+
+ /* OpenACC/OpenMP clause: firstprivate (variable_list). */
+ OMP_CLAUSE_FIRSTPRIVATE,
+
+ /* OpenMP clause: lastprivate (variable_list). */
+ OMP_CLAUSE_LASTPRIVATE,
+
+ /* OpenACC/OpenMP clause: reduction (operator:variable_list).
+ OMP_CLAUSE_REDUCTION_CODE: The tree_code of the operator.
+ Operand 1: OMP_CLAUSE_REDUCTION_INIT: Stmt-list to initialize the var.
+ Operand 2: OMP_CLAUSE_REDUCTION_MERGE: Stmt-list to merge private var
+ into the shared one.
+ Operand 3: OMP_CLAUSE_REDUCTION_PLACEHOLDER: A dummy VAR_DECL
+ placeholder used in OMP_CLAUSE_REDUCTION_{INIT,MERGE}.
+ Operand 4: OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER: Another dummy
+ VAR_DECL placeholder, used like the above for C/C++ array
+ reductions. */
+ OMP_CLAUSE_REDUCTION,
+
+ /* OpenMP clause: task_reduction (operator:variable_list). */
+ OMP_CLAUSE_TASK_REDUCTION,
+
+ /* OpenMP clause: in_reduction (operator:variable_list). */
+ OMP_CLAUSE_IN_REDUCTION,
+
+ /* OpenMP clause: copyin (variable_list). */
+ OMP_CLAUSE_COPYIN,
+
+ /* OpenMP clause: copyprivate (variable_list). */
+ OMP_CLAUSE_COPYPRIVATE,
+
+ /* OpenMP clause: linear (variable-list[:linear-step]). */
+ OMP_CLAUSE_LINEAR,
+
+ /* OpenMP clause: affinity([depend-modifier :] variable-list). */
+ OMP_CLAUSE_AFFINITY,
+
+ /* OpenMP clause: aligned (variable-list[:alignment]). */
+ OMP_CLAUSE_ALIGNED,
+
+ /* OpenMP clause: allocate ([allocator:]variable-list). */
+ OMP_CLAUSE_ALLOCATE,
+
+ /* OpenMP clause: depend ({in,out,inout}:variable-list). */
+ OMP_CLAUSE_DEPEND,
+
+ /* OpenMP clause: nontemporal (variable-list). */
+ OMP_CLAUSE_NONTEMPORAL,
+
+ /* OpenMP clause: uniform (argument-list). */
+ OMP_CLAUSE_UNIFORM,
+
+ /* OpenMP clause: enter (extended-list).
+ to is a deprecated alias when it appears in declare target. */
+ OMP_CLAUSE_ENTER,
+
+ /* OpenMP clause: link (variable-list). */
+ OMP_CLAUSE_LINK,
+
+ /* OpenMP clause: detach (event-handle). */
+ OMP_CLAUSE_DETACH,
+
+ /* OpenACC clause: use_device (variable-list).
+ OpenMP clause: use_device_ptr (ptr-list). */
+ OMP_CLAUSE_USE_DEVICE_PTR,
+
+ /* OpenMP clause: use_device_addr (variable-list). */
+ OMP_CLAUSE_USE_DEVICE_ADDR,
+
+ /* OpenMP clause: is_device_ptr (variable-list). */
+ OMP_CLAUSE_IS_DEVICE_PTR,
+
+ /* OpenMP clause: inclusive (variable-list). */
+ OMP_CLAUSE_INCLUSIVE,
+
+ /* OpenMP clause: exclusive (variable-list). */
+ OMP_CLAUSE_EXCLUSIVE,
+
+ /* OpenMP clause: from (variable-list). */
+ OMP_CLAUSE_FROM,
+
+ /* OpenMP clause: to (variable-list). */
+ OMP_CLAUSE_TO,
+
+ /* OpenACC clauses: {copy, copyin, copyout, create, delete, deviceptr,
+ device, host (self), present, present_or_copy (pcopy), present_or_copyin
+ (pcopyin), present_or_copyout (pcopyout), present_or_create (pcreate)}
+ (variable-list).
+
+ OpenMP clause: map ({alloc:,to:,from:,tofrom:,}variable-list). */
+ OMP_CLAUSE_MAP,
+
+ /* OpenMP clause: has_device_addr (variable-list). */
+ OMP_CLAUSE_HAS_DEVICE_ADDR,
+
+ /* OpenMP clause: doacross ({source,sink}:vec). */
+ OMP_CLAUSE_DOACROSS,
+
+ /* Internal structure to hold OpenACC cache directive's variable-list.
+ #pragma acc cache (variable-list). */
+ OMP_CLAUSE__CACHE_,
+
+ /* OpenACC clause: gang [(gang-argument-list)].
+ Where
+ gang-argument-list: [gang-argument-list, ] gang-argument
+ gang-argument: [num:] integer-expression
+ | static: size-expression
+ size-expression: * | integer-expression. */
+ OMP_CLAUSE_GANG,
+
+ /* OpenACC clause: async [(integer-expression)]. */
+ OMP_CLAUSE_ASYNC,
+
+ /* OpenACC clause: wait [(integer-expression-list)]. */
+ OMP_CLAUSE_WAIT,
+
+ /* OpenACC clause: auto. */
+ OMP_CLAUSE_AUTO,
+
+ /* OpenACC clause: seq. */
+ OMP_CLAUSE_SEQ,
+
+ /* Internal clause: temporary for combined loops expansion. */
+ OMP_CLAUSE__LOOPTEMP_,
+
+ /* Internal clause: temporary for task reductions. */
+ OMP_CLAUSE__REDUCTEMP_,
+
+ /* Internal clause: temporary for lastprivate(conditional:). */
+ OMP_CLAUSE__CONDTEMP_,
+
+ /* Internal clause: temporary for inscan reductions. */
+ OMP_CLAUSE__SCANTEMP_,
+
+ /* OpenACC/OpenMP clause: if (scalar-expression). */
+ OMP_CLAUSE_IF,
+
+ /* OpenMP clause: num_threads (integer-expression). */
+ OMP_CLAUSE_NUM_THREADS,
+
+ /* OpenMP clause: schedule. */
+ OMP_CLAUSE_SCHEDULE,
+
+ /* OpenMP clause: nowait. */
+ OMP_CLAUSE_NOWAIT,
+
+ /* OpenMP clause: ordered [(constant-integer-expression)]. */
+ OMP_CLAUSE_ORDERED,
+
+ /* OpenACC/OpenMP clause: default. */
+ OMP_CLAUSE_DEFAULT,
+
+ /* OpenACC/OpenMP clause: collapse (constant-integer-expression). */
+ OMP_CLAUSE_COLLAPSE,
+
+ /* OpenMP clause: untied. */
+ OMP_CLAUSE_UNTIED,
+
+ /* OpenMP clause: final (scalar-expression). */
+ OMP_CLAUSE_FINAL,
+
+ /* OpenMP clause: mergeable. */
+ OMP_CLAUSE_MERGEABLE,
+
+ /* OpenMP clause: device (integer-expression). */
+ OMP_CLAUSE_DEVICE,
+
+ /* OpenMP clause: dist_schedule (static[:chunk-size]). */
+ OMP_CLAUSE_DIST_SCHEDULE,
+
+ /* OpenMP clause: inbranch. */
+ OMP_CLAUSE_INBRANCH,
+
+ /* OpenMP clause: notinbranch. */
+ OMP_CLAUSE_NOTINBRANCH,
+
+ /* OpenMP clause: num_teams(integer-expression). */
+ OMP_CLAUSE_NUM_TEAMS,
+
+ /* OpenMP clause: thread_limit(integer-expression). */
+ OMP_CLAUSE_THREAD_LIMIT,
+
+ /* OpenMP clause: proc_bind ({master,close,spread}). */
+ OMP_CLAUSE_PROC_BIND,
+
+ /* OpenMP clause: safelen (constant-integer-expression). */
+ OMP_CLAUSE_SAFELEN,
+
+ /* OpenMP clause: simdlen (constant-integer-expression). */
+ OMP_CLAUSE_SIMDLEN,
+
+ /* OpenMP clause: device_type ({host,nohost,any}). */
+ OMP_CLAUSE_DEVICE_TYPE,
+
+ /* OpenMP clause: for. */
+ OMP_CLAUSE_FOR,
+
+ /* OpenMP clause: parallel. */
+ OMP_CLAUSE_PARALLEL,
+
+ /* OpenMP clause: sections. */
+ OMP_CLAUSE_SECTIONS,
+
+ /* OpenMP clause: taskgroup. */
+ OMP_CLAUSE_TASKGROUP,
+
+ /* OpenMP clause: priority (integer-expression). */
+ OMP_CLAUSE_PRIORITY,
+
+ /* OpenMP clause: grainsize (integer-expression). */
+ OMP_CLAUSE_GRAINSIZE,
+
+ /* OpenMP clause: num_tasks (integer-expression). */
+ OMP_CLAUSE_NUM_TASKS,
+
+ /* OpenMP clause: nogroup. */
+ OMP_CLAUSE_NOGROUP,
+
+ /* OpenMP clause: threads. */
+ OMP_CLAUSE_THREADS,
+
+ /* OpenMP clause: simd. */
+ OMP_CLAUSE_SIMD,
+
+ /* OpenMP clause: hint (integer-expression). */
+ OMP_CLAUSE_HINT,
+
+ /* OpenMP clause: defaultmap (tofrom: scalar). */
+ OMP_CLAUSE_DEFAULTMAP,
+
+ /* OpenMP clause: order (concurrent). */
+ OMP_CLAUSE_ORDER,
+
+ /* OpenMP clause: bind (binding). */
+ OMP_CLAUSE_BIND,
+
+ /* OpenMP clause: filter (integer-expression). */
+ OMP_CLAUSE_FILTER,
+
+ /* Internally used only clause, holding SIMD uid. */
+ OMP_CLAUSE__SIMDUID_,
+
+ /* Internally used only clause, flag whether this is SIMT simd
+ loop or not. */
+ OMP_CLAUSE__SIMT_,
+
+ /* OpenACC clause: independent. */
+ OMP_CLAUSE_INDEPENDENT,
+
+ /* OpenACC clause: worker [( [num:] integer-expression)]. */
+ OMP_CLAUSE_WORKER,
+
+ /* OpenACC clause: vector [( [length:] integer-expression)]. */
+ OMP_CLAUSE_VECTOR,
+
+ /* OpenACC clause: num_gangs (integer-expression). */
+ OMP_CLAUSE_NUM_GANGS,
+
+ /* OpenACC clause: num_workers (integer-expression). */
+ OMP_CLAUSE_NUM_WORKERS,
+
+ /* OpenACC clause: vector_length (integer-expression). */
+ OMP_CLAUSE_VECTOR_LENGTH,
+
+ /* OpenACC clause: tile ( size-expr-list ). */
+ OMP_CLAUSE_TILE,
+
+ /* OpenACC clause: if_present. */
+ OMP_CLAUSE_IF_PRESENT,
+
+ /* OpenACC clause: finalize. */
+ OMP_CLAUSE_FINALIZE,
+
+ /* OpenACC clause: nohost. */
+ OMP_CLAUSE_NOHOST,
+};
+
+#undef DEFTREESTRUCT
+#define DEFTREESTRUCT(ENUM, NAME) ENUM,
+enum tree_node_structure_enum {
+#include "treestruct.def"
+ LAST_TS_ENUM
+};
+#undef DEFTREESTRUCT
+
+enum omp_clause_schedule_kind {
+ OMP_CLAUSE_SCHEDULE_STATIC,
+ OMP_CLAUSE_SCHEDULE_DYNAMIC,
+ OMP_CLAUSE_SCHEDULE_GUIDED,
+ OMP_CLAUSE_SCHEDULE_AUTO,
+ OMP_CLAUSE_SCHEDULE_RUNTIME,
+ OMP_CLAUSE_SCHEDULE_MASK = (1 << 3) - 1,
+ OMP_CLAUSE_SCHEDULE_MONOTONIC = (1 << 3),
+ OMP_CLAUSE_SCHEDULE_NONMONOTONIC = (1 << 4),
+ OMP_CLAUSE_SCHEDULE_LAST = 2 * OMP_CLAUSE_SCHEDULE_NONMONOTONIC - 1
+};
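+
+/* A minimal sketch of how a schedule value decomposes; it assumes the
+   OMP_CLAUSE_SCHEDULE_KIND accessor that tree.h defines for clauses of
+   code OMP_CLAUSE_SCHEDULE.  The low bits select the base kind and the
+   monotonic / nonmonotonic modifiers are OR'ed on top:
+
+     enum omp_clause_schedule_kind k = OMP_CLAUSE_SCHEDULE_KIND (clause);
+     enum omp_clause_schedule_kind base
+       = (enum omp_clause_schedule_kind) (k & OMP_CLAUSE_SCHEDULE_MASK);
+     bool monotonic = (k & OMP_CLAUSE_SCHEDULE_MONOTONIC) != 0;
+     bool nonmonotonic = (k & OMP_CLAUSE_SCHEDULE_NONMONOTONIC) != 0;  */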
+
+enum omp_clause_default_kind {
+ OMP_CLAUSE_DEFAULT_UNSPECIFIED,
+ OMP_CLAUSE_DEFAULT_SHARED,
+ OMP_CLAUSE_DEFAULT_NONE,
+ OMP_CLAUSE_DEFAULT_PRIVATE,
+ OMP_CLAUSE_DEFAULT_FIRSTPRIVATE,
+ OMP_CLAUSE_DEFAULT_PRESENT,
+ OMP_CLAUSE_DEFAULT_LAST
+};
+
+enum omp_clause_defaultmap_kind {
+ OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED,
+ OMP_CLAUSE_DEFAULTMAP_CATEGORY_SCALAR,
+ OMP_CLAUSE_DEFAULTMAP_CATEGORY_AGGREGATE,
+ OMP_CLAUSE_DEFAULTMAP_CATEGORY_ALLOCATABLE,
+ OMP_CLAUSE_DEFAULTMAP_CATEGORY_POINTER,
+ OMP_CLAUSE_DEFAULTMAP_CATEGORY_MASK = 7,
+ OMP_CLAUSE_DEFAULTMAP_ALLOC = 1 * (OMP_CLAUSE_DEFAULTMAP_CATEGORY_MASK + 1),
+ OMP_CLAUSE_DEFAULTMAP_TO = 2 * (OMP_CLAUSE_DEFAULTMAP_CATEGORY_MASK + 1),
+ OMP_CLAUSE_DEFAULTMAP_FROM = 3 * (OMP_CLAUSE_DEFAULTMAP_CATEGORY_MASK + 1),
+ OMP_CLAUSE_DEFAULTMAP_TOFROM = 4 * (OMP_CLAUSE_DEFAULTMAP_CATEGORY_MASK + 1),
+ OMP_CLAUSE_DEFAULTMAP_FIRSTPRIVATE
+ = 5 * (OMP_CLAUSE_DEFAULTMAP_CATEGORY_MASK + 1),
+ OMP_CLAUSE_DEFAULTMAP_NONE = 6 * (OMP_CLAUSE_DEFAULTMAP_CATEGORY_MASK + 1),
+ OMP_CLAUSE_DEFAULTMAP_DEFAULT
+ = 7 * (OMP_CLAUSE_DEFAULTMAP_CATEGORY_MASK + 1),
+ OMP_CLAUSE_DEFAULTMAP_MASK = 7 * (OMP_CLAUSE_DEFAULTMAP_CATEGORY_MASK + 1)
+};
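+
+/* A sketch of the encoding above: the category lives in the low three
+   bits and the behavior in the bits above them, so a single enum value
+   carries both.  Given a combined kind value K:
+
+     enum omp_clause_defaultmap_kind category
+       = (enum omp_clause_defaultmap_kind)
+         (k & OMP_CLAUSE_DEFAULTMAP_CATEGORY_MASK);
+     enum omp_clause_defaultmap_kind behavior
+       = (enum omp_clause_defaultmap_kind) (k & OMP_CLAUSE_DEFAULTMAP_MASK);
+
+   For example, defaultmap(tofrom:scalar) corresponds to
+   OMP_CLAUSE_DEFAULTMAP_TOFROM | OMP_CLAUSE_DEFAULTMAP_CATEGORY_SCALAR.  */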
+
+enum omp_clause_bind_kind {
+ OMP_CLAUSE_BIND_TEAMS,
+ OMP_CLAUSE_BIND_PARALLEL,
+ OMP_CLAUSE_BIND_THREAD
+};
+
+/* memory-order-clause on OpenMP atomic/flush constructs or
+ argument of atomic_default_mem_order clause. */
+enum omp_memory_order {
+ OMP_MEMORY_ORDER_UNSPECIFIED,
+ OMP_MEMORY_ORDER_RELAXED,
+ OMP_MEMORY_ORDER_ACQUIRE,
+ OMP_MEMORY_ORDER_RELEASE,
+ OMP_MEMORY_ORDER_ACQ_REL,
+ OMP_MEMORY_ORDER_SEQ_CST,
+ OMP_MEMORY_ORDER_MASK = 7,
+ OMP_FAIL_MEMORY_ORDER_UNSPECIFIED = OMP_MEMORY_ORDER_UNSPECIFIED * 8,
+ OMP_FAIL_MEMORY_ORDER_RELAXED = OMP_MEMORY_ORDER_RELAXED * 8,
+ OMP_FAIL_MEMORY_ORDER_ACQUIRE = OMP_MEMORY_ORDER_ACQUIRE * 8,
+ OMP_FAIL_MEMORY_ORDER_RELEASE = OMP_MEMORY_ORDER_RELEASE * 8,
+ OMP_FAIL_MEMORY_ORDER_ACQ_REL = OMP_MEMORY_ORDER_ACQ_REL * 8,
+ OMP_FAIL_MEMORY_ORDER_SEQ_CST = OMP_MEMORY_ORDER_SEQ_CST * 8,
+ OMP_FAIL_MEMORY_ORDER_MASK = OMP_MEMORY_ORDER_MASK * 8
+};
+#define OMP_FAIL_MEMORY_ORDER_SHIFT 3
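+
+/* A sketch of the packing above: the primary memory order occupies the
+   low three bits and the 'fail' memory order of an atomic compare sits
+   in the next three, hence the *8 factors.  Given a combined value MO:
+
+     enum omp_memory_order primary
+       = (enum omp_memory_order) (mo & OMP_MEMORY_ORDER_MASK);
+     enum omp_memory_order fail
+       = (enum omp_memory_order)
+         ((mo & OMP_FAIL_MEMORY_ORDER_MASK) >> OMP_FAIL_MEMORY_ORDER_SHIFT);  */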
+
+/* There is a TYPE_QUAL value for each type qualifier. They can be
+ combined by bitwise-or to form the complete set of qualifiers for a
+ type. */
+enum cv_qualifier {
+ TYPE_UNQUALIFIED = 0x0,
+ TYPE_QUAL_CONST = 0x1,
+ TYPE_QUAL_VOLATILE = 0x2,
+ TYPE_QUAL_RESTRICT = 0x4,
+ TYPE_QUAL_ATOMIC = 0x8
+};
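+
+/* For example, a 'const volatile' type carries the qualifier set
+   TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE, and membership is tested with
+   a bitwise-and:
+
+     int quals = TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE;
+     bool is_const = (quals & TYPE_QUAL_CONST) != 0;  */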
+
+/* Standard named or nameless data types of the C compiler. */
+enum tree_index {
+ TI_ERROR_MARK,
+ TI_INTQI_TYPE,
+ TI_INTHI_TYPE,
+ TI_INTSI_TYPE,
+ TI_INTDI_TYPE,
+ TI_INTTI_TYPE,
+
+ TI_UINTQI_TYPE,
+ TI_UINTHI_TYPE,
+ TI_UINTSI_TYPE,
+ TI_UINTDI_TYPE,
+ TI_UINTTI_TYPE,
+
+ TI_ATOMICQI_TYPE,
+ TI_ATOMICHI_TYPE,
+ TI_ATOMICSI_TYPE,
+ TI_ATOMICDI_TYPE,
+ TI_ATOMICTI_TYPE,
+
+ TI_UINT16_TYPE,
+ TI_UINT32_TYPE,
+ TI_UINT64_TYPE,
+ TI_UINT128_TYPE,
+
+ TI_VOID,
+
+ TI_INTEGER_ZERO,
+ TI_INTEGER_ONE,
+ TI_INTEGER_THREE,
+ TI_INTEGER_MINUS_ONE,
+ TI_NULL_POINTER,
+
+ TI_SIZE_ZERO,
+ TI_SIZE_ONE,
+
+ TI_BITSIZE_ZERO,
+ TI_BITSIZE_ONE,
+ TI_BITSIZE_UNIT,
+
+ TI_PUBLIC,
+ TI_PROTECTED,
+ TI_PRIVATE,
+
+ TI_BOOLEAN_FALSE,
+ TI_BOOLEAN_TRUE,
+
+ TI_FLOAT_TYPE,
+ TI_DOUBLE_TYPE,
+ TI_LONG_DOUBLE_TYPE,
+
+ /* __bf16 type if supported (used in C++ as std::bfloat16_t). */
+ TI_BFLOAT16_TYPE,
+
+ /* The _FloatN and _FloatNx types must be consecutive, and in the
+ same sequence as the corresponding complex types, which must also
+ be consecutive; _FloatN must come before _FloatNx; the order must
+ also be the same as in the floatn_nx_types array and the RID_*
+ values in c-common.h. This is so that iterations over these
+ types work as intended. */
+ TI_FLOAT16_TYPE,
+ TI_FLOATN_TYPE_FIRST = TI_FLOAT16_TYPE,
+ TI_FLOATN_NX_TYPE_FIRST = TI_FLOAT16_TYPE,
+ TI_FLOAT32_TYPE,
+ TI_FLOAT64_TYPE,
+ TI_FLOAT128_TYPE,
+ TI_FLOATN_TYPE_LAST = TI_FLOAT128_TYPE,
+#define NUM_FLOATN_TYPES (TI_FLOATN_TYPE_LAST - TI_FLOATN_TYPE_FIRST + 1)
+ TI_FLOAT32X_TYPE,
+ TI_FLOATNX_TYPE_FIRST = TI_FLOAT32X_TYPE,
+ TI_FLOAT64X_TYPE,
+ TI_FLOAT128X_TYPE,
+ TI_FLOATNX_TYPE_LAST = TI_FLOAT128X_TYPE,
+ TI_FLOATN_NX_TYPE_LAST = TI_FLOAT128X_TYPE,
+#define NUM_FLOATNX_TYPES (TI_FLOATNX_TYPE_LAST - TI_FLOATNX_TYPE_FIRST + 1)
+#define NUM_FLOATN_NX_TYPES (TI_FLOATN_NX_TYPE_LAST \
+ - TI_FLOATN_NX_TYPE_FIRST \
+ + 1)
+
+ /* Type used by certain backends for __float128, which in C++ should be
+ a distinct type from _Float128 for backwards compatibility reasons. */
+ TI_FLOAT128T_TYPE,
+
+ /* Put the complex types after their component types, so that in (sequential)
+ tree streaming we can assert that their component types have already been
+ handled (see tree-streamer.cc:record_common_node). */
+ TI_COMPLEX_INTEGER_TYPE,
+ TI_COMPLEX_FLOAT_TYPE,
+ TI_COMPLEX_DOUBLE_TYPE,
+ TI_COMPLEX_LONG_DOUBLE_TYPE,
+
+ TI_COMPLEX_FLOAT16_TYPE,
+ TI_COMPLEX_FLOATN_NX_TYPE_FIRST = TI_COMPLEX_FLOAT16_TYPE,
+ TI_COMPLEX_FLOAT32_TYPE,
+ TI_COMPLEX_FLOAT64_TYPE,
+ TI_COMPLEX_FLOAT128_TYPE,
+ TI_COMPLEX_FLOAT32X_TYPE,
+ TI_COMPLEX_FLOAT64X_TYPE,
+ TI_COMPLEX_FLOAT128X_TYPE,
+
+ TI_FLOAT_PTR_TYPE,
+ TI_DOUBLE_PTR_TYPE,
+ TI_LONG_DOUBLE_PTR_TYPE,
+ TI_INTEGER_PTR_TYPE,
+
+ TI_VOID_TYPE,
+ TI_PTR_TYPE,
+ TI_CONST_PTR_TYPE,
+ TI_SIZE_TYPE,
+ TI_PID_TYPE,
+ TI_PTRDIFF_TYPE,
+ TI_VA_LIST_TYPE,
+ TI_VA_LIST_GPR_COUNTER_FIELD,
+ TI_VA_LIST_FPR_COUNTER_FIELD,
+ TI_BOOLEAN_TYPE,
+ TI_FILEPTR_TYPE,
+ TI_CONST_TM_PTR_TYPE,
+ TI_FENV_T_PTR_TYPE,
+ TI_CONST_FENV_T_PTR_TYPE,
+ TI_FEXCEPT_T_PTR_TYPE,
+ TI_CONST_FEXCEPT_T_PTR_TYPE,
+ TI_POINTER_SIZED_TYPE,
+
+ TI_DFLOAT32_TYPE,
+ TI_DFLOAT64_TYPE,
+ TI_DFLOAT128_TYPE,
+
+ TI_VOID_LIST_NODE,
+
+ TI_MAIN_IDENTIFIER,
+
+ TI_SAT_SFRACT_TYPE,
+ TI_SAT_FRACT_TYPE,
+ TI_SAT_LFRACT_TYPE,
+ TI_SAT_LLFRACT_TYPE,
+ TI_SAT_USFRACT_TYPE,
+ TI_SAT_UFRACT_TYPE,
+ TI_SAT_ULFRACT_TYPE,
+ TI_SAT_ULLFRACT_TYPE,
+ TI_SFRACT_TYPE,
+ TI_FRACT_TYPE,
+ TI_LFRACT_TYPE,
+ TI_LLFRACT_TYPE,
+ TI_USFRACT_TYPE,
+ TI_UFRACT_TYPE,
+ TI_ULFRACT_TYPE,
+ TI_ULLFRACT_TYPE,
+ TI_SAT_SACCUM_TYPE,
+ TI_SAT_ACCUM_TYPE,
+ TI_SAT_LACCUM_TYPE,
+ TI_SAT_LLACCUM_TYPE,
+ TI_SAT_USACCUM_TYPE,
+ TI_SAT_UACCUM_TYPE,
+ TI_SAT_ULACCUM_TYPE,
+ TI_SAT_ULLACCUM_TYPE,
+ TI_SACCUM_TYPE,
+ TI_ACCUM_TYPE,
+ TI_LACCUM_TYPE,
+ TI_LLACCUM_TYPE,
+ TI_USACCUM_TYPE,
+ TI_UACCUM_TYPE,
+ TI_ULACCUM_TYPE,
+ TI_ULLACCUM_TYPE,
+ TI_QQ_TYPE,
+ TI_HQ_TYPE,
+ TI_SQ_TYPE,
+ TI_DQ_TYPE,
+ TI_TQ_TYPE,
+ TI_UQQ_TYPE,
+ TI_UHQ_TYPE,
+ TI_USQ_TYPE,
+ TI_UDQ_TYPE,
+ TI_UTQ_TYPE,
+ TI_SAT_QQ_TYPE,
+ TI_SAT_HQ_TYPE,
+ TI_SAT_SQ_TYPE,
+ TI_SAT_DQ_TYPE,
+ TI_SAT_TQ_TYPE,
+ TI_SAT_UQQ_TYPE,
+ TI_SAT_UHQ_TYPE,
+ TI_SAT_USQ_TYPE,
+ TI_SAT_UDQ_TYPE,
+ TI_SAT_UTQ_TYPE,
+ TI_HA_TYPE,
+ TI_SA_TYPE,
+ TI_DA_TYPE,
+ TI_TA_TYPE,
+ TI_UHA_TYPE,
+ TI_USA_TYPE,
+ TI_UDA_TYPE,
+ TI_UTA_TYPE,
+ TI_SAT_HA_TYPE,
+ TI_SAT_SA_TYPE,
+ TI_SAT_DA_TYPE,
+ TI_SAT_TA_TYPE,
+ TI_SAT_UHA_TYPE,
+ TI_SAT_USA_TYPE,
+ TI_SAT_UDA_TYPE,
+ TI_SAT_UTA_TYPE,
+
+ TI_MODULE_HWM,
+ /* Nodes below here change during compilation, and should therefore
+ not be in the C++ module's global tree table. */
+
+ TI_OPTIMIZATION_DEFAULT,
+ TI_OPTIMIZATION_CURRENT,
+ TI_TARGET_OPTION_DEFAULT,
+ TI_TARGET_OPTION_CURRENT,
+ TI_CURRENT_TARGET_PRAGMA,
+ TI_CURRENT_OPTIMIZE_PRAGMA,
+
+ TI_CHREC_DONT_KNOW,
+ TI_CHREC_KNOWN,
+
+ TI_MAX
+};
+
+/* An enumeration of the standard C integer types. These must be
+ ordered so that shorter types appear before longer ones, and so
+ that signed types appear before unsigned ones, for the correct
+ functioning of interpret_integer() in c-lex.cc. */
+enum integer_type_kind {
+ itk_char,
+ itk_signed_char,
+ itk_unsigned_char,
+ itk_short,
+ itk_unsigned_short,
+ itk_int,
+ itk_unsigned_int,
+ itk_long,
+ itk_unsigned_long,
+ itk_long_long,
+ itk_unsigned_long_long,
+
+ itk_intN_0,
+ itk_unsigned_intN_0,
+ itk_intN_1,
+ itk_unsigned_intN_1,
+ itk_intN_2,
+ itk_unsigned_intN_2,
+ itk_intN_3,
+ itk_unsigned_intN_3,
+
+ itk_none
+};
+
+/* A pointer-to-member-function type looks like:
+
+ struct {
+ __P __pfn;
+ ptrdiff_t __delta;
+ };
+
+ If __pfn is NULL, it is a NULL pointer-to-member-function.
+
+ (Because the vtable is always the first thing in the object, we
+ don't need its offset.) If the function is virtual, then PFN is
+ one plus twice the index into the vtable; otherwise, it is just a
+ pointer to the function.
+
+ Unfortunately, using the lowest bit of PFN doesn't work in
+ architectures that don't impose alignment requirements on function
+ addresses, or that use the lowest bit to tell one ISA from another,
+ for example. For such architectures, we use the lowest bit of
+ DELTA instead of the lowest bit of the PFN, and DELTA will be
+ multiplied by 2. */
+enum ptrmemfunc_vbit_where_t {
+ ptrmemfunc_vbit_in_pfn,
+ ptrmemfunc_vbit_in_delta
+};
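+
+/* A decoding sketch for the two layouts described above (illustrative
+   only -- pfn, delta and vbit_location stand in for the fields of a
+   concrete pointer-to-member-function value; the C++ front end has its
+   own helpers for this):
+
+     if (vbit_location == ptrmemfunc_vbit_in_pfn)
+       {
+         virtual_p = (pfn & 1) != 0;
+         vtable_index = (pfn - 1) / 2;   // meaningful when virtual_p
+       }
+     else   // ptrmemfunc_vbit_in_delta
+       {
+         virtual_p = (delta & 1) != 0;
+         real_delta = delta >> 1;        // delta was stored doubled
+       }  */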
+
+/* Flags that may be passed in the third argument of decl_attributes, and
+ to handler functions for attributes. */
+enum attribute_flags {
+ /* The type passed in is the type of a DECL, and any attributes that
+ should be passed in again to be applied to the DECL rather than the
+ type should be returned. */
+ ATTR_FLAG_DECL_NEXT = 1,
+ /* The type passed in is a function return type, and any attributes that
+ should be passed in again to be applied to the function type rather
+ than the return type should be returned. */
+ ATTR_FLAG_FUNCTION_NEXT = 2,
+ /* The type passed in is an array element type, and any attributes that
+ should be passed in again to be applied to the array type rather
+ than the element type should be returned. */
+ ATTR_FLAG_ARRAY_NEXT = 4,
+ /* The type passed in is a structure, union or enumeration type being
+ created, and should be modified in place. */
+ ATTR_FLAG_TYPE_IN_PLACE = 8,
+ /* The attributes are being applied by default to a library function whose
+ name indicates known behavior, and should be silently ignored if they
+ are not in fact compatible with the function type. */
+ ATTR_FLAG_BUILT_IN = 16,
+ /* A given attribute has been parsed as a C++11 attribute. */
+ ATTR_FLAG_CXX11 = 32,
+ /* The attribute handler is being invoked with an internal argument
+ that may not otherwise be valid when specified in source code. */
+ ATTR_FLAG_INTERNAL = 64
+};
+
+/* Types used to represent sizes. */
+enum size_type_kind {
+ stk_sizetype, /* Normal representation of sizes in bytes. */
+ stk_ssizetype, /* Signed representation of sizes in bytes. */
+ stk_bitsizetype, /* Normal representation of sizes in bits. */
+ stk_sbitsizetype, /* Signed representation of sizes in bits. */
+ stk_type_kind_last
+};
+
+/* Flags controlling operand_equal_p() behavior. */
+enum operand_equal_flag {
+ OEP_ONLY_CONST = 1,
+ OEP_PURE_SAME = 2,
+ OEP_MATCH_SIDE_EFFECTS = 4,
+ OEP_ADDRESS_OF = 8,
+ /* Internal within operand_equal_p: */
+ OEP_NO_HASH_CHECK = 16,
+ /* Internal within inchash::add_expr: */
+ OEP_HASH_CHECK = 32,
+ /* Makes operand_equal_p handle more expressions: */
+ OEP_LEXICOGRAPHIC = 64,
+ OEP_BITWISE = 128,
+ /* For OEP_ADDRESS_OF of COMPONENT_REFs, only consider same fields as
+ equivalent rather than also different fields with the same offset. */
+ OEP_ADDRESS_OF_SAME_FIELD = 256,
+ /* In conjunction with OEP_LEXICOGRAPHIC considers names of declarations
+ of the same kind. Used to compare VLA bounds involving parameters
+ across redeclarations of the same function. */
+ OEP_DECL_NAME = 512
+};
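+
+/* The flags are OR'ed together when calling operand_equal_p (declared
+   elsewhere in tree.h); a hedged sketch comparing two address operands
+   while treating matching side effects as equivalent:
+
+     if (operand_equal_p (op0, op1,
+                          OEP_ADDRESS_OF | OEP_MATCH_SIDE_EFFECTS))
+       ...;  */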
+
+/* Enum and arrays used for tree allocation stats.
+ Keep in sync with tree.cc:tree_node_kind_names. */
+enum tree_node_kind {
+ d_kind,
+ t_kind,
+ b_kind,
+ s_kind,
+ r_kind,
+ e_kind,
+ c_kind,
+ id_kind,
+ vec_kind,
+ binfo_kind,
+ ssa_name_kind,
+ constr_kind,
+ x_kind,
+ lang_decl,
+ lang_type,
+ omp_clause_kind,
+ all_kinds
+};
+
+enum annot_expr_kind {
+ annot_expr_ivdep_kind,
+ annot_expr_unroll_kind,
+ annot_expr_no_vector_kind,
+ annot_expr_vector_kind,
+ annot_expr_parallel_kind,
+ annot_expr_kind_last
+};
+
+/* The kind of a TREE_CLOBBER_P CONSTRUCTOR node. */
+enum clobber_kind {
+ /* Unspecified, this clobber acts as a store of an undefined value. */
+ CLOBBER_UNDEF,
+ /* This clobber ends the lifetime of the storage. */
+ CLOBBER_EOL,
+ CLOBBER_LAST
+};
+
+/*---------------------------------------------------------------------------
+ Type definitions
+---------------------------------------------------------------------------*/
+/* When processing aliases at the symbol table level, we need the
+ declaration of the target. For this reason we need to queue aliases and
+ process them after all declarations have been produced. */
+struct GTY(()) alias_pair {
+ tree decl;
+ tree target;
+};
+
+/* An initialization priority. */
+typedef unsigned short priority_type;
+
+/* The type of a callback function for walking over tree structure. */
+typedef tree (*walk_tree_fn) (tree *, int *, void *);
+
+/* The type of a callback function that represents a custom walk_tree. */
+typedef tree (*walk_tree_lh) (tree *, int *, tree (*) (tree *, int *, void *),
+ void *, hash_set<tree> *);
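+
+/* A minimal walk_tree_fn sketch ('count_calls_r' is a made-up name;
+   walk_tree itself is declared elsewhere in tree.h).  Returning
+   NULL_TREE continues the walk, returning a node stops it and makes
+   that node walk_tree's result, and clearing *WALK_SUBTREES skips the
+   children of the current node:
+
+     static tree
+     count_calls_r (tree *tp, int *walk_subtrees, void *data)
+     {
+       if (TREE_CODE (*tp) == CALL_EXPR)
+         ++*(int *) data;
+       if (TYPE_P (*tp))
+         *walk_subtrees = 0;   // do not descend into types
+       return NULL_TREE;
+     }
+
+     // Usage: int n = 0; walk_tree (&body, count_calls_r, &n, NULL);  */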
+
+
+/*---------------------------------------------------------------------------
+ Main data structures
+---------------------------------------------------------------------------*/
+/* A tree node can represent a data type, a variable, an expression
+ or a statement. Each node has a TREE_CODE which says what kind of
+ thing it represents. Some common codes are:
+ INTEGER_TYPE -- represents a type of integers.
+ ARRAY_TYPE -- represents an array type.
+ VAR_DECL -- represents a declared variable.
+ INTEGER_CST -- represents a constant integer value.
+ PLUS_EXPR -- represents a sum (an expression).
+
+ As for the contents of a tree node: there are some fields
+ that all nodes share. Each TREE_CODE has various special-purpose
+ fields as well. The fields of a node are never accessed directly,
+ always through accessor macros. */
+
+/* Every kind of tree node starts with this structure,
+ so all nodes have these fields.
+
+ See the accessor macros, defined below, for documentation of the
+ fields, and the table below which connects the fields and the
+ accessor macros. */
+
+struct GTY(()) tree_base {
+ ENUM_BITFIELD(tree_code) code : 16;
+
+ unsigned side_effects_flag : 1;
+ unsigned constant_flag : 1;
+ unsigned addressable_flag : 1;
+ unsigned volatile_flag : 1;
+ unsigned readonly_flag : 1;
+ unsigned asm_written_flag: 1;
+ unsigned nowarning_flag : 1;
+ unsigned visited : 1;
+
+ unsigned used_flag : 1;
+ unsigned nothrow_flag : 1;
+ unsigned static_flag : 1;
+ unsigned public_flag : 1;
+ unsigned private_flag : 1;
+ unsigned protected_flag : 1;
+ unsigned deprecated_flag : 1;
+ unsigned default_def_flag : 1;
+
+ union {
+ /* The bits in the following structure should only be used with
+ accessor macros that constrain inputs with tree checking. */
+ struct {
+ unsigned lang_flag_0 : 1;
+ unsigned lang_flag_1 : 1;
+ unsigned lang_flag_2 : 1;
+ unsigned lang_flag_3 : 1;
+ unsigned lang_flag_4 : 1;
+ unsigned lang_flag_5 : 1;
+ unsigned lang_flag_6 : 1;
+ unsigned saturating_flag : 1;
+
+ unsigned unsigned_flag : 1;
+ unsigned packed_flag : 1;
+ unsigned user_align : 1;
+ unsigned nameless_flag : 1;
+ unsigned atomic_flag : 1;
+ unsigned unavailable_flag : 1;
+ unsigned spare0 : 2;
+
+ unsigned spare1 : 8;
+
+ /* This field is only used with TREE_TYPE nodes; the only reason it is
+ present in tree_base instead of tree_type is to save space. The size
+ of the field must be large enough to hold addr_space_t values.
+ For CONSTRUCTOR nodes this holds the clobber_kind enum. */
+ unsigned address_space : 8;
+ } bits;
+
+ /* The following fields are present in tree_base to save space. The
+ nodes using them do not require any of the flags above and so can
+ make better use of the 4-byte sized word. */
+
+ /* The number of HOST_WIDE_INTs in an INTEGER_CST. */
+ struct {
+ /* The number of HOST_WIDE_INTs if the INTEGER_CST is accessed in
+ its native precision. */
+ unsigned char unextended;
+
+ /* The number of HOST_WIDE_INTs if the INTEGER_CST is extended to
+ wider precisions based on its TYPE_SIGN. */
+ unsigned char extended;
+
+ /* The number of HOST_WIDE_INTs if the INTEGER_CST is accessed in
+ offset_int precision, with smaller integers being extended
+ according to their TYPE_SIGN. This is equal to one of the two
+ fields above but is cached for speed. */
+ unsigned char offset;
+ } int_length;
+
+ /* VEC length. This field is only used with TREE_VEC. */
+ int length;
+
+ /* This field is only used with VECTOR_CST. */
+ struct {
+ /* The value of VECTOR_CST_LOG2_NPATTERNS. */
+ unsigned int log2_npatterns : 8;
+
+ /* The value of VECTOR_CST_NELTS_PER_PATTERN. */
+ unsigned int nelts_per_pattern : 8;
+
+ /* For future expansion. */
+ unsigned int unused : 16;
+ } vector_cst;
+
+ /* SSA version number. This field is only used with SSA_NAME. */
+ unsigned int version;
+
+ /* CHREC_VARIABLE. This field is only used with POLYNOMIAL_CHREC. */
+ unsigned int chrec_var;
+
+ /* Internal function code. */
+ enum internal_fn ifn;
+
+ /* OMP_ATOMIC* memory order. */
+ enum omp_memory_order omp_atomic_memory_order;
+
+ /* The following two fields are used for MEM_REF and TARGET_MEM_REF
+ expression trees and specify known data non-dependences. For
+ two memory references in a function they are known to not
+ alias if dependence_info.clique are equal and dependence_info.base
+ are distinct. Clique number zero means there is no information,
+ clique number one is populated from function global information
+ and thus needs no remapping on transforms like loop unrolling. */
+ struct {
+ unsigned short clique;
+ unsigned short base;
+ } dependence_info;
+ } GTY((skip(""))) u;
+};
+
+/* The following table lists the uses of each of the above flags and
+ for which types of nodes they are defined.
+
+ addressable_flag:
+
+ TREE_ADDRESSABLE in
+ VAR_DECL, PARM_DECL, RESULT_DECL, FUNCTION_DECL, LABEL_DECL
+ SSA_NAME
+ all types
+ CONSTRUCTOR, IDENTIFIER_NODE
+ STMT_EXPR
+
+ CALL_EXPR_TAILCALL in
+ CALL_EXPR
+
+ CASE_LOW_SEEN in
+ CASE_LABEL_EXPR
+
+ PREDICT_EXPR_OUTCOME in
+ PREDICT_EXPR
+
+ OMP_CLAUSE_MAP_DECL_MAKE_ADDRESSABLE in
+ OMP_CLAUSE
+
+ static_flag:
+
+ TREE_STATIC in
+ VAR_DECL, FUNCTION_DECL
+ CONSTRUCTOR
+
+ TREE_NO_TRAMPOLINE in
+ ADDR_EXPR
+
+ BINFO_VIRTUAL_P in
+ TREE_BINFO
+
+ TREE_SYMBOL_REFERENCED in
+ IDENTIFIER_NODE
+
+ CLEANUP_EH_ONLY in
+ TARGET_EXPR, WITH_CLEANUP_EXPR
+
+ TRY_CATCH_IS_CLEANUP in
+ TRY_CATCH_EXPR
+
+ ASM_INPUT_P in
+ ASM_EXPR
+
+ TYPE_REF_CAN_ALIAS_ALL in
+ POINTER_TYPE, REFERENCE_TYPE
+
+ CASE_HIGH_SEEN in
+ CASE_LABEL_EXPR
+
+ ENUM_IS_SCOPED in
+ ENUMERAL_TYPE
+
+ TRANSACTION_EXPR_OUTER in
+ TRANSACTION_EXPR
+
+ MUST_TAIL_CALL in
+ CALL_EXPR
+
+ public_flag:
+
+ TREE_OVERFLOW in
+ INTEGER_CST, REAL_CST, COMPLEX_CST, VECTOR_CST
+
+ TREE_PUBLIC in
+ VAR_DECL, FUNCTION_DECL
+ IDENTIFIER_NODE
+
+ CONSTRUCTOR_NO_CLEARING in
+ CONSTRUCTOR
+
+ ASM_VOLATILE_P in
+ ASM_EXPR
+
+ CALL_EXPR_VA_ARG_PACK in
+ CALL_EXPR
+
+ TYPE_CACHED_VALUES_P in
+ all types
+
+ SAVE_EXPR_RESOLVED_P in
+ SAVE_EXPR
+
+ OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE in
+ OMP_CLAUSE_LASTPRIVATE
+
+ OMP_CLAUSE_PRIVATE_DEBUG in
+ OMP_CLAUSE_PRIVATE
+
+ OMP_CLAUSE_LINEAR_NO_COPYIN in
+ OMP_CLAUSE_LINEAR
+
+ OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION in
+ OMP_CLAUSE_MAP
+
+ OMP_CLAUSE_REDUCTION_OMP_ORIG_REF in
+ OMP_CLAUSE_{,TASK_,IN_}REDUCTION
+
+ OMP_CLAUSE_USE_DEVICE_PTR_IF_PRESENT in
+ OMP_CLAUSE_USE_DEVICE_PTR
+
+ TRANSACTION_EXPR_RELAXED in
+ TRANSACTION_EXPR
+
+ FALLTHROUGH_LABEL_P in
+ LABEL_DECL
+
+ SSA_NAME_IS_VIRTUAL_OPERAND in
+ SSA_NAME
+
+ EXPR_LOCATION_WRAPPER_P in
+ NON_LVALUE_EXPR, VIEW_CONVERT_EXPR
+
+ private_flag:
+
+ TREE_PRIVATE in
+ all decls
+
+ CALL_EXPR_RETURN_SLOT_OPT in
+ CALL_EXPR
+
+ OMP_SECTION_LAST in
+ OMP_SECTION
+
+ OMP_PARALLEL_COMBINED in
+ OMP_PARALLEL
+
+ OMP_CLAUSE_PRIVATE_OUTER_REF in
+ OMP_CLAUSE_PRIVATE
+
+ OMP_CLAUSE_LINEAR_NO_COPYOUT in
+ OMP_CLAUSE_LINEAR
+
+ TYPE_REF_IS_RVALUE in
+ REFERENCE_TYPE
+
+ ENUM_IS_OPAQUE in
+ ENUMERAL_TYPE
+
+ protected_flag:
+
+ TREE_PROTECTED in
+ BLOCK
+ all decls
+
+ CALL_FROM_THUNK_P and
+ CALL_ALLOCA_FOR_VAR_P and
+ CALL_FROM_NEW_OR_DELETE_P in
+ CALL_EXPR
+
+ OMP_CLAUSE_LINEAR_VARIABLE_STRIDE in
+ OMP_CLAUSE_LINEAR
+
+ ASM_INLINE_P in
+ ASM_EXPR
+
+ side_effects_flag:
+
+ TREE_SIDE_EFFECTS in
+ all expressions
+ all decls
+ all constants
+
+ FORCED_LABEL in
+ LABEL_DECL
+
+ volatile_flag:
+
+ TREE_THIS_VOLATILE in
+ all expressions
+ all decls
+
+ TYPE_VOLATILE in
+ all types
+
+ readonly_flag:
+
+ TREE_READONLY in
+ all expressions
+ all decls
+
+ TYPE_READONLY in
+ all types
+
+ constant_flag:
+
+ TREE_CONSTANT in
+ all expressions
+ all decls
+ all constants
+
+ TYPE_SIZES_GIMPLIFIED in
+ all types
+
+ unsigned_flag:
+
+ TYPE_UNSIGNED in
+ all types
+
+ DECL_UNSIGNED in
+ all decls
+
+ asm_written_flag:
+
+ TREE_ASM_WRITTEN in
+ VAR_DECL, FUNCTION_DECL, TYPE_DECL
+ RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE
+ BLOCK, STRING_CST
+
+ SSA_NAME_OCCURS_IN_ABNORMAL_PHI in
+ SSA_NAME
+
+ used_flag:
+
+ TREE_USED in
+ all expressions
+ all decls
+ IDENTIFIER_NODE
+
+ nothrow_flag:
+
+ TREE_NOTHROW in
+ CALL_EXPR
+ FUNCTION_DECL
+
+ TREE_THIS_NOTRAP in
+ INDIRECT_REF, MEM_REF, TARGET_MEM_REF, ARRAY_REF, ARRAY_RANGE_REF
+
+ SSA_NAME_IN_FREE_LIST in
+ SSA_NAME
+
+ DECL_NONALIASED in
+ VAR_DECL
+
+ deprecated_flag:
+
+ TREE_DEPRECATED in
+ all decls
+ all types
+
+ IDENTIFIER_TRANSPARENT_ALIAS in
+ IDENTIFIER_NODE
+
+ SSA_NAME_POINTS_TO_READONLY_MEMORY in
+ SSA_NAME
+
+ unavailable_flag:
+
+ TREE_UNAVAILABLE in
+ all decls
+ all types
+
+ visited:
+
+ TREE_VISITED in
+ all trees (used liberally by many passes)
+
+ saturating_flag:
+
+ TYPE_REVERSE_STORAGE_ORDER in
+ RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE, ARRAY_TYPE
+
+ TYPE_SATURATING in
+ other types
+
+ VAR_DECL_IS_VIRTUAL_OPERAND in
+ VAR_DECL
+
+ nowarning_flag:
+
+ TREE_NO_WARNING in
+ all expressions
+ all decls
+
+ TYPE_ARTIFICIAL in
+ all types
+
+ default_def_flag:
+
+ TYPE_FINAL_P in
+ RECORD_TYPE, UNION_TYPE and QUAL_UNION_TYPE
+
+ TYPE_VECTOR_OPAQUE in
+ VECTOR_TYPE
+
+ SSA_NAME_IS_DEFAULT_DEF in
+ SSA_NAME
+
+ DECL_NONLOCAL_FRAME in
+ VAR_DECL
+
+ REF_REVERSE_STORAGE_ORDER in
+ BIT_FIELD_REF, MEM_REF
+
+ FUNC_ADDR_BY_DESCRIPTOR in
+ ADDR_EXPR
+
+ CALL_EXPR_BY_DESCRIPTOR in
+ CALL_EXPR
+
+*/
+
+struct GTY(()) tree_typed {
+ struct tree_base base;
+ tree type;
+};
+
+struct GTY(()) tree_common {
+ struct tree_typed typed;
+ tree chain;
+};
+
+struct GTY(()) tree_int_cst {
+ struct tree_typed typed;
+ HOST_WIDE_INT val[1];
+};
+
+
+struct GTY(()) tree_real_cst {
+ struct tree_typed typed;
+ struct real_value value;
+};
+
+struct GTY(()) tree_fixed_cst {
+ struct tree_typed typed;
+ struct fixed_value * fixed_cst_ptr;
+};
+
+struct GTY(()) tree_string {
+ struct tree_typed typed;
+ int length;
+ char str[1];
+};
+
+struct GTY(()) tree_complex {
+ struct tree_typed typed;
+ tree real;
+ tree imag;
+};
+
+struct GTY(()) tree_vector {
+ struct tree_typed typed;
+ tree GTY ((length ("vector_cst_encoded_nelts ((tree) &%h)"))) elts[1];
+};
+
+struct GTY(()) tree_poly_int_cst {
+ struct tree_typed typed;
+ tree coeffs[NUM_POLY_INT_COEFFS];
+};
+
+struct GTY(()) tree_identifier {
+ struct tree_common common;
+ struct ht_identifier id;
+};
+
+struct GTY(()) tree_list {
+ struct tree_common common;
+ tree purpose;
+ tree value;
+};
+
+struct GTY(()) tree_vec {
+ struct tree_common common;
+ tree GTY ((length ("TREE_VEC_LENGTH ((tree)&%h)"))) a[1];
+};
+
+/* A single element of a CONSTRUCTOR. VALUE holds the actual value of the
+ element. INDEX can optionally designate the position of VALUE: in arrays,
+ it is the index where VALUE has to be placed; in structures, it is the
+ FIELD_DECL of the member. */
+struct GTY(()) constructor_elt {
+ tree index;
+ tree value;
+};
+
+struct GTY(()) tree_constructor {
+ struct tree_typed typed;
+ vec<constructor_elt, va_gc> *elts;
+};
+
+enum omp_clause_depend_kind
+{
+ OMP_CLAUSE_DEPEND_IN,
+ OMP_CLAUSE_DEPEND_OUT,
+ OMP_CLAUSE_DEPEND_INOUT,
+ OMP_CLAUSE_DEPEND_MUTEXINOUTSET,
+ OMP_CLAUSE_DEPEND_INOUTSET,
+ OMP_CLAUSE_DEPEND_DEPOBJ,
+ OMP_CLAUSE_DEPEND_INVALID,
+ OMP_CLAUSE_DEPEND_LAST
+};
+
+enum omp_clause_doacross_kind
+{
+ OMP_CLAUSE_DOACROSS_SOURCE,
+ OMP_CLAUSE_DOACROSS_SINK,
+ OMP_CLAUSE_DOACROSS_LAST
+};
+
+enum omp_clause_proc_bind_kind
+{
+ /* Numbers should match omp_proc_bind_t enum in omp.h. */
+ OMP_CLAUSE_PROC_BIND_FALSE = 0,
+ OMP_CLAUSE_PROC_BIND_TRUE = 1,
+ OMP_CLAUSE_PROC_BIND_PRIMARY = 2,
+ OMP_CLAUSE_PROC_BIND_MASTER = 2,
+ OMP_CLAUSE_PROC_BIND_CLOSE = 3,
+ OMP_CLAUSE_PROC_BIND_SPREAD = 4,
+ OMP_CLAUSE_PROC_BIND_LAST
+};
+
+enum omp_clause_device_type_kind
+{
+ OMP_CLAUSE_DEVICE_TYPE_HOST = 1,
+ OMP_CLAUSE_DEVICE_TYPE_NOHOST = 2,
+ OMP_CLAUSE_DEVICE_TYPE_ANY = 3
+};
+
+enum omp_clause_linear_kind
+{
+ OMP_CLAUSE_LINEAR_DEFAULT,
+ OMP_CLAUSE_LINEAR_REF,
+ OMP_CLAUSE_LINEAR_VAL,
+ OMP_CLAUSE_LINEAR_UVAL
+};
+
+struct GTY(()) tree_exp {
+ struct tree_typed typed;
+ location_t locus;
+ tree GTY ((length ("TREE_OPERAND_LENGTH ((tree)&%h)"))) operands[1];
+};
+
+/* Immediate use linking structure. This structure is used for maintaining
+ a doubly linked list of uses of an SSA_NAME. */
+struct GTY(()) ssa_use_operand_t {
+ struct ssa_use_operand_t* GTY((skip(""))) prev;
+ struct ssa_use_operand_t* GTY((skip(""))) next;
+ /* Immediate uses for a given SSA name are maintained as a cyclic
+ list. To recognize the root of this list, the location field
+ needs to point to the original SSA name. Since statements and
+ SSA names are of different data types, we need this union. See
+ the explanation in struct imm_use_iterator. */
+ union { gimple *stmt; tree ssa_name; } GTY((skip(""))) loc;
+ tree *GTY((skip(""))) use;
+};
+
+struct GTY(()) tree_ssa_name {
+ struct tree_typed typed;
+
+ /* _DECL wrapped by this SSA name. */
+ tree var;
+
+ /* Statement that defines this SSA name. */
+ gimple *def_stmt;
+
+ /* Value range information. */
+ union ssa_name_info_type {
+ /* Ranges for integers. */
+ struct GTY ((tag ("0"))) irange_storage_slot *irange_info;
+ /* Ranges for floating point numbers. */
+ struct GTY ((tag ("1"))) frange_storage_slot *frange_info;
+ /* Pointer attributes used for alias analysis. */
+ struct GTY ((tag ("2"))) ptr_info_def *ptr_info;
+ /* This holds any range info supported by ranger (except ptr_info
+ above) and is managed by vrange_storage. */
+ void * GTY ((skip)) range_info;
+ } GTY ((desc ("%1.typed.type ?" \
+ "(POINTER_TYPE_P (TREE_TYPE ((tree)&%1)) ? 2 : SCALAR_FLOAT_TYPE_P (TREE_TYPE ((tree)&%1))) : 3"))) info;
+ /* Immediate uses list for this SSA_NAME. */
+ struct ssa_use_operand_t imm_uses;
+};
+
+struct GTY(()) phi_arg_d {
+ /* imm_use MUST be the first element in struct because we do some
+ pointer arithmetic with it. See phi_arg_index_from_use. */
+ struct ssa_use_operand_t imm_use;
+ tree def;
+ location_t locus;
+};
+
+struct GTY(()) tree_omp_clause {
+ struct tree_common common;
+ location_t locus;
+ enum omp_clause_code code;
+ union omp_clause_subcode {
+ enum omp_clause_default_kind default_kind;
+ enum omp_clause_schedule_kind schedule_kind;
+ enum omp_clause_depend_kind depend_kind;
+ enum omp_clause_doacross_kind doacross_kind;
+ /* See include/gomp-constants.h for enum gomp_map_kind's values. */
+ unsigned int map_kind;
+ enum omp_clause_proc_bind_kind proc_bind_kind;
+ enum tree_code reduction_code;
+ enum omp_clause_linear_kind linear_kind;
+ enum tree_code if_modifier;
+ enum omp_clause_defaultmap_kind defaultmap_kind;
+ enum omp_clause_bind_kind bind_kind;
+ enum omp_clause_device_type_kind device_type_kind;
+ } GTY ((skip)) subcode;
+
+ /* The gimplification of OMP_CLAUSE_REDUCTION_{INIT,MERGE} for omp-low's
+ usage. */
+ gimple_seq gimple_reduction_init;
+ gimple_seq gimple_reduction_merge;
+
+ tree GTY ((length ("omp_clause_num_ops[OMP_CLAUSE_CODE ((tree)&%h)]")))
+ ops[1];
+};
+
+struct GTY(()) tree_block {
+ struct tree_base base;
+ tree chain;
+
+ unsigned block_num;
+
+ location_t locus;
+ location_t end_locus;
+
+ tree vars;
+ vec<tree, va_gc> *nonlocalized_vars;
+
+ tree subblocks;
+ tree supercontext;
+ tree abstract_origin;
+ tree fragment_origin;
+ tree fragment_chain;
+
+ /* Pointer to the DWARF lexical block. */
+ struct die_struct *die;
+};
+
+struct GTY(()) tree_type_common {
+ struct tree_common common;
+ tree size;
+ tree size_unit;
+ tree attributes;
+ unsigned int uid;
+
+ unsigned int precision : 10;
+ unsigned no_force_blk_flag : 1;
+ unsigned needs_constructing_flag : 1;
+ unsigned transparent_aggr_flag : 1;
+ unsigned restrict_flag : 1;
+ unsigned contains_placeholder_bits : 2;
+
+ ENUM_BITFIELD(machine_mode) mode : 8;
+
+ /* TYPE_STRING_FLAG for INTEGER_TYPE and ARRAY_TYPE.
+ TYPE_CXX_ODR_P for RECORD_TYPE and UNION_TYPE. */
+ unsigned string_flag : 1;
+ unsigned lang_flag_0 : 1;
+ unsigned lang_flag_1 : 1;
+ unsigned lang_flag_2 : 1;
+ unsigned lang_flag_3 : 1;
+ unsigned lang_flag_4 : 1;
+ unsigned lang_flag_5 : 1;
+ unsigned lang_flag_6 : 1;
+ unsigned lang_flag_7 : 1;
+
+ /* TYPE_ALIGN in log2; this has to be large enough to hold values
+ of the maximum of BIGGEST_ALIGNMENT and MAX_OFILE_ALIGNMENT,
+ the latter being usually the larger. For ELF it is 8<<28,
+ so we need to store the value 32 (not 31, as we need the zero
+ as well), hence six bits. */
+ unsigned align : 6;
+ unsigned warn_if_not_align : 6;
+ unsigned typeless_storage : 1;
+ unsigned empty_flag : 1;
+ unsigned indivisible_p : 1;
+ unsigned no_named_args_stdarg_p : 1;
+ unsigned spare : 15;
+
+ alias_set_type alias_set;
+ tree pointer_to;
+ tree reference_to;
+ union tree_type_symtab {
+ int GTY ((tag ("TYPE_SYMTAB_IS_ADDRESS"))) address;
+ struct die_struct * GTY ((tag ("TYPE_SYMTAB_IS_DIE"))) die;
+ } GTY ((desc ("debug_hooks->tree_type_symtab_field"))) symtab;
+ tree canonical;
+ tree next_variant;
+ tree main_variant;
+ tree context;
+ tree name;
+};
+
+struct GTY(()) tree_type_with_lang_specific {
+ struct tree_type_common common;
+ /* Points to a structure whose details depend on the language in use. */
+ struct lang_type *lang_specific;
+};
+
+struct GTY(()) tree_type_non_common {
+ struct tree_type_with_lang_specific with_lang_specific;
+ tree values;
+ tree minval;
+ tree maxval;
+ tree lang_1;
+};
+
+struct GTY (()) tree_binfo {
+ struct tree_common common;
+
+ tree offset;
+ tree vtable;
+ tree virtuals;
+ tree vptr_field;
+ vec<tree, va_gc> *base_accesses;
+ tree inheritance;
+
+ tree vtt_subvtt;
+ tree vtt_vptr;
+
+ vec<tree, va_gc> base_binfos;
+};
+
+struct GTY(()) tree_decl_minimal {
+ struct tree_common common;
+ location_t locus;
+ unsigned int uid;
+ tree name;
+ tree context;
+};
+
+struct GTY(()) tree_decl_common {
+ struct tree_decl_minimal common;
+ tree size;
+
+ ENUM_BITFIELD(machine_mode) mode : 8;
+
+ unsigned nonlocal_flag : 1;
+ unsigned virtual_flag : 1;
+ unsigned ignored_flag : 1;
+ unsigned abstract_flag : 1;
+ unsigned artificial_flag : 1;
+ unsigned preserve_flag: 1;
+ unsigned debug_expr_is_from : 1;
+
+ unsigned lang_flag_0 : 1;
+ unsigned lang_flag_1 : 1;
+ unsigned lang_flag_2 : 1;
+ unsigned lang_flag_3 : 1;
+ unsigned lang_flag_4 : 1;
+ unsigned lang_flag_5 : 1;
+ unsigned lang_flag_6 : 1;
+ unsigned lang_flag_7 : 1;
+ unsigned lang_flag_8 : 1;
+
+ /* In VAR_DECL and PARM_DECL, this is DECL_REGISTER
+ In TRANSLATION_UNIT_DECL, this is TRANSLATION_UNIT_WARN_EMPTY_P.
+ In FIELD_DECL, this is DECL_FIELD_ABI_IGNORED. */
+ unsigned decl_flag_0 : 1;
+ /* In FIELD_DECL, this is DECL_BIT_FIELD
+ In VAR_DECL and FUNCTION_DECL, this is DECL_EXTERNAL.
+ In TYPE_DECL, this is TYPE_DECL_SUPPRESS_DEBUG. */
+ unsigned decl_flag_1 : 1;
+ /* In FIELD_DECL, this is DECL_NONADDRESSABLE_P
+ In VAR_DECL, PARM_DECL and RESULT_DECL, this is
+ DECL_HAS_VALUE_EXPR_P. */
+ unsigned decl_flag_2 : 1;
+ /* In FIELD_DECL, this is DECL_PADDING_P. */
+ unsigned decl_flag_3 : 1;
+ /* Logically, these two would go in a theoretical base shared by var and
+ parm decl. */
+ unsigned not_gimple_reg_flag : 1;
+ /* In VAR_DECL, PARM_DECL and RESULT_DECL, this is DECL_BY_REFERENCE. */
+ unsigned decl_by_reference_flag : 1;
+ /* In a VAR_DECL and PARM_DECL, this is DECL_READ_P. */
+ unsigned decl_read_flag : 1;
+ /* In a VAR_DECL or RESULT_DECL, this is DECL_NONSHAREABLE. */
+ /* In a PARM_DECL, this is DECL_HIDDEN_STRING_LENGTH. */
+ unsigned decl_nonshareable_flag : 1;
+
+ /* DECL_OFFSET_ALIGN, used only for FIELD_DECLs. */
+ unsigned int off_align : 6;
+
+ /* DECL_ALIGN. It should have the same size as TYPE_ALIGN. */
+ unsigned int align : 6;
+
+ /* DECL_WARN_IF_NOT_ALIGN. It should have the same size as
+ TYPE_WARN_IF_NOT_ALIGN. */
+ unsigned int warn_if_not_align : 6;
+
+ /* In FIELD_DECL, this is DECL_NOT_FLEXARRAY. */
+ unsigned int decl_not_flexarray : 1;
+
+ /* 13 bits unused. */
+
+ /* UID for points-to sets, stable over copying from inlining. */
+ unsigned int pt_uid;
+
+ tree size_unit;
+ tree initial;
+ tree attributes;
+ tree abstract_origin;
+
+ /* Points to a structure whose details depend on the language in use. */
+ struct lang_decl *lang_specific;
+};
+
+struct GTY(()) tree_decl_with_rtl {
+ struct tree_decl_common common;
+ rtx rtl;
+};
+
+struct GTY(()) tree_field_decl {
+ struct tree_decl_common common;
+
+ tree offset;
+ tree bit_field_type;
+ tree qualifier;
+ tree bit_offset;
+ tree fcontext;
+};
+
+struct GTY(()) tree_label_decl {
+ struct tree_decl_with_rtl common;
+ int label_decl_uid;
+ int eh_landing_pad_nr;
+};
+
+struct GTY(()) tree_result_decl {
+ struct tree_decl_with_rtl common;
+};
+
+struct GTY(()) tree_const_decl {
+ struct tree_decl_common common;
+};
+
+struct GTY(()) tree_parm_decl {
+ struct tree_decl_with_rtl common;
+ rtx incoming_rtl;
+};
+
+struct GTY(()) tree_decl_with_vis {
+ struct tree_decl_with_rtl common;
+ tree assembler_name;
+ struct symtab_node *symtab_node;
+
+ /* Belong to VAR_DECL exclusively. */
+ unsigned defer_output : 1;
+ unsigned hard_register : 1;
+ unsigned common_flag : 1;
+ unsigned in_text_section : 1;
+ unsigned in_constant_pool : 1;
+ unsigned dllimport_flag : 1;
+ /* Don't belong to VAR_DECL exclusively. */
+ unsigned weak_flag : 1;
+
+ unsigned seen_in_bind_expr : 1;
+ unsigned comdat_flag : 1;
+ /* Used for FUNCTION_DECL, VAR_DECL and in C++ for TYPE_DECL. */
+ ENUM_BITFIELD(symbol_visibility) visibility : 2;
+ unsigned visibility_specified : 1;
+
+ /* Belong to FUNCTION_DECL exclusively. */
+ unsigned init_priority_p : 1;
+ /* Used by C++ only. Might become a generic decl flag. */
+ unsigned shadowed_for_var_p : 1;
+ /* Belong to FUNCTION_DECL exclusively. */
+ unsigned cxx_constructor : 1;
+ /* Belong to FUNCTION_DECL exclusively. */
+ unsigned cxx_destructor : 1;
+ /* Belong to FUNCTION_DECL exclusively. */
+ unsigned final : 1;
+ /* Belong to FUNCTION_DECL exclusively. */
+ unsigned regdecl_flag : 1;
+ /* 14 unused bits. */
+ /* 32 more unused on 64 bit HW. */
+};
+
+struct GTY(()) tree_var_decl {
+ struct tree_decl_with_vis common;
+};
+
+struct GTY(()) tree_decl_non_common {
+ struct tree_decl_with_vis common;
+ /* Almost all front ends use this. */
+ tree result;
+};
+
+/* Classify a special function declaration type. */
+
+enum function_decl_type
+{
+ NONE,
+ OPERATOR_NEW,
+ OPERATOR_DELETE,
+ LAMBDA_FUNCTION
+
+ /* No values left: the 2-bit decl_type bitfield is fully used. */
+};
+
+/* FUNCTION_DECL inherits from DECL_NON_COMMON because of the use of the
+ arguments/result/saved_tree fields by front ends. The choice was either to
+ inherit FUNCTION_DECL from non_common, or to inherit non_common from
+ FUNCTION_DECL, which seemed a bit strange. */
+
+struct GTY(()) tree_function_decl {
+ struct tree_decl_non_common common;
+
+ struct function *f;
+
+ /* Arguments of the function. */
+ tree arguments;
+ /* The personality function. Used for stack unwinding. */
+ tree personality;
+
+ /* Function specific options that are used by this function. */
+ tree function_specific_target; /* target options */
+ tree function_specific_optimization; /* optimization options */
+
+ /* Generic function body. */
+ tree saved_tree;
+ /* Index within a virtual table. */
+ tree vindex;
+
+ /* In a FUNCTION_DECL this is DECL_UNCHECKED_FUNCTION_CODE. */
+ unsigned int function_code;
+
+ ENUM_BITFIELD(built_in_class) built_in_class : 2;
+ unsigned static_ctor_flag : 1;
+ unsigned static_dtor_flag : 1;
+ unsigned uninlinable : 1;
+ unsigned possibly_inlined : 1;
+ unsigned novops_flag : 1;
+ unsigned returns_twice_flag : 1;
+
+ unsigned malloc_flag : 1;
+ unsigned declared_inline_flag : 1;
+ unsigned no_inline_warning_flag : 1;
+ unsigned no_instrument_function_entry_exit : 1;
+ unsigned no_limit_stack : 1;
+ unsigned disregard_inline_limits : 1;
+ unsigned pure_flag : 1;
+ unsigned looping_const_or_pure_flag : 1;
+
+ /* Align the bitfield to the boundary of a byte. */
+ ENUM_BITFIELD(function_decl_type) decl_type: 2;
+ unsigned has_debug_args_flag : 1;
+ unsigned versioned_function : 1;
+ unsigned replaceable_operator : 1;
+
+ /* 11 bits left for future expansion. */
+ /* 32 bits on 64-bit HW. */
+};
+
+struct GTY(()) tree_translation_unit_decl {
+ struct tree_decl_common common;
+ /* Source language of this translation unit. Used for DWARF output. */
+ const char *language;
+ /* TODO: Non-optimization used to build this translation unit. */
+ /* TODO: Root of a partial DWARF tree for global types and decls. */
+};
+
+struct GTY(()) tree_type_decl {
+ struct tree_decl_non_common common;
+
+};
+
+struct GTY ((chain_next ("%h.next"), chain_prev ("%h.prev"))) tree_statement_list_node
+ {
+ struct tree_statement_list_node *prev;
+ struct tree_statement_list_node *next;
+ tree stmt;
+};
+
+struct GTY(()) tree_statement_list
+ {
+ struct tree_typed typed;
+ struct tree_statement_list_node *head;
+ struct tree_statement_list_node *tail;
+};
+
+
+/* Optimization options used by a function. */
+
+struct GTY(()) tree_optimization_option {
+ struct tree_base base;
+
+ /* The optimization options used by the user. */
+ struct cl_optimization *opts;
+
+ /* Target optabs for this set of optimization options. This is of
+ type `struct target_optabs *'. */
+ void *GTY ((atomic)) optabs;
+
+ /* The value of this_target_optabs against which the optabs above were
+ generated. */
+ struct target_optabs *GTY ((skip)) base_optabs;
+};
+
+/* Forward declaration, defined in target-globals.h. */
+
+class GTY(()) target_globals;
+
+/* Target options used by a function. */
+
+struct GTY(()) tree_target_option {
+ struct tree_base base;
+
+ /* Target globals for the corresponding target option. */
+ class target_globals *globals;
+
+ /* The optimization options used by the user. */
+ struct cl_target_option *opts;
+};
+
+/* Define the overall contents of a tree node.
+ It may be any of the structures declared above
+ for various types of node. */
+union GTY ((ptr_alias (union lang_tree_node),
+ desc ("tree_node_structure (&%h)"), variable_size)) tree_node {
+ struct tree_base GTY ((tag ("TS_BASE"))) base;
+ struct tree_typed GTY ((tag ("TS_TYPED"))) typed;
+ struct tree_common GTY ((tag ("TS_COMMON"))) common;
+ struct tree_int_cst GTY ((tag ("TS_INT_CST"))) int_cst;
+ struct tree_poly_int_cst GTY ((tag ("TS_POLY_INT_CST"))) poly_int_cst;
+ struct tree_real_cst GTY ((tag ("TS_REAL_CST"))) real_cst;
+ struct tree_fixed_cst GTY ((tag ("TS_FIXED_CST"))) fixed_cst;
+ struct tree_vector GTY ((tag ("TS_VECTOR"))) vector;
+ struct tree_string GTY ((tag ("TS_STRING"))) string;
+ struct tree_complex GTY ((tag ("TS_COMPLEX"))) complex;
+ struct tree_identifier GTY ((tag ("TS_IDENTIFIER"))) identifier;
+ struct tree_decl_minimal GTY((tag ("TS_DECL_MINIMAL"))) decl_minimal;
+ struct tree_decl_common GTY ((tag ("TS_DECL_COMMON"))) decl_common;
+ struct tree_decl_with_rtl GTY ((tag ("TS_DECL_WRTL"))) decl_with_rtl;
+ struct tree_decl_non_common GTY ((tag ("TS_DECL_NON_COMMON")))
+ decl_non_common;
+ struct tree_parm_decl GTY ((tag ("TS_PARM_DECL"))) parm_decl;
+ struct tree_decl_with_vis GTY ((tag ("TS_DECL_WITH_VIS"))) decl_with_vis;
+ struct tree_var_decl GTY ((tag ("TS_VAR_DECL"))) var_decl;
+ struct tree_field_decl GTY ((tag ("TS_FIELD_DECL"))) field_decl;
+ struct tree_label_decl GTY ((tag ("TS_LABEL_DECL"))) label_decl;
+ struct tree_result_decl GTY ((tag ("TS_RESULT_DECL"))) result_decl;
+ struct tree_const_decl GTY ((tag ("TS_CONST_DECL"))) const_decl;
+ struct tree_type_decl GTY ((tag ("TS_TYPE_DECL"))) type_decl;
+ struct tree_function_decl GTY ((tag ("TS_FUNCTION_DECL"))) function_decl;
+ struct tree_translation_unit_decl GTY ((tag ("TS_TRANSLATION_UNIT_DECL")))
+ translation_unit_decl;
+ struct tree_type_common GTY ((tag ("TS_TYPE_COMMON"))) type_common;
+ struct tree_type_with_lang_specific GTY ((tag ("TS_TYPE_WITH_LANG_SPECIFIC")))
+ type_with_lang_specific;
+ struct tree_type_non_common GTY ((tag ("TS_TYPE_NON_COMMON")))
+ type_non_common;
+ struct tree_list GTY ((tag ("TS_LIST"))) list;
+ struct tree_vec GTY ((tag ("TS_VEC"))) vec;
+ struct tree_exp GTY ((tag ("TS_EXP"))) exp;
+ struct tree_ssa_name GTY ((tag ("TS_SSA_NAME"))) ssa_name;
+ struct tree_block GTY ((tag ("TS_BLOCK"))) block;
+ struct tree_binfo GTY ((tag ("TS_BINFO"))) binfo;
+ struct tree_statement_list GTY ((tag ("TS_STATEMENT_LIST"))) stmt_list;
+ struct tree_constructor GTY ((tag ("TS_CONSTRUCTOR"))) constructor;
+ struct tree_omp_clause GTY ((tag ("TS_OMP_CLAUSE"))) omp_clause;
+ struct tree_optimization_option GTY ((tag ("TS_OPTIMIZATION"))) optimization;
+ struct tree_target_option GTY ((tag ("TS_TARGET_OPTION"))) target_option;
+};
+
+/* Structure describing an attribute and a function to handle it. */
+struct attribute_spec {
+ /* The name of the attribute (without any leading or trailing __),
+ or NULL to mark the end of a table of attributes. */
+ const char *name;
+ /* The minimum length of the list of arguments of the attribute. */
+ int min_length;
+ /* The maximum length of the list of arguments of the attribute
+ (-1 for no maximum). It can also be -2 for fake attributes
+ created for the sake of -Wno-attributes; in that case, we
+ should skip the balanced token sequence when parsing the attribute. */
+ int max_length;
+ /* Whether this attribute requires a DECL. If it does, it will be passed
+ from types of DECLs, function return types and array element types to
+ the DECLs, function types and array types respectively; but when
+ applied to a type in any other circumstances, it will be ignored with
+ a warning. (If greater control is desired for a given attribute,
+ this should be false, and the flags argument to the handler may be
+ used to gain greater control in that case.) */
+ bool decl_required;
+ /* Whether this attribute requires a type. If it does, it will be passed
+ from a DECL to the type of that DECL. */
+ bool type_required;
+ /* Whether this attribute requires a function (or method) type. If it does,
+ it will be passed from a function pointer type to the target type,
+ and from a function return type (which is not itself a function
+ pointer type) to the function type. */
+ bool function_type_required;
+ /* Specifies if attribute affects type's identity. */
+ bool affects_type_identity;
+ /* Function to handle this attribute. NODE points to a tree[3] array,
+ where node[0] is the node to which the attribute is to be applied;
+ node[1] is the last pushed/merged declaration if one exists, and node[2]
+ may be the declaration for node[0]. If a DECL, it should be modified in
+ place; if a TYPE, a copy should be created. NAME is the canonicalized
+ name of the attribute i.e. without any leading or trailing underscores.
+ ARGS is the TREE_LIST of the arguments (which may be NULL). FLAGS gives
+ further information about the context of the attribute. Afterwards, the
+ attributes will be added to the DECL_ATTRIBUTES or TYPE_ATTRIBUTES, as
+ appropriate, unless *NO_ADD_ATTRS is set to true (which should be done on
+ error, as well as in any other cases when the attributes should not be
+ added to the DECL or TYPE). Depending on FLAGS, any attributes to be
+ applied to another type or DECL later may be returned;
+ otherwise the return value should be NULL_TREE. This pointer may be
+ NULL if no special handling is required beyond the checks implied
+ by the rest of this structure. */
+ tree (*handler) (tree *node, tree name, tree args,
+ int flags, bool *no_add_attrs);
+
+ /* Specifies the name of an attribute that's mutually exclusive with
+ this one, and whether the relationship applies to the function,
+ variable, or type form of the attribute. */
+ struct exclusions {
+ const char *name;
+ bool function;
+ bool variable;
+ bool type;
+ };
+
+ /* An array of attribute exclusions describing names of other attributes
+ that this attribute is mutually exclusive with. */
+ const exclusions *exclude;
+};
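+
+/* A hedged sketch of a handler plus its table entry; "my_attr" and
+   handle_my_attr are made-up names, and the initializer follows the
+   field order above (name, min/max length, decl_required,
+   type_required, function_type_required, affects_type_identity,
+   handler, exclude):
+
+     static tree
+     handle_my_attr (tree *node, tree name, tree args, int flags,
+                     bool *no_add_attrs)
+     {
+       if (TREE_CODE (*node) != FUNCTION_DECL)
+         *no_add_attrs = true;   // reject on anything but functions
+       return NULL_TREE;         // no attributes to re-apply later
+     }
+
+     static const struct attribute_spec my_attrs[] = {
+       { "my_attr", 0, 0, true, false, false, false,
+         handle_my_attr, NULL },
+       { NULL, 0, 0, false, false, false, false, NULL, NULL }
+     };  */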
+
+/* These functions allow a front-end to perform a manual layout of a
+ RECORD_TYPE. (For instance, if the placement of subsequent fields
+ depends on the placement of fields so far.) Begin by calling
+ start_record_layout. Then, call place_field for each of the
+ fields. Then, call finish_record_layout. See layout_type for the
+ default way in which these functions are used. */
+typedef struct record_layout_info_s {
+ /* The RECORD_TYPE that we are laying out. */
+ tree t;
+ /* The offset into the record so far, in bytes, not including bits in
+ BITPOS. */
+ tree offset;
+ /* The last known alignment of SIZE. */
+ unsigned int offset_align;
+ /* The bit position within the last OFFSET_ALIGN bits, in bits. */
+ tree bitpos;
+ /* The alignment of the record so far, in bits. */
+ unsigned int record_align;
+ /* The alignment of the record so far, ignoring #pragma pack and
+ __attribute__ ((packed)), in bits. */
+ unsigned int unpacked_align;
+ /* The previous field laid out. */
+ tree prev_field;
+ /* The static variables (i.e., class variables, as opposed to
+ instance variables) encountered in T. */
+ vec<tree, va_gc> *pending_statics;
+ /* Bits remaining in the current alignment group */
+ int remaining_in_alignment;
+ /* True if we've seen a packed field that didn't have normal
+ alignment anyway. */
+ int packed_maybe_necessary;
+} *record_layout_info;
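+
+/* A sketch of the protocol described above, assuming the usual
+   stor-layout entry points and a RECORD_TYPE T whose fields are
+   already chained on TYPE_FIELDS:
+
+     record_layout_info rli = start_record_layout (t);
+     for (tree field = TYPE_FIELDS (t); field; field = DECL_CHAIN (field))
+       place_field (rli, field);
+     finish_record_layout (rli, true);   // second argument is free_p  */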
+
+/* Iterator for going through the function arguments. */
+struct function_args_iterator {
+ tree next; /* TREE_LIST pointing to the next argument */
+};
+
+/* Structures to map from a tree to another tree. */
+struct GTY(()) tree_map_base {
+ tree from;
+};
+
+/* Map from a tree to another tree. */
+
+struct GTY((for_user)) tree_map {
+ struct tree_map_base base;
+ unsigned int hash;
+ tree to;
+};
+
+/* Map from a decl tree to another tree. */
+struct GTY((for_user)) tree_decl_map {
+ struct tree_map_base base;
+ tree to;
+};
+
+/* Map from a tree to an int. */
+struct GTY((for_user)) tree_int_map {
+ struct tree_map_base base;
+ unsigned int to;
+};
+
+/* Map from a decl tree to a tree vector. */
+struct GTY((for_user)) tree_vec_map {
+ struct tree_map_base base;
+ vec<tree, va_gc> *to;
+};
+
+/* Abstract iterators for CALL_EXPRs. These static inline definitions
+ have to go towards the end of tree.h so that union tree_node is fully
+ defined by this point. */
+
+/* Structure containing iterator state. */
+struct call_expr_arg_iterator {
+ tree t; /* the call_expr */
+ int n; /* argument count */
+ int i; /* next argument index */
+};
+
+struct const_call_expr_arg_iterator {
+ const_tree t; /* the call_expr */
+ int n; /* argument count */
+ int i; /* next argument index */
+};
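+
+/* Typical use is through the FOR_EACH_CALL_EXPR_ARG macro defined
+   later in tree.h (assumed here), which drives the iterator over the
+   arguments of CALL; 'process' is a placeholder:
+
+     tree arg;
+     call_expr_arg_iterator iter;
+     FOR_EACH_CALL_EXPR_ARG (arg, iter, call)
+       process (arg);  */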
+
+/* The builtin_info structure holds the FUNCTION_DECL of the standard builtin
+ function, and flags. */
+struct GTY(()) builtin_info_type {
+ tree decl;
+ /* Whether the user can use <xxx> instead of explicitly using calls
+ to __builtin_<xxx>. */
+ unsigned implicit_p : 1;
+ /* Whether the user has provided a declaration of <xxx>. */
+ unsigned declared_p : 1;
+};
+
+/* Information about a _FloatN or _FloatNx type that may be
+ supported. */
+struct floatn_type_info {
+ /* The number N in the type name. */
+ int n;
+ /* Whether it is an extended type _FloatNx (true) or an interchange
+ type (false). */
+ bool extended;
+};
+
+
+/*---------------------------------------------------------------------------
+ Global variables
+---------------------------------------------------------------------------*/
+/* Matrix describing the structures contained in a given tree code. */
+extern bool tree_contains_struct[MAX_TREE_CODES][64];
+
+/* Class of tree given its code. */
+#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) TYPE,
+#define END_OF_BASE_TREE_CODES tcc_exceptional,
+
+#if __cpp_inline_variables < 201606L
+template <int N>
+struct tree_code_type_tmpl {
+ static constexpr enum tree_code_class tree_code_type[] = {
+#include "all-tree.def"
+ };
+};
+
+template <int N>
+constexpr enum tree_code_class tree_code_type_tmpl<N>::tree_code_type[];
+#else
+constexpr inline enum tree_code_class tree_code_type[] = {
+#include "all-tree.def"
+};
+#endif
+
+#undef DEFTREECODE
+#undef END_OF_BASE_TREE_CODES
+
+/* Each tree code class has an associated string representation.
+ These must correspond to the tree_code_class entries. */
+extern const char *const tree_code_class_strings[];
+
+/* Number of argument-words in each kind of tree-node. */
+
+#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) LENGTH,
+#define END_OF_BASE_TREE_CODES 0,
+
+#if __cpp_inline_variables < 201606L
+template <int N>
+struct tree_code_length_tmpl {
+ static constexpr unsigned char tree_code_length[] = {
+#include "all-tree.def"
+ };
+};
+
+template <int N>
+constexpr unsigned char tree_code_length_tmpl<N>::tree_code_length[];
+#else
+constexpr inline unsigned char tree_code_length[] = {
+#include "all-tree.def"
+};
+#endif
+
+#undef DEFTREECODE
+#undef END_OF_BASE_TREE_CODES
+
+/* Vector of all alias pairs for global symbols. */
+extern GTY(()) vec<alias_pair, va_gc> *alias_pairs;
+
+/* Names of all the built_in classes. */
+extern const char *const built_in_class_names[BUILT_IN_LAST];
+
+/* Names of all the built_in functions. */
+extern const char * built_in_names[(int) END_BUILTINS];
+
+/* Number of operands and names for each OMP_CLAUSE node. */
+extern unsigned const char omp_clause_num_ops[];
+extern const char * const omp_clause_code_name[];
+extern const char *user_omp_clause_code_name (tree, bool);
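+
+/* A sketch of how these arrays are indexed, assuming the
+   OMP_CLAUSE_CODE accessor from tree.h:
+
+     enum omp_clause_code c = OMP_CLAUSE_CODE (clause);
+     unsigned nops = omp_clause_num_ops[c];
+     const char *name = omp_clause_code_name[c];  */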
+
+/* A vector of all translation-units. */
+extern GTY (()) vec<tree, va_gc> *all_translation_units;
+
+/* Vector of standard trees used by the C compiler. */
+extern GTY(()) tree global_trees[TI_MAX];
+
+/* The standard C integer types. Use integer_type_kind to index into
+ this array. */
+extern GTY(()) tree integer_types[itk_none];
+
+/* Types used to represent sizes. */
+extern GTY(()) tree sizetype_tab[(int) stk_type_kind_last];
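+
+/* These arrays are normally reached through accessor macros defined
+   later in tree.h (integer_type_node, void_type_node, sizetype, ...);
+   a direct-indexing sketch:
+
+     tree int_node  = integer_types[itk_int];
+     tree void_node = global_trees[TI_VOID_TYPE];
+     tree size_node = sizetype_tab[(int) stk_sizetype];  */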
+
+/* Arrays for keeping track of tree node statistics. */
+extern uint64_t tree_node_counts[];
+extern uint64_t tree_node_sizes[];
+
+/* True if we are in gimple form and the actions of the folders need to
+ be restricted. False if we are not in gimple form and folding is not
+ restricted to creating gimple expressions. */
+extern bool in_gimple_form;
+
+/* Functional interface to the builtin functions. */
+extern GTY(()) builtin_info_type builtin_info[(int)END_BUILTINS];
+
+/* If nonzero, an upper limit on alignment of structure fields, in bits. */
+extern unsigned int maximum_field_alignment;
+
+/* Points to the FUNCTION_DECL of the function whose body we are reading. */
+extern GTY(()) tree current_function_decl;
+
+/* Nonzero means a FUNC_BEGIN label was emitted. */
+extern GTY(()) const char * current_function_func_begin_label;
+
+/* Information about the _FloatN and _FloatNx types. */
+extern const floatn_type_info floatn_nx_types[NUM_FLOATN_NX_TYPES];
+
+#endif // GCC_TREE_CORE_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-data-ref.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-data-ref.h
new file mode 100644
index 0000000..4d1a5c4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-data-ref.h
@@ -0,0 +1,792 @@
+/* Data references and dependences detectors.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Sebastian Pop <pop@cri.ensmp.fr>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_DATA_REF_H
+#define GCC_TREE_DATA_REF_H
+
+#include "graphds.h"
+#include "tree-chrec.h"
+#include "opt-problem.h"
+
+/*
+ innermost_loop_behavior describes the evolution of the address of the memory
+ reference in the innermost enclosing loop. The address is expressed as
+ BASE + STEP * # of iteration, and base is further decomposed as the base
+ pointer (BASE_ADDRESS), loop invariant offset (OFFSET) and
+ constant offset (INIT). Examples, in loop nest
+
+ for (i = 0; i < 100; i++)
+ for (j = 3; j < 100; j++)
+
+ Example 1 Example 2
+ data-ref a[j].b[i][j] *(p + x + 16B + 4B * j)
+
+
+ innermost_loop_behavior
+ base_address &a p
+ offset i * D_i x
+ init 3 * D_j + offsetof (b) 28
+ step D_j 4
+
+ */
+struct innermost_loop_behavior
+{
+ tree base_address;
+ tree offset;
+ tree init;
+ tree step;
+
+ /* BASE_ADDRESS is known to be misaligned by BASE_MISALIGNMENT bytes
+ from an alignment boundary of BASE_ALIGNMENT bytes. For example,
+ if we had:
+
+ struct S __attribute__((aligned(16))) { ... };
+
+ char *ptr;
+ ... *(struct S *) (ptr - 4) ...;
+
+ the information would be:
+
+ base_address: ptr
+ base_alignment: 16
+ base_misalignment: 4
+ init: -4
+
+ where init cancels the base misalignment. If instead we had a
+ reference to a particular field:
+
+ struct S __attribute__((aligned(16))) { ... int f; ... };
+
+ char *ptr;
+ ... ((struct S *) (ptr - 4))->f ...;
+
+ the information would be:
+
+ base_address: ptr
+ base_alignment: 16
+ base_misalignment: 4
+ init: -4 + offsetof (S, f)
+
+ where base_address + init might also be misaligned, and by a different
+ amount from base_address. */
+ unsigned int base_alignment;
+ unsigned int base_misalignment;
+
+ /* The largest power of two that divides OFFSET, capped to a suitably
+ high value if the offset is zero. This is a byte rather than a bit
+ quantity. */
+ unsigned int offset_alignment;
+
+ /* Likewise for STEP. */
+ unsigned int step_alignment;
+};
+
+/* Describes the evolutions of indices of the memory reference. The indices
+ are indices of the ARRAY_REFs, indices in artificial dimensions
+ added for member selection of records and the operands of MEM_REFs.
+ BASE_OBJECT is the part of the reference that is loop-invariant
+ (note that this reference does not have to cover the whole object
+ being accessed, in which case UNCONSTRAINED_BASE is set; hence it is
+ not recommended to use BASE_OBJECT in any code generation).
+ For the examples above,
+
+ base_object: a *(p + x + 4B * j_0)
+ indices: {j_0, +, 1}_2 {16, +, 4}_2
+ 4
+ {i_0, +, 1}_1
+ {j_0, +, 1}_2
+*/
+
+struct indices
+{
+ /* The object. */
+ tree base_object;
+
+ /* A list of chrecs. Access functions of the indices. */
+ vec<tree> access_fns;
+
+ /* Whether BASE_OBJECT is an access representing the whole object
+ or whether the access could not be constrained. */
+ bool unconstrained_base;
+};
+
+struct dr_alias
+{
+ /* The alias information that should be used for new pointers to this
+ location. */
+ struct ptr_info_def *ptr_info;
+};
+
+/* An integer vector. A vector formally consists of an element of a vector
+ space. A vector space is a set that is closed under vector addition
+ and scalar multiplication. In this vector space, an element is a list of
+ integers. */
+typedef HOST_WIDE_INT lambda_int;
+typedef lambda_int *lambda_vector;
+
+/* An integer matrix. A matrix consists of m vectors of length n (i.e.
+ all vectors are the same length). */
+typedef lambda_vector *lambda_matrix;
+
+
+
+struct data_reference
+{
+ /* A pointer to the statement that contains this DR. */
+ gimple *stmt;
+
+ /* A pointer to the memory reference. */
+ tree ref;
+
+ /* Auxiliary info specific to a pass. */
+ void *aux;
+
+ /* True when the data reference is on the RHS of a stmt. */
+ bool is_read;
+
+ /* True when the data reference is conditional within STMT,
+ i.e. if it might not occur even when the statement is executed
+ and runs to completion. */
+ bool is_conditional_in_stmt;
+
+ /* Alias information for the data reference. */
+ struct dr_alias alias;
+
+ /* Behavior of the memory reference in the innermost loop. */
+ struct innermost_loop_behavior innermost;
+
+ /* Subscripts of this data reference. */
+ struct indices indices;
+
+ /* Alternate subscripts initialized lazily and used by data-dependence
+ analysis only when the main indices of two DRs are not comparable.
+ Keep last to keep vec_info_shared::check_datarefs happy. */
+ struct indices alt_indices;
+};
+
+#define DR_STMT(DR) (DR)->stmt
+#define DR_REF(DR) (DR)->ref
+#define DR_BASE_OBJECT(DR) (DR)->indices.base_object
+#define DR_UNCONSTRAINED_BASE(DR) (DR)->indices.unconstrained_base
+#define DR_ACCESS_FNS(DR) (DR)->indices.access_fns
+#define DR_ACCESS_FN(DR, I) DR_ACCESS_FNS (DR)[I]
+#define DR_NUM_DIMENSIONS(DR) DR_ACCESS_FNS (DR).length ()
+#define DR_IS_READ(DR) (DR)->is_read
+#define DR_IS_WRITE(DR) (!DR_IS_READ (DR))
+#define DR_IS_CONDITIONAL_IN_STMT(DR) (DR)->is_conditional_in_stmt
+#define DR_BASE_ADDRESS(DR) (DR)->innermost.base_address
+#define DR_OFFSET(DR) (DR)->innermost.offset
+#define DR_INIT(DR) (DR)->innermost.init
+#define DR_STEP(DR) (DR)->innermost.step
+#define DR_PTR_INFO(DR) (DR)->alias.ptr_info
+#define DR_BASE_ALIGNMENT(DR) (DR)->innermost.base_alignment
+#define DR_BASE_MISALIGNMENT(DR) (DR)->innermost.base_misalignment
+#define DR_OFFSET_ALIGNMENT(DR) (DR)->innermost.offset_alignment
+#define DR_STEP_ALIGNMENT(DR) (DR)->innermost.step_alignment
+#define DR_INNERMOST(DR) (DR)->innermost
+
+typedef struct data_reference *data_reference_p;
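+
+/* A minimal usage sketch of the accessors above, assuming DR is a
+   data_reference_p produced by one of the builders declared later in
+   this header (e.g. find_data_references_in_stmt):
+
+     if (DR_IS_WRITE (dr)
+	 && DR_STEP (dr)
+	 && TREE_CODE (DR_STEP (dr)) == INTEGER_CST)
+       for (unsigned i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
+	 {
+	   tree access_fn = DR_ACCESS_FN (dr, i); /* Chrec per dimension.  */
+	   gcc_assert (access_fn != NULL_TREE);
+	 }
+*/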
+
+/* This struct is used to store information about a data reference,
+ including the data ref itself and the segment length for aliasing
+ checks. This is used to merge alias checks. */
+
+class dr_with_seg_len
+{
+public:
+ dr_with_seg_len (data_reference_p d, tree len, unsigned HOST_WIDE_INT size,
+ unsigned int a)
+ : dr (d), seg_len (len), access_size (size), align (a) {}
+
+ data_reference_p dr;
+ /* The offset of the last access that needs to be checked minus
+ the offset of the first. */
+ tree seg_len;
+ /* A value that, when added to abs (SEG_LEN), gives the total number of
+ bytes in the segment. */
+ poly_uint64 access_size;
+ /* The minimum common alignment of DR's start address, SEG_LEN and
+ ACCESS_SIZE. */
+ unsigned int align;
+};
+
+/* Flags that describe a potential alias between two dr_with_seg_lens.
+ In general, each pair of dr_with_seg_lens represents a composite of
+ multiple access pairs P, so testing flags like DR_IS_READ on the DRs
+ does not give meaningful information.
+
+ DR_ALIAS_RAW:
+ There is a pair in P for which the second reference is a read
+ and the first is a write.
+
+ DR_ALIAS_WAR:
+ There is a pair in P for which the second reference is a write
+ and the first is a read.
+
+ DR_ALIAS_WAW:
+ There is a pair in P for which both references are writes.
+
+ DR_ALIAS_ARBITRARY:
+ Either
+ (a) it isn't possible to classify one pair in P as RAW, WAW or WAR; or
+ (b) there is a pair in P that breaks the ordering assumption below.
+
+ This flag overrides the RAW, WAR and WAW flags above.
+
+ DR_ALIAS_UNSWAPPED:
+ DR_ALIAS_SWAPPED:
+ Temporary flags that indicate whether there is a pair P whose
+ DRs have or haven't been swapped around.
+
+ DR_ALIAS_MIXED_STEPS:
+ The DR_STEP for one of the data references in the pair does not
+ accurately describe that reference for all members of P. (Note
+ that the flag does not say anything about whether the DR_STEPs
+ of the two references in the pair are the same.)
+
+ The ordering assumption mentioned above is that for every pair
+ (DR_A, DR_B) in P:
+
+ (1) The original code accesses n elements for DR_A and n elements for DR_B,
+ interleaved as follows:
+
+ one access of size DR_A.access_size at DR_A.dr
+ one access of size DR_B.access_size at DR_B.dr
+ one access of size DR_A.access_size at DR_A.dr + STEP_A
+ one access of size DR_B.access_size at DR_B.dr + STEP_B
+ one access of size DR_A.access_size at DR_A.dr + STEP_A * 2
+ one access of size DR_B.access_size at DR_B.dr + STEP_B * 2
+ ...
+
+ (2) The new code accesses the same data in exactly two chunks:
+
+ one group of accesses spanning |DR_A.seg_len| + DR_A.access_size
+ one group of accesses spanning |DR_B.seg_len| + DR_B.access_size
+
+ A pair might break this assumption if the DR_A and DR_B accesses
+ in the original or the new code are mingled in some way. For example,
+ if DR_A.access_size represents the effect of two individual writes
+ to nearby locations, the pair breaks the assumption if those writes
+ occur either side of the access for DR_B.
+
+ Note that DR_ALIAS_ARBITRARY describes whether the ordering assumption
+ fails to hold for any individual pair in P. If the assumption *does*
+ hold for every pair in P, it doesn't matter whether it holds for the
+ composite pair or not. In other words, P should represent the complete
+ set of pairs that the composite pair is testing, so only the ordering
+ of two accesses in the same member of P matters. */
+const unsigned int DR_ALIAS_RAW = 1U << 0;
+const unsigned int DR_ALIAS_WAR = 1U << 1;
+const unsigned int DR_ALIAS_WAW = 1U << 2;
+const unsigned int DR_ALIAS_ARBITRARY = 1U << 3;
+const unsigned int DR_ALIAS_SWAPPED = 1U << 4;
+const unsigned int DR_ALIAS_UNSWAPPED = 1U << 5;
+const unsigned int DR_ALIAS_MIXED_STEPS = 1U << 6;
+
+/* This struct contains two dr_with_seg_len objects with aliasing data
+ refs. Two comparisons are generated from them. */
+
+class dr_with_seg_len_pair_t
+{
+public:
+ /* WELL_ORDERED indicates that the ordering assumption described above
+ DR_ALIAS_ARBITRARY holds. REORDERED indicates that it doesn't. */
+ enum sequencing { WELL_ORDERED, REORDERED };
+
+ dr_with_seg_len_pair_t (const dr_with_seg_len &,
+ const dr_with_seg_len &, sequencing);
+
+ dr_with_seg_len first;
+ dr_with_seg_len second;
+ unsigned int flags;
+};
+
+inline dr_with_seg_len_pair_t::
+dr_with_seg_len_pair_t (const dr_with_seg_len &d1, const dr_with_seg_len &d2,
+ sequencing seq)
+ : first (d1), second (d2), flags (0)
+{
+ if (DR_IS_READ (d1.dr) && DR_IS_WRITE (d2.dr))
+ flags |= DR_ALIAS_WAR;
+ else if (DR_IS_WRITE (d1.dr) && DR_IS_READ (d2.dr))
+ flags |= DR_ALIAS_RAW;
+ else if (DR_IS_WRITE (d1.dr) && DR_IS_WRITE (d2.dr))
+ flags |= DR_ALIAS_WAW;
+ else
+ gcc_unreachable ();
+ if (seq == REORDERED)
+ flags |= DR_ALIAS_ARBITRARY;
+}
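+
+/* A minimal usage sketch, assuming DR_A and DR_B are the
+   data_reference_ps to be checked at runtime and SEG_A/SEG_B,
+   SIZE_A/SIZE_B and ALIGN_A/ALIGN_B were computed by the caller:
+
+     dr_with_seg_len a (dr_a, seg_a, size_a, align_a);
+     dr_with_seg_len b (dr_b, seg_b, size_b, align_b);
+     dr_with_seg_len_pair_t pair (a, b,
+				  dr_with_seg_len_pair_t::WELL_ORDERED);
+     if (pair.flags & DR_ALIAS_WAW)
+       ;  /* Both references write; handle accordingly.  */
+*/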
+
+enum data_dependence_direction {
+ dir_positive,
+ dir_negative,
+ dir_equal,
+ dir_positive_or_negative,
+ dir_positive_or_equal,
+ dir_negative_or_equal,
+ dir_star,
+ dir_independent
+};
+
+/* The description of the grid of iterations that overlap. At most
+ two loops are considered at a time for now, hence at most
+ two functions are needed. For each of the functions, we store
+ the vector of coefficients, f[0] + x * f[1] + y * f[2] + ...,
+ where x, y, ... are variables. */
+
+#define MAX_DIM 2
+
+/* Special values of N. */
+#define NO_DEPENDENCE 0
+#define NOT_KNOWN (MAX_DIM + 1)
+#define CF_NONTRIVIAL_P(CF) ((CF)->n != NO_DEPENDENCE && (CF)->n != NOT_KNOWN)
+#define CF_NOT_KNOWN_P(CF) ((CF)->n == NOT_KNOWN)
+#define CF_NO_DEPENDENCE_P(CF) ((CF)->n == NO_DEPENDENCE)
+
+typedef vec<tree> affine_fn;
+
+struct conflict_function
+{
+ unsigned n;
+ affine_fn fns[MAX_DIM];
+};
+
+/* What is a subscript? Given two array accesses a subscript is the
+ tuple composed of the access functions for a given dimension.
+ Example: Given A[f1][f2][f3] and B[g1][g2][g3], there are three
+ subscripts: (f1, g1), (f2, g2), (f3, g3). These three subscripts
+ are stored in the data_dependence_relation structure under the form
+ of an array of subscripts. */
+
+struct subscript
+{
+ /* The access functions of the two references. */
+ tree access_fn[2];
+
+ /* A description of the iterations for which the elements are
+ accessed twice. */
+ conflict_function *conflicting_iterations_in_a;
+ conflict_function *conflicting_iterations_in_b;
+
+ /* This field stores the information about the iteration domain
+ validity of the dependence relation. */
+ tree last_conflict;
+
+ /* Distance from the iteration that accesses a conflicting element in
+ A to the iteration that accesses this same conflicting element in
+ B. The distance is a tree scalar expression, i.e. a constant or a
+ symbolic expression, but certainly not a chrec function. */
+ tree distance;
+};
+
+typedef struct subscript *subscript_p;
+
+#define SUB_ACCESS_FN(SUB, I) (SUB)->access_fn[I]
+#define SUB_CONFLICTS_IN_A(SUB) (SUB)->conflicting_iterations_in_a
+#define SUB_CONFLICTS_IN_B(SUB) (SUB)->conflicting_iterations_in_b
+#define SUB_LAST_CONFLICT(SUB) (SUB)->last_conflict
+#define SUB_DISTANCE(SUB) (SUB)->distance
+
+/* A data_dependence_relation represents a relation between two
+ data_references A and B. */
+
+struct data_dependence_relation
+{
+
+ struct data_reference *a;
+ struct data_reference *b;
+
+ /* A "yes/no/maybe" field for the dependence relation:
+
+ - when "ARE_DEPENDENT == NULL_TREE", there exists a dependence
+ relation between A and B, and the description of this relation
+ is given in the SUBSCRIPTS array,
+
+ - when "ARE_DEPENDENT == chrec_known", there is no dependence and
+ SUBSCRIPTS is empty,
+
+ - when "ARE_DEPENDENT == chrec_dont_know", there may be a dependence,
+ but the analyzer cannot be more specific. */
+ tree are_dependent;
+
+ /* If nonnull, COULD_BE_INDEPENDENT_P is true and the accesses are
+ independent when the runtime addresses of OBJECT_A and OBJECT_B
+ are different. The addresses of both objects are invariant in the
+ loop nest. */
+ tree object_a;
+ tree object_b;
+
+ /* For each subscript in the dependence test, there is an element in
+ this array. This is the attribute that labels the edge A->B of
+ the data_dependence_relation. */
+ vec<subscript_p> subscripts;
+
+ /* The analyzed loop nest. */
+ vec<loop_p> loop_nest;
+
+ /* The classic direction vector. */
+ vec<lambda_vector> dir_vects;
+
+ /* The classic distance vector. */
+ vec<lambda_vector> dist_vects;
+
+ /* Is the dependence reversed with respect to the lexicographic order? */
+ bool reversed_p;
+
+ /* When the dependence relation is affine, it can be represented by
+ a distance vector. */
+ bool affine_p;
+
+ /* Set to true when the dependence relation is on the same data
+ access. */
+ bool self_reference_p;
+
+ /* True if the dependence described is conservatively correct rather
+ than exact, and if it is still possible for the accesses to be
+ conditionally independent. For example, the a and b references in:
+
+ struct s *a, *b;
+ for (int i = 0; i < n; ++i)
+ a->f[i] += b->f[i];
+
+ conservatively have a distance vector of (0), for the case in which
+ a == b, but the accesses are independent if a != b. Similarly,
+ the a and b references in:
+
+ struct s *a, *b;
+ for (int i = 0; i < n; ++i)
+ a[0].f[i] += b[i].f[i];
+
+ conservatively have a distance vector of (0), but they are independent
+ when a != b + i. In contrast, the references in:
+
+ struct s *a;
+ for (int i = 0; i < n; ++i)
+ a->f[i] += a->f[i];
+
+ have the same distance vector of (0), but the accesses can never be
+ independent. */
+ bool could_be_independent_p;
+};
+
+typedef struct data_dependence_relation *ddr_p;
+
+#define DDR_A(DDR) (DDR)->a
+#define DDR_B(DDR) (DDR)->b
+#define DDR_AFFINE_P(DDR) (DDR)->affine_p
+#define DDR_ARE_DEPENDENT(DDR) (DDR)->are_dependent
+#define DDR_OBJECT_A(DDR) (DDR)->object_a
+#define DDR_OBJECT_B(DDR) (DDR)->object_b
+#define DDR_SUBSCRIPTS(DDR) (DDR)->subscripts
+#define DDR_SUBSCRIPT(DDR, I) DDR_SUBSCRIPTS (DDR)[I]
+#define DDR_NUM_SUBSCRIPTS(DDR) DDR_SUBSCRIPTS (DDR).length ()
+
+#define DDR_LOOP_NEST(DDR) (DDR)->loop_nest
+/* The size of the direction/distance vectors: the number of loops in
+ the loop nest. */
+#define DDR_NB_LOOPS(DDR) (DDR_LOOP_NEST (DDR).length ())
+#define DDR_SELF_REFERENCE(DDR) (DDR)->self_reference_p
+
+#define DDR_DIST_VECTS(DDR) ((DDR)->dist_vects)
+#define DDR_DIR_VECTS(DDR) ((DDR)->dir_vects)
+#define DDR_NUM_DIST_VECTS(DDR) \
+ (DDR_DIST_VECTS (DDR).length ())
+#define DDR_NUM_DIR_VECTS(DDR) \
+ (DDR_DIR_VECTS (DDR).length ())
+#define DDR_DIR_VECT(DDR, I) \
+ DDR_DIR_VECTS (DDR)[I]
+#define DDR_DIST_VECT(DDR, I) \
+ DDR_DIST_VECTS (DDR)[I]
+#define DDR_REVERSED_P(DDR) (DDR)->reversed_p
+#define DDR_COULD_BE_INDEPENDENT_P(DDR) (DDR)->could_be_independent_p
+
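+/* A minimal usage sketch of the ARE_DEPENDENT protocol documented above,
+   assuming DDR came from initialize_data_dependence_relation followed by
+   compute_affine_dependence ('examine' is a hypothetical helper):
+
+     if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
+       ;  /* Proven independent; nothing to do.  */
+     else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
+       ;  /* Unknown; be conservative.  */
+     else
+       for (unsigned i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
+	 examine (DDR_SUBSCRIPT (ddr, i));
+*/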
+
+opt_result dr_analyze_innermost (innermost_loop_behavior *, tree,
+ class loop *, const gimple *);
+extern bool compute_data_dependences_for_loop (class loop *, bool,
+ vec<loop_p> *,
+ vec<data_reference_p> *,
+ vec<ddr_p> *);
+extern void debug_ddrs (vec<ddr_p> );
+extern void dump_data_reference (FILE *, struct data_reference *);
+extern void debug (data_reference &ref);
+extern void debug (data_reference *ptr);
+extern void debug_data_reference (struct data_reference *);
+extern void debug_data_references (vec<data_reference_p> );
+extern void debug (vec<data_reference_p> &ref);
+extern void debug (vec<data_reference_p> *ptr);
+extern void debug_data_dependence_relation (const data_dependence_relation *);
+extern void dump_data_dependence_relations (FILE *, const vec<ddr_p> &);
+extern void debug (vec<ddr_p> &ref);
+extern void debug (vec<ddr_p> *ptr);
+extern void debug_data_dependence_relations (vec<ddr_p> );
+extern void free_dependence_relation (struct data_dependence_relation *);
+extern void free_dependence_relations (vec<ddr_p>& );
+extern void free_data_ref (data_reference_p);
+extern void free_data_refs (vec<data_reference_p>& );
+extern opt_result find_data_references_in_stmt (class loop *, gimple *,
+ vec<data_reference_p> *);
+extern bool graphite_find_data_references_in_stmt (edge, loop_p, gimple *,
+ vec<data_reference_p> *);
+tree find_data_references_in_loop (class loop *, vec<data_reference_p> *);
+bool loop_nest_has_data_refs (loop_p loop);
+struct data_reference *create_data_ref (edge, loop_p, tree, gimple *, bool,
+ bool);
+extern bool find_loop_nest (class loop *, vec<loop_p> *);
+extern struct data_dependence_relation *initialize_data_dependence_relation
+ (struct data_reference *, struct data_reference *, vec<loop_p>);
+extern void compute_affine_dependence (struct data_dependence_relation *,
+ loop_p);
+extern void compute_self_dependence (struct data_dependence_relation *);
+extern bool compute_all_dependences (const vec<data_reference_p> &,
+ vec<ddr_p> *,
+ const vec<loop_p> &, bool);
+extern tree find_data_references_in_bb (class loop *, basic_block,
+ vec<data_reference_p> *);
+extern unsigned int dr_alignment (innermost_loop_behavior *);
+extern tree get_base_for_alignment (tree, unsigned int *);
+
+/* Return the alignment in bytes that DR is guaranteed to have at all
+ times. */
+
+inline unsigned int
+dr_alignment (data_reference *dr)
+{
+ return dr_alignment (&DR_INNERMOST (dr));
+}
+
+extern bool dr_may_alias_p (const struct data_reference *,
+ const struct data_reference *, class loop *);
+extern bool dr_equal_offsets_p (struct data_reference *,
+ struct data_reference *);
+
+extern opt_result runtime_alias_check_p (ddr_p, class loop *, bool);
+extern int data_ref_compare_tree (tree, tree);
+extern void prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *,
+ poly_uint64);
+extern void create_runtime_alias_checks (class loop *,
+ const vec<dr_with_seg_len_pair_t> *,
+ tree*);
+extern tree dr_direction_indicator (struct data_reference *);
+extern tree dr_zero_step_indicator (struct data_reference *);
+extern bool dr_known_forward_stride_p (struct data_reference *);
+
+/* Return true when the base objects of data references A and B are
+ the same memory object. */
+
+inline bool
+same_data_refs_base_objects (data_reference_p a, data_reference_p b)
+{
+ return DR_NUM_DIMENSIONS (a) == DR_NUM_DIMENSIONS (b)
+ && operand_equal_p (DR_BASE_OBJECT (a), DR_BASE_OBJECT (b), 0);
+}
+
+/* Return true when the data references A and B are accessing the same
+ memory object with the same access functions. Optionally skip the
+ last OFFSET dimensions in the data reference. */
+
+inline bool
+same_data_refs (data_reference_p a, data_reference_p b, int offset = 0)
+{
+ unsigned int i;
+
+ /* The references are exactly the same. */
+ if (operand_equal_p (DR_REF (a), DR_REF (b), 0))
+ return true;
+
+ if (!same_data_refs_base_objects (a, b))
+ return false;
+
+ for (i = offset; i < DR_NUM_DIMENSIONS (a); i++)
+ if (!eq_evolutions_p (DR_ACCESS_FN (a, i), DR_ACCESS_FN (b, i)))
+ return false;
+
+ return true;
+}
+
+/* Returns true when all the dependences are computable. */
+
+inline bool
+known_dependences_p (vec<ddr_p> dependence_relations)
+{
+ ddr_p ddr;
+ unsigned int i;
+
+ FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
+ if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
+ return false;
+
+ return true;
+}
+
+/* Returns the dependence level for a vector DIST of size LENGTH.
+ LEVEL = 0 means a lexicographic dependence, i.e. a dependence due
+ to the sequence of statements, not carried by any loop. */
+
+inline unsigned
+dependence_level (lambda_vector dist_vect, int length)
+{
+ int i;
+
+ for (i = 0; i < length; i++)
+ if (dist_vect[i] != 0)
+ return i + 1;
+
+ return 0;
+}
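+
+/* For instance, with the distance vector {0, 2, 0} the first nonzero
+   entry is at index 1, so the level is 2: the dependence is carried by
+   the second loop of the nest.  */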
+
+/* Return the dependence level for the DDR relation. */
+
+inline unsigned
+ddr_dependence_level (ddr_p ddr)
+{
+ unsigned vector;
+ unsigned level = 0;
+
+ if (DDR_DIST_VECTS (ddr).exists ())
+ level = dependence_level (DDR_DIST_VECT (ddr, 0), DDR_NB_LOOPS (ddr));
+
+ for (vector = 1; vector < DDR_NUM_DIST_VECTS (ddr); vector++)
+ level = MIN (level, dependence_level (DDR_DIST_VECT (ddr, vector),
+ DDR_NB_LOOPS (ddr)));
+ return level;
+}
+
+/* Return the index of the variable VAR in the LOOP_NEST array. */
+
+inline int
+index_in_loop_nest (int var, const vec<loop_p> &loop_nest)
+{
+ class loop *loopi;
+ int var_index;
+
+ for (var_index = 0; loop_nest.iterate (var_index, &loopi); var_index++)
+ if (loopi->num == var)
+ return var_index;
+
+ gcc_unreachable ();
+}
+
+/* Returns true when the data reference DR has the form "A[i] = ..."
+ with a stride equal to its unit type size. */
+
+inline bool
+adjacent_dr_p (struct data_reference *dr)
+{
+ /* If this is a bitfield store bail out. */
+ if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
+ && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
+ return false;
+
+ if (!DR_STEP (dr)
+ || TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
+ return false;
+
+ return tree_int_cst_equal (fold_unary (ABS_EXPR, TREE_TYPE (DR_STEP (dr)),
+ DR_STEP (dr)),
+ TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr))));
+}
+
+void split_constant_offset (tree , tree *, tree *);
+
+/* Compute the greatest common divisor of a VECTOR of SIZE numbers. */
+
+inline lambda_int
+lambda_vector_gcd (lambda_vector vector, int size)
+{
+ int i;
+ lambda_int gcd1 = 0;
+
+ if (size > 0)
+ {
+ gcd1 = vector[0];
+ for (i = 1; i < size; i++)
+ gcd1 = gcd (gcd1, vector[i]);
+ }
+ return gcd1;
+}
+
+/* Allocate a new vector of given SIZE. */
+
+inline lambda_vector
+lambda_vector_new (int size)
+{
+ /* ??? We shouldn't abuse the GC allocator here. */
+ return ggc_cleared_vec_alloc<lambda_int> (size);
+}
+
+/* Clear out vector VEC1 of length SIZE. */
+
+inline void
+lambda_vector_clear (lambda_vector vec1, int size)
+{
+ memset (vec1, 0, size * sizeof (*vec1));
+}
+
+/* Returns true when the vector V is lexicographically positive, in
+ other words, when the first nonzero element is positive. */
+
+inline bool
+lambda_vector_lexico_pos (lambda_vector v,
+ unsigned n)
+{
+ unsigned i;
+ for (i = 0; i < n; i++)
+ {
+ if (v[i] == 0)
+ continue;
+ if (v[i] < 0)
+ return false;
+ if (v[i] > 0)
+ return true;
+ }
+ return true;
+}
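+
+/* For example, {0, 2, -1} is lexicographically positive (the first
+   nonzero entry is 2 > 0), {0, -2, 1} is not, and the zero vector is
+   treated as positive.  */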
+
+/* Return true if vector VEC1 of length SIZE is the zero vector. */
+
+inline bool
+lambda_vector_zerop (lambda_vector vec1, int size)
+{
+ int i;
+ for (i = 0; i < size; i++)
+ if (vec1[i] != 0)
+ return false;
+ return true;
+}
+
+/* Allocate a matrix of M rows x N cols. */
+
+inline lambda_matrix
+lambda_matrix_new (int m, int n, struct obstack *lambda_obstack)
+{
+ lambda_matrix mat;
+ int i;
+
+ mat = XOBNEWVEC (lambda_obstack, lambda_vector, m);
+
+ for (i = 0; i < m; i++)
+ mat[i] = XOBNEWVEC (lambda_obstack, lambda_int, n);
+
+ return mat;
+}
+
+#endif /* GCC_TREE_DATA_REF_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-dfa.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-dfa.h
new file mode 100644
index 0000000..074a4da
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-dfa.h
@@ -0,0 +1,45 @@
+/* Header file for tree data flow functions.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_DFA_H
+#define GCC_TREE_DFA_H
+
+extern void renumber_gimple_stmt_uids_in_block (struct function *, basic_block);
+extern void renumber_gimple_stmt_uids (struct function *);
+extern void renumber_gimple_stmt_uids_in_blocks (basic_block *, int);
+extern void dump_variable (FILE *, tree);
+extern void debug_variable (tree);
+extern void dump_dfa_stats (FILE *);
+extern void debug_dfa_stats (void);
+extern tree ssa_default_def (struct function *, tree);
+extern void set_ssa_default_def (struct function *, tree, tree);
+extern tree get_or_create_ssa_default_def (struct function *, tree);
+extern tree get_ref_base_and_extent (tree, poly_int64_pod *, poly_int64_pod *,
+ poly_int64_pod *, bool *);
+extern tree get_ref_base_and_extent_hwi (tree, HOST_WIDE_INT *,
+ HOST_WIDE_INT *, bool *);
+extern tree get_addr_base_and_unit_offset_1 (tree, poly_int64_pod *,
+ tree (*) (tree));
+extern tree get_addr_base_and_unit_offset (tree, poly_int64_pod *);
+extern bool stmt_references_abnormal_ssa_name (gimple *);
+extern void replace_abnormal_ssa_names (gimple *);
+extern void dump_enumerated_decls (FILE *, dump_flags_t);
+
+
+#endif /* GCC_TREE_DFA_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-diagnostic.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-diagnostic.h
new file mode 100644
index 0000000..9089f2d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-diagnostic.h
@@ -0,0 +1,68 @@
+/* Various declarations for language-independent diagnostics
+ subroutines that are only for use in the compilers proper and not
+ the driver or other programs.
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_DIAGNOSTIC_H
+#define GCC_TREE_DIAGNOSTIC_H
+
+/* TREE_BLOCK if the diagnostic is to be reported in some inline
+ function inlined into another function, otherwise NULL. */
+#define diagnostic_abstract_origin(DI) \
+ ((tree) diagnostic_info_auxiliary_data (DI))
+
+/* Function of last diagnostic message; more generally, function such
+ that if the next diagnostic message is in it then we don't have to
+ mention the function name. */
+#define diagnostic_last_function(DC) \
+ ((tree) diagnostic_context_auxiliary_data (DC))
+
+/* True if the last function in which a diagnostic was reported is
+ different from the current one. */
+#define diagnostic_last_function_changed(DC, DI) \
+ (diagnostic_last_function (DC) != (diagnostic_abstract_origin (DI) \
+ ? diagnostic_abstract_origin (DI) \
+ : current_function_decl))
+
+/* Remember the current function as being the last one in which we report
+ a diagnostic. */
+#define diagnostic_set_last_function(DC, DI) \
+ diagnostic_context_auxiliary_data (DC) \
+ = (((DI) && diagnostic_abstract_origin (DI)) \
+ ? diagnostic_abstract_origin (DI) \
+ : current_function_decl)
+
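+/* A minimal usage sketch: a diagnostic starter would typically combine
+   the macros above roughly like this (CONTEXT and DIAGNOSTIC being the
+   usual diagnostic_context/diagnostic_info pair):
+
+     if (diagnostic_last_function_changed (context, diagnostic))
+       {
+	 diagnostic_report_current_function (context, diagnostic);
+	 diagnostic_set_last_function (context, diagnostic);
+       }
+*/
+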
+void diagnostic_report_current_function (diagnostic_context *,
+ diagnostic_info *);
+void virt_loc_aware_diagnostic_finalizer (diagnostic_context *,
+ diagnostic_info *);
+
+void tree_diagnostics_defaults (diagnostic_context *context);
+bool default_tree_printer (pretty_printer *, text_info *, const char *,
+ int, bool, bool, bool, bool *, const char **);
+
+extern void default_tree_diagnostic_path_printer (diagnostic_context *,
+ const diagnostic_path *);
+extern json::value *default_tree_make_json_for_path (diagnostic_context *,
+ const diagnostic_path *);
+
+extern void maybe_unwind_expanded_macro_loc (diagnostic_context *context,
+ location_t where);
+
+#endif /* ! GCC_TREE_DIAGNOSTIC_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-dump.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-dump.h
new file mode 100644
index 0000000..27a93a1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-dump.h
@@ -0,0 +1,92 @@
+/* Tree-dumping functionality for intermediate representation.
+ Copyright (C) 1999-2023 Free Software Foundation, Inc.
+ Written by Mark Mitchell <mark@codesourcery.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_DUMP_H
+#define GCC_TREE_DUMP_H
+
+#include "splay-tree.h"
+#include "dumpfile.h"
+
+typedef struct dump_info *dump_info_p;
+
+/* Flags used with queue functions. */
+#define DUMP_NONE 0
+#define DUMP_BINFO 1
+
+/* Information about a node to be dumped. */
+
+typedef struct dump_node_info
+{
+ /* The index for the node. */
+ unsigned int index;
+ /* Nonzero if the node is a binfo. */
+ unsigned int binfo_p : 1;
+} *dump_node_info_p;
+
+/* A dump_queue is a link in the queue of things to be dumped. */
+
+typedef struct dump_queue
+{
+ /* The queued tree node. */
+ splay_tree_node node;
+ /* The next node in the queue. */
+ struct dump_queue *next;
+} *dump_queue_p;
+
+/* A dump_info gives information about how we should perform the dump
+ and about the current state of the dump. */
+
+struct dump_info
+{
+ /* The stream on which to dump the information. */
+ FILE *stream;
+ /* The original node. */
+ const_tree node;
+ /* User flags. */
+ dump_flags_t flags;
+ /* The next unused node index. */
+ unsigned int index;
+ /* The next column. */
+ unsigned int column;
+ /* The first node in the queue of nodes to be written out. */
+ dump_queue_p queue;
+ /* The last node in the queue. */
+ dump_queue_p queue_end;
+ /* Free queue nodes. */
+ dump_queue_p free_list;
+ /* The tree nodes which we have already written out. The
+ keys are the addresses of the nodes; the values are the integer
+ indices we assigned them. */
+ splay_tree nodes;
+};
+
+/* Dump the CHILD and its children. */
+#define dump_child(field, child) \
+ queue_and_dump_index (di, field, child, DUMP_NONE)
+
+extern void dump_pointer (dump_info_p, const char *, void *);
+extern void dump_int (dump_info_p, const char *, int);
+extern void dump_string (dump_info_p, const char *);
+extern void dump_string_field (dump_info_p, const char *, const char *);
+extern void queue_and_dump_index (dump_info_p, const char *, const_tree, int);
+extern void queue_and_dump_type (dump_info_p, const_tree);
+extern int dump_flag (dump_info_p, dump_flags_t, const_tree);
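+
+/* A minimal usage sketch: a dumper for a two-operand node would follow
+   the conventions above (DI is the dump_info_p in scope, as dump_child
+   expects; TREE_OPERAND comes from tree.h; the function name is
+   hypothetical):
+
+     static void
+     dump_my_node (dump_info_p di, tree t)
+     {
+       dump_string_field (di, "note", "example");
+       dump_child ("op0", TREE_OPERAND (t, 0));
+       dump_child ("op1", TREE_OPERAND (t, 1));
+     }
+*/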
+
+#endif /* ! GCC_TREE_DUMP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-eh.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-eh.h
new file mode 100644
index 0000000..771be50
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-eh.h
@@ -0,0 +1,58 @@
+/* Header file for exception handling.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_EH_H
+#define GCC_TREE_EH_H
+
+
+typedef struct eh_region_d *eh_region;
+
+extern void using_eh_for_cleanups (void);
+extern void add_stmt_to_eh_lp (gimple *, int);
+extern bool remove_stmt_from_eh_lp_fn (struct function *, gimple *);
+extern bool remove_stmt_from_eh_lp (gimple *);
+extern int lookup_stmt_eh_lp_fn (struct function *, const gimple *);
+extern int lookup_stmt_eh_lp (const gimple *);
+extern bool make_eh_dispatch_edges (geh_dispatch *);
+extern void make_eh_edges (gimple *);
+extern edge redirect_eh_edge (edge, basic_block);
+extern void redirect_eh_dispatch_edge (geh_dispatch *, edge, basic_block);
+extern bool operation_could_trap_helper_p (enum tree_code, bool, bool, bool,
+ bool, tree, bool *);
+extern bool operation_could_trap_p (enum tree_code, bool, bool, tree);
+extern bool tree_could_trap_p (tree);
+extern tree rewrite_to_non_trapping_overflow (tree);
+extern bool stmt_could_throw_p (function *, gimple *);
+extern bool stmt_unremovable_because_of_non_call_eh_p (function *, gimple *);
+extern bool tree_could_throw_p (tree);
+extern bool stmt_can_throw_external (function *, gimple *);
+extern bool stmt_can_throw_internal (function *, gimple *);
+extern bool maybe_clean_eh_stmt_fn (struct function *, gimple *);
+extern bool maybe_clean_eh_stmt (gimple *);
+extern bool maybe_clean_or_replace_eh_stmt (gimple *, gimple *);
+extern bool maybe_duplicate_eh_stmt_fn (struct function *, gimple *,
+ struct function *, gimple *,
+ hash_map<void *, void *> *, int);
+extern bool maybe_duplicate_eh_stmt (gimple *, gimple *);
+extern void maybe_remove_unreachable_handlers (void);
+extern void unsplit_eh_edges (void);
+extern bool verify_eh_edges (gimple *);
+extern bool verify_eh_dispatch_edge (geh_dispatch *);
+
+#endif /* GCC_TREE_EH_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-hash-traits.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-hash-traits.h
new file mode 100644
index 0000000..48631f4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-hash-traits.h
@@ -0,0 +1,44 @@
+/* Traits for hashing trees.
+ Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef tree_hash_traits_h
+#define tree_hash_traits_h
+
+/* Hash for trees based on operand_equal_p. */
+struct tree_operand_hash : ggc_ptr_hash <tree_node>
+{
+ static inline hashval_t hash (const value_type &);
+ static inline bool equal (const value_type &,
+ const compare_type &);
+};
+
+inline hashval_t
+tree_operand_hash::hash (const value_type &t)
+{
+ return iterative_hash_expr (t, 0);
+}
+
+inline bool
+tree_operand_hash::equal (const value_type &t1,
+ const compare_type &t2)
+{
+ return operand_equal_p (t1, t2, 0);
+}
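+
+/* A minimal usage sketch: the trait lets the generic hash tables key on
+   structural rather than pointer equality, e.g. a hypothetical map from
+   expressions to counters:
+
+     hash_map<tree_operand_hash, unsigned> expr_counts;
+     bool existed;
+     unsigned &count = expr_counts.get_or_insert (expr, &existed);
+     if (!existed)
+       count = 0;
+     count++;
+*/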
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-hasher.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-hasher.h
new file mode 100644
index 0000000..9451ba0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-hasher.h
@@ -0,0 +1,66 @@
+/* Hash Table Helper for Trees
+ Copyright (C) 2012-2023 Free Software Foundation, Inc.
+ Contributed by Lawrence Crowl <crowl@google.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_HASHER_H
+#define GCC_TREE_HASHER_H 1
+
+struct int_tree_map {
+ unsigned int uid;
+ tree to;
+};
+
+/* Hashtable helpers. */
+
+struct int_tree_hasher
+{
+ typedef int_tree_map value_type;
+ typedef int_tree_map compare_type;
+ static inline hashval_t hash (const value_type &);
+ static inline bool equal (const value_type &, const compare_type &);
+ static bool is_deleted (const value_type &v)
+ {
+ return v.to == reinterpret_cast<tree> (1);
+ }
+ static void mark_deleted (value_type &v) { v.to = reinterpret_cast<tree> (0x1); }
+ static bool is_empty (const value_type &v) { return v.to == NULL; }
+ static const bool empty_zero_p = true;
+ static void mark_empty (value_type &v) { v.to = NULL; }
+ static void remove (value_type &) {}
+};
+
+/* Hash a UID in a int_tree_map. */
+
+inline hashval_t
+int_tree_hasher::hash (const value_type &item)
+{
+ return item.uid;
+}
+
+/* Return true if the uids in both int_tree_map objects are equal. */
+
+inline bool
+int_tree_hasher::equal (const value_type &a, const compare_type &b)
+{
+ return (a.uid == b.uid);
+}
+
+typedef hash_table <int_tree_hasher> int_tree_htab_type;
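+
+/* A minimal usage sketch: looking up or inserting a UID-keyed entry in
+   an int_tree_htab_type HTAB; the UID itself doubles as the hash value,
+   and DEF is the hypothetical tree being recorded:
+
+     int_tree_map in = { uid, NULL_TREE };
+     int_tree_map *slot = htab.find_slot_with_hash (in, uid, INSERT);
+     if (!slot->to)
+       {
+	 slot->uid = uid;
+	 slot->to = def;
+       }
+*/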
+
+#endif /* GCC_TREE_HASHER_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-if-conv.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-if-conv.h
new file mode 100644
index 0000000..e1473f0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-if-conv.h
@@ -0,0 +1,24 @@
+/* Copyright (C) 2016-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_IF_CONV_H
+#define GCC_TREE_IF_CONV_H
+
+unsigned int tree_if_conversion (class loop *, vec<gimple *> * = NULL);
+
+#endif /* GCC_TREE_IF_CONV_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-inline.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-inline.h
new file mode 100644
index 0000000..ec30ccb
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-inline.h
@@ -0,0 +1,256 @@
+/* Tree inlining hooks and declarations.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+ Contributed by Alexandre Oliva <aoliva@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_INLINE_H
+#define GCC_TREE_INLINE_H
+
+
+struct cgraph_edge;
+
+/* Indicate the desired behavior wrt call graph edges. We can either
+ duplicate the edge (inlining, cloning), move the edge (versioning,
+ parallelization), or move the edges of the clones (saving). */
+
+enum copy_body_cge_which
+{
+ CB_CGE_DUPLICATE,
+ CB_CGE_MOVE,
+ CB_CGE_MOVE_CLONES
+};
+
+typedef int_hash <unsigned short, 0> dependence_hash;
+
+/* Data required for function body duplication. */
+
+struct copy_body_data
+{
+ /* FUNCTION_DECL for function being inlined, or in general the
+ source function providing the original trees. */
+ tree src_fn;
+
+ /* FUNCTION_DECL for function being inlined into, or in general
+ the destination function receiving the new trees. */
+ tree dst_fn;
+
+ /* Callgraph node of the source function. */
+ struct cgraph_node *src_node;
+
+ /* Callgraph node of the destination function. */
+ struct cgraph_node *dst_node;
+
+ /* struct function for function being inlined. Usually this is the same
+ as DECL_STRUCT_FUNCTION (src_fn), but can be different if saved_cfg
+ and saved_eh are in use. */
+ struct function *src_cfun;
+
+ /* The VAR_DECL for the return value. */
+ tree retvar;
+
+ /* The map from local declarations in the inlined function to
+ equivalents in the function into which it is being inlined. */
+ hash_map<tree, tree> *decl_map;
+
+ /* Create a new decl to replace DECL in the destination function. */
+ tree (*copy_decl) (tree, struct copy_body_data *);
+
+ /* Current BLOCK. */
+ tree block;
+
+ /* GIMPLE_CALL if va arg parameter packs should be expanded, or NULL
+ if not. */
+ gcall *call_stmt;
+
+ /* > 0 if we are remapping a type currently. */
+ int remapping_type_depth;
+
+ /* Exception landing pad the inlined call lies in. */
+ int eh_lp_nr;
+
+ /* Maps region and landing pad structures from the function being copied
+ to duplicates created within the function we inline into. */
+ hash_map<void *, void *> *eh_map;
+
+ /* We use the same mechanism to do all sorts of different things. Rather
+ than enumerating the different cases, we categorize the behavior
+ in the various situations. */
+
+ /* What to do with call graph edges. */
+ enum copy_body_cge_which transform_call_graph_edges;
+
+ /* True if a new CFG should be created. False for inlining, true for
+ everything else. */
+ bool transform_new_cfg;
+
+ /* True if RETURN_EXPRs should be transformed to just the contained
+ MODIFY_EXPR. The branch semantics of the return will be handled
+ by manipulating the CFG rather than a statement. */
+ bool transform_return_to_modify;
+
+ /* True if the parameters of the source function are transformed.
+ Only true for inlining. */
+ bool transform_parameter;
+
+ /* True if this statement will need to be regimplified. */
+ bool regimplify;
+
+ /* True if trees may not be unshared. */
+ bool do_not_unshare;
+
+ /* True if trees should not be folded during the copying. */
+ bool do_not_fold;
+
+ /* True if new declarations may not be created during type remapping. */
+ bool prevent_decl_creation_for_types;
+
+ /* True if the location information will need to be reset. */
+ bool reset_location;
+
+ /* Replace error_mark_node as upper bound of array types with
+ an uninitialized VAR_DECL temporary. */
+ bool adjust_array_error_bounds;
+
+ /* Usually the copy_decl callback creates new decls, in which case
+ we want to remap all variably_modified_type_p types. If this flag
+ is set, remap_type will do further checks to see if remap_decl
+ of any decls mentioned in the type will remap to anything but itself
+ and only in that case will actually remap the type. */
+ bool dont_remap_vla_if_no_change;
+
+ /* Statements that might be possibly folded. */
+ hash_set<gimple *> *statements_to_fold;
+
+ /* Entry basic block to currently copied body. */
+ basic_block entry_bb;
+
+ /* For partial function versioning, bitmap of bbs to be copied,
+ otherwise NULL. */
+ bitmap blocks_to_copy;
+
+ /* Debug statements that need processing. */
+ vec<gdebug *> debug_stmts;
+
+ /* A map from local declarations in the inlined function to
+ equivalents in the function into which it is being inlined,
+ where the originals have been mapped to a value rather than
+ to a variable. */
+ hash_map<tree, tree> *debug_map;
+
+ /* A map from the inlined function's dependence info cliques to
+ equivalents in the function into which it is being inlined. */
+ hash_map<dependence_hash, unsigned short> *dependence_map;
+
+ /* A list of addressable local variables remapped into the caller
+ when inlining a call within an OpenMP SIMD-on-SIMT loop. */
+ vec<tree> *dst_simt_vars;
+
+ /* Basic block to which clobbers for local variables from the inline
+ function that need to live in memory should be added. */
+ basic_block eh_landing_pad_dest;
+
+ /* If clobbers for local variables from the inline function
+ that need to live in memory should be added to EH landing pads
+ outside of the inlined function, this should be the number
+ of basic blocks in the caller before inlining. Zero otherwise. */
+ int add_clobbers_to_eh_landing_pads;
+
+ /* Class managing changes to function parameters and return value planned
+ during IPA stage. */
+ class ipa_param_body_adjustments *param_body_adjs;
+
+ /* Hash set of SSA names that have been killed during call graph edge
+ redirection and should not be introduced into debug statements, or
+ NULL if no SSA_NAME was deleted during redirection. */
+ hash_set <tree> *killed_new_ssa_names;
+};
+
+/* Weights of constructions for estimate_num_insns. */
+
+struct eni_weights
+{
+ /* Cost per call. */
+ unsigned call_cost;
+
+ /* Cost per indirect call. */
+ unsigned indirect_call_cost;
+
+ /* Cost per call to a target specific builtin */
+ unsigned target_builtin_call_cost;
+
+ /* Cost of "expensive" div and mod operations. */
+ unsigned div_mod_cost;
+
+ /* Cost for omp construct. */
+ unsigned omp_cost;
+
+ /* Cost for tm transaction. */
+ unsigned tm_cost;
+
+ /* Cost of return. */
+ unsigned return_cost;
+
+ /* True when the time of a statement should be estimated. Thus, the
+ cost of a switch statement is logarithmic rather than linear in the
+ number of cases. */
+ bool time_based;
+};
+
+/* Weights that estimate_num_insns uses for heuristics in inlining. */
+
+extern eni_weights eni_inlining_weights;
+
+/* Weights that estimate_num_insns uses to estimate the size of the
+ produced code. */
+
+extern eni_weights eni_size_weights;
+
+/* Weights that estimate_num_insns uses to estimate the time necessary
+ to execute the produced code. */
+
+extern eni_weights eni_time_weights;
+
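+/* A minimal usage sketch: a pass weighing a statement STMT for both
+   size and speed would query estimate_num_insns (declared below) with
+   two of the globals above:
+
+     int size = estimate_num_insns (stmt, &eni_size_weights);
+     int time = estimate_num_insns (stmt, &eni_time_weights);
+*/
+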
+/* Function prototypes. */
+void init_inline_once (void);
+extern tree copy_tree_body_r (tree *, int *, void *);
+extern void insert_decl_map (copy_body_data *, tree, tree);
+unsigned int optimize_inline_calls (tree);
+tree maybe_inline_call_in_expr (tree);
+bool tree_inlinable_function_p (tree);
+tree copy_tree_r (tree *, int *, void *);
+tree copy_decl_no_change (tree decl, copy_body_data *id);
+int estimate_move_cost (tree type, bool);
+int estimate_num_insns (gimple *, eni_weights *);
+int estimate_num_insns_fn (tree, eni_weights *);
+int estimate_num_insns_seq (gimple_seq, eni_weights *);
+bool tree_versionable_function_p (tree);
+extern tree remap_decl (tree decl, copy_body_data *id);
+extern tree remap_type (tree type, copy_body_data *id);
+extern gimple_seq copy_gimple_seq_and_replace_locals (gimple_seq seq);
+extern bool debug_find_tree (tree, tree);
+extern tree copy_fn (tree, tree&, tree&);
+extern const char *copy_forbidden (struct function *fun);
+extern tree copy_decl_for_dup_finish (copy_body_data *id, tree decl, tree copy);
+extern tree copy_decl_to_var (tree, copy_body_data *);
+extern tree force_value_to_type (tree type, tree value);
+
+/* This is in tree-inline.cc since the routine uses
+ data structures from the inliner. */
+extern tree build_duplicate_type (tree);
+
+#endif /* GCC_TREE_INLINE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-into-ssa.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-into-ssa.h
new file mode 100644
index 0000000..ff2d392
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-into-ssa.h
@@ -0,0 +1,53 @@
+/* Header file for normal form into SSA.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_INTO_SSA_H
+#define GCC_TREE_INTO_SSA_H
+
+extern tree get_current_def (tree);
+extern void set_current_def (tree, tree);
+void delete_update_ssa (void);
+tree create_new_def_for (tree, gimple *, def_operand_p);
+void mark_virtual_operands_for_renaming (struct function *);
+void mark_virtual_operand_for_renaming (tree);
+void mark_virtual_phi_result_for_renaming (gphi *);
+bool need_ssa_update_p (struct function *);
+bool name_registered_for_update_p (tree);
+void release_ssa_name_after_update_ssa (tree);
+void update_ssa (unsigned);
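+
+/* A minimal usage sketch: after rewriting virtual operands in-place, a
+   pass typically queues the affected names and triggers the incremental
+   updater; TODO_update_ssa_only_virtuals is one of the TODO_update_ssa*
+   flags from tree-pass.h:
+
+     mark_virtual_operands_for_renaming (cfun);
+     update_ssa (TODO_update_ssa_only_virtuals);
+*/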
+
+/* Prototypes for debugging functions. */
+extern void debug_decl_set (bitmap set);
+extern void dump_defs_stack (FILE *, int);
+extern void debug_defs_stack (int);
+extern void dump_currdefs (FILE *);
+extern void debug_currdefs (void);
+extern void dump_tree_ssa (FILE *);
+extern void debug_tree_ssa (void);
+extern void dump_tree_ssa_stats (FILE *);
+extern void debug_tree_ssa_stats (void);
+extern void dump_var_infos (FILE *);
+extern void debug_var_infos (void);
+extern void dump_names_replaced_by (FILE *, tree);
+extern void debug_names_replaced_by (tree);
+extern void dump_update_ssa (FILE *);
+extern void debug_update_ssa (void);
+extern bitmap names_to_release;
+
+#endif /* GCC_TREE_INTO_SSA_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-iterator.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-iterator.h
new file mode 100644
index 0000000..800dd2e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-iterator.h
@@ -0,0 +1,150 @@
+/* Iterator routines for manipulating GENERIC tree statement list. -*- C++ -*-
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Andrew MacLeod <amacleod@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* This file is dependent upon the implementation of trees. It provides an
+ abstract interface to the tree objects such that if all tree creation and
+ manipulation is done through this interface, we can easily change the
+ implementation of trees, and not impact other code. */
+
+#ifndef GCC_TREE_ITERATOR_H
+#define GCC_TREE_ITERATOR_H 1
+
+/* Iterator object for GENERIC or GIMPLE TREE statements. */
+
+struct tree_stmt_iterator {
+ struct tree_statement_list_node *ptr;
+ tree container;
+
+ /* No need for user-defined constructors; the implicit definitions (or
+ aggregate initialization) are fine. */
+
+ bool operator== (tree_stmt_iterator b) const
+ { return b.ptr == ptr && b.container == container; }
+ bool operator!= (tree_stmt_iterator b) const { return !(*this == b); }
+ tree_stmt_iterator &operator++ () { ptr = ptr->next; return *this; }
+ tree_stmt_iterator &operator-- () { ptr = ptr->prev; return *this; }
+ tree_stmt_iterator operator++ (int)
+ { tree_stmt_iterator x = *this; ++*this; return x; }
+ tree_stmt_iterator operator-- (int)
+ { tree_stmt_iterator x = *this; --*this; return x; }
+ tree &operator* () { return ptr->stmt; }
+ tree operator* () const { return ptr->stmt; }
+};
+
+inline tree_stmt_iterator
+tsi_start (tree t)
+{
+ tree_stmt_iterator i;
+
+ i.ptr = STATEMENT_LIST_HEAD (t);
+ i.container = t;
+
+ return i;
+}
+
+inline tree_stmt_iterator
+tsi_last (tree t)
+{
+ tree_stmt_iterator i;
+
+ i.ptr = STATEMENT_LIST_TAIL (t);
+ i.container = t;
+
+ return i;
+}
+
+inline bool
+tsi_end_p (tree_stmt_iterator i)
+{
+ return i.ptr == NULL;
+}
+
+inline bool
+tsi_one_before_end_p (tree_stmt_iterator i)
+{
+ return i.ptr != NULL && i.ptr->next == NULL;
+}
+
+inline void
+tsi_next (tree_stmt_iterator *i)
+{
+ ++(*i);
+}
+
+inline void
+tsi_prev (tree_stmt_iterator *i)
+{
+ --(*i);
+}
+
+inline tree *
+tsi_stmt_ptr (tree_stmt_iterator i)
+{
+ return &(*i);
+}
+
+inline tree
+tsi_stmt (tree_stmt_iterator i)
+{
+ return *i;
+}
+
+/* Make tree_stmt_iterator work as a C++ range, e.g.
+ for (tree stmt : tsi_range (stmt_list)) { ... } */
+class tsi_range
+{
+ tree t;
+ public:
+ tsi_range (tree t): t(t) { }
+ tree_stmt_iterator begin() const { return tsi_start (t); }
+ tree_stmt_iterator end() const { return { nullptr, t }; }
+};
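+
+/* A minimal usage sketch showing both iteration styles over a
+   STATEMENT_LIST tree STMT_LIST ('process' is a hypothetical callback):
+
+     for (tree stmt : tsi_range (stmt_list))
+       process (stmt);
+
+     for (tree_stmt_iterator i = tsi_start (stmt_list);
+	  !tsi_end_p (i); tsi_next (&i))
+       process (tsi_stmt (i));
+*/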
+
+enum tsi_iterator_update
+{
+ TSI_NEW_STMT, /* Only valid when single statement is added, move
+ iterator to it. */
+ TSI_SAME_STMT, /* Leave the iterator at the same statement. */
+ TSI_CHAIN_START, /* Only valid when chain of statements is added, move
+ iterator to the first statement in the chain. */
+ TSI_CHAIN_END, /* Only valid when chain of statements is added, move
+ iterator to the last statement in the chain. */
+ TSI_CONTINUE_LINKING /* Move iterator to whatever position is suitable for
+ linking other statements/chains of statements in
+ the same direction. */
+};
+
+extern void tsi_link_before (tree_stmt_iterator *, tree,
+ enum tsi_iterator_update);
+extern void tsi_link_after (tree_stmt_iterator *, tree,
+ enum tsi_iterator_update);
+
+extern void tsi_delink (tree_stmt_iterator *);
+
+extern tree alloc_stmt_list (void);
+extern void free_stmt_list (tree);
+extern void append_to_statement_list (tree, tree *);
+extern void append_to_statement_list_force (tree, tree *);
+extern tree expr_first (tree);
+extern tree expr_last (tree);
+extern tree expr_single (tree);
+
+#endif /* GCC_TREE_ITERATOR_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-logical-location.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-logical-location.h
new file mode 100644
index 0000000..e1b01c6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-logical-location.h
@@ -0,0 +1,67 @@
+/* Subclasses of logical_location with knowledge of "tree".
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_LOGICAL_LOCATION_H
+#define GCC_TREE_LOGICAL_LOCATION_H
+
+#include "logical-location.h"
+
+/* Abstract subclass of logical_location, with knowledge of "tree", but
+ for no specific tree. */
+
+class compiler_logical_location : public logical_location
+{
+ protected:
+ static const char *get_short_name_for_tree (tree);
+ static const char *get_name_with_scope_for_tree (tree);
+ static const char *get_internal_name_for_tree (tree);
+ static enum logical_location_kind get_kind_for_tree (tree);
+};
+
+/* Concrete subclass of logical_location, with reference to a specific
+ tree. */
+
+class tree_logical_location : public compiler_logical_location
+{
+public:
+ tree_logical_location (tree decl) : m_decl (decl) {}
+
+ const char *get_short_name () const final override;
+ const char *get_name_with_scope () const final override;
+ const char *get_internal_name () const final override;
+ enum logical_location_kind get_kind () const final override;
+
+private:
+ tree m_decl;
+};
+
+/* Concrete subclass of logical_location, with reference to
+ current_function_decl. */
+
+class current_fndecl_logical_location : public compiler_logical_location
+{
+public:
+ const char *get_short_name () const final override;
+ const char *get_name_with_scope () const final override;
+ const char *get_internal_name () const final override;
+ enum logical_location_kind get_kind () const final override;
+};
+
+#endif /* GCC_TREE_LOGICAL_LOCATION_H. */
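A hedged sketch of how a diagnostics client might wrap a declaration in the concrete subclass above; describe_fn is a hypothetical helper, and FNDECL is assumed to be a FUNCTION_DECL.

/* Sketch only.  */
static void
describe_fn (tree fndecl)
{
  tree_logical_location loc (fndecl);
  fprintf (stderr, "%s (scoped: %s)\n",
           loc.get_short_name (), loc.get_name_with_scope ());
}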
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-nested.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-nested.h
new file mode 100644
index 0000000..0914bcb
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-nested.h
@@ -0,0 +1,89 @@
+/* Header file for Nested function decomposition for GIMPLE.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_NESTED_H
+#define GCC_TREE_NESTED_H
+
+extern tree build_addr (tree);
+extern void insert_field_into_struct (tree, tree);
+extern void lower_nested_functions (tree);
+
+class nested_function_info
+{
+public:
+ /* Constructor. */
+ nested_function_info ()
+ : origin (NULL),
+ nested (NULL),
+ next_nested (NULL)
+ {
+ }
+ /* Copy constructor. We cannot simply copy the structure, because
+ the linked lists would go wrong. However, we should never need
+ to. */
+ nested_function_info (const nested_function_info &)
+ {
+ gcc_unreachable ();
+ }
+ ~nested_function_info ();
+
+ /* Return nested_function_info, if available. */
+ static nested_function_info *get (cgraph_node *node);
+
+ /* Return the nested_function_info, possibly creating a new one. */
+ static nested_function_info *get_create (cgraph_node *node);
+
+ /* Release all nested_function_infos. */
+ static void release (void);
+
+ /* For nested functions, points to the function the node is nested in. */
+ cgraph_node *origin;
+ /* Points to the first nested function, if any. */
+ cgraph_node *nested;
+ /* Points to the next function with the same origin, if any. */
+ cgraph_node *next_nested;
+};
+
+extern void maybe_record_nested_function (cgraph_node *node);
+extern void unnest_function (cgraph_node *node);
+
+/* If there are functions nested in NODE, return the first one. */
+inline cgraph_node *
+first_nested_function (cgraph_node *node)
+{
+ nested_function_info *info = nested_function_info::get (node);
+ return info ? info->nested : NULL;
+}
+
+/* Return next nested function (used to iterate from first_nested_function). */
+inline cgraph_node *
+next_nested_function (cgraph_node *node)
+{
+ return nested_function_info::get (node)->next_nested;
+}
+
+/* Return the origin of a nested function, or NULL if NODE is not nested. */
+inline cgraph_node *
+nested_function_origin (cgraph_node *node)
+{
+ nested_function_info *info = nested_function_info::get (node);
+ return info ? info->origin : NULL;
+}
+
+#endif /* GCC_TREE_NESTED_H */
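A hedged sketch of the iteration pattern the two inline helpers above are designed for: visiting every function nested directly inside NODE. walk_nested is a hypothetical caller; dump_name is assumed from cgraph.h.

/* Sketch only.  */
static void
walk_nested (cgraph_node *node)
{
  for (cgraph_node *n = first_nested_function (node);
       n;
       n = next_nested_function (n))
    fprintf (stderr, "nested: %s\n", n->dump_name ());
}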
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-object-size.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-object-size.h
new file mode 100644
index 0000000..e899cb5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-object-size.h
@@ -0,0 +1,38 @@
+/* Declarations for tree-object-size.cc.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_OBJECT_SIZE_H
+#define GCC_TREE_OBJECT_SIZE_H
+
+/* Bits in object_size_type. */
+
+enum
+{
+ OST_SUBOBJECT = 1,
+ OST_MINIMUM = 2,
+ OST_DYNAMIC = 4,
+ OST_END = 8,
+};
+
+extern void init_object_sizes (void);
+extern void fini_object_sizes (void);
+extern bool compute_builtin_object_size (tree, int, tree *);
+extern tree decl_init_size (tree, bool);
+
+#endif // GCC_TREE_OBJECT_SIZE_H
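A hedged sketch of querying an object size with the bits above; the assumption (based on the bit names) is that OST_MINIMUM requests a minimum rather than a maximum estimate, mirroring __builtin_object_size's type 2.

/* Sketch only: PTR is an address expression.  */
tree size;
if (compute_builtin_object_size (ptr, OST_MINIMUM, &size))
  {
    /* SIZE holds the estimate; with OST_DYNAMIC set it may be a
       non-constant expression rather than an INTEGER_CST.  */
  }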
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-outof-ssa.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-outof-ssa.h
new file mode 100644
index 0000000..d69c5a7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-outof-ssa.h
@@ -0,0 +1,82 @@
+/* Routines for expanding from SSA form to RTL.
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_TREE_OUTOF_SSA_H
+#define GCC_TREE_OUTOF_SSA_H
+
+
+/* This structure (of which only a singleton SA exists) is used to
+ pass around information between the outof-SSA functions, cfgexpand
+ and expand itself. */
+struct ssaexpand
+{
+ /* The computed partitions of SSA names are stored here. */
+ var_map map;
+
+ /* For an SSA name with version V, bit V is set iff TER decided that
+ its definition should be forwarded. */
+ bitmap values;
+
+ /* For a partition number I, partition_to_pseudo[I] contains the
+ RTL expression for its allocated storage (either a MEM or
+ a pseudo REG). */
+ rtx *partition_to_pseudo;
+
+ /* If partition I contains an SSA name that has a default def for a
+ parameter, bit I will be set in this bitmap. */
+ bitmap partitions_for_parm_default_defs;
+
+ /* If partition I contains an SSA name that has an undefined value,
+ bit I will be set in this bitmap. */
+ bitmap partitions_for_undefined_values;
+};
+
+/* This is the singleton described above. */
+extern struct ssaexpand SA;
+
+/* Returns the RTX expression representing the storage of the outof-SSA
+ partition that the SSA name EXP is a member of. */
+inline rtx
+get_rtx_for_ssa_name (tree exp)
+{
+ int p = partition_find (SA.map->var_partition, SSA_NAME_VERSION (exp));
+ if (SA.map->partition_to_view)
+ p = SA.map->partition_to_view[p];
+ gcc_assert (p != NO_PARTITION);
+ return SA.partition_to_pseudo[p];
+}
+
+/* If TER decided to forward the definition of SSA name EXP this function
+ returns the defining statement, otherwise NULL. */
+inline gimple *
+get_gimple_for_ssa_name (tree exp)
+{
+ int v = SSA_NAME_VERSION (exp);
+ if (SA.values && bitmap_bit_p (SA.values, v))
+ return SSA_NAME_DEF_STMT (exp);
+ return NULL;
+}
+
+extern bool ssa_is_replaceable_p (gimple *stmt);
+extern void finish_out_of_ssa (struct ssaexpand *sa);
+extern unsigned int rewrite_out_of_ssa (struct ssaexpand *sa);
+extern void expand_phi_nodes (struct ssaexpand *sa);
+
+#endif /* GCC_TREE_OUTOF_SSA_H */
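A hedged sketch of how cfgexpand-style code is expected to consume the two helpers above: prefer the TER-forwarded defining statement when one exists, and fall back to the partition's RTL otherwise. expand_def is hypothetical.

/* Sketch only: NAME is an SSA name being expanded.  */
static rtx
expand_ssa_name (tree name)
{
  if (gimple *def = get_gimple_for_ssa_name (name))
    return expand_def (def);  /* hypothetical: expand DEF in place */
  return get_rtx_for_ssa_name (name);
}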
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-parloops.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-parloops.h
new file mode 100644
index 0000000..008cff9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-parloops.h
@@ -0,0 +1,25 @@
+/* Header file for loop autoparallelization.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_PARLOOPS_H
+#define GCC_TREE_PARLOOPS_H
+
+extern bool parallelized_function_p (tree);
+
+#endif /* GCC_TREE_PARLOOPS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-pass.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-pass.h
new file mode 100644
index 0000000..6cdaed7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-pass.h
@@ -0,0 +1,685 @@
+/* Definitions for describing one tree-ssa optimization pass.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_TREE_PASS_H
+#define GCC_TREE_PASS_H 1
+
+#include "timevar.h"
+#include "dumpfile.h"
+
+struct function;
+
+/* Optimization pass type. */
+enum opt_pass_type
+{
+ GIMPLE_PASS,
+ RTL_PASS,
+ SIMPLE_IPA_PASS,
+ IPA_PASS
+};
+
+/* Metadata for a pass, non-varying across all instances of a pass. */
+struct pass_data
+{
+ /* Optimization pass type. */
+ enum opt_pass_type type;
+
+ /* Terse name of the pass used as a fragment of the dump file
+ name. If the name starts with a star, no dump happens. */
+ const char *name;
+
+ /* The -fopt-info optimization group flags as defined in dumpfile.h. */
+ optgroup_flags_t optinfo_flags;
+
+ /* The timevar id associated with this pass. */
+ /* ??? Ideally would be dynamically assigned. */
+ timevar_id_t tv_id;
+
+ /* Sets of properties input and output from this pass. */
+ unsigned int properties_required;
+ unsigned int properties_provided;
+ unsigned int properties_destroyed;
+
+ /* Flags indicating common sets of things to do before and after. */
+ unsigned int todo_flags_start;
+ unsigned int todo_flags_finish;
+};
+
+namespace gcc
+{
+ class context;
+} // namespace gcc
+
+/* An instance of a pass. This is also "pass_data" to minimize the
+ changes in existing code. */
+class opt_pass : public pass_data
+{
+public:
+ virtual ~opt_pass () { }
+
+ /* Create a copy of this pass.
+
+ Passes that can have multiple instances must provide their own
+ implementation of this, to ensure that any sharing of state between
+ this instance and the copy is "wired up" correctly.
+
+ The default implementation prints an error message and aborts. */
+ virtual opt_pass *clone ();
+ virtual void set_pass_param (unsigned int, bool);
+
+ /* This pass and all sub-passes are executed only if the function returns
+ true. The default implementation returns true. */
+ virtual bool gate (function *fun);
+
+ /* This is the code to run. If this is not overridden, then there should
+ be sub-passes; otherwise this pass does nothing.
+ The return value contains TODOs to execute in addition to those in
+ TODO_flags_finish. */
+ virtual unsigned int execute (function *fun);
+
+protected:
+ opt_pass (const pass_data&, gcc::context *);
+
+public:
+ /* A list of sub-passes to run, dependent on the gate predicate. */
+ opt_pass *sub;
+
+ /* Next in the list of passes to run, independent of the gate predicate. */
+ opt_pass *next;
+
+ /* Static pass number, used as a fragment of the dump file name. */
+ int static_pass_number;
+
+protected:
+ gcc::context *m_ctxt;
+};
+
+/* Description of GIMPLE pass. */
+class gimple_opt_pass : public opt_pass
+{
+protected:
+ gimple_opt_pass (const pass_data& data, gcc::context *ctxt)
+ : opt_pass (data, ctxt)
+ {
+ }
+};
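To ground the hierarchy above, a hedged sketch of how a minimal GIMPLE pass is conventionally declared against this interface; pass_data_example, pass_example, and make_pass_example are illustrative names, not part of this header.

/* Sketch only.  */
const pass_data pass_data_example =
{
  GIMPLE_PASS,          /* type */
  "example",            /* name; also the dump-file fragment */
  OPTGROUP_NONE,        /* optinfo_flags */
  TV_NONE,              /* tv_id */
  PROP_cfg | PROP_ssa,  /* properties_required */
  0,                    /* properties_provided */
  0,                    /* properties_destroyed */
  0,                    /* todo_flags_start */
  0,                    /* todo_flags_finish */
};

class pass_example : public gimple_opt_pass
{
public:
  pass_example (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_example, ctxt) {}
  bool gate (function *) final override { return true; }
  unsigned int execute (function *) final override
  {
    /* Do the work; return extra TODO_* flags, or 0.  */
    return 0;
  }
};

gimple_opt_pass *
make_pass_example (gcc::context *ctxt)
{
  return new pass_example (ctxt);
}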
+
+/* Description of RTL pass. */
+class rtl_opt_pass : public opt_pass
+{
+protected:
+ rtl_opt_pass (const pass_data& data, gcc::context *ctxt)
+ : opt_pass (data, ctxt)
+ {
+ }
+};
+
+struct varpool_node;
+struct cgraph_node;
+struct lto_symtab_encoder_d;
+
+/* Description of IPA pass with generate summary, write, execute, read and
+ transform stages. */
+class ipa_opt_pass_d : public opt_pass
+{
+public:
+ /* IPA passes can analyze function bodies and variable initializers
+ using this hook and produce a summary. */
+ void (*generate_summary) (void);
+
+ /* This hook is used to serialize IPA summaries on disk. */
+ void (*write_summary) (void);
+
+ /* This hook is used to deserialize IPA summaries from disk. */
+ void (*read_summary) (void);
+
+ /* This hook is used to serialize IPA optimization summaries on disk. */
+ void (*write_optimization_summary) (void);
+
+ /* This hook is used to deserialize IPA summaries from disk. */
+ void (*read_optimization_summary) (void);
+
+ /* Hook to convert gimple stmt uids into true gimple statements. The second
+ parameter is an array of statements indexed by their uid. */
+ void (*stmt_fixup) (struct cgraph_node *, gimple **);
+
+ /* The results of an IPA pass's interprocedural propagation are
+ applied to the function body via this hook. */
+ unsigned int function_transform_todo_flags_start;
+ unsigned int (*function_transform) (struct cgraph_node *);
+ void (*variable_transform) (varpool_node *);
+
+protected:
+ ipa_opt_pass_d (const pass_data& data, gcc::context *ctxt,
+ void (*generate_summary) (void),
+ void (*write_summary) (void),
+ void (*read_summary) (void),
+ void (*write_optimization_summary) (void),
+ void (*read_optimization_summary) (void),
+ void (*stmt_fixup) (struct cgraph_node *, gimple **),
+ unsigned int function_transform_todo_flags_start,
+ unsigned int (*function_transform) (struct cgraph_node *),
+ void (*variable_transform) (varpool_node *))
+ : opt_pass (data, ctxt),
+ generate_summary (generate_summary),
+ write_summary (write_summary),
+ read_summary (read_summary),
+ write_optimization_summary (write_optimization_summary),
+ read_optimization_summary (read_optimization_summary),
+ stmt_fixup (stmt_fixup),
+ function_transform_todo_flags_start (function_transform_todo_flags_start),
+ function_transform (function_transform),
+ variable_transform (variable_transform)
+ {
+ }
+};
+
+/* Description of simple IPA pass. Simple IPA passes have just one execute
+ hook. */
+class simple_ipa_opt_pass : public opt_pass
+{
+protected:
+ simple_ipa_opt_pass (const pass_data& data, gcc::context *ctxt)
+ : opt_pass (data, ctxt)
+ {
+ }
+};
+
+/* Pass properties. */
+#define PROP_gimple_any (1 << 0) /* entire gimple grammar */
+#define PROP_gimple_lcf (1 << 1) /* lowered control flow */
+#define PROP_gimple_leh (1 << 2) /* lowered eh */
+#define PROP_cfg (1 << 3)
+#define PROP_objsz (1 << 4) /* object sizes computed */
+#define PROP_ssa (1 << 5)
+#define PROP_no_crit_edges (1 << 6)
+#define PROP_rtl (1 << 7)
+#define PROP_gimple_lomp (1 << 8) /* lowered OpenMP directives */
+#define PROP_cfglayout (1 << 9) /* cfglayout mode on RTL */
+#define PROP_gimple_lcx (1 << 10) /* lowered complex */
+#define PROP_loops (1 << 11) /* preserve loop structures */
+#define PROP_gimple_lvec (1 << 12) /* lowered vector */
+#define PROP_gimple_eomp (1 << 13) /* no OpenMP directives */
+#define PROP_gimple_lva (1 << 14) /* No va_arg internal function. */
+#define PROP_gimple_opt_math (1 << 15) /* Disable canonicalization
+ of math functions; the
+ current choices have
+ been optimized. */
+#define PROP_gimple_lomp_dev (1 << 16) /* done omp_device_lower */
+#define PROP_rtl_split_insns (1 << 17) /* RTL has insns split. */
+#define PROP_loop_opts_done (1 << 18) /* SSA loop optimizations
+ have completed. */
+#define PROP_assumptions_done (1 << 19) /* Assume function kept
+ around. */
+
+#define PROP_gimple \
+ (PROP_gimple_any | PROP_gimple_lcf | PROP_gimple_leh | PROP_gimple_lomp)
+
+/* To-do flags. */
+#define TODO_do_not_ggc_collect (1 << 1)
+#define TODO_cleanup_cfg (1 << 5)
+#define TODO_verify_il (1 << 6)
+#define TODO_dump_symtab (1 << 7)
+#define TODO_remove_functions (1 << 8)
+#define TODO_rebuild_frequencies (1 << 9)
+
+/* To-do flags for calls to update_ssa. */
+
+/* Update the SSA form inserting PHI nodes for newly exposed symbols
+ and virtual names marked for updating. When updating real names,
+ only insert PHI nodes for a real name O_j in blocks reached by all
+ the new and old definitions for O_j. If the iterated dominance
+ frontier for O_j is not pruned, we may end up inserting PHI nodes
+ in blocks that have one or more edges with no incoming definition
+ for O_j. This would lead to uninitialized warnings for O_j's
+ symbol. */
+#define TODO_update_ssa (1 << 11)
+
+/* Update the SSA form without inserting any new PHI nodes at all.
+ This is used by passes that have either inserted all the PHI nodes
+ themselves or passes that need only to patch use-def and def-def
+ chains for virtuals (e.g., DCE). */
+#define TODO_update_ssa_no_phi (1 << 12)
+
+/* Insert PHI nodes everywhere they are needed. No pruning of the
+ IDF is done. This is used by passes that need the PHI nodes for
+ O_j even if it means that some arguments will come from the default
+ definition of O_j's symbol.
+
+ WARNING: If you need to use this flag, chances are that your pass
+ may be doing something wrong. Inserting PHI nodes for an old name
+ where not all edges carry a new replacement may lead to silent
+ codegen errors or spurious uninitialized warnings. */
+#define TODO_update_ssa_full_phi (1 << 13)
+
+/* Passes that update the SSA form on their own may want to delegate
+ the updating of virtual names to the generic updater. Since FUD
+ chains are easier to maintain, this simplifies the work they need
+ to do. NOTE: If this flag is used, any OLD->NEW mappings for real
+ names are explicitly destroyed and only the symbols marked for
+ renaming are processed. */
+#define TODO_update_ssa_only_virtuals (1 << 14)
+
+/* Some passes leave unused local variables that can be removed from
+ cfun->local_decls. This reduces the size of dump files
+ and the memory footprint for VAR_DECLs. */
+#define TODO_remove_unused_locals (1 << 15)
+
+/* Call df_finish at the end of the pass. This is done after all of
+ the dumpers have been allowed to run so that they have access to
+ the instance before it is destroyed. */
+#define TODO_df_finish (1 << 17)
+
+/* Call df_verify at the end of the pass if checking is enabled. */
+#define TODO_df_verify (1 << 18)
+
+/* Internally used for the first instance of a pass. */
+#define TODO_mark_first_instance (1 << 19)
+
+/* Rebuild aliasing info. */
+#define TODO_rebuild_alias (1 << 20)
+
+/* Rebuild the addressable-vars bitmap and do register promotion. */
+#define TODO_update_address_taken (1 << 21)
+
+/* Rebuild the callgraph edges. */
+#define TODO_rebuild_cgraph_edges (1 << 22)
+
+/* Release function body (unless assumption function)
+ and stop pass manager. */
+#define TODO_discard_function (1 << 23)
+
+/* Internally used in execute_function_todo(). */
+#define TODO_update_ssa_any \
+ (TODO_update_ssa \
+ | TODO_update_ssa_no_phi \
+ | TODO_update_ssa_full_phi \
+ | TODO_update_ssa_only_virtuals)
+
+#define TODO_verify_all TODO_verify_il
+
+/* To-do flags for pending_TODOs. */
+
+/* Tell the next scalar cleanup pass that there is
+ work for it to do. */
+#define PENDING_TODO_force_next_scalar_cleanup (1 << 1)
+
+/* Register pass info. */
+
+enum pass_positioning_ops
+{
+ PASS_POS_INSERT_AFTER, /* Insert after the reference pass. */
+ PASS_POS_INSERT_BEFORE, /* Insert before the reference pass. */
+ PASS_POS_REPLACE /* Replace the reference pass. */
+};
+
+struct register_pass_info
+{
+ opt_pass *pass; /* New pass to register. */
+ const char *reference_pass_name; /* Name of the reference pass for hooking
+ up the new pass. */
+ int ref_pass_instance_number; /* Insert the pass at the specified
+ instance number of the reference pass.
+ Do it for every instance if it is 0. */
+ enum pass_positioning_ops pos_op; /* How to insert the new pass. */
+};
+
+/* Registers a new pass. Either fill out the register_pass_info or specify
+ the individual parameters. The pass object is expected to have been
+ allocated using operator new; the pass manager takes ownership of
+ the pass object. */
+extern void register_pass (register_pass_info *);
+extern void register_pass (opt_pass* pass, pass_positioning_ops pos,
+ const char* ref_pass_name, int ref_pass_inst_number);
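And a hedged sketch of registering such a pass from a plugin, using the struct and overload above; the reference pass name "ssa" and the global context pointer g are assumptions about the host compiler, and pass_example is the illustrative class sketched earlier.

/* Sketch only: typically run from a plugin's plugin_init.  */
struct register_pass_info info
  = { new pass_example (g), "ssa", 1, PASS_POS_INSERT_AFTER };
register_pass (&info);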
+
+extern gimple_opt_pass *make_pass_asan (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_asan_O0 (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tsan (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tsan_O0 (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_sancov (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_sancov_O0 (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_cf (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_refactor_eh (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_eh (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_eh_dispatch (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_resx (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_build_cfg (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_early_tree_profile (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_cleanup_eh (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_sra (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_sra_early (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tail_recursion (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tail_calls (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_fix_loops (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tree_loop (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tree_no_loop (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tree_loop_init (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_loop_versioning (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lim (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_linterchange (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tree_unswitch (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_loop_split (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_loop_jam (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_predcom (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_iv_canon (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_scev_cprop (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_empty_loop (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_graphite (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_graphite_transforms (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_if_conversion (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_if_to_switch (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_loop_distribution (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_vectorize (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_simduid_cleanup (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_slp_vectorize (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_complete_unroll (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_complete_unrolli (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_pre_slp_scalar_cleanup (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_parallelize_loops (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_loop_prefetch (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_iv_optimize (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tree_loop_done (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_ch (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_ch_vect (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_ccp (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_split_paths (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_build_ssa (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_build_alias (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_build_ealias (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_dominator (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_dce (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_cd_dce (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_call_cdce (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_merge_phi (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_thread_jumps (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_thread_jumps_full (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_early_thread_jumps (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_split_crit_edges (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_laddress (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_pre (gcc::context *ctxt);
+extern unsigned int tail_merge_optimize (bool);
+extern gimple_opt_pass *make_pass_profile (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_strip_predict_hints (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_complex_O0 (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_complex (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_switch (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_switch_O0 (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_vector (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_vector_ssa (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_omp_oacc_kernels_decompose (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_omp (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_diagnose_omp_blocks (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_expand_omp (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_expand_omp_ssa (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_omp_target_link (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_oacc_loop_designation (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_omp_oacc_neuter_broadcast (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_oacc_device_lower (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_omp_device_lower (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_object_sizes (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_early_object_sizes (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_warn_access (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_warn_printf (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_warn_recursion (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_strlen (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_fold_builtins (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_post_ipa_warn (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_stdarg (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_early_warn_uninitialized (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_late_warn_uninitialized (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_cse_reciprocals (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_cse_sincos (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_expand_powcabs (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_optimize_bswap (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_store_merging (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_optimize_widening_mul (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_warn_function_return (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_warn_function_noreturn (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_cselim (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_phiopt (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_forwprop (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_phiprop (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tree_ifcombine (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_dse (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_nrv (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_rename_ssa_copies (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_sink_code (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_fre (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_check_data_deps (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_copy_prop (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_isolate_erroneous_paths (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_early_vrp (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_vrp (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_assumptions (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_uncprop (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_return_slot (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_reassoc (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_rebuild_cgraph_edges (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_remove_cgraph_callee_edges (gcc::context
+ *ctxt);
+extern gimple_opt_pass *make_pass_build_cgraph_edges (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_local_pure_const (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_nothrow (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tracer (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_warn_restrict (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_warn_unused_result (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_diagnose_tm_blocks (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_tm (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tm_init (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tm_mark (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tm_memopt (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_tm_edges (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_split_functions (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_feedback_split_functions (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_strength_reduction (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_vtable_verify (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_ubsan (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_sanopt (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_oacc_kernels (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_oacc (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_oacc_kernels (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_warn_nonnull_compare (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_sprintf_length (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_walloca (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_modref (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_coroutine_lower_builtins (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_coroutine_early_expand_ifns (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_adjust_alignment (gcc::context *ctxt);
+
+/* IPA Passes */
+extern simple_ipa_opt_pass *make_pass_ipa_lower_emutls (gcc::context *ctxt);
+extern simple_ipa_opt_pass
+ *make_pass_ipa_function_and_variable_visibility (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_tree_profile (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_auto_profile (gcc::context *ctxt);
+
+extern simple_ipa_opt_pass *make_pass_build_ssa_passes (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_local_optimization_passes (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_remove_symbols (gcc::context *ctxt);
+
+extern ipa_opt_pass_d *make_pass_analyzer (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_whole_program_visibility (gcc::context
+ *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_increase_alignment (gcc::context
+ *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_fn_summary (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_inline (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_free_lang_data (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_free_fn_summary (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_cp (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_sra (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_icf (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_devirt (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_odr (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_reference (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_pure_const (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_pta (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_tm (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_target_clone (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_dispatcher_calls (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_omp_simd_clone (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_profile (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_cdtor_merge (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_single_use (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_comdats (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_ipa_modref (gcc::context *ctxt);
+
+extern gimple_opt_pass *make_pass_cleanup_cfg_post_optimizing (gcc::context
+ *ctxt);
+extern gimple_opt_pass *make_pass_fixup_cfg (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_backprop (gcc::context *ctxt);
+
+extern rtl_opt_pass *make_pass_expand (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_instantiate_virtual_regs (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_fwprop (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_fwprop_addr (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_jump (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_jump2 (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_lower_subreg (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_cse (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_fast_rtl_dce (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_ud_rtl_dce (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_dce (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_dse1 (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_dse2 (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_dse3 (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_cprop (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_pre (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_hoist (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_store_motion (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_cse_after_global_opts (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_ifcvt (gcc::context *ctxt);
+
+extern rtl_opt_pass *make_pass_into_cfg_layout_mode (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_outof_cfg_layout_mode (gcc::context *ctxt);
+
+extern rtl_opt_pass *make_pass_loop2 (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_loop_init (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_move_loop_invariants (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_unroll_loops (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_doloop (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_loop_done (gcc::context *ctxt);
+
+extern rtl_opt_pass *make_pass_lower_subreg2 (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_web (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_cse2 (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_df_initialize_opt (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_df_initialize_no_opt (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_reginfo_init (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_inc_dec (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_stack_ptr_mod (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_initialize_regs (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_combine (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_if_after_combine (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_jump_after_combine (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_ree (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_partition_blocks (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_match_asm_constraints (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_split_all_insns (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_fast_rtl_byte_dce (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_lower_subreg3 (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_mode_switching (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_sms (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_sched (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_live_range_shrinkage (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_early_remat (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_ira (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_reload (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_clean_state (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_branch_prob (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_value_profile_transformations (gcc::context
+ *ctxt);
+extern rtl_opt_pass *make_pass_postreload_cse (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_gcse2 (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_split_after_reload (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_thread_prologue_and_epilogue (gcc::context
+ *ctxt);
+extern rtl_opt_pass *make_pass_zero_call_used_regs (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_stack_adjustments (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_sched_fusion (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_peephole2 (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_if_after_reload (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_regrename (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_cprop_hardreg (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_reorder_blocks (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_leaf_regs (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_split_before_sched2 (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_compare_elim_after_reload (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_sched2 (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_stack_regs (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_stack_regs_run (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_df_finish (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_compute_alignments (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_duplicate_computed_gotos (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_variable_tracking (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_free_cfg (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_machine_reorg (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_cleanup_barriers (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_delay_slots (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_split_for_shorten_branches (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_split_before_regstack (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_convert_to_eh_region_ranges (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_shorten_branches (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_set_nothrow_function_flags (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_dwarf2_frame (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_final (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_rtl_seqabstr (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_release_ssa_names (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_early_inline (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_local_fn_summary (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_update_address_taken (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_convert_switch (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_lower_vaarg (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_gimple_isel (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_harden_compares (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_harden_conditional_branches (gcc::context
+ *ctxt);
+
+/* Current optimization pass. */
+extern opt_pass *current_pass;
+
+extern bool execute_one_pass (opt_pass *);
+extern void execute_pass_list (function *, opt_pass *);
+extern void execute_ipa_pass_list (opt_pass *);
+extern void execute_ipa_summary_passes (ipa_opt_pass_d *);
+extern void execute_all_ipa_transforms (bool);
+extern void execute_all_ipa_stmt_fixups (struct cgraph_node *, gimple **);
+extern bool pass_init_dump_file (opt_pass *);
+extern void pass_fini_dump_file (opt_pass *);
+extern void emergency_dump_function (void);
+
+extern void print_current_pass (FILE *);
+extern void debug_pass (void);
+extern void ipa_write_summaries (void);
+extern void ipa_write_optimization_summaries (struct lto_symtab_encoder_d *);
+extern void ipa_read_summaries (void);
+extern void ipa_read_optimization_summaries (void);
+extern void register_one_dump_file (opt_pass *);
+extern bool function_called_by_processed_nodes_p (void);
+
+/* Declare for plugins. */
+extern void do_per_function_toporder (void (*) (function *, void *), void *);
+
+extern void disable_pass (const char *);
+extern void enable_pass (const char *);
+extern void dump_passes (void);
+
+#endif /* GCC_TREE_PASS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-phinodes.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-phinodes.h
new file mode 100644
index 0000000..932a461
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-phinodes.h
@@ -0,0 +1,68 @@
+/* Header file for PHI node routines
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_PHINODES_H
+#define GCC_TREE_PHINODES_H
+
+extern void phinodes_print_statistics (void);
+extern void reserve_phi_args_for_new_edge (basic_block);
+extern void add_phi_node_to_bb (gphi *phi, basic_block bb);
+extern gphi *create_phi_node (tree, basic_block);
+extern void add_phi_arg (gphi *, tree, edge, location_t);
+extern void remove_phi_args (edge);
+extern void remove_phi_node (gimple_stmt_iterator *, bool);
+extern void remove_phi_nodes (basic_block);
+extern tree degenerate_phi_result (gphi *);
+extern void set_phi_nodes (basic_block, gimple_seq);
+
+inline use_operand_p
+gimple_phi_arg_imm_use_ptr (gimple *gs, int i)
+{
+ return &gimple_phi_arg (gs, i)->imm_use;
+}
+
+/* Return the index of the PHI argument containing the specified use. */
+
+inline int
+phi_arg_index_from_use (use_operand_p use)
+{
+ struct phi_arg_d *element, *root;
+ size_t index;
+ gimple *phi;
+
+ /* Since the use is the first field in a PHI argument element, we can
+ calculate its index by casting it to an argument element and
+ performing pointer arithmetic. */
+
+ phi = USE_STMT (use);
+
+ element = (struct phi_arg_d *)use;
+ root = gimple_phi_arg (phi, 0);
+ index = element - root;
+
+ /* Make sure the calculation doesn't have any leftover bytes. If it does,
+ then imm_use is likely not the first element in phi_arg_d. */
+ gcc_checking_assert ((((char *)element - (char *)root)
+ % sizeof (struct phi_arg_d)) == 0
+ && index < gimple_phi_capacity (phi));
+
+ return index;
+}
+
+#endif /* GCC_TREE_PHINODES_H */
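A hedged sketch of the creation API above: build a PHI node for RESULT in BB and supply one argument per incoming edge. value_on_edge is a hypothetical helper; FOR_EACH_EDGE and UNKNOWN_LOCATION are assumed from the CFG and location machinery.

/* Sketch only.  */
static void
build_phi (tree result, basic_block bb)
{
  gphi *phi = create_phi_node (result, bb);
  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, bb->preds)
    add_phi_arg (phi, value_on_edge (e), e, UNKNOWN_LOCATION);
}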
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-pretty-print.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-pretty-print.h
new file mode 100644
index 0000000..681384a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-pretty-print.h
@@ -0,0 +1,60 @@
+/* Various declarations for language-independent pretty-print
+ subroutines that are only for use in the compilers proper and not
+ the driver or other programs.
+ Copyright (C) 2002-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_PRETTY_PRINT_H
+#define GCC_TREE_PRETTY_PRINT_H
+
+#include "pretty-print.h"
+
+#define pp_unsupported_tree(PP, T) \
+ pp_verbatim (PP, "%qs not supported by %s", \
+ get_tree_code_name (TREE_CODE (T)), __FUNCTION__)
+
+#define pp_ti_abstract_origin(TI) ((tree *) (TI)->x_data)
+
+
+extern void debug_generic_expr (tree);
+extern void debug_generic_stmt (tree);
+extern void debug_tree_chain (tree);
+extern void print_generic_decl (FILE *, tree, dump_flags_t);
+extern void print_generic_stmt (FILE *, tree, dump_flags_t = TDF_NONE);
+extern void print_generic_stmt_indented (FILE *, tree, dump_flags_t, int);
+extern void print_generic_expr (FILE *, tree, dump_flags_t = TDF_NONE);
+extern char *print_generic_expr_to_str (tree);
+extern void dump_omp_clauses (pretty_printer *, tree, int, dump_flags_t,
+ bool = true);
+extern void dump_omp_atomic_memory_order (pretty_printer *,
+ enum omp_memory_order);
+extern void dump_omp_loop_non_rect_expr (pretty_printer *, tree, int,
+ dump_flags_t);
+extern int dump_generic_node (pretty_printer *, tree, int, dump_flags_t, bool);
+extern void print_declaration (pretty_printer *, tree, int, dump_flags_t);
+extern int op_code_prio (enum tree_code);
+extern int op_prio (const_tree);
+extern const char *op_symbol_code (enum tree_code);
+extern void pretty_print_string (pretty_printer *, const char *, size_t);
+extern void print_call_name (pretty_printer *, tree, dump_flags_t);
+extern void pp_tree_identifier (pretty_printer *, tree);
+extern void dump_function_header (FILE *, tree, dump_flags_t);
+extern void pp_double_int (pretty_printer *pp, double_int d, bool uns);
+extern void dump_location (pretty_printer *buffer, location_t loc);
+
+#endif /* ! GCC_TREE_PRETTY_PRINT_H */
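A hedged sketch of the most common debugging use of these declarations: printing a GENERIC expression to stderr. dump_expr is an illustrative helper; TDF_SLIM is assumed from dumpfile.h.

/* Sketch only.  */
static void
dump_expr (tree t)
{
  print_generic_expr (stderr, t, TDF_SLIM);
  fputc ('\n', stderr);
}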
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-scalar-evolution.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-scalar-evolution.h
new file mode 100644
index 0000000..c58a8a1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-scalar-evolution.h
@@ -0,0 +1,74 @@
+/* Scalar evolution detector.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Sebastian Pop <s.pop@laposte.net>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SCALAR_EVOLUTION_H
+#define GCC_TREE_SCALAR_EVOLUTION_H
+
+extern tree number_of_latch_executions (class loop *);
+extern gcond *get_loop_exit_condition (const class loop *);
+
+extern void scev_initialize (void);
+extern bool scev_initialized_p (void);
+extern void scev_reset (void);
+extern void scev_reset_htab (void);
+extern void scev_finalize (void);
+extern tree analyze_scalar_evolution (class loop *, tree);
+extern tree instantiate_scev (edge, class loop *, tree);
+extern tree resolve_mixers (class loop *, tree, bool *);
+extern void gather_stats_on_scev_database (void);
+extern bool final_value_replacement_loop (class loop *);
+extern unsigned int scev_const_prop (void);
+extern bool expression_expensive_p (tree);
+extern bool simple_iv_with_niters (class loop *, class loop *, tree,
+ struct affine_iv *, tree *, bool);
+extern bool simple_iv (class loop *, class loop *, tree, struct affine_iv *,
+ bool);
+extern bool iv_can_overflow_p (class loop *, tree, tree, tree);
+extern tree compute_overall_effect_of_inner_loop (class loop *, tree);
+
+/* Returns the basic block preceding LOOP, or the CFG entry block when
+ the loop is the function's body. */
+
+inline basic_block
+block_before_loop (loop_p loop)
+{
+ edge preheader = loop_preheader_edge (loop);
+ return (preheader ? preheader->src : ENTRY_BLOCK_PTR_FOR_FN (cfun));
+}
+
+/* Analyze all the parameters of the chrec that were left under a
+ symbolic form. LOOP is the loop in which symbolic names have to
+ be analyzed and instantiated. */
+
+inline tree
+instantiate_parameters (class loop *loop, tree chrec)
+{
+ return instantiate_scev (loop_preheader_edge (loop), loop, chrec);
+}
+
+/* Returns the loop of the polynomial chrec CHREC. */
+
+inline class loop *
+get_chrec_loop (const_tree chrec)
+{
+ return get_loop (cfun, CHREC_VARIABLE (chrec));
+}
+
+#endif /* GCC_TREE_SCALAR_EVOLUTION_H */
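A hedged sketch of the usual query sequence: initialize SCEV, ask for the evolution of an SSA name within LOOP, then instantiate remaining symbolic parameters with the inline helper above. loop and name are assumed to be a live loop and an SSA name used inside it.

/* Sketch only.  */
scev_initialize ();
tree ev = analyze_scalar_evolution (loop, name);
ev = instantiate_parameters (loop, ev);
/* A polynomial result has the chrec form {base, +, step}_loopnum.  */
scev_finalize ();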
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-sra.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-sra.h
new file mode 100644
index 0000000..f20266c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-sra.h
@@ -0,0 +1,31 @@
+/* Scalar Replacement of Aggregates (SRA) converts some structure
+ references into scalar references, exposing them to the scalar
+ optimizers.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+bool type_internals_preclude_sra_p (tree type, const char **msg);
+
+/* Return true iff TYPE is the stdarg va_list type (which early SRA and
+ IPA-SRA should leave alone). */
+
+inline bool
+is_va_list_type (tree type)
+{
+ return TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (va_list_type_node);
+}
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-address.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-address.h
new file mode 100644
index 0000000..2eadbde
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-address.h
@@ -0,0 +1,45 @@
+/* Header file for memory address lowering and mode selection.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_ADDRESS_H
+#define GCC_TREE_SSA_ADDRESS_H
+
+/* Description of a memory address. */
+
+struct mem_address
+{
+ tree symbol, base, index, step, offset;
+};
+
+extern rtx addr_for_mem_ref (struct mem_address *, addr_space_t, bool);
+extern rtx addr_for_mem_ref (tree exp, addr_space_t as, bool really_expand);
+extern void get_address_description (tree, struct mem_address *);
+extern tree tree_mem_ref_addr (tree, tree);
+extern bool valid_mem_ref_p (machine_mode, addr_space_t, struct mem_address *);
+extern void move_fixed_address_to_symbol (struct mem_address *,
+ class aff_tree *);
+tree create_mem_ref (gimple_stmt_iterator *, tree,
+ class aff_tree *, tree, tree, tree, bool);
+extern void copy_ref_info (tree, tree);
+tree maybe_fold_tmr (tree);
+
+extern unsigned int preferred_mem_scale_factor (tree base,
+ machine_mode mem_mode,
+ bool speed);
+#endif /* GCC_TREE_SSA_ADDRESS_H */
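A hedged sketch of the decomposition the struct above describes: the reference denotes &symbol + base + index * step + offset, and get_address_description splits a TARGET_MEM_REF into those five trees.

/* Sketch only: TMR is a TARGET_MEM_REF tree.  */
struct mem_address parts;
get_address_description (tmr, &parts);
/* parts.symbol, parts.base, parts.index, parts.step and parts.offset
   now hold the components, NULL_TREE where a component is absent.  */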
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-alias-compare.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-alias-compare.h
new file mode 100644
index 0000000..ec87260
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-alias-compare.h
@@ -0,0 +1,43 @@
+/* Comparison of AO refs.
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef TREE_SSA_ALIAS_COMPARE_H
+#define TREE_SSA_ALIAS_COMPARE_H
+
+class operand_compare;
+/* A class that compares and hashes ao_refs when checking a pair of
+ semantic function candidates for equivalence. */
+class ao_compare : public operand_compare
+{
+ public:
+ enum ao_ref_diff
+ {
+ SEMANTICS = 1,
+ BASE_ALIAS_SET = 2,
+ REF_ALIAS_SET = 4,
+ ACCESS_PATH = 8,
+ DEPENDENCE_CLIQUE = 16
+ };
+ int compare_ao_refs (ao_ref *ref1, ao_ref *ref2, bool lto_streaming_safe,
+ bool tbaa);
+ void hash_ao_ref (ao_ref *ref, bool lto_streaming_safe, bool tbaa,
+ inchash::hash &hstate);
+};
+
+#endif
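A hedged sketch of consuming the bitmask result: compare two initialized ao_refs and inspect which aspects differ; a zero result is assumed to mean the references are equivalent under the requested settings.

/* Sketch only: R1/R2 are initialized ao_refs.  */
ao_compare cmp;
int diff = cmp.compare_ao_refs (&r1, &r2, /*lto_streaming_safe=*/true,
                                /*tbaa=*/true);
if (diff & ao_compare::ACCESS_PATH)
  {
    /* The access paths differ.  */
  }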
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-alias.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-alias.h
new file mode 100644
index 0000000..dce2732
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-alias.h
@@ -0,0 +1,211 @@
+/* Tree based alias analysis and alias oracle.
+ Copyright (C) 2008-2023 Free Software Foundation, Inc.
+ Contributed by Richard Guenther <rguenther@suse.de>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef TREE_SSA_ALIAS_H
+#define TREE_SSA_ALIAS_H
+
+/* The points-to solution.
+
+ The points-to solution is a union of pt_vars and the abstract
+ sets specified by the flags. */
+struct GTY(()) pt_solution
+{
+ /* Nonzero if points-to analysis couldn't determine where this pointer
+ is pointing to. */
+ unsigned int anything : 1;
+
+ /* Nonzero if the points-to set includes any global memory. Note that
+ even if this is zero pt_vars can still include global variables. */
+ unsigned int nonlocal : 1;
+
+ /* Nonzero if the points-to set includes the local escaped solution by
+ reference. */
+ unsigned int escaped : 1;
+
+ /* Nonzero if the points-to set includes the IPA escaped solution by
+ reference. */
+ unsigned int ipa_escaped : 1;
+
+ /* Nonzero if the points-to set includes 'nothing', i.e. the points-to
+ set includes memory at address NULL. */
+ unsigned int null : 1;
+
+ /* Nonzero if the vars bitmap includes a variable included in 'nonlocal'. */
+ unsigned int vars_contains_nonlocal : 1;
+ /* Nonzero if the vars bitmap includes a variable included in 'escaped'. */
+ unsigned int vars_contains_escaped : 1;
+ /* Nonzero if the vars bitmap includes an anonymous heap variable that
+ escaped the function and thus became global. */
+ unsigned int vars_contains_escaped_heap : 1;
+ /* Nonzero if the vars bitmap includes an anonymous variable used to
+ represent storage pointed to by a restrict qualified pointer. */
+ unsigned int vars_contains_restrict : 1;
+ /* Nonzero if the vars bitmap includes an interposable variable. */
+ unsigned int vars_contains_interposable : 1;
+
+ /* Set of variables that this pointer may point to. */
+ bitmap vars;
+};
+
+
+/* Simplified and cached information about a memory reference tree.
+ Used by the alias-oracle internally and externally in alternate
+ interfaces. */
+class ao_ref
+{
+public:
+ /* The original full memory reference tree or NULL_TREE if that is
+ not available. */
+ tree ref;
+
+ /* The following fields are the decomposed reference as returned
+ by get_ref_base_and_extent. */
+ /* The base object of the memory reference or NULL_TREE if all of
+ the following fields are not yet computed. */
+ tree base;
+ /* The offset relative to the base. */
+ poly_int64 offset;
+ /* The size of the access. */
+ poly_int64 size;
+ /* The maximum possible extent of the access or -1 if unconstrained. */
+ poly_int64 max_size;
+
+ /* The alias set of the access or -1 if not yet computed. */
+ alias_set_type ref_alias_set;
+
+ /* The alias set of the base object or -1 if not yet computed. */
+ alias_set_type base_alias_set;
+
+ /* Whether the memory is considered a volatile access. */
+ bool volatile_p;
+
+ bool max_size_known_p () const;
+};
+
+/* Return true if the maximum size is known, rather than the special -1
+ marker. */
+
+inline bool
+ao_ref::max_size_known_p () const
+{
+ return known_size_p (max_size);
+}
+
+/* In tree-ssa-alias.cc */
+extern void ao_ref_init (ao_ref *, tree);
+extern void ao_ref_init_from_ptr_and_size (ao_ref *, tree, tree);
+extern void ao_ref_init_from_ptr_and_range (ao_ref *, tree, bool,
+ poly_int64, poly_int64,
+ poly_int64);
+extern tree ao_ref_base (ao_ref *);
+extern alias_set_type ao_ref_alias_set (ao_ref *);
+extern alias_set_type ao_ref_base_alias_set (ao_ref *);
+extern tree ao_ref_alias_ptr_type (ao_ref *);
+extern tree ao_ref_base_alias_ptr_type (ao_ref *);
+extern bool ao_ref_alignment (ao_ref *, unsigned int *,
+ unsigned HOST_WIDE_INT *);
+extern bool ptr_deref_may_alias_global_p (tree, bool);
+extern bool ptr_derefs_may_alias_p (tree, tree);
+extern bool ptrs_compare_unequal (tree, tree);
+extern bool ref_may_alias_global_p (tree, bool);
+extern bool ref_may_alias_global_p (ao_ref *, bool);
+extern bool refs_may_alias_p (tree, tree, bool = true);
+extern bool refs_may_alias_p_1 (ao_ref *, ao_ref *, bool);
+extern bool refs_anti_dependent_p (tree, tree);
+extern bool refs_output_dependent_p (tree, tree);
+extern bool ref_maybe_used_by_stmt_p (gimple *, tree, bool = true);
+extern bool ref_maybe_used_by_stmt_p (gimple *, ao_ref *, bool = true);
+extern bool stmt_may_clobber_global_p (gimple *, bool);
+extern bool stmt_may_clobber_ref_p (gimple *, tree, bool = true);
+extern bool stmt_may_clobber_ref_p_1 (gimple *, ao_ref *, bool = true);
+extern bool call_may_clobber_ref_p (gcall *, tree, bool = true);
+extern bool call_may_clobber_ref_p_1 (gcall *, ao_ref *, bool = true);
+extern bool stmt_kills_ref_p (gimple *, tree);
+extern bool stmt_kills_ref_p (gimple *, ao_ref *);
+enum translate_flags
+ { TR_TRANSLATE, TR_VALUEIZE_AND_DISAMBIGUATE, TR_DISAMBIGUATE };
+extern tree get_continuation_for_phi (gimple *, ao_ref *, bool,
+ unsigned int &, bitmap *, bool,
+ void *(*)(ao_ref *, tree, void *,
+ translate_flags *),
+ void *, translate_flags
+ = TR_VALUEIZE_AND_DISAMBIGUATE);
+extern void *walk_non_aliased_vuses (ao_ref *, tree, bool,
+ void *(*)(ao_ref *, tree, void *),
+ void *(*)(ao_ref *, tree, void *,
+ translate_flags *),
+ tree (*)(tree), unsigned &, void *);
+extern int walk_aliased_vdefs (ao_ref *, tree,
+ bool (*)(ao_ref *, tree, void *),
+ void *, bitmap *,
+ bool *function_entry_reached = NULL,
+ unsigned int limit = 0);
+extern void dump_alias_info (FILE *);
+extern void debug_alias_info (void);
+extern void dump_points_to_solution (FILE *, struct pt_solution *);
+extern void debug (pt_solution &ref);
+extern void debug (pt_solution *ptr);
+extern void dump_points_to_info_for (FILE *, tree);
+extern void debug_points_to_info_for (tree);
+extern void dump_alias_stats (FILE *);
+
+
+/* In tree-ssa-structalias.cc */
+extern unsigned int compute_may_aliases (void);
+extern bool pt_solution_empty_p (const pt_solution *);
+extern bool pt_solution_singleton_or_null_p (struct pt_solution *, unsigned *);
+extern bool pt_solution_includes_global (struct pt_solution *, bool);
+extern bool pt_solution_includes (struct pt_solution *, const_tree);
+extern bool pt_solutions_intersect (struct pt_solution *, struct pt_solution *);
+extern void pt_solution_reset (struct pt_solution *);
+extern void pt_solution_set (struct pt_solution *, bitmap, bool);
+extern void pt_solution_set_var (struct pt_solution *, tree);
+
+extern void dump_pta_stats (FILE *);
+
+extern GTY(()) struct pt_solution ipa_escaped_pt;
+
+/* Return true if the two ranges [POS1, SIZE1] and [POS2, SIZE2]
+ overlap. SIZE1 and/or SIZE2 can be (unsigned)-1 in which case the
+ range is open-ended. Otherwise return false. */
+
+inline bool
+ranges_overlap_p (HOST_WIDE_INT pos1,
+ unsigned HOST_WIDE_INT size1,
+ HOST_WIDE_INT pos2,
+ unsigned HOST_WIDE_INT size2)
+{
+ if (size1 == 0 || size2 == 0)
+ return false;
+ if (pos1 >= pos2
+ && (size2 == (unsigned HOST_WIDE_INT)-1
+ || pos1 < (pos2 + (HOST_WIDE_INT) size2)))
+ return true;
+ if (pos2 >= pos1
+ && (size1 == (unsigned HOST_WIDE_INT)-1
+ || pos2 < (pos1 + (HOST_WIDE_INT) size1)))
+ return true;
+
+ return false;
+}
+
+
+
+#endif /* TREE_SSA_ALIAS_H */
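As a quick illustration of the oracle entry points above, a hedged sketch of a may-alias query; REF1 and REF2 stand in for reference trees pulled out of real statements:

/* Return true if REF1 and REF2 may access overlapping memory,
   with TBAA-based disambiguation enabled.  */
static bool
may_overlap_p (tree ref1, tree ref2)
{
  ao_ref r1, r2;
  ao_ref_init (&r1, ref1);  /* base/offset/size are computed lazily */
  ao_ref_init (&r2, ref2);
  return refs_may_alias_p_1 (&r1, &r2, /*tbaa_p=*/true);
}

Note also that in ranges_overlap_p a size of (unsigned)-1 marks an open-ended range, so ranges_overlap_p (0, -1, 100, 4) returns true.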
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-ccp.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-ccp.h
new file mode 100644
index 0000000..619e770
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-ccp.h
@@ -0,0 +1,29 @@
+/* Copyright (C) 2016-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef TREE_SSA_CCP_H
+#define TREE_SSA_CCP_H
+
+void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
+ signop, int, const widest_int &, const widest_int &,
+ signop, int, const widest_int &, const widest_int &);
+
+void bit_value_unop (enum tree_code, signop, int, widest_int *, widest_int *,
+ signop, int, const widest_int &, const widest_int &);
+
+#endif
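These are CCP's bit-level transfer functions. A hedged sketch of a call; the convention that set mask bits mean "unknown" follows the implementation in tree-ssa-ccp.cc, not anything stated in this header:

/* Known bits of a 32-bit unsigned addition: operand 1 is exactly 0x10,
   operand 2 has its low four bits unknown.  */
widest_int val, mask;
bit_value_binop (PLUS_EXPR, UNSIGNED, 32, &val, &mask,
                 UNSIGNED, 32, widest_int (0x10), widest_int (0),
                 UNSIGNED, 32, widest_int (0), widest_int (0xf));
/* On return, bits set in MASK are unknown; VAL carries the known bits.  */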
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-coalesce.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-coalesce.h
new file mode 100644
index 0000000..83462c4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-coalesce.h
@@ -0,0 +1,26 @@
+/* Header file for tree-ssa-coalesce.cc exports.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_COALESCE_H
+#define GCC_TREE_SSA_COALESCE_H
+
+extern void coalesce_ssa_name (var_map);
+extern bool gimple_can_coalesce_p (tree, tree);
+
+#endif /* GCC_TREE_SSA_COALESCE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-dce.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-dce.h
new file mode 100644
index 0000000..f254215
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-dce.h
@@ -0,0 +1,22 @@
+/* Copyright (C) 2017-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef TREE_SSA_DCE_H
+#define TREE_SSA_DCE_H
+extern void simple_dce_from_worklist (bitmap);
+#endif
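The worklist is a bitmap of SSA name versions whose definitions should be reconsidered for deletion; that indexing convention is an assumption carried over from this routine's callers in GCC. A minimal sketch, with some_name standing in for an SSA name whose last use a pass just removed:

bitmap worklist = BITMAP_ALLOC (NULL);
bitmap_set_bit (worklist, SSA_NAME_VERSION (some_name));
simple_dce_from_worklist (worklist);  /* deletes any now-dead definitions */
BITMAP_FREE (worklist);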
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-dom.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-dom.h
new file mode 100644
index 0000000..16aa01f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-dom.h
@@ -0,0 +1,25 @@
+/* Header file for SSA dominator optimizations.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_DOM_H
+#define GCC_TREE_SSA_DOM_H
+
+extern bool simple_iv_increment_p (gimple *);
+
+#endif /* GCC_TREE_SSA_DOM_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-dse.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-dse.h
new file mode 100644
index 0000000..14e5a38
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-dse.h
@@ -0,0 +1,37 @@
+/* Support routines for dead store elimination.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_DSE_H
+#define GCC_TREE_SSA_DSE_H
+
+/* Return value from dse_classify_store */
+enum dse_store_status
+{
+ DSE_STORE_LIVE,
+ DSE_STORE_MAYBE_PARTIAL_DEAD,
+ DSE_STORE_DEAD
+};
+
+dse_store_status dse_classify_store (ao_ref *, gimple *, bool, sbitmap,
+ bool * = NULL, tree = NULL);
+
+void delete_dead_or_redundant_assignment (gimple_stmt_iterator *, const char *,
+ bitmap = NULL, bitmap = NULL);
+
+#endif /* GCC_TREE_SSA_DSE_H */
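A hedged sketch of the classification flow: classify a store STMT against the statements that follow it and drop it when provably dead. Passing a null sbitmap to disable byte tracking mirrors the simpler callers in tree-ssa-dse.cc and is an assumption here:

ao_ref lhs_ref;
ao_ref_init (&lhs_ref, gimple_assign_lhs (stmt));
if (dse_classify_store (&lhs_ref, stmt, /*byte_tracking_enabled=*/false,
                        NULL) == DSE_STORE_DEAD)
  {
    gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
    delete_dead_or_redundant_assignment (&gsi, "dead");
  }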
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-live.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-live.h
new file mode 100644
index 0000000..de665d6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-live.h
@@ -0,0 +1,331 @@
+/* Routines for liveness in SSA trees.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Andrew MacLeod <amacleod@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef _TREE_SSA_LIVE_H
+#define _TREE_SSA_LIVE_H 1
+
+#include "partition.h"
+
+/* Used to create the variable mapping when we go out of SSA form.
+
+ Mapping from an ssa_name to a partition number is maintained, as well as
+ partition number back to ssa_name.
+
+ This data structure also supports "views", which work on a subset of all
+ partitions. This allows the coalescer to decide what partitions are
+ interesting to it, and only work with those partitions. Whenever the view
+ is changed, the partition numbers change, but none of the partition groupings
+ change. (i.e., it is truly a view since it doesn't change anything)
+
+ The final component of the data structure is the basevar map. This provides
+ a list of all the different base variables which occur in a partition view,
+ and a unique index for each one. Routines are provided to quickly produce
+ the base variable of a partition.
+
+ Note that members of a partition MUST all have the same base variable. */
+
+typedef struct _var_map
+{
+ /* The partition manager of all variables. */
+ partition var_partition;
+
+ /* Vectors for managing partition views. */
+ int *partition_to_view;
+ int *view_to_partition;
+
+ /* Current number of partitions in var_map based on the current view. */
+ unsigned int num_partitions;
+
+ /* Original full partition size. */
+ unsigned int partition_size;
+
+ /* Number of base variables in the base var list. */
+ int num_basevars;
+
+ /* Map of partition numbers to base variable table indexes. */
+ int *partition_to_base_index;
+
+ /* Bitmap of basic blocks. It describes the region within which the
+ analysis is done. Using a pointer avoids allocating memory in the
+ out-of-ssa case. */
+ bitmap bmp_bbs;
+
+ /* Vector of basic blocks in the region. */
+ vec<basic_block> vec_bbs;
+
+ /* True if this map is for out-of-ssa, otherwise for live range
+ computation. When for out-of-ssa, it also means the var map is computed
+ for the whole current function. */
+ bool outofssa_p;
+} *var_map;
+
+
+/* Value used to represent no partition number. */
+#define NO_PARTITION -1
+
+extern var_map init_var_map (int, class loop* = NULL);
+extern void delete_var_map (var_map);
+extern int var_union (var_map, tree, tree);
+extern void partition_view_normal (var_map);
+extern void partition_view_bitmap (var_map, bitmap);
+extern void dump_scope_blocks (FILE *, dump_flags_t);
+extern void debug_scope_block (tree, dump_flags_t);
+extern void debug_scope_blocks (dump_flags_t);
+extern void remove_unused_locals (void);
+extern void dump_var_map (FILE *, var_map);
+extern void debug (_var_map &ref);
+extern void debug (_var_map *ptr);
+
+
+/* Return TRUE if region of the MAP contains basic block BB. */
+
+inline bool
+region_contains_p (var_map map, basic_block bb)
+{
+ /* It's possible that the function is called with ENTRY_BLOCK/EXIT_BLOCK. */
+ if (map->outofssa_p)
+ return (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK);
+
+ return bitmap_bit_p (map->bmp_bbs, bb->index);
+}
+
+
+/* Return number of partitions in MAP. */
+
+inline unsigned
+num_var_partitions (var_map map)
+{
+ return map->num_partitions;
+}
+
+
+/* Given partition index I from MAP, return the variable which represents that
+ partition. */
+
+inline tree
+partition_to_var (var_map map, int i)
+{
+ tree name;
+ if (map->view_to_partition)
+ i = map->view_to_partition[i];
+ i = partition_find (map->var_partition, i);
+ name = ssa_name (i);
+ return name;
+}
+
+
+/* Given ssa_name VERSION, if it has a partition in MAP, return the var it
+ is associated with. Otherwise return NULL. */
+
+inline tree
+version_to_var (var_map map, int version)
+{
+ int part;
+ part = partition_find (map->var_partition, version);
+ if (map->partition_to_view)
+ part = map->partition_to_view[part];
+ if (part == NO_PARTITION)
+ return NULL_TREE;
+
+ return partition_to_var (map, part);
+}
+
+
+/* Given VAR, return the partition number in MAP which contains it.
+ NO_PARTITION is returned if it's not in any partition. */
+
+inline int
+var_to_partition (var_map map, tree var)
+{
+ int part;
+
+ part = partition_find (map->var_partition, SSA_NAME_VERSION (var));
+ if (map->partition_to_view)
+ part = map->partition_to_view[part];
+ return part;
+}
+
+
+/* Given VAR, return the variable which represents the entire partition
+ it is a member of in MAP. NULL is returned if it is not in a partition. */
+
+inline tree
+var_to_partition_to_var (var_map map, tree var)
+{
+ int part;
+
+ part = var_to_partition (map, var);
+ if (part == NO_PARTITION)
+ return NULL_TREE;
+ return partition_to_var (map, part);
+}
+
+
+/* Return the index into the basevar table for PARTITION's base in MAP. */
+
+inline int
+basevar_index (var_map map, int partition)
+{
+ gcc_checking_assert (partition >= 0
+ && partition <= (int) num_var_partitions (map));
+ return map->partition_to_base_index[partition];
+}
+
+
+/* Return the number of different base variables in MAP. */
+
+inline int
+num_basevars (var_map map)
+{
+ return map->num_basevars;
+}
+
+
+/* ---------------- live on entry/exit info ------------------------------
+
+ This structure is used to represent live range information on SSA based
+ trees. A partition map must be provided, and based on the active partitions,
+ live-on-entry information and live-on-exit information can be calculated.
+ As well, partitions are marked as to whether they are global (live
+ outside the basic block they are defined in).
+
+ The live-on-entry information is per block. It provides a bitmap for
+ each block which has a bit set for each partition that is live on entry to
+ that block.
+
+ The live-on-exit information is per block. It provides a bitmap for each
+ block indicating which partitions are live on exit from the block.
+
+ For the purposes of this implementation, we treat the elements of a PHI
+ as follows:
+
+ Uses in a PHI are considered LIVE-ON-EXIT to the block from which they
+ originate. They are *NOT* considered live on entry to the block
+ containing the PHI node.
+
+ The Def of a PHI node is *not* considered live on entry to the block.
+ It is considered to be "defined early" in the block. Picture it as each
+ block having a stmt (or block-preheader) before the first real stmt in
+ the block which defines all the variables that are defined by PHIs.
+
+ ----------------------------------------------------------------------- */
+
+
+typedef struct tree_live_info_d
+{
+ /* Var map this relates to. */
+ var_map map;
+
+ /* Bitmap indicating which partitions are global. */
+ bitmap global;
+
+ /* Bitmaps of live on entry blocks for partition elements. */
+ bitmap_head *livein;
+
+ /* Bitmaps of which variables are live on exit for each basic block. */
+ bitmap_head *liveout;
+
+ /* Number of basic blocks when live on exit calculated. */
+ int num_blocks;
+
+ /* Vector used when creating live ranges as a visited stack. */
+ int *work_stack;
+
+ /* Top of workstack. */
+ int *stack_top;
+
+ /* Obstacks to allocate the bitmaps on. */
+ bitmap_obstack livein_obstack;
+ bitmap_obstack liveout_obstack;
+} *tree_live_info_p;
+
+
+#define LIVEDUMP_ENTRY 0x01
+#define LIVEDUMP_EXIT 0x02
+#define LIVEDUMP_ALL (LIVEDUMP_ENTRY | LIVEDUMP_EXIT)
+extern void delete_tree_live_info (tree_live_info_p);
+extern tree_live_info_p calculate_live_ranges (var_map, bool);
+extern void debug (tree_live_info_d &ref);
+extern void debug (tree_live_info_d *ptr);
+extern void dump_live_info (FILE *, tree_live_info_p, int);
+
+typedef hash_map<int_hash <unsigned int, -1U>, unsigned int> live_vars_map;
+extern vec<bitmap_head> compute_live_vars (struct function *, live_vars_map *);
+extern bitmap live_vars_at_stmt (vec<bitmap_head> &, live_vars_map *,
+ gimple *);
+extern void destroy_live_vars (vec<bitmap_head> &);
+
+/* Return TRUE if P is marked as a global in LIVE. */
+
+inline int
+partition_is_global (tree_live_info_p live, int p)
+{
+ gcc_checking_assert (live->global);
+ return bitmap_bit_p (live->global, p);
+}
+
+
+/* Return the bitmap from LIVE representing the partitions live on entry
+ to block BB. */
+
+inline bitmap
+live_on_entry (tree_live_info_p live, basic_block bb)
+{
+ gcc_checking_assert (live->livein
+ && bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
+
+ return &live->livein[bb->index];
+}
+
+
+/* Return the bitmap from LIVE representing the live on exit partitions from
+ block BB. */
+
+inline bitmap
+live_on_exit (tree_live_info_p live, basic_block bb)
+{
+ gcc_checking_assert (live->liveout
+ && bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
+
+ return &live->liveout[bb->index];
+}
+
+
+/* Return the partition map which the information in LIVE utilizes. */
+
+inline var_map
+live_var_map (tree_live_info_p live)
+{
+ return live->map;
+}
+
+
+/* Mark partition P as live on entry to basic block BB in LIVE. */
+
+inline void
+make_live_on_entry (tree_live_info_p live, basic_block bb, int p)
+{
+ bitmap_set_bit (&live->livein[bb->index], p);
+ bitmap_set_bit (live->global, p);
+}
+
+#endif /* _TREE_SSA_LIVE_H */
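Pulling the pieces together, a hypothetical out-of-SSA style workflow: build a map over all SSA names, union two names that must share a partition (which requires a common base variable, per the comment at the top of this file), compact to a dense view, then walk the representatives. NAME1 and NAME2 are placeholders:

var_map map = init_var_map (num_ssa_names);
var_union (map, name1, name2);       /* coalesce two SSA names */
partition_view_normal (map);         /* renumber into a dense view */
for (unsigned i = 0; i < num_var_partitions (map); i++)
  if (tree repr = partition_to_var (map, i))
    print_generic_expr (stderr, repr);
delete_var_map (map);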
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop-ivopts.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop-ivopts.h
new file mode 100644
index 0000000..9514861
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop-ivopts.h
@@ -0,0 +1,37 @@
+/* Header file for Induction variable optimizations.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_LOOP_IVOPTS_H
+#define GCC_TREE_SSA_LOOP_IVOPTS_H
+
+extern edge single_dom_exit (class loop *);
+extern void dump_iv (FILE *, struct iv *);
+extern void dump_use (FILE *, struct iv_use *);
+extern void dump_uses (FILE *, struct ivopts_data *);
+extern void dump_cand (FILE *, struct iv_cand *);
+extern bool contains_abnormal_ssa_name_p (tree);
+extern class loop *outermost_invariant_loop_for_expr (class loop *, tree);
+extern bool expr_invariant_in_loop_p (class loop *, tree);
+extern tree strip_offset (tree, poly_uint64_pod *);
+bool may_be_nonaddressable_p (tree expr);
+void tree_ssa_iv_optimize (void);
+
+void create_canonical_iv (class loop *, edge, tree,
+ tree * = NULL, tree * = NULL);
+#endif /* GCC_TREE_SSA_LOOP_IVOPTS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop-manip.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop-manip.h
new file mode 100644
index 0000000..d49273a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop-manip.h
@@ -0,0 +1,56 @@
+/* Header file for High-level loop manipulation functions.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_LOOP_MANIP_H
+#define GCC_TREE_SSA_LOOP_MANIP_H
+
+typedef void (*transform_callback)(class loop *, void *);
+
+extern void create_iv (tree, tree, tree, class loop *, gimple_stmt_iterator *,
+ bool, tree *, tree *);
+extern void rewrite_into_loop_closed_ssa (bitmap, unsigned);
+extern void verify_loop_closed_ssa (bool, class loop * = NULL);
+
+inline void
+checking_verify_loop_closed_ssa (bool verify_ssa_p, class loop *loop = NULL)
+{
+ if (flag_checking)
+ verify_loop_closed_ssa (verify_ssa_p, loop);
+}
+
+extern basic_block split_loop_exit_edge (edge, bool = false);
+extern basic_block ip_end_pos (class loop *);
+extern basic_block ip_normal_pos (class loop *);
+extern void standard_iv_increment_position (class loop *,
+ gimple_stmt_iterator *, bool *);
+extern bool
+gimple_duplicate_loop_body_to_header_edge (class loop *, edge, unsigned int,
+ sbitmap, edge, vec<edge> *, int);
+extern bool can_unroll_loop_p (class loop *loop, unsigned factor,
+ class tree_niter_desc *niter);
+extern gcov_type niter_for_unrolled_loop (class loop *, unsigned);
+extern void tree_transform_and_unroll_loop (class loop *, unsigned,
+ tree_niter_desc *,
+ transform_callback, void *);
+extern void tree_unroll_loop (class loop *, unsigned, tree_niter_desc *);
+extern tree canonicalize_loop_ivs (class loop *, tree *, bool);
+extern unsigned int loop_invariant_motion_in_fun (function *, bool);
+
+
+#endif /* GCC_TREE_SSA_LOOP_MANIP_H */
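A hedged sketch of materializing an induction variable with the helpers above: an IV running 0, 4, 8, ... placed at the loop's standard increment position. The parameter roles, notably the insert-after flag feeding create_iv, follow the definitions in tree-ssa-loop-manip.cc and are assumptions here; LOOP is a placeholder:

gimple_stmt_iterator incr_pos;
bool insert_after;
tree var_before, var_after;
standard_iv_increment_position (loop, &incr_pos, &insert_after);
create_iv (build_int_cst (sizetype, 0),   /* base */
           build_int_cst (sizetype, 4),   /* step */
           NULL_TREE,                     /* let GCC pick a name */
           loop, &incr_pos, insert_after,
           &var_before, &var_after);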
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop-niter.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop-niter.h
new file mode 100644
index 0000000..91c4c5b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop-niter.h
@@ -0,0 +1,64 @@
+/* Header file for loop iteration estimates.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_LOOP_NITER_H
+#define GCC_TREE_SSA_LOOP_NITER_H
+
+extern tree expand_simple_operations (tree, tree = NULL);
+extern tree simplify_using_initial_conditions (class loop *, tree);
+extern bool loop_only_exit_p (const class loop *, basic_block *body,
+ const_edge);
+extern bool number_of_iterations_exit (class loop *, edge,
+ class tree_niter_desc *niter, bool,
+ bool every_iteration = true,
+ basic_block * = NULL);
+extern bool number_of_iterations_exit_assumptions (class loop *, edge,
+ class tree_niter_desc *,
+ gcond **, bool = true,
+ basic_block * = NULL);
+extern tree find_loop_niter (class loop *, edge *);
+extern bool finite_loop_p (class loop *);
+extern tree loop_niter_by_eval (class loop *, edge);
+extern tree find_loop_niter_by_eval (class loop *, edge *);
+extern bool estimated_loop_iterations (class loop *, widest_int *);
+extern HOST_WIDE_INT estimated_loop_iterations_int (class loop *);
+extern bool max_loop_iterations (class loop *, widest_int *);
+extern HOST_WIDE_INT max_loop_iterations_int (class loop *);
+extern bool likely_max_loop_iterations (class loop *, widest_int *);
+extern HOST_WIDE_INT likely_max_loop_iterations_int (class loop *);
+extern HOST_WIDE_INT max_stmt_executions_int (class loop *);
+extern HOST_WIDE_INT likely_max_stmt_executions_int (class loop *);
+extern HOST_WIDE_INT estimated_stmt_executions_int (class loop *);
+extern bool max_stmt_executions (class loop *, widest_int *);
+extern bool likely_max_stmt_executions (class loop *, widest_int *);
+extern bool estimated_stmt_executions (class loop *, widest_int *);
+extern void estimate_numbers_of_iterations (function *);
+extern void estimate_numbers_of_iterations (class loop *);
+extern bool stmt_dominates_stmt_p (gimple *, gimple *);
+extern bool nowrap_type_p (tree);
+extern bool scev_probably_wraps_p (tree, tree, tree, gimple *,
+ class loop *, bool);
+extern void free_numbers_of_iterations_estimates (class loop *);
+extern void free_numbers_of_iterations_estimates (function *);
+extern tree simplify_replace_tree (tree, tree,
+ tree, tree (*)(tree, void *) = NULL,
+ void * = NULL, bool do_fold = true);
+extern void substitute_in_loop_info (class loop *, tree, tree);
+
+#endif /* GCC_TREE_SSA_LOOP_NITER_H */
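A short consumer sketch: query a conservative bound and gate a transform on it. That the _int variants return -1 when no bound is known is an assumption based on their callers in GCC; LOOP is a placeholder:

HOST_WIDE_INT bound = max_loop_iterations_int (loop);
if (bound >= 0 && bound < 8 && finite_loop_p (loop))
  ;  /* small, provably finite loop: e.g. a full-unroll candidate */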
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop.h
new file mode 100644
index 0000000..3fd4cd4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-loop.h
@@ -0,0 +1,84 @@
+/* Header file for SSA loop optimizations.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_LOOP_H
+#define GCC_TREE_SSA_LOOP_H
+
+
+/* Affine iv. */
+
+struct affine_iv
+{
+ /* Iv = BASE + STEP * i. */
+ tree base, step;
+
+ /* True if this iv does not overflow. */
+ bool no_overflow;
+};
+
+/* Description of number of iterations of a loop. All the expressions inside
+ the structure can be evaluated at the end of the loop's preheader
+ (and due to ssa form, also anywhere inside the body of the loop). */
+
+class tree_niter_desc
+{
+public:
+ tree assumptions; /* The boolean expression. If this expression evaluates
+ to false, then the other fields in this structure
+ should not be used; there is no guarantee that they
+ will be correct. */
+ tree may_be_zero; /* The boolean expression. If it evaluates to true,
+ the loop will exit in the first iteration (i.e.
+ its latch will not be executed), even if the niter
+ field says otherwise. */
+ tree niter; /* The expression giving the number of iterations of
+ a loop (provided that assumptions == true and
+ may_be_zero == false), more precisely the number
+ of executions of the latch of the loop. */
+ widest_int max; /* The upper bound on the number of iterations of
+ the loop. If niter is constant, then these values
+ must agree. */
+
+ /* The simplified shape of the exit condition. This information is used by
+ loop unrolling. If CMP is ERROR_MARK, then the loop cannot be unrolled.
+ Otherwise, the loop exits if CONTROL CMP BOUND is false, where CMP is one
+ of NE_EXPR, LT_EXPR, or GT_EXPR, and CONTROL.STEP is positive if CMP is
+ LT_EXPR and negative if CMP is GT_EXPR. */
+ affine_iv control;
+ tree bound;
+ enum tree_code cmp;
+};
+
+extern bool for_each_index (tree *, bool (*) (tree, tree *, void *), void *);
+extern char *get_lsm_tmp_name (tree ref, unsigned n, const char *suffix = NULL);
+extern unsigned tree_num_loop_insns (class loop *, struct eni_weights *);
+
+/* Returns the loop of the statement STMT. */
+
+inline class loop *
+loop_containing_stmt (gimple *stmt)
+{
+ basic_block bb = gimple_bb (stmt);
+ if (!bb)
+ return NULL;
+
+ return bb->loop_father;
+}
+
+#endif /* GCC_TREE_SSA_LOOP_H */
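A hedged sketch of consuming tree_niter_desc exactly as the field comments prescribe: NITER is only trustworthy under ASSUMPTIONS, and MAY_BE_ZERO demands a zero-trip guard. EXIT_EDGE is a placeholder, use_niter a hypothetical consumer, and naming the fourth argument "warn" follows the definition in tree-ssa-loop-niter.cc:

tree_niter_desc desc;
if (number_of_iterations_exit (loop, exit_edge, &desc, /*warn=*/false)
    && integer_onep (desc.assumptions)
    && integer_zerop (desc.may_be_zero))
  use_niter (desc.niter);  /* latch execution count, unconditionally valid */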
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-math-opts.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-math-opts.h
new file mode 100644
index 0000000..52b7938
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-math-opts.h
@@ -0,0 +1,26 @@
+/* Global, SSA-based optimizations using mathematical identities.
+ Copyright (C) 2021-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_MATH_OPTS_H
+#define GCC_TREE_SSA_MATH_OPTS_H
+
+extern tree powi_as_mults (gimple_stmt_iterator *, location_t,
+ tree, HOST_WIDE_INT);
+
+#endif /* GCC_TREE_SSA_MATH_OPTS_H */
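powi_as_mults expands an integer power into an addition-chain of multiplies at the given iterator and returns the tree holding the product. A one-line hypothetical use, with GSI, STMT, and ARG standing in for a real call site that replaces pow (x, 5.0):

tree result = powi_as_mults (&gsi, gimple_location (stmt),
                             arg /* x */, 5 /* exponent */);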
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-operands.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-operands.h
new file mode 100644
index 0000000..ae36bcd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-operands.h
@@ -0,0 +1,122 @@
+/* SSA operand management for trees.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_OPERANDS_H
+#define GCC_TREE_SSA_OPERANDS_H
+
+/* Interface to SSA operands. */
+
+
+/* This represents a pointer to a DEF operand. */
+typedef tree *def_operand_p;
+
+/* This represents a pointer to a USE operand. */
+typedef ssa_use_operand_t *use_operand_p;
+
+/* NULL operand types. */
+#define NULL_USE_OPERAND_P ((use_operand_p)NULL)
+#define NULL_DEF_OPERAND_P ((def_operand_p)NULL)
+
+/* This represents the USE operands of a stmt. */
+struct use_optype_d
+{
+ struct use_optype_d *next;
+ struct ssa_use_operand_t use_ptr;
+};
+typedef struct use_optype_d *use_optype_p;
+
+/* This structure represents a variable sized buffer which is allocated by the
+ operand memory manager. Operands are suballocated out of this block. The
+ MEM array varies in size. */
+
+struct GTY((chain_next("%h.next"))) ssa_operand_memory_d {
+ struct ssa_operand_memory_d *next;
+ char mem[1];
+};
+
+/* Per-function operand caches. */
+struct GTY(()) ssa_operands {
+ struct ssa_operand_memory_d *operand_memory;
+ unsigned operand_memory_index;
+ /* Current size of the operand memory buffer. */
+ unsigned int ssa_operand_mem_size;
+
+ bool ops_active;
+
+ struct use_optype_d * GTY ((skip (""))) free_uses;
+};
+
+#define USE_FROM_PTR(PTR) get_use_from_ptr (PTR)
+#define DEF_FROM_PTR(PTR) get_def_from_ptr (PTR)
+#define SET_USE(USE, V) set_ssa_use_from_ptr (USE, V)
+#define SET_DEF(DEF, V) ((*(DEF)) = (V))
+
+#define USE_STMT(USE) (USE)->loc.stmt
+
+#define USE_OP_PTR(OP) (&((OP)->use_ptr))
+#define USE_OP(OP) (USE_FROM_PTR (USE_OP_PTR (OP)))
+
+#define PHI_RESULT_PTR(PHI) gimple_phi_result_ptr (PHI)
+#define PHI_RESULT(PHI) DEF_FROM_PTR (PHI_RESULT_PTR (PHI))
+#define SET_PHI_RESULT(PHI, V) SET_DEF (PHI_RESULT_PTR (PHI), (V))
+/*
+#define PHI_ARG_DEF(PHI, I) USE_FROM_PTR (PHI_ARG_DEF_PTR ((PHI), (I)))
+*/
+#define PHI_ARG_DEF_PTR(PHI, I) gimple_phi_arg_imm_use_ptr ((PHI), (I))
+#define PHI_ARG_DEF(PHI, I) gimple_phi_arg_def ((PHI), (I))
+#define SET_PHI_ARG_DEF(PHI, I, V) \
+ SET_USE (PHI_ARG_DEF_PTR ((PHI), (I)), (V))
+#define PHI_ARG_DEF_FROM_EDGE(PHI, E) \
+ PHI_ARG_DEF ((PHI), (E)->dest_idx)
+#define PHI_ARG_DEF_PTR_FROM_EDGE(PHI, E) \
+ PHI_ARG_DEF_PTR ((PHI), (E)->dest_idx)
+#define PHI_ARG_INDEX_FROM_USE(USE) phi_arg_index_from_use (USE)
+
+
+extern bool ssa_operands_active (struct function *);
+extern void init_ssa_operands (struct function *fn);
+extern void fini_ssa_operands (struct function *);
+extern bool verify_ssa_operands (struct function *, gimple *stmt);
+extern void free_stmt_operands (struct function *, gimple *);
+extern void update_stmt_operands (struct function *, gimple *);
+extern void swap_ssa_operands (gimple *, tree *, tree *);
+extern bool verify_imm_links (FILE *f, tree var);
+
+extern void dump_immediate_uses_for (FILE *file, tree var);
+extern void dump_immediate_uses (FILE *file);
+extern void debug_immediate_uses (void);
+extern void debug_immediate_uses_for (tree var);
+
+extern void unlink_stmt_vdef (gimple *);
+
+/* Return the tree pointed-to by USE. */
+inline tree
+get_use_from_ptr (use_operand_p use)
+{
+ return *(use->use);
+}
+
+/* Return the tree pointed-to by DEF. */
+inline tree
+get_def_from_ptr (def_operand_p def)
+{
+ return *def;
+}
+
+#endif /* GCC_TREE_SSA_OPERANDS_H */
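A hedged sketch tying the PHI macros above together: rewrite one argument of a PHI node. gimple_phi_num_args comes from gimple.h; PHI, OLD_NAME, and NEW_NAME are placeholders:

for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
  if (PHI_ARG_DEF (phi, i) == old_name)
    SET_PHI_ARG_DEF (phi, i, new_name);  /* routes through SET_USE */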
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-propagate.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-propagate.h
new file mode 100644
index 0000000..be4cb45
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-propagate.h
@@ -0,0 +1,123 @@
+/* Data structures and function declarations for the SSA value propagation
+ engine.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+ Contributed by Diego Novillo <dnovillo@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef _TREE_SSA_PROPAGATE_H
+#define _TREE_SSA_PROPAGATE_H 1
+
+#include "value-query.h"
+
+/* If VISIT_P is true, statement S will be simulated again. */
+
+inline void
+prop_set_simulate_again (gimple *s, bool visit_p)
+{
+ gimple_set_visited (s, visit_p);
+}
+
+/* Return true if statement T should be simulated again. */
+
+inline bool
+prop_simulate_again_p (gimple *s)
+{
+ return gimple_visited_p (s);
+}
+
+/* Lattice values used for propagation purposes. Specific instances
+ of a propagation engine must return these values from the statement
+ and PHI visit functions to direct the engine. */
+enum ssa_prop_result {
+ /* The statement produces nothing of interest. No edges will be
+ added to the work lists. */
+ SSA_PROP_NOT_INTERESTING,
+
+ /* The statement produces an interesting value. The set of SSA_NAMEs
+ returned by SSA_PROP_VISIT_STMT should be added to
+ INTERESTING_SSA_EDGES. If the statement being visited is a
+ conditional jump, SSA_PROP_VISIT_STMT should indicate which edge
+ out of the basic block should be marked executable. */
+ SSA_PROP_INTERESTING,
+
+ /* The statement produces a varying (i.e., useless) value and
+ should not be simulated again. If the statement being visited
+ is a conditional jump, all the edges coming out of the block
+ will be considered executable. */
+ SSA_PROP_VARYING
+};
+
+
+extern void move_ssa_defining_stmt_for_defs (gimple *, gimple *);
+extern bool stmt_makes_single_store (gimple *);
+extern bool may_propagate_copy (tree, tree, bool = false);
+extern bool may_propagate_copy_into_stmt (gimple *, tree);
+extern bool may_propagate_copy_into_asm (tree);
+extern void propagate_value (use_operand_p, tree);
+extern void replace_exp (use_operand_p, tree);
+extern void propagate_tree_value (tree *, tree);
+extern void propagate_tree_value_into_stmt (gimple_stmt_iterator *, tree);
+
+/* Public interface into the SSA propagation engine. Clients should inherit
+ from this class and provide their own visitors. */
+
+class ssa_propagation_engine
+{
+ public:
+
+ virtual ~ssa_propagation_engine (void) { }
+
+ /* Virtual functions the clients must provide to visit statements
+ and phi nodes respectively. */
+ virtual enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) = 0;
+ virtual enum ssa_prop_result visit_phi (gphi *) = 0;
+
+ /* Main interface into the propagation engine. */
+ void ssa_propagate (void);
+
+ private:
+ /* Internal implementation details. */
+ void simulate_stmt (gimple *stmt);
+ void simulate_block (basic_block);
+};
+
+class substitute_and_fold_engine : public value_query
+{
+ public:
+ substitute_and_fold_engine (bool fold_all_stmts = false)
+ : fold_all_stmts (fold_all_stmts) { }
+ virtual ~substitute_and_fold_engine (void) { }
+ virtual bool fold_stmt (gimple_stmt_iterator *) { return false; }
+
+ bool substitute_and_fold (basic_block = NULL);
+ bool replace_uses_in (gimple *);
+ bool replace_phi_args_in (gphi *);
+
+ virtual void pre_fold_bb (basic_block) { }
+ virtual void post_fold_bb (basic_block) { }
+ virtual void pre_fold_stmt (gimple *) { }
+ virtual void post_new_stmt (gimple *) { }
+
+ bool propagate_into_phi_args (basic_block);
+
+ /* Users like VRP can set this when they want to perform
+ folding for every propagation. */
+ bool fold_all_stmts;
+};
+
+#endif /* _TREE_SSA_PROPAGATE_H */
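As the class comment says, clients inherit from ssa_propagation_engine and supply the two visitors. A minimal do-nothing skeleton showing the required shape; a real client would compute a lattice value in visit_stmt and return SSA_PROP_INTERESTING when it changes:

class noop_prop_engine : public ssa_propagation_engine
{
 public:
  enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) final override
  { return SSA_PROP_VARYING; }
  enum ssa_prop_result visit_phi (gphi *) final override
  { return SSA_PROP_VARYING; }
};

Running it is then just: noop_prop_engine ().ssa_propagate ();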
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-reassoc.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-reassoc.h
new file mode 100644
index 0000000..cbd7170
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-reassoc.h
@@ -0,0 +1,48 @@
+/* Reassociation for trees.
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SSA_REASSOC_H
+#define GCC_SSA_REASSOC_H
+
+/* Operator, rank pair. */
+struct operand_entry
+{
+ unsigned int rank;
+ unsigned int id;
+ tree op;
+ unsigned int count;
+ gimple *stmt_to_insert;
+};
+
+struct range_entry
+{
+ tree exp;
+ tree low;
+ tree high;
+ bool in_p;
+ bool strict_overflow_p;
+ unsigned int idx, next;
+};
+
+void dump_range_entry (FILE *file, struct range_entry *r);
+void debug_range_entry (struct range_entry *r);
+void init_range_entry (struct range_entry *r, tree exp, gimple *stmt);
+bool no_side_effect_bb (basic_block bb);
+
+#endif /* GCC_SSA_REASSOC_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-sccvn.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-sccvn.h
new file mode 100644
index 0000000..675240e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-sccvn.h
@@ -0,0 +1,316 @@
+/* Tree SCC value numbering
+ Copyright (C) 2007-2023 Free Software Foundation, Inc.
+ Contributed by Daniel Berlin <dberlin@dberlin.org>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef TREE_SSA_SCCVN_H
+#define TREE_SSA_SCCVN_H
+
+/* In tree-ssa-sccvn.cc */
+bool expressions_equal_p (tree, tree, bool = true);
+
+
+/* TOP of the VN lattice. */
+extern tree VN_TOP;
+
+/* A predicated value. */
+struct vn_pval
+{
+ vn_pval *next;
+ /* The value of the expression this is attached to is RESULT in
+ case the expression is computed dominated by one of the blocks
+ in valid_dominated_by_p. */
+ tree result;
+ unsigned n;
+ int valid_dominated_by_p[1];
+};
+
+/* N-ary operations in the hashtable consist of LENGTH operands, an
+ opcode, and a type. Result is the value number of the operation,
+ and hashcode is stored to avoid having to calculate it
+ repeatedly. */
+
+typedef struct vn_nary_op_s
+{
+ vn_nary_op_s *next;
+ vn_nary_op_s *unwind_to;
+ /* Unique identifier that all expressions with the same value have. */
+ unsigned int value_id;
+ ENUM_BITFIELD(tree_code) opcode : 16;
+ unsigned length : 16;
+ hashval_t hashcode;
+ unsigned predicated_values : 1;
+ union {
+ /* If ! predicated_values this is the value of the expression. */
+ tree result;
+ /* If predicated_values this is a list of values of the expression. */
+ vn_pval *values;
+ } u;
+ tree type;
+ tree op[1];
+} *vn_nary_op_t;
+typedef const struct vn_nary_op_s *const_vn_nary_op_t;
+
+/* Return the size of a vn_nary_op_t with LENGTH operands. */
+
+inline size_t
+sizeof_vn_nary_op (unsigned int length)
+{
+ return sizeof (struct vn_nary_op_s) + sizeof (tree) * length - sizeof (tree);
+}
+
+/* Phi nodes in the hashtable consist of their non-VN_TOP phi
+ arguments, and the basic block the phi is in. Result is the value
+ number of the operation, and hashcode is stored to avoid having to
+ calculate it repeatedly. Phi nodes not in the same block are never
+ considered equivalent. */
+
+typedef struct vn_phi_s
+{
+ vn_phi_s *next;
+ /* Unique identifier that all expressions with the same value have. */
+ unsigned int value_id;
+ hashval_t hashcode;
+ basic_block block;
+ /* Controlling condition lhs/rhs. */
+ tree cclhs;
+ tree ccrhs;
+ tree type;
+ tree result;
+ /* The number of args is determined by EDGE_COUNT (block->preds). */
+ tree phiargs[1];
+} *vn_phi_t;
+typedef const struct vn_phi_s *const_vn_phi_t;
+
+/* Reference operands only exist in reference operations structures.
+ They consist of an opcode, type, and some number of operands. For
+ a given opcode, some, all, or none of the operands may be used.
+ The operands are there to store the information that makes up the
+ portion of the addressing calculation that opcode performs. */
+
+typedef struct vn_reference_op_struct
+{
+ ENUM_BITFIELD(tree_code) opcode : 16;
+ /* Dependence info, used for [TARGET_]MEM_REF only. For internal
+ function calls clique is also used for the internal function code. */
+ unsigned short clique;
+ unsigned short base;
+ unsigned reverse : 1;
+ /* For storing TYPE_ALIGN for array ref element size computation. */
+ unsigned align : 6;
+ /* Constant offset this op adds or -1 if it is variable. */
+ poly_int64_pod off;
+ tree type;
+ tree op0;
+ tree op1;
+ tree op2;
+} vn_reference_op_s;
+typedef vn_reference_op_s *vn_reference_op_t;
+typedef const vn_reference_op_s *const_vn_reference_op_t;
+
+inline unsigned
+vn_ref_op_align_unit (vn_reference_op_t op)
+{
+ return op->align ? ((unsigned)1 << (op->align - 1)) / BITS_PER_UNIT : 0;
+}
+
+/* A reference operation in the hashtable is represented as
+ the vuse, representing the memory state at the time of
+ the operation, and a collection of operands that make up the
+ addressing calculation. If two vn_reference_t's have the same set
+ of operands, they access the same memory location. We also store
+ the resulting value number, and the hashcode. */
+
+typedef struct vn_reference_s
+{
+ vn_reference_s *next;
+ /* Unique identifier that all expressions with the same value have. */
+ unsigned int value_id;
+ hashval_t hashcode;
+ tree vuse;
+ alias_set_type set;
+ alias_set_type base_set;
+ tree type;
+ unsigned punned : 1;
+ vec<vn_reference_op_s> operands;
+ tree result;
+ tree result_vdef;
+} *vn_reference_t;
+typedef const struct vn_reference_s *const_vn_reference_t;
+
+typedef struct vn_constant_s
+{
+ unsigned int value_id;
+ hashval_t hashcode;
+ tree constant;
+} *vn_constant_t;
+
+enum vn_kind { VN_NONE, VN_CONSTANT, VN_NARY, VN_REFERENCE, VN_PHI };
+enum vn_kind vn_get_stmt_kind (gimple *);
+
+/* Hash the type TYPE using bits that distinguish it in the
+ types_compatible_p sense. */
+
+inline hashval_t
+vn_hash_type (tree type)
+{
+ return (INTEGRAL_TYPE_P (type)
+ + (INTEGRAL_TYPE_P (type)
+ ? TYPE_PRECISION (type) + TYPE_UNSIGNED (type) : 0));
+}
+
+/* Hash the constant CONSTANT, distinguishing constants of incompatible
+ types in the types_compatible_p sense. */
+
+inline hashval_t
+vn_hash_constant_with_type (tree constant)
+{
+ inchash::hash hstate;
+ inchash::add_expr (constant, hstate);
+ hstate.merge_hash (vn_hash_type (TREE_TYPE (constant)));
+ return hstate.end ();
+}
+
+/* Compare the constants C1 and C2, distinguishing constants of
+ incompatible types in the types_compatible_p sense. */
+
+inline bool
+vn_constant_eq_with_type (tree c1, tree c2)
+{
+ return (expressions_equal_p (c1, c2)
+ && types_compatible_p (TREE_TYPE (c1), TREE_TYPE (c2)));
+}
+
+/* Instead of having a local availability lattice for each basic-block
+ and availability at X defined as union of the local availabilities
+ at X and its dominators we're turning this upside down and track
+ availability per value given values are usually made available at very
+ few points.
+ So we have a chain of LOCATION, LEADER entries where LOCATION
+ specifies the basic block in which LEADER is made available for VALUE.
+ We prepend to this chain in RPO order, thus for iteration we can simply
+ remove the last entries.
+ LOCATION is the basic-block index and LEADER is its SSA name version. */
+struct vn_avail
+{
+ vn_avail *next;
+ /* The basic-block index in which LEADER is made available. */
+ int location;
+ /* The LEADER for the value we are chained on. */
+ int leader;
+ /* The previous value we pushed an avail record to. */
+ struct vn_ssa_aux *next_undo;
+};
+
+typedef struct vn_ssa_aux
+{
+ /* SSA name this vn_ssa_aux is associated with in the lattice. */
+ tree name;
+ /* Value number. This may be an SSA name or a constant. */
+ tree valnum;
+ /* Statements to insert if needs_insertion is true. */
+ gimple_seq expr;
+
+ /* AVAIL entries, last in RPO order is first. This is only tracked
+ for SSA names also serving as values (NAME == VALNUM). */
+ vn_avail *avail;
+
+ /* Unique identifier that all expressions with the same value have. */
+ unsigned int value_id;
+
+ /* Whether the SSA_NAME has been processed at least once. */
+ unsigned visited : 1;
+
+ /* Whether the SSA_NAME has no defining statement and thus an
+ insertion of such with EXPR as definition is required before
+ a use can be created of it. */
+ unsigned needs_insertion : 1;
+} *vn_ssa_aux_t;
+
+enum vn_lookup_kind { VN_NOWALK, VN_WALK, VN_WALKREWRITE };
+
+/* Return the value numbering info for an SSA_NAME. */
+bool has_VN_INFO (tree);
+extern vn_ssa_aux_t VN_INFO (tree);
+tree vn_get_expr_for (tree);
+void scc_vn_restore_ssa_info (void);
+vn_nary_op_t alloc_vn_nary_op_noinit (unsigned int, struct obstack *);
+unsigned int vn_nary_length_from_stmt (gimple *);
+void init_vn_nary_op_from_stmt (vn_nary_op_t, gassign *);
+hashval_t vn_nary_op_compute_hash (const vn_nary_op_t);
+tree vn_nary_op_lookup_stmt (gimple *, vn_nary_op_t *);
+tree vn_nary_op_lookup_pieces (unsigned int, enum tree_code,
+ tree, tree *, vn_nary_op_t *);
+vn_nary_op_t vn_nary_op_insert_pieces (unsigned int, enum tree_code,
+ tree, tree *, tree, unsigned int);
+bool ao_ref_init_from_vn_reference (ao_ref *, alias_set_type, alias_set_type,
+ tree, const vec<vn_reference_op_s> &);
+vec<vn_reference_op_s> vn_reference_operands_for_lookup (tree);
+tree vn_reference_lookup_pieces (tree, alias_set_type, alias_set_type, tree,
+ vec<vn_reference_op_s> ,
+ vn_reference_t *, vn_lookup_kind);
+tree vn_reference_lookup (tree, tree, vn_lookup_kind, vn_reference_t *, bool,
+ tree * = NULL, tree = NULL_TREE, bool = false);
+void vn_reference_lookup_call (gcall *, vn_reference_t *, vn_reference_t);
+vn_reference_t vn_reference_insert_pieces (tree, alias_set_type, alias_set_type,
+ tree, vec<vn_reference_op_s>,
+ tree, unsigned int);
+void print_vn_reference_ops (FILE *, const vec<vn_reference_op_s>);
+
+bool vn_nary_op_eq (const_vn_nary_op_t const vno1,
+ const_vn_nary_op_t const vno2);
+bool vn_nary_may_trap (vn_nary_op_t);
+bool vn_reference_may_trap (vn_reference_t);
+bool vn_reference_eq (const_vn_reference_t const, const_vn_reference_t const);
+
+unsigned int get_max_value_id (void);
+unsigned int get_max_constant_value_id (void);
+unsigned int get_next_value_id (void);
+unsigned int get_next_constant_value_id (void);
+unsigned int get_constant_value_id (tree);
+unsigned int get_or_alloc_constant_value_id (tree);
+
+/* Return true if V is a value id for a constant. */
+inline bool
+value_id_constant_p (unsigned int v)
+{
+ return (int)v < 0;
+}
+
+tree fully_constant_vn_reference_p (vn_reference_t);
+tree vn_nary_simplify (vn_nary_op_t);
+
+unsigned do_rpo_vn (function *, edge, bitmap,
+ /* iterate */ bool = false,
+ /* eliminate */ bool = true,
+ vn_lookup_kind = VN_WALKREWRITE);
+
+/* Private interface for PRE. */
+void run_rpo_vn (vn_lookup_kind);
+unsigned eliminate_with_rpo_vn (bitmap);
+void free_rpo_vn (void);
+
+/* Valueize NAME if it is an SSA name, otherwise just return it. This hook
+ is initialized by run_scc_vn. */
+extern tree (*vn_valueize) (tree);
+
+/* Context that valueization should operate on. */
+extern basic_block vn_context_bb;
+
+
+#endif /* TREE_SSA_SCCVN_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-scopedtables.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-scopedtables.h
new file mode 100644
index 0000000..a810b05
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-scopedtables.h
@@ -0,0 +1,212 @@
+/* Header file for SSA dominator optimizations.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_SCOPED_TABLES_H
+#define GCC_TREE_SSA_SCOPED_TABLES_H
+
+/* Representation of a "naked" right-hand-side expression, to be used
+ in recording available expressions in the expression hash table. */
+
+enum expr_kind
+{
+ EXPR_SINGLE,
+ EXPR_UNARY,
+ EXPR_BINARY,
+ EXPR_TERNARY,
+ EXPR_CALL,
+ EXPR_PHI
+};
+
+struct hashable_expr
+{
+ tree type;
+ enum expr_kind kind;
+ union {
+ struct { tree rhs; } single;
+ struct { enum tree_code op; tree opnd; } unary;
+ struct { enum tree_code op; tree opnd0, opnd1; } binary;
+ struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
+ struct { gcall *fn_from; bool pure; size_t nargs; tree *args; } call;
+ struct { size_t nargs; tree *args; } phi;
+ } ops;
+};
+
+/* Structure for recording known value of a conditional expression.
+
+ Clients build vectors of these objects to record known values
+ that occur on edges. */
+
+struct cond_equivalence
+{
+ /* The condition, in a HASHABLE_EXPR form. */
+ struct hashable_expr cond;
+
+  /* The result of the condition (true or false).  */
+ tree value;
+};
+
+/* Structure for entries in the expression hash table. */
+
+typedef class expr_hash_elt * expr_hash_elt_t;
+
+class expr_hash_elt
+{
+ public:
+ expr_hash_elt (gimple *, tree);
+ expr_hash_elt (tree);
+ expr_hash_elt (struct hashable_expr *, tree);
+ expr_hash_elt (class expr_hash_elt &);
+ ~expr_hash_elt ();
+ void print (FILE *);
+ tree vop (void) { return m_vop; }
+ tree lhs (void) { return m_lhs; }
+ struct hashable_expr *expr (void) { return &m_expr; }
+ expr_hash_elt *stamp (void) { return m_stamp; }
+ hashval_t hash (void) { return m_hash; }
+
+ private:
+ /* The expression (rhs) we want to record. */
+ struct hashable_expr m_expr;
+
+ /* The value (lhs) of this expression. */
+ tree m_lhs;
+
+ /* The virtual operand associated with the nearest dominating stmt
+ loading from or storing to expr. */
+ tree m_vop;
+
+ /* The hash value for RHS. */
+ hashval_t m_hash;
+
+ /* A unique stamp, typically the address of the hash
+ element itself, used in removing entries from the table. */
+ class expr_hash_elt *m_stamp;
+
+ /* We should never be making assignments between objects in this class.
+ Though it might allow us to exploit C++11 move semantics if we
+ defined the move constructor and move assignment operator. */
+ expr_hash_elt& operator= (const expr_hash_elt&);
+};
+
+/* Hashtable helpers. */
+
+struct expr_elt_hasher : pointer_hash <expr_hash_elt>
+{
+ static inline hashval_t hash (const value_type &p)
+ { return p->hash (); }
+ static bool equal (const value_type &, const compare_type &);
+ static inline void remove (value_type &element)
+ { delete element; }
+};
+
+
+/* This class defines an unwindable expression equivalence table
+ layered on top of the expression hash table.
+
+ Essentially it's just a stack of available expression value pairs with
+ a special marker (NULL, NULL) to indicate unwind points. */
+
+class avail_exprs_stack
+{
+ public:
+ /* We need access to the AVAIL_EXPR hash table so that we can
+ remove entries from the hash table when unwinding the stack. */
+ avail_exprs_stack (hash_table<expr_elt_hasher> *table)
+ { m_stack.create (20); m_avail_exprs = table; }
+ ~avail_exprs_stack (void) { m_stack.release (); }
+
+ /* Push the unwinding marker onto the stack. */
+ void push_marker (void) { record_expr (NULL, NULL, 'M'); }
+
+ /* Restore the AVAIL_EXPRs table to its state when the last marker
+ was pushed. */
+ void pop_to_marker (void);
+
+ /* Record a single available expression that can be unwound. */
+ void record_expr (expr_hash_elt_t, expr_hash_elt_t, char);
+
+ /* Get the underlying hash table. Would this be better as
+ class inheritance? */
+ hash_table<expr_elt_hasher> *avail_exprs (void)
+ { return m_avail_exprs; }
+
+ /* Lookup and conditionally insert an expression into the table,
+ recording enough information to unwind as needed. */
+ tree lookup_avail_expr (gimple *, bool, bool, expr_hash_elt ** = NULL);
+
+ void record_cond (cond_equivalence *);
+
+ private:
+ vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > m_stack;
+ hash_table<expr_elt_hasher> *m_avail_exprs;
+
+ /* For some assignments where the RHS is a binary operator, if we know
+     an equality relationship between the operands, we may be able to compute
+ a result, even if we don't know the exact value of the operands. */
+ tree simplify_binary_operation (gimple *, class expr_hash_elt);
+
+ /* We do not allow copying this object or initializing one
+ from another. */
+ avail_exprs_stack& operator= (const avail_exprs_stack&);
+ avail_exprs_stack (class avail_exprs_stack &);
+};
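+
+/* Illustrative sketch, not part of GCC: a self-contained model of the
+   marker-based unwinding described above, with std::unordered_map
+   standing in for the expression hash table.  It is simplified in that
+   it only undoes insertions; the real stack can also restore entries
+   that were replaced.  All names here are hypothetical.  */
+#if 0
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+struct scoped_table
+{
+  std::unordered_map<std::string, int> table;
+  /* Stack entries are (is_marker, key) pairs.  */
+  std::vector<std::pair<bool, std::string> > stack;
+
+  void push_marker () { stack.push_back ({true, ""}); }
+
+  void record (const std::string &key, int val)
+  {
+    if (table.emplace (key, val).second)
+      stack.push_back ({false, key});   /* remember what to undo  */
+  }
+
+  void pop_to_marker ()
+  {
+    while (!stack.empty () && !stack.back ().first)
+      {
+        table.erase (stack.back ().second);
+        stack.pop_back ();
+      }
+    if (!stack.empty ())
+      stack.pop_back ();                /* discard the marker itself  */
+  }
+};
+#endif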
+
+/* This class defines an unwindable const/copy equivalence table
+ layered on top of SSA_NAME_VALUE/set_ssa_name_value.
+
+ Essentially it's just a stack of name,prev value pairs with a
+ special marker (NULL) to indicate unwind points. */
+
+class const_and_copies
+{
+ public:
+ const_and_copies (void) { m_stack.create (20); };
+ ~const_and_copies (void) { m_stack.release (); }
+
+ /* Push the unwinding marker onto the stack. */
+ void push_marker (void) { m_stack.safe_push (NULL_TREE); }
+
+ /* Restore the const/copies table to its state when the last marker
+ was pushed. */
+ void pop_to_marker (void);
+
+ /* Record a single const/copy pair that can be unwound. This version
+ may follow the value chain for the RHS. */
+ void record_const_or_copy (tree, tree);
+
+ /* Special entry point when we want to provide an explicit previous
+ value for the first argument. Try to get rid of this in the future.
+
+ This version may also follow the value chain for the RHS. */
+ void record_const_or_copy (tree, tree, tree);
+
+ private:
+ /* Record a single const/copy pair that can be unwound. This version
+ does not follow the value chain for the RHS. */
+ void record_const_or_copy_raw (tree, tree, tree);
+
+ vec<tree> m_stack;
+ const_and_copies& operator= (const const_and_copies&);
+ const_and_copies (class const_and_copies &);
+};
+
+void initialize_expr_from_cond (tree cond, struct hashable_expr *expr);
+void record_conditions (vec<cond_equivalence> *p, tree, tree);
+
+#endif /* GCC_TREE_SSA_SCOPED_TABLES_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-strlen.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-strlen.h
new file mode 100644
index 0000000..f06ef85
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-strlen.h
@@ -0,0 +1,43 @@
+/* Declarations of tree-ssa-strlen API.
+
+ Copyright (C) 2018-2023 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_STRLEN_H
+#define GCC_TREE_SSA_STRLEN_H
+
+class pointer_query;
+
+extern bool is_strlen_related_p (tree, tree);
+extern bool maybe_diag_stxncpy_trunc (gimple_stmt_iterator, tree, tree,
+ pointer_query * = NULL);
+extern tree set_strlen_range (tree, wide_int, wide_int, tree = NULL_TREE);
+
+extern tree get_range (tree, gimple *, wide_int[2],
+ class range_query * = NULL);
+
+struct c_strlen_data;
+extern void get_range_strlen_dynamic (tree, gimple *, c_strlen_data *,
+ pointer_query &);
+
+extern gimple *use_in_zero_equality (tree, bool = true);
+
+/* APIs internal to strlen pass. Defined in gimple-ssa-sprintf.cc. */
+extern bool handle_printf_call (gimple_stmt_iterator *, pointer_query &);
+
+#endif // GCC_TREE_SSA_STRLEN_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-ter.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-ter.h
new file mode 100644
index 0000000..9bbaf0f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-ter.h
@@ -0,0 +1,26 @@
+/* Header file for tree-ssa-ter.cc exports.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_TER_H
+#define GCC_TREE_SSA_TER_H
+
+extern bitmap find_replaceable_exprs (var_map);
+extern void dump_replaceable_exprs (FILE *, bitmap);
+
+#endif /* GCC_TREE_SSA_TER_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-threadedge.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-threadedge.h
new file mode 100644
index 0000000..3d67a12
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-threadedge.h
@@ -0,0 +1,134 @@
+/* Header file for SSA jump threading.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_THREADEDGE_H
+#define GCC_TREE_SSA_THREADEDGE_H
+
+// Class used to maintain path state in the jump threader and pass it
+// to the jump threader simplifier.
+
+class jt_state
+{
+public:
+ virtual ~jt_state () { }
+ virtual void push (edge);
+ virtual void pop ();
+ virtual void register_equiv (tree dest, tree src, bool update_range);
+ virtual void register_equivs_edge (edge e);
+ virtual void register_equivs_stmt (gimple *, basic_block,
+ class jt_simplifier *);
+ virtual void record_ranges_from_stmt (gimple *stmt, bool temporary);
+ void get_path (vec<basic_block> &);
+ void append_path (basic_block);
+ void dump (FILE *);
+ void debug ();
+
+private:
+ auto_vec<basic_block> m_blocks;
+ static const basic_block BB_MARKER;
+};
+
+// Statement simplifier callback for the jump threader.
+
+class jt_simplifier
+{
+public:
+ virtual ~jt_simplifier () { }
+ virtual tree simplify (gimple *, gimple *, basic_block, jt_state *) = 0;
+};
+
+class hybrid_jt_state : public jt_state
+{
+private:
+ void register_equivs_stmt (gimple *, basic_block, jt_simplifier *) override
+ {
+ // Ranger has no need to simplify anything.
+ }
+};
+
+class hybrid_jt_simplifier : public jt_simplifier
+{
+public:
+ hybrid_jt_simplifier (class gimple_ranger *r, class path_range_query *q);
+ tree simplify (gimple *stmt, gimple *, basic_block, jt_state *) override;
+
+private:
+ void compute_exit_dependencies (bitmap dependencies,
+ const vec<basic_block> &path,
+ gimple *stmt);
+
+ gimple_ranger *m_ranger;
+ path_range_query *m_query;
+};
+
+// This is the high level threader. The entry point is
+// thread_outgoing_edges(), which calculates and registers paths to be
+// threaded. When all candidates have been registered,
+// thread_through_all_blocks() is called to actually change the CFG.
+
+class jump_threader
+{
+public:
+ jump_threader (jt_simplifier *, class jt_state *);
+ ~jump_threader ();
+ void thread_outgoing_edges (basic_block);
+ void remove_jump_threads_including (edge_def *);
+ bool thread_through_all_blocks (bool may_peel_loop_headers);
+
+private:
+ tree simplify_control_stmt_condition (edge, gimple *);
+ tree simplify_control_stmt_condition_1 (edge,
+ gimple *,
+ tree op0,
+ tree_code cond_code,
+ tree op1,
+ unsigned limit);
+
+ bool thread_around_empty_blocks (vec<class jump_thread_edge *> *path,
+ edge, bitmap visited);
+ int thread_through_normal_block (vec<jump_thread_edge *> *path,
+ edge, bitmap visited);
+ void thread_across_edge (edge);
+ bool record_temporary_equivalences_from_phis (edge);
+ gimple *record_temporary_equivalences_from_stmts_at_dest (edge);
+
+ // Dummy condition to avoid creating lots of throw away statements.
+ gcond *dummy_cond;
+
+ class fwd_jt_path_registry *m_registry;
+ jt_simplifier *m_simplifier;
+ jt_state *m_state;
+};
+
+extern void propagate_threaded_block_debug_into (basic_block, basic_block);
+extern bool single_succ_to_potentially_threadable_block (basic_block);
+
+// ?? All this ssa_name_values stuff is the store of values for
+// avail_exprs_stack and const_and_copies, so it really belongs in the
+// jump_threader class. However, it's probably not worth touching
+// this, since all this windable state is slated to go away with the
+// ranger.
+extern vec<tree> ssa_name_values;
+#define SSA_NAME_VALUE(x) \
+ (SSA_NAME_VERSION (x) < ssa_name_values.length () \
+ ? ssa_name_values[SSA_NAME_VERSION (x)] \
+ : NULL_TREE)
+extern void set_ssa_name_value (tree, tree);
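+
+/* Illustrative sketch, not the GCC implementation: SSA_NAME_VALUE above
+   is a bounds-checked read of a table indexed by SSA version, so a
+   matching setter only has to grow the table on demand.  Modelled with
+   a hypothetical std::vector:  */
+#if 0
+#include <vector>
+
+static std::vector<void *> values;          /* indexed by SSA version  */
+
+void set_value (unsigned version, void *value)
+{
+  if (version >= values.size ())
+    values.resize (version + 1, nullptr);   /* unset slots read as null  */
+  values[version] = value;
+}
+#endif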
+
+#endif /* GCC_TREE_SSA_THREADEDGE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-threadupdate.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-threadupdate.h
new file mode 100644
index 0000000..ab4f776
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa-threadupdate.h
@@ -0,0 +1,150 @@
+/* Communication between registering jump thread requests and
+ updating the SSA/CFG for jump threading.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef _TREE_SSA_THREADUPDATE_H
+#define _TREE_SSA_THREADUPDATE_H 1
+
+enum jump_thread_edge_type
+{
+ EDGE_START_JUMP_THREAD,
+ EDGE_COPY_SRC_BLOCK,
+ EDGE_COPY_SRC_JOINER_BLOCK,
+ EDGE_NO_COPY_SRC_BLOCK
+};
+
+// We keep the registered jump threading opportunities in this
+// vector as edge pairs (original_edge, target_edge).
+
+class jump_thread_edge
+{
+public:
+ jump_thread_edge (edge e, jump_thread_edge_type t) : e (e), type (t) {}
+
+ edge e;
+ jump_thread_edge_type type;
+};
+
+class jump_thread_path_allocator
+{
+public:
+ jump_thread_path_allocator ();
+ ~jump_thread_path_allocator ();
+ jump_thread_edge *allocate_thread_edge (edge, jump_thread_edge_type);
+ vec<jump_thread_edge *> *allocate_thread_path ();
+private:
+ DISABLE_COPY_AND_ASSIGN (jump_thread_path_allocator);
+ obstack m_obstack;
+};
+
+// Abstract class for the jump thread registry.
+//
+// When all candidates have been registered with
+// register_jump_thread(), thread_through_all_blocks() is called to
+// update the CFG.
+
+class jt_path_registry
+{
+public:
+ jt_path_registry (bool backedge_threads);
+ virtual ~jt_path_registry ();
+ bool register_jump_thread (vec<jump_thread_edge *> *);
+ bool thread_through_all_blocks (bool peel_loop_headers);
+ void push_edge (vec<jump_thread_edge *> *path, edge, jump_thread_edge_type);
+ vec<jump_thread_edge *> *allocate_thread_path ();
+ void debug ();
+protected:
+ void debug_path (FILE *, int pathno);
+ vec<vec<jump_thread_edge *> *> m_paths;
+ unsigned long m_num_threaded_edges;
+private:
+ virtual bool update_cfg (bool peel_loop_headers) = 0;
+ bool cancel_invalid_paths (vec<jump_thread_edge *> &path);
+ jump_thread_path_allocator m_allocator;
+ // True if threading through back edges is allowed. This is only
+ // allowed in the generic copier in the backward threader.
+ bool m_backedge_threads;
+ DISABLE_COPY_AND_ASSIGN (jt_path_registry);
+};
+
+// Forward threader path registry using a custom BB copier.
+
+class fwd_jt_path_registry : public jt_path_registry
+{
+public:
+ fwd_jt_path_registry ();
+ ~fwd_jt_path_registry ();
+ void remove_jump_threads_including (edge);
+private:
+ bool update_cfg (bool peel_loop_headers) override;
+ void mark_threaded_blocks (bitmap threaded_blocks);
+ bool thread_block_1 (basic_block, bool noloop_only, bool joiners);
+ bool thread_block (basic_block, bool noloop_only);
+ bool thread_through_loop_header (class loop *loop,
+ bool may_peel_loop_headers);
+ class redirection_data *lookup_redirection_data (edge e, enum insert_option);
+
+ hash_table<struct removed_edges> *m_removed_edges;
+
+ // Main data structure to hold information for duplicates of BB.
+ hash_table<redirection_data> *m_redirection_data;
+};
+
+// Backward threader path registry using a generic BB copier.
+
+class back_jt_path_registry : public jt_path_registry
+{
+public:
+ back_jt_path_registry ();
+private:
+ bool update_cfg (bool peel_loop_headers) override;
+ void adjust_paths_after_duplication (unsigned curr_path_num);
+ bool duplicate_thread_path (edge entry, edge exit, basic_block *region,
+ unsigned n_region, unsigned current_path_no);
+ bool rewire_first_differing_edge (unsigned path_num, unsigned edge_num);
+};
+
+// Rather than search all the edges in jump thread paths each time DOM
+// is able to simplify a control statement, we build a hash table with
+// the deleted edges. We only care about the address of the edge, not
+// its contents.
+struct removed_edges : nofree_ptr_hash<edge_def>
+{
+ static hashval_t hash (edge e) { return htab_hash_pointer (e); }
+ static bool equal (edge e1, edge e2) { return e1 == e2; }
+};
+
+extern unsigned int estimate_threading_killed_stmts (basic_block);
+
+enum bb_dom_status
+{
+ /* BB does not dominate latch of the LOOP. */
+ DOMST_NONDOMINATING,
+  /* The LOOP is broken (there is no path from the header to its latch).  */
+ DOMST_LOOP_BROKEN,
+ /* BB dominates the latch of the LOOP. */
+ DOMST_DOMINATING
+};
+
+enum bb_dom_status determine_bb_domination_status (class loop *, basic_block);
+
+// In tree-ssa-dom.cc.
+extern void free_dom_edge_info (edge);
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa.h
new file mode 100644
index 0000000..fa8c808
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssa.h
@@ -0,0 +1,118 @@
+/* Header file for any pass which requires SSA routines.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSA_H
+#define GCC_TREE_SSA_H
+
+/* Mapping for redirected edges. */
+struct edge_var_map {
+ tree result; /* PHI result. */
+ tree def; /* PHI arg definition. */
+ location_t locus; /* PHI arg location. */
+};
+
+/* A vector of var maps. */
+typedef vec<edge_var_map, va_heap, vl_embed> edge_var_map_vector;
+
+
+extern void redirect_edge_var_map_add (edge, tree, tree, location_t);
+extern void redirect_edge_var_map_clear (edge);
+extern void redirect_edge_var_map_dup (edge, edge);
+extern vec<edge_var_map> *redirect_edge_var_map_vector (edge);
+extern void redirect_edge_var_map_empty (void);
+extern edge ssa_redirect_edge (edge, basic_block);
+extern void flush_pending_stmts (edge);
+extern void gimple_replace_ssa_lhs (gimple *, tree);
+extern tree target_for_debug_bind (tree);
+extern void insert_debug_temp_for_var_def (gimple_stmt_iterator *, tree);
+extern void insert_debug_temps_for_defs (gimple_stmt_iterator *);
+extern void reset_debug_uses (gimple *);
+extern void release_defs_bitset (bitmap toremove);
+extern void verify_ssa (bool, bool);
+extern void init_tree_ssa (function *, int size = 0);
+extern void delete_tree_ssa (function *);
+extern bool tree_ssa_useless_type_conversion (tree);
+extern tree tree_ssa_strip_useless_type_conversions (tree);
+extern tree find_released_ssa_name (tree *, int *, void *);
+
+
+extern bool ssa_defined_default_def_p (tree t);
+extern bool ssa_undefined_value_p (tree, bool = true);
+extern bool gimple_uses_undefined_value_p (gimple *);
+
+
+bool ssa_name_any_use_dominates_bb_p (tree var, basic_block bb);
+extern void mark_ssa_maybe_undefs (void);
+
+/* Return TRUE iff VAR is marked as maybe-undefined. See
+ mark_ssa_maybe_undefs. */
+
+inline bool
+ssa_name_maybe_undef_p (tree var)
+{
+ gcc_checking_assert (TREE_CODE (var) == SSA_NAME);
+ return TREE_VISITED (var);
+}
+
+/* Set (or clear, depending on VALUE) VAR's maybe-undefined mark. */
+
+inline void
+ssa_name_set_maybe_undef (tree var, bool value = true)
+{
+ gcc_checking_assert (TREE_CODE (var) == SSA_NAME);
+ TREE_VISITED (var) = value;
+}
+
+
+extern void execute_update_addresses_taken (void);
+
+/* Given an edge_var_map V, return the PHI arg definition. */
+
+inline tree
+redirect_edge_var_map_def (edge_var_map *v)
+{
+ return v->def;
+}
+
+/* Given an edge_var_map V, return the PHI result. */
+
+inline tree
+redirect_edge_var_map_result (edge_var_map *v)
+{
+ return v->result;
+}
+
+/* Given an edge_var_map V, return the PHI arg location. */
+
+inline location_t
+redirect_edge_var_map_location (edge_var_map *v)
+{
+ return v->locus;
+}
+
+/* Verify SSA invariants, if internal consistency checks are enabled. */
+
+inline void
+checking_verify_ssa (bool check_modified_stmt, bool check_ssa_operands)
+{
+ if (flag_checking)
+ verify_ssa (check_modified_stmt, check_ssa_operands);
+}
+
+#endif /* GCC_TREE_SSA_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssanames.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssanames.h
new file mode 100644
index 0000000..b09e71b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-ssanames.h
@@ -0,0 +1,140 @@
+/* SSA name expressions routines
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_SSANAMES_H
+#define GCC_TREE_SSANAMES_H
+
+/* Aliasing information for SSA_NAMEs representing pointer variables. */
+
+struct GTY(()) ptr_info_def
+{
+ /* The points-to solution. */
+ struct pt_solution pt;
+
+ /* Alignment and misalignment of the pointer in bytes. Together
+ align and misalign specify low known bits of the pointer.
+ ptr & (align - 1) == misalign. */
+
+ /* When known, this is the power-of-two byte alignment of the object this
+ pointer points into. This is usually DECL_ALIGN_UNIT for decls and
+ MALLOC_ABI_ALIGNMENT for allocated storage. When the alignment is not
+ known, it is zero. Do not access directly but use functions
+ get_ptr_info_alignment, set_ptr_info_alignment,
+ mark_ptr_info_alignment_unknown and similar. */
+ unsigned int align;
+
+ /* When alignment is known, the byte offset this pointer differs from the
+ above alignment. Access only through the same helper functions as align
+ above. */
+ unsigned int misalign;
+};
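+
+/* Illustrative sketch, not from this header: what the align/misalign
+   pair asserts about a pointer.  For a 16-byte-aligned base plus a
+   byte offset of 6, align is 16 and misalign is 6, and the documented
+   invariant ptr & (align - 1) == misalign holds.  Hypothetical code:  */
+#if 0
+#include <cassert>
+#include <cstdint>
+
+void check_ptr_info (uintptr_t ptr, unsigned align, unsigned misalign)
+{
+  assert ((align & (align - 1)) == 0);      /* align is a power of two  */
+  assert ((ptr & (align - 1)) == misalign); /* low bits are known  */
+}
+
+/* check_ptr_info (base + 6, 16, 6) succeeds for any base that is a
+   multiple of 16.  */
+#endif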
+
+
+#define SSANAMES(fun) (fun)->gimple_df->ssa_names
+#define DEFAULT_DEFS(fun) (fun)->gimple_df->default_defs
+
+#define num_ssa_names (vec_safe_length (cfun->gimple_df->ssa_names))
+#define ssa_name(i) ((*cfun->gimple_df->ssa_names)[(i)])
+
+#define FOR_EACH_SSA_NAME(I, VAR, FN) \
+ for (I = 1; SSANAMES (FN)->iterate (I, &VAR); ++I) \
+ if (VAR)
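+
+/* Illustrative sketch, not part of GCC: the iteration starts at 1 and
+   guards on VAR because released names appear to leave null slots in
+   the table.  A stand-alone model of the same walk:  */
+#if 0
+#include <cstdio>
+#include <vector>
+
+void walk (const std::vector<const char *> &names)
+{
+  /* Mirror FOR_EACH_SSA_NAME: skip slot 0 and null (released) slots.  */
+  for (size_t i = 1; i < names.size (); ++i)
+    if (names[i])
+      std::printf ("%s\n", names[i]);
+}
+#endif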
+
+/* Sets the value range to SSA. */
+extern bool set_range_info (tree, const vrange &);
+extern void set_nonzero_bits (tree, const wide_int_ref &);
+extern wide_int get_nonzero_bits (const_tree);
+extern bool ssa_name_has_boolean_range (tree);
+extern void init_ssanames (struct function *, int);
+extern void fini_ssanames (struct function *);
+extern void ssanames_print_statistics (void);
+extern tree make_ssa_name_fn (struct function *, tree, gimple *,
+ unsigned int version = 0);
+extern void init_ssa_name_imm_use (tree);
+extern void release_ssa_name_fn (struct function *, tree);
+extern bool get_ptr_info_alignment (struct ptr_info_def *, unsigned int *,
+ unsigned int *);
+extern void mark_ptr_info_alignment_unknown (struct ptr_info_def *);
+extern void set_ptr_info_alignment (struct ptr_info_def *, unsigned int,
+ unsigned int);
+extern void adjust_ptr_info_misalignment (struct ptr_info_def *, poly_uint64);
+extern struct ptr_info_def *get_ptr_info (tree);
+extern void set_ptr_nonnull (tree);
+
+extern tree copy_ssa_name_fn (struct function *, tree, gimple *);
+extern void duplicate_ssa_name_ptr_info (tree, struct ptr_info_def *);
+extern tree duplicate_ssa_name_fn (struct function *, tree, gimple *);
+extern void duplicate_ssa_name_range_info (tree dest, tree src);
+extern void reset_flow_sensitive_info (tree);
+extern void reset_flow_sensitive_info_in_bb (basic_block);
+extern void release_defs (gimple *);
+extern void replace_ssa_name_symbol (tree, tree);
+extern void flush_ssaname_freelist (void);
+
+
+/* Return an SSA_NAME node for variable VAR defined in statement STMT
+ in function cfun. */
+
+inline tree
+make_ssa_name (tree var, gimple *stmt = NULL)
+{
+ return make_ssa_name_fn (cfun, var, stmt);
+}
+
+/* Return an SSA_NAME node using the template SSA name NAME defined in
+ statement STMT in function cfun. */
+
+inline tree
+copy_ssa_name (tree var, gimple *stmt = NULL)
+{
+ return copy_ssa_name_fn (cfun, var, stmt);
+}
+
+/* Creates a duplicate of an SSA name NAME to be defined by statement STMT
+ in function cfun. */
+
+inline tree
+duplicate_ssa_name (tree var, gimple *stmt)
+{
+ return duplicate_ssa_name_fn (cfun, var, stmt);
+}
+
+/* Release the SSA name NAME used in function cfun. */
+
+inline void
+release_ssa_name (tree name)
+{
+ release_ssa_name_fn (cfun, name);
+}
+
+/* Return an anonymous SSA_NAME node for type TYPE defined in statement STMT
+ in function cfun. Arrange so that it uses NAME in dumps. */
+
+inline tree
+make_temp_ssa_name (tree type, gimple *stmt, const char *name)
+{
+ tree ssa_name;
+ gcc_checking_assert (TYPE_P (type));
+ ssa_name = make_ssa_name_fn (cfun, type, stmt);
+ SET_SSA_NAME_VAR_OR_IDENTIFIER (ssa_name, get_identifier (name));
+ return ssa_name;
+}
+
+
+#endif /* GCC_TREE_SSANAMES_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-stdarg.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-stdarg.h
new file mode 100644
index 0000000..ad6c0f0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-stdarg.h
@@ -0,0 +1,36 @@
+/* Header for a pass computing data for optimizing stdarg functions.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+ Contributed by Jakub Jelinek <jakub@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_STDARG_H
+#define GCC_TREE_STDARG_H 1
+
+struct stdarg_info
+{
+ bitmap va_list_vars, va_list_escape_vars;
+ basic_block bb;
+ int compute_sizes, va_start_count;
+ bool va_list_escapes;
+ int *offsets;
+ /* These 2 fields are only meaningful if va_start_count == 1. */
+ basic_block va_start_bb;
+ tree va_start_ap;
+};
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-streamer.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-streamer.h
new file mode 100644
index 0000000..170d61c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-streamer.h
@@ -0,0 +1,122 @@
+/* Data structures and functions for streaming trees.
+
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+ Contributed by Diego Novillo <dnovillo@google.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_STREAMER_H
+#define GCC_TREE_STREAMER_H
+
+#include "streamer-hooks.h"
+#include "data-streamer.h"
+
+/* Cache of pickled nodes. Used to avoid writing the same node more
+ than once. The first time a tree node is streamed out, it is
+ entered in this cache. Subsequent references to the same node are
+ resolved by looking it up in this cache.
+
+ This is used in two ways:
+
+ - On the writing side, the first time T is added to STREAMER_CACHE,
+ a new reference index is created for T and T is emitted on the
+ stream. If T needs to be emitted again to the stream, instead of
+ pickling it again, the reference index is emitted.
+
+ - On the reading side, the first time T is read from the stream, it
+ is reconstructed in memory and a new reference index created for
+ T. The reconstructed T is inserted in some array so that when
+ the reference index for T is found in the input stream, it can be
+ used to look up into the array to get the reconstructed T. */
+
+struct streamer_tree_cache_d
+{
+ /* The mapping between tree nodes and slots into the nodes array. */
+ hash_map<tree, unsigned> *node_map;
+
+ /* The nodes pickled so far. */
+ vec<tree> nodes;
+ /* The node hashes (if available). */
+ vec<hashval_t> hashes;
+
+ /* Next index to assign. */
+ unsigned next_idx;
+};
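+
+/* Illustrative sketch, not the LTO streamer: the write-side protocol
+   described above, modelled with standard containers and hypothetical
+   names.  The first sighting of a node emits its body and assigns the
+   next index; later sightings emit only a back reference.  */
+#if 0
+#include <cstdio>
+#include <unordered_map>
+
+static std::unordered_map<const void *, unsigned> node_map;
+static unsigned next_idx;
+
+void stream_out (const void *t)
+{
+  auto it = node_map.find (t);
+  if (it == node_map.end ())
+    {
+      node_map.emplace (t, next_idx);
+      std::printf ("emit node body, new index %u\n", next_idx++);
+    }
+  else
+    std::printf ("emit back reference to index %u\n", it->second);
+}
+#endif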
+
+/* In tree-streamer-in.cc. */
+tree streamer_read_string_cst (class data_in *, class lto_input_block *);
+tree streamer_alloc_tree (class lto_input_block *, class data_in *,
+ enum LTO_tags);
+void streamer_read_tree_body (class lto_input_block *, class data_in *, tree);
+tree streamer_get_pickled_tree (class lto_input_block *, class data_in *);
+void streamer_read_tree_bitfields (class lto_input_block *,
+ class data_in *, tree);
+
+/* In tree-streamer-out.cc. */
+void streamer_write_string_cst (struct output_block *,
+ struct lto_output_stream *, tree);
+void streamer_write_tree_header (struct output_block *, tree);
+void streamer_write_tree_bitfields (struct output_block *, tree);
+void streamer_write_tree_body (struct output_block *, tree);
+void streamer_write_integer_cst (struct output_block *, tree);
+
+/* In tree-streamer.cc. */
+extern unsigned char streamer_mode_table[1 << 8];
+void streamer_check_handled_ts_structures (void);
+bool streamer_tree_cache_insert (struct streamer_tree_cache_d *, tree,
+ hashval_t, unsigned *);
+void streamer_tree_cache_replace_tree (struct streamer_tree_cache_d *, tree,
+ unsigned);
+void streamer_tree_cache_append (struct streamer_tree_cache_d *, tree,
+ hashval_t);
+bool streamer_tree_cache_lookup (struct streamer_tree_cache_d *, tree,
+ unsigned *);
+struct streamer_tree_cache_d *streamer_tree_cache_create (bool, bool, bool);
+void streamer_tree_cache_delete (struct streamer_tree_cache_d *);
+
+/* Return the tree node at slot IX in CACHE. */
+
+inline tree
+streamer_tree_cache_get_tree (struct streamer_tree_cache_d *cache, unsigned ix)
+{
+ return cache->nodes[ix];
+}
+
+/* Return the tree hash value at slot IX in CACHE. */
+
+inline hashval_t
+streamer_tree_cache_get_hash (struct streamer_tree_cache_d *cache, unsigned ix)
+{
+ return cache->hashes[ix];
+}
+
+inline void
+bp_pack_machine_mode (struct bitpack_d *bp, machine_mode mode)
+{
+ streamer_mode_table[mode] = 1;
+ bp_pack_enum (bp, machine_mode, 1 << 8, mode);
+}
+
+inline machine_mode
+bp_unpack_machine_mode (struct bitpack_d *bp)
+{
+ return (machine_mode)
+ ((class lto_input_block *)
+ bp->stream)->mode_table[bp_unpack_enum (bp, machine_mode, 1 << 8)];
+}
+
+#endif /* GCC_TREE_STREAMER_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-switch-conversion.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-switch-conversion.h
new file mode 100644
index 0000000..4e97164
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-switch-conversion.h
@@ -0,0 +1,927 @@
+/* Tree switch conversion for GNU compiler.
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef TREE_SWITCH_CONVERSION_H
+#define TREE_SWITCH_CONVERSION_H
+
+namespace tree_switch_conversion {
+
+/* Type of cluster. */
+
+enum cluster_type
+{
+ SIMPLE_CASE,
+ JUMP_TABLE,
+ BIT_TEST
+};
+
+#define PRINT_CASE(f,c) print_generic_expr (f, c)
+
+/* Abstract base class for representing a cluster of cases.
+
+   Here is the inheritance hierarchy, and the enum_cluster_type
+ values for the concrete subclasses:
+
+ cluster
+ |-simple_cluster (SIMPLE_CASE)
+ `-group_cluster
+ |-jump_table_cluster (JUMP_TABLE)
+ `-bit_test_cluster (BIT_TEST). */
+
+class cluster
+{
+public:
+ /* Constructor. */
+ inline cluster (tree case_label_expr, basic_block case_bb,
+ profile_probability prob, profile_probability subtree_prob);
+
+ /* Destructor. */
+ virtual ~cluster ()
+ {}
+
+ /* Return type. */
+ virtual cluster_type get_type () = 0;
+
+ /* Get low value covered by a cluster. */
+ virtual tree get_low () = 0;
+
+ /* Get high value covered by a cluster. */
+ virtual tree get_high () = 0;
+
+ /* Debug content of a cluster. */
+ virtual void debug () = 0;
+
+ /* Dump content of a cluster. */
+ virtual void dump (FILE *f, bool details = false) = 0;
+
+ /* Emit GIMPLE code to handle the cluster. */
+ virtual void emit (tree, tree, tree, basic_block, location_t) = 0;
+
+ /* Return true if a cluster handles only a single case value and the
+ value is not a range. */
+ virtual bool is_single_value_p ()
+ {
+ return false;
+ }
+
+ /* Return range of a cluster. If value would overflow in type of LOW,
+ then return 0. */
+ static unsigned HOST_WIDE_INT get_range (tree low, tree high)
+ {
+ wide_int w = wi::to_wide (high) - wi::to_wide (low);
+ if (wi::neg_p (w, TYPE_SIGN (TREE_TYPE (low))) || !wi::fits_uhwi_p (w))
+ return 0;
+ return w.to_uhwi () + 1;
+ }
+
+ /* Case label. */
+ tree m_case_label_expr;
+
+ /* Basic block of the case. */
+ basic_block m_case_bb;
+
+ /* Probability of taking this cluster. */
+ profile_probability m_prob;
+
+ /* Probability of reaching subtree rooted at this node. */
+ profile_probability m_subtree_prob;
+
+ /* Probability of default case when reaching the node.
+ It is used by bit-test right now. */
+ profile_probability m_default_prob;
+
+protected:
+ /* Default constructor. */
+ cluster () {}
+};
+
+cluster::cluster (tree case_label_expr, basic_block case_bb,
+ profile_probability prob, profile_probability subtree_prob):
+ m_case_label_expr (case_label_expr), m_case_bb (case_bb), m_prob (prob),
+ m_subtree_prob (subtree_prob),
+ m_default_prob (profile_probability::uninitialized ())
+{
+}
+
+/* Subclass of cluster representing a simple contiguous range
+ from [low..high]. */
+
+class simple_cluster: public cluster
+{
+public:
+ /* Constructor. */
+ inline simple_cluster (tree low, tree high, tree case_label_expr,
+ basic_block case_bb, profile_probability prob,
+ bool has_forward_bb = false);
+
+ /* Destructor. */
+ ~simple_cluster ()
+ {}
+
+ cluster_type
+ get_type () final override
+ {
+ return SIMPLE_CASE;
+ }
+
+ tree
+ get_low () final override
+ {
+ return m_low;
+ }
+
+ tree
+ get_high () final override
+ {
+ return m_high;
+ }
+
+ void set_high (tree high)
+ {
+ m_high = high;
+ }
+
+ void
+ debug () final override
+ {
+ dump (stderr);
+ }
+
+ void
+ dump (FILE *f, bool details ATTRIBUTE_UNUSED = false) final override
+ {
+ PRINT_CASE (f, get_low ());
+ if (get_low () != get_high ())
+ {
+ fprintf (f, "-");
+ PRINT_CASE (f, get_high ());
+ }
+ fprintf (f, " ");
+ }
+
+ void emit (tree, tree, tree, basic_block, location_t) final override
+ {
+ gcc_unreachable ();
+ }
+
+ bool is_single_value_p () final override
+ {
+ return tree_int_cst_equal (get_low (), get_high ());
+ }
+
+ /* Return number of comparisons needed for the case. */
+ unsigned
+ get_comparison_count ()
+ {
+ return m_range_p ? 2 : 1;
+ }
+
+ /* Low value of the case. */
+ tree m_low;
+
+ /* High value of the case. */
+ tree m_high;
+
+ /* True if case is a range. */
+ bool m_range_p;
+
+ /* True if the case will use a forwarder BB. */
+ bool m_has_forward_bb;
+};
+
+simple_cluster::simple_cluster (tree low, tree high, tree case_label_expr,
+ basic_block case_bb, profile_probability prob,
+ bool has_forward_bb):
+ cluster (case_label_expr, case_bb, prob, prob),
+ m_low (low), m_high (high), m_has_forward_bb (has_forward_bb)
+{
+ m_range_p = m_high != NULL;
+ if (m_high == NULL)
+ m_high = m_low;
+}
+
+/* Abstract subclass of jump table and bit test cluster,
+ handling a collection of simple_cluster instances. */
+
+class group_cluster: public cluster
+{
+public:
+ /* Constructor. */
+ group_cluster (vec<cluster *> &clusters, unsigned start, unsigned end);
+
+ /* Destructor. */
+ ~group_cluster ();
+
+ tree
+ get_low () final override
+ {
+ return m_cases[0]->get_low ();
+ }
+
+ tree
+ get_high () final override
+ {
+ return m_cases[m_cases.length () - 1]->get_high ();
+ }
+
+ void
+ debug () final override
+ {
+ dump (stderr);
+ }
+
+ void dump (FILE *f, bool details = false) final override;
+
+ /* List of simple clusters handled by the group. */
+ vec<simple_cluster *> m_cases;
+};
+
+/* Concrete subclass of group_cluster representing a collection
+ of cases to be implemented as a jump table.
+ The "emit" vfunc generates a nested switch statement which
+ is later lowered to a jump table. */
+
+class jump_table_cluster: public group_cluster
+{
+public:
+ /* Constructor. */
+ jump_table_cluster (vec<cluster *> &clusters, unsigned start, unsigned end)
+ : group_cluster (clusters, start, end)
+ {}
+
+ cluster_type
+ get_type () final override
+ {
+ return JUMP_TABLE;
+ }
+
+ void emit (tree index_expr, tree index_type,
+ tree default_label_expr, basic_block default_bb, location_t loc)
+ final override;
+
+ /* Find jump tables of given CLUSTERS, where all members of the vector
+ are of type simple_cluster. New clusters are returned. */
+ static vec<cluster *> find_jump_tables (vec<cluster *> &clusters);
+
+ /* Return true when cluster starting at START and ending at END (inclusive)
+ can build a jump-table. COMPARISON_COUNT is number of comparison
+ operations needed if the clusters are expanded as decision tree.
+ MAX_RATIO tells about the maximum code growth (in percent). */
+ static bool can_be_handled (const vec<cluster *> &clusters, unsigned start,
+ unsigned end, unsigned HOST_WIDE_INT max_ratio,
+ unsigned HOST_WIDE_INT comparison_count);
+
+ /* Return true if cluster starting at START and ending at END (inclusive)
+     is a profitable transformation.  */
+ static bool is_beneficial (const vec<cluster *> &clusters, unsigned start,
+ unsigned end);
+
+ /* Return the smallest number of different values for which it is best
+ to use a jump-table instead of a tree of conditional branches. */
+ static inline unsigned int case_values_threshold (void);
+
+ /* Return whether jump table expansion is allowed. */
+ static inline bool is_enabled (void);
+};
+
+/* A GIMPLE switch statement can be expanded to a short sequence of bit-wise
+comparisons. "switch(x)" is converted into "if ((1 << (x-MINVAL)) & CST)"
+where CST and MINVAL are integer constants. This is better than a series
+of compare-and-branch insns in some cases, e.g. we can implement:
+
+ if ((x==4) || (x==6) || (x==9) || (x==11))
+
+as a single bit test:
+
+ if ((1<<x) & ((1<<4)|(1<<6)|(1<<9)|(1<<11)))
+
+This transformation is only applied if the number of case targets is small,
+if CST contains at least 3 bits, and "1 << x" is cheap. The bit tests are
+performed in "word_mode".
+
+The following example shows the code the transformation generates:
+
+ int bar(int x)
+ {
+ switch (x)
+ {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ return 1;
+ }
+ return 0;
+ }
+
+==>
+
+ bar (int x)
+ {
+ tmp1 = x - 48;
+ if (tmp1 > (70 - 48)) goto L2;
+ tmp2 = 1 << tmp1;
+ tmp3 = 0b11111100000001111111111;
+ if ((tmp2 & tmp3) != 0) goto L1 ; else goto L2;
+ L1:
+ return 1;
+ L2:
+ return 0;
+ }
+
+TODO: There are still some improvements to this transformation that could
+be implemented:
+
+* A narrower mode than word_mode could be used if that is cheaper, e.g.
+ for x86_64 where a narrower-mode shift may result in smaller code.
+
+* The compounded constant could be shifted rather than the 1.  The
+ test would be either on the sign bit or on the least significant bit,
+ depending on the direction of the shift. On some machines, the test
+ for the branch would be free if the bit to test is already set by the
+ shift operation.
+
+This transformation was contributed by Roger Sayle, see this e-mail:
+ http://gcc.gnu.org/ml/gcc-patches/2003-01/msg01950.html
+*/
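+
+/* Illustrative check, not part of GCC: the mask in the example above,
+   0b11111100000001111111111 == 0x7e03ff, has bits 0-9 set for '0'-'9'
+   and bits 17-22 set for 'A'-'F', counting offsets from 48.  A
+   stand-alone version of the transformed function:  */
+#if 0
+int bar_bittest (int x)
+{
+  unsigned tmp = (unsigned) x - 48;
+  return tmp <= 70 - 48 && ((1u << tmp) & 0x7e03ffu) != 0;
+}
+/* bar_bittest agrees with the switch in bar above for every int x.  */
+#endif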
+
+class bit_test_cluster: public group_cluster
+{
+public:
+ /* Constructor. */
+ bit_test_cluster (vec<cluster *> &clusters, unsigned start, unsigned end,
+ bool handles_entire_switch)
+ :group_cluster (clusters, start, end),
+ m_handles_entire_switch (handles_entire_switch)
+ {}
+
+ cluster_type
+ get_type () final override
+ {
+ return BIT_TEST;
+ }
+
+/* Expand a switch statement by a short sequence of bit-wise
+ comparisons. "switch(x)" is effectively converted into
+ "if ((1 << (x-MINVAL)) & CST)" where CST and MINVAL are
+ integer constants.
+
+ INDEX_EXPR is the value being switched on.
+
+   MINVAL is the lowest case value in the case nodes,
+ and RANGE is highest value minus MINVAL. MINVAL and RANGE
+ are not guaranteed to be of the same type as INDEX_EXPR
+ (the gimplifier doesn't change the type of case label values,
+ and MINVAL and RANGE are derived from those values).
+ MAXVAL is MINVAL + RANGE.
+
+ There *MUST* be max_case_bit_tests or less unique case
+ node targets. */
+ void emit (tree index_expr, tree index_type,
+ tree default_label_expr, basic_block default_bb, location_t loc)
+ final override;
+
+ /* Find bit tests of given CLUSTERS, where all members of the vector
+ are of type simple_cluster. New clusters are returned. */
+ static vec<cluster *> find_bit_tests (vec<cluster *> &clusters);
+
+ /* Return true when RANGE of case values with UNIQ labels
+ can build a bit test. */
+ static bool can_be_handled (unsigned HOST_WIDE_INT range, unsigned uniq);
+
+ /* Return true when cluster starting at START and ending at END (inclusive)
+ can build a bit test. */
+ static bool can_be_handled (const vec<cluster *> &clusters, unsigned start,
+ unsigned end);
+
+ /* Return true when COUNT of cases of UNIQ labels is beneficial for bit test
+ transformation. */
+ static bool is_beneficial (unsigned count, unsigned uniq);
+
+ /* Return true if cluster starting at START and ending at END (inclusive)
+     is a profitable transformation.  */
+ static bool is_beneficial (const vec<cluster *> &clusters, unsigned start,
+ unsigned end);
+
+/* Split the basic block at the statement pointed to by GSIP, and insert
+ a branch to the target basic block of E_TRUE conditional on tree
+ expression COND.
+
+ It is assumed that there is already an edge from the to-be-split
+ basic block to E_TRUE->dest block. This edge is removed, and the
+ profile information on the edge is re-used for the new conditional
+ jump.
+
+ The CFG is updated. The dominator tree will not be valid after
+ this transformation, but the immediate dominators are updated if
+ UPDATE_DOMINATORS is true.
+
+ Returns the newly created basic block. */
+ static basic_block hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
+ tree cond,
+ basic_block case_bb,
+ profile_probability prob,
+ location_t);
+
+ /* Return whether bit test expansion is allowed. */
+ static inline bool is_enabled (void)
+ {
+ return flag_bit_tests;
+ }
+
+ /* True when the jump table handles an entire switch statement. */
+ bool m_handles_entire_switch;
+
+ /* Maximum number of different basic blocks that can be handled by
+ a bit test. */
+ static const int m_max_case_bit_tests = 3;
+};
+
+/* Helper struct to find minimal clusters. */
+
+class min_cluster_item
+{
+public:
+ /* Constructor. */
+ min_cluster_item (unsigned count, unsigned start, unsigned non_jt_cases):
+ m_count (count), m_start (start), m_non_jt_cases (non_jt_cases)
+ {}
+
+ /* Count of clusters. */
+ unsigned m_count;
+
+  /* Index of the cluster boundary.  */
+ unsigned m_start;
+
+ /* Total number of cases that will not be in a jump table. */
+ unsigned m_non_jt_cases;
+};
+
+/* Helper struct to represent switch decision tree. */
+
+class case_tree_node
+{
+public:
+ /* Empty Constructor. */
+ case_tree_node ();
+
+ /* Return true when it has a child. */
+ bool has_child ()
+ {
+ return m_left != NULL || m_right != NULL;
+ }
+
+ /* Left son in binary tree. */
+ case_tree_node *m_left;
+
+ /* Right son in binary tree; also node chain. */
+ case_tree_node *m_right;
+
+ /* Parent of node in binary tree. */
+ case_tree_node *m_parent;
+
+ /* Cluster represented by this tree node. */
+ cluster *m_c;
+};
+
+inline
+case_tree_node::case_tree_node ():
+ m_left (NULL), m_right (NULL), m_parent (NULL), m_c (NULL)
+{
+}
+
+unsigned int
+jump_table_cluster::case_values_threshold (void)
+{
+ unsigned int threshold = param_case_values_threshold;
+
+ if (threshold == 0)
+ threshold = targetm.case_values_threshold ();
+
+ return threshold;
+}
+
+/* Return whether jump table expansion is allowed. */
+bool jump_table_cluster::is_enabled (void)
+{
+  /* If neither casesi nor tablejump is available, or flag_jump_tables
+ over-ruled us, we really have no choice. */
+ if (!targetm.have_casesi () && !targetm.have_tablejump ())
+ return false;
+ if (!flag_jump_tables)
+ return false;
+#ifndef ASM_OUTPUT_ADDR_DIFF_ELT
+ if (flag_pic)
+ return false;
+#endif
+
+ return true;
+}
+
+/* A case_bit_test represents a set of case nodes that may be
+ selected from using a bit-wise comparison. HI and LO hold
+ the integer to be tested against, TARGET_EDGE contains the
+ edge to the basic block to jump to upon success and BITS
+ counts the number of case nodes handled by this test,
+ typically the number of bits set in HI:LO. The LABEL field
+ is used to quickly identify all cases in this set without
+ looking at label_to_block for every case label. */
+
+class case_bit_test
+{
+public:
+ wide_int mask;
+ basic_block target_bb;
+ tree label;
+ int bits;
+ profile_probability prob;
+
+ /* Comparison function for qsort to order bit tests by decreasing
+ probability of execution. */
+ static int cmp (const void *p1, const void *p2);
+};
+
+class switch_decision_tree
+{
+public:
+ /* Constructor. */
+ switch_decision_tree (gswitch *swtch): m_switch (swtch), m_phi_mapping (),
+ m_case_bbs (), m_case_node_pool ("struct case_node pool"),
+ m_case_list (NULL)
+ {
+ }
+
+ /* Analyze switch statement and return true when the statement is expanded
+ as decision tree. */
+ bool analyze_switch_statement ();
+
+ /* Attempt to expand CLUSTERS as a decision tree. Return true when
+ expanded. */
+ bool try_switch_expansion (vec<cluster *> &clusters);
+  /* Compute the number of case labels that correspond to each outgoing edge
+     of the switch statement.  Record this information in the aux field of
+     the edge.  */
+ void compute_cases_per_edge ();
+
+ /* Before switch transformation, record all SSA_NAMEs defined in switch BB
+ and used in a label basic block. */
+ void record_phi_operand_mapping ();
+
+ /* Append new operands to PHI statements that were introduced due to
+ addition of new edges to case labels. */
+ void fix_phi_operands_for_edges ();
+
+ /* Generate a decision tree, switching on INDEX_EXPR and jumping to
+ one of the labels in CASE_LIST or to the DEFAULT_LABEL.
+
+ We generate a binary decision tree to select the appropriate target
+ code. */
+ void emit (basic_block bb, tree index_expr,
+ profile_probability default_prob, tree index_type);
+
+ /* Emit step-by-step code to select a case for the value of INDEX.
+ The thus generated decision tree follows the form of the
+ case-node binary tree NODE, whose nodes represent test conditions.
+ DEFAULT_PROB is probability of cases leading to default BB.
+ INDEX_TYPE is the type of the index of the switch. */
+ basic_block emit_case_nodes (basic_block bb, tree index,
+ case_tree_node *node,
+ profile_probability default_prob,
+ tree index_type, location_t);
+
+ /* Take an ordered list of case nodes
+ and transform them into a near optimal binary tree,
+ on the assumption that any target code selection value is as
+ likely as any other.
+
+ The transformation is performed by splitting the ordered
+ list into two equal sections plus a pivot. The parts are
+ then attached to the pivot as left and right branches. Each
+ branch is then transformed recursively. */
+ static void balance_case_nodes (case_tree_node **head,
+ case_tree_node *parent);
+
+ /* Dump ROOT, a list or tree of case nodes, to file F. */
+ static void dump_case_nodes (FILE *f, case_tree_node *root, int indent_step,
+ int indent_level);
+
+ /* Add an unconditional jump to CASE_BB that happens in basic block BB. */
+ static void emit_jump (basic_block bb, basic_block case_bb);
+
+ /* Generate code to compare OP0 with OP1 so that the condition codes are
+ set and to jump to LABEL_BB if the condition is true.
+ COMPARISON is the GIMPLE comparison (EQ, NE, GT, etc.).
+ PROB is the probability of jumping to LABEL_BB. */
+ static basic_block emit_cmp_and_jump_insns (basic_block bb, tree op0,
+ tree op1, tree_code comparison,
+ basic_block label_bb,
+ profile_probability prob,
+ location_t);
+
+ /* Generate code to jump to LABEL if OP0 and OP1 are equal in mode MODE.
+ PROB is the probability of jumping to LABEL_BB. */
+ static basic_block do_jump_if_equal (basic_block bb, tree op0, tree op1,
+ basic_block label_bb,
+ profile_probability prob,
+ location_t);
+
+ /* Reset the aux field of all outgoing edges of switch basic block. */
+ static inline void reset_out_edges_aux (gswitch *swtch);
+
+ /* Switch statement. */
+ gswitch *m_switch;
+
+ /* Map of PHI nodes that have to be fixed after expansion. */
+ hash_map<tree, tree> m_phi_mapping;
+
+ /* List of basic blocks that belong to labels of the switch. */
+ auto_vec<basic_block> m_case_bbs;
+
+ /* Basic block with default label. */
+ basic_block m_default_bb;
+
+ /* A pool for case nodes. */
+ object_allocator<case_tree_node> m_case_node_pool;
+
+ /* Balanced tree of case nodes. */
+ case_tree_node *m_case_list;
+};
+
+/*
+ Switch initialization conversion
+
+The following pass changes simple initializations of scalars in a switch
+statement into initializations from a static array. Obviously, the values
+must be constant and known at compile time and a default branch must be
+provided. For example, the following code:
+
+ int a,b;
+
+ switch (argc)
+ {
+ case 1:
+ case 2:
+ a_1 = 8;
+ b_1 = 6;
+ break;
+ case 3:
+ a_2 = 9;
+ b_2 = 5;
+ break;
+ case 12:
+ a_3 = 10;
+ b_3 = 4;
+ break;
+ default:
+ a_4 = 16;
+ b_4 = 1;
+ break;
+ }
+ a_5 = PHI <a_1, a_2, a_3, a_4>
+ b_5 = PHI <b_1, b_2, b_3, b_4>
+
+
+is changed into:
+
+  static const int CSWTCH01[] = {6, 6, 5, 1, 1, 1, 1, 1, 1, 1, 1, 4};
+  static const int CSWTCH02[] = {8, 8, 9, 16, 16, 16, 16, 16, 16, 16,
+                                 16, 10};
+
+  if (((unsigned) argc) - 1 < 12)
+ {
+ a_6 = CSWTCH02[argc - 1];
+ b_6 = CSWTCH01[argc - 1];
+ }
+ else
+ {
+ a_7 = 16;
+ b_7 = 1;
+ }
+ a_5 = PHI <a_6, a_7>
+  b_5 = PHI <b_6, b_7>
+
+There are further constraints. Specifically, the range of values across all
+case labels must not be bigger than param_switch_conversion_branch_ratio
+(default eight) times the number of the actual switch branches.
+
+This transformation was contributed by Martin Jambor, see this e-mail:
+ http://gcc.gnu.org/ml/gcc-patches/2008-07/msg00011.html */
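+
+/* Illustrative sketch, not GCC output: the rewrite above as plain C,
+   with the arrays indexed by the zero-based switch value and the guard
+   falling back to the default values.  Function and variable names are
+   hypothetical.  */
+#if 0
+static const int CSWTCH01[] = {6, 6, 5, 1, 1, 1, 1, 1, 1, 1, 1, 4};
+static const int CSWTCH02[] = {8, 8, 9, 16, 16, 16, 16, 16, 16, 16,
+                               16, 10};
+
+void init_a_b (int argc, int *a, int *b)
+{
+  if ((unsigned) argc - 1 < 12)   /* cases 1 through 12  */
+    {
+      *a = CSWTCH02[argc - 1];
+      *b = CSWTCH01[argc - 1];
+    }
+  else
+    {
+      *a = 16;                    /* default values  */
+      *b = 1;
+    }
+}
+#endif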
+
+/* The main structure of the pass. */
+class switch_conversion
+{
+public:
+ /* Constructor. */
+ switch_conversion ();
+
+ /* Destructor. */
+ ~switch_conversion ();
+
+ /* The following function is invoked on every switch statement (the current
+ one is given in SWTCH) and runs the individual phases of switch
+ conversion on it one after another until one fails or the conversion
+     is completed.  On success, m_reason is NULL; otherwise it points
+     to a string with the reason why the conversion failed.  */
+ void expand (gswitch *swtch);
+
+  /* Collect information about the SWTCH statement.  */
+ void collect (gswitch *swtch);
+
+  /* Checks whether the range given by individual case statements of the
+     switch statement isn't too big and whether the number of branches
+     actually satisfies the size of the new array.  */
+ bool check_range ();
+
+  /* Checks whether all basic blocks except the final BB are empty.  */
+ bool check_all_empty_except_final ();
+
+ /* This function checks whether all required values in phi nodes in final_bb
+ are constants. Required values are those that correspond to a basic block
+ which is a part of the examined switch statement. It returns true if the
+ phi nodes are OK, otherwise false. */
+ bool check_final_bb ();
+
+ /* The following function allocates default_values, target_{in,out}_names and
+ constructors arrays. The last one is also populated with pointers to
+ vectors that will become constructors of new arrays. */
+ void create_temp_arrays ();
+
+ /* Populate the array of default values in the order of phi nodes.
+ DEFAULT_CASE is the CASE_LABEL_EXPR for the default switch branch
+ if the range is non-contiguous or the default case has standard
+ structure, otherwise it is the first non-default case instead. */
+ void gather_default_values (tree default_case);
+
+ /* The following function populates the vectors in the constructors array with
+ future contents of the static arrays. The vectors are populated in the
+ order of phi nodes. */
+ void build_constructors ();
+
+  /* If all values in the constructor vector are products of a linear function
+     a * x + b, then return true.  When true, COEFF_A and COEFF_B are the
+     coefficients of the linear function.  Note that equal values are a special
+     case of a linear function with a equal to zero.  For instance, the vector
+     { 10, 13, 16, 19 } satisfies this with a == 3 and b == 10. */
+ bool contains_linear_function_p (vec<constructor_elt, va_gc> *vec,
+ wide_int *coeff_a, wide_int *coeff_b);
+
+ /* Return type which should be used for array elements, either TYPE's
+ main variant or, for integral types, some smaller integral type
+ that can still hold all the constants. */
+ tree array_value_type (tree type, int num);
+
+ /* Create an appropriate array type and declaration and assemble a static
+ array variable. Also create a load statement that initializes
+ the variable in question with a value from the static array. SWTCH is
+ the switch statement being converted, NUM is the index to
+ arrays of constructors, default values and target SSA names
+ for this particular array. ARR_INDEX_TYPE is the type of the index
+ of the new array, PHI is the phi node of the final BB that corresponds
+ to the value that will be loaded from the created array. TIDX
+ is an ssa name of a temporary variable holding the index for loads from the
+ new array. */
+ void build_one_array (int num, tree arr_index_type,
+ gphi *phi, tree tidx);
+
+  /* Builds static arrays initialized with values gathered from the switch
+     statement.  Also creates statements that load values from them. */
+ void build_arrays ();
+
+ /* Generates and appropriately inserts loads of default values at the position
+ given by GSI. Returns the last inserted statement. */
+ gassign *gen_def_assigns (gimple_stmt_iterator *gsi);
+
+  /* Deletes the unused bbs and edges that now contain the switch statement and
+     its empty branch bbs.  BBD is the now dead BB containing
+     the original switch statement, FINAL is the last BB of the converted
+     switch statement (in terms of succession), and DEFAULT_BB is the target
+     block of the default case. */
+ void prune_bbs (basic_block bbd, basic_block final, basic_block default_bb);
+
+ /* Add values to phi nodes in final_bb for the two new edges. E1F is the edge
+ from the basic block loading values from an array and E2F from the basic
+ block loading default values. BBF is the last switch basic block (see the
+ bbf description in the comment below). */
+ void fix_phi_nodes (edge e1f, edge e2f, basic_block bbf);
+
+ /* Creates a check whether the switch expression value actually falls into the
+ range given by all the cases. If it does not, the temporaries are loaded
+ with default values instead. */
+ void gen_inbound_check ();
+
+ /* Switch statement for which switch conversion takes place. */
+ gswitch *m_switch;
+
+ /* The expression used to decide the switch branch. */
+ tree m_index_expr;
+
+ /* The following integer constants store the minimum and maximum value
+ covered by the case labels. */
+ tree m_range_min;
+ tree m_range_max;
+
+ /* The difference between the above two numbers. Stored here because it
+ is used in all the conversion heuristics, as well as for some of the
+ transformation, and it is expensive to re-compute it all the time. */
+ tree m_range_size;
+
+ /* Basic block that contains the actual GIMPLE_SWITCH. */
+ basic_block m_switch_bb;
+
+ /* Basic block that is the target of the default case. */
+ basic_block m_default_bb;
+
+ /* The single successor block of all branches out of the GIMPLE_SWITCH,
+ if such a block exists. Otherwise NULL. */
+ basic_block m_final_bb;
+
+ /* The probability of the default edge in the replaced switch. */
+ profile_probability m_default_prob;
+
+ /* Number of phi nodes in the final bb (that we'll be replacing). */
+ int m_phi_count;
+
+ /* Constructors of new static arrays. */
+ vec<constructor_elt, va_gc> **m_constructors;
+
+ /* Array of default values, in the same order as phi nodes. */
+ tree *m_default_values;
+
+ /* Array of ssa names that are initialized with a value from a new static
+ array. */
+ tree *m_target_inbound_names;
+
+ /* Array of ssa names that are initialized with the default value if the
+ switch expression is out of range. */
+ tree *m_target_outbound_names;
+
+ /* VOP SSA_NAME. */
+ tree m_target_vop;
+
+ /* The first load statement that loads a temporary from a new static array.
+ */
+ gimple *m_arr_ref_first;
+
+ /* The last load statement that loads a temporary from a new static array. */
+ gimple *m_arr_ref_last;
+
+  /* Reason why the switch wasn't a good conversion candidate, written to the
+     dump file if there is one. */
+ const char *m_reason;
+
+ /* True if default case is not used for any value between range_min and
+ range_max inclusive. */
+ bool m_contiguous_range;
+
+ /* True if default case does not have the required shape for other case
+ labels. */
+ bool m_default_case_nonstandard;
+
+  /* Number of unique labels for non-default edges. */
+ unsigned int m_uniq;
+
+  /* Number of non-default edges. */
+ unsigned int m_count;
+
+ /* True if CFG has been changed. */
+ bool m_cfg_altered;
+};
+
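+/* Illustrative sketch of how expand chains the phases above (ordering
+   inferred from the declaration comments; the real pass may differ in
+   detail.  DEFAULT_CASE is obtained as described in the comment on
+   gather_default_values):
+
+     collect (swtch);
+     if (!check_range ()
+         || !check_all_empty_except_final ()
+         || !check_final_bb ())
+       return;                  // m_reason records why conversion failed
+     create_temp_arrays ();
+     gather_default_values (default_case);
+     build_constructors ();
+     build_arrays ();
+     gen_inbound_check ();      // emits the range test and default loads
+*/
+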
+void
+switch_decision_tree::reset_out_edges_aux (gswitch *swtch)
+{
+ basic_block bb = gimple_bb (swtch);
+ edge e;
+ edge_iterator ei;
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ e->aux = (void *) 0;
+}
+
+/* Release CLUSTERS vector and destruct all dynamically allocated items. */
+
+inline void
+release_clusters (vec<cluster *> &clusters)
+{
+ for (unsigned i = 0; i < clusters.length (); i++)
+ delete clusters[i];
+ clusters.release ();
+}
+
+} // tree_switch_conversion namespace
+
+#endif // TREE_SWITCH_CONVERSION_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-vector-builder.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-vector-builder.h
new file mode 100644
index 0000000..2af6d75
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-vector-builder.h
@@ -0,0 +1,145 @@
+/* A class for building vector tree constants.
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_VECTOR_BUILDER_H
+#define GCC_TREE_VECTOR_BUILDER_H
+
+#include "vector-builder.h"
+
+/* This class is used to build VECTOR_CSTs from a sequence of elements.
+ See vector_builder for more details. */
+class tree_vector_builder : public vector_builder<tree, tree,
+ tree_vector_builder>
+{
+ typedef vector_builder<tree, tree, tree_vector_builder> parent;
+ friend class vector_builder<tree, tree, tree_vector_builder>;
+
+public:
+ tree_vector_builder () : m_type (0) {}
+ tree_vector_builder (tree, unsigned int, unsigned int);
+ tree build ();
+
+ tree type () const { return m_type; }
+
+ void new_vector (tree, unsigned int, unsigned int);
+
+private:
+ bool equal_p (const_tree, const_tree) const;
+ bool allow_steps_p () const;
+ bool integral_p (const_tree) const;
+ wide_int step (const_tree, const_tree) const;
+ tree apply_step (tree, unsigned int, const wide_int &) const;
+ bool can_elide_p (const_tree) const;
+ void note_representative (tree *, tree);
+
+ static poly_uint64 shape_nelts (const_tree t)
+ { return TYPE_VECTOR_SUBPARTS (t); }
+ static poly_uint64 nelts_of (const_tree t)
+ { return VECTOR_CST_NELTS (t); }
+ static unsigned int npatterns_of (const_tree t)
+ { return VECTOR_CST_NPATTERNS (t); }
+ static unsigned int nelts_per_pattern_of (const_tree t)
+ { return VECTOR_CST_NELTS_PER_PATTERN (t); }
+
+ tree m_type;
+};
+
+/* Create a new builder for a vector of type TYPE. Initially encode the
+ value as NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements
+ each. */
+
+inline
+tree_vector_builder::tree_vector_builder (tree type, unsigned int npatterns,
+ unsigned int nelts_per_pattern)
+{
+ new_vector (type, npatterns, nelts_per_pattern);
+}
+
+/* Start building a new vector of type TYPE. Initially encode the value
+ as NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each. */
+
+inline void
+tree_vector_builder::new_vector (tree type, unsigned int npatterns,
+ unsigned int nelts_per_pattern)
+{
+ m_type = type;
+ parent::new_vector (TYPE_VECTOR_SUBPARTS (type), npatterns,
+ nelts_per_pattern);
+}
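+
+/* Usage sketch (illustrative, not part of this header): build the stepped
+   series { 1, 2, 3, 4, ... } for an integer vector type VECTYPE as a
+   single pattern with three elements per pattern, from which the common
+   step is inferred.  quick_push comes from the underlying auto_vec and
+   build finalizes the VECTOR_CST:
+
+     tree eltype = TREE_TYPE (vectype);
+     tree_vector_builder builder (vectype, 1, 3);
+     for (unsigned int i = 1; i <= 3; ++i)
+       builder.quick_push (build_int_cst (eltype, i));
+     tree series = builder.build ();  */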
+
+/* Return true if elements I1 and I2 are equal. */
+
+inline bool
+tree_vector_builder::equal_p (const_tree elt1, const_tree elt2) const
+{
+ return operand_equal_p (elt1, elt2, OEP_BITWISE);
+}
+
+/* Return true if a stepped representation is OK. We don't allow
+ linear series for anything other than integers, to avoid problems
+ with rounding. */
+
+inline bool
+tree_vector_builder::allow_steps_p () const
+{
+ return INTEGRAL_TYPE_P (TREE_TYPE (m_type));
+}
+
+/* Return true if ELT can be interpreted as an integer. */
+
+inline bool
+tree_vector_builder::integral_p (const_tree elt) const
+{
+ return TREE_CODE (elt) == INTEGER_CST;
+}
+
+/* Return the value of element ELT2 minus the value of element ELT1.
+ Both elements are known to be INTEGER_CSTs. */
+
+inline wide_int
+tree_vector_builder::step (const_tree elt1, const_tree elt2) const
+{
+ return wi::to_wide (elt2) - wi::to_wide (elt1);
+}
+
+/* Return true if we can drop element ELT, even if the retained elements
+ are different. Return false if this would mean losing overflow
+ information. */
+
+inline bool
+tree_vector_builder::can_elide_p (const_tree elt) const
+{
+ return !CONSTANT_CLASS_P (elt) || !TREE_OVERFLOW (elt);
+}
+
+/* Record that ELT2 is being elided, given that ELT1_PTR points to the last
+ encoded element for the containing pattern. */
+
+inline void
+tree_vector_builder::note_representative (tree *elt1_ptr, tree elt2)
+{
+ if (CONSTANT_CLASS_P (elt2) && TREE_OVERFLOW (elt2))
+ {
+ gcc_assert (operand_equal_p (*elt1_ptr, elt2, 0));
+      /* Prefer ELT2 if the retained representative lacks its overflow
+	 information.  */
+      if (!TREE_OVERFLOW (*elt1_ptr))
+ *elt1_ptr = elt2;
+ }
+}
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-vectorizer.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-vectorizer.h
new file mode 100644
index 0000000..9cf2fb2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-vectorizer.h
@@ -0,0 +1,2586 @@
+/* Vectorizer
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ Contributed by Dorit Naishlos <dorit@il.ibm.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_VECTORIZER_H
+#define GCC_TREE_VECTORIZER_H
+
+typedef class _stmt_vec_info *stmt_vec_info;
+typedef struct _slp_tree *slp_tree;
+
+#include "tree-data-ref.h"
+#include "tree-hash-traits.h"
+#include "target.h"
+#include "internal-fn.h"
+#include "tree-ssa-operands.h"
+#include "gimple-match.h"
+
+/* Used for naming of new temporaries. */
+enum vect_var_kind {
+ vect_simple_var,
+ vect_pointer_var,
+ vect_scalar_var,
+ vect_mask_var
+};
+
+/* Defines type of operation. */
+enum operation_type {
+ unary_op = 1,
+ binary_op,
+ ternary_op
+};
+
+/* Define type of available alignment support. */
+enum dr_alignment_support {
+ dr_unaligned_unsupported,
+ dr_unaligned_supported,
+ dr_explicit_realign,
+ dr_explicit_realign_optimized,
+ dr_aligned
+};
+
+/* Define type of def-use cross-iteration cycle. */
+enum vect_def_type {
+ vect_uninitialized_def = 0,
+ vect_constant_def = 1,
+ vect_external_def,
+ vect_internal_def,
+ vect_induction_def,
+ vect_reduction_def,
+ vect_double_reduction_def,
+ vect_nested_cycle,
+ vect_first_order_recurrence,
+ vect_unknown_def_type
+};
+
+/* Define operation type of linear/non-linear induction variable. */
+enum vect_induction_op_type {
+ vect_step_op_add = 0,
+ vect_step_op_neg,
+ vect_step_op_mul,
+ vect_step_op_shl,
+ vect_step_op_shr
+};
+
+/* Define type of reduction. */
+enum vect_reduction_type {
+ TREE_CODE_REDUCTION,
+ COND_REDUCTION,
+ INTEGER_INDUC_COND_REDUCTION,
+ CONST_COND_REDUCTION,
+
+ /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop
+ to implement:
+
+ for (int i = 0; i < VF; ++i)
+ res = cond[i] ? val[i] : res; */
+ EXTRACT_LAST_REDUCTION,
+
+ /* Use a folding reduction within the loop to implement:
+
+ for (int i = 0; i < VF; ++i)
+ res = res OP val[i];
+
+ (with no reassocation). */
+ FOLD_LEFT_REDUCTION
+};
+
+#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \
+ || ((D) == vect_double_reduction_def) \
+ || ((D) == vect_nested_cycle))
+
+/* Structure to encapsulate information about a group of like
+ instructions to be presented to the target cost model. */
+struct stmt_info_for_cost {
+ int count;
+ enum vect_cost_for_stmt kind;
+ enum vect_cost_model_location where;
+ stmt_vec_info stmt_info;
+ slp_tree node;
+ tree vectype;
+ int misalign;
+};
+
+typedef vec<stmt_info_for_cost> stmt_vector_for_cost;
+
+/* Maps base addresses to an innermost_loop_behavior and the stmt it was
+ derived from that gives the maximum known alignment for that base. */
+typedef hash_map<tree_operand_hash,
+ std::pair<stmt_vec_info, innermost_loop_behavior *> >
+ vec_base_alignments;
+
+/* Represents elements [START, START + LENGTH) of cyclical array OPS*
+ (i.e. OPS repeated to give at least START + LENGTH elements) */
+struct vect_scalar_ops_slice
+{
+ tree op (unsigned int i) const;
+ bool all_same_p () const;
+
+ vec<tree> *ops;
+ unsigned int start;
+ unsigned int length;
+};
+
+/* Return element I of the slice. */
+inline tree
+vect_scalar_ops_slice::op (unsigned int i) const
+{
+ return (*ops)[(i + start) % ops->length ()];
+}
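+
+/* For example (illustrative): with *OPS == { a, b, c }, START == 2 and
+   LENGTH == 4, the slice is op(0) .. op(3) == { c, a, b, c }.  */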
+
+/* Hash traits for vect_scalar_ops_slice. */
+struct vect_scalar_ops_slice_hash : typed_noop_remove<vect_scalar_ops_slice>
+{
+ typedef vect_scalar_ops_slice value_type;
+ typedef vect_scalar_ops_slice compare_type;
+
+ static const bool empty_zero_p = true;
+
+ static void mark_deleted (value_type &s) { s.length = ~0U; }
+ static void mark_empty (value_type &s) { s.length = 0; }
+ static bool is_deleted (const value_type &s) { return s.length == ~0U; }
+ static bool is_empty (const value_type &s) { return s.length == 0; }
+ static hashval_t hash (const value_type &);
+ static bool equal (const value_type &, const compare_type &);
+};
+
+/************************************************************************
+ SLP
+ ************************************************************************/
+typedef vec<std::pair<unsigned, unsigned> > lane_permutation_t;
+typedef auto_vec<std::pair<unsigned, unsigned>, 16> auto_lane_permutation_t;
+typedef vec<unsigned> load_permutation_t;
+typedef auto_vec<unsigned, 16> auto_load_permutation_t;
+
+/* A computation tree of an SLP instance. Each node corresponds to a group of
+ stmts to be packed in a SIMD stmt. */
+struct _slp_tree {
+ _slp_tree ();
+ ~_slp_tree ();
+
+  /* Nodes that contain the def-stmts of this node's statements' operands. */
+ vec<slp_tree> children;
+
+ /* A group of scalar stmts to be vectorized together. */
+ vec<stmt_vec_info> stmts;
+ /* A group of scalar operands to be vectorized together. */
+ vec<tree> ops;
+ /* The representative that should be used for analysis and
+ code generation. */
+ stmt_vec_info representative;
+
+ /* Load permutation relative to the stores, NULL if there is no
+ permutation. */
+ load_permutation_t load_permutation;
+ /* Lane permutation of the operands scalar lanes encoded as pairs
+ of { operand number, lane number }. The number of elements
+ denotes the number of output lanes. */
+ lane_permutation_t lane_permutation;
+
+ tree vectype;
+ /* Vectorized stmt/s. */
+ vec<gimple *> vec_stmts;
+ vec<tree> vec_defs;
+ /* Number of vector stmts that are created to replace the group of scalar
+ stmts. It is calculated during the transformation phase as the number of
+ scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
+ divided by vector size. */
+ unsigned int vec_stmts_size;
+
+ /* Reference count in the SLP graph. */
+ unsigned int refcnt;
+ /* The maximum number of vector elements for the subtree rooted
+ at this node. */
+ poly_uint64 max_nunits;
+ /* The DEF type of this node. */
+ enum vect_def_type def_type;
+ /* The number of scalar lanes produced by this node. */
+ unsigned int lanes;
+ /* The operation of this node. */
+ enum tree_code code;
+
+ int vertex;
+
+ /* If not NULL this is a cached failed SLP discovery attempt with
+ the lanes that failed during SLP discovery as 'false'. This is
+ a copy of the matches array. */
+ bool *failed;
+
+ /* Allocate from slp_tree_pool. */
+ static void *operator new (size_t);
+
+ /* Return memory to slp_tree_pool. */
+ static void operator delete (void *, size_t);
+
+ /* Linked list of nodes to release when we free the slp_tree_pool. */
+ slp_tree next_node;
+ slp_tree prev_node;
+};
+
+/* The enum describes the type of operations that an SLP instance
+ can perform. */
+
+enum slp_instance_kind {
+ slp_inst_kind_store,
+ slp_inst_kind_reduc_group,
+ slp_inst_kind_reduc_chain,
+ slp_inst_kind_bb_reduc,
+ slp_inst_kind_ctor
+};
+
+/* SLP instance is a sequence of stmts in a loop that can be packed into
+ SIMD stmts. */
+typedef class _slp_instance {
+public:
+ /* The root of SLP tree. */
+ slp_tree root;
+
+  /* For vector constructors, the constructor stmts that the SLP tree is built
+     from, NULL otherwise. */
+ vec<stmt_vec_info> root_stmts;
+
+  /* The unrolling factor required to vectorize this SLP instance. */
+ poly_uint64 unrolling_factor;
+
+ /* The group of nodes that contain loads of this SLP instance. */
+ vec<slp_tree> loads;
+
+ /* The SLP node containing the reduction PHIs. */
+ slp_tree reduc_phis;
+
+ /* Vector cost of this entry to the SLP graph. */
+ stmt_vector_for_cost cost_vec;
+
+ /* If this instance is the main entry of a subgraph the set of
+ entries into the same subgraph, including itself. */
+ vec<_slp_instance *> subgraph_entries;
+
+ /* The type of operation the SLP instance is performing. */
+ slp_instance_kind kind;
+
+ dump_user_location_t location () const;
+} *slp_instance;
+
+
+/* Access Functions. */
+#define SLP_INSTANCE_TREE(S) (S)->root
+#define SLP_INSTANCE_UNROLLING_FACTOR(S) (S)->unrolling_factor
+#define SLP_INSTANCE_LOADS(S) (S)->loads
+#define SLP_INSTANCE_ROOT_STMTS(S) (S)->root_stmts
+#define SLP_INSTANCE_KIND(S) (S)->kind
+
+#define SLP_TREE_CHILDREN(S) (S)->children
+#define SLP_TREE_SCALAR_STMTS(S) (S)->stmts
+#define SLP_TREE_SCALAR_OPS(S) (S)->ops
+#define SLP_TREE_REF_COUNT(S) (S)->refcnt
+#define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts
+#define SLP_TREE_VEC_DEFS(S) (S)->vec_defs
+#define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size
+#define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation
+#define SLP_TREE_LANE_PERMUTATION(S) (S)->lane_permutation
+#define SLP_TREE_DEF_TYPE(S) (S)->def_type
+#define SLP_TREE_VECTYPE(S) (S)->vectype
+#define SLP_TREE_REPRESENTATIVE(S) (S)->representative
+#define SLP_TREE_LANES(S) (S)->lanes
+#define SLP_TREE_CODE(S) (S)->code
+
+/* Key for map that records association between
+ scalar conditions and corresponding loop mask, and
+ is populated by vect_record_loop_mask. */
+
+struct scalar_cond_masked_key
+{
+ scalar_cond_masked_key (tree t, unsigned ncopies_)
+ : ncopies (ncopies_)
+ {
+ get_cond_ops_from_tree (t);
+ }
+
+ void get_cond_ops_from_tree (tree);
+
+ unsigned ncopies;
+ bool inverted_p;
+ tree_code code;
+ tree op0;
+ tree op1;
+};
+
+template<>
+struct default_hash_traits<scalar_cond_masked_key>
+{
+ typedef scalar_cond_masked_key compare_type;
+ typedef scalar_cond_masked_key value_type;
+
+ static inline hashval_t
+ hash (value_type v)
+ {
+ inchash::hash h;
+ h.add_int (v.code);
+ inchash::add_expr (v.op0, h, 0);
+ inchash::add_expr (v.op1, h, 0);
+ h.add_int (v.ncopies);
+ h.add_flag (v.inverted_p);
+ return h.end ();
+ }
+
+ static inline bool
+ equal (value_type existing, value_type candidate)
+ {
+ return (existing.ncopies == candidate.ncopies
+ && existing.code == candidate.code
+ && existing.inverted_p == candidate.inverted_p
+ && operand_equal_p (existing.op0, candidate.op0, 0)
+ && operand_equal_p (existing.op1, candidate.op1, 0));
+ }
+
+ static const bool empty_zero_p = true;
+
+ static inline void
+ mark_empty (value_type &v)
+ {
+ v.ncopies = 0;
+ v.inverted_p = false;
+ }
+
+ static inline bool
+ is_empty (value_type v)
+ {
+ return v.ncopies == 0;
+ }
+
+ static inline void mark_deleted (value_type &) {}
+
+ static inline bool is_deleted (const value_type &)
+ {
+ return false;
+ }
+
+ static inline void remove (value_type &) {}
+};
+
+typedef hash_set<scalar_cond_masked_key> scalar_cond_masked_set_type;
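+
+/* Usage sketch (illustrative): deciding whether a scalar condition COND,
+   replicated NCOPIES times, already has a loop mask recorded; add and
+   contains are the standard hash_set operations, and scalar_cond_masked_set
+   is the _loop_vec_info member declared further below:
+
+     scalar_cond_masked_key key (cond, ncopies);
+     if (!loop_vinfo->scalar_cond_masked_set.contains (key))
+       loop_vinfo->scalar_cond_masked_set.add (key);  */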
+
+/* Key and map that records association between vector conditions and
+ corresponding loop mask, and is populated by prepare_vec_mask. */
+
+typedef pair_hash<tree_operand_hash, tree_operand_hash> tree_cond_mask_hash;
+typedef hash_set<tree_cond_mask_hash> vec_cond_masked_set_type;
+
+/* Describes two objects whose addresses must be unequal for the vectorized
+ loop to be valid. */
+typedef std::pair<tree, tree> vec_object_pair;
+
+/* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE.
+ UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR. */
+class vec_lower_bound {
+public:
+ vec_lower_bound () {}
+ vec_lower_bound (tree e, bool u, poly_uint64 m)
+ : expr (e), unsigned_p (u), min_value (m) {}
+
+ tree expr;
+ bool unsigned_p;
+ poly_uint64 min_value;
+};
+
+/* Vectorizer state shared between different analyses like vector sizes
+ of the same CFG region. */
+class vec_info_shared {
+public:
+ vec_info_shared();
+ ~vec_info_shared();
+
+ void save_datarefs();
+ void check_datarefs();
+
+ /* The number of scalar stmts. */
+ unsigned n_stmts;
+
+ /* All data references. Freed by free_data_refs, so not an auto_vec. */
+ vec<data_reference_p> datarefs;
+ vec<data_reference> datarefs_copy;
+
+ /* The loop nest in which the data dependences are computed. */
+ auto_vec<loop_p> loop_nest;
+
+ /* All data dependences. Freed by free_dependence_relations, so not
+ an auto_vec. */
+ vec<ddr_p> ddrs;
+};
+
+/* Vectorizer state common between loop and basic-block vectorization. */
+class vec_info {
+public:
+ typedef hash_set<int_hash<machine_mode, E_VOIDmode, E_BLKmode> > mode_set;
+ enum vec_kind { bb, loop };
+
+ vec_info (vec_kind, vec_info_shared *);
+ ~vec_info ();
+
+ stmt_vec_info add_stmt (gimple *);
+ stmt_vec_info add_pattern_stmt (gimple *, stmt_vec_info);
+ stmt_vec_info lookup_stmt (gimple *);
+ stmt_vec_info lookup_def (tree);
+ stmt_vec_info lookup_single_use (tree);
+ class dr_vec_info *lookup_dr (data_reference *);
+ void move_dr (stmt_vec_info, stmt_vec_info);
+ void remove_stmt (stmt_vec_info);
+ void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *);
+ void insert_on_entry (stmt_vec_info, gimple *);
+ void insert_seq_on_entry (stmt_vec_info, gimple_seq);
+
+ /* The type of vectorization. */
+ vec_kind kind;
+
+ /* Shared vectorizer state. */
+ vec_info_shared *shared;
+
+ /* The mapping of GIMPLE UID to stmt_vec_info. */
+ vec<stmt_vec_info> stmt_vec_infos;
+ /* Whether the above mapping is complete. */
+ bool stmt_vec_info_ro;
+
+  /* Whether we've done a transform for which we think it is OK not to
+     update virtual SSA form. */
+ bool any_known_not_updated_vssa;
+
+ /* The SLP graph. */
+ auto_vec<slp_instance> slp_instances;
+
+ /* Maps base addresses to an innermost_loop_behavior that gives the maximum
+ known alignment for that base. */
+ vec_base_alignments base_alignments;
+
+ /* All interleaving chains of stores, represented by the first
+ stmt in the chain. */
+ auto_vec<stmt_vec_info> grouped_stores;
+
+ /* The set of vector modes used in the vectorized region. */
+ mode_set used_vector_modes;
+
+ /* The argument we should pass to related_vector_mode when looking up
+ the vector mode for a scalar mode, or VOIDmode if we haven't yet
+ made any decisions about which vector modes to use. */
+ machine_mode vector_mode;
+
+private:
+ stmt_vec_info new_stmt_vec_info (gimple *stmt);
+ void set_vinfo_for_stmt (gimple *, stmt_vec_info, bool = true);
+ void free_stmt_vec_infos ();
+ void free_stmt_vec_info (stmt_vec_info);
+};
+
+class _loop_vec_info;
+class _bb_vec_info;
+
+template<>
+template<>
+inline bool
+is_a_helper <_loop_vec_info *>::test (vec_info *i)
+{
+ return i->kind == vec_info::loop;
+}
+
+template<>
+template<>
+inline bool
+is_a_helper <_bb_vec_info *>::test (vec_info *i)
+{
+ return i->kind == vec_info::bb;
+}
+
+/* In general, we can divide the vector statements in a vectorized loop
+ into related groups ("rgroups") and say that for each rgroup there is
+ some nS such that the rgroup operates on nS values from one scalar
+ iteration followed by nS values from the next. That is, if VF is the
+ vectorization factor of the loop, the rgroup operates on a sequence:
+
+ (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS)
+
+ where (i,j) represents a scalar value with index j in a scalar
+ iteration with index i.
+
+ [ We use the term "rgroup" to emphasise that this grouping isn't
+ necessarily the same as the grouping of statements used elsewhere.
+ For example, if we implement a group of scalar loads using gather
+ loads, we'll use a separate gather load for each scalar load, and
+ thus each gather load will belong to its own rgroup. ]
+
+ In general this sequence will occupy nV vectors concatenated
+ together. If these vectors have nL lanes each, the total number
+ of scalar values N is given by:
+
+ N = nS * VF = nV * nL
+
+ None of nS, VF, nV and nL are required to be a power of 2. nS and nV
+ are compile-time constants but VF and nL can be variable (if the target
+ supports variable-length vectors).
+
+ In classical vectorization, each iteration of the vector loop would
+ handle exactly VF iterations of the original scalar loop. However,
+ in vector loops that are able to operate on partial vectors, a
+ particular iteration of the vector loop might handle fewer than VF
+ iterations of the scalar loop. The vector lanes that correspond to
+ iterations of the scalar loop are said to be "active" and the other
+ lanes are said to be "inactive".
+
+ In such vector loops, many rgroups need to be controlled to ensure
+ that they have no effect for the inactive lanes. Conceptually, each
+ such rgroup needs a sequence of booleans in the same order as above,
+ but with each (i,j) replaced by a boolean that indicates whether
+ iteration i is active. This sequence occupies nV vector controls
+ that again have nL lanes each. Thus the control sequence as a whole
+ consists of VF independent booleans that are each repeated nS times.
+
+   Take the mask-based approach as an example of partially-populated vectors.
+ We make the simplifying assumption that if a sequence of nV masks is
+ suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by
+ VIEW_CONVERTing it. This holds for all current targets that support
+ fully-masked loops. For example, suppose the scalar loop is:
+
+ float *f;
+ double *d;
+ for (int i = 0; i < n; ++i)
+ {
+ f[i * 2 + 0] += 1.0f;
+ f[i * 2 + 1] += 2.0f;
+ d[i] += 3.0;
+ }
+
+ and suppose that vectors have 256 bits. The vectorized f accesses
+ will belong to one rgroup and the vectorized d access to another:
+
+ f rgroup: nS = 2, nV = 1, nL = 8
+ d rgroup: nS = 1, nV = 1, nL = 4
+ VF = 4
+
+ [ In this simple example the rgroups do correspond to the normal
+ SLP grouping scheme. ]
+
+ If only the first three lanes are active, the masks we need are:
+
+ f rgroup: 1 1 | 1 1 | 1 1 | 0 0
+ d rgroup: 1 | 1 | 1 | 0
+
+ Here we can use a mask calculated for f's rgroup for d's, but not
+ vice versa.
+
+ Thus for each value of nV, it is enough to provide nV masks, with the
+ mask being calculated based on the highest nL (or, equivalently, based
+ on the highest nS) required by any rgroup with that nV. We therefore
+ represent the entire collection of masks as a two-level table, with the
+ first level being indexed by nV - 1 (since nV == 0 doesn't exist) and
+ the second being indexed by the mask index 0 <= i < nV. */
+
+/* The controls (like masks or lengths) needed by rgroups with nV vectors,
+ according to the description above. */
+struct rgroup_controls {
+ /* The largest nS for all rgroups that use these controls. */
+ unsigned int max_nscalars_per_iter;
+
+ /* For the largest nS recorded above, the loop controls divide each scalar
+ into FACTOR equal-sized pieces. This is useful if we need to split
+ element-based accesses into byte-based accesses. */
+ unsigned int factor;
+
+ /* This is a vector type with MAX_NSCALARS_PER_ITER * VF / nV elements.
+ For mask-based controls, it is the type of the masks in CONTROLS.
+ For length-based controls, it can be any vector type that has the
+ specified number of elements; the type of the elements doesn't matter. */
+ tree type;
+
+ /* A vector of nV controls, in iteration order. */
+ vec<tree> controls;
+
+ /* In case of len_load and len_store with a bias there is only one
+     rgroup.  This holds the adjusted loop length for this rgroup. */
+ tree bias_adjusted_ctrl;
+};
+
+typedef auto_vec<rgroup_controls> vec_loop_masks;
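+
+/* Given the two-level scheme described above, indexing is (illustrative):
+   MASKS[nV - 1].controls[i] yields the I'th of the nV controls shared by
+   all rgroups that need nV vectors; the real lookup goes through the
+   vect_get_loop_mask routine.  */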
+
+typedef auto_vec<rgroup_controls> vec_loop_lens;
+
+typedef auto_vec<std::pair<data_reference*, tree> > drs_init_vec;
+
+/* Information about a reduction accumulator from the main loop that could
+ conceivably be reused as the input to a reduction in an epilogue loop. */
+struct vect_reusable_accumulator {
+ /* The final value of the accumulator, which forms the input to the
+ reduction operation. */
+ tree reduc_input;
+
+ /* The stmt_vec_info that describes the reduction (i.e. the one for
+ which is_reduc_info is true). */
+ stmt_vec_info reduc_info;
+};
+
+/*-----------------------------------------------------------------*/
+/* Info on vectorized loops. */
+/*-----------------------------------------------------------------*/
+typedef class _loop_vec_info : public vec_info {
+public:
+ _loop_vec_info (class loop *, vec_info_shared *);
+ ~_loop_vec_info ();
+
+  /* The loop to which this info struct refers. */
+ class loop *loop;
+
+ /* The loop basic blocks. */
+ basic_block *bbs;
+
+ /* Number of latch executions. */
+ tree num_itersm1;
+ /* Number of iterations. */
+ tree num_iters;
+ /* Number of iterations of the original loop. */
+ tree num_iters_unchanged;
+ /* Condition under which this loop is analyzed and versioned. */
+ tree num_iters_assumptions;
+
+ /* The cost of the vector code. */
+ class vector_costs *vector_costs;
+
+ /* The cost of the scalar code. */
+ class vector_costs *scalar_costs;
+
+ /* Threshold of number of iterations below which vectorization will not be
+ performed. It is calculated from MIN_PROFITABLE_ITERS and
+ param_min_vect_loop_bound. */
+ unsigned int th;
+
+ /* When applying loop versioning, the vector form should only be used
+ if the number of scalar iterations is >= this value, on top of all
+ the other requirements. Ignored when loop versioning is not being
+ used. */
+ poly_uint64 versioning_threshold;
+
+ /* Unrolling factor */
+ poly_uint64 vectorization_factor;
+
+ /* If this loop is an epilogue loop whose main loop can be skipped,
+ MAIN_LOOP_EDGE is the edge from the main loop to this loop's
+ preheader. SKIP_MAIN_LOOP_EDGE is then the edge that skips the
+ main loop and goes straight to this loop's preheader.
+
+ Both fields are null otherwise. */
+ edge main_loop_edge;
+ edge skip_main_loop_edge;
+
+ /* If this loop is an epilogue loop that might be skipped after executing
+ the main loop, this edge is the one that skips the epilogue. */
+ edge skip_this_loop_edge;
+
+ /* The vectorized form of a standard reduction replaces the original
+ scalar code's final result (a loop-closed SSA PHI) with the result
+ of a vector-to-scalar reduction operation. After vectorization,
+ this variable maps these vector-to-scalar results to information
+ about the reductions that generated them. */
+ hash_map<tree, vect_reusable_accumulator> reusable_accumulators;
+
+ /* The number of times that the target suggested we unroll the vector loop
+ in order to promote more ILP. This value will be used to re-analyze the
+ loop for vectorization and if successful the value will be folded into
+ vectorization_factor (and therefore exactly divides
+ vectorization_factor). */
+ unsigned int suggested_unroll_factor;
+
+ /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR
+ if there is no particular limit. */
+ unsigned HOST_WIDE_INT max_vectorization_factor;
+
+ /* The masks that a fully-masked loop should use to avoid operating
+ on inactive scalars. */
+ vec_loop_masks masks;
+
+  /* The lengths that a length-controlled loop should use to avoid operating
+ on inactive scalars. */
+ vec_loop_lens lens;
+
+ /* Set of scalar conditions that have loop mask applied. */
+ scalar_cond_masked_set_type scalar_cond_masked_set;
+
+ /* Set of vector conditions that have loop mask applied. */
+ vec_cond_masked_set_type vec_cond_masked_set;
+
+ /* If we are using a loop mask to align memory addresses, this variable
+ contains the number of vector elements that we should skip in the
+ first iteration of the vector loop (i.e. the number of leading
+ elements that should be false in the first mask). */
+ tree mask_skip_niters;
+
+ /* The type that the loop control IV should be converted to before
+ testing which of the VF scalars are active and inactive.
+ Only meaningful if LOOP_VINFO_USING_PARTIAL_VECTORS_P. */
+ tree rgroup_compare_type;
+
+  /* For #pragma omp simd if (x) loops the x expression.  If constant 0,
+     the loop should not be vectorized; if constant non-zero, simd_if_cond
+     shouldn't be set and the loop vectorized normally; if an SSA_NAME, the
+     loop should be versioned on that condition, using the scalar loop if the
+     condition is false and the vectorized loop otherwise. */
+ tree simd_if_cond;
+
+ /* The type that the vector loop control IV should have when
+ LOOP_VINFO_USING_PARTIAL_VECTORS_P is true. */
+ tree rgroup_iv_type;
+
+  /* The data reference according to which the loop was peeled when the
+     peeling amount is only known at runtime (see peeling_for_alignment). */
+ class dr_vec_info *unaligned_dr;
+
+ /* peeling_for_alignment indicates whether peeling for alignment will take
+ place, and what the peeling factor should be:
+ peeling_for_alignment = X means:
+ If X=0: Peeling for alignment will not be applied.
+ If X>0: Peel first X iterations.
+ If X=-1: Generate a runtime test to calculate the number of iterations
+ to be peeled, using the dataref recorded in the field
+ unaligned_dr. */
+ int peeling_for_alignment;
+
+ /* The mask used to check the alignment of pointers or arrays. */
+ int ptr_mask;
+
+ /* Data Dependence Relations defining address ranges that are candidates
+ for a run-time aliasing check. */
+ auto_vec<ddr_p> may_alias_ddrs;
+
+ /* Data Dependence Relations defining address ranges together with segment
+ lengths from which the run-time aliasing check is built. */
+ auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs;
+
+ /* Check that the addresses of each pair of objects is unequal. */
+ auto_vec<vec_object_pair> check_unequal_addrs;
+
+ /* List of values that are required to be nonzero. This is used to check
+ whether things like "x[i * n] += 1;" are safe and eventually gets added
+ to the checks for lower bounds below. */
+ auto_vec<tree> check_nonzero;
+
+ /* List of values that need to be checked for a minimum value. */
+ auto_vec<vec_lower_bound> lower_bounds;
+
+ /* Statements in the loop that have data references that are candidates for a
+ runtime (loop versioning) misalignment check. */
+ auto_vec<stmt_vec_info> may_misalign_stmts;
+
+ /* Reduction cycles detected in the loop. Used in loop-aware SLP. */
+ auto_vec<stmt_vec_info> reductions;
+
+ /* All reduction chains in the loop, represented by the first
+ stmt in the chain. */
+ auto_vec<stmt_vec_info> reduction_chains;
+
+ /* Cost vector for a single scalar iteration. */
+ auto_vec<stmt_info_for_cost> scalar_cost_vec;
+
+ /* Map of IV base/step expressions to inserted name in the preheader. */
+ hash_map<tree_operand_hash, tree> *ivexpr_map;
+
+ /* Map of OpenMP "omp simd array" scan variables to corresponding
+ rhs of the store of the initializer. */
+ hash_map<tree, tree> *scan_map;
+
+  /* The unrolling factor needed to SLP the loop.  If pure SLP is applied
+     to the loop, i.e., no unrolling is needed, this is 1. */
+ poly_uint64 slp_unrolling_factor;
+
+  /* The factor used to over-weight those statements in an inner loop
+ relative to the loop being vectorized. */
+ unsigned int inner_loop_cost_factor;
+
+ /* Is the loop vectorizable? */
+ bool vectorizable;
+
+ /* Records whether we still have the option of vectorizing this loop
+ using partially-populated vectors; in other words, whether it is
+ still possible for one iteration of the vector loop to handle
+ fewer than VF scalars. */
+ bool can_use_partial_vectors_p;
+
+ /* True if we've decided to use partially-populated vectors, so that
+ the vector loop can handle fewer than VF scalars. */
+ bool using_partial_vectors_p;
+
+ /* True if we've decided to use partially-populated vectors for the
+ epilogue of loop. */
+ bool epil_using_partial_vectors_p;
+
+ /* The bias for len_load and len_store. For now, only 0 and -1 are
+ supported. -1 must be used when a backend does not support
+ len_load/len_store with a length of zero. */
+ signed char partial_load_store_bias;
+
+ /* When we have grouped data accesses with gaps, we may introduce invalid
+ memory accesses. We peel the last iteration of the loop to prevent
+ this. */
+ bool peeling_for_gaps;
+
+ /* When the number of iterations is not a multiple of the vector size
+ we need to peel off iterations at the end to form an epilogue loop. */
+ bool peeling_for_niter;
+
+ /* True if there are no loop carried data dependencies in the loop.
+ If loop->safelen <= 1, then this is always true, either the loop
+ didn't have any loop carried data dependencies, or the loop is being
+ vectorized guarded with some runtime alias checks, or couldn't
+ be vectorized at all, but then this field shouldn't be used.
+ For loop->safelen >= 2, the user has asserted that there are no
+ backward dependencies, but there still could be loop carried forward
+ dependencies in such loops. This flag will be false if normal
+ vectorizer data dependency analysis would fail or require versioning
+ for alias, but because of loop->safelen >= 2 it has been vectorized
+ even without versioning for alias. E.g. in:
+ #pragma omp simd
+ for (int i = 0; i < m; i++)
+ a[i] = a[i + k] * c;
+ (or #pragma simd or #pragma ivdep) we can vectorize this and it will
+ DTRT even for k > 0 && k < m, but without safelen we would not
+ vectorize this, so this field would be false. */
+ bool no_data_dependencies;
+
+ /* Mark loops having masked stores. */
+ bool has_mask_store;
+
+ /* Queued scaling factor for the scalar loop. */
+ profile_probability scalar_loop_scaling;
+
+ /* If if-conversion versioned this loop before conversion, this is the
+ loop version without if-conversion. */
+ class loop *scalar_loop;
+
+ /* For loops being epilogues of already vectorized loops
+ this points to the original vectorized loop. Otherwise NULL. */
+ _loop_vec_info *orig_loop_info;
+
+ /* Used to store loop_vec_infos of epilogues of this loop during
+ analysis. */
+ vec<_loop_vec_info *> epilogue_vinfos;
+
+} *loop_vec_info;
+
+/* Access Functions. */
+#define LOOP_VINFO_LOOP(L) (L)->loop
+#define LOOP_VINFO_BBS(L) (L)->bbs
+#define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1
+#define LOOP_VINFO_NITERS(L) (L)->num_iters
+/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
+ prologue peeling retain total unchanged scalar loop iterations for
+ cost model. */
+#define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged
+#define LOOP_VINFO_NITERS_ASSUMPTIONS(L) (L)->num_iters_assumptions
+#define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
+#define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold
+#define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable
+#define LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P(L) (L)->can_use_partial_vectors_p
+#define LOOP_VINFO_USING_PARTIAL_VECTORS_P(L) (L)->using_partial_vectors_p
+#define LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P(L) \
+ (L)->epil_using_partial_vectors_p
+#define LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS(L) (L)->partial_load_store_bias
+#define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor
+#define LOOP_VINFO_MAX_VECT_FACTOR(L) (L)->max_vectorization_factor
+#define LOOP_VINFO_MASKS(L) (L)->masks
+#define LOOP_VINFO_LENS(L) (L)->lens
+#define LOOP_VINFO_MASK_SKIP_NITERS(L) (L)->mask_skip_niters
+#define LOOP_VINFO_RGROUP_COMPARE_TYPE(L) (L)->rgroup_compare_type
+#define LOOP_VINFO_RGROUP_IV_TYPE(L) (L)->rgroup_iv_type
+#define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask
+#define LOOP_VINFO_N_STMTS(L) (L)->shared->n_stmts
+#define LOOP_VINFO_LOOP_NEST(L) (L)->shared->loop_nest
+#define LOOP_VINFO_DATAREFS(L) (L)->shared->datarefs
+#define LOOP_VINFO_DDRS(L) (L)->shared->ddrs
+#define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters))
+#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
+#define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr
+#define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts
+#define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs
+#define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs
+#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L) (L)->check_unequal_addrs
+#define LOOP_VINFO_CHECK_NONZERO(L) (L)->check_nonzero
+#define LOOP_VINFO_LOWER_BOUNDS(L) (L)->lower_bounds
+#define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores
+#define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances
+#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
+#define LOOP_VINFO_REDUCTIONS(L) (L)->reductions
+#define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains
+#define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps
+#define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter
+#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
+#define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop
+#define LOOP_VINFO_SCALAR_LOOP_SCALING(L) (L)->scalar_loop_scaling
+#define LOOP_VINFO_HAS_MASK_STORE(L) (L)->has_mask_store
+#define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
+#define LOOP_VINFO_ORIG_LOOP_INFO(L) (L)->orig_loop_info
+#define LOOP_VINFO_SIMD_IF_COND(L) (L)->simd_if_cond
+#define LOOP_VINFO_INNER_LOOP_COST_FACTOR(L) (L)->inner_loop_cost_factor
+
+#define LOOP_VINFO_FULLY_MASKED_P(L) \
+ (LOOP_VINFO_USING_PARTIAL_VECTORS_P (L) \
+ && !LOOP_VINFO_MASKS (L).is_empty ())
+
+#define LOOP_VINFO_FULLY_WITH_LENGTH_P(L) \
+ (LOOP_VINFO_USING_PARTIAL_VECTORS_P (L) \
+ && !LOOP_VINFO_LENS (L).is_empty ())
+
+#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \
+ ((L)->may_misalign_stmts.length () > 0)
+#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \
+ ((L)->comp_alias_ddrs.length () > 0 \
+ || (L)->check_unequal_addrs.length () > 0 \
+ || (L)->lower_bounds.length () > 0)
+#define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L) \
+ (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
+#define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L) \
+ (LOOP_VINFO_SIMD_IF_COND (L))
+#define LOOP_REQUIRES_VERSIONING(L) \
+ (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L) \
+ || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L) \
+ || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L) \
+ || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L))
+
+#define LOOP_VINFO_NITERS_KNOWN_P(L) \
+ (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)
+
+#define LOOP_VINFO_EPILOGUE_P(L) \
+ (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)
+
+#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \
+ (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))
+
+/* Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL
+ value signifies success, and a NULL value signifies failure, supporting
+ propagating an opt_problem * describing the failure back up the call
+ stack. */
+typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info;
+
+inline loop_vec_info
+loop_vec_info_for_loop (class loop *loop)
+{
+ return (loop_vec_info) loop->aux;
+}
+
+struct slp_root
+{
+ slp_root (slp_instance_kind kind_, vec<stmt_vec_info> stmts_,
+ vec<stmt_vec_info> roots_)
+ : kind(kind_), stmts(stmts_), roots(roots_) {}
+ slp_instance_kind kind;
+ vec<stmt_vec_info> stmts;
+ vec<stmt_vec_info> roots;
+};
+
+typedef class _bb_vec_info : public vec_info
+{
+public:
+ _bb_vec_info (vec<basic_block> bbs, vec_info_shared *);
+ ~_bb_vec_info ();
+
+ /* The region we are operating on. bbs[0] is the entry, excluding
+ its PHI nodes. In the future we might want to track an explicit
+ entry edge to cover bbs[0] PHI nodes and have a region entry
+ insert location. */
+ vec<basic_block> bbs;
+
+ vec<slp_root> roots;
+} *bb_vec_info;
+
+#define BB_VINFO_BB(B) (B)->bb
+#define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores
+#define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances
+#define BB_VINFO_DATAREFS(B) (B)->shared->datarefs
+#define BB_VINFO_DDRS(B) (B)->shared->ddrs
+
+/*-----------------------------------------------------------------*/
+/* Info on vectorized defs. */
+/*-----------------------------------------------------------------*/
+enum stmt_vec_info_type {
+ undef_vec_info_type = 0,
+ load_vec_info_type,
+ store_vec_info_type,
+ shift_vec_info_type,
+ op_vec_info_type,
+ call_vec_info_type,
+ call_simd_clone_vec_info_type,
+ assignment_vec_info_type,
+ condition_vec_info_type,
+ comparison_vec_info_type,
+ reduc_vec_info_type,
+ induc_vec_info_type,
+ type_promotion_vec_info_type,
+ type_demotion_vec_info_type,
+ type_conversion_vec_info_type,
+ cycle_phi_info_type,
+ lc_phi_info_type,
+ phi_info_type,
+ recurr_info_type,
+ loop_exit_ctrl_vec_info_type
+};
+
+/* Indicates whether/how a variable is used in the scope of loop/basic
+ block. */
+enum vect_relevant {
+ vect_unused_in_scope = 0,
+
+ /* The def is only used outside the loop. */
+ vect_used_only_live,
+ /* The def is in the inner loop, and the use is in the outer loop, and the
+ use is a reduction stmt. */
+ vect_used_in_outer_by_reduction,
+ /* The def is in the inner loop, and the use is in the outer loop (and is
+ not part of reduction). */
+ vect_used_in_outer,
+
+ /* defs that feed computations that end up (only) in a reduction. These
+ defs may be used by non-reduction stmts, but eventually, any
+ computations/values that are affected by these defs are used to compute
+ a reduction (i.e. don't get stored to memory, for example). We use this
+ to identify computations that we can change the order in which they are
+ computed. */
+ vect_used_by_reduction,
+
+ vect_used_in_scope
+};
+
+/* The type of vectorization that can be applied to the stmt: regular loop-based
+ vectorization; pure SLP - the stmt is a part of SLP instances and does not
+ have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
+ a part of SLP instance and also must be loop-based vectorized, since it has
+ uses outside SLP sequences.
+
+ In the loop context the meanings of pure and hybrid SLP are slightly
+ different. By saying that pure SLP is applied to the loop, we mean that we
+ exploit only intra-iteration parallelism in the loop; i.e., the loop can be
+   vectorized without doing any conceptual unrolling, because we don't pack
+ together stmts from different iterations, only within a single iteration.
+ Loop hybrid SLP means that we exploit both intra-iteration and
+ inter-iteration parallelism (e.g., number of elements in the vector is 4
+ and the slp-group-size is 2, in which case we don't have enough parallelism
+ within an iteration, so we obtain the rest of the parallelism from subsequent
+ iterations by unrolling the loop by 2). */
+enum slp_vect_type {
+ loop_vect = 0,
+ pure_slp,
+ hybrid
+};
+
+/* Says whether a statement is a load, a store of a vectorized statement
+ result, or a store of an invariant value. */
+enum vec_load_store_type {
+ VLS_LOAD,
+ VLS_STORE,
+ VLS_STORE_INVARIANT
+};
+
+/* Describes how we're going to vectorize an individual load or store,
+ or a group of loads or stores. */
+enum vect_memory_access_type {
+ /* An access to an invariant address. This is used only for loads. */
+ VMAT_INVARIANT,
+
+ /* A simple contiguous access. */
+ VMAT_CONTIGUOUS,
+
+ /* A contiguous access that goes down in memory rather than up,
+ with no additional permutation. This is used only for stores
+ of invariants. */
+ VMAT_CONTIGUOUS_DOWN,
+
+ /* A simple contiguous access in which the elements need to be permuted
+ after loading or before storing. Only used for loop vectorization;
+ SLP uses separate permutes. */
+ VMAT_CONTIGUOUS_PERMUTE,
+
+ /* A simple contiguous access in which the elements need to be reversed
+ after loading or before storing. */
+ VMAT_CONTIGUOUS_REVERSE,
+
+ /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES. */
+ VMAT_LOAD_STORE_LANES,
+
+ /* An access in which each scalar element is loaded or stored
+ individually. */
+ VMAT_ELEMENTWISE,
+
+ /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped
+ SLP accesses. Each unrolled iteration uses a contiguous load
+ or store for the whole group, but the groups from separate iterations
+ are combined in the same way as for VMAT_ELEMENTWISE. */
+ VMAT_STRIDED_SLP,
+
+ /* The access uses gather loads or scatter stores. */
+ VMAT_GATHER_SCATTER
+};
+
+class dr_vec_info {
+public:
+ /* The data reference itself. */
+ data_reference *dr;
+ /* The statement that contains the data reference. */
+ stmt_vec_info stmt;
+ /* The analysis group this DR belongs to when doing BB vectorization.
+ DRs of the same group belong to the same conditional execution context. */
+ unsigned group;
+ /* The misalignment in bytes of the reference, or -1 if not known. */
+ int misalignment;
+ /* The byte alignment that we'd ideally like the reference to have,
+ and the value that misalignment is measured against. */
+ poly_uint64 target_alignment;
+ /* If true the alignment of base_decl needs to be increased. */
+ bool base_misaligned;
+ tree base_decl;
+
+ /* Stores current vectorized loop's offset. To be added to the DR's
+ offset to calculate current offset of data reference. */
+ tree offset;
+};
+
+typedef struct data_reference *dr_p;
+
+class _stmt_vec_info {
+public:
+
+ enum stmt_vec_info_type type;
+
+  /* Indicates whether this stmt is part of a computation whose result is
+ used outside the loop. */
+ bool live;
+
+ /* Stmt is part of some pattern (computation idiom) */
+ bool in_pattern_p;
+
+ /* True if the statement was created during pattern recognition as
+ part of the replacement for RELATED_STMT. This implies that the
+ statement isn't part of any basic block, although for convenience
+ its gimple_bb is the same as for RELATED_STMT. */
+ bool pattern_stmt_p;
+
+ /* Is this statement vectorizable or should it be skipped in (partial)
+ vectorization. */
+ bool vectorizable;
+
+ /* The stmt to which this info struct refers to. */
+ gimple *stmt;
+
+ /* The vector type to be used for the LHS of this statement. */
+ tree vectype;
+
+ /* The vectorized stmts. */
+ vec<gimple *> vec_stmts;
+
+ /* The following is relevant only for stmts that contain a non-scalar
+ data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have
+ at most one such data-ref. */
+
+ dr_vec_info dr_aux;
+
+ /* Information about the data-ref relative to this loop
+ nest (the loop that is being considered for vectorization). */
+ innermost_loop_behavior dr_wrt_vec_loop;
+
+ /* For loop PHI nodes, the base and evolution part of it. This makes sure
+ this information is still available in vect_update_ivs_after_vectorizer
+     where we may not be able to re-analyze the PHI node's evolution as
+ peeling for the prologue loop can make it unanalyzable. The evolution
+ part is still correct after peeling, but the base may have changed from
+ the version here. */
+ tree loop_phi_evolution_base_unchanged;
+ tree loop_phi_evolution_part;
+ enum vect_induction_op_type loop_phi_evolution_type;
+
+ /* Used for various bookkeeping purposes, generally holding a pointer to
+ some other stmt S that is in some way "related" to this stmt.
+ Current use of this field is:
+ If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
+ true): S is the "pattern stmt" that represents (and replaces) the
+ sequence of stmts that constitutes the pattern. Similarly, the
+ related_stmt of the "pattern stmt" points back to this stmt (which is
+ the last stmt in the original sequence of stmts that constitutes the
+ pattern). */
+ stmt_vec_info related_stmt;
+
+ /* Used to keep a sequence of def stmts of a pattern stmt if such exists.
+ The sequence is attached to the original statement rather than the
+ pattern statement. */
+ gimple_seq pattern_def_seq;
+
+ /* Selected SIMD clone's function info. First vector element
+ is SIMD clone's function decl, followed by a pair of trees (base + step)
+ for linear arguments (pair of NULLs for other arguments). */
+ vec<tree> simd_clone_info;
+
+ /* Classify the def of this stmt. */
+ enum vect_def_type def_type;
+
+ /* Whether the stmt is SLPed, loop-based vectorized, or both. */
+ enum slp_vect_type slp_type;
+
+ /* Interleaving and reduction chains info. */
+ /* First element in the group. */
+ stmt_vec_info first_element;
+ /* Pointer to the next element in the group. */
+ stmt_vec_info next_element;
+ /* The size of the group. */
+ unsigned int size;
+ /* For stores, number of stores from this group seen. We vectorize the last
+ one. */
+ unsigned int store_count;
+ /* For loads only, the gap from the previous load. For consecutive loads, GAP
+ is 1. */
+ unsigned int gap;
+
+ /* The minimum negative dependence distance this stmt participates in
+ or zero if none. */
+ unsigned int min_neg_dist;
+
+  /* Not all stmts in the loop need to be vectorized, e.g., the increment
+     of the loop induction variable and the computation of array indexes.
+     RELEVANT indicates whether the stmt needs to be vectorized. */
+ enum vect_relevant relevant;
+
+ /* For loads if this is a gather, for stores if this is a scatter. */
+ bool gather_scatter_p;
+
+ /* True if this is an access with loop-invariant stride. */
+ bool strided_p;
+
+ /* For both loads and stores. */
+ unsigned simd_lane_access_p : 3;
+
+ /* Classifies how the load or store is going to be implemented
+ for loop vectorization. */
+ vect_memory_access_type memory_access_type;
+
+ /* For INTEGER_INDUC_COND_REDUCTION, the initial value to be used. */
+ tree induc_cond_initial_val;
+
+ /* If not NULL the value to be added to compute final reduction value. */
+ tree reduc_epilogue_adjustment;
+
+ /* On a reduction PHI the reduction type as detected by
+ vect_is_simple_reduction and vectorizable_reduction. */
+ enum vect_reduction_type reduc_type;
+
+ /* The original reduction code, to be used in the epilogue. */
+ code_helper reduc_code;
+ /* An internal function we should use in the epilogue. */
+ internal_fn reduc_fn;
+
+ /* On a stmt participating in the reduction the index of the operand
+ on the reduction SSA cycle. */
+ int reduc_idx;
+
+ /* On a reduction PHI the def returned by vect_force_simple_reduction.
+ On the def returned by vect_force_simple_reduction the
+ corresponding PHI. */
+ stmt_vec_info reduc_def;
+
+ /* The vector input type relevant for reduction vectorization. */
+ tree reduc_vectype_in;
+
+ /* The vector type for performing the actual reduction. */
+ tree reduc_vectype;
+
+ /* If IS_REDUC_INFO is true and if the vector code is performing
+ N scalar reductions in parallel, this variable gives the initial
+ scalar values of those N reductions. */
+ vec<tree> reduc_initial_values;
+
+ /* If IS_REDUC_INFO is true and if the vector code is performing
+ N scalar reductions in parallel, this variable gives the vectorized code's
+ final (scalar) result for each of those N reductions. In other words,
+ REDUC_SCALAR_RESULTS[I] replaces the original scalar code's loop-closed
+ SSA PHI for reduction number I. */
+ vec<tree> reduc_scalar_results;
+
+ /* Only meaningful if IS_REDUC_INFO. If non-null, the reduction is
+ being performed by an epilogue loop and we have decided to reuse
+ this accumulator from the main loop. */
+ vect_reusable_accumulator *reused_accumulator;
+
+ /* Whether we force a single cycle PHI during reduction vectorization. */
+ bool force_single_cycle;
+
+  /* Whether reduction meta is recorded on this stmt.  */
+ bool is_reduc_info;
+
+ /* If nonzero, the lhs of the statement could be truncated to this
+ many bits without affecting any users of the result. */
+ unsigned int min_output_precision;
+
+ /* If nonzero, all non-boolean input operands have the same precision,
+ and they could each be truncated to this many bits without changing
+ the result. */
+ unsigned int min_input_precision;
+
+ /* If OPERATION_BITS is nonzero, the statement could be performed on
+ an integer with the sign and number of bits given by OPERATION_SIGN
+ and OPERATION_BITS without changing the result. */
+ unsigned int operation_precision;
+ signop operation_sign;
+
+ /* If the statement produces a boolean result, this value describes
+ how we should choose the associated vector type. The possible
+ values are:
+
+ - an integer precision N if we should use the vector mask type
+ associated with N-bit integers. This is only used if all relevant
+ input booleans also want the vector mask type for N-bit integers,
+ or if we can convert them into that form by pattern-matching.
+
+ - ~0U if we considered choosing a vector mask type but decided
+ to treat the boolean as a normal integer type instead.
+
+ - 0 otherwise. This means either that the operation isn't one that
+ could have a vector mask type (and so should have a normal vector
+ type instead) or that we simply haven't made a choice either way. */
+ unsigned int mask_precision;
+
+ /* True if this is only suitable for SLP vectorization. */
+ bool slp_vect_only_p;
+
+ /* True if this is a pattern that can only be handled by SLP
+ vectorization. */
+ bool slp_vect_pattern_only_p;
+};
+
+/* Information about a gather/scatter call. */
+struct gather_scatter_info {
+ /* The internal function to use for the gather/scatter operation,
+ or IFN_LAST if a built-in function should be used instead. */
+ internal_fn ifn;
+
+ /* The FUNCTION_DECL for the built-in gather/scatter function,
+ or null if an internal function should be used instead. */
+ tree decl;
+
+ /* The loop-invariant base value. */
+ tree base;
+
+ /* The original scalar offset, which is a non-loop-invariant SSA_NAME. */
+ tree offset;
+
+ /* Each offset element should be multiplied by this amount before
+ being added to the base. */
+ int scale;
+
+ /* The definition type for the vectorized offset. */
+ enum vect_def_type offset_dt;
+
+ /* The type of the vectorized offset. */
+ tree offset_vectype;
+
+ /* The type of the scalar elements after loading or before storing. */
+ tree element_type;
+
+ /* The type of the scalar elements being loaded or stored. */
+ tree memory_type;
+};
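+
+/* Illustratively (a sketch, not upstream text), the fields above
+   compose the address accessed for vector lane I as
+
+     BASE + OFFSET[I] * SCALE
+
+   where BASE is loop-invariant and OFFSET varies per element.  */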
+
+/* Access Functions. */
+#define STMT_VINFO_TYPE(S) (S)->type
+#define STMT_VINFO_STMT(S) (S)->stmt
+#define STMT_VINFO_RELEVANT(S) (S)->relevant
+#define STMT_VINFO_LIVE_P(S) (S)->live
+#define STMT_VINFO_VECTYPE(S) (S)->vectype
+#define STMT_VINFO_VEC_STMTS(S) (S)->vec_stmts
+#define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable
+#define STMT_VINFO_DATA_REF(S) ((S)->dr_aux.dr + 0)
+#define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p
+#define STMT_VINFO_STRIDED_P(S) (S)->strided_p
+#define STMT_VINFO_MEMORY_ACCESS_TYPE(S) (S)->memory_access_type
+#define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p
+#define STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL(S) (S)->induc_cond_initial_val
+#define STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT(S) (S)->reduc_epilogue_adjustment
+#define STMT_VINFO_REDUC_IDX(S) (S)->reduc_idx
+#define STMT_VINFO_FORCE_SINGLE_CYCLE(S) (S)->force_single_cycle
+
+#define STMT_VINFO_DR_WRT_VEC_LOOP(S) (S)->dr_wrt_vec_loop
+#define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_wrt_vec_loop.base_address
+#define STMT_VINFO_DR_INIT(S) (S)->dr_wrt_vec_loop.init
+#define STMT_VINFO_DR_OFFSET(S) (S)->dr_wrt_vec_loop.offset
+#define STMT_VINFO_DR_STEP(S) (S)->dr_wrt_vec_loop.step
+#define STMT_VINFO_DR_BASE_ALIGNMENT(S) (S)->dr_wrt_vec_loop.base_alignment
+#define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \
+ (S)->dr_wrt_vec_loop.base_misalignment
+#define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \
+ (S)->dr_wrt_vec_loop.offset_alignment
+#define STMT_VINFO_DR_STEP_ALIGNMENT(S) \
+ (S)->dr_wrt_vec_loop.step_alignment
+
+#define STMT_VINFO_DR_INFO(S) \
+ (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux)
+
+#define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p
+#define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt
+#define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq
+#define STMT_VINFO_SIMD_CLONE_INFO(S) (S)->simd_clone_info
+#define STMT_VINFO_DEF_TYPE(S) (S)->def_type
+#define STMT_VINFO_GROUPED_ACCESS(S) \
+ ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S))
+#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
+#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
+#define STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE(S) (S)->loop_phi_evolution_type
+#define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist
+#define STMT_VINFO_REDUC_TYPE(S) (S)->reduc_type
+#define STMT_VINFO_REDUC_CODE(S) (S)->reduc_code
+#define STMT_VINFO_REDUC_FN(S) (S)->reduc_fn
+#define STMT_VINFO_REDUC_DEF(S) (S)->reduc_def
+#define STMT_VINFO_REDUC_VECTYPE(S) (S)->reduc_vectype
+#define STMT_VINFO_REDUC_VECTYPE_IN(S) (S)->reduc_vectype_in
+#define STMT_VINFO_SLP_VECT_ONLY(S) (S)->slp_vect_only_p
+#define STMT_VINFO_SLP_VECT_ONLY_PATTERN(S) (S)->slp_vect_pattern_only_p
+
+#define DR_GROUP_FIRST_ELEMENT(S) \
+ (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
+#define DR_GROUP_NEXT_ELEMENT(S) \
+ (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element)
+#define DR_GROUP_SIZE(S) \
+ (gcc_checking_assert ((S)->dr_aux.dr), (S)->size)
+#define DR_GROUP_STORE_COUNT(S) \
+ (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)
+#define DR_GROUP_GAP(S) \
+ (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)
+
+#define REDUC_GROUP_FIRST_ELEMENT(S) \
+ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element)
+#define REDUC_GROUP_NEXT_ELEMENT(S) \
+ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element)
+#define REDUC_GROUP_SIZE(S) \
+ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size)
+
+#define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope)
+
+#define HYBRID_SLP_STMT(S) ((S)->slp_type == hybrid)
+#define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp)
+#define STMT_SLP_TYPE(S) (S)->slp_type
+
+/* Contains the scalar or vector costs for a vec_info. */
+class vector_costs
+{
+public:
+ vector_costs (vec_info *, bool);
+ virtual ~vector_costs () {}
+
+ /* Update the costs in response to adding COUNT copies of a statement.
+
+ - WHERE specifies whether the cost occurs in the loop prologue,
+ the loop body, or the loop epilogue.
+ - KIND is the kind of statement, which is always meaningful.
+ - STMT_INFO or NODE, if nonnull, describe the statement that will be
+ vectorized.
+ - VECTYPE, if nonnull, is the vector type that the vectorized
+ statement will operate on. Note that this should be used in
+ preference to STMT_VINFO_VECTYPE (STMT_INFO) since the latter
+ is not correct for SLP.
+ - for unaligned_load and unaligned_store statements, MISALIGN is
+ the byte misalignment of the load or store relative to the target's
+ preferred alignment for VECTYPE, or DR_MISALIGNMENT_UNKNOWN
+ if the misalignment is not known.
+
+ Return the calculated cost as well as recording it. The return
+ value is used for dumping purposes. */
+ virtual unsigned int add_stmt_cost (int count, vect_cost_for_stmt kind,
+ stmt_vec_info stmt_info,
+ slp_tree node,
+ tree vectype, int misalign,
+ vect_cost_model_location where);
+
+ /* Finish calculating the cost of the code. The results can be
+ read back using the functions below.
+
+ If the costs describe vector code, SCALAR_COSTS gives the costs
+ of the corresponding scalar code, otherwise it is null. */
+ virtual void finish_cost (const vector_costs *scalar_costs);
+
+ /* The costs in THIS and OTHER both describe ways of vectorizing
+ a main loop. Return true if the costs described by THIS are
+ cheaper than the costs described by OTHER. Return false if any
+ of the following are true:
+
+ - THIS and OTHER are of equal cost
+ - OTHER is better than THIS
+ - we can't be sure about the relative costs of THIS and OTHER. */
+ virtual bool better_main_loop_than_p (const vector_costs *other) const;
+
+ /* Likewise, but the costs in THIS and OTHER both describe ways of
+ vectorizing an epilogue loop of MAIN_LOOP. */
+ virtual bool better_epilogue_loop_than_p (const vector_costs *other,
+ loop_vec_info main_loop) const;
+
+ unsigned int prologue_cost () const;
+ unsigned int body_cost () const;
+ unsigned int epilogue_cost () const;
+ unsigned int outside_cost () const;
+ unsigned int total_cost () const;
+ unsigned int suggested_unroll_factor () const;
+
+protected:
+ unsigned int record_stmt_cost (stmt_vec_info, vect_cost_model_location,
+ unsigned int);
+ unsigned int adjust_cost_for_freq (stmt_vec_info, vect_cost_model_location,
+ unsigned int);
+ int compare_inside_loop_cost (const vector_costs *) const;
+ int compare_outside_loop_cost (const vector_costs *) const;
+
+ /* The region of code that we're considering vectorizing. */
+ vec_info *m_vinfo;
+
+ /* True if we're costing the scalar code, false if we're costing
+ the vector code. */
+ bool m_costing_for_scalar;
+
+ /* The costs of the three regions, indexed by vect_cost_model_location. */
+ unsigned int m_costs[3];
+
+ /* The suggested unrolling factor determined at finish_cost. */
+ unsigned int m_suggested_unroll_factor;
+
+ /* True if finish_cost has been called. */
+ bool m_finished;
+};
+
+/* Create costs for VINFO. COSTING_FOR_SCALAR is true if the costs
+ are for scalar code, false if they are for vector code. */
+
+inline
+vector_costs::vector_costs (vec_info *vinfo, bool costing_for_scalar)
+ : m_vinfo (vinfo),
+ m_costing_for_scalar (costing_for_scalar),
+ m_costs (),
+ m_suggested_unroll_factor(1),
+ m_finished (false)
+{
+}
+
+/* Return the cost of the prologue code (in abstract units). */
+
+inline unsigned int
+vector_costs::prologue_cost () const
+{
+ gcc_checking_assert (m_finished);
+ return m_costs[vect_prologue];
+}
+
+/* Return the cost of the body code (in abstract units). */
+
+inline unsigned int
+vector_costs::body_cost () const
+{
+ gcc_checking_assert (m_finished);
+ return m_costs[vect_body];
+}
+
+/* Return the cost of the epilogue code (in abstract units). */
+
+inline unsigned int
+vector_costs::epilogue_cost () const
+{
+ gcc_checking_assert (m_finished);
+ return m_costs[vect_epilogue];
+}
+
+/* Return the cost of the prologue and epilogue code (in abstract units). */
+
+inline unsigned int
+vector_costs::outside_cost () const
+{
+ return prologue_cost () + epilogue_cost ();
+}
+
+/* Return the cost of the prologue, body and epilogue code
+ (in abstract units). */
+
+inline unsigned int
+vector_costs::total_cost () const
+{
+ return body_cost () + outside_cost ();
+}
+
+/* Return the suggested unroll factor. */
+
+inline unsigned int
+vector_costs::suggested_unroll_factor () const
+{
+ gcc_checking_assert (m_finished);
+ return m_suggested_unroll_factor;
+}
+
+#define VECT_MAX_COST 1000
+
+/* The maximum number of intermediate steps required in multi-step type
+ conversion. */
+#define MAX_INTERM_CVT_STEPS 3
+
+#define MAX_VECTORIZATION_FACTOR INT_MAX
+
+/* Nonzero if TYPE represents a (scalar) boolean type, or a type that
+   the middle end treats as compatible with it (unsigned integral types
+   of precision 1).  Used to determine which types should be vectorized
+   as VECTOR_BOOLEAN_TYPE_P.  */
+
+#define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == BOOLEAN_TYPE \
+ || ((TREE_CODE (TYPE) == INTEGER_TYPE \
+ || TREE_CODE (TYPE) == ENUMERAL_TYPE) \
+ && TYPE_PRECISION (TYPE) == 1 \
+ && TYPE_UNSIGNED (TYPE)))
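+
+/* For example (an illustrative note, not upstream text):
+   VECT_SCALAR_BOOLEAN_TYPE_P (boolean_type_node) is true, as is the
+   predicate for a 1-bit unsigned INTEGER_TYPE, while a plain 32-bit
+   int fails the TYPE_PRECISION == 1 check.  */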
+
+inline bool
+nested_in_vect_loop_p (class loop *loop, stmt_vec_info stmt_info)
+{
+ return (loop->inner
+ && (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father));
+}
+
+/* PHI is either a scalar reduction phi or a scalar induction phi.
+ Return the initial value of the variable on entry to the containing
+ loop. */
+
+inline tree
+vect_phi_initial_value (gphi *phi)
+{
+ basic_block bb = gimple_bb (phi);
+ edge pe = loop_preheader_edge (bb->loop_father);
+ gcc_assert (pe->dest == bb);
+ return PHI_ARG_DEF_FROM_EDGE (phi, pe);
+}
+
+/* Return true if STMT_INFO should produce a vector mask type rather than
+ a normal nonmask type. */
+
+inline bool
+vect_use_mask_type_p (stmt_vec_info stmt_info)
+{
+ return stmt_info->mask_precision && stmt_info->mask_precision != ~0U;
+}
+
+/* Return TRUE if a statement represented by STMT_INFO is a part of a
+ pattern. */
+
+inline bool
+is_pattern_stmt_p (stmt_vec_info stmt_info)
+{
+ return stmt_info->pattern_stmt_p;
+}
+
+/* If STMT_INFO is a pattern statement, return the statement that it
+ replaces, otherwise return STMT_INFO itself. */
+
+inline stmt_vec_info
+vect_orig_stmt (stmt_vec_info stmt_info)
+{
+ if (is_pattern_stmt_p (stmt_info))
+ return STMT_VINFO_RELATED_STMT (stmt_info);
+ return stmt_info;
+}
+
+/* Return the later statement between STMT1_INFO and STMT2_INFO. */
+
+inline stmt_vec_info
+get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
+{
+ if (gimple_uid (vect_orig_stmt (stmt1_info)->stmt)
+ > gimple_uid (vect_orig_stmt (stmt2_info)->stmt))
+ return stmt1_info;
+ else
+ return stmt2_info;
+}
+
+/* If STMT_INFO has been replaced by a pattern statement, return the
+ replacement statement, otherwise return STMT_INFO itself. */
+
+inline stmt_vec_info
+vect_stmt_to_vectorize (stmt_vec_info stmt_info)
+{
+ if (STMT_VINFO_IN_PATTERN_P (stmt_info))
+ return STMT_VINFO_RELATED_STMT (stmt_info);
+ return stmt_info;
+}
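+
+/* Illustratively (a note, not upstream text): for a pattern statement P
+   that replaced an original statement O, the two accessors above invert
+   each other,
+
+     vect_orig_stmt (P) == O  and  vect_stmt_to_vectorize (O) == P,
+
+   while both act as the identity on statements that are not involved
+   in any pattern.  */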
+
+/* Return true if BB is a loop header. */
+
+inline bool
+is_loop_header_bb_p (basic_block bb)
+{
+ if (bb == (bb->loop_father)->header)
+ return true;
+ gcc_checking_assert (EDGE_COUNT (bb->preds) == 1);
+ return false;
+}
+
+/* Return 2 raised to the power X.  */
+
+inline int
+vect_pow2 (int x)
+{
+ int i, res = 1;
+
+ for (i = 0; i < x; i++)
+ res *= 2;
+
+ return res;
+}
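+
+/* Illustrative usage (not upstream text): vect_pow2 (3) returns 8.
+   An equivalent branch-free sketch, assuming X stays small enough for
+   the shift not to overflow, would be:  return 1 << x;  */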
+
+/* Alias targetm.vectorize.builtin_vectorization_cost. */
+
+inline int
+builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
+ tree vectype, int misalign)
+{
+ return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
+ vectype, misalign);
+}
+
+/* Get cost by calling cost target builtin. */
+
+inline
+int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
+{
+ return builtin_vectorization_cost (type_of_cost, NULL, 0);
+}
+
+/* Alias targetm.vectorize.init_cost. */
+
+inline vector_costs *
+init_cost (vec_info *vinfo, bool costing_for_scalar)
+{
+ return targetm.vectorize.create_costs (vinfo, costing_for_scalar);
+}
+
+extern void dump_stmt_cost (FILE *, int, enum vect_cost_for_stmt,
+ stmt_vec_info, slp_tree, tree, int, unsigned,
+ enum vect_cost_model_location);
+
+/* Alias targetm.vectorize.add_stmt_cost. */
+
+inline unsigned
+add_stmt_cost (vector_costs *costs, int count,
+ enum vect_cost_for_stmt kind,
+ stmt_vec_info stmt_info, slp_tree node,
+ tree vectype, int misalign,
+ enum vect_cost_model_location where)
+{
+ unsigned cost = costs->add_stmt_cost (count, kind, stmt_info, node, vectype,
+ misalign, where);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ dump_stmt_cost (dump_file, count, kind, stmt_info, node, vectype, misalign,
+ cost, where);
+ return cost;
+}
+
+inline unsigned
+add_stmt_cost (vector_costs *costs, int count, enum vect_cost_for_stmt kind,
+ enum vect_cost_model_location where)
+{
+ gcc_assert (kind == cond_branch_taken || kind == cond_branch_not_taken
+ || kind == scalar_stmt);
+ return add_stmt_cost (costs, count, kind, NULL, NULL, NULL_TREE, 0, where);
+}
+
+/* Alias targetm.vectorize.add_stmt_cost. */
+
+inline unsigned
+add_stmt_cost (vector_costs *costs, stmt_info_for_cost *i)
+{
+ return add_stmt_cost (costs, i->count, i->kind, i->stmt_info, i->node,
+ i->vectype, i->misalign, i->where);
+}
+
+/* Alias targetm.vectorize.finish_cost. */
+
+inline void
+finish_cost (vector_costs *costs, const vector_costs *scalar_costs,
+ unsigned *prologue_cost, unsigned *body_cost,
+ unsigned *epilogue_cost, unsigned *suggested_unroll_factor = NULL)
+{
+ costs->finish_cost (scalar_costs);
+ *prologue_cost = costs->prologue_cost ();
+ *body_cost = costs->body_cost ();
+ *epilogue_cost = costs->epilogue_cost ();
+ if (suggested_unroll_factor)
+ *suggested_unroll_factor = costs->suggested_unroll_factor ();
+}
+
+inline void
+add_stmt_costs (vector_costs *costs, stmt_vector_for_cost *cost_vec)
+{
+ stmt_info_for_cost *cost;
+ unsigned i;
+ FOR_EACH_VEC_ELT (*cost_vec, i, cost)
+ add_stmt_cost (costs, cost->count, cost->kind, cost->stmt_info,
+ cost->node, cost->vectype, cost->misalign, cost->where);
+}
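+
+/* A typical costing lifecycle (a hedged sketch, not upstream code;
+   COST_VEC names a previously filled stmt_vector_for_cost and
+   SCALAR_COSTS the already finished scalar costing, if any):
+
+     vector_costs *costs = init_cost (loop_vinfo, false);
+     add_stmt_costs (costs, COST_VEC);
+     unsigned prologue, body, epilogue;
+     finish_cost (costs, SCALAR_COSTS, &prologue, &body, &epilogue);
+     delete costs;  */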
+
+/*-----------------------------------------------------------------*/
+/* Info on data references alignment. */
+/*-----------------------------------------------------------------*/
+#define DR_MISALIGNMENT_UNKNOWN (-1)
+#define DR_MISALIGNMENT_UNINITIALIZED (-2)
+
+inline void
+set_dr_misalignment (dr_vec_info *dr_info, int val)
+{
+ dr_info->misalignment = val;
+}
+
+extern int dr_misalignment (dr_vec_info *dr_info, tree vectype,
+ poly_int64 offset = 0);
+
+#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)
+
+/* Only defined once dr_misalignment is declared.  */
+inline const poly_uint64
+dr_target_alignment (dr_vec_info *dr_info)
+{
+ if (STMT_VINFO_GROUPED_ACCESS (dr_info->stmt))
+ dr_info = STMT_VINFO_DR_INFO (DR_GROUP_FIRST_ELEMENT (dr_info->stmt));
+ return dr_info->target_alignment;
+}
+#define DR_TARGET_ALIGNMENT(DR) dr_target_alignment (DR)
+
+inline void
+set_dr_target_alignment (dr_vec_info *dr_info, poly_uint64 val)
+{
+ dr_info->target_alignment = val;
+}
+#define SET_DR_TARGET_ALIGNMENT(DR, VAL) set_dr_target_alignment (DR, VAL)
+
+/* Return true if data access DR_INFO is aligned to the target's
+ preferred alignment for VECTYPE (which may be less than a full vector). */
+
+inline bool
+aligned_access_p (dr_vec_info *dr_info, tree vectype)
+{
+ return (dr_misalignment (dr_info, vectype) == 0);
+}
+
+/* Return TRUE if the (mis-)alignment of the data access is known with
+   respect to the target's preferred alignment for VECTYPE, and FALSE
+ otherwise. */
+
+inline bool
+known_alignment_for_access_p (dr_vec_info *dr_info, tree vectype)
+{
+ return (dr_misalignment (dr_info, vectype) != DR_MISALIGNMENT_UNKNOWN);
+}
+
+/* Return the minimum alignment in bytes that the vectorized version
+ of DR_INFO is guaranteed to have. */
+
+inline unsigned int
+vect_known_alignment_in_bytes (dr_vec_info *dr_info, tree vectype)
+{
+ int misalignment = dr_misalignment (dr_info, vectype);
+ if (misalignment == DR_MISALIGNMENT_UNKNOWN)
+ return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
+ else if (misalignment == 0)
+ return known_alignment (DR_TARGET_ALIGNMENT (dr_info));
+ return misalignment & -misalignment;
+}
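+
+/* A worked example (illustrative, not upstream text): a known
+   misalignment of 12 bytes guarantees 4-byte alignment, because
+   12 & -12 == 4 extracts the least significant set bit.  */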
+
+/* Return the behavior of DR_INFO with respect to the vectorization context
+ (which for outer loop vectorization might not be the behavior recorded
+ in DR_INFO itself). */
+
+inline innermost_loop_behavior *
+vect_dr_behavior (vec_info *vinfo, dr_vec_info *dr_info)
+{
+ stmt_vec_info stmt_info = dr_info->stmt;
+ loop_vec_info loop_vinfo = dyn_cast<loop_vec_info> (vinfo);
+ if (loop_vinfo == NULL
+ || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info))
+ return &DR_INNERMOST (dr_info->dr);
+ else
+ return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);
+}
+
+/* Return the offset calculated by adding the offset of this DR_INFO to the
+ corresponding data_reference's offset. If CHECK_OUTER then use
+ vect_dr_behavior to select the appropriate data_reference to use. */
+
+inline tree
+get_dr_vinfo_offset (vec_info *vinfo,
+ dr_vec_info *dr_info, bool check_outer = false)
+{
+ innermost_loop_behavior *base;
+ if (check_outer)
+ base = vect_dr_behavior (vinfo, dr_info);
+ else
+ base = &dr_info->dr->innermost;
+
+ tree offset = base->offset;
+
+ if (!dr_info->offset)
+ return offset;
+
+ offset = fold_convert (sizetype, offset);
+ return fold_build2 (PLUS_EXPR, TREE_TYPE (dr_info->offset), offset,
+ dr_info->offset);
+}
+
+
+/* Return the vect cost model for LOOP. */
+inline enum vect_cost_model
+loop_cost_model (loop_p loop)
+{
+ if (loop != NULL
+ && loop->force_vectorize
+ && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
+ return flag_simd_cost_model;
+ return flag_vect_cost_model;
+}
+
+/* Return true if the vect cost model is unlimited. */
+inline bool
+unlimited_cost_model (loop_p loop)
+{
+ return loop_cost_model (loop) == VECT_COST_MODEL_UNLIMITED;
+}
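+
+/* For example (an illustrative note, not upstream text): with
+     gcc -O2 -ftree-vectorize -fvect-cost-model=unlimited
+   loop_cost_model returns VECT_COST_MODEL_UNLIMITED for ordinary
+   loops, so unlimited_cost_model yields true; loops with
+   force_vectorize set (e.g. via #pragma omp simd) consult
+   -fsimd-cost-model first.  */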
+
+/* Return true if the loop described by LOOP_VINFO is fully-masked and
+ if the first iteration should use a partial mask in order to achieve
+ alignment. */
+
+inline bool
+vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo)
+{
+ return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
+ && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
+}
+
+/* Return the number of vectors of type VECTYPE that are needed to get
+ NUNITS elements. NUNITS should be based on the vectorization factor,
+ so it is always a known multiple of the number of elements in VECTYPE. */
+
+inline unsigned int
+vect_get_num_vectors (poly_uint64 nunits, tree vectype)
+{
+ return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant ();
+}
+
+/* Return the number of copies needed for loop vectorization when
+ a statement operates on vectors of type VECTYPE. This is the
+ vectorization factor divided by the number of elements in
+ VECTYPE and is always known at compile time. */
+
+inline unsigned int
+vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype)
+{
+ return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype);
+}
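+
+/* A worked example (illustrative, not upstream text): with a
+   vectorization factor of 8 and a vectype such as V4SI with four
+   subparts, vect_get_num_copies returns exact_div (8, 4) == 2, so two
+   copies of each vector statement are generated.  */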
+
+/* Update maximum unit count *MAX_NUNITS so that it accounts for
+ NUNITS. *MAX_NUNITS can be 1 if we haven't yet recorded anything. */
+
+inline void
+vect_update_max_nunits (poly_uint64 *max_nunits, poly_uint64 nunits)
+{
+ /* All unit counts have the form vec_info::vector_size * X for some
+ rational X, so two unit sizes must have a common multiple.
+ Everything is a multiple of the initial value of 1. */
+ *max_nunits = force_common_multiple (*max_nunits, nunits);
+}
+
+/* Update maximum unit count *MAX_NUNITS so that it accounts for
+ the number of units in vector type VECTYPE. *MAX_NUNITS can be 1
+ if we haven't yet recorded any vector types. */
+
+inline void
+vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
+{
+ vect_update_max_nunits (max_nunits, TYPE_VECTOR_SUBPARTS (vectype));
+}
+
+/* Return the vectorization factor that should be used for costing
+ purposes while vectorizing the loop described by LOOP_VINFO.
+ Pick a reasonable estimate if the vectorization factor isn't
+ known at compile time. */
+
+inline unsigned int
+vect_vf_for_cost (loop_vec_info loop_vinfo)
+{
+ return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
+}
+
+/* Estimate the number of elements in VEC_TYPE for costing purposes.
+ Pick a reasonable estimate if the exact number isn't known at
+ compile time. */
+
+inline unsigned int
+vect_nunits_for_cost (tree vec_type)
+{
+ return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type));
+}
+
+/* Return the maximum possible vectorization factor for LOOP_VINFO. */
+
+inline unsigned HOST_WIDE_INT
+vect_max_vf (loop_vec_info loop_vinfo)
+{
+ unsigned HOST_WIDE_INT vf;
+ if (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
+ return vf;
+ return MAX_VECTORIZATION_FACTOR;
+}
+
+/* Return the size of the value accessed by unvectorized data reference
+ DR_INFO. This is only valid once STMT_VINFO_VECTYPE has been calculated
+ for the associated gimple statement, since that guarantees that DR_INFO
+ accesses either a scalar or a scalar equivalent. ("Scalar equivalent"
+ here includes things like V1SI, which can be vectorized in the same way
+ as a plain SI.) */
+
+inline unsigned int
+vect_get_scalar_dr_size (dr_vec_info *dr_info)
+{
+ return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_info->dr))));
+}
+
+/* Return true if LOOP_VINFO requires a runtime check for whether the
+ vector loop is profitable. */
+
+inline bool
+vect_apply_runtime_profitability_check_p (loop_vec_info loop_vinfo)
+{
+ unsigned int th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
+ return (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+ && th >= vect_vf_for_cost (loop_vinfo));
+}
+
+/* Source location + hotness information. */
+extern dump_user_location_t vect_location;
+
+/* A macro for calling:
+ dump_begin_scope (MSG, vect_location);
+   via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc.,
+ and then calling
+ dump_end_scope ();
+ once the object goes out of scope, thus capturing the nesting of
+ the scopes.
+
+ These scopes affect dump messages within them: dump messages at the
+ top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those
+ in a nested scope implicitly default to MSG_PRIORITY_INTERNALS. */
+
+#define DUMP_VECT_SCOPE(MSG) \
+ AUTO_DUMP_SCOPE (MSG, vect_location)
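+
+/* Example usage (illustrative, not upstream text):
+
+     DUMP_VECT_SCOPE ("vect_analyze_loop_form");
+
+   prints "=== vect_analyze_loop_form ===" to the dump file on entry
+   and closes the scope automatically when the enclosing block exits.  */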
+
+/* A sentinel class for ensuring that the "vect_location" global gets
+ reset at the end of a scope.
+
+ The "vect_location" global is used during dumping and contains a
+ location_t, which could contain references to a tree block via the
+ ad-hoc data. This data is used for tracking inlining information,
+ but it's not a GC root; it's simply assumed that such locations never
+ get accessed if the blocks are optimized away.
+
+ Hence we need to ensure that such locations are purged at the end
+ of any operations using them (e.g. via this class). */
+
+class auto_purge_vect_location
+{
+ public:
+ ~auto_purge_vect_location ();
+};
+
+/*-----------------------------------------------------------------*/
+/* Function prototypes. */
+/*-----------------------------------------------------------------*/
+
+/* Simple loop peeling and versioning utilities for vectorizer's purposes -
+ in tree-vect-loop-manip.cc. */
+extern void vect_set_loop_condition (class loop *, loop_vec_info,
+ tree, tree, tree, bool);
+extern bool slpeel_can_duplicate_loop_p (const class loop *, const_edge);
+class loop *slpeel_tree_duplicate_loop_to_edge_cfg (class loop *,
+ class loop *, edge);
+class loop *vect_loop_versioning (loop_vec_info, gimple *);
+extern class loop *vect_do_peeling (loop_vec_info, tree, tree,
+ tree *, tree *, tree *, int, bool, bool,
+ tree *);
+extern tree vect_get_main_loop_result (loop_vec_info, tree, tree);
+extern void vect_prepare_for_masked_peels (loop_vec_info);
+extern dump_user_location_t find_loop_location (class loop *);
+extern bool vect_can_advance_ivs_p (loop_vec_info);
+extern void vect_update_inits_of_drs (loop_vec_info, tree, tree_code);
+
+/* In tree-vect-stmts.cc. */
+extern tree get_related_vectype_for_scalar_type (machine_mode, tree,
+ poly_uint64 = 0);
+extern tree get_vectype_for_scalar_type (vec_info *, tree, unsigned int = 0);
+extern tree get_vectype_for_scalar_type (vec_info *, tree, slp_tree);
+extern tree get_mask_type_for_scalar_type (vec_info *, tree, unsigned int = 0);
+extern tree get_same_sized_vectype (tree, tree);
+extern bool vect_chooses_same_modes_p (vec_info *, machine_mode);
+extern bool vect_get_loop_mask_type (loop_vec_info);
+extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
+ stmt_vec_info * = NULL, gimple ** = NULL);
+extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
+ tree *, stmt_vec_info * = NULL,
+ gimple ** = NULL);
+extern bool vect_is_simple_use (vec_info *, stmt_vec_info, slp_tree,
+ unsigned, tree *, slp_tree *,
+ enum vect_def_type *,
+ tree *, stmt_vec_info * = NULL);
+extern bool vect_maybe_update_slp_op_vectype (slp_tree, tree);
+extern bool supportable_widening_operation (vec_info *,
+ enum tree_code, stmt_vec_info,
+ tree, tree, enum tree_code *,
+ enum tree_code *, int *,
+ vec<tree> *);
+extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
+ enum tree_code *, int *,
+ vec<tree> *);
+
+extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
+ enum vect_cost_for_stmt, stmt_vec_info,
+ tree, int, enum vect_cost_model_location);
+extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
+ enum vect_cost_for_stmt, slp_tree,
+ tree, int, enum vect_cost_model_location);
+extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
+ enum vect_cost_for_stmt,
+ enum vect_cost_model_location);
+
+/* Overload of record_stmt_cost with VECTYPE derived from STMT_INFO. */
+
+inline unsigned
+record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
+ enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
+ int misalign, enum vect_cost_model_location where)
+{
+ return record_stmt_cost (body_cost_vec, count, kind, stmt_info,
+ STMT_VINFO_VECTYPE (stmt_info), misalign, where);
+}
+
+extern void vect_finish_replace_stmt (vec_info *, stmt_vec_info, gimple *);
+extern void vect_finish_stmt_generation (vec_info *, stmt_vec_info, gimple *,
+ gimple_stmt_iterator *);
+extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info, bool *);
+extern tree vect_get_store_rhs (stmt_vec_info);
+void vect_get_vec_defs_for_operand (vec_info *vinfo, stmt_vec_info, unsigned,
+ tree op, vec<tree> *, tree = NULL);
+void vect_get_vec_defs (vec_info *, stmt_vec_info, slp_tree, unsigned,
+ tree, vec<tree> *,
+ tree = NULL, vec<tree> * = NULL,
+ tree = NULL, vec<tree> * = NULL,
+ tree = NULL, vec<tree> * = NULL);
+void vect_get_vec_defs (vec_info *, stmt_vec_info, slp_tree, unsigned,
+ tree, vec<tree> *, tree,
+ tree = NULL, vec<tree> * = NULL, tree = NULL,
+ tree = NULL, vec<tree> * = NULL, tree = NULL,
+ tree = NULL, vec<tree> * = NULL, tree = NULL);
+extern tree vect_init_vector (vec_info *, stmt_vec_info, tree, tree,
+ gimple_stmt_iterator *);
+extern tree vect_get_slp_vect_def (slp_tree, unsigned);
+extern bool vect_transform_stmt (vec_info *, stmt_vec_info,
+ gimple_stmt_iterator *,
+ slp_tree, slp_instance);
+extern void vect_remove_stores (vec_info *, stmt_vec_info);
+extern bool vect_nop_conversion_p (stmt_vec_info);
+extern opt_result vect_analyze_stmt (vec_info *, stmt_vec_info, bool *,
+ slp_tree,
+ slp_instance, stmt_vector_for_cost *);
+extern void vect_get_load_cost (vec_info *, stmt_vec_info, int,
+ dr_alignment_support, int, bool,
+ unsigned int *, unsigned int *,
+ stmt_vector_for_cost *,
+ stmt_vector_for_cost *, bool);
+extern void vect_get_store_cost (vec_info *, stmt_vec_info, int,
+ dr_alignment_support, int,
+ unsigned int *, stmt_vector_for_cost *);
+extern bool vect_supportable_shift (vec_info *, enum tree_code, tree);
+extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &);
+extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &);
+extern void optimize_mask_stores (class loop*);
+extern tree vect_gen_while (gimple_seq *, tree, tree, tree,
+ const char * = nullptr);
+extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
+extern opt_result vect_get_vector_types_for_stmt (vec_info *,
+ stmt_vec_info, tree *,
+ tree *, unsigned int = 0);
+extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info, unsigned int = 0);
+
+/* In tree-vect-data-refs.cc. */
+extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64);
+extern enum dr_alignment_support vect_supportable_dr_alignment
+ (vec_info *, dr_vec_info *, tree, int);
+extern tree vect_get_smallest_scalar_type (stmt_vec_info, tree);
+extern opt_result vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *);
+extern bool vect_slp_analyze_instance_dependence (vec_info *, slp_instance);
+extern opt_result vect_enhance_data_refs_alignment (loop_vec_info);
+extern opt_result vect_analyze_data_refs_alignment (loop_vec_info);
+extern bool vect_slp_analyze_instance_alignment (vec_info *, slp_instance);
+extern opt_result vect_analyze_data_ref_accesses (vec_info *, vec<int> *);
+extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info);
+extern bool vect_gather_scatter_fn_p (vec_info *, bool, bool, tree, tree,
+ tree, int, internal_fn *, tree *);
+extern bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info,
+ gather_scatter_info *);
+extern opt_result vect_find_stmt_data_reference (loop_p, gimple *,
+ vec<data_reference_p> *,
+ vec<int> *, int);
+extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *, bool *);
+extern void vect_record_base_alignments (vec_info *);
+extern tree vect_create_data_ref_ptr (vec_info *,
+ stmt_vec_info, tree, class loop *, tree,
+ tree *, gimple_stmt_iterator *,
+ gimple **, bool,
+ tree = NULL_TREE);
+extern tree bump_vector_ptr (vec_info *, tree, gimple *, gimple_stmt_iterator *,
+ stmt_vec_info, tree);
+extern void vect_copy_ref_info (tree, tree);
+extern tree vect_create_destination_var (tree, tree);
+extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
+extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
+extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT);
+extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
+extern void vect_permute_store_chain (vec_info *, vec<tree> &,
+ unsigned int, stmt_vec_info,
+ gimple_stmt_iterator *, vec<tree> *);
+extern tree vect_setup_realignment (vec_info *,
+ stmt_vec_info, gimple_stmt_iterator *,
+ tree *, enum dr_alignment_support, tree,
+ class loop **);
+extern void vect_transform_grouped_load (vec_info *, stmt_vec_info, vec<tree>,
+ int, gimple_stmt_iterator *);
+extern void vect_record_grouped_load_vectors (vec_info *,
+ stmt_vec_info, vec<tree>);
+extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
+extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
+ const char * = NULL);
+extern tree vect_create_addr_base_for_vector_ref (vec_info *,
+ stmt_vec_info, gimple_seq *,
+ tree);
+
+/* In tree-vect-loop.cc. */
+extern tree neutral_op_for_reduction (tree, code_helper, tree);
+extern widest_int vect_iv_limit_for_partial_vectors (loop_vec_info loop_vinfo);
+bool vect_rgroup_iv_might_wrap_p (loop_vec_info, rgroup_controls *);
+/* Used in tree-vect-loop-manip.cc */
+extern opt_result vect_determine_partial_vectors_and_peeling (loop_vec_info,
+ bool);
+/* Used in gimple-loop-interchange.cc and tree-parloops.cc.  */
+extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree,
+ enum tree_code);
+extern bool needs_fold_left_reduction_p (tree, code_helper);
+/* Drive for loop analysis stage. */
+extern opt_loop_vec_info vect_analyze_loop (class loop *, vec_info_shared *);
+extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
+extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *,
+ tree *, bool);
+extern tree vect_halve_mask_nunits (tree, machine_mode);
+extern tree vect_double_mask_nunits (tree, machine_mode);
+extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *,
+ unsigned int, tree, tree);
+extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *,
+ unsigned int, tree, unsigned int);
+extern void vect_record_loop_len (loop_vec_info, vec_loop_lens *, unsigned int,
+ tree, unsigned int);
+extern tree vect_get_loop_len (loop_vec_info, vec_loop_lens *, unsigned int,
+ unsigned int);
+extern gimple_seq vect_gen_len (tree, tree, tree, tree);
+extern stmt_vec_info info_for_reduction (vec_info *, stmt_vec_info);
+extern bool reduction_fn_for_scalar_code (code_helper, internal_fn *);
+
+/* Drive for loop transformation stage. */
+extern class loop *vect_transform_loop (loop_vec_info, gimple *);
+struct vect_loop_form_info
+{
+ tree number_of_iterations;
+ tree number_of_iterationsm1;
+ tree assumptions;
+ gcond *loop_cond;
+ gcond *inner_loop_cond;
+};
+extern opt_result vect_analyze_loop_form (class loop *, vect_loop_form_info *);
+extern loop_vec_info vect_create_loop_vinfo (class loop *, vec_info_shared *,
+ const vect_loop_form_info *,
+ loop_vec_info = nullptr);
+extern bool vectorizable_live_operation (vec_info *,
+ stmt_vec_info, gimple_stmt_iterator *,
+ slp_tree, slp_instance, int,
+ bool, stmt_vector_for_cost *);
+extern bool vectorizable_reduction (loop_vec_info, stmt_vec_info,
+ slp_tree, slp_instance,
+ stmt_vector_for_cost *);
+extern bool vectorizable_induction (loop_vec_info, stmt_vec_info,
+ gimple **, slp_tree,
+ stmt_vector_for_cost *);
+extern bool vect_transform_reduction (loop_vec_info, stmt_vec_info,
+ gimple_stmt_iterator *,
+ gimple **, slp_tree);
+extern bool vect_transform_cycle_phi (loop_vec_info, stmt_vec_info,
+ gimple **,
+ slp_tree, slp_instance);
+extern bool vectorizable_lc_phi (loop_vec_info, stmt_vec_info,
+ gimple **, slp_tree);
+extern bool vectorizable_phi (vec_info *, stmt_vec_info, gimple **, slp_tree,
+ stmt_vector_for_cost *);
+extern bool vectorizable_recurr (loop_vec_info, stmt_vec_info,
+ gimple **, slp_tree, stmt_vector_for_cost *);
+extern bool vect_emulated_vector_p (tree);
+extern bool vect_can_vectorize_without_simd_p (tree_code);
+extern bool vect_can_vectorize_without_simd_p (code_helper);
+extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
+ stmt_vector_for_cost *,
+ stmt_vector_for_cost *,
+ stmt_vector_for_cost *);
+extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree);
+
+/* Nonlinear induction. */
+extern tree vect_peel_nonlinear_iv_init (gimple_seq*, tree, tree,
+ tree, enum vect_induction_op_type);
+
+/* In tree-vect-slp.cc. */
+extern void vect_slp_init (void);
+extern void vect_slp_fini (void);
+extern void vect_free_slp_instance (slp_instance);
+extern bool vect_transform_slp_perm_load (vec_info *, slp_tree, const vec<tree> &,
+ gimple_stmt_iterator *, poly_uint64,
+ bool, unsigned *,
+ unsigned * = nullptr, bool = false);
+extern bool vect_slp_analyze_operations (vec_info *);
+extern void vect_schedule_slp (vec_info *, const vec<slp_instance> &);
+extern opt_result vect_analyze_slp (vec_info *, unsigned);
+extern bool vect_make_slp_decision (loop_vec_info);
+extern void vect_detect_hybrid_slp (loop_vec_info);
+extern void vect_optimize_slp (vec_info *);
+extern void vect_gather_slp_loads (vec_info *);
+extern void vect_get_slp_defs (slp_tree, vec<tree> *);
+extern void vect_get_slp_defs (vec_info *, slp_tree, vec<vec<tree> > *,
+ unsigned n = -1U);
+extern bool vect_slp_if_converted_bb (basic_block bb, loop_p orig_loop);
+extern bool vect_slp_function (function *);
+extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree);
+extern stmt_vec_info vect_find_first_scalar_stmt_in_slp (slp_tree);
+extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info);
+extern bool can_duplicate_and_interleave_p (vec_info *, unsigned int, tree,
+ unsigned int * = NULL,
+ tree * = NULL, tree * = NULL);
+extern void duplicate_and_interleave (vec_info *, gimple_seq *, tree,
+ const vec<tree> &, unsigned int, vec<tree> &);
+extern int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info);
+extern slp_tree vect_create_new_slp_node (unsigned, tree_code);
+extern void vect_free_slp_tree (slp_tree);
+extern bool compatible_calls_p (gcall *, gcall *);
+
+/* In tree-vect-patterns.cc. */
+extern void
+vect_mark_pattern_stmts (vec_info *, stmt_vec_info, gimple *, tree);
+
+/* Pattern recognition functions.
+ Additional pattern recognition functions can (and will) be added
+ in the future. */
+void vect_pattern_recog (vec_info *);
+
+/* In tree-vectorizer.cc. */
+unsigned vectorize_loops (void);
+void vect_free_loop_info_assumptions (class loop *);
+gimple *vect_loop_vectorized_call (class loop *, gcond **cond = NULL);
+bool vect_stmt_dominates_stmt_p (gimple *, gimple *);
+
+/* SLP Pattern matcher types, tree-vect-slp-patterns.cc. */
+
+/* Forward declaration of possible two operands operation that can be matched
+ by the complex numbers pattern matchers. */
+enum _complex_operation : unsigned;
+
+/* All possible load permute values that could result from the partial data-flow
+ analysis. */
+typedef enum _complex_perm_kinds {
+ PERM_UNKNOWN,
+ PERM_EVENODD,
+ PERM_ODDEVEN,
+ PERM_ODDODD,
+ PERM_EVENEVEN,
+ /* Can be combined with any other PERM values. */
+ PERM_TOP
+} complex_perm_kinds_t;
+
+/* Cache from nodes to the load permutation they represent. */
+typedef hash_map <slp_tree, complex_perm_kinds_t>
+ slp_tree_to_load_perm_map_t;
+
+/* Cache from nodes pair to being compatible or not. */
+typedef pair_hash <nofree_ptr_hash <_slp_tree>,
+ nofree_ptr_hash <_slp_tree>> slp_node_hash;
+typedef hash_map <slp_node_hash, bool> slp_compat_nodes_map_t;
+
+
+/* Vector pattern matcher base class. All SLP pattern matchers must inherit
+ from this type. */
+
+class vect_pattern
+{
+ protected:
+ /* The number of arguments that the IFN requires. */
+ unsigned m_num_args;
+
+ /* The internal function that will be used when a pattern is created. */
+ internal_fn m_ifn;
+
+ /* The current node being inspected. */
+ slp_tree *m_node;
+
+ /* The list of operands to be the children for the node produced when the
+ internal function is created. */
+ vec<slp_tree> m_ops;
+
+ /* Default constructor where NODE is the root of the tree to inspect. */
+ vect_pattern (slp_tree *node, vec<slp_tree> *m_ops, internal_fn ifn)
+ {
+ this->m_ifn = ifn;
+ this->m_node = node;
+ this->m_ops.create (0);
+ if (m_ops)
+ this->m_ops.safe_splice (*m_ops);
+ }
+
+ public:
+
+ /* Create a new instance of the pattern matcher class of the given type. */
+ static vect_pattern* recognize (slp_tree_to_load_perm_map_t *,
+ slp_compat_nodes_map_t *, slp_tree *);
+
+ /* Build the pattern from the data collected so far. */
+ virtual void build (vec_info *) = 0;
+
+ /* Default destructor. */
+ virtual ~vect_pattern ()
+ {
+ this->m_ops.release ();
+ }
+};
+
+/* Function pointer to create a new pattern matcher from a generic type. */
+typedef vect_pattern* (*vect_pattern_decl_t) (slp_tree_to_load_perm_map_t *,
+ slp_compat_nodes_map_t *,
+ slp_tree *);
+
+/* List of supported pattern matchers. */
+extern vect_pattern_decl_t slp_patterns[];
+
+/* Number of supported pattern matchers. */
+extern size_t num__slp_patterns;
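+
+/* A driver loop over these matchers might look as follows (a hedged
+   sketch of the calling convention, not upstream code; PERM_CACHE,
+   COMPAT_CACHE, NODE and VINFO are assumed locals of the caller):
+
+     for (size_t i = 0; i < num__slp_patterns; i++)
+       if (vect_pattern *pat
+             = slp_patterns[i] (perm_cache, compat_cache, &node))
+         {
+           pat->build (vinfo);
+           delete pat;
+         }  */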
+
+/* ----------------------------------------------------------------------
+ Target support routines
+ -----------------------------------------------------------------------
+ The following routines are provided to simplify costing decisions in
+ target code. Please add more as needed. */
+
+/* Return true if an operation of kind KIND for STMT_INFO represents
+ the extraction of an element from a vector in preparation for
+ storing the element to memory. */
+inline bool
+vect_is_store_elt_extraction (vect_cost_for_stmt kind, stmt_vec_info stmt_info)
+{
+ return (kind == vec_to_scalar
+ && STMT_VINFO_DATA_REF (stmt_info)
+ && DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)));
+}
+
+/* Return true if STMT_INFO represents part of a reduction. */
+inline bool
+vect_is_reduction (stmt_vec_info stmt_info)
+{
+ return STMT_VINFO_REDUC_IDX (stmt_info) >= 0;
+}
+
+/* If STMT_INFO describes a reduction, return the vect_reduction_type
+ of the reduction it describes, otherwise return -1. */
+inline int
+vect_reduc_type (vec_info *vinfo, stmt_vec_info stmt_info)
+{
+ if (loop_vec_info loop_vinfo = dyn_cast<loop_vec_info> (vinfo))
+ if (STMT_VINFO_REDUC_DEF (stmt_info))
+ {
+ stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
+ return int (STMT_VINFO_REDUC_TYPE (reduc_info));
+ }
+ return -1;
+}
+
+/* If STMT_INFO is a COND_EXPR that includes an embedded comparison, return the
+ scalar type of the values being compared. Return null otherwise. */
+inline tree
+vect_embedded_comparison_type (stmt_vec_info stmt_info)
+{
+ if (auto *assign = dyn_cast<gassign *> (stmt_info->stmt))
+ if (gimple_assign_rhs_code (assign) == COND_EXPR)
+ {
+ tree cond = gimple_assign_rhs1 (assign);
+ if (COMPARISON_CLASS_P (cond))
+ return TREE_TYPE (TREE_OPERAND (cond, 0));
+ }
+ return NULL_TREE;
+}
+
+/* If STMT_INFO is a comparison or contains an embedded comparison, return the
+ scalar type of the values being compared. Return null otherwise. */
+inline tree
+vect_comparison_type (stmt_vec_info stmt_info)
+{
+ if (auto *assign = dyn_cast<gassign *> (stmt_info->stmt))
+ if (TREE_CODE_CLASS (gimple_assign_rhs_code (assign)) == tcc_comparison)
+ return TREE_TYPE (gimple_assign_rhs1 (assign));
+ return vect_embedded_comparison_type (stmt_info);
+}
+
+/* Return true if STMT_INFO extends the result of a load. */
+inline bool
+vect_is_extending_load (class vec_info *vinfo, stmt_vec_info stmt_info)
+{
+ /* Although this is quite large for an inline function, this part
+ at least should be inline. */
+ gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
+ if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
+ return false;
+
+ tree rhs = gimple_assign_rhs1 (stmt_info->stmt);
+ tree lhs_type = TREE_TYPE (gimple_assign_lhs (assign));
+ tree rhs_type = TREE_TYPE (rhs);
+ if (!INTEGRAL_TYPE_P (lhs_type)
+ || !INTEGRAL_TYPE_P (rhs_type)
+ || TYPE_PRECISION (lhs_type) <= TYPE_PRECISION (rhs_type))
+ return false;
+
+ stmt_vec_info def_stmt_info = vinfo->lookup_def (rhs);
+ return (def_stmt_info
+ && STMT_VINFO_DATA_REF (def_stmt_info)
+ && DR_IS_READ (STMT_VINFO_DATA_REF (def_stmt_info)));
+}
+
+/* Return true if STMT_INFO is an integer truncation. */
+inline bool
+vect_is_integer_truncation (stmt_vec_info stmt_info)
+{
+ gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
+ if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
+ return false;
+
+ tree lhs_type = TREE_TYPE (gimple_assign_lhs (assign));
+ tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
+ return (INTEGRAL_TYPE_P (lhs_type)
+ && INTEGRAL_TYPE_P (rhs_type)
+ && TYPE_PRECISION (lhs_type) < TYPE_PRECISION (rhs_type));
+}
+
+#endif /* GCC_TREE_VECTORIZER_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-vrp.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-vrp.h
new file mode 100644
index 0000000..3b1d6dc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree-vrp.h
@@ -0,0 +1,45 @@
+/* Support routines for Value Range Propagation (VRP).
+ Copyright (C) 2016-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_VRP_H
+#define GCC_TREE_VRP_H
+
+#include "value-range.h"
+
+extern bool range_int_cst_p (const value_range *);
+
+extern int compare_values (tree, tree);
+extern int compare_values_warnv (tree, tree, bool *);
+extern int operand_less_p (tree, tree);
+
+void range_fold_unary_expr (value_range *, enum tree_code, tree type,
+ const value_range *, tree op0_type);
+void range_fold_binary_expr (value_range *, enum tree_code, tree type,
+ const value_range *, const value_range *);
+
+extern enum value_range_kind intersect_range_with_nonzero_bits
+ (enum value_range_kind, wide_int *, wide_int *, const wide_int &, signop);
+
+extern bool find_case_label_range (gswitch *, tree, tree, size_t *, size_t *);
+extern tree find_case_label_range (gswitch *, const irange *vr);
+extern bool find_case_label_index (gswitch *, size_t, tree, size_t *);
+extern bool overflow_comparison_p (tree_code, tree, tree, tree *);
+extern void maybe_set_nonzero_bits (edge, tree);
+
+#endif /* GCC_TREE_VRP_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree.def
new file mode 100644
index 0000000..ee02754
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree.def
@@ -0,0 +1,1518 @@
+/* This file contains the definitions and documentation for the
+ tree codes used in GCC.
+ Copyright (C) 1987-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* For tcc_references, tcc_expression, tcc_comparison, tcc_unary,
+ tcc_binary, and tcc_statement nodes, which use struct tree_exp, the
+ 4th element is the number of argument slots to allocate. This
+ determines the size of the tree node object. Other nodes use
+ different structures, and the size is determined by the tree_union
+ member structure; the 4th element should be zero. Languages that
+ define language-specific tcc_exceptional or tcc_constant codes must
+ define the tree_size langhook to say how big they are.
+
+ These tree codes have been sorted so that the macros in tree.h that
+ check for various tree codes are optimized into range checks. This
+ gives a measurable performance improvement. When adding a new
+ code, consider its placement in relation to the other codes. */
+
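+/* Consumers define DEFTREECODE before including this file.  A classic
+   expansion (an illustrative sketch of the convention, not text from
+   this file) builds the tree_code enumeration:
+
+     #define DEFTREECODE(SYM, NAME, TYPE, LEN) SYM,
+     enum tree_code {
+     #include "tree.def"
+       LAST_AND_UNUSED_TREE_CODE
+     };
+     #undef DEFTREECODE
+
+   Other consumers reuse the same list to build name and arity tables.  */
+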
+/* Any erroneous construct is parsed into a node of this type.
+ This type of node is accepted without complaint in all contexts
+ by later parsing activities, to avoid multiple error messages
+ for one error.
+ No fields in these nodes are used except the TREE_CODE. */
+DEFTREECODE (ERROR_MARK, "error_mark", tcc_exceptional, 0)
+
+/* Used to represent a name (such as, in the DECL_NAME of a decl node).
+ Internally it looks like a STRING_CST node.
+ There is only one IDENTIFIER_NODE ever made for any particular name.
+ Use `get_identifier' to get it (or create it, the first time). */
+DEFTREECODE (IDENTIFIER_NODE, "identifier_node", tcc_exceptional, 0)
+
+/* Has the TREE_VALUE and TREE_PURPOSE fields. */
+/* These nodes are made into lists by chaining through the
+ TREE_CHAIN field. The elements of the list live in the
+ TREE_VALUE fields, while TREE_PURPOSE fields are occasionally
+ used as well to get the effect of Lisp association lists. */
+DEFTREECODE (TREE_LIST, "tree_list", tcc_exceptional, 0)
+
+/* These nodes contain an array of tree nodes. */
+DEFTREECODE (TREE_VEC, "tree_vec", tcc_exceptional, 0)
+
+/* A symbol binding block. These are arranged in a tree,
+ where the BLOCK_SUBBLOCKS field contains a chain of subblocks
+ chained through the BLOCK_CHAIN field.
+ BLOCK_SUPERCONTEXT points to the parent block.
+ For a block which represents the outermost scope of a function, it
+ points to the FUNCTION_DECL node.
+ BLOCK_VARS points to a chain of decl nodes.
+ BLOCK_CHAIN points to the next BLOCK at the same level.
+ BLOCK_ABSTRACT_ORIGIN points to the original (abstract) tree node which
+ this block is an instance of, or else is NULL to indicate that this
+ block is not an instance of anything else. When non-NULL, the value
+ could either point to another BLOCK node or it could point to a
+ FUNCTION_DECL node (e.g. in the case of a block representing the
+ outermost scope of a particular inlining of a function).
+ TREE_ASM_WRITTEN is nonzero if the block was actually referenced
+ in the generated assembly. */
+DEFTREECODE (BLOCK, "block", tcc_exceptional, 0)
+
+/* Each data type is represented by a tree node whose code is one of
+ the following: */
+/* Each node that represents a data type has a component TYPE_SIZE
+ that evaluates either to a tree that is a (potentially non-constant)
+ expression representing the type size in bits, or to a null pointer
+ when the size of the type is unknown (for example, for incomplete
+ types such as arrays of unspecified bound).
+ The TYPE_MODE contains the machine mode for values of this type.
+ The TYPE_POINTER_TO field contains a type for a pointer to this type,
+ or zero if no such has been created yet.
+ The TYPE_NEXT_VARIANT field is used to chain together types
+ that are variants made by type modifiers such as "const" and "volatile".
+ The TYPE_MAIN_VARIANT field, in any member of such a chain,
+ points to the start of the chain.
+ The TYPE_NAME field contains info on the name used in the program
+ for this type (for GDB symbol table output). It is either a
+ TYPE_DECL node, for types that are typedefs, or an IDENTIFIER_NODE
+ in the case of structs, unions or enums that are known with a tag,
+ or zero for types that have no special name.
+ The TYPE_CONTEXT for any sort of type which could have a name or
+ which could have named members (e.g. tagged types in C/C++) will
+ point to the node which represents the scope of the given type, or
+ will be NULL_TREE if the type has "file scope". For most types, this
+ will point to a BLOCK node or a FUNCTION_DECL node, but it could also
+ point to a FUNCTION_TYPE node (for types whose scope is limited to the
+ formal parameter list of some function type specification) or it
+ could point to a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE node
+ (for C++ "member" types).
+ For non-tagged-types, TYPE_CONTEXT need not be set to anything in
+ particular, since any type which is of some type category (e.g.
+ an array type or a function type) which cannot either have a name
+ itself or have named members doesn't really have a "scope" per se.
+   The TYPE_STUB_DECL field is used as a forward reference to names for
+ ENUMERAL_TYPE, RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE nodes;
+ see below. */
+
+/* The ordering of the following codes is optimized for the checking
+ macros in tree.h. Changing the order will degrade the speed of the
+ compiler. OFFSET_TYPE, ENUMERAL_TYPE, BOOLEAN_TYPE, INTEGER_TYPE,
+ REAL_TYPE, POINTER_TYPE. */
+
+/* An offset is a pointer relative to an object.
+ The TREE_TYPE field is the type of the object at the offset.
+ The TYPE_OFFSET_BASETYPE points to the node for the type of object
+ that the offset is relative to. */
+DEFTREECODE (OFFSET_TYPE, "offset_type", tcc_type, 0)
+
+/* C enums. The type node looks just like an INTEGER_TYPE node.
+ The symbols for the values of the enum type are defined by
+ CONST_DECL nodes, but the type does not point to them;
+ however, the TYPE_VALUES is a list in which each element's TREE_PURPOSE
+ is a name and the TREE_VALUE is the value (an INTEGER_CST node). */
+/* A forward reference `enum foo' when no enum named foo is defined yet
+ has zero (a null pointer) in its TYPE_SIZE. The tag name is in
+ the TYPE_NAME field. If the type is later defined, the normal
+ fields are filled in.
+ RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE forward refs are
+ treated similarly. */
+DEFTREECODE (ENUMERAL_TYPE, "enumeral_type", tcc_type, 0)
+
+/* Boolean type (true or false are the only values). Looks like an
+ INTEGRAL_TYPE. */
+DEFTREECODE (BOOLEAN_TYPE, "boolean_type", tcc_type, 0)
+
+/* Integer types in all languages, including char in C.
+ Also used for sub-ranges of other discrete types.
+ Has components TYPE_MIN_VALUE, TYPE_MAX_VALUE (expressions, inclusive)
+ and TYPE_PRECISION (number of bits used by this type). */
+DEFTREECODE (INTEGER_TYPE, "integer_type", tcc_type, 0)
+
+/* C's float and double. Different floating types are distinguished
+ by machine mode and by the TYPE_SIZE and the TYPE_PRECISION. */
+DEFTREECODE (REAL_TYPE, "real_type", tcc_type, 0)
+
+/* The ordering of the following codes is optimized for the checking
+ macros in tree.h. Changing the order will degrade the speed of the
+ compiler. POINTER_TYPE, REFERENCE_TYPE. Note that this range
+ overlaps the previous range of ordered types. */
+
+/* All pointer-to-x types have code POINTER_TYPE.
+ The TREE_TYPE points to the node for the type pointed to. */
+DEFTREECODE (POINTER_TYPE, "pointer_type", tcc_type, 0)
+
+/* A reference is like a pointer except that it is coerced
+ automatically to the value it points to. Used in C++. */
+DEFTREECODE (REFERENCE_TYPE, "reference_type", tcc_type, 0)
+
+/* The C++ decltype(nullptr) type. */
+DEFTREECODE (NULLPTR_TYPE, "nullptr_type", tcc_type, 0)
+
+/* _Fract and _Accum types in Embedded-C. Different fixed-point types
+ are distinguished by machine mode and by the TYPE_SIZE and the
+ TYPE_PRECISION. */
+DEFTREECODE (FIXED_POINT_TYPE, "fixed_point_type", tcc_type, 0)
+
+/* The ordering of the following codes is optimized for the checking
+ macros in tree.h. Changing the order will degrade the speed of the
+ compiler. COMPLEX_TYPE, VECTOR_TYPE, ARRAY_TYPE. */
+
+/* Complex number types. The TREE_TYPE field is the data type
+ of the real and imaginary parts. It must be of scalar
+ arithmetic type, not including pointer type. */
+DEFTREECODE (COMPLEX_TYPE, "complex_type", tcc_type, 0)
+
+/* Vector types. The TREE_TYPE field is the data type of the vector
+ elements. The TYPE_PRECISION field is the number of subparts of
+ the vector. */
+DEFTREECODE (VECTOR_TYPE, "vector_type", tcc_type, 0)
+
+/* The ordering of the following codes is optimized for the checking
+ macros in tree.h. Changing the order will degrade the speed of the
+ compiler. ARRAY_TYPE, RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE.
+ Note that this range overlaps the previous range. */
+
+/* Types of arrays. Special fields:
+ TREE_TYPE Type of an array element.
+ TYPE_DOMAIN Type to index by.
+ Its range of values specifies the array length.
+ The field TYPE_POINTER_TO (TREE_TYPE (array_type)) is always nonzero
+ and holds the type to coerce a value of that array type to in C.
+ TYPE_STRING_FLAG indicates a string (in contrast to an array of chars)
+ in languages (such as Chill) that make a distinction. */
+/* Array types in C */
+DEFTREECODE (ARRAY_TYPE, "array_type", tcc_type, 0)
+
+/* Struct in C. */
+/* Special fields:
+ TYPE_FIELDS chain of FIELD_DECLs for the fields of the struct,
+ VAR_DECLs, TYPE_DECLs and CONST_DECLs for record-scope variables,
+ types and enumerators and FUNCTION_DECLs for methods associated
+ with the type. */
+/* See the comment above, before ENUMERAL_TYPE, for how
+ forward references to struct tags are handled in C. */
+DEFTREECODE (RECORD_TYPE, "record_type", tcc_type, 0)
+
+/* Union in C. Like a struct, except that the offsets of the fields
+ will all be zero. */
+/* See the comment above, before ENUMERAL_TYPE, for how
+ forward references to union tags are handled in C. */
+DEFTREECODE (UNION_TYPE, "union_type", tcc_type, 0) /* C union type */
+
+/* Similar to UNION_TYPE, except that the expressions in DECL_QUALIFIER
+ in each FIELD_DECL determine what the union contains. The first
+ field whose DECL_QUALIFIER expression is true is deemed to occupy
+ the union. */
+DEFTREECODE (QUAL_UNION_TYPE, "qual_union_type", tcc_type, 0)
+
+/* The ordering of the following codes is optimized for the checking
+ macros in tree.h. Changing the order will degrade the speed of the
+ compiler. VOID_TYPE, FUNCTION_TYPE, METHOD_TYPE. */
+
+/* The void type in C */
+DEFTREECODE (VOID_TYPE, "void_type", tcc_type, 0)
+
+/* Type of functions. Special fields:
+ TREE_TYPE type of value returned.
+ TYPE_ARG_TYPES list of types of arguments expected.
+ This list is made of TREE_LIST nodes.
+ In this list TREE_PURPOSE can be used to indicate the default
+ value of the parameter (used by the C++ front end).
+ Types of "Procedures" in languages where they are different from functions
+ have code FUNCTION_TYPE also, but then TREE_TYPE is zero or void type. */
+DEFTREECODE (FUNCTION_TYPE, "function_type", tcc_type, 0)
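+
+/* Illustrative sketch: for the C prototype
+     int f (char, double);
+   TREE_TYPE is integer_type_node and TYPE_ARG_TYPES is the TREE_LIST
+   chain char -> double -> void, the trailing void_type_node marking
+   the prototype as non-variadic.  Such a type can be built with
+     build_function_type_list (integer_type_node, char_type_node,
+                               double_type_node, NULL_TREE);  */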
+
+/* METHOD_TYPE is the type of a function which takes an extra first
+ argument for "self", which is not present in the declared argument list.
+ The TREE_TYPE is the return type of the method. The TYPE_METHOD_BASETYPE
+ is the type of "self". TYPE_ARG_TYPES is the real argument list, which
+ includes the hidden argument for "self". */
+DEFTREECODE (METHOD_TYPE, "method_type", tcc_type, 0)
+
+/* This is a language-specific kind of type.
+ Its meaning is defined by the language front end.
+ layout_type does not know how to lay this out,
+ so the front-end must do so manually. */
+DEFTREECODE (LANG_TYPE, "lang_type", tcc_type, 0)
+
+/* This is for types that will use MODE_OPAQUE in the back end. They are meant
+ to be able to go in a register of some sort but are explicitly not to be
+ converted or operated on like INTEGER_TYPE. They will have size and
+ alignment information only. */
+DEFTREECODE (OPAQUE_TYPE, "opaque_type", tcc_type, 0)
+
+/* Expressions */
+
+/* First, the constants. */
+
+DEFTREECODE (VOID_CST, "void_cst", tcc_constant, 0)
+
+/* Contents are in an array of HOST_WIDE_INTs.
+
+ We often access these constants both in their native precision and
+ in wider precisions (with the constant being implicitly extended
+ according to TYPE_SIGN). In each case, the useful part of the array
+ may be as wide as the precision requires but may be shorter when all
+ of the upper bits are sign bits. The length of the array when accessed
+ in the constant's native precision is given by TREE_INT_CST_NUNITS.
+ The length of the array when accessed in wider precisions is given
+ by TREE_INT_CST_EXT_NUNITS. Each element can be obtained using
+ TREE_INT_CST_ELT.
+
+ INTEGER_CST nodes can be shared, and therefore should be considered
+ read only. They should be copied before setting a flag such as
+ TREE_OVERFLOW. If an INTEGER_CST has TREE_OVERFLOW already set,
+ it is known to be unique. INTEGER_CST nodes are created for the
+ integral types, for pointer types and for vector and float types in
+ some circumstances. */
+DEFTREECODE (INTEGER_CST, "integer_cst", tcc_constant, 0)
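+
+/* Illustrative sketch (process () is a hypothetical placeholder):
+   walking the native-precision elements of an INTEGER_CST:
+     for (unsigned i = 0; i < TREE_INT_CST_NUNITS (cst); i++)
+       process (TREE_INT_CST_ELT (cst, i));
+   and, per the sharing rule above, copying before setting a flag:
+     cst = copy_node (cst);
+     TREE_OVERFLOW (cst) = 1;  */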
+
+/* Contents are given by POLY_INT_CST_COEFF. */
+DEFTREECODE (POLY_INT_CST, "poly_int_cst", tcc_constant, 0)
+
+/* Contents are in TREE_REAL_CST field. */
+DEFTREECODE (REAL_CST, "real_cst", tcc_constant, 0)
+
+/* Contents are in TREE_FIXED_CST field. */
+DEFTREECODE (FIXED_CST, "fixed_cst", tcc_constant, 0)
+
+/* Contents are in TREE_REALPART and TREE_IMAGPART fields,
+ whose contents are other constant nodes. */
+DEFTREECODE (COMPLEX_CST, "complex_cst", tcc_constant, 0)
+
+/* See generic.texi for details. */
+DEFTREECODE (VECTOR_CST, "vector_cst", tcc_constant, 0)
+
+/* Contents are TREE_STRING_LENGTH and the actual contents of the string. */
+DEFTREECODE (STRING_CST, "string_cst", tcc_constant, 0)
+
+/* Declarations. All references to names are represented as ..._DECL
+ nodes. The decls in one binding context are chained through the
+ TREE_CHAIN field. Each DECL has a DECL_NAME field which contains
+ an IDENTIFIER_NODE. (Some decls, most often labels, may have zero
+ as the DECL_NAME). DECL_CONTEXT points to the node representing
+ the context in which this declaration has its scope. For
+ FIELD_DECLs, this is the RECORD_TYPE, UNION_TYPE, or
+ QUAL_UNION_TYPE node that the field is a member of. For VAR_DECL,
+ PARM_DECL, FUNCTION_DECL, LABEL_DECL, and CONST_DECL nodes, this
+ points to either the FUNCTION_DECL for the containing function, the
+ RECORD_TYPE or UNION_TYPE for the containing type, or NULL_TREE or
+ a TRANSLATION_UNIT_DECL if the given decl has "file scope".
+ DECL_ABSTRACT_ORIGIN, if non-NULL, points to the original (abstract)
+ ..._DECL node of which this decl is an (inlined or template expanded)
+ instance.
+ The TREE_TYPE field holds the data type of the object, when relevant.
+ LABEL_DECLs have no data type. For TYPE_DECL, the TREE_TYPE field
+ contents are the type whose name is being declared.
+ The DECL_ALIGN, DECL_SIZE,
+ and DECL_MODE fields exist in decl nodes just as in type nodes.
+ They are unused in LABEL_DECL, TYPE_DECL and CONST_DECL nodes.
+
+ DECL_FIELD_BIT_OFFSET holds an integer number of bits offset for
+ the location. DECL_VOFFSET holds an expression for a variable
+ offset; it is to be multiplied by DECL_VOFFSET_UNIT (an integer).
+ These fields are relevant only in FIELD_DECLs and PARM_DECLs.
+
+ DECL_INITIAL holds the value to initialize a variable to,
+ or the value of a constant. For a function, it holds the body
+ (a node of type BLOCK representing the function's binding contour
+ and whose body contains the function's statements.) For a LABEL_DECL
+ in C, it is a flag, nonzero if the label's definition has been seen.
+
+ PARM_DECLs use a special field:
+ DECL_ARG_TYPE is the type in which the argument is actually
+ passed, which may be different from its type within the function.
+
+ FUNCTION_DECLs use four special fields:
+ DECL_ARGUMENTS holds a chain of PARM_DECL nodes for the arguments.
+ DECL_RESULT holds a RESULT_DECL node for the value of a function.
+ The DECL_RTL field is 0 for a function that returns no value.
+ (C functions returning void have zero here.)
+ The TREE_TYPE field is the type in which the result is actually
+ returned. This is usually the same as the return type of the
+ FUNCTION_DECL, but it may be a wider integer type because of
+ promotion.
+ DECL_FUNCTION_CODE is a code number that is nonzero for
+ built-in functions. Its value is an enum built_in_function
+ that says which built-in function it is.
+
+ DECL_SOURCE_FILE holds a filename string and DECL_SOURCE_LINE
+ holds a line number. In some cases these can be the location of
+ a reference, if no definition has been seen.
+
+ DECL_ABSTRACT is nonzero if the decl represents an abstract instance
+ of a decl (i.e. one which is nested within an abstract instance of an
+ inline function). */
+
+DEFTREECODE (FUNCTION_DECL, "function_decl", tcc_declaration, 0)
+DEFTREECODE (LABEL_DECL, "label_decl", tcc_declaration, 0)
+/* The ordering of the following codes is optimized for the checking
+ macros in tree.h. Changing the order will degrade the speed of the
+ compiler. FIELD_DECL, VAR_DECL, CONST_DECL, PARM_DECL,
+ TYPE_DECL. */
+DEFTREECODE (FIELD_DECL, "field_decl", tcc_declaration, 0)
+DEFTREECODE (VAR_DECL, "var_decl", tcc_declaration, 0)
+DEFTREECODE (CONST_DECL, "const_decl", tcc_declaration, 0)
+DEFTREECODE (PARM_DECL, "parm_decl", tcc_declaration, 0)
+DEFTREECODE (TYPE_DECL, "type_decl", tcc_declaration, 0)
+DEFTREECODE (RESULT_DECL, "result_decl", tcc_declaration, 0)
+
+/* A "declaration" of a debug temporary. It should only appear in
+ DEBUG stmts. */
+DEFTREECODE (DEBUG_EXPR_DECL, "debug_expr_decl", tcc_declaration, 0)
+
+/* A stmt that marks the beginning of a source statement. */
+DEFTREECODE (DEBUG_BEGIN_STMT, "debug_begin_stmt", tcc_statement, 0)
+
+/* A namespace declaration. Namespaces appear in DECL_CONTEXT of other
+ _DECLs, providing a hierarchy of names. */
+DEFTREECODE (NAMESPACE_DECL, "namespace_decl", tcc_declaration, 0)
+
+/* A declaration import.
+ The C++ FE uses this to represent a using-directive; e.g.:
+ "using namespace foo".
+ But it could be used to represent any declaration import construct.
+ Whenever a declaration import appears in a lexical block, the BLOCK node
+ representing that lexical block in GIMPLE will contain an IMPORTED_DECL
+ node, linked via the BLOCK_VARS accessor of said BLOCK.
+ For a given NODE whose code is IMPORTED_DECL,
+ IMPORTED_DECL_ASSOCIATED_DECL (NODE) accesses the imported declaration. */
+DEFTREECODE (IMPORTED_DECL, "imported_decl", tcc_declaration, 0)
+
+/* A namelist declaration.
+ The Fortran FE uses this to represent a namelist statement, e.g.:
+ NAMELIST /namelist-group-name/ namelist-group-object-list.
+ Whenever a namelist declaration appears in a lexical block, the BLOCK node
+ representing that lexical block in GIMPLE will contain a NAMELIST_DECL
+ node, linked via the BLOCK_VARS accessor of said BLOCK.
+ For a given NODE whose code is NAMELIST_DECL,
+ NAMELIST_DECL_ASSOCIATED_DECL (NODE) accesses the associated declaration. */
+DEFTREECODE (NAMELIST_DECL, "namelist_decl", tcc_declaration, 0)
+
+/* A translation unit. This is not technically a declaration, since it
+ can't be looked up, but it's close enough. */
+DEFTREECODE (TRANSLATION_UNIT_DECL, "translation_unit_decl",\
+ tcc_declaration, 0)
+
+/* References to storage. */
+
+/* The ordering of the following codes is optimized for the classification
+ in handled_component_p. Keep them in a consecutive group. */
+
+/* Value is structure or union component.
+ Operand 0 is the structure or union (an expression).
+ Operand 1 is the field (a node of type FIELD_DECL).
+ Operand 2, if present, is the value of DECL_FIELD_OFFSET, measured
+ in units of DECL_OFFSET_ALIGN / BITS_PER_UNIT. */
+DEFTREECODE (COMPONENT_REF, "component_ref", tcc_reference, 3)
+
+/* Reference to a group of bits within an object. Similar to COMPONENT_REF
+ except the position is given explicitly rather than via a FIELD_DECL.
+ Operand 0 is the structure or union expression;
+ operand 1 is a tree giving the constant number of bits being referenced;
+ operand 2 is a tree giving the constant position of the first referenced bit.
+ The result type width has to match the number of bits referenced.
+ If the result type is integral, its signedness specifies how it is extended
+ to its mode width. */
+DEFTREECODE (BIT_FIELD_REF, "bit_field_ref", tcc_reference, 3)
+
+/* Array indexing.
+ Operand 0 is the array; operand 1 is a (single) array index.
+ Operand 2, if present, is a copy of TYPE_MIN_VALUE of the index.
+ Operand 3, if present, is the element size, measured in units of
+ the alignment of the element type. */
+DEFTREECODE (ARRAY_REF, "array_ref", tcc_reference, 4)
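+
+/* Illustrative sketch: the C expression a[i] is represented as
+     ARRAY_REF <a, i>
+   with operands 2 and 3 normally left NULL, in which case the minimum
+   index and the element size are taken from the array type:
+     build4 (ARRAY_REF, TREE_TYPE (TREE_TYPE (a)), a, i,
+             NULL_TREE, NULL_TREE);  */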
+
+/* Likewise, except that the result is a range ("slice") of the array. The
+ starting index of the resulting array is taken from operand 1 and the size
+ of the range is taken from the type of the expression. */
+DEFTREECODE (ARRAY_RANGE_REF, "array_range_ref", tcc_reference, 4)
+
+/* Used only on an operand of complex type, these return
+ a value of the corresponding component type. */
+DEFTREECODE (REALPART_EXPR, "realpart_expr", tcc_reference, 1)
+DEFTREECODE (IMAGPART_EXPR, "imagpart_expr", tcc_reference, 1)
+
+/* Represents viewing something of one type as being of a second type.
+ This corresponds to an "Unchecked Conversion" in Ada and roughly to
+ the idiom *(type2 *)&X in C. The only operand is the value to be
+ viewed as being of another type. It is undefined if the type of the
+ input and of the expression have different sizes.
+
+ This code may also be used within the LHS of a MODIFY_EXPR, in which
+ case no actual data motion may occur. TREE_ADDRESSABLE will be set in
+ this case and GCC must abort if it could not do the operation without
+ generating insns. */
+DEFTREECODE (VIEW_CONVERT_EXPR, "view_convert_expr", tcc_reference, 1)
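+
+/* Illustrative sketch: with u a 32-bit unsigned integer, the C idiom
+     *(float *) &u
+   corresponds to the single-operand node
+     VIEW_CONVERT_EXPR <float> (u)
+   which reinterprets the same 32 bits instead of converting the value
+   (contrast FLOAT_EXPR and CONVERT_EXPR below).  */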
+
+/* C unary `*'. One operand, an expression for a pointer. */
+DEFTREECODE (INDIRECT_REF, "indirect_ref", tcc_reference, 1)
+
+/* Used to represent lookup in a virtual method table which is dependent on
+ the runtime type of an object. Operands are:
+ OBJ_TYPE_REF_EXPR: An expression that evaluates the value to use.
+ OBJ_TYPE_REF_OBJECT: Is the object on whose behalf the lookup is
+ being performed. Through this the optimizers may be able to statically
+ determine the dynamic type of the object.
+ OBJ_TYPE_REF_TOKEN: An integer index to the virtual method table.
+ The integer index should have as type the original type of
+ OBJ_TYPE_REF_OBJECT; as pointer type conversions are useless in GIMPLE,
+ the type of OBJ_TYPE_REF_OBJECT can change to an unrelated pointer
+ type during optimizations. */
+DEFTREECODE (OBJ_TYPE_REF, "obj_type_ref", tcc_expression, 3)
+
+/* Used to represent the brace-enclosed initializers for a structure or an
+ array. It contains a sequence of component values made out of a VEC of
+ constructor_elt.
+
+ For RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE:
+ The field INDEX of each constructor_elt is a FIELD_DECL.
+
+ For ARRAY_TYPE:
+ The field INDEX of each constructor_elt is the corresponding index.
+ If the index is a RANGE_EXPR, it is a short-hand for many nodes,
+ one for each index in the range. (If the corresponding field VALUE
+ has side-effects, they are evaluated once for each element. Wrap the
+ value in a SAVE_EXPR if you want to evaluate side effects only once.)
+
+ Components that aren't present are cleared as per the C semantics,
+ unless the CONSTRUCTOR_NO_CLEARING flag is set, in which case their
+ value becomes undefined. */
+DEFTREECODE (CONSTRUCTOR, "constructor", tcc_exceptional, 0)
+
+/* The expression types are mostly straightforward, with the fourth argument
+ of DEFTREECODE saying how many operands there are.
+ Unless otherwise specified, the operands are expressions and the
+ types of all the operands and the expression must all be the same. */
+
+/* Contains two expressions to compute, one followed by the other.
+ The first value is ignored; the second one's value is used. The
+ type of the first expression need not agree with the other types. */
+DEFTREECODE (COMPOUND_EXPR, "compound_expr", tcc_expression, 2)
+
+/* Assignment expression. Operand 0 is what to set; 1, the new value. */
+DEFTREECODE (MODIFY_EXPR, "modify_expr", tcc_expression, 2)
+
+/* Initialization expression. Operand 0 is the variable to initialize;
+ Operand 1 is the initializer. This differs from MODIFY_EXPR in that any
+ reference to the referent of operand 0 within operand 1 is undefined. */
+DEFTREECODE (INIT_EXPR, "init_expr", tcc_expression, 2)
+
+/* For TARGET_EXPR, operand 0 is the target of an initialization,
+ operand 1 is the initializer for the target, which may be void
+ if simply expanding it initializes the target.
+ Operand 2 is the cleanup for this node, if any.
+ Operand 3 is the saved initializer after this node has been
+ expanded once; this is so we can re-expand the tree later. */
+DEFTREECODE (TARGET_EXPR, "target_expr", tcc_expression, 4)
+
+/* Conditional expression ( ... ? ... : ... in C).
+ Operand 0 is the condition.
+ Operand 1 is the then-value.
+ Operand 2 is the else-value.
+ Operand 0 may be of any type.
+ Operand 1 must have the same type as the entire expression, unless
+ it unconditionally throws an exception, in which case it should
+ have VOID_TYPE. The same constraints apply to operand 2. The
+ condition in operand 0 must be of integral type.
+
+ In cfg gimple, if you do not have a selection expression, operands
+ 1 and 2 are NULL. The operands are then taken from the cfg edges. */
+DEFTREECODE (COND_EXPR, "cond_expr", tcc_expression, 3)
+
+/* Represents a vector in which every element is equal to operand 0. */
+DEFTREECODE (VEC_DUPLICATE_EXPR, "vec_duplicate_expr", tcc_unary, 1)
+
+/* Vector series created from a start (base) value and a step.
+
+ A = VEC_SERIES_EXPR (B, C)
+
+ means
+
+ for (i = 0; i < N; i++)
+ A[i] = B + C * i; */
+DEFTREECODE (VEC_SERIES_EXPR, "vec_series_expr", tcc_binary, 2)
+
+/* Vector conditional expression. It is like COND_EXPR, but with
+ vector operands.
+
+ A = VEC_COND_EXPR ( X < Y, B, C)
+
+ means
+
+ for (i=0; i<N; i++)
+ A[i] = X[i] < Y[i] ? B[i] : C[i];
+*/
+DEFTREECODE (VEC_COND_EXPR, "vec_cond_expr", tcc_expression, 3)
+
+/* Vector permutation expression. A = VEC_PERM_EXPR<v0, v1, mask> means
+
+ N = length(mask)
+ foreach i in N:
+ M = mask[i] % (2*N)
+ A[i] = M < N ? v0[M] : v1[M-N]
+
+ V0 and V1 are vectors of the same type. MASK is an integer-typed
+ vector. The number of MASK elements must be the same as the number
+ of elements in V0 and V1. The inner type of MASK and of V0 and V1
+ must have the same size.
+*/
+DEFTREECODE (VEC_PERM_EXPR, "vec_perm_expr", tcc_expression, 3)
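+
+/* Worked example: with N == 4,
+     v0 = {a,b,c,d}, v1 = {e,f,g,h}, mask = {0,4,1,5}
+   the rule above selects v0[0], v1[0], v0[1], v1[1], so
+     A = {a,e,b,f}
+   i.e. an interleave of the low halves of v0 and v1.  */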
+
+/* Declare local variables, including making RTL and allocating space.
+ BIND_EXPR_VARS is a chain of VAR_DECL nodes for the variables.
+ BIND_EXPR_BODY is the body, the expression to be computed using
+ the variables. The value of operand 1 becomes that of the BIND_EXPR.
+ BIND_EXPR_BLOCK is the BLOCK that corresponds to these bindings
+ for debugging purposes. If this BIND_EXPR is actually expanded,
+ that sets the TREE_USED flag in the BLOCK.
+
+ The BIND_EXPR is not responsible for informing parsers
+ about these variables. If the body is coming from the input file,
+ then the code that creates the BIND_EXPR is also responsible for
+ informing the parser of the variables.
+
+ If the BIND_EXPR is ever expanded, its TREE_USED flag is set.
+ This tells the code for debugging symbol tables not to ignore the BIND_EXPR.
+ If the BIND_EXPR should be output for debugging but will not be expanded,
+ set the TREE_USED flag by hand.
+
+ In order for the BIND_EXPR to be known at all, the code that creates it
+ must also install it as a subblock in the tree of BLOCK
+ nodes for the function. */
+DEFTREECODE (BIND_EXPR, "bind_expr", tcc_expression, 3)
+
+/* Function call. CALL_EXPRs are represented by variably-sized expression
+ nodes. There are at least three fixed operands. Operand 0 is an
+ INTEGER_CST node containing the total operand count, the number of
+ arguments plus 3. Operand 1 is the function or NULL, while operand 2
+ is the static chain argument, or NULL. The remaining operands are the
+ arguments to the call. */
+DEFTREECODE (CALL_EXPR, "call_expr", tcc_vl_exp, 3)
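+
+/* Illustrative sketch: for the C call f (x, y) the operands are
+     operand 0: INTEGER_CST 5           (2 arguments + 3)
+     operand 1: ADDR_EXPR <f>           (the callee)
+     operand 2: NULL_TREE               (no static chain)
+     operands 3 and 4: x and y
+   normally accessed via CALL_EXPR_FN (t) and CALL_EXPR_ARG (t, i)
+   rather than by raw operand index.  */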
+
+/* Specify a value to compute along with its corresponding cleanup.
+ Operand 0 is the cleanup expression.
+ The cleanup is executed by the first enclosing CLEANUP_POINT_EXPR,
+ which must exist. This differs from TRY_CATCH_EXPR in that operand 1
+ is always evaluated when cleanups are run. */
+DEFTREECODE (WITH_CLEANUP_EXPR, "with_cleanup_expr", tcc_expression, 1)
+
+/* Specify a cleanup point.
+ Operand 0 is an expression that may have cleanups. If it does, those
+ cleanups are executed after the expression is expanded.
+
+ Note that if the expression is a reference to storage, it is forced out
+ of memory before the cleanups are run. This is necessary to handle
+ cases where the cleanups modify the storage referenced; in the
+ expression 't.i', if 't' is a struct with an integer member 'i' and a
+ cleanup which modifies 'i', the value of the expression depends on
+ whether the cleanup is run before or after 't.i' is evaluated. When
+ expand_expr is run on 't.i', it returns a MEM. This is not good enough;
+ the value of 't.i' must be forced out of memory.
+
+ As a consequence, the operand of a CLEANUP_POINT_EXPR must not have
+ BLKmode, because it will not be forced out of memory. */
+DEFTREECODE (CLEANUP_POINT_EXPR, "cleanup_point_expr", tcc_expression, 1)
+
+/* The following code is used in languages that have types where some
+ field in an object of the type contains a value that is used in the
+ computation of another field's offset or size and/or the size of the
+ type. The positions and/or sizes of fields can vary from object to
+ object of the same type or even for one and the same object within
+ its scope.
+
+ Record types with discriminants in Ada are
+ examples of such types. This mechanism is also used to create "fat
+ pointers" for unconstrained array types in Ada; the fat pointer is a
+ structure one of whose fields is a pointer to the actual array type
+ and the other field is a pointer to a template, which is a structure
+ containing the bounds of the array. The bounds in the type pointed
+ to by the first field in the fat pointer refer to the values in the
+ template.
+
+ When you wish to construct such a type you need "self-references"
+ that allow you to reference the object having this type from the
+ TYPE node, i.e. without having a variable instantiating this type.
+
+ Such a "self-reference" is made using a PLACEHOLDER_EXPR. This is
+ a node that will later be replaced with the object being referenced.
+ Its type is that of the object and selects which object to use from
+ a chain of references (see below). No other slots are used in the
+ PLACEHOLDER_EXPR.
+
+ For example, if your type FOO is a RECORD_TYPE with a field BAR,
+ and you need the value of <variable>.BAR to calculate TYPE_SIZE
+ (FOO), just substitute <variable> above with a PLACEHOLDER_EXPR
+ whose TREE_TYPE is FOO. Then construct your COMPONENT_REF with
+ the PLACEHOLDER_EXPR as the first operand (which has the correct
+ type). Later, when the size is needed in the program, the back-end
+ will find this PLACEHOLDER_EXPR and generate code to calculate the
+ actual size at run-time. In the following, we describe how this
+ calculation is done.
+
+ When we wish to evaluate a size or offset, we check whether it contains a
+ PLACEHOLDER_EXPR. If it does, we call substitute_placeholder_in_expr
+ passing both that tree and an expression within which the object may be
+ found. The latter expression is the object itself in the simple case of
+ an Ada record with discriminant, but it can be the array in the case of an
+ unconstrained array.
+
+ In the latter case, we need the fat pointer, because the bounds of
+ the array can only be accessed from it. However, we rely here on the
+ fact that the expression for the array contains the dereference of
+ the fat pointer that obtained the array pointer. */
+
+/* Denotes a record to later be substituted before evaluating this expression.
+ The type of this expression is used to find the record to replace it. */
+DEFTREECODE (PLACEHOLDER_EXPR, "placeholder_expr", tcc_exceptional, 0)
+
+/* Simple arithmetic. */
+DEFTREECODE (PLUS_EXPR, "plus_expr", tcc_binary, 2)
+DEFTREECODE (MINUS_EXPR, "minus_expr", tcc_binary, 2)
+DEFTREECODE (MULT_EXPR, "mult_expr", tcc_binary, 2)
+
+/* Pointer addition. The first operand is always a pointer and the
+ second operand is an integer of type sizetype. */
+DEFTREECODE (POINTER_PLUS_EXPR, "pointer_plus_expr", tcc_binary, 2)
+
+/* Pointer subtraction. The two arguments are pointers, and the result
+ is a signed integer of the same precision. Pointers are interpreted
+ as unsigned, the difference is computed as if in infinite signed
+ precision. Behavior is undefined if the difference does not fit in
+ the result type. The result does not depend on the pointer type,
+ it is not divided by the size of the pointed-to type. */
+DEFTREECODE (POINTER_DIFF_EXPR, "pointer_diff_expr", tcc_binary, 2)
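+
+/* Illustrative sketch: for int *p, *q the C expression p - q lowers
+   to roughly
+     EXACT_DIV_EXPR <POINTER_DIFF_EXPR <p, q>, 4>
+   i.e. the byte difference divided by sizeof (int), assumed to be 4
+   here; see EXACT_DIV_EXPR below.  */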
+
+/* Highpart multiplication. For an integral type with precision B,
+ returns bits [2B-1, B] of the full 2*B product. Both operands
+ and the result should have integer types of the same precision
+ and signedness. */
+DEFTREECODE (MULT_HIGHPART_EXPR, "mult_highpart_expr", tcc_binary, 2)
+
+/* Division for integer result that rounds the quotient toward zero. */
+DEFTREECODE (TRUNC_DIV_EXPR, "trunc_div_expr", tcc_binary, 2)
+
+/* Division for integer result that rounds it toward plus infinity. */
+DEFTREECODE (CEIL_DIV_EXPR, "ceil_div_expr", tcc_binary, 2)
+
+/* Division for integer result that rounds it toward minus infinity. */
+DEFTREECODE (FLOOR_DIV_EXPR, "floor_div_expr", tcc_binary, 2)
+
+/* Division for integer result that rounds it toward nearest integer. */
+DEFTREECODE (ROUND_DIV_EXPR, "round_div_expr", tcc_binary, 2)
+
+/* Four kinds of remainder that go with the four kinds of division: */
+
+/* The sign of the remainder is that of the dividend. */
+DEFTREECODE (TRUNC_MOD_EXPR, "trunc_mod_expr", tcc_binary, 2)
+
+/* The sign of the remainder is the opposite of that of the divisor. */
+DEFTREECODE (CEIL_MOD_EXPR, "ceil_mod_expr", tcc_binary, 2)
+
+/* The sign of the remainder is that of the divisor. */
+DEFTREECODE (FLOOR_MOD_EXPR, "floor_mod_expr", tcc_binary, 2)
+
+/* The sign of the remainder is not predictable. */
+DEFTREECODE (ROUND_MOD_EXPR, "round_mod_expr", tcc_binary, 2)
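+
+/* Worked example for the four division/remainder pairs, dividing
+   7 and -7 by 3:
+              7/3  rem    -7/3  rem
+     TRUNC     2    1      -2   -1
+     CEIL      3   -2      -2   -1
+     FLOOR     2    1      -3    2
+     ROUND     2    1      -2   -1
+   In each case dividend == quotient * divisor + remainder.  */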
+
+/* Division for real result. */
+DEFTREECODE (RDIV_EXPR, "rdiv_expr", tcc_binary, 2)
+
+/* Division which is not supposed to need rounding.
+ Used for pointer subtraction in C. */
+DEFTREECODE (EXACT_DIV_EXPR, "exact_div_expr", tcc_binary, 2)
+
+/* Conversion of real to fixed point by truncation. */
+DEFTREECODE (FIX_TRUNC_EXPR, "fix_trunc_expr", tcc_unary, 1)
+
+/* Conversion of an integer to a real. */
+DEFTREECODE (FLOAT_EXPR, "float_expr", tcc_unary, 1)
+
+/* Unary negation. */
+DEFTREECODE (NEGATE_EXPR, "negate_expr", tcc_unary, 1)
+
+/* Minimum and maximum values. When used with floating point, if both
+ operands are zeros, or if either operand is NaN, then it is unspecified
+ which of the two operands is returned as the result. */
+DEFTREECODE (MIN_EXPR, "min_expr", tcc_binary, 2)
+DEFTREECODE (MAX_EXPR, "max_expr", tcc_binary, 2)
+
+/* Represents the absolute value of the operand.
+
+ An ABS_EXPR must have either an INTEGER_TYPE or a REAL_TYPE. The
+ operand of the ABS_EXPR must have the same type. */
+DEFTREECODE (ABS_EXPR, "abs_expr", tcc_unary, 1)
+
+/* Represents the unsigned absolute value of the operand.
+ An ABSU_EXPR must have unsigned INTEGER_TYPE. The operand of the ABSU_EXPR
+ must have the corresponding signed type. */
+DEFTREECODE (ABSU_EXPR, "absu_expr", tcc_unary, 1)
+
+/* Shift operations for shift and rotate.
+ Shift means logical shift if done on an
+ unsigned type, arithmetic shift if done on a signed type.
+ The second operand is the number of bits to
+ shift by; it need not be the same type as the first operand and result.
+ Note that the result is undefined if the second operand is larger
+ than or equal to the first operand's type size.
+
+ The first operand of a shift can have either an integer or a
+ (non-integer) fixed-point type. We follow the ISO/IEC TR 18037:2004
+ semantics for the latter.
+
+ Rotates are defined for integer types only. */
+DEFTREECODE (LSHIFT_EXPR, "lshift_expr", tcc_binary, 2)
+DEFTREECODE (RSHIFT_EXPR, "rshift_expr", tcc_binary, 2)
+DEFTREECODE (LROTATE_EXPR, "lrotate_expr", tcc_binary, 2)
+DEFTREECODE (RROTATE_EXPR, "rrotate_expr", tcc_binary, 2)
+
+/* Bitwise operations. Operands have same mode as result. */
+DEFTREECODE (BIT_IOR_EXPR, "bit_ior_expr", tcc_binary, 2)
+DEFTREECODE (BIT_XOR_EXPR, "bit_xor_expr", tcc_binary, 2)
+DEFTREECODE (BIT_AND_EXPR, "bit_and_expr", tcc_binary, 2)
+DEFTREECODE (BIT_NOT_EXPR, "bit_not_expr", tcc_unary, 1)
+
+/* ANDIF and ORIF allow the second operand not to be computed if the
+ value of the expression is determined from the first operand. AND,
+ OR, and XOR always compute the second operand whether its value is
+ needed or not (for side effects). The operand may have
+ BOOLEAN_TYPE or INTEGER_TYPE. In either case, the argument will be
+ either zero or one. For example, a TRUTH_NOT_EXPR will never have
+ an INTEGER_TYPE VAR_DECL as its argument; instead, a NE_EXPR will be
+ used to compare the VAR_DECL to zero, thereby obtaining a node with
+ value zero or one. */
+DEFTREECODE (TRUTH_ANDIF_EXPR, "truth_andif_expr", tcc_expression, 2)
+DEFTREECODE (TRUTH_ORIF_EXPR, "truth_orif_expr", tcc_expression, 2)
+DEFTREECODE (TRUTH_AND_EXPR, "truth_and_expr", tcc_expression, 2)
+DEFTREECODE (TRUTH_OR_EXPR, "truth_or_expr", tcc_expression, 2)
+DEFTREECODE (TRUTH_XOR_EXPR, "truth_xor_expr", tcc_expression, 2)
+DEFTREECODE (TRUTH_NOT_EXPR, "truth_not_expr", tcc_expression, 1)
+
+/* Relational operators.
+ EQ_EXPR and NE_EXPR are allowed for any types. The others, except for
+ LTGT_EXPR, are allowed only for integral, floating-point and vector types.
+ LTGT_EXPR is allowed only for floating-point types.
+ For floating-point operators, if either operand is a NaN, then NE_EXPR
+ returns true and the remaining operators return false. The operators
+ other than EQ_EXPR and NE_EXPR may generate an exception on quiet NaNs.
+ In all cases the operands will have the same type,
+ and the value is either the type used by the language for booleans
+ or an integer vector type of the same size and with the same number
+ of elements as the comparison operands. True for a vector of
+ comparison results has all bits set while false is equal to zero. */
+DEFTREECODE (LT_EXPR, "lt_expr", tcc_comparison, 2)
+DEFTREECODE (LE_EXPR, "le_expr", tcc_comparison, 2)
+DEFTREECODE (GT_EXPR, "gt_expr", tcc_comparison, 2)
+DEFTREECODE (GE_EXPR, "ge_expr", tcc_comparison, 2)
+DEFTREECODE (LTGT_EXPR, "ltgt_expr", tcc_comparison, 2)
+DEFTREECODE (EQ_EXPR, "eq_expr", tcc_comparison, 2)
+DEFTREECODE (NE_EXPR, "ne_expr", tcc_comparison, 2)
+
+/* Additional relational operators for floating-point unordered. */
+DEFTREECODE (UNORDERED_EXPR, "unordered_expr", tcc_comparison, 2)
+DEFTREECODE (ORDERED_EXPR, "ordered_expr", tcc_comparison, 2)
+
+/* These are "unordered or ..." comparisons; e.g. UNLT_EXPR is true if
+ the operands are unordered or if op0 < op1. */
+DEFTREECODE (UNLT_EXPR, "unlt_expr", tcc_comparison, 2)
+DEFTREECODE (UNLE_EXPR, "unle_expr", tcc_comparison, 2)
+DEFTREECODE (UNGT_EXPR, "ungt_expr", tcc_comparison, 2)
+DEFTREECODE (UNGE_EXPR, "unge_expr", tcc_comparison, 2)
+DEFTREECODE (UNEQ_EXPR, "uneq_expr", tcc_comparison, 2)
+
+DEFTREECODE (RANGE_EXPR, "range_expr", tcc_binary, 2)
+
+/* Represents a re-association barrier for floating point expressions
+ like explicit parenthesis in fortran. */
+DEFTREECODE (PAREN_EXPR, "paren_expr", tcc_unary, 1)
+
+/* Represents a conversion of type of a value.
+ All conversions, including implicit ones, must be
+ represented by CONVERT_EXPR or NOP_EXPR nodes. */
+DEFTREECODE (CONVERT_EXPR, "convert_expr", tcc_unary, 1)
+
+/* Conversion of a pointer value to a pointer to a different
+ address space. */
+DEFTREECODE (ADDR_SPACE_CONVERT_EXPR, "addr_space_convert_expr", tcc_unary, 1)
+
+/* Conversion of a fixed-point value to an integer, a real, or a fixed-point
+ value. Or conversion of a fixed-point value from an integer, a real, or
+ a fixed-point value. */
+DEFTREECODE (FIXED_CONVERT_EXPR, "fixed_convert_expr", tcc_unary, 1)
+
+/* Represents a conversion expected to require no code to be generated. */
+DEFTREECODE (NOP_EXPR, "nop_expr", tcc_unary, 1)
+
+/* Value is same as argument, but guaranteed not an lvalue. */
+DEFTREECODE (NON_LVALUE_EXPR, "non_lvalue_expr", tcc_unary, 1)
+
+/* A COMPOUND_LITERAL_EXPR represents a literal that is placed in a DECL. The
+ COMPOUND_LITERAL_EXPR_DECL_EXPR is a DECL_EXPR containing the decl
+ for the anonymous object represented by the COMPOUND_LITERAL;
+ the DECL_INITIAL of that decl is the CONSTRUCTOR that initializes
+ the compound literal. */
+DEFTREECODE (COMPOUND_LITERAL_EXPR, "compound_literal_expr", tcc_expression, 1)
+
+/* Represents something we computed once and will use multiple times.
+ First operand is that expression. After it is evaluated once, it
+ will be replaced by the temporary variable that holds the value. */
+DEFTREECODE (SAVE_EXPR, "save_expr", tcc_expression, 1)
+
+/* & in C. Value is the address at which the operand's value resides.
+ Operand may have any mode. Result mode is Pmode. */
+DEFTREECODE (ADDR_EXPR, "addr_expr", tcc_expression, 1)
+
+/* Operand0 is a function constant; result is part N of a function
+ descriptor of type ptr_mode. */
+DEFTREECODE (FDESC_EXPR, "fdesc_expr", tcc_expression, 2)
+
+/* Given a container value, a replacement value and a bit position within
+ the container, produce the value that results from replacing the part of
+ the container starting at the bit position with the replacement value.
+ Operand 0 is a tree for the container value of integral or vector type;
+ Operand 1 is a tree for the replacement value of another integral or
+ the vector element type;
+ Operand 2 is a tree giving the constant bit position;
+ The number of bits replaced is given by the precision of the type of the
+ replacement value if it is integral or by its size if it is non-integral.
+ ??? The reason to make the size of the replacement implicit is to avoid
+ introducing a quaternary operation.
+ The replaced bits shall be fully inside the container. If the container
+ is of vector type, then these bits shall be aligned with its elements. */
+DEFTREECODE (BIT_INSERT_EXPR, "bit_insert_expr", tcc_expression, 3)
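+
+/* Illustrative sketch: with v a vector of four 32-bit integers and s
+   a 32-bit scalar,
+     BIT_INSERT_EXPR <v, s, 64>
+   yields v with element 2 replaced by s; position 64 is element-
+   aligned, as required above.  */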
+
+/* Given two real or integer operands of the same type,
+ returns a complex value of the corresponding complex type. */
+DEFTREECODE (COMPLEX_EXPR, "complex_expr", tcc_binary, 2)
+
+/* Complex conjugate of operand. Used only on complex types. */
+DEFTREECODE (CONJ_EXPR, "conj_expr", tcc_unary, 1)
+
+/* Nodes for ++ and -- in C.
+ The second arg is how much to increment or decrement by.
+ For a pointer, it would be the size of the object pointed to. */
+DEFTREECODE (PREDECREMENT_EXPR, "predecrement_expr", tcc_expression, 2)
+DEFTREECODE (PREINCREMENT_EXPR, "preincrement_expr", tcc_expression, 2)
+DEFTREECODE (POSTDECREMENT_EXPR, "postdecrement_expr", tcc_expression, 2)
+DEFTREECODE (POSTINCREMENT_EXPR, "postincrement_expr", tcc_expression, 2)
+
+/* Used to implement `va_arg'. */
+DEFTREECODE (VA_ARG_EXPR, "va_arg_expr", tcc_expression, 1)
+
+/* Evaluate operand 0. If and only if an exception is thrown during
+ the evaluation of operand 0, evaluate operand 1.
+
+ This differs from TRY_FINALLY_EXPR in that operand 1 is not evaluated
+ on a normal or jump exit, only on an exception. */
+DEFTREECODE (TRY_CATCH_EXPR, "try_catch_expr", tcc_statement, 2)
+
+/* Evaluate the first operand.
+ The second operand is a cleanup expression which is evaluated
+ on any exit (normal, exception, or jump out) from this expression. */
+DEFTREECODE (TRY_FINALLY_EXPR, "try_finally_expr", tcc_statement, 2)
+
+/* Evaluate either the normal or the exceptional cleanup. This must
+ only be present as the cleanup expression in a TRY_FINALLY_EXPR.
+ If the TRY_FINALLY_EXPR completes normally, the first operand of
+ EH_ELSE_EXPR is used as a cleanup, otherwise the second operand is
+ used. */
+DEFTREECODE (EH_ELSE_EXPR, "eh_else_expr", tcc_statement, 2)
+
+/* These types of expressions have no useful value,
+ and always have side effects. */
+
+/* Used to represent a local declaration. The operand is DECL_EXPR_DECL. */
+DEFTREECODE (DECL_EXPR, "decl_expr", tcc_statement, 1)
+
+/* A label definition, encapsulated as a statement.
+ Operand 0 is the LABEL_DECL node for the label that appears here.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (LABEL_EXPR, "label_expr", tcc_statement, 1)
+
+/* GOTO. Operand 0 is a LABEL_DECL node or an expression.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (GOTO_EXPR, "goto_expr", tcc_statement, 1)
+
+/* RETURN. Evaluates operand 0, then returns from the current function.
+ Presumably that operand is an assignment that stores into the
+ RESULT_DECL that holds the value to be returned.
+ The operand may be null.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (RETURN_EXPR, "return_expr", tcc_statement, 1)
+
+/* Exit the innermost loop conditionally. Operand 0 is the condition.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (EXIT_EXPR, "exit_expr", tcc_statement, 1)
+
+/* A loop. Operand 0 is the body of the loop.
+ It must contain an EXIT_EXPR, or it is an infinite loop.
+ The type should be void and the value should be ignored. */
+DEFTREECODE (LOOP_EXPR, "loop_expr", tcc_statement, 1)
+
+/* Switch expression.
+
+ TREE_TYPE is the original type of the condition, before any
+ language required type conversions. It may be NULL, in which case
+ the original type and final types are assumed to be the same.
+
+ Operand 0 is the expression used to perform the branch,
+ Operand 1 is the body of the switch, which probably contains
+ CASE_LABEL_EXPRs. */
+DEFTREECODE (SWITCH_EXPR, "switch_expr", tcc_statement, 2)
+
+/* Used to represent a case label.
+
+ Operand 0 is CASE_LOW. It may be NULL_TREE, in which case the label
+ is a 'default' label.
+ Operand 1 is CASE_HIGH. If it is NULL_TREE, the label is a simple
+ (one-value) case label. If it is non-NULL_TREE, the case is a range.
+ Operand 2 is CASE_LABEL, which has the corresponding LABEL_DECL.
+ Operand 3 is CASE_CHAIN. This operand is only used in tree-cfg.cc to
+ speed up the lookup of case labels which use a particular edge in
+ the control flow graph. */
+DEFTREECODE (CASE_LABEL_EXPR, "case_label_expr", tcc_statement, 4)
+
+/* Used to represent an inline assembly statement. ASM_STRING returns a
+ STRING_CST for the instruction (e.g., "mov x, y"). ASM_OUTPUTS,
+ ASM_INPUTS, and ASM_CLOBBERS represent the outputs, inputs, and clobbers
+ for the statement. ASM_LABELS, if present, indicates various destinations
+ for the asm; labels cannot be combined with outputs. */
+DEFTREECODE (ASM_EXPR, "asm_expr", tcc_statement, 5)
+
+/* Variable references for SSA analysis. New SSA names are created every
+ time a variable is assigned a new value. The SSA builder uses SSA_NAME
+ nodes to implement SSA versioning. */
+DEFTREECODE (SSA_NAME, "ssa_name", tcc_exceptional, 0)
+
+/* Used to represent a typed exception handler. CATCH_TYPES is the type (or
+ list of types) handled, and CATCH_BODY is the code for the handler. */
+DEFTREECODE (CATCH_EXPR, "catch_expr", tcc_statement, 2)
+
+/* Used to represent an exception specification. EH_FILTER_TYPES is a list
+ of allowed types, and EH_FILTER_FAILURE is an expression to evaluate on
+ failure. */
+DEFTREECODE (EH_FILTER_EXPR, "eh_filter_expr", tcc_statement, 2)
+
+/* Node used for describing a property that is known at compile
+ time. */
+DEFTREECODE (SCEV_KNOWN, "scev_known", tcc_expression, 0)
+
+/* Node used for describing a property that is not known at compile
+ time. */
+DEFTREECODE (SCEV_NOT_KNOWN, "scev_not_known", tcc_expression, 0)
+
+/* Polynomial chains of recurrences.
+ cr = {CHREC_LEFT (cr), +, CHREC_RIGHT (cr)}_CHREC_VARIABLE (cr). */
+DEFTREECODE (POLYNOMIAL_CHREC, "polynomial_chrec", tcc_expression, 2)
+
+/* Used to chain children of container statements together.
+ Use the interface in tree-iterator.h to access this node. */
+DEFTREECODE (STATEMENT_LIST, "statement_list", tcc_exceptional, 0)
+
+/* NOTE: This code is deprecated and should only be used internally by ipa*
+ as a temporary construct.
+
+ Predicate assertion. Artificial expression generated by the optimizers
+ to keep track of predicate values. This expression may only appear on
+ the RHS of assignments.
+
+ Given X = ASSERT_EXPR <Y, EXPR>, the optimizers can infer
+ two things:
+
+ 1- X is a copy of Y.
+ 2- EXPR is a conditional expression and is known to be true.
+
+ Valid and to be expected forms of conditional expressions are
+ valid GIMPLE conditional expressions (as defined by is_gimple_condexpr)
+ and conditional expressions with the first operand being a
+ PLUS_EXPR with a variable possibly wrapped in a NOP_EXPR first
+ operand and an integer constant second operand.
+
+ The type of the expression is the same as Y. */
+DEFTREECODE (ASSERT_EXPR, "assert_expr", tcc_expression, 2)
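+
+/* Illustrative sketch: on the true arm of the test "if (y_2 > 5)" an
+   optimizer may materialize
+     x_3 = ASSERT_EXPR <y_2, y_2 > 5>;
+   so later uses of x_3 carry both the copy of y_2 and the knowledge
+   that y_2 > 5 holds on that path.  */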
+
+/* Base class information. Holds information about a class as a
+ baseclass of itself or another class. */
+DEFTREECODE (TREE_BINFO, "tree_binfo", tcc_exceptional, 0)
+
+/* Records the size for an expression of variable size type. This is
+ for use in contexts in which we are accessing the entire object,
+ such as for a function call, or block copy.
+ Operand 0 is the real expression.
+ Operand 1 is the size of the type in the expression. */
+DEFTREECODE (WITH_SIZE_EXPR, "with_size_expr", tcc_expression, 2)
+
+/* Extract elements from two input vectors, Operand 0 and Operand 1, of
+ size VS, according to the offset OFF defined by Operand 2 as
+ follows:
+ If OFF > 0, the last VS - OFF elements of vector OP0 are concatenated to
+ the first OFF elements of the vector OP1.
+ If OFF == 0, then the returned vector is OP1.
+ On different targets OFF may take different forms; it can be an address, in
+ which case its low log2(VS)-1 bits define the offset, or it can be a mask
+ generated by the builtin targetm.vectorize.mask_for_load_builtin_decl. */
+DEFTREECODE (REALIGN_LOAD_EXPR, "realign_load", tcc_expression, 3)
+
+/* Low-level memory addressing. Operands are BASE (address of static or
+ global variable or register), OFFSET (integer constant),
+ INDEX (register), STEP (integer constant), INDEX2 (register),
+ The corresponding address is BASE + STEP * INDEX + INDEX2 + OFFSET.
+ Only variations and values valid on the target are allowed.
+
+ The type of STEP, INDEX and INDEX2 is sizetype.
+
+ The type of BASE is a pointer type. If BASE is not an address of
+ a static or global variable INDEX2 will be NULL.
+
+ The type of OFFSET is a pointer type and determines TBAA in the same
+ way as the constant offset operand of MEM_REF. */
+
+DEFTREECODE (TARGET_MEM_REF, "target_mem_ref", tcc_reference, 5)
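+
+/* Illustrative sketch: with BASE == &a for some int a[], OFFSET == 16,
+   INDEX == i, STEP == 4 and INDEX2 absent, the formula above gives
+     &a + 4*i + 16
+   i.e. roughly the address of a[i + 4] for 4-byte ints.  */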
+
+/* Memory addressing. Operands are a pointer and a tree constant integer
+ byte offset of the pointer type that when dereferenced yields the
+ type of the base object the pointer points into and which is used for
+ TBAA purposes.
+ The type of the MEM_REF is the type the bytes at the memory location
+ are interpreted as.
+ MEM_REF <p, c> is equivalent to ((typeof(c))p)->x... where x... is a
+ chain of component references offsetting p by c. */
+DEFTREECODE (MEM_REF, "mem_ref", tcc_reference, 2)
+
+/* OpenACC and OpenMP. As it is exposed in TREE_RANGE_CHECK invocations, do
+ not change the ordering of these codes. */
+
+/* OpenACC - #pragma acc parallel [clause1 ... clauseN]
+ Operand 0: OMP_BODY: Code to be executed in parallel.
+ Operand 1: OMP_CLAUSES: List of clauses. */
+
+DEFTREECODE (OACC_PARALLEL, "oacc_parallel", tcc_statement, 2)
+
+/* OpenACC - #pragma acc kernels [clause1 ... clauseN]
+ Operand 0: OMP_BODY: Sequence of kernels.
+ Operand 1: OMP_CLAUSES: List of clauses. */
+
+DEFTREECODE (OACC_KERNELS, "oacc_kernels", tcc_statement, 2)
+
+/* OpenACC - #pragma acc serial [clause1 ... clauseN]
+ Operand 0: OMP_BODY: Code to be executed sequentially.
+ Operand 1: OMP_CLAUSES: List of clauses. */
+
+DEFTREECODE (OACC_SERIAL, "oacc_serial", tcc_statement, 2)
+
+/* OpenACC - #pragma acc data [clause1 ... clauseN]
+ Operand 0: OACC_DATA_BODY: Data construct body.
+ Operand 1: OACC_DATA_CLAUSES: List of clauses. */
+
+DEFTREECODE (OACC_DATA, "oacc_data", tcc_statement, 2)
+
+/* OpenACC - #pragma acc host_data [clause1 ... clauseN]
+ Operand 0: OACC_HOST_DATA_BODY: Host_data construct body.
+ Operand 1: OACC_HOST_DATA_CLAUSES: List of clauses. */
+
+DEFTREECODE (OACC_HOST_DATA, "oacc_host_data", tcc_statement, 2)
+
+/* OpenMP - #pragma omp parallel [clause1 ... clauseN]
+ Operand 0: OMP_PARALLEL_BODY: Code to be executed by all threads.
+ Operand 1: OMP_PARALLEL_CLAUSES: List of clauses. */
+
+DEFTREECODE (OMP_PARALLEL, "omp_parallel", tcc_statement, 2)
+
+/* OpenMP - #pragma omp task [clause1 ... clauseN]
+ Operand 0: OMP_TASK_BODY: Code to be executed by all threads.
+ Operand 1: OMP_TASK_CLAUSES: List of clauses. */
+
+DEFTREECODE (OMP_TASK, "omp_task", tcc_statement, 2)
+
+/* OpenMP - #pragma omp for [clause1 ... clauseN]
+
+ A single OMP_FOR node represents an entire nest of collapsed
+ loops; as noted below, some of its arguments are vectors of length
+ equal to the collapse depth, with the corresponding elements holding
+ data specific to a particular loop in the nest. These vectors are
+ numbered from the outside in so that the outermost loop is element 0.
+
+ These constructs have seven operands:
+
+ Operand 0: OMP_FOR_BODY contains the loop body.
+
+ Operand 1: OMP_FOR_CLAUSES is the list of clauses
+ associated with the directive.
+
+ Operand 2: OMP_FOR_INIT is a vector containing iteration
+ variable initializations of the form VAR = N1.
+
+ Operand 3: OMP_FOR_COND is a vector containing loop
+ conditional expressions of the form VAR {<,>,<=,>=} N2.
+
+ Operand 4: OMP_FOR_INCR is a vector containing loop index
+ increment expressions of the form VAR {+=,-=} INCR.
+
+ Operand 5: OMP_FOR_PRE_BODY contains side effect code from
+ operands OMP_FOR_INIT, OMP_FOR_COND and
+ OMP_FOR_INCR. These side effects are part of the
+ OMP_FOR block but must be evaluated before the start of the
+ loop body. OMP_FOR_PRE_BODY specifically
+ includes DECL_EXPRs for iteration variables that are
+ declared in the nested for loops.
+ Note this field is not a vector; it may be null, but otherwise is
+ usually a statement list collecting the side effect code from all
+ the collapsed loops.
+
+ Operand 6: OMP_FOR_ORIG_DECLS holds VAR_DECLS for the
+ original user-specified iterator variables in the source code.
+ In some cases, like C++ class iterators or range for with
+ decomposition, the for loop is rewritten by the front end to
+ use a temporary iteration variable. The purpose of this field is to
+ make the original variables available to the gimplifier so it can
+ adjust their data-sharing attributes and diagnose errors.
+ OMP_FOR_ORIG_DECLS is a vector field, with each element holding
+ a list of VAR_DECLS for the corresponding collapse level.
+
+ The loop index variable VAR must be a signed integer variable,
+ which is implicitly private to each thread. For rectangular loops,
+ the bounds N1 and N2 and the increment expression
+ INCR are required to be loop-invariant integer expressions
+ that are evaluated without any synchronization. The evaluation order,
+ frequency of evaluation and side effects are otherwise unspecified
+ by the standard.
+
+ For non-rectangular loops, in which the bounds of an inner loop depend
+ on the index of an outer loop, the bit OMP_FOR_NON_RECTANGULAR
+ must be set. In this case N1 and N2 are not ordinary
+ expressions, but instead a TREE_VEC with three elements:
+ the DECL for the outer loop variable, a multiplication
+ factor, and an offset. */
+
+DEFTREECODE (OMP_FOR, "omp_for", tcc_statement, 7)
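+
+/* Illustrative sketch: for
+     #pragma omp for collapse(2)
+     for (i = 0; i < n; i++)
+       for (j = 0; j < m; j++)
+         body;
+   OMP_FOR_INIT, OMP_FOR_COND and OMP_FOR_INCR are TREE_VECs of length
+   2 holding roughly <i = 0, j = 0>, <i < n, j < m> and
+   <i = i + 1, j = j + 1>; the exact node shapes are front-end
+   dependent.  */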
+
+/* OpenMP - #pragma omp simd [clause1 ... clauseN]
+ Operands like for OMP_FOR. */
+DEFTREECODE (OMP_SIMD, "omp_simd", tcc_statement, 7)
+
+/* OpenMP - #pragma omp distribute [clause1 ... clauseN]
+ Operands like for OMP_FOR. */
+DEFTREECODE (OMP_DISTRIBUTE, "omp_distribute", tcc_statement, 7)
+
+/* OpenMP - #pragma omp taskloop [clause1 ... clauseN]
+ Operands like for OMP_FOR. */
+DEFTREECODE (OMP_TASKLOOP, "omp_taskloop", tcc_statement, 7)
+
+/* OpenMP - #pragma omp loop [clause1 ... clauseN]
+ Operands like for OMP_FOR. */
+DEFTREECODE (OMP_LOOP, "omp_loop", tcc_statement, 7)
+
+/* OpenMP - #pragma acc loop [clause1 ... clauseN]
+ Operands like for OMP_FOR. */
+DEFTREECODE (OACC_LOOP, "oacc_loop", tcc_statement, 7)
+
+/* OpenMP - #pragma omp teams [clause1 ... clauseN]
+ Operand 0: OMP_TEAMS_BODY: Teams body.
+ Operand 1: OMP_TEAMS_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_TEAMS, "omp_teams", tcc_statement, 2)
+
+/* OpenMP - #pragma omp target data [clause1 ... clauseN]
+ Operand 0: OMP_TARGET_DATA_BODY: Target data construct body.
+ Operand 1: OMP_TARGET_DATA_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_TARGET_DATA, "omp_target_data", tcc_statement, 2)
+
+/* OpenMP - #pragma omp target [clause1 ... clauseN]
+ Operand 0: OMP_TARGET_BODY: Target construct body.
+ Operand 1: OMP_TARGET_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_TARGET, "omp_target", tcc_statement, 2)
+
+/* OpenMP - #pragma omp sections [clause1 ... clauseN]
+ Operand 0: OMP_SECTIONS_BODY: Sections body.
+ Operand 1: OMP_SECTIONS_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_SECTIONS, "omp_sections", tcc_statement, 2)
+
+/* OpenMP - #pragma omp ordered
+ Operand 0: OMP_ORDERED_BODY: Ordered construct body.
+ Operand 1: OMP_ORDERED_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_ORDERED, "omp_ordered", tcc_statement, 2)
+
+/* OpenMP - #pragma omp critical [name]
+ Operand 0: OMP_CRITICAL_BODY: Critical section body.
+ Operand 1: OMP_CRITICAL_CLAUSES: List of clauses.
+ Operand 2: OMP_CRITICAL_NAME: Identifier for critical section. */
+DEFTREECODE (OMP_CRITICAL, "omp_critical", tcc_statement, 3)
+
+/* OpenMP - #pragma omp single
+ Operand 0: OMP_SINGLE_BODY: Single section body.
+ Operand 1: OMP_SINGLE_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_SINGLE, "omp_single", tcc_statement, 2)
+
+/* OpenMP - #pragma omp scope
+ Operand 0: OMP_SCOPE_BODY: Scope construct body.
+ Operand 1: OMP_SCOPE_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_SCOPE, "omp_scope", tcc_statement, 2)
+
+/* OpenMP - #pragma omp taskgroup
+ Operand 0: OMP_TASKGROUP_BODY: Taskgroup body.
+ Operand 1: OMP_TASKGROUP_CLAUSES: List of clauses.
+DEFTREECODE (OMP_TASKGROUP, "omp_taskgroup", tcc_statement, 2)
+
+/* OpenMP - #pragma omp masked
+ Operand 0: OMP_MASKED_BODY: Masked section body.
+ Operand 1: OMP_MASKED_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_MASKED, "omp_masked", tcc_statement, 2)
+
+/* OpenMP - #pragma omp scan
+ Operand 0: OMP_SCAN_BODY: Scan body.
+ Operand 1: OMP_SCAN_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_SCAN, "omp_scan", tcc_statement, 2)
+
+/* OpenMP - #pragma omp section
+ Operand 0: OMP_SECTION_BODY: Section body. */
+DEFTREECODE (OMP_SECTION, "omp_section", tcc_statement, 1)
+
+/* OpenMP - #pragma omp master
+ Operand 0: OMP_MASTER_BODY: Master section body. */
+DEFTREECODE (OMP_MASTER, "omp_master", tcc_statement, 1)
+
+/* OpenACC - #pragma acc cache (variable1 ... variableN)
+ Operand 0: OACC_CACHE_CLAUSES: List of variables (transformed into
+ OMP_CLAUSE__CACHE_ clauses). */
+DEFTREECODE (OACC_CACHE, "oacc_cache", tcc_statement, 1)
+
+/* OpenACC - #pragma acc declare [clause1 ... clauseN]
+ Operand 0: OACC_DECLARE_CLAUSES: List of clauses. */
+DEFTREECODE (OACC_DECLARE, "oacc_declare", tcc_statement, 1)
+
+/* OpenACC - #pragma acc enter data [clause1 ... clauseN]
+ Operand 0: OACC_ENTER_DATA_CLAUSES: List of clauses. */
+DEFTREECODE (OACC_ENTER_DATA, "oacc_enter_data", tcc_statement, 1)
+
+/* OpenACC - #pragma acc exit data [clause1 ... clauseN]
+ Operand 0: OACC_EXIT_DATA_CLAUSES: List of clauses. */
+DEFTREECODE (OACC_EXIT_DATA, "oacc_exit_data", tcc_statement, 1)
+
+/* OpenACC - #pragma acc update [clause1 ... clauseN]
+ Operand 0: OACC_UPDATE_CLAUSES: List of clauses. */
+DEFTREECODE (OACC_UPDATE, "oacc_update", tcc_statement, 1)
+
+/* OpenMP - #pragma omp target update [clause1 ... clauseN]
+ Operand 0: OMP_TARGET_UPDATE_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_TARGET_UPDATE, "omp_target_update", tcc_statement, 1)
+
+/* OpenMP - #pragma omp target enter data [clause1 ... clauseN]
+ Operand 0: OMP_TARGET_ENTER_DATA_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_TARGET_ENTER_DATA, "omp_target_enter_data", tcc_statement, 1)
+
+/* OpenMP - #pragma omp target exit data [clause1 ... clauseN]
+ Operand 0: OMP_TARGET_EXIT_DATA_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_TARGET_EXIT_DATA, "omp_target_exit_data", tcc_statement, 1)
+
+/* OMP_ATOMIC through OMP_ATOMIC_CAPTURE_NEW must be consecutive,
+ or OMP_ATOMIC_SEQ_CST needs adjusting. */
+
+/* OpenMP - #pragma omp atomic
+ Operand 0: The address at which the atomic operation is to be performed.
+ This address should be stabilized with save_expr.
+ Operand 1: The expression to evaluate. When the old value of the object
+ at the address is used in the expression, it should appear as if
+ build_fold_indirect_ref of the address. */
+DEFTREECODE (OMP_ATOMIC, "omp_atomic", tcc_statement, 2)
+
+/* OpenMP - #pragma omp atomic read
+ Operand 0: The address at which the atomic operation is to be performed.
+ This address should be stabilized with save_expr. */
+DEFTREECODE (OMP_ATOMIC_READ, "omp_atomic_read", tcc_statement, 1)
+
+/* OpenMP - #pragma omp atomic capture
+ Operand 0: The address at which the atomic operation is to be performed.
+ This address should be stabilized with save_expr.
+ Operand 1: The expression to evaluate. When the old value of the object
+ at the address is used in the expression, it should appear as if
+ build_fold_indirect_ref of the address.
+ OMP_ATOMIC_CAPTURE_OLD returns the old memory content,
+ OMP_ATOMIC_CAPTURE_NEW the new value. */
+DEFTREECODE (OMP_ATOMIC_CAPTURE_OLD, "omp_atomic_capture_old", tcc_statement, 2)
+DEFTREECODE (OMP_ATOMIC_CAPTURE_NEW, "omp_atomic_capture_new", tcc_statement, 2)
+
+/* OpenMP clauses. */
+DEFTREECODE (OMP_CLAUSE, "omp_clause", tcc_exceptional, 0)
+
+/* TRANSACTION_EXPR tree code.
+ Operand 0: BODY: contains body of the transaction. */
+DEFTREECODE (TRANSACTION_EXPR, "transaction_expr", tcc_expression, 1)
+
+/* Widening dot-product.
+ The first two arguments are of type t1.
+ The third argument and the result are of type t2, such that t2 is at least
+ twice the size of t1. DOT_PROD_EXPR(arg1,arg2,arg3) is equivalent to:
+ tmp = WIDEN_MULT_EXPR(arg1, arg2);
+ arg3 = PLUS_EXPR (tmp, arg3);
+ or:
+ tmp = WIDEN_MULT_EXPR(arg1, arg2);
+ arg3 = WIDEN_SUM_EXPR (tmp, arg3); */
+DEFTREECODE (DOT_PROD_EXPR, "dot_prod_expr", tcc_expression, 3)
+
+/* Widening summation.
+ The first argument is of type t1.
+ The second argument is of type t2, such that t2 is at least twice
+ the size of t1. The type of the entire expression is also t2.
+ WIDEN_SUM_EXPR is equivalent to first widening (promoting)
+ the first argument from type t1 to type t2, and then summing it
+ with the second argument. */
+DEFTREECODE (WIDEN_SUM_EXPR, "widen_sum_expr", tcc_binary, 2)
+
+/* Widening sad (sum of absolute differences).
+ The first two arguments are of type t1, which should be an integer type.
+ The third argument and the result are of type t2, such that t2 is at least
+ twice the size of t1. Like DOT_PROD_EXPR, SAD_EXPR (arg1,arg2,arg3) is
+ equivalent to:
+ tmp = WIDEN_MINUS_EXPR (arg1, arg2)
+ tmp2 = ABS_EXPR (tmp)
+ arg3 = PLUS_EXPR (tmp2, arg3)
+ or:
+ tmp = WIDEN_MINUS_EXPR (arg1, arg2)
+ tmp2 = ABS_EXPR (tmp)
+ arg3 = WIDEN_SUM_EXPR (tmp2, arg3)
+ */
+DEFTREECODE (SAD_EXPR, "sad_expr", tcc_expression, 3)
+
+/* Widening multiplication.
+ The two arguments are of type t1 and t2, both integral types that
+ have the same precision, but possibly different signedness.
+ The result is of integral type t3, such that t3 is at least twice
+ the size of t1/t2. WIDEN_MULT_EXPR is equivalent to first widening
+ (promoting) the arguments from type t1 to type t3, and from t2 to
+ type t3 and then multiplying them. */
+DEFTREECODE (WIDEN_MULT_EXPR, "widen_mult_expr", tcc_binary, 2)
+
+/* Widening multiply-accumulate.
+ The first two arguments are of type t1.
+ The third argument and the result are of type t2, such that t2 is at least
+ twice the size of t1. t1 and t2 must be integral or fixed-point types.
+ The expression is equivalent to a WIDEN_MULT_EXPR operation
+ of the first two operands followed by an add or subtract of the third
+ operand. */
+DEFTREECODE (WIDEN_MULT_PLUS_EXPR, "widen_mult_plus_expr", tcc_expression, 3)
+/* This is like the above, except in the final expression the multiply result
+ is subtracted from the third operand. */
+DEFTREECODE (WIDEN_MULT_MINUS_EXPR, "widen_mult_minus_expr", tcc_expression, 3)
+
+/* Widening shift left.
+ The first operand is of type t1.
+ The second operand is the number of bits to shift by; it need not be the
+ same type as the first operand and result.
+ Note that the result is undefined if the second operand is larger
+ than or equal to the first operand's type size.
+ The type of the entire expression is t2, such that t2 is at least twice
+ the size of t1.
+ WIDEN_LSHIFT_EXPR is equivalent to first widening (promoting)
+ the first argument from type t1 to type t2, and then shifting it
+ by the second argument. */
+DEFTREECODE (WIDEN_LSHIFT_EXPR, "widen_lshift_expr", tcc_binary, 2)
+DEFTREECODE (WIDEN_PLUS_EXPR, "widen_plus_expr", tcc_binary, 2)
+DEFTREECODE (WIDEN_MINUS_EXPR, "widen_minus_expr", tcc_binary, 2)
+
+/* Widening vector multiplication.
+ The two operands are vectors with N elements of size S. Multiplying the
+ elements of the two vectors will result in N products of size 2*S.
+ VEC_WIDEN_MULT_HI_EXPR computes the N/2 high products.
+ VEC_WIDEN_MULT_LO_EXPR computes the N/2 low products. */
+DEFTREECODE (VEC_WIDEN_MULT_HI_EXPR, "widen_mult_hi_expr", tcc_binary, 2)
+DEFTREECODE (VEC_WIDEN_MULT_LO_EXPR, "widen_mult_lo_expr", tcc_binary, 2)
+
+/* Similarly, but return the even or odd N/2 products. */
+DEFTREECODE (VEC_WIDEN_MULT_EVEN_EXPR, "widen_mult_even_expr", tcc_binary, 2)
+DEFTREECODE (VEC_WIDEN_MULT_ODD_EXPR, "widen_mult_odd_expr", tcc_binary, 2)
+
+/* Unpack (extract and promote/widen) the high/low elements of the input
+ vector into the output vector. The input vector has twice as many
+ elements as the output vector, and its elements are half the size of
+ the output vector's elements. This is used to support type promotion. */
+DEFTREECODE (VEC_UNPACK_HI_EXPR, "vec_unpack_hi_expr", tcc_unary, 1)
+DEFTREECODE (VEC_UNPACK_LO_EXPR, "vec_unpack_lo_expr", tcc_unary, 1)
+
+/* Unpack (extract) the high/low elements of the input vector, convert
+ fixed point values to floating point and widen elements into the
+ output vector. The input vector has twice as many elements as the
+ output vector, and its elements are half the size of the output
+ vector's elements. */
+DEFTREECODE (VEC_UNPACK_FLOAT_HI_EXPR, "vec_unpack_float_hi_expr", tcc_unary, 1)
+DEFTREECODE (VEC_UNPACK_FLOAT_LO_EXPR, "vec_unpack_float_lo_expr", tcc_unary, 1)
+
+/* Unpack (extract) the high/low elements of the input vector, convert
+ floating point values to integer and widen elements into the output
+ vector. The input vector has twice as many elements as the output
+ vector, and its elements are half the size of the output vector's
+ elements. */
+DEFTREECODE (VEC_UNPACK_FIX_TRUNC_HI_EXPR, "vec_unpack_fix_trunc_hi_expr",
+ tcc_unary, 1)
+DEFTREECODE (VEC_UNPACK_FIX_TRUNC_LO_EXPR, "vec_unpack_fix_trunc_lo_expr",
+ tcc_unary, 1)
+
+/* Pack (demote/narrow and merge) the elements of the two input vectors
+ into the output vector using truncation/saturation.
+ The elements of the input vectors are twice the size of the elements of the
+ output vector. This is used to support type demotion. */
+DEFTREECODE (VEC_PACK_TRUNC_EXPR, "vec_pack_trunc_expr", tcc_binary, 2)
+DEFTREECODE (VEC_PACK_SAT_EXPR, "vec_pack_sat_expr", tcc_binary, 2)
+
+/* Convert floating point values of the two input vectors to integer
+ and pack (narrow and merge) the elements into the output vector. The
+ elements of the input vector are twice the size of the elements of
+ the output vector. */
+DEFTREECODE (VEC_PACK_FIX_TRUNC_EXPR, "vec_pack_fix_trunc_expr", tcc_binary, 2)
+
+/* Convert fixed point values of the two input vectors to floating point
+ and pack (narrow and merge) the elements into the output vector. The
+ elements of the input vector are twice the size of the elements of
+ the output vector. */
+DEFTREECODE (VEC_PACK_FLOAT_EXPR, "vec_pack_float_expr", tcc_binary, 2)
+
+/* Widening vector shift left in bits.
+ Operand 0 is a vector to be shifted with N elements of size S.
+ Operand 1 is an integer shift amount in bits.
+ The result of the operation is N elements of size 2*S.
+ VEC_WIDEN_LSHIFT_HI_EXPR computes the N/2 high results.
+ VEC_WIDEN_LSHIFT_LO_EXPR computes the N/2 low results.
+ */
+DEFTREECODE (VEC_WIDEN_LSHIFT_HI_EXPR, "widen_lshift_hi_expr", tcc_binary, 2)
+DEFTREECODE (VEC_WIDEN_LSHIFT_LO_EXPR, "widen_lshift_lo_expr", tcc_binary, 2)
+DEFTREECODE (VEC_WIDEN_PLUS_HI_EXPR, "widen_plus_hi_expr", tcc_binary, 2)
+DEFTREECODE (VEC_WIDEN_PLUS_LO_EXPR, "widen_plus_lo_expr", tcc_binary, 2)
+DEFTREECODE (VEC_WIDEN_MINUS_HI_EXPR, "widen_minus_hi_expr", tcc_binary, 2)
+DEFTREECODE (VEC_WIDEN_MINUS_LO_EXPR, "widen_minus_lo_expr", tcc_binary, 2)
+
+/* PREDICT_EXPR. Specifies a hint for branch prediction. The
+ PREDICT_EXPR_PREDICTOR specifies the predictor and PREDICT_EXPR_OUTCOME
+ the outcome (0 for not taken and 1 for taken). Once the profile is
+ guessed, all conditional branches leading to execution paths executing
+ the PREDICT_EXPR will get predicted by the specified predictor. */
+DEFTREECODE (PREDICT_EXPR, "predict_expr", tcc_expression, 1)
+
+/* OPTIMIZATION_NODE. Node to store the optimization options. */
+DEFTREECODE (OPTIMIZATION_NODE, "optimization_node", tcc_exceptional, 0)
+
+/* TARGET_OPTION_NODE. Node to store the target specific options. */
+DEFTREECODE (TARGET_OPTION_NODE, "target_option_node", tcc_exceptional, 0)
+
+/* ANNOTATE_EXPR.
+ Operand 0 is the expression to be annotated.
+ Operand 1 is the annotation kind.
+ Operand 2 is additional data. */
+DEFTREECODE (ANNOTATE_EXPR, "annotate_expr", tcc_expression, 3)
+
+/*
+Local variables:
+mode:c
+End:
+*/
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree.h
new file mode 100644
index 0000000..abcdb56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tree.h
@@ -0,0 +1,6717 @@
+/* Definitions for the ubiquitous 'tree' type for GNU compilers.
+ Copyright (C) 1989-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TREE_H
+#define GCC_TREE_H
+
+#include "tree-core.h"
+#include "options.h"
+
+/* Convert a target-independent built-in function code to a combined_fn. */
+
+inline combined_fn
+as_combined_fn (built_in_function fn)
+{
+ return combined_fn (int (fn));
+}
+
+/* Convert an internal function code to a combined_fn. */
+
+inline combined_fn
+as_combined_fn (internal_fn fn)
+{
+ return combined_fn (int (fn) + int (END_BUILTINS));
+}
+
+/* Return true if CODE is a target-independent built-in function. */
+
+inline bool
+builtin_fn_p (combined_fn code)
+{
+ return int (code) < int (END_BUILTINS);
+}
+
+/* Return the target-independent built-in function represented by CODE.
+ Only valid if builtin_fn_p (CODE). */
+
+inline built_in_function
+as_builtin_fn (combined_fn code)
+{
+ gcc_checking_assert (builtin_fn_p (code));
+ return built_in_function (int (code));
+}
+
+/* Return true if CODE is an internal function. */
+
+inline bool
+internal_fn_p (combined_fn code)
+{
+ return int (code) >= int (END_BUILTINS);
+}
+
+/* Return the internal function represented by CODE. Only valid if
+ internal_fn_p (CODE). */
+
+inline internal_fn
+as_internal_fn (combined_fn code)
+{
+ gcc_checking_assert (internal_fn_p (code));
+ return internal_fn (int (code) - int (END_BUILTINS));
+}
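+
+/* Illustrative sketch of the encoding used above: target-independent
+ built-in codes occupy [0, END_BUILTINS) of the combined_fn space and
+ internal function codes are biased upward by END_BUILTINS:
+
+ combined_fn c1 = as_combined_fn (BUILT_IN_SQRT);
+ combined_fn c2 = as_combined_fn (IFN_SQRT);
+ // builtin_fn_p (c1) and internal_fn_p (c2) hold, and
+ // as_builtin_fn (c1) / as_internal_fn (c2) recover the original codes.
+*/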
+
+/* Helper to transparently allow tree codes and builtin function codes
+ to exist in one storage entity. */
+class code_helper
+{
+public:
+ code_helper () {}
+ code_helper (tree_code code) : rep ((int) code) {}
+ code_helper (combined_fn fn) : rep (-(int) fn) {}
+ code_helper (internal_fn fn) : rep (-(int) as_combined_fn (fn)) {}
+ explicit operator tree_code () const { return (tree_code) rep; }
+ explicit operator combined_fn () const { return (combined_fn) -rep; }
+ explicit operator internal_fn () const;
+ explicit operator built_in_function () const;
+ bool is_tree_code () const { return rep > 0; }
+ bool is_fn_code () const { return rep < 0; }
+ bool is_internal_fn () const;
+ bool is_builtin_fn () const;
+ int get_rep () const { return rep; }
+ bool operator== (const code_helper &other) { return rep == other.rep; }
+ bool operator!= (const code_helper &other) { return rep != other.rep; }
+ bool operator== (tree_code c) { return rep == code_helper (c).rep; }
+ bool operator!= (tree_code c) { return rep != code_helper (c).rep; }
+
+private:
+ int rep;
+};
+
+inline code_helper::operator internal_fn () const
+{
+ return as_internal_fn (combined_fn (*this));
+}
+
+inline code_helper::operator built_in_function () const
+{
+ return as_builtin_fn (combined_fn (*this));
+}
+
+inline bool
+code_helper::is_internal_fn () const
+{
+ return is_fn_code () && internal_fn_p (combined_fn (*this));
+}
+
+inline bool
+code_helper::is_builtin_fn () const
+{
+ return is_fn_code () && builtin_fn_p (combined_fn (*this));
+}
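+
+/* Usage sketch (illustrative): a positive rep stores a tree_code and a
+ negative rep stores a combined_fn, so a single code_helper can name
+ either kind of operation:
+
+ code_helper op = PLUS_EXPR; // op.is_tree_code () is true
+ code_helper fn = as_combined_fn (IFN_SQRT); // fn.is_fn_code () is true
+ if (op.is_tree_code ())
+ handle_code ((tree_code) op); // handle_code is hypothetical
+*/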
+
+/* Macros for initializing `tree_contains_struct'. */
+#define MARK_TS_BASE(C) \
+ (tree_contains_struct[C][TS_BASE] = true)
+
+#define MARK_TS_TYPED(C) \
+ (MARK_TS_BASE (C), \
+ tree_contains_struct[C][TS_TYPED] = true)
+
+#define MARK_TS_COMMON(C) \
+ (MARK_TS_TYPED (C), \
+ tree_contains_struct[C][TS_COMMON] = true)
+
+#define MARK_TS_TYPE_COMMON(C) \
+ (MARK_TS_COMMON (C), \
+ tree_contains_struct[C][TS_TYPE_COMMON] = true)
+
+#define MARK_TS_TYPE_WITH_LANG_SPECIFIC(C) \
+ (MARK_TS_TYPE_COMMON (C), \
+ tree_contains_struct[C][TS_TYPE_WITH_LANG_SPECIFIC] = true)
+
+#define MARK_TS_TYPE_NON_COMMON(C) \
+ (MARK_TS_TYPE_WITH_LANG_SPECIFIC (C), \
+ tree_contains_struct[C][TS_TYPE_NON_COMMON] = true)
+
+#define MARK_TS_DECL_MINIMAL(C) \
+ (MARK_TS_COMMON (C), \
+ tree_contains_struct[C][TS_DECL_MINIMAL] = true)
+
+#define MARK_TS_DECL_COMMON(C) \
+ (MARK_TS_DECL_MINIMAL (C), \
+ tree_contains_struct[C][TS_DECL_COMMON] = true)
+
+#define MARK_TS_DECL_WRTL(C) \
+ (MARK_TS_DECL_COMMON (C), \
+ tree_contains_struct[C][TS_DECL_WRTL] = true)
+
+#define MARK_TS_DECL_WITH_VIS(C) \
+ (MARK_TS_DECL_WRTL (C), \
+ tree_contains_struct[C][TS_DECL_WITH_VIS] = true)
+
+#define MARK_TS_DECL_NON_COMMON(C) \
+ (MARK_TS_DECL_WITH_VIS (C), \
+ tree_contains_struct[C][TS_DECL_NON_COMMON] = true)
+
+#define MARK_TS_EXP(C) \
+ (MARK_TS_TYPED (C), \
+ tree_contains_struct[C][TS_EXP] = true)
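+
+/* For example, MARK_TS_EXP (CALL_EXPR) marks CALL_EXPR as containing
+ TS_BASE, TS_TYPED and TS_EXP, because each macro above first invokes
+ the macro for the structure it extends. */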
+
+/* Returns the string representing CLASS. */
+
+#define TREE_CODE_CLASS_STRING(CLASS)\
+ tree_code_class_strings[(int) (CLASS)]
+
+#if __cpp_inline_variables < 201606L
+#define TREE_CODE_CLASS(CODE) \
+ tree_code_type_tmpl <0>::tree_code_type[(int) (CODE)]
+#else
+#define TREE_CODE_CLASS(CODE) tree_code_type[(int) (CODE)]
+#endif
+
+/* Nonzero if NODE represents an exceptional code. */
+
+#define EXCEPTIONAL_CLASS_P(NODE)\
+ (TREE_CODE_CLASS (TREE_CODE (NODE)) == tcc_exceptional)
+
+/* Nonzero if NODE represents a constant. */
+
+#define CONSTANT_CLASS_P(NODE)\
+ (TREE_CODE_CLASS (TREE_CODE (NODE)) == tcc_constant)
+
+/* Nonzero if NODE represents a constant, or is a location wrapper
+ around such a node. */
+
+#define CONSTANT_CLASS_OR_WRAPPER_P(NODE)\
+ (CONSTANT_CLASS_P (tree_strip_any_location_wrapper (NODE)))
+
+/* Nonzero if NODE represents a type. */
+
+#define TYPE_P(NODE)\
+ (TREE_CODE_CLASS (TREE_CODE (NODE)) == tcc_type)
+
+/* Nonzero if NODE represents a declaration. */
+
+#define DECL_P(NODE)\
+ (TREE_CODE_CLASS (TREE_CODE (NODE)) == tcc_declaration)
+
+/* True if NODE designates a variable declaration. */
+#define VAR_P(NODE) \
+ (TREE_CODE (NODE) == VAR_DECL)
+
+/* Nonzero if DECL represents a VAR_DECL or FUNCTION_DECL. */
+
+#define VAR_OR_FUNCTION_DECL_P(DECL)\
+ (TREE_CODE (DECL) == VAR_DECL || TREE_CODE (DECL) == FUNCTION_DECL)
+
+/* Nonzero if NODE represents an INDIRECT_REF. Keep these checks in
+ ascending code order. */
+
+#define INDIRECT_REF_P(NODE)\
+ (TREE_CODE (NODE) == INDIRECT_REF)
+
+/* Nonzero if NODE represents a reference. */
+
+#define REFERENCE_CLASS_P(NODE)\
+ (TREE_CODE_CLASS (TREE_CODE (NODE)) == tcc_reference)
+
+/* Nonzero if NODE represents a comparison. */
+
+#define COMPARISON_CLASS_P(NODE)\
+ (TREE_CODE_CLASS (TREE_CODE (NODE)) == tcc_comparison)
+
+/* Nonzero if NODE represents a unary arithmetic expression. */
+
+#define UNARY_CLASS_P(NODE)\
+ (TREE_CODE_CLASS (TREE_CODE (NODE)) == tcc_unary)
+
+/* Nonzero if NODE represents a binary arithmetic expression. */
+
+#define BINARY_CLASS_P(NODE)\
+ (TREE_CODE_CLASS (TREE_CODE (NODE)) == tcc_binary)
+
+/* Nonzero if NODE represents a statement expression. */
+
+#define STATEMENT_CLASS_P(NODE)\
+ (TREE_CODE_CLASS (TREE_CODE (NODE)) == tcc_statement)
+
+/* Nonzero if NODE represents a function call-like expression with a
+ variable-length operand vector. */
+
+#define VL_EXP_CLASS_P(NODE)\
+ (TREE_CODE_CLASS (TREE_CODE (NODE)) == tcc_vl_exp)
+
+/* Nonzero if NODE represents any other expression. */
+
+#define EXPRESSION_CLASS_P(NODE)\
+ (TREE_CODE_CLASS (TREE_CODE (NODE)) == tcc_expression)
+
+/* Returns nonzero iff NODE represents a type or declaration. */
+
+#define IS_TYPE_OR_DECL_P(NODE)\
+ (TYPE_P (NODE) || DECL_P (NODE))
+
+/* Returns nonzero iff CLASS is the tree-code class of an
+ expression. */
+
+#define IS_EXPR_CODE_CLASS(CLASS)\
+ ((CLASS) >= tcc_reference && (CLASS) <= tcc_expression)
+
+/* Returns nonzero iff NODE is an expression of some kind. */
+
+#define EXPR_P(NODE) IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (TREE_CODE (NODE)))
+
+#if __cpp_inline_variables < 201606L
+#define TREE_CODE_LENGTH(CODE) \
+ tree_code_length_tmpl <0>::tree_code_length[(int) (CODE)]
+#else
+#define TREE_CODE_LENGTH(CODE) tree_code_length[(int) (CODE)]
+#endif
+
+
+/* Helper macros for math builtins. */
+
+#define CASE_FLT_FN(FN) case FN: case FN##F: case FN##L
+#define CASE_FLT_FN_FLOATN_NX(FN) \
+ case FN##F16: case FN##F32: case FN##F64: case FN##F128: \
+ case FN##F32X: case FN##F64X: case FN##F128X
+#define CASE_FLT_FN_REENT(FN) case FN##_R: case FN##F_R: case FN##L_R
+#define CASE_INT_FN(FN) case FN: case FN##L: case FN##LL: case FN##IMAX
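+
+/* E.g. CASE_FLT_FN (BUILT_IN_SQRT) expands to
+ "case BUILT_IN_SQRT: case BUILT_IN_SQRTF: case BUILT_IN_SQRTL",
+ covering the double, float and long double variants with one macro. */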
+
+#define NULL_TREE (tree) NULL
+
+/* Define accessors for the fields that all tree nodes have
+ (though some fields are not used for all kinds of nodes). */
+
+/* The tree-code says what kind of node it is.
+ Codes are defined in tree.def. */
+#define TREE_CODE(NODE) ((enum tree_code) (NODE)->base.code)
+#define TREE_SET_CODE(NODE, VALUE) ((NODE)->base.code = (VALUE))
+
+/* When checking is enabled, errors will be generated if a tree node
+ is accessed incorrectly. The macros die with a fatal error. */
+#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
+
+#define TREE_CHECK(T, CODE) \
+(tree_check ((T), __FILE__, __LINE__, __FUNCTION__, (CODE)))
+
+#define TREE_NOT_CHECK(T, CODE) \
+(tree_not_check ((T), __FILE__, __LINE__, __FUNCTION__, (CODE)))
+
+#define TREE_CHECK2(T, CODE1, CODE2) \
+(tree_check2 ((T), __FILE__, __LINE__, __FUNCTION__, (CODE1), (CODE2)))
+
+#define TREE_NOT_CHECK2(T, CODE1, CODE2) \
+(tree_not_check2 ((T), __FILE__, __LINE__, __FUNCTION__, (CODE1), (CODE2)))
+
+#define TREE_CHECK3(T, CODE1, CODE2, CODE3) \
+(tree_check3 ((T), __FILE__, __LINE__, __FUNCTION__, (CODE1), (CODE2), (CODE3)))
+
+#define TREE_NOT_CHECK3(T, CODE1, CODE2, CODE3) \
+(tree_not_check3 ((T), __FILE__, __LINE__, __FUNCTION__, \
+ (CODE1), (CODE2), (CODE3)))
+
+#define TREE_CHECK4(T, CODE1, CODE2, CODE3, CODE4) \
+(tree_check4 ((T), __FILE__, __LINE__, __FUNCTION__, \
+ (CODE1), (CODE2), (CODE3), (CODE4)))
+
+#define TREE_NOT_CHECK4(T, CODE1, CODE2, CODE3, CODE4) \
+(tree_not_check4 ((T), __FILE__, __LINE__, __FUNCTION__, \
+ (CODE1), (CODE2), (CODE3), (CODE4)))
+
+#define TREE_CHECK5(T, CODE1, CODE2, CODE3, CODE4, CODE5) \
+(tree_check5 ((T), __FILE__, __LINE__, __FUNCTION__, \
+ (CODE1), (CODE2), (CODE3), (CODE4), (CODE5)))
+
+#define TREE_NOT_CHECK5(T, CODE1, CODE2, CODE3, CODE4, CODE5) \
+(tree_not_check5 ((T), __FILE__, __LINE__, __FUNCTION__, \
+ (CODE1), (CODE2), (CODE3), (CODE4), (CODE5)))
+
+#define CONTAINS_STRUCT_CHECK(T, STRUCT) \
+(contains_struct_check ((T), (STRUCT), __FILE__, __LINE__, __FUNCTION__))
+
+#define TREE_CLASS_CHECK(T, CLASS) \
+(tree_class_check ((T), (CLASS), __FILE__, __LINE__, __FUNCTION__))
+
+#define TREE_RANGE_CHECK(T, CODE1, CODE2) \
+(tree_range_check ((T), (CODE1), (CODE2), __FILE__, __LINE__, __FUNCTION__))
+
+#define OMP_CLAUSE_SUBCODE_CHECK(T, CODE) \
+(omp_clause_subcode_check ((T), (CODE), __FILE__, __LINE__, __FUNCTION__))
+
+#define OMP_CLAUSE_RANGE_CHECK(T, CODE1, CODE2) \
+(omp_clause_range_check ((T), (CODE1), (CODE2), \
+ __FILE__, __LINE__, __FUNCTION__))
+
+/* These checks have to be special cased. */
+#define EXPR_CHECK(T) \
+(expr_check ((T), __FILE__, __LINE__, __FUNCTION__))
+
+/* These checks have to be special cased. */
+#define NON_TYPE_CHECK(T) \
+(non_type_check ((T), __FILE__, __LINE__, __FUNCTION__))
+
+/* These checks have to be special cased. */
+#define ANY_INTEGRAL_TYPE_CHECK(T) \
+(any_integral_type_check ((T), __FILE__, __LINE__, __FUNCTION__))
+
+#define TREE_INT_CST_ELT_CHECK(T, I) \
+(*tree_int_cst_elt_check ((T), (I), __FILE__, __LINE__, __FUNCTION__))
+
+#define TREE_VEC_ELT_CHECK(T, I) \
+(*(CONST_CAST2 (tree *, typeof (T)*, \
+ tree_vec_elt_check ((T), (I), __FILE__, __LINE__, __FUNCTION__))))
+
+#define OMP_CLAUSE_ELT_CHECK(T, I) \
+(*(omp_clause_elt_check ((T), (I), __FILE__, __LINE__, __FUNCTION__)))
+
+/* Special checks for TREE_OPERANDs. */
+#define TREE_OPERAND_CHECK(T, I) \
+(*(CONST_CAST2 (tree*, typeof (T)*, \
+ tree_operand_check ((T), (I), __FILE__, __LINE__, __FUNCTION__))))
+
+#define TREE_OPERAND_CHECK_CODE(T, CODE, I) \
+(*(tree_operand_check_code ((T), (CODE), (I), \
+ __FILE__, __LINE__, __FUNCTION__)))
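+
+/* Usage sketch (illustrative): each checker returns its argument on
+ success and calls the matching *_check_failed routine otherwise, so
+ checked accesses compose in place:
+
+ tree decl = TREE_CHECK (node, VAR_DECL); // dies unless node is a VAR_DECL
+ tree op0 = TREE_OPERAND_CHECK (expr, 0); // also verifies the operand index
+*/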
+
+/* Nodes are chained together for many purposes.
+ Types are chained together to record them for being output to the debugger
+ (see the function `chain_type').
+ Decls in the same scope are chained together to record the contents
+ of the scope.
+ Statement nodes for successive statements used to be chained together.
+ Often lists of things are represented by TREE_LIST nodes that
+ are chained together. */
+
+#define TREE_CHAIN(NODE) \
+(CONTAINS_STRUCT_CHECK (NODE, TS_COMMON)->common.chain)
+
+/* In all nodes that are expressions, this is the data type of the expression.
+ In POINTER_TYPE nodes, this is the type that the pointer points to.
+ In ARRAY_TYPE nodes, this is the type of the elements.
+ In VECTOR_TYPE nodes, this is the type of the elements. */
+#define TREE_TYPE(NODE) \
+(CONTAINS_STRUCT_CHECK (NODE, TS_TYPED)->typed.type)
+
+extern void tree_contains_struct_check_failed (const_tree,
+ const enum tree_node_structure_enum,
+ const char *, int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+
+extern void tree_check_failed (const_tree, const char *, int, const char *,
+ ...) ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void tree_not_check_failed (const_tree, const char *, int, const char *,
+ ...) ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void tree_class_check_failed (const_tree, const enum tree_code_class,
+ const char *, int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void tree_range_check_failed (const_tree, const char *, int,
+ const char *, enum tree_code,
+ enum tree_code)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void tree_not_class_check_failed (const_tree,
+ const enum tree_code_class,
+ const char *, int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void tree_int_cst_elt_check_failed (int, int, const char *,
+ int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void tree_vec_elt_check_failed (int, int, const char *,
+ int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void phi_node_elt_check_failed (int, int, const char *,
+ int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void tree_operand_check_failed (int, const_tree,
+ const char *, int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void omp_clause_check_failed (const_tree, const char *, int,
+ const char *, enum omp_clause_code)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void omp_clause_operand_check_failed (int, const_tree, const char *,
+ int, const char *)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+extern void omp_clause_range_check_failed (const_tree, const char *, int,
+ const char *, enum omp_clause_code,
+ enum omp_clause_code)
+ ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+
+#else /* not ENABLE_TREE_CHECKING, or not gcc */
+
+#define CONTAINS_STRUCT_CHECK(T, ENUM) (T)
+#define TREE_CHECK(T, CODE) (T)
+#define TREE_NOT_CHECK(T, CODE) (T)
+#define TREE_CHECK2(T, CODE1, CODE2) (T)
+#define TREE_NOT_CHECK2(T, CODE1, CODE2) (T)
+#define TREE_CHECK3(T, CODE1, CODE2, CODE3) (T)
+#define TREE_NOT_CHECK3(T, CODE1, CODE2, CODE3) (T)
+#define TREE_CHECK4(T, CODE1, CODE2, CODE3, CODE4) (T)
+#define TREE_NOT_CHECK4(T, CODE1, CODE2, CODE3, CODE4) (T)
+#define TREE_CHECK5(T, CODE1, CODE2, CODE3, CODE4, CODE5) (T)
+#define TREE_NOT_CHECK5(T, CODE1, CODE2, CODE3, CODE4, CODE5) (T)
+#define TREE_CLASS_CHECK(T, CODE) (T)
+#define TREE_RANGE_CHECK(T, CODE1, CODE2) (T)
+#define EXPR_CHECK(T) (T)
+#define NON_TYPE_CHECK(T) (T)
+#define TREE_INT_CST_ELT_CHECK(T, I) ((T)->int_cst.val[I])
+#define TREE_VEC_ELT_CHECK(T, I) ((T)->vec.a[I])
+#define TREE_OPERAND_CHECK(T, I) ((T)->exp.operands[I])
+#define TREE_OPERAND_CHECK_CODE(T, CODE, I) ((T)->exp.operands[I])
+#define OMP_CLAUSE_ELT_CHECK(T, i) ((T)->omp_clause.ops[i])
+#define OMP_CLAUSE_RANGE_CHECK(T, CODE1, CODE2) (T)
+#define OMP_CLAUSE_SUBCODE_CHECK(T, CODE) (T)
+#define ANY_INTEGRAL_TYPE_CHECK(T) (T)
+
+#define TREE_CHAIN(NODE) ((NODE)->common.chain)
+#define TREE_TYPE(NODE) ((NODE)->typed.type)
+
+#endif
+
+#define TREE_BLOCK(NODE) (tree_block (NODE))
+#define TREE_SET_BLOCK(T, B) (tree_set_block ((T), (B)))
+
+#include "tree-check.h"
+
+#define TYPE_CHECK(T) TREE_CLASS_CHECK (T, tcc_type)
+#define DECL_MINIMAL_CHECK(T) CONTAINS_STRUCT_CHECK (T, TS_DECL_MINIMAL)
+#define DECL_COMMON_CHECK(T) CONTAINS_STRUCT_CHECK (T, TS_DECL_COMMON)
+#define DECL_WRTL_CHECK(T) CONTAINS_STRUCT_CHECK (T, TS_DECL_WRTL)
+#define DECL_WITH_VIS_CHECK(T) CONTAINS_STRUCT_CHECK (T, TS_DECL_WITH_VIS)
+#define DECL_NON_COMMON_CHECK(T) CONTAINS_STRUCT_CHECK (T, TS_DECL_NON_COMMON)
+#define CST_CHECK(T) TREE_CLASS_CHECK (T, tcc_constant)
+#define STMT_CHECK(T) TREE_CLASS_CHECK (T, tcc_statement)
+#define VL_EXP_CHECK(T) TREE_CLASS_CHECK (T, tcc_vl_exp)
+#define FUNC_OR_METHOD_CHECK(T) TREE_CHECK2 (T, FUNCTION_TYPE, METHOD_TYPE)
+#define PTR_OR_REF_CHECK(T) TREE_CHECK2 (T, POINTER_TYPE, REFERENCE_TYPE)
+
+#define RECORD_OR_UNION_CHECK(T) \
+ TREE_CHECK3 (T, RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE)
+#define NOT_RECORD_OR_UNION_CHECK(T) \
+ TREE_NOT_CHECK3 (T, RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE)
+#define ARRAY_OR_INTEGER_TYPE_CHECK(T) \
+ TREE_CHECK2 (T, ARRAY_TYPE, INTEGER_TYPE)
+
+#define NUMERICAL_TYPE_CHECK(T) \
+ TREE_CHECK5 (T, INTEGER_TYPE, ENUMERAL_TYPE, BOOLEAN_TYPE, REAL_TYPE, \
+ FIXED_POINT_TYPE)
+
+/* Here is how primitive or already-canonicalized types' hash codes
+ are made. */
+#define TYPE_HASH(TYPE) (TYPE_UID (TYPE))
+
+/* A simple hash function for an arbitrary tree node. This must not be
+ used in hash tables which are saved to a PCH. */
+#define TREE_HASH(NODE) ((size_t) (NODE) & 0777777)
+
+/* Tests if CODE is a conversion expr (NOP_EXPR or CONVERT_EXPR). */
+#define CONVERT_EXPR_CODE_P(CODE) \
+ ((CODE) == NOP_EXPR || (CODE) == CONVERT_EXPR)
+
+/* Similarly, but accept an expression instead of a tree code. */
+#define CONVERT_EXPR_P(EXP) CONVERT_EXPR_CODE_P (TREE_CODE (EXP))
+
+/* Generate case for NOP_EXPR, CONVERT_EXPR. */
+
+#define CASE_CONVERT \
+ case NOP_EXPR: \
+ case CONVERT_EXPR
+
+/* Given an expression as a tree, strip any conversion that generates
+ no instruction. Accepts both tree and const_tree arguments since
+ we are not modifying the tree itself. */
+
+#define STRIP_NOPS(EXP) \
+ (EXP) = tree_strip_nop_conversions (CONST_CAST_TREE (EXP))
+
+/* Like STRIP_NOPS, but don't let the signedness change either. */
+
+#define STRIP_SIGN_NOPS(EXP) \
+ (EXP) = tree_strip_sign_nop_conversions (CONST_CAST_TREE (EXP))
+
+/* Like STRIP_NOPS, but don't alter the TREE_TYPE either. */
+
+#define STRIP_TYPE_NOPS(EXP) \
+ while ((CONVERT_EXPR_P (EXP) \
+ || TREE_CODE (EXP) == NON_LVALUE_EXPR) \
+ && TREE_OPERAND (EXP, 0) != error_mark_node \
+ && (TREE_TYPE (EXP) \
+ == TREE_TYPE (TREE_OPERAND (EXP, 0)))) \
+ (EXP) = TREE_OPERAND (EXP, 0)
+
+/* Remove unnecessary type conversions according to
+ tree_ssa_useless_type_conversion. */
+
+#define STRIP_USELESS_TYPE_CONVERSION(EXP) \
+ (EXP) = tree_ssa_strip_useless_type_conversions (EXP)
+
+/* Remove any VIEW_CONVERT_EXPR or NON_LVALUE_EXPR that is used purely
+ to provide a location_t. */
+
+#define STRIP_ANY_LOCATION_WRAPPER(EXP) \
+ (EXP) = tree_strip_any_location_wrapper (CONST_CAST_TREE (EXP))
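+
+/* Usage sketch (illustrative): the STRIP_* macros rewrite their argument
+ in place, so a typical caller copies the expression first:
+
+ tree t = expr;
+ STRIP_NOPS (t);
+ if (TREE_CODE (t) == INTEGER_CST)
+ ... // fold using the underlying constant
+*/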
+
+/* Nonzero if TYPE represents a vector type. */
+
+#define VECTOR_TYPE_P(TYPE) (TREE_CODE (TYPE) == VECTOR_TYPE)
+
+/* Nonzero if TYPE represents a vector of booleans. */
+
+#define VECTOR_BOOLEAN_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == VECTOR_TYPE \
+ && TREE_CODE (TREE_TYPE (TYPE)) == BOOLEAN_TYPE)
+
+/* Nonzero if TYPE represents an integral type. Note that we do not
+ include COMPLEX types here. Keep these checks in ascending code
+ order. */
+
+#define INTEGRAL_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == ENUMERAL_TYPE \
+ || TREE_CODE (TYPE) == BOOLEAN_TYPE \
+ || TREE_CODE (TYPE) == INTEGER_TYPE)
+
+/* Nonzero if TYPE represents an integral type, including complex
+ and vector integer types. */
+
+#define ANY_INTEGRAL_TYPE_P(TYPE) \
+ (INTEGRAL_TYPE_P (TYPE) \
+ || ((TREE_CODE (TYPE) == COMPLEX_TYPE \
+ || VECTOR_TYPE_P (TYPE)) \
+ && INTEGRAL_TYPE_P (TREE_TYPE (TYPE))))
+
+/* Nonzero if TYPE represents a non-saturating fixed-point type. */
+
+#define NON_SAT_FIXED_POINT_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == FIXED_POINT_TYPE && !TYPE_SATURATING (TYPE))
+
+/* Nonzero if TYPE represents a saturating fixed-point type. */
+
+#define SAT_FIXED_POINT_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == FIXED_POINT_TYPE && TYPE_SATURATING (TYPE))
+
+/* Nonzero if TYPE represents a fixed-point type. */
+
+#define FIXED_POINT_TYPE_P(TYPE) (TREE_CODE (TYPE) == FIXED_POINT_TYPE)
+
+/* Nonzero if TYPE represents a scalar floating-point type. */
+
+#define SCALAR_FLOAT_TYPE_P(TYPE) (TREE_CODE (TYPE) == REAL_TYPE)
+
+/* Nonzero if TYPE represents a complex floating-point type. */
+
+#define COMPLEX_FLOAT_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == COMPLEX_TYPE \
+ && TREE_CODE (TREE_TYPE (TYPE)) == REAL_TYPE)
+
+/* Nonzero if TYPE represents a vector integer type. */
+
+#define VECTOR_INTEGER_TYPE_P(TYPE) \
+ (VECTOR_TYPE_P (TYPE) \
+ && TREE_CODE (TREE_TYPE (TYPE)) == INTEGER_TYPE)
+
+
+/* Nonzero if TYPE represents a vector floating-point type. */
+
+#define VECTOR_FLOAT_TYPE_P(TYPE) \
+ (VECTOR_TYPE_P (TYPE) \
+ && TREE_CODE (TREE_TYPE (TYPE)) == REAL_TYPE)
+
+/* Nonzero if TYPE represents a floating-point type, including complex
+ and vector floating-point types. The vector and complex check does
+ not use the previous two macros to enable early folding. */
+
+#define FLOAT_TYPE_P(TYPE) \
+ (SCALAR_FLOAT_TYPE_P (TYPE) \
+ || ((TREE_CODE (TYPE) == COMPLEX_TYPE \
+ || VECTOR_TYPE_P (TYPE)) \
+ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (TYPE))))
+
+/* Nonzero if TYPE represents a decimal floating-point type. */
+#define DECIMAL_FLOAT_TYPE_P(TYPE) \
+ (SCALAR_FLOAT_TYPE_P (TYPE) \
+ && DECIMAL_FLOAT_MODE_P (TYPE_MODE (TYPE)))
+
+/* Nonzero if TYPE is a record or union type. */
+#define RECORD_OR_UNION_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == RECORD_TYPE \
+ || TREE_CODE (TYPE) == UNION_TYPE \
+ || TREE_CODE (TYPE) == QUAL_UNION_TYPE)
+
+/* Nonzero if TYPE represents an aggregate (multi-component) type.
+ Keep these checks in ascending code order. */
+
+#define AGGREGATE_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE || RECORD_OR_UNION_TYPE_P (TYPE))
+
+/* Nonzero if TYPE represents a pointer or reference type.
+ (It should be renamed to INDIRECT_TYPE_P.) Keep these checks in
+ ascending code order. */
+
+#define POINTER_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == POINTER_TYPE || TREE_CODE (TYPE) == REFERENCE_TYPE)
+
+/* Nonzero if TYPE represents a pointer to function. */
+#define FUNCTION_POINTER_TYPE_P(TYPE) \
+ (POINTER_TYPE_P (TYPE) && TREE_CODE (TREE_TYPE (TYPE)) == FUNCTION_TYPE)
+
+/* Nonzero if this type is a complete type. */
+#define COMPLETE_TYPE_P(NODE) (TYPE_SIZE (NODE) != NULL_TREE)
+
+/* Nonzero if this type is the (possibly qualified) void type. */
+#define VOID_TYPE_P(NODE) (TREE_CODE (NODE) == VOID_TYPE)
+
+/* Nonzero if this type is complete or is cv void. */
+#define COMPLETE_OR_VOID_TYPE_P(NODE) \
+ (COMPLETE_TYPE_P (NODE) || VOID_TYPE_P (NODE))
+
+/* Nonzero if this type is complete or is an array with unspecified bound. */
+#define COMPLETE_OR_UNBOUND_ARRAY_TYPE_P(NODE) \
+ (COMPLETE_TYPE_P (TREE_CODE (NODE) == ARRAY_TYPE ? TREE_TYPE (NODE) : (NODE)))
+
+#define FUNC_OR_METHOD_TYPE_P(NODE) \
+ (TREE_CODE (NODE) == FUNCTION_TYPE || TREE_CODE (NODE) == METHOD_TYPE)
+
+#define OPAQUE_TYPE_P(NODE) \
+ (TREE_CODE (NODE) == OPAQUE_TYPE)
+
+/* Define many boolean fields that all tree nodes have. */
+
+/* In VAR_DECL, PARM_DECL and RESULT_DECL nodes, nonzero means address
+ of this is needed. So it cannot be in a register.
+ In a FUNCTION_DECL it has no meaning.
+ In LABEL_DECL nodes, it means a goto for this label has been seen
+ from a place outside all binding contours that restore stack levels.
+ In an artificial SSA_NAME that points to a stack partition with at least
+ two variables, it means that at least one variable has TREE_ADDRESSABLE.
+ In ..._TYPE nodes, it means that objects of this type must be fully
+ addressable. This means that pieces of this object cannot go into
+ register parameters, for example. If this is a function type, this
+ means that the value must be returned in memory.
+ In CONSTRUCTOR nodes, it means object constructed must be in memory.
+ In IDENTIFIER_NODEs, this means that some extern decl for this name
+ had its address taken. That matters for inline functions.
+ In a STMT_EXPR, it means we want the result of the enclosed expression. */
+#define TREE_ADDRESSABLE(NODE) ((NODE)->base.addressable_flag)
+
+/* Set on a CALL_EXPR if the call is in a tail position, i.e. just before the
+ exit of a function. Calls for which this is true are candidates for tail
+ call optimizations. */
+#define CALL_EXPR_TAILCALL(NODE) \
+ (CALL_EXPR_CHECK (NODE)->base.addressable_flag)
+
+/* Set on a CALL_EXPR if the call has been marked as requiring tail call
+ optimization for correctness. */
+#define CALL_EXPR_MUST_TAIL_CALL(NODE) \
+ (CALL_EXPR_CHECK (NODE)->base.static_flag)
+
+/* Used as a temporary field on a CASE_LABEL_EXPR to indicate that the
+ CASE_LOW operand has been processed. */
+#define CASE_LOW_SEEN(NODE) \
+ (CASE_LABEL_EXPR_CHECK (NODE)->base.addressable_flag)
+
+#define PREDICT_EXPR_OUTCOME(NODE) \
+ ((enum prediction) (PREDICT_EXPR_CHECK (NODE)->base.addressable_flag))
+#define SET_PREDICT_EXPR_OUTCOME(NODE, OUTCOME) \
+ (PREDICT_EXPR_CHECK (NODE)->base.addressable_flag = (int) OUTCOME)
+#define PREDICT_EXPR_PREDICTOR(NODE) \
+ ((enum br_predictor)tree_to_shwi (TREE_OPERAND (PREDICT_EXPR_CHECK (NODE), 0)))
+
+/* In a VAR_DECL, nonzero means allocate static storage.
+ In a FUNCTION_DECL, nonzero if function has been defined.
+ In a CONSTRUCTOR, nonzero means allocate static storage. */
+#define TREE_STATIC(NODE) ((NODE)->base.static_flag)
+
+/* In an ADDR_EXPR, nonzero means do not use a trampoline. */
+#define TREE_NO_TRAMPOLINE(NODE) (ADDR_EXPR_CHECK (NODE)->base.static_flag)
+
+/* In a TARGET_EXPR or WITH_CLEANUP_EXPR, means that the pertinent cleanup
+ should only be executed if an exception is thrown, not on normal exit
+ of its scope. */
+#define CLEANUP_EH_ONLY(NODE) ((NODE)->base.static_flag)
+
+/* In a TRY_CATCH_EXPR, means that the handler should be considered a
+ separate cleanup in honor_protect_cleanup_actions. */
+#define TRY_CATCH_IS_CLEANUP(NODE) \
+ (TRY_CATCH_EXPR_CHECK (NODE)->base.static_flag)
+
+/* Used as a temporary field on a CASE_LABEL_EXPR to indicate that the
+ CASE_HIGH operand has been processed. */
+#define CASE_HIGH_SEEN(NODE) \
+ (CASE_LABEL_EXPR_CHECK (NODE)->base.static_flag)
+
+/* Used to mark scoped enums. */
+#define ENUM_IS_SCOPED(NODE) (ENUMERAL_TYPE_CHECK (NODE)->base.static_flag)
+
+/* Determines whether an ENUMERAL_TYPE has defined the list of constants. */
+#define ENUM_IS_OPAQUE(NODE) (ENUMERAL_TYPE_CHECK (NODE)->base.private_flag)
+
+/* In an expr node (usually a conversion) this means the node was made
+ implicitly and should not lead to any sort of warning. In a decl node,
+ warnings concerning the decl should be suppressed. This is used at
+ least for used-before-set warnings, and it is set after one warning is
+ emitted. */
+#define TREE_NO_WARNING(NODE) ((NODE)->base.nowarning_flag)
+
+/* Nonzero if we should warn about the change in empty class parameter
+ passing ABI in this TU. */
+#define TRANSLATION_UNIT_WARN_EMPTY_P(NODE) \
+ (TRANSLATION_UNIT_DECL_CHECK (NODE)->decl_common.decl_flag_0)
+
+/* Nonzero if this type is "empty" according to the particular psABI. */
+#define TYPE_EMPTY_P(NODE) (TYPE_CHECK (NODE)->type_common.empty_flag)
+
+/* Used to indicate that this TYPE represents a compiler-generated entity. */
+#define TYPE_ARTIFICIAL(NODE) (TYPE_CHECK (NODE)->base.nowarning_flag)
+
+/* True if the type is indivisible at the source level, i.e. if its
+ component parts cannot be accessed directly. This is used to suppress
+ normal GNU extensions for target-specific vector types. */
+#define TYPE_INDIVISIBLE_P(NODE) (TYPE_CHECK (NODE)->type_common.indivisible_p)
+
+/* True if this is a stdarg function with no named arguments (C2x
+ (...) prototype, where arguments can be accessed with va_start and
+ va_arg), as opposed to an unprototyped function. */
+#define TYPE_NO_NAMED_ARGS_STDARG_P(NODE) \
+ (TYPE_CHECK (NODE)->type_common.no_named_args_stdarg_p)
+
+/* In an IDENTIFIER_NODE, this means that assemble_name was called with
+ this string as an argument. */
+#define TREE_SYMBOL_REFERENCED(NODE) \
+ (IDENTIFIER_NODE_CHECK (NODE)->base.static_flag)
+
+/* Nonzero in a pointer or reference type means the data pointed to
+ by this type can alias anything. */
+#define TYPE_REF_CAN_ALIAS_ALL(NODE) \
+ (PTR_OR_REF_CHECK (NODE)->base.static_flag)
+
+/* In an INTEGER_CST, REAL_CST, COMPLEX_CST, or VECTOR_CST, this means
+ there was an overflow in folding. */
+
+#define TREE_OVERFLOW(NODE) (CST_CHECK (NODE)->base.public_flag)
+
+/* TREE_OVERFLOW can only be true for EXPR of CONSTANT_CLASS_P. */
+
+#define TREE_OVERFLOW_P(EXPR) \
+ (CONSTANT_CLASS_P (EXPR) && TREE_OVERFLOW (EXPR))
+
+/* In a VAR_DECL, FUNCTION_DECL, NAMESPACE_DECL or TYPE_DECL,
+ nonzero means name is to be accessible from outside this translation unit.
+ In an IDENTIFIER_NODE, nonzero means an external declaration
+ accessible from outside this translation unit was previously seen
+ for this name in an inner scope. */
+#define TREE_PUBLIC(NODE) ((NODE)->base.public_flag)
+
+/* In a _TYPE, indicates whether TYPE_CACHED_VALUES contains a vector
+ of cached values, or is something else. */
+#define TYPE_CACHED_VALUES_P(NODE) (TYPE_CHECK (NODE)->base.public_flag)
+
+/* In a SAVE_EXPR, indicates that the original expression has already
+ been substituted with a VAR_DECL that contains the value. */
+#define SAVE_EXPR_RESOLVED_P(NODE) \
+ (SAVE_EXPR_CHECK (NODE)->base.public_flag)
+
+/* Set on a CALL_EXPR if this stdarg call should be passed the argument
+ pack. */
+#define CALL_EXPR_VA_ARG_PACK(NODE) \
+ (CALL_EXPR_CHECK (NODE)->base.public_flag)
+
+/* In any expression, decl, or constant, nonzero means it has side effects or
+ reevaluation of the whole expression could produce a different value.
+ This is set if any subexpression is a function call, a side effect or a
+ reference to a volatile variable. In a ..._DECL, this is set only if the
+ declaration said `volatile'. This will never be set for a constant. */
+#define TREE_SIDE_EFFECTS(NODE) \
+ (NON_TYPE_CHECK (NODE)->base.side_effects_flag)
+
+/* In a LABEL_DECL, nonzero means this label had its address taken
+ and therefore can never be deleted and is a jump target for
+ computed gotos. */
+#define FORCED_LABEL(NODE) (LABEL_DECL_CHECK (NODE)->base.side_effects_flag)
+
+/* Whether falling through to a case or a user-defined label is allowed.
+ This is used to implement -Wimplicit-fallthrough. */
+#define FALLTHROUGH_LABEL_P(NODE) \
+ (LABEL_DECL_CHECK (NODE)->base.private_flag)
+
+/* Set on the artificial label created for break; stmt from a switch.
+ This is used to implement -Wimplicit-fallthrough. */
+#define SWITCH_BREAK_LABEL_P(NODE) \
+ (LABEL_DECL_CHECK (NODE)->base.protected_flag)
+
+/* Set on label that is known not to be jumped to, it can be only
+ reached by falling through from previous statements.
+ This is used to implement -Wimplicit-fallthrough. */
+#define UNUSED_LABEL_P(NODE) \
+ (LABEL_DECL_CHECK (NODE)->base.default_def_flag)
+
+/* Nonzero means this expression is volatile in the C sense:
+ its address should be of type `volatile WHATEVER *'.
+ In other words, the declared item is volatile qualified.
+ This is used in _DECL nodes and _REF nodes.
+ On a FUNCTION_DECL node, this means the function does not
+ return normally. This is the same effect as setting
+ the attribute noreturn on the function in C.
+
+ In a ..._TYPE node, means this type is volatile-qualified.
+ But use TYPE_VOLATILE instead of this macro when the node is a type,
+ because eventually we may make that a different bit.
+
+ If this bit is set in an expression, so is TREE_SIDE_EFFECTS. */
+#define TREE_THIS_VOLATILE(NODE) ((NODE)->base.volatile_flag)
+
+/* Nonzero means this node will not trap. In an INDIRECT_REF, means
+ accessing the memory pointed to won't generate a trap. However,
+ this only applies to an object when used appropriately: it doesn't
+ mean that writing a READONLY mem won't trap.
+
+ In ARRAY_REF and ARRAY_RANGE_REF means that we know that the index
+ (or slice of the array) always belongs to the range of the array.
+ I.e. that the access will not trap, provided that the access to
+ the base of the array will not trap. */
+#define TREE_THIS_NOTRAP(NODE) \
+ (TREE_CHECK5 (NODE, INDIRECT_REF, MEM_REF, TARGET_MEM_REF, ARRAY_REF, \
+ ARRAY_RANGE_REF)->base.nothrow_flag)
+
+/* In a VAR_DECL, PARM_DECL or FIELD_DECL, or any kind of ..._REF node,
+ nonzero means it may not be the lhs of an assignment.
+ Nonzero in a FUNCTION_DECL means this function should be treated
+ as "const" function (can only read its arguments). */
+#define TREE_READONLY(NODE) (NON_TYPE_CHECK (NODE)->base.readonly_flag)
+
+/* Value of expression is constant. Always on in all ..._CST nodes. May
+ also appear in an expression or decl where the value is constant. */
+#define TREE_CONSTANT(NODE) (NON_TYPE_CHECK (NODE)->base.constant_flag)
+
+/* Nonzero if NODE, a type, has had its sizes gimplified. */
+#define TYPE_SIZES_GIMPLIFIED(NODE) \
+ (TYPE_CHECK (NODE)->base.constant_flag)
+
+/* In a decl (most significantly a FIELD_DECL), means an unsigned field. */
+#define DECL_UNSIGNED(NODE) \
+ (DECL_COMMON_CHECK (NODE)->base.u.bits.unsigned_flag)
+
+/* In integral and pointer types, means an unsigned type. */
+#define TYPE_UNSIGNED(NODE) (TYPE_CHECK (NODE)->base.u.bits.unsigned_flag)
+
+/* Same as TYPE_UNSIGNED but converted to SIGNOP. */
+#define TYPE_SIGN(NODE) ((signop) TYPE_UNSIGNED (NODE))
+
+/* True if overflow wraps around for the given integral or pointer type. That
+ is, TYPE_MAX + 1 == TYPE_MIN. */
+#define TYPE_OVERFLOW_WRAPS(TYPE) \
+ (POINTER_TYPE_P (TYPE) \
+ ? flag_wrapv_pointer \
+ : (ANY_INTEGRAL_TYPE_CHECK(TYPE)->base.u.bits.unsigned_flag \
+ || flag_wrapv))
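+
+/* E.g. for unsigned int, TYPE_OVERFLOW_WRAPS always holds, since
+ UINT_MAX + 1 wraps to 0; for plain int it holds only under -fwrapv. */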
+
+/* True if overflow is undefined for the given integral or pointer type.
+ We may optimize on the assumption that values in the type never overflow.
+
+ IMPORTANT NOTE: Any optimization based on TYPE_OVERFLOW_UNDEFINED
+ must issue a warning based on warn_strict_overflow. In some cases
+ it will be appropriate to issue the warning immediately, and in
+ other cases it will be appropriate to simply set a flag and let the
+ caller decide whether a warning is appropriate or not. */
+#define TYPE_OVERFLOW_UNDEFINED(TYPE) \
+ (POINTER_TYPE_P (TYPE) \
+ ? !flag_wrapv_pointer \
+ : (!ANY_INTEGRAL_TYPE_CHECK(TYPE)->base.u.bits.unsigned_flag \
+ && !flag_wrapv && !flag_trapv))
+
+/* True if overflow for the given integral type should issue a
+ trap. */
+#define TYPE_OVERFLOW_TRAPS(TYPE) \
+ (!ANY_INTEGRAL_TYPE_CHECK(TYPE)->base.u.bits.unsigned_flag && flag_trapv)
+
+/* True if an overflow is to be preserved for sanitization. */
+#define TYPE_OVERFLOW_SANITIZED(TYPE) \
+ (INTEGRAL_TYPE_P (TYPE) \
+ && !TYPE_OVERFLOW_WRAPS (TYPE) \
+ && (flag_sanitize & SANITIZE_SI_OVERFLOW))
+
+/* Nonzero in a VAR_DECL or STRING_CST means assembler code has been written.
+ Nonzero in a FUNCTION_DECL means that the function has been compiled.
+ This is interesting in an inline function, since it might not need
+ to be compiled separately.
+ Nonzero in a RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE, ENUMERAL_TYPE
+ or TYPE_DECL if the debugging info for the type has been written.
+ In a BLOCK node, nonzero if reorder_blocks has already seen this block.
+ In an SSA_NAME node, nonzero if the SSA_NAME occurs in an abnormal
+ PHI node. */
+#define TREE_ASM_WRITTEN(NODE) ((NODE)->base.asm_written_flag)
+
+/* Nonzero in a _DECL if the name is used in its scope.
+ Nonzero in an expr node means inhibit warning if value is unused.
+ In IDENTIFIER_NODEs, this means that some extern decl for this name
+ was used.
+ In a BLOCK, this means that the block contains variables that are used. */
+#define TREE_USED(NODE) ((NODE)->base.used_flag)
+
+/* In a FUNCTION_DECL, nonzero means a call to the function cannot
+ throw an exception. In a CALL_EXPR, nonzero means the call cannot
+ throw. We can't easily check the node type here as the C++
+ frontend also uses this flag (for AGGR_INIT_EXPR). */
+#define TREE_NOTHROW(NODE) ((NODE)->base.nothrow_flag)
+
+/* In a CALL_EXPR, means that it's safe to use the target of the call
+ expansion as the return slot for a call that returns in memory. */
+#define CALL_EXPR_RETURN_SLOT_OPT(NODE) \
+ (CALL_EXPR_CHECK (NODE)->base.private_flag)
+
+/* In a RESULT_DECL, PARM_DECL and VAR_DECL, means that it is
+ passed by invisible reference (and the TREE_TYPE is a pointer to the true
+ type). */
+#define DECL_BY_REFERENCE(NODE) \
+ (TREE_CHECK3 (NODE, VAR_DECL, PARM_DECL, \
+ RESULT_DECL)->decl_common.decl_by_reference_flag)
+
+/* In VAR_DECL and PARM_DECL, set when the decl has been used for
+ something other than being set. */
+#define DECL_READ_P(NODE) \
+ (TREE_CHECK2 (NODE, VAR_DECL, PARM_DECL)->decl_common.decl_read_flag)
+
+/* In VAR_DECL or RESULT_DECL, set when significant code movement precludes
+ attempting to share the stack slot with some other variable. */
+#define DECL_NONSHAREABLE(NODE) \
+ (TREE_CHECK2 (NODE, VAR_DECL, \
+ RESULT_DECL)->decl_common.decl_nonshareable_flag)
+
+/* In a PARM_DECL, set for Fortran hidden string length arguments that some
+ buggy callers don't pass to the callee. */
+#define DECL_HIDDEN_STRING_LENGTH(NODE) \
+ (TREE_CHECK (NODE, PARM_DECL)->decl_common.decl_nonshareable_flag)
+
+/* In a CALL_EXPR, means that the call is the jump from a thunk to the
+ thunked-to function. Be careful to avoid using this macro when one of the
+ next two applies instead. */
+#define CALL_FROM_THUNK_P(NODE) (CALL_EXPR_CHECK (NODE)->base.protected_flag)
+
+/* In a CALL_EXPR, if the function being called is BUILT_IN_ALLOCA, means that
+ it has been built for the declaration of a variable-sized object and, if the
+ function being called is BUILT_IN_MEMCPY, means that it has been built for
+ the assignment of a variable-sized object. */
+#define CALL_ALLOCA_FOR_VAR_P(NODE) \
+ (CALL_EXPR_CHECK (NODE)->base.protected_flag)
+
+/* In a CALL_EXPR, if the function being called is DECL_IS_OPERATOR_NEW_P or
+ DECL_IS_OPERATOR_DELETE_P, true for allocator calls from C++ new or delete
+ expressions. Not set for C++20 destroying delete operators. */
+#define CALL_FROM_NEW_OR_DELETE_P(NODE) \
+ (CALL_EXPR_CHECK (NODE)->base.protected_flag)
+
+/* Used in classes in C++. */
+#define TREE_PRIVATE(NODE) ((NODE)->base.private_flag)
+/* Used in classes in C++. */
+#define TREE_PROTECTED(NODE) ((NODE)->base.protected_flag)
+
+/* True if reference type NODE is a C++ rvalue reference. */
+#define TYPE_REF_IS_RVALUE(NODE) \
+ (REFERENCE_TYPE_CHECK (NODE)->base.private_flag)
+
+/* Nonzero in a _DECL if the use of the name is defined as a
+ deprecated feature by __attribute__((deprecated)). */
+#define TREE_DEPRECATED(NODE) \
+ ((NODE)->base.deprecated_flag)
+
+/* Nonzero in a _DECL if the use of the name is defined as an
+ unavailable feature by __attribute__((unavailable)). */
+#define TREE_UNAVAILABLE(NODE) \
+ ((NODE)->base.u.bits.unavailable_flag)
+
+/* Nonzero indicates an IDENTIFIER_NODE that names an anonymous
+ aggregate (as created by anon_aggr_name_format). */
+#define IDENTIFIER_ANON_P(NODE) \
+ (IDENTIFIER_NODE_CHECK (NODE)->base.private_flag)
+
+/* Nonzero in an IDENTIFIER_NODE if the name is a local alias, whose
+ uses are to be substituted for uses of the TREE_CHAINed identifier. */
+#define IDENTIFIER_TRANSPARENT_ALIAS(NODE) \
+ (IDENTIFIER_NODE_CHECK (NODE)->base.deprecated_flag)
+
+/* In an aggregate type, indicates that the scalar fields of the type are
+ stored in reverse order from the target order. This effectively
+ toggles BYTES_BIG_ENDIAN and WORDS_BIG_ENDIAN within the type. */
+#define TYPE_REVERSE_STORAGE_ORDER(NODE) \
+ (TREE_CHECK4 (NODE, RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE, ARRAY_TYPE)->base.u.bits.saturating_flag)
+
+/* In a non-aggregate type, indicates a saturating type. */
+#define TYPE_SATURATING(NODE) \
+ (TREE_NOT_CHECK4 (NODE, RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE, ARRAY_TYPE)->base.u.bits.saturating_flag)
+
+/* In a BIT_FIELD_REF and MEM_REF, indicates that the reference is to a group
+ of bits stored in reverse order from the target order. This effectively
+ toggles both BYTES_BIG_ENDIAN and WORDS_BIG_ENDIAN for the reference.
+
+ The overall strategy is to preserve the invariant that every scalar in
+ memory is associated with a single storage order, i.e. all accesses to
+ this scalar are done with the same storage order. This invariant makes
+ it possible to factor out the storage order in most transformations, as
+ only the address and/or the value (in target order) matter for them.
+ But, of course, the storage order must be preserved when the accesses
+ themselves are rewritten or transformed. */
+#define REF_REVERSE_STORAGE_ORDER(NODE) \
+ (TREE_CHECK2 (NODE, BIT_FIELD_REF, MEM_REF)->base.default_def_flag)
+
+/* In an ADDR_EXPR, indicates that this is a pointer to a nested function
+ represented by a descriptor instead of a trampoline. */
+#define FUNC_ADDR_BY_DESCRIPTOR(NODE) \
+ (TREE_CHECK (NODE, ADDR_EXPR)->base.default_def_flag)
+
+/* In a CALL_EXPR, indicates that this is an indirect call for which
+ pointers to nested functions are descriptors instead of trampolines. */
+#define CALL_EXPR_BY_DESCRIPTOR(NODE) \
+ (TREE_CHECK (NODE, CALL_EXPR)->base.default_def_flag)
+
+/* These flags are available for each language front end to use internally. */
+#define TREE_LANG_FLAG_0(NODE) \
+ (TREE_NOT_CHECK2 (NODE, TREE_VEC, SSA_NAME)->base.u.bits.lang_flag_0)
+#define TREE_LANG_FLAG_1(NODE) \
+ (TREE_NOT_CHECK2 (NODE, TREE_VEC, SSA_NAME)->base.u.bits.lang_flag_1)
+#define TREE_LANG_FLAG_2(NODE) \
+ (TREE_NOT_CHECK2 (NODE, TREE_VEC, SSA_NAME)->base.u.bits.lang_flag_2)
+#define TREE_LANG_FLAG_3(NODE) \
+ (TREE_NOT_CHECK2 (NODE, TREE_VEC, SSA_NAME)->base.u.bits.lang_flag_3)
+#define TREE_LANG_FLAG_4(NODE) \
+ (TREE_NOT_CHECK2 (NODE, TREE_VEC, SSA_NAME)->base.u.bits.lang_flag_4)
+#define TREE_LANG_FLAG_5(NODE) \
+ (TREE_NOT_CHECK2 (NODE, TREE_VEC, SSA_NAME)->base.u.bits.lang_flag_5)
+#define TREE_LANG_FLAG_6(NODE) \
+ (TREE_NOT_CHECK2 (NODE, TREE_VEC, SSA_NAME)->base.u.bits.lang_flag_6)
+
+/* Define additional fields and accessors for nodes representing constants. */
+
+#define TREE_INT_CST_NUNITS(NODE) \
+ (INTEGER_CST_CHECK (NODE)->base.u.int_length.unextended)
+#define TREE_INT_CST_EXT_NUNITS(NODE) \
+ (INTEGER_CST_CHECK (NODE)->base.u.int_length.extended)
+#define TREE_INT_CST_OFFSET_NUNITS(NODE) \
+ (INTEGER_CST_CHECK (NODE)->base.u.int_length.offset)
+#define TREE_INT_CST_ELT(NODE, I) TREE_INT_CST_ELT_CHECK (NODE, I)
+#define TREE_INT_CST_LOW(NODE) \
+ ((unsigned HOST_WIDE_INT) TREE_INT_CST_ELT (NODE, 0))
+
+/* Return true if NODE is a POLY_INT_CST. This is only ever true on
+ targets with variable-sized modes. */
+#define POLY_INT_CST_P(NODE) \
+ (NUM_POLY_INT_COEFFS > 1 && TREE_CODE (NODE) == POLY_INT_CST)
+
+/* In a POLY_INT_CST node. */
+#define POLY_INT_CST_COEFF(NODE, I) \
+ (POLY_INT_CST_CHECK (NODE)->poly_int_cst.coeffs[I])
+
+#define TREE_REAL_CST_PTR(NODE) (&REAL_CST_CHECK (NODE)->real_cst.value)
+#define TREE_REAL_CST(NODE) (*TREE_REAL_CST_PTR (NODE))
+
+#define TREE_FIXED_CST_PTR(NODE) \
+ (FIXED_CST_CHECK (NODE)->fixed_cst.fixed_cst_ptr)
+#define TREE_FIXED_CST(NODE) (*TREE_FIXED_CST_PTR (NODE))
+
+/* In a STRING_CST */
+/* In C terms, this is sizeof, not strlen. */
+#define TREE_STRING_LENGTH(NODE) (STRING_CST_CHECK (NODE)->string.length)
+#define TREE_STRING_POINTER(NODE) \
+ ((const char *)(STRING_CST_CHECK (NODE)->string.str))
+
+/* In a COMPLEX_CST node. */
+#define TREE_REALPART(NODE) (COMPLEX_CST_CHECK (NODE)->complex.real)
+#define TREE_IMAGPART(NODE) (COMPLEX_CST_CHECK (NODE)->complex.imag)
+
+/* In a VECTOR_CST node. See generic.texi for details. */
+#define VECTOR_CST_NELTS(NODE) (TYPE_VECTOR_SUBPARTS (TREE_TYPE (NODE)))
+#define VECTOR_CST_ELT(NODE,IDX) vector_cst_elt (NODE, IDX)
+
+#define VECTOR_CST_LOG2_NPATTERNS(NODE) \
+ (VECTOR_CST_CHECK (NODE)->base.u.vector_cst.log2_npatterns)
+#define VECTOR_CST_NPATTERNS(NODE) \
+ (1U << VECTOR_CST_LOG2_NPATTERNS (NODE))
+#define VECTOR_CST_NELTS_PER_PATTERN(NODE) \
+ (VECTOR_CST_CHECK (NODE)->base.u.vector_cst.nelts_per_pattern)
+#define VECTOR_CST_DUPLICATE_P(NODE) \
+ (VECTOR_CST_NELTS_PER_PATTERN (NODE) == 1)
+#define VECTOR_CST_STEPPED_P(NODE) \
+ (VECTOR_CST_NELTS_PER_PATTERN (NODE) == 3)
+#define VECTOR_CST_ENCODED_ELTS(NODE) \
+ (VECTOR_CST_CHECK (NODE)->vector.elts)
+#define VECTOR_CST_ENCODED_ELT(NODE, ELT) \
+ (VECTOR_CST_CHECK (NODE)->vector.elts[ELT])
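+
+/* Encoding sketch (generic.texi has the authoritative description): a
+ VECTOR_CST stores VECTOR_CST_NPATTERNS interleaved patterns of
+ VECTOR_CST_NELTS_PER_PATTERN elements each. A splat such as
+ { 7, 7, 7, ... } needs one element per pattern (VECTOR_CST_DUPLICATE_P),
+ while a stepped series such as { 0, 1, 2, 3, ... } is encoded by three
+ elements per pattern, the last two defining the step
+ (VECTOR_CST_STEPPED_P). */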
+
+/* Define fields and accessors for some special-purpose tree nodes. */
+
+/* Unlike STRING_CST, in C terms this is strlen, not sizeof. */
+#define IDENTIFIER_LENGTH(NODE) \
+ (IDENTIFIER_NODE_CHECK (NODE)->identifier.id.len)
+#define IDENTIFIER_POINTER(NODE) \
+ ((const char *) IDENTIFIER_NODE_CHECK (NODE)->identifier.id.str)
+#define IDENTIFIER_HASH_VALUE(NODE) \
+ (IDENTIFIER_NODE_CHECK (NODE)->identifier.id.hash_value)
+
+/* Translate a hash table identifier pointer to a tree_identifier
+ pointer, and vice versa. */
+
+#define HT_IDENT_TO_GCC_IDENT(NODE) \
+ ((tree) ((char *) (NODE) - sizeof (struct tree_common)))
+#define GCC_IDENT_TO_HT_IDENT(NODE) (&((struct tree_identifier *) (NODE))->id)
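+
+/* These rely on struct tree_identifier laying out a tree_common
+ directly before the embedded ht_identifier, so the two pointers differ
+ by exactly sizeof (struct tree_common). */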
+
+/* In a TREE_LIST node. */
+#define TREE_PURPOSE(NODE) (TREE_LIST_CHECK (NODE)->list.purpose)
+#define TREE_VALUE(NODE) (TREE_LIST_CHECK (NODE)->list.value)
+
+/* In a TREE_VEC node. */
+#define TREE_VEC_LENGTH(NODE) (TREE_VEC_CHECK (NODE)->base.u.length)
+#define TREE_VEC_BEGIN(NODE) (&TREE_VEC_CHECK (NODE)->vec.a[0])
+#define TREE_VEC_END(NODE) \
+ ((void) TREE_VEC_CHECK (NODE), &((NODE)->vec.a[(NODE)->base.u.length]))
+
+#define TREE_VEC_ELT(NODE,I) TREE_VEC_ELT_CHECK (NODE, I)
+
+/* In a CONSTRUCTOR node. */
+#define CONSTRUCTOR_ELTS(NODE) (CONSTRUCTOR_CHECK (NODE)->constructor.elts)
+#define CONSTRUCTOR_ELT(NODE,IDX) \
+ (&(*CONSTRUCTOR_ELTS (NODE))[IDX])
+#define CONSTRUCTOR_NELTS(NODE) \
+ (vec_safe_length (CONSTRUCTOR_ELTS (NODE)))
+#define CONSTRUCTOR_NO_CLEARING(NODE) \
+ (CONSTRUCTOR_CHECK (NODE)->base.public_flag)
+
+/* Iterate through the vector V of CONSTRUCTOR_ELT elements, yielding the
+ value of each element (stored within VAL). IX must be a scratch variable
+ of unsigned integer type. */
+#define FOR_EACH_CONSTRUCTOR_VALUE(V, IX, VAL) \
+ for (IX = 0; (IX >= vec_safe_length (V)) \
+ ? false \
+ : ((VAL = (*(V))[IX].value), \
+ true); \
+ (IX)++)
+
+/* Iterate through the vector V of CONSTRUCTOR_ELT elements, yielding both
+ the value of each element (stored within VAL) and its index (stored
+ within INDEX). IX must be a scratch variable of unsigned integer type. */
+#define FOR_EACH_CONSTRUCTOR_ELT(V, IX, INDEX, VAL) \
+ for (IX = 0; (IX >= vec_safe_length (V)) \
+ ? false \
+ : (((void) (VAL = (*V)[IX].value)), \
+ (INDEX = (*V)[IX].index), \
+ true); \
+ (IX)++)
+
+/* Append a new constructor element to V, with the specified INDEX and VAL. */
+#define CONSTRUCTOR_APPEND_ELT(V, INDEX, VALUE) \
+ do { \
+ constructor_elt _ce___ = {INDEX, VALUE}; \
+ vec_safe_push ((V), _ce___); \
+ } while (0)
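+
+/* Usage sketch (illustrative; build_constructor and size_int are the
+ usual helpers for building the node afterwards):
+
+ vec<constructor_elt, va_gc> *v = NULL;
+ CONSTRUCTOR_APPEND_ELT (v, size_int (0), elt0);
+ CONSTRUCTOR_APPEND_ELT (v, size_int (1), elt1);
+ tree ctor = build_constructor (array_type, v);
+
+ unsigned ix;
+ tree index, value;
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), ix, index, value)
+ process (index, value); // process is hypothetical
+*/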
+
+/* True if NODE, a FIELD_DECL, is to be processed as a bitfield for
+ constructor output purposes. */
+#define CONSTRUCTOR_BITFIELD_P(NODE) \
+ (DECL_BIT_FIELD (FIELD_DECL_CHECK (NODE)) && DECL_MODE (NODE) != BLKmode)
+
+/* True if NODE is a clobber right hand side, an expression of indeterminate
+ value that clobbers the LHS in a copy instruction. We use a volatile
+ empty CONSTRUCTOR for this, as it matches most of the necessary semantics.
+ In particular the volatile flag causes us to not prematurely remove
+ such clobber instructions. */
+#define TREE_CLOBBER_P(NODE) \
+ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_THIS_VOLATILE (NODE))
+
+/* Return the clobber_kind of a CLOBBER CONSTRUCTOR. */
+#define CLOBBER_KIND(NODE) \
+ (CONSTRUCTOR_CHECK (NODE)->base.u.bits.address_space)
+
+/* Define fields and accessors for some nodes that represent expressions. */
+
+/* Nonzero if NODE is an empty statement (NOP_EXPR <0>). */
+#define IS_EMPTY_STMT(NODE) (TREE_CODE (NODE) == NOP_EXPR \
+ && VOID_TYPE_P (TREE_TYPE (NODE)) \
+ && integer_zerop (TREE_OPERAND (NODE, 0)))
+
+/* In ordinary expression nodes. */
+#define TREE_OPERAND_LENGTH(NODE) tree_operand_length (NODE)
+#define TREE_OPERAND(NODE, I) TREE_OPERAND_CHECK (NODE, I)
+
+/* In a tcc_vl_exp node, operand 0 is an INTEGER_CST node holding the operand
+ length. Its value includes the length operand itself; that is,
+ the minimum valid length is 1.
+ Note that we have to bypass the use of TREE_OPERAND to access
+ that field to avoid infinite recursion in expanding the macros. */
+#define VL_EXP_OPERAND_LENGTH(NODE) \
+ ((int)TREE_INT_CST_LOW (VL_EXP_CHECK (NODE)->exp.operands[0]))
+
+/* Nonzero if gimple_debug_nonbind_marker_p() may possibly hold. */
+#define MAY_HAVE_DEBUG_MARKER_STMTS debug_nonbind_markers_p
+/* Nonzero if gimple_debug_bind_p() (and thus
+ gimple_debug_source_bind_p()) may possibly hold. */
+#define MAY_HAVE_DEBUG_BIND_STMTS flag_var_tracking_assignments
+/* Nonzero if is_gimple_debug() may possibly hold. */
+#define MAY_HAVE_DEBUG_STMTS \
+ (MAY_HAVE_DEBUG_MARKER_STMTS || MAY_HAVE_DEBUG_BIND_STMTS)
+
+/* In a LOOP_EXPR node. */
+#define LOOP_EXPR_BODY(NODE) TREE_OPERAND_CHECK_CODE (NODE, LOOP_EXPR, 0)
+
+/* The source location of this expression. Non-tree_exp nodes such as
+ decls and constants can be shared among multiple locations, so
+ return nothing. */
+#define EXPR_LOCATION(NODE) \
+ (CAN_HAVE_LOCATION_P ((NODE)) ? (NODE)->exp.locus : UNKNOWN_LOCATION)
+#define SET_EXPR_LOCATION(NODE, LOCUS) EXPR_CHECK ((NODE))->exp.locus = (LOCUS)
+#define EXPR_HAS_LOCATION(NODE) (LOCATION_LOCUS (EXPR_LOCATION (NODE)) \
+ != UNKNOWN_LOCATION)
+/* The location to be used in a diagnostic about this expression. Do not
+ use this macro if the location will be assigned to other expressions. */
+#define EXPR_LOC_OR_LOC(NODE, LOCUS) (EXPR_HAS_LOCATION (NODE) \
+ ? (NODE)->exp.locus : (LOCUS))
+#define EXPR_FILENAME(NODE) LOCATION_FILE (EXPR_CHECK ((NODE))->exp.locus)
+#define EXPR_LINENO(NODE) LOCATION_LINE (EXPR_CHECK (NODE)->exp.locus)
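+
+/* Usage sketch (illustrative only, not part of the imported header).
+   A diagnostic typically falls back to some caller-provided location
+   when the expression itself carries none, e.g.:
+
+     location_t loc = EXPR_LOC_OR_LOC (expr, input_location);
+
+   EXPR_LOCATION is safe on any node and yields UNKNOWN_LOCATION for
+   nodes that cannot carry a location; SET_EXPR_LOCATION, by contrast,
+   must only be applied to expression nodes.  */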
+
+#define CAN_HAVE_RANGE_P(NODE) (CAN_HAVE_LOCATION_P (NODE))
+#define EXPR_LOCATION_RANGE(NODE) (get_expr_source_range (EXPR_CHECK ((NODE))))
+
+#define EXPR_HAS_RANGE(NODE) \
+ (CAN_HAVE_RANGE_P (NODE) \
+ ? EXPR_LOCATION_RANGE (NODE).m_start != UNKNOWN_LOCATION \
+ : false)
+
+/* True if a tree is an expression or statement that can have a
+ location. */
+#define CAN_HAVE_LOCATION_P(NODE) ((NODE) && EXPR_P (NODE))
+
+inline source_range
+get_expr_source_range (tree expr)
+{
+ location_t loc = EXPR_LOCATION (expr);
+ return get_range_from_loc (line_table, loc);
+}
+
+extern void protected_set_expr_location (tree, location_t);
+extern void protected_set_expr_location_if_unset (tree, location_t);
+ATTRIBUTE_WARN_UNUSED_RESULT
+extern tree protected_set_expr_location_unshare (tree, location_t);
+
+WARN_UNUSED_RESULT extern tree maybe_wrap_with_location (tree, location_t);
+
+extern int suppress_location_wrappers;
+
+/* A class for suppressing the creation of location wrappers.
+ Location wrappers will not be created during the lifetime
+ of an instance of this class. */
+
+class auto_suppress_location_wrappers
+{
+ public:
+ auto_suppress_location_wrappers () { ++suppress_location_wrappers; }
+ ~auto_suppress_location_wrappers () { --suppress_location_wrappers; }
+};
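+
+/* Usage sketch (illustrative only, not part of the imported header):
+
+     {
+       auto_suppress_location_wrappers sentinel;
+       ... no location wrappers are created in this scope ...
+     }
+
+   Because the class merely increments and decrements a counter, the
+   suppression nests safely.  */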
+
+/* In a TARGET_EXPR node. */
+#define TARGET_EXPR_SLOT(NODE) TREE_OPERAND_CHECK_CODE (NODE, TARGET_EXPR, 0)
+#define TARGET_EXPR_INITIAL(NODE) TREE_OPERAND_CHECK_CODE (NODE, TARGET_EXPR, 1)
+#define TARGET_EXPR_CLEANUP(NODE) TREE_OPERAND_CHECK_CODE (NODE, TARGET_EXPR, 2)
+/* Don't elide the initialization of TARGET_EXPR_SLOT for this TARGET_EXPR
+ on rhs of MODIFY_EXPR. */
+#define TARGET_EXPR_NO_ELIDE(NODE) (TARGET_EXPR_CHECK (NODE)->base.private_flag)
+
+/* DECL_EXPR accessor. This gives access to the DECL associated with
+ the given declaration statement. */
+#define DECL_EXPR_DECL(NODE) TREE_OPERAND (DECL_EXPR_CHECK (NODE), 0)
+
+#define EXIT_EXPR_COND(NODE) TREE_OPERAND (EXIT_EXPR_CHECK (NODE), 0)
+
+/* COMPOUND_LITERAL_EXPR accessors. */
+#define COMPOUND_LITERAL_EXPR_DECL_EXPR(NODE) \
+ TREE_OPERAND (COMPOUND_LITERAL_EXPR_CHECK (NODE), 0)
+#define COMPOUND_LITERAL_EXPR_DECL(NODE) \
+ DECL_EXPR_DECL (COMPOUND_LITERAL_EXPR_DECL_EXPR (NODE))
+
+/* SWITCH_EXPR accessors. These give access to the condition and body. */
+#define SWITCH_COND(NODE) TREE_OPERAND (SWITCH_EXPR_CHECK (NODE), 0)
+#define SWITCH_BODY(NODE) TREE_OPERAND (SWITCH_EXPR_CHECK (NODE), 1)
+/* True if there are case labels for all possible values of SWITCH_COND, either
+ because there is a default: case label or because the case label ranges cover
+ all values. */
+#define SWITCH_ALL_CASES_P(NODE) (SWITCH_EXPR_CHECK (NODE)->base.private_flag)
+
+/* CASE_LABEL_EXPR accessors. These give access to the high and low values
+ of a case label, respectively. */
+#define CASE_LOW(NODE) TREE_OPERAND (CASE_LABEL_EXPR_CHECK (NODE), 0)
+#define CASE_HIGH(NODE) TREE_OPERAND (CASE_LABEL_EXPR_CHECK (NODE), 1)
+#define CASE_LABEL(NODE) TREE_OPERAND (CASE_LABEL_EXPR_CHECK (NODE), 2)
+#define CASE_CHAIN(NODE) TREE_OPERAND (CASE_LABEL_EXPR_CHECK (NODE), 3)
+
+/* The operands of a TARGET_MEM_REF. Operands 0 and 1 have to match
+ corresponding MEM_REF operands. */
+#define TMR_BASE(NODE) (TREE_OPERAND (TARGET_MEM_REF_CHECK (NODE), 0))
+#define TMR_OFFSET(NODE) (TREE_OPERAND (TARGET_MEM_REF_CHECK (NODE), 1))
+#define TMR_INDEX(NODE) (TREE_OPERAND (TARGET_MEM_REF_CHECK (NODE), 2))
+#define TMR_STEP(NODE) (TREE_OPERAND (TARGET_MEM_REF_CHECK (NODE), 3))
+#define TMR_INDEX2(NODE) (TREE_OPERAND (TARGET_MEM_REF_CHECK (NODE), 4))
+
+#define MR_DEPENDENCE_CLIQUE(NODE) \
+ (TREE_CHECK2 (NODE, MEM_REF, TARGET_MEM_REF)->base.u.dependence_info.clique)
+#define MR_DEPENDENCE_BASE(NODE) \
+ (TREE_CHECK2 (NODE, MEM_REF, TARGET_MEM_REF)->base.u.dependence_info.base)
+
+/* The operands of a BIND_EXPR. */
+#define BIND_EXPR_VARS(NODE) (TREE_OPERAND (BIND_EXPR_CHECK (NODE), 0))
+#define BIND_EXPR_BODY(NODE) (TREE_OPERAND (BIND_EXPR_CHECK (NODE), 1))
+#define BIND_EXPR_BLOCK(NODE) (TREE_OPERAND (BIND_EXPR_CHECK (NODE), 2))
+
+/* GOTO_EXPR accessor. This gives access to the label associated with
+ a goto statement. */
+#define GOTO_DESTINATION(NODE) TREE_OPERAND (GOTO_EXPR_CHECK (NODE), 0)
+
+/* ASM_EXPR accessors. ASM_STRING returns a STRING_CST for the
+ instruction (e.g., "mov x, y"). ASM_OUTPUTS, ASM_INPUTS, and
+ ASM_CLOBBERS represent the outputs, inputs, and clobbers for the
+ statement. */
+#define ASM_STRING(NODE) TREE_OPERAND (ASM_EXPR_CHECK (NODE), 0)
+#define ASM_OUTPUTS(NODE) TREE_OPERAND (ASM_EXPR_CHECK (NODE), 1)
+#define ASM_INPUTS(NODE) TREE_OPERAND (ASM_EXPR_CHECK (NODE), 2)
+#define ASM_CLOBBERS(NODE) TREE_OPERAND (ASM_EXPR_CHECK (NODE), 3)
+#define ASM_LABELS(NODE) TREE_OPERAND (ASM_EXPR_CHECK (NODE), 4)
+/* Nonzero if we want to create an ASM_INPUT instead of an
+ ASM_OPERAND with no operands. */
+#define ASM_INPUT_P(NODE) (ASM_EXPR_CHECK (NODE)->base.static_flag)
+#define ASM_VOLATILE_P(NODE) (ASM_EXPR_CHECK (NODE)->base.public_flag)
+/* Nonzero if we want to consider this asm as minimum length and cost
+ for inlining decisions. */
+#define ASM_INLINE_P(NODE) (ASM_EXPR_CHECK (NODE)->base.protected_flag)
+
+/* COND_EXPR accessors. */
+#define COND_EXPR_COND(NODE) (TREE_OPERAND (COND_EXPR_CHECK (NODE), 0))
+#define COND_EXPR_THEN(NODE) (TREE_OPERAND (COND_EXPR_CHECK (NODE), 1))
+#define COND_EXPR_ELSE(NODE) (TREE_OPERAND (COND_EXPR_CHECK (NODE), 2))
+
+/* Accessors for the chains of recurrences. */
+#define CHREC_LEFT(NODE) TREE_OPERAND (POLYNOMIAL_CHREC_CHECK (NODE), 0)
+#define CHREC_RIGHT(NODE) TREE_OPERAND (POLYNOMIAL_CHREC_CHECK (NODE), 1)
+#define CHREC_VARIABLE(NODE) POLYNOMIAL_CHREC_CHECK (NODE)->base.u.chrec_var
+
+/* LABEL_EXPR accessor. This gives access to the label associated with
+ the given label expression. */
+#define LABEL_EXPR_LABEL(NODE) TREE_OPERAND (LABEL_EXPR_CHECK (NODE), 0)
+
+/* CATCH_EXPR accessors. */
+#define CATCH_TYPES(NODE) TREE_OPERAND (CATCH_EXPR_CHECK (NODE), 0)
+#define CATCH_BODY(NODE) TREE_OPERAND (CATCH_EXPR_CHECK (NODE), 1)
+
+/* EH_FILTER_EXPR accessors. */
+#define EH_FILTER_TYPES(NODE) TREE_OPERAND (EH_FILTER_EXPR_CHECK (NODE), 0)
+#define EH_FILTER_FAILURE(NODE) TREE_OPERAND (EH_FILTER_EXPR_CHECK (NODE), 1)
+
+/* OBJ_TYPE_REF accessors. */
+#define OBJ_TYPE_REF_EXPR(NODE) TREE_OPERAND (OBJ_TYPE_REF_CHECK (NODE), 0)
+#define OBJ_TYPE_REF_OBJECT(NODE) TREE_OPERAND (OBJ_TYPE_REF_CHECK (NODE), 1)
+#define OBJ_TYPE_REF_TOKEN(NODE) TREE_OPERAND (OBJ_TYPE_REF_CHECK (NODE), 2)
+
+/* CALL_EXPR accessors. */
+#define CALL_EXPR_FN(NODE) TREE_OPERAND (CALL_EXPR_CHECK (NODE), 1)
+#define CALL_EXPR_STATIC_CHAIN(NODE) TREE_OPERAND (CALL_EXPR_CHECK (NODE), 2)
+#define CALL_EXPR_ARG(NODE, I) TREE_OPERAND (CALL_EXPR_CHECK (NODE), (I) + 3)
+#define call_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH (NODE) - 3)
+#define CALL_EXPR_IFN(NODE) (CALL_EXPR_CHECK (NODE)->base.u.ifn)
+
+/* CALL_EXPR_ARGP returns a pointer to the argument vector for NODE.
+ We can't use &CALL_EXPR_ARG (NODE, 0) because that will complain if
+ the argument count is zero when checking is enabled. Instead, do
+ the pointer arithmetic to advance past the 3 fixed operands in a
+ CALL_EXPR. That produces a valid pointer to just past the end of the
+ operand array, even if it's not valid to dereference it. */
+#define CALL_EXPR_ARGP(NODE) \
+ (&(TREE_OPERAND (CALL_EXPR_CHECK (NODE), 0)) + 3)
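+
+/* Usage sketch (illustrative only, not part of the imported header).
+   Walking the arguments of a CALL_EXPR node CALL obtained elsewhere:
+
+     tree fn = CALL_EXPR_FN (call);
+     for (int i = 0; i < call_expr_nargs (call); i++)
+       {
+         tree arg = CALL_EXPR_ARG (call, i);
+         ...
+       }
+
+   Operand 0 holds the operand count (see VL_EXP_OPERAND_LENGTH above),
+   operand 1 the callee and operand 2 the static chain, which is why the
+   first argument lives at operand 3.  */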
+
+/* TM directives and accessors. */
+#define TRANSACTION_EXPR_BODY(NODE) \
+ TREE_OPERAND (TRANSACTION_EXPR_CHECK (NODE), 0)
+#define TRANSACTION_EXPR_OUTER(NODE) \
+ (TRANSACTION_EXPR_CHECK (NODE)->base.static_flag)
+#define TRANSACTION_EXPR_RELAXED(NODE) \
+ (TRANSACTION_EXPR_CHECK (NODE)->base.public_flag)
+
+/* OpenMP and OpenACC directive and clause accessors. */
+
+/* Generic accessors for OMP nodes that keep the body as operand 0, and clauses
+ as operand 1. */
+#define OMP_BODY(NODE) \
+ TREE_OPERAND (TREE_RANGE_CHECK (NODE, OACC_PARALLEL, OMP_MASTER), 0)
+#define OMP_CLAUSES(NODE) \
+ TREE_OPERAND (TREE_RANGE_CHECK (NODE, OACC_PARALLEL, OMP_SCAN), 1)
+
+/* Generic accessors for OMP nodes that keep clauses as operand 0. */
+#define OMP_STANDALONE_CLAUSES(NODE) \
+ TREE_OPERAND (TREE_RANGE_CHECK (NODE, OACC_CACHE, OMP_TARGET_EXIT_DATA), 0)
+
+#define OACC_DATA_BODY(NODE) \
+ TREE_OPERAND (OACC_DATA_CHECK (NODE), 0)
+#define OACC_DATA_CLAUSES(NODE) \
+ TREE_OPERAND (OACC_DATA_CHECK (NODE), 1)
+
+#define OACC_HOST_DATA_BODY(NODE) \
+ TREE_OPERAND (OACC_HOST_DATA_CHECK (NODE), 0)
+#define OACC_HOST_DATA_CLAUSES(NODE) \
+ TREE_OPERAND (OACC_HOST_DATA_CHECK (NODE), 1)
+
+#define OACC_CACHE_CLAUSES(NODE) \
+ TREE_OPERAND (OACC_CACHE_CHECK (NODE), 0)
+
+#define OACC_DECLARE_CLAUSES(NODE) \
+ TREE_OPERAND (OACC_DECLARE_CHECK (NODE), 0)
+
+#define OACC_ENTER_DATA_CLAUSES(NODE) \
+ TREE_OPERAND (OACC_ENTER_DATA_CHECK (NODE), 0)
+
+#define OACC_EXIT_DATA_CLAUSES(NODE) \
+ TREE_OPERAND (OACC_EXIT_DATA_CHECK (NODE), 0)
+
+#define OACC_UPDATE_CLAUSES(NODE) \
+ TREE_OPERAND (OACC_UPDATE_CHECK (NODE), 0)
+
+#define OMP_PARALLEL_BODY(NODE) TREE_OPERAND (OMP_PARALLEL_CHECK (NODE), 0)
+#define OMP_PARALLEL_CLAUSES(NODE) TREE_OPERAND (OMP_PARALLEL_CHECK (NODE), 1)
+
+#define OMP_TASK_BODY(NODE) TREE_OPERAND (OMP_TASK_CHECK (NODE), 0)
+#define OMP_TASK_CLAUSES(NODE) TREE_OPERAND (OMP_TASK_CHECK (NODE), 1)
+
+#define OMP_TASKREG_CHECK(NODE) TREE_RANGE_CHECK (NODE, OMP_PARALLEL, OMP_TASK)
+#define OMP_TASKREG_BODY(NODE) TREE_OPERAND (OMP_TASKREG_CHECK (NODE), 0)
+#define OMP_TASKREG_CLAUSES(NODE) TREE_OPERAND (OMP_TASKREG_CHECK (NODE), 1)
+
+#define OMP_LOOPING_CHECK(NODE) TREE_RANGE_CHECK (NODE, OMP_FOR, OACC_LOOP)
+#define OMP_FOR_BODY(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 0)
+#define OMP_FOR_CLAUSES(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 1)
+#define OMP_FOR_INIT(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 2)
+#define OMP_FOR_COND(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 3)
+#define OMP_FOR_INCR(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 4)
+#define OMP_FOR_PRE_BODY(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 5)
+#define OMP_FOR_ORIG_DECLS(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 6)
+
+#define OMP_SECTIONS_BODY(NODE) TREE_OPERAND (OMP_SECTIONS_CHECK (NODE), 0)
+#define OMP_SECTIONS_CLAUSES(NODE) TREE_OPERAND (OMP_SECTIONS_CHECK (NODE), 1)
+
+#define OMP_SECTION_BODY(NODE) TREE_OPERAND (OMP_SECTION_CHECK (NODE), 0)
+
+#define OMP_SINGLE_BODY(NODE) TREE_OPERAND (OMP_SINGLE_CHECK (NODE), 0)
+#define OMP_SINGLE_CLAUSES(NODE) TREE_OPERAND (OMP_SINGLE_CHECK (NODE), 1)
+
+#define OMP_SCOPE_BODY(NODE) TREE_OPERAND (OMP_SCOPE_CHECK (NODE), 0)
+#define OMP_SCOPE_CLAUSES(NODE) TREE_OPERAND (OMP_SCOPE_CHECK (NODE), 1)
+
+#define OMP_MASTER_BODY(NODE) TREE_OPERAND (OMP_MASTER_CHECK (NODE), 0)
+
+#define OMP_MASKED_BODY(NODE) TREE_OPERAND (OMP_MASKED_CHECK (NODE), 0)
+#define OMP_MASKED_CLAUSES(NODE) TREE_OPERAND (OMP_MASKED_CHECK (NODE), 1)
+
+#define OMP_TASKGROUP_BODY(NODE) TREE_OPERAND (OMP_TASKGROUP_CHECK (NODE), 0)
+#define OMP_TASKGROUP_CLAUSES(NODE) \
+ TREE_OPERAND (OMP_TASKGROUP_CHECK (NODE), 1)
+
+#define OMP_ORDERED_BODY(NODE) TREE_OPERAND (OMP_ORDERED_CHECK (NODE), 0)
+#define OMP_ORDERED_CLAUSES(NODE) TREE_OPERAND (OMP_ORDERED_CHECK (NODE), 1)
+
+#define OMP_CRITICAL_BODY(NODE) TREE_OPERAND (OMP_CRITICAL_CHECK (NODE), 0)
+#define OMP_CRITICAL_CLAUSES(NODE) TREE_OPERAND (OMP_CRITICAL_CHECK (NODE), 1)
+#define OMP_CRITICAL_NAME(NODE) TREE_OPERAND (OMP_CRITICAL_CHECK (NODE), 2)
+
+#define OMP_TEAMS_BODY(NODE) TREE_OPERAND (OMP_TEAMS_CHECK (NODE), 0)
+#define OMP_TEAMS_CLAUSES(NODE) TREE_OPERAND (OMP_TEAMS_CHECK (NODE), 1)
+
+#define OMP_TARGET_DATA_BODY(NODE) \
+ TREE_OPERAND (OMP_TARGET_DATA_CHECK (NODE), 0)
+#define OMP_TARGET_DATA_CLAUSES(NODE)\
+ TREE_OPERAND (OMP_TARGET_DATA_CHECK (NODE), 1)
+
+#define OMP_TARGET_BODY(NODE) TREE_OPERAND (OMP_TARGET_CHECK (NODE), 0)
+#define OMP_TARGET_CLAUSES(NODE) TREE_OPERAND (OMP_TARGET_CHECK (NODE), 1)
+
+#define OMP_TARGET_UPDATE_CLAUSES(NODE)\
+ TREE_OPERAND (OMP_TARGET_UPDATE_CHECK (NODE), 0)
+
+#define OMP_TARGET_ENTER_DATA_CLAUSES(NODE)\
+ TREE_OPERAND (OMP_TARGET_ENTER_DATA_CHECK (NODE), 0)
+
+#define OMP_TARGET_EXIT_DATA_CLAUSES(NODE)\
+ TREE_OPERAND (OMP_TARGET_EXIT_DATA_CHECK (NODE), 0)
+
+#define OMP_SCAN_BODY(NODE) TREE_OPERAND (OMP_SCAN_CHECK (NODE), 0)
+#define OMP_SCAN_CLAUSES(NODE) TREE_OPERAND (OMP_SCAN_CHECK (NODE), 1)
+
+#define OMP_CLAUSE_SIZE(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_RANGE_CHECK (OMP_CLAUSE_CHECK (NODE), \
+ OMP_CLAUSE_FROM, \
+ OMP_CLAUSE__CACHE_), 1)
+
+#define OMP_CLAUSE_CHAIN(NODE) TREE_CHAIN (OMP_CLAUSE_CHECK (NODE))
+#define OMP_CLAUSE_DECL(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_RANGE_CHECK (OMP_CLAUSE_CHECK (NODE), \
+ OMP_CLAUSE_PRIVATE, \
+ OMP_CLAUSE__SCANTEMP_), 0)
+#define OMP_CLAUSE_HAS_LOCATION(NODE) \
+ (LOCATION_LOCUS ((OMP_CLAUSE_CHECK (NODE))->omp_clause.locus) \
+ != UNKNOWN_LOCATION)
+#define OMP_CLAUSE_LOCATION(NODE) (OMP_CLAUSE_CHECK (NODE))->omp_clause.locus
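+
+/* Usage sketch (illustrative only, not part of the imported header).
+   Clauses form a singly linked chain; a pass typically scans it like
+   so (note_private_decl is a hypothetical helper):
+
+     for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
+       if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE)
+         note_private_decl (OMP_CLAUSE_DECL (c));
+
+   OMP_CLAUSE_CODE is defined further below, next to the other generic
+   OMP_CLAUSE accessors.  */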
+
+/* True on OMP_FOR and other OpenMP/OpenACC looping constructs if the loop nest
+ is non-rectangular. */
+#define OMP_FOR_NON_RECTANGULAR(NODE) \
+ (OMP_LOOPING_CHECK (NODE)->base.private_flag)
+
+/* True on an OMP_SECTION statement that was the last lexical member.
+ This status is meaningful in the implementation of lastprivate. */
+#define OMP_SECTION_LAST(NODE) \
+ (OMP_SECTION_CHECK (NODE)->base.private_flag)
+
+/* True on an OMP_PARALLEL statement if it represents an explicit
+ combined parallel work-sharing construct. */
+#define OMP_PARALLEL_COMBINED(NODE) \
+ (OMP_PARALLEL_CHECK (NODE)->base.private_flag)
+
+/* True on an OMP_TEAMS statement if it represents an explicit
+ combined teams distribute construct. */
+#define OMP_TEAMS_COMBINED(NODE) \
+ (OMP_TEAMS_CHECK (NODE)->base.private_flag)
+
+/* True on an OMP_TARGET statement if it represents explicit
+ combined target teams, target parallel or target simd constructs. */
+#define OMP_TARGET_COMBINED(NODE) \
+ (OMP_TARGET_CHECK (NODE)->base.private_flag)
+
+/* True on an OMP_MASTER statement if it represents an explicit
+ combined master construct. */
+#define OMP_MASTER_COMBINED(NODE) \
+ (OMP_MASTER_CHECK (NODE)->base.private_flag)
+
+/* True on an OMP_MASKED statement if it represents an explicit
+ combined masked construct. */
+#define OMP_MASKED_COMBINED(NODE) \
+ (OMP_MASKED_CHECK (NODE)->base.private_flag)
+
+/* Memory order for OMP_ATOMIC*. */
+#define OMP_ATOMIC_MEMORY_ORDER(NODE) \
+ (TREE_RANGE_CHECK (NODE, OMP_ATOMIC, \
+ OMP_ATOMIC_CAPTURE_NEW)->base.u.omp_atomic_memory_order)
+
+/* Weak clause on OMP_ATOMIC*. */
+#define OMP_ATOMIC_WEAK(NODE) \
+ (TREE_RANGE_CHECK (NODE, OMP_ATOMIC, \
+ OMP_ATOMIC_CAPTURE_NEW)->base.public_flag)
+
+/* True on a PRIVATE clause if its decl is kept around for debugging
+ information only and its DECL_VALUE_EXPR is supposed to point
+ to what it has been remapped to. */
+#define OMP_CLAUSE_PRIVATE_DEBUG(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_PRIVATE)->base.public_flag)
+
+/* True on a PRIVATE clause if ctor needs access to outer region's
+ variable. */
+#define OMP_CLAUSE_PRIVATE_OUTER_REF(NODE) \
+ TREE_PRIVATE (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_PRIVATE))
+
+/* True if a PRIVATE clause is for a C++ class IV on taskloop construct
+ (thus should be private on the outer taskloop and firstprivate on
+ task). */
+#define OMP_CLAUSE_PRIVATE_TASKLOOP_IV(NODE) \
+ TREE_PROTECTED (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_PRIVATE))
+
+/* True on a FIRSTPRIVATE clause if it has been added implicitly. */
+#define OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_FIRSTPRIVATE)->base.public_flag)
+
+/* True on a FIRSTPRIVATE clause if only the reference and not what it refers
+ to should be firstprivatized. */
+#define OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE(NODE) \
+ TREE_PRIVATE (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_FIRSTPRIVATE))
+
+/* True on a FIRSTPRIVATE clause with OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT also
+ set if the target construct is the only one that accepts the clause. */
+#define OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT_TARGET(NODE) \
+ TREE_PROTECTED (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_FIRSTPRIVATE))
+
+/* True on a LASTPRIVATE clause if a FIRSTPRIVATE clause for the same
+ decl is present in the chain. */
+#define OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_LASTPRIVATE)->base.public_flag)
+#define OMP_CLAUSE_LASTPRIVATE_STMT(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, \
+ OMP_CLAUSE_LASTPRIVATE),\
+ 1)
+#define OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ(NODE) \
+ (OMP_CLAUSE_CHECK (NODE))->omp_clause.gimple_reduction_init
+
+/* True if a LASTPRIVATE clause is for a C++ class IV on taskloop or
+ loop construct (thus should be lastprivate on the outer taskloop and
+ firstprivate on task for the taskloop construct and carefully handled
+ for loop construct). */
+#define OMP_CLAUSE_LASTPRIVATE_LOOP_IV(NODE) \
+ TREE_PROTECTED (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_LASTPRIVATE))
+
+/* True if a LASTPRIVATE clause has CONDITIONAL: modifier. */
+#define OMP_CLAUSE_LASTPRIVATE_CONDITIONAL(NODE) \
+ TREE_PRIVATE (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_LASTPRIVATE))
+
+/* True on a SHARED clause if a FIRSTPRIVATE clause for the same
+ decl is present in the chain (this can happen only for taskloop
+ with FIRSTPRIVATE/LASTPRIVATE on it originally). */
+#define OMP_CLAUSE_SHARED_FIRSTPRIVATE(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_SHARED)->base.public_flag)
+
+/* True on a SHARED clause if a scalar is not modified in the body and
+ thus could be optimized as firstprivate. */
+#define OMP_CLAUSE_SHARED_READONLY(NODE) \
+ TREE_PRIVATE (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_SHARED))
+
+#define OMP_CLAUSE_IF_MODIFIER(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_IF)->omp_clause.subcode.if_modifier)
+
+#define OMP_CLAUSE_FINAL_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_FINAL), 0)
+#define OMP_CLAUSE_IF_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_IF), 0)
+#define OMP_CLAUSE_NUM_THREADS_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_NUM_THREADS),0)
+#define OMP_CLAUSE_SCHEDULE_CHUNK_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_SCHEDULE), 0)
+#define OMP_CLAUSE_NUM_TASKS_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_NUM_TASKS), 0)
+#define OMP_CLAUSE_HINT_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_HINT), 0)
+#define OMP_CLAUSE_FILTER_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_FILTER), 0)
+
+#define OMP_CLAUSE_GRAINSIZE_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_GRAINSIZE),0)
+
+#define OMP_CLAUSE_PRIORITY_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_PRIORITY),0)
+
+#define OMP_CLAUSE_GRAINSIZE_STRICT(NODE) \
+ TREE_PRIVATE (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_GRAINSIZE))
+#define OMP_CLAUSE_NUM_TASKS_STRICT(NODE) \
+ TREE_PRIVATE (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_NUM_TASKS))
+
+/* OpenACC clause expressions */
+#define OMP_CLAUSE_EXPR(NODE, CLAUSE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, CLAUSE), 0)
+#define OMP_CLAUSE_GANG_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND ( \
+ OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_GANG), 0)
+#define OMP_CLAUSE_GANG_STATIC_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND ( \
+ OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_GANG), 1)
+#define OMP_CLAUSE_ASYNC_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND ( \
+ OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ASYNC), 0)
+#define OMP_CLAUSE_WAIT_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND ( \
+ OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_WAIT), 0)
+#define OMP_CLAUSE_VECTOR_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND ( \
+ OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_VECTOR), 0)
+#define OMP_CLAUSE_WORKER_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND ( \
+ OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_WORKER), 0)
+#define OMP_CLAUSE_NUM_GANGS_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND ( \
+ OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_NUM_GANGS), 0)
+#define OMP_CLAUSE_NUM_WORKERS_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND ( \
+ OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_NUM_WORKERS), 0)
+#define OMP_CLAUSE_VECTOR_LENGTH_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND ( \
+ OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_VECTOR_LENGTH), 0)
+
+#define OMP_CLAUSE_DEPEND_KIND(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_DEPEND)->omp_clause.subcode.depend_kind)
+
+#define OMP_CLAUSE_DOACROSS_KIND(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_DOACROSS)->omp_clause.subcode.doacross_kind)
+
+#define OMP_CLAUSE_DOACROSS_SINK_NEGATIVE(NODE) \
+ TREE_PUBLIC (TREE_LIST_CHECK (NODE))
+
+/* True if a DOACROSS clause is spelled as DEPEND. */
+#define OMP_CLAUSE_DOACROSS_DEPEND(NODE) \
+ TREE_PROTECTED (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_DOACROSS))
+
+#define OMP_CLAUSE_MAP_KIND(NODE) \
+ ((enum gomp_map_kind) OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_MAP)->omp_clause.subcode.map_kind)
+#define OMP_CLAUSE_SET_MAP_KIND(NODE, MAP_KIND) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_MAP)->omp_clause.subcode.map_kind \
+ = (unsigned int) (MAP_KIND))
+
+/* Nonzero if this map clause is for an array-based (rather than
+ pointer-based) array section with zero bias. Both the non-decl
+ OMP_CLAUSE_MAP and the corresponding OMP_CLAUSE_MAP with
+ GOMP_MAP_POINTER are marked with this flag. */
+#define OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_MAP)->base.public_flag)
+/* Nonzero if this is a mapped array section, that might need special
+ treatment if OMP_CLAUSE_SIZE is zero. */
+#define OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION(NODE) \
+ TREE_PROTECTED (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_MAP))
+/* Nonzero if this map clause is for an OpenACC compute construct's reduction
+ variable or OpenMP map clause mentioned also in in_reduction clause on the
+ same construct. */
+#define OMP_CLAUSE_MAP_IN_REDUCTION(NODE) \
+ TREE_PRIVATE (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_MAP))
+/* Nonzero on map clauses added implicitly for reduction clauses on combined
+ or composite constructs. They shall be removed if there is an explicit
+ map clause. */
+#define OMP_CLAUSE_MAP_IMPLICIT(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_MAP)->base.default_def_flag)
+/* Nonzero if this map clause is to be indicated to the runtime as 'implicit',
+ due to being created through implicit data-mapping rules in the middle-end.
+ NOTE: this is different than OMP_CLAUSE_MAP_IMPLICIT. */
+#define OMP_CLAUSE_MAP_RUNTIME_IMPLICIT_P(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_MAP)->base.deprecated_flag)
+
+/* Flag that 'OMP_CLAUSE_DECL (NODE)' is to be made addressable during OMP
+ lowering. */
+#define OMP_CLAUSE_MAP_DECL_MAKE_ADDRESSABLE(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_MAP)->base.addressable_flag)
+
+/* True on an OMP_CLAUSE_USE_DEVICE_PTR with an OpenACC 'if_present'
+ clause. */
+#define OMP_CLAUSE_USE_DEVICE_PTR_IF_PRESENT(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_USE_DEVICE_PTR)->base.public_flag)
+
+#define OMP_CLAUSE_PROC_BIND_KIND(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_PROC_BIND)->omp_clause.subcode.proc_bind_kind)
+
+#define OMP_CLAUSE_DEVICE_TYPE_KIND(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_DEVICE_TYPE)->omp_clause.subcode.device_type_kind)
+
+/* True if there is a device clause with a device-modifier 'ancestor'. */
+#define OMP_CLAUSE_DEVICE_ANCESTOR(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_DEVICE)->base.public_flag)
+
+#define OMP_CLAUSE_COLLAPSE_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_COLLAPSE), 0)
+#define OMP_CLAUSE_COLLAPSE_ITERVAR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_COLLAPSE), 1)
+#define OMP_CLAUSE_COLLAPSE_COUNT(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_COLLAPSE), 2)
+
+#define OMP_CLAUSE_ORDERED_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ORDERED), 0)
+
+/* True on an OMP_CLAUSE_ORDERED if stand-alone ordered construct is nested
+ inside of the work-sharing loop the clause is on. */
+#define OMP_CLAUSE_ORDERED_DOACROSS(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ORDERED)->base.public_flag)
+
+/* True for unconstrained modifier on order(concurrent) clause. */
+#define OMP_CLAUSE_ORDER_UNCONSTRAINED(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ORDER)->base.public_flag)
+/* True for reproducible modifier on order(concurrent) clause. */
+#define OMP_CLAUSE_ORDER_REPRODUCIBLE(NODE) \
+ TREE_PROTECTED (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ORDER))
+
+#define OMP_CLAUSE_REDUCTION_CODE(NODE) \
+ (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_REDUCTION, \
+ OMP_CLAUSE_IN_REDUCTION)->omp_clause.subcode.reduction_code)
+#define OMP_CLAUSE_REDUCTION_INIT(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_REDUCTION, \
+ OMP_CLAUSE_IN_REDUCTION), 1)
+#define OMP_CLAUSE_REDUCTION_MERGE(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_REDUCTION, \
+ OMP_CLAUSE_IN_REDUCTION), 2)
+#define OMP_CLAUSE_REDUCTION_GIMPLE_INIT(NODE) \
+ (OMP_CLAUSE_CHECK (NODE))->omp_clause.gimple_reduction_init
+#define OMP_CLAUSE_REDUCTION_GIMPLE_MERGE(NODE) \
+ (OMP_CLAUSE_CHECK (NODE))->omp_clause.gimple_reduction_merge
+#define OMP_CLAUSE_REDUCTION_PLACEHOLDER(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_REDUCTION, \
+ OMP_CLAUSE_IN_REDUCTION), 3)
+#define OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_REDUCTION, \
+ OMP_CLAUSE_IN_REDUCTION), 4)
+
+/* True if a REDUCTION clause may reference the original list item (omp_orig)
+ in its OMP_CLAUSE_REDUCTION_{,GIMPLE_}INIT. */
+#define OMP_CLAUSE_REDUCTION_OMP_ORIG_REF(NODE) \
+ (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_REDUCTION, \
+ OMP_CLAUSE_IN_REDUCTION)->base.public_flag)
+
+/* True if a REDUCTION clause has task reduction-modifier. */
+#define OMP_CLAUSE_REDUCTION_TASK(NODE) \
+ TREE_PROTECTED (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_REDUCTION))
+
+/* True if a REDUCTION clause has inscan reduction-modifier. */
+#define OMP_CLAUSE_REDUCTION_INSCAN(NODE) \
+ TREE_PRIVATE (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_REDUCTION))
+
+/* True if a LINEAR clause doesn't need copy in. True for iterator vars which
+ are always initialized inside of the loop construct, false otherwise. */
+#define OMP_CLAUSE_LINEAR_NO_COPYIN(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_LINEAR)->base.public_flag)
+
+/* True if a LINEAR clause doesn't need copy out. True for iterator vars which
+ are declared inside of the simd construct. */
+#define OMP_CLAUSE_LINEAR_NO_COPYOUT(NODE) \
+ TREE_PRIVATE (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_LINEAR))
+
+/* True if a LINEAR clause has a stride that is variable. */
+#define OMP_CLAUSE_LINEAR_VARIABLE_STRIDE(NODE) \
+ TREE_PROTECTED (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_LINEAR))
+
+/* True for a LINEAR clause with old style modifier syntax
+ linear(modifier(list)) or linear(modifier(list):step). */
+#define OMP_CLAUSE_LINEAR_OLD_LINEAR_MODIFIER(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_LINEAR)->base.addressable_flag)
+
+/* True if a LINEAR clause is for an array or allocatable variable that
+ needs special handling by the frontend. */
+#define OMP_CLAUSE_LINEAR_ARRAY(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_LINEAR)->base.deprecated_flag)
+
+#define OMP_CLAUSE_LINEAR_STEP(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_LINEAR), 1)
+
+#define OMP_CLAUSE_LINEAR_STMT(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_LINEAR), 2)
+
+#define OMP_CLAUSE_LINEAR_GIMPLE_SEQ(NODE) \
+ (OMP_CLAUSE_CHECK (NODE))->omp_clause.gimple_reduction_init
+
+#define OMP_CLAUSE_LINEAR_KIND(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_LINEAR)->omp_clause.subcode.linear_kind)
+
+#define OMP_CLAUSE_ALIGNED_ALIGNMENT(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ALIGNED), 1)
+
+#define OMP_CLAUSE_ALLOCATE_ALLOCATOR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ALLOCATE), 1)
+
+#define OMP_CLAUSE_ALLOCATE_ALIGN(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ALLOCATE), 2)
+
+/* True if an ALLOCATE clause was present on a combined or composite
+ construct and the code for splitting the clauses has already performed
+ checking if the listed variable has explicit privatization on the
+ construct. */
+#define OMP_CLAUSE_ALLOCATE_COMBINED(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ALLOCATE)->base.public_flag)
+
+#define OMP_CLAUSE_NUM_TEAMS_UPPER_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_NUM_TEAMS), 0)
+
+#define OMP_CLAUSE_NUM_TEAMS_LOWER_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_NUM_TEAMS), 1)
+
+#define OMP_CLAUSE_THREAD_LIMIT_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, \
+ OMP_CLAUSE_THREAD_LIMIT), 0)
+
+#define OMP_CLAUSE_DEVICE_ID(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_DEVICE), 0)
+
+#define OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, \
+ OMP_CLAUSE_DIST_SCHEDULE), 0)
+
+#define OMP_CLAUSE_SAFELEN_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_SAFELEN), 0)
+
+#define OMP_CLAUSE_SIMDLEN_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_SIMDLEN), 0)
+
+#define OMP_CLAUSE__SIMDUID__DECL(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE__SIMDUID_), 0)
+
+#define OMP_CLAUSE_SCHEDULE_KIND(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_SCHEDULE)->omp_clause.subcode.schedule_kind)
+
+/* True if a SCHEDULE clause has the simd modifier on it. */
+#define OMP_CLAUSE_SCHEDULE_SIMD(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_SCHEDULE)->base.public_flag)
+
+#define OMP_CLAUSE_DEFAULT_KIND(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_DEFAULT)->omp_clause.subcode.default_kind)
+
+#define OMP_CLAUSE_DEFAULTMAP_KIND(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_DEFAULTMAP)->omp_clause.subcode.defaultmap_kind)
+#define OMP_CLAUSE_DEFAULTMAP_CATEGORY(NODE) \
+ ((enum omp_clause_defaultmap_kind) \
+ (OMP_CLAUSE_DEFAULTMAP_KIND (NODE) & OMP_CLAUSE_DEFAULTMAP_CATEGORY_MASK))
+#define OMP_CLAUSE_DEFAULTMAP_BEHAVIOR(NODE) \
+ ((enum omp_clause_defaultmap_kind) \
+ (OMP_CLAUSE_DEFAULTMAP_KIND (NODE) & OMP_CLAUSE_DEFAULTMAP_MASK))
+#define OMP_CLAUSE_DEFAULTMAP_SET_KIND(NODE, BEHAVIOR, CATEGORY) \
+ (OMP_CLAUSE_DEFAULTMAP_KIND (NODE) \
+ = (enum omp_clause_defaultmap_kind) (CATEGORY | BEHAVIOR))
+
+#define OMP_CLAUSE_BIND_KIND(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_BIND)->omp_clause.subcode.bind_kind)
+
+/* True if an ENTER clause is spelled as TO. */
+#define OMP_CLAUSE_ENTER_TO(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_ENTER)->base.public_flag)
+
+#define OMP_CLAUSE_TILE_LIST(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_TILE), 0)
+#define OMP_CLAUSE_TILE_ITERVAR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_TILE), 1)
+#define OMP_CLAUSE_TILE_COUNT(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_TILE), 2)
+
+/* _CONDTEMP_ holding temporary with iteration count. */
+#define OMP_CLAUSE__CONDTEMP__ITER(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE__CONDTEMP_)->base.public_flag)
+
+/* _SCANTEMP_ holding temporary with pointer to thread's local array;
+ allocation. */
+#define OMP_CLAUSE__SCANTEMP__ALLOC(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE__SCANTEMP_)->base.public_flag)
+
+/* _SCANTEMP_ holding temporary with a control variable for deallocation;
+ one boolean_type_node to test whether alloca was used, another one
+ to pass to __builtin_stack_restore or free. */
+#define OMP_CLAUSE__SCANTEMP__CONTROL(NODE) \
+ TREE_PRIVATE (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE__SCANTEMP_))
+
+/* SSA_NAME accessors. */
+
+/* Whether SSA_NAME NODE is a virtual operand. This simply caches the
+ information in the underlying SSA_NAME_VAR for efficiency. */
+#define SSA_NAME_IS_VIRTUAL_OPERAND(NODE) \
+ SSA_NAME_CHECK (NODE)->base.public_flag
+
+/* Returns the IDENTIFIER_NODE giving the SSA name a name or NULL_TREE
+ if there is no name associated with it. */
+#define SSA_NAME_IDENTIFIER(NODE) \
+ (SSA_NAME_CHECK (NODE)->ssa_name.var != NULL_TREE \
+ ? (TREE_CODE ((NODE)->ssa_name.var) == IDENTIFIER_NODE \
+ ? (NODE)->ssa_name.var \
+ : DECL_NAME ((NODE)->ssa_name.var)) \
+ : NULL_TREE)
+
+/* Returns the variable being referenced. This can be NULL_TREE for
+ temporaries not associated with any user variable.
+ Once released, this is the only field that can be relied upon. */
+#define SSA_NAME_VAR(NODE) \
+ (SSA_NAME_CHECK (NODE)->ssa_name.var == NULL_TREE \
+ || TREE_CODE ((NODE)->ssa_name.var) == IDENTIFIER_NODE \
+ ? NULL_TREE : (NODE)->ssa_name.var)
+
+#define SET_SSA_NAME_VAR_OR_IDENTIFIER(NODE,VAR) \
+ do \
+ { \
+ tree var_ = (VAR); \
+ SSA_NAME_CHECK (NODE)->ssa_name.var = var_; \
+ SSA_NAME_IS_VIRTUAL_OPERAND (NODE) \
+ = (var_ \
+ && TREE_CODE (var_) == VAR_DECL \
+ && VAR_DECL_IS_VIRTUAL_OPERAND (var_)); \
+ } \
+ while (0)
+
+/* Returns the statement which defines this SSA name. */
+#define SSA_NAME_DEF_STMT(NODE) SSA_NAME_CHECK (NODE)->ssa_name.def_stmt
+
+/* Returns the SSA version number of this SSA name. Note that in
+ tree SSA, version numbers are not per variable and may be recycled. */
+#define SSA_NAME_VERSION(NODE) SSA_NAME_CHECK (NODE)->base.u.version
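+
+/* Usage sketch (illustrative only, not part of the imported header).
+   An SSA name conventionally prints as VAR_VERSION, e.g. "i_7":
+
+     tree var = SSA_NAME_VAR (name);        may be NULL_TREE
+     tree id = SSA_NAME_IDENTIFIER (name);  may be NULL_TREE
+     unsigned ver = SSA_NAME_VERSION (name);
+
+   Anonymous temporaries have a NULL_TREE SSA_NAME_VAR and print with a
+   bare underscore, e.g. "_7".  */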
+
+/* Nonzero if this SSA name occurs in an abnormal PHI. SSA_NAMES are
+ never output, so we can safely use the ASM_WRITTEN_FLAG for this
+ status bit. */
+#define SSA_NAME_OCCURS_IN_ABNORMAL_PHI(NODE) \
+ SSA_NAME_CHECK (NODE)->base.asm_written_flag
+
+/* Nonzero if this SSA_NAME expression is currently on the free list of
+ SSA_NAMES. Using NOTHROW_FLAG seems reasonably safe since throwing
+ has no meaning for an SSA_NAME. */
+#define SSA_NAME_IN_FREE_LIST(NODE) \
+ SSA_NAME_CHECK (NODE)->base.nothrow_flag
+
+/* Nonzero if this SSA_NAME is the default definition for the
+ underlying symbol. A default SSA name is created for symbol S if
+ the very first reference to S in the function is a read operation.
+ Default definitions are always created by an empty statement and
+ belong to no basic block. */
+#define SSA_NAME_IS_DEFAULT_DEF(NODE) \
+ SSA_NAME_CHECK (NODE)->base.default_def_flag
+
+/* Nonzero if this SSA_NAME is known to point to memory that may not
+ be written to. This is set for default defs of function parameters
+ that have a corresponding r or R specification in the function's
+ "fn spec" attribute. This is used by alias analysis. */
+#define SSA_NAME_POINTS_TO_READONLY_MEMORY(NODE) \
+ SSA_NAME_CHECK (NODE)->base.deprecated_flag
+
+/* Attributes for SSA_NAMEs for pointer-type variables. */
+#define SSA_NAME_PTR_INFO(N) \
+ SSA_NAME_CHECK (N)->ssa_name.info.ptr_info
+
+/* Value range info attributes for SSA_NAMEs of non pointer-type variables. */
+#define SSA_NAME_RANGE_INFO(N) \
+ SSA_NAME_CHECK (N)->ssa_name.info.range_info
+
+/* Return the immediate_use information for an SSA_NAME. */
+#define SSA_NAME_IMM_USE_NODE(NODE) SSA_NAME_CHECK (NODE)->ssa_name.imm_uses
+
+#define OMP_CLAUSE_CODE(NODE) \
+ (OMP_CLAUSE_CHECK (NODE))->omp_clause.code
+
+#define OMP_CLAUSE_SET_CODE(NODE, CODE) \
+ ((OMP_CLAUSE_CHECK (NODE))->omp_clause.code = (CODE))
+
+#define OMP_CLAUSE_OPERAND(NODE, I) \
+ OMP_CLAUSE_ELT_CHECK (NODE, I)
+
+/* In a BLOCK (scope) node:
+ Variables declared in the scope NODE. */
+#define BLOCK_VARS(NODE) (BLOCK_CHECK (NODE)->block.vars)
+#define BLOCK_NONLOCALIZED_VARS(NODE) \
+ (BLOCK_CHECK (NODE)->block.nonlocalized_vars)
+#define BLOCK_NUM_NONLOCALIZED_VARS(NODE) \
+ vec_safe_length (BLOCK_NONLOCALIZED_VARS (NODE))
+#define BLOCK_NONLOCALIZED_VAR(NODE,N) (*BLOCK_NONLOCALIZED_VARS (NODE))[N]
+/* A chain of BLOCKs (scopes) nested within the scope NODE. */
+#define BLOCK_SUBBLOCKS(NODE) (BLOCK_CHECK (NODE)->block.subblocks)
+/* The scope enclosing the scope NODE, or FUNCTION_DECL for the "outermost"
+ function scope. Inlined functions are chained by this so that given
+ expression E and its TREE_BLOCK(E) B, BLOCK_SUPERCONTEXT(B) is the scope
+ in which E has been made or into which E has been inlined. */
+#define BLOCK_SUPERCONTEXT(NODE) (BLOCK_CHECK (NODE)->block.supercontext)
+/* Points to the next scope at the same level of nesting as scope NODE. */
+#define BLOCK_CHAIN(NODE) (BLOCK_CHECK (NODE)->block.chain)
+/* A BLOCK, or FUNCTION_DECL of the function from which a block has been
+ inlined. In a scope immediately enclosing an inlined leaf expression,
+ points to the outermost scope into which it has been inlined (thus
+ bypassing all intermediate BLOCK_SUPERCONTEXTs). */
+#define BLOCK_ABSTRACT_ORIGIN(NODE) (BLOCK_CHECK (NODE)->block.abstract_origin)
+#define BLOCK_ORIGIN(NODE) \
+ (BLOCK_ABSTRACT_ORIGIN(NODE) ? BLOCK_ABSTRACT_ORIGIN(NODE) : (NODE))
+#define BLOCK_DIE(NODE) (BLOCK_CHECK (NODE)->block.die)
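+
+/* Usage sketch (illustrative only, not part of the imported header).
+   The scope tree is walked by following BLOCK_SUBBLOCKS downwards and
+   BLOCK_CHAIN sideways:
+
+     static void
+     visit_blocks (tree block)
+     {
+       for (; block; block = BLOCK_CHAIN (block))
+         {
+           ... look at BLOCK_VARS (block) here ...
+           visit_blocks (BLOCK_SUBBLOCKS (block));
+         }
+     }
+  */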
+
+/* True if BLOCK has the same ranges as its BLOCK_SUPERCONTEXT. */
+#define BLOCK_SAME_RANGE(NODE) (BLOCK_CHECK (NODE)->base.u.bits.nameless_flag)
+
+/* True if BLOCK appears in cold section. */
+#define BLOCK_IN_COLD_SECTION_P(NODE) \
+ (BLOCK_CHECK (NODE)->base.u.bits.atomic_flag)
+
+/* An index number for this block. These values are not guaranteed to
+ be unique across functions -- whether or not they are depends on
+ the debugging output format in use. */
+#define BLOCK_NUMBER(NODE) (BLOCK_CHECK (NODE)->block.block_num)
+
+/* If block reordering splits a lexical block into discontiguous
+ address ranges, we'll make a copy of the original block.
+
+ Note that this is logically distinct from BLOCK_ABSTRACT_ORIGIN.
+ In that case, we have one source block that has been replicated
+ (through inlining or unrolling) into many logical blocks, and
+ these logical blocks have different physical variables in them.
+
+ In this case, we have one logical block split into several
+ non-contiguous address ranges. Most debug formats can't actually
+ represent this idea directly, so we fake it by creating multiple
+ logical blocks with the same variables in them. However, for those
+ that do support non-contiguous regions, these allow the original
+ logical block to be reconstructed, along with the set of address
+ ranges.
+
+ One of the logical block fragments is arbitrarily chosen to be
+ the ORIGIN. The other fragments will point to the origin via
+ BLOCK_FRAGMENT_ORIGIN; the origin itself will have this pointer
+ be null. The list of fragments will be chained through
+ BLOCK_FRAGMENT_CHAIN from the origin. */
+
+#define BLOCK_FRAGMENT_ORIGIN(NODE) (BLOCK_CHECK (NODE)->block.fragment_origin)
+#define BLOCK_FRAGMENT_CHAIN(NODE) (BLOCK_CHECK (NODE)->block.fragment_chain)
+
+/* For an inlined function, this gives the location where it was called
+ from. This is only set in the top level block, which corresponds to the
+ inlined function scope. This is used in the debug output routines. */
+
+#define BLOCK_SOURCE_LOCATION(NODE) (BLOCK_CHECK (NODE)->block.locus)
+
+/* This gives the location of the end of the block, useful to attach
+ code implicitly generated for outgoing paths. */
+
+#define BLOCK_SOURCE_END_LOCATION(NODE) (BLOCK_CHECK (NODE)->block.end_locus)
+
+/* Define fields and accessors for nodes representing data types. */
+
+/* See tree.def for documentation of the use of these fields.
+ Look at the documentation of the various ..._TYPE tree codes.
+
+ Note that the type.values, type.minval, and type.maxval fields are
+ overloaded and used for different macros in different kinds of types.
+ Each macro must check to ensure the tree node is of the proper kind of
+ type. Note also that some of the front-ends also overload these fields,
+ so they must be checked as well. */
+
+#define TYPE_UID(NODE) (TYPE_CHECK (NODE)->type_common.uid)
+/* Type size in bits as a tree expression. Need not be constant and may
+ be greater than TYPE_SIZE for a C++ FIELD_DECL representing a base
+ class subobject with its own virtual base classes (which are laid out
+ separately). */
+#define TYPE_SIZE(NODE) (TYPE_CHECK (NODE)->type_common.size)
+/* Likewise, type size in bytes. */
+#define TYPE_SIZE_UNIT(NODE) (TYPE_CHECK (NODE)->type_common.size_unit)
+#define TYPE_POINTER_TO(NODE) (TYPE_CHECK (NODE)->type_common.pointer_to)
+#define TYPE_REFERENCE_TO(NODE) (TYPE_CHECK (NODE)->type_common.reference_to)
+#define TYPE_PRECISION(NODE) (TYPE_CHECK (NODE)->type_common.precision)
+#define TYPE_NAME(NODE) (TYPE_CHECK (NODE)->type_common.name)
+#define TYPE_NEXT_VARIANT(NODE) (TYPE_CHECK (NODE)->type_common.next_variant)
+#define TYPE_MAIN_VARIANT(NODE) (TYPE_CHECK (NODE)->type_common.main_variant)
+#define TYPE_CONTEXT(NODE) (TYPE_CHECK (NODE)->type_common.context)
+
+#define TYPE_MODE_RAW(NODE) (TYPE_CHECK (NODE)->type_common.mode)
+#define TYPE_MODE(NODE) \
+ (VECTOR_TYPE_P (TYPE_CHECK (NODE)) \
+ ? vector_type_mode (NODE) : (NODE)->type_common.mode)
+#define SCALAR_TYPE_MODE(NODE) \
+ (as_a <scalar_mode> (TYPE_CHECK (NODE)->type_common.mode))
+#define SCALAR_INT_TYPE_MODE(NODE) \
+ (as_a <scalar_int_mode> (TYPE_CHECK (NODE)->type_common.mode))
+#define SCALAR_FLOAT_TYPE_MODE(NODE) \
+ (as_a <scalar_float_mode> (TYPE_CHECK (NODE)->type_common.mode))
+#define SET_TYPE_MODE(NODE, MODE) \
+ (TYPE_CHECK (NODE)->type_common.mode = (MODE))
+
+extern machine_mode element_mode (const_tree);
+extern machine_mode vector_type_mode (const_tree);
+extern unsigned int vector_element_bits (const_tree);
+extern tree vector_element_bits_tree (const_tree);
+
+/* The "canonical" type for this type node, which is used by frontends to
+ compare the type for equality with another type. If two types are
+ equal (based on the semantics of the language), then they will have
+ equivalent TYPE_CANONICAL entries.
+
+ As a special case, if TYPE_CANONICAL is NULL_TREE, and thus
+ TYPE_STRUCTURAL_EQUALITY_P is true, then it cannot
+ be used for comparison against other types. Instead, the type is
+ said to require structural equality checks, described in
+ TYPE_STRUCTURAL_EQUALITY_P.
+
+ For unqualified aggregate and function types the middle-end relies on
+ TYPE_CANONICAL to tell whether two variables can be assigned
+ to each other without a conversion. The middle-end also makes sure
+ to assign the same alias-sets to the type partition with equal
+ TYPE_CANONICAL of their unqualified variants. */
+#define TYPE_CANONICAL(NODE) (TYPE_CHECK (NODE)->type_common.canonical)
+/* Indicates that the type node requires structural equality
+ checks. The compiler will need to look at the composition of the
+ type to determine whether it is equal to another type, rather than
+ just comparing canonical type pointers. For instance, we would need
+ to look at the return and parameter types of a FUNCTION_TYPE
+ node. */
+#define TYPE_STRUCTURAL_EQUALITY_P(NODE) (TYPE_CANONICAL (NODE) == NULL_TREE)
+/* Sets the TYPE_CANONICAL field to NULL_TREE, indicating that the
+ type node requires structural equality. */
+#define SET_TYPE_STRUCTURAL_EQUALITY(NODE) (TYPE_CANONICAL (NODE) = NULL_TREE)
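+
+/* Usage sketch (illustrative only, not part of the imported header).
+   A cheap equality test tries the canonical pointers first and only
+   falls back to a structural walk when either side requires it:
+
+     if (TYPE_STRUCTURAL_EQUALITY_P (t1) || TYPE_STRUCTURAL_EQUALITY_P (t2))
+       ... compare the two types member by member ...
+     else if (TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2))
+       ... the types are equal ...
+  */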
+
+#define TYPE_IBIT(NODE) (GET_MODE_IBIT (TYPE_MODE (NODE)))
+#define TYPE_FBIT(NODE) (GET_MODE_FBIT (TYPE_MODE (NODE)))
+
+/* The (language-specific) typed-based alias set for this type.
+ Objects whose TYPE_ALIAS_SETs are different cannot alias each
+ other. If the TYPE_ALIAS_SET is -1, no alias set has yet been
+ assigned to this type. If the TYPE_ALIAS_SET is 0, objects of this
+ type can alias objects of any type. */
+#define TYPE_ALIAS_SET(NODE) (TYPE_CHECK (NODE)->type_common.alias_set)
+
+/* Nonzero iff the typed-based alias set for this type has been
+ calculated. */
+#define TYPE_ALIAS_SET_KNOWN_P(NODE) \
+ (TYPE_CHECK (NODE)->type_common.alias_set != -1)
+
+/* A TREE_LIST of IDENTIFIER nodes of the attributes that apply
+ to this type. */
+#define TYPE_ATTRIBUTES(NODE) (TYPE_CHECK (NODE)->type_common.attributes)
+
+/* Raw access to the alignment field. */
+#define TYPE_ALIGN_RAW(NODE) \
+ (TYPE_CHECK (NODE)->type_common.align)
+
+/* The alignment necessary for objects of this type.
+ The value is an int, measured in bits, and must be a power of two.
+ We also support an "alignment" of zero. */
+#define TYPE_ALIGN(NODE) \
+ (TYPE_ALIGN_RAW (NODE) \
+ ? ((unsigned)1) << (TYPE_ALIGN_RAW(NODE) - 1) : 0)
+
+/* Specify that TYPE_ALIGN(NODE) is X. */
+#define SET_TYPE_ALIGN(NODE, X) \
+ (TYPE_CHECK (NODE)->type_common.align = ffs_hwi (X))
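+
+/* Worked example of the encoding above (illustrative only, not part of
+   the imported header).  The raw field stores log2 of the alignment
+   plus one, so for an alignment of 16 bits:
+
+     SET_TYPE_ALIGN (t, 16);   stores ffs_hwi (16) == 5
+     TYPE_ALIGN (t)            yields 1 << (5 - 1) == 16
+
+   and a raw value of zero decodes to the special alignment 0.  */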
+
+/* 1 if the alignment for this type was requested by "aligned" attribute,
+ 0 if it is the default for this type. */
+#define TYPE_USER_ALIGN(NODE) (TYPE_CHECK (NODE)->base.u.bits.user_align)
+
+/* The alignment for NODE, in bytes. */
+#define TYPE_ALIGN_UNIT(NODE) (TYPE_ALIGN (NODE) / BITS_PER_UNIT)
+
+/* The minimum alignment necessary for objects of this type without
+ warning. The value is an int, measured in bits. */
+#define TYPE_WARN_IF_NOT_ALIGN(NODE) \
+ (TYPE_CHECK (NODE)->type_common.warn_if_not_align \
+ ? ((unsigned)1) << ((NODE)->type_common.warn_if_not_align - 1) : 0)
+
+/* Specify that TYPE_WARN_IF_NOT_ALIGN(NODE) is X. */
+#define SET_TYPE_WARN_IF_NOT_ALIGN(NODE, X) \
+ (TYPE_CHECK (NODE)->type_common.warn_if_not_align = ffs_hwi (X))
+
+/* If your language allows you to declare types, and you want debug info
+ for them, then you need to generate corresponding TYPE_DECL nodes.
+ These "stub" TYPE_DECL nodes have no name, and simply point at the
+ type node. You then set the TYPE_STUB_DECL field of the type node
+ to point back at the TYPE_DECL node. This allows the debug routines
+ to know that the two nodes represent the same type, so that we only
+ get one debug info record for them. */
+#define TYPE_STUB_DECL(NODE) (TREE_CHAIN (TYPE_CHECK (NODE)))
+
+/* In a RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE or ARRAY_TYPE, it means
+ the type has BLKmode only because it lacks the alignment required for
+ its size. */
+#define TYPE_NO_FORCE_BLK(NODE) \
+ (TYPE_CHECK (NODE)->type_common.no_force_blk_flag)
+
+/* Nonzero in a type considered volatile as a whole. */
+#define TYPE_VOLATILE(NODE) (TYPE_CHECK (NODE)->base.volatile_flag)
+
+/* Nonzero in a type considered atomic as a whole. */
+#define TYPE_ATOMIC(NODE) (TYPE_CHECK (NODE)->base.u.bits.atomic_flag)
+
+/* Means this type is const-qualified. */
+#define TYPE_READONLY(NODE) (TYPE_CHECK (NODE)->base.readonly_flag)
+
+/* If nonzero, this type is `restrict'-qualified, in the C sense of
+ the term. */
+#define TYPE_RESTRICT(NODE) (TYPE_CHECK (NODE)->type_common.restrict_flag)
+
+/* If nonzero, type's name shouldn't be emitted into debug info. */
+#define TYPE_NAMELESS(NODE) (TYPE_CHECK (NODE)->base.u.bits.nameless_flag)
+
+/* The address space the type is in. */
+#define TYPE_ADDR_SPACE(NODE) (TYPE_CHECK (NODE)->base.u.bits.address_space)
+
+/* Encode/decode the named memory support as part of the qualifier. If more
+ than 8 qualifiers are added, these macros need to be adjusted. */
+#define ENCODE_QUAL_ADDR_SPACE(NUM) ((NUM & 0xFF) << 8)
+#define DECODE_QUAL_ADDR_SPACE(X) (((X) >> 8) & 0xFF)
+
+/* Return all qualifiers except for the address space qualifiers. */
+#define CLEAR_QUAL_ADDR_SPACE(X) ((X) & ~0xFF00)
+
+/* Only keep the address space out of the qualifiers and discard the other
+ qualifiers. */
+#define KEEP_QUAL_ADDR_SPACE(X) ((X) & 0xFF00)
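+
+/* Worked example of the qualifier layout above (illustrative only, not
+   part of the imported header).  Address spaces occupy bits 8-15 of
+   the qualifier word:
+
+     ENCODE_QUAL_ADDR_SPACE (3)                         == 0x0300
+     DECODE_QUAL_ADDR_SPACE (0x0300 | TYPE_QUAL_CONST)  == 3
+     CLEAR_QUAL_ADDR_SPACE (0x0300 | TYPE_QUAL_CONST)   == TYPE_QUAL_CONST
+  */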
+
+/* The set of type qualifiers for this type. */
+#define TYPE_QUALS(NODE) \
+ ((int) ((TYPE_READONLY (NODE) * TYPE_QUAL_CONST) \
+ | (TYPE_VOLATILE (NODE) * TYPE_QUAL_VOLATILE) \
+ | (TYPE_ATOMIC (NODE) * TYPE_QUAL_ATOMIC) \
+ | (TYPE_RESTRICT (NODE) * TYPE_QUAL_RESTRICT) \
+ | (ENCODE_QUAL_ADDR_SPACE (TYPE_ADDR_SPACE (NODE)))))
+
+/* The same as TYPE_QUALS without the address space qualifications. */
+#define TYPE_QUALS_NO_ADDR_SPACE(NODE) \
+ ((int) ((TYPE_READONLY (NODE) * TYPE_QUAL_CONST) \
+ | (TYPE_VOLATILE (NODE) * TYPE_QUAL_VOLATILE) \
+ | (TYPE_ATOMIC (NODE) * TYPE_QUAL_ATOMIC) \
+ | (TYPE_RESTRICT (NODE) * TYPE_QUAL_RESTRICT)))
+
+/* The same as TYPE_QUALS without the address space and atomic
+ qualifications. */
+#define TYPE_QUALS_NO_ADDR_SPACE_NO_ATOMIC(NODE) \
+ ((int) ((TYPE_READONLY (NODE) * TYPE_QUAL_CONST) \
+ | (TYPE_VOLATILE (NODE) * TYPE_QUAL_VOLATILE) \
+ | (TYPE_RESTRICT (NODE) * TYPE_QUAL_RESTRICT)))
+
+/* These flags are available for each language front end to use internally. */
+#define TYPE_LANG_FLAG_0(NODE) (TYPE_CHECK (NODE)->type_common.lang_flag_0)
+#define TYPE_LANG_FLAG_1(NODE) (TYPE_CHECK (NODE)->type_common.lang_flag_1)
+#define TYPE_LANG_FLAG_2(NODE) (TYPE_CHECK (NODE)->type_common.lang_flag_2)
+#define TYPE_LANG_FLAG_3(NODE) (TYPE_CHECK (NODE)->type_common.lang_flag_3)
+#define TYPE_LANG_FLAG_4(NODE) (TYPE_CHECK (NODE)->type_common.lang_flag_4)
+#define TYPE_LANG_FLAG_5(NODE) (TYPE_CHECK (NODE)->type_common.lang_flag_5)
+#define TYPE_LANG_FLAG_6(NODE) (TYPE_CHECK (NODE)->type_common.lang_flag_6)
+#define TYPE_LANG_FLAG_7(NODE) (TYPE_CHECK (NODE)->type_common.lang_flag_7)
+
+/* Used to keep track of visited nodes in tree traversals. This is set to
+ 0 by copy_node and make_node. */
+#define TREE_VISITED(NODE) ((NODE)->base.visited)
+
+/* If set in an ARRAY_TYPE, indicates a string type (for languages
+ that distinguish string from array of char).
+ If set in an INTEGER_TYPE, indicates a character type. */
+#define TYPE_STRING_FLAG(NODE) \
+ (ARRAY_OR_INTEGER_TYPE_CHECK (NODE)->type_common.string_flag)
+
+/* If set for RECORD_TYPE or UNION_TYPE it indicates that the type conforms
+ to the C++ one definition rule. This is used for LTO canonical type
+ computation. */
+#define TYPE_CXX_ODR_P(NODE) \
+ (RECORD_OR_UNION_CHECK (NODE)->type_common.string_flag)
+
+/* Nonzero in a VECTOR_TYPE if the frontends should not emit warnings
+ about missing conversions to other vector types of the same size. */
+#define TYPE_VECTOR_OPAQUE(NODE) \
+ (VECTOR_TYPE_CHECK (NODE)->base.default_def_flag)
+
+/* Indicates that objects of this type must be initialized by calling a
+ function when they are created. */
+#define TYPE_NEEDS_CONSTRUCTING(NODE) \
+ (TYPE_CHECK (NODE)->type_common.needs_constructing_flag)
+
+/* Indicates that a UNION_TYPE object should be passed the same way that
+ the first union alternative would be passed, or that a RECORD_TYPE
+ object should be passed the same way that the first (and only) member
+ would be passed. */
+#define TYPE_TRANSPARENT_AGGR(NODE) \
+ (RECORD_OR_UNION_CHECK (NODE)->type_common.transparent_aggr_flag)
+
+/* For an ARRAY_TYPE, indicates that it is not permitted to take the
+ address of a component of the type. This is the counterpart of
+ DECL_NONADDRESSABLE_P for arrays; see the definition of this flag. */
+#define TYPE_NONALIASED_COMPONENT(NODE) \
+ (ARRAY_TYPE_CHECK (NODE)->type_common.transparent_aggr_flag)
+
+/* For an ARRAY_TYPE, a RECORD_TYPE, a UNION_TYPE or a QUAL_UNION_TYPE
+ whether the array is typeless storage or the type contains a member
+ with this flag set. Such types are exempt from type-based alias
+ analysis. For ARRAY_TYPEs with AGGREGATE_TYPE_P element types
+ the flag should be inherited from the element type; it can change
+ when the type is finalized and because of that should not be used in
+ type hashing. For ARRAY_TYPEs with non-AGGREGATE_TYPE_P element types
+ the flag should not be changed after the array is created and should
+ be used in type hashing. */
+#define TYPE_TYPELESS_STORAGE(NODE) \
+ (TREE_CHECK4 (NODE, RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE, \
+ ARRAY_TYPE)->type_common.typeless_storage)
+
+/* Indicates that objects of this type should be laid out in as
+ compact a way as possible. */
+#define TYPE_PACKED(NODE) (TYPE_CHECK (NODE)->base.u.bits.packed_flag)
+
+/* Used by type_contains_placeholder_p to avoid recomputation.
+ Values are: 0 (unknown), 1 (false), 2 (true). Never access
+ this field directly. */
+#define TYPE_CONTAINS_PLACEHOLDER_INTERNAL(NODE) \
+ (TYPE_CHECK (NODE)->type_common.contains_placeholder_bits)
+
+/* Nonzero if RECORD_TYPE represents a final derivation of class. */
+#define TYPE_FINAL_P(NODE) \
+ (RECORD_OR_UNION_CHECK (NODE)->base.default_def_flag)
+
+/* The debug output functions use the symtab union field to store
+ information specific to the debugging format. The different debug
+ output hooks store different types in the union field. These three
+ macros are used to access different fields in the union. The debug
+ hooks are responsible for consistently using only a specific
+ macro. */
+
+/* Symtab field as an integer. Used by stabs generator in dbxout.cc to
+ hold the type's number in the generated stabs. */
+#define TYPE_SYMTAB_ADDRESS(NODE) \
+ (TYPE_CHECK (NODE)->type_common.symtab.address)
+
+/* Symtab field as a pointer to a DWARF DIE. Used by DWARF generator
+ in dwarf2out.cc to point to the DIE generated for the type. */
+#define TYPE_SYMTAB_DIE(NODE) \
+ (TYPE_CHECK (NODE)->type_common.symtab.die)
+
+/* The garbage collector needs to know the interpretation of the
+ symtab field. These constants represent the different types in the
+ union. */
+
+#define TYPE_SYMTAB_IS_ADDRESS (0)
+#define TYPE_SYMTAB_IS_DIE (1)
+
+#define TYPE_LANG_SPECIFIC(NODE) \
+ (TYPE_CHECK (NODE)->type_with_lang_specific.lang_specific)
+
+#define TYPE_VALUES(NODE) (ENUMERAL_TYPE_CHECK (NODE)->type_non_common.values)
+#define TYPE_DOMAIN(NODE) (ARRAY_TYPE_CHECK (NODE)->type_non_common.values)
+#define TYPE_FIELDS(NODE) \
+ (RECORD_OR_UNION_CHECK (NODE)->type_non_common.values)
+#define TYPE_CACHED_VALUES(NODE) (TYPE_CHECK (NODE)->type_non_common.values)
+#define TYPE_ARG_TYPES(NODE) \
+ (FUNC_OR_METHOD_CHECK (NODE)->type_non_common.values)
+#define TYPE_VALUES_RAW(NODE) (TYPE_CHECK (NODE)->type_non_common.values)
+
+#define TYPE_MIN_VALUE(NODE) \
+ (NUMERICAL_TYPE_CHECK (NODE)->type_non_common.minval)
+#define TYPE_NEXT_PTR_TO(NODE) \
+ (POINTER_TYPE_CHECK (NODE)->type_non_common.minval)
+#define TYPE_NEXT_REF_TO(NODE) \
+ (REFERENCE_TYPE_CHECK (NODE)->type_non_common.minval)
+#define TYPE_VFIELD(NODE) \
+ (RECORD_OR_UNION_CHECK (NODE)->type_non_common.minval)
+#define TYPE_MIN_VALUE_RAW(NODE) (TYPE_CHECK (NODE)->type_non_common.minval)
+
+#define TYPE_MAX_VALUE(NODE) \
+ (NUMERICAL_TYPE_CHECK (NODE)->type_non_common.maxval)
+#define TYPE_METHOD_BASETYPE(NODE) \
+ (FUNC_OR_METHOD_CHECK (NODE)->type_non_common.maxval)
+#define TYPE_OFFSET_BASETYPE(NODE) \
+ (OFFSET_TYPE_CHECK (NODE)->type_non_common.maxval)
+/* If non-NULL, this is an upper bound of the size (in bytes) of an
+ object of the given ARRAY_TYPE_NON_COMMON. This allows temporaries to be
+ allocated. */
+#define TYPE_ARRAY_MAX_SIZE(ARRAY_TYPE) \
+ (ARRAY_TYPE_CHECK (ARRAY_TYPE)->type_non_common.maxval)
+#define TYPE_MAX_VALUE_RAW(NODE) (TYPE_CHECK (NODE)->type_non_common.maxval)
+/* For record and union types, information about this type, as a base type
+ for itself. */
+#define TYPE_BINFO(NODE) (RECORD_OR_UNION_CHECK (NODE)->type_non_common.maxval)
+
+/* For types, used in a language-dependent way. */
+#define TYPE_LANG_SLOT_1(NODE) \
+ (TYPE_CHECK (NODE)->type_non_common.lang_1)
+
+/* Define accessor macros for information about type inheritance
+ and basetypes.
+
+ A "basetype" means a particular usage of a data type for inheritance
+ in another type. Each such basetype usage has its own "binfo"
+ object to describe it. The binfo object is a TREE_VEC node.
+
+ Inheritance is represented by the binfo nodes allocated for a
+ given type. For example, given types C and D, such that D is
+ inherited by C, 3 binfo nodes will be allocated: one for describing
+ the binfo properties of C, similarly one for D, and one for
+ describing the binfo properties of D as a base type for C.
+ Thus, given a pointer to class C, one can get a pointer to the binfo
+ of D acting as a basetype for C by looking at C's binfo's basetypes. */
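+
+/* Hypothetical illustration: given
+
+     struct D { };
+     struct C : D { };
+
+   three binfos exist: TYPE_BINFO (D), TYPE_BINFO (C), and the binfo
+   for D-as-base-of-C, reachable as
+   BINFO_BASE_BINFO (TYPE_BINFO (C), 0). */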
+
+/* BINFO specific flags. */
+
+/* Nonzero means that the derivation chain is via a `virtual' declaration. */
+#define BINFO_VIRTUAL_P(NODE) (TREE_BINFO_CHECK (NODE)->base.static_flag)
+
+/* Flags for language dependent use. */
+#define BINFO_FLAG_0(NODE) TREE_LANG_FLAG_0 (TREE_BINFO_CHECK (NODE))
+#define BINFO_FLAG_1(NODE) TREE_LANG_FLAG_1 (TREE_BINFO_CHECK (NODE))
+#define BINFO_FLAG_2(NODE) TREE_LANG_FLAG_2 (TREE_BINFO_CHECK (NODE))
+#define BINFO_FLAG_3(NODE) TREE_LANG_FLAG_3 (TREE_BINFO_CHECK (NODE))
+#define BINFO_FLAG_4(NODE) TREE_LANG_FLAG_4 (TREE_BINFO_CHECK (NODE))
+#define BINFO_FLAG_5(NODE) TREE_LANG_FLAG_5 (TREE_BINFO_CHECK (NODE))
+#define BINFO_FLAG_6(NODE) TREE_LANG_FLAG_6 (TREE_BINFO_CHECK (NODE))
+
+/* The actual data type node being inherited in this basetype. */
+#define BINFO_TYPE(NODE) TREE_TYPE (TREE_BINFO_CHECK (NODE))
+
+/* The offset where this basetype appears in its containing type.
+ BINFO_OFFSET slot holds the offset (in bytes)
+ from the base of the complete object to the base of the part of the
+ object that is allocated on behalf of this `type'.
+ This is always 0 except when there is multiple inheritance. */
+
+#define BINFO_OFFSET(NODE) (TREE_BINFO_CHECK (NODE)->binfo.offset)
+#define BINFO_OFFSET_ZEROP(NODE) (integer_zerop (BINFO_OFFSET (NODE)))
+
+/* The virtual function table belonging to this basetype. Virtual
+ function tables provide a mechanism for run-time method dispatching.
+ The entries of a virtual function table are language-dependent. */
+
+#define BINFO_VTABLE(NODE) (TREE_BINFO_CHECK (NODE)->binfo.vtable)
+
+/* The virtual functions in the virtual function table. This is
+ a TREE_LIST that is used as an initial approximation for building
+ a virtual function table for this basetype. */
+#define BINFO_VIRTUALS(NODE) (TREE_BINFO_CHECK (NODE)->binfo.virtuals)
+
+/* A vector of binfos for the direct basetypes inherited by this
+ basetype.
+
+ If this basetype describes type D as inherited in C, and if the
+ basetypes of D are E and F, then this vector contains binfos for
+ inheritance of E and F by C. */
+#define BINFO_BASE_BINFOS(NODE) (&TREE_BINFO_CHECK (NODE)->binfo.base_binfos)
+
+/* The number of basetypes for NODE. */
+#define BINFO_N_BASE_BINFOS(NODE) (BINFO_BASE_BINFOS (NODE)->length ())
+
+/* Accessor macro to get to the Nth base binfo of this binfo. */
+#define BINFO_BASE_BINFO(NODE,N) \
+ ((*BINFO_BASE_BINFOS (NODE))[(N)])
+#define BINFO_BASE_ITERATE(NODE,N,B) \
+ (BINFO_BASE_BINFOS (NODE)->iterate ((N), &(B)))
+#define BINFO_BASE_APPEND(NODE,T) \
+ (BINFO_BASE_BINFOS (NODE)->quick_push ((T)))
+
+/* For a BINFO record describing a virtual base class, i.e., one where
+ TREE_VIA_VIRTUAL is set, this field assists in locating the virtual
+ base. The actual contents are language-dependent. In the C++
+ front-end this field is an INTEGER_CST giving an offset into the
+ vtable where the offset to the virtual base can be found. */
+#define BINFO_VPTR_FIELD(NODE) (TREE_BINFO_CHECK (NODE)->binfo.vptr_field)
+
+/* Indicates the accesses this binfo has to its bases. The values are
+ access_public_node, access_protected_node or access_private_node.
+ If this array is not present, public access is implied. */
+#define BINFO_BASE_ACCESSES(NODE) \
+ (TREE_BINFO_CHECK (NODE)->binfo.base_accesses)
+
+#define BINFO_BASE_ACCESS(NODE,N) \
+ (*BINFO_BASE_ACCESSES (NODE))[(N)]
+#define BINFO_BASE_ACCESS_APPEND(NODE,T) \
+ BINFO_BASE_ACCESSES (NODE)->quick_push ((T))
+
+/* The index in the VTT where this subobject's sub-VTT can be found.
+ NULL_TREE if there is no sub-VTT. */
+#define BINFO_SUBVTT_INDEX(NODE) (TREE_BINFO_CHECK (NODE)->binfo.vtt_subvtt)
+
+/* The index in the VTT where the vptr for this subobject can be
+ found. NULL_TREE if there is no secondary vptr in the VTT. */
+#define BINFO_VPTR_INDEX(NODE) (TREE_BINFO_CHECK (NODE)->binfo.vtt_vptr)
+
+/* The BINFO_INHERITANCE_CHAIN points at the binfo for the base
+ inheriting this base for non-virtual bases. For virtual bases it
+ points either to the binfo for which this is a primary binfo, or to
+ the binfo of the most derived type. */
+#define BINFO_INHERITANCE_CHAIN(NODE) \
+ (TREE_BINFO_CHECK (NODE)->binfo.inheritance)
+
+
+/* Define fields and accessors for nodes representing declared names. */
+
+/* Nonzero if DECL represents an SSA name or a variable that can possibly
+ have an associated SSA name. */
+#define SSA_VAR_P(DECL) \
+ (TREE_CODE (DECL) == VAR_DECL \
+ || TREE_CODE (DECL) == PARM_DECL \
+ || TREE_CODE (DECL) == RESULT_DECL \
+ || TREE_CODE (DECL) == SSA_NAME)
+
+
+#define DECL_CHAIN(NODE) (TREE_CHAIN (DECL_MINIMAL_CHECK (NODE)))
+
+/* This is the name of the object as written by the user.
+ It is an IDENTIFIER_NODE. */
+#define DECL_NAME(NODE) (DECL_MINIMAL_CHECK (NODE)->decl_minimal.name)
+
+/* The IDENTIFIER_NODE associated with the TYPE_NAME field. */
+#define TYPE_IDENTIFIER(NODE) \
+ (TYPE_NAME (NODE) && DECL_P (TYPE_NAME (NODE)) \
+ ? DECL_NAME (TYPE_NAME (NODE)) : TYPE_NAME (NODE))
+
+/* Every ..._DECL node gets a unique number. */
+#define DECL_UID(NODE) (DECL_MINIMAL_CHECK (NODE)->decl_minimal.uid)
+
+/* DEBUG_EXPR_DECLs get negative UID numbers, to catch erroneous
+ uses. */
+#define DEBUG_TEMP_UID(NODE) (-DECL_UID (TREE_CHECK ((NODE), DEBUG_EXPR_DECL)))
+
+/* Every ..._DECL node gets a unique number that stays the same even
+ when the decl is copied by the inliner once it is set. */
+#define DECL_PT_UID(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.pt_uid == -1u \
+ ? (NODE)->decl_minimal.uid : (NODE)->decl_common.pt_uid)
+/* Initialize the ..._DECL node pt-uid to the decls uid. */
+#define SET_DECL_PT_UID(NODE, UID) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.pt_uid = (UID))
+/* Whether the ..._DECL node pt-uid has been initialized and thus needs to
+ be preserved when copying the decl. */
+#define DECL_PT_UID_SET_P(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.pt_uid != -1u)
+
+/* These two fields describe where in the source code the declaration
+ was. If the declaration appears in several places (as for a C
+ function that is declared first and then defined later), this
+ information should refer to the definition. */
+#define DECL_SOURCE_LOCATION(NODE) \
+ (DECL_MINIMAL_CHECK (NODE)->decl_minimal.locus)
+#define DECL_SOURCE_FILE(NODE) LOCATION_FILE (DECL_SOURCE_LOCATION (NODE))
+#define DECL_SOURCE_LINE(NODE) LOCATION_LINE (DECL_SOURCE_LOCATION (NODE))
+#define DECL_SOURCE_COLUMN(NODE) LOCATION_COLUMN (DECL_SOURCE_LOCATION (NODE))
+/* This decl was created by a front-end or back-end rather than by
+ user code, and has not been explicitly declared by the user -- when
+ that happens the source location is updated to the user's
+ source. This includes decls with no location (!). */
+#define DECL_IS_UNDECLARED_BUILTIN(DECL) \
+ (DECL_SOURCE_LOCATION (DECL) <= BUILTINS_LOCATION)
+
+/* For FIELD_DECLs, this is the RECORD_TYPE, UNION_TYPE, or
+ QUAL_UNION_TYPE node that the field is a member of. For VAR_DECL,
+ PARM_DECL, FUNCTION_DECL, LABEL_DECL, RESULT_DECL, and CONST_DECL
+ nodes, this points to either the FUNCTION_DECL for the containing
+ function, the RECORD_TYPE or UNION_TYPE for the containing type, or
+ NULL_TREE or a TRANSLATION_UNIT_DECL if the given decl has "file
+ scope". In particular, for VAR_DECLs which are virtual table pointers
+ (they have DECL_VIRTUAL set), we use DECL_CONTEXT to determine the type
+ they belong to. */
+#define DECL_CONTEXT(NODE) (DECL_MINIMAL_CHECK (NODE)->decl_minimal.context)
+#define DECL_FIELD_CONTEXT(NODE) \
+ (FIELD_DECL_CHECK (NODE)->decl_minimal.context)
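+
+/* Usage sketch (illustrative): for a function-local variable VAR,
+
+     tree fn = DECL_CONTEXT (var);
+
+   yields the containing FUNCTION_DECL, whereas a file-scope decl has
+   NULL_TREE or a TRANSLATION_UNIT_DECL here (see DECL_FILE_SCOPE_P
+   below). */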
+
+/* If nonzero, decl's name shouldn't be emitted into debug info. */
+#define DECL_NAMELESS(NODE) (DECL_MINIMAL_CHECK (NODE)->base.u.bits.nameless_flag)
+
+/* For any sort of a ..._DECL node, this points to the original (abstract)
+ decl node which this decl is an inlined/cloned instance of, or else it
+ is NULL indicating that this decl is not an instance of some other decl.
+
+ The C front-end also uses this in a nested declaration of an inline
+ function, to point back to the definition. */
+#define DECL_ABSTRACT_ORIGIN(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.abstract_origin)
+
+/* Like DECL_ABSTRACT_ORIGIN, but returns NODE if there's no abstract
+ origin. This is useful when setting the DECL_ABSTRACT_ORIGIN. */
+#define DECL_ORIGIN(NODE) \
+ (DECL_ABSTRACT_ORIGIN (NODE) ? DECL_ABSTRACT_ORIGIN (NODE) : (NODE))
+
+/* Nonzero for any sort of ..._DECL node means this decl node represents an
+ inline instance of some original (abstract) decl from an inline function;
+ suppress any warnings about shadowing some other variable. FUNCTION_DECL
+ nodes can also have their abstract origin set to themselves. */
+#define DECL_FROM_INLINE(NODE) \
+ (DECL_ABSTRACT_ORIGIN (NODE) != NULL_TREE \
+ && DECL_ABSTRACT_ORIGIN (NODE) != (NODE))
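+
+/* Illustrative sketch: after the inliner clones a decl V from an
+   original O, the following hold:
+
+     DECL_ABSTRACT_ORIGIN (v) == o
+     DECL_ORIGIN (v) == o
+     DECL_FROM_INLINE (v) is nonzero
+
+   while a decl that is not a clone has DECL_ORIGIN (v) == v. */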
+
+/* In a DECL this is the field where attributes are stored. */
+#define DECL_ATTRIBUTES(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.attributes)
+
+/* For a FUNCTION_DECL, holds the tree of BINDINGs.
+ For a TRANSLATION_UNIT_DECL, holds the namespace's BLOCK.
+ For a VAR_DECL, holds the initial value.
+ For a PARM_DECL, used for DECL_ARG_TYPE--default
+ values for parameters are encoded in the type of the function,
+ not in the PARM_DECL slot.
+ For a FIELD_DECL, this is used for enumeration values and the C
+ frontend uses it for temporarily storing the width of bit-fields.
+
+ ??? Need to figure out some way to check this isn't a PARM_DECL. */
+#define DECL_INITIAL(NODE) (DECL_COMMON_CHECK (NODE)->decl_common.initial)
+
+/* Holds the size of the datum, in bits, as a tree expression.
+ Need not be constant and may be null. May be less than TYPE_SIZE
+ for a C++ FIELD_DECL representing a base class subobject with its
+ own virtual base classes (which are laid out separately). */
+#define DECL_SIZE(NODE) (DECL_COMMON_CHECK (NODE)->decl_common.size)
+/* Likewise for the size in bytes. */
+#define DECL_SIZE_UNIT(NODE) (DECL_COMMON_CHECK (NODE)->decl_common.size_unit)
+#define DECL_ALIGN_RAW(NODE) (DECL_COMMON_CHECK (NODE)->decl_common.align)
+/* Returns the alignment required for the datum, in bits. It must
+ be a power of two, but an "alignment" of zero is supported
+ (e.g. as "uninitialized" sentinel). */
+#define DECL_ALIGN(NODE) \
+ (DECL_ALIGN_RAW (NODE) \
+ ? ((unsigned)1) << (DECL_ALIGN_RAW (NODE) - 1) : 0)
+/* Specify that DECL_ALIGN(NODE) is X. */
+#define SET_DECL_ALIGN(NODE, X) \
+ (DECL_ALIGN_RAW (NODE) = ffs_hwi (X))
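+
+/* Worked example (illustrative): the alignment is stored as
+   log2 (align) + 1, so SET_DECL_ALIGN (decl, 32) stores
+   ffs_hwi (32) == 6 into decl_common.align, and DECL_ALIGN (decl)
+   then recovers 1 << (6 - 1) == 32; a raw value of 0 remains the
+   "uninitialized" sentinel. */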
+
+/* The minimum alignment necessary for the datum, in bits, without
+ warning. */
+#define DECL_WARN_IF_NOT_ALIGN_RAW(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.warn_if_not_align)
+#define DECL_WARN_IF_NOT_ALIGN(NODE) \
+ (DECL_WARN_IF_NOT_ALIGN_RAW (NODE) \
+ ? ((unsigned)1) << (DECL_WARN_IF_NOT_ALIGN_RAW (NODE) - 1) : 0)
+
+/* Specify that DECL_WARN_IF_NOT_ALIGN(NODE) is X. */
+#define SET_DECL_WARN_IF_NOT_ALIGN(NODE, X) \
+ (DECL_WARN_IF_NOT_ALIGN_RAW (NODE) = ffs_hwi (X))
+
+/* The alignment of NODE, in bytes. */
+#define DECL_ALIGN_UNIT(NODE) (DECL_ALIGN (NODE) / BITS_PER_UNIT)
+/* Set if the alignment of this DECL has been set by the user, for
+ example with an 'aligned' attribute. */
+#define DECL_USER_ALIGN(NODE) \
+ (DECL_COMMON_CHECK (NODE)->base.u.bits.user_align)
+/* Holds the machine mode corresponding to the declaration of a variable or
+ field. Always equal to TYPE_MODE (TREE_TYPE (decl)) except for a
+ FIELD_DECL. */
+#define DECL_MODE(NODE) (DECL_COMMON_CHECK (NODE)->decl_common.mode)
+#define SET_DECL_MODE(NODE, MODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.mode = (MODE))
+
+/* For FUNCTION_DECL, if it is built-in, this identifies which built-in
+ operation it is. This is only intended for low-level accesses;
+ normally DECL_FUNCTION_CODE, DECL_FE_FUNCTION_CODE or DECL_MD_FUNCTION
+ should be used instead. */
+#define DECL_UNCHECKED_FUNCTION_CODE(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.function_code)
+
+/* Test if FCODE is a function code for an alloca operation. */
+#define ALLOCA_FUNCTION_CODE_P(FCODE) \
+ ((FCODE) == BUILT_IN_ALLOCA \
+ || (FCODE) == BUILT_IN_ALLOCA_WITH_ALIGN \
+ || (FCODE) == BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX)
+
+/* Generate case for an alloca operation. */
+#define CASE_BUILT_IN_ALLOCA \
+ case BUILT_IN_ALLOCA: \
+ case BUILT_IN_ALLOCA_WITH_ALIGN: \
+ case BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX
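+
+/* Usage sketch (illustrative): the macro supplies the case labels, so
+   a switch over a function code can be written as
+
+     switch (fcode)
+       {
+       CASE_BUILT_IN_ALLOCA:
+         return true;
+       default:
+         return false;
+       }  */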
+
+#define DECL_FUNCTION_PERSONALITY(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.personality)
+
+/* Nonzero for a given ..._DECL node means that the name of this node should
+ be ignored for symbolic debug purposes. For a TYPE_DECL, this means that
+ the associated type should be ignored. For a FUNCTION_DECL, the body of
+ the function should also be ignored. */
+#define DECL_IGNORED_P(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.ignored_flag)
+
+/* Nonzero for a given ..._DECL node means that this node represents an
+ "abstract instance" of the given declaration (e.g. in the original
+ declaration of an inline function). When generating symbolic debugging
+ information, we mustn't try to generate any address information for nodes
+ marked as "abstract instances" because we don't actually generate
+ any code or allocate any data space for such instances. */
+#define DECL_ABSTRACT_P(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.abstract_flag)
+
+/* Language-specific decl information. */
+#define DECL_LANG_SPECIFIC(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.lang_specific)
+
+/* In a VAR_DECL or FUNCTION_DECL, nonzero means external reference:
+ do not allocate storage, and refer to a definition elsewhere. Note that
+ this does not necessarily imply the entity represented by NODE
+ has no program source-level definition in this translation unit. For
+ example, for a FUNCTION_DECL, DECL_SAVED_TREE may be non-NULL and
+ DECL_EXTERNAL may be true simultaneously; that can be the case for
+ a C99 "extern inline" function. */
+#define DECL_EXTERNAL(NODE) (DECL_COMMON_CHECK (NODE)->decl_common.decl_flag_1)
+
+/* Nonzero in a ..._DECL means this variable is ref'd from a nested function.
+ For VAR_DECL nodes, PARM_DECL nodes, and FUNCTION_DECL nodes.
+
+ For LABEL_DECL nodes, nonzero if nonlocal gotos to the label are permitted.
+
+ Also set in some languages for variables, etc., outside the normal
+ lexical scope, such as class instance variables. */
+#define DECL_NONLOCAL(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.nonlocal_flag)
+
+/* Used in VAR_DECLs to indicate that the variable is a vtable.
+ Used in FIELD_DECLs for vtable pointers.
+ Used in FUNCTION_DECLs to indicate that the function is virtual. */
+#define DECL_VIRTUAL_P(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.virtual_flag)
+
+/* Used to indicate that this DECL represents a compiler-generated entity. */
+#define DECL_ARTIFICIAL(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.artificial_flag)
+
+/* Additional flags for language-specific uses. */
+#define DECL_LANG_FLAG_0(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.lang_flag_0)
+#define DECL_LANG_FLAG_1(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.lang_flag_1)
+#define DECL_LANG_FLAG_2(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.lang_flag_2)
+#define DECL_LANG_FLAG_3(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.lang_flag_3)
+#define DECL_LANG_FLAG_4(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.lang_flag_4)
+#define DECL_LANG_FLAG_5(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.lang_flag_5)
+#define DECL_LANG_FLAG_6(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.lang_flag_6)
+#define DECL_LANG_FLAG_7(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.lang_flag_7)
+#define DECL_LANG_FLAG_8(NODE) \
+ (DECL_COMMON_CHECK (NODE)->decl_common.lang_flag_8)
+
+/* Nonzero for a scope which is equal to file scope. */
+#define SCOPE_FILE_SCOPE_P(EXP) \
+ (! (EXP) || TREE_CODE (EXP) == TRANSLATION_UNIT_DECL)
+/* Nonzero for a decl which is at file scope. */
+#define DECL_FILE_SCOPE_P(EXP) SCOPE_FILE_SCOPE_P (DECL_CONTEXT (EXP))
+/* Nonzero for a type which is at file scope. */
+#define TYPE_FILE_SCOPE_P(EXP) SCOPE_FILE_SCOPE_P (TYPE_CONTEXT (EXP))
+
+/* Nonzero for a decl that is decorated using attribute used.
+ This indicates to compiler tools that this decl needs to be preserved. */
+#define DECL_PRESERVE_P(DECL) \
+ DECL_COMMON_CHECK (DECL)->decl_common.preserve_flag
+
+/* Nonzero for a decl that is decorated with the "noinit" attribute.
+ decls with this attribute are placed into the ".noinit" section, so they are
+ not initialized by the target's startup code. */
+#define DECL_NOINIT_P(DECL) \
+ (DECL_P (DECL) \
+ && (lookup_attribute ("noinit", DECL_ATTRIBUTES (DECL)) != NULL_TREE))
+
+/* Nonzero for a decl that is decorated with the "persistent" attribute.
+ decls with this attribute are placed into the ".persistent" section, so they
+ are not initialized by the target's startup code. */
+#define DECL_PERSISTENT_P(DECL) \
+ (DECL_P (DECL) \
+ && (lookup_attribute ("persistent", DECL_ATTRIBUTES (DECL)) != NULL_TREE))
+
+/* For function local variables of COMPLEX and VECTOR types,
+ indicates that the variable is not aliased, and that all
+ modifications to the variable have been adjusted so that
+ they are killing assignments. Thus the variable may now
+ be treated as a GIMPLE register, and use real instead of
+ virtual ops in SSA form. */
+#define DECL_NOT_GIMPLE_REG_P(DECL) \
+ DECL_COMMON_CHECK (DECL)->decl_common.not_gimple_reg_flag
+
+extern tree decl_value_expr_lookup (tree);
+extern void decl_value_expr_insert (tree, tree);
+
+/* In a VAR_DECL or PARM_DECL, the location at which the value may be found,
+ if transformations have made this more complicated than evaluating the
+ decl itself. */
+#define DECL_HAS_VALUE_EXPR_P(NODE) \
+ (TREE_CHECK3 (NODE, VAR_DECL, PARM_DECL, RESULT_DECL) \
+ ->decl_common.decl_flag_2)
+#define DECL_VALUE_EXPR(NODE) \
+ (decl_value_expr_lookup (DECL_WRTL_CHECK (NODE)))
+#define SET_DECL_VALUE_EXPR(NODE, VAL) \
+ (decl_value_expr_insert (DECL_WRTL_CHECK (NODE), VAL))
+
+/* Holds the RTL expression for the value of a variable or function.
+ This value can be evaluated lazily for functions, variables with
+ static storage duration, and labels. */
+#define DECL_RTL(NODE) \
+ (DECL_WRTL_CHECK (NODE)->decl_with_rtl.rtl \
+ ? (NODE)->decl_with_rtl.rtl \
+ : (make_decl_rtl (NODE), (NODE)->decl_with_rtl.rtl))
+
+/* Set the DECL_RTL for NODE to RTL. */
+#define SET_DECL_RTL(NODE, RTL) set_decl_rtl (NODE, RTL)
+
+/* Returns nonzero if NODE is a tree node that can contain RTL. */
+#define HAS_RTL_P(NODE) (CODE_CONTAINS_STRUCT (TREE_CODE (NODE), TS_DECL_WRTL))
+
+/* Returns nonzero if the DECL_RTL for NODE has already been set. */
+#define DECL_RTL_SET_P(NODE) \
+ (HAS_RTL_P (NODE) && DECL_WRTL_CHECK (NODE)->decl_with_rtl.rtl != NULL)
+
+/* Copy the RTL from SRC_DECL to DST_DECL. If the RTL was not set for
+ SRC_DECL, it will not be set for DST_DECL; this is a lazy copy. */
+#define COPY_DECL_RTL(SRC_DECL, DST_DECL) \
+ (DECL_WRTL_CHECK (DST_DECL)->decl_with_rtl.rtl \
+ = DECL_WRTL_CHECK (SRC_DECL)->decl_with_rtl.rtl)
+
+/* The DECL_RTL for NODE, if it is set, or NULL, if it is not set. */
+#define DECL_RTL_IF_SET(NODE) (DECL_RTL_SET_P (NODE) ? DECL_RTL (NODE) : NULL)
+
+#if (GCC_VERSION >= 2007)
+#define DECL_RTL_KNOWN_SET(decl) __extension__ \
+({ tree const __d = (decl); \
+ gcc_checking_assert (DECL_RTL_SET_P (__d)); \
+ /* Dereference it so the compiler knows it can't be NULL even \
+ without assertion checking. */ \
+ &*DECL_RTL_IF_SET (__d); })
+#else
+#define DECL_RTL_KNOWN_SET(decl) (&*DECL_RTL_IF_SET (decl))
+#endif
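+
+/* Illustrative note: DECL_RTL creates the RTL on first use, so code
+   that must not trigger lazy creation should test first:
+
+     rtx x = DECL_RTL_IF_SET (decl);   (NULL when the RTL is unset)
+
+   rather than calling DECL_RTL directly. */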
+
+/* In VAR_DECL and PARM_DECL nodes, nonzero means declared `register'. */
+#define DECL_REGISTER(NODE) (DECL_WRTL_CHECK (NODE)->decl_common.decl_flag_0)
+
+/* In a FIELD_DECL, this is the field position, counting in bytes, of the
+ DECL_OFFSET_ALIGN-bit-sized word containing the bit closest to the beginning
+ of the structure. */
+#define DECL_FIELD_OFFSET(NODE) (FIELD_DECL_CHECK (NODE)->field_decl.offset)
+
+/* In a FIELD_DECL, this is the offset, in bits, of the first bit of the
+ field from DECL_FIELD_OFFSET. This field may be nonzero even for fields
+ that are not bit fields (since DECL_OFFSET_ALIGN may be larger than the
+ natural alignment of the field's type). */
+#define DECL_FIELD_BIT_OFFSET(NODE) \
+ (FIELD_DECL_CHECK (NODE)->field_decl.bit_offset)
+
+/* In a FIELD_DECL, this indicates whether the field was a bit-field and
+ if so, the type that was originally specified for it.
+ TREE_TYPE may have been modified (in finish_struct). */
+#define DECL_BIT_FIELD_TYPE(NODE) \
+ (FIELD_DECL_CHECK (NODE)->field_decl.bit_field_type)
+
+/* In a FIELD_DECL of a RECORD_TYPE, this is a pointer to the storage
+ representative FIELD_DECL. */
+#define DECL_BIT_FIELD_REPRESENTATIVE(NODE) \
+ (FIELD_DECL_CHECK (NODE)->field_decl.qualifier)
+
+/* For a FIELD_DECL in a QUAL_UNION_TYPE, records the expression which,
+ if nonzero, indicates that the field occupies the type. */
+#define DECL_QUALIFIER(NODE) (FIELD_DECL_CHECK (NODE)->field_decl.qualifier)
+
+/* For FIELD_DECLs, off_align holds the number of low-order bits of
+ DECL_FIELD_OFFSET which are known to be always zero.
+ DECL_OFFSET_ALIGN thus returns the alignment that DECL_FIELD_OFFSET
+ has. */
+#define DECL_OFFSET_ALIGN(NODE) \
+ (((unsigned HOST_WIDE_INT)1) << FIELD_DECL_CHECK (NODE)->decl_common.off_align)
+
+/* Specify that DECL_OFFSET_ALIGN(NODE) is X. */
+#define SET_DECL_OFFSET_ALIGN(NODE, X) \
+ (FIELD_DECL_CHECK (NODE)->decl_common.off_align = ffs_hwi (X) - 1)
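+
+/* Worked example (illustrative): for a FIELD_DECL F whose offsets are
+   host-representable constants, the bit position within the record is
+
+     tree_to_uhwi (DECL_FIELD_OFFSET (f)) * BITS_PER_UNIT
+       + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f))
+
+   which is what int_bit_position computes in the common case. */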
+
+/* For FIELD_DECLs, DECL_FCONTEXT is the *first* base class in
+ which this FIELD_DECL is defined. This information is needed when
+ writing debugging information about vfield and vbase decls for C++. */
+#define DECL_FCONTEXT(NODE) (FIELD_DECL_CHECK (NODE)->field_decl.fcontext)
+
+/* In a FIELD_DECL, indicates this field should be bit-packed. */
+#define DECL_PACKED(NODE) (FIELD_DECL_CHECK (NODE)->base.u.bits.packed_flag)
+
+/* Nonzero in a FIELD_DECL means it is a bit field, and must be accessed
+ specially. */
+#define DECL_BIT_FIELD(NODE) (FIELD_DECL_CHECK (NODE)->decl_common.decl_flag_1)
+
+/* In a FIELD_DECL, indicates this field should be ignored for ABI decisions
+ like passing/returning containing struct by value.
+ Set for C++17 empty base artificial FIELD_DECLs as well as
+ empty [[no_unique_address]] non-static data members. */
+#define DECL_FIELD_ABI_IGNORED(NODE) \
+ (!DECL_BIT_FIELD (NODE) && (NODE)->decl_common.decl_flag_0)
+#define SET_DECL_FIELD_ABI_IGNORED(NODE, VAL) \
+ do { \
+ gcc_checking_assert (!DECL_BIT_FIELD (NODE)); \
+ FIELD_DECL_CHECK (NODE)->decl_common.decl_flag_0 = (VAL); \
+ } while (0)
+
+/* In a FIELD_DECL, indicates a C++ zero-width bit-field that used to be
+ removed from the IL (from PR42217 until PR101539), which changed
+ the ABI on several targets. This flag is provided so that the backends
+ can decide on the ABI with zero-width bit-fields and emit -Wpsabi
+ warnings. */
+#define DECL_FIELD_CXX_ZERO_WIDTH_BIT_FIELD(NODE) \
+ (DECL_BIT_FIELD (NODE) && (NODE)->decl_common.decl_flag_0)
+#define SET_DECL_FIELD_CXX_ZERO_WIDTH_BIT_FIELD(NODE, VAL) \
+ do { \
+ gcc_checking_assert (DECL_BIT_FIELD (NODE)); \
+ FIELD_DECL_CHECK (NODE)->decl_common.decl_flag_0 = (VAL); \
+ } while (0)
+
+/* Used in a FIELD_DECL to indicate that we cannot form the address of
+ this component. This makes it possible for Type-Based Alias Analysis
+ to disambiguate accesses to this field with indirect accesses using
+ the field's type:
+
+ struct S { int i; } s;
+ int *p;
+
+ If the flag is set on 'i', TBAA computes that s.i and *p never conflict.
+
+ From the implementation's viewpoint, the alias set of the type of the
+ field 'i' (int) will not be recorded as a subset of that of the type of
+ 's' (struct S) in record_component_aliases. The counterpart is that
+ accesses to s.i must not be given the alias set of the type of 'i'
+ (int) but instead directly that of the type of 's' (struct S). */
+#define DECL_NONADDRESSABLE_P(NODE) \
+ (FIELD_DECL_CHECK (NODE)->decl_common.decl_flag_2)
+
+/* Used in a FIELD_DECL to indicate that this field is padding. */
+#define DECL_PADDING_P(NODE) \
+ (FIELD_DECL_CHECK (NODE)->decl_common.decl_flag_3)
+
+/* Used in a FIELD_DECL to indicate whether this field is not a flexible
+ array member. This is only valid for the last array type field of a
+ structure. */
+#define DECL_NOT_FLEXARRAY(NODE) \
+ (FIELD_DECL_CHECK (NODE)->decl_common.decl_not_flexarray)
+
+/* A numeric unique identifier for a LABEL_DECL. The UID allocation is
+ dense, unique within any one function, and may be used to index arrays.
+ If the value is -1, then no UID has been assigned. */
+#define LABEL_DECL_UID(NODE) \
+ (LABEL_DECL_CHECK (NODE)->label_decl.label_decl_uid)
+
+/* In a LABEL_DECL, the EH region number for which the label is the
+ post_landing_pad. */
+#define EH_LANDING_PAD_NR(NODE) \
+ (LABEL_DECL_CHECK (NODE)->label_decl.eh_landing_pad_nr)
+
+/* For a PARM_DECL, records the data type used to pass the argument,
+ which may be different from the type seen in the program. */
+#define DECL_ARG_TYPE(NODE) (PARM_DECL_CHECK (NODE)->decl_common.initial)
+
+/* For PARM_DECL, holds an RTL for the stack slot or register
+ where the data was actually passed. */
+#define DECL_INCOMING_RTL(NODE) \
+ (PARM_DECL_CHECK (NODE)->parm_decl.incoming_rtl)
+
+/* Nonzero for a given ..._DECL node means that no warnings should be
+ generated just because this node is unused. */
+#define DECL_IN_SYSTEM_HEADER(NODE) \
+ (in_system_header_at (DECL_SOURCE_LOCATION (NODE)))
+
+/* Used to indicate that the linkage status of this DECL is not yet known,
+ so it should not be output now. */
+#define DECL_DEFER_OUTPUT(NODE) \
+ (DECL_WITH_VIS_CHECK (NODE)->decl_with_vis.defer_output)
+
+/* In a VAR_DECL that's static,
+ nonzero if the space is in the text section. */
+#define DECL_IN_TEXT_SECTION(NODE) \
+ (VAR_DECL_CHECK (NODE)->decl_with_vis.in_text_section)
+
+/* In a VAR_DECL that's static,
+ nonzero if it belongs to the global constant pool. */
+#define DECL_IN_CONSTANT_POOL(NODE) \
+ (VAR_DECL_CHECK (NODE)->decl_with_vis.in_constant_pool)
+
+/* Nonzero for a given ..._DECL node means that this node should be
+ put in .common, if possible. If a DECL_INITIAL is given, and it
+ is not error_mark_node, then the decl cannot be put in .common. */
+#define DECL_COMMON(NODE) \
+ (DECL_WITH_VIS_CHECK (NODE)->decl_with_vis.common_flag)
+
+/* In a VAR_DECL, nonzero if the decl is a register variable with
+ an explicit asm specification. */
+#define DECL_HARD_REGISTER(NODE) \
+ (VAR_DECL_CHECK (NODE)->decl_with_vis.hard_register)
+
+/* Used to indicate that this DECL has weak linkage. */
+#define DECL_WEAK(NODE) (DECL_WITH_VIS_CHECK (NODE)->decl_with_vis.weak_flag)
+
+/* Used to indicate that the DECL is a dllimport. */
+#define DECL_DLLIMPORT_P(NODE) \
+ (DECL_WITH_VIS_CHECK (NODE)->decl_with_vis.dllimport_flag)
+
+/* Used in a DECL to indicate that, even if it is TREE_PUBLIC, it need
+ not be put out unless it is needed in this translation unit.
+ Entities like this are shared across translation units (like weak
+ entities), but are guaranteed to be generated by any translation
+ unit that needs them, and therefore need not be put out anywhere
+ where they are not needed. DECL_COMDAT is just a hint to the
+ back-end; it is up to front-ends which set this flag to ensure
+ that there will never be any harm, other than bloat, in putting out
+ something which is DECL_COMDAT. */
+#define DECL_COMDAT(NODE) \
+ (DECL_WITH_VIS_CHECK (NODE)->decl_with_vis.comdat_flag)
+
+#define DECL_COMDAT_GROUP(NODE) \
+ decl_comdat_group (NODE)
+
+/* Used in TREE_PUBLIC decls to indicate that copies of this DECL in
+ multiple translation units should be merged. */
+#define DECL_ONE_ONLY(NODE) (DECL_COMDAT_GROUP (NODE) != NULL_TREE \
+ && (TREE_PUBLIC (NODE) || DECL_EXTERNAL (NODE)))
+
+/* The name of the object as the assembler will see it (but before any
+ translations made by ASM_OUTPUT_LABELREF). Often this is the same
+ as DECL_NAME. It is an IDENTIFIER_NODE.
+
+ The ASSEMBLER_NAME of a TYPE_DECL may store the global name of the type
+ used for One Definition Rule based type merging at LTO. It is computed
+ only for C++ during LTO compilation. */
+#define DECL_ASSEMBLER_NAME(NODE) decl_assembler_name (NODE)
+
+/* Raw accessor for DECL_ASSEMBLER_NAME. */
+#define DECL_ASSEMBLER_NAME_RAW(NODE) \
+ (DECL_WITH_VIS_CHECK (NODE)->decl_with_vis.assembler_name)
+
+/* Return true if NODE is a node that can contain a DECL_ASSEMBLER_NAME.
+ This is true of all DECL nodes except FIELD_DECL. */
+#define HAS_DECL_ASSEMBLER_NAME_P(NODE) \
+ (CODE_CONTAINS_STRUCT (TREE_CODE (NODE), TS_DECL_WITH_VIS))
+
+/* Returns nonzero if the DECL_ASSEMBLER_NAME for NODE has been set. If zero,
+ the NODE might still have a DECL_ASSEMBLER_NAME -- it just hasn't been set
+ yet. */
+#define DECL_ASSEMBLER_NAME_SET_P(NODE) \
+ (DECL_ASSEMBLER_NAME_RAW (NODE) != NULL_TREE)
+
+/* Set the DECL_ASSEMBLER_NAME for NODE to NAME. */
+#define SET_DECL_ASSEMBLER_NAME(NODE, NAME) \
+ overwrite_decl_assembler_name (NODE, NAME)
+
+/* Copy the DECL_ASSEMBLER_NAME from SRC_DECL to DST_DECL. Note that
+ if SRC_DECL's DECL_ASSEMBLER_NAME has not yet been set, using this
+ macro will not cause the DECL_ASSEMBLER_NAME to be set, but will
+ clear DECL_ASSEMBLER_NAME of DST_DECL, if it was already set. In
+ other words, the semantics of using this macro are different from
+ saying:
+
+ SET_DECL_ASSEMBLER_NAME(DST_DECL, DECL_ASSEMBLER_NAME (SRC_DECL))
+
+ which will try to set the DECL_ASSEMBLER_NAME for SRC_DECL. */
+
+#define COPY_DECL_ASSEMBLER_NAME(SRC_DECL, DST_DECL) \
+ SET_DECL_ASSEMBLER_NAME (DST_DECL, DECL_ASSEMBLER_NAME_RAW (SRC_DECL))
+
+/* Records the section name in a section attribute. Used to pass
+ the name from decl_attributes to make_function_rtl and make_decl_rtl. */
+#define DECL_SECTION_NAME(NODE) decl_section_name (NODE)
+
+/* Nonzero in a decl means that the gimplifier has seen (or placed)
+ this variable in a BIND_EXPR. */
+#define DECL_SEEN_IN_BIND_EXPR_P(NODE) \
+ (DECL_WITH_VIS_CHECK (NODE)->decl_with_vis.seen_in_bind_expr)
+
+/* Value of the decl's visibility attribute. */
+#define DECL_VISIBILITY(NODE) \
+ (DECL_WITH_VIS_CHECK (NODE)->decl_with_vis.visibility)
+
+/* Nonzero means that the decl (or an enclosing scope) had its
+ visibility specified rather than being inferred. */
+#define DECL_VISIBILITY_SPECIFIED(NODE) \
+ (DECL_WITH_VIS_CHECK (NODE)->decl_with_vis.visibility_specified)
+
+/* In a VAR_DECL, the model to use if the data should be allocated from
+ thread-local storage. */
+#define DECL_TLS_MODEL(NODE) decl_tls_model (NODE)
+
+/* In a VAR_DECL, nonzero if the data should be allocated from
+ thread-local storage. */
+#define DECL_THREAD_LOCAL_P(NODE) \
+ ((TREE_STATIC (NODE) || DECL_EXTERNAL (NODE)) && decl_tls_model (NODE) >= TLS_MODEL_REAL)
+
+/* In a non-local VAR_DECL with static storage duration, true if the
+ variable has an initialization priority. If false, the variable
+ will be initialized at the DEFAULT_INIT_PRIORITY. */
+#define DECL_HAS_INIT_PRIORITY_P(NODE) \
+ (VAR_DECL_CHECK (NODE)->decl_with_vis.init_priority_p)
+
+extern tree decl_debug_expr_lookup (tree);
+extern void decl_debug_expr_insert (tree, tree);
+
+/* For VAR_DECL, this is set to an expression that it was split from. */
+#define DECL_HAS_DEBUG_EXPR_P(NODE) \
+ (VAR_DECL_CHECK (NODE)->decl_common.debug_expr_is_from)
+#define DECL_DEBUG_EXPR(NODE) \
+ (decl_debug_expr_lookup (VAR_DECL_CHECK (NODE)))
+
+#define SET_DECL_DEBUG_EXPR(NODE, VAL) \
+ (decl_debug_expr_insert (VAR_DECL_CHECK (NODE), VAL))
+
+extern priority_type decl_init_priority_lookup (tree);
+extern priority_type decl_fini_priority_lookup (tree);
+extern void decl_init_priority_insert (tree, priority_type);
+extern void decl_fini_priority_insert (tree, priority_type);
+
+/* For a VAR_DECL or FUNCTION_DECL the initialization priority of
+ NODE. */
+#define DECL_INIT_PRIORITY(NODE) \
+ (decl_init_priority_lookup (NODE))
+/* Set the initialization priority for NODE to VAL. */
+#define SET_DECL_INIT_PRIORITY(NODE, VAL) \
+ (decl_init_priority_insert (NODE, VAL))
+
+/* For a FUNCTION_DECL the finalization priority of NODE. */
+#define DECL_FINI_PRIORITY(NODE) \
+ (decl_fini_priority_lookup (NODE))
+/* Set the finalization priority for NODE to VAL. */
+#define SET_DECL_FINI_PRIORITY(NODE, VAL) \
+ (decl_fini_priority_insert (NODE, VAL))
+
+/* The initialization priority for entities for which no explicit
+ initialization priority has been specified. */
+#define DEFAULT_INIT_PRIORITY 65535
+
+/* The maximum allowed initialization priority. */
+#define MAX_INIT_PRIORITY 65535
+
+/* The largest priority value reserved for use by system runtime
+ libraries. */
+#define MAX_RESERVED_INIT_PRIORITY 100
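+
+/* Illustrative example: a user-written
+
+     __attribute__ ((constructor (200)))
+
+   gives the function DECL_INIT_PRIORITY 200; values up to
+   MAX_RESERVED_INIT_PRIORITY (100) are reserved for the runtime, and
+   a plain constructor attribute gets DEFAULT_INIT_PRIORITY. */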
+
+/* In a VAR_DECL, nonzero if this is a global variable for VOPs. */
+#define VAR_DECL_IS_VIRTUAL_OPERAND(NODE) \
+ (VAR_DECL_CHECK (NODE)->base.u.bits.saturating_flag)
+
+/* In a VAR_DECL, nonzero if this is a non-local frame structure. */
+#define DECL_NONLOCAL_FRAME(NODE) \
+ (VAR_DECL_CHECK (NODE)->base.default_def_flag)
+
+/* In a VAR_DECL, nonzero if this variable is not aliased by any pointer. */
+#define DECL_NONALIASED(NODE) \
+ (VAR_DECL_CHECK (NODE)->base.nothrow_flag)
+
+/* This field is used to reference anything in decl.result and is meant only
+ for use by the garbage collector. */
+#define DECL_RESULT_FLD(NODE) \
+ (DECL_NON_COMMON_CHECK (NODE)->decl_non_common.result)
+
+/* The DECL_VINDEX is used for FUNCTION_DECLS in two different ways.
+ Before the struct containing the FUNCTION_DECL is laid out,
+ DECL_VINDEX may point to a FUNCTION_DECL in a base class which
+ is the FUNCTION_DECL which this FUNCTION_DECL will replace as a virtual
+ function. When the class is laid out, this pointer is changed
+ to an INTEGER_CST node which is suitable for use as an index
+ into the virtual function table. */
+#define DECL_VINDEX(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.vindex)
+
+/* In FUNCTION_DECL, holds the decl for the return value. */
+#define DECL_RESULT(NODE) (FUNCTION_DECL_CHECK (NODE)->decl_non_common.result)
+
+/* In a FUNCTION_DECL, nonzero if the function cannot be inlined. */
+#define DECL_UNINLINABLE(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.uninlinable)
+
+/* In a FUNCTION_DECL, the saved representation of the body of the
+ entire function. */
+#define DECL_SAVED_TREE(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.saved_tree)
+
+/* Nonzero in a FUNCTION_DECL means this function should be treated
+ as if it were a malloc, meaning it returns a pointer that is
+ not an alias. */
+#define DECL_IS_MALLOC(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.malloc_flag)
+
+/* Macro for direct set and get of function_decl.decl_type. */
+#define FUNCTION_DECL_DECL_TYPE(NODE) \
+ ((NODE)->function_decl.decl_type)
+
+/* Set decl_type of a DECL. Set it to T when SET is true, or reset
+ it to NONE. */
+
+inline void
+set_function_decl_type (tree decl, function_decl_type t, bool set)
+{
+ if (set)
+ {
+ gcc_assert (FUNCTION_DECL_DECL_TYPE (decl) == NONE
+ || FUNCTION_DECL_DECL_TYPE (decl) == t);
+ FUNCTION_DECL_DECL_TYPE (decl) = t;
+ }
+ else if (FUNCTION_DECL_DECL_TYPE (decl) == t)
+ FUNCTION_DECL_DECL_TYPE (decl) = NONE;
+}
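+
+/* Usage sketch (illustrative): front ends go through the wrappers
+   below, e.g. DECL_SET_IS_OPERATOR_NEW (fndecl, true), which asserts
+   the current decl_type is NONE (or already OPERATOR_NEW) before
+   setting it; passing false resets the field to NONE. */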
+
+/* Nonzero in a FUNCTION_DECL means this function is a replaceable
+ function (like replaceable operators new or delete). */
+#define DECL_IS_REPLACEABLE_OPERATOR(NODE)\
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.replaceable_operator)
+
+/* Nonzero in a FUNCTION_DECL means this function should be treated as
+ C++ operator new, meaning that it returns a pointer for which we
+ should not use type based aliasing. */
+#define DECL_IS_OPERATOR_NEW_P(NODE) \
+ (FUNCTION_DECL_DECL_TYPE (FUNCTION_DECL_CHECK (NODE)) == OPERATOR_NEW)
+
+#define DECL_IS_REPLACEABLE_OPERATOR_NEW_P(NODE) \
+ (DECL_IS_OPERATOR_NEW_P (NODE) && DECL_IS_REPLACEABLE_OPERATOR (NODE))
+
+#define DECL_SET_IS_OPERATOR_NEW(NODE, VAL) \
+ set_function_decl_type (FUNCTION_DECL_CHECK (NODE), OPERATOR_NEW, VAL)
+
+/* Nonzero in a FUNCTION_DECL means this function should be treated as
+ C++ operator delete. */
+#define DECL_IS_OPERATOR_DELETE_P(NODE) \
+ (FUNCTION_DECL_DECL_TYPE (FUNCTION_DECL_CHECK (NODE)) == OPERATOR_DELETE)
+
+#define DECL_SET_IS_OPERATOR_DELETE(NODE, VAL) \
+ set_function_decl_type (FUNCTION_DECL_CHECK (NODE), OPERATOR_DELETE, VAL)
+
+/* Nonzero in a FUNCTION_DECL means this function may return more
+ than once. */
+#define DECL_IS_RETURNS_TWICE(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.returns_twice_flag)
+
+/* Nonzero in a FUNCTION_DECL means this function should be treated
+ as "pure" function (like const function, but may read global memory).
+ Note that being pure or const for a function is orthogonal to being
+ nothrow, i.e. it is valid to have DECL_PURE_P set and TREE_NOTHROW
+ cleared. */
+#define DECL_PURE_P(NODE) (FUNCTION_DECL_CHECK (NODE)->function_decl.pure_flag)
+
+/* Nonzero only if one of TREE_READONLY or DECL_PURE_P is nonzero AND
+ the const or pure function may not terminate. When this is nonzero
+ for a const or pure function, it can be dealt with by cse passes
+ but cannot be removed by dce passes since you are not allowed to
+ change an infinite looping program into one that terminates without
+ error. */
+#define DECL_LOOPING_CONST_OR_PURE_P(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.looping_const_or_pure_flag)
+
+/* Nonzero in a FUNCTION_DECL means this function should be treated
+ as "novops" function (function that does not read global memory,
+ but may have arbitrary side effects). */
+#define DECL_IS_NOVOPS(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.novops_flag)
+
+/* Used in FUNCTION_DECLs to indicate that they should be run automatically
+ at the beginning or end of execution. */
+#define DECL_STATIC_CONSTRUCTOR(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.static_ctor_flag)
+
+#define DECL_STATIC_DESTRUCTOR(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.static_dtor_flag)
+
+/* Used in FUNCTION_DECLs to indicate that function entry and exit should
+ be instrumented with calls to support routines. */
+#define DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.no_instrument_function_entry_exit)
+
+/* Used in FUNCTION_DECLs to indicate that limit-stack-* should be
+ disabled in this function. */
+#define DECL_NO_LIMIT_STACK(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.no_limit_stack)
+
+/* In a FUNCTION_DECL indicates that a static chain is needed. */
+#define DECL_STATIC_CHAIN(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->decl_with_vis.regdecl_flag)
+
+/* Nonzero for a decl that cgraph has decided should be inlined into
+ at least one call site. It is not meaningful to look at this
+ directly; always use cgraph_function_possibly_inlined_p. */
+#define DECL_POSSIBLY_INLINED(DECL) \
+ FUNCTION_DECL_CHECK (DECL)->function_decl.possibly_inlined
+
+/* Nonzero in a FUNCTION_DECL means that this function was declared inline,
+ such as via the `inline' keyword in C/C++. This flag controls the linkage
+ semantics of `inline'. */
+#define DECL_DECLARED_INLINE_P(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.declared_inline_flag)
+
+/* Nonzero in a FUNCTION_DECL means this function should not get
+ -Winline warnings. */
+#define DECL_NO_INLINE_WARNING_P(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.no_inline_warning_flag)
+
+/* Nonzero if a FUNCTION_CODE is a TM load/store. */
+#define BUILTIN_TM_LOAD_STORE_P(FN) \
+ ((FN) >= BUILT_IN_TM_STORE_1 && (FN) <= BUILT_IN_TM_LOAD_RFW_LDOUBLE)
+
+/* Nonzero if a FUNCTION_CODE is a TM load. */
+#define BUILTIN_TM_LOAD_P(FN) \
+ ((FN) >= BUILT_IN_TM_LOAD_1 && (FN) <= BUILT_IN_TM_LOAD_RFW_LDOUBLE)
+
+/* Nonzero if a FUNCTION_CODE is a TM store. */
+#define BUILTIN_TM_STORE_P(FN) \
+ ((FN) >= BUILT_IN_TM_STORE_1 && (FN) <= BUILT_IN_TM_STORE_WAW_LDOUBLE)
+
+#define CASE_BUILT_IN_TM_LOAD(FN) \
+ case BUILT_IN_TM_LOAD_##FN: \
+ case BUILT_IN_TM_LOAD_RAR_##FN: \
+ case BUILT_IN_TM_LOAD_RAW_##FN: \
+ case BUILT_IN_TM_LOAD_RFW_##FN
+
+#define CASE_BUILT_IN_TM_STORE(FN) \
+ case BUILT_IN_TM_STORE_##FN: \
+ case BUILT_IN_TM_STORE_WAR_##FN: \
+ case BUILT_IN_TM_STORE_WAW_##FN
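+
+/* Usage sketch (illustrative; is_tm_load is a hypothetical flag): each
+   macro expands to case labels covering the plain, RAR, RAW and RFW
+   (or WAR/WAW) variants of a size, e.g.
+
+     switch (fcode)
+       {
+       CASE_BUILT_IN_TM_LOAD (1):
+       CASE_BUILT_IN_TM_LOAD (2):
+         is_tm_load = true;
+         break;
+       }  */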
+
+/* Nonzero in a FUNCTION_DECL that should be always inlined by the inliner
+ disregarding size and cost heuristics. This is equivalent to using
+ the always_inline attribute without the required diagnostics if the
+ function cannot be inlined. */
+#define DECL_DISREGARD_INLINE_LIMITS(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.disregard_inline_limits)
+
+extern vec<tree, va_gc> **decl_debug_args_lookup (tree);
+extern vec<tree, va_gc> **decl_debug_args_insert (tree);
+
+/* Nonzero if a FUNCTION_DECL has DEBUG arguments attached to it. */
+#define DECL_HAS_DEBUG_ARGS_P(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.has_debug_args_flag)
+
+/* For FUNCTION_DECL, this holds a pointer to a structure ("struct function")
+ that describes the status of this function. */
+#define DECL_STRUCT_FUNCTION(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.f)
+
+/* For a builtin function, identify which part of the compiler defined it. */
+#define DECL_BUILT_IN_CLASS(NODE) \
+ ((built_in_class) FUNCTION_DECL_CHECK (NODE)->function_decl.built_in_class)
+
+/* In FUNCTION_DECL, a chain of ..._DECL nodes. */
+#define DECL_ARGUMENTS(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.arguments)
+
+/* In FUNCTION_DECL, the function specific target options to use when compiling
+ this function. */
+#define DECL_FUNCTION_SPECIFIC_TARGET(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.function_specific_target)
+
+/* In FUNCTION_DECL, the function specific optimization options to use when
+ compiling this function. */
+#define DECL_FUNCTION_SPECIFIC_OPTIMIZATION(NODE) \
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.function_specific_optimization)
+
+/* In FUNCTION_DECL, this is set if this function has other versions generated
+ using "target" attributes. The default version is the one which does not
+ have any "target" attribute set. */
+#define DECL_FUNCTION_VERSIONED(NODE)\
+ (FUNCTION_DECL_CHECK (NODE)->function_decl.versioned_function)
+
+/* In FUNCTION_DECL, this is set if this function is a C++ constructor.
+ Devirtualization machinery uses this knowledge for determining the type of
+ the object constructed. Also we assume that the constructor's address is
+ not important. */
+#define DECL_CXX_CONSTRUCTOR_P(NODE)\
+ (FUNCTION_DECL_CHECK (NODE)->decl_with_vis.cxx_constructor)
+
+/* In FUNCTION_DECL, this is set if this function is a C++ destructor.
+ Devirtualization machinery uses this to track types in destruction. */
+#define DECL_CXX_DESTRUCTOR_P(NODE)\
+ (FUNCTION_DECL_CHECK (NODE)->decl_with_vis.cxx_destructor)
+
+/* In FUNCTION_DECL, this is set if this function is a lambda function. */
+#define DECL_LAMBDA_FUNCTION_P(NODE) \
+ (FUNCTION_DECL_DECL_TYPE (FUNCTION_DECL_CHECK (NODE)) == LAMBDA_FUNCTION)
+
+#define DECL_SET_LAMBDA_FUNCTION(NODE, VAL) \
+ set_function_decl_type (FUNCTION_DECL_CHECK (NODE), LAMBDA_FUNCTION, VAL)
+
+/* In a FUNCTION_DECL that represents a virtual method, this is set when
+ the method is final. */
+#define DECL_FINAL_P(NODE)\
+ (FUNCTION_DECL_CHECK (NODE)->decl_with_vis.final)
+
+/* The source language of the translation-unit. */
+#define TRANSLATION_UNIT_LANGUAGE(NODE) \
+ (TRANSLATION_UNIT_DECL_CHECK (NODE)->translation_unit_decl.language)
+
+/* TRANSLATION_UNIT_DECL inherits from DECL_MINIMAL. */
+
+/* For a TYPE_DECL, holds the "original" type. (TREE_TYPE has the copy.) */
+#define DECL_ORIGINAL_TYPE(NODE) \
+ (TYPE_DECL_CHECK (NODE)->decl_non_common.result)
+
+/* In a TYPE_DECL nonzero means the detail info about this type is not dumped
+ into stabs. Instead a cross reference ('x') of names will be generated.
+ This uses the same flag as DECL_EXTERNAL. */
+#define TYPE_DECL_SUPPRESS_DEBUG(NODE) \
+ (TYPE_DECL_CHECK (NODE)->decl_common.decl_flag_1)
+
+/* Getter of the imported declaration associated to the
+ IMPORTED_DECL node. */
+#define IMPORTED_DECL_ASSOCIATED_DECL(NODE) \
+ (DECL_INITIAL (IMPORTED_DECL_CHECK (NODE)))
+
+/* Getter of the symbol declaration associated with the
+ NAMELIST_DECL node. */
+#define NAMELIST_DECL_ASSOCIATED_DECL(NODE) \
+ (DECL_INITIAL (NODE))
+
+/* A STATEMENT_LIST chains statements together in GENERIC and GIMPLE.
+ To reduce overhead, the nodes containing the statements are not trees.
+ This avoids the overhead of tree_common on all linked list elements.
+
+ Use the interface in tree-iterator.h to access this node. */
+
+#define STATEMENT_LIST_HEAD(NODE) \
+ (STATEMENT_LIST_CHECK (NODE)->stmt_list.head)
+#define STATEMENT_LIST_TAIL(NODE) \
+ (STATEMENT_LIST_CHECK (NODE)->stmt_list.tail)
+
+#define TREE_OPTIMIZATION(NODE) \
+ (OPTIMIZATION_NODE_CHECK (NODE)->optimization.opts)
+
+#define TREE_OPTIMIZATION_OPTABS(NODE) \
+ (OPTIMIZATION_NODE_CHECK (NODE)->optimization.optabs)
+
+#define TREE_OPTIMIZATION_BASE_OPTABS(NODE) \
+ (OPTIMIZATION_NODE_CHECK (NODE)->optimization.base_optabs)
+
+/* Return a tree node that encapsulates the optimization options in OPTS
+ and OPTS_SET. */
+extern tree build_optimization_node (struct gcc_options *opts,
+ struct gcc_options *opts_set);
+
+#define TREE_TARGET_OPTION(NODE) \
+ (TARGET_OPTION_NODE_CHECK (NODE)->target_option.opts)
+
+#define TREE_TARGET_GLOBALS(NODE) \
+ (TARGET_OPTION_NODE_CHECK (NODE)->target_option.globals)
+
+/* Return a tree node that encapsulates the target options in OPTS and
+ OPTS_SET. */
+extern tree build_target_option_node (struct gcc_options *opts,
+ struct gcc_options *opts_set);
+
+extern void prepare_target_option_nodes_for_pch (void);
+
+#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
+
+inline tree
+tree_check (tree __t, const char *__f, int __l, const char *__g, tree_code __c)
+{
+ if (TREE_CODE (__t) != __c)
+ tree_check_failed (__t, __f, __l, __g, __c, 0);
+ return __t;
+}
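+
+/* Illustrative note: the TREE_CHECK family of macros used throughout
+   this file wraps these helpers with the caller's location, roughly
+
+     TREE_CHECK (t, VAR_DECL)
+       => tree_check ((t), __FILE__, __LINE__, __FUNCTION__, VAR_DECL)
+
+   so a code mismatch aborts with a precise source position. */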
+
+inline tree
+tree_not_check (tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c)
+{
+ if (TREE_CODE (__t) == __c)
+ tree_not_check_failed (__t, __f, __l, __g, __c, 0);
+ return __t;
+}
+
+inline tree
+tree_check2 (tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2)
+{
+ if (TREE_CODE (__t) != __c1
+ && TREE_CODE (__t) != __c2)
+ tree_check_failed (__t, __f, __l, __g, __c1, __c2, 0);
+ return __t;
+}
+
+inline tree
+tree_not_check2 (tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2)
+{
+ if (TREE_CODE (__t) == __c1
+ || TREE_CODE (__t) == __c2)
+ tree_not_check_failed (__t, __f, __l, __g, __c1, __c2, 0);
+ return __t;
+}
+
+inline tree
+tree_check3 (tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2, enum tree_code __c3)
+{
+ if (TREE_CODE (__t) != __c1
+ && TREE_CODE (__t) != __c2
+ && TREE_CODE (__t) != __c3)
+ tree_check_failed (__t, __f, __l, __g, __c1, __c2, __c3, 0);
+ return __t;
+}
+
+inline tree
+tree_not_check3 (tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2, enum tree_code __c3)
+{
+ if (TREE_CODE (__t) == __c1
+ || TREE_CODE (__t) == __c2
+ || TREE_CODE (__t) == __c3)
+ tree_not_check_failed (__t, __f, __l, __g, __c1, __c2, __c3, 0);
+ return __t;
+}
+
+inline tree
+tree_check4 (tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2, enum tree_code __c3,
+ enum tree_code __c4)
+{
+ if (TREE_CODE (__t) != __c1
+ && TREE_CODE (__t) != __c2
+ && TREE_CODE (__t) != __c3
+ && TREE_CODE (__t) != __c4)
+ tree_check_failed (__t, __f, __l, __g, __c1, __c2, __c3, __c4, 0);
+ return __t;
+}
+
+inline tree
+tree_not_check4 (tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2, enum tree_code __c3,
+ enum tree_code __c4)
+{
+ if (TREE_CODE (__t) == __c1
+ || TREE_CODE (__t) == __c2
+ || TREE_CODE (__t) == __c3
+ || TREE_CODE (__t) == __c4)
+ tree_not_check_failed (__t, __f, __l, __g, __c1, __c2, __c3, __c4, 0);
+ return __t;
+}
+
+inline tree
+tree_check5 (tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2, enum tree_code __c3,
+ enum tree_code __c4, enum tree_code __c5)
+{
+ if (TREE_CODE (__t) != __c1
+ && TREE_CODE (__t) != __c2
+ && TREE_CODE (__t) != __c3
+ && TREE_CODE (__t) != __c4
+ && TREE_CODE (__t) != __c5)
+ tree_check_failed (__t, __f, __l, __g, __c1, __c2, __c3, __c4, __c5, 0);
+ return __t;
+}
+
+inline tree
+tree_not_check5 (tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2, enum tree_code __c3,
+ enum tree_code __c4, enum tree_code __c5)
+{
+ if (TREE_CODE (__t) == __c1
+ || TREE_CODE (__t) == __c2
+ || TREE_CODE (__t) == __c3
+ || TREE_CODE (__t) == __c4
+ || TREE_CODE (__t) == __c5)
+ tree_not_check_failed (__t, __f, __l, __g, __c1, __c2, __c3, __c4, __c5, 0);
+ return __t;
+}
+
+inline tree
+contains_struct_check (tree __t, const enum tree_node_structure_enum __s,
+ const char *__f, int __l, const char *__g)
+{
+ if (tree_contains_struct[TREE_CODE (__t)][__s] != 1)
+ tree_contains_struct_check_failed (__t, __s, __f, __l, __g);
+ return __t;
+}
+
+inline tree
+tree_class_check (tree __t, const enum tree_code_class __class,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE_CLASS (TREE_CODE (__t)) != __class)
+ tree_class_check_failed (__t, __class, __f, __l, __g);
+ return __t;
+}
+
+inline tree
+tree_range_check (tree __t,
+ enum tree_code __code1, enum tree_code __code2,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) < __code1 || TREE_CODE (__t) > __code2)
+ tree_range_check_failed (__t, __f, __l, __g, __code1, __code2);
+ return __t;
+}
+
+inline tree
+omp_clause_subcode_check (tree __t, enum omp_clause_code __code,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != OMP_CLAUSE)
+ tree_check_failed (__t, __f, __l, __g, OMP_CLAUSE, 0);
+ if (__t->omp_clause.code != __code)
+ omp_clause_check_failed (__t, __f, __l, __g, __code);
+ return __t;
+}
+
+inline tree
+omp_clause_range_check (tree __t,
+ enum omp_clause_code __code1,
+ enum omp_clause_code __code2,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != OMP_CLAUSE)
+ tree_check_failed (__t, __f, __l, __g, OMP_CLAUSE, 0);
+ if ((int) __t->omp_clause.code < (int) __code1
+ || (int) __t->omp_clause.code > (int) __code2)
+ omp_clause_range_check_failed (__t, __f, __l, __g, __code1, __code2);
+ return __t;
+}
+
+/* These checks have to be special cased. */
+
+inline tree
+expr_check (tree __t, const char *__f, int __l, const char *__g)
+{
+ char const __c = TREE_CODE_CLASS (TREE_CODE (__t));
+ if (!IS_EXPR_CODE_CLASS (__c))
+ tree_class_check_failed (__t, tcc_expression, __f, __l, __g);
+ return __t;
+}
+
+/* These checks have to be special cased. */
+
+inline tree
+non_type_check (tree __t, const char *__f, int __l, const char *__g)
+{
+ if (TYPE_P (__t))
+ tree_not_class_check_failed (__t, tcc_type, __f, __l, __g);
+ return __t;
+}
+
+inline const HOST_WIDE_INT *
+tree_int_cst_elt_check (const_tree __t, int __i,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != INTEGER_CST)
+ tree_check_failed (__t, __f, __l, __g, INTEGER_CST, 0);
+ if (__i < 0 || __i >= __t->base.u.int_length.extended)
+ tree_int_cst_elt_check_failed (__i, __t->base.u.int_length.extended,
+ __f, __l, __g);
+ return &CONST_CAST_TREE (__t)->int_cst.val[__i];
+}
+
+inline HOST_WIDE_INT *
+tree_int_cst_elt_check (tree __t, int __i,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != INTEGER_CST)
+ tree_check_failed (__t, __f, __l, __g, INTEGER_CST, 0);
+ if (__i < 0 || __i >= __t->base.u.int_length.extended)
+ tree_int_cst_elt_check_failed (__i, __t->base.u.int_length.extended,
+ __f, __l, __g);
+ return &CONST_CAST_TREE (__t)->int_cst.val[__i];
+}
+
+/* Workaround -Wstrict-overflow false positive during profiledbootstrap. */
+
+# if GCC_VERSION >= 4006
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstrict-overflow"
+#endif
+
+inline tree *
+tree_vec_elt_check (tree __t, int __i,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != TREE_VEC)
+ tree_check_failed (__t, __f, __l, __g, TREE_VEC, 0);
+ if (__i < 0 || __i >= __t->base.u.length)
+ tree_vec_elt_check_failed (__i, __t->base.u.length, __f, __l, __g);
+ return &CONST_CAST_TREE (__t)->vec.a[__i];
+}
+
+# if GCC_VERSION >= 4006
+#pragma GCC diagnostic pop
+#endif
+
+inline tree *
+omp_clause_elt_check (tree __t, int __i,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != OMP_CLAUSE)
+ tree_check_failed (__t, __f, __l, __g, OMP_CLAUSE, 0);
+ if (__i < 0 || __i >= omp_clause_num_ops [__t->omp_clause.code])
+ omp_clause_operand_check_failed (__i, __t, __f, __l, __g);
+ return &__t->omp_clause.ops[__i];
+}
+
+/* These checks have to be special cased. */
+
+inline tree
+any_integral_type_check (tree __t, const char *__f, int __l, const char *__g)
+{
+ if (!ANY_INTEGRAL_TYPE_P (__t))
+ tree_check_failed (__t, __f, __l, __g, BOOLEAN_TYPE, ENUMERAL_TYPE,
+ INTEGER_TYPE, 0);
+ return __t;
+}
+
+inline const_tree
+tree_check (const_tree __t, const char *__f, int __l, const char *__g,
+ tree_code __c)
+{
+ if (TREE_CODE (__t) != __c)
+ tree_check_failed (__t, __f, __l, __g, __c, 0);
+ return __t;
+}
+
+inline const_tree
+tree_not_check (const_tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c)
+{
+ if (TREE_CODE (__t) == __c)
+ tree_not_check_failed (__t, __f, __l, __g, __c, 0);
+ return __t;
+}
+
+inline const_tree
+tree_check2 (const_tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2)
+{
+ if (TREE_CODE (__t) != __c1
+ && TREE_CODE (__t) != __c2)
+ tree_check_failed (__t, __f, __l, __g, __c1, __c2, 0);
+ return __t;
+}
+
+inline const_tree
+tree_not_check2 (const_tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2)
+{
+ if (TREE_CODE (__t) == __c1
+ || TREE_CODE (__t) == __c2)
+ tree_not_check_failed (__t, __f, __l, __g, __c1, __c2, 0);
+ return __t;
+}
+
+inline const_tree
+tree_check3 (const_tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2, enum tree_code __c3)
+{
+ if (TREE_CODE (__t) != __c1
+ && TREE_CODE (__t) != __c2
+ && TREE_CODE (__t) != __c3)
+ tree_check_failed (__t, __f, __l, __g, __c1, __c2, __c3, 0);
+ return __t;
+}
+
+inline const_tree
+tree_not_check3 (const_tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2, enum tree_code __c3)
+{
+ if (TREE_CODE (__t) == __c1
+ || TREE_CODE (__t) == __c2
+ || TREE_CODE (__t) == __c3)
+ tree_not_check_failed (__t, __f, __l, __g, __c1, __c2, __c3, 0);
+ return __t;
+}
+
+inline const_tree
+tree_check4 (const_tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2, enum tree_code __c3,
+ enum tree_code __c4)
+{
+ if (TREE_CODE (__t) != __c1
+ && TREE_CODE (__t) != __c2
+ && TREE_CODE (__t) != __c3
+ && TREE_CODE (__t) != __c4)
+ tree_check_failed (__t, __f, __l, __g, __c1, __c2, __c3, __c4, 0);
+ return __t;
+}
+
+inline const_tree
+tree_not_check4 (const_tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2, enum tree_code __c3,
+ enum tree_code __c4)
+{
+ if (TREE_CODE (__t) == __c1
+ || TREE_CODE (__t) == __c2
+ || TREE_CODE (__t) == __c3
+ || TREE_CODE (__t) == __c4)
+ tree_not_check_failed (__t, __f, __l, __g, __c1, __c2, __c3, __c4, 0);
+ return __t;
+}
+
+inline const_tree
+tree_check5 (const_tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2, enum tree_code __c3,
+ enum tree_code __c4, enum tree_code __c5)
+{
+ if (TREE_CODE (__t) != __c1
+ && TREE_CODE (__t) != __c2
+ && TREE_CODE (__t) != __c3
+ && TREE_CODE (__t) != __c4
+ && TREE_CODE (__t) != __c5)
+ tree_check_failed (__t, __f, __l, __g, __c1, __c2, __c3, __c4, __c5, 0);
+ return __t;
+}
+
+inline const_tree
+tree_not_check5 (const_tree __t, const char *__f, int __l, const char *__g,
+ enum tree_code __c1, enum tree_code __c2, enum tree_code __c3,
+ enum tree_code __c4, enum tree_code __c5)
+{
+ if (TREE_CODE (__t) == __c1
+ || TREE_CODE (__t) == __c2
+ || TREE_CODE (__t) == __c3
+ || TREE_CODE (__t) == __c4
+ || TREE_CODE (__t) == __c5)
+ tree_not_check_failed (__t, __f, __l, __g, __c1, __c2, __c3, __c4, __c5, 0);
+ return __t;
+}
+
+inline const_tree
+contains_struct_check (const_tree __t, const enum tree_node_structure_enum __s,
+ const char *__f, int __l, const char *__g)
+{
+ if (tree_contains_struct[TREE_CODE (__t)][__s] != 1)
+ tree_contains_struct_check_failed (__t, __s, __f, __l, __g);
+ return __t;
+}
+
+inline const_tree
+tree_class_check (const_tree __t, const enum tree_code_class __class,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE_CLASS (TREE_CODE (__t)) != __class)
+ tree_class_check_failed (__t, __class, __f, __l, __g);
+ return __t;
+}
+
+inline const_tree
+tree_range_check (const_tree __t,
+ enum tree_code __code1, enum tree_code __code2,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) < __code1 || TREE_CODE (__t) > __code2)
+ tree_range_check_failed (__t, __f, __l, __g, __code1, __code2);
+ return __t;
+}
+
+inline const_tree
+omp_clause_subcode_check (const_tree __t, enum omp_clause_code __code,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != OMP_CLAUSE)
+ tree_check_failed (__t, __f, __l, __g, OMP_CLAUSE, 0);
+ if (__t->omp_clause.code != __code)
+ omp_clause_check_failed (__t, __f, __l, __g, __code);
+ return __t;
+}
+
+inline const_tree
+omp_clause_range_check (const_tree __t,
+ enum omp_clause_code __code1,
+ enum omp_clause_code __code2,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != OMP_CLAUSE)
+ tree_check_failed (__t, __f, __l, __g, OMP_CLAUSE, 0);
+ if ((int) __t->omp_clause.code < (int) __code1
+ || (int) __t->omp_clause.code > (int) __code2)
+ omp_clause_range_check_failed (__t, __f, __l, __g, __code1, __code2);
+ return __t;
+}
+
+inline const_tree
+expr_check (const_tree __t, const char *__f, int __l, const char *__g)
+{
+ char const __c = TREE_CODE_CLASS (TREE_CODE (__t));
+ if (!IS_EXPR_CODE_CLASS (__c))
+ tree_class_check_failed (__t, tcc_expression, __f, __l, __g);
+ return __t;
+}
+
+inline const_tree
+non_type_check (const_tree __t, const char *__f, int __l, const char *__g)
+{
+ if (TYPE_P (__t))
+ tree_not_class_check_failed (__t, tcc_type, __f, __l, __g);
+ return __t;
+}
+
+# if GCC_VERSION >= 4006
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstrict-overflow"
+#endif
+
+inline const_tree *
+tree_vec_elt_check (const_tree __t, int __i,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != TREE_VEC)
+ tree_check_failed (__t, __f, __l, __g, TREE_VEC, 0);
+ if (__i < 0 || __i >= __t->base.u.length)
+ tree_vec_elt_check_failed (__i, __t->base.u.length, __f, __l, __g);
+ return CONST_CAST (const_tree *, &__t->vec.a[__i]);
+}
+
+# if GCC_VERSION >= 4006
+#pragma GCC diagnostic pop
+#endif
+
+inline const_tree *
+omp_clause_elt_check (const_tree __t, int __i,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != OMP_CLAUSE)
+ tree_check_failed (__t, __f, __l, __g, OMP_CLAUSE, 0);
+ if (__i < 0 || __i >= omp_clause_num_ops [__t->omp_clause.code])
+ omp_clause_operand_check_failed (__i, __t, __f, __l, __g);
+ return CONST_CAST (const_tree *, &__t->omp_clause.ops[__i]);
+}
+
+inline const_tree
+any_integral_type_check (const_tree __t, const char *__f, int __l,
+ const char *__g)
+{
+ if (!ANY_INTEGRAL_TYPE_P (__t))
+ tree_check_failed (__t, __f, __l, __g, BOOLEAN_TYPE, ENUMERAL_TYPE,
+ INTEGER_TYPE, 0);
+ return __t;
+}
+
+#endif
+
+/* Compute the number of operands in an expression node NODE. For
+ tcc_vl_exp nodes like CALL_EXPRs, this is stored in the node itself,
+ otherwise it is looked up from the node's code. */
+inline int
+tree_operand_length (const_tree node)
+{
+ if (VL_EXP_CLASS_P (node))
+ return VL_EXP_OPERAND_LENGTH (node);
+ else
+ return TREE_CODE_LENGTH (TREE_CODE (node));
+}
+
+#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
+
+/* Special checks for TREE_OPERANDs. */
+inline tree *
+tree_operand_check (tree __t, int __i,
+ const char *__f, int __l, const char *__g)
+{
+ const_tree __u = EXPR_CHECK (__t);
+ if (__i < 0 || __i >= TREE_OPERAND_LENGTH (__u))
+ tree_operand_check_failed (__i, __u, __f, __l, __g);
+ return &CONST_CAST_TREE (__u)->exp.operands[__i];
+}
+
+inline tree *
+tree_operand_check_code (tree __t, enum tree_code __code, int __i,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != __code)
+ tree_check_failed (__t, __f, __l, __g, __code, 0);
+ if (__i < 0 || __i >= TREE_OPERAND_LENGTH (__t))
+ tree_operand_check_failed (__i, __t, __f, __l, __g);
+ return &__t->exp.operands[__i];
+}
+
+inline const_tree *
+tree_operand_check (const_tree __t, int __i,
+ const char *__f, int __l, const char *__g)
+{
+ const_tree __u = EXPR_CHECK (__t);
+ if (__i < 0 || __i >= TREE_OPERAND_LENGTH (__u))
+ tree_operand_check_failed (__i, __u, __f, __l, __g);
+ return CONST_CAST (const_tree *, &__u->exp.operands[__i]);
+}
+
+inline const_tree *
+tree_operand_check_code (const_tree __t, enum tree_code __code, int __i,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != __code)
+ tree_check_failed (__t, __f, __l, __g, __code, 0);
+ if (__i < 0 || __i >= TREE_OPERAND_LENGTH (__t))
+ tree_operand_check_failed (__i, __t, __f, __l, __g);
+ return CONST_CAST (const_tree *, &__t->exp.operands[__i]);
+}
+
+#endif
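+
+/* When checking is enabled, the accessor macros funnel through the inline
+   functions above; as an illustrative sketch only, TREE_OPERAND (t, 0)
+   expands to roughly
+
+     (*tree_operand_check (t, 0, __FILE__, __LINE__, __FUNCTION__))
+
+   so an out-of-range operand index aborts with the file, line and
+   function of the offending use. */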
+
+/* True iff an identifier matches a C string. */
+
+inline bool
+id_equal (const_tree id, const char *str)
+{
+ return !strcmp (IDENTIFIER_POINTER (id), str);
+}
+
+inline bool
+id_equal (const char *str, const_tree id)
+{
+ return id_equal (id, str);
+}
+
+/* Return the number of elements in the VECTOR_TYPE given by NODE. */
+
+inline poly_uint64
+TYPE_VECTOR_SUBPARTS (const_tree node)
+{
+ STATIC_ASSERT (NUM_POLY_INT_COEFFS <= 2);
+ unsigned int precision = VECTOR_TYPE_CHECK (node)->type_common.precision;
+ if (NUM_POLY_INT_COEFFS == 2)
+ {
+ /* See the corresponding code in SET_TYPE_VECTOR_SUBPARTS for a
+ description of the encoding. */
+ poly_uint64 res = 0;
+ res.coeffs[0] = HOST_WIDE_INT_1U << (precision & 0xff);
+ if (precision & 0x100)
+ res.coeffs[1] = HOST_WIDE_INT_1U << (precision & 0xff);
+ return res;
+ }
+ else
+ return HOST_WIDE_INT_1U << precision;
+}
+
+/* Set the number of elements in VECTOR_TYPE NODE to SUBPARTS, which must
+ satisfy valid_vector_subparts_p. */
+
+inline void
+SET_TYPE_VECTOR_SUBPARTS (tree node, poly_uint64 subparts)
+{
+ STATIC_ASSERT (NUM_POLY_INT_COEFFS <= 2);
+ unsigned HOST_WIDE_INT coeff0 = subparts.coeffs[0];
+ int index = exact_log2 (coeff0);
+ gcc_assert (index >= 0);
+ if (NUM_POLY_INT_COEFFS == 2)
+ {
+ /* We have two coefficients that are each in the range 1 << [0, 63],
+ so supporting all combinations would require 6 bits per coefficient
+ and 12 bits in total. Since the precision field is only 10 bits
+ in size, we need to be more restrictive than that.
+
+ At present, coeff[1] is always either 0 (meaning that the number
+ of units is constant) or equal to coeff[0] (meaning that the number
+ of units is N + X * N for some target-dependent zero-based runtime
+ parameter X). We can therefore encode coeff[1] in a single bit.
+
+ The most compact encoding would be to use mask 0x3f for coeff[0]
+ and 0x40 for coeff[1], leaving 0x380 unused. It's possible to
+ get slightly more efficient code on some hosts if we instead
+ treat the shift amount as an independent byte, so here we use
+ 0xff for coeff[0] and 0x100 for coeff[1]. */
+ unsigned HOST_WIDE_INT coeff1 = subparts.coeffs[1];
+ gcc_assert (coeff1 == 0 || coeff1 == coeff0);
+ VECTOR_TYPE_CHECK (node)->type_common.precision
+ = index + (coeff1 != 0 ? 0x100 : 0);
+ }
+ else
+ VECTOR_TYPE_CHECK (node)->type_common.precision = index;
+}
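+
+/* A worked example of the encoding above (illustrative sketch, assuming
+   NUM_POLY_INT_COEFFS == 2):
+
+     SET_TYPE_VECTOR_SUBPARTS (node, poly_uint64 (4, 0));
+       coeff0 == 4, index == 2, coeff1 == 0  ->  precision == 0x002
+
+     SET_TYPE_VECTOR_SUBPARTS (node, poly_uint64 (2, 2));
+       coeff0 == 2, index == 1, coeff1 != 0  ->  precision == 0x101
+
+   TYPE_VECTOR_SUBPARTS decodes 0x101 as coeffs[0] = 1 << 1 = 2 and, since
+   bit 0x100 is set, coeffs[1] = 2, i.e. "2 + 2X" units. */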
+
+/* Return true if we can construct vector types with the given number
+ of subparts. */
+
+inline bool
+valid_vector_subparts_p (poly_uint64 subparts)
+{
+ unsigned HOST_WIDE_INT coeff0 = subparts.coeffs[0];
+ if (!pow2p_hwi (coeff0))
+ return false;
+ if (NUM_POLY_INT_COEFFS == 2)
+ {
+ unsigned HOST_WIDE_INT coeff1 = subparts.coeffs[1];
+ if (coeff1 != 0 && coeff1 != coeff0)
+ return false;
+ }
+ return true;
+}
+
+/* Return the built-in function that DECL represents, given that it is known
+ to be a FUNCTION_DECL with built-in class BUILT_IN_NORMAL. */
+inline built_in_function
+DECL_FUNCTION_CODE (const_tree decl)
+{
+ const tree_function_decl &fndecl = FUNCTION_DECL_CHECK (decl)->function_decl;
+ gcc_checking_assert (fndecl.built_in_class == BUILT_IN_NORMAL);
+ return (built_in_function) fndecl.function_code;
+}
+
+/* Return the target-specific built-in function that DECL represents,
+ given that it is known to be a FUNCTION_DECL with built-in class
+ BUILT_IN_MD. */
+inline int
+DECL_MD_FUNCTION_CODE (const_tree decl)
+{
+ const tree_function_decl &fndecl = FUNCTION_DECL_CHECK (decl)->function_decl;
+ gcc_checking_assert (fndecl.built_in_class == BUILT_IN_MD);
+ return fndecl.function_code;
+}
+
+/* Return the frontend-specific built-in function that DECL represents,
+ given that it is known to be a FUNCTION_DECL with built-in class
+ BUILT_IN_FRONTEND. */
+inline int
+DECL_FE_FUNCTION_CODE (const_tree decl)
+{
+ const tree_function_decl &fndecl = FUNCTION_DECL_CHECK (decl)->function_decl;
+ gcc_checking_assert (fndecl.built_in_class == BUILT_IN_FRONTEND);
+ return fndecl.function_code;
+}
+
+/* Record that FUNCTION_DECL DECL represents built-in function FCODE of
+ class FCLASS. */
+inline void
+set_decl_built_in_function (tree decl, built_in_class fclass,
+ unsigned int fcode)
+{
+ tree_function_decl &fndecl = FUNCTION_DECL_CHECK (decl)->function_decl;
+ fndecl.built_in_class = fclass;
+ fndecl.function_code = fcode;
+}
+
+/* Record that FUNCTION_DECL NEWDECL represents the same built-in function
+ as OLDDECL (or none, if OLDDECL doesn't represent a built-in function). */
+inline void
+copy_decl_built_in_function (tree newdecl, const_tree olddecl)
+{
+ tree_function_decl &newfndecl = FUNCTION_DECL_CHECK (newdecl)->function_decl;
+ const tree_function_decl &oldfndecl
+ = FUNCTION_DECL_CHECK (olddecl)->function_decl;
+ newfndecl.built_in_class = oldfndecl.built_in_class;
+ newfndecl.function_code = oldfndecl.function_code;
+}
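+
+/* A minimal usage sketch of the accessors above (DECL stands for an
+   arbitrary FUNCTION_DECL; the sketch is not part of the original header):
+
+     set_decl_built_in_function (decl, BUILT_IN_NORMAL, BUILT_IN_MEMCPY);
+     if (fndecl_built_in_p (decl, BUILT_IN_NORMAL))
+       gcc_assert (DECL_FUNCTION_CODE (decl) == BUILT_IN_MEMCPY);
+
+   DECL_MD_FUNCTION_CODE and DECL_FE_FUNCTION_CODE follow the same pattern
+   for the BUILT_IN_MD and BUILT_IN_FRONTEND classes. */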
+
+/* In NON_LVALUE_EXPR and VIEW_CONVERT_EXPR, set when this node is merely a
+ wrapper added to express a location_t on behalf of the node's child
+ (e.g. by maybe_wrap_with_location). */
+
+#define EXPR_LOCATION_WRAPPER_P(NODE) \
+ (TREE_CHECK2(NODE, NON_LVALUE_EXPR, VIEW_CONVERT_EXPR)->base.public_flag)
+
+/* Test if EXP is merely a wrapper node, added to express a location_t
+ on behalf of the node's child (e.g. by maybe_wrap_with_location). */
+
+inline bool
+location_wrapper_p (const_tree exp)
+{
+ /* A wrapper node has code NON_LVALUE_EXPR or VIEW_CONVERT_EXPR, and
+ the flag EXPR_LOCATION_WRAPPER_P is set.
+ It normally has the same type as its operand, but it can have a
+ different one if the type of the operand has changed (e.g. when
+ merging duplicate decls).
+
+ NON_LVALUE_EXPR is used for wrapping constants, apart from STRING_CST.
+ VIEW_CONVERT_EXPR is used for wrapping non-constants and STRING_CST. */
+ if ((TREE_CODE (exp) == NON_LVALUE_EXPR
+ || TREE_CODE (exp) == VIEW_CONVERT_EXPR)
+ && EXPR_LOCATION_WRAPPER_P (exp))
+ return true;
+ return false;
+}
+
+/* Implementation of STRIP_ANY_LOCATION_WRAPPER. */
+
+inline tree
+tree_strip_any_location_wrapper (tree exp)
+{
+ if (location_wrapper_p (exp))
+ return TREE_OPERAND (exp, 0);
+ else
+ return exp;
+}
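+
+/* For example (illustrative sketch): a constant operand may reach a fold
+   routine wrapped in one of these location wrappers, so code that matches
+   on INTEGER_CST typically strips the wrapper first:
+
+     tree op = tree_strip_any_location_wrapper (arg);
+     if (TREE_CODE (op) == INTEGER_CST)
+       ... */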
+
+#define error_mark_node global_trees[TI_ERROR_MARK]
+
+#define intQI_type_node global_trees[TI_INTQI_TYPE]
+#define intHI_type_node global_trees[TI_INTHI_TYPE]
+#define intSI_type_node global_trees[TI_INTSI_TYPE]
+#define intDI_type_node global_trees[TI_INTDI_TYPE]
+#define intTI_type_node global_trees[TI_INTTI_TYPE]
+
+#define unsigned_intQI_type_node global_trees[TI_UINTQI_TYPE]
+#define unsigned_intHI_type_node global_trees[TI_UINTHI_TYPE]
+#define unsigned_intSI_type_node global_trees[TI_UINTSI_TYPE]
+#define unsigned_intDI_type_node global_trees[TI_UINTDI_TYPE]
+#define unsigned_intTI_type_node global_trees[TI_UINTTI_TYPE]
+
+#define atomicQI_type_node global_trees[TI_ATOMICQI_TYPE]
+#define atomicHI_type_node global_trees[TI_ATOMICHI_TYPE]
+#define atomicSI_type_node global_trees[TI_ATOMICSI_TYPE]
+#define atomicDI_type_node global_trees[TI_ATOMICDI_TYPE]
+#define atomicTI_type_node global_trees[TI_ATOMICTI_TYPE]
+
+#define uint16_type_node global_trees[TI_UINT16_TYPE]
+#define uint32_type_node global_trees[TI_UINT32_TYPE]
+#define uint64_type_node global_trees[TI_UINT64_TYPE]
+#define uint128_type_node global_trees[TI_UINT128_TYPE]
+
+#define void_node global_trees[TI_VOID]
+
+#define integer_zero_node global_trees[TI_INTEGER_ZERO]
+#define integer_one_node global_trees[TI_INTEGER_ONE]
+#define integer_three_node global_trees[TI_INTEGER_THREE]
+#define integer_minus_one_node global_trees[TI_INTEGER_MINUS_ONE]
+#define size_zero_node global_trees[TI_SIZE_ZERO]
+#define size_one_node global_trees[TI_SIZE_ONE]
+#define bitsize_zero_node global_trees[TI_BITSIZE_ZERO]
+#define bitsize_one_node global_trees[TI_BITSIZE_ONE]
+#define bitsize_unit_node global_trees[TI_BITSIZE_UNIT]
+
+/* Base access nodes. */
+#define access_public_node global_trees[TI_PUBLIC]
+#define access_protected_node global_trees[TI_PROTECTED]
+#define access_private_node global_trees[TI_PRIVATE]
+
+#define null_pointer_node global_trees[TI_NULL_POINTER]
+
+#define float_type_node global_trees[TI_FLOAT_TYPE]
+#define double_type_node global_trees[TI_DOUBLE_TYPE]
+#define long_double_type_node global_trees[TI_LONG_DOUBLE_TYPE]
+#define bfloat16_type_node global_trees[TI_BFLOAT16_TYPE]
+
+/* Nodes for particular _FloatN and _FloatNx types in sequence. */
+#define FLOATN_TYPE_NODE(IDX) global_trees[TI_FLOATN_TYPE_FIRST + (IDX)]
+#define FLOATN_NX_TYPE_NODE(IDX) global_trees[TI_FLOATN_NX_TYPE_FIRST + (IDX)]
+#define FLOATNX_TYPE_NODE(IDX) global_trees[TI_FLOATNX_TYPE_FIRST + (IDX)]
+
+/* Names for individual types (code should normally iterate over all
+ such types; these are only for back-end use, or in contexts such as
+ *.def where iteration is not possible). */
+#define float16_type_node global_trees[TI_FLOAT16_TYPE]
+#define float32_type_node global_trees[TI_FLOAT32_TYPE]
+#define float64_type_node global_trees[TI_FLOAT64_TYPE]
+#define float128_type_node global_trees[TI_FLOAT128_TYPE]
+#define float32x_type_node global_trees[TI_FLOAT32X_TYPE]
+#define float64x_type_node global_trees[TI_FLOAT64X_TYPE]
+#define float128x_type_node global_trees[TI_FLOAT128X_TYPE]
+
+/* Type used by certain backends for __float128, which in C++ should be a
+   distinct type from _Float128 for backwards compatibility reasons. */
+#define float128t_type_node global_trees[TI_FLOAT128T_TYPE]
+
+#define float_ptr_type_node global_trees[TI_FLOAT_PTR_TYPE]
+#define double_ptr_type_node global_trees[TI_DOUBLE_PTR_TYPE]
+#define long_double_ptr_type_node global_trees[TI_LONG_DOUBLE_PTR_TYPE]
+#define integer_ptr_type_node global_trees[TI_INTEGER_PTR_TYPE]
+
+#define complex_integer_type_node global_trees[TI_COMPLEX_INTEGER_TYPE]
+#define complex_float_type_node global_trees[TI_COMPLEX_FLOAT_TYPE]
+#define complex_double_type_node global_trees[TI_COMPLEX_DOUBLE_TYPE]
+#define complex_long_double_type_node global_trees[TI_COMPLEX_LONG_DOUBLE_TYPE]
+
+#define COMPLEX_FLOATN_NX_TYPE_NODE(IDX) global_trees[TI_COMPLEX_FLOATN_NX_TYPE_FIRST + (IDX)]
+
+#define void_type_node global_trees[TI_VOID_TYPE]
+/* The C type `void *'. */
+#define ptr_type_node global_trees[TI_PTR_TYPE]
+/* The C type `const void *'. */
+#define const_ptr_type_node global_trees[TI_CONST_PTR_TYPE]
+/* The C type `size_t'. */
+#define size_type_node global_trees[TI_SIZE_TYPE]
+#define pid_type_node global_trees[TI_PID_TYPE]
+#define ptrdiff_type_node global_trees[TI_PTRDIFF_TYPE]
+#define va_list_type_node global_trees[TI_VA_LIST_TYPE]
+#define va_list_gpr_counter_field global_trees[TI_VA_LIST_GPR_COUNTER_FIELD]
+#define va_list_fpr_counter_field global_trees[TI_VA_LIST_FPR_COUNTER_FIELD]
+/* The C type `FILE *'. */
+#define fileptr_type_node global_trees[TI_FILEPTR_TYPE]
+/* The C type `const struct tm *'. */
+#define const_tm_ptr_type_node global_trees[TI_CONST_TM_PTR_TYPE]
+/* The C type `fenv_t *'. */
+#define fenv_t_ptr_type_node global_trees[TI_FENV_T_PTR_TYPE]
+#define const_fenv_t_ptr_type_node global_trees[TI_CONST_FENV_T_PTR_TYPE]
+/* The C type `fexcept_t *'. */
+#define fexcept_t_ptr_type_node global_trees[TI_FEXCEPT_T_PTR_TYPE]
+#define const_fexcept_t_ptr_type_node global_trees[TI_CONST_FEXCEPT_T_PTR_TYPE]
+#define pointer_sized_int_node global_trees[TI_POINTER_SIZED_TYPE]
+
+#define boolean_type_node global_trees[TI_BOOLEAN_TYPE]
+#define boolean_false_node global_trees[TI_BOOLEAN_FALSE]
+#define boolean_true_node global_trees[TI_BOOLEAN_TRUE]
+
+/* The decimal floating point types. */
+#define dfloat32_type_node global_trees[TI_DFLOAT32_TYPE]
+#define dfloat64_type_node global_trees[TI_DFLOAT64_TYPE]
+#define dfloat128_type_node global_trees[TI_DFLOAT128_TYPE]
+
+/* The fixed-point types. */
+#define sat_short_fract_type_node global_trees[TI_SAT_SFRACT_TYPE]
+#define sat_fract_type_node global_trees[TI_SAT_FRACT_TYPE]
+#define sat_long_fract_type_node global_trees[TI_SAT_LFRACT_TYPE]
+#define sat_long_long_fract_type_node global_trees[TI_SAT_LLFRACT_TYPE]
+#define sat_unsigned_short_fract_type_node \
+ global_trees[TI_SAT_USFRACT_TYPE]
+#define sat_unsigned_fract_type_node global_trees[TI_SAT_UFRACT_TYPE]
+#define sat_unsigned_long_fract_type_node \
+ global_trees[TI_SAT_ULFRACT_TYPE]
+#define sat_unsigned_long_long_fract_type_node \
+ global_trees[TI_SAT_ULLFRACT_TYPE]
+#define short_fract_type_node global_trees[TI_SFRACT_TYPE]
+#define fract_type_node global_trees[TI_FRACT_TYPE]
+#define long_fract_type_node global_trees[TI_LFRACT_TYPE]
+#define long_long_fract_type_node global_trees[TI_LLFRACT_TYPE]
+#define unsigned_short_fract_type_node global_trees[TI_USFRACT_TYPE]
+#define unsigned_fract_type_node global_trees[TI_UFRACT_TYPE]
+#define unsigned_long_fract_type_node global_trees[TI_ULFRACT_TYPE]
+#define unsigned_long_long_fract_type_node \
+ global_trees[TI_ULLFRACT_TYPE]
+#define sat_short_accum_type_node global_trees[TI_SAT_SACCUM_TYPE]
+#define sat_accum_type_node global_trees[TI_SAT_ACCUM_TYPE]
+#define sat_long_accum_type_node global_trees[TI_SAT_LACCUM_TYPE]
+#define sat_long_long_accum_type_node global_trees[TI_SAT_LLACCUM_TYPE]
+#define sat_unsigned_short_accum_type_node \
+ global_trees[TI_SAT_USACCUM_TYPE]
+#define sat_unsigned_accum_type_node global_trees[TI_SAT_UACCUM_TYPE]
+#define sat_unsigned_long_accum_type_node \
+ global_trees[TI_SAT_ULACCUM_TYPE]
+#define sat_unsigned_long_long_accum_type_node \
+ global_trees[TI_SAT_ULLACCUM_TYPE]
+#define short_accum_type_node global_trees[TI_SACCUM_TYPE]
+#define accum_type_node global_trees[TI_ACCUM_TYPE]
+#define long_accum_type_node global_trees[TI_LACCUM_TYPE]
+#define long_long_accum_type_node global_trees[TI_LLACCUM_TYPE]
+#define unsigned_short_accum_type_node global_trees[TI_USACCUM_TYPE]
+#define unsigned_accum_type_node global_trees[TI_UACCUM_TYPE]
+#define unsigned_long_accum_type_node global_trees[TI_ULACCUM_TYPE]
+#define unsigned_long_long_accum_type_node \
+ global_trees[TI_ULLACCUM_TYPE]
+#define qq_type_node global_trees[TI_QQ_TYPE]
+#define hq_type_node global_trees[TI_HQ_TYPE]
+#define sq_type_node global_trees[TI_SQ_TYPE]
+#define dq_type_node global_trees[TI_DQ_TYPE]
+#define tq_type_node global_trees[TI_TQ_TYPE]
+#define uqq_type_node global_trees[TI_UQQ_TYPE]
+#define uhq_type_node global_trees[TI_UHQ_TYPE]
+#define usq_type_node global_trees[TI_USQ_TYPE]
+#define udq_type_node global_trees[TI_UDQ_TYPE]
+#define utq_type_node global_trees[TI_UTQ_TYPE]
+#define sat_qq_type_node global_trees[TI_SAT_QQ_TYPE]
+#define sat_hq_type_node global_trees[TI_SAT_HQ_TYPE]
+#define sat_sq_type_node global_trees[TI_SAT_SQ_TYPE]
+#define sat_dq_type_node global_trees[TI_SAT_DQ_TYPE]
+#define sat_tq_type_node global_trees[TI_SAT_TQ_TYPE]
+#define sat_uqq_type_node global_trees[TI_SAT_UQQ_TYPE]
+#define sat_uhq_type_node global_trees[TI_SAT_UHQ_TYPE]
+#define sat_usq_type_node global_trees[TI_SAT_USQ_TYPE]
+#define sat_udq_type_node global_trees[TI_SAT_UDQ_TYPE]
+#define sat_utq_type_node global_trees[TI_SAT_UTQ_TYPE]
+#define ha_type_node global_trees[TI_HA_TYPE]
+#define sa_type_node global_trees[TI_SA_TYPE]
+#define da_type_node global_trees[TI_DA_TYPE]
+#define ta_type_node global_trees[TI_TA_TYPE]
+#define uha_type_node global_trees[TI_UHA_TYPE]
+#define usa_type_node global_trees[TI_USA_TYPE]
+#define uda_type_node global_trees[TI_UDA_TYPE]
+#define uta_type_node global_trees[TI_UTA_TYPE]
+#define sat_ha_type_node global_trees[TI_SAT_HA_TYPE]
+#define sat_sa_type_node global_trees[TI_SAT_SA_TYPE]
+#define sat_da_type_node global_trees[TI_SAT_DA_TYPE]
+#define sat_ta_type_node global_trees[TI_SAT_TA_TYPE]
+#define sat_uha_type_node global_trees[TI_SAT_UHA_TYPE]
+#define sat_usa_type_node global_trees[TI_SAT_USA_TYPE]
+#define sat_uda_type_node global_trees[TI_SAT_UDA_TYPE]
+#define sat_uta_type_node global_trees[TI_SAT_UTA_TYPE]
+
+/* The node that should be placed at the end of a parameter list to
+ indicate that the function does not take a variable number of
+ arguments. The TREE_VALUE will be void_type_node and there will be
+ no TREE_CHAIN. Language-independent code should not assume
+ anything else about this node. */
+#define void_list_node global_trees[TI_VOID_LIST_NODE]
+
+#define main_identifier_node global_trees[TI_MAIN_IDENTIFIER]
+#define MAIN_NAME_P(NODE) \
+ (IDENTIFIER_NODE_CHECK (NODE) == main_identifier_node)
+
+/* Optimization options (OPTIMIZATION_NODE) to use for default and current
+ functions. */
+#define optimization_default_node global_trees[TI_OPTIMIZATION_DEFAULT]
+#define optimization_current_node global_trees[TI_OPTIMIZATION_CURRENT]
+
+/* Default/current target options (TARGET_OPTION_NODE). */
+#define target_option_default_node global_trees[TI_TARGET_OPTION_DEFAULT]
+#define target_option_current_node global_trees[TI_TARGET_OPTION_CURRENT]
+
+/* Default TREE_LIST nodes for the target() and optimize() pragmas, to be
+   linked into the attribute list. */
+#define current_target_pragma global_trees[TI_CURRENT_TARGET_PRAGMA]
+#define current_optimize_pragma global_trees[TI_CURRENT_OPTIMIZE_PRAGMA]
+
+/* SCEV analyzer global shared trees. */
+#define chrec_not_analyzed_yet NULL_TREE
+#define chrec_dont_know global_trees[TI_CHREC_DONT_KNOW]
+#define chrec_known global_trees[TI_CHREC_KNOWN]
+
+#define char_type_node integer_types[itk_char]
+#define signed_char_type_node integer_types[itk_signed_char]
+#define unsigned_char_type_node integer_types[itk_unsigned_char]
+#define short_integer_type_node integer_types[itk_short]
+#define short_unsigned_type_node integer_types[itk_unsigned_short]
+#define integer_type_node integer_types[itk_int]
+#define unsigned_type_node integer_types[itk_unsigned_int]
+#define long_integer_type_node integer_types[itk_long]
+#define long_unsigned_type_node integer_types[itk_unsigned_long]
+#define long_long_integer_type_node integer_types[itk_long_long]
+#define long_long_unsigned_type_node integer_types[itk_unsigned_long_long]
+
+/* True if T is an erroneous expression. */
+
+inline bool
+error_operand_p (const_tree t)
+{
+ return (t == error_mark_node
+ || (t && TREE_TYPE (t) == error_mark_node));
+}
+
+/* Return the number of elements encoded directly in a VECTOR_CST. */
+
+inline unsigned int
+vector_cst_encoded_nelts (const_tree t)
+{
+ return VECTOR_CST_NPATTERNS (t) * VECTOR_CST_NELTS_PER_PATTERN (t);
+}
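+
+/* For instance (illustrative note): a stepped VECTOR_CST such as
+   { 0, 1, 2, 3, ... } uses one pattern of three encoded elements (the
+   first two values plus an implied linear step), so this returns
+   1 * 3 = 3 regardless of how many elements the full vector has, while a
+   uniform vector { 7, 7, ... } needs only 1 * 1 = 1. */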
+
+extern tree decl_assembler_name (tree);
+extern void overwrite_decl_assembler_name (tree decl, tree name);
+extern tree decl_comdat_group (const_tree);
+extern tree decl_comdat_group_id (const_tree);
+extern const char *decl_section_name (const_tree);
+extern void set_decl_section_name (tree, const char *);
+extern void set_decl_section_name (tree, const_tree);
+extern enum tls_model decl_tls_model (const_tree);
+extern void set_decl_tls_model (tree, enum tls_model);
+
+/* Compute the number of bytes occupied by 'node'. This routine only
+ looks at TREE_CODE and, if the code is TREE_VEC, TREE_VEC_LENGTH. */
+
+extern size_t tree_size (const_tree);
+
+/* Compute the number of bytes occupied by a tree with code CODE.
+ This function cannot be used for TREE_VEC or INTEGER_CST nodes,
+ which are of variable length. */
+extern size_t tree_code_size (enum tree_code);
+
+/* Allocate and return a new UID from the DECL_UID namespace. */
+extern int allocate_decl_uid (void);
+
+/* Lowest level primitive for allocating a node.
+ The TREE_CODE is the only argument. Contents are initialized
+ to zero except for a few of the common fields. */
+
+extern tree make_node (enum tree_code CXX_MEM_STAT_INFO);
+
+/* Free tree node. */
+
+extern void free_node (tree);
+
+/* Make a copy of a node, with all the same contents. */
+
+extern tree copy_node (tree CXX_MEM_STAT_INFO);
+
+/* Make a copy of a chain of TREE_LIST nodes. */
+
+extern tree copy_list (tree);
+
+/* Make a CASE_LABEL_EXPR. */
+
+extern tree build_case_label (tree, tree, tree);
+
+/* Make a BINFO. */
+extern tree make_tree_binfo (unsigned CXX_MEM_STAT_INFO);
+
+/* Make an INTEGER_CST. */
+
+extern tree make_int_cst (int, int CXX_MEM_STAT_INFO);
+
+/* Make a TREE_VEC. */
+
+extern tree make_tree_vec (int CXX_MEM_STAT_INFO);
+
+/* Grow a TREE_VEC. */
+
+extern tree grow_tree_vec (tree v, int CXX_MEM_STAT_INFO);
+
+/* Treat a TREE_VEC as a range of trees, e.g.
+ for (tree e : tree_vec_range (v)) { ... } */
+
+class tree_vec_range
+{
+ tree v;
+public:
+ tree_vec_range(tree v) : v(v) { }
+ tree *begin() { return TREE_VEC_BEGIN (v); }
+ tree *end() { return TREE_VEC_END (v); }
+};
+
+/* Construct various types of nodes. */
+
+extern tree build_nt (enum tree_code, ...);
+extern tree build_nt_call_vec (tree, vec<tree, va_gc> *);
+
+extern tree build0 (enum tree_code, tree CXX_MEM_STAT_INFO);
+extern tree build1 (enum tree_code, tree, tree CXX_MEM_STAT_INFO);
+extern tree build2 (enum tree_code, tree, tree, tree CXX_MEM_STAT_INFO);
+extern tree build3 (enum tree_code, tree, tree, tree, tree CXX_MEM_STAT_INFO);
+extern tree build4 (enum tree_code, tree, tree, tree, tree,
+ tree CXX_MEM_STAT_INFO);
+extern tree build5 (enum tree_code, tree, tree, tree, tree, tree,
+ tree CXX_MEM_STAT_INFO);
+
+/* _loc versions of build[1-5]. */
+
+inline tree
+build1_loc (location_t loc, enum tree_code code, tree type,
+ tree arg1 CXX_MEM_STAT_INFO)
+{
+ tree t = build1 (code, type, arg1 PASS_MEM_STAT);
+ if (CAN_HAVE_LOCATION_P (t))
+ SET_EXPR_LOCATION (t, loc);
+ return t;
+}
+
+inline tree
+build2_loc (location_t loc, enum tree_code code, tree type, tree arg0,
+ tree arg1 CXX_MEM_STAT_INFO)
+{
+ tree t = build2 (code, type, arg0, arg1 PASS_MEM_STAT);
+ if (CAN_HAVE_LOCATION_P (t))
+ SET_EXPR_LOCATION (t, loc);
+ return t;
+}
+
+inline tree
+build3_loc (location_t loc, enum tree_code code, tree type, tree arg0,
+ tree arg1, tree arg2 CXX_MEM_STAT_INFO)
+{
+ tree t = build3 (code, type, arg0, arg1, arg2 PASS_MEM_STAT);
+ if (CAN_HAVE_LOCATION_P (t))
+ SET_EXPR_LOCATION (t, loc);
+ return t;
+}
+
+inline tree
+build4_loc (location_t loc, enum tree_code code, tree type, tree arg0,
+ tree arg1, tree arg2, tree arg3 CXX_MEM_STAT_INFO)
+{
+ tree t = build4 (code, type, arg0, arg1, arg2, arg3 PASS_MEM_STAT);
+ if (CAN_HAVE_LOCATION_P (t))
+ SET_EXPR_LOCATION (t, loc);
+ return t;
+}
+
+inline tree
+build5_loc (location_t loc, enum tree_code code, tree type, tree arg0,
+ tree arg1, tree arg2, tree arg3, tree arg4 CXX_MEM_STAT_INFO)
+{
+ tree t = build5 (code, type, arg0, arg1, arg2, arg3,
+ arg4 PASS_MEM_STAT);
+ if (CAN_HAVE_LOCATION_P (t))
+ SET_EXPR_LOCATION (t, loc);
+ return t;
+}
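+
+/* E.g. (illustrative sketch) building "a + b" carrying source location
+   LOC:
+
+     tree sum = build2_loc (loc, PLUS_EXPR, integer_type_node, a, b);
+
+   which is build2 followed by SET_EXPR_LOCATION whenever the node can
+   carry a location. */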
+
+/* Construct an INTEGER_CST of type TYPE from the double_int CST. */
+
+extern tree double_int_to_tree (tree, double_int);
+
+extern tree wide_int_to_tree (tree type, const poly_wide_int_ref &cst);
+extern tree force_fit_type (tree, const poly_wide_int_ref &, int, bool);
+
+/* Create INTEGER_CST nodes of the given type with the given CST value. */
+extern tree build_int_cst (tree, poly_int64);
+extern tree build_int_cstu (tree type, poly_uint64);
+extern tree build_int_cst_type (tree, poly_int64);
+extern tree make_vector (unsigned, unsigned CXX_MEM_STAT_INFO);
+extern tree build_vector_from_ctor (tree, const vec<constructor_elt, va_gc> *);
+extern tree build_vector_from_val (tree, tree);
+extern tree build_uniform_cst (tree, tree);
+extern tree build_vec_series (tree, tree, tree);
+extern tree build_index_vector (tree, poly_uint64, poly_uint64);
+extern tree build_vector_a_then_b (tree, unsigned int, tree, tree);
+extern void recompute_constructor_flags (tree);
+extern void verify_constructor_flags (tree);
+extern tree build_constructor (tree, vec<constructor_elt, va_gc> * CXX_MEM_STAT_INFO);
+extern tree build_constructor_single (tree, tree, tree);
+extern tree build_constructor_from_list (tree, tree);
+extern tree build_constructor_from_vec (tree, const vec<tree, va_gc> *);
+extern tree build_constructor_va (tree, int, ...);
+extern tree build_clobber (tree, enum clobber_kind = CLOBBER_UNDEF);
+extern tree build_real_from_int_cst (tree, const_tree);
+extern tree build_real_from_wide (tree, const wide_int_ref &, signop);
+extern tree build_complex (tree, tree, tree);
+extern tree build_complex_inf (tree, bool);
+extern tree build_each_one_cst (tree);
+extern tree build_one_cst (tree);
+extern tree build_minus_one_cst (tree);
+extern tree build_all_ones_cst (tree);
+extern tree build_zero_cst (tree);
+extern tree sign_mask_for (tree);
+extern tree build_string (unsigned, const char * = NULL);
+extern tree build_poly_int_cst (tree, const poly_wide_int_ref &);
+extern tree build_tree_list (tree, tree CXX_MEM_STAT_INFO);
+extern tree build_tree_list_vec (const vec<tree, va_gc> * CXX_MEM_STAT_INFO);
+extern tree build_decl (location_t, enum tree_code,
+ tree, tree CXX_MEM_STAT_INFO);
+extern tree build_debug_expr_decl (tree type);
+extern tree build_fn_decl (const char *, tree);
+extern tree build_translation_unit_decl (tree);
+extern tree build_block (tree, tree, tree, tree);
+extern tree build_empty_stmt (location_t);
+extern tree build_omp_clause (location_t, enum omp_clause_code);
+
+extern tree build_vl_exp (enum tree_code, int CXX_MEM_STAT_INFO);
+
+extern tree build_call_nary (tree, tree, int, ...);
+extern tree build_call_valist (tree, tree, int, va_list);
+#define build_call_array(T1,T2,N,T3)\
+ build_call_array_loc (UNKNOWN_LOCATION, T1, T2, N, T3)
+extern tree build_call_array_loc (location_t, tree, tree, int, const tree *);
+extern tree build_call_vec (tree, tree, const vec<tree, va_gc> *);
+extern tree build_call_expr_loc_array (location_t, tree, int, tree *);
+extern tree build_call_expr_loc_vec (location_t, tree, vec<tree, va_gc> *);
+extern tree build_call_expr_loc (location_t, tree, int, ...);
+extern tree build_call_expr (tree, int, ...);
+extern tree build_call_expr_internal_loc (location_t, enum internal_fn,
+ tree, int, ...);
+extern tree build_call_expr_internal_loc_array (location_t, enum internal_fn,
+ tree, int, const tree *);
+extern tree maybe_build_call_expr_loc (location_t, combined_fn, tree,
+ int, ...);
+extern tree build_alloca_call_expr (tree, unsigned int, HOST_WIDE_INT);
+extern tree build_string_literal (unsigned, const char * = NULL,
+ tree = char_type_node,
+ unsigned HOST_WIDE_INT = HOST_WIDE_INT_M1U);
+inline tree build_string_literal (const char *p)
+{ return build_string_literal (strlen (p) + 1, p); }
+inline tree build_string_literal (tree t)
+{
+ return build_string_literal (IDENTIFIER_LENGTH (t) + 1,
+ IDENTIFIER_POINTER (t));
+}
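+
+/* Illustrative note: build_string_literal ("abc") goes through the
+   (unsigned, const char *) overload with length 4 -- the terminating NUL
+   is counted -- and yields the address of the resulting STRING_CST,
+   suitable for passing to a built-in call. */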
+
+/* Construct various nodes representing data types. */
+
+extern tree signed_or_unsigned_type_for (int, tree);
+extern tree signed_type_for (tree);
+extern tree unsigned_type_for (tree);
+extern bool is_truth_type_for (tree, tree);
+extern bool tree_zero_one_valued_p (tree);
+extern tree truth_type_for (tree);
+extern tree build_pointer_type_for_mode (tree, machine_mode, bool);
+extern tree build_pointer_type (tree);
+extern tree build_reference_type_for_mode (tree, machine_mode, bool);
+extern tree build_reference_type (tree);
+extern tree build_vector_type_for_mode (tree, machine_mode);
+extern tree build_vector_type (tree, poly_int64);
+extern tree build_truth_vector_type_for_mode (poly_uint64, machine_mode);
+extern tree build_opaque_vector_type (tree, poly_int64);
+extern tree build_index_type (tree);
+extern tree build_array_type_1 (tree, tree, bool, bool, bool);
+extern tree build_array_type (tree, tree, bool = false);
+extern tree build_nonshared_array_type (tree, tree);
+extern tree build_array_type_nelts (tree, poly_uint64);
+extern tree build_function_type (tree, tree, bool = false);
+extern tree build_function_type_list (tree, ...);
+extern tree build_varargs_function_type_list (tree, ...);
+extern tree build_function_type_array (tree, int, tree *);
+extern tree build_varargs_function_type_array (tree, int, tree *);
+#define build_function_type_vec(RET, V) \
+ build_function_type_array (RET, vec_safe_length (V), vec_safe_address (V))
+#define build_varargs_function_type_vec(RET, V) \
+ build_varargs_function_type_array (RET, vec_safe_length (V), \
+ vec_safe_address (V))
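+
+/* A small sketch (illustrative only): the type of "int f (void *, size_t)"
+   can be built as
+
+     tree fntype = build_function_type_list (integer_type_node,
+                                             ptr_type_node, size_type_node,
+                                             NULL_TREE);
+
+   The NULL_TREE sentinel ends the argument list; the resulting
+   TYPE_ARG_TYPES chain is terminated by void_list_node, marking the
+   function as non-variadic. */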
+extern tree build_method_type_directly (tree, tree, tree);
+extern tree build_method_type (tree, tree);
+extern tree build_offset_type (tree, tree);
+extern tree build_complex_type (tree, bool named = false);
+extern tree array_type_nelts (const_tree);
+
+extern tree value_member (tree, tree);
+extern tree purpose_member (const_tree, tree);
+extern bool vec_member (const_tree, vec<tree, va_gc> *);
+extern tree chain_index (int, tree);
+
+/* Arguments may be null. */
+extern int tree_int_cst_equal (const_tree, const_tree);
+
+/* The following predicates are safe to call with a null argument. */
+extern bool tree_fits_shwi_p (const_tree) ATTRIBUTE_PURE;
+extern bool tree_fits_poly_int64_p (const_tree) ATTRIBUTE_PURE;
+extern bool tree_fits_uhwi_p (const_tree) ATTRIBUTE_PURE;
+extern bool tree_fits_poly_uint64_p (const_tree) ATTRIBUTE_PURE;
+
+extern HOST_WIDE_INT tree_to_shwi (const_tree)
+ ATTRIBUTE_NONNULL (1) ATTRIBUTE_PURE;
+extern poly_int64 tree_to_poly_int64 (const_tree)
+ ATTRIBUTE_NONNULL (1) ATTRIBUTE_PURE;
+extern unsigned HOST_WIDE_INT tree_to_uhwi (const_tree)
+ ATTRIBUTE_NONNULL (1) ATTRIBUTE_PURE;
+extern poly_uint64 tree_to_poly_uint64 (const_tree)
+ ATTRIBUTE_NONNULL (1) ATTRIBUTE_PURE;
+#if !defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 4003)
+extern inline __attribute__ ((__gnu_inline__)) HOST_WIDE_INT
+tree_to_shwi (const_tree t)
+{
+ gcc_assert (tree_fits_shwi_p (t));
+ return TREE_INT_CST_LOW (t);
+}
+
+extern inline __attribute__ ((__gnu_inline__)) unsigned HOST_WIDE_INT
+tree_to_uhwi (const_tree t)
+{
+ gcc_assert (tree_fits_uhwi_p (t));
+ return TREE_INT_CST_LOW (t);
+}
+#if NUM_POLY_INT_COEFFS == 1
+extern inline __attribute__ ((__gnu_inline__)) poly_int64
+tree_to_poly_int64 (const_tree t)
+{
+ gcc_assert (tree_fits_poly_int64_p (t));
+ return TREE_INT_CST_LOW (t);
+}
+
+extern inline __attribute__ ((__gnu_inline__)) poly_uint64
+tree_to_poly_uint64 (const_tree t)
+{
+ gcc_assert (tree_fits_poly_uint64_p (t));
+ return TREE_INT_CST_LOW (t);
+}
+#endif
+#endif
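+
+/* Callers are expected to test the corresponding predicate first
+   (illustrative sketch), since the converters assert it:
+
+     if (tree_fits_shwi_p (t))
+       {
+         HOST_WIDE_INT v = tree_to_shwi (t);
+         ...
+       } */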
+extern int tree_int_cst_sgn (const_tree);
+extern int tree_int_cst_sign_bit (const_tree);
+extern unsigned int tree_int_cst_min_precision (tree, signop);
+extern tree strip_array_types (tree);
+extern tree excess_precision_type (tree);
+
+/* Description of the reason why the argument of valid_constant_size_p
+   is not a valid size. */
+enum cst_size_error {
+ cst_size_ok,
+ cst_size_not_constant,
+ cst_size_negative,
+ cst_size_too_big,
+ cst_size_overflow
+};
+
+extern bool valid_constant_size_p (const_tree, cst_size_error * = NULL);
+extern tree max_object_size ();
+
+/* Return true if T holds a value that can be represented as a poly_int64
+ without loss of precision. Store the value in *VALUE if so. */
+
+inline bool
+poly_int_tree_p (const_tree t, poly_int64_pod *value)
+{
+ if (tree_fits_poly_int64_p (t))
+ {
+ *value = tree_to_poly_int64 (t);
+ return true;
+ }
+ return false;
+}
+
+/* Return true if T holds a value that can be represented as a poly_uint64
+ without loss of precision. Store the value in *VALUE if so. */
+
+inline bool
+poly_int_tree_p (const_tree t, poly_uint64_pod *value)
+{
+ if (tree_fits_poly_uint64_p (t))
+ {
+ *value = tree_to_poly_uint64 (t);
+ return true;
+ }
+ return false;
+}
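+
+/* Typical use (illustrative sketch): reading a type size that may be a
+   runtime-scaled poly_int on variable-length-vector targets:
+
+     poly_int64 size;
+     if (poly_int_tree_p (TYPE_SIZE_UNIT (type), &size))
+       ... use size ... */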
+
+/* From expmed.cc. Since rtl.h is included after tree.h, we can't
+   put the prototype here; rtl.h declares the prototype itself if
+   tree.h has already been included. */
+
+extern tree make_tree (tree, rtx);
+
+/* Returns true iff CAND and BASE have equivalent language-specific
+ qualifiers. */
+
+extern bool check_lang_type (const_tree cand, const_tree base);
+
+/* Returns true iff unqualified CAND and BASE are equivalent. */
+
+extern bool check_base_type (const_tree cand, const_tree base);
+
+/* Check whether CAND is suitable to be returned from get_qualified_type
+ (BASE, TYPE_QUALS). */
+
+extern bool check_qualified_type (const_tree, const_tree, int);
+
+/* Return a version of the TYPE, qualified as indicated by the
+ TYPE_QUALS, if one exists. If no qualified version exists yet,
+ return NULL_TREE. */
+
+extern tree get_qualified_type (tree, int);
+
+/* Like get_qualified_type, but creates the type if it does not
+ exist. This function never returns NULL_TREE. */
+
+extern tree build_qualified_type (tree, int CXX_MEM_STAT_INFO);
+
+/* Create a variant of type T with alignment ALIGN. */
+
+extern tree build_aligned_type (tree, unsigned int);
+
+/* Like build_qualified_type, but only deals with the `const' and
+ `volatile' qualifiers. This interface is retained for backwards
+ compatibility with the various front-ends; new code should use
+ build_qualified_type instead. */
+
+#define build_type_variant(TYPE, CONST_P, VOLATILE_P) \
+ build_qualified_type ((TYPE), \
+ ((CONST_P) ? TYPE_QUAL_CONST : 0) \
+ | ((VOLATILE_P) ? TYPE_QUAL_VOLATILE : 0))
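+
+/* E.g. (illustrative note) build_type_variant (T, 1, 0) returns the
+   const-qualified variant of T, equivalent to
+   build_qualified_type (T, TYPE_QUAL_CONST). */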
+
+/* Make a copy of a type node. */
+
+extern tree build_distinct_type_copy (tree CXX_MEM_STAT_INFO);
+extern tree build_variant_type_copy (tree CXX_MEM_STAT_INFO);
+
+/* Given a hashcode and a ..._TYPE node (for which the hashcode was made),
+ return a canonicalized ..._TYPE node, so that duplicates are not made.
+ How the hash code is computed is up to the caller, as long as any two
+ callers that could hash identical-looking type nodes agree. */
+
+extern hashval_t type_hash_canon_hash (tree);
+extern tree type_hash_canon (unsigned int, tree);
+
+extern tree convert (tree, tree);
+extern tree size_in_bytes_loc (location_t, const_tree);
+inline tree
+size_in_bytes (const_tree t)
+{
+ return size_in_bytes_loc (input_location, t);
+}
+
+extern HOST_WIDE_INT int_size_in_bytes (const_tree);
+extern HOST_WIDE_INT max_int_size_in_bytes (const_tree);
+extern tree bit_position (const_tree);
+extern tree byte_position (const_tree);
+extern HOST_WIDE_INT int_byte_position (const_tree);
+
+/* Type for sizes of data-type. */
+
+#define sizetype sizetype_tab[(int) stk_sizetype]
+#define bitsizetype sizetype_tab[(int) stk_bitsizetype]
+#define ssizetype sizetype_tab[(int) stk_ssizetype]
+#define sbitsizetype sizetype_tab[(int) stk_sbitsizetype]
+#define size_int(L) size_int_kind (L, stk_sizetype)
+#define ssize_int(L) size_int_kind (L, stk_ssizetype)
+#define bitsize_int(L) size_int_kind (L, stk_bitsizetype)
+#define sbitsize_int(L) size_int_kind (L, stk_sbitsizetype)
+
+/* Log2 of BITS_PER_UNIT. */
+
+#if BITS_PER_UNIT == 8
+#define LOG2_BITS_PER_UNIT 3
+#elif BITS_PER_UNIT == 16
+#define LOG2_BITS_PER_UNIT 4
+#else
+#error Unknown BITS_PER_UNIT
+#endif
+
+/* Concatenate two lists (chains of TREE_LIST nodes) X and Y
+ by making the last node in X point to Y.
+ Returns X, except if X is 0 returns Y. */
+
+extern tree chainon (tree, tree);
+
+/* Make a new TREE_LIST node from specified PURPOSE, VALUE and CHAIN. */
+
+extern tree tree_cons (tree, tree, tree CXX_MEM_STAT_INFO);
+
+/* Return the last tree node in a chain. */
+
+extern tree tree_last (tree);
+
+/* Reverse the order of elements in a chain, and return the new head. */
+
+extern tree nreverse (tree);
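+
+/* The usual list-building idiom (illustrative sketch) conses onto the
+   front and reverses once at the end:
+
+     tree list = NULL_TREE;
+     while (more_elements)
+       list = tree_cons (NULL_TREE, next_element, list);
+     list = nreverse (list);
+
+   where more_elements and next_element are placeholders. */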
+
+/* Returns the length of a chain of nodes
+ (number of chain pointers to follow before reaching a null pointer). */
+
+extern int list_length (const_tree);
+
+/* Returns the first/last FIELD_DECL in a RECORD_TYPE. */
+
+extern tree first_field (const_tree) ATTRIBUTE_NONNULL (1);
+extern tree last_field (const_tree) ATTRIBUTE_NONNULL (1);
+
+/* Given an initializer INIT, return TRUE if INIT is zero or some
+   aggregate of zeros. Otherwise return FALSE. If NONZERO is not
+   null, set *NONZERO if and only if INIT is known not to be all
+   zeros. A false return value combined with a false *NONZERO means
+   that INIT may, but need not, be all zeros; all other combinations
+   indicate definitive answers. */
+
+extern bool initializer_zerop (const_tree, bool * = NULL);
+extern bool initializer_each_zero_or_onep (const_tree);
+
+extern tree vector_cst_elt (const_tree, unsigned int);
+
+/* Given a vector VEC, return its first element if all elements are
+ the same. Otherwise return NULL_TREE. */
+
+extern tree uniform_vector_p (const_tree);
+
+/* If the argument is INTEGER_CST, return it. If the argument is vector
+ with all elements the same INTEGER_CST, return that INTEGER_CST. Otherwise
+ return NULL_TREE. */
+
+extern tree uniform_integer_cst_p (tree);
+
+extern int single_nonzero_element (const_tree);
+
+/* Given a CONSTRUCTOR CTOR, return the element values as a vector. */
+
+extern vec<tree, va_gc> *ctor_to_vec (tree);
+
+/* zerop (tree x) is nonzero if X is a constant of value 0. */
+
+extern bool zerop (const_tree);
+
+/* integer_zerop (tree x) is nonzero if X is an integer constant of value 0. */
+
+extern bool integer_zerop (const_tree);
+
+/* integer_onep (tree x) is nonzero if X is an integer constant of value 1. */
+
+extern bool integer_onep (const_tree);
+
+/* integer_each_onep (tree x) is nonzero if X is an integer constant of value
+   1, or a vector or complex where each part is 1. */
+
+extern bool integer_each_onep (const_tree);
+
+/* integer_all_onesp (tree x) is nonzero if X is an integer constant
+ all of whose significant bits are 1. */
+
+extern bool integer_all_onesp (const_tree);
+
+/* integer_minus_onep (tree x) is nonzero if X is an integer constant of
+ value -1. */
+
+extern bool integer_minus_onep (const_tree);
+
+/* integer_pow2p (tree x) is nonzero if X is an integer constant with
+   exactly one bit set. */
+
+extern bool integer_pow2p (const_tree);
+
+/* Check whether T is a constant, or a constant vector in which each element E
+   satisfies ~E + 1 == pow2; if so, return ~E, otherwise NULL_TREE. */
+
+extern tree bitmask_inv_cst_vector_p (tree);
+
+/* integer_nonzerop (tree x) is nonzero if X is an integer constant
+ with a nonzero value. */
+
+extern bool integer_nonzerop (const_tree);
+
+/* integer_truep (tree x) is nonzero if X is an integer constant of value 1 or
+ a vector where each element is an integer constant of value -1. */
+
+extern bool integer_truep (const_tree);
+
+extern bool cst_and_fits_in_hwi (const_tree);
+extern tree num_ending_zeros (const_tree);
+
+/* fixed_zerop (tree x) is nonzero if X is a fixed-point constant of
+ value 0. */
+
+extern bool fixed_zerop (const_tree);
+
+/* staticp (tree x) is nonzero if X is a reference to data allocated
+ at a fixed address in memory. Returns the outermost data. */
+
+extern tree staticp (tree);
+
+/* save_expr (EXP) returns an expression equivalent to EXP
+   that can be used multiple times and will evaluate EXP
+   only once. */
+
+extern tree save_expr (tree);
+
+/* Return true if T is function-invariant. */
+
+extern bool tree_invariant_p (tree);
+
+/* Look through any simple arithmetic operations inside EXPR. Return the
+   outermost non-arithmetic or non-invariant node. */
+
+extern tree skip_simple_arithmetic (tree);
+
+/* Look through simple arithmetic operations involving constants inside EXPR.
+   Return the outermost non-arithmetic or non-constant node. */
+
+extern tree skip_simple_constant_arithmetic (tree);
+
+/* Return which tree structure is used by T. */
+
+enum tree_node_structure_enum tree_node_structure (const_tree);
+
+/* Return true if EXP contains a PLACEHOLDER_EXPR, i.e. if it represents a
+ size or offset that depends on a field within a record. */
+
+extern bool contains_placeholder_p (const_tree);
+
+/* This macro calls the above function but short-circuits the common
+   case of a constant to save time; it also checks for null. */
+
+#define CONTAINS_PLACEHOLDER_P(EXP) \
+ ((EXP) != 0 && ! TREE_CONSTANT (EXP) && contains_placeholder_p (EXP))
+
+/* Return true if any part of the structure of TYPE involves a PLACEHOLDER_EXPR
+ directly. This includes size, bounds, qualifiers (for QUAL_UNION_TYPE) and
+ field positions. */
+
+extern bool type_contains_placeholder_p (tree);
+
+/* Given a tree EXP, find all occurrences of references to fields
+ in a PLACEHOLDER_EXPR and place them in vector REFS without
+ duplicates. Also record VAR_DECLs and CONST_DECLs. Note that
+ we assume here that EXP contains only arithmetic expressions
+ or CALL_EXPRs with PLACEHOLDER_EXPRs occurring only in their
+ argument list. */
+
+extern void find_placeholder_in_expr (tree, vec<tree> *);
+
+/* This macro calls the above function but short-circuits the common
+ case of a constant to save time and also checks for NULL. */
+
+#define FIND_PLACEHOLDER_IN_EXPR(EXP, V) \
+do { \
+  if ((EXP) && !TREE_CONSTANT (EXP)) \
+ find_placeholder_in_expr (EXP, V); \
+} while (0)
+
+/* Given a tree EXP, a FIELD_DECL F, and a replacement value R,
+ return a tree with all occurrences of references to F in a
+ PLACEHOLDER_EXPR replaced by R. Also handle VAR_DECLs and
+ CONST_DECLs. Note that we assume here that EXP contains only
+ arithmetic expressions or CALL_EXPRs with PLACEHOLDER_EXPRs
+ occurring only in their argument list. */
+
+extern tree substitute_in_expr (tree, tree, tree);
+
+/* This macro calls the above function but short-circuits the common
+ case of a constant to save time and also checks for NULL. */
+
+#define SUBSTITUTE_IN_EXPR(EXP, F, R) \
+ ((EXP) == 0 || TREE_CONSTANT (EXP) ? (EXP) : substitute_in_expr (EXP, F, R))
+
+/* Similar, but look for a PLACEHOLDER_EXPR in EXP and find a replacement
+ for it within OBJ, a tree that is an object or a chain of references. */
+
+extern tree substitute_placeholder_in_expr (tree, tree);
+
+/* This macro calls the above function but short-circuits the common
+ case of a constant to save time and also checks for NULL. */
+
+#define SUBSTITUTE_PLACEHOLDER_IN_EXPR(EXP, OBJ) \
+ ((EXP) == 0 || TREE_CONSTANT (EXP) ? (EXP) \
+ : substitute_placeholder_in_expr (EXP, OBJ))
+
+
+/* stabilize_reference (EXP) returns a reference equivalent to EXP
+ but it can be used multiple times
+ and only evaluate the subexpressions once. */
+
+extern tree stabilize_reference (tree);
+
+/* Return EXP, stripped of any conversions to wider types
+ in such a way that the result of converting to type FOR_TYPE
+ is the same as if EXP were converted to FOR_TYPE.
+ If FOR_TYPE is 0, it signifies EXP's type. */
+
+extern tree get_unwidened (tree, tree);
+
+/* Return OP or a simpler expression for a narrower value
+ which can be sign-extended or zero-extended to give back OP.
+ Store in *UNSIGNEDP_PTR either 1 if the value should be zero-extended
+ or 0 if the value should be sign-extended. */
+
+extern tree get_narrower (tree, int *);
+
+/* Return true if T is an expression that get_inner_reference handles. */
+
+inline bool
+handled_component_p (const_tree t)
+{
+ switch (TREE_CODE (t))
+ {
+ case COMPONENT_REF:
+ case BIT_FIELD_REF:
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ case VIEW_CONVERT_EXPR:
+ return true;
+
+ default:
+ return false;
+ }
+}
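+
+/* E.g. (illustrative sketch) for an access like a.b[i], the outer
+   ARRAY_REF and COMPONENT_REF are handled components; the common idiom
+
+     while (handled_component_p (t))
+       t = TREE_OPERAND (t, 0);
+
+   peels them off to reach the base object that get_inner_reference
+   would analyse. */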
+
+/* Return true if T is a component with reverse storage order. */
+
+inline bool
+reverse_storage_order_for_component_p (tree t)
+{
+ /* The storage order only applies to scalar components. */
+ if (AGGREGATE_TYPE_P (TREE_TYPE (t))
+ || POINTER_TYPE_P (TREE_TYPE (t))
+ || VECTOR_TYPE_P (TREE_TYPE (t)))
+ return false;
+
+ if (TREE_CODE (t) == REALPART_EXPR || TREE_CODE (t) == IMAGPART_EXPR)
+ t = TREE_OPERAND (t, 0);
+
+ switch (TREE_CODE (t))
+ {
+ case ARRAY_REF:
+ case COMPONENT_REF:
+ /* ??? Fortran can take COMPONENT_REF of a VOID_TYPE. */
+ /* ??? UBSan can take COMPONENT_REF of a REFERENCE_TYPE. */
+ return AGGREGATE_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
+ && TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (TREE_OPERAND (t, 0)));
+
+ case BIT_FIELD_REF:
+ case MEM_REF:
+ return REF_REVERSE_STORAGE_ORDER (t);
+
+ case ARRAY_RANGE_REF:
+ case VIEW_CONVERT_EXPR:
+ default:
+ return false;
+ }
+}
+
+/* Return true if T is a storage order barrier, i.e. a VIEW_CONVERT_EXPR
+ that can modify the storage order of objects. Note that, even if the
+ TYPE_REVERSE_STORAGE_ORDER flag is set on both the inner type and the
+ outer type, a VIEW_CONVERT_EXPR can modify the storage order because
+ it can change the partition of the aggregate object into scalars. */
+
+inline bool
+storage_order_barrier_p (const_tree t)
+{
+ if (TREE_CODE (t) != VIEW_CONVERT_EXPR)
+ return false;
+
+ if (AGGREGATE_TYPE_P (TREE_TYPE (t))
+ && TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (t)))
+ return true;
+
+ tree op = TREE_OPERAND (t, 0);
+
+ if (AGGREGATE_TYPE_P (TREE_TYPE (op))
+ && TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (op)))
+ return true;
+
+ return false;
+}
+
+/* Given a DECL or TYPE, return the scope in which it was declared, or
+   NULL_TREE if there is no containing scope. */
+
+extern tree get_containing_scope (const_tree);
+
+/* Returns the ultimate TRANSLATION_UNIT_DECL context of DECL or NULL. */
+
+extern const_tree get_ultimate_context (const_tree);
+
+/* Return the FUNCTION_DECL which provides this _DECL with its context,
+ or zero if none. */
+extern tree decl_function_context (const_tree);
+
+/* Return the RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE which provides
+ this _DECL with its context, or zero if none. */
+extern tree decl_type_context (const_tree);
+
+/* Return true if EXPR is the real constant zero. */
+extern bool real_zerop (const_tree);
+
+/* Initialize the iterator I with the argument types of function type
+   FNTYPE. */
+
+inline void
+function_args_iter_init (function_args_iterator *i, const_tree fntype)
+{
+ i->next = TYPE_ARG_TYPES (fntype);
+}
+
+/* Return a pointer that holds the next argument if there are more arguments to
+ handle, otherwise return NULL. */
+
+inline tree *
+function_args_iter_cond_ptr (function_args_iterator *i)
+{
+ return (i->next) ? &TREE_VALUE (i->next) : NULL;
+}
+
+/* Return the next argument if there are more arguments to handle, otherwise
+ return NULL. */
+
+inline tree
+function_args_iter_cond (function_args_iterator *i)
+{
+ return (i->next) ? TREE_VALUE (i->next) : NULL_TREE;
+}
+
+/* Advance to the next argument. */
+inline void
+function_args_iter_next (function_args_iterator *i)
+{
+ gcc_assert (i->next != NULL_TREE);
+ i->next = TREE_CHAIN (i->next);
+}
+
+/* Returns true if a BLOCK has a source location.
+   BLOCK_SOURCE_LOCATION is set only on inlined function entry points,
+   so the function returns true for all but the innermost and outermost
+   blocks into which an expression has been inlined. */
+
+inline bool
+inlined_function_outer_scope_p (const_tree block)
+{
+ return LOCATION_LOCUS (BLOCK_SOURCE_LOCATION (block)) != UNKNOWN_LOCATION;
+}
+
+/* Loop over all function arguments of FNTYPE. In each iteration, PTR is set
+ to point to the next tree element. ITER is an instance of
+ function_args_iterator used to iterate the arguments. */
+#define FOREACH_FUNCTION_ARGS_PTR(FNTYPE, PTR, ITER) \
+ for (function_args_iter_init (&(ITER), (FNTYPE)); \
+ (PTR = function_args_iter_cond_ptr (&(ITER))) != NULL; \
+ function_args_iter_next (&(ITER)))
+
+/* Loop over all function arguments of FNTYPE. In each iteration, TREE is set
+ to the next tree element. ITER is an instance of function_args_iterator
+ used to iterate the arguments. */
+#define FOREACH_FUNCTION_ARGS(FNTYPE, TREE, ITER) \
+ for (function_args_iter_init (&(ITER), (FNTYPE)); \
+ (TREE = function_args_iter_cond (&(ITER))) != NULL_TREE; \
+ function_args_iter_next (&(ITER)))
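+
+/* Usage sketch (illustrative only): counting the fixed arguments of
+   FNTYPE; note that for a prototyped function the terminating
+   void_list_node makes void_type_node the last value visited:
+
+     tree t;
+     function_args_iterator iter;
+     int n = 0;
+     FOREACH_FUNCTION_ARGS (fntype, t, iter)
+       {
+         if (t == void_type_node)
+           break;
+         n++;
+       } */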
+
+/* In tree.cc */
+extern unsigned crc32_unsigned_n (unsigned, unsigned, unsigned);
+extern unsigned crc32_string (unsigned, const char *);
+inline unsigned
+crc32_unsigned (unsigned chksum, unsigned value)
+{
+ return crc32_unsigned_n (chksum, value, 4);
+}
+inline unsigned
+crc32_byte (unsigned chksum, char byte)
+{
+ return crc32_unsigned_n (chksum, byte, 1);
+}
+extern void clean_symbol_name (char *);
+extern tree get_file_function_name (const char *);
+extern tree get_callee_fndecl (const_tree);
+extern combined_fn get_call_combined_fn (const_tree);
+extern int type_num_arguments (const_tree);
+extern tree type_argument_type (const_tree, unsigned) ATTRIBUTE_NONNULL (1);
+extern bool associative_tree_code (enum tree_code);
+extern bool commutative_tree_code (enum tree_code);
+extern bool commutative_ternary_tree_code (enum tree_code);
+extern bool operation_can_overflow (enum tree_code);
+extern bool operation_no_trapping_overflow (tree, enum tree_code);
+extern tree upper_bound_in_type (tree, tree);
+extern tree lower_bound_in_type (tree, tree);
+extern int operand_equal_for_phi_arg_p (const_tree, const_tree);
+extern tree create_artificial_label (location_t);
+extern const char *get_name (tree);
+extern bool stdarg_p (const_tree);
+extern bool prototype_p (const_tree);
+extern bool is_typedef_decl (const_tree x);
+extern bool typedef_variant_p (const_tree);
+extern bool auto_var_p (const_tree);
+extern bool auto_var_in_fn_p (const_tree, const_tree);
+extern tree build_low_bits_mask (tree, unsigned);
+extern bool tree_nop_conversion_p (const_tree, const_tree);
+extern tree tree_strip_nop_conversions (tree);
+extern tree tree_strip_sign_nop_conversions (tree);
+extern const_tree strip_invariant_refs (const_tree);
+extern tree strip_zero_offset_components (tree);
+extern tree lhd_gcc_personality (void);
+extern void assign_assembler_name_if_needed (tree);
+extern bool warn_deprecated_use (tree, tree);
+extern void error_unavailable_use (tree, tree);
+extern tree cache_integer_cst (tree, bool might_duplicate = false);
+extern const char *combined_fn_name (combined_fn);
+
+/* Compare and hash for any structure which begins with a canonical
+   pointer. Assumes all pointers are interchangeable, an assumption
+   GCC already makes elsewhere. */
+
+inline int
+struct_ptr_eq (const void *a, const void *b)
+{
+ const void * const * x = (const void * const *) a;
+ const void * const * y = (const void * const *) b;
+ return *x == *y;
+}
+
+inline hashval_t
+struct_ptr_hash (const void *a)
+{
+ const void * const * x = (const void * const *) a;
+ return (intptr_t)*x >> 4;
+}
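+
+/* These match the libiberty htab_hash/htab_eq signatures, so a hash table
+   keyed on such structures can be created as (illustrative sketch,
+   assuming the entries are malloc'ed):
+
+     htab_t h = htab_create (31, struct_ptr_hash, struct_ptr_eq, free); */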
+
+/* Return nonzero if CODE is a tree code that represents a truth value. */
+inline bool
+truth_value_p (enum tree_code code)
+{
+ return (TREE_CODE_CLASS (code) == tcc_comparison
+ || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
+ || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
+ || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
+}
+
+/* Return whether TYPE is a type suitable for an offset for
+ a POINTER_PLUS_EXPR. */
+inline bool
+ptrofftype_p (tree type)
+{
+ return (INTEGRAL_TYPE_P (type)
+ && TYPE_PRECISION (type) == TYPE_PRECISION (sizetype)
+ && TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));
+}
+
+/* Return true if the argument is a complete type or an array
+ of unknown bound (whose type is incomplete but) whose elements
+ have complete type. */
+inline bool
+complete_or_array_type_p (const_tree type)
+{
+ return COMPLETE_TYPE_P (type)
+ || (TREE_CODE (type) == ARRAY_TYPE
+ && COMPLETE_TYPE_P (TREE_TYPE (type)));
+}
+
+/* Return true if the value of T could be represented as a poly_widest_int. */
+
+inline bool
+poly_int_tree_p (const_tree t)
+{
+ return (TREE_CODE (t) == INTEGER_CST || POLY_INT_CST_P (t));
+}
+
+/* Return the bit size of BIT_FIELD_REF T, in cases where it is known
+ to be a poly_uint64. (This is always true at the gimple level.) */
+
+inline poly_uint64
+bit_field_size (const_tree t)
+{
+ return tree_to_poly_uint64 (TREE_OPERAND (t, 1));
+}
+
+/* Return the starting bit offset of BIT_FIELD_REF T, in cases where it is
+ known to be a poly_uint64. (This is always true at the gimple level.) */
+
+inline poly_uint64
+bit_field_offset (const_tree t)
+{
+ return tree_to_poly_uint64 (TREE_OPERAND (t, 2));
+}
+
+extern tree strip_float_extensions (tree);
+extern bool really_constant_p (const_tree);
+extern bool ptrdiff_tree_p (const_tree, poly_int64_pod *);
+extern bool decl_address_invariant_p (const_tree);
+extern bool decl_address_ip_invariant_p (const_tree);
+extern bool int_fits_type_p (const_tree, const_tree)
+ ATTRIBUTE_NONNULL (1) ATTRIBUTE_NONNULL (2) ATTRIBUTE_PURE;
+#ifndef GENERATOR_FILE
+extern void get_type_static_bounds (const_tree, mpz_t, mpz_t);
+#endif
+extern bool variably_modified_type_p (tree, tree);
+extern int tree_log2 (const_tree);
+extern int tree_floor_log2 (const_tree);
+extern unsigned int tree_ctz (const_tree);
+extern int simple_cst_equal (const_tree, const_tree);
+
+namespace inchash
+{
+
+extern void add_expr (const_tree, hash &, unsigned int = 0);
+
+}
+
+/* Compatibility version, kept until all callers are converted.  Return
+   the hash for TREE with SEED.  */
+inline hashval_t
+iterative_hash_expr (const_tree tree, hashval_t seed)
+{
+ inchash::hash hstate (seed);
+ inchash::add_expr (tree, hstate);
+ return hstate.end ();
+}
+
+extern int compare_tree_int (const_tree, unsigned HOST_WIDE_INT);
+extern bool type_list_equal (const_tree, const_tree);
+extern bool chain_member (const_tree, const_tree);
+extern void dump_tree_statistics (void);
+extern void recompute_tree_invariant_for_addr_expr (tree);
+extern bool needs_to_live_in_memory (const_tree);
+extern tree reconstruct_complex_type (tree, tree);
+extern bool real_onep (const_tree);
+extern bool real_minus_onep (const_tree);
+extern bool real_maybe_zerop (const_tree);
+extern void init_ttree (void);
+extern void build_common_tree_nodes (bool);
+extern void build_common_builtin_nodes (void);
+extern void tree_cc_finalize (void);
+extern tree build_nonstandard_integer_type (unsigned HOST_WIDE_INT, int);
+extern tree build_nonstandard_boolean_type (unsigned HOST_WIDE_INT);
+extern tree build_range_type (tree, tree, tree);
+extern tree build_nonshared_range_type (tree, tree, tree);
+extern bool subrange_type_for_debug_p (const_tree, tree *, tree *);
+extern HOST_WIDE_INT int_cst_value (const_tree);
+extern tree tree_block (tree);
+extern void tree_set_block (tree, tree);
+extern location_t *block_nonartificial_location (tree);
+extern location_t tree_nonartificial_location (tree);
+extern location_t tree_inlined_location (tree, bool = true);
+extern tree block_ultimate_origin (const_tree);
+extern tree get_binfo_at_offset (tree, poly_int64, tree);
+extern bool virtual_method_call_p (const_tree, bool = false);
+extern tree obj_type_ref_class (const_tree ref, bool = false);
+extern bool types_same_for_odr (const_tree type1, const_tree type2);
+extern bool contains_bitfld_component_ref_p (const_tree);
+extern bool block_may_fallthru (const_tree);
+extern void using_eh_for_cleanups (void);
+extern bool using_eh_for_cleanups_p (void);
+extern const char *get_tree_code_name (enum tree_code);
+extern void set_call_expr_flags (tree, int);
+extern tree walk_tree_1 (tree*, walk_tree_fn, void*, hash_set<tree>*,
+ walk_tree_lh);
+extern tree walk_tree_without_duplicates_1 (tree*, walk_tree_fn, void*,
+ walk_tree_lh);
+#define walk_tree(a,b,c,d) \
+ walk_tree_1 (a, b, c, d, NULL)
+#define walk_tree_without_duplicates(a,b,c) \
+ walk_tree_without_duplicates_1 (a, b, c, NULL)
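+
+/* Usage sketch for walk_tree (the callback below is hypothetical).  A
+   walk_tree_fn receives a pointer to each subtree; returning non-NULL
+   stops the walk and propagates that value, and clearing *WALK_SUBTREES
+   skips the operands of the current node:
+
+     static tree
+     find_label_cb (tree *tp, int *walk_subtrees, void *)
+     {
+       if (TREE_CODE (*tp) == LABEL_DECL)
+         return *tp;
+       return NULL_TREE;
+     }
+
+     tree found = walk_tree (&expr, find_label_cb, NULL, NULL);  */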
+
+extern tree drop_tree_overflow (tree);
+
+/* Given a memory reference expression T, return its base address.
+ The base address of a memory reference expression is the main
+ object being referenced. */
+extern tree get_base_address (tree t);
+
+/* Return a tree of sizetype representing the size, in bytes, of the element
+ of EXP, an ARRAY_REF or an ARRAY_RANGE_REF. */
+extern tree array_ref_element_size (tree);
+
+/* Return a typenode for the "standard" C type with a given name. */
+extern tree get_typenode_from_name (const char *);
+
+/* Return a tree representing the upper bound of the array mentioned in
+ EXP, an ARRAY_REF or an ARRAY_RANGE_REF. */
+extern tree array_ref_up_bound (tree);
+
+/* Return a tree representing the lower bound of the array mentioned in
+ EXP, an ARRAY_REF or an ARRAY_RANGE_REF. */
+extern tree array_ref_low_bound (tree);
+
+/* Returns true if REF is an array reference, a component reference,
+ or a memory reference to an array whose actual size might be larger
+ than its upper bound implies. */
+extern bool array_ref_flexible_size_p (tree, bool * = NULL);
+
+/* Return a tree representing the offset, in bytes, of the field referenced
+ by EXP. This does not include any offset in DECL_FIELD_BIT_OFFSET. */
+extern tree component_ref_field_offset (tree);
+
+/* Describes a "special" array member for a COMPONENT_REF. */
+enum struct special_array_member
+ {
+ none, /* Not a special array member. */
+ int_0, /* Interior array member with zero elements. */
+ trail_0, /* Trailing array member with zero elements. */
+ trail_1, /* Trailing array member with one element. */
+ trail_n, /* Trailing array member with two or more elements. */
+ int_n /* Interior array member with one or more elements. */
+ };
+
+/* Determines the special array member type for a COMPONENT_REF. */
+extern special_array_member component_ref_sam_type (tree);
+
+/* Return the size of the member referenced by the COMPONENT_REF, using
+ its initializer expression if necessary in order to determine the size
+ of an initialized flexible array member. The size might be zero for
+ an object with an uninitialized flexible array member or null if it
+ cannot be determined. */
+extern tree component_ref_size (tree, special_array_member * = NULL);
+
+extern int tree_map_base_eq (const void *, const void *);
+extern unsigned int tree_map_base_hash (const void *);
+extern int tree_map_base_marked_p (const void *);
+extern void DEBUG_FUNCTION verify_type (const_tree t);
+extern bool gimple_canonical_types_compatible_p (const_tree, const_tree,
+ bool trust_type_canonical = true);
+extern bool type_with_interoperable_signedness (const_tree);
+extern bitmap get_nonnull_args (const_tree);
+extern int get_range_pos_neg (tree);
+
+/* Return true for a valid pair of new and delete operators. */
+extern bool valid_new_delete_pair_p (tree, tree, bool * = NULL);
+
+/* Return the simplified form of tree code CODE, as used for canonical
+   type merging.  */
+inline enum tree_code
+tree_code_for_canonical_type_merging (enum tree_code code)
+{
+  /* By the C standard, each enumerated type shall be compatible with char,
+     a signed integer type, or an unsigned integer type.  The choice of
+     type is implementation defined (in our case it depends on
+     -fshort-enums).
+
+     For this reason we make no distinction between ENUMERAL_TYPE and
+     INTEGER_TYPE, and compare types only by their signedness and
+     precision.  */
+ if (code == ENUMERAL_TYPE)
+ return INTEGER_TYPE;
+  /* To allow interoperability between C and languages that have
+     references, we treat reference types and pointer types alike.  Note
+     that this is not strictly necessary for C-Fortran 2008
+     interoperability, because Fortran defines a C_PTR type that must be
+     compatible with C pointers, and we handle that one as
+     ptr_type_node.  */
+ if (code == REFERENCE_TYPE)
+ return POINTER_TYPE;
+ return code;
+}
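+
+/* For example, both ENUMERAL_TYPE and INTEGER_TYPE map to INTEGER_TYPE
+   here, so an enum and an integer type of the same precision and
+   signedness can end up with the same canonical type.  */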
+
+/* Return true if get_alias_set cares about the TYPE_CANONICAL of the given
+   type.  We don't define canonical types for pointers, arrays and vectors.
+   The reason is that pointers are handled specially: ptr_type_node accesses
+   conflict with accesses through all other pointers.  This is done by
+   alias.cc.  Because the alias sets of arrays and vectors are the same as
+   those of their elements, we can't compute canonical types for them
+   either.  Otherwise we could go from void *[10] to int *[10] (because they
+   are equivalent for the canonical type machinery) and get wrong TBAA.  */
+
+inline bool
+canonical_type_used_p (const_tree t)
+{
+ return !(POINTER_TYPE_P (t)
+ || TREE_CODE (t) == ARRAY_TYPE
+ || TREE_CODE (t) == VECTOR_TYPE);
+}
+
+/* Kinds of access to pass-by-reference arguments to functions. */
+enum access_mode
+{
+ access_none = 0,
+ access_read_only = 1,
+ access_write_only = 2,
+ access_read_write = access_read_only | access_write_only,
+ access_deferred = 4
+};
+
+#define tree_map_eq tree_map_base_eq
+extern unsigned int tree_map_hash (const void *);
+#define tree_map_marked_p tree_map_base_marked_p
+
+#define tree_decl_map_eq tree_map_base_eq
+extern unsigned int tree_decl_map_hash (const void *);
+#define tree_decl_map_marked_p tree_map_base_marked_p
+
+struct tree_decl_map_cache_hasher : ggc_cache_ptr_hash<tree_decl_map>
+{
+ static hashval_t hash (tree_decl_map *m) { return tree_decl_map_hash (m); }
+ static bool
+ equal (tree_decl_map *a, tree_decl_map *b)
+ {
+ return tree_decl_map_eq (a, b);
+ }
+
+ static int
+ keep_cache_entry (tree_decl_map *&m)
+ {
+ return ggc_marked_p (m->base.from);
+ }
+};
+
+#define tree_int_map_eq tree_map_base_eq
+#define tree_int_map_hash tree_map_base_hash
+#define tree_int_map_marked_p tree_map_base_marked_p
+
+#define tree_vec_map_eq tree_map_base_eq
+#define tree_vec_map_hash tree_decl_map_hash
+#define tree_vec_map_marked_p tree_map_base_marked_p
+
+struct tree_vec_map_cache_hasher : ggc_cache_ptr_hash<tree_vec_map>
+{
+ static hashval_t hash (tree_vec_map *m) { return DECL_UID (m->base.from); }
+
+ static bool
+ equal (tree_vec_map *a, tree_vec_map *b)
+ {
+ return a->base.from == b->base.from;
+ }
+
+ static int
+ keep_cache_entry (tree_vec_map *&m)
+ {
+ return ggc_marked_p (m->base.from);
+ }
+};
+
+/* Hasher for tree decls. Pointer equality is enough here, but the DECL_UID
+ is a better hash than the pointer value and gives a predictable traversal
+ order. Additionally it can be used across PCH save/restore. */
+struct tree_decl_hash : ggc_ptr_hash <tree_node>
+{
+ static inline hashval_t hash (tree);
+};
+
+inline hashval_t
+tree_decl_hash::hash (tree t)
+{
+ return DECL_UID (t);
+}
+
+/* Similarly for types. Uses TYPE_UID as hash function. */
+struct tree_type_hash : ggc_ptr_hash <tree_node>
+{
+ static inline hashval_t hash (tree);
+};
+
+inline hashval_t
+tree_type_hash::hash (tree t)
+{
+ return TYPE_UID (t);
+}
+
+/* Hash for SSA_NAMEs in the same function. Pointer equality is enough
+ here, but the SSA_NAME_VERSION is a better hash than the pointer
+ value and gives a predictable traversal order. */
+struct tree_ssa_name_hash : ggc_ptr_hash <tree_node>
+{
+ static inline hashval_t hash (tree);
+};
+
+inline hashval_t
+tree_ssa_name_hash::hash (tree t)
+{
+ return SSA_NAME_VERSION (t);
+}
+
+/* Hasher for general trees, based on their TREE_HASH. */
+struct tree_hash : ggc_ptr_hash <tree_node>
+{
+ static hashval_t hash (tree);
+};
+
+inline hashval_t
+tree_hash::hash (tree t)
+{
+ return TREE_HASH (t);
+}
+
+/* A hash_map of two trees for use with GTY((cache)). Garbage collection for
+ such a map will not mark keys, and will mark values if the key is already
+ marked. */
+struct tree_cache_traits
+ : simple_cache_map_traits<default_hash_traits<tree>, tree> { };
+typedef hash_map<tree,tree,tree_cache_traits> tree_cache_map;
+
+/* Similarly, but use DECL_UID as hash function rather than pointer hashing.
+ This is for hash_maps from decls to trees that need to work across PCH. */
+struct decl_tree_cache_traits
+ : simple_cache_map_traits<tree_decl_hash, tree> { };
+typedef hash_map<tree,tree,decl_tree_cache_traits> decl_tree_cache_map;
+
+/* Similarly, but use TYPE_UID as hash function rather than pointer hashing.
+ This is for hash_maps from types to trees that need to work across PCH. */
+struct type_tree_cache_traits
+ : simple_cache_map_traits<tree_type_hash, tree> { };
+typedef hash_map<tree,tree,type_tree_cache_traits> type_tree_cache_map;
+
+/* Similarly to decl_tree_cache_map, but without caching. */
+struct decl_tree_traits
+ : simple_hashmap_traits<tree_decl_hash, tree> { };
+typedef hash_map<tree,tree,decl_tree_traits> decl_tree_map;
+
+/* Initialize the abstract argument list iterator object ITER with the
+ arguments from CALL_EXPR node EXP. */
+inline void
+init_call_expr_arg_iterator (tree exp, call_expr_arg_iterator *iter)
+{
+ iter->t = exp;
+ iter->n = call_expr_nargs (exp);
+ iter->i = 0;
+}
+
+inline void
+init_const_call_expr_arg_iterator (const_tree exp, const_call_expr_arg_iterator *iter)
+{
+ iter->t = exp;
+ iter->n = call_expr_nargs (exp);
+ iter->i = 0;
+}
+
+/* Return the next argument from abstract argument list iterator object ITER,
+ and advance its state. Return NULL_TREE if there are no more arguments. */
+inline tree
+next_call_expr_arg (call_expr_arg_iterator *iter)
+{
+ tree result;
+ if (iter->i >= iter->n)
+ return NULL_TREE;
+ result = CALL_EXPR_ARG (iter->t, iter->i);
+ iter->i++;
+ return result;
+}
+
+inline const_tree
+next_const_call_expr_arg (const_call_expr_arg_iterator *iter)
+{
+ const_tree result;
+ if (iter->i >= iter->n)
+ return NULL_TREE;
+ result = CALL_EXPR_ARG (iter->t, iter->i);
+ iter->i++;
+ return result;
+}
+
+/* Initialize the abstract argument list iterator object ITER, then advance
+ past and return the first argument. Useful in for expressions, e.g.
+ for (arg = first_call_expr_arg (exp, &iter); arg;
+ arg = next_call_expr_arg (&iter)) */
+inline tree
+first_call_expr_arg (tree exp, call_expr_arg_iterator *iter)
+{
+ init_call_expr_arg_iterator (exp, iter);
+ return next_call_expr_arg (iter);
+}
+
+inline const_tree
+first_const_call_expr_arg (const_tree exp, const_call_expr_arg_iterator *iter)
+{
+ init_const_call_expr_arg_iterator (exp, iter);
+ return next_const_call_expr_arg (iter);
+}
+
+/* Test whether there are more arguments in abstract argument list iterator
+ ITER, without changing its state. */
+inline bool
+more_call_expr_args_p (const call_expr_arg_iterator *iter)
+{
+ return (iter->i < iter->n);
+}
+
+/* Iterate through each argument ARG of CALL_EXPR CALL, using variable ITER
+ (of type call_expr_arg_iterator) to hold the iteration state. */
+#define FOR_EACH_CALL_EXPR_ARG(arg, iter, call) \
+ for ((arg) = first_call_expr_arg ((call), &(iter)); (arg); \
+ (arg) = next_call_expr_arg (&(iter)))
+
+#define FOR_EACH_CONST_CALL_EXPR_ARG(arg, iter, call) \
+ for ((arg) = first_const_call_expr_arg ((call), &(iter)); (arg); \
+ (arg) = next_const_call_expr_arg (&(iter)))
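+
+/* Usage sketch, assuming CALL is a CALL_EXPR (not part of the API above):
+
+     int n = 0;
+     tree arg;
+     call_expr_arg_iterator iter;
+     FOR_EACH_CALL_EXPR_ARG (arg, iter, call)
+       n++;
+
+   call_expr_nargs would give the count directly; the loop form is for
+   when each argument must be inspected.  */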
+
+/* Return true if tree node T is a language-specific node. */
+inline bool
+is_lang_specific (const_tree t)
+{
+ return TREE_CODE (t) == LANG_TYPE || TREE_CODE (t) >= NUM_TREE_CODES;
+}
+
+/* Nonzero if FNCODE is a valid builtin function code.  */
+#define BUILTIN_VALID_P(FNCODE) \
+ (IN_RANGE ((int)FNCODE, ((int)BUILT_IN_NONE) + 1, ((int) END_BUILTINS) - 1))
+
+/* Obtain a pointer to the identifier string holding the asm name for
+ BUILTIN, a BUILT_IN code. This is handy if the target
+ mangles/overrides the function name that implements the
+ builtin. */
+#define BUILTIN_ASM_NAME_PTR(BUILTIN) \
+ (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (builtin_decl_explicit (BUILTIN))))
+
+/* Return the tree node for an explicit standard builtin function or NULL. */
+inline tree
+builtin_decl_explicit (enum built_in_function fncode)
+{
+ gcc_checking_assert (BUILTIN_VALID_P (fncode));
+
+ return builtin_info[(size_t)fncode].decl;
+}
+
+/* Return the tree node for an implicit builtin function or NULL. */
+inline tree
+builtin_decl_implicit (enum built_in_function fncode)
+{
+ size_t uns_fncode = (size_t)fncode;
+ gcc_checking_assert (BUILTIN_VALID_P (fncode));
+
+ if (!builtin_info[uns_fncode].implicit_p)
+ return NULL_TREE;
+
+ return builtin_info[uns_fncode].decl;
+}
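+
+/* Usage sketch: fetch the decl for a standard builtin before building a
+   call to it.
+
+     tree fn = builtin_decl_explicit (BUILT_IN_MEMCPY);
+     if (fn != NULL_TREE)
+       ... build a CALL_EXPR to fn ...
+
+   builtin_decl_implicit performs the same lookup but also returns
+   NULL_TREE when the builtin may not be used implicitly.  */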
+
+/* For BUILTIN_UNREACHABLE, use one of these or
+ gimple_build_builtin_unreachable instead of one of the above. */
+extern tree builtin_decl_unreachable ();
+extern tree build_builtin_unreachable (location_t);
+
+/* Set the builtin function decl node for FNCODE, and record whether it
+   is an implicit function.  */
+
+inline void
+set_builtin_decl (enum built_in_function fncode, tree decl, bool implicit_p)
+{
+ size_t ufncode = (size_t)fncode;
+
+ gcc_checking_assert (BUILTIN_VALID_P (fncode)
+ && (decl != NULL_TREE || !implicit_p));
+
+ builtin_info[ufncode].decl = decl;
+ builtin_info[ufncode].implicit_p = implicit_p;
+ builtin_info[ufncode].declared_p = false;
+}
+
+/* Set the implicit flag for a builtin function. */
+
+inline void
+set_builtin_decl_implicit_p (enum built_in_function fncode, bool implicit_p)
+{
+ size_t uns_fncode = (size_t)fncode;
+
+ gcc_checking_assert (BUILTIN_VALID_P (fncode)
+ && builtin_info[uns_fncode].decl != NULL_TREE);
+
+ builtin_info[uns_fncode].implicit_p = implicit_p;
+}
+
+/* Set the declared flag for a builtin function. */
+
+inline void
+set_builtin_decl_declared_p (enum built_in_function fncode, bool declared_p)
+{
+ size_t uns_fncode = (size_t)fncode;
+
+ gcc_checking_assert (BUILTIN_VALID_P (fncode)
+ && builtin_info[uns_fncode].decl != NULL_TREE);
+
+ builtin_info[uns_fncode].declared_p = declared_p;
+}
+
+/* Return whether the standard builtin function can be used as an explicit
+ function. */
+
+inline bool
+builtin_decl_explicit_p (enum built_in_function fncode)
+{
+ gcc_checking_assert (BUILTIN_VALID_P (fncode));
+ return (builtin_info[(size_t)fncode].decl != NULL_TREE);
+}
+
+/* Return whether the standard builtin function can be used implicitly. */
+
+inline bool
+builtin_decl_implicit_p (enum built_in_function fncode)
+{
+ size_t uns_fncode = (size_t)fncode;
+
+ gcc_checking_assert (BUILTIN_VALID_P (fncode));
+ return (builtin_info[uns_fncode].decl != NULL_TREE
+ && builtin_info[uns_fncode].implicit_p);
+}
+
+/* Return whether the standard builtin function was declared. */
+
+inline bool
+builtin_decl_declared_p (enum built_in_function fncode)
+{
+ size_t uns_fncode = (size_t)fncode;
+
+ gcc_checking_assert (BUILTIN_VALID_P (fncode));
+ return (builtin_info[uns_fncode].decl != NULL_TREE
+ && builtin_info[uns_fncode].declared_p);
+}
+
+/* Determine if the function identified by FNDECL is one that
+ makes sense to match by name, for those places where we detect
+ "magic" functions by name.
+
+ Return true if FNDECL has a name and is an extern fndecl at file scope.
+ FNDECL must be a non-NULL decl.
+
+ Avoid using this, as it's generally better to use attributes rather
+ than to check for functions by name. */
+
+inline bool
+maybe_special_function_p (const_tree fndecl)
+{
+ tree name_decl = DECL_NAME (fndecl);
+ if (name_decl
+ /* Exclude functions not at the file scope, or not `extern',
+ since they are not the magic functions we would otherwise
+ think they are. */
+ && (DECL_CONTEXT (fndecl) == NULL_TREE
+ || TREE_CODE (DECL_CONTEXT (fndecl)) == TRANSLATION_UNIT_DECL)
+ && TREE_PUBLIC (fndecl))
+ return true;
+ return false;
+}
+
+/* Return true if T (assumed to be a DECL) is a global variable.
+ A variable is considered global if its storage is not automatic. */
+
+inline bool
+is_global_var (const_tree t)
+{
+ return (TREE_STATIC (t) || DECL_EXTERNAL (t));
+}
+
+/* Return true if VAR may be aliased. A variable is considered as
+ maybe aliased if it has its address taken by the local TU
+ or possibly by another TU and might be modified through a pointer. */
+
+inline bool
+may_be_aliased (const_tree var)
+{
+ return (TREE_CODE (var) != CONST_DECL
+ && (TREE_PUBLIC (var)
+ || DECL_EXTERNAL (var)
+ || TREE_ADDRESSABLE (var))
+ && !((TREE_STATIC (var) || TREE_PUBLIC (var) || DECL_EXTERNAL (var))
+ && (TREE_READONLY (var)
+ || (TREE_CODE (var) == VAR_DECL
+ && DECL_NONALIASED (var)))));
+}
+
+/* Return pointer to optimization flags of FNDECL. */
+inline struct cl_optimization *
+opts_for_fn (const_tree fndecl)
+{
+ tree fn_opts = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
+ if (fn_opts == NULL_TREE)
+ fn_opts = optimization_default_node;
+ return TREE_OPTIMIZATION (fn_opts);
+}
+
+/* Return pointer to target flags of FNDECL. */
+inline cl_target_option *
+target_opts_for_fn (const_tree fndecl)
+{
+ tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
+ if (fn_opts == NULL_TREE)
+ fn_opts = target_option_default_node;
+ return fn_opts == NULL_TREE ? NULL : TREE_TARGET_OPTION (fn_opts);
+}
+
+/* opt flag for function FNDECL, e.g. opts_for_fn (fndecl, optimize) is
+ the optimization level of function fndecl. */
+#define opt_for_fn(fndecl, opt) (opts_for_fn (fndecl)->x_##opt)
+
+/* For anonymous aggregate types, we need some sort of name to
+   hold on to.  In practice, this name should not appear in user-visible
+   output, but it should not be harmful if it does.  Identifiers returned
+   will be IDENTIFIER_ANON_P.  */
+extern tree make_anon_name ();
+
+/* The tree and const_tree overload templates. */
+namespace wi
+{
+ class unextended_tree
+ {
+ private:
+ const_tree m_t;
+
+ public:
+ unextended_tree () {}
+ unextended_tree (const_tree t) : m_t (t) {}
+
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ unsigned int get_len () const;
+ const_tree get_tree () const { return m_t; }
+ };
+
+ template <>
+ struct int_traits <unextended_tree>
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = false;
+ };
+
+ template <int N>
+ class extended_tree
+ {
+ private:
+ const_tree m_t;
+
+ public:
+ extended_tree () {}
+ extended_tree (const_tree);
+
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ unsigned int get_len () const;
+ const_tree get_tree () const { return m_t; }
+ };
+
+ template <int N>
+ struct int_traits <extended_tree <N> >
+ {
+ static const enum precision_type precision_type = CONST_PRECISION;
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
+ static const unsigned int precision = N;
+ };
+
+ typedef extended_tree <WIDE_INT_MAX_PRECISION> widest_extended_tree;
+ typedef extended_tree <ADDR_MAX_PRECISION> offset_extended_tree;
+
+ typedef const generic_wide_int <widest_extended_tree> tree_to_widest_ref;
+ typedef const generic_wide_int <offset_extended_tree> tree_to_offset_ref;
+ typedef const generic_wide_int<wide_int_ref_storage<false, false> >
+ tree_to_wide_ref;
+
+ tree_to_widest_ref to_widest (const_tree);
+ tree_to_offset_ref to_offset (const_tree);
+ tree_to_wide_ref to_wide (const_tree);
+ wide_int to_wide (const_tree, unsigned int);
+
+ typedef const poly_int <NUM_POLY_INT_COEFFS,
+ generic_wide_int <widest_extended_tree> >
+ tree_to_poly_widest_ref;
+ typedef const poly_int <NUM_POLY_INT_COEFFS,
+ generic_wide_int <offset_extended_tree> >
+ tree_to_poly_offset_ref;
+ typedef const poly_int <NUM_POLY_INT_COEFFS,
+ generic_wide_int <unextended_tree> >
+ tree_to_poly_wide_ref;
+
+ tree_to_poly_widest_ref to_poly_widest (const_tree);
+ tree_to_poly_offset_ref to_poly_offset (const_tree);
+ tree_to_poly_wide_ref to_poly_wide (const_tree);
+
+ template <int N>
+ struct ints_for <generic_wide_int <extended_tree <N> >, CONST_PRECISION>
+ {
+ typedef generic_wide_int <extended_tree <N> > extended;
+ static extended zero (const extended &);
+ };
+
+ template <>
+ struct ints_for <generic_wide_int <unextended_tree>, VAR_PRECISION>
+ {
+ typedef generic_wide_int <unextended_tree> unextended;
+ static unextended zero (const unextended &);
+ };
+}
+
+/* Used to convert a tree to a widest2_int like this:
+ widest2_int foo = widest2_int_cst (some_tree). */
+typedef generic_wide_int <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> >
+ widest2_int_cst;
+
+/* Refer to INTEGER_CST T as though it were a widest_int.
+
+ This function gives T's actual numerical value, influenced by the
+ signedness of its type. For example, a signed byte with just the
+ top bit set would be -128 while an unsigned byte with the same
+ bit pattern would be 128.
+
+ This is the right choice when operating on groups of INTEGER_CSTs
+ that might have different signedness or precision. It is also the
+ right choice in code that specifically needs an approximation of
+ infinite-precision arithmetic instead of normal modulo arithmetic.
+
+ The approximation of infinite precision is good enough for realistic
+ numbers of additions and subtractions of INTEGER_CSTs (where
+ "realistic" includes any number less than 1 << 31) but it cannot
+ represent the result of multiplying the two largest supported
+ INTEGER_CSTs. The overflow-checking form of wi::mul provides a way
+ of multiplying two arbitrary INTEGER_CSTs and checking that the
+ result is representable as a widest_int.
+
+ Note that any overflow checking done on these values is relative to
+ the range of widest_int rather than the range of a TREE_TYPE.
+
+ Calling this function should have no overhead in release builds,
+ so it is OK to call it several times for the same tree. If it is
+ useful for readability reasons to reduce the number of calls,
+ it is more efficient to use:
+
+ wi::tree_to_widest_ref wt = wi::to_widest (t);
+
+ instead of:
+
+ widest_int wt = wi::to_widest (t). */
+
+inline wi::tree_to_widest_ref
+wi::to_widest (const_tree t)
+{
+ return t;
+}
+
+/* Refer to INTEGER_CST T as though it were an offset_int.
+
+ This function is an optimisation of wi::to_widest for cases
+ in which T is known to be a bit or byte count in the range
+ (-(2 ^ (N + BITS_PER_UNIT)), 2 ^ (N + BITS_PER_UNIT)), where N is
+ the target's address size in bits.
+
+ This is the right choice when operating on bit or byte counts as
+ untyped numbers rather than M-bit values. The wi::to_widest comments
+ about addition, subtraction and multiplication apply here: sequences
+ of 1 << 31 additions and subtractions do not induce overflow, but
+ multiplying the largest sizes might. Again,
+
+ wi::tree_to_offset_ref wt = wi::to_offset (t);
+
+ is more efficient than:
+
+ offset_int wt = wi::to_offset (t). */
+
+inline wi::tree_to_offset_ref
+wi::to_offset (const_tree t)
+{
+ return t;
+}
+
+/* Refer to INTEGER_CST T as though it were a wide_int.
+
+ In contrast to the approximation of infinite-precision numbers given
+ by wi::to_widest and wi::to_offset, this function treats T as a
+ signless collection of N bits, where N is the precision of T's type.
+ As with machine registers, signedness is determined by the operation
+ rather than the operands; for example, there is a distinction between
+ signed and unsigned division.
+
+ This is the right choice when operating on values with the same type
+ using normal modulo arithmetic. The overflow-checking forms of things
+ like wi::add check whether the result can be represented in T's type.
+
+ Calling this function should have no overhead in release builds,
+ so it is OK to call it several times for the same tree. If it is
+ useful for readability reasons to reduce the number of calls,
+ it is more efficient to use:
+
+ wi::tree_to_wide_ref wt = wi::to_wide (t);
+
+ instead of:
+
+ wide_int wt = wi::to_wide (t). */
+
+inline wi::tree_to_wide_ref
+wi::to_wide (const_tree t)
+{
+ return wi::storage_ref (&TREE_INT_CST_ELT (t, 0), TREE_INT_CST_NUNITS (t),
+ TYPE_PRECISION (TREE_TYPE (t)));
+}
+
+/* Convert INTEGER_CST T to a wide_int of precision PREC, extending or
+ truncating as necessary. When extending, use sign extension if T's
+ type is signed and zero extension if T's type is unsigned. */
+
+inline wide_int
+wi::to_wide (const_tree t, unsigned int prec)
+{
+ return wide_int::from (wi::to_wide (t), prec, TYPE_SIGN (TREE_TYPE (t)));
+}
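+
+/* Usage sketch: combining INTEGER_CSTs of different precisions, e.g.
+   widening a 16-bit constant T to 32 bits before an operation:
+
+     wide_int w = wi::to_wide (t, 32);
+
+   which extends according to the signedness of TREE_TYPE (t), as
+   described above.  */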
+
+template <int N>
+inline wi::extended_tree <N>::extended_tree (const_tree t)
+ : m_t (t)
+{
+ gcc_checking_assert (TYPE_PRECISION (TREE_TYPE (t)) <= N);
+}
+
+template <int N>
+inline unsigned int
+wi::extended_tree <N>::get_precision () const
+{
+ return N;
+}
+
+template <int N>
+inline const HOST_WIDE_INT *
+wi::extended_tree <N>::get_val () const
+{
+ return &TREE_INT_CST_ELT (m_t, 0);
+}
+
+template <int N>
+inline unsigned int
+wi::extended_tree <N>::get_len () const
+{
+ if (N == ADDR_MAX_PRECISION)
+ return TREE_INT_CST_OFFSET_NUNITS (m_t);
+ else if (N >= WIDE_INT_MAX_PRECISION)
+ return TREE_INT_CST_EXT_NUNITS (m_t);
+ else
+    /* This class is designed to be used for specific output precisions
+       and needs to be as fast as possible, so there is no fallback for
+       other cases.  */
+ gcc_unreachable ();
+}
+
+inline unsigned int
+wi::unextended_tree::get_precision () const
+{
+ return TYPE_PRECISION (TREE_TYPE (m_t));
+}
+
+inline const HOST_WIDE_INT *
+wi::unextended_tree::get_val () const
+{
+ return &TREE_INT_CST_ELT (m_t, 0);
+}
+
+inline unsigned int
+wi::unextended_tree::get_len () const
+{
+ return TREE_INT_CST_NUNITS (m_t);
+}
+
+/* Return the value of a POLY_INT_CST in its native precision. */
+
+inline wi::tree_to_poly_wide_ref
+poly_int_cst_value (const_tree x)
+{
+ poly_int <NUM_POLY_INT_COEFFS, generic_wide_int <wi::unextended_tree> > res;
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ res.coeffs[i] = POLY_INT_CST_COEFF (x, i);
+ return res;
+}
+
+/* Access INTEGER_CST or POLY_INT_CST tree T as if it were a
+ poly_widest_int. See wi::to_widest for more details. */
+
+inline wi::tree_to_poly_widest_ref
+wi::to_poly_widest (const_tree t)
+{
+ if (POLY_INT_CST_P (t))
+ {
+ poly_int <NUM_POLY_INT_COEFFS,
+ generic_wide_int <widest_extended_tree> > res;
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ res.coeffs[i] = POLY_INT_CST_COEFF (t, i);
+ return res;
+ }
+ return t;
+}
+
+/* Access INTEGER_CST or POLY_INT_CST tree T as if it were a
+ poly_offset_int. See wi::to_offset for more details. */
+
+inline wi::tree_to_poly_offset_ref
+wi::to_poly_offset (const_tree t)
+{
+ if (POLY_INT_CST_P (t))
+ {
+ poly_int <NUM_POLY_INT_COEFFS,
+ generic_wide_int <offset_extended_tree> > res;
+ for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+ res.coeffs[i] = POLY_INT_CST_COEFF (t, i);
+ return res;
+ }
+ return t;
+}
+
+/* Access INTEGER_CST or POLY_INT_CST tree T as if it were a
+ poly_wide_int. See wi::to_wide for more details. */
+
+inline wi::tree_to_poly_wide_ref
+wi::to_poly_wide (const_tree t)
+{
+ if (POLY_INT_CST_P (t))
+ return poly_int_cst_value (t);
+ return t;
+}
+
+template <int N>
+inline generic_wide_int <wi::extended_tree <N> >
+wi::ints_for <generic_wide_int <wi::extended_tree <N> >,
+ wi::CONST_PRECISION>::zero (const extended &x)
+{
+ return build_zero_cst (TREE_TYPE (x.get_tree ()));
+}
+
+inline generic_wide_int <wi::unextended_tree>
+wi::ints_for <generic_wide_int <wi::unextended_tree>,
+ wi::VAR_PRECISION>::zero (const unextended &x)
+{
+ return build_zero_cst (TREE_TYPE (x.get_tree ()));
+}
+
+namespace wi
+{
+ template <typename T>
+ bool fits_to_boolean_p (const T &x, const_tree);
+
+ template <typename T>
+ bool fits_to_tree_p (const T &x, const_tree);
+
+ wide_int min_value (const_tree);
+ wide_int max_value (const_tree);
+ wide_int from_mpz (const_tree, mpz_t, bool);
+}
+
+template <typename T>
+bool
+wi::fits_to_boolean_p (const T &x, const_tree type)
+{
+ typedef typename poly_int_traits<T>::int_type int_type;
+ return (known_eq (x, int_type (0))
+ || known_eq (x, int_type (TYPE_UNSIGNED (type) ? 1 : -1)));
+}
+
+template <typename T>
+bool
+wi::fits_to_tree_p (const T &x, const_tree type)
+{
+ /* Non-standard boolean types can have arbitrary precision but various
+ transformations assume that they can only take values 0 and +/-1. */
+ if (TREE_CODE (type) == BOOLEAN_TYPE)
+ return fits_to_boolean_p (x, type);
+
+ if (TYPE_UNSIGNED (type))
+ return known_eq (x, zext (x, TYPE_PRECISION (type)));
+ else
+ return known_eq (x, sext (x, TYPE_PRECISION (type)));
+}
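+
+/* Usage sketch: to check whether the value of INTEGER_CST C is
+   representable in TYPE without change of value:
+
+     if (wi::fits_to_tree_p (wi::to_widest (c), type))
+       ...
+
+   int_fits_type_p, declared earlier, is the tree-level counterpart of
+   this check.  */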
+
+/* Produce the smallest number that is represented in TYPE. The precision
+ and sign are taken from TYPE. */
+inline wide_int
+wi::min_value (const_tree type)
+{
+ return min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
+}
+
+/* Produce the largest number that is represented in TYPE. The precision
+ and sign are taken from TYPE. */
+inline wide_int
+wi::max_value (const_tree type)
+{
+ return max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
+}
+
+/* Return true if INTEGER_CST T1 is less than INTEGER_CST T2,
+ extending both according to their respective TYPE_SIGNs. */
+
+inline bool
+tree_int_cst_lt (const_tree t1, const_tree t2)
+{
+ return wi::to_widest (t1) < wi::to_widest (t2);
+}
+
+/* Return true if INTEGER_CST T1 is less than or equal to INTEGER_CST T2,
+ extending both according to their respective TYPE_SIGNs. */
+
+inline bool
+tree_int_cst_le (const_tree t1, const_tree t2)
+{
+ return wi::to_widest (t1) <= wi::to_widest (t2);
+}
+
+/* Returns -1 if T1 < T2, 0 if T1 == T2, and 1 if T1 > T2. T1 and T2
+ are both INTEGER_CSTs and their values are extended according to their
+ respective TYPE_SIGNs. */
+
+inline int
+tree_int_cst_compare (const_tree t1, const_tree t2)
+{
+ return wi::cmps (wi::to_widest (t1), wi::to_widest (t2));
+}
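+
+/* For example, with T1 = -1 of type int and T2 = 1 of type unsigned int,
+   the widening above makes the comparisons signedness-correct:
+   tree_int_cst_lt (t1, t2) is true and tree_int_cst_compare (t1, t2)
+   returns -1, even though the same bit patterns compared within
+   unsigned int would order the other way.  */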
+
+/* FIXME - These declarations belong in builtins.h, expr.h and emit-rtl.h,
+   but none of those files may be included from front ends.
+   They should each be split in two: one part suitable for the FEs, the
+   other for the BE.  */
+
+/* Assign the RTX to the declaration.  */
+extern void set_decl_rtl (tree, rtx);
+extern bool complete_ctor_at_level_p (const_tree, HOST_WIDE_INT, const_tree);
+
+/* Given an expression EXP that is a handled_component_p,
+ look for the ultimate containing object, which is returned and specify
+ the access position and size. */
+extern tree get_inner_reference (tree, poly_int64_pod *, poly_int64_pod *,
+ tree *, machine_mode *, int *, int *, int *);
+
+extern tree build_personality_function (const char *);
+
+struct GTY(()) int_n_trees_t {
+ /* These parts are initialized at runtime */
+ tree signed_type;
+ tree unsigned_type;
+};
+
+/* This is also in machmode.h */
+extern bool int_n_enabled_p[NUM_INT_N_ENTS];
+extern GTY(()) struct int_n_trees_t int_n_trees[NUM_INT_N_ENTS];
+
+/* Like bit_position, but return the result as an integer.  It must be
+   representable that way (since it could be a signed value, we don't have
+   the option of returning -1 like int_size_in_bytes can).  */
+
+inline HOST_WIDE_INT
+int_bit_position (const_tree field)
+{
+ return ((wi::to_offset (DECL_FIELD_OFFSET (field)) << LOG2_BITS_PER_UNIT)
+ + wi::to_offset (DECL_FIELD_BIT_OFFSET (field))).to_shwi ();
+}
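+
+/* For example, for the third field of struct { int a; int b; char c; }
+   with 32-bit int and no padding, DECL_FIELD_OFFSET is 8 and
+   DECL_FIELD_BIT_OFFSET is 0, so on a target with 8-bit units
+   int_bit_position returns 64.  */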
+
+/* Return true if it makes sense to consider alias set for a type T. */
+
+inline bool
+type_with_alias_set_p (const_tree t)
+{
+ /* Function and method types are never accessed as memory locations. */
+ if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
+ return false;
+
+ if (COMPLETE_TYPE_P (t))
+ return true;
+
+  /* Incomplete types cannot be accessed in general, except for arrays,
+     whose elements we can fetch even though we have no array bounds.  */
+ if (TREE_CODE (t) == ARRAY_TYPE && COMPLETE_TYPE_P (TREE_TYPE (t)))
+ return true;
+
+ return false;
+}
+
+extern location_t set_block (location_t loc, tree block);
+
+extern void gt_ggc_mx (tree &);
+extern void gt_pch_nx (tree &);
+extern void gt_pch_nx (tree &, gt_pointer_operator, void *);
+
+extern bool nonnull_arg_p (const_tree);
+extern bool is_empty_type (const_tree);
+extern bool default_is_empty_record (const_tree);
+extern bool flexible_array_type_p (const_tree);
+extern HOST_WIDE_INT arg_int_size_in_bytes (const_tree);
+extern tree arg_size_in_bytes (const_tree);
+extern bool expr_type_first_operand_type_p (tree_code);
+
+extern location_t
+set_source_range (tree expr, location_t start, location_t finish);
+
+extern location_t
+set_source_range (tree expr, source_range src_range);
+
+/* Return true if it makes sense to promote/demote from_type to to_type. */
+inline bool
+desired_pro_or_demotion_p (const_tree to_type, const_tree from_type)
+{
+ unsigned int to_type_precision = TYPE_PRECISION (to_type);
+
+ /* OK to promote if to_type is no bigger than word_mode. */
+ if (to_type_precision <= GET_MODE_PRECISION (word_mode))
+ return true;
+
+  /* Otherwise, allow only narrowing or same-precision conversions.  */
+ return to_type_precision <= TYPE_PRECISION (from_type);
+}
+
+/* Pointer types used to declare builtins before we have seen their real
+   declarations.  */
+class builtin_structptr_type
+{
+public:
+ tree& node;
+ tree& base;
+ const char *str;
+};
+extern const builtin_structptr_type builtin_structptr_types[6];
+
+/* Return true if type T has the same precision as its underlying mode. */
+
+inline bool
+type_has_mode_precision_p (const_tree t)
+{
+ return known_eq (TYPE_PRECISION (t), GET_MODE_PRECISION (TYPE_MODE (t)));
+}
+
+/* Return true if a FUNCTION_DECL NODE is a GCC built-in function.
+
+   Note that it is different from the DECL_IS_UNDECLARED_BUILTIN
+   accessor, as this predicate is unaffected by a user declaration of
+   the function.  */
+
+inline bool
+fndecl_built_in_p (const_tree node)
+{
+ return DECL_BUILT_IN_CLASS (node) != NOT_BUILT_IN;
+}
+
+/* Return true if a FUNCTION_DECL NODE is a GCC built-in function
+ of class KLASS. */
+
+inline bool
+fndecl_built_in_p (const_tree node, built_in_class klass)
+{
+ return fndecl_built_in_p (node) && DECL_BUILT_IN_CLASS (node) == klass;
+}
+
+/* Return true if a FUNCTION_DECL NODE is a GCC built-in function
+ of class KLASS with name equal to NAME. */
+
+inline bool
+fndecl_built_in_p (const_tree node, unsigned int name, built_in_class klass)
+{
+ return (fndecl_built_in_p (node, klass)
+ && DECL_UNCHECKED_FUNCTION_CODE (node) == name);
+}
+
+/* Return true if a FUNCTION_DECL NODE is a GCC built-in function
+ of BUILT_IN_NORMAL class with name equal to NAME. */
+
+inline bool
+fndecl_built_in_p (const_tree node, built_in_function name)
+{
+ return (fndecl_built_in_p (node, BUILT_IN_NORMAL)
+ && DECL_FUNCTION_CODE (node) == name);
+}
+
+/* A struct for encapsulating location information about an operator
+   and the operation built from it.
+
+   m_operator_loc is the location of the operator;
+   m_combined_loc is the location of the compound expression.
+
+   For example, given "a && b", the operator location is:
+ a && b
+ ^~
+ and the combined location is:
+ a && b
+ ~~^~~~
+   Capturing this information allows class binary_op_rich_location
+   to provide detailed information about e.g. type mismatches in binary
+   operations where enough location information is available:
+
+ arg_0 op arg_1
+ ~~~~~ ^~ ~~~~~
+ | |
+ | arg1 type
+ arg0 type
+
+ falling back to just showing the combined location:
+
+ arg_0 op arg_1
+ ~~~~~~^~~~~~~~
+
+ where it is not. */
+
+class op_location_t
+{
+public:
+ location_t m_operator_loc;
+ location_t m_combined_loc;
+
+ /* 1-argument ctor, for constructing from a combined location. */
+ op_location_t (location_t combined_loc)
+ : m_operator_loc (UNKNOWN_LOCATION), m_combined_loc (combined_loc)
+ {}
+
+ /* 2-argument ctor, for distinguishing between the operator's location
+ and the combined location. */
+ op_location_t (location_t operator_loc, location_t combined_loc)
+ : m_operator_loc (operator_loc), m_combined_loc (combined_loc)
+ {}
+
+ /* Implicitly convert back to a location_t, using the combined location. */
+ operator location_t () const { return m_combined_loc; }
+};
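+
+/* Usage sketch (the location variables are hypothetical): a parser
+   holding both the operator token's location and the whole expression's
+   location can record them together:
+
+     op_location_t loc (operator_token_loc, combined_expr_loc);
+
+   while a caller with only one location still works via the 1-argument
+   ctor, and the implicit conversion recovers a plain location_t:
+
+     location_t l = loc;   // the combined location  */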
+
+/* Code that doesn't refer to any warning. Has no effect on suppression
+ functions. */
+constexpr opt_code no_warning = opt_code ();
+/* Wildcard code that refers to all warnings. */
+constexpr opt_code all_warnings = N_OPTS;
+
+/* Return the disposition for a warning (or all warnings by default)
+ at a location. */
+extern bool warning_suppressed_at (location_t, opt_code = all_warnings);
+/* Set the disposition for a warning (or all warnings by default)
+   at a location; by default the warning is suppressed.  */
+extern bool suppress_warning_at (location_t, opt_code = all_warnings,
+ bool = true);
+/* Copy warning disposition from one location to another. */
+extern void copy_warning (location_t, location_t);
+
+/* Return the disposition for a warning (or all warnings by default)
+ for an expression. */
+extern bool warning_suppressed_p (const_tree, opt_code = all_warnings);
+/* Set the disposition for a warning (or all warnings by default)
+   for an expression; by default the warning is suppressed.  */
+extern void suppress_warning (tree, opt_code = all_warnings, bool = true)
+ ATTRIBUTE_NONNULL (1);
+/* Copy warning disposition from one expression to another. */
+extern void copy_warning (tree, const_tree);
+
+/* Return the zero-based number corresponding to the argument being
+ deallocated if FNDECL is a deallocation function or an out-of-bounds
+ value if it isn't. */
+extern unsigned fndecl_dealloc_argno (tree);
+
+/* If an expression refers to a character array or pointer declared with
+   attribute nonstring, return a decl for that array or pointer and, if
+   the second argument is nonnull, set it to the referenced enclosing
+   object or pointer.  Otherwise return null.  */
+extern tree get_attr_nonstring_decl (tree, tree * = NULL);
+
+extern int get_target_clone_attr_len (tree);
+
+#endif /* GCC_TREE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/treestruct.def b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/treestruct.def
new file mode 100644
index 0000000..2cdc3a5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/treestruct.def
@@ -0,0 +1,71 @@
+/* This file contains the definitions for the tree structure
+ enumeration used in GCC.
+
+Copyright (C) 2005-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* The format of this file is
+
+ DEFTREESTRUCT(enumeration value, printable name).
+
+ Each enumeration value should correspond with a single member of
+ union tree_node.
+
+ These enumerator values are used in order to distinguish members of
+ union tree_node for garbage collection purposes, as well as
+ specifying what structures contain what other structures in the
+ tree_contains_struct array. */
+DEFTREESTRUCT(TS_BASE, "base")
+DEFTREESTRUCT(TS_TYPED, "typed")
+DEFTREESTRUCT(TS_COMMON, "common")
+DEFTREESTRUCT(TS_INT_CST, "integer cst")
+DEFTREESTRUCT(TS_POLY_INT_CST, "poly_int_cst")
+DEFTREESTRUCT(TS_REAL_CST, "real cst")
+DEFTREESTRUCT(TS_FIXED_CST, "fixed cst")
+DEFTREESTRUCT(TS_VECTOR, "vector")
+DEFTREESTRUCT(TS_STRING, "string")
+DEFTREESTRUCT(TS_COMPLEX, "complex")
+DEFTREESTRUCT(TS_IDENTIFIER, "identifier")
+DEFTREESTRUCT(TS_DECL_MINIMAL, "decl minimal")
+DEFTREESTRUCT(TS_DECL_COMMON, "decl common")
+DEFTREESTRUCT(TS_DECL_WRTL, "decl with RTL")
+DEFTREESTRUCT(TS_DECL_NON_COMMON, "decl non-common")
+DEFTREESTRUCT(TS_DECL_WITH_VIS, "decl with visibility")
+DEFTREESTRUCT(TS_FIELD_DECL, "field decl")
+DEFTREESTRUCT(TS_VAR_DECL, "var decl")
+DEFTREESTRUCT(TS_PARM_DECL, "parm decl")
+DEFTREESTRUCT(TS_LABEL_DECL, "label decl")
+DEFTREESTRUCT(TS_RESULT_DECL, "result decl")
+DEFTREESTRUCT(TS_CONST_DECL, "const decl")
+DEFTREESTRUCT(TS_TYPE_DECL, "type decl")
+DEFTREESTRUCT(TS_FUNCTION_DECL, "function decl")
+DEFTREESTRUCT(TS_TRANSLATION_UNIT_DECL, "translation-unit decl")
+DEFTREESTRUCT(TS_TYPE_COMMON, "type common")
+DEFTREESTRUCT(TS_TYPE_WITH_LANG_SPECIFIC, "type with lang-specific")
+DEFTREESTRUCT(TS_TYPE_NON_COMMON, "type non-common")
+DEFTREESTRUCT(TS_LIST, "list")
+DEFTREESTRUCT(TS_VEC, "vec")
+DEFTREESTRUCT(TS_EXP, "exp")
+DEFTREESTRUCT(TS_SSA_NAME, "ssa name")
+DEFTREESTRUCT(TS_BLOCK, "block")
+DEFTREESTRUCT(TS_BINFO, "binfo")
+DEFTREESTRUCT(TS_STATEMENT_LIST, "statement list")
+DEFTREESTRUCT(TS_CONSTRUCTOR, "constructor")
+DEFTREESTRUCT(TS_OMP_CLAUSE, "omp clause")
+DEFTREESTRUCT(TS_OPTIMIZATION, "optimization options")
+DEFTREESTRUCT(TS_TARGET_OPTION, "target options")
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tristate.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tristate.h
new file mode 100644
index 0000000..38293f5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tristate.h
@@ -0,0 +1,85 @@
+/* "True" vs "False" vs "Unknown".
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TRISTATE_H
+#define GCC_TRISTATE_H
+
+/* "True" vs "False" vs "Unknown". */
+
+class tristate {
+ public:
+ enum value {
+ TS_UNKNOWN,
+ TS_TRUE,
+ TS_FALSE
+ };
+
+ tristate (enum value val) : m_value (val) {}
+ tristate (bool val) : m_value (val ? TS_TRUE : TS_FALSE) {}
+ static tristate unknown () { return tristate (TS_UNKNOWN); }
+
+ const char *as_string () const;
+
+ bool is_known () const { return m_value != TS_UNKNOWN; }
+ bool is_unknown () const { return m_value == TS_UNKNOWN; }
+ bool is_true () const { return m_value == TS_TRUE; }
+ bool is_false () const { return m_value == TS_FALSE; }
+
+ tristate not_ () const;
+ tristate or_ (tristate other) const;
+ tristate and_ (tristate other) const;
+
+ bool operator== (const tristate &other) const
+ {
+ return m_value == other.m_value;
+ }
+
+ bool operator!= (const tristate &other) const
+ {
+ return m_value != other.m_value;
+ }
+
+ enum value get_value () const { return m_value; }
+
+ private:
+ enum value m_value;
+};
+
+/* Overloaded boolean operators on tristates. */
+
+inline tristate
+operator ! (tristate t)
+{
+ return t.not_ ();
+}
+
+inline tristate
+operator || (tristate a, tristate b)
+{
+ return a.or_ (b);
+}
+
+inline tristate
+operator && (tristate a, tristate b)
+{
+ return a.and_ (b);
+}
+
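+/* Usage sketch: the operators follow three-valued (Kleene) logic, so
+   unknowns propagate only where they could change the result:
+
+     tristate t (true);
+     tristate u = tristate::unknown ();
+
+     (t || u).is_true ()     // TRUE || x is TRUE for any x
+     (t && u).is_unknown ()  // TRUE && UNKNOWN stays UNKNOWN
+     (!u).is_unknown ()      // NOT of UNKNOWN is UNKNOWN  */
+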
+#endif /* GCC_TRISTATE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tsan.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tsan.h
new file mode 100644
index 0000000..b735305
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tsan.h
@@ -0,0 +1,26 @@
+/* ThreadSanitizer, a data race detector.
+ Copyright (C) 2011-2023 Free Software Foundation, Inc.
+ Contributed by Dmitry Vyukov <dvyukov@google.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef TREE_TSAN
+#define TREE_TSAN
+
+extern void tsan_finish_file (void);
+
+#endif /* TREE_TSAN */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tsystem.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tsystem.h
new file mode 100644
index 0000000..081c733
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/tsystem.h
@@ -0,0 +1,137 @@
+/* Get common system includes and various definitions and declarations
+ based on target macros.
+ Copyright (C) 2000-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TSYSTEM_H
+#define GCC_TSYSTEM_H
+
+/* System headers (e.g. stdio.h, stdlib.h, unistd.h) sometimes
+ indirectly include getopt.h. Our -I flags will cause gcc's gnu
+ getopt.h to be included, not the platform's copy. In the default
+ case, gnu getopt.h will provide us with a no-argument prototype
+ which will generate -Wstrict-prototypes warnings. None of the
+ target files actually use getopt, so it is safe to tell gnu
+ getopt.h we never need this prototype. */
+#ifndef HAVE_DECL_GETOPT
+#define HAVE_DECL_GETOPT 1
+#endif
+
+/* We want everything from the glibc headers. */
+#define _GNU_SOURCE 1
+
+/* GCC supplies these headers. */
+#include <stddef.h>
+#include <float.h>
+
+#ifdef inhibit_libc
+
+#ifndef malloc
+extern void *malloc (size_t);
+#endif
+
+#ifndef free
+extern void free (void *);
+#endif
+
+#ifndef atexit
+extern int atexit (void (*)(void));
+#endif
+
+#ifndef abort
+#define abort() __builtin_trap ()
+#endif
+
+#ifndef strlen
+extern size_t strlen (const char *);
+#endif
+
+#ifndef memcpy
+extern void *memcpy (void *, const void *, size_t);
+#endif
+
+#ifndef memset
+extern void *memset (void *, int, size_t);
+#endif
+
+#else /* ! inhibit_libc */
+/* We disable this when inhibit_libc is defined, so that gcc can still be
+   built without needing header files first.  */
+/* ??? This is not a good solution, since prototypes may be required in
+ some cases for correct code. */
+
+/* GCC supplies this header. */
+#include <stdarg.h>
+
+/* All systems have this header. */
+#include <stdio.h>
+
+/* All systems have this header. */
+#include <sys/types.h>
+
+/* All systems have this header. */
+#include <errno.h>
+
+#ifndef errno
+extern int errno;
+#endif
+
+/* If these system headers do not exist, fixincludes must create them. */
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+/* GCC supplies this header. */
+#include <limits.h>
+
+/* If these system headers do not exist, fixincludes must create them. */
+#include <time.h>
+
+#endif /* inhibit_libc */
+
+/* Define a generic NULL if one hasn't already been defined. */
+#ifndef NULL
+#define NULL 0
+#endif
+
+/* GCC always provides __builtin_alloca(x). */
+#undef alloca
+#define alloca(x) __builtin_alloca(x)
+
+#ifdef ENABLE_RUNTIME_CHECKING
+#define gcc_assert(EXPR) ((void)(!(EXPR) ? abort (), 0 : 0))
+#else
+/* Include EXPR, so that unused variable warnings do not occur. */
+#define gcc_assert(EXPR) ((void)(0 && (EXPR)))
+#endif
+/* Use gcc_unreachable() to mark unreachable locations (like an
+   unreachable default case of a switch).  Do not use gcc_assert(0).  */
+#define gcc_unreachable() (abort ())
+
+#define CONST_CAST2(TOTYPE,FROMTYPE,X) ((__extension__(union {FROMTYPE _q; TOTYPE _nq;})(X))._nq)
+#define CONST_CAST(TYPE,X) CONST_CAST2 (TYPE, const TYPE, (X))
+
+/* Filename handling macros. */
+#include "filenames.h"
+
+#endif /* ! GCC_TSYSTEM_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/typeclass.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/typeclass.h
new file mode 100644
index 0000000..c4f0933
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/typeclass.h
@@ -0,0 +1,43 @@
+/* Type class enum
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TYPECLASS_H
+#define GCC_TYPECLASS_H
+
+/* Values returned by __builtin_classify_type. */
+
+enum type_class
+{
+ no_type_class = -1,
+ void_type_class, integer_type_class, char_type_class,
+ enumeral_type_class, boolean_type_class,
+ pointer_type_class, reference_type_class, offset_type_class,
+ real_type_class, complex_type_class,
+ function_type_class, method_type_class,
+ record_type_class, union_type_class,
+ array_type_class, string_type_class,
+ lang_type_class, opaque_type_class
+};
+
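+/* Usage sketch: GCC's __builtin_classify_type maps an expression's type
+   to one of the values above, e.g.
+
+     __builtin_classify_type (0)          // integer_type_class
+     __builtin_classify_type ((void *) 0) // pointer_type_class
+
+   which is how type-generic dispatch (e.g. in <tgmath.h>
+   implementations) is sometimes done.  */
+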
+#endif /* GCC_TYPECLASS_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/typed-splay-tree.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/typed-splay-tree.h
new file mode 100644
index 0000000..ffe9675
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/typed-splay-tree.h
@@ -0,0 +1,652 @@
+/* A typesafe wrapper around libiberty's splay-tree.h.
+ Copyright (C) 2015-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TYPED_SPLAY_TREE_H
+#define GCC_TYPED_SPLAY_TREE_H
+
+/* Typesafe wrapper around libiberty's splay-tree.h. */
+template <typename KEY_TYPE, typename VALUE_TYPE>
+class typed_splay_tree
+{
+ public:
+ typedef KEY_TYPE key_type;
+ typedef VALUE_TYPE value_type;
+
+ typedef int (*compare_fn) (key_type, key_type);
+ typedef void (*delete_key_fn) (key_type);
+ typedef void (*delete_value_fn) (value_type);
+ typedef int (*foreach_fn) (key_type, value_type, void *);
+
+ typed_splay_tree (compare_fn,
+ delete_key_fn,
+ delete_value_fn);
+ ~typed_splay_tree ();
+
+ value_type lookup (key_type k);
+ value_type predecessor (key_type k);
+ value_type successor (key_type k);
+ void insert (key_type k, value_type v);
+ void remove (key_type k);
+ value_type max ();
+ value_type min ();
+ int foreach (foreach_fn, void *);
+
+ private:
+ /* Copy and assignment ops are not supported. */
+ typed_splay_tree (const typed_splay_tree &);
+ typed_splay_tree & operator = (const typed_splay_tree &);
+
+ typedef key_type splay_tree_key;
+ typedef value_type splay_tree_value;
+
+ /* The nodes in the splay tree. */
+ struct splay_tree_node_s {
+ /* The key. */
+ splay_tree_key key;
+
+ /* The value. */
+ splay_tree_value value;
+
+ /* The left and right children, respectively. */
+ splay_tree_node_s *left, *right;
+
+ /* Used as temporary value for tree traversals. */
+ splay_tree_node_s *back;
+ };
+ typedef splay_tree_node_s *splay_tree_node;
+
+ inline void KDEL (splay_tree_key);
+ inline void VDEL (splay_tree_value);
+ void splay_tree_delete_helper (splay_tree_node);
+ static inline void rotate_left (splay_tree_node *,
+ splay_tree_node, splay_tree_node);
+ static inline void rotate_right (splay_tree_node *,
+ splay_tree_node, splay_tree_node);
+ void splay_tree_splay (splay_tree_key);
+ static int splay_tree_foreach_helper (splay_tree_node,
+ foreach_fn, void*);
+ splay_tree_node splay_tree_insert (splay_tree_key, splay_tree_value);
+ void splay_tree_remove (splay_tree_key key);
+ splay_tree_node splay_tree_lookup (splay_tree_key key);
+ splay_tree_node splay_tree_predecessor (splay_tree_key);
+ splay_tree_node splay_tree_successor (splay_tree_key);
+ splay_tree_node splay_tree_max ();
+ splay_tree_node splay_tree_min ();
+
+ static value_type node_to_value (splay_tree_node node);
+
+ /* The root of the tree. */
+ splay_tree_node root;
+
+  /* The comparison function.  */
+ compare_fn comp;
+
+ /* The deallocate-key function. NULL if no cleanup is necessary. */
+ delete_key_fn delete_key;
+
+ /* The deallocate-value function. NULL if no cleanup is necessary. */
+ delete_value_fn delete_value;
+};
+
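+/* A minimal usage sketch (CMP_INT and the example keys/values are
+   illustrative).  The comparison callback follows the qsort-style
+   contract assumed by compare_fn; NULL delete callbacks mean no
+   cleanup is run for keys or values:
+
+     static int cmp_int (int a, int b) { return a < b ? -1 : a > b; }
+
+     typed_splay_tree<int, const char *> t (cmp_int, NULL, NULL);
+     t.insert (1, "one");
+     t.insert (2, "two");
+     const char *v = t.lookup (2);        // "two"
+     const char *p = t.predecessor (2);   // "one"  */
+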
+/* Constructor for typed_splay_tree <K, V>. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline typed_splay_tree<KEY_TYPE, VALUE_TYPE>::
+ typed_splay_tree (compare_fn compare_fn,
+ delete_key_fn delete_key_fn,
+ delete_value_fn delete_value_fn)
+{
+ root = NULL;
+ comp = compare_fn;
+ delete_key = delete_key_fn;
+ delete_value = delete_value_fn;
+}
+
+/* Destructor for typed_splay_tree <K, V>. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline typed_splay_tree<KEY_TYPE, VALUE_TYPE>::
+ ~typed_splay_tree ()
+{
+ splay_tree_delete_helper (root);
+}
+
+/* Lookup KEY, returning a value if present, and NULL
+ otherwise. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline VALUE_TYPE
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::lookup (key_type key)
+{
+ splay_tree_node node = splay_tree_lookup (key);
+ return node_to_value (node);
+}
+
+/* Return the immediate predecessor of KEY, or NULL if there is no
+ predecessor. KEY need not be present in the tree. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline VALUE_TYPE
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::predecessor (key_type key)
+{
+ splay_tree_node node = splay_tree_predecessor (key);
+ return node_to_value (node);
+}
+
+/* Return the immediate successor of KEY, or NULL if there is no
+ successor. KEY need not be present in the tree. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline VALUE_TYPE
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::successor (key_type key)
+{
+ splay_tree_node node = splay_tree_successor (key);
+ return node_to_value (node);
+}
+
+/* Insert a new node (associating KEY with VALUE). If a
+ previous node with the indicated KEY exists, its data is replaced
+ with the new value. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline void
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::insert (key_type key,
+ value_type value)
+{
+ splay_tree_insert (key, value);
+}
+
+/* Remove the node associated with KEY, if any.  */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline void
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::remove (key_type key)
+{
+ splay_tree_remove (key);
+}
+
+/* Get the value with maximal key. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline VALUE_TYPE
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::max ()
+{
+ return node_to_value (splay_tree_max ());
+}
+
+/* Get the value with minimal key. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline VALUE_TYPE
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::min ()
+{
+ return node_to_value (splay_tree_min ());
+}
+
+/* Call FOREACH_FN, passing it USER_DATA, for every node, following
+   an in-order traversal.  If FOREACH_FN ever returns a non-zero
+   value, the iteration ceases immediately, and that value is
+   returned.  Otherwise, this function returns 0.  */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline int
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::foreach (foreach_fn foreach_fn,
+ void *user_data)
+{
+ return splay_tree_foreach_helper (root, foreach_fn, user_data);
+}
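+
+/* Sketch of driving foreach () (PRINT_ENTRY is illustrative): the
+   callback receives each key/value pair in order and returns 0 to
+   continue, or non-zero to stop early, in which case that value is
+   propagated back:
+
+     static int print_entry (int k, const char *v, void *)
+     {
+       printf ("%d -> %s\n", k, v);
+       return 0;
+     }
+
+     t.foreach (print_entry, NULL);  */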
+
+/* Internal function for converting from splay_tree_node to
+ VALUE_TYPE. */
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline VALUE_TYPE
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::node_to_value (splay_tree_node node)
+{
+ if (node)
+ return node->value;
+ else
+ return 0;
+}
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline void
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::KDEL(splay_tree_key x)
+{
+ if (delete_key)
+ (*delete_key)(x);
+}
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline void
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::VDEL(splay_tree_value x)
+{
+ if (delete_value)
+ (*delete_value)(x);
+}
+
+/* Deallocate NODE (a member of SP), and all its sub-trees. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+void
+typed_splay_tree<KEY_TYPE,
+ VALUE_TYPE>::splay_tree_delete_helper (splay_tree_node node)
+{
+ splay_tree_node pending = NULL;
+ splay_tree_node active = NULL;
+
+ if (!node)
+ return;
+
+ KDEL (node->key);
+ VDEL (node->value);
+
+ /* We use the "back" field to hold the "next" pointer. */
+ node->back = pending;
+ pending = node;
+
+ /* Now, keep processing the pending list until there aren't any
+ more. This is a little more complicated than just recursing, but
+ it doesn't toast the stack for large trees. */
+
+ while (pending)
+ {
+ active = pending;
+ pending = NULL;
+ while (active)
+ {
+ splay_tree_node temp;
+
+ /* active points to a node which has its key and value
+ deallocated, we just need to process left and right. */
+
+ if (active->left)
+ {
+ KDEL (active->left->key);
+ VDEL (active->left->value);
+ active->left->back = pending;
+ pending = active->left;
+ }
+ if (active->right)
+ {
+ KDEL (active->right->key);
+ VDEL (active->right->value);
+ active->right->back = pending;
+ pending = active->right;
+ }
+
+ temp = active;
+ active = temp->back;
+ delete temp;
+ }
+ }
+}
+
+/* Rotate the edge joining the left child N with its parent P. PP is the
+   grandparent's pointer to P.  */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline void
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::rotate_left (splay_tree_node *pp,
+ splay_tree_node p,
+ splay_tree_node n)
+{
+ splay_tree_node tmp;
+ tmp = n->right;
+ n->right = p;
+ p->left = tmp;
+ *pp = n;
+}
+
+/* Rotate the edge joining the right child N with its parent P. PP is the
+   grandparent's pointer to P.  */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline void
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::rotate_right (splay_tree_node *pp,
+ splay_tree_node p,
+ splay_tree_node n)
+{
+ splay_tree_node tmp;
+ tmp = n->left;
+ n->left = p;
+ p->right = tmp;
+ *pp = n;
+}
+
+/* Bottom up splay of key. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+void
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::splay_tree_splay (splay_tree_key key)
+{
+ if (root == NULL)
+ return;
+
+ do {
+ int cmp1, cmp2;
+ splay_tree_node n, c;
+
+ n = root;
+ cmp1 = (*comp) (key, n->key);
+
+ /* Found. */
+ if (cmp1 == 0)
+ return;
+
+ /* Left or right? If no child, then we're done. */
+ if (cmp1 < 0)
+ c = n->left;
+ else
+ c = n->right;
+ if (!c)
+ return;
+
+ /* Next one left or right? If found or no child, we're done
+ after one rotation. */
+ cmp2 = (*comp) (key, c->key);
+ if (cmp2 == 0
+ || (cmp2 < 0 && !c->left)
+ || (cmp2 > 0 && !c->right))
+ {
+ if (cmp1 < 0)
+ rotate_left (&root, n, c);
+ else
+ rotate_right (&root, n, c);
+ return;
+ }
+
+ /* Now we have the four cases of double-rotation. */
+ if (cmp1 < 0 && cmp2 < 0)
+ {
+ rotate_left (&n->left, c, c->left);
+ rotate_left (&root, n, n->left);
+ }
+ else if (cmp1 > 0 && cmp2 > 0)
+ {
+ rotate_right (&n->right, c, c->right);
+ rotate_right (&root, n, n->right);
+ }
+ else if (cmp1 < 0 && cmp2 > 0)
+ {
+ rotate_right (&n->left, c, c->right);
+ rotate_left (&root, n, n->left);
+ }
+ else if (cmp1 > 0 && cmp2 < 0)
+ {
+ rotate_left (&n->right, c, c->left);
+ rotate_right (&root, n, n->right);
+ }
+ } while (1);
+}
+
+/* Call FN, passing it the DATA, for every node below NODE, all of
+   which are from SP, following an in-order traversal.  If FN ever
+ returns a non-zero value, the iteration ceases immediately, and the
+ value is returned. Otherwise, this function returns 0. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+int
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::splay_tree_foreach_helper (
+ splay_tree_node node,
+ foreach_fn fn, void *data)
+{
+ int val;
+ splay_tree_node stack;
+
+ /* A non-recursive implementation is used to avoid filling the stack
+ for large trees. Splay trees are worst case O(n) in the depth of
+ the tree. */
+
+ stack = NULL;
+ val = 0;
+
+ for (;;)
+ {
+ while (node != NULL)
+ {
+ node->back = stack;
+ stack = node;
+ node = node->left;
+ }
+
+ if (stack == NULL)
+ break;
+
+ node = stack;
+ stack = stack->back;
+
+ val = (*fn) (node->key, node->value, data);
+ if (val)
+ break;
+
+ node = node->right;
+ }
+
+ return val;
+}
+
+/* Insert a new node (associating KEY with DATA) into SP. If a
+ previous node with the indicated KEY exists, its data is replaced
+ with the new value. Returns the new node. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+typename typed_splay_tree<KEY_TYPE, VALUE_TYPE>::splay_tree_node
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::splay_tree_insert (
+ splay_tree_key key,
+ splay_tree_value value)
+{
+ int comparison = 0;
+
+ splay_tree_splay (key);
+
+ if (root)
+ comparison = (*comp)(root->key, key);
+
+ if (root && comparison == 0)
+ {
+ /* If the root of the tree already has the indicated KEY, just
+ replace the value with VALUE. */
+ VDEL(root->value);
+ root->value = value;
+ }
+ else
+ {
+ /* Create a new node, and insert it at the root. */
+ splay_tree_node node;
+
+ node = new splay_tree_node_s;
+ node->key = key;
+ node->value = value;
+
+ if (!root)
+ node->left = node->right = 0;
+ else if (comparison < 0)
+ {
+ node->left = root;
+ node->right = node->left->right;
+ node->left->right = 0;
+ }
+ else
+ {
+ node->right = root;
+ node->left = node->right->left;
+ node->right->left = 0;
+ }
+
+ root = node;
+ }
+
+ return root;
+}
+
+/* Remove KEY from SP. It is not an error if it did not exist. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+void
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::splay_tree_remove (splay_tree_key key)
+{
+ splay_tree_splay (key);
+
+ if (root && (*comp) (root->key, key) == 0)
+ {
+ splay_tree_node left, right;
+
+ left = root->left;
+ right = root->right;
+
+ /* Delete the root node itself. */
+ VDEL (root->value);
+ delete root;
+
+ /* One of the children is now the root. Doesn't matter much
+ which, so long as we preserve the properties of the tree. */
+ if (left)
+ {
+ root = left;
+
+ /* If there was a right child as well, hang it off the
+ right-most leaf of the left child. */
+ if (right)
+ {
+ while (left->right)
+ left = left->right;
+ left->right = right;
+ }
+ }
+ else
+ root = right;
+ }
+}
+
+/* Lookup KEY in SP, returning VALUE if present, and NULL
+ otherwise. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+typename typed_splay_tree<KEY_TYPE, VALUE_TYPE>::splay_tree_node
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::splay_tree_lookup (splay_tree_key key)
+{
+ splay_tree_splay (key);
+
+ if (root && (*comp)(root->key, key) == 0)
+ return root;
+ else
+ return 0;
+}
+
+/* Return the node in SP with the greatest key. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+typename typed_splay_tree<KEY_TYPE, VALUE_TYPE>::splay_tree_node
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::splay_tree_max ()
+{
+ splay_tree_node n = root;
+
+ if (!n)
+ return NULL;
+
+ while (n->right)
+ n = n->right;
+
+ return n;
+}
+
+/* Return the node in SP with the smallest key. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+typename typed_splay_tree<KEY_TYPE, VALUE_TYPE>::splay_tree_node
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::splay_tree_min ()
+{
+ splay_tree_node n = root;
+
+ if (!n)
+ return NULL;
+
+ while (n->left)
+ n = n->left;
+
+ return n;
+}
+
+/* Return the immediate predecessor of KEY, or NULL if there is no
+ predecessor. KEY need not be present in the tree. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+typename typed_splay_tree<KEY_TYPE, VALUE_TYPE>::splay_tree_node
+typed_splay_tree<KEY_TYPE,
+ VALUE_TYPE>::splay_tree_predecessor (splay_tree_key key)
+{
+ int comparison;
+ splay_tree_node node;
+
+ /* If the tree is empty, there is certainly no predecessor. */
+ if (!root)
+ return NULL;
+
+ /* Splay the tree around KEY. That will leave either the KEY
+ itself, its predecessor, or its successor at the root. */
+ splay_tree_splay (key);
+ comparison = (*comp)(root->key, key);
+
+ /* If the predecessor is at the root, just return it. */
+ if (comparison < 0)
+ return root;
+
+ /* Otherwise, find the rightmost element of the left subtree. */
+ node = root->left;
+ if (node)
+ while (node->right)
+ node = node->right;
+
+ return node;
+}
+
+/* Return the immediate successor of KEY, or NULL if there is no
+ successor. KEY need not be present in the tree. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+typename typed_splay_tree<KEY_TYPE, VALUE_TYPE>::splay_tree_node
+typed_splay_tree<KEY_TYPE,
+ VALUE_TYPE>::splay_tree_successor (splay_tree_key key)
+{
+ int comparison;
+ splay_tree_node node;
+
+ /* If the tree is empty, there is certainly no successor. */
+ if (!root)
+ return NULL;
+
+ /* Splay the tree around KEY. That will leave either the KEY
+ itself, its predecessor, or its successor at the root. */
+ splay_tree_splay (key);
+ comparison = (*comp)(root->key, key);
+
+ /* If the successor is at the root, just return it. */
+ if (comparison > 0)
+ return root;
+
+ /* Otherwise, find the leftmost element of the right subtree. */
+ node = root->right;
+ if (node)
+ while (node->left)
+ node = node->left;
+
+ return node;
+}
+
+#endif /* GCC_TYPED_SPLAY_TREE_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ubsan.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ubsan.h
new file mode 100644
index 0000000..c21d319
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/ubsan.h
@@ -0,0 +1,70 @@
+/* UndefinedBehaviorSanitizer, undefined behavior detector.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+ Contributed by Marek Polacek <polacek@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_UBSAN_H
+#define GCC_UBSAN_H
+
+/* The various kinds of NULL pointer checks. */
+enum ubsan_null_ckind {
+ UBSAN_LOAD_OF,
+ UBSAN_STORE_OF,
+ UBSAN_REF_BINDING,
+ UBSAN_MEMBER_ACCESS,
+ UBSAN_MEMBER_CALL,
+ UBSAN_CTOR_CALL,
+ UBSAN_DOWNCAST_POINTER,
+ UBSAN_DOWNCAST_REFERENCE,
+ UBSAN_UPCAST,
+ UBSAN_CAST_TO_VBASE
+};
+
+/* This controls how ubsan prints types. Used in ubsan_type_descriptor. */
+enum ubsan_print_style {
+ UBSAN_PRINT_NORMAL,
+ UBSAN_PRINT_POINTER,
+ UBSAN_PRINT_ARRAY
+};
+
+/* This controls ubsan_encode_value behavior. */
+enum ubsan_encode_value_phase {
+ UBSAN_ENCODE_VALUE_GENERIC,
+ UBSAN_ENCODE_VALUE_GIMPLE,
+ UBSAN_ENCODE_VALUE_RTL
+};
+
+extern bool ubsan_expand_bounds_ifn (gimple_stmt_iterator *);
+extern bool ubsan_expand_null_ifn (gimple_stmt_iterator *);
+extern bool ubsan_expand_objsize_ifn (gimple_stmt_iterator *);
+extern bool ubsan_expand_ptr_ifn (gimple_stmt_iterator *);
+extern bool ubsan_expand_vptr_ifn (gimple_stmt_iterator *);
+extern bool ubsan_instrument_unreachable (gimple_stmt_iterator *);
+extern tree ubsan_create_data (const char *, int, const location_t *, ...);
+extern tree ubsan_type_descriptor (tree, ubsan_print_style
+ = UBSAN_PRINT_NORMAL);
+extern tree ubsan_encode_value (tree, ubsan_encode_value_phase
+ = UBSAN_ENCODE_VALUE_GENERIC);
+extern bool is_ubsan_builtin_p (tree);
+extern tree ubsan_build_overflow_builtin (tree_code, location_t, tree, tree,
+ tree, tree *);
+extern tree ubsan_instrument_float_cast (location_t, tree, tree);
+extern tree ubsan_get_source_location_type (void);
+extern tree sanitize_unreachable_fn (tree *data, location_t loc);
+
+#endif /* GCC_UBSAN_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/valtrack.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/valtrack.h
new file mode 100644
index 0000000..98ca8e6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/valtrack.h
@@ -0,0 +1,139 @@
+/* Infrastructure for tracking user variable locations and values
+ throughout compilation.
+ Copyright (C) 2010-2023 Free Software Foundation, Inc.
+ Contributed by Alexandre Oliva <aoliva@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VALTRACK_H
+#define GCC_VALTRACK_H
+
+/* Debug uses of dead regs. */
+
+/* Entry that maps a dead pseudo (REG), used in debug insns after its
+   death in another block, to the debug temp (DTEMP) it was replaced
+   with.  */
+
+struct dead_debug_global_entry
+{
+ rtx reg;
+ rtx dtemp;
+};
+
+/* Descriptor for hash_table to hash by dead_debug_global_entry's REG
+ and map to DTEMP. */
+
+struct dead_debug_hash_descr : free_ptr_hash <dead_debug_global_entry>
+{
+ /* Hash on the pseudo number. */
+ static inline hashval_t hash (const dead_debug_global_entry *my);
+ /* Entries are identical if they refer to the same pseudo. */
+ static inline bool equal (const dead_debug_global_entry *my,
+ const dead_debug_global_entry *other);
+};
+
+/* Hash on the pseudo number. */
+inline hashval_t
+dead_debug_hash_descr::hash (const dead_debug_global_entry *my)
+{
+ return REGNO (my->reg);
+}
+
+/* Entries are identical if they refer to the same pseudo. */
+inline bool
+dead_debug_hash_descr::equal (const dead_debug_global_entry *my,
+ const dead_debug_global_entry *other)
+{
+ return my->reg == other->reg;
+}
+
+/* Maintain a global table of pseudos used in debug insns after their
+ deaths in other blocks, and debug temps their deathpoint values are
+ to be bound to. */
+
+struct dead_debug_global
+{
+  /* The hash table that maps pseudos to debug temps.  */
+ hash_table<dead_debug_hash_descr> *htab;
+ /* For each entry in htab, the bit corresponding to its REGNO will
+ be set. */
+ bitmap used;
+};
+
+/* Node of a linked list of uses of dead REGs in debug insns. */
+
+struct dead_debug_use
+{
+ df_ref use;
+ struct dead_debug_use *next;
+};
+
+/* Linked list of the above, with a bitmap of the REGs in the
+ list. */
+
+struct dead_debug_local
+{
+ /* The first dead_debug_use entry in the list. */
+ struct dead_debug_use *head;
+ /* A pointer to the global tracking data structure. */
+ struct dead_debug_global *global;
+ /* A bitmap that has bits set for each REG used in the
+ dead_debug_use list, and for each entry in the global hash
+ table. */
+ bitmap used;
+ /* A bitmap that has bits set for each INSN that is to be
+ rescanned. */
+ bitmap to_rescan;
+};
+
+/* This type controls the behavior of dead_debug_insert_temp WRT
+ UREGNO and INSN. */
+
+enum debug_temp_where
+ {
+ /* Bind a newly-created debug temporary to a REG for UREGNO, and
+ insert the debug insn before INSN. REG is expected to die at
+ INSN. */
+ DEBUG_TEMP_BEFORE_WITH_REG = -1,
+ /* Bind a newly-created debug temporary to the value INSN stores
+ in REG, and insert the debug insn before INSN. */
+ DEBUG_TEMP_BEFORE_WITH_VALUE = 0,
+ /* Bind a newly-created debug temporary to a REG for UREGNO, and
+ insert the debug insn after INSN. REG is expected to be set at
+ INSN. */
+ DEBUG_TEMP_AFTER_WITH_REG = 1,
+ /* Like DEBUG_TEMP_AFTER_WITH_REG, but force addition of a debug
+ temporary even if there is just a single debug use. This is used
+ on regs that are becoming REG_DEAD on INSN and so uses of the
+ reg later on are invalid. */
+ DEBUG_TEMP_AFTER_WITH_REG_FORCE = 2
+ };
+
+extern void dead_debug_global_init (struct dead_debug_global *, bitmap);
+extern void dead_debug_global_finish (struct dead_debug_global *, bitmap);
+extern void dead_debug_local_init (struct dead_debug_local *, bitmap,
+ struct dead_debug_global *);
+extern void dead_debug_local_finish (struct dead_debug_local *, bitmap);
+extern void dead_debug_add (struct dead_debug_local *, df_ref, unsigned int);
+extern int dead_debug_insert_temp (struct dead_debug_local *,
+ unsigned int uregno, rtx_insn *insn,
+ enum debug_temp_where);
+
+extern void propagate_for_debug (rtx_insn *, rtx_insn *, rtx, rtx, basic_block);
+
+
+#endif /* GCC_VALTRACK_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-pointer-equiv.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-pointer-equiv.h
new file mode 100644
index 0000000..37cd2b9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-pointer-equiv.h
@@ -0,0 +1,62 @@
+/* Header file for the context-aware pointer equivalence tracker.
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez <aldyh@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VALUE_POINTER_EQUIV_H
+#define GCC_VALUE_POINTER_EQUIV_H
+
+// Simple context-aware pointer equivalency analyzer that returns what
+// a pointer SSA name is equivalent to at a given point during a walk
+// of the IL.
+//
+// Note that global equivalency takes priority over conditional
+// equivalency. That is, p = &q takes priority over a later p == &t.
+//
+// This class is meant to be called during a DOM walk.
+
+class pointer_equiv_analyzer
+{
+public:
+ pointer_equiv_analyzer (gimple_ranger *r);
+ ~pointer_equiv_analyzer ();
+ void enter (basic_block);
+ void leave (basic_block);
+ void visit_stmt (gimple *stmt);
+ tree get_equiv (tree ssa);
+
+private:
+ void visit_edge (edge e);
+ tree get_equiv_expr (tree_code code, tree expr);
+ void set_global_equiv (tree ssa, tree pointee);
+ void set_cond_equiv (tree ssa, tree pointee);
+
+ gimple_ranger *m_ranger;
+ // Global pointer equivalency indexed by SSA_NAME_VERSION.
+ auto_vec<tree> m_global_points;
+ // Conditional pointer equivalency.
+ class ssa_equiv_stack *m_cond_points;
+};
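+
+// A rough sketch of the intended call pattern during a DOM walk (R,
+// BB, STMT and SSA are illustrative):
+//
+//   pointer_equiv_analyzer analyzer (&r);    // r is a gimple_ranger
+//   analyzer.enter (bb);                     // on entering a block
+//   analyzer.visit_stmt (stmt);              // for each statement
+//   if (tree pt = analyzer.get_equiv (ssa))
+//     ...SSA is known to point to PT here...
+//   analyzer.leave (bb);                     // on leaving the block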
+
+inline bool
+supported_pointer_equiv_p (tree expr)
+{
+ return TREE_CODE (expr) == SSA_NAME && POINTER_TYPE_P (TREE_TYPE (expr));
+}
+
+#endif // GCC_VALUE_POINTER_EQUIV_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-prof.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-prof.h
new file mode 100644
index 0000000..0cfd5ad
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-prof.h
@@ -0,0 +1,120 @@
+/* Definitions for transformations based on profile information for values.
+ Copyright (C) 2003-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VALUE_PROF_H
+#define GCC_VALUE_PROF_H
+
+/* Supported histogram types. */
+enum hist_type
+{
+ HIST_TYPE_INTERVAL, /* Measures histogram of values inside a specified
+ interval. */
+ HIST_TYPE_POW2, /* Histogram of power of 2 values. */
+ HIST_TYPE_TOPN_VALUES, /* Tries to identify the N most common values. */
+  HIST_TYPE_INDIR_CALL,   /* Tries to identify the function that is
+			     (almost always) called at an indirect
+			     call site.  */
+ HIST_TYPE_AVERAGE, /* Compute average value (sum of all values). */
+ HIST_TYPE_IOR, /* Used to compute expected alignment. */
+  HIST_TYPE_TIME_PROFILE, /* Used for the time profile.  */
+ HIST_TYPE_MAX
+};
+
+#define COUNTER_FOR_HIST_TYPE(TYPE) ((int) (TYPE) + GCOV_FIRST_VALUE_COUNTER)
+#define HIST_TYPE_FOR_COUNTER(COUNTER) \
+ ((enum hist_type) ((COUNTER) - GCOV_FIRST_VALUE_COUNTER))
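+
+/* The two macros above are inverses: for any histogram type T,
+   HIST_TYPE_FOR_COUNTER (COUNTER_FOR_HIST_TYPE (T)) == T.  The
+   concrete counter numbers depend on GCOV_FIRST_VALUE_COUNTER.  */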
+
+
+/* The value to measure. */
+struct histogram_value_t
+{
+ struct
+ {
+ tree value; /* The value to profile. */
+ gimple *stmt; /* Insn containing the value. */
+ gcov_type *counters; /* Pointer to first counter. */
+ struct histogram_value_t *next; /* Linked list pointer. */
+ } hvalue;
+ enum hist_type type; /* Type of information to measure. */
+ unsigned n_counters; /* Number of required counters. */
+ struct function *fun;
+ union
+ {
+ struct
+ {
+ int int_start; /* First value in interval. */
+ unsigned int steps; /* Number of values in it. */
+ } intvl; /* Interval histogram data. */
+ } hdata; /* Profiled information specific data. */
+};
+
+typedef struct histogram_value_t *histogram_value;
+typedef const struct histogram_value_t *const_histogram_value;
+
+
+typedef vec<histogram_value> histogram_values;
+
+extern void gimple_find_values_to_profile (histogram_values *);
+extern bool gimple_value_profile_transformations (void);
+
+histogram_value gimple_alloc_histogram_value (struct function *, enum hist_type,
+ gimple *stmt = NULL,
+ tree value = NULL_TREE);
+histogram_value gimple_histogram_value (struct function *, gimple *);
+histogram_value gimple_histogram_value_of_type (struct function *, gimple *,
+ enum hist_type);
+void gimple_add_histogram_value (struct function *, gimple *, histogram_value);
+void dump_histograms_for_stmt (struct function *, FILE *, gimple *);
+void gimple_remove_histogram_value (struct function *, gimple *, histogram_value);
+void gimple_remove_stmt_histograms (struct function *, gimple *);
+void gimple_duplicate_stmt_histograms (struct function *, gimple *,
+ struct function *, gimple *);
+void gimple_move_stmt_histograms (struct function *, gimple *, gimple *);
+void verify_histograms (void);
+void free_histograms (function *);
+void stringop_block_profile (gimple *, unsigned int *, HOST_WIDE_INT *);
+gcall *gimple_ic (gcall *, struct cgraph_node *, profile_probability);
+bool get_nth_most_common_value (gimple *stmt, const char *counter_type,
+ histogram_value hist, gcov_type *value,
+ gcov_type *count, gcov_type *all,
+ unsigned n = 0);
+
+/* In tree-profile.cc. */
+extern void gimple_init_gcov_profiler (void);
+extern void gimple_gen_edge_profiler (int, edge);
+extern void gimple_gen_interval_profiler (histogram_value, unsigned);
+extern void gimple_gen_pow2_profiler (histogram_value, unsigned);
+extern void gimple_gen_topn_values_profiler (histogram_value, unsigned);
+extern void gimple_gen_ic_profiler (histogram_value, unsigned);
+extern void gimple_gen_ic_func_profiler (void);
+extern void gimple_gen_time_profiler (unsigned);
+extern void gimple_gen_average_profiler (histogram_value, unsigned);
+extern void gimple_gen_ior_profiler (histogram_value, unsigned);
+extern void stream_out_histogram_value (struct output_block *, histogram_value);
+extern void stream_in_histogram_value (class lto_input_block *, gimple *);
+extern struct cgraph_node* find_func_by_profile_id (int func_id);
+
+
+/* In profile.cc. */
+extern void init_branch_prob (void);
+extern void branch_prob (bool);
+extern void read_thunk_profile (struct cgraph_node *);
+extern void end_branch_prob (void);
+
+#endif /* GCC_VALUE_PROF_H */
+
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-query.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-query.h
new file mode 100644
index 0000000..20a3711
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-query.h
@@ -0,0 +1,150 @@
+/* Support routines for value queries.
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez <aldyh@redhat.com> and
+ Andrew Macleod <amacleod@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_QUERY_H
+#define GCC_QUERY_H
+
+#include "value-relation.h"
+
+// The value_query class is used by optimization passes that require
+// valueizing SSA names in terms of a tree value, but have no need
+// for ranges.
+//
+// value_of_expr must be provided. The default for value_on_edge and
+// value_of_stmt is to call value_of_expr.
+//
+// This implies the valuation is global in nature. If a pass can make
+// use of more specific information, it can override the other queries.
+//
+// Proper usage of the correct query in passes will enable other
+// valuation mechanisms to produce more precise results.
+
+class value_query
+{
+public:
+ value_query () { }
+ // Return the singleton expression for EXPR at a gimple statement,
+ // or NULL if none found.
+ virtual tree value_of_expr (tree expr, gimple * = NULL) = 0;
+ // Return the singleton expression for EXPR at an edge, or NULL if
+ // none found.
+ virtual tree value_on_edge (edge, tree expr);
+ // Return the singleton expression for the LHS of a gimple
+ // statement, assuming an (optional) initial value of NAME. Returns
+ // NULL if none found.
+ //
+ // Note that this method calculates the range the LHS would have
+ // *after* the statement has executed.
+ virtual tree value_of_stmt (gimple *, tree name = NULL);
+
+private:
+ DISABLE_COPY_AND_ASSIGN (value_query);
+};
+
+// The range_query class is used by optimization passes which are
+// range aware.
+//
+// range_of_expr must be provided. The default for range_on_edge and
+// range_of_stmt is to call range_of_expr. If a pass can make use of
+// more specific information, then it can override the other queries.
+//
+// The default for the value_* routines is to call the equivalent
+// range_* routines, check if the range is a singleton, and return it
+// if so.
+//
+// The get_value_range method is currently provided for compatibility
+// with vr-values. It will be deprecated when possible.
+
+class range_query : public value_query
+{
+public:
+ range_query ();
+ virtual ~range_query ();
+
+ virtual tree value_of_expr (tree expr, gimple * = NULL) override;
+ virtual tree value_on_edge (edge, tree expr) override;
+ virtual tree value_of_stmt (gimple *, tree name = NULL) override;
+
+ // These are the range equivalents of the value_* methods. Instead
+ // of returning a singleton, they calculate a range and return it in
+ // R. TRUE is returned on success or FALSE if no range was found.
+ //
+ // Note that range_of_expr must always return TRUE unless ranges are
+ // unsupported for EXPR's type (supports_type_p is false).
+ virtual bool range_of_expr (vrange &r, tree expr, gimple * = NULL) = 0;
+ virtual bool range_on_edge (vrange &r, edge, tree expr);
+ virtual bool range_of_stmt (vrange &r, gimple *, tree name = NULL);
+
+ // When the IL in a stmt is changed, call this for better results.
+ virtual void update_stmt (gimple *) { }
+
+ // Query if there is any relation between SSA1 and SSA2.
+ relation_kind query_relation (gimple *s, tree ssa1, tree ssa2,
+ bool get_range = true);
+ relation_kind query_relation (edge e, tree ssa1, tree ssa2,
+ bool get_range = true);
+  // If present, access the relation oracle for more advanced uses.
+ inline relation_oracle *oracle () const { return m_oracle; }
+
+ // DEPRECATED: This method is used from vr-values. The plan is to
+ // rewrite all uses of it to the above API.
+ virtual const value_range *get_value_range (const_tree, gimple * = NULL);
+ virtual void dump (FILE *);
+
+protected:
+ bool get_tree_range (vrange &v, tree expr, gimple *stmt);
+ bool get_arith_expr_range (vrange &r, tree expr, gimple *stmt);
+ relation_oracle *m_oracle;
+
+private:
+ class equiv_allocator *equiv_alloc;
+};
+
+// Global ranges for SSA names using SSA_NAME_RANGE_INFO.
+
+class global_range_query : public range_query
+{
+public:
+ bool range_of_expr (vrange &r, tree expr, gimple * = NULL) override;
+};
+
+extern global_range_query global_ranges;
+
+inline range_query *
+get_global_range_query ()
+{
+ return &global_ranges;
+}
+
+/* Returns the currently active range access class. When there is no active
+ range class, global ranges are used. Never returns null. */
+
+ATTRIBUTE_RETURNS_NONNULL inline range_query *
+get_range_query (const struct function *fun)
+{
+ return (fun && fun->x_range_query) ? fun->x_range_query : &global_ranges;
+}
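+
+// A typical pass-side use (sketch; OP and STMT are illustrative):
+//
+//   int_range_max r;
+//   tree val;
+//   if (get_range_query (cfun)->range_of_expr (r, op, stmt)
+//       && r.singleton_p (&val))
+//     ...OP is known to have value VAL at STMT...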
+
+// Query the global range of NAME in function F. Default to cfun.
+extern void gimple_range_global (vrange &v, tree name,
+ struct function *f = cfun);
+
+#endif // GCC_QUERY_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-range-pretty-print.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-range-pretty-print.h
new file mode 100644
index 0000000..30b9c74
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-range-pretty-print.h
@@ -0,0 +1,40 @@
+/* Pretty print support for value ranges.
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez <aldyh@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VALUE_RANGE_PRETTY_H
+#define GCC_VALUE_RANGE_PRETTY_H
+
+class vrange_printer : public vrange_visitor
+{
+public:
+ vrange_printer (pretty_printer *pp_) : pp (pp_) { }
+ void visit (const unsupported_range &) const override;
+ void visit (const irange &) const override;
+ void visit (const frange &) const override;
+private:
+ void print_irange_bound (const wide_int &w, tree type) const;
+ void print_irange_bitmasks (const irange &) const;
+ void print_frange_nan (const frange &) const;
+ void print_real_value (tree type, const REAL_VALUE_TYPE &r) const;
+
+ pretty_printer *pp;
+};
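+
+// Usage sketch (R and PP are illustrative): a range can be dumped
+// through the visitor interface with an existing pretty_printer:
+//
+//   vrange_printer printer (pp);
+//   r.accept (printer);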
+
+#endif // GCC_VALUE_RANGE_PRETTY_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-range-storage.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-range-storage.h
new file mode 100644
index 0000000..1ed6f1c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-range-storage.h
@@ -0,0 +1,233 @@
+/* Support routines for vrange storage.
+ Copyright (C) 2022-2023 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez <aldyh@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VALUE_RANGE_STORAGE_H
+#define GCC_VALUE_RANGE_STORAGE_H
+
+// This class is used to allocate the minimum amount of storage needed
+// for a given range. Storage is automatically freed at destruction
+// of the class.
+
+class vrange_allocator
+{
+public:
+ vrange_allocator () { }
+ virtual ~vrange_allocator () { }
+ // Allocate a range of TYPE.
+ vrange *alloc_vrange (tree type);
+ // Allocate a memory block of BYTES.
+ virtual void *alloc (unsigned bytes) = 0;
+ virtual void free (void *p) = 0;
+ // Return a clone of SRC.
+ template <typename T> T *clone (const T &src);
+private:
+ irange *alloc_irange (unsigned pairs);
+ frange *alloc_frange ();
+ void operator= (const vrange_allocator &) = delete;
+};
+
+// This class is used to allocate chunks of memory that can store
+// ranges as memory-efficiently as possible.  It is meant to be used
+// when long-term storage of a range is needed.  The class can be used
+// with any vrange_allocator (e.g. obstack or GC).
+
+class vrange_storage
+{
+public:
+ vrange_storage (vrange_allocator *alloc) : m_alloc (alloc) { }
+ void *alloc_slot (const vrange &r);
+ void free (void *slot) { m_alloc->free (slot); }
+ void get_vrange (const void *slot, vrange &r, tree type);
+ void set_vrange (void *slot, const vrange &r);
+ static bool fits_p (const void *slot, const vrange &r);
+private:
+ DISABLE_COPY_AND_ASSIGN (vrange_storage);
+ vrange_allocator *m_alloc;
+};
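+
+// Long-term storage usage sketch (R, R2 and TYPE are illustrative):
+//
+//   obstack_vrange_allocator alloc;   // defined later in this file
+//   vrange_storage stash (&alloc);
+//   void *slot = stash.alloc_slot (r);    // write R out
+//   ...
+//   stash.get_vrange (slot, r2, type);    // read it back into R2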
+
+// A chunk of memory pointing to an irange storage.
+
+class GTY ((variable_size)) irange_storage_slot
+{
+public:
+ static irange_storage_slot *alloc_slot (vrange_allocator &, const irange &r);
+ void set_irange (const irange &r);
+ void get_irange (irange &r, tree type) const;
+ wide_int get_nonzero_bits () const { return m_ints[0]; }
+ bool fits_p (const irange &r) const;
+ static size_t size (const irange &r);
+ void dump () const;
+private:
+ DISABLE_COPY_AND_ASSIGN (irange_storage_slot);
+ friend void gt_ggc_mx_irange_storage_slot (void *);
+ friend void gt_pch_p_19irange_storage_slot (void *, void *,
+ gt_pointer_operator, void *);
+ friend void gt_pch_nx_irange_storage_slot (void *);
+
+ // This is the maximum number of wide_int's allowed in the trailing
+ // ints structure, without going over 16 bytes (128 bits) in the
+ // control word that precedes the HOST_WIDE_INTs in
+ // trailing_wide_ints::m_val[].
+ static const unsigned MAX_INTS = 12;
+
+ // Maximum number of range pairs we can handle, considering the
+ // nonzero bits take one wide_int.
+ static const unsigned MAX_PAIRS = (MAX_INTS - 1) / 2;
+
+ // Constructor is private to disallow stack initialization. Use
+ // alloc_slot() to create objects.
+ irange_storage_slot (const irange &r);
+
+ static unsigned num_wide_ints_needed (const irange &r);
+
+ trailing_wide_ints<MAX_INTS> m_ints;
+};
+
+// A chunk of memory used to store an frange in long-term storage.
+
+class GTY (()) frange_storage_slot
+{
+ public:
+ static frange_storage_slot *alloc_slot (vrange_allocator &, const frange &r);
+ void set_frange (const frange &r);
+ void get_frange (frange &r, tree type) const;
+ bool fits_p (const frange &) const;
+ private:
+ frange_storage_slot (const frange &r) { set_frange (r); }
+ DISABLE_COPY_AND_ASSIGN (frange_storage_slot);
+
+ enum value_range_kind m_kind;
+ REAL_VALUE_TYPE m_min;
+ REAL_VALUE_TYPE m_max;
+ bool m_pos_nan;
+ bool m_neg_nan;
+};
+
+class obstack_vrange_allocator final: public vrange_allocator
+{
+public:
+ obstack_vrange_allocator ()
+ {
+ obstack_init (&m_obstack);
+ }
+ virtual ~obstack_vrange_allocator () final override
+ {
+ obstack_free (&m_obstack, NULL);
+ }
+ virtual void *alloc (unsigned bytes) final override
+ {
+ return obstack_alloc (&m_obstack, bytes);
+ }
+ virtual void free (void *) final override { }
+private:
+ obstack m_obstack;
+};
+
+class ggc_vrange_allocator final: public vrange_allocator
+{
+public:
+ ggc_vrange_allocator () { }
+ virtual ~ggc_vrange_allocator () final override { }
+ virtual void *alloc (unsigned bytes) final override
+ {
+ return ggc_internal_alloc (bytes);
+ }
+ virtual void free (void *p) final override
+ {
+ return ggc_free (p);
+ }
+};
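+
+// Allocation and cloning sketch (TYPE and SOME_IRANGE are
+// illustrative):
+//
+//   obstack_vrange_allocator alloc;
+//   vrange *v = alloc.alloc_vrange (type);     // irange or frange
+//   irange *copy = alloc.clone (some_irange);  // minimally-sized clone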
+
+// Return a new range to hold ranges of TYPE. The newly allocated
+// range is initialized to VR_UNDEFINED.
+
+inline vrange *
+vrange_allocator::alloc_vrange (tree type)
+{
+ if (irange::supports_p (type))
+ return alloc_irange (2);
+ if (frange::supports_p (type))
+ return alloc_frange ();
+ return NULL;
+}
+
+// Return a new range with NUM_PAIRS.
+
+inline irange *
+vrange_allocator::alloc_irange (unsigned num_pairs)
+{
+ // Never allocate 0 pairs.
+ // Don't allocate 1 either, or we get legacy value_range's.
+ if (num_pairs < 2)
+ num_pairs = 2;
+
+ size_t nbytes = sizeof (tree) * 2 * num_pairs;
+
+ // Allocate the irange and required memory for the vector.
+ void *r = alloc (sizeof (irange));
+ tree *mem = static_cast <tree *> (alloc (nbytes));
+ return new (r) irange (mem, num_pairs, /*resizable=*/false);
+}
+
+inline frange *
+vrange_allocator::alloc_frange ()
+{
+ void *r = alloc (sizeof (frange));
+ return new (r) frange ();
+}
+
+// Return a clone of an irange.
+
+template <>
+inline irange *
+vrange_allocator::clone <irange> (const irange &src)
+{
+ irange *r = alloc_irange (src.num_pairs ());
+ *r = src;
+ return r;
+}
+
+// Return a clone of an frange.
+
+template <>
+inline frange *
+vrange_allocator::clone <frange> (const frange &src)
+{
+ frange *r = alloc_frange ();
+ *r = src;
+ return r;
+}
+
+// Return a clone of a vrange.
+
+template <>
+inline vrange *
+vrange_allocator::clone <vrange> (const vrange &src)
+{
+ if (is_a <irange> (src))
+ return clone <irange> (as_a <irange> (src));
+ if (is_a <frange> (src))
+ return clone <frange> (as_a <frange> (src));
+ return NULL;
+}
+
+#endif // GCC_VALUE_RANGE_STORAGE_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-range.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-range.h
new file mode 100644
index 0000000..96e59ec
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-range.h
@@ -0,0 +1,1487 @@
+/* Support routines for value ranges.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez <aldyh@redhat.com> and
+ Andrew Macleod <amacleod@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VALUE_RANGE_H
+#define GCC_VALUE_RANGE_H
+
+class irange;
+
+// Types of value ranges.
+enum value_range_kind
+{
+ /* Empty range. */
+ VR_UNDEFINED,
+ /* Range spans the entire domain. */
+ VR_VARYING,
+ /* Range is [MIN, MAX]. */
+ VR_RANGE,
+ /* Range is ~[MIN, MAX]. */
+ VR_ANTI_RANGE,
+ /* Range is a NAN. */
+ VR_NAN,
+  /* Sentinel value; not a real range kind.  */
+ VR_LAST
+};
+
+// Discriminator between different vrange types.
+
+enum value_range_discriminator
+{
+ // Range holds an integer or pointer.
+ VR_IRANGE,
+ // Floating point range.
+ VR_FRANGE,
+ // Range holds an unsupported type.
+ VR_UNKNOWN
+};
+
+// Abstract class for ranges of any of the supported types.
+//
+// To query what types ranger and the entire ecosystem can support,
+// use Value_Range::supports_type_p(tree type). This is a static
+// method available independently of any vrange object.
+//
+// To query what a given vrange variant can support, use:
+// irange::supports_p ()
+// frange::supports_p ()
+// etc
+//
+// To query what a range object can support, use:
+// void foo (vrange &v, irange &i, frange &f)
+// {
+// if (v.supports_type_p (type)) ...
+// if (i.supports_type_p (type)) ...
+// if (f.supports_type_p (type)) ...
+// }
+
+class vrange
+{
+ template <typename T> friend bool is_a (vrange &);
+ friend class Value_Range;
+public:
+ virtual void accept (const class vrange_visitor &v) const = 0;
+ virtual void set (tree, tree, value_range_kind = VR_RANGE);
+ virtual tree type () const;
+ virtual bool supports_type_p (const_tree type) const;
+ virtual void set_varying (tree type);
+ virtual void set_undefined ();
+ virtual bool union_ (const vrange &);
+ virtual bool intersect (const vrange &);
+ virtual bool singleton_p (tree *result = NULL) const;
+ virtual bool contains_p (tree cst) const;
+ virtual bool zero_p () const;
+ virtual bool nonzero_p () const;
+ virtual void set_nonzero (tree type);
+ virtual void set_zero (tree type);
+ virtual void set_nonnegative (tree type);
+ virtual bool fits_p (const vrange &r) const;
+
+ bool varying_p () const;
+ bool undefined_p () const;
+ vrange& operator= (const vrange &);
+ bool operator== (const vrange &) const;
+ bool operator!= (const vrange &r) const { return !(*this == r); }
+ void dump (FILE *) const;
+
+ enum value_range_kind kind () const; // DEPRECATED
+
+protected:
+ ENUM_BITFIELD(value_range_kind) m_kind : 8;
+ ENUM_BITFIELD(value_range_discriminator) m_discriminator : 4;
+};
+
+// An integer range without any storage.
+
+class GTY((user)) irange : public vrange
+{
+ friend class vrange_allocator;
+ friend class irange_storage_slot; // For legacy_mode_p checks.
+public:
+ // In-place setters.
+ virtual void set (tree, tree, value_range_kind = VR_RANGE) override;
+ void set (tree type, const wide_int_ref &, const wide_int_ref &,
+ value_range_kind = VR_RANGE);
+ virtual void set_nonzero (tree type) override;
+ virtual void set_zero (tree type) override;
+ virtual void set_nonnegative (tree type) override;
+ virtual void set_varying (tree type) override;
+ virtual void set_undefined () override;
+
+ // Range types.
+ static bool supports_p (const_tree type);
+ virtual bool supports_type_p (const_tree type) const override;
+ virtual tree type () const override;
+
+ // Iteration over sub-ranges.
+ unsigned num_pairs () const;
+ wide_int lower_bound (unsigned = 0) const;
+ wide_int upper_bound (unsigned) const;
+ wide_int upper_bound () const;
+
+ // Predicates.
+ virtual bool zero_p () const override;
+ virtual bool nonzero_p () const override;
+ virtual bool singleton_p (tree *result = NULL) const override;
+ virtual bool contains_p (tree cst) const override;
+
+ // In-place operators.
+ virtual bool union_ (const vrange &) override;
+ virtual bool intersect (const vrange &) override;
+ void invert ();
+
+ // Operator overloads.
+ irange& operator= (const irange &);
+ bool operator== (const irange &) const;
+ bool operator!= (const irange &r) const { return !(*this == r); }
+
+ // Misc methods.
+ virtual bool fits_p (const vrange &r) const override;
+ virtual void accept (const vrange_visitor &v) const override;
+
+ // Nonzero masks.
+ wide_int get_nonzero_bits () const;
+ void set_nonzero_bits (const wide_int_ref &bits);
+
+ // Deprecated legacy public methods.
+ tree min () const; // DEPRECATED
+ tree max () const; // DEPRECATED
+ bool symbolic_p () const; // DEPRECATED
+ bool constant_p () const; // DEPRECATED
+ void normalize_symbolics (); // DEPRECATED
+ void normalize_addresses (); // DEPRECATED
+ bool may_contain_p (tree) const; // DEPRECATED
+ bool legacy_verbose_union_ (const class irange *); // DEPRECATED
+ bool legacy_verbose_intersect (const irange *); // DEPRECATED
+
+protected:
+ void maybe_resize (int needed);
+ irange (tree *, unsigned nranges, bool resizable);
+ // potential promotion to public?
+ tree tree_lower_bound (unsigned = 0) const;
+ tree tree_upper_bound (unsigned) const;
+ tree tree_upper_bound () const;
+
+ // In-place operators.
+ bool irange_union (const irange &);
+ bool irange_intersect (const irange &);
+ void irange_set (tree, tree);
+ void irange_set_anti_range (tree, tree);
+ bool irange_contains_p (const irange &) const;
+ bool irange_single_pair_union (const irange &r);
+
+ void normalize_kind ();
+
+ bool legacy_mode_p () const;
+ bool legacy_equal_p (const irange &) const;
+ void legacy_union (irange *, const irange *);
+ void legacy_intersect (irange *, const irange *);
+ void verify_range ();
+ wide_int legacy_lower_bound (unsigned = 0) const;
+ wide_int legacy_upper_bound (unsigned) const;
+ int value_inside_range (tree) const;
+ bool maybe_anti_range () const;
+ void copy_to_legacy (const irange &);
+ void copy_legacy_to_multi_range (const irange &);
+
+ // Hard limit on max ranges allowed.
+ static const int HARD_MAX_RANGES = 255;
+private:
+ friend void gt_ggc_mx (irange *);
+ friend void gt_pch_nx (irange *);
+ friend void gt_pch_nx (irange *, gt_pointer_operator, void *);
+
+ void irange_set_1bit_anti_range (tree, tree);
+ bool varying_compatible_p () const;
+ bool intersect_nonzero_bits (const irange &r);
+ bool union_nonzero_bits (const irange &r);
+ wide_int get_nonzero_bits_from_range () const;
+ bool set_range_from_nonzero_bits ();
+
+ bool intersect (const wide_int& lb, const wide_int& ub);
+ unsigned char m_num_ranges;
+ bool m_resizable;
+ unsigned char m_max_ranges;
+ tree m_nonzero_mask;
+protected:
+ tree *m_base;
+};
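+
+// Iterating the sub-ranges of an irange (sketch; R is any irange):
+//
+//   for (unsigned i = 0; i < r.num_pairs (); ++i)
+//     {
+//       wide_int lo = r.lower_bound (i);
+//       wide_int hi = r.upper_bound (i);
+//       ...
+//     }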
+
+// Here we describe an irange with N pairs of ranges. The storage for
+// the pairs is embedded in the class as an array.
+//
+// If RESIZABLE is true, the storage will be resized on the heap when
+// the number of ranges needed goes past N up to a max of
+// HARD_MAX_RANGES. This new storage is freed upon destruction.
+
+template<unsigned N, bool RESIZABLE = false>
+class GTY((user)) int_range : public irange
+{
+public:
+ int_range ();
+ int_range (tree, tree, value_range_kind = VR_RANGE);
+ int_range (tree type, const wide_int &, const wide_int &,
+ value_range_kind = VR_RANGE);
+ int_range (tree type);
+ int_range (const int_range &);
+ int_range (const irange &);
+ virtual ~int_range ();
+ int_range& operator= (const int_range &);
+private:
+ template <unsigned X> friend void gt_ggc_mx (int_range<X> *);
+ template <unsigned X> friend void gt_pch_nx (int_range<X> *);
+ template <unsigned X> friend void gt_pch_nx (int_range<X> *,
+ gt_pointer_operator, void *);
+
+  // ?? These stubs are for ipa-prop.cc which uses a value_range in a
+ // hash_traits. hash-traits.h defines an extern of gt_ggc_mx (T &)
+ // instead of picking up the gt_ggc_mx (T *) version.
+ friend void gt_ggc_mx (int_range<1> *&);
+ friend void gt_pch_nx (int_range<1> *&);
+
+ tree m_ranges[N*2];
+};
+
+// Unsupported temporaries may be created by ranger before it's known
+// they're unsupported, or by vr_values::get_value_range.
+
+class unsupported_range : public vrange
+{
+public:
+ unsupported_range ()
+ {
+ m_discriminator = VR_UNKNOWN;
+ set_undefined ();
+ }
+ virtual void set_undefined () final override
+ {
+ m_kind = VR_UNDEFINED;
+ }
+ virtual void accept (const vrange_visitor &v) const override;
+};
+
+// The NAN state as an opaque object. The default constructor is +-NAN.
+
+class nan_state
+{
+public:
+ nan_state ();
+ nan_state (bool pos_nan, bool neg_nan);
+ bool neg_p () const;
+ bool pos_p () const;
+private:
+ bool m_pos_nan;
+ bool m_neg_nan;
+};
+
+// Default constructor initializing the object to +-NAN.
+
+inline
+nan_state::nan_state ()
+{
+ m_pos_nan = true;
+ m_neg_nan = true;
+}
+
+// Constructor initializing the object to +NAN if POS_NAN is set, -NAN
+// if NEG_NAN is set, or +-NAN if both are set. Otherwise POS_NAN and
+// NEG_NAN are clear, and the object cannot be a NAN.
+
+inline
+nan_state::nan_state (bool pos_nan, bool neg_nan)
+{
+ m_pos_nan = pos_nan;
+ m_neg_nan = neg_nan;
+}
+
+// Return if +NAN is possible.
+
+inline bool
+nan_state::pos_p () const
+{
+ return m_pos_nan;
+}
+
+// Return if -NAN is possible.
+
+inline bool
+nan_state::neg_p () const
+{
+ return m_neg_nan;
+}
+
+// A floating point range.
+//
+// The representation is a type with a pair of endpoints, unioned
+// with the set of { -NAN, +NAN }.
+
+class frange : public vrange
+{
+ friend class frange_storage_slot;
+ friend class vrange_printer;
+public:
+ frange ();
+ frange (const frange &);
+ frange (tree, tree, value_range_kind = VR_RANGE);
+ frange (tree type);
+ frange (tree type, const REAL_VALUE_TYPE &min, const REAL_VALUE_TYPE &max,
+ value_range_kind = VR_RANGE);
+ static bool supports_p (const_tree type)
+ {
+ // ?? Decimal floats can have multiple representations for the
+ // same number. Supporting them may be as simple as just
+ // disabling them in singleton_p. No clue.
+ return SCALAR_FLOAT_TYPE_P (type) && !DECIMAL_FLOAT_TYPE_P (type);
+ }
+ virtual tree type () const override;
+ virtual void set (tree, tree, value_range_kind = VR_RANGE) override;
+ void set (tree type, const REAL_VALUE_TYPE &, const REAL_VALUE_TYPE &,
+ value_range_kind = VR_RANGE);
+ void set (tree type, const REAL_VALUE_TYPE &, const REAL_VALUE_TYPE &,
+ const nan_state &, value_range_kind = VR_RANGE);
+ void set_nan (tree type);
+ void set_nan (tree type, bool sign);
+ virtual void set_varying (tree type) override;
+ virtual void set_undefined () override;
+ virtual bool union_ (const vrange &) override;
+ virtual bool intersect (const vrange &) override;
+ virtual bool contains_p (tree) const override;
+ virtual bool singleton_p (tree *result = NULL) const override;
+ virtual bool supports_type_p (const_tree type) const override;
+ virtual void accept (const vrange_visitor &v) const override;
+ virtual bool zero_p () const override;
+ virtual bool nonzero_p () const override;
+ virtual void set_nonzero (tree type) override;
+ virtual void set_zero (tree type) override;
+ virtual void set_nonnegative (tree type) override;
+ frange& operator= (const frange &);
+ bool operator== (const frange &) const;
+ bool operator!= (const frange &r) const { return !(*this == r); }
+ const REAL_VALUE_TYPE &lower_bound () const;
+ const REAL_VALUE_TYPE &upper_bound () const;
+ nan_state get_nan_state () const;
+ void update_nan ();
+ void update_nan (bool sign);
+ void update_nan (tree) = delete; // Disallow silent conversion to bool.
+ void update_nan (const nan_state &);
+ void clear_nan ();
+ void flush_denormals_to_zero ();
+
+  // fpclassify-like API.
+ bool known_isfinite () const;
+ bool known_isnan () const;
+ bool known_isinf () const;
+ bool maybe_isnan () const;
+ bool maybe_isnan (bool sign) const;
+ bool maybe_isinf () const;
+ bool signbit_p (bool &signbit) const;
+ bool nan_signbit_p (bool &signbit) const;
+private:
+ void verify_range ();
+ bool normalize_kind ();
+ bool union_nans (const frange &);
+ bool intersect_nans (const frange &);
+ bool combine_zeros (const frange &, bool union_p);
+
+ tree m_type;
+ REAL_VALUE_TYPE m_min;
+ REAL_VALUE_TYPE m_max;
+ bool m_pos_nan;
+ bool m_neg_nan;
+};
+
+inline const REAL_VALUE_TYPE &
+frange::lower_bound () const
+{
+ gcc_checking_assert (!undefined_p () && !known_isnan ());
+ return m_min;
+}
+
+inline const REAL_VALUE_TYPE &
+frange::upper_bound () const
+{
+ gcc_checking_assert (!undefined_p () && !known_isnan ());
+ return m_max;
+}
+
+// Return the NAN state.
+
+inline nan_state
+frange::get_nan_state () const
+{
+ return nan_state (m_pos_nan, m_neg_nan);
+}
+
+// is_a<> and as_a<> implementation for vrange.
+
+// Anything we haven't specialized is a hard fail.
+template <typename T>
+inline bool
+is_a (vrange &)
+{
+ gcc_unreachable ();
+ return false;
+}
+
+template <typename T>
+inline bool
+is_a (const vrange &v)
+{
+  // Reuse the non-const is_a <T> to implement the const version.
+ const T &derived = static_cast<const T &> (v);
+ return is_a <T> (const_cast<T &> (derived));
+}
+
+template <typename T>
+inline T &
+as_a (vrange &v)
+{
+ gcc_checking_assert (is_a <T> (v));
+ return static_cast <T &> (v);
+}
+
+template <typename T>
+inline const T &
+as_a (const vrange &v)
+{
+ gcc_checking_assert (is_a <T> (v));
+ return static_cast <const T &> (v);
+}
+
+// Specializations for the different range types.
+
+template <>
+inline bool
+is_a <irange> (vrange &v)
+{
+ return v.m_discriminator == VR_IRANGE;
+}
+
+template <>
+inline bool
+is_a <frange> (vrange &v)
+{
+ return v.m_discriminator == VR_FRANGE;
+}
+
+// For resizable ranges, resize the range up to HARD_MAX_RANGES if the
+// number of NEEDED pairs is greater than the current capacity of the range.
+
+inline void
+irange::maybe_resize (int needed)
+{
+ if (!m_resizable || m_max_ranges == HARD_MAX_RANGES)
+ return;
+
+ if (needed > m_max_ranges)
+ {
+ m_max_ranges = HARD_MAX_RANGES;
+ tree *newmem = new tree[m_max_ranges * 2];
+ memcpy (newmem, m_base, sizeof (tree) * num_pairs () * 2);
+ m_base = newmem;
+ }
+}
+
+template<unsigned N, bool RESIZABLE>
+inline
+int_range<N, RESIZABLE>::~int_range ()
+{
+ if (RESIZABLE && m_base != m_ranges)
+    delete[] m_base;
+}
+
+// This is an "infinite" precision irange for use in temporary
+// calculations. It starts with a sensible default covering 99% of
+// uses, and goes up to HARD_MAX_RANGES when needed. Any allocated
+// storage is freed upon destruction.
+typedef int_range<3, /*RESIZABLE=*/true> int_range_max;
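+
+// Usage sketch (TYPE and OP_RANGE are illustrative names, not part of
+// this API):
+//
+//   int_range_max tmp (type);    // Starts as VARYING for TYPE.
+//   tmp.intersect (op_range);    // May resize, up to HARD_MAX_RANGES.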
+
+class vrange_visitor
+{
+public:
+ virtual void visit (const irange &) const { }
+ virtual void visit (const frange &) const { }
+ virtual void visit (const unsupported_range &) const { }
+};
+
+// This is a special int_range<1> with only one pair, plus
+// VR_ANTI_RANGE magic to describe slightly more than can be described
+// in one pair. It is described in the code as a "legacy range" (as
+// opposed to multi-ranges which have multiple sub-ranges). It is
+// provided for backward compatibility with code that has not been
+// converted to multi-range iranges.
+//
+// There are copy operators to seamlessly copy to/from multi-ranges.
+typedef int_range<1> value_range;
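+
+// For example (a sketch; the values are illustrative), copying a
+// multi-range into a legacy range may conservatively approximate it to
+// a single pair:
+//
+//   int_range<3> multi;          // e.g. [0,0] [5,10] [20,30]
+//   value_range legacy (multi);  // Approximated to fit one pair.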
+
+// This is an "infinite" precision range object for use in temporary
+// calculations for any of the handled types. The object can be
+// transparently used as a vrange.
+
+class Value_Range
+{
+public:
+ Value_Range ();
+ Value_Range (const vrange &r);
+ Value_Range (tree type);
+ Value_Range (const Value_Range &);
+ void set_type (tree type);
+ vrange& operator= (const vrange &);
+ bool operator== (const Value_Range &r) const;
+ bool operator!= (const Value_Range &r) const;
+ operator vrange &();
+ operator const vrange &() const;
+ void dump (FILE *) const;
+ static bool supports_type_p (const_tree type);
+
+ // Convenience methods for vrange compatibility.
+ void set (tree min, tree max, value_range_kind kind = VR_RANGE)
+ { return m_vrange->set (min, max, kind); }
+ tree type () { return m_vrange->type (); }
+ enum value_range_kind kind () { return m_vrange->kind (); }
+ bool varying_p () const { return m_vrange->varying_p (); }
+ bool undefined_p () const { return m_vrange->undefined_p (); }
+ void set_varying (tree type) { m_vrange->set_varying (type); }
+ void set_undefined () { m_vrange->set_undefined (); }
+ bool union_ (const vrange &r) { return m_vrange->union_ (r); }
+ bool intersect (const vrange &r) { return m_vrange->intersect (r); }
+ bool singleton_p (tree *result = NULL) const
+ { return m_vrange->singleton_p (result); }
+ bool zero_p () const { return m_vrange->zero_p (); }
+ wide_int lower_bound () const; // For irange/prange comparability.
+ wide_int upper_bound () const; // For irange/prange comparability.
+ void accept (const vrange_visitor &v) const { m_vrange->accept (v); }
+private:
+ void init (tree type);
+ unsupported_range m_unsupported;
+ vrange *m_vrange;
+ int_range_max m_irange;
+ frange m_frange;
+};
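+
+// A usage sketch (OP, Q and CST are illustrative; range_of_expr is the
+// range_query API from value-query.h):
+//
+//   Value_Range r (TREE_TYPE (op));
+//   tree cst;
+//   if (q->range_of_expr (r, op) && r.singleton_p (&cst))
+//     {
+//       // CST is the only value OP can take.
+//     }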
+
+inline
+Value_Range::Value_Range ()
+{
+ m_vrange = &m_unsupported;
+}
+
+// Copy constructor from a vrange.
+
+inline
+Value_Range::Value_Range (const vrange &r)
+{
+ *this = r;
+}
+
+// Constructor from a TYPE.  The range of the temporary is set to
+// UNDEFINED.
+
+inline
+Value_Range::Value_Range (tree type)
+{
+ init (type);
+}
+
+inline
+Value_Range::Value_Range (const Value_Range &r)
+{
+ m_vrange = r.m_vrange;
+}
+
+// Initialize object so it is possible to store temporaries of TYPE
+// into it.
+
+inline void
+Value_Range::init (tree type)
+{
+ gcc_checking_assert (TYPE_P (type));
+
+ if (irange::supports_p (type))
+ m_vrange = &m_irange;
+ else if (frange::supports_p (type))
+ m_vrange = &m_frange;
+ else
+ m_vrange = &m_unsupported;
+}
+
+// Set the temporary to allow storing temporaries of TYPE. The range
+// of the temporary is set to UNDEFINED.
+
+inline void
+Value_Range::set_type (tree type)
+{
+ init (type);
+ m_vrange->set_undefined ();
+}
+
+// Assignment operator for temporaries. Copying incompatible types is
+// allowed.
+
+inline vrange &
+Value_Range::operator= (const vrange &r)
+{
+ if (is_a <irange> (r))
+ {
+ m_irange = as_a <irange> (r);
+ m_vrange = &m_irange;
+ }
+ else if (is_a <frange> (r))
+ {
+ m_frange = as_a <frange> (r);
+ m_vrange = &m_frange;
+ }
+ else
+ gcc_unreachable ();
+
+ return *m_vrange;
+}
+
+inline bool
+Value_Range::operator== (const Value_Range &r) const
+{
+ return *m_vrange == *r.m_vrange;
+}
+
+inline bool
+Value_Range::operator!= (const Value_Range &r) const
+{
+ return *m_vrange != *r.m_vrange;
+}
+
+inline
+Value_Range::operator vrange &()
+{
+ return *m_vrange;
+}
+
+inline
+Value_Range::operator const vrange &() const
+{
+ return *m_vrange;
+}
+
+// Return TRUE if TYPE is supported by the vrange infrastructure.
+
+inline bool
+Value_Range::supports_type_p (const_tree type)
+{
+ return irange::supports_p (type) || frange::supports_p (type);
+}
+
+// Returns true for an old-school value_range as described above.
+inline bool
+irange::legacy_mode_p () const
+{
+ return m_max_ranges == 1;
+}
+
+extern bool range_has_numeric_bounds_p (const irange *);
+extern bool ranges_from_anti_range (const value_range *,
+ value_range *, value_range *);
+extern void dump_value_range (FILE *, const vrange *);
+extern bool vrp_val_is_min (const_tree);
+extern bool vrp_val_is_max (const_tree);
+extern bool vrp_operand_equal_p (const_tree, const_tree);
+inline REAL_VALUE_TYPE frange_val_min (const_tree type);
+inline REAL_VALUE_TYPE frange_val_max (const_tree type);
+
+inline value_range_kind
+vrange::kind () const
+{
+ return m_kind;
+}
+
+// Number of sub-ranges in a range.
+
+inline unsigned
+irange::num_pairs () const
+{
+ if (m_kind == VR_ANTI_RANGE)
+ return constant_p () ? 2 : 1;
+ else
+ return m_num_ranges;
+}
+
+inline tree
+irange::type () const
+{
+ gcc_checking_assert (m_num_ranges > 0);
+ return TREE_TYPE (m_base[0]);
+}
+
+// Return the lower bound of a sub-range expressed as a tree. PAIR is
+// the sub-range in question.
+
+inline tree
+irange::tree_lower_bound (unsigned pair) const
+{
+ return m_base[pair * 2];
+}
+
+// Return the upper bound of a sub-range expressed as a tree. PAIR is
+// the sub-range in question.
+
+inline tree
+irange::tree_upper_bound (unsigned pair) const
+{
+ return m_base[pair * 2 + 1];
+}
+
+// Return the highest bound of a range expressed as a tree.
+
+inline tree
+irange::tree_upper_bound () const
+{
+ gcc_checking_assert (m_num_ranges);
+ return tree_upper_bound (m_num_ranges - 1);
+}
+
+inline tree
+irange::min () const
+{
+ return tree_lower_bound (0);
+}
+
+inline tree
+irange::max () const
+{
+ if (m_num_ranges)
+ return tree_upper_bound ();
+ else
+ return NULL;
+}
+
+inline bool
+irange::varying_compatible_p () const
+{
+ if (m_num_ranges != 1)
+ return false;
+
+ tree l = m_base[0];
+ tree u = m_base[1];
+ tree t = TREE_TYPE (l);
+
+ if (m_kind == VR_VARYING && t == error_mark_node)
+ return true;
+
+ unsigned prec = TYPE_PRECISION (t);
+ signop sign = TYPE_SIGN (t);
+ if (INTEGRAL_TYPE_P (t))
+ return (wi::to_wide (l) == wi::min_value (prec, sign)
+ && wi::to_wide (u) == wi::max_value (prec, sign)
+ && (!m_nonzero_mask || wi::to_wide (m_nonzero_mask) == -1));
+ if (POINTER_TYPE_P (t))
+ return (wi::to_wide (l) == 0
+ && wi::to_wide (u) == wi::max_value (prec, sign)
+ && (!m_nonzero_mask || wi::to_wide (m_nonzero_mask) == -1));
+ return true;
+}
+
+inline void
+irange::set (tree type, const wide_int_ref &min, const wide_int_ref &max,
+ value_range_kind kind)
+{
+ set (wide_int_to_tree (type, min), wide_int_to_tree (type, max), kind);
+}
+
+inline bool
+vrange::varying_p () const
+{
+ return m_kind == VR_VARYING;
+}
+
+inline bool
+vrange::undefined_p () const
+{
+ return m_kind == VR_UNDEFINED;
+}
+
+inline bool
+irange::zero_p () const
+{
+ return (m_kind == VR_RANGE && m_num_ranges == 1
+ && integer_zerop (tree_lower_bound (0))
+ && integer_zerop (tree_upper_bound (0)));
+}
+
+inline bool
+irange::nonzero_p () const
+{
+ if (undefined_p ())
+ return false;
+
+ tree zero = build_zero_cst (type ());
+ return *this == int_range<1> (zero, zero, VR_ANTI_RANGE);
+}
+
+inline bool
+irange::supports_p (const_tree type)
+{
+ return INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type);
+}
+
+inline bool
+range_includes_zero_p (const irange *vr)
+{
+ if (vr->undefined_p ())
+ return false;
+
+ if (vr->varying_p ())
+ return true;
+
+ return vr->may_contain_p (build_zero_cst (vr->type ()));
+}
+
+inline void
+gt_ggc_mx (irange *x)
+{
+ for (unsigned i = 0; i < x->m_num_ranges; ++i)
+ {
+ gt_ggc_mx (x->m_base[i * 2]);
+ gt_ggc_mx (x->m_base[i * 2 + 1]);
+ }
+ if (x->m_nonzero_mask)
+ gt_ggc_mx (x->m_nonzero_mask);
+}
+
+inline void
+gt_pch_nx (irange *x)
+{
+ for (unsigned i = 0; i < x->m_num_ranges; ++i)
+ {
+ gt_pch_nx (x->m_base[i * 2]);
+ gt_pch_nx (x->m_base[i * 2 + 1]);
+ }
+ if (x->m_nonzero_mask)
+ gt_pch_nx (x->m_nonzero_mask);
+}
+
+inline void
+gt_pch_nx (irange *x, gt_pointer_operator op, void *cookie)
+{
+ for (unsigned i = 0; i < x->m_num_ranges; ++i)
+ {
+ op (&x->m_base[i * 2], NULL, cookie);
+ op (&x->m_base[i * 2 + 1], NULL, cookie);
+ }
+ if (x->m_nonzero_mask)
+ op (&x->m_nonzero_mask, NULL, cookie);
+}
+
+template<unsigned N>
+inline void
+gt_ggc_mx (int_range<N> *x)
+{
+ gt_ggc_mx ((irange *) x);
+}
+
+template<unsigned N>
+inline void
+gt_pch_nx (int_range<N> *x)
+{
+ gt_pch_nx ((irange *) x);
+}
+
+template<unsigned N>
+inline void
+gt_pch_nx (int_range<N> *x, gt_pointer_operator op, void *cookie)
+{
+ gt_pch_nx ((irange *) x, op, cookie);
+}
+
+// Constructors for irange
+
+inline
+irange::irange (tree *base, unsigned nranges, bool resizable)
+{
+ m_discriminator = VR_IRANGE;
+ m_base = base;
+ m_max_ranges = nranges;
+ m_resizable = resizable;
+ set_undefined ();
+}
+
+// Constructors for int_range<>.
+
+template<unsigned N, bool RESIZABLE>
+inline
+int_range<N, RESIZABLE>::int_range ()
+ : irange (m_ranges, N, RESIZABLE)
+{
+}
+
+template<unsigned N, bool RESIZABLE>
+int_range<N, RESIZABLE>::int_range (const int_range &other)
+ : irange (m_ranges, N, RESIZABLE)
+{
+ irange::operator= (other);
+}
+
+template<unsigned N, bool RESIZABLE>
+int_range<N, RESIZABLE>::int_range (tree min, tree max, value_range_kind kind)
+ : irange (m_ranges, N, RESIZABLE)
+{
+ irange::set (min, max, kind);
+}
+
+template<unsigned N, bool RESIZABLE>
+int_range<N, RESIZABLE>::int_range (tree type)
+ : irange (m_ranges, N, RESIZABLE)
+{
+ set_varying (type);
+}
+
+template<unsigned N, bool RESIZABLE>
+int_range<N, RESIZABLE>::int_range (tree type, const wide_int &wmin, const wide_int &wmax,
+ value_range_kind kind)
+ : irange (m_ranges, N, RESIZABLE)
+{
+ tree min = wide_int_to_tree (type, wmin);
+ tree max = wide_int_to_tree (type, wmax);
+ set (min, max, kind);
+}
+
+template<unsigned N, bool RESIZABLE>
+int_range<N, RESIZABLE>::int_range (const irange &other)
+ : irange (m_ranges, N, RESIZABLE)
+{
+ irange::operator= (other);
+}
+
+template<unsigned N, bool RESIZABLE>
+int_range<N, RESIZABLE>&
+int_range<N, RESIZABLE>::operator= (const int_range &src)
+{
+ irange::operator= (src);
+ return *this;
+}
+
+inline void
+irange::set_undefined ()
+{
+ m_kind = VR_UNDEFINED;
+ m_num_ranges = 0;
+ m_nonzero_mask = NULL;
+}
+
+inline void
+irange::set_varying (tree type)
+{
+ m_kind = VR_VARYING;
+ m_num_ranges = 1;
+ m_nonzero_mask = NULL;
+
+ if (INTEGRAL_TYPE_P (type))
+ {
+      // Strict enums require varying to use the full-precision min_value
+      // and max_value, rather than TYPE_MIN/MAX_VALUE.
+ wide_int min = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
+ wide_int max = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
+ if (wi::eq_p (max, wi::to_wide (TYPE_MAX_VALUE (type)))
+ && wi::eq_p (min, wi::to_wide (TYPE_MIN_VALUE (type))))
+ {
+ m_base[0] = TYPE_MIN_VALUE (type);
+ m_base[1] = TYPE_MAX_VALUE (type);
+ }
+ else
+ {
+ m_base[0] = wide_int_to_tree (type, min);
+ m_base[1] = wide_int_to_tree (type, max);
+ }
+ }
+ else if (POINTER_TYPE_P (type))
+ {
+ m_base[0] = build_int_cst (type, 0);
+ m_base[1] = build_int_cst (type, -1);
+ }
+ else
+ m_base[0] = m_base[1] = error_mark_node;
+}
+
+// Return the lower bound of a sub-range. PAIR is the sub-range in
+// question.
+
+inline wide_int
+irange::lower_bound (unsigned pair) const
+{
+ if (legacy_mode_p ())
+ return legacy_lower_bound (pair);
+ gcc_checking_assert (m_num_ranges > 0);
+ gcc_checking_assert (pair + 1 <= num_pairs ());
+ return wi::to_wide (tree_lower_bound (pair));
+}
+
+// Return the upper bound of a sub-range. PAIR is the sub-range in
+// question.
+
+inline wide_int
+irange::upper_bound (unsigned pair) const
+{
+ if (legacy_mode_p ())
+ return legacy_upper_bound (pair);
+ gcc_checking_assert (m_num_ranges > 0);
+ gcc_checking_assert (pair + 1 <= num_pairs ());
+ return wi::to_wide (tree_upper_bound (pair));
+}
+
+// Return the highest bound of a range.
+
+inline wide_int
+irange::upper_bound () const
+{
+ unsigned pairs = num_pairs ();
+ gcc_checking_assert (pairs > 0);
+ return upper_bound (pairs - 1);
+}
+
+inline bool
+irange::union_ (const vrange &r)
+{
+  dump_flags_t saved_flags = dump_flags;
+  dump_flags &= ~TDF_DETAILS;
+  bool ret = irange::legacy_verbose_union_ (&as_a <irange> (r));
+  dump_flags = saved_flags;
+ return ret;
+}
+
+inline bool
+irange::intersect (const vrange &r)
+{
+  dump_flags_t saved_flags = dump_flags;
+  dump_flags &= ~TDF_DETAILS;
+  bool ret = irange::legacy_verbose_intersect (&as_a <irange> (r));
+  dump_flags = saved_flags;
+ return ret;
+}
+
+// Set the range to a nonzero range of type TYPE.
+
+inline void
+irange::set_nonzero (tree type)
+{
+ tree zero = build_int_cst (type, 0);
+ if (legacy_mode_p ())
+ set (zero, zero, VR_ANTI_RANGE);
+ else
+ irange_set_anti_range (zero, zero);
+}
+
+// Set the range to a ZERO range of type TYPE.
+
+inline void
+irange::set_zero (tree type)
+{
+ tree z = build_int_cst (type, 0);
+ if (legacy_mode_p ())
+ set (z, z);
+ else
+ irange_set (z, z);
+}
+
+// Normalize a range to VARYING or UNDEFINED if possible.
+
+inline void
+irange::normalize_kind ()
+{
+ if (m_num_ranges == 0)
+ set_undefined ();
+ else if (varying_compatible_p ())
+ {
+ if (m_kind == VR_RANGE)
+ m_kind = VR_VARYING;
+ else if (m_kind == VR_ANTI_RANGE)
+ set_undefined ();
+ }
+}
+
+// Return the maximum value for TYPE.
+
+inline tree
+vrp_val_max (const_tree type)
+{
+ if (INTEGRAL_TYPE_P (type))
+ return TYPE_MAX_VALUE (type);
+ if (POINTER_TYPE_P (type))
+ {
+ wide_int max = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
+ return wide_int_to_tree (const_cast<tree> (type), max);
+ }
+ if (frange::supports_p (type))
+ {
+ REAL_VALUE_TYPE r = frange_val_max (type);
+ return build_real (const_cast <tree> (type), r);
+ }
+ return NULL_TREE;
+}
+
+// Return the minimum value for TYPE.
+
+inline tree
+vrp_val_min (const_tree type)
+{
+ if (INTEGRAL_TYPE_P (type))
+ return TYPE_MIN_VALUE (type);
+ if (POINTER_TYPE_P (type))
+ return build_zero_cst (const_cast<tree> (type));
+ if (frange::supports_p (type))
+ {
+ REAL_VALUE_TYPE r = frange_val_min (type);
+ return build_real (const_cast <tree> (type), r);
+ }
+ return NULL_TREE;
+}
+
+inline
+frange::frange ()
+{
+ m_discriminator = VR_FRANGE;
+ set_undefined ();
+}
+
+inline
+frange::frange (const frange &src)
+{
+ m_discriminator = VR_FRANGE;
+ *this = src;
+}
+
+inline
+frange::frange (tree type)
+{
+ m_discriminator = VR_FRANGE;
+ set_varying (type);
+}
+
+// frange constructor from REAL_VALUE_TYPE endpoints.
+
+inline
+frange::frange (tree type,
+ const REAL_VALUE_TYPE &min, const REAL_VALUE_TYPE &max,
+ value_range_kind kind)
+{
+ m_discriminator = VR_FRANGE;
+ set (type, min, max, kind);
+}
+
+// frange constructor from trees.
+
+inline
+frange::frange (tree min, tree max, value_range_kind kind)
+{
+ m_discriminator = VR_FRANGE;
+ set (min, max, kind);
+}
+
+inline tree
+frange::type () const
+{
+ gcc_checking_assert (!undefined_p ());
+ return m_type;
+}
+
+inline void
+frange::set_varying (tree type)
+{
+ m_kind = VR_VARYING;
+ m_type = type;
+ m_min = frange_val_min (type);
+ m_max = frange_val_max (type);
+ if (HONOR_NANS (m_type))
+ {
+ m_pos_nan = true;
+ m_neg_nan = true;
+ }
+ else
+ {
+ m_pos_nan = false;
+ m_neg_nan = false;
+ }
+}
+
+inline void
+frange::set_undefined ()
+{
+ m_kind = VR_UNDEFINED;
+ m_type = NULL;
+ m_pos_nan = false;
+ m_neg_nan = false;
+  // m_min and m_max are left uninitialized, as they are REAL_VALUE_TYPE.
+ if (flag_checking)
+ verify_range ();
+}
+
+// Set the NAN bit and adjust the range.
+
+inline void
+frange::update_nan ()
+{
+ gcc_checking_assert (!undefined_p ());
+ if (HONOR_NANS (m_type))
+ {
+ m_pos_nan = true;
+ m_neg_nan = true;
+ normalize_kind ();
+ if (flag_checking)
+ verify_range ();
+ }
+}
+
+// Like above, but set the sign of the NAN.
+
+inline void
+frange::update_nan (bool sign)
+{
+ gcc_checking_assert (!undefined_p ());
+ if (HONOR_NANS (m_type))
+ {
+ m_pos_nan = !sign;
+ m_neg_nan = sign;
+ normalize_kind ();
+ if (flag_checking)
+ verify_range ();
+ }
+}
+
+// Clear the NAN bit and adjust the range.
+
+inline void
+frange::clear_nan ()
+{
+ gcc_checking_assert (!undefined_p ());
+ m_pos_nan = false;
+ m_neg_nan = false;
+ normalize_kind ();
+ if (flag_checking)
+ verify_range ();
+}
+
+// Return the maximum representable value for TYPE.
+
+inline REAL_VALUE_TYPE
+real_max_representable (const_tree type)
+{
+ REAL_VALUE_TYPE r;
+ char buf[128];
+ get_max_float (REAL_MODE_FORMAT (TYPE_MODE (type)),
+ buf, sizeof (buf), false);
+ int res = real_from_string (&r, buf);
+ gcc_checking_assert (!res);
+ return r;
+}
+
+// Return the minimum representable value for TYPE.
+
+inline REAL_VALUE_TYPE
+real_min_representable (const_tree type)
+{
+ REAL_VALUE_TYPE r = real_max_representable (type);
+ r = real_value_negate (&r);
+ return r;
+}
+
+// Return the minimum value for TYPE.
+
+inline REAL_VALUE_TYPE
+frange_val_min (const_tree type)
+{
+ if (HONOR_INFINITIES (type))
+ return dconstninf;
+ else
+ return real_min_representable (type);
+}
+
+// Return the maximum value for TYPE.
+
+inline REAL_VALUE_TYPE
+frange_val_max (const_tree type)
+{
+ if (HONOR_INFINITIES (type))
+ return dconstinf;
+ else
+ return real_max_representable (type);
+}
+
+// Return TRUE if R is the minimum value for TYPE.
+
+inline bool
+frange_val_is_min (const REAL_VALUE_TYPE &r, const_tree type)
+{
+ REAL_VALUE_TYPE min = frange_val_min (type);
+ return real_identical (&min, &r);
+}
+
+// Return TRUE if R is the max value for TYPE.
+
+inline bool
+frange_val_is_max (const REAL_VALUE_TYPE &r, const_tree type)
+{
+ REAL_VALUE_TYPE max = frange_val_max (type);
+ return real_identical (&max, &r);
+}
+
+// Build a signless NAN of type TYPE.
+
+inline void
+frange::set_nan (tree type)
+{
+ if (HONOR_NANS (type))
+ {
+ m_kind = VR_NAN;
+ m_type = type;
+ m_pos_nan = true;
+ m_neg_nan = true;
+ if (flag_checking)
+ verify_range ();
+ }
+ else
+ set_undefined ();
+}
+
+// Build a NAN of type TYPE with SIGN.
+
+inline void
+frange::set_nan (tree type, bool sign)
+{
+ if (HONOR_NANS (type))
+ {
+ m_kind = VR_NAN;
+ m_type = type;
+ m_neg_nan = sign;
+ m_pos_nan = !sign;
+ if (flag_checking)
+ verify_range ();
+ }
+ else
+ set_undefined ();
+}
+
+// Return TRUE if range is known to be finite.
+
+inline bool
+frange::known_isfinite () const
+{
+ if (undefined_p () || varying_p () || m_kind == VR_ANTI_RANGE)
+ return false;
+ return (!maybe_isnan () && !real_isinf (&m_min) && !real_isinf (&m_max));
+}
+
+// Return TRUE if range may be infinite.
+
+inline bool
+frange::maybe_isinf () const
+{
+ if (undefined_p () || m_kind == VR_ANTI_RANGE || m_kind == VR_NAN)
+ return false;
+ if (varying_p ())
+ return true;
+ return real_isinf (&m_min) || real_isinf (&m_max);
+}
+
+// Return TRUE if range is known to be [-INF,-INF] or [+INF,+INF].
+
+inline bool
+frange::known_isinf () const
+{
+ return (m_kind == VR_RANGE
+ && !maybe_isnan ()
+ && real_identical (&m_min, &m_max)
+ && real_isinf (&m_min));
+}
+
+// Return TRUE if range is possibly a NAN.
+
+inline bool
+frange::maybe_isnan () const
+{
+ if (undefined_p ())
+ return false;
+ return m_pos_nan || m_neg_nan;
+}
+
+// Return TRUE if range is possibly a NAN with SIGN.
+
+inline bool
+frange::maybe_isnan (bool sign) const
+{
+ if (undefined_p ())
+ return false;
+ if (sign)
+ return m_neg_nan;
+ return m_pos_nan;
+}
+
+// Return TRUE if range is a +NAN or -NAN.
+
+inline bool
+frange::known_isnan () const
+{
+ return m_kind == VR_NAN;
+}
+
+// If the signbit for the range is known, set it in SIGNBIT and return
+// TRUE.
+
+inline bool
+frange::signbit_p (bool &signbit) const
+{
+ if (undefined_p ())
+ return false;
+
+ // NAN with unknown sign.
+ if (m_pos_nan && m_neg_nan)
+ return false;
+ // No NAN.
+ if (!m_pos_nan && !m_neg_nan)
+ {
+ if (m_min.sign == m_max.sign)
+ {
+ signbit = m_min.sign;
+ return true;
+ }
+ return false;
+ }
+ // NAN with known sign.
+ bool nan_sign = m_neg_nan;
+ if (known_isnan ()
+ || (nan_sign == m_min.sign && nan_sign == m_max.sign))
+ {
+ signbit = nan_sign;
+ return true;
+ }
+ return false;
+}
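+
+// For example: [-5.0, -1.0] with no NAN sets SIGNBIT to true and returns
+// TRUE; [-1.0, 1.0] returns FALSE since the sign is unknown; a NAN with a
+// known sign returns that sign.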
+
+// If range has a NAN with a known sign, set it in SIGNBIT and return
+// TRUE.
+
+inline bool
+frange::nan_signbit_p (bool &signbit) const
+{
+ if (undefined_p ())
+ return false;
+
+ if (m_pos_nan == m_neg_nan)
+ return false;
+
+ signbit = m_neg_nan;
+ return true;
+}
+
+#endif // GCC_VALUE_RANGE_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-relation.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-relation.h
new file mode 100644
index 0000000..3177ecb
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/value-relation.h
@@ -0,0 +1,523 @@
+/* Header file for the value range relational processing.
+ Copyright (C) 2020-2023 Free Software Foundation, Inc.
+ Contributed by Andrew MacLeod <amacleod@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VALUE_RELATION_H
+#define GCC_VALUE_RELATION_H
+
+
+// This file provides access to a relation oracle which can be used to
+// maintain and query relations and equivalences between SSA_NAMES.
+//
+// The general range_query object provided in value-query.h provides
+// access to an oracle, if one is available, via the oracle() method.
+// There are also a couple of access routines provided which, even if there
+// is no oracle, will return the default VREL_VARYING (no relation).
+//
+// Typically, when a ranger object is active, there will be an oracle, and
+// any information available can be directly queried.  Ranger also sets and
+// utilizes the relation information to enhance its range calculations; this
+// is totally transparent to the client, who is free to make queries.
+//
+// relation_kind is a new enum which represents the different relations,
+// often with a direct mapping to tree codes; i.e. VREL_EQ is equivalent to
+// EQ_EXPR.
+//
+// When a query is made requesting the relation between SSA1 and SSA2 in a
+// basic block, or on an edge, the possible return values are:
+//
+// VREL_EQ, VREL_NE, VREL_LT, VREL_LE, VREL_GT, and VREL_GE mean the same
+// as their tree-code counterparts.
+// VREL_VARYING : No relation between the 2 names.
+// VREL_UNDEFINED : Impossible relation (i.e., A < B && A > B)
+//
+// The oracle maintains VREL_EQ relations with equivalency sets, so if a
+// relation comes back VREL_EQ, it is also possible to query the set of
+// equivalencies. These are basically bitmaps over ssa_names. An iterator is
+// provided later for this activity.
+//
+// Relations are maintained via the dominance trees and are optimized assuming
+// they are registered in dominance order. When a new relation is added, it
+// is intersected with whatever existing relation exists in the dominance tree
+// and registered at the specified block.
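+//
+// A minimal usage sketch (BB, SSA1 and SSA2 are illustrative; Q is any
+// range_query):
+//
+//   relation_oracle *oracle = q->oracle ();
+//   if (oracle
+//       && oracle->query_relation (bb, ssa1, ssa2) == VREL_EQ)
+//     {
+//       // SSA1 and SSA2 are known to be equal on entry to BB.
+//     }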
+
+
+// These codes are arranged such that VREL_VARYING is the first code, and all
+// the rest are contiguous.
+
+typedef enum relation_kind_t
+{
+ VREL_VARYING = 0, // No known relation, AKA varying.
+  VREL_UNDEFINED,     // Impossible relation, i.e. (r1 < r2) && (r1 > r2)
+ VREL_LT, // r1 < r2
+ VREL_LE, // r1 <= r2
+ VREL_GT, // r1 > r2
+ VREL_GE, // r1 >= r2
+ VREL_EQ, // r1 == r2
+ VREL_NE, // r1 != r2
+ VREL_PE8, // 8 bit partial equivalency
+ VREL_PE16, // 16 bit partial equivalency
+ VREL_PE32, // 32 bit partial equivalency
+ VREL_PE64, // 64 bit partial equivalency
+ VREL_LAST // terminate, not a real relation.
+} relation_kind;
+
+// General relation kind transformations.
+relation_kind relation_union (relation_kind r1, relation_kind r2);
+relation_kind relation_intersect (relation_kind r1, relation_kind r2);
+relation_kind relation_negate (relation_kind r);
+relation_kind relation_swap (relation_kind r);
+inline bool relation_lt_le_gt_ge_p (relation_kind r)
+ { return (r >= VREL_LT && r <= VREL_GE); }
+inline bool relation_partial_equiv_p (relation_kind r)
+ { return (r >= VREL_PE8 && r <= VREL_PE64); }
+inline bool relation_equiv_p (relation_kind r)
+ { return r == VREL_EQ || relation_partial_equiv_p (r); }
+
+void print_relation (FILE *f, relation_kind rel);
+
+class relation_oracle
+{
+public:
+ virtual ~relation_oracle () { }
+ // register a relation between 2 ssa names at a stmt.
+ void register_stmt (gimple *, relation_kind, tree, tree);
+ // register a relation between 2 ssa names on an edge.
+ void register_edge (edge, relation_kind, tree, tree);
+
+ // register a relation between 2 ssa names in a basic block.
+ virtual void register_relation (basic_block, relation_kind, tree, tree) = 0;
+ // Query for a relation between two ssa names in a basic block.
+ virtual relation_kind query_relation (basic_block, tree, tree) = 0;
+
+ relation_kind validate_relation (relation_kind, tree, tree);
+ relation_kind validate_relation (relation_kind, vrange &, vrange &);
+
+ virtual void dump (FILE *, basic_block) const = 0;
+ virtual void dump (FILE *) const = 0;
+ void debug () const;
+protected:
+ friend class equiv_relation_iterator;
+ // Return equivalency set for an SSA name in a basic block.
+ virtual const_bitmap equiv_set (tree, basic_block) = 0;
+ // Return partial equivalency record for an SSA name.
+ virtual const class pe_slice *partial_equiv_set (tree) { return NULL; }
+ void valid_equivs (bitmap b, const_bitmap equivs, basic_block bb);
+ // Query for a relation between two equivalency sets in a basic block.
+ virtual relation_kind query_relation (basic_block, const_bitmap,
+ const_bitmap) = 0;
+ friend class path_oracle;
+};
+
+// This class represents an equivalency set, and contains a link to the next
+// one in the list to be searched.
+
+class equiv_chain
+{
+public:
+ bitmap m_names; // ssa-names in equiv set.
+ basic_block m_bb; // Block this belongs to
+ equiv_chain *m_next; // Next in block list.
+ void dump (FILE *f) const; // Show names in this list.
+ equiv_chain *find (unsigned ssa);
+};
+
+class pe_slice
+{
+public:
+ tree ssa_base; // Slice of this name.
+ relation_kind code; // bits that are equivalent.
+ bitmap members; // Other members in the partial equivalency.
+};
+
+// The equivalency oracle maintains equivalencies using the dominator tree.
+// Equivalencies apply to an entire basic block. Equivalencies on edges
+// can be represented only on edges whose destination is a single-pred block,
+// and the equivalence is simply applied to that destination block.
+
+class equiv_oracle : public relation_oracle
+{
+public:
+ equiv_oracle ();
+ ~equiv_oracle ();
+
+ const_bitmap equiv_set (tree ssa, basic_block bb) final override;
+ const pe_slice *partial_equiv_set (tree name) final override;
+ void register_relation (basic_block bb, relation_kind k, tree ssa1,
+ tree ssa2) override;
+
+ void add_partial_equiv (relation_kind, tree, tree);
+ relation_kind partial_equiv (tree ssa1, tree ssa2, tree *base = NULL) const;
+ relation_kind query_relation (basic_block, tree, tree) override;
+ relation_kind query_relation (basic_block, const_bitmap, const_bitmap)
+ override;
+ void dump (FILE *f, basic_block bb) const override;
+ void dump (FILE *f) const override;
+
+protected:
+ bitmap_obstack m_bitmaps;
+ struct obstack m_chain_obstack;
+private:
+ bitmap m_equiv_set; // Index by ssa-name. true if an equivalence exists.
+ vec <equiv_chain *> m_equiv; // Index by BB. list of equivalences.
+ vec <bitmap> m_self_equiv; // Index by ssa-name, self equivalency set.
+ vec <pe_slice> m_partial; // Partial equivalencies.
+
+ void limit_check (basic_block bb = NULL);
+ equiv_chain *find_equiv_block (unsigned ssa, int bb) const;
+ equiv_chain *find_equiv_dom (tree name, basic_block bb) const;
+
+ bitmap register_equiv (basic_block bb, unsigned v, equiv_chain *equiv_1);
+ bitmap register_equiv (basic_block bb, equiv_chain *equiv_1,
+ equiv_chain *equiv_2);
+ void register_initial_def (tree ssa);
+ void add_equiv_to_block (basic_block bb, bitmap equiv);
+};
+
+// Summary block header for relations.
+
+class relation_chain_head
+{
+public:
+ bitmap m_names; // ssa_names with relations in this block.
+ class relation_chain *m_head; // List of relations in block.
+ int m_num_relations; // Number of relations in block.
+ relation_kind find_relation (const_bitmap b1, const_bitmap b2) const;
+};
+
+// A relation oracle maintains a set of relations between ssa_names using the
+// dominator tree structures. Equivalencies are considered a subset of
+// a general relation and maintained by an equivalence oracle by transparently
+// passing any EQ_EXPR relations to it.
+// Relations are handled at the basic block level. All relations apply to
+// an entire block, and are thus kept in a summary index by block.
+// Similar to the equivalence oracle, edges are handled by applying the
+// relation to the destination block of the edge, but ONLY if that block
+// has a single predecessor.  For now.
+
+class dom_oracle : public equiv_oracle
+{
+public:
+ dom_oracle ();
+ ~dom_oracle ();
+
+ void register_relation (basic_block bb, relation_kind k, tree op1, tree op2)
+ final override;
+
+ relation_kind query_relation (basic_block bb, tree ssa1, tree ssa2)
+ final override;
+ relation_kind query_relation (basic_block bb, const_bitmap b1,
+ const_bitmap b2) final override;
+
+ void dump (FILE *f, basic_block bb) const final override;
+ void dump (FILE *f) const final override;
+private:
+ bitmap m_tmp, m_tmp2;
+ bitmap m_relation_set; // Index by ssa-name. True if a relation exists
+ vec <relation_chain_head> m_relations; // Index by BB, list of relations.
+ relation_kind find_relation_block (unsigned bb, const_bitmap b1,
+ const_bitmap b2) const;
+ relation_kind find_relation_block (int bb, unsigned v1, unsigned v2,
+ relation_chain **obj = NULL) const;
+ relation_kind find_relation_dom (basic_block bb, unsigned v1, unsigned v2) const;
+ relation_chain *set_one_relation (basic_block bb, relation_kind k, tree op1,
+ tree op2);
+ void register_transitives (basic_block, const class value_relation &);
+
+};
+
+// A path_oracle implements relations in a list.  The only ordering
+// guarantee is that the most recently registered relation is the first
+// one found during a search.
+// It can be constructed with an optional "root" oracle which will be used
+// to look up any relations not found in the list.
+// This allows the client to walk paths starting at some block and register
+// and query relations along that path, ignoring other edges.
+//
+// When registering a relation, a query is made of the root oracle for any
+// known relationship at block BB; that is combined with the new relation
+// and entered in the list.
+//
+// Queries are resolved by looking first in the list, and only if nothing is
+// found is the root oracle queried at block BB.
+//
+// reset_path is used to clear all locally registered paths to initial state.
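+//
+// A minimal sketch (ROOT, BB and the ssa-names are illustrative):
+//
+//   path_oracle po (root);                    // ROOT may be NULL.
+//   po.register_relation (bb, VREL_LT, ssa1, ssa2);
+//   relation_kind k = po.query_relation (bb, ssa1, ssa2);  // VREL_LT.
+//   po.reset_path ();    // Drop the locally registered relations.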
+
+class path_oracle : public relation_oracle
+{
+public:
+ path_oracle (relation_oracle *oracle = NULL);
+ ~path_oracle ();
+ const_bitmap equiv_set (tree, basic_block) final override;
+ void register_relation (basic_block, relation_kind, tree, tree) final override;
+ void killing_def (tree);
+ relation_kind query_relation (basic_block, tree, tree) final override;
+ relation_kind query_relation (basic_block, const_bitmap, const_bitmap)
+ final override;
+ void reset_path (relation_oracle *oracle = NULL);
+ void set_root_oracle (relation_oracle *oracle) { m_root = oracle; }
+ void dump (FILE *, basic_block) const final override;
+ void dump (FILE *) const final override;
+private:
+ void register_equiv (basic_block bb, tree ssa1, tree ssa2);
+ equiv_chain m_equiv;
+ relation_chain_head m_relations;
+ relation_oracle *m_root;
+ bitmap m_killed_defs;
+
+ bitmap_obstack m_bitmaps;
+ struct obstack m_chain_obstack;
+};
+
+// Used to assist with iterating over the equivalence list.
+class equiv_relation_iterator {
+public:
+ equiv_relation_iterator (relation_oracle *oracle, basic_block bb, tree name,
+ bool full = true, bool partial = false);
+ void next ();
+ tree get_name (relation_kind *rel = NULL);
+protected:
+ relation_oracle *m_oracle;
+ const_bitmap m_bm;
+ const pe_slice *m_pe;
+ bitmap_iterator m_bi;
+ unsigned m_y;
+ tree m_name;
+};
+
+#define FOR_EACH_EQUIVALENCE(oracle, bb, name, equiv_name) \
+ for (equiv_relation_iterator iter (oracle, bb, name, true, false); \
+ ((equiv_name) = iter.get_name ()); \
+ iter.next ())
+
+#define FOR_EACH_PARTIAL_EQUIV(oracle, bb, name, equiv_name, equiv_rel) \
+ for (equiv_relation_iterator iter (oracle, bb, name, false, true); \
+ ((equiv_name) = iter.get_name (&equiv_rel)); \
+ iter.next ())
+
+#define FOR_EACH_PARTIAL_AND_FULL_EQUIV(oracle, bb, name, equiv_name, \
+ equiv_rel) \
+ for (equiv_relation_iterator iter (oracle, bb, name, true, true); \
+ ((equiv_name) = iter.get_name (&equiv_rel)); \
+ iter.next ())
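+
+// Usage sketch for these macros (ORACLE, BB and NAME are illustrative):
+//
+//   tree equiv_name;
+//   FOR_EACH_EQUIVALENCE (oracle, bb, name, equiv_name)
+//     {
+//       // EQUIV_NAME is equivalent to NAME on entry to BB.
+//     }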
+
+// -----------------------------------------------------------------------
+
+// Range-ops deals with a LHS and 2 operands. A relation trio is a set of
+// 3 potential relations packed into a single unsigned value.
+// 1 - LHS relation OP1
+// 2 - LHS relation OP2
+// 3 - OP1 relation OP2
+// VREL_VARYING is a value of 0, and is the default for each position.
+class relation_trio
+{
+public:
+ relation_trio ();
+ relation_trio (relation_kind lhs_op1, relation_kind lhs_op2,
+ relation_kind op1_op2);
+ relation_kind lhs_op1 ();
+ relation_kind lhs_op2 ();
+ relation_kind op1_op2 ();
+ relation_trio swap_op1_op2 ();
+
+ static relation_trio lhs_op1 (relation_kind k);
+ static relation_trio lhs_op2 (relation_kind k);
+ static relation_trio op1_op2 (relation_kind k);
+
+protected:
+ unsigned m_val;
+};
+
+// Default VREL_VARYING for all 3 relations.
+#define TRIO_VARYING relation_trio ()
+
+#define TRIO_SHIFT 4
+#define TRIO_MASK 0x000F
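+
+// A worked example of the packing, using the accessors defined below:
+// each relation occupies TRIO_SHIFT (4) bits, so
+//
+//   relation_trio (VREL_LT, VREL_VARYING, VREL_EQ).m_val
+//     == VREL_LT | (VREL_VARYING << TRIO_SHIFT) | (VREL_EQ << 8) == 0x602
+//
+// and lhs_op1 () recovers VREL_LT via (m_val & TRIO_MASK).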
+
+// These 3 static methods are shortcuts for when a caller has a single
+// relation to pass as a trio; it can simply construct the appropriate one.
+// The other unspecified relations will be VREL_VARYING.
+
+inline relation_trio::relation_trio ()
+{
+ STATIC_ASSERT (VREL_LAST <= (1 << TRIO_SHIFT));
+ m_val = 0;
+}
+
+inline relation_trio::relation_trio (relation_kind lhs_op1,
+ relation_kind lhs_op2,
+ relation_kind op1_op2)
+{
+ STATIC_ASSERT (VREL_LAST <= (1 << TRIO_SHIFT));
+ unsigned i1 = (unsigned) lhs_op1;
+ unsigned i2 = ((unsigned) lhs_op2) << TRIO_SHIFT;
+ unsigned i3 = ((unsigned) op1_op2) << (TRIO_SHIFT * 2);
+ m_val = i1 | i2 | i3;
+}
+
+inline relation_trio
+relation_trio::lhs_op1 (relation_kind k)
+{
+ return relation_trio (k, VREL_VARYING, VREL_VARYING);
+}
+inline relation_trio
+relation_trio::lhs_op2 (relation_kind k)
+{
+ return relation_trio (VREL_VARYING, k, VREL_VARYING);
+}
+inline relation_trio
+relation_trio::op1_op2 (relation_kind k)
+{
+ return relation_trio (VREL_VARYING, VREL_VARYING, k);
+}
+
+inline relation_kind
+relation_trio::lhs_op1 ()
+{
+ return (relation_kind) (m_val & TRIO_MASK);
+}
+
+inline relation_kind
+relation_trio::lhs_op2 ()
+{
+ return (relation_kind) ((m_val >> TRIO_SHIFT) & TRIO_MASK);
+}
+
+inline relation_kind
+relation_trio::op1_op2 ()
+{
+ return (relation_kind) ((m_val >> (TRIO_SHIFT * 2)) & TRIO_MASK);
+}
+
+inline relation_trio
+relation_trio::swap_op1_op2 ()
+{
+ return relation_trio (lhs_op2 (), lhs_op1 (), relation_swap (op1_op2 ()));
+}
+
+// -----------------------------------------------------------------------
+
+// The value-relation class is used to encapsulate the representation of an
+// individual relation between 2 ssa-names, and to facilitate operating on
+// the relation.
+
+class value_relation
+{
+public:
+ value_relation ();
+ value_relation (relation_kind kind, tree n1, tree n2);
+ void set_relation (relation_kind kind, tree n1, tree n2);
+
+ inline relation_kind kind () const { return related; }
+ inline tree op1 () const { return name1; }
+ inline tree op2 () const { return name2; }
+
+ relation_trio create_trio (tree lhs, tree op1, tree op2);
+ bool union_ (value_relation &p);
+ bool intersect (value_relation &p);
+ void negate ();
+ bool apply_transitive (const value_relation &rel);
+
+ void dump (FILE *f) const;
+private:
+ relation_kind related;
+ tree name1, name2;
+};
+
+// Set relation R between ssa_name N1 and N2.
+
+inline void
+value_relation::set_relation (relation_kind r, tree n1, tree n2)
+{
+ gcc_checking_assert (TREE_CODE (n1) == SSA_NAME
+ && TREE_CODE (n2) == SSA_NAME);
+ related = r;
+ name1 = n1;
+ name2 = n2;
+}
+
+// Default constructor.
+
+inline
+value_relation::value_relation ()
+{
+ related = VREL_VARYING;
+ name1 = NULL_TREE;
+ name2 = NULL_TREE;
+}
+
+// Constructor for relation R between SSA version N1 and N2.
+
+inline
+value_relation::value_relation (relation_kind kind, tree n1, tree n2)
+{
+ set_relation (kind, n1, n2);
+}
+
+// Return the number of bits associated with partial equivalency T.
+// Return 0 if this is not a supported partial equivalency relation.
+
+inline int
+pe_to_bits (relation_kind t)
+{
+ switch (t)
+ {
+ case VREL_PE8:
+ return 8;
+ case VREL_PE16:
+ return 16;
+ case VREL_PE32:
+ return 32;
+ case VREL_PE64:
+ return 64;
+ default:
+ return 0;
+ }
+}
+
+// Return the partial equivalency code associated with the number of BITS.
+// Return VREL_VARYING if there is no exact match.
+
+inline relation_kind
+bits_to_pe (int bits)
+{
+ switch (bits)
+ {
+ case 8:
+ return VREL_PE8;
+ case 16:
+ return VREL_PE16;
+ case 32:
+ return VREL_PE32;
+ case 64:
+ return VREL_PE64;
+ default:
+ return VREL_VARYING;
+ }
+}
+
+// Given partial equivalencies T1 and T2, return the smallest kind.
+
+inline relation_kind
+pe_min (relation_kind t1, relation_kind t2)
+{
+ gcc_checking_assert (relation_partial_equiv_p (t1));
+ gcc_checking_assert (relation_partial_equiv_p (t2));
+ // VREL_PE are declared small to large, so simple min will suffice.
+ return MIN (t1, t2);
+}
+#endif /* GCC_VALUE_RELATION_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/varasm.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/varasm.h
new file mode 100644
index 0000000..e6190ca
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/varasm.h
@@ -0,0 +1,84 @@
+/* Declarations for varasm.h.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VARASM_H
+#define GCC_VARASM_H
+
+/* The following global holds the "function name" for the code in the
+ cold section of a function, if hot/cold function splitting is enabled
+ and there was actually code that went into the cold section. A
+ pseudo function name is needed for the cold section of code for some
+ debugging tools that perform symbolization. */
+extern tree cold_function_name;
+
+extern tree tree_output_constant_def (tree);
+extern void make_decl_rtl (tree);
+extern rtx make_decl_rtl_for_debug (tree);
+extern void make_decl_one_only (tree, tree);
+extern int supports_one_only (void);
+extern void resolve_unique_section (tree, int, int);
+extern void mark_referenced (tree);
+extern void mark_decl_referenced (tree);
+extern void notice_global_symbol (tree);
+extern void set_user_assembler_name (tree, const char *);
+extern void process_pending_assemble_externals (void);
+extern bool decl_replaceable_p (tree, bool);
+extern bool decl_binds_to_current_def_p (const_tree);
+extern enum tls_model decl_default_tls_model (const_tree);
+
+/* Declare DECL to be a weak symbol. */
+extern void declare_weak (tree);
+
+/* Merge weak status. */
+extern void merge_weak (tree, tree);
+
+/* Make one symbol an alias for another. */
+extern void assemble_alias (tree, tree);
+
+/* Return nonzero if VALUE is a valid constant-valued expression
+ for use in initializing a static variable; one that can be an
+ element of a "constant" initializer.
+
+ Return null_pointer_node if the value is absolute;
+ if it is relocatable, return the variable that determines the relocation.
+ We assume that VALUE has been folded as much as possible;
+ therefore, we do not need to check for such things as
+ arithmetic-combinations of integers. */
+extern tree initializer_constant_valid_p (tree, tree, bool = false);
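+
+/* For example (a sketch of the semantics above): for "static int i = 42;"
+   the initializer is absolute and null_pointer_node is returned; for
+   "static int *p = &x;" the value is relocatable and the decl for "x" is
+   returned.  */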
+
+/* Return true if VALUE is a valid constant-valued expression
+ for use in initializing a static bit-field; one that can be
+ an element of a "constant" initializer. */
+extern bool initializer_constant_valid_for_bitfield_p (const_tree);
+
+/* Whether a constructor CTOR is a valid static constant initializer if all
+ its elements are. This used to be internal to initializer_constant_valid_p
+ and has been exposed to let other functions like categorize_ctor_elements
+ evaluate the property while walking a constructor for other purposes. */
+extern bool constructor_static_from_elts_p (const_tree);
+
+extern void init_varasm_status (void);
+
+extern rtx assemble_static_space (unsigned HOST_WIDE_INT);
+
+extern rtx assemble_trampoline_template (void);
+
+extern void switch_to_comdat_section (section *, tree);
+
+#endif // GCC_VARASM_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vec-perm-indices.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vec-perm-indices.h
new file mode 100644
index 0000000..e698874
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vec-perm-indices.h
@@ -0,0 +1,153 @@
+/* A representation of vector permutation indices.
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VEC_PERM_INDICES_H
+#define GCC_VEC_PERM_INDICES_H 1
+
+#include "int-vector-builder.h"
+
+/* A vector_builder for building constant permutation vectors.
+ The elements do not need to be clamped to a particular range
+ of input elements. */
+typedef int_vector_builder<poly_int64> vec_perm_builder;
+
+/* This class represents a constant permutation vector, such as that used
+ as the final operand to a VEC_PERM_EXPR.
+
+ Permutation vectors select indices modulo the number of input elements,
+ and the class canonicalizes each permutation vector for a particular
+ number of input vectors and for a particular number of elements per
+ input. For example, the gimple statements:
+
+ _1 = VEC_PERM_EXPR <a, a, { 0, 2, 4, 6, 0, 2, 4, 6 }>;
+ _2 = VEC_PERM_EXPR <a, a, { 0, 2, 4, 6, 8, 10, 12, 14 }>;
+ _3 = VEC_PERM_EXPR <a, a, { 0, 2, 20, 22, 24, 2, 4, 14 }>;
+
+ effectively have only a single vector input "a". If "a" has 8
+ elements, the indices select elements modulo 8, which makes all three
+ VEC_PERM_EXPRs equivalent. The canonical form is for the indices to be
+ in the range [0, number of input elements - 1], so the class treats the
+ second and third permutation vectors as though they had been the first.
+
+ The class copes with cases in which the input and output vectors have
+ different numbers of elements. */
+class vec_perm_indices
+{
+ typedef poly_int64 element_type;
+
+public:
+ vec_perm_indices ();
+ vec_perm_indices (const vec_perm_builder &, unsigned int, poly_uint64);
+
+ void new_vector (const vec_perm_builder &, unsigned int, poly_uint64);
+ void new_expanded_vector (const vec_perm_indices &, unsigned int);
+ bool new_shrunk_vector (const vec_perm_indices &, unsigned int);
+ void rotate_inputs (int delta);
+
+ /* Return the underlying vector encoding. */
+ const vec_perm_builder &encoding () const { return m_encoding; }
+
+ /* Return the number of output elements. This is called length ()
+ so that we present a more vec-like interface. */
+ poly_uint64 length () const { return m_encoding.full_nelts (); }
+
+ /* Return the number of input vectors being permuted. */
+ unsigned int ninputs () const { return m_ninputs; }
+
+ /* Return the number of elements in each input vector. */
+ poly_uint64 nelts_per_input () const { return m_nelts_per_input; }
+
+ /* Return the total number of input elements. */
+ poly_uint64 input_nelts () const { return m_ninputs * m_nelts_per_input; }
+
+ element_type clamp (element_type) const;
+ element_type operator[] (unsigned int i) const;
+ bool series_p (unsigned int, unsigned int, element_type, element_type) const;
+ bool all_in_range_p (element_type, element_type) const;
+ bool all_from_input_p (unsigned int) const;
+
+private:
+ vec_perm_indices (const vec_perm_indices &);
+
+ vec_perm_builder m_encoding;
+ unsigned int m_ninputs;
+ poly_uint64 m_nelts_per_input;
+};
+
+bool tree_to_vec_perm_builder (vec_perm_builder *, tree);
+tree vec_perm_indices_to_tree (tree, const vec_perm_indices &);
+rtx vec_perm_indices_to_rtx (machine_mode, const vec_perm_indices &);
+
+inline
+vec_perm_indices::vec_perm_indices ()
+ : m_ninputs (0),
+ m_nelts_per_input (0)
+{
+}
+
+/* Construct a permutation vector that selects between NINPUTS vector
+ inputs that have NELTS_PER_INPUT elements each. Take the elements of
+ the new vector from ELEMENTS, clamping each one to be in range. */
+
+inline
+vec_perm_indices::vec_perm_indices (const vec_perm_builder &elements,
+ unsigned int ninputs,
+ poly_uint64 nelts_per_input)
+{
+ new_vector (elements, ninputs, nelts_per_input);
+}
+
+/* Return the canonical value for permutation vector element ELT,
+ taking into account the current number of input elements. */
+
+inline vec_perm_indices::element_type
+vec_perm_indices::clamp (element_type elt) const
+{
+ element_type limit = input_nelts (), elem_within_input;
+ HOST_WIDE_INT input;
+ if (!can_div_trunc_p (elt, limit, &input, &elem_within_input))
+ return elt;
+
+ /* Treat negative elements as counting from the end. This only matters
+ if the vector size is not a power of 2. */
+ if (known_lt (elem_within_input, 0))
+ return elem_within_input + limit;
+
+ return elem_within_input;
+}
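+
+/* For example: with 2 inputs of 4 elements each, input_nelts () is 8, so
+   clamp (11) yields 3 (11 = 1 * 8 + 3), while clamp (-1) yields 7, since
+   negative elements count from the end.  */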
+
+/* Return the value of vector element I, which might or might not be
+ explicitly encoded. */
+
+inline vec_perm_indices::element_type
+vec_perm_indices::operator[] (unsigned int i) const
+{
+ return clamp (m_encoding.elt (i));
+}
+
+/* Return true if the permutation vector only selects elements from
+ input I. */
+
+inline bool
+vec_perm_indices::all_from_input_p (unsigned int i) const
+{
+ return all_in_range_p (i * m_nelts_per_input, m_nelts_per_input);
+}
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vec.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vec.h
new file mode 100644
index 0000000..3691891
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vec.h
@@ -0,0 +1,2386 @@
+/* Vector API for GNU compiler.
+ Copyright (C) 2004-2023 Free Software Foundation, Inc.
+ Contributed by Nathan Sidwell <nathan@codesourcery.com>
+ Re-implemented in C++ by Diego Novillo <dnovillo@google.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VEC_H
+#define GCC_VEC_H
+
+/* Some gen* files have no ggc support as the header file gtype-desc.h is
+   missing.  Provide these definitions in case ggc.h has not been included.
+   This is not a problem because any code that runs before gengtype is built
+   will never need to use GC vectors.  */
+
+extern void ggc_free (void *);
+extern size_t ggc_round_alloc_size (size_t requested_size);
+extern void *ggc_realloc (void *, size_t MEM_STAT_DECL);
+
+/* Templated vector type and associated interfaces.
+
+ The interface functions are typesafe and use inline functions,
+ sometimes backed by out-of-line generic functions. The vectors are
+ designed to interoperate with the GTY machinery.
+
+ There are both 'index' and 'iterate' accessors. The index accessor
+ is implemented by operator[]. The iterator returns a boolean
+ iteration condition and updates the iteration variable passed by
+ reference. Because the iterator will be inlined, the address-of
+ can be optimized away.
+
+ Each operation that increases the number of active elements is
+ available in 'quick' and 'safe' variants. The former presumes that
+ there is sufficient allocated space for the operation to succeed
+ (it dies if there is not). The latter will reallocate the
+ vector, if needed. Reallocation causes an exponential increase in
+ vector size. If you know you will be adding N elements, it would
+ be more efficient to use the reserve operation before adding the
+   least as many elements as you ask for; it will increase the allocation
+   exponentially if there are too few spare slots.  If you want to reserve
+   a specific number of slots, but do not want the exponential increase
+ specific number of slots, but do not want the exponential increase
+ (for instance, you know this is the last allocation), use the
+ reserve_exact operation. You can also create a vector of a
+ specific size from the get go.
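+
+   For instance (a sketch):
+
+     vec<tree> v = vNULL;
+     v.reserve (n);              // Grow once, up front.
+     for (i = 0; i < n; i++)
+       v.quick_push (elt);       // No per-push capacity checks.
+     v.release ();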
+
+ You should prefer the push and pop operations, as they append and
+ remove from the end of the vector. If you need to remove several
+ items in one go, use the truncate operation. The insert and remove
+ operations allow you to change elements in the middle of the
+ vector. There are two remove operations, one which preserves the
+ element ordering 'ordered_remove', and one which does not
+ 'unordered_remove'. The latter function copies the end element
+   into the removed slot, rather than invoking a memmove operation.  The
+ 'lower_bound' function will determine where to place an item in the
+ array using insert that will maintain sorted order.
+
+ Vectors are template types with three arguments: the type of the
+ elements in the vector, the allocation strategy, and the physical
+   layout to use.
+
+   Three allocation strategies are supported:
+
+ - Heap: allocation is done using malloc/free. This is the
+ default allocation strategy.
+
+ - GC: allocation is done using ggc_alloc/ggc_free.
+
+ - GC atomic: same as GC with the exception that the elements
+ themselves are assumed to be of an atomic type that does
+ not need to be garbage collected. This means that marking
+ routines do not need to traverse the array marking the
+ individual elements. This increases the performance of
+ GC activities.
+
+ Two physical layouts are supported:
+
+ - Embedded: The vector is structured using the trailing array
+ idiom. The last member of the structure is an array of size
+ 1. When the vector is initially allocated, a single memory
+ block is created to hold the vector's control data and the
+ array of elements. These vectors cannot grow without
+ reallocation (see discussion on embeddable vectors below).
+
+ - Space efficient: The vector is structured as a pointer to an
+ embedded vector. This is the default layout. It means that
+ vectors occupy a single word of storage before initial
+ allocation. Vectors are allowed to grow (the internal
+ pointer is reallocated but the main vector instance does not
+ need to relocate).
+
+ The type, allocation and layout are specified when the vector is
+ declared.
+
+ If you need to directly manipulate a vector, then the 'address'
+ accessor will return the address of the start of the vector. Also
+ the 'space' predicate will tell you whether there is spare capacity
+ in the vector. You will not normally need to use these two functions.
+
+ Notes on the different layout strategies
+
+ * Embeddable vectors (vec<T, A, vl_embed>)
+
+ These vectors are suitable to be embedded in other data
+ structures so that they can be pre-allocated in a contiguous
+ memory block.
+
+ Embeddable vectors are implemented using the trailing array
+ idiom, thus they are not resizeable without changing the address
+ of the vector object itself. This means you cannot have
+ variables or fields of embeddable vector type -- always use a
+ pointer to a vector. The one exception is the final field of a
+ structure, which could be a vector type.
+
+ You will have to use the embedded_size & embedded_init calls to
+ create such objects, and they will not be resizeable (so the
+ 'safe' allocation variants are not available).
+
+ Properties of embeddable vectors:
+
+ - The whole vector and control data are allocated in a single
+ contiguous block. It uses the trailing-vector idiom, so
+ allocation must reserve enough space for all the elements
+ in the vector plus its control data.
+ - The vector cannot be re-allocated.
+ - The vector cannot grow nor shrink.
+ - No indirections needed for access/manipulation.
+ - It requires 2 words of storage (prior to vector allocation).
+
+
+ * Space efficient vector (vec<T, A, vl_ptr>)
+
+ These vectors can grow dynamically and are allocated together
+ with their control data. They are suited to be included in data
+ structures. Prior to initial allocation, they only take a single
+ word of storage.
+
+ These vectors are implemented as a pointer to embeddable vectors.
+ The semantics allow for this pointer to be NULL to represent
+ empty vectors. This way, empty vectors occupy minimal space in
+ the structure containing them.
+
+ Properties:
+
+ - The whole vector and control data are allocated in a single
+ contiguous block.
+ - The whole vector may be re-allocated.
+ - Vector data may grow and shrink.
+ - Access and manipulation requires a pointer test and
+ indirection.
+ - It requires 1 word of storage (prior to vector allocation).
+
+ An example of their use would be,
+
+ struct my_struct {
+ // A space-efficient vector of tree pointers in GC memory.
+ vec<tree, va_gc, vl_ptr> v;
+ };
+
+ struct my_struct *s;
+
+ if (s->v.length ()) { we have some contents }
+ s->v.safe_push (decl); // append some decl onto the end
+ for (ix = 0; s->v.iterate (ix, &elt); ix++)
+ { do something with elt }
+*/
+
+/* Support function for statistics. */
+extern void dump_vec_loc_statistics (void);
+
+/* Hashtable mapping vec addresses to descriptors. */
+extern htab_t vec_mem_usage_hash;
+
+/* Control data for vectors. This contains the number of allocated
+ and used slots inside a vector. */
+
+struct vec_prefix
+{
+ /* FIXME - These fields should be private, but we need to cater to
+ compilers that have stricter notions of PODness for types. */
+
+ /* Memory allocation support routines in vec.cc. */
+ void register_overhead (void *, size_t, size_t CXX_MEM_STAT_INFO);
+ void release_overhead (void *, size_t, size_t, bool CXX_MEM_STAT_INFO);
+ static unsigned calculate_allocation (vec_prefix *, unsigned, bool);
+ static unsigned calculate_allocation_1 (unsigned, unsigned);
+
+ /* Note that vec_prefix should be a base class for vec, but we use
+ offsetof() on vector fields of tree structures (e.g.,
+ tree_binfo::base_binfos), and offsetof only supports base types.
+
+ To compensate, we make vec_prefix a field inside vec and make
+ vec a friend class of vec_prefix so it can access its fields. */
+ template <typename, typename, typename> friend struct vec;
+
+ /* The allocator types also need access to our internals. */
+ friend struct va_gc;
+ friend struct va_gc_atomic;
+ friend struct va_heap;
+
+ unsigned m_alloc : 31;
+ unsigned m_using_auto_storage : 1;
+ unsigned m_num;
+};
+
+/* Calculate the number of slots to reserve in a vector, making sure
+   that RESERVE slots are free.  If EXACT is true, grow exactly;
+   otherwise grow exponentially.  PFX is the control data for the
+   vector.  */
+
+inline unsigned
+vec_prefix::calculate_allocation (vec_prefix *pfx, unsigned reserve,
+ bool exact)
+{
+ if (exact)
+ return (pfx ? pfx->m_num : 0) + reserve;
+ else if (!pfx)
+ return MAX (4, reserve);
+ return calculate_allocation_1 (pfx->m_alloc, pfx->m_num + reserve);
+}
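+
+/* For example, given a hypothetical prefix PFX with m_alloc == 16 and
+   m_num == 10:
+
+     calculate_allocation (&pfx, 8, true);   // exact: 10 + 8 == 18
+     calculate_allocation (NULL, 2, false);  // first alloc: MAX (4, 2) == 4
+     calculate_allocation (&pfx, 8, false);  // exponential growth via
+                                             // calculate_allocation_1  */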
+
+template<typename, typename, typename> struct vec;
+
+/* Valid vector layouts
+
+ vl_embed - Embeddable vector that uses the trailing array idiom.
+ vl_ptr - Space efficient vector that uses a pointer to an
+ embeddable vector. */
+struct vl_embed { };
+struct vl_ptr { };
+
+
+/* Types of supported allocations
+
+ va_heap - Allocation uses malloc/free.
+ va_gc - Allocation uses ggc_alloc.
+ va_gc_atomic - Same as GC, but individual elements of the array
+ do not need to be marked during collection. */
+
+/* Allocator type for heap vectors. */
+struct va_heap
+{
+ /* Heap vectors are frequently regular instances, so use the vl_ptr
+ layout for them. */
+ typedef vl_ptr default_layout;
+
+ template<typename T>
+ static void reserve (vec<T, va_heap, vl_embed> *&, unsigned, bool
+ CXX_MEM_STAT_INFO);
+
+ template<typename T>
+ static void release (vec<T, va_heap, vl_embed> *&);
+};
+
+
+/* Allocator for heap memory. Ensure there are at least RESERVE free
+ slots in V. If EXACT is true, grow exactly, else grow
+ exponentially. As a special case, if the vector had not been
+ allocated and RESERVE is 0, no vector will be created. */
+
+template<typename T>
+inline void
+va_heap::reserve (vec<T, va_heap, vl_embed> *&v, unsigned reserve, bool exact
+ MEM_STAT_DECL)
+{
+ size_t elt_size = sizeof (T);
+ unsigned alloc
+ = vec_prefix::calculate_allocation (v ? &v->m_vecpfx : 0, reserve, exact);
+ gcc_checking_assert (alloc);
+
+ if (GATHER_STATISTICS && v)
+ v->m_vecpfx.release_overhead (v, elt_size * v->allocated (),
+ v->allocated (), false);
+
+ size_t size = vec<T, va_heap, vl_embed>::embedded_size (alloc);
+ unsigned nelem = v ? v->length () : 0;
+ v = static_cast <vec<T, va_heap, vl_embed> *> (xrealloc (v, size));
+ v->embedded_init (alloc, nelem);
+
+ if (GATHER_STATISTICS)
+ v->m_vecpfx.register_overhead (v, alloc, elt_size PASS_MEM_STAT);
+}
+
+
+#if GCC_VERSION >= 4007
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
+#endif
+
+/* Free the heap space allocated for vector V. */
+
+template<typename T>
+void
+va_heap::release (vec<T, va_heap, vl_embed> *&v)
+{
+ size_t elt_size = sizeof (T);
+ if (v == NULL)
+ return;
+
+ if (GATHER_STATISTICS)
+ v->m_vecpfx.release_overhead (v, elt_size * v->allocated (),
+ v->allocated (), true);
+ ::free (v);
+ v = NULL;
+}
+
+#if GCC_VERSION >= 4007
+#pragma GCC diagnostic pop
+#endif
+
+/* Allocator type for GC vectors. Notice that we need the structure
+ declaration even if GC is not enabled. */
+
+struct va_gc
+{
+ /* Use vl_embed as the default layout for GC vectors. Due to GTY
+ limitations, GC vectors must always be pointers, so it is more
+ efficient to use a pointer to the vl_embed layout, rather than
+ using a pointer to a pointer as would be the case with vl_ptr. */
+ typedef vl_embed default_layout;
+
+ template<typename T, typename A>
+ static void reserve (vec<T, A, vl_embed> *&, unsigned, bool
+ CXX_MEM_STAT_INFO);
+
+ template<typename T, typename A>
+ static void release (vec<T, A, vl_embed> *&v);
+};
+
+
+/* Free GC memory used by V and reset V to NULL. */
+
+template<typename T, typename A>
+inline void
+va_gc::release (vec<T, A, vl_embed> *&v)
+{
+ if (v)
+ ::ggc_free (v);
+ v = NULL;
+}
+
+
+/* Allocator for GC memory. Ensure there are at least RESERVE free
+ slots in V. If EXACT is true, grow exactly, else grow
+ exponentially. As a special case, if the vector had not been
+ allocated and RESERVE is 0, no vector will be created. */
+
+template<typename T, typename A>
+void
+va_gc::reserve (vec<T, A, vl_embed> *&v, unsigned reserve, bool exact
+ MEM_STAT_DECL)
+{
+ unsigned alloc
+ = vec_prefix::calculate_allocation (v ? &v->m_vecpfx : 0, reserve, exact);
+ if (!alloc)
+ {
+ ::ggc_free (v);
+ v = NULL;
+ return;
+ }
+
+ /* Calculate the amount of space we want. */
+ size_t size = vec<T, A, vl_embed>::embedded_size (alloc);
+
+ /* Ask the allocator how much space it will really give us. */
+ size = ::ggc_round_alloc_size (size);
+
+ /* Adjust the number of slots accordingly. */
+ size_t vec_offset = sizeof (vec_prefix);
+ size_t elt_size = sizeof (T);
+ alloc = (size - vec_offset) / elt_size;
+
+ /* And finally, recalculate the amount of space we ask for. */
+ size = vec_offset + alloc * elt_size;
+
+ unsigned nelem = v ? v->length () : 0;
+ v = static_cast <vec<T, A, vl_embed> *> (::ggc_realloc (v, size
+ PASS_MEM_STAT));
+ v->embedded_init (alloc, nelem);
+}
+
+
+/* Allocator type for GC vectors.  This is for vectors of types that
+   are atomic w.r.t. collection, so allocation and deallocation are
+   completely inherited from va_gc.  */
+struct va_gc_atomic : va_gc
+{
+};
+
+
+/* Generic vector template. Default values for A and L indicate the
+ most commonly used strategies.
+
+ FIXME - Ideally, they would all be vl_ptr to encourage using regular
+ instances for vectors, but the existing GTY machinery is limited
+ in that it can only deal with GC objects that are pointers
+ themselves.
+
+   This means that vector operations that need to deal with
+   potentially NULL pointers must be provided as free
+   functions (see the vec_safe_* functions above).  */
+template<typename T,
+ typename A = va_heap,
+ typename L = typename A::default_layout>
+struct GTY((user)) vec
+{
+};
+
+/* Allow C++11 range-based 'for' to work directly on vec<T>*. */
+template<typename T, typename A, typename L>
+T* begin (vec<T,A,L> *v) { return v ? v->begin () : nullptr; }
+template<typename T, typename A, typename L>
+T* end (vec<T,A,L> *v) { return v ? v->end () : nullptr; }
+template<typename T, typename A, typename L>
+const T* begin (const vec<T,A,L> *v) { return v ? v->begin () : nullptr; }
+template<typename T, typename A, typename L>
+const T* end (const vec<T,A,L> *v) { return v ? v->end () : nullptr; }
+
+/* Generic vec<> debug helpers.
+
+ These need to be instantiated for each vec<TYPE> used throughout
+ the compiler like this:
+
+ DEFINE_DEBUG_VEC (TYPE)
+
+ The reason we have a debug_helper() is because GDB can't
+ disambiguate a plain call to debug(some_vec), and it must be called
+ like debug<TYPE>(some_vec). */
+
+template<typename T>
+void
+debug_helper (vec<T> &ref)
+{
+ unsigned i;
+ for (i = 0; i < ref.length (); ++i)
+ {
+ fprintf (stderr, "[%d] = ", i);
+ debug_slim (ref[i]);
+ fputc ('\n', stderr);
+ }
+}
+
+/* We need a separate va_gc variant here because default template
+   arguments for functions cannot be used in C++98.  Once this
+   restriction is removed, this variant should be folded into the
+   debug_helper above.  */
+
+template<typename T>
+void
+debug_helper (vec<T, va_gc> &ref)
+{
+ unsigned i;
+ for (i = 0; i < ref.length (); ++i)
+ {
+ fprintf (stderr, "[%d] = ", i);
+ debug_slim (ref[i]);
+ fputc ('\n', stderr);
+ }
+}
+
+/* Macro to define debug(vec<T>) and debug(vec<T, va_gc>) helper
+ functions for a type T. */
+
+#define DEFINE_DEBUG_VEC(T) \
+ template void debug_helper (vec<T> &); \
+ template void debug_helper (vec<T, va_gc> &); \
+ /* Define the vec<T> debug functions. */ \
+ DEBUG_FUNCTION void \
+ debug (vec<T> &ref) \
+ { \
+ debug_helper <T> (ref); \
+ } \
+ DEBUG_FUNCTION void \
+ debug (vec<T> *ptr) \
+ { \
+ if (ptr) \
+ debug (*ptr); \
+ else \
+ fprintf (stderr, "<nil>\n"); \
+ } \
+ /* Define the vec<T, va_gc> debug functions. */ \
+ DEBUG_FUNCTION void \
+ debug (vec<T, va_gc> &ref) \
+ { \
+ debug_helper <T> (ref); \
+ } \
+ DEBUG_FUNCTION void \
+ debug (vec<T, va_gc> *ptr) \
+ { \
+ if (ptr) \
+ debug (*ptr); \
+ else \
+ fprintf (stderr, "<nil>\n"); \
+ }
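+
+/* For example, assuming a type foo_t for which a debug_slim overload
+   exists, a single expansion
+
+     DEFINE_DEBUG_VEC (foo_t)
+
+   provides debug () overloads callable from the debugger on both
+   vec<foo_t> and vec<foo_t, va_gc>, by reference or pointer.  */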
+
+/* Default-construct N elements in DST. */
+
+template <typename T>
+inline void
+vec_default_construct (T *dst, unsigned n)
+{
+#ifdef BROKEN_VALUE_INITIALIZATION
+  /* Versions of GCC before 4.4 sometimes leave certain objects
+     uninitialized when value initialized, though if the type has a
+     user-defined default ctor, that ctor is invoked.  As a workaround,
+     perform clearing first and then the value initialization.  This
+     fixes the cases where value initialization fails to initialize
+     because of those bugs (the memory should end up all zeros), while
+     still allowing vectors of types whose user-defined default ctor
+     initializes some or all elements to non-zero values.  If T has no
+     user-defined default ctor but some of its non-static data members
+     have user-defined default ctors that initialize to non-zero
+     values, the workaround still will not work properly; in that case
+     T must be given a user-defined default ctor.  */
+ memset (dst, '\0', sizeof (T) * n);
+#endif
+ for ( ; n; ++dst, --n)
+ ::new (static_cast<void*>(dst)) T ();
+}
+
+/* Copy-construct N elements in DST from *SRC. */
+
+template <typename T>
+inline void
+vec_copy_construct (T *dst, const T *src, unsigned n)
+{
+ for ( ; n; ++dst, ++src, --n)
+ ::new (static_cast<void*>(dst)) T (*src);
+}
+
+/* Type to provide zero-initialized values for vec<T, A, L>. This is
+ used to provide nil initializers for vec instances. Since vec must
+ be a trivially copyable type that can be copied by memcpy and zeroed
+ out by memset, it must have defaulted default and copy ctor and copy
+ assignment. To initialize a vec either use value initialization
+ (e.g., vec() or vec v{ };) or assign it the value vNULL. This isn't
+ needed for file-scope and function-local static vectors, which are
+ zero-initialized by default. */
+struct vnull { };
+constexpr vnull vNULL{ };
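+
+/* For example, each of the following yields a zero-initialized vector
+   of int:
+
+     vec<int> v1 = vNULL;   // from the nil initializer
+     vec<int> v2{ };        // value initialization
+     static vec<int> v3;    // statics are zero-initialized  */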
+
+
+/* Embeddable vector. These vectors are suitable to be embedded
+ in other data structures so that they can be pre-allocated in a
+ contiguous memory block.
+
+ Embeddable vectors are implemented using the trailing array idiom,
+ thus they are not resizeable without changing the address of the
+ vector object itself. This means you cannot have variables or
+ fields of embeddable vector type -- always use a pointer to a
+ vector. The one exception is the final field of a structure, which
+ could be a vector type.
+
+ You will have to use the embedded_size & embedded_init calls to
+ create such objects, and they will not be resizeable (so the 'safe'
+ allocation variants are not available).
+
+ Properties:
+
+ - The whole vector and control data are allocated in a single
+ contiguous block. It uses the trailing-vector idiom, so
+ allocation must reserve enough space for all the elements
+ in the vector plus its control data.
+ - The vector cannot be re-allocated.
+ - The vector cannot grow nor shrink.
+ - No indirections needed for access/manipulation.
+ - It requires 2 words of storage (prior to vector allocation). */
+
+template<typename T, typename A>
+struct GTY((user)) vec<T, A, vl_embed>
+{
+public:
+ unsigned allocated (void) const { return m_vecpfx.m_alloc; }
+ unsigned length (void) const { return m_vecpfx.m_num; }
+ bool is_empty (void) const { return m_vecpfx.m_num == 0; }
+ T *address (void) { return reinterpret_cast <T *> (this + 1); }
+ const T *address (void) const
+ { return reinterpret_cast <const T *> (this + 1); }
+ T *begin () { return address (); }
+ const T *begin () const { return address (); }
+ T *end () { return address () + length (); }
+ const T *end () const { return address () + length (); }
+ const T &operator[] (unsigned) const;
+ T &operator[] (unsigned);
+ T &last (void);
+ bool space (unsigned) const;
+ bool iterate (unsigned, T *) const;
+ bool iterate (unsigned, T **) const;
+ vec *copy (ALONE_CXX_MEM_STAT_INFO) const;
+ void splice (const vec &);
+ void splice (const vec *src);
+ T *quick_push (const T &);
+ T &pop (void);
+ void truncate (unsigned);
+ void quick_insert (unsigned, const T &);
+ void ordered_remove (unsigned);
+ void unordered_remove (unsigned);
+ void block_remove (unsigned, unsigned);
+ void qsort (int (*) (const void *, const void *));
+ void sort (int (*) (const void *, const void *, void *), void *);
+ void stablesort (int (*) (const void *, const void *, void *), void *);
+ T *bsearch (const void *key, int (*compar) (const void *, const void *));
+ T *bsearch (const void *key,
+ int (*compar)(const void *, const void *, void *), void *);
+ unsigned lower_bound (const T &, bool (*) (const T &, const T &)) const;
+ bool contains (const T &search) const;
+ static size_t embedded_size (unsigned);
+ void embedded_init (unsigned, unsigned = 0, unsigned = 0);
+ void quick_grow (unsigned len);
+ void quick_grow_cleared (unsigned len);
+
+ /* vec class can access our internal data and functions. */
+ template <typename, typename, typename> friend struct vec;
+
+ /* The allocator types also need access to our internals. */
+ friend struct va_gc;
+ friend struct va_gc_atomic;
+ friend struct va_heap;
+
+ /* FIXME - This field should be private, but we need to cater to
+ compilers that have stricter notions of PODness for types. */
+ /* Align m_vecpfx to simplify address (). */
+ alignas (T) alignas (vec_prefix) vec_prefix m_vecpfx;
+};
+
+
+/* Convenience wrapper functions to use when dealing with pointers to
+ embedded vectors. Some functionality for these vectors must be
+ provided via free functions for these reasons:
+
+ 1- The pointer may be NULL (e.g., before initial allocation).
+
+ 2- When the vector needs to grow, it must be reallocated, so
+ the pointer will change its value.
+
+ Because of limitations with the current GC machinery, all vectors
+ in GC memory *must* be pointers. */
+
+
+/* If V contains no room for NELEMS elements, return false. Otherwise,
+ return true. */
+template<typename T, typename A>
+inline bool
+vec_safe_space (const vec<T, A, vl_embed> *v, unsigned nelems)
+{
+ return v ? v->space (nelems) : nelems == 0;
+}
+
+
+/* If V is NULL, return 0. Otherwise, return V->length(). */
+template<typename T, typename A>
+inline unsigned
+vec_safe_length (const vec<T, A, vl_embed> *v)
+{
+ return v ? v->length () : 0;
+}
+
+
+/* If V is NULL, return NULL. Otherwise, return V->address(). */
+template<typename T, typename A>
+inline T *
+vec_safe_address (vec<T, A, vl_embed> *v)
+{
+ return v ? v->address () : NULL;
+}
+
+
+/* If V is NULL, return true. Otherwise, return V->is_empty(). */
+template<typename T, typename A>
+inline bool
+vec_safe_is_empty (vec<T, A, vl_embed> *v)
+{
+ return v ? v->is_empty () : true;
+}
+
+/* If V does not have space for NELEMS elements, call
+ V->reserve(NELEMS, EXACT). */
+template<typename T, typename A>
+inline bool
+vec_safe_reserve (vec<T, A, vl_embed> *&v, unsigned nelems, bool exact = false
+ CXX_MEM_STAT_INFO)
+{
+ bool extend = nelems ? !vec_safe_space (v, nelems) : false;
+ if (extend)
+ A::reserve (v, nelems, exact PASS_MEM_STAT);
+ return extend;
+}
+
+template<typename T, typename A>
+inline bool
+vec_safe_reserve_exact (vec<T, A, vl_embed> *&v, unsigned nelems
+ CXX_MEM_STAT_INFO)
+{
+ return vec_safe_reserve (v, nelems, true PASS_MEM_STAT);
+}
+
+
+/* Allocate memory for V, via allocator A, with space for NELEMS
+   slots.  If NELEMS is 0, V is initialized to NULL.  */
+
+template<typename T, typename A>
+inline void
+vec_alloc (vec<T, A, vl_embed> *&v, unsigned nelems CXX_MEM_STAT_INFO)
+{
+ v = NULL;
+ vec_safe_reserve (v, nelems, false PASS_MEM_STAT);
+}
+
+
+/* Free the memory allocated for vector V, via allocator A, and set it
+   to NULL.  */
+
+template<typename T, typename A>
+inline void
+vec_free (vec<T, A, vl_embed> *&v)
+{
+ A::release (v);
+}
+
+
+/* Grow V to length LEN. Allocate it, if necessary. */
+template<typename T, typename A>
+inline void
+vec_safe_grow (vec<T, A, vl_embed> *&v, unsigned len,
+ bool exact = false CXX_MEM_STAT_INFO)
+{
+ unsigned oldlen = vec_safe_length (v);
+ gcc_checking_assert (len >= oldlen);
+ vec_safe_reserve (v, len - oldlen, exact PASS_MEM_STAT);
+ v->quick_grow (len);
+}
+
+
+/* If V is NULL, allocate it. Call V->safe_grow_cleared(LEN). */
+template<typename T, typename A>
+inline void
+vec_safe_grow_cleared (vec<T, A, vl_embed> *&v, unsigned len,
+ bool exact = false CXX_MEM_STAT_INFO)
+{
+ unsigned oldlen = vec_safe_length (v);
+ vec_safe_grow (v, len, exact PASS_MEM_STAT);
+ vec_default_construct (v->address () + oldlen, len - oldlen);
+}
+
+
+/* Assume V is not NULL.  Call V->safe_grow_cleared (LEN, EXACT).  */
+
+template<typename T>
+inline void
+vec_safe_grow_cleared (vec<T, va_heap, vl_ptr> *&v,
+ unsigned len, bool exact = false CXX_MEM_STAT_INFO)
+{
+ v->safe_grow_cleared (len, exact PASS_MEM_STAT);
+}
+
+/* If V does not have space for NELEMS elements, call
+ V->reserve(NELEMS, EXACT). */
+
+template<typename T>
+inline bool
+vec_safe_reserve (vec<T, va_heap, vl_ptr> *&v, unsigned nelems, bool exact = false
+ CXX_MEM_STAT_INFO)
+{
+ return v->reserve (nelems, exact);
+}
+
+
+/* If V is NULL return false, otherwise return V->iterate(IX, PTR). */
+template<typename T, typename A>
+inline bool
+vec_safe_iterate (const vec<T, A, vl_embed> *v, unsigned ix, T **ptr)
+{
+ if (v)
+ return v->iterate (ix, ptr);
+ else
+ {
+ *ptr = 0;
+ return false;
+ }
+}
+
+template<typename T, typename A>
+inline bool
+vec_safe_iterate (const vec<T, A, vl_embed> *v, unsigned ix, T *ptr)
+{
+ if (v)
+ return v->iterate (ix, ptr);
+ else
+ {
+ *ptr = 0;
+ return false;
+ }
+}
+
+
+/* If V has no room for one more element, reallocate it. Then call
+ V->quick_push(OBJ). */
+template<typename T, typename A>
+inline T *
+vec_safe_push (vec<T, A, vl_embed> *&v, const T &obj CXX_MEM_STAT_INFO)
+{
+ vec_safe_reserve (v, 1, false PASS_MEM_STAT);
+ return v->quick_push (obj);
+}
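+
+/* A minimal usage sketch: the vec_safe_* functions accept a NULL
+   vector and allocate on demand.  For a heap vector of int,
+
+     vec<int, va_heap, vl_embed> *v = NULL;
+     vec_safe_push (v, 1);   // allocates on first push
+     vec_safe_push (v, 2);
+     gcc_checking_assert (vec_safe_length (v) == 2);
+     vec_free (v);           // releases and resets v to NULL  */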
+
+
+/* If V has no room for one more element, reallocate it.  Then call
+ V->quick_insert(IX, OBJ). */
+template<typename T, typename A>
+inline void
+vec_safe_insert (vec<T, A, vl_embed> *&v, unsigned ix, const T &obj
+ CXX_MEM_STAT_INFO)
+{
+ vec_safe_reserve (v, 1, false PASS_MEM_STAT);
+ v->quick_insert (ix, obj);
+}
+
+
+/* If V is NULL, do nothing. Otherwise, call V->truncate(SIZE). */
+template<typename T, typename A>
+inline void
+vec_safe_truncate (vec<T, A, vl_embed> *v, unsigned size)
+{
+ if (v)
+ v->truncate (size);
+}
+
+
+/* If SRC is not NULL, return a pointer to a copy of it. */
+template<typename T, typename A>
+inline vec<T, A, vl_embed> *
+vec_safe_copy (vec<T, A, vl_embed> *src CXX_MEM_STAT_INFO)
+{
+ return src ? src->copy (ALONE_PASS_MEM_STAT) : NULL;
+}
+
+/* Copy the elements from SRC to the end of DST as if by memcpy.
+ Reallocate DST, if necessary. */
+template<typename T, typename A>
+inline void
+vec_safe_splice (vec<T, A, vl_embed> *&dst, const vec<T, A, vl_embed> *src
+ CXX_MEM_STAT_INFO)
+{
+ unsigned src_len = vec_safe_length (src);
+ if (src_len)
+ {
+ vec_safe_reserve_exact (dst, vec_safe_length (dst) + src_len
+ PASS_MEM_STAT);
+ dst->splice (*src);
+ }
+}
+
+/* Return true if SEARCH is an element of V. Note that this is O(N) in the
+ size of the vector and so should be used with care. */
+
+template<typename T, typename A>
+inline bool
+vec_safe_contains (vec<T, A, vl_embed> *v, const T &search)
+{
+ return v ? v->contains (search) : false;
+}
+
+/* Index into vector. Return the IX'th element. IX must be in the
+ domain of the vector. */
+
+template<typename T, typename A>
+inline const T &
+vec<T, A, vl_embed>::operator[] (unsigned ix) const
+{
+ gcc_checking_assert (ix < m_vecpfx.m_num);
+ return address ()[ix];
+}
+
+template<typename T, typename A>
+inline T &
+vec<T, A, vl_embed>::operator[] (unsigned ix)
+{
+ gcc_checking_assert (ix < m_vecpfx.m_num);
+ return address ()[ix];
+}
+
+
+/* Get the final element of the vector, which must not be empty. */
+
+template<typename T, typename A>
+inline T &
+vec<T, A, vl_embed>::last (void)
+{
+ gcc_checking_assert (m_vecpfx.m_num > 0);
+ return (*this)[m_vecpfx.m_num - 1];
+}
+
+
+/* If this vector has space for NELEMS additional entries, return
+ true. You usually only need to use this if you are doing your
+ own vector reallocation, for instance on an embedded vector. This
+ returns true in exactly the same circumstances that vec::reserve
+ will. */
+
+template<typename T, typename A>
+inline bool
+vec<T, A, vl_embed>::space (unsigned nelems) const
+{
+ return m_vecpfx.m_alloc - m_vecpfx.m_num >= nelems;
+}
+
+
+/* Return iteration condition and update *PTR to (a copy of) the IX'th
+ element of this vector. Use this to iterate over the elements of a
+ vector as follows,
+
+ for (ix = 0; v->iterate (ix, &val); ix++)
+ continue; */
+
+template<typename T, typename A>
+inline bool
+vec<T, A, vl_embed>::iterate (unsigned ix, T *ptr) const
+{
+ if (ix < m_vecpfx.m_num)
+ {
+ *ptr = address ()[ix];
+ return true;
+ }
+ else
+ {
+ *ptr = 0;
+ return false;
+ }
+}
+
+
+/* Return iteration condition and update *PTR to point to the
+ IX'th element of this vector. Use this to iterate over the
+ elements of a vector as follows,
+
+ for (ix = 0; v->iterate (ix, &ptr); ix++)
+ continue;
+
+ This variant is for vectors of objects. */
+
+template<typename T, typename A>
+inline bool
+vec<T, A, vl_embed>::iterate (unsigned ix, T **ptr) const
+{
+ if (ix < m_vecpfx.m_num)
+ {
+ *ptr = CONST_CAST (T *, &address ()[ix]);
+ return true;
+ }
+ else
+ {
+ *ptr = 0;
+ return false;
+ }
+}
+
+
+/* Return a pointer to a copy of this vector. */
+
+template<typename T, typename A>
+inline vec<T, A, vl_embed> *
+vec<T, A, vl_embed>::copy (ALONE_MEM_STAT_DECL) const
+{
+ vec<T, A, vl_embed> *new_vec = NULL;
+ unsigned len = length ();
+ if (len)
+ {
+ vec_alloc (new_vec, len PASS_MEM_STAT);
+ new_vec->embedded_init (len, len);
+ vec_copy_construct (new_vec->address (), address (), len);
+ }
+ return new_vec;
+}
+
+
+/* Copy the elements from SRC to the end of this vector as if by memcpy.
+ The vector must have sufficient headroom available. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::splice (const vec<T, A, vl_embed> &src)
+{
+ unsigned len = src.length ();
+ if (len)
+ {
+ gcc_checking_assert (space (len));
+ vec_copy_construct (end (), src.address (), len);
+ m_vecpfx.m_num += len;
+ }
+}
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::splice (const vec<T, A, vl_embed> *src)
+{
+ if (src)
+ splice (*src);
+}
+
+
+/* Push OBJ (a new element) onto the end of the vector. There must be
+ sufficient space in the vector. Return a pointer to the slot
+ where OBJ was inserted. */
+
+template<typename T, typename A>
+inline T *
+vec<T, A, vl_embed>::quick_push (const T &obj)
+{
+ gcc_checking_assert (space (1));
+ T *slot = &address ()[m_vecpfx.m_num++];
+ *slot = obj;
+ return slot;
+}
+
+
+/* Pop and return the last element off the end of the vector. */
+
+template<typename T, typename A>
+inline T &
+vec<T, A, vl_embed>::pop (void)
+{
+ gcc_checking_assert (length () > 0);
+ return address ()[--m_vecpfx.m_num];
+}
+
+
+/* Set the length of the vector to SIZE. The new length must be less
+ than or equal to the current length. This is an O(1) operation. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::truncate (unsigned size)
+{
+ gcc_checking_assert (length () >= size);
+ m_vecpfx.m_num = size;
+}
+
+
+/* Insert an element, OBJ, at the IXth position of this vector. There
+ must be sufficient space. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::quick_insert (unsigned ix, const T &obj)
+{
+ gcc_checking_assert (length () < allocated ());
+ gcc_checking_assert (ix <= length ());
+ T *slot = &address ()[ix];
+ memmove (slot + 1, slot, (m_vecpfx.m_num++ - ix) * sizeof (T));
+ *slot = obj;
+}
+
+
+/* Remove an element from the IXth position of this vector. Ordering of
+ remaining elements is preserved. This is an O(N) operation due to
+ memmove. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::ordered_remove (unsigned ix)
+{
+ gcc_checking_assert (ix < length ());
+ T *slot = &address ()[ix];
+ memmove (slot, slot + 1, (--m_vecpfx.m_num - ix) * sizeof (T));
+}
+
+
+/* Remove elements in [START, END) from VEC for which COND holds. Ordering of
+ remaining elements is preserved. This is an O(N) operation. */
+
+#define VEC_ORDERED_REMOVE_IF_FROM_TO(vec, read_index, write_index, \
+ elem_ptr, start, end, cond) \
+ { \
+ gcc_assert ((end) <= (vec).length ()); \
+ for (read_index = write_index = (start); read_index < (end); \
+ ++read_index) \
+ { \
+ elem_ptr = &(vec)[read_index]; \
+ bool remove_p = (cond); \
+ if (remove_p) \
+ continue; \
+ \
+ if (read_index != write_index) \
+ (vec)[write_index] = (vec)[read_index]; \
+ \
+ write_index++; \
+ } \
+ \
+ if (read_index - write_index > 0) \
+ (vec).block_remove (write_index, read_index - write_index); \
+ }
+
+
+/* Remove elements from VEC for which COND holds. Ordering of remaining
+ elements is preserved. This is an O(N) operation. */
+
+#define VEC_ORDERED_REMOVE_IF(vec, read_index, write_index, elem_ptr, \
+ cond) \
+ VEC_ORDERED_REMOVE_IF_FROM_TO ((vec), read_index, write_index, \
+ elem_ptr, 0, (vec).length (), (cond))
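+
+/* For example, to remove every negative value from a hypothetical
+   auto_vec<int> V (the caller declares the index and element
+   variables used by the macro):
+
+     unsigned rd, wr;
+     int *elem;
+     VEC_ORDERED_REMOVE_IF (v, rd, wr, elem, *elem < 0);  */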
+
+/* Remove an element from the IXth position of this vector. Ordering of
+ remaining elements is destroyed. This is an O(1) operation. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::unordered_remove (unsigned ix)
+{
+ gcc_checking_assert (ix < length ());
+ T *p = address ();
+ p[ix] = p[--m_vecpfx.m_num];
+}
+
+
+/* Remove LEN elements starting at the IXth. Ordering is retained.
+ This is an O(N) operation due to memmove. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::block_remove (unsigned ix, unsigned len)
+{
+ gcc_checking_assert (ix + len <= length ());
+ T *slot = &address ()[ix];
+ m_vecpfx.m_num -= len;
+ memmove (slot, slot + len, (m_vecpfx.m_num - ix) * sizeof (T));
+}
+
+
+/* Sort the contents of this vector with qsort. CMP is the comparison
+ function to pass to qsort. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::qsort (int (*cmp) (const void *, const void *))
+{
+ if (length () > 1)
+ gcc_qsort (address (), length (), sizeof (T), cmp);
+}
+
+/* Sort the contents of this vector with qsort. CMP is the comparison
+ function to pass to qsort. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::sort (int (*cmp) (const void *, const void *, void *),
+ void *data)
+{
+ if (length () > 1)
+ gcc_sort_r (address (), length (), sizeof (T), cmp, data);
+}
+
+/* Sort the contents of this vector with gcc_stablesort_r.  CMP is the
+   comparison function to pass to it.  */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::stablesort (int (*cmp) (const void *, const void *,
+ void *), void *data)
+{
+ if (length () > 1)
+ gcc_stablesort_r (address (), length (), sizeof (T), cmp, data);
+}
+
+/* Search the contents of the sorted vector with a binary search.
+ CMP is the comparison function to pass to bsearch. */
+
+template<typename T, typename A>
+inline T *
+vec<T, A, vl_embed>::bsearch (const void *key,
+ int (*compar) (const void *, const void *))
+{
+ const void *base = this->address ();
+ size_t nmemb = this->length ();
+ size_t size = sizeof (T);
+ /* The following is a copy of glibc stdlib-bsearch.h. */
+ size_t l, u, idx;
+ const void *p;
+ int comparison;
+
+ l = 0;
+ u = nmemb;
+ while (l < u)
+ {
+ idx = (l + u) / 2;
+ p = (const void *) (((const char *) base) + (idx * size));
+ comparison = (*compar) (key, p);
+ if (comparison < 0)
+ u = idx;
+ else if (comparison > 0)
+ l = idx + 1;
+ else
+ return (T *)const_cast<void *>(p);
+ }
+
+ return NULL;
+}
+
+/* Search the contents of the sorted vector with a binary search.
+ CMP is the comparison function to pass to bsearch. */
+
+template<typename T, typename A>
+inline T *
+vec<T, A, vl_embed>::bsearch (const void *key,
+ int (*compar) (const void *, const void *,
+ void *), void *data)
+{
+ const void *base = this->address ();
+ size_t nmemb = this->length ();
+ size_t size = sizeof (T);
+ /* The following is a copy of glibc stdlib-bsearch.h. */
+ size_t l, u, idx;
+ const void *p;
+ int comparison;
+
+ l = 0;
+ u = nmemb;
+ while (l < u)
+ {
+ idx = (l + u) / 2;
+ p = (const void *) (((const char *) base) + (idx * size));
+ comparison = (*compar) (key, p, data);
+ if (comparison < 0)
+ u = idx;
+ else if (comparison > 0)
+ l = idx + 1;
+ else
+ return (T *)const_cast<void *>(p);
+ }
+
+ return NULL;
+}
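+
+/* For example, a comparator usable with both qsort and bsearch above,
+   for a sorted vector of int (bsearch passes KEY as the first
+   argument):
+
+     static int
+     cmp_int (const void *a_, const void *b_)
+     {
+       int a = *(const int *) a_, b = *(const int *) b_;
+       return (a > b) - (a < b);   // avoids overflow of a - b
+     }
+
+     v->qsort (cmp_int);
+     int key = 42;
+     int *slot = v->bsearch (&key, cmp_int);  */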
+
+/* Return true if SEARCH is an element of this vector.  Note that this
+   is O(N) in the size of the vector and so should be used with care.  */
+
+template<typename T, typename A>
+inline bool
+vec<T, A, vl_embed>::contains (const T &search) const
+{
+ unsigned int len = length ();
+ const T *p = address ();
+ for (unsigned int i = 0; i < len; i++)
+ {
+ const T *slot = &p[i];
+ if (*slot == search)
+ return true;
+ }
+
+ return false;
+}
+
+/* Find and return the first position in which OBJ could be inserted
+ without changing the ordering of this vector. LESSTHAN is a
+ function that returns true if the first argument is strictly less
+ than the second. */
+
+template<typename T, typename A>
+unsigned
+vec<T, A, vl_embed>::lower_bound (const T &obj,
+ bool (*lessthan)(const T &, const T &))
+ const
+{
+ unsigned int len = length ();
+ unsigned int half, middle;
+ unsigned int first = 0;
+ while (len > 0)
+ {
+ half = len / 2;
+ middle = first;
+ middle += half;
+ const T &middle_elem = address ()[middle];
+ if (lessthan (middle_elem, obj))
+ {
+ first = middle;
+ ++first;
+ len = len - half - 1;
+ }
+ else
+ len = half;
+ }
+ return first;
+}
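+
+/* For example, assuming a sorted vector of int with spare capacity, an
+   ordered insert of VAL can be written as:
+
+     static bool int_less (const int &a, const int &b) { return a < b; }
+
+     unsigned pos = v->lower_bound (val, int_less);
+     v->quick_insert (pos, val);   // keeps the vector sorted  */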
+
+
+/* Return the number of bytes needed to embed an instance of an
+ embeddable vec inside another data structure.
+
+ Use these methods to determine the required size and initialization
+ of a vector V of type T embedded within another structure (as the
+ final member):
+
+ size_t vec<T, A, vl_embed>::embedded_size (unsigned alloc);
+ void v->embedded_init (unsigned alloc, unsigned num);
+
+ These allow the caller to perform the memory allocation. */
+
+template<typename T, typename A>
+inline size_t
+vec<T, A, vl_embed>::embedded_size (unsigned alloc)
+{
+ struct alignas (T) U { char data[sizeof (T)]; };
+ typedef vec<U, A, vl_embed> vec_embedded;
+ typedef typename std::conditional<std::is_standard_layout<T>::value,
+ vec, vec_embedded>::type vec_stdlayout;
+ static_assert (sizeof (vec_stdlayout) == sizeof (vec), "");
+ static_assert (alignof (vec_stdlayout) == alignof (vec), "");
+ return sizeof (vec_stdlayout) + alloc * sizeof (T);
+}
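+
+/* A sketch of manual allocation of an embedded heap vector (ordinarily
+   vec_alloc and the vec_safe_* functions do this for you):
+
+     typedef vec<int, va_heap, vl_embed> ivec;
+     ivec *v = (ivec *) xmalloc (ivec::embedded_size (16));
+     v->embedded_init (16);   // room for 16 elements, 0 active
+     v->quick_push (42);
+     free (v);  */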
+
+
+/* Initialize the vector to contain room for ALLOC elements and
+ NUM active elements. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::embedded_init (unsigned alloc, unsigned num, unsigned aut)
+{
+ m_vecpfx.m_alloc = alloc;
+ m_vecpfx.m_using_auto_storage = aut;
+ m_vecpfx.m_num = num;
+}
+
+
+/* Grow the vector to a specific length. LEN must be as long or longer than
+ the current length. The new elements are uninitialized. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::quick_grow (unsigned len)
+{
+ gcc_checking_assert (length () <= len && len <= m_vecpfx.m_alloc);
+ m_vecpfx.m_num = len;
+}
+
+
+/* Grow the vector to a specific length. LEN must be as long or longer than
+ the current length. The new elements are initialized to zero. */
+
+template<typename T, typename A>
+inline void
+vec<T, A, vl_embed>::quick_grow_cleared (unsigned len)
+{
+ unsigned oldlen = length ();
+ size_t growby = len - oldlen;
+ quick_grow (len);
+ if (growby != 0)
+ vec_default_construct (address () + oldlen, growby);
+}
+
+/* Garbage collection support for vec<T, A, vl_embed>. */
+
+template<typename T>
+void
+gt_ggc_mx (vec<T, va_gc> *v)
+{
+ extern void gt_ggc_mx (T &);
+ for (unsigned i = 0; i < v->length (); i++)
+ gt_ggc_mx ((*v)[i]);
+}
+
+template<typename T>
+void
+gt_ggc_mx (vec<T, va_gc_atomic, vl_embed> *v ATTRIBUTE_UNUSED)
+{
+ /* Nothing to do. Vectors of atomic types wrt GC do not need to
+ be traversed. */
+}
+
+
+/* PCH support for vec<T, A, vl_embed>. */
+
+template<typename T, typename A>
+void
+gt_pch_nx (vec<T, A, vl_embed> *v)
+{
+ extern void gt_pch_nx (T &);
+ for (unsigned i = 0; i < v->length (); i++)
+ gt_pch_nx ((*v)[i]);
+}
+
+template<typename T, typename A>
+void
+gt_pch_nx (vec<T *, A, vl_embed> *v, gt_pointer_operator op, void *cookie)
+{
+ for (unsigned i = 0; i < v->length (); i++)
+ op (&((*v)[i]), NULL, cookie);
+}
+
+template<typename T, typename A>
+void
+gt_pch_nx (vec<T, A, vl_embed> *v, gt_pointer_operator op, void *cookie)
+{
+ extern void gt_pch_nx (T *, gt_pointer_operator, void *);
+ for (unsigned i = 0; i < v->length (); i++)
+ gt_pch_nx (&((*v)[i]), op, cookie);
+}
+
+
+/* Space efficient vector. These vectors can grow dynamically and are
+ allocated together with their control data. They are suited to be
+ included in data structures. Prior to initial allocation, they
+ only take a single word of storage.
+
+ These vectors are implemented as a pointer to an embeddable vector.
+ The semantics allow for this pointer to be NULL to represent empty
+ vectors. This way, empty vectors occupy minimal space in the
+ structure containing them.
+
+ Properties:
+
+ - The whole vector and control data are allocated in a single
+ contiguous block.
+ - The whole vector may be re-allocated.
+ - Vector data may grow and shrink.
+ - Access and manipulation requires a pointer test and
+ indirection.
+ - It requires 1 word of storage (prior to vector allocation).
+
+
+ Limitations:
+
+ These vectors must be PODs because they are stored in unions.
+ (http://en.wikipedia.org/wiki/Plain_old_data_structures).
+   As long as we use C++03, we cannot have constructors or
+ destructors in classes that are stored in unions. */
+
+template<typename T, size_t N = 0>
+class auto_vec;
+
+template<typename T>
+struct vec<T, va_heap, vl_ptr>
+{
+public:
+ /* Default ctors to ensure triviality. Use value-initialization
+ (e.g., vec() or vec v{ };) or vNULL to create a zero-initialized
+ instance. */
+ vec () = default;
+ vec (const vec &) = default;
+ /* Initialization from the generic vNULL. */
+ vec (vnull): m_vec () { }
+ /* Same as default ctor: vec storage must be released manually. */
+ ~vec () = default;
+
+ /* Defaulted same as copy ctor. */
+ vec& operator= (const vec &) = default;
+
+ /* Prevent implicit conversion from auto_vec. Use auto_vec::to_vec()
+ instead. */
+ template <size_t N>
+ vec (auto_vec<T, N> &) = delete;
+
+ template <size_t N>
+ void operator= (auto_vec<T, N> &) = delete;
+
+ /* Memory allocation and deallocation for the embedded vector.
+ Needed because we cannot have proper ctors/dtors defined. */
+ void create (unsigned nelems CXX_MEM_STAT_INFO);
+ void release (void);
+
+ /* Vector operations. */
+ bool exists (void) const
+ { return m_vec != NULL; }
+
+ bool is_empty (void) const
+ { return m_vec ? m_vec->is_empty () : true; }
+
+ unsigned allocated (void) const
+ { return m_vec ? m_vec->allocated () : 0; }
+
+ unsigned length (void) const
+ { return m_vec ? m_vec->length () : 0; }
+
+ T *address (void)
+ { return m_vec ? m_vec->address () : NULL; }
+
+ const T *address (void) const
+ { return m_vec ? m_vec->address () : NULL; }
+
+ T *begin () { return address (); }
+ const T *begin () const { return address (); }
+ T *end () { return begin () + length (); }
+ const T *end () const { return begin () + length (); }
+ const T &operator[] (unsigned ix) const
+ { return (*m_vec)[ix]; }
+
+ bool operator!=(const vec &other) const
+ { return !(*this == other); }
+
+ bool operator==(const vec &other) const
+ { return address () == other.address (); }
+
+ T &operator[] (unsigned ix)
+ { return (*m_vec)[ix]; }
+
+ T &last (void)
+ { return m_vec->last (); }
+
+ bool space (int nelems) const
+ { return m_vec ? m_vec->space (nelems) : nelems == 0; }
+
+ bool iterate (unsigned ix, T *p) const;
+ bool iterate (unsigned ix, T **p) const;
+ vec copy (ALONE_CXX_MEM_STAT_INFO) const;
+ bool reserve (unsigned, bool = false CXX_MEM_STAT_INFO);
+ bool reserve_exact (unsigned CXX_MEM_STAT_INFO);
+ void splice (const vec &);
+ void safe_splice (const vec & CXX_MEM_STAT_INFO);
+ T *quick_push (const T &);
+ T *safe_push (const T &CXX_MEM_STAT_INFO);
+ T &pop (void);
+ void truncate (unsigned);
+ void safe_grow (unsigned, bool = false CXX_MEM_STAT_INFO);
+ void safe_grow_cleared (unsigned, bool = false CXX_MEM_STAT_INFO);
+ void quick_grow (unsigned);
+ void quick_grow_cleared (unsigned);
+ void quick_insert (unsigned, const T &);
+ void safe_insert (unsigned, const T & CXX_MEM_STAT_INFO);
+ void ordered_remove (unsigned);
+ void unordered_remove (unsigned);
+ void block_remove (unsigned, unsigned);
+ void qsort (int (*) (const void *, const void *));
+ void sort (int (*) (const void *, const void *, void *), void *);
+ void stablesort (int (*) (const void *, const void *, void *), void *);
+ T *bsearch (const void *key, int (*compar)(const void *, const void *));
+ T *bsearch (const void *key,
+ int (*compar)(const void *, const void *, void *), void *);
+ unsigned lower_bound (T, bool (*)(const T &, const T &)) const;
+ bool contains (const T &search) const;
+ void reverse (void);
+
+ bool using_auto_storage () const;
+
+ /* FIXME - This field should be private, but we need to cater to
+ compilers that have stricter notions of PODness for types. */
+ vec<T, va_heap, vl_embed> *m_vec;
+};
+
+
+/* auto_vec is a subclass of vec that automatically manages creating
+   and releasing the internal vector.  If N is non-zero then it has N
+   elements of internal storage.  The default is no internal storage;
+   you probably only want internal storage for vectors on the stack,
+   because if the vector grows beyond the internal storage, that space
+   is wasted.  */
+template<typename T, size_t N /* = 0 */>
+class auto_vec : public vec<T, va_heap>
+{
+public:
+ auto_vec ()
+ {
+ m_auto.embedded_init (N, 0, 1);
+ /* ??? Instead of initializing m_vec from &m_auto directly use an
+       expression that avoids referring to a specific member of 'this'
+ to derail the -Wstringop-overflow diagnostic code, avoiding
+ the impression that data accesses are supposed to be to the
+ m_auto member storage. */
+ size_t off = (char *) &m_auto - (char *) this;
+ this->m_vec = (vec<T, va_heap, vl_embed> *) ((char *) this + off);
+ }
+
+ auto_vec (size_t s CXX_MEM_STAT_INFO)
+ {
+ if (s > N)
+ {
+ this->create (s PASS_MEM_STAT);
+ return;
+ }
+
+ m_auto.embedded_init (N, 0, 1);
+ /* ??? See above. */
+ size_t off = (char *) &m_auto - (char *) this;
+ this->m_vec = (vec<T, va_heap, vl_embed> *) ((char *) this + off);
+ }
+
+ ~auto_vec ()
+ {
+ this->release ();
+ }
+
+ /* Explicitly convert to the base class. There is no conversion
+ from a const auto_vec because a copy of the returned vec can
+ be used to modify *THIS.
+ This is a legacy function not to be used in new code. */
+ vec<T, va_heap> to_vec_legacy () {
+ return *static_cast<vec<T, va_heap> *>(this);
+ }
+
+private:
+ vec<T, va_heap, vl_embed> m_auto;
+ unsigned char m_data[sizeof (T) * N];
+};
+
+/* auto_vec is a subclass of vec whose storage is released when it is
+   destroyed.  */
+template<typename T>
+class auto_vec<T, 0> : public vec<T, va_heap>
+{
+public:
+ auto_vec () { this->m_vec = NULL; }
+ auto_vec (size_t n CXX_MEM_STAT_INFO) { this->create (n PASS_MEM_STAT); }
+ ~auto_vec () { this->release (); }
+
+ auto_vec (vec<T, va_heap>&& r)
+ {
+ gcc_assert (!r.using_auto_storage ());
+ this->m_vec = r.m_vec;
+ r.m_vec = NULL;
+ }
+
+ auto_vec (auto_vec<T> &&r)
+ {
+ gcc_assert (!r.using_auto_storage ());
+ this->m_vec = r.m_vec;
+ r.m_vec = NULL;
+ }
+
+ auto_vec& operator= (vec<T, va_heap>&& r)
+ {
+ if (this == &r)
+ return *this;
+
+ gcc_assert (!r.using_auto_storage ());
+ this->release ();
+ this->m_vec = r.m_vec;
+ r.m_vec = NULL;
+ return *this;
+ }
+
+ auto_vec& operator= (auto_vec<T> &&r)
+ {
+ if (this == &r)
+ return *this;
+
+ gcc_assert (!r.using_auto_storage ());
+ this->release ();
+ this->m_vec = r.m_vec;
+ r.m_vec = NULL;
+ return *this;
+ }
+
+ /* Explicitly convert to the base class. There is no conversion
+ from a const auto_vec because a copy of the returned vec can
+ be used to modify *THIS.
+ This is a legacy function not to be used in new code. */
+ vec<T, va_heap> to_vec_legacy () {
+ return *static_cast<vec<T, va_heap> *>(this);
+ }
+
+  // You probably don't want to copy a vector, so these are deleted to
+  // prevent unintentional use.  If you really need a copy of the
+  // vector's contents you can use copy ().
+ auto_vec(const auto_vec &) = delete;
+ auto_vec &operator= (const auto_vec &) = delete;
+};
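+
+/* A usage sketch: an auto_vec with internal storage for 8 elements;
+   heap allocation only occurs if the vector outgrows that storage.
+
+     auto_vec<int, 8> v;
+     for (int i = 0; i < 5; i++)
+       v.safe_push (i);   // stays within the internal storage
+     // storage is released automatically when v goes out of scope  */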
+
+
+/* Allocate heap memory for pointer V and create the internal vector
+ with space for NELEMS elements. If NELEMS is 0, the internal
+ vector is initialized to empty. */
+
+template<typename T>
+inline void
+vec_alloc (vec<T> *&v, unsigned nelems CXX_MEM_STAT_INFO)
+{
+ v = new vec<T>;
+ v->create (nelems PASS_MEM_STAT);
+}
+
+
+/* A subclass of auto_vec <char *> that frees all of its elements on
+ deletion. */
+
+class auto_string_vec : public auto_vec <char *>
+{
+ public:
+ ~auto_string_vec ();
+};
+
+/* A subclass of auto_vec <T *> that deletes all of its elements on
+ destruction.
+
+ This is a crude way for a vec to "own" the objects it points to
+ and clean up automatically.
+
+ For example, no attempt is made to delete elements when an item
+ within the vec is overwritten.
+
+ We can't rely on gnu::unique_ptr within a container,
+ since we can't rely on move semantics in C++98. */
+
+template <typename T>
+class auto_delete_vec : public auto_vec <T *>
+{
+ public:
+ auto_delete_vec () {}
+ auto_delete_vec (size_t s) : auto_vec <T *> (s) {}
+
+ ~auto_delete_vec ();
+
+private:
+ DISABLE_COPY_AND_ASSIGN(auto_delete_vec);
+};
+
+/* Conditionally allocate heap memory for VEC and its internal vector. */
+
+template<typename T>
+inline void
+vec_check_alloc (vec<T, va_heap> *&vec, unsigned nelems CXX_MEM_STAT_INFO)
+{
+ if (!vec)
+ vec_alloc (vec, nelems PASS_MEM_STAT);
+}
+
+
+/* Free the heap memory allocated by vector V and set it to NULL. */
+
+template<typename T>
+inline void
+vec_free (vec<T> *&v)
+{
+ if (v == NULL)
+ return;
+
+ v->release ();
+ delete v;
+ v = NULL;
+}
+
+
+/* Return iteration condition and update *PTR to (a copy of) the IX'th
+   element of this vector.  Use this to iterate over the elements of a
+   vector as follows,
+
+ for (ix = 0; v.iterate (ix, &ptr); ix++)
+ continue; */
+
+template<typename T>
+inline bool
+vec<T, va_heap, vl_ptr>::iterate (unsigned ix, T *ptr) const
+{
+ if (m_vec)
+ return m_vec->iterate (ix, ptr);
+ else
+ {
+ *ptr = 0;
+ return false;
+ }
+}
+
+
+/* Return iteration condition and update *PTR to point to the
+ IX'th element of this vector. Use this to iterate over the
+ elements of a vector as follows,
+
+ for (ix = 0; v->iterate (ix, &ptr); ix++)
+ continue;
+
+ This variant is for vectors of objects. */
+
+template<typename T>
+inline bool
+vec<T, va_heap, vl_ptr>::iterate (unsigned ix, T **ptr) const
+{
+ if (m_vec)
+ return m_vec->iterate (ix, ptr);
+ else
+ {
+ *ptr = 0;
+ return false;
+ }
+}
+
+
+/* Convenience macro for forward iteration. */
+#define FOR_EACH_VEC_ELT(V, I, P) \
+ for (I = 0; (V).iterate ((I), &(P)); ++(I))
+
+#define FOR_EACH_VEC_SAFE_ELT(V, I, P) \
+ for (I = 0; vec_safe_iterate ((V), (I), &(P)); ++(I))
+
+/* Likewise, but start from FROM rather than 0. */
+#define FOR_EACH_VEC_ELT_FROM(V, I, P, FROM) \
+ for (I = (FROM); (V).iterate ((I), &(P)); ++(I))
+
+/* Convenience macro for reverse iteration. */
+#define FOR_EACH_VEC_ELT_REVERSE(V, I, P) \
+ for (I = (V).length () - 1; \
+ (V).iterate ((I), &(P)); \
+ (I)--)
+
+#define FOR_EACH_VEC_SAFE_ELT_REVERSE(V, I, P) \
+ for (I = vec_safe_length (V) - 1; \
+ vec_safe_iterate ((V), (I), &(P)); \
+ (I)--)
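+
+/* For example, iterating over a heap vector V of int and a possibly
+   NULL GC vector GV of tree (process () is hypothetical):
+
+     unsigned ix;
+     int x;
+     FOR_EACH_VEC_ELT (v, ix, x)
+       process (x);
+
+     tree t;
+     FOR_EACH_VEC_SAFE_ELT (gv, ix, t)
+       process (t);  */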
+
+/* auto_string_vec's dtor, freeing all contained strings, automatically
+ chaining up to ~auto_vec <char *>, which frees the internal buffer. */
+
+inline
+auto_string_vec::~auto_string_vec ()
+{
+ int i;
+ char *str;
+ FOR_EACH_VEC_ELT (*this, i, str)
+ free (str);
+}
+
+/* auto_delete_vec's dtor, deleting all contained items, automatically
+ chaining up to ~auto_vec <T*>, which frees the internal buffer. */
+
+template <typename T>
+inline
+auto_delete_vec<T>::~auto_delete_vec ()
+{
+ int i;
+ T *item;
+ FOR_EACH_VEC_ELT (*this, i, item)
+ delete item;
+}
+
+
+/* Return a copy of this vector. */
+
+template<typename T>
+inline vec<T, va_heap, vl_ptr>
+vec<T, va_heap, vl_ptr>::copy (ALONE_MEM_STAT_DECL) const
+{
+ vec<T, va_heap, vl_ptr> new_vec{ };
+ if (length ())
+ new_vec.m_vec = m_vec->copy (ALONE_PASS_MEM_STAT);
+ return new_vec;
+}
+
+
+/* Ensure that the vector has at least RESERVE slots available (if
+ EXACT is false), or exactly RESERVE slots available (if EXACT is
+ true).
+
+ This may create additional headroom if EXACT is false.
+
+ Note that this can cause the embedded vector to be reallocated.
+ Returns true iff reallocation actually occurred. */
+
+template<typename T>
+inline bool
+vec<T, va_heap, vl_ptr>::reserve (unsigned nelems, bool exact MEM_STAT_DECL)
+{
+ if (space (nelems))
+ return false;
+
+  /* For now, play a game with va_heap::reserve to hide our auto
+     storage, if any.  This is necessary because va_heap::reserve does
+     not have enough information to know that the embedded vector is in
+     auto storage, and so should not be freed.  */
+ vec<T, va_heap, vl_embed> *oldvec = m_vec;
+ unsigned int oldsize = 0;
+ bool handle_auto_vec = m_vec && using_auto_storage ();
+ if (handle_auto_vec)
+ {
+ m_vec = NULL;
+ oldsize = oldvec->length ();
+ nelems += oldsize;
+ }
+
+ va_heap::reserve (m_vec, nelems, exact PASS_MEM_STAT);
+ if (handle_auto_vec)
+ {
+ vec_copy_construct (m_vec->address (), oldvec->address (), oldsize);
+ m_vec->m_vecpfx.m_num = oldsize;
+ }
+
+ return true;
+}
+
+
+/* Ensure that this vector has exactly NELEMS slots available. This
+ will not create additional headroom. Note this can cause the
+ embedded vector to be reallocated. Returns true iff reallocation
+ actually occurred. */
+
+template<typename T>
+inline bool
+vec<T, va_heap, vl_ptr>::reserve_exact (unsigned nelems MEM_STAT_DECL)
+{
+ return reserve (nelems, true PASS_MEM_STAT);
+}
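+
+/* A common pattern: reserve the needed headroom once, then use the
+   cheaper quick_push for the pushes that follow (N is hypothetical):
+
+     vec<int> v = vNULL;
+     v.reserve_exact (n);
+     for (unsigned i = 0; i < n; i++)
+       v.quick_push (i);
+     v.release ();  */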
+
+
+/* Create the internal vector and reserve NELEMS for it. This is
+ exactly like vec::reserve, but the internal vector is
+ unconditionally allocated from scratch. The old one, if it
+ existed, is lost. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::create (unsigned nelems MEM_STAT_DECL)
+{
+ m_vec = NULL;
+ if (nelems > 0)
+ reserve_exact (nelems PASS_MEM_STAT);
+}
+
+
+/* Free the memory occupied by the embedded vector. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::release (void)
+{
+ if (!m_vec)
+ return;
+
+ if (using_auto_storage ())
+ {
+ m_vec->m_vecpfx.m_num = 0;
+ return;
+ }
+
+ va_heap::release (m_vec);
+}
+
+/* Copy the elements from SRC to the end of this vector as if by memcpy.
+ SRC and this vector must be allocated with the same memory
+ allocation mechanism. This vector is assumed to have sufficient
+ headroom available. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::splice (const vec<T, va_heap, vl_ptr> &src)
+{
+ if (src.length ())
+ m_vec->splice (*(src.m_vec));
+}
+
+
+/* Copy the elements in SRC to the end of this vector as if by memcpy.
+ SRC and this vector must be allocated with the same mechanism.
+ If there is not enough headroom in this vector, it will be reallocated
+ as needed. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::safe_splice (const vec<T, va_heap, vl_ptr> &src
+ MEM_STAT_DECL)
+{
+ if (src.length ())
+ {
+ reserve_exact (src.length ());
+ splice (src);
+ }
+}
+
+
+/* Push OBJ (a new element) onto the end of the vector. There must be
+ sufficient space in the vector. Return a pointer to the slot
+ where OBJ was inserted. */
+
+template<typename T>
+inline T *
+vec<T, va_heap, vl_ptr>::quick_push (const T &obj)
+{
+ return m_vec->quick_push (obj);
+}
+
+
+/* Push a new element OBJ onto the end of this vector. Reallocates
+ the embedded vector, if needed. Return a pointer to the slot where
+ OBJ was inserted. */
+
+template<typename T>
+inline T *
+vec<T, va_heap, vl_ptr>::safe_push (const T &obj MEM_STAT_DECL)
+{
+ reserve (1, false PASS_MEM_STAT);
+ return quick_push (obj);
+}
+
+
+/* Pop and return the last element off the end of the vector. */
+
+template<typename T>
+inline T &
+vec<T, va_heap, vl_ptr>::pop (void)
+{
+ return m_vec->pop ();
+}
+
+
+/* Set the length of the vector to SIZE.  The new length must be less
+   than or equal to the current length.  This is an O(1) operation.  */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::truncate (unsigned size)
+{
+ if (m_vec)
+ m_vec->truncate (size);
+ else
+ gcc_checking_assert (size == 0);
+}
+
+
+/* Grow the vector to a specific length. LEN must be as long or
+ longer than the current length. The new elements are
+ uninitialized. Reallocate the internal vector, if needed. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::safe_grow (unsigned len, bool exact MEM_STAT_DECL)
+{
+ unsigned oldlen = length ();
+ gcc_checking_assert (oldlen <= len);
+ reserve (len - oldlen, exact PASS_MEM_STAT);
+ if (m_vec)
+ m_vec->quick_grow (len);
+ else
+ gcc_checking_assert (len == 0);
+}
+
+
+/* Grow the embedded vector to a specific length. LEN must be as
+ long or longer than the current length. The new elements are
+ initialized to zero. Reallocate the internal vector, if needed. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::safe_grow_cleared (unsigned len, bool exact
+ MEM_STAT_DECL)
+{
+ unsigned oldlen = length ();
+ size_t growby = len - oldlen;
+ safe_grow (len, exact PASS_MEM_STAT);
+ if (growby != 0)
+ vec_default_construct (address () + oldlen, growby);
+}
+
+
+/* Same as vec::safe_grow but without reallocation of the internal vector.
+ If the vector cannot be extended, a runtime assertion will be triggered. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::quick_grow (unsigned len)
+{
+ gcc_checking_assert (m_vec);
+ m_vec->quick_grow (len);
+}
+
+
+/* Same as vec::quick_grow_cleared but without reallocation of the
+ internal vector. If the vector cannot be extended, a runtime
+ assertion will be triggered. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::quick_grow_cleared (unsigned len)
+{
+ gcc_checking_assert (m_vec);
+ m_vec->quick_grow_cleared (len);
+}
+
+
+/* Insert an element, OBJ, at the IXth position of this vector. There
+ must be sufficient space. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::quick_insert (unsigned ix, const T &obj)
+{
+ m_vec->quick_insert (ix, obj);
+}
+
+
+/* Insert an element, OBJ, at the IXth position of the vector.
+ Reallocate the embedded vector, if necessary. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::safe_insert (unsigned ix, const T &obj MEM_STAT_DECL)
+{
+ reserve (1, false PASS_MEM_STAT);
+ quick_insert (ix, obj);
+}
+
+
+/* Remove an element from the IXth position of this vector. Ordering of
+ remaining elements is preserved. This is an O(N) operation due to
+ a memmove. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::ordered_remove (unsigned ix)
+{
+ m_vec->ordered_remove (ix);
+}
+
+
+/* Remove an element from the IXth position of this vector. Ordering
+ of remaining elements is destroyed. This is an O(1) operation. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::unordered_remove (unsigned ix)
+{
+ m_vec->unordered_remove (ix);
+}
+
+
+/* Remove LEN elements starting at the IXth. Ordering is retained.
+ This is an O(N) operation due to memmove. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::block_remove (unsigned ix, unsigned len)
+{
+ m_vec->block_remove (ix, len);
+}
+
+
+/* Sort the contents of this vector with qsort. CMP is the comparison
+ function to pass to qsort. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::qsort (int (*cmp) (const void *, const void *))
+{
+ if (m_vec)
+ m_vec->qsort (cmp);
+}
+
+/* Sort the contents of this vector with qsort. CMP is the comparison
+ function to pass to qsort. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::sort (int (*cmp) (const void *, const void *,
+ void *), void *data)
+{
+ if (m_vec)
+ m_vec->sort (cmp, data);
+}
+
+/* Sort the contents of this vector with gcc_stablesort_r.  CMP is the
+   comparison function to pass to it.  */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::stablesort (int (*cmp) (const void *, const void *,
+ void *), void *data)
+{
+ if (m_vec)
+ m_vec->stablesort (cmp, data);
+}
+
+/* Search the contents of the sorted vector with a binary search.
+ CMP is the comparison function to pass to bsearch. */
+
+template<typename T>
+inline T *
+vec<T, va_heap, vl_ptr>::bsearch (const void *key,
+ int (*cmp) (const void *, const void *))
+{
+ if (m_vec)
+ return m_vec->bsearch (key, cmp);
+ return NULL;
+}
+
+/* Search the contents of the sorted vector with a binary search.
+ CMP is the comparison function to pass to bsearch. */
+
+template<typename T>
+inline T *
+vec<T, va_heap, vl_ptr>::bsearch (const void *key,
+ int (*cmp) (const void *, const void *,
+ void *), void *data)
+{
+ if (m_vec)
+ return m_vec->bsearch (key, cmp, data);
+ return NULL;
+}
+
+
+/* Find and return the first position in which OBJ could be inserted
+ without changing the ordering of this vector. LESSTHAN is a
+ function that returns true if the first argument is strictly less
+ than the second. */
+
+template<typename T>
+inline unsigned
+vec<T, va_heap, vl_ptr>::lower_bound (T obj,
+ bool (*lessthan)(const T &, const T &))
+ const
+{
+ return m_vec ? m_vec->lower_bound (obj, lessthan) : 0;
+}
+
+/* Return true if SEARCH is an element of this vector.  Note that this
+   is O(N) in the size of the vector and so should be used with care.  */
+
+template<typename T>
+inline bool
+vec<T, va_heap, vl_ptr>::contains (const T &search) const
+{
+ return m_vec ? m_vec->contains (search) : false;
+}
+
+/* Reverse content of the vector. */
+
+template<typename T>
+inline void
+vec<T, va_heap, vl_ptr>::reverse (void)
+{
+ unsigned l = length ();
+ T *ptr = address ();
+
+ for (unsigned i = 0; i < l / 2; i++)
+ std::swap (ptr[i], ptr[l - i - 1]);
+}
+
+template<typename T>
+inline bool
+vec<T, va_heap, vl_ptr>::using_auto_storage () const
+{
+ return m_vec ? m_vec->m_vecpfx.m_using_auto_storage : false;
+}
+
+/* Call release on each element vector of VEC, then release VEC
+   itself.  */
+
+template<typename T>
+inline void
+release_vec_vec (vec<vec<T> > &vec)
+{
+ for (unsigned i = 0; i < vec.length (); i++)
+ vec[i].release ();
+
+ vec.release ();
+}
+
+// Provide a subset of the std::span functionality. (We can't use std::span
+// itself because it's a C++20 feature.)
+//
+// In addition, provide an invalid value that is distinct from all valid
+// sequences (including the empty sequence). This can be used to return
+// failure without having to use std::optional.
+//
+// There is no operator bool because it would be ambiguous whether it is
+// testing for a valid value or an empty sequence.
+template<typename T>
+class array_slice
+{
+ template<typename OtherT> friend class array_slice;
+
+public:
+ using value_type = T;
+ using iterator = T *;
+ using const_iterator = const T *;
+
+ array_slice () : m_base (nullptr), m_size (0) {}
+
+ template<typename OtherT>
+ array_slice (array_slice<OtherT> other)
+ : m_base (other.m_base), m_size (other.m_size) {}
+
+ array_slice (iterator base, unsigned int size)
+ : m_base (base), m_size (size) {}
+
+ template<size_t N>
+ array_slice (T (&array)[N]) : m_base (array), m_size (N) {}
+
+ template<typename OtherT>
+ array_slice (const vec<OtherT> &v)
+ : m_base (v.address ()), m_size (v.length ()) {}
+
+ template<typename OtherT>
+ array_slice (vec<OtherT> &v)
+ : m_base (v.address ()), m_size (v.length ()) {}
+
+ template<typename OtherT>
+ array_slice (const vec<OtherT, va_gc> *v)
+ : m_base (v ? v->address () : nullptr), m_size (v ? v->length () : 0) {}
+
+ template<typename OtherT>
+ array_slice (vec<OtherT, va_gc> *v)
+ : m_base (v ? v->address () : nullptr), m_size (v ? v->length () : 0) {}
+
+ iterator begin () { return m_base; }
+ iterator end () { return m_base + m_size; }
+
+ const_iterator begin () const { return m_base; }
+ const_iterator end () const { return m_base + m_size; }
+
+ value_type &front ();
+ value_type &back ();
+ value_type &operator[] (unsigned int i);
+
+ const value_type &front () const;
+ const value_type &back () const;
+ const value_type &operator[] (unsigned int i) const;
+
+ size_t size () const { return m_size; }
+ size_t size_bytes () const { return m_size * sizeof (T); }
+ bool empty () const { return m_size == 0; }
+
+ // An invalid array_slice that represents a failed operation. This is
+ // distinct from an empty slice, which is a valid result in some contexts.
+ static array_slice invalid () { return { nullptr, ~0U }; }
+
+  // True if the array is valid, false if it is an invalid array like
+  // the one returned by invalid ().
+ bool is_valid () const { return m_base || m_size == 0; }
+
+private:
+ iterator m_base;
+ unsigned int m_size;
+};
+
+template<typename T>
+inline typename array_slice<T>::value_type &
+array_slice<T>::front ()
+{
+ gcc_checking_assert (m_size);
+ return m_base[0];
+}
+
+template<typename T>
+inline const typename array_slice<T>::value_type &
+array_slice<T>::front () const
+{
+ gcc_checking_assert (m_size);
+ return m_base[0];
+}
+
+template<typename T>
+inline typename array_slice<T>::value_type &
+array_slice<T>::back ()
+{
+ gcc_checking_assert (m_size);
+ return m_base[m_size - 1];
+}
+
+template<typename T>
+inline const typename array_slice<T>::value_type &
+array_slice<T>::back () const
+{
+ gcc_checking_assert (m_size);
+ return m_base[m_size - 1];
+}
+
+template<typename T>
+inline typename array_slice<T>::value_type &
+array_slice<T>::operator[] (unsigned int i)
+{
+ gcc_checking_assert (i < m_size);
+ return m_base[i];
+}
+
+template<typename T>
+inline const typename array_slice<T>::value_type &
+array_slice<T>::operator[] (unsigned int i) const
+{
+ gcc_checking_assert (i < m_size);
+ return m_base[i];
+}
+
+template<typename T>
+array_slice<T>
+make_array_slice (T *base, unsigned int size)
+{
+ return array_slice<T> (base, size);
+}
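+
+/* Illustrative usage sketch (not part of the upstream header):
+
+     int vals[] = { 1, 2, 3 };
+     array_slice<int> s = make_array_slice (vals, 3);
+     int sum = 0;
+     for (int x : s)
+       sum += x;                        // 6
+     gcc_assert (s.is_valid () && !s.empty ());
+
+     array_slice<int> bad = array_slice<int>::invalid ();
+     gcc_assert (!bad.is_valid ());     // failure, not merely empty
+*/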
+
+#if (GCC_VERSION >= 3000)
+# pragma GCC poison m_vec m_vecpfx m_vecdata
+#endif
+
+#endif // GCC_VEC_H
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vector-builder.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vector-builder.h
new file mode 100644
index 0000000..0075356
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vector-builder.h
@@ -0,0 +1,612 @@
+/* A class for building vector constant patterns.
+ Copyright (C) 2017-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VECTOR_BUILDER_H
+#define GCC_VECTOR_BUILDER_H
+
+/* This class is a wrapper around auto_vec<T> for building vectors of T.
+ It aims to encode each vector as npatterns interleaved patterns,
+ where each pattern represents a sequence:
+
+ { BASE0, BASE1, BASE1 + STEP, BASE1 + STEP*2, BASE1 + STEP*3, ... }
+
+ The first three elements in each pattern provide enough information
+ to derive the other elements. If all patterns have a STEP of zero,
+ we only need to encode the first two elements in each pattern.
+ If BASE1 is also equal to BASE0 for all patterns, we only need to
+ encode the first element in each pattern. The number of encoded
+ elements per pattern is given by nelts_per_pattern.
+
+ The class can be used in two ways:
+
+ 1. It can be used to build a full image of the vector, which is then
+ canonicalized by finalize (). In this case npatterns is initially
+ the number of elements in the vector and nelts_per_pattern is
+ initially 1.
+
+ 2. It can be used to build a vector that already has a known encoding.
+ This is preferred since it is more efficient and copes with
+ variable-length vectors. finalize () then canonicalizes the encoding
+ to a simpler form if possible.
+
+ Shape is the type that specifies the number of elements in the vector
+ and (where relevant) the type of each element.
+
+ The derived class Derived provides the functionality of this class
+ for specific Ts. Derived needs to provide the following interface:
+
+ bool equal_p (T elt1, T elt2) const;
+
+ Return true if elements ELT1 and ELT2 are equal.
+
+ bool allow_steps_p () const;
+
+ Return true if a stepped representation is OK. We don't allow
+ linear series for anything other than integers, to avoid problems
+ with rounding.
+
+ bool integral_p (T elt) const;
+
+ Return true if element ELT can be interpreted as an integer.
+
+ StepType step (T elt1, T elt2) const;
+
+ Return the value of element ELT2 minus the value of element ELT1,
+ given integral_p (ELT1) && integral_p (ELT2). There is no fixed
+ choice of StepType.
+
+ T apply_step (T base, unsigned int factor, StepType step) const;
+
+ Return a vector element with the value BASE + FACTOR * STEP.
+
+ bool can_elide_p (T elt) const;
+
+ Return true if we can drop element ELT, even if the retained
+ elements are different. This is provided for TREE_OVERFLOW
+ handling.
+
+ void note_representative (T *elt1_ptr, T elt2);
+
+ Record that ELT2 is being elided, given that ELT1_PTR points to
+ the last encoded element for the containing pattern. This is
+ again provided for TREE_OVERFLOW handling.
+
+ static poly_uint64 shape_nelts (Shape shape);
+
+ Return the number of elements in SHAPE.
+
+ The class provides additional functionality for the case in which
+ T can describe a vector constant as well as an individual element.
+ This functionality requires:
+
+ static poly_uint64 nelts_of (T x);
+
+ Return the number of elements in vector constant X.
+
+ static unsigned int npatterns_of (T x);
+
+ Return the number of patterns used to encode vector constant X.
+
+ static unsigned int nelts_per_pattern_of (T x);
+
+ Return the number of elements used to encode each pattern
+ in vector constant X. */
+
+template<typename T, typename Shape, typename Derived>
+class vector_builder : public auto_vec<T, 32>
+{
+public:
+ vector_builder ();
+
+ poly_uint64 full_nelts () const { return m_full_nelts; }
+ unsigned int npatterns () const { return m_npatterns; }
+ unsigned int nelts_per_pattern () const { return m_nelts_per_pattern; }
+ unsigned int encoded_nelts () const;
+ bool encoded_full_vector_p () const;
+ T elt (unsigned int) const;
+ unsigned int count_dups (int, int, int) const;
+
+ bool operator == (const Derived &) const;
+ bool operator != (const Derived &x) const { return !operator == (x); }
+
+ bool new_unary_operation (Shape, T, bool);
+ bool new_binary_operation (Shape, T, T, bool);
+
+ void finalize ();
+
+ static unsigned int binary_encoded_nelts (T, T);
+
+protected:
+ void new_vector (poly_uint64, unsigned int, unsigned int);
+ void reshape (unsigned int, unsigned int);
+ bool repeating_sequence_p (unsigned int, unsigned int, unsigned int);
+ bool stepped_sequence_p (unsigned int, unsigned int, unsigned int);
+ bool try_npatterns (unsigned int);
+
+private:
+ vector_builder (const vector_builder &);
+ vector_builder &operator= (const vector_builder &);
+ Derived *derived () { return static_cast<Derived *> (this); }
+ const Derived *derived () const;
+
+ poly_uint64 m_full_nelts;
+ unsigned int m_npatterns;
+ unsigned int m_nelts_per_pattern;
+};
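+
+/* Worked example (illustrative, not part of the upstream header): the
+   fixed-length vector
+
+     { 0, 1, 4, 5, 8, 9, 12, 13 }
+
+   interleaves two linear series, so it can be encoded with
+   npatterns == 2 and nelts_per_pattern == 3 using only the six
+   explicit elements { 0, 1, 4, 5, 8, 9 }; each pattern's step (4) is
+   recoverable from its last two encoded elements.  A Derived
+   implementation would set this up along the lines of:
+
+     new_vector (8, 2, 3);  // full_nelts, npatterns, nelts_per_pattern
+     // ...push the six encoded elements, then call finalize ().  */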
+
+template<typename T, typename Shape, typename Derived>
+inline const Derived *
+vector_builder<T, Shape, Derived>::derived () const
+{
+ return static_cast<const Derived *> (this);
+}
+
+template<typename T, typename Shape, typename Derived>
+inline
+vector_builder<T, Shape, Derived>::vector_builder ()
+ : m_full_nelts (0),
+ m_npatterns (0),
+ m_nelts_per_pattern (0)
+{}
+
+/* Return the number of elements that are explicitly encoded. The vec
+ starts with these explicitly-encoded elements and may contain additional
+ elided elements. */
+
+template<typename T, typename Shape, typename Derived>
+inline unsigned int
+vector_builder<T, Shape, Derived>::encoded_nelts () const
+{
+ return m_npatterns * m_nelts_per_pattern;
+}
+
+/* Return true if every element of the vector is explicitly encoded. */
+
+template<typename T, typename Shape, typename Derived>
+inline bool
+vector_builder<T, Shape, Derived>::encoded_full_vector_p () const
+{
+ return known_eq (m_npatterns * m_nelts_per_pattern, m_full_nelts);
+}
+
+/* Start building a vector that has FULL_NELTS elements. Initially
+ encode it using NPATTERNS patterns with NELTS_PER_PATTERN each. */
+
+template<typename T, typename Shape, typename Derived>
+void
+vector_builder<T, Shape, Derived>::new_vector (poly_uint64 full_nelts,
+ unsigned int npatterns,
+ unsigned int nelts_per_pattern)
+{
+ m_full_nelts = full_nelts;
+ m_npatterns = npatterns;
+ m_nelts_per_pattern = nelts_per_pattern;
+ this->reserve (encoded_nelts ());
+ this->truncate (0);
+}
+
+/* Return true if this vector and OTHER have the same elements and
+ are encoded in the same way. */
+
+template<typename T, typename Shape, typename Derived>
+bool
+vector_builder<T, Shape, Derived>::operator == (const Derived &other) const
+{
+ if (maybe_ne (m_full_nelts, other.m_full_nelts)
+ || m_npatterns != other.m_npatterns
+ || m_nelts_per_pattern != other.m_nelts_per_pattern)
+ return false;
+
+ unsigned int nelts = encoded_nelts ();
+ for (unsigned int i = 0; i < nelts; ++i)
+ if (!derived ()->equal_p ((*this)[i], other[i]))
+ return false;
+
+ return true;
+}
+
+/* Return the value of vector element I, which might or might not be
+ encoded explicitly. */
+
+template<typename T, typename Shape, typename Derived>
+T
+vector_builder<T, Shape, Derived>::elt (unsigned int i) const
+{
+ /* First handle elements that are already present in the underlying
+ vector, regardless of whether they're part of the encoding or not. */
+ if (i < this->length ())
+ return (*this)[i];
+
+ /* Extrapolation is only possible if the encoding has been fully
+ populated. */
+ gcc_checking_assert (encoded_nelts () <= this->length ());
+
+ /* Identify the pattern that contains element I and work out the index of
+ the last encoded element for that pattern. */
+ unsigned int pattern = i % m_npatterns;
+ unsigned int count = i / m_npatterns;
+ unsigned int final_i = encoded_nelts () - m_npatterns + pattern;
+ T final = (*this)[final_i];
+
+ /* If there are no steps, the final encoded value is the right one. */
+ if (m_nelts_per_pattern <= 2)
+ return final;
+
+ /* Otherwise work out the value from the last two encoded elements. */
+ T prev = (*this)[final_i - m_npatterns];
+ return derived ()->apply_step (final, count - 2,
+ derived ()->step (prev, final));
+}
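+
+/* Worked example (illustrative): with npatterns == 2,
+   nelts_per_pattern == 3 and encoded elements { 0, 1, 4, 5, 8, 9 },
+   elt (7) computes pattern = 7 % 2 = 1, count = 7 / 2 = 3 and
+   final_i = 6 - 2 + 1 = 5, giving final = 9 and prev = 5.  The step
+   is 9 - 5 = 4, so the result is 9 + (3 - 2) * 4 = 13, matching the
+   full vector { 0, 1, 4, 5, 8, 9, 12, 13 }.  */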
+
+/* Try to start building a new vector of shape SHAPE that holds the result of
+ a unary operation on vector constant VEC. ALLOW_STEPPED_P is true if the
+ operation can handle stepped encodings directly, without having to expand
+ the full sequence.
+
+ Return true if the operation is possible, which it always is when
+ ALLOW_STEPPED_P is true. Leave the builder unchanged otherwise. */
+
+template<typename T, typename Shape, typename Derived>
+bool
+vector_builder<T, Shape, Derived>::new_unary_operation (Shape shape, T vec,
+ bool allow_stepped_p)
+{
+ poly_uint64 full_nelts = Derived::shape_nelts (shape);
+ gcc_assert (known_eq (full_nelts, Derived::nelts_of (vec)));
+ unsigned int npatterns = Derived::npatterns_of (vec);
+ unsigned int nelts_per_pattern = Derived::nelts_per_pattern_of (vec);
+ if (!allow_stepped_p && nelts_per_pattern > 2)
+ {
+ if (!full_nelts.is_constant ())
+ return false;
+ npatterns = full_nelts.to_constant ();
+ nelts_per_pattern = 1;
+ }
+ derived ()->new_vector (shape, npatterns, nelts_per_pattern);
+ return true;
+}
+
+/* Try to start building a new vector of shape SHAPE that holds the result of
+ a binary operation on vector constants VEC1 and VEC2. ALLOW_STEPPED_P is
+ true if the operation can handle stepped encodings directly, without
+ having to expand the full sequence.
+
+ Return true if the operation is possible. Leave the builder unchanged
+ otherwise. */
+
+template<typename T, typename Shape, typename Derived>
+bool
+vector_builder<T, Shape, Derived>::new_binary_operation (Shape shape,
+ T vec1, T vec2,
+ bool allow_stepped_p)
+{
+ poly_uint64 full_nelts = Derived::shape_nelts (shape);
+ gcc_assert (known_eq (full_nelts, Derived::nelts_of (vec1))
+ && known_eq (full_nelts, Derived::nelts_of (vec2)));
+ /* Conceptually we split the patterns in VEC1 and VEC2 until we have
+ an equal number for both. Each split pattern requires the same
+ number of elements per pattern as the original. E.g. splitting:
+
+ { 1, 2, 3, ... }
+
+ into two gives:
+
+ { 1, 3, 5, ... }
+ { 2, 4, 6, ... }
+
+ while splitting:
+
+ { 1, 0, ... }
+
+ into two gives:
+
+ { 1, 0, ... }
+ { 0, 0, ... }. */
+ unsigned int npatterns
+ = least_common_multiple (Derived::npatterns_of (vec1),
+ Derived::npatterns_of (vec2));
+ unsigned int nelts_per_pattern
+ = MAX (Derived::nelts_per_pattern_of (vec1),
+ Derived::nelts_per_pattern_of (vec2));
+ if (!allow_stepped_p && nelts_per_pattern > 2)
+ {
+ if (!full_nelts.is_constant ())
+ return false;
+ npatterns = full_nelts.to_constant ();
+ nelts_per_pattern = 1;
+ }
+ derived ()->new_vector (shape, npatterns, nelts_per_pattern);
+ return true;
+}
+
+/* Return the number of elements that the caller needs to operate on in
+ order to handle a binary operation on vector constants VEC1 and VEC2.
+ This static function is used instead of new_binary_operation if the
+ result of the operation is not a constant vector. */
+
+template<typename T, typename Shape, typename Derived>
+unsigned int
+vector_builder<T, Shape, Derived>::binary_encoded_nelts (T vec1, T vec2)
+{
+ poly_uint64 nelts = Derived::nelts_of (vec1);
+ gcc_assert (known_eq (nelts, Derived::nelts_of (vec2)));
+ /* See new_binary_operation for details. */
+ unsigned int npatterns
+ = least_common_multiple (Derived::npatterns_of (vec1),
+ Derived::npatterns_of (vec2));
+ unsigned int nelts_per_pattern
+ = MAX (Derived::nelts_per_pattern_of (vec1),
+ Derived::nelts_per_pattern_of (vec2));
+ unsigned HOST_WIDE_INT const_nelts;
+ if (nelts.is_constant (&const_nelts))
+ return MIN (npatterns * nelts_per_pattern, const_nelts);
+ return npatterns * nelts_per_pattern;
+}
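+
+/* Worked example (illustrative): if VEC1 is encoded with 2 patterns of
+   1 element each (a repeating { A, B, A, B, ... }) and VEC2 with
+   1 pattern of 3 elements (a single linear series), then npatterns is
+   least_common_multiple (2, 1) == 2 and nelts_per_pattern is
+   MAX (1, 3) == 3, so the caller operates on 2 * 3 == 6 elements,
+   capped at the full number of elements when that is smaller.  */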
+
+/* Return the number of leading duplicate elements in the range
+ [START:END:STEP]. The value is always at least 1. */
+
+template<typename T, typename Shape, typename Derived>
+unsigned int
+vector_builder<T, Shape, Derived>::count_dups (int start, int end,
+ int step) const
+{
+ gcc_assert ((end - start) % step == 0);
+
+ unsigned int ndups = 1;
+ for (int i = start + step;
+ i != end && derived ()->equal_p (elt (i), elt (start));
+ i += step)
+ ndups++;
+ return ndups;
+}
+
+/* Change the encoding to NPATTERNS patterns of NELTS_PER_PATTERN each,
+ but without changing the underlying vector. */
+
+template<typename T, typename Shape, typename Derived>
+void
+vector_builder<T, Shape, Derived>::reshape (unsigned int npatterns,
+ unsigned int nelts_per_pattern)
+{
+ unsigned int old_encoded_nelts = encoded_nelts ();
+ unsigned int new_encoded_nelts = npatterns * nelts_per_pattern;
+ gcc_checking_assert (new_encoded_nelts <= old_encoded_nelts);
+ unsigned int next = new_encoded_nelts - npatterns;
+ for (unsigned int i = new_encoded_nelts; i < old_encoded_nelts; ++i)
+ {
+ derived ()->note_representative (&(*this)[next], (*this)[i]);
+ next += 1;
+ if (next == new_encoded_nelts)
+ next -= npatterns;
+ }
+ m_npatterns = npatterns;
+ m_nelts_per_pattern = nelts_per_pattern;
+}
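+
+/* Worked example (illustrative): given the encoding
+   { 0, 1, 0, 1, 0, 1 } with npatterns == 2 and nelts_per_pattern == 3
+   (all steps are zero), reshape (2, 2) keeps { 0, 1, 0, 1 } as the
+   encoding and calls note_representative for each elided trailing
+   element, pairing it with the last retained element of the same
+   pattern.  */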
+
+/* Return true if elements [START, END) contain a repeating sequence of
+ STEP elements. */
+
+template<typename T, typename Shape, typename Derived>
+bool
+vector_builder<T, Shape, Derived>::repeating_sequence_p (unsigned int start,
+ unsigned int end,
+ unsigned int step)
+{
+ for (unsigned int i = start; i < end - step; ++i)
+ if (!derived ()->equal_p ((*this)[i], (*this)[i + step]))
+ return false;
+ return true;
+}
+
+/* Return true if elements [START, END) contain STEP interleaved linear
+ series. */
+
+template<typename T, typename Shape, typename Derived>
+bool
+vector_builder<T, Shape, Derived>::stepped_sequence_p (unsigned int start,
+ unsigned int end,
+ unsigned int step)
+{
+ if (!derived ()->allow_steps_p ())
+ return false;
+
+ for (unsigned int i = start + step * 2; i < end; ++i)
+ {
+ T elt1 = (*this)[i - step * 2];
+ T elt2 = (*this)[i - step];
+ T elt3 = (*this)[i];
+
+ if (!derived ()->integral_p (elt1)
+ || !derived ()->integral_p (elt2)
+ || !derived ()->integral_p (elt3))
+ return false;
+
+ if (maybe_ne (derived ()->step (elt1, elt2),
+ derived ()->step (elt2, elt3)))
+ return false;
+
+ if (!derived ()->can_elide_p (elt3))
+ return false;
+ }
+ return true;
+}
+
+/* Try to change the number of encoded patterns to NPATTERNS, returning
+ true on success. */
+
+template<typename T, typename Shape, typename Derived>
+bool
+vector_builder<T, Shape, Derived>::try_npatterns (unsigned int npatterns)
+{
+ if (m_nelts_per_pattern == 1)
+ {
+ /* See whether NPATTERNS is valid with the current 1-element-per-pattern
+ encoding. */
+ if (repeating_sequence_p (0, encoded_nelts (), npatterns))
+ {
+ reshape (npatterns, 1);
+ return true;
+ }
+
+ /* We can only increase the number of elements per pattern if all
+ elements are still encoded explicitly. */
+ if (!encoded_full_vector_p ())
+ return false;
+ }
+
+ if (m_nelts_per_pattern <= 2)
+ {
+ /* See whether NPATTERNS is valid with a 2-element-per-pattern
+ encoding. */
+ if (repeating_sequence_p (npatterns, encoded_nelts (), npatterns))
+ {
+ reshape (npatterns, 2);
+ return true;
+ }
+
+ /* We can only increase the number of elements per pattern if all
+ elements are still encoded explicitly. */
+ if (!encoded_full_vector_p ())
+ return false;
+ }
+
+ if (m_nelts_per_pattern <= 3)
+ {
+ /* See whether we have NPATTERNS interleaved linear series,
+ giving a 3-element-per-pattern encoding. */
+ if (stepped_sequence_p (npatterns, encoded_nelts (), npatterns))
+ {
+ reshape (npatterns, 3);
+ return true;
+ }
+ return false;
+ }
+
+ gcc_unreachable ();
+}
+
+/* Replace the current encoding with the canonical form. */
+
+template<typename T, typename Shape, typename Derived>
+void
+vector_builder<T, Shape, Derived>::finalize ()
+{
+ /* The encoding requires the same number of elements to come from each
+ pattern. */
+ gcc_assert (multiple_p (m_full_nelts, m_npatterns));
+
+ /* Allow the caller to build more elements than necessary. For example,
+ it's often convenient to build a stepped vector from the natural
+ encoding of three elements even if the vector itself only has two. */
+ unsigned HOST_WIDE_INT const_full_nelts;
+ if (m_full_nelts.is_constant (&const_full_nelts)
+ && const_full_nelts <= encoded_nelts ())
+ {
+ m_npatterns = const_full_nelts;
+ m_nelts_per_pattern = 1;
+ }
+
+ /* Try to whittle down the number of elements per pattern. That is:
+
+ 1. If we have stepped patterns whose steps are all 0, reduce the
+ number of elements per pattern from 3 to 2.
+
+ 2. If we have background fill values that are the same as the
+ foreground values, reduce the number of elements per pattern
+ from 2 to 1. */
+ while (m_nelts_per_pattern > 1
+ && repeating_sequence_p (encoded_nelts () - m_npatterns * 2,
+ encoded_nelts (), m_npatterns))
+ /* The last two sequences of M_NPATTERNS elements are equal,
+ so remove the last one. */
+ reshape (m_npatterns, m_nelts_per_pattern - 1);
+
+ if (pow2p_hwi (m_npatterns))
+ {
+      /* Try to halve the number of patterns as long as doing so gives
+	 a valid pattern.  This approach is linear in the number of
+	 elements, whereas searching from 1 up would be O(n*log(n)).
+
+ Each halving step tries to keep the number of elements per pattern
+ the same. If that isn't possible, and if all elements are still
+ explicitly encoded, the halving step can instead increase the number
+ of elements per pattern.
+
+ E.g. for:
+
+ { 0, 2, 3, 4, 5, 6, 7, 8 } npatterns == 8 full_nelts == 8
+
+ we first realize that the second half of the sequence is not
+ equal to the first, so we cannot maintain 1 element per pattern
+ for npatterns == 4. Instead we halve the number of patterns
+ and double the number of elements per pattern, treating this
+ as a "foreground" { 0, 2, 3, 4 } against a "background" of
+ { 5, 6, 7, 8 | 5, 6, 7, 8 ... }:
+
+ { 0, 2, 3, 4 | 5, 6, 7, 8 } npatterns == 4
+
+	 Next we realize that this is *not* a foreground of { 0, 2 }
+ against a background of { 3, 4 | 3, 4 ... }, so the only
+ remaining option for reducing the number of patterns is
+ to use a foreground of { 0, 2 } against a stepped background
+ of { 1, 2 | 3, 4 | 5, 6 ... }. This is valid because we still
+ haven't elided any elements:
+
+ { 0, 2 | 3, 4 | 5, 6 } npatterns == 2
+
+ This in turn can be reduced to a foreground of { 0 } against a
+ stepped background of { 1 | 2 | 3 ... }:
+
+ { 0 | 2 | 3 } npatterns == 1
+
+ This last step would not have been possible for:
+
+ { 0, 0 | 3, 4 | 5, 6 } npatterns == 2. */
+ while ((m_npatterns & 1) == 0 && try_npatterns (m_npatterns / 2))
+ continue;
+
+ /* Builders of arbitrary fixed-length vectors can use:
+
+ new_vector (x, x, 1)
+
+ so that every element is specified explicitly. Handle cases
+ that are actually wrapping series, like { 0, 1, 2, 3, 0, 1, 2, 3 }
+ would be for 2-bit elements. We'll have treated them as
+ duplicates in the loop above. */
+ if (m_nelts_per_pattern == 1
+ && m_full_nelts.is_constant (&const_full_nelts)
+ && this->length () >= const_full_nelts
+ && (m_npatterns & 3) == 0
+ && stepped_sequence_p (m_npatterns / 4, const_full_nelts,
+ m_npatterns / 4))
+ {
+ reshape (m_npatterns / 4, 3);
+ while ((m_npatterns & 1) == 0 && try_npatterns (m_npatterns / 2))
+ continue;
+ }
+ }
+ else
+ /* For the non-power-of-2 case, do a simple search up from 1. */
+ for (unsigned int i = 1; i <= m_npatterns / 2; ++i)
+ if (m_npatterns % i == 0 && try_npatterns (i))
+ break;
+}
+
+#endif
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/version.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/version.h
new file mode 100644
index 0000000..bf67536
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/version.h
@@ -0,0 +1,26 @@
+#ifndef VERSION_H
+#define VERSION_H
+
+/* Generated automatically by genversion. */
+
+#define GCC_major_version 13
+
+/* The complete version string, assembled from several pieces.
+BASEVER, DATESTAMP, DEVPHASE, and REVISION are defined by the
+Makefile. */
+
+#define version_string "13.2.1 20231009"
+#define pkgversion_string "(Arm GNU Toolchain 13.2.rel1 (Build arm-13.7)) "
+
+/* This is the location of the online document giving instructions for
+reporting bugs. If you distribute a modified version of GCC,
+please configure with --with-bugurl pointing to a document giving
+instructions for reporting bugs to you, not us. (You are of course
+welcome to forward us bugs reported to you, if you determine that
+they are not bugs in your modifications.) */
+
+#define bug_report_url "<https://bugs.linaro.org/>"
+
+#define GCOV_VERSION ((gcov_unsigned_t)0x4233322a) /* B32* */
+
+#endif /* VERSION_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vmsdbg.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vmsdbg.h
new file mode 100644
index 0000000..b5b276c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vmsdbg.h
@@ -0,0 +1,249 @@
+/* Definitions for the data structures and codes used in VMS debugging.
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VMSDBG_H
+#define GCC_VMSDBG_H 1
+
+/* We define types and constants used in VMS Debug output. Note that the
+ structs only approximate the output that is written. We write the output
+ explicitly, field by field. This output would only agree with the
+ structs in this file if no padding were done. The sizes after each
+ struct are the size actually written, which is usually smaller than the
+ size of the struct. */
+
+/* Header type codes. */
+typedef enum _DST_TYPE {DST_K_TBG = 0x17,
+ DST_K_SOURCE = 155, DST_K_PROLOG = 162,
+ DST_K_BLKBEG = 176, DST_K_BLKEND = 177,
+ DST_K_LINE_NUM = 185, DST_K_MODBEG = 188,
+ DST_K_MODEND = 189, DST_K_RTNBEG = 190,
+ DST_K_RTNEND = 191} DST_DTYPE;
+
+/* Header. */
+
+typedef struct _DST_HEADER
+{
+ union
+ {
+ unsigned short int dst_w_length;
+ unsigned short int dst_x_length;
+ } dst__header_length;
+ union
+ {
+ ENUM_BITFIELD (_DST_TYPE) dst_w_type : 16;
+ ENUM_BITFIELD (_DST_TYPE) dst_x_type : 16;
+ } dst__header_type;
+} DST_HEADER;
+#define DST_K_DST_HEADER_SIZE sizeof 4
+
+/* Language type codes. */
+typedef enum _DST_LANGUAGE {DST_K_FORTRAN = 1, DST_K_C = 7, DST_K_ADA = 9,
+ DST_K_UNKNOWN = 10, DST_K_CXX = 15} DST_LANGUAGE;
+
+/* Module header (a module is the result of a single compilation). */
+
+typedef struct _DST_MODULE_BEGIN
+{
+ DST_HEADER dst_a_modbeg_header;
+ struct
+ {
+ unsigned dst_v_modbeg_hide : 1;
+ unsigned dst_v_modbeg_version : 1;
+ unsigned dst_v_modbeg_unused : 6;
+ } dst_b_modbeg_flags;
+ unsigned char dst_b_modbeg_unused;
+ DST_LANGUAGE dst_l_modbeg_language;
+ unsigned short int dst_w_version_major;
+ unsigned short int dst_w_version_minor;
+ unsigned char dst_b_modbeg_name;
+} DST_MODULE_BEGIN;
+#define DST_K_MODBEG_SIZE 15
+
+/* Module trailer. */
+
+typedef struct _DST_MB_TRLR
+{
+ unsigned char dst_b_compiler;
+} DST_MB_TRLR;
+
+#define DST_K_MB_TRLR_SIZE 1
+
+#define DST_K_VERSION_MAJOR 1
+#define DST_K_VERSION_MINOR 13
+
+typedef struct _DST_MODULE_END
+{
+ DST_HEADER dst_a_modend_header;
+} DST_MODULE_END;
+#define DST_K_MODEND_SIZE sizeof 4
+
+/* Routine header. */
+
+typedef struct _DST_ROUTINE_BEGIN
+{
+ DST_HEADER dst_a_rtnbeg_header;
+ struct
+ {
+ unsigned dst_v_rtnbeg_unused : 4;
+ unsigned dst_v_rtnbeg_unalloc : 1;
+ unsigned dst_v_rtnbeg_prototype : 1;
+ unsigned dst_v_rtnbeg_inlined : 1;
+ unsigned dst_v_rtnbeg_no_call : 1;
+ } dst_b_rtnbeg_flags;
+ int *dst_l_rtnbeg_address;
+ int *dst_l_rtnbeg_pd_address;
+ unsigned char dst_b_rtnbeg_name;
+} DST_ROUTINE_BEGIN;
+#define DST_K_RTNBEG_SIZE 14
+
+/* Routine trailer */
+
+typedef struct _DST_ROUTINE_END
+{
+ DST_HEADER dst_a_rtnend_header;
+ char dst_b_rtnend_unused;
+ unsigned int dst_l_rtnend_size;
+} DST_ROUTINE_END;
+#define DST_K_RTNEND_SIZE 9
+
+/* Block header. */
+
+typedef struct _DST_BLOCK_BEGIN
+{
+ DST_HEADER dst_a_blkbeg_header;
+ unsigned char dst_b_blkbeg_unused;
+ int *dst_l_blkbeg_address;
+ unsigned char dst_b_blkbeg_name;
+} DST_BLOCK_BEGIN;
+#define DST_K_BLKBEG_SIZE 10
+
+/* Block trailer. */
+
+typedef struct _DST_BLOCK_END
+{
+ DST_HEADER dst_a_blkend_header;
+ unsigned char dst_b_blkend_unused;
+ unsigned int dst_l_blkend_size;
+} DST_BLOCK_END;
+#define DST_K_BLKEND_SIZE 9
+
+/* Line number header. */
+
+typedef struct _DST_LINE_NUM_HEADER
+{
+ DST_HEADER dst_a_line_num_header;
+} DST_LINE_NUM_HEADER;
+#define DST_K_LINE_NUM_HEADER_SIZE 4
+
+/* PC to Line number correlation. */
+
+typedef struct _DST_PCLINE_COMMANDS
+{
+ char dst_b_pcline_command;
+ union
+ {
+ unsigned int dst_l_pcline_unslong;
+ unsigned short int dst_w_pcline_unsword;
+ unsigned char dst_b_pcline_unsbyte;
+ } dst_a_pcline_access_fields;
+} DST_PCLINE_COMMANDS;
+
+/* PC and Line number correlation codes. */
+
+#define DST_K_PCLINE_COMMANDS_SIZE 5
+#define DST_K_PCLINE_COMMANDS_SIZE_MIN 2
+#define DST_K_PCLINE_COMMANDS_SIZE_MAX 5
+#define DST_K_DELTA_PC_LOW -128
+#define DST_K_DELTA_PC_HIGH 0
+#define DST_K_DELTA_PC_W 1
+#define DST_K_INCR_LINUM 2
+#define DST_K_INCR_LINUM_W 3
+#define DST_K_SET_LINUM 9
+#define DST_K_SET_ABS_PC 16
+#define DST_K_DELTA_PC_L 17
+#define DST_K_INCR_LINUM_L 18
+#define DST_K_SET_LINUM_B 19
+#define DST_K_SET_LINUM_L 20
+
+/* Source file correlation header. */
+
+typedef struct _DST_SOURCE_CORR
+{
+ DST_HEADER dst_a_source_corr_header;
+} DST_SOURCE_CORR;
+#define DST_K_SOURCE_CORR_HEADER_SIZE 4
+
+/* Source file correlation codes. */
+
+#define DST_K_SRC_DECLFILE 1
+#define DST_K_SRC_SETFILE 2
+#define DST_K_SRC_SETREC_L 3
+#define DST_K_SRC_SETREC_W 4
+#define DST_K_SRC_SETLNUM_L 5
+#define DST_K_SRC_SETLNUM_W 6
+#define DST_K_SRC_INCRLNUM_B 7
+#define DST_K_SRC_DEFLINES_W 10
+#define DST_K_SRC_DEFLINES_B 11
+#define DST_K_SRC_FORMFEED 16
+#define DST_K_SRC_MIN_CMD 1
+#define DST_K_SRC_MAX_CMD 16
+
+/* Source file header. */
+
+typedef struct _DST_SRC_COMMAND
+{
+ unsigned char dst_b_src_command;
+ union
+ {
+ struct
+ {
+ unsigned char dst_b_src_df_length;
+ unsigned char dst_b_src_df_flags;
+ unsigned short int dst_w_src_df_fileid;
+ int64_t dst_q_src_df_rms_cdt;
+ unsigned int dst_l_src_df_rms_ebk;
+ unsigned short int dst_w_src_df_rms_ffb;
+ unsigned char dst_b_src_df_rms_rfo;
+ unsigned char dst_b_src_df_filename;
+ } dst_a_src_decl_src;
+ unsigned int dst_l_src_unslong;
+ unsigned short int dst_w_src_unsword;
+ unsigned char dst_b_src_unsbyte;
+ } dst_a_src_cmd_fields;
+} DST_SRC_COMMAND;
+#define DST_K_SRC_COMMAND_SIZE 21
+
+/* Source file trailer. */
+
+typedef struct _DST_SRC_CMDTRLR
+{
+ unsigned char dst_b_src_df_libmodname;
+} DST_SRC_CMDTRLR;
+#define DST_K_SRC_CMDTRLR_SIZE 1
+
+/* Prolog header. */
+
+typedef struct _DST_PROLOG
+{
+ DST_HEADER dst_a_prolog_header;
+ unsigned int dst_l_prolog_bkpt_addr;
+} DST_PROLOG;
+#define DST_K_PROLOG_SIZE 8
+
+#endif /* GCC_VMSDBG_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vr-values.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vr-values.h
new file mode 100644
index 0000000..00fcf77
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vr-values.h
@@ -0,0 +1,85 @@
+/* Support routines for Value Range Propagation (VRP).
+ Copyright (C) 2016-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_VR_VALUES_H
+#define GCC_VR_VALUES_H
+
+#include "value-query.h"
+
+// Abstract class to return a range for a given SSA.
+
+// Class to simplify a statement using range information.
+
+class simplify_using_ranges
+{
+public:
+ simplify_using_ranges (range_query *query = NULL,
+ int not_executable_flag = 0);
+ ~simplify_using_ranges ();
+ bool simplify (gimple_stmt_iterator *);
+ bool fold_cond (gcond *);
+private:
+ void vrp_visit_cond_stmt (gcond *, edge *);
+ tree vrp_evaluate_conditional_warnv_with_ops (gimple *stmt, enum tree_code,
+ tree, tree, bool *, bool *);
+ bool simplify_casted_cond (gcond *);
+ bool simplify_truth_ops_using_ranges (gimple_stmt_iterator *, gimple *);
+ bool simplify_div_or_mod_using_ranges (gimple_stmt_iterator *, gimple *);
+ bool simplify_abs_using_ranges (gimple_stmt_iterator *, gimple *);
+ bool simplify_bit_ops_using_ranges (gimple_stmt_iterator *, gimple *);
+ bool simplify_min_or_max_using_ranges (gimple_stmt_iterator *, gimple *);
+ bool simplify_cond_using_ranges_1 (gcond *);
+ bool simplify_switch_using_ranges (gswitch *);
+ bool simplify_float_conversion_using_ranges (gimple_stmt_iterator *,
+ gimple *);
+ bool simplify_internal_call_using_ranges (gimple_stmt_iterator *, gimple *);
+
+ bool two_valued_val_range_p (tree, tree *, tree *, gimple *);
+ bool op_with_boolean_value_range_p (tree, gimple *);
+ tree compare_name_with_value (enum tree_code, tree, tree, bool *, gimple *);
+ const value_range *get_vr_for_comparison (int, value_range *, gimple *s);
+ tree vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code,
+ tree, tree,
+ bool *, gimple *s);
+ void set_and_propagate_unexecutable (edge e);
+ void cleanup_edges_and_switches (void);
+
+ /* Vectors of edges that need removing and switch statements that
+ need updating. It is expected that a pass using the simplification
+ routines will, at the end of the pass, clean up the edges and
+ switch statements. The class dtor will try to detect cases
+ that do not follow that expectation. */
+ struct switch_update {
+ gswitch *stmt;
+ tree vec;
+ };
+
+ vec<edge> to_remove_edges;
+ vec<switch_update> to_update_switch_stmts;
+ class range_query *query;
+  int m_not_executable_flag;	// Nonzero if the not_executable flag exists.
+ vec<edge> m_flag_set_edges; // List of edges with flag to be cleared.
+};
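+
+/* Illustrative usage sketch (not part of the upstream header; assumes
+   a pass context where get_range_query and the gsi iterators are
+   available):
+
+     simplify_using_ranges simplifier (get_range_query (cfun));
+     for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
+	  gsi_next (&gsi))
+       simplifier.simplify (&gsi);
+     // The pass must later clean up any removed edges and updated
+     // switches, as described above.  */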
+
+extern bool range_fits_type_p (const value_range *vr,
+ unsigned dest_precision, signop dest_sgn);
+extern bool bounds_of_var_in_loop (tree *min, tree *max, range_query *,
+ class loop *loop, gimple *stmt, tree var);
+
+#endif /* GCC_VR_VALUES_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vtable-verify.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vtable-verify.h
new file mode 100644
index 0000000..37e745a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/vtable-verify.h
@@ -0,0 +1,143 @@
+/* Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Virtual Table Pointer Security. */
+
+#ifndef VTABLE_VERIFY_H
+#define VTABLE_VERIFY_H
+
+#include "sbitmap.h"
+
+/* The function decl used to create calls to __VLTVtableVerify. It must
+ be global because it needs to be initialized in the C++ front end, but
+ used in the middle end (in the vtable verification pass). */
+
+extern tree verify_vtbl_ptr_fndecl;
+
+/* Global variable keeping track of how many vtable map variables we
+ have created. */
+extern unsigned num_vtable_map_nodes;
+
+/* Keep track of how many virtual calls we are actually verifying. */
+extern int total_num_virtual_calls;
+extern int total_num_verified_vcalls;
+
+/* Each vtable map variable corresponds to a virtual class. Each
+ vtable map variable has a hash table associated with it, that keeps
+ track of the vtable pointers for which we have generated a call to
+ __VLTRegisterPair (with the current vtable map variable). This is
+ the hash table node that is used for each entry in this hash table
+ of vtable pointers.
+
+ Sometimes there are multiple valid vtable pointer entries that use
+ the same vtable pointer decl with different offsets. Therefore,
+ for each vtable pointer in the hash table, there is also an array
+ of offsets used with that vtable. */
+
+struct vtable_registration
+{
+ tree vtable_decl; /* The var decl of the vtable. */
+ vec<unsigned> offsets; /* The offsets array. */
+};
+
+struct registration_hasher : nofree_ptr_hash <struct vtable_registration>
+{
+ static inline hashval_t hash (const vtable_registration *);
+ static inline bool equal (const vtable_registration *,
+ const vtable_registration *);
+};
+
+typedef hash_table<registration_hasher> register_table_type;
+typedef register_table_type::iterator registration_iterator_type;
+
+/* This struct is used to represent the class hierarchy information
+ that we need. Each vtable map variable has an associated class
+ hierarchy node (struct vtv_graph_node). Note: In this struct,
+ 'children' means immediate descendants in the class hierarchy;
+ 'descendant' means any descendant however many levels deep. */
+
+struct vtv_graph_node {
+ tree class_type; /* The record_type of the class. */
+ unsigned class_uid; /* A unique, monotonically
+ ascending id for class node.
+ Each vtable map node also has
+ an id. The class uid is the
+ same as the vtable map node id
+ for nodes corresponding to the
+ same class. */
+ unsigned num_processed_children; /* # of children for whom we have
+ computed the class hierarchy
+ transitive closure. */
+ vec<struct vtv_graph_node *> parents; /* Vector of parents in the graph. */
+ vec<struct vtv_graph_node *> children; /* Vector of children in the graph.*/
+ sbitmap descendants; /* Bitmap representing all this node's
+ descendants in the graph. */
+};
+
+/* This is the node used for our hashtable of vtable map variable
+ information. When we create a vtable map variable (var decl) we
+ put it into one of these nodes; create a corresponding
+ vtv_graph_node for our class hierarchy info and store that in this
+ node; generate a unique (monotonically ascending) id for both the
+ vtbl_map_node and the vtv_graph_node; and insert the node into two
+ data structures (to make it easy to find in several different
+ ways): 1). A hash table ("vtbl_map_hash" in vtable-verify.cc).
+ This gives us an easy way to check to see if we already have a node
+ for the vtable map variable or not; and 2). An array (vector) of
+ vtbl_map_nodes, where the array index corresponds to the unique id
+ of the vtbl_map_node, which gives us an easy way to use bitmaps to
+ represent and find the vtable map nodes. */
+
+struct vtbl_map_node {
+ tree vtbl_map_decl; /* The var decl for the vtable map
+ variable. */
+ tree class_name; /* The DECL_ASSEMBLER_NAME of the
+ class. */
+ struct vtv_graph_node *class_info; /* Our class hierarchy info for the
+ class. */
+ unsigned uid; /* The unique id for the vtable map
+ variable. */
+ struct vtbl_map_node *next, *prev; /* Pointers for the linked list
+ structure. */
+ register_table_type *registered; /* Hashtable of vtable pointers for which
+ we have generated a _VLTRegisterPair
+ call with this vtable map variable. */
+ bool is_used; /* Boolean indicating if we used this vtable map
+ variable in a call to __VLTVerifyVtablePointer. */
+};
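+
+/* Illustrative usage sketch (not part of the upstream header; TYPE is
+   assumed to be the RECORD_TYPE of a virtual class):
+
+     struct vtbl_map_node *node = find_or_create_vtbl_map_node (type);
+     // The node can be recovered via the hash table ...
+     gcc_assert (vtbl_map_get_node (type) == node);
+     // ... or by indexing the global vector with its unique id.
+     gcc_assert (vtbl_map_nodes_vec[node->uid] == node);  */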
+
+/* Controls debugging for vtable verification. */
+extern bool vtv_debug;
+
+/* The global vector of vtbl_map_nodes. */
+extern vec<struct vtbl_map_node *> vtbl_map_nodes_vec;
+
+/* The global vectors for mangled class names for anonymous classes. */
+extern GTY(()) vec<tree, va_gc> *vtbl_mangled_name_types;
+extern GTY(()) vec<tree, va_gc> *vtbl_mangled_name_ids;
+
+extern void vtbl_register_mangled_name (tree, tree);
+extern struct vtbl_map_node *vtbl_map_get_node (tree);
+extern struct vtbl_map_node *find_or_create_vtbl_map_node (tree);
+extern void vtbl_map_node_class_insert (struct vtbl_map_node *, unsigned);
+extern bool vtbl_map_node_registration_find (struct vtbl_map_node *,
+ tree, unsigned);
+extern bool vtbl_map_node_registration_insert (struct vtbl_map_node *,
+ tree, unsigned);
+
+#endif /* VTABLE_VERIFY_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/wide-int-bitmask.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/wide-int-bitmask.h
new file mode 100644
index 0000000..1cbe567
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/wide-int-bitmask.h
@@ -0,0 +1,143 @@
+/* Operation with 128 bit bitmask.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_WIDE_INT_BITMASK_H
+#define GCC_WIDE_INT_BITMASK_H
+
+class wide_int_bitmask
+{
+public:
+ constexpr wide_int_bitmask ();
+ constexpr wide_int_bitmask (uint64_t l);
+ constexpr wide_int_bitmask (uint64_t l, uint64_t h);
+ inline wide_int_bitmask &operator &= (wide_int_bitmask);
+ inline wide_int_bitmask &operator |= (wide_int_bitmask);
+ constexpr wide_int_bitmask operator ~ () const;
+ constexpr wide_int_bitmask operator & (wide_int_bitmask) const;
+ constexpr wide_int_bitmask operator | (wide_int_bitmask) const;
+ inline wide_int_bitmask operator >> (int);
+ inline wide_int_bitmask operator << (int);
+ inline bool operator == (wide_int_bitmask) const;
+ inline bool operator != (wide_int_bitmask) const;
+ uint64_t low, high;
+};
+
+constexpr
+wide_int_bitmask::wide_int_bitmask ()
+: low (0), high (0)
+{
+}
+
+constexpr
+wide_int_bitmask::wide_int_bitmask (uint64_t l)
+: low (l), high (0)
+{
+}
+
+constexpr
+wide_int_bitmask::wide_int_bitmask (uint64_t l, uint64_t h)
+: low (l), high (h)
+{
+}
+
+inline wide_int_bitmask &
+wide_int_bitmask::operator &= (wide_int_bitmask b)
+{
+ low &= b.low;
+ high &= b.high;
+ return *this;
+}
+
+inline wide_int_bitmask &
+wide_int_bitmask::operator |= (wide_int_bitmask b)
+{
+ low |= b.low;
+ high |= b.high;
+ return *this;
+}
+
+constexpr wide_int_bitmask
+wide_int_bitmask::operator ~ () const
+{
+ return wide_int_bitmask (~low, ~high);
+}
+
+constexpr wide_int_bitmask
+wide_int_bitmask::operator | (wide_int_bitmask b) const
+{
+ return wide_int_bitmask (low | b.low, high | b.high);
+}
+
+constexpr wide_int_bitmask
+wide_int_bitmask::operator & (wide_int_bitmask b) const
+{
+ return wide_int_bitmask (low & b.low, high & b.high);
+}
+
+inline wide_int_bitmask
+wide_int_bitmask::operator << (int amount)
+{
+ wide_int_bitmask ret;
+ if (amount >= 64)
+ {
+ ret.low = 0;
+ ret.high = low << (amount - 64);
+ }
+ else if (amount == 0)
+ ret = *this;
+ else
+ {
+ ret.low = low << amount;
+ ret.high = (low >> (64 - amount)) | (high << amount);
+ }
+ return ret;
+}
+
+inline wide_int_bitmask
+wide_int_bitmask::operator >> (int amount)
+{
+ wide_int_bitmask ret;
+ if (amount >= 64)
+ {
+ ret.low = high >> (amount - 64);
+ ret.high = 0;
+ }
+ else if (amount == 0)
+ ret = *this;
+ else
+ {
+ ret.low = (high << (64 - amount)) | (low >> amount);
+ ret.high = high >> amount;
+ }
+ return ret;
+}
+
+inline bool
+wide_int_bitmask::operator == (wide_int_bitmask b) const
+{
+ return low == b.low && high == b.high;
+}
+
+inline bool
+wide_int_bitmask::operator != (wide_int_bitmask b) const
+{
+ return low != b.low || high != b.high;
+}
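+
+/* Illustrative usage sketch (not part of the upstream header): setting
+   and testing a bit above 64 exercises the cross-word shift above.
+
+     wide_int_bitmask bit100 = wide_int_bitmask (1) << 100;
+     // bit100.low == 0, bit100.high == 1ULL << 36
+     wide_int_bitmask m = bit100 | wide_int_bitmask (1);
+     bool set = (m & bit100) != wide_int_bitmask ();  // true
+*/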
+
+#endif /* ! GCC_WIDE_INT_BITMASK_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/wide-int-print.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/wide-int-print.h
new file mode 100644
index 0000000..6d5fe7a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/wide-int-print.h
@@ -0,0 +1,38 @@
+/* Print wide integers.
+ Copyright (C) 2013-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef WIDE_INT_PRINT_H
+#define WIDE_INT_PRINT_H
+
+#include <stdio.h>
+
+#define WIDE_INT_PRINT_BUFFER_SIZE (WIDE_INT_MAX_PRECISION / 4 + 4)
+
+/* Printing functions. */
+
+extern void print_dec (const wide_int_ref &wi, char *buf, signop sgn);
+extern void print_dec (const wide_int_ref &wi, FILE *file, signop sgn);
+extern void print_decs (const wide_int_ref &wi, char *buf);
+extern void print_decs (const wide_int_ref &wi, FILE *file);
+extern void print_decu (const wide_int_ref &wi, char *buf);
+extern void print_decu (const wide_int_ref &wi, FILE *file);
+extern void print_hex (const wide_int_ref &wi, char *buf);
+extern void print_hex (const wide_int_ref &wi, FILE *file);
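+
+/* Illustrative usage sketch (not part of the upstream header):
+
+     char buf[WIDE_INT_PRINT_BUFFER_SIZE];
+     print_hex (wi::shwi (255, 32), buf);	    // hex form of 255
+     print_dec (wi::shwi (-1, 8), stderr, SIGNED);  // prints -1
+*/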
+
+#endif /* WIDE_INT_PRINT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/wide-int.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/wide-int.h
new file mode 100644
index 0000000..8c1c14f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/wide-int.h
@@ -0,0 +1,3513 @@
+/* Operations with very long integers. -*- C++ -*-
+ Copyright (C) 2012-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef WIDE_INT_H
+#define WIDE_INT_H
+
+/* wide-int.[cc|h] implements a class that efficiently performs
+ mathematical operations on finite precision integers. wide_ints
+ are designed to be transient - they are not for long term storage
+ of values. There is tight integration between wide_ints and the
+ other longer storage GCC representations (rtl and tree).
+
+ The actual precision of a wide_int depends on the flavor. There
+ are three predefined flavors:
+
+ 1) wide_int (the default). This flavor does the math in the
+ precision of its input arguments. It is assumed (and checked)
+ that the precisions of the operands and results are consistent.
+ This is the most efficient flavor. It is not possible to examine
+ bits above the precision that has been specified. Because of
+ this, the default flavor has semantics that are simple to
+ understand and in general model the underlying hardware that the
+   compiler is targeted for.
+
+ This flavor must be used at the RTL level of gcc because there
+ is, in general, not enough information in the RTL representation
+ to extend a value beyond the precision specified in the mode.
+
+ This flavor should also be used at the TREE and GIMPLE levels of
+ the compiler except for the circumstances described in the
+ descriptions of the other two flavors.
+
+ The default wide_int representation does not contain any
+ information inherent about signedness of the represented value,
+ so it can be used to represent both signed and unsigned numbers.
+ For operations where the results depend on signedness (full width
+ multiply, division, shifts, comparisons, and operations that need
+ overflow detected), the signedness must be specified separately.
+
+ 2) offset_int. This is a fixed-precision integer that can hold
+ any address offset, measured in either bits or bytes, with at
+ least one extra sign bit. At the moment the maximum address
+ size GCC supports is 64 bits. With 8-bit bytes and an extra
+ sign bit, offset_int therefore needs to have at least 68 bits
+ of precision. We round this up to 128 bits for efficiency.
+ Values of type T are converted to this precision by sign- or
+ zero-extending them based on the signedness of T.
+
+ The extra sign bit means that offset_int is effectively a signed
+ 128-bit integer, i.e. it behaves like int128_t.
+
+ Since the values are logically signed, there is no need to
+ distinguish between signed and unsigned operations. Sign-sensitive
+ comparison operators <, <=, > and >= are therefore supported.
+ Shift operators << and >> are also supported, with >> being
+ an _arithmetic_ right shift.
+
+ [ Note that, even though offset_int is effectively int128_t,
+ it can still be useful to use unsigned comparisons like
+ wi::leu_p (a, b) as a more efficient short-hand for
+ "a >= 0 && a <= b". ]
+
+ 3) widest_int. This representation is an approximation of
+ infinite precision math. However, it is not really infinite
+ precision math as in the GMP library. It is really finite
+ precision math where the precision is 4 times the size of the
+ largest integer that the target port can represent.
+
+ Like offset_int, widest_int is wider than all the values that
+ it needs to represent, so the integers are logically signed.
+ Sign-sensitive comparison operators <, <=, > and >= are supported,
+ as are << and >>.
+
+   There are several places in GCC where this should/must be used:
+
+ * Code that does induction variable optimizations. This code
+ works with induction variables of many different types at the
+ same time. Because of this, it ends up doing many different
+ calculations where the operands are not compatible types. The
+ widest_int makes this easy, because it provides a field where
+       nothing is lost when converting from any variable.
+
+ * There are a small number of passes that currently use the
+ widest_int that should use the default. These should be
+ changed.
+
+ There are surprising features of offset_int and widest_int
+ that the users should be careful about:
+
+   1) Shifts and rotations are just weird.  You have to specify the
+   precision in which the shift or rotate is to happen.  The bits
+   above this precision are zeroed.  While this is what you
+   want, it is clearly non-obvious.
+
+ 2) Larger precision math sometimes does not produce the same
+ answer as would be expected for doing the math at the proper
+ precision. In particular, a multiply followed by a divide will
+ produce a different answer if the first product is larger than
+ what can be represented in the input precision.
+
+ The offset_int and the widest_int flavors are more expensive
+   than the default wide_int, so in addition to the caveats with these
+   two, the default is the preferred representation.
+
+ All three flavors of wide_int are represented as a vector of
+ HOST_WIDE_INTs. The default and widest_int vectors contain enough elements
+ to hold a value of MAX_BITSIZE_MODE_ANY_INT bits. offset_int contains only
+ enough elements to hold ADDR_MAX_PRECISION bits. The values are stored
+ in the vector with the least significant HOST_BITS_PER_WIDE_INT bits
+ in element 0.
+
+ The default wide_int contains three fields: the vector (VAL),
+ the precision and a length (LEN). The length is the number of HWIs
+ needed to represent the value. widest_int and offset_int have a
+ constant precision that cannot be changed, so they only store the
+ VAL and LEN fields.
+
+ Since most integers used in a compiler are small values, it is
+ generally profitable to use a representation of the value that is
+ as small as possible. LEN is used to indicate the number of
+ elements of the vector that are in use. The numbers are stored as
+ sign extended numbers as a means of compression. Leading
+ HOST_WIDE_INTs that contain strings of either -1 or 0 are removed
+ as long as they can be reconstructed from the top bit that is being
+ represented.
+
+ The precision and length of a wide_int are always greater than 0.
+ Any bits in a wide_int above the precision are sign-extended from the
+ most significant bit. For example, a 4-bit value 0x8 is represented as
+ VAL = { 0xf...fff8 }. However, as an optimization, we allow other integer
+ constants to be represented with undefined bits above the precision.
+ This allows INTEGER_CSTs to be pre-extended according to TYPE_SIGN,
+ so that the INTEGER_CST representation can be used both in TYPE_PRECISION
+ and in wider precisions.
+
+ There are constructors to create the various forms of wide_int from
+ trees, rtl and constants. For trees the options are:
+
+ tree t = ...;
+ wi::to_wide (t) // Treat T as a wide_int
+ wi::to_offset (t) // Treat T as an offset_int
+ wi::to_widest (t) // Treat T as a widest_int
+
+ All three are light-weight accessors that should have no overhead
+ in release builds. If it is useful for readability reasons to
+ store the result in a temporary variable, the preferred method is:
+
+ wi::tree_to_wide_ref twide = wi::to_wide (t);
+ wi::tree_to_offset_ref toffset = wi::to_offset (t);
+ wi::tree_to_widest_ref twidest = wi::to_widest (t);
+
+ To make an rtx into a wide_int, you have to pair it with a mode.
+ The canonical way to do this is with rtx_mode_t as in:
+
+ rtx r = ...
+ wide_int x = rtx_mode_t (r, mode);
+
+ Similarly, a wide_int can only be constructed from a host value if
+ the target precision is given explicitly, such as in:
+
+ wide_int x = wi::shwi (c, prec); // sign-extend C if necessary
+ wide_int y = wi::uhwi (c, prec); // zero-extend C if necessary
+
+ However, offset_int and widest_int have an inherent precision and so
+ can be initialized directly from a host value:
+
+ offset_int x = (int) c; // sign-extend C
+ widest_int x = (unsigned int) c; // zero-extend C
+
+ It is also possible to do arithmetic directly on rtx_mode_ts and
+ constants. For example:
+
+ wi::add (r1, r2); // add equal-sized rtx_mode_ts r1 and r2
+ wi::add (r1, 1); // add 1 to rtx_mode_t r1
+ wi::lshift (1, 100); // 1 << 100 as a widest_int
+
+ Many binary operations place restrictions on the combinations of inputs,
+ using the following rules:
+
+ - {rtx, wide_int} op {rtx, wide_int} -> wide_int
+ The inputs must be the same precision. The result is a wide_int
+ of the same precision
+
+ - {rtx, wide_int} op (un)signed HOST_WIDE_INT -> wide_int
+ (un)signed HOST_WIDE_INT op {rtx, wide_int} -> wide_int
+ The HOST_WIDE_INT is extended or truncated to the precision of
+ the other input. The result is a wide_int of the same precision
+ as that input.
+
+ - (un)signed HOST_WIDE_INT op (un)signed HOST_WIDE_INT -> widest_int
+ The inputs are extended to widest_int precision and produce a
+ widest_int result.
+
+ - offset_int op offset_int -> offset_int
+ offset_int op (un)signed HOST_WIDE_INT -> offset_int
+ (un)signed HOST_WIDE_INT op offset_int -> offset_int
+
+ - widest_int op widest_int -> widest_int
+ widest_int op (un)signed HOST_WIDE_INT -> widest_int
+ (un)signed HOST_WIDE_INT op widest_int -> widest_int
+
+ Other combinations like:
+
+ - widest_int op offset_int and
+ - wide_int op offset_int
+
+ are not allowed. The inputs should instead be extended or truncated
+ so that they match.
+
+ The inputs to comparison functions like wi::eq_p and wi::lts_p
+ follow the same compatibility rules, although their return types
+ are different. Unary functions on X produce the same result as
+ a binary operation X + X. Shift functions X op Y also produce
+ the same result as X + X; the precision of the shift amount Y
+ can be arbitrarily different from X. */
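+
+/* Illustrative usage sketch (not part of the upstream header):
+
+     wide_int a = wi::shwi (-1, 8);	   // 8-bit -1, i.e. 0xff
+     wide_int b = wi::uhwi (1, 8);
+     wide_int sum = wi::add (a, b);	   // wraps to 0 in 8 bits
+     bool neg = wi::neg_p (a, SIGNED);	   // true
+
+     offset_int off = 100;		   // fixed 128-bit signed flavor
+     offset_int scaled = off << 3;	   // 800
+*/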
+
+/* The MAX_BITSIZE_MODE_ANY_INT is automatically generated by a very
+ early examination of the target's mode file. The WIDE_INT_MAX_ELTS
+ can accommodate at least 1 more bit so that unsigned numbers of that
+ mode can be represented as signed values. Note that it is still
+ possible to create fixed_wide_ints that have precisions greater than
+ MAX_BITSIZE_MODE_ANY_INT. This can be useful when representing a
+ double-width multiplication result, for example. */
+#define WIDE_INT_MAX_ELTS \
+ ((MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT) / HOST_BITS_PER_WIDE_INT)
+
+#define WIDE_INT_MAX_PRECISION (WIDE_INT_MAX_ELTS * HOST_BITS_PER_WIDE_INT)
+
+/* This is the max size of any pointer on any machine. It does not
+ seem to be as easy to sniff this out of the machine description as
+ it is for MAX_BITSIZE_MODE_ANY_INT since targets may support
+ multiple address sizes and may have different address sizes for
+ different address spaces. However, currently the largest pointer
+ on any platform is 64 bits. When that changes, then it is likely
+ that a target hook should be defined so that targets can make this
+ value larger for those targets. */
+#define ADDR_MAX_BITSIZE 64
+
+/* This is the internal precision used when doing any address
+ arithmetic. The '4' is really 3 + 1. Three of the bits provide the
+ extra precision needed for bit addresses, and the other bit allows
+ everything to be signed without losing any precision.
+ Then everything is rounded up to the next HWI for efficiency. */
+#define ADDR_MAX_PRECISION \
+ ((ADDR_MAX_BITSIZE + 4 + HOST_BITS_PER_WIDE_INT - 1) \
+ & ~(HOST_BITS_PER_WIDE_INT - 1))
+
+/* The number of HWIs needed to store an offset_int. */
+#define OFFSET_INT_ELTS (ADDR_MAX_PRECISION / HOST_BITS_PER_WIDE_INT)
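+
+/* For concreteness: on a host where HOST_BITS_PER_WIDE_INT is 64 and
+ a target where ADDR_MAX_BITSIZE is 64, ADDR_MAX_PRECISION works out
+ to (64 + 4 + 63) & ~63 = 128 and OFFSET_INT_ELTS to 2. Other hosts
+ or targets may differ; this is just a worked instance of the
+ formulas above. */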
+
+/* The type of result produced by a binary operation on types T1 and T2.
+ Defined purely for brevity. */
+#define WI_BINARY_RESULT(T1, T2) \
+ typename wi::binary_traits <T1, T2>::result_type
+
+/* Likewise for binary operators, which excludes the case in which neither
+ T1 nor T2 is a wide-int-based type. */
+#define WI_BINARY_OPERATOR_RESULT(T1, T2) \
+ typename wi::binary_traits <T1, T2>::operator_result
+
+/* The type of result produced by T1 << T2. Leads to substitution failure
+ if the operation isn't supported. Defined purely for brevity. */
+#define WI_SIGNED_SHIFT_RESULT(T1, T2) \
+ typename wi::binary_traits <T1, T2>::signed_shift_result_type
+
+/* The type of result produced by a sign-agnostic binary predicate on
+ types T1 and T2. This is bool if wide-int operations make sense for
+ T1 and T2 and leads to substitution failure otherwise. */
+#define WI_BINARY_PREDICATE_RESULT(T1, T2) \
+ typename wi::binary_traits <T1, T2>::predicate_result
+
+/* The type of result produced by a signed binary predicate on types T1 and T2.
+ This is bool if signed comparisons make sense for T1 and T2 and leads to
+ substitution failure otherwise. */
+#define WI_SIGNED_BINARY_PREDICATE_RESULT(T1, T2) \
+ typename wi::binary_traits <T1, T2>::signed_predicate_result
+
+/* The type of result produced by a unary operation on type T. */
+#define WI_UNARY_RESULT(T) \
+ typename wi::binary_traits <T, T>::result_type
+
+/* Define a variable RESULT to hold the result of a binary operation on
+ X and Y, which have types T1 and T2 respectively. Define VAL to
+ point to the blocks of RESULT. Once the user of the macro has
+ filled in VAL, it should call RESULT.set_len to set the number
+ of initialized blocks. */
+#define WI_BINARY_RESULT_VAR(RESULT, VAL, T1, X, T2, Y) \
+ WI_BINARY_RESULT (T1, T2) RESULT = \
+ wi::int_traits <WI_BINARY_RESULT (T1, T2)>::get_binary_result (X, Y); \
+ HOST_WIDE_INT *VAL = RESULT.write_val ()
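+
+/* A sketch of the intended usage pattern, in the style of the inline
+ wi:: operations later in this file (MY_OP is a hypothetical name):
+
+ template <typename T1, typename T2>
+ WI_BINARY_RESULT (T1, T2)
+ my_op (const T1 &x, const T2 &y)
+ {
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ val[0] = ...; // fill in the initialized blocks
+ result.set_len (1); // record how many blocks were written
+ return result;
+ } */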
+
+/* Similar for the result of a unary operation on X, which has type T. */
+#define WI_UNARY_RESULT_VAR(RESULT, VAL, T, X) \
+ WI_UNARY_RESULT (T) RESULT = \
+ wi::int_traits <WI_UNARY_RESULT (T)>::get_binary_result (X, X); \
+ HOST_WIDE_INT *VAL = RESULT.write_val ()
+
+template <typename T> class generic_wide_int;
+template <int N> class fixed_wide_int_storage;
+class wide_int_storage;
+
+/* An N-bit integer. Until we can use typedef templates, use this instead. */
+#define FIXED_WIDE_INT(N) \
+ generic_wide_int < fixed_wide_int_storage <N> >
+
+typedef generic_wide_int <wide_int_storage> wide_int;
+typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) offset_int;
+typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION) widest_int;
+/* Spelled out explicitly (rather than through FIXED_WIDE_INT)
+ so as not to confuse gengtype. */
+typedef generic_wide_int < fixed_wide_int_storage <WIDE_INT_MAX_PRECISION * 2> > widest2_int;
+
+/* wi::storage_ref can be a reference to a primitive type,
+ so this is the conservatively-correct setting. */
+template <bool SE, bool HDP = true>
+class wide_int_ref_storage;
+
+typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;
+
+/* This can be used instead of wide_int_ref if the referenced value is
+ known to have type T. It carries across properties of T's representation,
+ such as whether excess upper bits in a HWI are defined, and can therefore
+ help avoid redundant work.
+
+ The macro could be replaced with a template typedef, once we're able
+ to use those. */
+#define WIDE_INT_REF_FOR(T) \
+ generic_wide_int \
+ <wide_int_ref_storage <wi::int_traits <T>::is_sign_extended, \
+ wi::int_traits <T>::host_dependent_precision> >
+
+namespace wi
+{
+ /* Operations that calculate overflow do so even for
+ TYPE_OVERFLOW_WRAPS types. For example, adding 1 to the maximum value
+ of an unsigned int yields 0 and does not overflow in C/C++, but wi::add
+ will set the overflow argument in case it's needed for further
+ analysis.
+
+ For operations that require overflow, these are the different
+ types of overflow. */
+ enum overflow_type {
+ OVF_NONE = 0,
+ OVF_UNDERFLOW = -1,
+ OVF_OVERFLOW = 1,
+ /* There was an overflow or an underflow, but we are unsure
+ which. */
+ OVF_UNKNOWN = 2
+ };
+
+ /* Classifies an integer based on its precision. */
+ enum precision_type {
+ /* The integer has both a precision and defined signedness. This allows
+ the integer to be converted to any width, since we know whether to fill
+ any extra bits with zeros or signs. */
+ FLEXIBLE_PRECISION,
+
+ /* The integer has a variable precision but no defined signedness. */
+ VAR_PRECISION,
+
+ /* The integer has a constant precision (known at GCC compile time)
+ and is signed. */
+ CONST_PRECISION
+ };
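+
+ /* For orientation, following the int_traits specializations later
+ in this file: primitive C types such as "int" are
+ FLEXIBLE_PRECISION, wide_int is VAR_PRECISION, and offset_int
+ and widest_int are CONST_PRECISION. */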
+
+ /* This class, which has no default implementation, is expected to
+ provide the following members:
+
+ static const enum precision_type precision_type;
+ Classifies the type of T.
+
+ static const unsigned int precision;
+ Only defined if precision_type == CONST_PRECISION. Specifies the
+ precision of all integers of type T.
+
+ static const bool host_dependent_precision;
+ True if the precision of T depends (or can depend) on the host.
+
+ static unsigned int get_precision (const T &x)
+ Return the number of bits in X.
+
+ static wi::storage_ref decompose (HOST_WIDE_INT *scratch,
+ unsigned int precision, const T &x)
+ Decompose X as a PRECISION-bit integer, returning the associated
+ wi::storage_ref. SCRATCH is available as scratch space if needed.
+ The routine should assert that PRECISION is acceptable. */
+ template <typename T> struct int_traits;
+
+ /* This class provides a single type, result_type, which specifies the
+ type of integer produced by a binary operation whose inputs have
+ types T1 and T2. The definition should be symmetric. */
+ template <typename T1, typename T2,
+ enum precision_type P1 = int_traits <T1>::precision_type,
+ enum precision_type P2 = int_traits <T2>::precision_type>
+ struct binary_traits;
+
+ /* Specify the result type for each supported combination of binary
+ inputs. Note that CONST_PRECISION and VAR_PRECISION cannot be
+ mixed, in order to give stronger type checking. When both inputs
+ are CONST_PRECISION, they must have the same precision. */
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
+ {
+ typedef widest_int result_type;
+ /* Don't define operators for this combination. */
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, VAR_PRECISION>
+ {
+ typedef wide_int result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION>
+ {
+ /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
+ so as not to confuse gengtype. */
+ typedef generic_wide_int < fixed_wide_int_storage
+ <int_traits <T2>::precision> > result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
+ typedef result_type signed_shift_result_type;
+ typedef bool signed_predicate_result;
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION>
+ {
+ typedef wide_int result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION>
+ {
+ /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
+ so as not to confuse gengtype. */
+ typedef generic_wide_int < fixed_wide_int_storage
+ <int_traits <T1>::precision> > result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
+ typedef result_type signed_shift_result_type;
+ typedef bool signed_predicate_result;
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
+ {
+ STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
+ /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
+ so as not to confuse gengtype. */
+ typedef generic_wide_int < fixed_wide_int_storage
+ <int_traits <T1>::precision> > result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
+ typedef result_type signed_shift_result_type;
+ typedef bool signed_predicate_result;
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION>
+ {
+ typedef wide_int result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
+ };
+}
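+
+/* Some illustrative resolutions of the traits above, given the
+ typedefs earlier in this file (each comment names the result_type
+ produced):
+
+ wi::binary_traits <int, int> // widest_int
+ wi::binary_traits <int, wide_int> // wide_int
+ wi::binary_traits <int, offset_int> // offset_int
+ wi::binary_traits <offset_int, offset_int> // offset_int */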
+
+/* Public functions for querying and operating on integers. */
+namespace wi
+{
+ template <typename T>
+ unsigned int get_precision (const T &);
+
+ template <typename T1, typename T2>
+ unsigned int get_binary_precision (const T1 &, const T2 &);
+
+ template <typename T1, typename T2>
+ void copy (T1 &, const T2 &);
+
+#define UNARY_PREDICATE \
+ template <typename T> bool
+#define UNARY_FUNCTION \
+ template <typename T> WI_UNARY_RESULT (T)
+#define BINARY_PREDICATE \
+ template <typename T1, typename T2> bool
+#define BINARY_FUNCTION \
+ template <typename T1, typename T2> WI_BINARY_RESULT (T1, T2)
+#define SHIFT_FUNCTION \
+ template <typename T1, typename T2> WI_UNARY_RESULT (T1)
+
+ UNARY_PREDICATE fits_shwi_p (const T &);
+ UNARY_PREDICATE fits_uhwi_p (const T &);
+ UNARY_PREDICATE neg_p (const T &, signop = SIGNED);
+
+ template <typename T>
+ HOST_WIDE_INT sign_mask (const T &);
+
+ BINARY_PREDICATE eq_p (const T1 &, const T2 &);
+ BINARY_PREDICATE ne_p (const T1 &, const T2 &);
+ BINARY_PREDICATE lt_p (const T1 &, const T2 &, signop);
+ BINARY_PREDICATE lts_p (const T1 &, const T2 &);
+ BINARY_PREDICATE ltu_p (const T1 &, const T2 &);
+ BINARY_PREDICATE le_p (const T1 &, const T2 &, signop);
+ BINARY_PREDICATE les_p (const T1 &, const T2 &);
+ BINARY_PREDICATE leu_p (const T1 &, const T2 &);
+ BINARY_PREDICATE gt_p (const T1 &, const T2 &, signop);
+ BINARY_PREDICATE gts_p (const T1 &, const T2 &);
+ BINARY_PREDICATE gtu_p (const T1 &, const T2 &);
+ BINARY_PREDICATE ge_p (const T1 &, const T2 &, signop);
+ BINARY_PREDICATE ges_p (const T1 &, const T2 &);
+ BINARY_PREDICATE geu_p (const T1 &, const T2 &);
+
+ template <typename T1, typename T2>
+ int cmp (const T1 &, const T2 &, signop);
+
+ template <typename T1, typename T2>
+ int cmps (const T1 &, const T2 &);
+
+ template <typename T1, typename T2>
+ int cmpu (const T1 &, const T2 &);
+
+ UNARY_FUNCTION bit_not (const T &);
+ UNARY_FUNCTION neg (const T &);
+ UNARY_FUNCTION neg (const T &, overflow_type *);
+ UNARY_FUNCTION abs (const T &);
+ UNARY_FUNCTION ext (const T &, unsigned int, signop);
+ UNARY_FUNCTION sext (const T &, unsigned int);
+ UNARY_FUNCTION zext (const T &, unsigned int);
+ UNARY_FUNCTION set_bit (const T &, unsigned int);
+
+ BINARY_FUNCTION min (const T1 &, const T2 &, signop);
+ BINARY_FUNCTION smin (const T1 &, const T2 &);
+ BINARY_FUNCTION umin (const T1 &, const T2 &);
+ BINARY_FUNCTION max (const T1 &, const T2 &, signop);
+ BINARY_FUNCTION smax (const T1 &, const T2 &);
+ BINARY_FUNCTION umax (const T1 &, const T2 &);
+
+ BINARY_FUNCTION bit_and (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_and_not (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_or (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_or_not (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_xor (const T1 &, const T2 &);
+ BINARY_FUNCTION add (const T1 &, const T2 &);
+ BINARY_FUNCTION add (const T1 &, const T2 &, signop, overflow_type *);
+ BINARY_FUNCTION sub (const T1 &, const T2 &);
+ BINARY_FUNCTION sub (const T1 &, const T2 &, signop, overflow_type *);
+ BINARY_FUNCTION mul (const T1 &, const T2 &);
+ BINARY_FUNCTION mul (const T1 &, const T2 &, signop, overflow_type *);
+ BINARY_FUNCTION smul (const T1 &, const T2 &, overflow_type *);
+ BINARY_FUNCTION umul (const T1 &, const T2 &, overflow_type *);
+ BINARY_FUNCTION mul_high (const T1 &, const T2 &, signop);
+ BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
+ BINARY_FUNCTION sdiv_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION udiv_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
+ BINARY_FUNCTION udiv_floor (const T1 &, const T2 &);
+ BINARY_FUNCTION sdiv_floor (const T1 &, const T2 &);
+ BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
+ BINARY_FUNCTION udiv_ceil (const T1 &, const T2 &);
+ BINARY_FUNCTION div_round (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
+ BINARY_FUNCTION divmod_trunc (const T1 &, const T2 &, signop,
+ WI_BINARY_RESULT (T1, T2) *);
+ BINARY_FUNCTION gcd (const T1 &, const T2 &, signop = UNSIGNED);
+ BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
+ BINARY_FUNCTION smod_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION umod_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
+ BINARY_FUNCTION umod_floor (const T1 &, const T2 &);
+ BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
+ BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
+
+ template <typename T1, typename T2>
+ bool multiple_of_p (const T1 &, const T2 &, signop);
+
+ template <typename T1, typename T2>
+ bool multiple_of_p (const T1 &, const T2 &, signop,
+ WI_BINARY_RESULT (T1, T2) *);
+
+ SHIFT_FUNCTION lshift (const T1 &, const T2 &);
+ SHIFT_FUNCTION lrshift (const T1 &, const T2 &);
+ SHIFT_FUNCTION arshift (const T1 &, const T2 &);
+ SHIFT_FUNCTION rshift (const T1 &, const T2 &, signop sgn);
+ SHIFT_FUNCTION lrotate (const T1 &, const T2 &, unsigned int = 0);
+ SHIFT_FUNCTION rrotate (const T1 &, const T2 &, unsigned int = 0);
+
+#undef SHIFT_FUNCTION
+#undef BINARY_PREDICATE
+#undef BINARY_FUNCTION
+#undef UNARY_PREDICATE
+#undef UNARY_FUNCTION
+
+ bool only_sign_bit_p (const wide_int_ref &, unsigned int);
+ bool only_sign_bit_p (const wide_int_ref &);
+ int clz (const wide_int_ref &);
+ int clrsb (const wide_int_ref &);
+ int ctz (const wide_int_ref &);
+ int exact_log2 (const wide_int_ref &);
+ int floor_log2 (const wide_int_ref &);
+ int ffs (const wide_int_ref &);
+ int popcount (const wide_int_ref &);
+ int parity (const wide_int_ref &);
+
+ template <typename T>
+ unsigned HOST_WIDE_INT extract_uhwi (const T &, unsigned int, unsigned int);
+
+ template <typename T>
+ unsigned int min_precision (const T &, signop);
+
+ static inline void accumulate_overflow (overflow_type &, overflow_type);
+}
+
+namespace wi
+{
+ /* Contains the components of a decomposed integer for easy, direct
+ access. */
+ class storage_ref
+ {
+ public:
+ storage_ref () {}
+ storage_ref (const HOST_WIDE_INT *, unsigned int, unsigned int);
+
+ const HOST_WIDE_INT *val;
+ unsigned int len;
+ unsigned int precision;
+
+ /* Provide enough trappings for this class to act as storage for
+ generic_wide_int. */
+ unsigned int get_len () const;
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ };
+}
+
+inline ::wi::storage_ref::storage_ref (const HOST_WIDE_INT *val_in,
+ unsigned int len_in,
+ unsigned int precision_in)
+ : val (val_in), len (len_in), precision (precision_in)
+{
+}
+
+inline unsigned int
+wi::storage_ref::get_len () const
+{
+ return len;
+}
+
+inline unsigned int
+wi::storage_ref::get_precision () const
+{
+ return precision;
+}
+
+inline const HOST_WIDE_INT *
+wi::storage_ref::get_val () const
+{
+ return val;
+}
+
+/* This class defines an integer type using the storage provided by the
+ template argument. The storage class must provide the following
+ functions:
+
+ unsigned int get_precision () const
+ Return the number of bits in the integer.
+
+ HOST_WIDE_INT *get_val () const
+ Return a pointer to the array of blocks that encodes the integer.
+
+ unsigned int get_len () const
+ Return the number of blocks in get_val (). If this is smaller
+ than the number of blocks implied by get_precision (), the
+ remaining blocks are sign extensions of block get_len () - 1.
+
+ Although not required by generic_wide_int itself, writable storage
+ classes can also provide the following functions:
+
+ HOST_WIDE_INT *write_val ()
+ Get a modifiable version of get_val ().
+
+ unsigned int set_len (unsigned int len)
+ Set the value returned by get_len () to LEN. */
+template <typename storage>
+class GTY(()) generic_wide_int : public storage
+{
+public:
+ generic_wide_int ();
+
+ template <typename T>
+ generic_wide_int (const T &);
+
+ template <typename T>
+ generic_wide_int (const T &, unsigned int);
+
+ /* Conversions. */
+ HOST_WIDE_INT to_shwi (unsigned int) const;
+ HOST_WIDE_INT to_shwi () const;
+ unsigned HOST_WIDE_INT to_uhwi (unsigned int) const;
+ unsigned HOST_WIDE_INT to_uhwi () const;
+ HOST_WIDE_INT to_short_addr () const;
+
+ /* Public accessors for the interior of a wide int. */
+ HOST_WIDE_INT sign_mask () const;
+ HOST_WIDE_INT elt (unsigned int) const;
+ HOST_WIDE_INT sext_elt (unsigned int) const;
+ unsigned HOST_WIDE_INT ulow () const;
+ unsigned HOST_WIDE_INT uhigh () const;
+ HOST_WIDE_INT slow () const;
+ HOST_WIDE_INT shigh () const;
+
+ template <typename T>
+ generic_wide_int &operator = (const T &);
+
+#define ASSIGNMENT_OPERATOR(OP, F) \
+ template <typename T> \
+ generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); }
+
+/* Restrict these to cases where the shift operator is defined. */
+#define SHIFT_ASSIGNMENT_OPERATOR(OP, OP2) \
+ template <typename T> \
+ generic_wide_int &OP (const T &c) { return (*this = *this OP2 c); }
+
+#define INCDEC_OPERATOR(OP, DELTA) \
+ generic_wide_int &OP () { *this += DELTA; return *this; }
+
+ ASSIGNMENT_OPERATOR (operator &=, bit_and)
+ ASSIGNMENT_OPERATOR (operator |=, bit_or)
+ ASSIGNMENT_OPERATOR (operator ^=, bit_xor)
+ ASSIGNMENT_OPERATOR (operator +=, add)
+ ASSIGNMENT_OPERATOR (operator -=, sub)
+ ASSIGNMENT_OPERATOR (operator *=, mul)
+ ASSIGNMENT_OPERATOR (operator <<=, lshift)
+ SHIFT_ASSIGNMENT_OPERATOR (operator >>=, >>)
+ INCDEC_OPERATOR (operator ++, 1)
+ INCDEC_OPERATOR (operator --, -1)
+
+#undef SHIFT_ASSIGNMENT_OPERATOR
+#undef ASSIGNMENT_OPERATOR
+#undef INCDEC_OPERATOR
+
+ /* Debugging functions. */
+ void dump () const;
+
+ static const bool is_sign_extended
+ = wi::int_traits <generic_wide_int <storage> >::is_sign_extended;
+};
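+
+/* A sketch of the compound operators above in use, assuming X is a
+ wide_int (the variable is hypothetical):
+
+ x += 1; // x = wi::add (x, 1), keeping x's precision
+ x <<= 2; // x = wi::lshift (x, 2)
+ ++x; // equivalent to x += 1 */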
+
+template <typename storage>
+inline generic_wide_int <storage>::generic_wide_int () {}
+
+template <typename storage>
+template <typename T>
+inline generic_wide_int <storage>::generic_wide_int (const T &x)
+ : storage (x)
+{
+}
+
+template <typename storage>
+template <typename T>
+inline generic_wide_int <storage>::generic_wide_int (const T &x,
+ unsigned int precision)
+ : storage (x, precision)
+{
+}
+
+/* Return THIS as a signed HOST_WIDE_INT, sign-extending from PRECISION.
+ If THIS does not fit in PRECISION, the information is lost. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::to_shwi (unsigned int precision) const
+{
+ if (precision < HOST_BITS_PER_WIDE_INT)
+ return sext_hwi (this->get_val ()[0], precision);
+ else
+ return this->get_val ()[0];
+}
+
+/* Return THIS as a signed HOST_WIDE_INT, in its natural precision. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::to_shwi () const
+{
+ if (is_sign_extended)
+ return this->get_val ()[0];
+ else
+ return to_shwi (this->get_precision ());
+}
+
+/* Return THIS as an unsigned HOST_WIDE_INT, zero-extending from
+ PRECISION. If THIS does not fit in PRECISION, the information
+ is lost. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::to_uhwi (unsigned int precision) const
+{
+ if (precision < HOST_BITS_PER_WIDE_INT)
+ return zext_hwi (this->get_val ()[0], precision);
+ else
+ return this->get_val ()[0];
+}
+
+/* Return THIS as an unsigned HOST_WIDE_INT, in its natural precision. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::to_uhwi () const
+{
+ return to_uhwi (this->get_precision ());
+}
+
+/* TODO: The compiler is half converted from using HOST_WIDE_INT to
+ represent addresses to using offset_int to represent addresses.
+ We use to_short_addr at the interface from new code to old,
+ unconverted code. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::to_short_addr () const
+{
+ return this->get_val ()[0];
+}
+
+/* Return the implicit value of blocks above get_len (). */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::sign_mask () const
+{
+ unsigned int len = this->get_len ();
+ gcc_assert (len > 0);
+
+ unsigned HOST_WIDE_INT high = this->get_val ()[len - 1];
+ if (!is_sign_extended)
+ {
+ unsigned int precision = this->get_precision ();
+ int excess = len * HOST_BITS_PER_WIDE_INT - precision;
+ if (excess > 0)
+ high <<= excess;
+ }
+ return (HOST_WIDE_INT) (high) < 0 ? -1 : 0;
+}
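+
+/* A worked instance of sign_mask, reusing the 4-bit example from the
+ introduction and assuming a 64-bit HOST_WIDE_INT: a 4-bit 0x8 stored
+ without sign extension has len 1 and val[0] = 0x8, so EXCESS is
+ 64 - 4 = 60; shifting HIGH left by 60 moves bit 3 to bit 63, the
+ final cast then reads as negative, and the function returns -1. */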
+
+/* Return the signed value of the least-significant explicitly-encoded
+ block. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::slow () const
+{
+ return this->get_val ()[0];
+}
+
+/* Return the signed value of the most-significant explicitly-encoded
+ block. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::shigh () const
+{
+ return this->get_val ()[this->get_len () - 1];
+}
+
+/* Return the unsigned value of the least-significant
+ explicitly-encoded block. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::ulow () const
+{
+ return this->get_val ()[0];
+}
+
+/* Return the unsigned value of the most-significant
+ explicitly-encoded block. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::uhigh () const
+{
+ return this->get_val ()[this->get_len () - 1];
+}
+
+/* Return block I, which might be implicitly or explicitly encoded. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::elt (unsigned int i) const
+{
+ if (i >= this->get_len ())
+ return sign_mask ();
+ else
+ return this->get_val ()[i];
+}
+
+/* Like elt, but sign-extend beyond the upper bit, instead of returning
+ the raw encoding. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::sext_elt (unsigned int i) const
+{
+ HOST_WIDE_INT elt_i = elt (i);
+ if (!is_sign_extended)
+ {
+ unsigned int precision = this->get_precision ();
+ unsigned int lsb = i * HOST_BITS_PER_WIDE_INT;
+ if (precision - lsb < HOST_BITS_PER_WIDE_INT)
+ elt_i = sext_hwi (elt_i, precision - lsb);
+ }
+ return elt_i;
+}
+
+template <typename storage>
+template <typename T>
+inline generic_wide_int <storage> &
+generic_wide_int <storage>::operator = (const T &x)
+{
+ storage::operator = (x);
+ return *this;
+}
+
+/* Dump the contents of the integer to stderr, for debugging. */
+template <typename storage>
+void
+generic_wide_int <storage>::dump () const
+{
+ unsigned int len = this->get_len ();
+ const HOST_WIDE_INT *val = this->get_val ();
+ unsigned int precision = this->get_precision ();
+ fprintf (stderr, "[");
+ if (len * HOST_BITS_PER_WIDE_INT < precision)
+ fprintf (stderr, "...,");
+ for (unsigned int i = 0; i < len - 1; ++i)
+ fprintf (stderr, HOST_WIDE_INT_PRINT_HEX ",", val[len - 1 - i]);
+ fprintf (stderr, HOST_WIDE_INT_PRINT_HEX "], precision = %d\n",
+ val[0], precision);
+}
+
+namespace wi
+{
+ template <typename storage>
+ struct int_traits < generic_wide_int <storage> >
+ : public wi::int_traits <storage>
+ {
+ static unsigned int get_precision (const generic_wide_int <storage> &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const generic_wide_int <storage> &);
+ };
+}
+
+template <typename storage>
+inline unsigned int
+wi::int_traits < generic_wide_int <storage> >::
+get_precision (const generic_wide_int <storage> &x)
+{
+ return x.get_precision ();
+}
+
+template <typename storage>
+inline wi::storage_ref
+wi::int_traits < generic_wide_int <storage> >::
+decompose (HOST_WIDE_INT *, unsigned int precision,
+ const generic_wide_int <storage> &x)
+{
+ gcc_checking_assert (precision == x.get_precision ());
+ return wi::storage_ref (x.get_val (), x.get_len (), precision);
+}
+
+/* Provide the storage for a wide_int_ref. This acts like a read-only
+ wide_int, with the optimization that VAL is normally a pointer to
+ another integer's storage, so that no array copy is needed. */
+template <bool SE, bool HDP>
+class wide_int_ref_storage : public wi::storage_ref
+{
+private:
+ /* Scratch space that can be used when decomposing the original integer.
+ It must live as long as this object. */
+ HOST_WIDE_INT scratch[2];
+
+public:
+ wide_int_ref_storage () {}
+
+ wide_int_ref_storage (const wi::storage_ref &);
+
+ template <typename T>
+ wide_int_ref_storage (const T &);
+
+ template <typename T>
+ wide_int_ref_storage (const T &, unsigned int);
+};
+
+/* Create a reference from an existing reference. */
+template <bool SE, bool HDP>
+inline wide_int_ref_storage <SE, HDP>::
+wide_int_ref_storage (const wi::storage_ref &x)
+ : storage_ref (x)
+{}
+
+/* Create a reference to integer X in its natural precision. Note
+ that the natural precision is host-dependent for primitive
+ types. */
+template <bool SE, bool HDP>
+template <typename T>
+inline wide_int_ref_storage <SE, HDP>::wide_int_ref_storage (const T &x)
+ : storage_ref (wi::int_traits <T>::decompose (scratch,
+ wi::get_precision (x), x))
+{
+}
+
+/* Create a reference to integer X in precision PRECISION. */
+template <bool SE, bool HDP>
+template <typename T>
+inline wide_int_ref_storage <SE, HDP>::
+wide_int_ref_storage (const T &x, unsigned int precision)
+ : storage_ref (wi::int_traits <T>::decompose (scratch, precision, x))
+{
+}
+
+namespace wi
+{
+ template <bool SE, bool HDP>
+ struct int_traits <wide_int_ref_storage <SE, HDP> >
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ static const bool host_dependent_precision = HDP;
+ static const bool is_sign_extended = SE;
+ };
+}
+
+namespace wi
+{
+ unsigned int force_to_size (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int,
+ signop sgn);
+ unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, bool = true);
+}
+
+/* The storage used by wide_int. */
+class GTY(()) wide_int_storage
+{
+private:
+ HOST_WIDE_INT val[WIDE_INT_MAX_ELTS];
+ unsigned int len;
+ unsigned int precision;
+
+public:
+ wide_int_storage ();
+ template <typename T>
+ wide_int_storage (const T &);
+
+ /* The standard generic_wide_int storage methods. */
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ unsigned int get_len () const;
+ HOST_WIDE_INT *write_val ();
+ void set_len (unsigned int, bool = false);
+
+ template <typename T>
+ wide_int_storage &operator = (const T &);
+
+ static wide_int from (const wide_int_ref &, unsigned int, signop);
+ static wide_int from_array (const HOST_WIDE_INT *, unsigned int,
+ unsigned int, bool = true);
+ static wide_int create (unsigned int);
+
+ /* FIXME: target-dependent, so should disappear. */
+ wide_int bswap () const;
+};
+
+namespace wi
+{
+ template <>
+ struct int_traits <wide_int_storage>
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ /* Guaranteed by a static assert in the wide_int_storage constructor. */
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
+ template <typename T1, typename T2>
+ static wide_int get_binary_result (const T1 &, const T2 &);
+ };
+}
+
+inline wide_int_storage::wide_int_storage () {}
+
+/* Initialize the storage from integer X, in its natural precision.
+ Note that we do not allow integers with host-dependent precision
+ to become wide_ints; wide_ints must always be logically independent
+ of the host. */
+template <typename T>
+inline wide_int_storage::wide_int_storage (const T &x)
+{
+ { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); }
+ { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION); }
+ WIDE_INT_REF_FOR (T) xi (x);
+ precision = xi.precision;
+ wi::copy (*this, xi);
+}
+
+template <typename T>
+inline wide_int_storage&
+wide_int_storage::operator = (const T &x)
+{
+ { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); }
+ { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION); }
+ WIDE_INT_REF_FOR (T) xi (x);
+ precision = xi.precision;
+ wi::copy (*this, xi);
+ return *this;
+}
+
+inline unsigned int
+wide_int_storage::get_precision () const
+{
+ return precision;
+}
+
+inline const HOST_WIDE_INT *
+wide_int_storage::get_val () const
+{
+ return val;
+}
+
+inline unsigned int
+wide_int_storage::get_len () const
+{
+ return len;
+}
+
+inline HOST_WIDE_INT *
+wide_int_storage::write_val ()
+{
+ return val;
+}
+
+inline void
+wide_int_storage::set_len (unsigned int l, bool is_sign_extended)
+{
+ len = l;
+ if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > precision)
+ val[len - 1] = sext_hwi (val[len - 1],
+ precision % HOST_BITS_PER_WIDE_INT);
+}
+
+/* Treat X as having signedness SGN and convert it to a PRECISION-bit
+ number. */
+inline wide_int
+wide_int_storage::from (const wide_int_ref &x, unsigned int precision,
+ signop sgn)
+{
+ wide_int result = wide_int::create (precision);
+ result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
+ x.precision, precision, sgn));
+ return result;
+}
+
+/* Create a wide_int from the explicit block encoding given by VAL and
+ LEN. PRECISION is the precision of the integer. NEED_CANON_P is
+ true if the encoding may have redundant trailing blocks. */
+inline wide_int
+wide_int_storage::from_array (const HOST_WIDE_INT *val, unsigned int len,
+ unsigned int precision, bool need_canon_p)
+{
+ wide_int result = wide_int::create (precision);
+ result.set_len (wi::from_array (result.write_val (), val, len, precision,
+ need_canon_p));
+ return result;
+}
+
+/* Return an uninitialized wide_int with precision PRECISION. */
+inline wide_int
+wide_int_storage::create (unsigned int precision)
+{
+ wide_int x;
+ x.precision = precision;
+ return x;
+}
+
+template <typename T1, typename T2>
+inline wide_int
+wi::int_traits <wide_int_storage>::get_binary_result (const T1 &x, const T2 &y)
+{
+ /* This shouldn't be used for two flexible-precision inputs. */
+ STATIC_ASSERT (wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION
+ || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION);
+ if (wi::int_traits <T1>::precision_type == FLEXIBLE_PRECISION)
+ return wide_int::create (wi::get_precision (y));
+ else
+ return wide_int::create (wi::get_precision (x));
+}
+
+/* The storage used by FIXED_WIDE_INT (N). */
+template <int N>
+class GTY(()) fixed_wide_int_storage
+{
+private:
+ HOST_WIDE_INT val[(N + HOST_BITS_PER_WIDE_INT + 1) / HOST_BITS_PER_WIDE_INT];
+ unsigned int len;
+
+public:
+ fixed_wide_int_storage ();
+ template <typename T>
+ fixed_wide_int_storage (const T &);
+
+ /* The standard generic_wide_int storage methods. */
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ unsigned int get_len () const;
+ HOST_WIDE_INT *write_val ();
+ void set_len (unsigned int, bool = false);
+
+ static FIXED_WIDE_INT (N) from (const wide_int_ref &, signop);
+ static FIXED_WIDE_INT (N) from_array (const HOST_WIDE_INT *, unsigned int,
+ bool = true);
+};
+
+namespace wi
+{
+ template <int N>
+ struct int_traits < fixed_wide_int_storage <N> >
+ {
+ static const enum precision_type precision_type = CONST_PRECISION;
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
+ static const unsigned int precision = N;
+ template <typename T1, typename T2>
+ static FIXED_WIDE_INT (N) get_binary_result (const T1 &, const T2 &);
+ };
+}
+
+template <int N>
+inline fixed_wide_int_storage <N>::fixed_wide_int_storage () {}
+
+/* Initialize the storage from integer X, in precision N. */
+template <int N>
+template <typename T>
+inline fixed_wide_int_storage <N>::fixed_wide_int_storage (const T &x)
+{
+ /* Check for type compatibility. We don't want to initialize a
+ fixed-width integer from something like a wide_int. */
+ WI_BINARY_RESULT (T, FIXED_WIDE_INT (N)) *assertion ATTRIBUTE_UNUSED;
+ wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
+}
+
+template <int N>
+inline unsigned int
+fixed_wide_int_storage <N>::get_precision () const
+{
+ return N;
+}
+
+template <int N>
+inline const HOST_WIDE_INT *
+fixed_wide_int_storage <N>::get_val () const
+{
+ return val;
+}
+
+template <int N>
+inline unsigned int
+fixed_wide_int_storage <N>::get_len () const
+{
+ return len;
+}
+
+template <int N>
+inline HOST_WIDE_INT *
+fixed_wide_int_storage <N>::write_val ()
+{
+ return val;
+}
+
+template <int N>
+inline void
+fixed_wide_int_storage <N>::set_len (unsigned int l, bool)
+{
+ len = l;
+ /* There are no excess bits in val[len - 1]. */
+ STATIC_ASSERT (N % HOST_BITS_PER_WIDE_INT == 0);
+}
+
+/* Treat X as having signedness SGN and convert it to an N-bit number. */
+template <int N>
+inline FIXED_WIDE_INT (N)
+fixed_wide_int_storage <N>::from (const wide_int_ref &x, signop sgn)
+{
+ FIXED_WIDE_INT (N) result;
+ result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
+ x.precision, N, sgn));
+ return result;
+}
+
+/* Create a FIXED_WIDE_INT (N) from the explicit block encoding given by
+ VAL and LEN. NEED_CANON_P is true if the encoding may have redundant
+ trailing blocks. */
+template <int N>
+inline FIXED_WIDE_INT (N)
+fixed_wide_int_storage <N>::from_array (const HOST_WIDE_INT *val,
+ unsigned int len,
+ bool need_canon_p)
+{
+ FIXED_WIDE_INT (N) result;
+ result.set_len (wi::from_array (result.write_val (), val, len,
+ N, need_canon_p));
+ return result;
+}
+
+template <int N>
+template <typename T1, typename T2>
+inline FIXED_WIDE_INT (N)
+wi::int_traits < fixed_wide_int_storage <N> >::
+get_binary_result (const T1 &, const T2 &)
+{
+ return FIXED_WIDE_INT (N) ();
+}
+
+/* A reference to one element of a trailing_wide_ints structure. */
+class trailing_wide_int_storage
+{
+private:
+ /* The precision of the integer, which is a fixed property of the
+ parent trailing_wide_ints. */
+ unsigned int m_precision;
+
+ /* A pointer to the length field. */
+ unsigned char *m_len;
+
+ /* A pointer to the HWI array. There are enough elements to hold all
+ values of precision M_PRECISION. */
+ HOST_WIDE_INT *m_val;
+
+public:
+ trailing_wide_int_storage (unsigned int, unsigned char *, HOST_WIDE_INT *);
+
+ /* The standard generic_wide_int storage methods. */
+ unsigned int get_len () const;
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ HOST_WIDE_INT *write_val ();
+ void set_len (unsigned int, bool = false);
+
+ template <typename T>
+ trailing_wide_int_storage &operator = (const T &);
+};
+
+typedef generic_wide_int <trailing_wide_int_storage> trailing_wide_int;
+
+/* trailing_wide_int behaves like a wide_int. */
+namespace wi
+{
+ template <>
+ struct int_traits <trailing_wide_int_storage>
+ : public int_traits <wide_int_storage> {};
+}
+
+/* A variable-length array of wide_int-like objects that can be put
+ at the end of a variable-sized structure. The number of objects is
+ at most N and can be set at runtime by using set_precision ().
+
+ Use extra_size to calculate how many bytes beyond the containing
+ structure's sizeof need to be allocated. Use set_precision to
+ initialize the structure. */
+template <int N>
+struct GTY((user)) trailing_wide_ints
+{
+private:
+ /* The shared precision of each number. */
+ unsigned short m_precision;
+
+ /* The shared maximum length of each number. */
+ unsigned char m_max_len;
+
+ /* The number of elements. */
+ unsigned char m_num_elements;
+
+ /* The current length of each number.
+ Avoid a char array so that the whole structure is not treated as
+ typeless storage, which would in turn disable TBAA on gimple,
+ trees and RTL. */
+ struct {unsigned char len;} m_len[N];
+
+ /* The variable-length part of the structure, which always contains
+ at least one HWI. Element I starts at index I * M_MAX_LEN. */
+ HOST_WIDE_INT m_val[1];
+
+public:
+ typedef WIDE_INT_REF_FOR (trailing_wide_int_storage) const_reference;
+
+ void set_precision (unsigned int precision, unsigned int num_elements = N);
+ unsigned int get_precision () const { return m_precision; }
+ unsigned int num_elements () const { return m_num_elements; }
+ trailing_wide_int operator [] (unsigned int);
+ const_reference operator [] (unsigned int) const;
+ static size_t extra_size (unsigned int precision,
+ unsigned int num_elements = N);
+ size_t extra_size () const { return extra_size (m_precision,
+ m_num_elements); }
+};
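+
+/* An allocation sketch, assuming a hypothetical host structure FOO
+ that ends in a trailing_wide_ints field:
+
+ struct foo { int n_other; trailing_wide_ints <2> ints; };
+
+ size_t size = sizeof (foo) + trailing_wide_ints <2>::extra_size (prec);
+ foo *f = XNEWVAR (foo, size); // or any other allocator
+ f->ints.set_precision (prec);
+ f->ints[0] = wi::shwi (0, prec); */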
+
+inline trailing_wide_int_storage::
+trailing_wide_int_storage (unsigned int precision, unsigned char *len,
+ HOST_WIDE_INT *val)
+ : m_precision (precision), m_len (len), m_val (val)
+{
+}
+
+inline unsigned int
+trailing_wide_int_storage::get_len () const
+{
+ return *m_len;
+}
+
+inline unsigned int
+trailing_wide_int_storage::get_precision () const
+{
+ return m_precision;
+}
+
+inline const HOST_WIDE_INT *
+trailing_wide_int_storage::get_val () const
+{
+ return m_val;
+}
+
+inline HOST_WIDE_INT *
+trailing_wide_int_storage::write_val ()
+{
+ return m_val;
+}
+
+inline void
+trailing_wide_int_storage::set_len (unsigned int len, bool is_sign_extended)
+{
+ *m_len = len;
+ if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > m_precision)
+ m_val[len - 1] = sext_hwi (m_val[len - 1],
+ m_precision % HOST_BITS_PER_WIDE_INT);
+}
+
+template <typename T>
+inline trailing_wide_int_storage &
+trailing_wide_int_storage::operator = (const T &x)
+{
+ WIDE_INT_REF_FOR (T) xi (x, m_precision);
+ wi::copy (*this, xi);
+ return *this;
+}
+
+/* Initialize the structure and record that all elements have precision
+ PRECISION. NUM_ELEMENTS can be no more than N. */
+template <int N>
+inline void
+trailing_wide_ints <N>::set_precision (unsigned int precision,
+ unsigned int num_elements)
+{
+ gcc_checking_assert (num_elements <= N);
+ m_num_elements = num_elements;
+ m_precision = precision;
+ m_max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT);
+}
+
+/* Return a reference to element INDEX. */
+template <int N>
+inline trailing_wide_int
+trailing_wide_ints <N>::operator [] (unsigned int index)
+{
+ return trailing_wide_int_storage (m_precision, &m_len[index].len,
+ &m_val[index * m_max_len]);
+}
+
+template <int N>
+inline typename trailing_wide_ints <N>::const_reference
+trailing_wide_ints <N>::operator [] (unsigned int index) const
+{
+ return wi::storage_ref (&m_val[index * m_max_len],
+ m_len[index].len, m_precision);
+}
+
+/* Return how many extra bytes need to be added to the end of the
+ structure in order to handle NUM_ELEMENTS wide_ints of precision
+ PRECISION. NUM_ELEMENTS defaults to N. */
+template <int N>
+inline size_t
+trailing_wide_ints <N>::extra_size (unsigned int precision,
+ unsigned int num_elements)
+{
+ unsigned int max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT);
+ gcc_checking_assert (num_elements <= N);
+ return (num_elements * max_len - 1) * sizeof (HOST_WIDE_INT);
+}
+
+/* This macro is used in structures that end with a trailing_wide_ints field
+ called FIELD. It declares get_NAME() and set_NAME() methods to access
+ element I of FIELD. */
+#define TRAILING_WIDE_INT_ACCESSOR(NAME, FIELD, I) \
+ trailing_wide_int get_##NAME () { return FIELD[I]; } \
+ template <typename T> void set_##NAME (const T &x) { FIELD[I] = x; }
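+
+/* For example, a hypothetical structure
+
+ struct bar
+ {
+ trailing_wide_ints <1> w;
+ TRAILING_WIDE_INT_ACCESSOR (value, w, 0)
+ };
+
+ gains get_value () and set_value () methods for element 0 of W. */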
+
+namespace wi
+{
+ /* Implementation of int_traits for primitive integer types like "int". */
+ template <typename T, bool signed_p>
+ struct primitive_int_traits
+ {
+ static const enum precision_type precision_type = FLEXIBLE_PRECISION;
+ static const bool host_dependent_precision = true;
+ static const bool is_sign_extended = true;
+ static unsigned int get_precision (T);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, T);
+ };
+}
+
+template <typename T, bool signed_p>
+inline unsigned int
+wi::primitive_int_traits <T, signed_p>::get_precision (T)
+{
+ return sizeof (T) * CHAR_BIT;
+}
+
+template <typename T, bool signed_p>
+inline wi::storage_ref
+wi::primitive_int_traits <T, signed_p>::decompose (HOST_WIDE_INT *scratch,
+ unsigned int precision, T x)
+{
+ scratch[0] = x;
+ if (signed_p || scratch[0] >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
+ return wi::storage_ref (scratch, 1, precision);
+ scratch[1] = 0;
+ return wi::storage_ref (scratch, 2, precision);
+}
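+
+/* To see why the extra zero block is needed: decomposing, say, an
+ unsigned long whose top bit is set at a precision greater than
+ HOST_BITS_PER_WIDE_INT would otherwise leave a single all-ones
+ block, which would read back as -1 rather than as the large
+ positive value that the unsigned type denotes. */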
+
+/* Allow primitive C types to be used in wi:: routines. */
+namespace wi
+{
+ template <>
+ struct int_traits <unsigned char>
+ : public primitive_int_traits <unsigned char, false> {};
+
+ template <>
+ struct int_traits <unsigned short>
+ : public primitive_int_traits <unsigned short, false> {};
+
+ template <>
+ struct int_traits <int>
+ : public primitive_int_traits <int, true> {};
+
+ template <>
+ struct int_traits <unsigned int>
+ : public primitive_int_traits <unsigned int, false> {};
+
+ template <>
+ struct int_traits <long>
+ : public primitive_int_traits <long, true> {};
+
+ template <>
+ struct int_traits <unsigned long>
+ : public primitive_int_traits <unsigned long, false> {};
+
+#if defined HAVE_LONG_LONG
+ template <>
+ struct int_traits <long long>
+ : public primitive_int_traits <long long, true> {};
+
+ template <>
+ struct int_traits <unsigned long long>
+ : public primitive_int_traits <unsigned long long, false> {};
+#endif
+}
+
+namespace wi
+{
+ /* Stores HWI-sized integer VAL, treating it as having signedness SGN
+ and precision PRECISION. */
+ class hwi_with_prec
+ {
+ public:
+ hwi_with_prec () {}
+ hwi_with_prec (HOST_WIDE_INT, unsigned int, signop);
+ HOST_WIDE_INT val;
+ unsigned int precision;
+ signop sgn;
+ };
+
+ hwi_with_prec shwi (HOST_WIDE_INT, unsigned int);
+ hwi_with_prec uhwi (unsigned HOST_WIDE_INT, unsigned int);
+
+ hwi_with_prec minus_one (unsigned int);
+ hwi_with_prec zero (unsigned int);
+ hwi_with_prec one (unsigned int);
+ hwi_with_prec two (unsigned int);
+}
+
+inline wi::hwi_with_prec::hwi_with_prec (HOST_WIDE_INT v, unsigned int p,
+ signop s)
+ : precision (p), sgn (s)
+{
+ if (precision < HOST_BITS_PER_WIDE_INT)
+ val = sext_hwi (v, precision);
+ else
+ val = v;
+}
+
+/* Return a signed integer that has value VAL and precision PRECISION. */
+inline wi::hwi_with_prec
+wi::shwi (HOST_WIDE_INT val, unsigned int precision)
+{
+ return hwi_with_prec (val, precision, SIGNED);
+}
+
+/* Return an unsigned integer that has value VAL and precision PRECISION. */
+inline wi::hwi_with_prec
+wi::uhwi (unsigned HOST_WIDE_INT val, unsigned int precision)
+{
+ return hwi_with_prec (val, precision, UNSIGNED);
+}
+
+/* Return a wide int of -1 with precision PRECISION. */
+inline wi::hwi_with_prec
+wi::minus_one (unsigned int precision)
+{
+ return wi::shwi (-1, precision);
+}
+
+/* Return a wide int of 0 with precision PRECISION. */
+inline wi::hwi_with_prec
+wi::zero (unsigned int precision)
+{
+ return wi::shwi (0, precision);
+}
+
+/* Return a wide int of 1 with precision PRECISION. */
+inline wi::hwi_with_prec
+wi::one (unsigned int precision)
+{
+ return wi::shwi (1, precision);
+}
+
+/* Return a wide int of 2 with precision PRECISION. */
+inline wi::hwi_with_prec
+wi::two (unsigned int precision)
+{
+ return wi::shwi (2, precision);
+}
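+
+/* These helpers compose with the comparison and arithmetic routines
+ below; a small sketch, with X assumed to be a wide_int of precision
+ PREC:
+
+ if (wi::eq_p (x, wi::zero (prec)))
+ ...;
+ wide_int next = wi::add (x, wi::one (prec)); */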
+
+namespace wi
+{
+ /* ints_for<T>::zero (X) returns a zero that, when assigned to a T,
+ gives that T the same precision as X. */
+ template<typename T, precision_type = int_traits<T>::precision_type>
+ struct ints_for
+ {
+ static int zero (const T &) { return 0; }
+ };
+
+ template<typename T>
+ struct ints_for<T, VAR_PRECISION>
+ {
+ static hwi_with_prec zero (const T &);
+ };
+}
+
+template<typename T>
+inline wi::hwi_with_prec
+wi::ints_for<T, wi::VAR_PRECISION>::zero (const T &x)
+{
+ return wi::zero (wi::get_precision (x));
+}
+
+namespace wi
+{
+ template <>
+ struct int_traits <wi::hwi_with_prec>
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ /* hwi_with_prec has an explicitly-given precision, rather than the
+ precision of HOST_WIDE_INT. */
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
+ static unsigned int get_precision (const wi::hwi_with_prec &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const wi::hwi_with_prec &);
+ };
+}
+
+inline unsigned int
+wi::int_traits <wi::hwi_with_prec>::get_precision (const wi::hwi_with_prec &x)
+{
+ return x.precision;
+}
+
+inline wi::storage_ref
+wi::int_traits <wi::hwi_with_prec>::
+decompose (HOST_WIDE_INT *scratch, unsigned int precision,
+ const wi::hwi_with_prec &x)
+{
+ gcc_checking_assert (precision == x.precision);
+ scratch[0] = x.val;
+ if (x.sgn == SIGNED || x.val >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
+ return wi::storage_ref (scratch, 1, precision);
+ scratch[1] = 0;
+ return wi::storage_ref (scratch, 2, precision);
+}
+
+/* Private functions for handling large cases out of line. They take
+ individual length and array parameters because that is cheaper for
+ the inline caller than constructing an object on the stack and
+ passing a reference to it. (Although many callers use wide_int_refs,
+ we generally want those to be removed by SRA.) */
+namespace wi
+{
+ bool eq_p_large (const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int);
+ bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int);
+ int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int);
+ int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int);
+ unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int,
+ unsigned int, unsigned int);
+ unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int,
+ unsigned int, unsigned int);
+ unsigned int set_bit_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int);
+ unsigned int lshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int);
+ unsigned int lrshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int);
+ unsigned int arshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int);
+ unsigned int and_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ unsigned int and_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, const HOST_WIDE_INT *,
+ unsigned int, unsigned int);
+ unsigned int or_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ unsigned int or_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, const HOST_WIDE_INT *,
+ unsigned int, unsigned int);
+ unsigned int xor_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ unsigned int add_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int,
+ signop, overflow_type *);
+ unsigned int sub_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int,
+ signop, overflow_type *);
+ unsigned int mul_internal (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, signop,
+ overflow_type *, bool);
+ unsigned int divmod_internal (HOST_WIDE_INT *, unsigned int *,
+ HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int,
+ const HOST_WIDE_INT *,
+ unsigned int, unsigned int,
+ signop, overflow_type *);
+}
+
+/* Return the number of bits that integer X can hold. */
+template <typename T>
+inline unsigned int
+wi::get_precision (const T &x)
+{
+ return wi::int_traits <T>::get_precision (x);
+}
+
+/* Return the number of bits that the result of a binary operation can
+ hold when the input operands are X and Y. */
+template <typename T1, typename T2>
+inline unsigned int
+wi::get_binary_precision (const T1 &x, const T2 &y)
+{
+ return get_precision (wi::int_traits <WI_BINARY_RESULT (T1, T2)>::
+ get_binary_result (x, y));
+}
+
+/* Copy the contents of Y to X, but keeping X's current precision. */
+template <typename T1, typename T2>
+inline void
+wi::copy (T1 &x, const T2 &y)
+{
+ HOST_WIDE_INT *xval = x.write_val ();
+ const HOST_WIDE_INT *yval = y.get_val ();
+ unsigned int len = y.get_len ();
+ unsigned int i = 0;
+ do
+ xval[i] = yval[i];
+ while (++i < len);
+ x.set_len (len, y.is_sign_extended);
+}
+
+/* Return true if X fits in a HOST_WIDE_INT with no loss of precision. */
+template <typename T>
+inline bool
+wi::fits_shwi_p (const T &x)
+{
+ WIDE_INT_REF_FOR (T) xi (x);
+ return xi.len == 1;
+}
+
+/* Return true if X fits in an unsigned HOST_WIDE_INT with no loss of
+ precision. */
+template <typename T>
+inline bool
+wi::fits_uhwi_p (const T &x)
+{
+ WIDE_INT_REF_FOR (T) xi (x);
+ if (xi.precision <= HOST_BITS_PER_WIDE_INT)
+ return true;
+ if (xi.len == 1)
+ return xi.slow () >= 0;
+ return xi.len == 2 && xi.uhigh () == 0;
+}
+
+/* Return true if X is negative based on the interpretation of SGN.
+ For UNSIGNED, this is always false. */
+template <typename T>
+inline bool
+wi::neg_p (const T &x, signop sgn)
+{
+ WIDE_INT_REF_FOR (T) xi (x);
+ if (sgn == UNSIGNED)
+ return false;
+ return xi.sign_mask () < 0;
+}
+
+/* Return -1 if the top bit of X is set and 0 if the top bit is clear. */
+template <typename T>
+inline HOST_WIDE_INT
+wi::sign_mask (const T &x)
+{
+ WIDE_INT_REF_FOR (T) xi (x);
+ return xi.sign_mask ();
+}
+
+/* Return true if X == Y. X and Y must be binary-compatible. */
+template <typename T1, typename T2>
+inline bool
+wi::eq_p (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (xi.is_sign_extended && yi.is_sign_extended)
+ {
+ /* This case reduces to array equality. */
+ if (xi.len != yi.len)
+ return false;
+ unsigned int i = 0;
+ do
+ if (xi.val[i] != yi.val[i])
+ return false;
+ while (++i != xi.len);
+ return true;
+ }
+ if (LIKELY (yi.len == 1))
+ {
+ /* XI is only equal to YI if it too has a single HWI. */
+ if (xi.len != 1)
+ return false;
+ /* Excess bits in xi.val[0] will be signs or zeros, so comparisons
+ with 0 are simple. */
+ if (STATIC_CONSTANT_P (yi.val[0] == 0))
+ return xi.val[0] == 0;
+ /* Otherwise flush out any excess bits first. */
+ unsigned HOST_WIDE_INT diff = xi.val[0] ^ yi.val[0];
+ int excess = HOST_BITS_PER_WIDE_INT - precision;
+ if (excess > 0)
+ diff <<= excess;
+ return diff == 0;
+ }
+ return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision);
+}
+
+/* Return true if X != Y. X and Y must be binary-compatible. */
+template <typename T1, typename T2>
+inline bool
+wi::ne_p (const T1 &x, const T2 &y)
+{
+ return !eq_p (x, y);
+}
+
+/* Return true if X < Y when both are treated as signed values. */
+template <typename T1, typename T2>
+inline bool
+wi::lts_p (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ /* We optimize x < y, where y fits in a signed HWI. */
+ if (wi::fits_shwi_p (yi))
+ {
+ /* Make lts_p (x, 0) as efficient as wi::neg_p (x). */
+ if (STATIC_CONSTANT_P (yi.val[0] == 0))
+ return neg_p (xi);
+ /* If x fits directly into a shwi, we can compare directly. */
+ if (wi::fits_shwi_p (xi))
+ return xi.to_shwi () < yi.to_shwi ();
+ /* If x doesn't fit and is negative, then it must be more
+ negative than any signed HWI, and hence smaller than y. */
+ if (neg_p (xi))
+ return true;
+ /* If x is positive, then it must be larger than any signed HWI,
+ and hence greater than y. */
+ return false;
+ }
+ /* Optimize the opposite case, if it can be detected at compile time. */
+ if (STATIC_CONSTANT_P (xi.len == 1))
+ /* If YI is negative it is lower than the least HWI.
+ If YI is positive it is greater than the greatest HWI. */
+ return !neg_p (yi);
+ return lts_p_large (xi.val, xi.len, precision, yi.val, yi.len);
+}
+
+/* Return true if X < Y when both are treated as unsigned values. */
+template <typename T1, typename T2>
+inline bool
+wi::ltu_p (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ /* Optimize comparisons with constants. */
+ if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
+ return xi.len == 1 && xi.to_uhwi () < (unsigned HOST_WIDE_INT) yi.val[0];
+ if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
+ return yi.len != 1 || yi.to_uhwi () > (unsigned HOST_WIDE_INT) xi.val[0];
+ /* Optimize the case of two HWIs. The HWIs are implicitly sign-extended
+ for precisions greater than HOST_BITS_PER_WIDE_INT, but sign-extending both
+ values does not change the result. */
+ if (LIKELY (xi.len + yi.len == 2))
+ {
+ unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
+ unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
+ return xl < yl;
+ }
+ return ltu_p_large (xi.val, xi.len, precision, yi.val, yi.len);
+}
+
+/* Return true if X < Y. Signedness of X and Y is indicated by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::lt_p (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return lts_p (x, y);
+ else
+ return ltu_p (x, y);
+}
+
+/* Return true if X <= Y when both are treated as signed values. */
+template <typename T1, typename T2>
+inline bool
+wi::les_p (const T1 &x, const T2 &y)
+{
+ return !lts_p (y, x);
+}
+
+/* Return true if X <= Y when both are treated as unsigned values. */
+template <typename T1, typename T2>
+inline bool
+wi::leu_p (const T1 &x, const T2 &y)
+{
+ return !ltu_p (y, x);
+}
+
+/* Return true if X <= Y. Signedness of X and Y is indicated by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::le_p (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return les_p (x, y);
+ else
+ return leu_p (x, y);
+}
+
+/* Return true if X > Y when both are treated as signed values. */
+template <typename T1, typename T2>
+inline bool
+wi::gts_p (const T1 &x, const T2 &y)
+{
+ return lts_p (y, x);
+}
+
+/* Return true if X > Y when both are treated as unsigned values. */
+template <typename T1, typename T2>
+inline bool
+wi::gtu_p (const T1 &x, const T2 &y)
+{
+ return ltu_p (y, x);
+}
+
+/* Return true if X > Y. Signedness of X and Y is indicated by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::gt_p (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return gts_p (x, y);
+ else
+ return gtu_p (x, y);
+}
+
+/* Return true if X >= Y when both are treated as signed values. */
+template <typename T1, typename T2>
+inline bool
+wi::ges_p (const T1 &x, const T2 &y)
+{
+ return !lts_p (x, y);
+}
+
+/* Return true if X >= Y when both are treated as unsigned values. */
+template <typename T1, typename T2>
+inline bool
+wi::geu_p (const T1 &x, const T2 &y)
+{
+ return !ltu_p (x, y);
+}
+
+/* Return true if X >= Y. Signedness of X and Y is indicated by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::ge_p (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return ges_p (x, y);
+ else
+ return geu_p (x, y);
+}
+
+/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y
+ as signed values. */
+template <typename T1, typename T2>
+inline int
+wi::cmps (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (wi::fits_shwi_p (yi))
+ {
+ /* Special case for comparisons with 0. */
+ if (STATIC_CONSTANT_P (yi.val[0] == 0))
+ return neg_p (xi) ? -1 : !(xi.len == 1 && xi.val[0] == 0);
+ /* If x fits into a signed HWI, we can compare directly. */
+ if (wi::fits_shwi_p (xi))
+ {
+ HOST_WIDE_INT xl = xi.to_shwi ();
+ HOST_WIDE_INT yl = yi.to_shwi ();
+ return xl < yl ? -1 : xl > yl;
+ }
+ /* If x doesn't fit and is negative, then it must be more
+ negative than any signed HWI, and hence smaller than y. */
+ if (neg_p (xi))
+ return -1;
+ /* If x is positive, then it must be larger than any signed HWI,
+ and hence greater than y. */
+ return 1;
+ }
+ /* Optimize the opposite case, if it can be detected at compile time. */
+ if (STATIC_CONSTANT_P (xi.len == 1))
+ /* If YI is negative it is lower than the least HWI.
+ If YI is positive it is greater than the greatest HWI. */
+ return neg_p (yi) ? 1 : -1;
+ return cmps_large (xi.val, xi.len, precision, yi.val, yi.len);
+}
+
+/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y
+ as unsigned values. */
+template <typename T1, typename T2>
+inline int
+wi::cmpu (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ /* Optimize comparisons with constants. */
+ if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
+ {
+ /* If XI doesn't fit in a HWI then it must be larger than YI. */
+ if (xi.len != 1)
+ return 1;
+ /* Otherwise compare directly. */
+ unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
+ unsigned HOST_WIDE_INT yl = yi.val[0];
+ return xl < yl ? -1 : xl > yl;
+ }
+ if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
+ {
+ /* If YI doesn't fit in a HWI then it must be larger than XI. */
+ if (yi.len != 1)
+ return -1;
+ /* Otherwise compare directly. */
+ unsigned HOST_WIDE_INT xl = xi.val[0];
+ unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
+ return xl < yl ? -1 : xl > yl;
+ }
+ /* Optimize the case of two HWIs. The HWIs are implicitly sign-extended
+ for precisions greater than HOST_BITS_PER_WIDE_INT, but sign-extending both
+ values does not change the result. */
+ if (LIKELY (xi.len + yi.len == 2))
+ {
+ unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
+ unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
+ return xl < yl ? -1 : xl > yl;
+ }
+ return cmpu_large (xi.val, xi.len, precision, yi.val, yi.len);
+}
+
+/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Signedness of
+ X and Y indicated by SGN. */
+template <typename T1, typename T2>
+inline int
+wi::cmp (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return cmps (x, y);
+ else
+ return cmpu (x, y);
+}
+
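+/* A short sketch of the three-way comparisons, with illustrative
+ values (wi::shwi is assumed from earlier in this file): for
+ x = wi::shwi (-1, 8) and y = wi::shwi (1, 8), wi::cmps (x, y)
+ is -1 (signed -1 < 1), wi::cmpu (x, y) is 1 (unsigned 0xff > 1),
+ and wi::cmp (x, y, UNSIGNED) simply dispatches to cmpu. */
+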
+/* Return ~x. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::bit_not (const T &x)
+{
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ WIDE_INT_REF_FOR (T) xi (x, get_precision (result));
+ for (unsigned int i = 0; i < xi.len; ++i)
+ val[i] = ~xi.val[i];
+ result.set_len (xi.len);
+ return result;
+}
+
+/* Return -x. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::neg (const T &x)
+{
+ return sub (0, x);
+}
+
+/* Return -x. Indicate in *OVERFLOW if performing the negation would
+ cause an overflow. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::neg (const T &x, overflow_type *overflow)
+{
+ *overflow = only_sign_bit_p (x) ? OVF_OVERFLOW : OVF_NONE;
+ return sub (0, x);
+}
+
+/* Return the absolute value of x. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::abs (const T &x)
+{
+ return neg_p (x) ? neg (x) : WI_UNARY_RESULT (T) (x);
+}
+
+/* Return the result of sign-extending the low OFFSET bits of X. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::sext (const T &x, unsigned int offset)
+{
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
+
+ if (offset <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = sext_hwi (xi.ulow (), offset);
+ result.set_len (1, true);
+ }
+ else
+ result.set_len (sext_large (val, xi.val, xi.len, precision, offset));
+ return result;
+}
+
+/* Return the result of zero-extending the low OFFSET bits of X. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::zext (const T &x, unsigned int offset)
+{
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
+
+ /* This is not just an optimization, it is actually required to
+ maintain the canonical form. */
+ if (offset >= precision)
+ {
+ wi::copy (result, xi);
+ return result;
+ }
+
+ /* In these cases we know that at least the top bit will be clear,
+ so no sign extension is necessary. */
+ if (offset < HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = zext_hwi (xi.ulow (), offset);
+ result.set_len (1, true);
+ }
+ else
+ result.set_len (zext_large (val, xi.val, xi.len, precision, offset), true);
+ return result;
+}
+
+/* Return the result of extending the low OFFSET bits of X according to
+ signedness SGN. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::ext (const T &x, unsigned int offset, signop sgn)
+{
+ return sgn == SIGNED ? sext (x, offset) : zext (x, offset);
+}
+
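+/* Worked example for the extensions above (illustrative values;
+ wi::shwi is assumed from earlier in this file): for
+ v = wi::shwi (0xff, 32), wi::sext (v, 8) sign-extends the low
+ 8 bits and yields -1 (0xffffffff), wi::zext (v, 8) yields 255
+ (0x000000ff), and wi::ext (v, 8, SIGNED) matches sext. */
+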
+/* Return an integer that represents X | (1 << bit). */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::set_bit (const T &x, unsigned int bit)
+{
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () | (HOST_WIDE_INT_1U << bit);
+ result.set_len (1);
+ }
+ else
+ result.set_len (set_bit_large (val, xi.val, xi.len, precision, bit));
+ return result;
+}
+
+/* Return the minimum of X and Y, treating them both as having
+ signedness SGN. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::min (const T1 &x, const T2 &y, signop sgn)
+{
+ WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ if (wi::le_p (x, y, sgn))
+ wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
+ else
+ wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
+ return result;
+}
+
+/* Return the minimum of X and Y, treating both as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smin (const T1 &x, const T2 &y)
+{
+ return wi::min (x, y, SIGNED);
+}
+
+/* Return the minimum of X and Y, treating both as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umin (const T1 &x, const T2 &y)
+{
+ return wi::min (x, y, UNSIGNED);
+}
+
+/* Return the maximum of X and Y, treating them both as having
+ signedness SGN. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::max (const T1 &x, const T2 &y, signop sgn)
+{
+ WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ if (wi::ge_p (x, y, sgn))
+ wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
+ else
+ wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
+ return result;
+}
+
+/* Return the maximum of X and Y, treating both as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smax (const T1 &x, const T2 &y)
+{
+ return wi::max (x, y, SIGNED);
+}
+
+/* Return the maximum of X and Y, treating both as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umax (const T1 &x, const T2 &y)
+{
+ return wi::max (x, y, UNSIGNED);
+}
+
+/* Return X & Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_and (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (LIKELY (xi.len + yi.len == 2))
+ {
+ val[0] = xi.ulow () & yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (and_large (val, xi.val, xi.len, yi.val, yi.len,
+ precision), is_sign_extended);
+ return result;
+}
+
+/* Return X & ~Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_and_not (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (LIKELY (xi.len + yi.len == 2))
+ {
+ val[0] = xi.ulow () & ~yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (and_not_large (val, xi.val, xi.len, yi.val, yi.len,
+ precision), is_sign_extended);
+ return result;
+}
+
+/* Return X | Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_or (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (LIKELY (xi.len + yi.len == 2))
+ {
+ val[0] = xi.ulow () | yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (or_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision), is_sign_extended);
+ return result;
+}
+
+/* Return X | ~Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_or_not (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (LIKELY (xi.len + yi.len == 2))
+ {
+ val[0] = xi.ulow () | ~yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (or_not_large (val, xi.val, xi.len, yi.val, yi.len,
+ precision), is_sign_extended);
+ return result;
+}
+
+/* Return X ^ Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_xor (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (LIKELY (xi.len + yi.len == 2))
+ {
+ val[0] = xi.ulow () ^ yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (xor_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision), is_sign_extended);
+ return result;
+}
+
+/* Return X + Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::add (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () + yi.ulow ();
+ result.set_len (1);
+ }
+ /* If the precision is known at compile time to be greater than
+ HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
+ knowing that (a) all bits in those HWIs are significant and
+ (b) the result has room for at least two HWIs. This provides
+ a fast path for things like offset_int and widest_int.
+
+ The STATIC_CONSTANT_P test prevents this path from being
+ used for wide_ints. wide_ints with precisions greater than
+ HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
+ point handling them inline. */
+ else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
+ && LIKELY (xi.len + yi.len == 2))
+ {
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl + yl;
+ val[0] = resultl;
+ val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
+ result.set_len (1 + (((resultl ^ xl) & (resultl ^ yl))
+ >> (HOST_BITS_PER_WIDE_INT - 1)));
+ }
+ else
+ result.set_len (add_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ UNSIGNED, 0));
+ return result;
+}
+
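+/* The two-HWI fast path above is worth a worked example. The top bit
+ of (resultl ^ xl) & (resultl ^ yl) is set exactly when xl and yl
+ share a sign that resultl does not, i.e. when the low HWI alone no
+ longer sign-extends to the full sum. With 64-bit HWIs and the
+ illustrative values xl = 0x7fffffffffffffff and yl = 1, resultl is
+ 0x8000000000000000; the tested bit is 1, so the length becomes 2 and
+ val[1] = 0 records the true nonnegative value 2^63. */
+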
+/* Return X + Y. Treat X and Y as having the signedness given by SGN
+ and indicate in *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::add (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl + yl;
+ if (sgn == SIGNED)
+ {
+ if ((((resultl ^ xl) & (resultl ^ yl))
+ >> (precision - 1)) & 1)
+ {
+ if (xl > resultl)
+ *overflow = OVF_UNDERFLOW;
+ else if (xl < resultl)
+ *overflow = OVF_OVERFLOW;
+ else
+ *overflow = OVF_NONE;
+ }
+ else
+ *overflow = OVF_NONE;
+ }
+ else
+ *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
+ < (xl << (HOST_BITS_PER_WIDE_INT - precision)))
+ ? OVF_OVERFLOW : OVF_NONE;
+ val[0] = resultl;
+ result.set_len (1);
+ }
+ else
+ result.set_len (add_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ sgn, overflow));
+ return result;
+}
+
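+/* Sketch of the signed overflow test above: ((resultl ^ xl) &
+ (resultl ^ yl)) >> (precision - 1) has its low bit set precisely
+ when the addends agree in sign but the result differs, the only way
+ signed addition can wrap. At an illustrative precision of 8,
+ xl = 0x70 and yl = 0x20 give resultl = 0x90; (0x90 ^ 0x70) &
+ (0x90 ^ 0x20) is 0xe0 & 0xb0 = 0xa0, whose bit 7 is set, and since
+ xl < resultl the sum wrapped upwards: *overflow = OVF_OVERFLOW. */
+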
+/* Return X - Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sub (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () - yi.ulow ();
+ result.set_len (1);
+ }
+ /* If the precision is known at compile time to be greater than
+ HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
+ knowing that (a) all bits in those HWIs are significant and
+ (b) the result has room for at least two HWIs. This provides
+ a fast path for things like offset_int and widest_int.
+
+ The STATIC_CONSTANT_P test prevents this path from being
+ used for wide_ints. wide_ints with precisions greater than
+ HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
+ point handling them inline. */
+ else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
+ && LIKELY (xi.len + yi.len == 2))
+ {
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl - yl;
+ val[0] = resultl;
+ val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
+ result.set_len (1 + (((resultl ^ xl) & (xl ^ yl))
+ >> (HOST_BITS_PER_WIDE_INT - 1)));
+ }
+ else
+ result.set_len (sub_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ UNSIGNED, 0));
+ return result;
+}
+
+/* Return X - Y. Treat X and Y as having the signedness given by SGN
+ and indicate in *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sub (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl - yl;
+ if (sgn == SIGNED)
+ {
+ if ((((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1)
+ {
+ if (xl > yl)
+ *overflow = OVF_UNDERFLOW;
+ else if (xl < yl)
+ *overflow = OVF_OVERFLOW;
+ else
+ *overflow = OVF_NONE;
+ }
+ else
+ *overflow = OVF_NONE;
+ }
+ else
+ *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
+ > (xl << (HOST_BITS_PER_WIDE_INT - precision)))
+ ? OVF_UNDERFLOW : OVF_NONE;
+ val[0] = resultl;
+ result.set_len (1);
+ }
+ else
+ result.set_len (sub_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ sgn, overflow));
+ return result;
+}
+
+/* Return X * Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mul (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () * yi.ulow ();
+ result.set_len (1);
+ }
+ else
+ result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len,
+ precision, UNSIGNED, 0, false));
+ return result;
+}
+
+/* Return X * Y. Treat X and Y as having the signedness given by SGN
+ and indicate in *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mul (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ result.set_len (mul_internal (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ sgn, overflow, false));
+ return result;
+}
+
+/* Return X * Y, treating both X and Y as signed values. Indicate in
+ *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smul (const T1 &x, const T2 &y, overflow_type *overflow)
+{
+ return mul (x, y, SIGNED, overflow);
+}
+
+/* Return X * Y, treating both X and Y as unsigned values. Indicate in
+ *OVERFLOW if the result overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umul (const T1 &x, const T2 &y, overflow_type *overflow)
+{
+ return mul (x, y, UNSIGNED, overflow);
+}
+
+/* Perform a widening multiplication of X and Y, extending the values
+ according to SGN, and return the high part of the result. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mul_high (const T1 &x, const T2 &y, signop sgn)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ result.set_len (mul_internal (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ sgn, 0, true));
+ return result;
+}
+
+/* Return X / Y, rounding towards 0. Treat X and Y as having the
+ signedness given by SGN. Indicate in *OVERFLOW if the result
+ overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ quotient.set_len (divmod_internal (quotient_val, 0, 0, xi.val, xi.len,
+ precision,
+ yi.val, yi.len, yi.precision,
+ sgn, overflow));
+ return quotient;
+}
+
+/* Return X / Y, rounding towards 0. Treat X and Y as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sdiv_trunc (const T1 &x, const T2 &y)
+{
+ return div_trunc (x, y, SIGNED);
+}
+
+/* Return X / Y, rounding towards 0. Treat X and Y as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::udiv_trunc (const T1 &x, const T2 &y)
+{
+ return div_trunc (x, y, UNSIGNED);
+}
+
+/* Return X / Y, rounding towards -inf. Treat X and Y as having the
+ signedness given by SGN. Indicate in *OVERFLOW if the result
+ overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+ if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
+ return quotient - 1;
+ return quotient;
+}
+
+/* Return X / Y, rounding towards -inf. Treat X and Y as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sdiv_floor (const T1 &x, const T2 &y)
+{
+ return div_floor (x, y, SIGNED);
+}
+
+/* Return X / Y, rounding towards -inf. Treat X and Y as unsigned values. */
+/* ??? Why do we have both this and udiv_trunc? Aren't they the same? */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::udiv_floor (const T1 &x, const T2 &y)
+{
+ return div_floor (x, y, UNSIGNED);
+}
+
+/* Return X / Y, rounding towards +inf. Treat X and Y as having the
+ signedness given by SGN. Indicate in *OVERFLOW if the result
+ overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+ if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
+ return quotient + 1;
+ return quotient;
+}
+
+/* Return X / Y, rounding towards +inf. Treat X and Y as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::udiv_ceil (const T1 &x, const T2 &y)
+{
+ return div_ceil (x, y, UNSIGNED);
+}
+
+/* Return X / Y, rounding towards nearest with ties away from zero.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the result overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+ if (remainder != 0)
+ {
+ if (sgn == SIGNED)
+ {
+ WI_BINARY_RESULT (T1, T2) abs_remainder = wi::abs (remainder);
+ if (wi::geu_p (abs_remainder, wi::sub (wi::abs (y), abs_remainder)))
+ {
+ if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
+ return quotient - 1;
+ else
+ return quotient + 1;
+ }
+ }
+ else
+ {
+ if (wi::geu_p (remainder, wi::sub (y, remainder)))
+ return quotient + 1;
+ }
+ }
+ return quotient;
+}
+
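+/* The division variants differ only in how a nonzero remainder
+ adjusts the truncated quotient. For signed 7 / -2 (illustrative):
+ div_trunc gives -3 (toward zero), div_floor gives -4 (the signs
+ differ and the remainder is nonzero), div_ceil gives -3, and
+ div_round gives -4 (nearest, with the tie 3.5 rounded away from
+ zero). */
+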
+/* Return X / Y, rounding towards 0. Treat X and Y as having the
+ signedness given by SGN. Store the remainder in *REMAINDER_PTR. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn,
+ WI_BINARY_RESULT (T1, T2) *remainder_ptr)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn, 0));
+ remainder.set_len (remainder_len);
+
+ *remainder_ptr = remainder;
+ return quotient;
+}
+
+/* Compute the greatest common divisor of two numbers A and B using
+ Euclid's algorithm. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::gcd (const T1 &a, const T2 &b, signop sgn)
+{
+ T1 x, y, z;
+
+ x = wi::abs (a);
+ y = wi::abs (b);
+
+ while (gt_p (x, 0, sgn))
+ {
+ z = mod_trunc (y, x, sgn);
+ y = x;
+ x = z;
+ }
+
+ return y;
+}
+
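+/* Euclid sketch with illustrative values: wi::gcd (wi::shwi (12, 32),
+ wi::shwi (18, 32), SIGNED) iterates (x, y) = (12, 18) -> (6, 12)
+ -> (0, 6) and returns 6. Note that the temporaries are of type T1,
+ so the routine assumes B's absolute value also fits in T1. */
+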
+/* Compute X / Y, rounding towards 0, and return the remainder.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
+{
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (remainder);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ divmod_internal (0, &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn, overflow);
+ remainder.set_len (remainder_len);
+
+ return remainder;
+}
+
+/* Compute X / Y, rounding towards 0, and return the remainder.
+ Treat X and Y as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smod_trunc (const T1 &x, const T2 &y)
+{
+ return mod_trunc (x, y, SIGNED);
+}
+
+/* Compute X / Y, rounding towards 0, and return the remainder.
+ Treat X and Y as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umod_trunc (const T1 &x, const T2 &y)
+{
+ return mod_trunc (x, y, UNSIGNED);
+}
+
+/* Compute X / Y, rounding towards -inf, and return the remainder.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+ if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
+ return remainder + y;
+ return remainder;
+}
+
+/* Compute X / Y, rounding towards -inf, and return the remainder.
+ Treat X and Y as unsigned values. */
+/* ??? Why do we have both this and umod_trunc? Aren't they the same? */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umod_floor (const T1 &x, const T2 &y)
+{
+ return mod_floor (x, y, UNSIGNED);
+}
+
+/* Compute X / Y, rounding towards +inf, and return the remainder.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+ if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
+ return remainder - y;
+ return remainder;
+}
+
+/* Compute X / Y, rounding towards nearest with ties away from zero,
+ and return the remainder. Treat X and Y as having the signedness
+ given by SGN. Indicate in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+ if (remainder != 0)
+ {
+ if (sgn == SIGNED)
+ {
+ WI_BINARY_RESULT (T1, T2) abs_remainder = wi::abs (remainder);
+ if (wi::geu_p (abs_remainder, wi::sub (wi::abs (y), abs_remainder)))
+ {
+ if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
+ return remainder + y;
+ else
+ return remainder - y;
+ }
+ }
+ else
+ {
+ if (wi::geu_p (remainder, wi::sub (y, remainder)))
+ return remainder - y;
+ }
+ }
+ return remainder;
+}
+
+/* Return true if X is a multiple of Y. Treat X and Y as having the
+ signedness given by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn)
+{
+ return wi::mod_trunc (x, y, sgn) == 0;
+}
+
+/* Return true if X is a multiple of Y, storing X / Y in *RES if so.
+ Treat X and Y as having the signedness given by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn,
+ WI_BINARY_RESULT (T1, T2) *res)
+{
+ WI_BINARY_RESULT (T1, T2) remainder;
+ WI_BINARY_RESULT (T1, T2) quotient
+ = divmod_trunc (x, y, sgn, &remainder);
+ if (remainder == 0)
+ {
+ *res = quotient;
+ return true;
+ }
+ return false;
+}
+
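+/* Usage sketch (illustrative values): for x = 12 and y = 4,
+ wi::multiple_of_p (x, y, SIGNED, &q) returns true and sets q to 3;
+ for y = 5 it returns false and leaves *RES untouched. */
+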
+/* Return X << Y. Return 0 if Y is greater than or equal to
+ the precision of X. */
+template <typename T1, typename T2>
+inline WI_UNARY_RESULT (T1)
+wi::lshift (const T1 &x, const T2 &y)
+{
+ WI_UNARY_RESULT_VAR (result, val, T1, x);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+ /* Handle the simple cases quickly. */
+ if (geu_p (yi, precision))
+ {
+ val[0] = 0;
+ result.set_len (1);
+ }
+ else
+ {
+ unsigned int shift = yi.to_uhwi ();
+ /* For fixed-precision integers like offset_int and widest_int,
+ handle the case where the shift value is constant and the
+ result is a single nonnegative HWI (meaning that we don't
+ need to worry about val[1]). This is particularly common
+ for converting a byte count to a bit count.
+
+ For variable-precision integers like wide_int, handle HWI
+ and sub-HWI integers inline. */
+ if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
+ ? (STATIC_CONSTANT_P (shift < HOST_BITS_PER_WIDE_INT - 1)
+ && xi.len == 1
+ && IN_RANGE (xi.val[0], 0, HOST_WIDE_INT_MAX >> shift))
+ : precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () << shift;
+ result.set_len (1);
+ }
+ else
+ result.set_len (lshift_large (val, xi.val, xi.len,
+ precision, shift));
+ }
+ return result;
+}
+
+/* Return X >> Y, using a logical shift. Return 0 if Y is greater than
+ or equal to the precision of X. */
+template <typename T1, typename T2>
+inline WI_UNARY_RESULT (T1)
+wi::lrshift (const T1 &x, const T2 &y)
+{
+ WI_UNARY_RESULT_VAR (result, val, T1, x);
+ /* Do things in the precision of the input rather than the output,
+ since the result can be no larger than that. */
+ WIDE_INT_REF_FOR (T1) xi (x);
+ WIDE_INT_REF_FOR (T2) yi (y);
+ /* Handle the simple cases quickly. */
+ if (geu_p (yi, xi.precision))
+ {
+ val[0] = 0;
+ result.set_len (1);
+ }
+ else
+ {
+ unsigned int shift = yi.to_uhwi ();
+ /* For fixed-precision integers like offset_int and widest_int,
+ handle the case where the shift value is constant and the
+ shifted value is a single nonnegative HWI (meaning that all
+ bits above the HWI are zero). This is particularly common
+ for converting a bit count to a byte count.
+
+ For variable-precision integers like wide_int, handle HWI
+ and sub-HWI integers inline. */
+ if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
+ ? (shift < HOST_BITS_PER_WIDE_INT
+ && xi.len == 1
+ && xi.val[0] >= 0)
+ : xi.precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.to_uhwi () >> shift;
+ result.set_len (1);
+ }
+ else
+ result.set_len (lrshift_large (val, xi.val, xi.len, xi.precision,
+ get_precision (result), shift));
+ }
+ return result;
+}
+
+/* Return X >> Y, using an arithmetic shift. Return a sign mask if
+ Y is greater than or equal to the precision of X. */
+template <typename T1, typename T2>
+inline WI_UNARY_RESULT (T1)
+wi::arshift (const T1 &x, const T2 &y)
+{
+ WI_UNARY_RESULT_VAR (result, val, T1, x);
+ /* Do things in the precision of the input rather than the output,
+ since the result can be no larger than that. */
+ WIDE_INT_REF_FOR (T1) xi (x);
+ WIDE_INT_REF_FOR (T2) yi (y);
+ /* Handle the simple cases quickly. */
+ if (geu_p (yi, xi.precision))
+ {
+ val[0] = sign_mask (x);
+ result.set_len (1);
+ }
+ else
+ {
+ unsigned int shift = yi.to_uhwi ();
+ if (xi.precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = sext_hwi (xi.ulow () >> shift, xi.precision - shift);
+ result.set_len (1, true);
+ }
+ else
+ result.set_len (arshift_large (val, xi.val, xi.len, xi.precision,
+ get_precision (result), shift));
+ }
+ return result;
+}
+
+/* Return X >> Y, using an arithmetic shift if SGN is SIGNED and a
+ logical shift otherwise. */
+template <typename T1, typename T2>
+inline WI_UNARY_RESULT (T1)
+wi::rshift (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == UNSIGNED)
+ return lrshift (x, y);
+ else
+ return arshift (x, y);
+}
+
+/* Return the result of rotating the low WIDTH bits of X left by Y
+ bits and zero-extending the result. Use a full-width rotate if
+ WIDTH is zero. */
+template <typename T1, typename T2>
+WI_UNARY_RESULT (T1)
+wi::lrotate (const T1 &x, const T2 &y, unsigned int width)
+{
+ unsigned int precision = get_binary_precision (x, x);
+ if (width == 0)
+ width = precision;
+ WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
+ WI_UNARY_RESULT (T1) left = wi::lshift (x, ymod);
+ WI_UNARY_RESULT (T1) right
+ = wi::lrshift (width != precision ? wi::zext (x, width) : x,
+ wi::sub (width, ymod));
+ if (width != precision)
+ return wi::zext (left, width) | right;
+ return left | right;
+}
+
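+/* Rotation sketch at an illustrative WIDTH of 8: lrotate (0xb4, 3, 8)
+ combines (0xb4 << 3) with (0xb4 >> 5) within the low 8 bits,
+ giving 0xa0 | 0x05 = 0xa5. A WIDTH of zero rotates over the full
+ precision instead. */
+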
+/* Return the result of rotating the low WIDTH bits of X right by Y
+ bits and zero-extending the result. Use a full-width rotate if
+ WIDTH is zero. */
+template <typename T1, typename T2>
+WI_UNARY_RESULT (T1)
+wi::rrotate (const T1 &x, const T2 &y, unsigned int width)
+{
+ unsigned int precision = get_binary_precision (x, x);
+ if (width == 0)
+ width = precision;
+ WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
+ WI_UNARY_RESULT (T1) right
+ = wi::lrshift (width != precision ? wi::zext (x, width) : x, ymod);
+ WI_UNARY_RESULT (T1) left = wi::lshift (x, wi::sub (width, ymod));
+ if (width != precision)
+ return wi::zext (left, width) | right;
+ return left | right;
+}
+
+/* Return 0 if the number of 1s in X is even and 1 if the number of 1s
+ is odd. */
+inline int
+wi::parity (const wide_int_ref &x)
+{
+ return popcount (x) & 1;
+}
+
+/* Extract WIDTH bits from X, starting at BITPOS. */
+template <typename T>
+inline unsigned HOST_WIDE_INT
+wi::extract_uhwi (const T &x, unsigned int bitpos, unsigned int width)
+{
+ unsigned precision = get_precision (x);
+ if (precision < bitpos + width)
+ precision = bitpos + width;
+ WIDE_INT_REF_FOR (T) xi (x, precision);
+
+ /* Handle this rare case after the above, so that we assert about
+ bogus BITPOS values. */
+ if (width == 0)
+ return 0;
+
+ unsigned int start = bitpos / HOST_BITS_PER_WIDE_INT;
+ unsigned int shift = bitpos % HOST_BITS_PER_WIDE_INT;
+ unsigned HOST_WIDE_INT res = xi.elt (start);
+ res >>= shift;
+ if (shift + width > HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT upper = xi.elt (start + 1);
+ res |= upper << (-shift % HOST_BITS_PER_WIDE_INT);
+ }
+ return zext_hwi (res, width);
+}
+
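+/* Extraction sketch (illustrative): for x = 0x1234,
+ wi::extract_uhwi (x, 4, 8) shifts right by 4 and zero-extends the
+ low 8 bits of the result, yielding 0x23. When BITPOS + WIDTH
+ straddles a HWI boundary, the high part is fetched from the next
+ element, as coded above. */
+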
+/* Return the minimum precision needed to store X with sign SGN. */
+template <typename T>
+inline unsigned int
+wi::min_precision (const T &x, signop sgn)
+{
+ if (sgn == SIGNED)
+ return get_precision (x) - clrsb (x);
+ else
+ return get_precision (x) - clz (x);
+}
+
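+/* For example (illustrative), min_precision of a 32-bit -1 with
+ SGN == SIGNED is 1, since every bit is a redundant copy of the
+ sign, while the same value treated as UNSIGNED needs all 32 bits
+ to hold 0xffffffff. */
+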
+#define SIGNED_BINARY_PREDICATE(OP, F) \
+ template <typename T1, typename T2> \
+ inline WI_SIGNED_BINARY_PREDICATE_RESULT (T1, T2) \
+ OP (const T1 &x, const T2 &y) \
+ { \
+ return wi::F (x, y); \
+ }
+
+SIGNED_BINARY_PREDICATE (operator <, lts_p)
+SIGNED_BINARY_PREDICATE (operator <=, les_p)
+SIGNED_BINARY_PREDICATE (operator >, gts_p)
+SIGNED_BINARY_PREDICATE (operator >=, ges_p)
+
+#undef SIGNED_BINARY_PREDICATE
+
+#define UNARY_OPERATOR(OP, F) \
+ template<typename T> \
+ WI_UNARY_RESULT (generic_wide_int<T>) \
+ OP (const generic_wide_int<T> &x) \
+ { \
+ return wi::F (x); \
+ }
+
+#define BINARY_PREDICATE(OP, F) \
+ template<typename T1, typename T2> \
+ WI_BINARY_PREDICATE_RESULT (T1, T2) \
+ OP (const T1 &x, const T2 &y) \
+ { \
+ return wi::F (x, y); \
+ }
+
+#define BINARY_OPERATOR(OP, F) \
+ template<typename T1, typename T2> \
+ WI_BINARY_OPERATOR_RESULT (T1, T2) \
+ OP (const T1 &x, const T2 &y) \
+ { \
+ return wi::F (x, y); \
+ }
+
+#define SHIFT_OPERATOR(OP, F) \
+ template<typename T1, typename T2> \
+ WI_BINARY_OPERATOR_RESULT (T1, T1) \
+ OP (const T1 &x, const T2 &y) \
+ { \
+ return wi::F (x, y); \
+ }
+
+UNARY_OPERATOR (operator ~, bit_not)
+UNARY_OPERATOR (operator -, neg)
+BINARY_PREDICATE (operator ==, eq_p)
+BINARY_PREDICATE (operator !=, ne_p)
+BINARY_OPERATOR (operator &, bit_and)
+BINARY_OPERATOR (operator |, bit_or)
+BINARY_OPERATOR (operator ^, bit_xor)
+BINARY_OPERATOR (operator +, add)
+BINARY_OPERATOR (operator -, sub)
+BINARY_OPERATOR (operator *, mul)
+SHIFT_OPERATOR (operator <<, lshift)
+
+#undef UNARY_OPERATOR
+#undef BINARY_PREDICATE
+#undef BINARY_OPERATOR
+#undef SHIFT_OPERATOR
+
+template <typename T1, typename T2>
+inline WI_SIGNED_SHIFT_RESULT (T1, T2)
+operator >> (const T1 &x, const T2 &y)
+{
+ return wi::arshift (x, y);
+}
+
+template <typename T1, typename T2>
+inline WI_SIGNED_SHIFT_RESULT (T1, T2)
+operator / (const T1 &x, const T2 &y)
+{
+ return wi::sdiv_trunc (x, y);
+}
+
+template <typename T1, typename T2>
+inline WI_SIGNED_SHIFT_RESULT (T1, T2)
+operator % (const T1 &x, const T2 &y)
+{
+ return wi::smod_trunc (x, y);
+}
+
+template<typename T>
+void
+gt_ggc_mx (generic_wide_int <T> *)
+{
+}
+
+template<typename T>
+void
+gt_pch_nx (generic_wide_int <T> *)
+{
+}
+
+template<typename T>
+void
+gt_pch_nx (generic_wide_int <T> *, gt_pointer_operator, void *)
+{
+}
+
+template<int N>
+void
+gt_ggc_mx (trailing_wide_ints <N> *)
+{
+}
+
+template<int N>
+void
+gt_pch_nx (trailing_wide_ints <N> *)
+{
+}
+
+template<int N>
+void
+gt_pch_nx (trailing_wide_ints <N> *, gt_pointer_operator, void *)
+{
+}
+
+namespace wi
+{
+ /* Used for overloaded functions in which the only other acceptable
+ scalar type is a pointer. It stops a plain 0 from being treated
+ as a null pointer. */
+ struct never_used1 {};
+ struct never_used2 {};
+
+ wide_int min_value (unsigned int, signop);
+ wide_int min_value (never_used1 *);
+ wide_int min_value (never_used2 *);
+ wide_int max_value (unsigned int, signop);
+ wide_int max_value (never_used1 *);
+ wide_int max_value (never_used2 *);
+
+ /* FIXME: this is target dependent, so should be elsewhere.
+ It also seems to assume that CHAR_BIT == BITS_PER_UNIT. */
+ wide_int from_buffer (const unsigned char *, unsigned int);
+
+#ifndef GENERATOR_FILE
+ void to_mpz (const wide_int_ref &, mpz_t, signop);
+#endif
+
+ wide_int mask (unsigned int, bool, unsigned int);
+ wide_int shifted_mask (unsigned int, unsigned int, bool, unsigned int);
+ wide_int set_bit_in_zero (unsigned int, unsigned int);
+ wide_int insert (const wide_int &x, const wide_int &y, unsigned int,
+ unsigned int);
+ wide_int round_down_for_mask (const wide_int &, const wide_int &);
+ wide_int round_up_for_mask (const wide_int &, const wide_int &);
+
+ wide_int mod_inv (const wide_int &a, const wide_int &b);
+
+ template <typename T>
+ T mask (unsigned int, bool);
+
+ template <typename T>
+ T shifted_mask (unsigned int, unsigned int, bool);
+
+ template <typename T>
+ T set_bit_in_zero (unsigned int);
+
+ unsigned int mask (HOST_WIDE_INT *, unsigned int, bool, unsigned int);
+ unsigned int shifted_mask (HOST_WIDE_INT *, unsigned int, unsigned int,
+ bool, unsigned int);
+ unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, bool);
+}
+
+/* Return a PRECISION-bit integer in which the low WIDTH bits are set
+ and the other bits are clear, or the inverse if NEGATE_P. */
+inline wide_int
+wi::mask (unsigned int width, bool negate_p, unsigned int precision)
+{
+ wide_int result = wide_int::create (precision);
+ result.set_len (mask (result.write_val (), width, negate_p, precision));
+ return result;
+}
+
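+/* Mask sketch (illustrative): wi::mask (4, false, 8) is 0x0f and
+ wi::mask (4, true, 8) is 0xf0; the shifted variant below,
+ wi::shifted_mask (2, 3, false, 8), is 0x1c. */
+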
+/* Return a PRECISION-bit integer in which the low START bits are clear,
+ the next WIDTH bits are set, and the other bits are clear,
+ or the inverse if NEGATE_P. */
+inline wide_int
+wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p,
+ unsigned int precision)
+{
+ wide_int result = wide_int::create (precision);
+ result.set_len (shifted_mask (result.write_val (), start, width, negate_p,
+ precision));
+ return result;
+}
+
+/* Return a PRECISION-bit integer in which bit BIT is set and all the
+ others are clear. */
+inline wide_int
+wi::set_bit_in_zero (unsigned int bit, unsigned int precision)
+{
+ return shifted_mask (bit, 1, false, precision);
+}
+
+/* Return an integer of type T in which the low WIDTH bits are set
+ and the other bits are clear, or the inverse if NEGATE_P. */
+template <typename T>
+inline T
+wi::mask (unsigned int width, bool negate_p)
+{
+ STATIC_ASSERT (wi::int_traits<T>::precision);
+ T result;
+ result.set_len (mask (result.write_val (), width, negate_p,
+ wi::int_traits <T>::precision));
+ return result;
+}
+
+/* Return an integer of type T in which the low START bits are clear,
+ the next WIDTH bits are set, and the other bits are clear, or the
+ inverse if NEGATE_P. */
+template <typename T>
+inline T
+wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p)
+{
+ STATIC_ASSERT (wi::int_traits<T>::precision);
+ T result;
+ result.set_len (shifted_mask (result.write_val (), start, width,
+ negate_p,
+ wi::int_traits <T>::precision));
+ return result;
+}
+
+/* Return an integer of type T in which bit BIT is set and all the
+ others are clear. */
+template <typename T>
+inline T
+wi::set_bit_in_zero (unsigned int bit)
+{
+ return shifted_mask <T> (bit, 1, false);
+}
+
+/* Accumulate a set of overflows into OVERFLOW. */
+
+inline void
+wi::accumulate_overflow (wi::overflow_type &overflow,
+ wi::overflow_type suboverflow)
+{
+ if (!suboverflow)
+ return;
+ if (!overflow)
+ overflow = suboverflow;
+ else if (overflow != suboverflow)
+ overflow = wi::OVF_UNKNOWN;
+}
+
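+/* Accumulation sketch: a suboverflow of OVF_NONE leaves OVERFLOW
+ untouched, a first real overflow is recorded as-is, and mixing
+ e.g. OVF_OVERFLOW with OVF_UNDERFLOW degrades to OVF_UNKNOWN. */
+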
+#endif /* WIDE_INT_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/plugin/include/xcoff.h b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/xcoff.h
new file mode 100644
index 0000000..7933994
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/plugin/include/xcoff.h
@@ -0,0 +1,40 @@
+/* Copyright (C) 2003-2023 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_XCOFF_H
+#define GCC_XCOFF_H
+
+/* Storage classes in XCOFF object file format designed for DBX's use.
+ This info is from the `Files Reference' manual for IBM's AIX version 3
+ for the RS6000. */
+
+#define C_GSYM 0x80
+#define C_LSYM 0x81
+#define C_PSYM 0x82
+#define C_RSYM 0x83
+#define C_RPSYM 0x84
+#define C_STSYM 0x85
+
+#define C_BCOMM 0x87
+#define C_ECOML 0x88
+#define C_ECOMM 0x89
+#define C_DECL 0x8c
+#define C_ENTRY 0x8d
+#define C_FUN 0x8e
+
+#endif /* GCC_XCOFF_H */
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtbegin.o
new file mode 100644
index 0000000..1fd4791
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtend.o
new file mode 100644
index 0000000..5a37ac8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtfastmath.o
new file mode 100644
index 0000000..3297223
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crti.o
new file mode 100644
index 0000000..f6e6da7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtn.o
new file mode 100644
index 0000000..ebdaeda
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/libcaf_single.a
new file mode 100644
index 0000000..f224054
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/libgcc.a
new file mode 100644
index 0000000..f198e0c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/libgcov.a
new file mode 100644
index 0000000..af540e5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/nofp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtbegin.o
new file mode 100644
index 0000000..6417c27
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtend.o
new file mode 100644
index 0000000..289ef71
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtfastmath.o
new file mode 100644
index 0000000..4d377ea
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crti.o
new file mode 100644
index 0000000..8fe7681
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtn.o
new file mode 100644
index 0000000..390fd02
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/libcaf_single.a
new file mode 100644
index 0000000..4c207e3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/libgcc.a
new file mode 100644
index 0000000..ab0ac0b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/libgcov.a
new file mode 100644
index 0000000..9506b36
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v6-m/nofp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtbegin.o
new file mode 100644
index 0000000..58c6912
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtend.o
new file mode 100644
index 0000000..a0a5c00
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtfastmath.o
new file mode 100644
index 0000000..3cf957a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crti.o
new file mode 100644
index 0000000..efff008
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtn.o
new file mode 100644
index 0000000..3e5b6e0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/libcaf_single.a
new file mode 100644
index 0000000..4c002b6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/libgcc.a
new file mode 100644
index 0000000..9ffb71e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/libgcov.a
new file mode 100644
index 0000000..7727cc5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtbegin.o
new file mode 100644
index 0000000..4a4a89f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtend.o
new file mode 100644
index 0000000..940f823
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtfastmath.o
new file mode 100644
index 0000000..a30e86e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crti.o
new file mode 100644
index 0000000..efff008
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtn.o
new file mode 100644
index 0000000..3e5b6e0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/libcaf_single.a
new file mode 100644
index 0000000..4ebac5d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/libgcc.a
new file mode 100644
index 0000000..d07a9a3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/libgcov.a
new file mode 100644
index 0000000..6dd1825
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7+fp/softfp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtbegin.o
new file mode 100644
index 0000000..a05dae6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtend.o
new file mode 100644
index 0000000..3c4be0c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtfastmath.o
new file mode 100644
index 0000000..2174111
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crti.o
new file mode 100644
index 0000000..cf621f7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtn.o
new file mode 100644
index 0000000..f9b6ca0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/libcaf_single.a
new file mode 100644
index 0000000..854dffb
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/libgcc.a
new file mode 100644
index 0000000..cc99a7a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/libgcov.a
new file mode 100644
index 0000000..4b8bcdd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtbegin.o
new file mode 100644
index 0000000..3b4ea58
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtend.o
new file mode 100644
index 0000000..f8b8263
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtfastmath.o
new file mode 100644
index 0000000..f28ae8a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crti.o
new file mode 100644
index 0000000..cf621f7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtn.o
new file mode 100644
index 0000000..f9b6ca0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/libcaf_single.a
new file mode 100644
index 0000000..5746f83
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/libgcc.a
new file mode 100644
index 0000000..e4e26d0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/libgcov.a
new file mode 100644
index 0000000..a7e7248
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+fp/softfp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtbegin.o
new file mode 100644
index 0000000..2ee5874
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtend.o
new file mode 100644
index 0000000..e8eeeaf
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtfastmath.o
new file mode 100644
index 0000000..8d18afd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crti.o
new file mode 100644
index 0000000..d5b7f6d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtn.o
new file mode 100644
index 0000000..dee1f9a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/libcaf_single.a
new file mode 100644
index 0000000..ce01f46
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/libgcc.a
new file mode 100644
index 0000000..04b7da7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/libgcov.a
new file mode 100644
index 0000000..8f0fc96
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtbegin.o
new file mode 100644
index 0000000..eb6d0bd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtend.o
new file mode 100644
index 0000000..c8f2b23
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtfastmath.o
new file mode 100644
index 0000000..8937448
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crti.o
new file mode 100644
index 0000000..d5b7f6d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtn.o
new file mode 100644
index 0000000..dee1f9a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/libcaf_single.a
new file mode 100644
index 0000000..a774d17
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/libgcc.a
new file mode 100644
index 0000000..6dba25c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/libgcov.a
new file mode 100644
index 0000000..dd4e826
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a+simd/softfp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtbegin.o
new file mode 100644
index 0000000..a3a2a97
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtend.o
new file mode 100644
index 0000000..2994cd3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtfastmath.o
new file mode 100644
index 0000000..adefd40
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crti.o
new file mode 100644
index 0000000..cdbe7ec
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtn.o
new file mode 100644
index 0000000..9acd795
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/libcaf_single.a
new file mode 100644
index 0000000..f5bd615
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/libgcc.a
new file mode 100644
index 0000000..33fc4a3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/libgcov.a
new file mode 100644
index 0000000..8953f71
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-a/nofp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtbegin.o
new file mode 100644
index 0000000..eeb469c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtend.o
new file mode 100644
index 0000000..2713381
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtfastmath.o
new file mode 100644
index 0000000..3778224
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crti.o
new file mode 100644
index 0000000..f101b2d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtn.o
new file mode 100644
index 0000000..69fdc4e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/libcaf_single.a
new file mode 100644
index 0000000..0f883c8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/libgcc.a
new file mode 100644
index 0000000..1d1f3e2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/libgcov.a
new file mode 100644
index 0000000..aa3fdd8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-m/nofp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtbegin.o
new file mode 100644
index 0000000..3ac138f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtend.o
new file mode 100644
index 0000000..5d2e57b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtfastmath.o
new file mode 100644
index 0000000..dfe9edb
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crti.o
new file mode 100644
index 0000000..2556eb6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtn.o
new file mode 100644
index 0000000..30eccf0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/libcaf_single.a
new file mode 100644
index 0000000..9a15650
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/libgcc.a
new file mode 100644
index 0000000..8ddc881
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/libgcov.a
new file mode 100644
index 0000000..9e8573b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtbegin.o
new file mode 100644
index 0000000..b96e1cd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtend.o
new file mode 100644
index 0000000..60ea5ae
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtfastmath.o
new file mode 100644
index 0000000..32ddd9f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crti.o
new file mode 100644
index 0000000..2556eb6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtn.o
new file mode 100644
index 0000000..30eccf0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/libcaf_single.a
new file mode 100644
index 0000000..a660944
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/libgcc.a
new file mode 100644
index 0000000..ed843da
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/libgcov.a
new file mode 100644
index 0000000..10421aa
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7-r+fp.sp/softfp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtbegin.o
new file mode 100644
index 0000000..8347756
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtend.o
new file mode 100644
index 0000000..3b57004
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtfastmath.o
new file mode 100644
index 0000000..2270b5f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crti.o
new file mode 100644
index 0000000..b3c9c86
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtn.o
new file mode 100644
index 0000000..19f99c5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/libcaf_single.a
new file mode 100644
index 0000000..5d8feae
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/libgcc.a
new file mode 100644
index 0000000..49c69f1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/libgcov.a
new file mode 100644
index 0000000..c9165ce
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7/nofp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtbegin.o
new file mode 100644
index 0000000..38ec223
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtend.o
new file mode 100644
index 0000000..7208ff3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtfastmath.o
new file mode 100644
index 0000000..5cc4753
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crti.o
new file mode 100644
index 0000000..5e4d3b6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtn.o
new file mode 100644
index 0000000..4cc10dd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/libcaf_single.a
new file mode 100644
index 0000000..2c20d74
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/libgcc.a
new file mode 100644
index 0000000..2fbab4d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/libgcov.a
new file mode 100644
index 0000000..25bd674
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtbegin.o
new file mode 100644
index 0000000..c958635
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtend.o
new file mode 100644
index 0000000..5adeab9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtfastmath.o
new file mode 100644
index 0000000..9617a6a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crti.o
new file mode 100644
index 0000000..5e4d3b6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtn.o
new file mode 100644
index 0000000..4cc10dd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/libcaf_single.a
new file mode 100644
index 0000000..87e8930
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/libgcc.a
new file mode 100644
index 0000000..633fa00
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/libgcov.a
new file mode 100644
index 0000000..64456d1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+dp/softfp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtbegin.o
new file mode 100644
index 0000000..37b17f9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtend.o
new file mode 100644
index 0000000..949b852
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtfastmath.o
new file mode 100644
index 0000000..03e81b6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crti.o
new file mode 100644
index 0000000..8103ec7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtn.o
new file mode 100644
index 0000000..7d4b4b2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/libcaf_single.a
new file mode 100644
index 0000000..3413872
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/libgcc.a
new file mode 100644
index 0000000..96d80de
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/libgcov.a
new file mode 100644
index 0000000..af87f04
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtbegin.o
new file mode 100644
index 0000000..a1cb0c3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtend.o
new file mode 100644
index 0000000..94eaead
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtfastmath.o
new file mode 100644
index 0000000..6292e06
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crti.o
new file mode 100644
index 0000000..8103ec7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtn.o
new file mode 100644
index 0000000..7d4b4b2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/libcaf_single.a
new file mode 100644
index 0000000..133a149
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/libgcc.a
new file mode 100644
index 0000000..b783fe6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/libgcov.a
new file mode 100644
index 0000000..bd0b2ae
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m+fp/softfp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtbegin.o
new file mode 100644
index 0000000..cb97f17
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtend.o
new file mode 100644
index 0000000..5c2d488
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtfastmath.o
new file mode 100644
index 0000000..b40633e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crti.o
new file mode 100644
index 0000000..8f487c1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtn.o
new file mode 100644
index 0000000..b85a23d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/libcaf_single.a
new file mode 100644
index 0000000..e118eb3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/libgcc.a
new file mode 100644
index 0000000..2aaf2cf
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/libgcov.a
new file mode 100644
index 0000000..81236f1
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7e-m/nofp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtbegin.o
new file mode 100644
index 0000000..da4364d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtend.o
new file mode 100644
index 0000000..b6f4d62
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtfastmath.o
new file mode 100644
index 0000000..985ba5f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crti.o
new file mode 100644
index 0000000..567f694
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtn.o
new file mode 100644
index 0000000..d04776d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/libcaf_single.a
new file mode 100644
index 0000000..a8dd57c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/libgcc.a
new file mode 100644
index 0000000..96e52f6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/libgcov.a
new file mode 100644
index 0000000..3d2d5ab
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtbegin.o
new file mode 100644
index 0000000..c418621
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtend.o
new file mode 100644
index 0000000..1368f3e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtfastmath.o
new file mode 100644
index 0000000..40720db
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crti.o
new file mode 100644
index 0000000..567f694
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtn.o
new file mode 100644
index 0000000..d04776d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/libcaf_single.a
new file mode 100644
index 0000000..e223378
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/libgcc.a
new file mode 100644
index 0000000..985812e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/libgcov.a
new file mode 100644
index 0000000..78ad7c4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v7ve+simd/softfp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtbegin.o
new file mode 100644
index 0000000..38aca59
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtend.o
new file mode 100644
index 0000000..bc153d2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtfastmath.o
new file mode 100644
index 0000000..d4b26e7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crti.o
new file mode 100644
index 0000000..541ecdd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtn.o
new file mode 100644
index 0000000..cfee971
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/libcaf_single.a
new file mode 100644
index 0000000..e14e635
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/libgcc.a
new file mode 100644
index 0000000..86c8711
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/libgcov.a
new file mode 100644
index 0000000..837a0cf
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtbegin.o
new file mode 100644
index 0000000..2c5b9cc
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtend.o
new file mode 100644
index 0000000..e840749
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtfastmath.o
new file mode 100644
index 0000000..d28d95b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crti.o
new file mode 100644
index 0000000..541ecdd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtn.o
new file mode 100644
index 0000000..cfee971
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/libcaf_single.a
new file mode 100644
index 0000000..7b1c475
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/libgcc.a
new file mode 100644
index 0000000..c6b41b4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/libgcov.a
new file mode 100644
index 0000000..af20fae
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a+simd/softfp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtbegin.o
new file mode 100644
index 0000000..5aa8b0a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtend.o
new file mode 100644
index 0000000..7b88a82
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtfastmath.o
new file mode 100644
index 0000000..4420e2f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crti.o
new file mode 100644
index 0000000..94755a7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtn.o
new file mode 100644
index 0000000..82703ca
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/libcaf_single.a
new file mode 100644
index 0000000..312d3ae
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/libgcc.a
new file mode 100644
index 0000000..bfd8784
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/libgcov.a
new file mode 100644
index 0000000..b8eaca8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-a/nofp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtbegin.o
new file mode 100644
index 0000000..5eb938a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtend.o
new file mode 100644
index 0000000..b821703
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtfastmath.o
new file mode 100644
index 0000000..8f45216
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crti.o
new file mode 100644
index 0000000..df2a9c3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtn.o
new file mode 100644
index 0000000..97a46e0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/libcaf_single.a
new file mode 100644
index 0000000..5d25f35
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/libgcc.a
new file mode 100644
index 0000000..8c9b836
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/libgcov.a
new file mode 100644
index 0000000..3b51487
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.base/nofp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtbegin.o
new file mode 100644
index 0000000..c36f038
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtend.o
new file mode 100644
index 0000000..c5c7f60
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtfastmath.o
new file mode 100644
index 0000000..8370ed5
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crti.o
new file mode 100644
index 0000000..606c1cf
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtn.o
new file mode 100644
index 0000000..15986ff
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/libcaf_single.a
new file mode 100644
index 0000000..7365610
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/libgcc.a
new file mode 100644
index 0000000..778c65b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/libgcov.a
new file mode 100644
index 0000000..977a30e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtbegin.o
new file mode 100644
index 0000000..825223d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtend.o
new file mode 100644
index 0000000..4a8a0c6
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtfastmath.o
new file mode 100644
index 0000000..bb698a7
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crti.o
new file mode 100644
index 0000000..606c1cf
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtn.o
new file mode 100644
index 0000000..15986ff
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/libcaf_single.a
new file mode 100644
index 0000000..eccbf4a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/libgcc.a
new file mode 100644
index 0000000..9f034f8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/libgcov.a
new file mode 100644
index 0000000..0d9855c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+dp/softfp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtbegin.o
new file mode 100644
index 0000000..51b746c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtend.o
new file mode 100644
index 0000000..4dc63d9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtfastmath.o
new file mode 100644
index 0000000..52f539c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crti.o
new file mode 100644
index 0000000..b3e8c84
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtn.o
new file mode 100644
index 0000000..e7dd69a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/libcaf_single.a
new file mode 100644
index 0000000..455d90c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/libgcc.a
new file mode 100644
index 0000000..2d2e669
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/libgcov.a
new file mode 100644
index 0000000..774f104
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtbegin.o
new file mode 100644
index 0000000..164e731
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtend.o
new file mode 100644
index 0000000..d107986
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtfastmath.o
new file mode 100644
index 0000000..2348684
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crti.o
new file mode 100644
index 0000000..b3e8c84
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtn.o
new file mode 100644
index 0000000..e7dd69a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/libcaf_single.a
new file mode 100644
index 0000000..7cb5e50
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/libgcc.a
new file mode 100644
index 0000000..78de0ad
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/libgcov.a
new file mode 100644
index 0000000..aa38886
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main+fp/softfp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtbegin.o
new file mode 100644
index 0000000..8ed9a46
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtend.o
new file mode 100644
index 0000000..15d6d19
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtfastmath.o
new file mode 100644
index 0000000..841ad59
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crti.o
new file mode 100644
index 0000000..6ef829c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtn.o
new file mode 100644
index 0000000..8d5b583
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/libcaf_single.a
new file mode 100644
index 0000000..6bca241
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/libgcc.a
new file mode 100644
index 0000000..7b9992a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/libgcov.a
new file mode 100644
index 0000000..4f9b0a8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8-m.main/nofp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtbegin.o
new file mode 100644
index 0000000..f9e8130
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtend.o
new file mode 100644
index 0000000..9f6c2e4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtfastmath.o
new file mode 100644
index 0000000..555b5c3
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crti.o
new file mode 100644
index 0000000..ba19204
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtn.o
new file mode 100644
index 0000000..c07051b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/libcaf_single.a
new file mode 100644
index 0000000..2fe9f34
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/libgcc.a
new file mode 100644
index 0000000..cc49063
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/libgcov.a
new file mode 100644
index 0000000..6d5029b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+mve/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtbegin.o
new file mode 100644
index 0000000..ecb21ba
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtend.o
new file mode 100644
index 0000000..c26a40c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtfastmath.o
new file mode 100644
index 0000000..bee9b3f
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crti.o
new file mode 100644
index 0000000..17bdefd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtn.o
new file mode 100644
index 0000000..b10e5b2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/libcaf_single.a
new file mode 100644
index 0000000..1cec3ae
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/libgcc.a
new file mode 100644
index 0000000..0c6f4df
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/libgcov.a
new file mode 100644
index 0000000..108927a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtbegin.o
new file mode 100644
index 0000000..0b277de
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtend.o
new file mode 100644
index 0000000..8840407
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtfastmath.o
new file mode 100644
index 0000000..9231cce
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crti.o
new file mode 100644
index 0000000..17bdefd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtn.o
new file mode 100644
index 0000000..b10e5b2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/libcaf_single.a
new file mode 100644
index 0000000..825b0cd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/libgcc.a
new file mode 100644
index 0000000..977ede2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/libgcov.a
new file mode 100644
index 0000000..b731935
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+dp/bp/softfp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtbegin.o
new file mode 100644
index 0000000..71d0948
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtend.o
new file mode 100644
index 0000000..039fae0
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtfastmath.o
new file mode 100644
index 0000000..a5f31c9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crti.o
new file mode 100644
index 0000000..2e247c8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtn.o
new file mode 100644
index 0000000..6852b3a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/libcaf_single.a
new file mode 100644
index 0000000..b439db4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/libgcc.a
new file mode 100644
index 0000000..1693876
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/libgcov.a
new file mode 100644
index 0000000..b9ff6b2
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtbegin.o
new file mode 100644
index 0000000..f631376
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtend.o
new file mode 100644
index 0000000..bfd673e
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtfastmath.o
new file mode 100644
index 0000000..ef6285a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crti.o
new file mode 100644
index 0000000..2e247c8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtn.o
new file mode 100644
index 0000000..6852b3a
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/libcaf_single.a
new file mode 100644
index 0000000..743861c
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/libgcc.a
new file mode 100644
index 0000000..ff82944
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/libgcov.a
new file mode 100644
index 0000000..ad67046
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+fp/bp/softfp/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtbegin.o
new file mode 100644
index 0000000..e6e41e9
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtend.o
new file mode 100644
index 0000000..5bd4760
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtfastmath.o
new file mode 100644
index 0000000..35510c4
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crti.o
new file mode 100644
index 0000000..ba19204
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtn.o
new file mode 100644
index 0000000..c07051b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/libcaf_single.a
new file mode 100644
index 0000000..636d3fe
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/libgcc.a
new file mode 100644
index 0000000..fb6fdc8
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/libgcov.a
new file mode 100644
index 0000000..2edd26d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti+mve/bp/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtbegin.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtbegin.o
new file mode 100644
index 0000000..b24b982
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtend.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtend.o
new file mode 100644
index 0000000..d9237cd
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtfastmath.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtfastmath.o
new file mode 100644
index 0000000..104b88d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crti.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crti.o
new file mode 100644
index 0000000..be6b82d
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crti.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtn.o b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtn.o
new file mode 100644
index 0000000..2b4c475
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/crtn.o
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/finclude/ieee_arithmetic.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/finclude/ieee_arithmetic.mod
new file mode 100644
index 0000000..a584885
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/finclude/ieee_exceptions.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/finclude/ieee_exceptions.mod
new file mode 100644
index 0000000..7e66a32
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/finclude/ieee_features.mod b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/finclude/ieee_features.mod
new file mode 100644
index 0000000..8e5bd56
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/libcaf_single.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/libcaf_single.a
new file mode 100644
index 0000000..f3150aa
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/libgcc.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/libgcc.a
new file mode 100644
index 0000000..3383a82
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/libgcov.a b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/libgcov.a
new file mode 100644
index 0000000..6fceb6b
--- /dev/null
+++ b/lib/gcc/arm-none-eabi/13.2.1/thumb/v8.1-m.main+pacbti/bp/nofp/libgcov.a
Binary files differ